seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
41378387257 | from django import urls
from django.conf.urls import url
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# URL routes for the fleet-manager app.
# NOTE(review): django.conf.urls.url() is deprecated and removed in Django 4.0;
# on a modern Django these regex routes would need re_path() — confirm the
# project's Django version before upgrading.
urlpatterns = [
    # Regex routes: each captures an id/type segment and passes it to the view
    # as a keyword argument (e.g. otype, pid, vid, ...).
    url(r'^allVehicles/(?P<otype>[\w]+)/$',
        views.filterVehicle_view, name='VehicleFilter'),
    url(r'^editPerson/(?P<pid>[\w]+)/$', views.editPerson, name='editPerson'),
    url(r'^editVehicle/(?P<vid>[\w]+)/$',
        views.editVehicle, name='editVehicle'),
    url(r'^updatePerson/(?P<upid>[\w]+)/$',
        views.person_update_view, name='person_update_view'),
    url(r'^updateManager/(?P<umid>[\w]+)/$',
        views.manager_update_view, name='manager_update_view'),
    url(r'^updateVehicle/(?P<uvid>[\w]+)/$',
        views.vehicle_update_view, name='vehicle_update_view'),
    url(r'^addNewManagerVehicle/(?P<mid>[\w]+)/$',
        views.add_manager_vehicle_view, name='add_manager_vehicle_view'),
    url(r'^editRecord/(?P<rid>[\w]+)/$',
        views.edit_manager_record, name='edit_manager_record'),
    # Plain path() routes (no captured parameters).
    path('', views.fleetManager, name='fleetManager'),
    path('allVehicles', views.allVehicles, name='allVehicles'),
    path('addVehicle', views.addVehicle, name='addVehicle'),
    path('addManager', views.addManager, name='addManager'),
    path('newManager', views.newManager, name='newManager'),
    path('login', views.login, name='login'),
    path('rentVehicle', views.rentVehicle, name='rentVehicle'),
    path('selectedVehicle', views.selectedVehicle_view,
         name='selectedVehicleView'),
    path('managedVehicle', views.managedVehicle, name='managedVehicle'),
    path('adminPanel', views.adminPanel, name='adminPanel'),
    path('addManagerVehicle', views.addManagerVehicle, name='addManagerVehicle'),
    path('addPerson', views.addPerson, name='addPerson'),
    path('addService', views.addService, name='addService'),
    path('addServiceplan', views.addServiceplan, name='addServiceplan'),
    path('editPerson', views.editPerson, name='editPerson'),
    path('editPersonel', views.editPersonel, name='editPersonel'),
    path('editVehicles', views.editVehicles, name='editVehicles'),
    path('editVehicle', views.editVehicle, name='editVehicle'),
    path('editService', views.editService, name='editService'),
    path('managerManager', views.managerManager, name='managerManager'),
    path('editPlans', views.editPlans, name='editPlans'),
    path('updatePlan', views.updatePlan, name='updatePlan'),
    path('editServiceplan', views.editServiceplan, name='editServiceplan'),
    path('managerPanel', views.managerPanel, name='managerPanel'),
    path('generateReport', views.generateReport, name='generateReport'),
    path('reserveVehicle', views.reserveVehicle, name='reserveVehicle'),
    path('startRent', views.startRent, name='startRent'),
    path('endRent', views.endRent, name='endRent'),
    path('yourVehicles', views.yourVehiclesView, name='yourVehicles'),
    path('toStartRent', views.toStartRent, name='toStartRent'),
    path('toEndRent', views.toEndRent, name='toEndRent'),
    path('rentalDetails', views.rentalDetailsView, name='rentalDetails'),
# Serve user-uploaded media files in development.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| WWalusiak/Fleetmanager | FleetManager/manager/urls.py | urls.py | py | 3,185 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django... |
7740117405 | import pandas as pd
import numpy as np
import os
# Load every split CSV of the global dataset into a single DataFrame.
path, dirs, files = next(os.walk("./input/Dataset/GlobalDataset/Splitted/"))
file_count = len(files)
# Bug fix: the encoding name contained a garbled non-ASCII character
# ("ISO-8859โ1"), which makes pandas raise LookupError at runtime; the intended
# codec is "ISO-8859-1".
# Also read all splits first and concatenate once: concatenating inside the
# loop re-copies the accumulated frame on every iteration (quadratic cost).
frames = [
    pd.read_csv(f'{path}{fname}', encoding="ISO-8859-1", dtype=str)
    for fname in files
]
data1 = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
print("nb total instances in the file : ", len(data1.values))
# Drop the two rate columns (columns U and V in the original spreadsheet).
# NOTE(review): building the column list through a set loses column order —
# presumably intentional here since columns are later selected by name.
cols = list(set(list(data1.columns )) - set(list(['Flow Bytes/s',' Flow Packets/s'])) )
data1 = data1[cols]
# Node formatting: turn each endpoint into an "IP:port" string.
# (Original French comment: "Mise en forme des noeuds".)
# The leading spaces in the column names (' Source IP', ...) come from the
# CICIDS-style CSV headers and must be kept exactly.
data1[' Source IP'] = data1[' Source IP'].apply(str)
data1[' Source Port'] = data1[' Source Port'].apply(str)
data1[' Destination IP'] = data1[' Destination IP'].apply(str)
data1[' Destination Port'] = data1[' Destination Port'].apply(str)
data1[' Source IP'] = data1[' Source IP'] + ':' + data1[' Source Port']
data1[' Destination IP'] = data1[' Destination IP'] + ':' + data1[' Destination Port']
data1.drop(columns=['Flow ID',' Source Port',' Destination Port',' Timestamp'], inplace=True)
# Collect the distinct label values, with 'BENIGN' moved to the front.
# NOTE(review): the loop below is equivalent to
#   nom = list(data1[' Label'].unique())
# as the original author's own comment already observed.
nom = []
nom = nom + [data1[' Label'].unique()[0]]
for i in range(1, len(data1[' Label'].unique())):
    nom = nom + [data1[' Label'].unique()[i]]
nom.insert(0, nom.pop(nom.index('BENIGN')))
# Binarise the labels: BENIGN -> 0, every other (attack) label -> 1.
data1[' Label'].replace(nom[0], 0,inplace = True)
for i in range(1,len(data1[' Label'].unique())):
    data1[' Label'].replace(nom[i], 1,inplace = True)
data1.rename(columns={" Label": "label"},inplace = True)
# Split features from the target and drop the node-id columns from X.
label1 = data1.label
data1.drop(columns=['label'],inplace = True)
# split train and test
# data1 = pd.concat([data1, label1], axis=1)
cols = list(set(list(data1.columns )) - set(list([' Source IP', ' Destination IP'])) )
data1 = data1[cols]
##########
# Encode categorical flag columns with target encoding, then z-score all
# feature columns.
import category_encoders as ce
from sklearn.preprocessing import StandardScaler
encoder1 = ce.TargetEncoder(cols=[' Protocol', 'Fwd PSH Flags', ' Fwd URG Flags', ' Bwd PSH Flags', ' Bwd URG Flags'])
encoder1.fit(data1, label1)
data1 = encoder1.transform(data1)
scaler1 = StandardScaler()
cols_to_norm1 = list(set(list(data1.iloc[:, :].columns )) - set(list(['label', ' Source IP', ' Destination IP'])) )
data1[cols_to_norm1] = scaler1.fit_transform(data1[cols_to_norm1])
##########
X = data1.values
Y = label1.values
print(Y)
print("********************")
# Import the necessary libraries first
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import chi2
# Feature extraction: score every feature by mutual information with the
# binary label and keep the top-k.
nb_features_to_select = 35
test = SelectKBest(score_func = mutual_info_classif, k = nb_features_to_select)
fit = test.fit(X, Y)
# Summarize scores
np.set_printoptions(precision = 3)
print(fit.scores_)
# Select the indices of the k highest-scoring features by repeatedly taking
# the argmax and knocking it out with -inf.
# NOTE(review): `val` is not a copy — presumably fit.scores_ is a numpy array,
# so these -inf writes also clobber fit.scores_; copy first if the scores are
# needed later.
val = fit.scores_
feature_indx = []
for i in range(nb_features_to_select):
    f_indx = np.argmax(val)
    feature_indx.append(f_indx)
    val[f_indx] = float('-inf')
print(feature_indx)
print(data1.columns[feature_indx])
print(len(data1.columns[feature_indx]))
# Always keep the label, node ids and the columns dropped earlier in the
# final feature list (deduplicated via set()).
important_features = ['label', ' Source IP', ' Destination IP', 'Flow ID',' Source Port',' Destination Port',' Timestamp', 'Flow Bytes/s',' Flow Packets/s']
final_features = list(set(list(data1.columns[feature_indx]))) + list(set(list(important_features)))
print(final_features)
print(len(final_features)) | EagleEye1107/E-GNNExplainer | src/dataset_analysis/select_k_best.py | select_k_best.py | py | 3,500 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.walk",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_numb... |
38729573666 | import tensorflow as tf
# Reading data and set variables
# MNIST Dataset
from tensorflow.examples.tutorials.mnist import input_data
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
# Download (if necessary) and load MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# print(mnist)
# Show one sample label (one-hot vector) and its 784-pixel image vector.
print (mnist.train.labels[1])
print (mnist.train.images[1])
# # ๊ทธ๋ฆผ์ผ๋ก ๊ทธ๋ ค๋ณด๋ฉด.
# import numpy as np
#
# arr = np.array(mnist.train.images[1])
# arr.shape = (28,28)
#
# # %matplotlib inline
# import matplotlib.pyplot as plt
# plt.imshow(arr)
# plt.show()
# Number of output classes (digits 0-9).
nb_classes = 10
# MNIST data image of shape 28 * 28 = 784 flattened pixels per example.
X = tf.placeholder(tf.float32, [None, 784])
# 0 - 9 digits recognition = 10 classes (one-hot targets).
Y = tf.placeholder(tf.float32, [None, nb_classes])
# Trainable weights and bias of a single softmax (multinomial logistic) layer.
W = tf.Variable(tf.random_normal([784, nb_classes]))
b = tf.Variable(tf.random_normal([nb_classes]))
# Softmax!
# Hypothesis (using softmax): per-class probabilities for each input row.
hypothesis = tf.nn.softmax(tf.matmul(X, W) + b)
# Cross-entropy loss averaged over the batch.
# NOTE(review): tf.log(softmax(...)) is numerically fragile if a probability
# underflows to 0; tf.nn.softmax_cross_entropy_with_logits would be safer —
# left unchanged here.
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
# Test model: a prediction is correct when the argmax of the softmax output
# matches the argmax of the one-hot label.
# Bug fix: tf.arg_max is a deprecated alias (removed in later TF releases);
# use tf.argmax, consistent with the prediction code later in this script.
is_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
# Calculate accuracy as the mean of the 0/1 correctness indicators.
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
# Training epoch/batch
# Terminology (condensed from the original Korean/English notes):
#   - one epoch  = one full forward+backward pass over the training set;
#   - batch size = number of examples per step (larger batches need more RAM);
#   - iterations per epoch = num_examples / batch_size
#     (e.g. 1000 examples with batch size 500 -> 2 iterations per epoch).
training_epochs = 15
batch_size = 100
with tf.Session() as sess:
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())
    # Training cycle: average the per-batch cost over each epoch.
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            c, _ = sess.run([cost, optimizer], feed_dict={X: batch_xs, Y: batch_ys})
            avg_cost += c / total_batch
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
    print("Learning finished")
    # Report results on the test dataset.
    # Bug fix: in the original this evaluation (and the sample prediction
    # below) ran AFTER the `with` block, i.e. on an already-closed session,
    # which raised an error — the author even left a note wondering why.
    # Keeping everything inside the `with` block fixes it.
    print("Accuracy: ", accuracy.eval(session=sess, feed_dict={X: mnist.test.images, Y: mnist.test.labels}))
    # Sample image show and prediction
    import matplotlib.pyplot as plt
    import random
    # Pick one random test example and compare its true label with the
    # model's prediction, then display the digit.
    r = random.randint(0, mnist.test.num_examples - 1)
    print("Label:", sess.run(tf.argmax(mnist.test.labels[r:r+1], 1)))
    print("Prediction:", sess.run(tf.argmax(hypothesis, 1),
                                  feed_dict={X: mnist.test.images[r:r+1]}))
    plt.imshow(mnist.test.images[r:r+1].
               reshape(28,28), cmap="Greys", interpolation='nearest')
    plt.show()
################### ๋ค๋ฅธ ๋ฐฉ์ ์์ ์ค์ ###############
# # Reading data and set variables
# # MNIST Dataset
# from tensorflow.examples.tutorials.mnist import input_data
# # Check out https://www.tensorflow.org/get_started/mnist/beginners for
# # more information about the mnist dataset
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
#
# print (mnist.train.labels[1])
# print (mnist.train.images[1])
#
# import tensorflow as tf
# import numpy as np
#
# arr = np.array(mnist.train.images[1])
# arr.shape = (28,28)
#
# import matplotlib.pyplot as plt
# plt.imshow(arr)
# plt.show()
#
# # ์ด์ train ์ ํด๋ณด๋ฉด
# x = tf.placeholder(tf.float32, [None, 784])
# W = tf.Variable(tf.zeros([784,10]))
# b = tf.Variable(tf.zeros([10]))
#
# y = tf.nn.softmax(tf.matmul(x, W) + b)
#
# y_ = tf.placeholder(tf.float32, [None, 10])
# cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
# train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
#
# init = tf.global_variables_initializer()
# sess = tf.Session()
# sess.run(init)
#
# for i in range(1000):
# batch_xs, batch_ys = mnist.train.next_batch(100)
# sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
#
# # input์ ์ด 55000๊ฐ์ 784 pixel ์ ๊ฐ์ง ํ์ต ๋ฐ์ดํฐ๊ฐ ์ฌ์ฉ๋๋ฉฐ, output์ 10๊ฐ(0~9, ์ซ์)์
# # classification์ ๊ฐ์ง 55000๊ฐ์ ๊ฒฐ๊ณผ๊ฐ ๋ง๋ค์ด ์ง ๊ฒ์ด๋ค.
#
# # ์ฐ๋ฆฌ๋ ์ด์ tensorflow ์ฐ์ฐ ์ ๋ฐ์ดํฐ๋ฅผ tensorflow ์๊ฒ ๋ณด๋ด๊ธฐ ์ํ ๊ณต๊ฐ์ ๋ง๋ค ํ์๊ฐ ์๋ค.
# # placeholder๋ผ๊ณ ํ๋ ํจ์๋ฅผ ์ด์ฉํ์. [None, 784]๋ ํ ๊ธธ์ด๋ ์ ํ์ ๋์ง ์๊ฒ ๋ค๋ ๊ฒ์ ์๋ฏธํ๋ค.
# # W ๋ 784 x 10 ์ฐจ์์ 0 ๊ฐ์ ๊ฐ์ง ํ๋ ฌ๋ก ์ ์ํ์. None x 784 ํ๋ ฌ์ 10๊ฐ์ class๋ก ๋ถ๋ฅํ๊ธฐ ์ํด W๋
# # 10 ์ฐจ์์ ํ๋ ฌ์ด ๋์ด์ผ ํ๋ค. 0์ ์ด๊ธฐ๊ฐ์ด๋ฉฐ ํ์ต์ ํตํด ๊ทธ ๊ฐ์ ๊ณ์ ๋ณ๊ฒฝํด ๋๊ฐ ๊ฒ์ด๋ค.
# # b๋ 10์ฐจ์์ 0 ๊ฐ์ ๊ฐ์ง ํ๋ ฌ๋ก ์ ์ํด ๋์. b๋ bias ์ ์ค์ ํํ์ผ๋ก W์
# # ์
๋ ฅ๊ฐ์ ๊ฒฐ๊ณผ์ ์ถ๊ฐ์ ์ธ ์ ๋ณด๋ฅผ ๋ํ๊ธฐ ์ํ ๊ฐ์ ์๋งํ๋ค.
#
# # y๊ฐ์ ๊ณ์ฐํ๋ ๊ฒ์ ๋ณด๋ฉด tf.matmul(x.W) + b ๋ผ๊ณ ์ ํ ์๋๋ฐ ์ด๊ฒ์ ๋จ์ํ๊ฒ
# # ํ๋ ฌ ๊ณฑ์ ์๋งํ๋ฉฐ ๋ฐฉ์ ์์ผ๋ก ๋งํ์๋ฉด Wx+b ๋ฅผ ์๋ฏธํ๋ค.
# # ์ด ๊ฒฐ๊ณผ ๊ฐ์ ๋ํด softmax๋ผ๋ ํจ์๋ฅผ ์ทจํ๋๋ฐ ํด๋น ๊ฒฐ๊ณผ ๊ฐ์ ๋ํด์ softmax๋ฅผ ์ํ๊ฒ ๋๋ฉด
# # ํ๋ฅ ๊ฐ์ผ๋ก ๋ณํ๊ฒ ๋๋ค.
# # ์์ 8์ด๋ ํ๊ธฐ์ฒด ์ฌ์ง์ ์๋ก ๋ค์ด๋ณด๋ฉด ์์ ์ฌ์ง์ 80% ๊ฐ๋ 8์ด๋ผ๊ณ ์ธ์ํ ์ ์์ง๋ง 10%๊ฐ๋์ 9๋ผ๊ณ ์ธ์ํ ์ ์๊ณ
# # ๊ทธ ๋๋จธ์ง๋ ๋๋จธ์ง ์ซ์๋ค๋ก ์ธ์ํ ์๊ฐ ์๋ค.
# # Wx ๋ ํด๋น ์ซ์๊ฐ ๋ฌด์์ ๋ํ๋ด๋์ง ๊ทธ ์ฆ๊ฑฐ๋ฅผ ์ฐพ๋ ๊ณผ์ ์ด๋ผ๊ณ ํ ์ ์์ผ๋ฉฐ
# # b๋ ์ถ๊ฐ์ ์ธ ์ฆ๊ฑฐ๋ฅผ ๋ํ๋ ๊ณผ์ ์ด๋ผ๊ณ ์๊ฐํ๋ฉด ๋๋ค.
#
# # ์์ ๊ทธ๋ฆผ์์ ์์ด ํธ๋ฅธ์์ผ๋ก ๋ํ๋๋ ์ง์ ์ W๋ฅผ ๋ง์ด๋์ค๋ฅผ ์ค์ผ๋ก์จ ํด๋น ์ฆ๊ฑฐ ๊ฒฐ๊ณผ ๊ฐ์ ์์ ๊ฐ์ ๋๊ฒ ํ๊ณ ,
# # ์ซ์ ๋ถ๋ถ์ W๋ฅผ ํ๋ฌ์ค ๊ฐ์ ์ค์ผ๋ก์จ ํด๋น ์ฆ๊ฑฐ ๊ฐ์ ์์ ๊ฐ์ ๋๊ฒ ํ๋ค.
# # ์ด๋ ๊ฒ ๋ํ๋ ์ฆ๊ฑฐ ๊ฒฐ๊ณผ์ ๋ํด์ softmax ๊ฐ์ ์ทจํจ์ผ๋ก์จ ํ๋ฅ ๋ก ๋ณํ์์ผ ์ฃผ๋ ๊ฒ์ด๋ค.
# #
# # y_ ๋ tensorflow๋ก ๋ถํฐ ๊ฒฐ๊ณผ ๊ฐ์ ๋ฐ์์ค๊ธฐ ์ํ placeholder๋ฅผ ์ ์ํ ๊ฒ์ด๋ค.
# #
# # ๊ทธ ํ์ loss ํจ์๋ฅผ ์ ์ํด ์ฃผ๋๋ฐ, cross_entropy๋ผ ๋ถ๋ฆฌ์ฐ๋ ๋ฐฉ์์ ์ฌ์ฉํ๋ค.
# # ๊ธฐ์กด RMSE์ ๊ทธ ์๋ฏธ ๋ฐ ๋ชฉํ๋ ๊ฐ๋ค๊ณ ๋ณผ ์ ๊ฐ ์๋ค. ๋ค๋ฅธ์ ์ cross_entropy๋ ํ๋ฅ ๋ถํฌ์
# # ๋ํ ์ฐจ์ด์ ์ ๋ํ๋ด๋ ๊ฒ์ด๋ผ๊ณ ํ๊ฒ ๋ค.
# #
# # ์ฐ๋ฆฌ๊ฐ one_hot encode๋ก ํํํ ํ๋ฅ ๋ถํฌ์ ์ค์ ๊ณ์ฐํด์ ๋์จ ํ๋ฅ ๋ถํฌ ๊ฐ์ ์ฐจ์ด๋ฅผ ๊ตฌํด์ ๊ทธ ๊ฐ์ด ๊ฐ์ฅ ์์ ์ง์ ์์์
# # weight ๊ฐ์ ์ฐพ์๋ด๋ ๊ฒ์ด๋ค.
# #
# # loss ํจ์ ๊น์ง ์ ์๊ฐ ๋๋ฌ์ผ๋ฉด ์ด์ gradient descent optimizer์ learning rate์ loss ํจ์๋ฅผ
# # ๋ฑ๋กํด ์ฃผ๋ฉด ์ฌ์ ์์
์ ๋ชจ๋ ๋๋ฌ๋ค.
# #
# # ์ด์ training์ ๋๋ฆฌ๊ธฐ ์ ์ tf์ ๋ชจ๋ ๋ณ์๋ค์ ์ด๊ธฐํใ
์์ผ์ค๋ค. tensor flow๋ lazy evaluation ๋ฐฉ์์ด๋ผ
# # ์ฝ๋๊ฐ ์์ฑ๋๋ฉด ๋ฐ๋ก ์คํ๋๋ ๊ฒ์ด ์๋๋ผ session.run์ด ์คํ๋์ด์ผ ์ค์ ํจ์๊ฐ ๋์ํ๋ค. ์ธ์
์ ์ ์ธํ ํ
# # session.run์ ์ ์ํ init ํจ์๋ฅผ ์ง์ด ๋ฃ์.
# #
# # ์ด์ for๋ฌธ์ 1000๋ฒ์ ๋๋ ค, 100๊ฐ ์ฉ input_images ๋ฐ์ดํฐ์ input_labels ๋ฐ์ดํฐ๋ฅผ ๊ฐ์ ธ์จ๋ค. session.run์
# # ์คํํ์ฌ ์๊น ์ ์ํ training ํจ์๋ฅผ ์คํ์ํค๋ฉด ๋ชจ๋ training์ด ์๋ฃ๋๋ค.
# #
# # ์ด์ ๋ง๋ค์ด์ง model ์ ๋ํ ํ
์คํธ ๋ฐ์ดํฐ๋ฅผ ๋๋ ค ๊ฒ์ฆ์ ํด๋ณด์.
#
# correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#
# print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
#
# # argmax ๋ ํด๋น output์์ ๊ฐ์ฅ index๊ฐ ํฐ ๊ฒฐ๊ณผ๋ฅผ ๊ฐ์ ธ์จ๋ค. index๊ฐ ํฌ๋ค๋ ์๋ฏธ๋ ๊ฐ์ฅ ์ ์๊ฐ ๋๊ฒ ์ค์ ๋์๋ค๋
# # ๋ง์ด๊ณ ํด๋น ๊ฒฐ๊ณผ๋ฅผ ์ ๋ต์ผ๋ก ๋ณผ ์ ์๋ค๋ ๋ง์ด ๋๋ค. ์์ธกํ ๊ฐ์์์ argmax์ ์ค์ onehot encode์์์ argmax๋ฅผ
# # ๊ฐ๊ฐ ๊ฐ์ ธ์์ ๋น๊ตํด ๋ณด์. ํด๋น ๊ฐ์ด ๊ฐ์ผ๋ฉด true, ํ๋ฆฌ๋ฉด false๋ฅผ ๋ฆฌํดํ ๊ฒ์ด๋ค.
# # correct_prediction์ true, false ๋ฐฐ์ด์ ๋ํ๋ธ๋ค.
# # correct_prediction์ ๋ํ ์ถ๋ ฅํด ๋ณด๊ณ ์ถ๋ค๋ฉด ์ง์ ํธ์ถํ ์๋ ์๊ณ session.run์ ์คํํด์ ๊ฒฐ๊ณผ๋ฅผ
# # ํ์ธํด์ผ ํ๋ค. ์๋์ ๊ฐ์ด ์ฝ๋๋ฅผ ์์ฑํ ํ ๊ฒฐ๊ณผ๋ฅผ ํ์ธํด ๋ณด์.
#
# print(sess.run(correct_prediction, feed_dict={x:mnist.test.images, y_:mnist.test.labels}))
#
# # accuracy๋ ์์ boolean ๋ฐฐ์ด์ True์ผ ๊ฒฝ์ฐ์๋ False์ผ ๊ฒฝ์ฐ์๋ 0์ผ๋ก ๋ณํ ํ ํ ํ๊ท ์ ๊ตฌํ ๊ฐ์ด๋ค.
# # ํด๋น ๊ฒฐ๊ณผ๋ฅผ ํ์ธํด ๋ณด์.
#
# print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
#
# # mnist.test.images์ mnist.test.labels ์ ์ค์ ๊ฐ๋ค์ ์ง์ ๋ณด๊ณ ์ถ๋ค๋ฉด ์๋์ ๊ฐ์ด ์์ฑํ ํ ํ์ธํ๋ค.
# # ์ค์ ๋ก 1000๋ฒ์ ๋๋ ค์ ํด๋ดค๋๋ฐ jupyter๊ฐ ๋ป์ ๋ป ํ๋ค.... (print๊ฐ ๊ณต์๊ฐ ๋ง์ด ๋๋ ๋ก์ง์ธ๊ฐ...)
# # range๋ฅผ 1๋ก๋ง ์ฃผ๊ณ ํ์ธํด๋ณด์.
#
# for i in range(1):
# batch_x, batch_y = mnist.test.next_batch(100)
# diff_a = sess.run(tf.argmax(y,1), feed_dict={x:batch_x})
# diff_b = sess.run(tf.argmax(y_,1), feed_dict={y_:batch_y})
#
# print(diff_a)
# print(diff_b)
#
# # ์กฐ๊ธ ๋ ๋ณด๊ธฐ ํธํ๊ฒ ์๋์ ๊ฐ์ด ์์ ํ์๋ค.
# for i in range(2):
# result_boolean = []
# batch_x, batch_y = mnist.test.next_batch(9)
# diff_a = sess.run(tf.argmax(y,1), feed_dict={x:batch_x})
# diff_b = sess.run(tf.argmax(y_,1), feed_dict={y_:batch_y})
# print("sample output : " + str(diff_a))
#
# for k in range(9):
# if diff_a[k] == diff_b[k]:
# result_boolean.append("T")
# else:
# result_boolean.append("F")
# print("compare : " + str(result_boolean))
#
# plt.figure(i)
# coordi = [191,192,193,194,195,196,197,198,199]
#
# for index, image in enumerate(batch_x):
# image.shape(28,28)
# plt.subplot(coordi[index])
# plt.imshow(image)
# print("sample input :") | The-G/PYTHON_study | Tensorflow study/Lecture07-Learning rate, Evaluation, MNIST/Lab7-2.py | Lab7-2.py | py | 11,779 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tensorflow.placeholder",
"line_num... |
26466912861 | '''
Created on 17.2.2016
@author: Claire
'''
import urllib, codecs
from requests import Request, Session
import requests, json, logging
logger = logging.getLogger('lasQuery')
hdlr = logging.FileHandler('/tmp/linguistics.log')
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
class lasQuery:
    """Client for the SeCo LAS linguistic-analysis web service.

    Wraps the HTTP endpoints under http://demo.seco.tkk.fi/las/ that
    perform morphological analysis (/analyze) and lemmatisation
    (/baseform) of Finnish text.
    """

    def __init__(self, file_name_pattern="", path="", full_path=""):
        # `full_path` is accepted for interface compatibility but not stored
        # (it was unused in the original as well).
        self.__file_name_pattern = file_name_pattern
        self.__path = path

    def analysis(self, input):
        """Return a space-separated string of noun lemmas found in `input`.

        Runs a full morphological analysis and collects the lemma of every
        word part whose first UPOS tag is NOUN or PROPN.  Only the first
        reading of each word is considered.
        """
        res = " "
        j = self.morphological_analysis(input)
        prevword = ""
        upos = ""
        for w in j:
            word = w['word']
            analysis = w['analysis']
            for r in analysis:
                # Skip further readings once this word has produced a UPOS tag
                # (upos is reset inside the wordParts loop below).
                if word != prevword and len(upos) < 1:
                    prevword = word
                    wp = r['wordParts']
                    for part in wp:
                        lemma = part['lemma']
                        upos = ""
                        if 'tags' in part:
                            p = part['tags']
                            if 'UPOS' in p:
                                p1 = p['UPOS']
                                if len(p1) > 0:
                                    upos = part['tags']['UPOS'][0]
                        if upos == 'NOUN' or upos == 'PROPN':
                            res = res + lemma + " "
        return res

    def morphological_analysis(self, input):
        """POST `input` to the LAS /analyze endpoint and return parsed JSON.

        Returns "" when the request could not be sent at all, and {} when
        the response body is not valid JSON.
        """
        content = self.prepared_request_morphological(input)
        if content is None:
            return ""
        json = None
        try:
            json = content.json()
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            json = {}
            print("Unablto to produce json:" + str(content))
        return json

    def lexical_analysis(self, input):
        """POST `input` to the LAS /baseform endpoint.

        Returns the raw response body (bytes), or "" when the request
        failed.
        """
        content = self.prepared_request(input)
        if content is None:
            return ""
        return content.content

    def prepared_request(self, input):
        """Send a prepared POST to /baseform.

        Returns the requests Response, or None on connection error.
        """
        s = Session()
        params = {'text': input, 'locale': 'fi'}
        req = Request('POST', 'http://demo.seco.tkk.fi/las/baseform',
                      headers={'X-Custom': 'Test'}, data=params)
        prepared = req.prepare()
        logger.info(prepared.headers)
        logger.info(prepared.body)
        try:
            resp = s.send(prepared)
            return resp
        except requests.ConnectionError as ce:
            # Bug fix: logger.warn is a deprecated alias of logger.warning.
            logger.warning("Unable to open with native function. Error: " + str(ce))
            return None

    def prepared_request_morphological(self, input):
        """Send a prepared POST to /analyze (forms=V+N+Nom+Sg).

        Returns the requests Response, or None on connection error.
        """
        s = Session()
        params = {'text': input, 'locale': 'fi', "forms": "V+N+Nom+Sg"}
        req = Request('POST', 'http://demo.seco.tkk.fi/las/analyze',
                      headers={'X-Custom': 'Test'}, data=params)
        prepared = req.prepare()
        try:
            resp = s.send(prepared)
            return resp
        except requests.ConnectionError as ce:
            logger.warning("Unable to open with native function. Error: " + str(ce))
            return None

    def pretty_print_POST(self, req):
        """
        At this point it is completely built and ready
        to be fired; it is "prepared".
        However pay attention at the formatting used in
        this function because it is programmed to be pretty
        printed and may differ from the actual request.
        """
        print('{}\n{}\n{}\n\n{}'.format(
            '-----------START-----------',
            req.method + ' ' + req.url,
            '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),
            req.body,
        ))
| SemanticComputing/aatos | las_query.py | las_query.py | py | 5,981 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG... |
41210654822 | from pymongo import MongoClient
client= MongoClient('localhost:27017')
db = client.train
def read():
    """Fetch every document from the `traincsv` collection and print it."""
    try:
        cursor = db.traincsv.find()
        print("All data From database")
        for document in cursor:
            print(document)
    except Exception as err:
        print(str(err))


read()
| kaif3120/manuals | BIG DATA PRACTICALS/PRAC 8 MONGO FIND.py | PRAC 8 MONGO FIND.py | py | 303 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 2,
"usage_type": "call"
}
] |
43901190306 | import tkinter as tk
import random
from names import name_list
from traits import trait_list
from appearence import appearence_list
from inventory import inventory_list
# Tkinter root window setup.
root = tk.Tk()
root.configure(bg = 'grey')
# functions
def save():
    """Append the currently displayed NPC to the save file."""
    # Order matters: the name goes first on its own line, then the
    # remaining fields in display order.
    fields = (gender_label, race_label, traits_label, appearence_label,
              motivation_label, inventory_label, gold_label)
    with open("Saved NPCs.txt", "a") as file:
        file.write("\n\n" + name_label.cget("text") + "\n")
        for widget in fields:
            file.write(widget.cget("text"))
def generate():
    """Roll a fresh value for every NPC field."""
    rollers = (choose_name, choose_gender, choose_race, choose_traits,
               choose_appearence, choose_motivation, choose_inventory,
               choose_gold)
    for roll in rollers:
        roll()
def choose_name():
    """Pick a random name and show it on the name label."""
    picked = random.choice(name_list)
    name_label.configure(text="Name: " + picked)
def choose_gender():
    """Pick a random gender and show it on the gender label."""
    picked = random.choice(["male", "female"])
    gender_label.configure(text="\nGender: " + picked)
def choose_race():
    """Pick a random fantasy race and show it on the race label."""
    races = ["Elf", "Dwarf", "Tabaxi", "Half-Orc", "Goblin", "Human",
             "Owlin", "Frog person", "Dragonborne", "Half-Elf", "Gnome",
             "Halfing", "Tiefling", "Bugbear", "Genesai", "Fairy", "Kenku",
             "Lizardfolk", "Tortle", "Firbolg"]
    race_label.configure(text="\nRace: " + random.choice(races))
def choose_appearence():
    """Pick two random appearance features (repeats possible)."""
    first = random.choice(appearence_list)
    second = random.choice(appearence_list)
    appearence_label.configure(
        text="\nAppearence: " + first + " and " + second)
def choose_traits():
    """Pick two random personality traits (repeats possible)."""
    first = random.choice(trait_list)
    second = random.choice(trait_list)
    traits_label.configure(text="\nTraits: " + first + " and " + second)
def choose_motivation():
    """Pick a random motivation and show it on the motivation label."""
    motivations = ["A drive for exploration, discovery, and adventure",
                   "seeking treasure and riches", "Heroism", "Solitude",
                   "Revenge", "Peace", "Being good/being right",
                   "being wanted/ being loved", "being valuable/being admired",
                   "Being Authentic/To find meaning",
                   "Being Competent / Being Capable",
                   "Being Secure / Being Supported ",
                   "Being Satisfied / Being Content",
                   "Being Independent/ To Protect Themselves",
                   "Being at Peace/ Being Harmonious"]
    motivation_label.configure(text="\nMotivation: " + random.choice(motivations))
def choose_inventory():
    """Pick four random inventory items (repeats possible)."""
    items = [random.choice(inventory_list) for _ in range(4)]
    inventory_label.configure(
        text="\nInventory: " + items[0] + ", " + items[1] + ", "
        + items[2] + ", and " + items[3])
def choose_gold():
    """Roll a gold amount between 0 and 100 and show it on the gold label."""
    amount = random.randint(0, 100)
    gold_label.configure(text="\nGold: " + str(amount))
# Field labels: one grid row per NPC attribute, left-aligned.
name_label = tk.Label(root, text = "Name: ", font = 14,)
name_label.grid(row = 1, sticky = 'W')
gender_label = tk.Label(root, text = "\nGender: ", font = 14)
gender_label.grid(row = 2, sticky = 'W')
race_label = tk.Label(root, text = "\nRace: ", font = 14)
race_label.grid(row = 3, sticky = 'W')
appearence_label = tk.Label(root, text = "\nAppearence: ", font = 14)
appearence_label.grid(row = 4, sticky = 'W')
traits_label = tk.Label(root, text = "\nTraits: ", font = 14)
traits_label.grid(row = 5, sticky = 'W')
motivation_label = tk.Label(root, text = "\nMotivation: ", font = 14)
motivation_label.grid(row = 6, sticky = 'W')
inventory_label = tk.Label(root, text = "\nInventory: ", font = 14)
inventory_label.grid(row = 7, sticky = 'W')
gold_label = tk.Label(root, text = "\nGold: ", font = 14)
gold_label.grid(row = 8, sticky = 'W')
# Action buttons: roll a new NPC / append the current one to the save file.
generate_button = tk.Button(root, text = "\nGenerate", font = 14, command = generate)
generate_button.grid(row = 9, sticky = 'W')
save_button = tk.Button(root, text = "\nSave", font = 14, command = save)
save_button.grid(row = 10, sticky = 'W')
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
| bonsaipropaganda/NPC-Generator | main.py | main.py | py | 4,049 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Tk",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "names.name_list",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "random.choice",
"line... |
5873286899 | from credentials import aws_key, aws_id, aws_region, sqs_name, arn
from time import sleep
import json
import boto.sqs
import boto.sns
from boto.sqs.message import Message
import ast
from alchemyapi import AlchemyAPI
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
import sys
from concurrent.futures import ThreadPoolExecutor
class NotificationManager():
def __init__(self, aws_id, aws_key, es, aws_region='us-west-2', sqs_name='new-tweet-notifs'):
try:
#connect with sqs
self.sqs = boto.sqs.connect_to_region(aws_region, aws_access_key_id=aws_id, aws_secret_access_key=aws_key)
self.sqs_queue = self.sqs.get_queue(sqs_name)
self.alc = AlchemyAPI()
self.sns = boto.sns.connect_to_region(aws_region)
self.es = es
self.thread_pool = ThreadPoolExecutor(max_workers=4)
except Exception as e:
print('Could not connect')
print(e)
print('Connected to AWS SQS: '+ str(self.sqs))
def worker_task(self, m):
error = False
print('Opening notification')
body = m.get_body()
tweet= ast.literal_eval(body)
#do something with the tweet
print(tweet['text'])
response = self.alc.sentiment("text", tweet['text'])
if(response['status']=='ERROR'):
print('ERROR')
error = True
if not error:
tweet['sentiment'] = response["docSentiment"]["type"]
print("Sentiment: "+ tweet['sentiment'])
#add to Elasticsearch
try:
self.es.index(index="tweets", doc_type="twitter_twp", body=tweet)
except Exception as e:
print('Elasticserch indexing failed')
print(e)
json_string = json.dumps(tweet)
#send processed tweet to SNS
self.sns.publish(arn, json_string, subject='Sub')
#delete notification when done
self.sqs_queue.delete_message(m)
print('Done')
def openNotifications(self):
while True:
#poll for new notifs every second
rs = self.sqs_queue.get_messages() #result set
if len(rs) > 0:
for m in rs:
self.thread_pool.submit(self.worker_task, m)
# init Elasticsearch
awsauth = AWS4Auth(aws_id, aws_key,'us-west-2','es')
es = Elasticsearch(
hosts=[{'host': 'search-es-twitter-yarekxa5djp3rkj7kp735gvacy.us-west-2.es.amazonaws.com', 'port': 443}],
use_ssl=True,
http_auth=awsauth,
verify_certs=True,
connection_class=RequestsHttpConnection
)
#do the magic
#sys.setdefaultencoding('utf-8')
notman = NotificationManager(aws_id, aws_key, es)
notman.openNotifications() | litesaber15/elastictweetmap | Worker/worker.py | worker.py | py | 2,465 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "boto.sqs.sqs.connect_to_region",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "credentials.aws_region",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "boto.sqs.sqs",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_n... |
30721629451 | import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
import sklearn as sk
#from rouge_score import rouge_scorer
from transformers import T5Tokenizer, T5ForConditionalGeneration
import os
import torch
if torch.cuda.is_available():
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
torch.cuda.current_device()
TRAIN_BATCH_SIZE = 2 # input batch size for training (default: 64)
TEST_BATCH_SIZE = 2 # input batch size for testing (default: 1000)
TEST_EPOCHS = 1 # number of epochs to train (default: 10)
VAL_EPOCHS = 4
LEARNING_RATE = 1e-4 # learning rate (default: 0.01)
SEED = 42 # random seed (default: 42)
MAX_LEN = 512
SUMMARY_LEN = 150
torch.manual_seed(SEED) # pytorch random seed
np.random.seed(SEED) # numpy random seed
torch.backends.cudnn.deterministic = True
tokenizer = T5Tokenizer.from_pretrained("t5-base")
#create test dataloader
test_params = {
'batch_size': TEST_BATCH_SIZE,
'shuffle': False,
'num_workers': 0
}
class CustomDataset(Dataset):
def __init__(self, dataframe, tokenizer, source_len, summ_len):
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = summ_len
self.text = self.data.text
self.ctext = self.data.ctext
def __len__(self):
return len(self.text)
def __getitem__(self, index):
ctext = str(self.ctext[index])
ctext = ' '.join(ctext.split())
text = str(self.text[index])
text = ' '.join(text.split())
source = self.tokenizer.batch_encode_plus([ctext], max_length= self.source_len, pad_to_max_length=True,return_tensors='pt')
target = self.tokenizer.batch_encode_plus([text], max_length= self.summ_len, pad_to_max_length=True,return_tensors='pt')
source_ids = source['input_ids'].squeeze()
source_mask = source['attention_mask'].squeeze()
target_ids = target['input_ids'].squeeze()
target_mask = target['attention_mask'].squeeze()
return {
'source_ids': source_ids.to(dtype=torch.long),
'source_mask': source_mask.to(dtype=torch.long),
'target_ids': target_ids.to(dtype=torch.long),
'target_ids_y': target_ids.to(dtype=torch.long)
}
def generate(epoch, tokenizer, model, device, loader):
    """Run beam-search generation over *loader* and decode the results.

    Returns a pair ``(predictions, actuals)``: the decoded model outputs and
    the decoded reference targets, in loader order.
    """
    model.eval()
    predictions, actuals = [], []
    rscores = []  # kept for parity with the original implementation (unused)
    with torch.no_grad():
        for batch in loader:
            references = batch['target_ids'].to(device, dtype=torch.long)
            input_ids = batch['source_ids'].to(device, dtype=torch.long)
            attention_mask = batch['source_mask'].to(device, dtype=torch.long)
            generated_ids = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=150,
                num_beams=2,
                repetition_penalty=2.5,
                length_penalty=1.0,
                early_stopping=True
            )
            def _decode(seq):
                return tokenizer.decode(
                    seq, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            predictions.extend(_decode(g) for g in generated_ids)
            actuals.extend(_decode(t) for t in references)
    return predictions, actuals
def getsummaryusingT5(_df_predictionset):
    """Summarize the 'Crawled Article Text' column with a fine-tuned T5 model
    and return every generated summary joined together with "..." separators.

    NOTE(review): the checkpoint is loaded from a hard-coded relative path, so
    this must run from the repository root -- confirm before reuse.
    """
    # The same column is used for both sides: 'text' (reference) and 'ctext'
    # (source); the T5 "summarize: " task prefix is prepended to the source.
    df = _df_predictionset[['Crawled Article Text', 'Crawled Article Text']]
    print(df.head())
    df.columns = ['text','ctext']
    df.ctext = 'summarize: ' + df.ctext
    test_dataset=df.reset_index(drop=True)
    print(len(test_dataset))
    #Create Test Set
    tokenizer = T5Tokenizer.from_pretrained("t5-base")
    test_set = CustomDataset(test_dataset, tokenizer, MAX_LEN, SUMMARY_LEN)
    test_loader = DataLoader(test_set, **test_params)
    # Prefer the GPU when present; clear its cache before loading the model.
    if torch.cuda.is_available():
        device = torch.device('cuda')
        torch.cuda.empty_cache()
    else:
        device = torch.device('cpu')
    #device = torch.device('cuda')
    # Start from pretrained t5-base weights, then overwrite them with the
    # fine-tuned checkpoint.
    fine_tuned_T5_model = T5ForConditionalGeneration.from_pretrained("t5-base")
    fine_tuned_T5_model = fine_tuned_T5_model.to(device)
    path = "TrainedModels/T5NewsSummary_ds_weights_30_lr-0.0001.pt"
    fine_tuned_T5_model.load_state_dict(torch.load(path))
    #device = torch.device('cuda')
    TEST_EPOCHS = 1  # local shadow of the module-level constant
    finetunedT5_liar_summaries_df = {}
    print('FineTuned T5 Model')
    for epoch in range(TEST_EPOCHS):
        print("Generating Summaries")
        generated_text, actual_text = generate(epoch, tokenizer, fine_tuned_T5_model, device, test_loader)
        finetunedT5_liar_summaries_df = pd.DataFrame({'Generated Text':generated_text,'Actual Text':actual_text})
        #final_df.to_csv('predictions.csv')
        print("Summaries generated")
    # Concatenate every generated summary into one string, "..."-separated.
    _summary = ""
    for i,text in enumerate(finetunedT5_liar_summaries_df['Generated Text']):
        generated = finetunedT5_liar_summaries_df['Generated Text'][i]
        _summary = _summary + "..." + generated
    return _summary
| vksoniya/fakenewsdetectionframework | Utils/T5Summarizer.py | T5Summarizer.py | py | 5,261 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.cu... |
40903031893 | '''
In this project, you will visualize the feelings and language used in a set of
Tweets. This starter code loads the appropriate libraries and the Twitter data you'll
need!
'''
import json
from textblob import TextBlob
import matplotlib.pyplot as plt
from wordcloud import WordCloud
#Get the JSON data
tweetFile = open("TwitterData/tweets_small.json", "r")
tweetData = json.load(tweetFile)
tweetFile.close()
# Continue your program below!
# Textblob sample:
tb = TextBlob("You are a brilliant computer scientist.")
print(tb.polarity)
#Create a polarity list (this list stores positive and negative numbers which tell us about tweet)
# Per-tweet sentiment: polarity is in [-1, 1], subjectivity in [0, 1].
polarity_list = []
subjectivity_list = []
for tweet in tweetData:
    tweetBlob = TextBlob(tweet["text"])
    polarity_list.append(tweetBlob.polarity)
    subjectivity_list.append(tweetBlob.subjectivity)
'''sum = 0
for polarity in polarity_list:
sum = sum + polarity
print(sum)
avgPolarity = sum/len(polarity_list)
print("The average polarity is: %f " %(avgPolarity))
sub_sum = 0
for subjectivity in subjectivity_list:
sub_sum = sub_sum + subjectivity
avgSubjectivity = sub_sum/len(subjectivity_list)
print("The average subjectivity is: %f " %(avgSubjectivity))'''
print(polarity_list)
#print(subjectivity_list)
#This is a histogram for Twitter Data
'''
#Create the graph
plt.hist(polarity_list, bins = [-1.1, -.75, -0.5, -0.25, 0, 0.25, 0.50, 0.75, 1.1])
plt.xlabel('Polarities')
plt.ylabel('Number of Tweets')
plt.title('Tweet Polarity')
plt.axis([-1.1,1.1,0,100])
plt.grid(True)
plt.show() #shows our graph'''
#Creating a Word Cloud with TwitterData
#initilizaing a variable called 'combinedTweets' which is an empty string that will hold all of the tweets in tweetData
combinedTweets = ""
for tweet in tweetData:
    combinedTweets += tweet['text']
tweetBlob = TextBlob(combinedTweets)
#print(dir(tweetBlob))
# Stop/filler words excluded from the word cloud.
wordsToFiller = ["about", "https","say", "make", "from", "be", "an", "our", "got","as", "your", "see", "that", "us", "but", "we","at", "and", "of", "with", "you","is", "to", "for", "by", "it","in", "the", "thing", "will", "could", "automation"]
filteredDictionary = dict()
#point of for loop is to filter words in our big tweet
for word in tweetBlob.words:
    #the following if-statement are conditions for what types of words I want in my word cloud
    #skip small words
    if len(word) < 2:
        continue
    #skip words with random characters or numbers
    if not word.isalpha():
        continue
    #skip words in filler list
    if word.lower() in wordsToFiller:
        continue
    filteredDictionary[word.lower()] = tweetBlob.word_counts[word.lower()]
#Create the word Cloud
#this is the word cloud variable
wordCloud = WordCloud().generate_from_frequencies(filteredDictionary)
plt.imshow(wordCloud, interpolation= 'bilinear')
plt.axis("off")
#this shows our Cloud
plt.show()
| RachelA314/Aboutme | DataVisualizationProject/Data_vis_project_pt1.py | Data_vis_project_pt1.py | py | 2,883 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
... |
25463176927 | import bz2
import csv
import argparse
import os
import numpy as np
import tensorflow as tf
from sklearn.naive_bayes import GaussianNB
def parse_argument():
    """Parse CLI options for the cp-loss stylometry baseline trainer."""
    cli = argparse.ArgumentParser(description='arg parser')
    cli.add_argument('--input_dir', default='cp_loss_count_per_game')
    cli.add_argument('--gpu', default=0, type=int)
    return cli.parse_args()
def normalize(data):
    """Scale *data* to unit L2 norm.

    Returns *data* unchanged when its norm is zero; previously this divided
    by zero, yielding NaN/inf values and a runtime warning.
    """
    norm = np.linalg.norm(data)
    if norm == 0:
        return data
    return data / norm
def read_npy(input_dir):
    """Load per-player cp-loss dictionaries from *input_dir*.

    File names look like ``<player>_<split>.npy`` with split in
    train/validation/test; player names may themselves contain underscores.
    Returns ``{player: {'train': dict, 'validation': dict, 'test': dict}}``.
    """
    # Collect player names, preserving first-seen order (the label assignment
    # downstream depends on this ordering).
    ordered_names = {}
    for entry in os.listdir(input_dir):
        parts = entry.split('_')
        if len(parts) > 2:
            name = '_'.join(parts[:-1])
        else:
            name = parts[0]
        ordered_names[name] = 1

    player_data = {}
    for name in ordered_names:
        per_split = {}
        for split in ('train', 'validation', 'test'):
            path = os.path.join(input_dir, '{}_{}.npy'.format(name, split))
            # Each file stores a pickled dict wrapped in a 0-d object array.
            per_split[split] = np.load(path, allow_pickle=True).item()
        player_data[name] = per_split
    return player_data
def construct_datasets(player_data):
    """Flatten per-player dictionaries into (features, labels) arrays.

    Players are labelled 0..N-1 in iteration order of *player_data*; each
    per-game vector is unit-normalized via the module-level ``normalize``.
    Returns the train/validation/test feature and label arrays plus the
    ``{player_name: label}`` mapping.
    """
    player_index = {}
    buffers = {
        'train': ([], []),
        'validation': ([], []),
        'test': ([], []),
    }
    for label, player in enumerate(player_data):
        player_index[player] = label
        for split_name, (features, labels) in buffers.items():
            for value in player_data[player][split_name].values():
                features.append(normalize(value))
                labels.append(label)

    train_x, train_y = buffers['train']
    val_x, val_y = buffers['validation']
    test_x, test_y = buffers['test']
    # np.stack raises on empty splits, matching the original behavior.
    return (np.stack(train_x, axis=0), np.stack(train_y, axis=0),
            np.stack(val_x, axis=0), np.stack(val_y, axis=0),
            np.stack(test_x, axis=0), np.stack(test_y, axis=0),
            player_index)
def init_net(output_size):
    """Build the 2-layer dense classifier over 50-dim cp-loss features.

    Bug fix: the final layer previously had a hard-coded 30 units, silently
    ignoring *output_size* (the number of players). It now emits
    ``output_size`` logits so they line up with SparseCategoricalCrossentropy
    in ``train`` regardless of how many players are present.
    """
    l2reg = tf.keras.regularizers.l2(l=0.5 * (0.0001))
    input_var = tf.keras.Input(shape=(50, ))
    dense_1 = tf.keras.layers.Dense(40, kernel_initializer='glorot_normal',
                                    kernel_regularizer=l2reg, bias_regularizer=l2reg,
                                    activation='relu')(input_var)
    logits = tf.keras.layers.Dense(output_size, kernel_initializer='glorot_normal',
                                   kernel_regularizer=l2reg, bias_regularizer=l2reg)(dense_1)
    return tf.keras.Model(inputs=input_var, outputs=logits)
def train(train_dataset, train_labels, val_dataset, val_labels, test_dataset, test_labels, player_index):
    """Compile, fit, and evaluate the classifier; returns the trained model.

    The number of classes is inferred as ``max(test_labels) + 1``.
    """
    net = init_net(max(test_labels) + 1)
    # The model outputs raw logits (from_logits=True); clipnorm guards
    # against exploding gradients.
    net.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001, clipnorm=1),
                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
    net.fit(train_dataset, train_labels, batch_size=32, epochs=10, validation_data=(val_dataset, val_labels))
    test_loss, test_acc = net.evaluate(test_dataset, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    return net
# predict is to verify if keras test is correct
def predict(net, test, test_labels):
    """Re-check keras accuracy by hand: argmax over softmax probabilities."""
    probability_model = tf.keras.Sequential([net,
                                             tf.keras.layers.Softmax()])
    predictions = probability_model.predict(test)
    hits = sum(1 for idx, probs in enumerate(predictions)
               if test_labels[idx] == np.argmax(probs))
    print('test accuracy is: {}'.format(hits / len(predictions)))
if __name__ == '__main__':
    args = parse_argument()
    # Pin TensorFlow to the requested GPU and enable on-demand memory growth.
    # (Raises IndexError if --gpu exceeds the number of visible devices.)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_visible_devices(gpus[args.gpu], 'GPU')
    tf.config.experimental.set_memory_growth(gpus[args.gpu], True)
    player_data = read_npy(args.input_dir)
    train_dataset, train_labels, val_dataset, val_labels, test_dataset, test_labels, player_index = construct_datasets(player_data)
    net = train(train_dataset, train_labels, val_dataset, val_labels, test_dataset, test_labels, player_index)
    # predict is to verify if test is correct
    # predict(net, test_dataset, test_labels)
| CSSLab/maia-individual | 4-cp_loss_stylo_baseline/train_cploss_per_game.py | train_cploss_per_game.py | py | 5,449 | python | en | code | 18 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.listdi... |
35672480130 | import os
import json
import csv
class dirSummary:
    """Build a ``<dirName>_map.csv`` index of activity overviews under *dirName*.

    Each sub-directory of *dirName* is expected to contain
    ``<id>_overview.json``; one CSV row is written per overview found.
    Call :meth:`get` to populate the CSV, then :meth:`close` to flush it.
    """

    def __init__(self, dirName):
        self.dirName = dirName
        # NOTE: when dirName is absolute, os.path.join returns the absolute
        # second component, i.e. the CSV lands at "<dirName>_map.csv".
        self.file = open(os.path.join(self.dirName, self.dirName + "_map.csv"), "w")
        # "Acitvity Type" typo is intentional: it matches the JSON key.
        fieldnames = ["ID", "Title", "Acitvity Type", "Date", "Time", "Distance", "Moving Time"]
        self.writer = csv.DictWriter(self.file, fieldnames=fieldnames)
        self.writer.writeheader()

    def get(self):
        """Walk dirName, parse every <id>_overview.json and write CSV rows."""
        for subdir, dirs, files in os.walk(self.dirName):
            for dir in dirs:
                for subdir2, dirs2, files2 in os.walk(os.path.join(self.dirName, dir)):
                    print(files2)
                    if dir + "_overview.json" in files2:
                        with open(os.path.join(self.dirName, dir, dir + "_overview.json"), 'r') as f:
                            over = json.load(f)
                        # The duration key varies between exports; try each
                        # known variant in order of preference.
                        try:
                            time = over["Basic Stats"]["Moving Time"]
                        except KeyError:
                            try:
                                time = over["Basic Stats"]["Elapsed Time"]
                            except KeyError:
                                time = over["Basic Stats"]["Duration"]
                        self.writer.writerow({"ID": dir, "Title": over["Title"],
                                              "Acitvity Type": over["Acitvity Type"],
                                              "Date": over["Date"], "Time": over["Time"],
                                              "Distance": over["Basic Stats"]["Distance"],
                                              "Moving Time": time})

    def close(self):
        """Flush and close the CSV file (fixes the previous file-handle leak)."""
        self.file.close()
if __name__ == "__main__":
    # Example run against a local rider directory.
    d = dirSummary("./Robert Gesink")
    d.get()
| Abhiram98/strava-scraper | scraper/dirSummary.py | dirSummary.py | py | 1,381 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "csv.DictWriter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 1... |
72495902115 | import argparse
from functools import partial
import json
import logging
from multiprocessing import Pool
import os
import sys
sys.path.append(".") # an innocent hack to get this to run from the top level
from tqdm import tqdm
from openfold.data.mmcif_parsing import parse
from openfold.np import protein, residue_constants
def parse_file(
    f,
    args,
    chain_cluster_size_dict
):
    """Parse one mmCIF/PDB file into ``{chain_name: metadata}`` for the cache.

    Args:
        f: File name (relative to ``args.data_dir``).
        args: Parsed CLI namespace; only ``args.data_dir`` is used.
        chain_cluster_size_dict: Optional CHAIN_ID -> cluster-size map; when
            provided, each entry gets a ``cluster_size`` field (-1 if absent).

    Returns {} for unparseable mmCIF files and for unrecognized extensions
    (previously an unrecognized extension raised NameError on ``out``).
    """
    out = {}
    file_id, ext = os.path.splitext(f)
    if(ext == ".cif"):
        with open(os.path.join(args.data_dir, f), "r") as fp:
            mmcif_string = fp.read()
        mmcif = parse(file_id=file_id, mmcif_string=mmcif_string)
        if mmcif.mmcif_object is None:
            logging.info(f"Could not parse {f}. Skipping...")
            return {}
        else:
            mmcif = mmcif.mmcif_object
        for chain_id, seq in mmcif.chain_to_seqres.items():
            full_name = "_".join([file_id, chain_id])
            out[full_name] = {}
            local_data = out[full_name]
            local_data["release_date"] = mmcif.header["release_date"]
            local_data["seq"] = seq
            local_data["resolution"] = mmcif.header["resolution"]
            if(chain_cluster_size_dict is not None):
                # -1 marks chains missing from the cluster file.
                cluster_size = chain_cluster_size_dict.get(
                    full_name.upper(), -1
                )
                local_data["cluster_size"] = cluster_size
    elif(ext == ".pdb"):
        with open(os.path.join(args.data_dir, f), "r") as fp:
            pdb_string = fp.read()
        protein_object = protein.from_pdb_string(pdb_string, None)
        chain_dict = {}
        chain_dict["seq"] = residue_constants.aatype_to_str_sequence(
            protein_object.aatype,
        )
        chain_dict["resolution"] = 0.
        if(chain_cluster_size_dict is not None):
            # BUG FIX: this branch referenced the undefined name ``full_name``
            # (only set in the .cif branch), raising NameError whenever a
            # cluster file was supplied. PDB entries are keyed by file_id.
            cluster_size = chain_cluster_size_dict.get(
                file_id.upper(), -1
            )
            chain_dict["cluster_size"] = cluster_size
        out = {file_id: chain_dict}
    return out
def main(args):
    """Build the chain data cache and write it as JSON to args.output_path."""
    # Optional mapping CHAIN_ID -> size of the cluster containing it.
    chain_cluster_size_dict = None
    if(args.cluster_file is not None):
        chain_cluster_size_dict = {}
        with open(args.cluster_file, "r") as fp:
            clusters = [l.strip() for l in fp.readlines()]
        for cluster in clusters:
            chain_ids = cluster.split()
            cluster_len = len(chain_ids)
            for chain_id in chain_ids:
                chain_id = chain_id.upper()
                chain_cluster_size_dict[chain_id] = cluster_len

    accepted_exts = [".cif", ".pdb"]
    files = list(os.listdir(args.data_dir))
    files = [f for f in files if os.path.splitext(f)[-1] in accepted_exts]

    # Bind the fixed arguments so pool workers only receive the filename.
    fn = partial(
        parse_file,
        args=args,
        chain_cluster_size_dict=chain_cluster_size_dict,
    )
    data = {}
    # Fan the per-file parsing out over a process pool; results arrive in
    # arbitrary order, which is fine since they are merged into one dict.
    with Pool(processes=args.no_workers) as p:
        with tqdm(total=len(files)) as pbar:
            for d in p.imap_unordered(fn, files, chunksize=args.chunksize):
                data.update(d)
                pbar.update()

    with open(args.output_path, "w") as fp:
        fp.write(json.dumps(data, indent=4))
if __name__ == "__main__":
    # CLI entry point: parse arguments and build the chain data cache.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "data_dir", type=str, help="Directory containing mmCIF or PDB files"
    )
    parser.add_argument(
        "output_path", type=str, help="Path for .json output"
    )
    parser.add_argument(
        "--cluster_file", type=str, default=None,
        help=(
            "Path to a cluster file (e.g. PDB40), one cluster "
            "({PROT1_ID}_{CHAIN_ID} {PROT2_ID}_{CHAIN_ID} ...) per line. "
            "Chains not in this cluster file will NOT be filtered by cluster "
            "size."
        )
    )
    parser.add_argument(
        "--no_workers", type=int, default=4,
        help="Number of workers to use for parsing"
    )
    parser.add_argument(
        "--chunksize", type=int, default=10,
        help="How many files should be distributed to each worker at a time"
    )
    args = parser.parse_args()

    main(args)
| aqlaboratory/openfold | scripts/generate_chain_data_cache.py | generate_chain_data_cache.py | py | 4,124 | python | en | code | 2,165 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
32434406258 | from contextlib import ExitStack, contextmanager
from fnmatch import fnmatch
from glob import glob
from params_proto import ParamsProto, Proto, Flag
class UploadArgs(ParamsProto):
    """ ML-Logger upload command

    Example:

        ml-upload --list # to see all files in the current directory for upload
        ml-upload --target /fast_nerf/fast_nerf/panda_exp/2022 # uploads to this folder
        ml-upload --target /$USER/sratch/tmp --overwrite # overwrite existing files
        ml-upload --target /$USER/scratch/tmp --archive # upload the files as a tar file
    """
    # Each attribute below is a CLI flag/option parsed by params_proto.
    list = Flag("List all of the folders if set.")
    target: str = Proto(help="The target prefix on the logging server")  # , required=True)
    workdir = Proto(".", help="cache directory")
    source = Proto("*", help="""Query pattern for the files to be tarred.""")
    archival = Flag("Use archive to upload the files")
    # Semicolon-separated fnmatch patterns, e.g. ".git*;*.tmp".
    exclude = Proto(".git*", help="Exclude files matching this pattern when uploading")
    overwrite = Flag("overwrite existing folders in the cache directory")
@contextmanager
def WorkDir(path):
    """Temporarily switch the process working directory to *path*.

    The original working directory is restored on exit, even when the
    body raises.

    Args:
        path (Path): directory to use as cwd inside the context.

    Yields:
        None
    """
    import os
    previous = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        os.chdir(previous)
# def list(source, query):
def upload(source, target, overwrite=False):
    """download dataset from source to the target folder (local)

    Args:
        source (str): source folder. Example: "/fast_nerf/fast_nerf/panda_exp/2022"
        target (str): target folder. Example: "$DATASETS/panda_exp/2022"
        overwrite (bool, optional): overwrite the target folder. Defaults to False.
            Other-wise it will skip the download if the target folder exists.
    """
    from ml_logger import logger
    # Placeholder: intentionally unimplemented.
    raise NotImplementedError("This is a snippet. You need to implement this function.")
def entrypoint():
    from ml_logger import logger
    # NOTE(review): the string below is not a docstring -- it follows the
    # import statement -- so it is just an unused expression.
    """list the current directory into a tree"""
    # Resolve the source patterns relative to the working directory and drop
    # anything matching the (semicolon-separated) exclude patterns.
    with WorkDir(UploadArgs.workdir):
        folders = glob(UploadArgs.source, recursive=True)
        exclude_patterns = UploadArgs.exclude.split(';')
        if exclude_patterns:
            # show me the code for match the child string against a list of exclude patterns, step by step
            folders = [f for f in folders if not any([fnmatch(f, e) for e in exclude_patterns])]
    if UploadArgs.list:
        print(UploadArgs.workdir + ":", *folders, sep="\n")
        return
    if UploadArgs.target is None:
        logger.print("setting the upload target to ", logger.prefix)
        PCntx = ExitStack()  # no-op context: upload under the logger's current prefix
    else:
        PCntx = logger.Prefix(UploadArgs.target)
    with logger.Sync(), PCntx, WorkDir(UploadArgs.workdir):  # use synchronous mode to make sure the upload finished
        from tqdm import tqdm
        pbar = tqdm(folders)
        for local_name in pbar:
            desc = f"Uploading the {local_name} to {logger.get_dash_url()}/{local_name}"
            pbar.write(desc)
            import os
            # Plain files are uploaded directly; directories go through a tar.
            if os.path.isfile(local_name):
                logger.upload_file(local_name, local_name)
                continue
            tar_filename = local_name + ".tar"
            if tar_filename in (logger.glob(tar_filename) or []):
                if UploadArgs.overwrite:
                    pbar.write(f"overwriting {tar_filename} on the server")
                    logger.remove(tar_filename)
                else:
                    # NOTE(review): "alread" typo and missing separator are in
                    # the runtime message; left untouched by this doc-only edit.
                    pbar.write(f"{tar_filename} alread exists on the server"
                               "Set the --overwrite flag to overwrite it.")
                    continue
            logger.upload_dir(local_name, tar_filename, excludes=exclude_patterns, archive="tar")
            if local_name in (logger.glob(local_name) or []):
                if UploadArgs.overwrite:
                    pbar.write(f"overwriting {local_name} on the server")
                    logger.remove(local_name)
                else:
                    pbar.write(f"{local_name} alread exists on the server"
                               "Set the --overwrite flag to overwrite it.")
                    continue
            # Unless archival mode was requested, unpack the tar on the server
            # and delete it, leaving a plain directory behind.
            if not UploadArgs.archival:
                logger.shell(f"mkdir -p {local_name} && tar -xvf {tar_filename} --directory {local_name}")
                logger.remove(tar_filename)
                pbar.write("Decompressed the archive on the server")
    print("Uploading completed")
if __name__ == "__main__":
    # Example configurations, kept for reference:
    # UploadArgs.list = True
    # UploadArgs.overwrite = True
    # UploadArgs.archive = False
    # UploadArgs.target = "/fast_nerf/fast_nerf/panda_exp/2023/ge_upload_example/"
    # UploadArgs.prefix = os.path.expandvars("/fast_nerf/fast_nerf/panda_exp/2022")
    # UploadArgs.output = os.path.expandvars("$DATASETS/panda_exp/2022")
    entrypoint()
| geyang/ml_logger | ml_logger/cli/upload.py | upload.py | py | 4,894 | python | en | code | 176 | github-code | 1 | [
{
"api_name": "params_proto.ParamsProto",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "params_proto.Flag",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "params_proto.Proto",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "params_p... |
25327899692 |
import datetime
import pandas as pd
import random
import simpy
import numpy as np
from scipy.stats import uniform
class Elevator:
    """
    Elevator that move people from floor to floor
    Has a max compatity
    Uses a event to notifiy passengers when they can get on the elevator
    and when they arrive at their destination floor
    """
    # Class-level counter used to hand out unique elevator ids.
    next_id = 1

    @classmethod
    def get_next_id(cls):
        # Return the next unique id and advance the counter.
        id = cls.next_id
        cls.next_id += 1
        return id

    def __init__(self, env, settings, floors, boarding_queues, arrive_events, retrigger, name):
        self.id = self.get_next_id()
        self.name = name
        self.env = env
        self.floors = floors
        self.on_floor = self.floors[0]          # start at the first listed floor
        self.move_inc = 1                       # +1 = moving up, -1 = moving down
        self.current_load = 0
        # Cabin floor area; used as the capacity limit (80% usable, see can_load).
        self.area = settings['length'] * settings['width']
        self.boarding_queues = boarding_queues
        self.arrive_events = arrive_events
        # list of passengers on elevator, one per floor
        self.on_board = {f: [] for f in self.floors}
        # start elevator
        self.moving = env.process(self._move_next_floor())
        self.history = []                       # [time, load, floor, status] event log
        self.reactivate = self.env.event()      # fired externally to wake an idle elevator
        self.status = 'IDLE'
        self.retrigger = retrigger
        # insert all distributions to draw from:
        # NOTE(review): scipy.stats.uniform takes (loc, scale); passing max as
        # the second argument spans [min, min+max], not [min, max] -- confirm.
        self.unloading_time = uniform(settings['unloading_time']['min'], settings['unloading_time']['max'])
        self.loading_time = uniform(settings['loading_time']['min'], settings['loading_time']['max'])
        self.closing_time = settings['door_close_time']
        self.move_time = settings['move_time']
        self.repairman = simpy.Resource(self.env, capacity=1)
        self.env.process(self.break_elevator())  # background breakdown process
        self.broken = False
        self.door_status = 'CLOSED'
        self.outage_probability = settings['failure']['probability']
        self.outage_duration = settings['failure']['duration']

    def time_to_failure(self):
        # Time until the next random breakdown, centered on 1/probability.
        return np.random.uniform(low=0.5 * 1 / self.outage_probability, high=1.5 * 1 / self.outage_probability)

    def break_elevator(self):
        # Background process: periodically interrupts the moving process to
        # simulate a breakdown (unless one is already in progress).
        while True:
            yield self.env.timeout(self.time_to_failure())
            if not self.broken:
                self.moving.interrupt()

    def calculate_load(self):
        # Occupied floor area of everything currently on board.
        return sum([el.user.area for v in self.on_board.values() for el in v])

    def can_load(self):
        # True while the cabin is at or below 80% of its floor area.
        return self.calculate_load() <= self.area * 0.8

    def _move_next_floor(self):
        """
        Moves the elevator up and down
        Elevator stops at every floor
        """
        def _update_direction():
            # Decide where to go next: reverse early when nobody (waiting or
            # on board) remains in the travel direction, and return the travel
            # time to the chosen floor (0 when there is nothing to do at all).
            update_time = self.move_time
            floors_below = [f for f in self.floors if f < self.on_floor]
            floors_above = [f for f in self.floors if f > self.on_floor]
            customers_below = [len(self.on_board[floor]) + len(self.boarding_queues[floor]) for floor in floors_below]
            customers_above = [len(self.on_board[floor]) + len(self.boarding_queues[floor]) for floor in floors_above]
            if sum(customers_below) == 0 and sum(customers_above) == 0:
                update_time = 0
            else:
                if (sum(customers_below) == 0 and self.move_inc == -1) or \
                        (sum(customers_above) == 0 and self.move_inc == 1):
                    print(f'Elevator {self.id}: Smart change of direction!')
                    self.move_inc *= -1
                self.history.append([self.env.now, self.calculate_load(), self.on_floor, 'START MOVING'])
                idx_next_floor = self.floors.index(self.on_floor) + self.move_inc
                next_floor = self.floors[idx_next_floor]
                diff_floor = next_floor - self.on_floor
                self.on_floor = next_floor
                update_time *= abs(diff_floor)
            return update_time

        def _determine_unloading_time():
            # unloading time of an rc == 10
            # unloading time of personnel == 2
            # NOTE(review): area == 0.35 distinguishes a person from a load
            # carrier -- confirm this constant against the user model.
            unloading_time = 0
            for unboarder in self.on_board[self.on_floor]:
                if unboarder.user.area == 0.35:
                    # this is a person
                    unloading_time += 2
                else:
                    # this is a load carrier
                    unloading_time += 10
            return unloading_time

        def _unload_arriving_passengers():
            # Pop everyone destined for the current floor, signal their
            # per-passenger events, then fire (and replace) this elevator's
            # floor-level arrival event.
            while len(self.on_board[self.on_floor]) > 0:
                p = self.on_board[self.on_floor].pop()
                p.user.add_event(time=self.env.now, event='unloading', process='elevator', location=self.name)
                p.user.current_floor = self.on_floor
                p.onboard_event.succeed()
            arrive_events = self.arrive_events[self.name][self.on_floor]
            self.arrive_events[self.name][self.on_floor] = simpy.Event(self.env)
            arrive_events.succeed()

        def _load_departing_passengers():
            # Board waiting passengers while respecting (a) a single user type
            # per trip and (b) the 80% area capacity limit. Returns the total
            # (random) loading time for everyone who boarded.
            boarding = []
            current_load = self.calculate_load()
            for el in self.boarding_queues[self.on_floor]:
                loaded_users = [el for v in self.on_board.values() for el in v]
                if len(loaded_users) > 0:
                    user_type = loaded_users[0].user.user_type
                    if el.user.user_type != user_type:
                        continue
                if current_load + el.user.area < 0.8 * self.area:
                    boarding.append(el)
                    current_load += el.user.area
            for b in boarding:
                self.boarding_queues[self.on_floor].remove(b)
                b.arrive_event = self.arrive_events[self.name][b.dest_floor]
                b.elevator = self.name
                b.user.add_event(time=self.env.now, event='loading', process='elevator', location=self.name)
                self.on_board[b.dest_floor].append(b)
            return np.sum(self.loading_time.rvs(len(boarding)))

        def _has_task():
            # Anyone waiting on any floor, or anyone still on board?
            if sum([len(v) for v in self.boarding_queues.values()]) > 0:
                return True
            elif sum([len(v) for v in self.on_board.values()]) > 0:
                return True
            return False

        while True:
            try:
                if not _has_task():
                    # Nothing to do: park until someone fires `reactivate`.
                    self.status = 'IDLE'
                    self.history.append([self.env.now, self.calculate_load(), self.on_floor, 'IDLE'])
                    yield self.reactivate
                    print(f'{self.env.now:.2f} Triggered reactivation of elevator {self.id}')
                    self.status = 'ACTIVE'
                else:
                    # Unload at the current floor (doors only open if needed).
                    unloading_time = _determine_unloading_time()
                    if unloading_time > 0:
                        yield self.env.process(self.open_door())
                    self.history.append([self.env.now, self.calculate_load(), self.on_floor, 'START UNLOADING'])
                    yield self.env.timeout(unloading_time)
                    _unload_arriving_passengers()
                    # Keep loading batches until nobody else can board.
                    while True:
                        loading_time = _load_departing_passengers()
                        if loading_time > 0:
                            yield self.env.process(self.open_door())
                        else:
                            break
                        self.history.append([self.env.now, self.calculate_load(), self.on_floor, 'START LOADING'])
                        yield self.env.timeout(loading_time)
                    yield self.env.process(self.close_door())
                    # Passengers left behind (full cabin / wrong user type):
                    # ask the dispatcher for another elevator.
                    if len(self.boarding_queues[self.on_floor]) > 0:
                        self.retrigger.succeed(value={
                            'from_floor': self.boarding_queues[self.on_floor][0].start_floor,
                            'to_floor': self.boarding_queues[self.on_floor][0].dest_floor
                        })
                    move_time = _update_direction()
                    yield self.env.timeout(move_time)
            except simpy.Interrupt:
                # Breakdown: log it and wait for the (single) repairman.
                self.broken = True
                self.history.append([self.env.now, self.calculate_load(), self.on_floor, 'BREAKDOWN'])
                with self.repairman.request() as request:
                    yield request
                    yield self.env.timeout(self.outage_duration)
                self.broken = False

    def open_door(self):
        # Open the doors if they are closed; takes `closing_time`.
        if self.door_status != 'OPEN':
            self.history.append([self.env.now, self.calculate_load(), self.on_floor, 'OPEN DOORS'])
            self.door_status = 'OPEN'
            yield self.env.timeout(self.closing_time)

    def close_door(self):
        # Close the doors if they are open; takes `closing_time`.
        if self.door_status != 'CLOSED':
            self.history.append([self.env.now, self.calculate_load(), self.on_floor, 'CLOSING DOORS'])
            self.door_status = 'CLOSED'
            yield self.env.timeout(self.closing_time)

    def to_pandas(self):
        # Convert the raw event log into an aggregated activity table.
        df = pd.DataFrame(self.history, columns=['_time', 'load', 'floor', 'reportingStatus'])
        # insert a task in between each section
        df['duration'] = df['_time'].shift(-1) - df['_time']
        df['aasCode'] = self.name
        df['reportingStatus'] = df['reportingStatus'].str.replace('START ', '')
        df.drop(index=df.loc[df['duration'] == 0].index, inplace=True)
        # Merge consecutive rows that share the same status into one record.
        df['group'] = (df['reportingStatus'] != df['reportingStatus'].shift()).cumsum().rename('group')
        agg_df = df.groupby(by='group').agg({
            '_time': 'first',
            'load': 'first',
            'floor': 'first',
            'reportingStatus': 'first',
            'duration': 'sum',
            'aasCode': 'first'
        })
        return agg_df
| jeroensimacan/simulating_logistics_processes | elevator.py | elevator.py | py | 9,910 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.stats.uniform",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "scipy.stats.uniform",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "simpy.Resource",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.random.u... |
22386637474 | import serial
import time
import binascii
ser = serial.Serial("COM8", 9600)
t = (0x1F00FFFF).to_bytes(4, byteorder="big")
print(t)
while True:
time.sleep(0.1)
ser.write(t)
result = ser.read_all()
if result != b'':
print(result) | yato-Neco/Tukuba_Challenge | main_program/rust/Robot/sw.py | sw.py | py | 254 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "serial.Serial",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
}
] |
from urllib2 import Request, urlopen
import xml.etree.ElementTree as ET
import json

# Fetch the InciWeb RSS feed (state id 3) -- Python 2 urllib2 API.
url_request = Request('http://inciweb.nwcg.gov/feeds/rss/incidents/state/3')
try:
    url_response = urlopen(url_request)
    rss_content = url_response.read()
except Exception as e:
    print(str(e))

xml_root = ET.fromstring(rss_content)
incidents = []
for xml_child in xml_root:
    for item_child in xml_child.findall("item"):
        # BUG FIX: a single dict used to be created once outside the loops and
        # appended repeatedly, so every list entry aliased the same object and
        # ended up holding the values of the LAST item. A fresh dict is now
        # built per <item>.
        incidents_dict = {}
        incidents_dict['title'] = item_child.find('title').text
        incidents_dict['link'] = item_child.find('link').text
        incidents_dict['description'] = item_child.find('description').text
        incidents_dict['pubDate'] = item_child.find('pubDate').text
        geo_namespaces = {'geo': 'http://www.w3.org/2003/01/geo/wgs84_pos#'}
        incidents_dict['lat'] = item_child.find('geo:lat', namespaces=geo_namespaces).text
        incidents_dict['long'] = item_child.find('geo:long', namespaces=geo_namespaces).text
        incidents.append(incidents_dict)

# JSONP payload: callback({"incidents": [...]});
json_formatted_string = ("callback({\"incidents\": "
                         + json.dumps(incidents, indent=4, skipkeys=True, sort_keys=True)
                         + "});")

# Rewrite the output file only when the payload actually changed.
try:
    filename = "./wildfire01.json"
    fob = open(filename, 'rU')
    old_json_string = fob.read()
    fob.close()
    if old_json_string != json_formatted_string:
        fd = open(filename, 'w')
        print(json_formatted_string)
        fd.write(json_formatted_string)
        fd.close()
except Exception as e:
    print ('ERROR writing: {}'.format(e))
| anshulankush/CronkitePython | PhpToPython/wildfire_python_parser.py | wildfire_python_parser.py | py | 1,518 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib2.Request",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "xml.et... |
from django.conf.urls.defaults import *
from django.contrib.syndication.views import feed as feed_view
from django.views.generic import date_based, list_detail
from django.contrib import admin

from ebblog.blog.models import Entry
from ebblog.blog import feeds

# Register any ModelAdmin classes found in installed apps.
admin.autodiscover()

# Shared kwargs for the date-based generic views.
info_dict = {
    'queryset': Entry.objects.order_by('pub_date'),
    'date_field': 'pub_date',
}

# Feed registry for the legacy feed view (slug -> Feed class).
FEEDS = {
    'rss': feeds.BlogEntryFeed,
}

# Old-style (pre-Django 1.4) patterns() URLconf: detail, day/month/year
# archives, RSS feed, full archive list, and the homepage index.
urlpatterns = patterns('',
    (r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>\w+)/$', date_based.object_detail, dict(info_dict, slug_field='slug')),
    (r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$', date_based.archive_day, info_dict),
    (r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/$', date_based.archive_month, info_dict),
    (r'^(?P<year>\d{4})/$', date_based.archive_year, info_dict),
    (r'^(rss)/$', feed_view, {'feed_dict': FEEDS}),
    (r'^archives/', list_detail.object_list, {'queryset': Entry.objects.order_by('-pub_date'), 'template_name': 'blog/archive.html'}),
    (r'^$', date_based.archive_index, dict(info_dict, template_name='homepage.html')),
    ('^admin/', include(admin.site.urls)),
)
| brosner/everyblock_code | ebblog/ebblog/urls.py | urls.py | py | 1,167 | python | en | code | 130 | github-code | 1 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "ebblog.blog.models.Entry.objects.order_by",
"line_number": 11,
"usage_type": "call"
... |
35939540734 | '''
Created on Nov 28, 2012
@author: cosmin
'''
from google.appengine.ext import webapp, db
import jinja2
import os
import logging as log

# Jinja2 environment rooted at this module's directory so templates are
# resolved relative to the source file.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class ClustersP(webapp.RequestHandler):
    """GAE request handler serving the k-means clusters page."""

    def get(self):
        """Render the clusters page for the requested cluster count.

        Reads the ``clusters_count`` query parameter, fetches the matching
        Clusters entity from the datastore (if a count was supplied) and
        renders ``templates/clusters.html``.
        """
        # Cluster counts the user may pick from in the UI.
        selectable_counts = [10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 26]
        # Get the selected clusters_count (empty string when not supplied).
        clusters_count = self.request.get("clusters_count")
        log.info("K-means clusters for %s clusters." % clusters_count)
        # Get the clusters from the database.
        clusters = None
        if clusters_count != '':
            clusters_count = int(clusters_count)
            clusters = db.GqlQuery("SELECT * "
                                   "FROM Clusters "
                                   "WHERE count = :1",
                                   clusters_count)
            clusters = clusters.get()
            # Idiom fix: identity test instead of the original `!= None`.
            if clusters is not None:
                clusters.expand()
                log.info(str(clusters))
        else:
            # Sentinel meaning "no count selected yet" for the template.
            clusters_count = -1
        # Generate the page.
        template_values = {'selectable': selectable_counts, 'clusters_count': clusters_count, 'clusters': clusters}
        template = jinja_environment.get_template('templates/clusters.html')
        self.response.out.write(template.render(template_values))
| cosminstefanxp/freely-stats | remote-code/ClustersP.py | ClustersP.py | py | 1,456 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "jinja2.Environment",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
23228107132 | import cv2
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
import subprocess
import kakao_MES_api

# Webcam mask-detection loop: an OpenCV-DNN face detector (SSD/ResNet-10
# Caffe model) finds the most confident face per frame, a Keras classifier
# decides mask / no-mask, and the first no-mask frame triggers OCR + alert
# side processes.
facenet = cv2.dnn.readNet('face_detector/deploy.prototxt', 'face_detector/res10_300x300_ssd_iter_140000.caffemodel')
model = load_model('detector.model')
cap = cv2.VideoCapture(0)
#video save
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out_video = cv2.VideoWriter('video.avi',fourcc,5,(640,480))
flag = 0 #with mask state = 0, without mask state = 1
if not cap.isOpened():
    print("Could not open cam")
    exit()
# loop through frames
while cap.isOpened():
    faces = []
    locs = []
    ret, frame = cap.read()
    # Flip around both axes; presumably the camera is mounted upside-down —
    # TODO(review): confirm the mounting.
    frame = cv2.flip(frame,-1)
    if not ret:
        print("Could not read frame")
        exit()
    h, w = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, scalefactor=1., size=(300, 300), mean=(104., 177., 123.))
    facenet.setInput(blob)
    detections = facenet.forward()
    # Only detection index 0 (the top-ranked face) is examined.
    confidence = detections[0, 0, 0, 2]
    if confidence > 0.5:
        # Scale the normalized box back to pixel coordinates and clamp to
        # the frame bounds.
        box = detections[0, 0, 0, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        (startX, startY) = (max(0, startX), max(0, startY))
        (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
        face = frame[startY:endY, startX:endX]
        if face.any():
            # Prepare the crop exactly as the classifier expects:
            # RGB, 224x224, MobileNetV2 preprocessing.
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)
            faces.append(face)
            locs.append((startX, startY, endX, endY))
            faces = np.array(faces, dtype="float32")
            preds = model.predict(faces, batch_size=32)
            for (box, no_mask) in zip(locs, preds):
                (startX, startY, endX, endY) = box
                if no_mask > 0.6:
                    if flag == 0:
                        # First no-mask frame of a streak: snapshot, OCR the
                        # name, then fire TTS / messaging / MES processes.
                        cv2.imwrite('find.jpg',frame)
                        name = subprocess.check_output("python3 naver_OCR_api.py -i find.jpg", shell = True)
                        p = subprocess.Popen(['python3','kakao_TTS_api.py','-n',name])
                        p2 = subprocess.Popen(['python3','send_message.py','-n',name, '-l','cam01','-i','find.jpg'])
                        p3 = subprocess.Popen(['python3','kakao_MES_api.py'])
                        flag = 1
                    color = (0,0,255)
                    label = "No Mask ({:.2f}%)".format(no_mask[0]*100)
                    cv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color,2)
                    cv2.rectangle(frame, (startX, startY), (endX, endY), color,2)
                else:
                    # Mask present: reset the alert latch.
                    flag = 0
                    color = (0,255,0)
                    label = "Mask ({:.2f}%)".format( (1-no_mask[0]) * 100)
                    cv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                    cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
    cv2.imshow('mask',frame)
    out_video.write(frame)
    # ESC key exits the loop.
    if cv2.waitKey(1) & 0xFF == 27:
        break
cap.release()
out_video.release()
cv2.destroyAllWindows()
| parksj0923/KORartilleryman | 5corps_artillery/makerthon/final/raspberry/main.py | main.py | py | 2,810 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.dnn.readNet",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.Vid... |
20157899929 | import os
# Headless EGL rendering; must be set before any OpenGL-backed import.
os.environ['PYOPENGL_PLATFORM'] = 'egl'
from render_utils import load_obj_mesh, param_to_tensor, rotate_mesh, \
    pers_get_depth_maps, get_depth_maps, pers_add_lights, add_lights
from tqdm import tqdm
import numpy as np
import pickle
import smplx
import cv2
import torch
from scipy.spatial.transform import Rotation as R_
import argparse
import trimesh

# Pin the process to a single GPU and enable verbose distributed debugging.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
os.environ["TORCH_DISTRIBUTED_DEBUG"]="DETAIL"
def none_or_str(value):
    """Argparse type helper: map the literal string 'None' to ``None``.

    Any other value is passed through unchanged.
    """
    return None if value == 'None' else value
# Command-line options. The commented-out groups below are the presets used
# for the RenderPeople (RP) and THuman2 datasets.
parser = argparse.ArgumentParser()
# parser.add_argument('--data_path', type=str, default='/workspace/code_github/data/', help='path to mesh data')
parser.add_argument('--data_path', type=str, default='/workspace/dataset/data/', help='path to mesh data')
parser.add_argument('--data_name', type=str, default='2K2K', help='folder name of rendered dataset')
parser.add_argument('--smpl_model_path', type=none_or_str, default='None', help='path to smplx model')
parser.add_argument('--file', type=str, default='ply', help='obj or ply')
parser.add_argument('--render_ORTH', type=bool, default=False, help='render orthgonal images')
# parser.add_argument('--data_name', type=str, default='RP', help='folder name of rendered dataset')
# parser.add_argument('--smpl_model_path', type=none_or_str, default='None', help='path to smplx model')
# parser.add_argument('--file', type=str, default='obj', help='only obj for RP')
# parser.add_argument('--render_ORTH', type=bool, default=False, help='render orthgonal images')
# parser.add_argument('--data_name', type=str, default='THuman2', help='folder name of rendered dataset')
# parser.add_argument('--smpl_model_path', type=str, default='/workspace/code_github/render/smpl_related/models', help='path to smplx model')
# parser.add_argument('--file', type=str, default='obj', help='only obj for THuman2')
# parser.add_argument('--render_ORTH', type=bool, default=False, help='render orthgonal images')
args = parser.parse_args()
def make_train_list(data_path, data_name, angle_min_x, angle_max_x, interval_x, axis_x, axis_y):
    """Write ``<data_path>/list/<data_name>_all.txt`` listing every rendered
    front view (shaded color / unshaded color / depth) for each subject at
    each x-rotation angle in [angle_min_x, angle_max_x] step interval_x.

    The file is recreated from scratch on every call and chmod'ed 0777.
    """
    os.makedirs(os.path.join(data_path, 'list'), exist_ok=True)
    list_file = os.path.join(data_path, 'list', data_name+'_all.txt')
    # One "<data_name>_0_y_<x>_x" folder name per rendered angle.
    list_name_degree = []
    for x in range(angle_min_x, angle_max_x + 1, interval_x):
        list_name_degree.append('{}_{}_{}_{}_{}'.format(data_name, 0, axis_y, x, axis_x))
    # THuman2 keeps its meshes one level deeper, under obj/THuman2/data.
    if data_name=="THuman2":
        data = sorted(os.listdir(os.path.join(data_path, 'obj', data_name, 'data')))
    else:
        data = sorted(os.listdir(os.path.join(data_path, 'obj', data_name)))
    # Start from an empty list file (open mode is append, so remove first).
    if os.path.isfile(list_file):
        os.remove(list_file)
    with open(list_file, "a") as f:
        for d in data: # ['rp_aaron_posed_013_OBJ']
            if data_name=="RP":
                item_name = d[:-4] # 'rp_aaron_posed_013'
            elif data_name=="THuman2" or data_name=="2K2K":
                item_name = d
            else:
                raise Exception("Only for RenderPeople, THuman2, and 2K2K dataset.")
            for name_degree in list_name_degree: # ['RP_0_y_-30_x', 'RP_0_y_-20_x', 'RP_0_y_-10_x', 'RP_0_y_0_x', 'RP_0_y_10_x', 'RP_0_y_20_x', 'RP_0_y_30_x']
                line = '/PERS/COLOR/SHADED/{0}/{1}_front.png /PERS/COLOR/NOSHADING/{0}/{1}_front.png /PERS/DEPTH/{0}/{1}_front.png\n'.format(name_degree, item_name)
                f.write(line)
    os.chmod(list_file, 0o777)
def render_mesh(data_path, # '/workspace/code_github/data/'
                data_name, # 'RP', 'THuman2'
                f='obj',
                cnt=0,
                fov=50,
                cam_res=2048,
                angle_min_x=0,
                angle_max_x=0,
                interval_x=5,
                angle_min_y=0,
                angle_max_y=0,
                interval_y=3,
                axis_x='x',
                axis_y='y',
                shad_num=1,
                save_img=True,
                smpl_model_path=None,
                render_orth=False,
                device=torch.device("cuda:0")):
    """Render perspective (and optionally orthographic) color and depth maps
    for every mesh of a dataset, sweeping the camera over x-axis angles.

    Outputs go under <data_path>/PERS/{COLOR,DEPTH}/... and, when
    ``render_orth`` is set, <data_path>/ORTH/... . Depth PNGs are stored as
    uint16 scaled by 32; colors as 8-bit PNGs. Also (re)writes the training
    list file via make_train_list. Note: the y-angle parameters are accepted
    but only x-angles are actually swept in the current implementation.
    """
    make_train_list(data_path, data_name, angle_min_x, angle_max_x, interval_x, axis_x, axis_y)
    # Output roots for perspective / orthographic renders.
    PERS_COLOR_ROOT = os.path.join(data_path, 'PERS', 'COLOR', 'NOSHADING')
    PERS_SHAD_COLOR_ROOT = os.path.join(data_path, 'PERS', 'COLOR', 'SHADED')
    PERS_DEPTH_ROOT = os.path.join(data_path, 'PERS', 'DEPTH') # '/workspace/code/data/PERS/DEPTH/'
    ORTH_COLOR_ROOT = os.path.join(data_path, 'ORTH', 'COLOR', 'NOSHADING')
    ORTH_SHAD_COLOR_ROOT = os.path.join(data_path, 'ORTH', 'COLOR', 'SHADED')
    ORTH_DEPTH_ROOT = os.path.join(data_path, 'ORTH', 'DEPTH')
    os.makedirs(PERS_COLOR_ROOT, exist_ok=True)
    os.makedirs(PERS_SHAD_COLOR_ROOT, exist_ok=True)
    os.makedirs(PERS_DEPTH_ROOT, exist_ok=True)
    if render_orth:
        os.makedirs(ORTH_COLOR_ROOT, exist_ok=True)
        os.makedirs(ORTH_SHAD_COLOR_ROOT, exist_ok=True)
        os.makedirs(ORTH_DEPTH_ROOT, exist_ok=True)
    # Per-angle output folder names (one entry per rendered x-angle).
    folder_pers_shad_color = []
    folder_orth_shad_color = []
    folder_pers_color = []
    folder_orth_color = []
    folder_pers_depth = []
    folder_orth_depth = []
    rot_angle_x = []
    rot_angle_y = []
    # Optional SMPL-X body model, used for THuman2 pose alignment below.
    if smpl_model_path is not None:
        smpl = smplx.create(model_path = smpl_model_path,
                            model_type = 'smplx',
                            gender = 'male', # 'neutral',
                            num_pca_comps = 12,
                            # use_pca = True,
                            # use_face_contour = True,
                            ).to(device)
    # for y in range(angle_min_y, angle_max_y + 1, interval_y):
    for x in range(angle_min_x, angle_max_x + 1, interval_x):
        folder_pers_shad_color.append('{}_{}_{}_{}_{}'.format(data_name, 0, axis_y, x, axis_x))
        folder_orth_shad_color.append('{}_{}_{}_{}_{}'.format(data_name, 0, axis_y, x, axis_x))
        folder_pers_color.append('{}_{}_{}_{}_{}'.format(data_name, 0, axis_y, x, axis_x))
        folder_orth_color.append('{}_{}_{}_{}_{}'.format(data_name, 0, axis_y, x, axis_x))
        folder_pers_depth.append('{}_{}_{}_{}_{}'.format(data_name, 0, axis_y, x, axis_x))
        folder_orth_depth.append('{}_{}_{}_{}_{}'.format(data_name, 0, axis_y, x, axis_x))
        rot_angle_y.append(0)
        rot_angle_x.append(x)
    # Create the per-angle output directories.
    for k in range(len(folder_pers_shad_color)):
        dir_pers_shad_color = os.path.join(PERS_SHAD_COLOR_ROOT, folder_pers_shad_color[k])
        dir_orth_shad_color = os.path.join(ORTH_SHAD_COLOR_ROOT, folder_orth_shad_color[k])
        dir_pers_color = os.path.join(PERS_COLOR_ROOT, folder_pers_color[k])
        dir_orth_color = os.path.join(ORTH_COLOR_ROOT, folder_orth_color[k])
        dir_pers_depth = os.path.join(PERS_DEPTH_ROOT, folder_pers_depth[k])
        dir_orth_depth = os.path.join(ORTH_DEPTH_ROOT, folder_orth_depth[k])
        if os.path.isdir(dir_pers_shad_color) is False and save_img is True:
            os.mkdir(dir_pers_shad_color)
        if os.path.isdir(dir_pers_color) is False and save_img is True:
            os.mkdir(dir_pers_color)
        if os.path.isdir(dir_pers_depth) is False and save_img is True:
            os.mkdir(dir_pers_depth)
        if render_orth:
            if os.path.isdir(dir_orth_shad_color) is False and save_img is True:
                os.mkdir(dir_orth_shad_color)
            if os.path.isdir(dir_orth_color) is False and save_img is True:
                os.mkdir(dir_orth_color)
            if os.path.isdir(dir_orth_depth) is False and save_img is True:
                os.mkdir(dir_orth_depth)
    # Enumerate the subject folders of the dataset.
    if data_name=="THuman2":
        data = sorted(os.listdir(os.path.join(data_path, 'obj', data_name, 'data')))
    else:
        data = sorted(os.listdir(os.path.join(data_path, 'obj', data_name)))
    for d in tqdm(data):
        # --- resolve mesh / texture paths per dataset layout -------------
        if data_name=="RP":
            item_name = d[:-4] # 'rp_aaron_posed_013'
            obj_name = d[:-4]+'_100k.obj'
            obj_path = os.path.join(data_path, 'obj', data_name, d, obj_name)
            # Fall back through the known RP file-name variants.
            if not os.path.exists(obj_path):
                obj_path = os.path.join(data_path, 'obj', data_name, d, obj_name)[:-3]+'OBJ'
            if not os.path.exists(obj_path):
                obj_path = os.path.join(data_path, 'obj', data_name, d, obj_name)[:-8]+'200k.obj'
            tex_path = os.path.join(data_path, 'obj', data_name, d, 'tex', d[:-3]+'dif_8k.jpg')
            if not os.path.exists(tex_path):
                tex_path = os.path.join(data_path, 'obj', data_name, d, 'tex', d[:-3]+'dif.jpg')
            mesh = load_obj_mesh(obj_path, tex_path)
            if not os.path.exists(obj_path):
                print('ERROR: obj file does not exist!!', obj_path)
                return
            glob_rotation = np.array([0., 0., 0.], dtype=np.float32)
        elif data_name=="2K2K":
            item_name = d
            if f=="obj":
                obj_path = os.path.join(data_path, 'obj', data_name, d, d+'.obj')
                tex_path = os.path.join(data_path, 'obj', data_name, d, d+'.png')
                mesh = load_obj_mesh(obj_path, tex_path)
                if not os.path.exists(obj_path):
                    print('ERROR: obj file does not exist!!', obj_path)
                    return
            elif f=="ply":
                ply_path = os.path.join(data_path, 'obj', data_name, d, d+'.ply')
                mesh = trimesh.load(ply_path)
                if not os.path.exists(ply_path):
                    print('ERROR: ply file does not exist!!', ply_path)
                    return
            glob_rotation = np.array([0., 0., 0.], dtype=np.float32)
        elif data_name=="THuman2":
            item_name = d
            obj_name = d+'.obj'
            obj_path = os.path.join(data_path, 'obj', data_name, 'data', d, obj_name)
            tex_path = os.path.join(data_path, 'obj', data_name, 'data', d, 'material0.jpeg')
            pose_path = os.path.join(data_path, 'obj', data_name, 'smplx', d, 'smplx_param.pkl')
            if not os.path.isfile(pose_path):
                pose_path = None
            mesh = load_obj_mesh(obj_path, tex_path)
            if not os.path.exists(obj_path):
                print('ERROR: obj file does not exist!!', obj_path)
                return
            # SMPLX
            # Use the fitted SMPL-X global orientation (y component) to
            # align THuman2 scans to a canonical facing direction.
            glob_rotation = np.array([0., 0., 0.], dtype=np.float32)
            if pose_path is not None:
                with open(pose_path, 'rb') as smplx_file:
                    smpl_param = pickle.load(smplx_file, encoding='latin1')
                glob_rotation[1] = smpl_param['global_orient'][0][1]
                smpl_param = param_to_tensor(smpl_param, device)
                smpl_mesh = smpl(
                    betas = smpl_param['betas'],
                    expression = smpl_param['expression'],
                    # transl = smpl_param['transl'],
                    global_orient = smpl_param['global_orient'],
                    body_pose = smpl_param['body_pose'],
                    jaw_pose = smpl_param['jaw_pose'],
                    left_hand_pose = smpl_param['left_hand_pose'], # [15,3]
                    right_hand_pose = smpl_param['right_hand_pose'],
                    leye_pose = smpl_param['leye_pose'],
                    reye_pose = smpl_param['reye_pose'],
                    return_verts=True,
                )
                smpl_mesh.vertices *= smpl_param['scale']
                smpl_mesh.vertices += smpl_param['translation']
        cnt += 1
        # --- render this subject at every requested angle -----------------
        for p in range(len(rot_angle_x)):
            pers_depth_name = os.path.join(PERS_DEPTH_ROOT, folder_pers_depth[p], item_name)
            pers_img_name = os.path.join(PERS_COLOR_ROOT, folder_pers_color[p], item_name)
            pers_shad_img_name = os.path.join(PERS_SHAD_COLOR_ROOT, folder_pers_shad_color[p], item_name)
            orth_depth_name = os.path.join(ORTH_DEPTH_ROOT, folder_orth_depth[p], item_name)
            orth_img_name = os.path.join(ORTH_COLOR_ROOT, folder_orth_color[p], item_name)
            orth_shad_img_name = os.path.join(ORTH_SHAD_COLOR_ROOT, folder_orth_shad_color[p], item_name)
            pers_color_front_name = pers_img_name + '_front.png'
            pers_color_back_name = pers_img_name + '_back.png'
            pers_depth_front_name = pers_depth_name + '_front.png'
            pers_depth_back_name = pers_depth_name + '_back.png'
            pers_shad_color_front_name = pers_shad_img_name + '_front.png'
            # pers_shad_color_back_name = pers_shad_img_name + '_back.png'
            orth_color_front_name = orth_img_name + '_front.png'
            orth_color_back_name = orth_img_name + '_back.png'
            orth_depth_front_name = orth_depth_name + '_front.png'
            orth_depth_back_name = orth_depth_name + '_back.png'
            orth_shad_color_front_name = orth_shad_img_name + '_front.png'
            # orth_shad_color_back_name = orth_shad_img_name + '_back.png'
            # Center the mesh and normalize its scale.
            vertices = (mesh.vertices - mesh.centroid)
            vertices_np = np.array(vertices)
            val = np.maximum(np.max(vertices_np), np.abs(np.min(vertices_np)))
            vertices /= val * 2.8
            # For RenderPeople Dataset
            # Hand-curated per-subject corrections: some RP scans face
            # sideways (turn_right) or are over-sized (size).
            turn_right = size = 0
            if d in [
                'rp_wendy_posed_002_OBJ',
                'rp_toshiro_posed_021_OBJ',
                'rp_scott_posed_037_OBJ',
                'rp_pamela_posed_012_OBJ',
                'rp_oliver_posed_029_OBJ',
                'rp_noah_posed_011_OBJ',
                'rp_mira_posed_001_OBJ',
                'rp_luke_posed_008_OBJ',
                'rp_luke_posed_007_OBJ',
                'rp_jessica_posed_006_OBJ',
                'rp_helen_posed_038_OBJ',
                'rp_eve_posed_003_OBJ',
                'rp_eric_posed_036_OBJ',
                'rp_eric_posed_007_OBJ',
                'rp_emma_posed_025_OBJ',
                'rp_dennis_posed_008_OBJ',
                'rp_chloe_posed_004_OBJ',
                'rp_anna_posed_001_OBJ',
                'rp_andrew_posed_004_OBJ',
                'rp_maya_posed_027_OBJ',
                'rp_petra_posed_006_OBJ',
            ]:
                turn_right = 1
            if d in [
                'rp_michael_posed_019_OBJ',
                'rp_mei_posed_007_OBJ',
                'rp_joel_posed_006_OBJ',
                'rp_ethan_posed_003_OBJ',
                'rp_elena_posed_013_OBJ',
                'rp_dennis_posed_001_OBJ',
                'rp_christine_posed_017_OBJ',
                'rp_beatrice_posed_034_OBJ',
                'rp_andrew_posed_007_OBJ',
            ]:
                size = 1
            if turn_right:
                # Rotate -90 degrees about the y axis to face the camera.
                vertices = vertices
                rotation_axis = np.array([0, 1, 0])
                rotation_degrees = -90
                rotation_radians = np.radians(rotation_degrees)
                rotation_vector = rotation_radians * rotation_axis
                rotation = R_.from_rotvec(rotation_vector)
                rot_max = rotation.as_matrix()
                vertices = np.einsum('ij,Bj ->Bi', rot_max, vertices)
            if size:
                vertices = vertices * 0.9
            angle_y = rot_angle_y[p]
            angle_x = rot_angle_x[p]
            # Rotate the mesh for this camera angle and build a scene.
            mesh_local = rotate_mesh(vertices, angle_x, glob_rotation, mesh.faces, mesh.visual.vertex_colors, axis=axis_x)
            scene = mesh_local.scene()
            scene.camera.resolution = [cam_res, cam_res]
            pers_color_front, pers_depth_front, pers_depth_back, pers_color_back = \
                pers_get_depth_maps(mesh_local, scene, cam_res, fov, item_name=item_name)
            if render_orth:
                orth_depth_front, orth_depth_back, orth_color_front, orth_color_back = \
                    get_depth_maps(mesh_local, scene, cam_res, fov, 'front', item_name=item_name)
            # Colors in [0,1] are scaled to 8-bit; depth is stored as
            # uint16 with a fixed x32 scale.
            cv2.imwrite(pers_color_front_name, (pers_color_front * 255).astype(np.int64))
            cv2.imwrite(pers_color_back_name, (pers_color_back * 255).astype(np.int64))
            if render_orth:
                cv2.imwrite(orth_color_front_name, (orth_color_front * 255).astype(np.int64))
                cv2.imwrite(orth_color_back_name, (orth_color_back * 255).astype(np.int64))
            cv2.imwrite(pers_depth_front_name, (pers_depth_front * 32.0).astype(np.uint16))
            cv2.imwrite(pers_depth_back_name, (pers_depth_back * 32.0).astype(np.uint16))
            if render_orth:
                cv2.imwrite(orth_depth_front_name, (orth_depth_front * 32.0).astype(np.uint16))
                cv2.imwrite(orth_depth_back_name, (orth_depth_back * 32.0).astype(np.uint16))
            # orthogonal-projection with shading
            for sh in range(shad_num):
                pers_shad_color_front = pers_add_lights(mesh_local, cam_res, rot_angle_x, scene, fov)
                # Mask out background pixels (zero depth) in the shaded image.
                pers_shad_color_front[pers_depth_front == 0, :] = [0, 0, 0]
                cv2.imwrite(pers_shad_color_front_name, (pers_shad_color_front))
                if render_orth:
                    orth_shad_color_front = add_lights(mesh_local, cam_res, rot_angle_x, scene, fov)
                    orth_shad_color_front[orth_depth_front == 0, :] = [0, 0, 0]
                    cv2.imwrite(orth_shad_color_front_name, (orth_shad_color_front))
    print('')
if __name__ == '__main__':
    folders = sorted(os.listdir(args.data_path))
    # folder_num = 0
    cnt_x = 0
    cnt_y = 0
    cnt_z = 0
    # Output roots for perspective / orthographic renders.
    os.makedirs(os.path.join(args.data_path, 'PERS', 'COLOR'), exist_ok=True)
    os.makedirs(os.path.join(args.data_path, 'PERS', 'DEPTH'), exist_ok=True)
    os.makedirs(os.path.join(args.data_path, 'ORTH', 'COLOR'), exist_ok=True)
    os.makedirs(os.path.join(args.data_path, 'ORTH', 'DEPTH'), exist_ok=True)
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
        print("WARNING: CPU only, this will be slow!")
    # Sweep the camera x-rotation from -30 to +30 degrees in 10-degree steps.
    render_mesh(args.data_path,
                args.data_name,
                f=args.file,
                cnt=cnt_y,
                fov=50,
                cam_res=2048,
                # cam_res=256,
                angle_min_x=-30,
                angle_max_x=30,
                interval_x=10,
                angle_min_y=0,
                angle_max_y=0,
                interval_y=0,
                axis_x='x',
                axis_y='y',
                shad_num=1,
                smpl_model_path=args.smpl_model_path,
                render_orth=args.render_ORTH,
                device=device)
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line... |
2879811950 | import tensorflow as tf
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from tensorflow.keras import optimizers
from datetime import datetime as dt
from load_data import load_wph_train, inverse_transform, load_wph_test
from EnvConfounderIRM import EnvAware

# --- experiment configuration -------------------------------------------
path = '/data/user18100643/dataset/water/'
city='water'
inorout='water'
emb_size = 32        # embedding width of the EnvAware model
seq_len = 12         # input window; sample[seq_len] is the prediction target
steps = 50           # training epochs
batchsize = 128
batch_num = 2500     # batches per epoch
learning_rate = 1e-3
div_en_n = 3 # the number of environments, maximum = 5
span=1
current_time = dt.now().strftime("%Y%m%d-%H%M%S")
reg_weight = 0
irm_weight_init = 10      # IRM penalty weight after warm-up
contrs_weight_init = 10   # contrastive loss weight after warm-up
# physical_devices = tf.config.list_physical_devices('GPU')
# # print('available devuces:',physical_devices)
# tf.config.experimental.set_visible_devices(physical_devices[3], device_type='GPU')
# tf.config.experimental.set_memory_growth(physical_devices[3], enable=True)
# log files
log_dir = '../logs/'+city+'/gradient_tape/' + current_time + '/PIRM' +'-span'+str(span)+str(irm_weight_init)+'-'+str(contrs_weight_init)+'-'+str(learning_rate)+'seq'+str(seq_len)+'emb'+str(emb_size)
# Running means of the training losses, flushed to TensorBoard each epoch.
train_loss = tf.keras.metrics.Mean(name='train_mse')
train_real_mse = tf.keras.metrics.Mean(name='train_real_mse')
# test_mse_err = tf.keras.metrics.Mean(name='test_mse')
# test_mae_err = tf.keras.metrics.Mean(name='test_mae')
result_summary_writer = tf.summary.create_file_writer(log_dir)
# initialize model
MODEL = EnvAware(emb_size)
optimizer = optimizers.Adam(learning_rate=learning_rate)
loss_func = tf.keras.losses.MeanSquaredError()
def train_step(train_env_set, div_en_n,rec_flg,irm_weight,contrs_weight):
    """One optimization step over all training environments.

    Per environment: prediction (MSE) loss and IRM penalty; across
    environments: a contrastive loss on the environment embeddings. Three
    gradients are taken from one persistent tape and applied to three
    variable groups of MODEL. Returns (all outputs, all targets, loss4pred).
    """
    # loss_y and loss_irm in each env
    # loss_c between envs
    env_label_set = []
    irm_feature_set = []
    env_feature_set = []
    loss_y_env = []
    penalty_env = []
    outputs_env = []
    y_true_env = []
    # Persistent tape: three different gradients are taken from it below.
    with tf.GradientTape(persistent=True) as g:
        for e in range(div_en_n):
            # First seq_len rows are the input window, row seq_len the target.
            train_x = tf.convert_to_tensor(train_env_set[e][:, :seq_len, :], dtype=tf.float32)
            train_y = tf.convert_to_tensor(train_env_set[e][:, seq_len, :], dtype=tf.float32)
            outputs, irm_features, env_features = MODEL(train_x)
            outputs_env.append(outputs)
            y_true_env.append(train_y)
            # environment label [0,..,0,1,...,1]
            env_label_set.append(tf.ones(shape=tf.shape(outputs)[0]) * e)
            env_feature_set.append(env_features)
            irm_feature_set.append(irm_features)
            MSE_loss = loss_func(train_y, outputs)
            # IRM penalty: squared gradient of the scaled risk w.r.t. the
            # scale variable, recorded on an inner tape.
            with tf.GradientTape() as gg:
                irm_loss, scale = MODEL.loss_irm(train_y, outputs)
            scale_grads = gg.gradient(irm_loss, [scale])
            penalty = tf.pow(scale_grads, 2)
            penalty_env.append(penalty)
            loss_y_env.append(MSE_loss)
        # mean for mse loss and penalty in each env
        loss_y = tf.reduce_mean(tf.stack(loss_y_env))
        loss_irm = tf.reduce_mean(tf.stack(penalty_env))
        # regulazation
        l2_reg = tf.reduce_mean([tf.nn.l2_loss(v) for v in MODEL.trainable_variables])
        # y_loss = MSE_loss+ reg_weight * l2_reg
        loss_contrastive = MODEL.loss_c(env_feature_set, env_label_set)
        loss4irm = loss_y + irm_weight * loss_irm
        # if irm_weight > 1.: loss4irm /= irm_weight
        loss4contrs = loss_y + contrs_weight * loss_contrastive
        # if contrs_weight > 1.: loss4contrs /= contrs_weight
        loss4pred = loss_y + contrs_weight * loss_contrastive + irm_weight * loss_irm
        # Rescale the combined loss so it stays comparable to loss_y.
        if irm_weight > 1. or contrs_weight > 1.:
            trade_max = np.max([irm_weight, contrs_weight])
            loss4pred /= trade_max
    # Three updates, one per variable group of the model.
    grads4irm = g.gradient(loss4irm, MODEL.get_env_irm_v())
    grads4contras = g.gradient(loss4contrs, MODEL.get_env_rel_v())
    grads4pred = g.gradient(loss4pred, MODEL.get_out_v())
    optimizer.apply_gradients(zip(grads4irm, MODEL.get_env_irm_v()))
    optimizer.apply_gradients(zip(grads4contras, MODEL.get_env_rel_v()))
    optimizer.apply_gradients(zip(grads4pred, MODEL.get_out_v()))
    if rec_flg:
        # Dump the learned features once for offline analysis.
        res_rec_f = '../res/train_emb_res'+current_time+'.npz'
        np.savez(res_rec_f,irm_feature=irm_feature_set,env_feature = env_feature_set)
    return tf.concat(outputs_env, axis=0), tf.concat(y_true_env, axis=0), loss4pred
def test_step(test_x, test_y,rec_flg):
    """Predict on the test tensors and return (mse, mae, mape) in real scale."""
    pred,irm_feat_test,env_feat_test = MODEL.predict(test_x)
    # inverse transform back to the original measurement scale
    real_pred = inverse_transform(path, city, pred, inorout)
    real_label = inverse_transform(path, city, test_y.numpy(), inorout)
    real_mse = mean_squared_error(real_label, real_pred)
    real_mae = mean_absolute_error(real_label, real_pred)
    # MAPE computed over non-zero labels only (avoids division by zero),
    # but normalized by the total number of labels.
    nonz_id = np.where(real_label!=0)
    diff = np.abs(real_label[nonz_id]-real_pred[nonz_id])/real_label[nonz_id]
    real_mape = np.sum(diff)/np.size(real_label)
    if rec_flg:
        # Dump the test-time features once for offline analysis.
        res_rec_f = '../res/test_emb_res' + current_time + '.npz'
        np.savez(res_rec_f, irm_feature=irm_feat_test, env_feature=env_feat_test)
    return real_mse, real_mae, real_mape
def test(s,rec_flg):
    """Evaluate on the full test set and log metrics to TensorBoard at step s."""
    # test_pred = []
    test_set = load_wph_test(path,seq_len,span)
    # for t_s in range(len(test_set)):
    #     test_x_tensor = tf.convert_to_tensor(test_set[t_s, :seq_len, :], dtype=tf.float32)
    #     test_y_tensor = tf.convert_to_tensor(test_set[t_s, seq_len, :], dtype=tf.float32)
    #     mse, mae = test_step(test_x_tensor, test_y_tensor)
    #     # record prediction result at each timestamp
    #     # test_pred.append(mse1)
    #
    #     # mean of mse and mae for all timestamps
    #     test_mse_err(mse)
    #     test_mae_err(mae)
    # Evaluate the whole test set in a single batch.
    test_x_tensor = tf.convert_to_tensor(test_set[:, :seq_len, :], dtype=tf.float32)
    test_y_tensor = tf.convert_to_tensor(test_set[:, seq_len, :], dtype=tf.float32)
    mse, mae,mape = test_step(test_x_tensor,test_y_tensor,rec_flg)
    rmse1 = np.sqrt(mse)
    print('RMSE for test set 1:', rmse1)
    print('MSE for test set 1:', mse)
    with result_summary_writer.as_default():
        tf.summary.scalar(name='test_rmse', data=rmse1, step=s)
        tf.summary.scalar(name='test_mse', data=mse, step=s)
        tf.summary.scalar(name='test_mae', data=mae, step=s)
        tf.summary.scalar(name='test_mape', data=mape, step=s)
loss_e = []
# penalty_e = []
# TRAIN
# get same num of samples from each environment
generator = load_wph_train(path,seq_len,div_en_n,batchsize)
record_flg=0.
for s in range(steps):
    # Warm-up: train the first 5 epochs without the IRM / contrastive terms.
    if s<5:
        irm_weight = 0.
        contrs_weight = 0.
    else:
        irm_weight = irm_weight_init
        contrs_weight = contrs_weight_init
    for batch in range(batch_num):
        # Record embeddings only on the very last batch of the run.
        if s == steps - 1 and batch ==batch_num - 1: record_flg = 1.
        tr_set = next(generator)
        outputs, y_true, loss_t = train_step(tr_set, div_en_n, record_flg,irm_weight,contrs_weight)
        # Track the MSE in the original (un-normalized) measurement scale.
        real_pred = inverse_transform(path, city, outputs.numpy(), inorout)
        real_label = inverse_transform(path, city, y_true.numpy(), inorout)
        real_mse = mean_squared_error(real_label, real_pred)
        print('step:', s, 'sample:', batch, 'train_mse:', loss_t)
        train_real_mse(real_mse)
        train_loss(loss_t)
    # Flush per-epoch means to TensorBoard, then reset the accumulators.
    with result_summary_writer.as_default():
        tf.summary.scalar(name='train_env_mean_loss', data=train_loss.result(), step=s)
        tf.summary.scalar(name='train_real_mse', data=train_real_mse.result(), step=s)
    train_loss.reset_state()
    train_real_mse.reset_state()
    # Evaluate on the held-out test set after every epoch.
    test(s,record_flg)
    print('=======================')
result_summary_writer.close()
| RoeyW/ood-for-smart-cities | Model/PIRM_wph.py | PIRM_wph.py | py | 7,828 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.metrics.Mean",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": ... |
25541238216 | import traceback
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import redirect
import json
from . import ibood_db
from .ibood_scraper import POSSIBLE_FILTERS
def home(request):
    """Render the iBOOD home page and dispatch recipient-related POSTs.

    A POST is tried against each form handler in turn (edit, remove, add,
    search-change); each handler returns True when it consumed the POST.
    """
    if request.method == 'POST':
        data = request.POST
        if not check_if_edit_recipient_post(data):
            if not check_if_remove_recipient_post(data):
                if not check_if_add_recipient_post(data):
                    if not check_if_search_change_post(data):
                        print("Tried to post something but couldn't figure out what it was")
    #get the recipients from the database
    recipients = ibood_db.get_all_recipients()
    context = {
        'recipients': recipients,
        'possible_filters':POSSIBLE_FILTERS,
    }
    template = loader.get_template('ibood/ibood_home.html')
    return HttpResponse(template.render(context,request))
def check_if_edit_recipient_post(data):
    """Handle an 'edit recipient' form POST.

    Returns True when the POST carried name_edit/mail_edit/id fields and the
    recipient was updated; False otherwise so other handlers can try.
    """
    name = data.get('name_edit')
    mail = data.get('mail_edit')
    # Renamed from `id` to avoid shadowing the builtin.
    recipient_id = data.get('id')
    # BUG FIX: the original `name and mail and id is not None` mixed
    # truthiness with an identity test and accepted an empty-string id,
    # updating the database with it. Require all three fields non-empty.
    if name and mail and recipient_id:
        # an edit button was clicked
        ibood_db.update_recipient(name, mail, recipient_id)
        return True
    return False
def check_if_remove_recipient_post(data):
    """Handle a 'remove recipient' button POST.

    Returns True when the POST carried a 'remove' key (the recipient id to
    delete), False otherwise so other handlers can try.
    """
    recipient_id = data.get('remove')
    if recipient_id is None:
        return False
    # a button to remove a recipient was clicked
    ibood_db.remove_recipient(recipient_id)
    return True
def check_if_add_recipient_post(data):
    """Handle an 'add recipient' form POST.

    Returns True when the POST carried non-empty mail/name fields and the
    recipient was added; False otherwise so other handlers can try.
    """
    mail = data.get('mail')
    name = data.get('name')
    # BUG FIX: the original `if mail and name is not None:` parsed as
    # `mail and (name is not None)`, so a POST with a non-empty mail and an
    # EMPTY name string passed the check and created a nameless recipient.
    if mail and name:
        # want to add a new recipient to the list
        ibood_db.add_recipient(name=name, mail=mail)
        return True
    return False
def search(request,id):
    """Render the saved-searches page for recipient ``id``.

    POSTs are first tried as a JSON search create/update (raw body), then as
    a form-encoded search removal.
    """
    if request.method == 'POST':
        # The search-change handler parses the raw JSON body; the remove
        # handler works on the form-encoded POST dict.
        data = request.body.decode("UTF-8")
        post_data = request.POST
        if not check_if_search_change_post(data):
            if not check_if_search_remove_post(post_data):
                print("Tried to post something but couldn't figure out what it was")
    recipient = ibood_db.get_recipient_with_id(id)
    context = {
        'recipient':recipient,
        'possible_filters':POSSIBLE_FILTERS,
    }
    template = loader.get_template('ibood/ibood_searches.html')
    return HttpResponse(template.render(context,request))
def check_if_search_remove_post(data):
    """Handle a 'remove search' POST (key 'remove' holds the search id).

    Returns True when a saved search was removed, False otherwise.
    """
    search_id = data.get('remove')
    if search_id is None:
        return False
    ibood_db.remove_search(search_id)
    return True
def check_if_search_change_post(data):
    """Handle a JSON POST that creates or updates a saved search.

    ``data`` is the raw request body (a JSON string) with keys 'type',
    'id' ('new_id' means create), 'name', 'filters' and (on create)
    'recipientId'. Returns True when a search was persisted, False
    otherwise — including on any parse/handling error, so callers can try
    the other handlers.
    """
    try:
        # Guard clauses instead of the original nested if/else pyramid.
        if data is None:
            return False
        json_data = json.loads(data)
        if json_data.get('type') != 'search':
            return False
        search_id = json_data.get('id')
        name = json_data.get('name')
        # Filters may be typed in any case; normalise before persisting.
        # BUG FIX: the original only lower-cased on update, so newly
        # created searches were stored with un-normalised filter values.
        filters = get_filters_as_lower_case(json_data.get('filters'))
        if search_id == 'new_id':
            # want to add a new search to the db
            recipient_id = json_data.get('recipientId')
            ibood_db.add_search(recipient_Id=recipient_id, search_action=json.dumps(filters), name=name)
        else:
            ibood_db.update_search(json.dumps(filters), search_id, name)
        return True
    except Exception:
        # BUG FIX: the original `print(traceback.print_exc())` printed the
        # traceback and then a stray "None"; print_exc() already writes it.
        traceback.print_exc()
        return False
def get_filters_as_lower_case(filters):
    """Lower-case every filter value in place and return the same dict.

    Filters may be entered in upper case by the user; they are normalised
    before being stored so matching stays case-insensitive.
    """
    for key, value in filters.items():
        filters[key] = value.lower()
    return filters
| wardgeronimussmets/Aviato | master/aviato/iBOOD/views.py | views.py | py | 3,913 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ibood_scraper.POSSIBLE_FILTERS",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 28,
"usage_type": "name"
},
... |
11207175018 | """
Tests for data obfuscation tasks.
"""
import errno
import json
import logging
import os
import shutil
import tarfile
import tempfile
import xml.etree.ElementTree as ET
from unittest import TestCase
from luigi import LocalTarget
from mock import MagicMock, sentinel
import edx.analytics.tasks.export.data_obfuscation as obfuscate
from edx.analytics.tasks.util.obfuscate_util import reset_user_info_for_testing
from edx.analytics.tasks.util.opaque_key_util import get_filename_safe_course_id
from edx.analytics.tasks.util.tests.target import FakeTarget
from edx.analytics.tasks.util.tests.test_obfuscate_util import get_mock_user_info_requirements
from edx.analytics.tasks.util.url import url_path_join
LOG = logging.getLogger(__name__)
class TestDataObfuscation(TestCase):
    """Tests for all data obfuscation tasks."""

    def run_task(self, task_cls, source):
        """Runs the task with fake targets."""
        # All constructor args are ignored by the tasks under test, so
        # sentinel placeholders suffice.
        task = task_cls(
            course=sentinel.ignored,
            output_directory=sentinel.ignored,
            data_directory=sentinel.ignored,
            auth_user_path=sentinel.ignored,
            auth_userprofile_path=sentinel.ignored,
        )
        fake_input = {'data': [FakeTarget(value=source)]}
        task.input = MagicMock(return_value=fake_input)
        output_target = FakeTarget()
        task.output = MagicMock(return_value=output_target)
        # Deterministic mock mapping for user-id/username remapping.
        task.user_info_requirements = get_mock_user_info_requirements()
        reset_user_info_for_testing()
        task.run()
        return output_target.buffer.read()

    def reformat(self, data):
        """Reformat data to make it like a TSV."""
        return "\n".join(["\t".join(row) for row in data]) + '\n'

    def check_output(self, cls, input_value, expected_value):
        """Compares input and expected values."""
        output = self.run_task(task_cls=cls, source=self.reformat(input_value))
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(output, self.reformat(expected_value))

    def test_auth_user_obfuscation(self):
        """auth_user: ids/usernames are remapped, PII columns blanked."""
        header = ['id', 'username', 'first_name', 'last_name', 'email', 'password', 'is_staff', 'is_active',
                  'is_superuser', 'last_login', 'date_joined', 'status', 'email_key', 'avatar_type', 'country',
                  'show_country', 'date_of_birth', 'interesting_tags', 'ignored_tags', 'email_tag_filter_strategy',
                  'display_tag_filter_strategy', 'consecutive_days_visit_count']
        data = [
            header,
            ['123456', 'JohnDoe', 'John', 'Doe', 'johndoe@edx.org', '', '1', '1',
             '0', '2015-11-15 22:08:37', '2013-07-08 14:42:50', '', 'NULL', '', '',
             '0', 'NULL', '', '', '0',
             '0', '0']
        ]
        # 123456 -> 273678626 comes from the mock user-info mapping;
        # names, email and misc flags are blanked.
        expected = [
            header,
            ['273678626', 'username_273678626', '', '', '', '', '1', '1',
             '0', '2015-11-15 22:08:37', '2013-07-08 14:42:50', '', '', '', '',
             '', '', '', '', '',
             '', '']
        ]
        self.check_output(obfuscate.ObfuscateAuthUserTask, data, expected)

    def test_auth_user_profile_obfuscation(self):
        """auth_userprofile: user_id remapped, free-text PII fields blanked."""
        header = ['id', 'user_id', 'name', 'language', 'location',
                  'meta', 'courseware', 'gender',
                  'mailing_address', 'year_of_birth', 'level_of_education', 'goals', 'country',
                  'city', 'bio', 'profile_image_uploaded_at']
        data = [
            header,
            ['123', '123456', 'John Doe', 'English', 'Batcave, USA',
             '{"old_names": [["old name", "Name change", "2015-09-07T02:30:17.735773+00:00"]]}', 'course.xml', 'm',
             '4th Street', '1984', 'hs', 'To be someone', 'NA',
             'ID', 'I like to code', '2015-11-21 22:17:57']
        ]
        expected = [
            header,
            ['123', '273678626', '', '', '',
             '', '', 'm',
             '', '1984', 'hs', 'To be someone', 'NA',
             '', '', '2015-11-21 22:17:57']
        ]
        self.check_output(obfuscate.ObfuscateAuthUserProfileTask, data, expected)

    def test_student_course_enrollment_obfuscation(self):
        """student_courseenrollment: only user_id is remapped."""
        header = ['id', 'user_id', 'course_id', 'created', 'is_active', 'mode']
        data = [
            header,
            ['123', '123456', 'course-v1:edX+DemoX+Test_2014', '2015-07-16 19:19:10', '1', 'honor'],
            ['124', '123457', 'course-v1:edX+DemoX+Test_2014', '2015-07-28 12:41:13', '0', 'verified'],
        ]
        expected = [
            header,
            ['123', '273678626', 'course-v1:edX+DemoX+Test_2014', '2015-07-16 19:19:10', '1', 'honor'],
            ['124', '273680674', 'course-v1:edX+DemoX+Test_2014', '2015-07-28 12:41:13', '0', 'verified'],
        ]
        self.check_output(obfuscate.ObfuscateStudentCourseEnrollmentTask, data, expected)

    def test_student_language_proficiency_obfuscation(self):
        """student_languageproficiency: passes through unchanged."""
        header = ['id', 'user_profile_id', 'code']
        data = [
            header,
            ['1', '145', 'en'],
            ['2', '941', 'zh'],
            ['3', '81724', 'ar'],
        ]
        expected = [
            header,
            ['1', '145', 'en'],
            ['2', '941', 'zh'],
            ['3', '81724', 'ar'],
        ]
        self.check_output(obfuscate.ObfuscateStudentLanguageProficiencyTask, data, expected)

    def test_courseware_student_module_obfuscation(self):
        """courseware_studentmodule: id remapped; emails/phones/names inside
        the JSON state are replaced with <<...>> placeholders."""
        header = ['id', 'module_type', 'module_id', 'student_id',
                  'state',
                  'grade', 'created', 'modified', 'max_grade', 'done', 'course_id']
        data = [
            header,
            ['1', 'problem', 'block-v1:edX+DemoX+Test_2014+type@problem+block@123091b4012312r210r120r12r', '2',
             '{"correct_map": {"123091b4012312r210r120r12r_2_1": {"hint": "", "hintmode": null, '
             '"correctness": "correct", '
             '"msg": "\\\\nRandom HTML stuff:\\\\n\\\\ntest@example.com\\\\n+1-234-123456 will reach John.",'
             '"answervariable": null, "npoints": 1.0, "queuestate": null}}, '
             '"input_state": {"123091b4012312r210r120r12r_2_1": {}}, "last_submission_time": "2015-12-13T06:17:05Z",'
             '"attempts": 2, "seed": 1, "done": true, '
             '"student_answers": {"123091b4012312r210r120r12r_2_1": '
             '"The answer\\\\r\\\\nwith multiple lines\\\\r\\\\naudit needed\\\\r\\\\n213-4567"}}',
             '0', '2015-10-13 19:22:24', '2015-10-13 19:40:20', '1', 'na', 'course-v1:edX+DemoX+Test_2014'],
        ]
        expected = [
            header,
            ['1', 'problem', 'block-v1:edX+DemoX+Test_2014+type@problem+block@123091b4012312r210r120r12r', '2147483648',
             '{"correct_map": {"123091b4012312r210r120r12r_2_1": {"hint": "", "hintmode": null, '
             '"correctness": "correct", '
             '"msg": "\\\\nRandom HTML stuff:\\\\n\\\\n<<EMAIL>>\\\\n<<PHONE_NUMBER>> will reach <<FULLNAME>>.", '
             '"answervariable": null, "npoints": 1.0, "queuestate": null}}, '
             '"input_state": {"123091b4012312r210r120r12r_2_1": {}}, "last_submission_time": "2015-12-13T06:17:05Z", '
             '"attempts": 2, "seed": 1, "done": true, '
             '"student_answers": {"123091b4012312r210r120r12r_2_1": '
             '"The answer\\\\r\\\\nwith multiple lines\\\\r\\\\n<<FULLNAME>> needed\\\\r\\\\n<<PHONE_NUMBER>>"}}',
             '0', '2015-10-13 19:22:24', '2015-10-13 19:40:20', '1', 'na', 'course-v1:edX+DemoX+Test_2014'],
        ]
        self.check_output(obfuscate.ObfuscateCoursewareStudentModule, data, expected)

    def test_courseware_student_module_obfuscation_unmapped_id(self):
        """A student_id present in the mock mapping is remapped normally."""
        header = ['id', 'module_type', 'module_id', 'student_id',
                  'state', 'grade', 'created', 'modified', 'max_grade', 'done', 'course_id']
        data = [
            header,
            ['1', 'problem', 'block-v1:edX+DemoX+Test_2014+type@problem+block@123091b4012312r210r120r12r', '123456',
             '{}', '0', '2015-10-13 19:22:24', '2015-10-13 19:40:20', '1', 'na', 'course-v1:edX+DemoX+Test_2014'],
        ]
        expected = [
            header,
            ['1', 'problem', 'block-v1:edX+DemoX+Test_2014+type@problem+block@123091b4012312r210r120r12r', '273678626',
             '{}', '0', '2015-10-13 19:22:24', '2015-10-13 19:40:20', '1', 'na', 'course-v1:edX+DemoX+Test_2014'],
        ]
        self.check_output(obfuscate.ObfuscateCoursewareStudentModule, data, expected)

    def test_courseware_student_module_obfuscation_bad_state(self):
        """Unparseable state JSON is replaced by an empty object rather than kept."""
        header = ['id', 'module_type', 'module_id', 'student_id',
                  'state', 'grade', 'created', 'modified', 'max_grade', 'done', 'course_id']
        data = [
            header,
            ['1', 'problem', 'block-v1:edX+DemoX+Test_2014+type@problem+block@123091b4012312r210r120r12r', '2',
             'this does not parse', '0', '2015-10-13 19:22:24', '2015-10-13 19:40:20', '1', 'na',
             'course-v1:edX+DemoX+Test_2014'],
        ]
        expected = [
            header,
            ['1', 'problem', 'block-v1:edX+DemoX+Test_2014+type@problem+block@123091b4012312r210r120r12r', '2147483648',
             '{}', '0', '2015-10-13 19:22:24', '2015-10-13 19:40:20', '1', 'na', 'course-v1:edX+DemoX+Test_2014'],
        ]
        self.check_output(obfuscate.ObfuscateCoursewareStudentModule, data, expected)

    def test_certificates_generated_certificate_obfuscation(self):
        """certificates: user_id remapped; URLs, uuids, name and keys blanked."""
        header = ['id', 'user_id', 'download_url', 'grade', 'course_id', 'key', 'distinction', 'status',
                  'verify_uuid', 'download_uuid', 'name', 'created_date', 'modified_date',
                  'error_reason', 'mode']
        data = [
            header,
            ['1', '123456', 'some_url', '0.21', 'course-v1:edX+DemoX+Test_2014', 'key', '0', 'notpassing',
             'verify_uuid', 'download_uuid', 'John Doe', '2015-10-16 12:53:49', '2015-10-16 12:53:49',
             'error_reason', 'honor']
        ]
        expected = [
            header,
            ['1', '273678626', '', '0.21', 'course-v1:edX+DemoX+Test_2014', '', '0', 'notpassing',
             '', '', '', '2015-10-16 12:53:49', '2015-10-16 12:53:49',
             '', 'honor']
        ]
        self.check_output(obfuscate.ObfuscateCertificatesGeneratedCertificate, data, expected)

    def test_teams_obfuscation(self):
        """teams: no per-user data, so the row passes through unchanged."""
        header = ['id', 'team_id', 'name', 'course_id', 'topic_id',
                  'date_created', 'description', 'country', 'language', 'discussion_topic_id', 'last_activity_at',
                  'team_size']
        data = [
            header,
            ['1', 'A-Team-8883d3b43094f0e9e6ec7e190e7600e', 'A Team', 'course-v1:edX+DemoX+Test_2014', 'some_topic',
             '2015-10-13 13:14:41', 'description', 'GB', 'en', 'topic_id', '2015-10-31 21:32:17',
             '8']
        ]
        expected = [
            header,
            ['1', 'A-Team-8883d3b43094f0e9e6ec7e190e7600e', 'A Team', 'course-v1:edX+DemoX+Test_2014', 'some_topic',
             '2015-10-13 13:14:41', 'description', 'GB', 'en', 'topic_id', '2015-10-31 21:32:17',
             '8']
        ]
        self.check_output(obfuscate.ObfuscateTeamsTask, data, expected)

    def test_teams_membership_obfuscation(self):
        """teams membership: only user_id is remapped."""
        header = ['id', 'user_id', 'team_id', 'date_joined', 'last_activity_at']
        data = [
            header,
            ['1', '123456', '1', '2015-10-13 13:14:41', '2015-10-14 18:41:24']
        ]
        expected = [
            header,
            ['1', '273678626', '1', '2015-10-13 13:14:41', '2015-10-14 18:41:24']
        ]
        self.check_output(obfuscate.ObfuscateTeamsMembershipTask, data, expected)

    def test_verification_status_obfuscation(self):
        """verification status: only user_id is remapped."""
        header = ['timestamp', 'status', 'course_id',
                  'checkpoint_location', 'user_id']
        data = [
            header,
            ['2015-09-03 07:19:10', 'submitted', 'course-v1:edX+DemoX+Test_2014',
             'block-v1:edX+DemoX+Test_2014+type@edx', '123456']
        ]
        expected = [
            header,
            ['2015-09-03 07:19:10', 'submitted', 'course-v1:edX+DemoX+Test_2014',
             'block-v1:edX+DemoX+Test_2014+type@edx', '273678626']
        ]
        self.check_output(obfuscate.ObfuscateVerificationStatusTask, data, expected)

    def test_wiki_article_obfuscation(self):
        """wiki article: owner/group identifiers are blanked."""
        header = ['id', 'current_revision_id', 'created', 'modified', 'owner_id', 'group_id', 'group_read',
                  'group_write', 'other_read', 'other_write']
        data = [
            header,
            ['1234', '27567', '2013-08-08 22:00:58', '2013-09-30 16:52:21', 'owner_id', 'group_id', '1',
             '2', '3', '4']
        ]
        expected = [
            header,
            ['1234', '27567', '2013-08-08 22:00:58', '2013-09-30 16:52:21', '', '', '1',
             '2', '3', '4']
        ]
        self.check_output(obfuscate.ObfuscateWikiArticleTask, data, expected)

    def test_wiki_article_revision_obfuscation(self):
        """wiki revision: messages/ip blanked, user_id remapped, PII in
        content replaced with placeholders."""
        header = ['id', 'revision_number', 'user_message', 'automatic_log', 'ip_address', 'user_id', 'modified',
                  'created', 'previous_revision_id', 'deleted', 'locked', 'article_id', 'content', 'title']
        data = [
            header,
            ['23456', '1', 'This is a user message', 'automatic_log', '192.168.1.1', '4', '2013-08-08 22:00:58',
             '2013-08-22 08:00:58', '123', '0', '0', '123',
             'This is revised by Static Staff and not Vera, and contains staff@example.com. For help, call 381-1234.',
             'Article Title']
        ]
        expected = [
            header,
            ['23456', '1', '', '', '', '8388608', '2013-08-08 22:00:58',
             '2013-08-22 08:00:58', '123', '0', '0', '123',
             'This is revised by <<FULLNAME>> and not Vera, and contains <<EMAIL>>. For help, call <<PHONE_NUMBER>>.',
             'Article Title']
        ]
        self.check_output(obfuscate.ObfuscateWikiArticleRevisionTask, data, expected)

    def test_wiki_article_revision_obfuscation_unmapped_userid(self):
        """A user_id with no name mapping still gets remapped; names that the
        mock mapping does not know are left in the content."""
        header = ['id', 'revision_number', 'user_message', 'automatic_log', 'ip_address', 'user_id', 'modified',
                  'created', 'previous_revision_id', 'deleted', 'locked', 'article_id', 'content', 'title']
        data = [
            header,
            ['23456', '1', 'This is a user message', 'automatic_log', '192.168.1.1', '12345', '2013-08-08 22:00:58',
             '2013-08-08 22:00:58', '123', '0', '0', '123',
             'This is revised by Static Staff and not Vera, and contains staff@example.com. For help, call 381-1234.',
             'Article Title']
        ]
        expected = [
            header,
            ['23456', '1', '', '', '', '302000641', '2013-08-08 22:00:58',
             '2013-08-08 22:00:58', '123', '0', '0', '123',
             'This is revised by Static Staff and not Vera, and contains <<EMAIL>>. For help, call <<PHONE_NUMBER>>.',
             'Article Title']
        ]
        self.check_output(obfuscate.ObfuscateWikiArticleRevisionTask, data, expected)

    def test_wiki_article_revision_obfuscation_null_userid(self):
        """A NULL user_id is preserved as NULL rather than remapped."""
        header = ['id', 'revision_number', 'user_message', 'automatic_log', 'ip_address', 'user_id', 'modified',
                  'created', 'previous_revision_id', 'deleted', 'locked', 'article_id', 'content', 'title']
        data = [
            header,
            ['23456', '1', 'This is a user message', 'automatic_log', '192.168.1.1', 'NULL', '2013-08-08 22:00:58',
             '2013-08-08 22:00:58', '123', '0', '0', '123',
             'This is revised by Static Staff and not Vera, and contains staff@example.com. For help, call 381-1234.',
             'Article Title']
        ]
        expected = [
            header,
            ['23456', '1', '', '', '', 'NULL', '2013-08-08 22:00:58',
             '2013-08-08 22:00:58', '123', '0', '0', '123',
             'This is revised by Static Staff and not Vera, and contains <<EMAIL>>. For help, call <<PHONE_NUMBER>>.',
             'Article Title']
        ]
        self.check_output(obfuscate.ObfuscateWikiArticleRevisionTask, data, expected)

    def test_mongo_obfuscation(self):
        """Forum mongo dump: author, voters, flaggers and endorsement ids are
        remapped; PII in body/title is replaced with placeholders."""
        data = '{"author_id":"3","author_username":"deliberately_not_verified",' \
               '"body":"Hi All,\\nI am having trouble. Cell: 321-215-9152\\nEmail: vera@test.edx.org\\n\\nVera",' \
               '"title":"Reply from Vera Verified (vera@test.edx.org)","course_id":"course-v1:edX+DemoX+Test_2014",' \
               '"votes":{"down":["123456"],"up":["12345"],"count":2,"point":0,"down_count":1,"up_count":1},' \
               '"endorsement": {"user_id": "4", "time": {"$date": "2015-09-18T01:01:56.743Z"}},' \
               '"abuse_flaggers":["12345"],"historical_abuse_flaggers":["123456"]}'
        expected = '{"author_id":"2147485696","author_username":"username_2147485696",' \
                   '"body":"Hi All,\\nI am having trouble. Cell: <<PHONE_NUMBER>>\\nEmail: <<EMAIL>>\\n\\n<<FULLNAME>>", ' \
                   '"title":"Reply from <<FULLNAME>> <<FULLNAME>> (<<EMAIL>>)","course_id":"course-v1:edX+DemoX+Test_2014",' \
                   '"votes":{"down":["273678626"],"up":["302000641"],"count":2,"point":0,"down_count":1,"up_count":1},' \
                   '"endorsement": {"user_id": "8388608", "time": {"$date": "2015-09-18T01:01:56.743Z"}},' \
                   '"abuse_flaggers":["302000641"],"historical_abuse_flaggers":["273678626"]}'
        output = self.run_task(task_cls=obfuscate.ObfuscateMongoDumpsTask, source=data)
        # Compare as parsed JSON so key order / whitespace differences don't matter.
        self.assertDictEqual(json.loads(output), json.loads(expected))

    def test_mongo_obfuscation_with_nonint_id(self):
        """A non-integer author_id is left alone; text is still scrubbed."""
        data = '{"author_id":"nonint","author_username":"nonint_user",' \
               '"body":"Hi All,\\nI am having trouble. Cell: 321-215-9152\\nEmail: vera@test.edx.org\\n\\nVera",' \
               '"title":"Reply from Vera Verified (vera@test.edx.org)","course_id":"course-v1:edX+DemoX+Test_2014"}'
        expected = '{"author_id":"nonint","author_username":"nonint_user",' \
                   '"body":"Hi All,\\nI am having trouble. Cell: <<PHONE_NUMBER>>\\nEmail: <<EMAIL>>\\n\\nVera", ' \
                   '"title":"Reply from Vera Verified (<<EMAIL>>)","course_id":"course-v1:edX+DemoX+Test_2014"}'
        output = self.run_task(task_cls=obfuscate.ObfuscateMongoDumpsTask, source=data)
        self.assertDictEqual(json.loads(output), json.loads(expected))

    def test_mongo_obfuscation_with_nonmapped_id(self):
        """An integer author_id outside the known-name mapping is still remapped."""
        data = '{"author_id":"12345","author_username":"nonmapped_user",' \
               '"body":"Hi All,\\nI am having trouble. Cell: 321-215-9152\\nEmail: vera@test.edx.org\\n\\nVera",' \
               '"title":"Reply from Vera Verified (vera@test.edx.org)","course_id":"course-v1:edX+DemoX+Test_2014"}'
        expected = '{"author_id":"302000641","author_username":"username_302000641",' \
                   '"body":"Hi All,\\nI am having trouble. Cell: <<PHONE_NUMBER>>\\nEmail: <<EMAIL>>\\n\\nVera", ' \
                   '"title":"Reply from Vera Verified (<<EMAIL>>)","course_id":"course-v1:edX+DemoX+Test_2014"}'
        output = self.run_task(task_cls=obfuscate.ObfuscateMongoDumpsTask, source=data)
        self.assertDictEqual(json.loads(output), json.loads(expected))

    def test_course_structure(self):
        """Course structure: unrecognised metadata keys are stripped and
        recorded under 'redacted_metadata'."""
        data = json.dumps({
            'block0': {
                'category': 'unknownblock',
                'metadata': {
                    'foo': 'bar',
                    'baz': 10
                }
            },
            'block1': {
                'category': 'course',
                'metadata': {
                    'lti_passports': 'x:foo:bar',
                    'mobile_available': True,
                    'unrecognized': 10
                },
                'children': [
                    'block0'
                ]
            },
            'block2': {
                'category': 'lti',
                'metadata': {
                    'lti_id': 'foo'
                }
            }
        })
        expected = {
            'block0': {
                'category': 'unknownblock',
                'metadata': {},
                'redacted_metadata': ['foo', 'baz']
            },
            'block1': {
                'category': 'course',
                'metadata': {
                    'mobile_available': True
                },
                'redacted_metadata': ['lti_passports', 'unrecognized'],
                'children': [
                    'block0'
                ]
            },
            'block2': {
                'category': 'lti',
                'metadata': {},
                'redacted_metadata': ['lti_id'],
            }
        }
        output = self.run_task(task_cls=obfuscate.CourseStructureTask, source=data)
        self.assertDictEqual(json.loads(output), expected)
class TestObfuscateCourseDumpTask(TestCase):
"""Test for ObfuscateCourseDumpTask."""
def create_paths(self, course, dates):
"""Setups directory structure and files as expected by ObfuscateCourseDumpTask task."""
self.temp_rootdir = tempfile.mkdtemp()
self.dump_root = os.path.join(self.temp_rootdir, "dump_root")
self.output_root = os.path.join(self.temp_rootdir, "output_root")
filename_safe_course_id = get_filename_safe_course_id(course)
for date in dates:
filepath = os.path.join(self.dump_root, filename_safe_course_id, 'state', date, 'auth_userprofile_file')
os.makedirs(os.path.dirname(filepath))
open(filepath, 'a').close()
def tearDown(self):
"Remove temp dir. after running the test."
if os.path.exists(self.temp_rootdir):
shutil.rmtree(self.temp_rootdir)
def test_data_directory(self):
"""Test to check whether the data_directory for a course is being set up correctly."""
coursename = 'edx_demo_course'
self.create_paths(coursename, dates=['2015-11-25', '2015-11-28', '2015-12-06'])
task = obfuscate.ObfuscatedCourseDumpTask(
course=coursename, dump_root=self.dump_root, output_root=self.output_root,
auth_user_path=sentinel.ignored, auth_userprofile_path=sentinel.ignored,
)
self.assertEquals(task.data_directory, url_path_join(self.dump_root, coursename, 'state', '2015-12-06'))
class TestCourseContentTask(TestCase):
    """Ensure sensitive fields are removed from the course content export"""

    # Fixed course id used to name the course directory inside the archive.
    COURSE_ID = 'course-v1:edX+DemoX+Test_2014'

    def setUp(self):
        """Build a minimal course export tree (course.xml + empty policy)."""
        self.archive_root = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.archive_root)
        course_id_filename = get_filename_safe_course_id(self.COURSE_ID)
        self.course_root = os.path.join(self.archive_root, course_id_filename)
        os.makedirs(self.course_root)
        with open(os.path.join(self.course_root, 'course.xml'), 'w') as course_file:
            course_file.write('<course url_name="foo" org="edX" course="DemoX"/>')
        policy_dir_path = os.path.join(self.course_root, 'policies', 'foo')
        os.makedirs(policy_dir_path)
        with open(os.path.join(policy_dir_path, 'policy.json'), 'w') as policy_file:
            json.dump({}, policy_file)

    def run_task(self):
        """Runs the task with fake targets."""
        output_archive_root = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, output_archive_root)
        # Package the staged course tree into a gzipped tarball, feed it to
        # the task as its input target, and run it.
        with tempfile.NamedTemporaryFile() as tmp_input_archive:
            with tarfile.open(mode='w:gz', fileobj=tmp_input_archive) as input_archive_file:
                input_archive_file.add(self.archive_root, arcname='')
            tmp_input_archive.seek(0)
            task = obfuscate.CourseContentTask(
                course=sentinel.ignored,
                output_directory=sentinel.ignored,
                data_directory=sentinel.ignored,
                auth_user_path=sentinel.ignored,
                auth_userprofile_path=sentinel.ignored,
            )
            fake_input = {'data': [LocalTarget(path=tmp_input_archive.name)]}
            task.input = MagicMock(return_value=fake_input)
            output_target = FakeTarget()
            task.output = MagicMock(return_value=output_target)
            task.user_info_requirements = get_mock_user_info_requirements()
            reset_user_info_for_testing()
            task.run()
        # Unpack the task's output archive so tests can inspect files.
        with tarfile.open(mode='r:gz', fileobj=output_target.buffer) as output_archive_file:
            output_archive_file.extractall(output_archive_root)
        self.output_course_root = os.path.join(output_archive_root, get_filename_safe_course_id(self.COURSE_ID))

    def test_draft_removal(self):
        """The drafts/ directory must be dropped from the output archive."""
        os.makedirs(os.path.join(self.course_root, 'drafts'))
        self.run_task()
        self.assertTrue(os.path.exists(os.path.join(self.output_course_root, 'course.xml')))
        self.assertFalse(os.path.exists(os.path.join(self.output_course_root, 'drafts')))

    def test_policy_cleaning(self):
        """Sensitive policy attributes are removed and listed under
        'redacted_attributes'."""
        policy_obj = {
            'course/foo': {
                'video_upload_pipeline': {
                    'course_video_upload_token': 'abcdefg'
                },
                'start': '2015-10-05T00:00:00Z',
                'rerandomize': 'always'
            }
        }
        self.write_file('policies/foo/policy.json', json.dumps(policy_obj))
        self.run_task()
        self.assertDictEqual(
            {
                'course/foo': {
                    'start': '2015-10-05T00:00:00Z',
                    'rerandomize': 'always',
                    'redacted_attributes': ['video_upload_pipeline']
                }
            },
            json.loads(self.read_file('policies/foo/policy.json'))
        )

    def test_single_course_xml(self):
        """Unknown attributes/children are redacted and recorded inline."""
        content = '<course url_name="foo" org="edX" course="DemoX">' \
                  '<chapter>' \
                  '<foo a="0" b="1" url_name="bar"><p>hello</p><p>world!</p></foo>' \
                  '</chapter>' \
                  '</course>'
        expected = '<course url_name="foo" org="edX" course="DemoX">' \
                   '<chapter>' \
                   '<foo redacted_attributes="a,b" redacted_children="p" url_name="bar" />' \
                   '</chapter>' \
                   '</course>'
        self.write_file('course.xml', content)
        self.run_task()
        self.assert_xml_equal(expected, self.read_file('course.xml'))

    def write_file(self, relative_path, content):
        """Write a file in the staging area that will be included in the test course package"""
        full_path = os.path.join(self.course_root, relative_path)
        try:
            os.makedirs(os.path.dirname(full_path))
        except OSError as ose:
            # Directory may already exist; re-raise anything else.
            if ose.errno != errno.EEXIST:
                raise
        with open(full_path, 'w') as course_file:
            course_file.write(content)

    def read_file(self, relative_path):
        """Read a file from the temporary directory setup to hold the output after the course has been processed"""
        with open(os.path.join(self.output_course_root, relative_path), 'r') as course_file:
            return course_file.read()

    def assert_xml_equal(self, expected, actual):
        """Compare two XML documents to ensure they are equivalent"""
        return self.assert_xml_element_equal(ET.fromstring(expected), ET.fromstring(actual), [])

    def assert_xml_element_equal(self, expected, actual, path):
        """Compare two XML elements to ensure they are equivalent"""
        new_path = path + [actual.tag]
        try:
            self.assertEqual(expected.tag, actual.tag)
            self.assertDictEqual(expected.attrib, actual.attrib)
            self.assertEqual(len(expected), len(actual))
            self.assertEqual(expected.text, actual.text)
            self.assertEqual(expected.tail, actual.tail)
        except AssertionError:
            # Log where in the tree the mismatch occurred before re-raising.
            LOG.error('Difference found at path "%s"', '.'.join(new_path))
            LOG.error('Expected XML: %s', ET.tostring(expected))
            LOG.error('Actual XML: %s', ET.tostring(actual))
            raise
        for expected_child, actual_child in zip(expected, actual):
            self.assert_xml_element_equal(expected_child, actual_child, new_path)

    def test_separate_course_xml(self):
        """course/course.xml (split format) is cleaned the same way."""
        content = '<course course_image="foo.png" lti_passports="foo" unknown="1">' \
                  '<chapter url_name="abcdefg"/>' \
                  '</course>'
        expected = '<course course_image="foo.png" redacted_attributes="lti_passports,unknown">' \
                   '<chapter url_name="abcdefg"/>' \
                   '</course>'
        self.write_file('course/course.xml', content)
        self.run_task()
        self.assert_xml_equal(expected, self.read_file('course/course.xml'))

    def test_problem_with_children(self):
        """Known problem markup is passed through unchanged."""
        self.assert_unchanged_xml(
            'problem/sky.xml',
            '<problem display_name="Sky Color" markdown="null">'
            '<p>What color is the sky?</p>'
            '<multiplechoiceresponse>'
            '<choice correct="false">Red</choice>'
            '<choice correct="true">Blue</choice>'
            '</multiplechoiceresponse>'
            '</problem>'
        )

    def assert_unchanged_xml(self, relative_path, content):
        """Clean the XML and make sure nothing was changed"""
        self.write_file(relative_path, content)
        self.run_task()
        self.assert_xml_equal(content, self.read_file(relative_path))

    def test_subelement_field_mixed_with_children(self):
        # "textbook" is a field that is serialized to a sub-element of course, it should be excluded from further
        # analysis
        content = '<course url_name="foo" org="edX" course="DemoX">' \
                  '<chapter><cleanme cleaned="0"/></chapter>' \
                  '<textbook title="Textbook" book_url="https://s3.amazonaws.com/bucket/foo.txt">' \
                  '<unchanged cleaned="0"/>' \
                  '</textbook>' \
                  '</course>'
        expected = '<course url_name="foo" org="edX" course="DemoX">' \
                   '<chapter><cleanme redacted_attributes="cleaned"/></chapter>' \
                   '<textbook title="Textbook" book_url="https://s3.amazonaws.com/bucket/foo.txt">' \
                   '<unchanged cleaned="0"/>' \
                   '</textbook>' \
                   '</course>'
        self.write_file('course.xml', content)
        self.run_task()
        self.assert_xml_equal(expected, self.read_file('course.xml'))

    def test_unknown_children_status_with_children(self):
        # this block has not declared has_children=True, however, we should log a warning and clean any children if
        # they do exist
        content = '<poll display_name="Has children for some reason">' \
                  '<cleanme cleaned="0"/>' \
                  '</poll>'
        expected = '<poll display_name="Has children for some reason">' \
                   '<cleanme redacted_attributes="cleaned"/>' \
                   '</poll>'
        self.write_file('poll/test.xml', content)
        self.run_task()
        self.assert_xml_equal(expected, self.read_file('poll/test.xml'))
| openedx/edx-analytics-pipeline | edx/analytics/tasks/export/tests/test_data_obfuscation.py | test_data_obfuscation.py | py | 31,225 | python | en | code | 90 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.ignored",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "mock.s... |
72374385954 | import MySQLdb
import MySQLdb.cursors as cursors
from Pattern import Pattern
import datetime
from pprint import pprint
import uuid
from pypika import MySQLQuery, Table, Field, Order, functions as fn, JoinType
import time
import json
import socket
from openpyxl import Workbook
import copy
import requests
import time
import datetime
# Central ("server") database connection used for all comparison queries.
# NOTE(review): credentials are hard-coded in source; consider moving them
# to configuration or environment variables.
db = MySQLdb.connect(host="127.0.0.1",
                     user="root",  # your username
                     passwd="liverpoolfc",  # your password
                     db="mmt-its")
# mesin = MySQLdb.connect(host="192.168.0.33",
#                         user="root",  # your username
#                         passwd="mmtitsmmtits",  # your password
#                         db="mmtitsbaru")

# Build the per-table replication query patterns once at startup.
ptrn = Pattern()
ptrn.findPattern('kelas', None, None)
ptrn.main()

# Tables excluded from the replication comparison.
blacklist = [
    'kelas',
    'mesin_log'
]

# Accumulates {id_kelas: {table_name: row-count difference}} for reporting.
listTable = {}
def check(query, tableName, ip, id_kelas):
    """Compare row counts for one table between the central server and one
    classroom machine, print the difference and record it in ``listTable``.

    :param query: UNION query run against the central ``db`` connection.
    :param tableName: table to dump from the machine database.
    :param ip: IP address of the classroom machine.
    :param id_kelas: classroom id used as the key in ``listTable``.
    """
    mesin = MySQLdb.connect(host=str(ip),
                            user="root",  # your username
                            passwd="mmtitsmmtits",  # your password
                            db="mmtitsbaru")
    try:
        server_cursor = db.cursor()
        server_cursor.execute(query)
        server_rows = list(server_cursor.fetchall())
        machine_cursor = mesin.cursor()
        machine_cursor.execute('select * from ' + tableName)
        machine_rows = list(machine_cursor.fetchall())
    finally:
        # The old code never closed the per-call connection, leaking one
        # connection per table per machine.
        mesin.close()
    jumlahDiServer = len(server_rows)
    jumlahDiMesin = len(machine_rows)
    print('Jumlah di Server / Jumlah di Mesin : ' + str(jumlahDiServer) + '/' + str(jumlahDiMesin))
    print('Perbedaan Data : ' + str(jumlahDiMesin - jumlahDiServer))
    if id_kelas not in listTable:
        listTable[id_kelas] = {}
    listTable[id_kelas][tableName] = jumlahDiMesin - jumlahDiServer
# List all tables of the central schema; these drive the comparison loop.
tables = "SELECT table_name FROM information_schema.tables where table_schema='mmt-its'"
cursor = db.cursor(cursors.DictCursor)
cursor.execute(tables)

# Ask the local coordination service which classrooms/sockets are online.
r = requests.get('http://127.0.0.1:9999/get/ruangan')
# print(r.text)
result = json.loads(r.text)
# pprint(result)
for id_kelas in result['ruangan']:
    print(id_kelas)
    for socketId in result['ruangan'][id_kelas]:
        # print(socket)
        print(result['ip'][socketId])
        # NOTE(review): `cursor` is iterated here on every socket, but a
        # MySQLdb cursor is exhausted after one full pass -- only the first
        # socket/classroom likely sees any rows. Consider fetchall() once
        # into a list before the loops.
        for row in cursor:
            if row['table_name'] in blacklist:
                continue
            ts = time.time()
            dateTimeString = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
            print(dateTimeString + ' -> ' + row['table_name'])
            # print()
            # Substitute this host's name in the pattern query with the
            # classroom id before running the comparison.
            query = copy.deepcopy(ptrn.dictOfPattern[row['table_name']]['query'])
            query = ' UNION '.join(query)
            query = query.replace(socket.gethostname(), str(id_kelas))
            # print(query + '\n')
            check(query, row['table_name'], result['ip'][socketId], id_kelas)
            print('')
            print('')
            # exit()
print(listTable)
# exit()

# Write the collected differences to an Excel report: one row per table,
# one column per classroom.
wb = Workbook()
ws = wb.active
header = ['Nama Tabel']
for key in listTable:
    header.append(key)
ws.append(header)
# NOTE(review): same cursor-exhaustion concern as above -- this loop may
# see no rows if the cursor was already consumed.
for row in cursor:
    if row['table_name'] in blacklist:
        continue
    baris = [row['table_name']]
    for key in listTable:
        baris.append(listTable[key][row['table_name']])
    ws.append(baris)
wb.save("checkData.xlsx")
wb.save("checkData.xlsx") | hlmn/TA | checkReplikasi.py | checkReplikasi.py | py | 3,402 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "MySQLdb.connect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Pattern.Pattern",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "MySQLdb.connect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "MySQLdb.cursors.DictC... |
18096915998 | import requests
import csv
import bs4 as bs
from calendar import monthrange as mr
import pandas as pd
import arrow
# Grabs the url for the selected month and parses it using html
urls = ['http://clubomgsf.com/calendar/month/2019/01/']
for url in urls:
    response = requests.get(url)
    soup = bs.BeautifulSoup(response.content, 'html.parser')
    # Searches for all table rows
    events = soup.find_all('tr')
    # Current venue
    venue = "Club OMG"
    # finds the month and year using the url
    # NOTE(review): str.strip removes a *character set* from both ends, not a
    # prefix -- this happens to work for these URLs because no digits appear
    # in the strip string, but str.replace/removeprefix would be safer.
    sdate = url.strip('http://clubomgsf.com/calendar/month/')
    # finds the amount of days in the given month
    sdate2 = sdate.split("/")
    mrange = mr(int(sdate2[0]), int(sdate2[1]))[1]
    # Starts to strip events, strips \t and \n first
    event_list = []
    for event in events:
        k = event.text.strip()
        k = k.replace("\t", "")
        k = k.replace("\n", "")
        event_list.append(k)
    # Removes the first table row as its days of the week and gets rid of commonwhitespaces
    another_list2 = []
    for i in event_list[1:]:
        other_events = i
        other_events = other_events.replace('    ', ',')
        other_events = other_events.replace('   ', ',')
        other_events = other_events.replace('  ', ',')
        other_events = other_events.replace('โข', '')
        other_events = other_events.replace(' ', ",")
        other_events = other_events.split(',')
        # Concatenates the table rows together
        another_list = []
        another_list = another_list + other_events
        # Parses through the variables and removes any whitespace to the left
        for x in another_list:
            k = x.lstrip()
            another_list2.append(k)
    # Finds the index of the first of the month as even when hidden old events are still embedded in the html
    # NOTE(review): list.index returns the *first* occurrence -- if a later
    # cell also contains "1" this still finds the earliest, which is intended
    # here, but the same idiom below can mis-locate duplicated day strings.
    for i in another_list2:
        try:
            if int(i) == 1:
                start = another_list2.index(i)
                break
        except ValueError:
            continue
    # Resets to the first of the month
    another_list2 = another_list2[start:]
    # Looks for the date of the last of the month
    for i in another_list2:
        try:
            if int(i) == mrange:
                end = another_list2.index(i)
                break
        except ValueError:
            continue
    # rests the main list to end of the month
    another_list2 = another_list2[:end+4]
    # Finds where numbers (days) occurs and then parses them into a list
    number_breaks = []
    for i in another_list2:
        try:
            for j in range(1, mrange+1):
                if int(i) == j:
                    number_breaks.append(another_list2.index(i))
        # If a string is tried to turn into an int then the loop just continues
        except ValueError:
            continue
    # Creates a nested list of the different days and their events, times, etc.
    final_list = []
    # Number breaks being the index of the overall list where a new day occurs
    for index in number_breaks:
        if index != number_breaks[-1]:
            # finds the index of the next number in the list
            next_num = number_breaks.index(index) + 1
            next_num = number_breaks[next_num]
            # Appends the list from current index to next index
            final_list.append(another_list2[index:next_num])
        # The last day goes to the end
        else:
            final_list.append(another_list2[index:])
    # Appending numbers into lists
    num_list = []
    event_list = []
    time_list = []
    price_list = []
    for i in final_list:
        # Appends the num list with numbers of the month
        num_list.append(i[0])
        # If there is data other than the day of the month then parse it into lists
        if len(i) > 1:
            event_list.append(i[1])
            time_list.append(i[2])
            price_list.append(i[3])
        # If no other data replace it with an arbitrary value
        elif len(i) == 1:
            event_list.append('NaN')
            time_list.append('NaN')
            price_list.append('NaN')
    # For finding the date using the num list
    dates = []
    for i in num_list:
        # Replaces the / from the web url with a space
        k = sdate.replace("/", " ")
        # Adds a space and the day number
        k = k + " " + i
        # Reformats the date to american date and parses it to the dates list
        dates.append(arrow.get(k, "YYYY MM D").format("MM/DD/YY"))
    # # Opens a csv and adds a header
    # t = ""
    # try:
    #     pd.read_csv('timetable.csv')
    # except FileNotFoundError:
    #     t = "NaN"
    # if t == 'NaN':
    # NOTE(review): opening with mode 'w' then 'a' rewrites the header every
    # run; csv.writer without newline='' can emit blank rows on Windows.
    with open('timetable.csv', 'w') as ttable:
        filewriter = csv.writer(ttable)
        filewriter.writerow(["Venue", "Event", "Date", "Time", "Price"])
    # Opens the csv and appends the data points
    with open('timetable.csv', 'a') as ttable:
        filewriter = csv.writer(ttable)
        for i in range(0, len(event_list)):
            filewriter.writerow([venue, event_list[i], dates[i], time_list[i], price_list[i]])
| Astatham98/EventWebScrape | webscrape1/clubomg.py | clubomg.py | py | 5,250 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "calendar.monthrange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "arrow.get",
"l... |
72861963873 | import os
from read_configure import ReadConfigure
import requests
# Module-level configuration reader shared by InterfaceTest.
rc = ReadConfigure()
class InterfaceTest:
    """Drive a single configured HTTP request.

    All settings (protocol, method, url, parameters) are read from the
    module-level ``ReadConfigure`` instance ``rc``.
    """
    # NOTE: the original declared ``global rc`` at class scope; that statement
    # has no effect (nothing here assigns to rc), so it was removed.

    def __init__(self):
        # Read the request shape from configuration and merge the integer
        # parameters into the string parameters to form one query dict.
        self.__protocol = rc.getmethod('protocol')
        self.__method = rc.getmethod('method')
        self.__url = rc.geturl('url')
        pidict = rc.getparameters_int()
        self.__pdict = rc.getparameters_string()
        self.__pdict.update(pidict)

    # Send the HTTP request and print the response.
    def sendrequest(self):
        """Send the configured request; only 'get' is currently supported.

        Any other configured method is silently ignored.
        """
        method = self.__method
        if method == 'get':
            res = requests.get(url=self.__url, params=self.__pdict)
            print(res)
            print(res.text)
            print(type(res.text))
if __name__ == '__main__':
    # Manual smoke test: build the request from configuration and send it.
    it = InterfaceTest()
    it.sendrequest()
{
"api_name": "read_configure.ReadConfigure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
}
] |
29598589421 |
# coding: utf-8
# In[2]:
#!pip install --upgrade pip
#!pip install casadi
# In[3]:
# Import casadi
from casadi import *
# Import Numpy
import numpy as np
# Import matplotlib
import matplotlib.pyplot as plt
# Import Scipy to load .mat file
import scipy.io as sio
import pdb
# In[4]:
def simulate_MPC(d_full, S = 100, N=10, x_init = np.array([[21],[150000]])):
    """Run a closed-loop MPC simulation of a room-temperature/battery system.

    Parameters:
    -----------
    d_full (np arr) : disturbance trajectory, 3 columns
                      (room_temp, sol_rad, int_gains); must have at least
                      S + N rows since the horizon looks ahead from each step
    S (int)         : number of closed-loop simulation steps
    N (int)         : MPC prediction horizon
    x_init (np arr) : initial state column vector [[Tr], [Ebat]] (2 x 1)

    Returns:
    --------
    (mpc_u, mpc_x, mpc_g_mixed, d_full) : input history (S x 3), state
    history (S+1 x 2), mixed-constraint history, and the disturbance
    trajectory passed in.
    """
    ## Define a linear system as a CasADi function
    A = np.array([[0.8511, 0],[0, 1]])
    B = np.array([[0.0035, 0, 0],[0, -5, 0]])
    E = (1e-03)*np.array([[22.217, 1.7912, 42.212],[0, 0, 0]])
    D = np.array([[-1, 1, 1], [1, 1, 1]])
    G_mixed = np.array([[0, 0.5, 0], [0, 0.5, 0]])
    ## Define the optimization variables for MPC
    nx = A.shape[1]
    nu = B.shape[1]
    nm = D.shape[1] # this is for the mixed variables
    nd = E.shape[1] # this is for the disturbance variable
    x = SX.sym("x",nx,1)
    u = SX.sym("u",nu,1)
    m = SX.sym("m",nm,1) # Mixed variable
    d = SX.sym("d",nd,1) # Disturbance variable
    print('nx=%s'%nx)
    print('nu=%s'%nu)
    print('nm=%s'%nm)
    print('nd=%s'%nd)
    """## Choose the reference battery energy """
    #@title choose Ebat_ref
    Ebat_ref = 50000 #@param {type:"slider", min:0, max:200000, step:1000}
    """## Choose the tuning of MPC"""
    #@title Choose prediction horizon N
    #N = 7 #@param {type:"slider", min:1, max:15, step:1}
    #@title Choose number of steps S
    # S = 100 #@param {type:"slider", min:1, max:144, step:1}
    #@title Choose the penalty parameter gamma
    gamma = 4.322 #@param {type:"slider", min:0, max:10, step:0.0001}
    """# Define the dynamics as a CasADi expression"""
    # Fill d here from the .mat disturbance file
    # For collab only
    #!wget -O external_disturbances.mat https://www.dropbox.com/s/57ta25v9pg94lbw/external_disturbances.mat?dl=0
    #!ls
    #mat_disturbance = sio.loadmat('external_disturbances.mat')
    #d_full = np.column_stack((mat_disturbance['room_temp'], mat_disturbance['sol_rad'], mat_disturbance['int_gains']))
    #print('disturbance vector successfully loaded in vector d_full')
    print('length of d_full:%i'%(d_full.shape[0]))
    d_0 = d_full[0, 0]
    d_1 = d_full[0, 1]
    d_2 = d_full[0, 2]
    print('first line of d (3 columns)')
    print('d[0,0] = %f'%d_0)
    print('d[0,1] = %f'%d_1)
    print('d[0,2] = %f'%d_2)
    # Definition of the system, and the mixed constraint equations
    output_sys = mtimes(A,x) + mtimes(B,u) + mtimes(E, d)
    output_mixed = mtimes(D,u) + mtimes(G_mixed,d)
    system = Function("sys", [x,u,d], [output_sys])
    mixed = Function("sys", [u,d], [output_mixed])
    """### Construct CasADi objective function"""
    ### state cost: grid power u[2] plus quadratic penalty on battery deviation
    J_stage_exp = u[2] + gamma*mtimes((x[1]-Ebat_ref),(x[1]-Ebat_ref))
    J_stage = Function('J_stage',[x,u],[J_stage_exp])
    # ### terminal cost ?? How ?
    # Suggestion : Terminal cost is stage cost function at last x_k (x_k[N])
    J_terminal_exp = gamma*mtimes((x[1]-Ebat_ref),(x[1]-Ebat_ref))
    J_terminal = Function('J_terminal',[x],[J_terminal_exp])
    # J_terminal = Function('J_terminal',[x],[J_terminal_exp])
    """## Define optimization variables"""
    X = SX.sym("X",(N+1)*nx,1)
    U = SX.sym("U",N*nu,1)
    # Added by me : Mixed constraints optimization variable M
    M = SX.sym("M",N*nu,1)
    """## Define constraints"""
    # state constraints : 20.0<=Tr<=23 and 0.0 ≤ SoC ≤ 200000
    lbx = np.array([[20],[0]])
    ubx = np.array([[23],[200000]])
    # input constraints
    lbu = np.array([[-1000],[-500],[-500]])
    ubu = np.array([[1000],[500],[500]])
    # mixed constraints ?
    lbm = np.array([[0], [0]])
    ubm = np.array([[inf], [inf]])
    """## Initialize vectors and matrices"""
    # Initializing the vectors
    # initial state vector has to be initialize with a feasible solution
    ############### Commented out to modularize the code ########
    # x_init = np.array([[21],[150000]]) #Arbitrary (random) feasible solution
    # #############################################################
    # Storing u_k and x_k in history matrices mpc_x and mpc_u
    mpc_x = np.zeros((S+1,nx))
    mpc_x[0,:] = x_init.T
    mpc_u = np.zeros((S,nu))
    #added by me to store mixed constraints values at each step
    mpc_g_mixed = np.zeros((S, G_mixed.shape[0]))
    """## MPC loop"""
    for step in range(S):
        ### formulate optimization problem (rebuilt each step: x_init changes)
        J = 0
        lb_X = []
        ub_X = []
        lb_U = []
        ub_U = []
        # Added by me : bound vectors for mixed constraints
        lb_M = []
        ub_M = []
        #####################
        G = []
        lbg = []
        ubg = []
        ###
        for k in range(N):
            d_k = d_full[step + k,:] # check correct index!
            x_k = X[k*nx:(k+1)*nx,:]
            x_k_next = X[(k+1)*nx:(k+2)*nx,:]
            u_k = U[k*nu:(k+1)*nu,:]
            # objective
            J += J_stage(x_k,u_k)
            # equality constraints (system equation)
            x_next = system(x_k,u_k,d_k)
            # mixed constraints vector calculation
            g_mixed = mixed(u_k, d_k)
            # Pin the first predicted state to the current measured state.
            if k == 0:
                G.append(x_k)
                lbg.append(x_init)
                ubg.append(x_init)
            G.append(x_next - x_k_next)
            lbg.append(np.zeros((nx,1)))
            ubg.append(np.zeros((nx,1)))
            # Added by me : mixed constraints with their bounds
            G.append(g_mixed)
            lbg.append(lbm)
            ubg.append(ubm)
            # inequality constraints
            lb_X.append(lbx)
            ub_X.append(ubx)
            lb_U.append(lbu)
            ub_U.append(ubu)
            # added by me
            #lb_M.append(lbm)
            #ub_M.append(ubm)
            ####################
        ## Terminal cost and constraints
        x_k = X[N*nx:(N+1)*nx,:]
        J += J_terminal(x_k)
        lb_X.append(lbx)
        ub_X.append(ubx)
        ### solve optimization problem with IPOPT
        lb = vertcat(vertcat(*lb_X),vertcat(*lb_U))
        ub = vertcat(vertcat(*ub_X),vertcat(*ub_U))
        prob = {'f':J,'x':vertcat(X,U),'g':vertcat(*G)}
        solver = nlpsol('solver','ipopt',prob)
        res = solver(lbx=lb,ubx=ub,lbg=vertcat(*lbg),ubg=vertcat(*ubg))
        # First input of the optimal sequence (receding-horizon control).
        u_opt = res['x'][(N+1)*nx:(N+1)*nx+nu,:]
        # Ignore this
        # g_constrained = res['g'][N*2]
        # print('res["x"] = %s'%res['x'])
        # print('u_opt = %s'%u_opt)
        # print('res["g"] = : %s'%g_constrained)
        ####################################
        ### simulate the system one step and record the trajectory
        x_plus = system(x_init.T,u_opt, d_full[step,:])
        mpc_x[step+1,:] = x_plus.T
        mpc_u[step,:] = u_opt.T
        x_init = x_plus
        # added by me
        g_plus = mixed(u_opt, d_full[step,:])
        mpc_g_mixed[step, :] = g_plus.T
        # print(mpc_g_mixed)
        ######################
    return mpc_u, mpc_x, mpc_g_mixed, d_full
def import_disturbance(filepath='external_disturbances.mat'):
    """Load the external disturbance matrix from a .mat file.

    Parameters:
    -----------
    filepath (str) : path to a MATLAB file containing the variables
                     'room_temp', 'sol_rad' and 'int_gains'

    Returns:
    --------
    np arr with the three disturbance signals as columns.
    """
    contents = sio.loadmat(filepath)
    print('disturbance vector loaded')
    columns = (contents['room_temp'], contents['sol_rad'], contents['int_gains'])
    stacked = np.column_stack(columns)
    print('peek into d_full (First 5 elements) :')
    print(stacked[0:5, :])
    return stacked
# Creates new disturbances by adding gaussian (normal) noise
def create_new_disturbance(d_full, noise_level=10):
    """Return a noisy copy of the disturbance matrix.

    Parameters:
    -----------
    d_full (np arr)     : base disturbance trajectory
    noise_level (float) : scale applied to standard-normal noise

    Returns:
    --------
    np arr of the same shape with Gaussian noise added.
    """
    perturbation = noise_level * np.random.normal(0, 1, d_full.shape)
    print('Original disturbance')
    #plot_disturbance(d_full)
    return d_full + perturbation
def plot_mpc(mpc_u, mpc_x, mpc_g_mixed):
    """Plot the MPC simulation results (states, inputs, mixed constraints).

    Parameters:
    -----------
    mpc_u (np arr)       : input history, one curve per input channel
    mpc_x (np arr)       : state history; column 0 = room temp Tr,
                           column 1 = battery energy Ebat
    mpc_g_mixed (np arr) : mixed-constraint values, one curve per constraint

    Opens four matplotlib figures and blocks on plt.show().
    """
    # The original re-imported matplotlib.pyplot locally and assigned
    # ``plt.hold = True`` before each figure; plt.hold was removed in
    # Matplotlib 2.x and both were dead code, so they are gone.
    print('*As a reminder, x_init = %s*'%mpc_x[0, :])
    # plot the states
    plt.figure(1)
    plt.plot(mpc_x[:,0])
    plt.title('state x[0] (room temp Tr)')
    plt.xlabel('t')
    plt.figure(2)
    plt.plot(mpc_x[:,1])
    plt.title('state x[1] (Energy in battery Ebat)')
    plt.xlabel('t')
    # plot the inputs
    plt.figure(3)
    for k in range(mpc_u.shape[1]):
        plt.plot(mpc_u[:,k])
    plt.title('inputs')
    plt.xlabel('t')
    # plot the constraints
    plt.figure(4)
    for k in range(mpc_g_mixed.shape[1]):
        plt.plot(mpc_g_mixed[:, k])
    plt.title('mixed constraints')
    plt.xlabel('t')
    # show the plots
    plt.show()
# Generates nb_x0 possible allowed combinations of the initial state vector x0
# Returns the array of combinations
def generate_list_x0(nb_x0 = 1000):
    """Generate allowed initial-state combinations [Tr0, Ebat0].

    Builds a grid of floor(sqrt(nb_x0)) temperatures in [20, 23] crossed
    with the same number of battery levels in [0, 200000].

    Parameters:
    -----------
    nb_x0 (int) : target number of combinations (the result has
                  floor(sqrt(nb_x0))**2 rows)

    Returns:
    --------
    np arr of shape (floor(sqrt(nb_x0))**2, 2).
    """
    import itertools
    import math
    # math.sqrt replaces the sqrt picked up from ``from casadi import *``,
    # removing an accidental dependency; int(...) truncates exactly as before.
    side = int(math.sqrt(nb_x0))
    x0_Tr = np.linspace(20, 23, num=side)
    x0_Ebat = np.linspace(0, 200000, num=side)
    # Cartesian product of the two grids (the unused counter is gone).
    x0_combinations = [[tr, ebat] for tr, ebat in itertools.product(x0_Tr, x0_Ebat)]
    return np.array(x0_combinations)
# This will be used to save training/testing data in csv format
def csv_dump(X_data, y_data, filepath='last_simulation_data100000lines.csv'):
    """Save training features and targets side by side as CSV.

    Parameters:
    -----------
    X_data (np arr) : feature rows
    y_data (np arr) : target rows (same number of rows as X_data)
    filepath (str)  : destination CSV path
    """
    import pandas as pd
    combined = np.concatenate((X_data, y_data), axis=1)
    # df = pd.DataFrame(temp, columns=['Tr0','Ebat0','dT','dsr','dint','Phvac','Pbat','Pgrid'])
    frame = pd.DataFrame(combined)
    print(frame.head())
    try:
        frame.to_csv(filepath)
    except IOError as e:
        print(e)
    else:
        print('csv data file successfully written to %s'%filepath)
# Core function of this script :
# Takes the list of combinations of x0
# Then simulates MPC optimization for each different x0 for N=5 and S=100
# Returns the simulation data : x and u
def generate_data(list_x0, d_training, N=5, S=100):
    """Simulate MPC from every initial state and assemble a training set.

    For each row of list_x0, runs simulate_MPC for S steps with horizon N,
    then pairs each visited state with the N-step disturbance window (with
    noise) seen at that step.

    Parameters:
    -----------
    list_x0 (np arr)    : initial states, one [Tr0, Ebat0] per row
    d_training (np arr) : disturbance trajectory (3 columns), length >= S + N
    N (int)             : MPC prediction horizon
    S (int)             : closed-loop steps per simulation

    Returns:
    --------
    (data_x_full, mpc_u_all) : features (state + unrolled disturbance window)
    and the corresponding optimal inputs.
    """
    data = np.array([])
    mpc_x_all = np.array([])
    mpc_u_all = np.array([])
    # d_matrix contains (N*3 unrolled disturbance vectors) and should be of size S
    d_matrix = np.array([])
    for i in range(S):
        # Noisy N-step look-ahead window starting at step i, flattened to one row.
        d_temp = create_new_disturbance(d_training[i:(i+N), :], noise_level=10)
        d_matrix = np.append(d_matrix, d_temp.reshape((1,N*3)))
    d_matrix = d_matrix.reshape((S, N*3))
    for i in range(list_x0.shape[0]):
        # One full closed-loop simulation per initial state.
        mpc_u, mpc_x, _, _ = simulate_MPC(d_training, x_init=list_x0[i,:], N=N, S=S)
        # Drop the final state so states align one-to-one with applied inputs.
        mpc_x_all = np.append(mpc_x_all, mpc_x[0:(mpc_x.shape[0]-1),:])
        mpc_u_all = np.append(mpc_u_all, mpc_u)
    mpc_x_all = mpc_x_all.reshape((list_x0.shape[0]*(mpc_x.shape[0]-1), 2))
    mpc_u_all = mpc_u_all.reshape((list_x0.shape[0]*mpc_u.shape[0], 3))
    data_x_full = np.zeros((mpc_x_all.shape[0], mpc_x_all.shape[1]+d_matrix.shape[1]))
    # duplicating disturbance list_x0.shape[0] times :
    d_final = np.array([])
    for i in range(list_x0.shape[0]):
        d_final = np.append(d_final, d_matrix)
    d_final = d_final.reshape((mpc_x_all.shape[0], d_matrix.shape[1]))
    # Feature layout: columns 0-1 = state, remaining = disturbance window.
    data_x_full[:, 0:2] = mpc_x_all
    data_x_full[:, 2:] = d_final
    return data_x_full, mpc_u_all
if __name__ == '__main__':
    # End-to-end data generation: load disturbances, enumerate initial
    # states, simulate MPC for each, and dump the training set to CSV.
    d_full = import_disturbance()
    list_x0 = generate_list_x0()
    data_x, data_y = generate_data(list_x0, d_full)
    csv_dump(data_x, data_y, filepath='Varying_disturbance_simulation_data10000lines.csv')
| ell-hol/mpc-DL-controller | data_generator.py | data_generator.py | py | 11,272 | python | en | code | 61 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
44302804442 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 21 08:43:08 2016
@author: RDCHLMTR
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
# Observed sample points to be fitted by an exponential model.
x = np.array([41,79,82,85,87,89,90,92,93,94,95,96,97,98,99,100,101,102,103,106])
y = np.array([4,11,14,16,17,18,21,23,25,27,30,32,34,37,40,44,47,50,57,73])
# Show the raw data as red dots.
plt.plot(x,y,'ro',label='original data')
def func(x, a, b):
    """Exponential model y = a * exp(0.1 * b * x) used by curve_fit."""
    exponent = 0.1 * b * x
    return a * np.exp(exponent)
# Fit the exponential model; popt holds (a, b), pcov the covariance matrix.
popt, pcov = opt.curve_fit(func, x, y)
fit = func(x, *popt)
# b is scaled by 0.1 inside func, hence popt[1]/10 in the printed equation.
print('y = {:.2f} * e ^ (x * {:.2f})'.format(popt[0],popt[1]/10))
plt.plot(x, fit, label='fitted curve')
plt.legend(loc='best')
plt.show()
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
37900967768 | import random
import sosbet
import datetime
import math
import sosfish_constants
def SellerText(data, user):
    """Build the merchant's daily offer text shown to *user*.

    Offers to buy three of today's fish for one unit of currency and, if
    the user has caught that fish, reports how many unsold ones remain.
    """
    fish = FishOfTheDay(data)
    output = f"You hear a local merchant offering to buy three {fish} for a {sosbet.CURRENCY}."
    profile = data[user]
    if fish not in profile["catchlog"]:
        # The user has never caught today's fish.
        output += "\nUnfortunately you do not have any."
        return output
    # Ensure a sell-log entry exists before computing the unsold count.
    profile["sell_log"].setdefault(fish, 0)
    count = profile["catchlog"][fish] - profile["sell_log"][fish]
    output += f" You have {count} {fish}"
    if count >= 3:
        output += ", sell three of them with \"!fish sell\"."
    else:
        output += "."
    return output
def Sell(data, user):
    """Sell three of today's fish for one unit of currency.

    Returns a message describing the outcome; the sale only happens when
    the user holds at least three unsold specimens of the daily fish.
    """
    fish = FishOfTheDay(data)
    profile = data[user]
    if fish not in profile["catchlog"]:
        return f"You do not have any {fish}"
    # Ensure a sell-log entry exists before computing the unsold count.
    profile["sell_log"].setdefault(fish, 0)
    count = profile["catchlog"][fish] - profile["sell_log"][fish]
    if count < 3:
        return f"You do not have enough {fish}."
    # Record the sale and credit the user.
    profile["sell_log"][fish] += 3
    sosbet.addMoney(user, 1)
    sosbet.saveMoney()
    return f"You sell three {fish} for {sosbet.CURRENCY}.\nYour new balance: {sosbet.balance(user, user)}"
def FishOfTheDay(data):
    """Pick today's featured fish deterministically from the calendar date.

    The day/month are hashed into an index selecting a location and one of
    its four fish. Locations named "Mead and Madness" or "Cat Cafe" are
    skipped, as are fish that appear in the pokemon list; each skip bumps
    the index by 10 and re-selects.
    """
    current_time = datetime.datetime.now()
    # Stable per-day index: same result all day, changes at midnight.
    index = current_time.day + current_time.month * 31
    #index = random.randrange(1000)
    location_id = int( math.floor(index/4) % len(data.keys()) )
    location_fish_id = (index%4)
    location = list(data.keys())[location_id]
    # Skip excluded venues by advancing the index and re-selecting.
    while "Mead and Madness" in location or "Cat Cafe" in location:
        index+=10
        location_id = int( math.floor(index/4) % len(data.keys()) )
        location_fish_id = (index%4)
        location = list(data.keys())[location_id]
    location_fish = data[location]["fish"][str(location_fish_id)]["name"]
    # Skip pokemon "fish" the same way, recomputing the candidate each pass.
    while location_fish in sosfish_constants.pokemon["ALL"]:
        index+=10
        location_id = int( math.floor(index/4) % len(data.keys()) )
        location_fish_id = (index%4)
        location = list(data.keys())[location_id]
        location_fish = data[location]["fish"][str(location_fish_id)]["name"]
    return location_fish
def amendProfile(data, name):
    """Ensure the profile for *name* has a 'sell_log' dict (migration shim)."""
    data[name].setdefault("sell_log", {})
{
"api_name": "sosbet.CURRENCY",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sosbet.addMoney",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sosbet.saveMoney",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sosbet.CURRENCY... |
71074785634 | # Quadratic Model (in x) from the UQ4K paper
#
# Author : Mike Stanley
# Created : Sep 30, 2021
# Last Modified : Sep 30, 2021
from collections.abc import Iterable
import numpy as np
from uq4k.models.base_model import BaseModel, Modelparameter
class QuadraticModel(BaseModel):
    """
    Implementation of the polynomial-in-X forward model
    y(x) = sum_j weight[j] * x**j.

    Key:
    - D = number of model parameters (polynomial degree + 1)
    - N = number of data points

    Parameters:
    -----------
    weight (np arr)        : polynomial coefficients, low degree first (D)
    weight_bounds (np arr) : bounds on the coefficients
    """
    def __init__(self, weight, weight_bounds):
        self.weight = weight
        self.weight_bounds = weight_bounds

    @property
    def modelparameter_weight(self):
        """Wrap the weights as a Modelparameter, with length when vector-valued."""
        if isinstance(self.weight, Iterable):
            return Modelparameter(
                "weight",
                "numeric",
                self.weight_bounds,
                len(self.weight),
            )
        else:
            return Modelparameter("weight", "numeric", self.weight_bounds)

    def __call__(self, X):
        """
        Evaluates the forward model for a vector of inputs

        Parameters:
        -----------
        X (np arr) : input values (N)

        Returns:
        --------
        forward output of data values (N)
        """
        N = X.shape[0]
        D = self.weight.shape[0]
        # powers[i, j] = j: exponent grid of shape (N, D).
        powers = np.tile(np.arange(D)[:, np.newaxis], N).T
        # Vandermonde-style matrix: X[i, j] = x_i ** j.
        X = np.power(np.tile(X[:, np.newaxis], D), powers)
        return X @ self.weight
| JPLMLIA/UQ4K | uq4k/models/quadratic_model.py | quadratic_model.py | py | 1,603 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "uq4k.models.base_model.BaseModel",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "collections.abc.Iterable",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "uq4k.models.base_model.Modelparameter",
"line_number": 37,
"usage_type": "c... |
70122758435 | from typing import Any, Dict
from django.forms.models import BaseModelForm
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth.views import LoginView, LogoutView
from django.urls import reverse_lazy
from django.views.generic import CreateView, ListView, DetailView, UpdateView, DeleteView
from main import models, forms
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from main.services import export_to_excel_service
import os
from uuid import uuid4
from django.core.files.base import ContentFile
class CustomLoginView(LoginView):
    """Login page; already-authenticated users are redirected onward."""
    redirect_authenticated_user = True
    template_name = 'account/login.html'

    def get_success_url(self):
        # Always land on the main menu after a successful login.
        return reverse_lazy('main_menu')

    def form_invalid(self, form):
        # Generic message: do not reveal which credential was wrong.
        messages.error(self.request, 'Invalid username or password')
        return self.render_to_response(self.get_context_data(form=form))

class CustomLogoutView(LogoutView):
    """Logout endpoint; sends the user back to the login page."""
    next_page = reverse_lazy('login')

class CustomRegistrationView(CreateView):
    """Self-service account registration for CustomUser."""
    template_name = "account/registration.html"
    model = models.CustomUser
    form_class = forms.CustomUserRegistrationForm
from django.contrib.auth.decorators import login_required, permission_required
@login_required
@permission_required('main.add_license')
def the_view(request):
    """Permission-gated example view that renders the index template."""
    return render(request, 'main/index.html')
class MainMenuView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
    """Main menu listing.

    The ``target`` query parameter selects which model backs the list
    (objects/users/documents/mine), defaulting to tasks; ``order`` applies
    optional sorting for licenses and tasks.
    """
    permission_required = 'main.add_license'
    template_name = "main/main_menu.html"
    queryset = models.Well

    def get_queryset(self):
        # NOTE(review): the permissions value is fetched but never used — confirm intent.
        user = self.request.user.get_all_permissions()
        if self.request.GET.get("target") == "objects":
            queryset = models.License.objects.all()
            # Optional ?order=<field> re-sorts the licenses.
            if self.request.GET.get('order'):
                ordering = self.request.GET.get('order')
                queryset = models.License.objects.order_by(ordering).all()
        elif self.request.GET.get("target") == "users":
            queryset = models.CustomUser.objects.exclude(is_admin=True).all()
        elif self.request.GET.get("target") == "documents":
            queryset = models.Documentation.objects.order_by('-id').all()
        elif self.request.GET.get("target") == "mine":
            queryset = models.Mine.objects.all()
        else:
            # Default tab: tasks, with the same optional ?order= sorting.
            queryset = models.Task.objects.all()
            if self.request.GET.get('order'):
                ordering = self.request.GET.get('order')
                queryset = models.Task.objects.order_by(ordering).all()
        return queryset

    def get_context_data(self, **kwargs):
        # Expose the active tab name to the template.
        context = super().get_context_data(**kwargs)
        context['target'] = self.request.GET.get('target')
        return context
"""OBJECTS CLASS-BASED VIEWS"""
class ObjectCreateView(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
permission_required = ('main.add_license',)
template_name = "main/objects/new.html"
model = models.License
form_class = forms.ObjectCreateForm
success_url = "/main_menu?target=objects"
# success_url = reverse_lazy("main_menu", kwargs={'target': 'objects'},)
class ObjectDetailView(LoginRequiredMixin, DetailView):
template_name = "main/objects/index.html"
model = models.License
queryset = models.License.objects.all()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['lines'] = models.LineLicenseWaterCourse.objects.filter(license=self.get_object()).all()
context['watercourses'] = models.LicenseWaterCourse.objects.filter(license=self.get_object()).all()
return context
class ObjectEditView(LoginRequiredMixin, UpdateView):
template_name = "main/objects/edit.html"
model = models.License
form_class = forms.ObjectUpdateForm
success_url = "/main_menu?target=objects"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['formm'] = forms.LicenseWaterCourseCreateForm
context['lines'] = models.LineLicenseWaterCourse.objects.filter(license=self.get_object()).all()
context['watercourses'] = models.LicenseWaterCourse.objects.filter(license=self.get_object()).all()
return context
# def get_object(self, queryset):
# queryset = self.queryset
# return super().get_object(queryset)
"""TASKS CLASS-BASED VIEWS"""
class TaskCreateView(LoginRequiredMixin, CreateView):
template_name = "main/tasks/new.html"
model = models.Task
form_class = forms.TaskCreateForm
success_url = "/main_menu?target=tasks"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class TaskDetailView(LoginRequiredMixin, DetailView):
template_name = "main/tasks/index.html"
model = models.Task
queryset = models.Task.objects.all()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['wells'] = models.WellTask.objects.filter(task=self.get_object()).all()
context['images'] = models.TaskImage.objects.filter(task=self.get_object()).all()
return context
class TaskEditView(LoginRequiredMixin, UpdateView):
template_name = "main/tasks/edit.html"
model = models.Task
form_class = forms.TaskUpdateForm
success_url = "/main_menu?target=tasks"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['wells'] = models.WellTask.objects.filter(task=self.get_object()).all()
context['images'] = models.TaskImage.objects.filter(task=self.get_object()).all()
return context
class TaskImageRemoveView(LoginRequiredMixin, DeleteView):
template_name = "main/tasks/task_images/remove.html"
model = models.TaskImageSingle
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['task'] = models.Task.objects.get(pk=self.kwargs.get('task_id'))
return context
def get_success_url(self):
success_url = f"/tasks/edit/{self.kwargs.get('task_id')}"
return success_url
"""USERS CLASS-BASED VIEWS"""
class CustomUserCreateView(LoginRequiredMixin, CreateView):
template_name = "main/users/new.html"
model = models.CustomUser
form_class = forms.CustomUserCreateForm
success_url = "/main_menu?target=users"
class CustomUserDetailView(LoginRequiredMixin, DetailView):
template_name = "main/users/index.html"
model = models.CustomUser
queryset = models.CustomUser.objects.all()
class CustomUserEditView(LoginRequiredMixin, UpdateView):
template_name = "main/users/edit.html"
model = models.CustomUser
form_class = forms.CustomUserUpdateForm
success_url = "/main_menu?target=users"
# def get_object(self, queryset):
# queryset = self.queryset
# return super().get_object(queryset)
class CustomUserPasswordChangeView(LoginRequiredMixin, UpdateView):
template_name = "main/users/change_password.html"
model = models.CustomUser
form_class = forms.CustomUserPasswordChangeForm
def get_success_url(self):
success_url = "/users/edit/%s"%self.get_object().pk
return success_url
def get_form(self, *args, **kwargs):
form = super(CustomUserPasswordChangeView, self).get_form(*args, **kwargs)
form.fields['password'].required = False
return form
"""WATERCOURSES CLASS-BASED VIEWS"""
class WaterCourseCreateView(LoginRequiredMixin, CreateView):
template_name = "main/objects/watercourses/new.html"
model = models.WaterCourse
form_class = forms.WaterCourseCreateForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("license_id")
context['license_id'] = license_id
return context
def get_success_url(self):
success_url = f"/objects/set_watercourses/{self.kwargs.get('license_id')}"
return success_url
class LicenseWaterCourseCreateView(LoginRequiredMixin, CreateView):
template_name = "main/objects/watercourses_licenses/new.html"
model = models.WaterCourse
form_class = forms.LicenseWaterCourseCreateForm
# success_url = "/main_menu?target=users"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("pk")
context['license_id'] = license_id
context['license_name'] = models.License.objects.get(pk=license_id).short_name
return context
def get_success_url(self):
success_url = f"/objects/set_watercourses/{self.kwargs.get('pk')}"
return success_url
class LicenseWaterCourseRemoveListView(LoginRequiredMixin, ListView):
template_name = "main/objects/watercourses_licenses/remove.html"
model = models.LicenseWaterCourse
# success_url = "/main_menu?target=users"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("pk")
context['license_id'] = license_id
context['license_name'] = models.License.objects.get(pk=license_id).short_name
context['object_list'] = models.LicenseWaterCourse.objects.filter(license=self.kwargs.get("pk"))
return context
def get_success_url(self):
success_url = f"/objects/edit/{self.kwargs.get('pk')}"
return success_url
class LicenseWaterCourseRemoveView(LoginRequiredMixin, DeleteView):
template_name = "main/objects/watercourses_licenses/remove.html"
model = models.LicenseWaterCourse
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("pk")
context['license_id'] = license_id
context['license_name'] = models.License.objects.get(pk=license_id).short_name
context['object_list'] = models.LicenseWaterCourse.objects.filter(license=self.kwargs.get("pk"))
return context
def get_success_url(self):
success_url = f"/objects/edit/{self.kwargs.get('license_id')}"
return success_url
"""LINES CLASS-BASED VIEWS"""
class LineCreateView(LoginRequiredMixin, CreateView):
template_name = "main/objects/lines/new.html"
model = models.Line
form_class = forms.LineCreateForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("license_id")
context['license_id'] = license_id
return context
def get_success_url(self):
success_url = f"/objects/set_lines/{self.kwargs.get('license_id')}"
return success_url
class LineLicenseWaterCourseCreateView(LoginRequiredMixin, CreateView):
template_name = "main/objects/lines_watercourses_licenses/new.html"
model = models.LineLicenseWaterCourse
form_class = forms.LineLicenseWaterCourseCreateForm
# success_url = "/main_menu?target=users"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("pk")
context['license_id'] = license_id
context['license_name'] = models.License.objects.get(pk=license_id).short_name
return context
def get_success_url(self):
success_url = f"/objects/edit/{self.kwargs.get('pk')}"
return success_url
class LineLicenseWaterCourseRemoveListView(LoginRequiredMixin, ListView):
template_name = "main/objects/lines_watercourses_licenses/remove.html"
model = models.LineLicenseWaterCourse
form_class = forms.LineLicenseWaterCourseCreateForm
# success_url = "/main_menu?target=users"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("pk")
context['license_id'] = license_id
context['license_name'] = models.License.objects.get(pk=license_id).short_name
context['object_list'] = models.LineLicenseWaterCourse.objects.filter(license=self.kwargs.get("pk"))
return context
def get_success_url(self):
success_url = f"/objects/edit/{self.kwargs.get('pk')}"
return success_url
class LineLicenseWaterCourseRemoveView(LoginRequiredMixin, DeleteView):
template_name = "main/objects/lines_watercourses_licenses/remove.html"
model = models.LineLicenseWaterCourse
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("pk")
context['license_id'] = license_id
context['license_name'] = models.License.objects.get(pk=license_id).short_name
return context
def get_success_url(self):
success_url = f"/objects/edit/{self.kwargs.get('license_id')}"
return success_url
"""WELLS CLASS-BASED VIEWS"""
class WellCreateView(LoginRequiredMixin, CreateView):
template_name = "main/tasks/wells/new.html"
model = models.Well
form_class = forms.WellCreateForm
success_url = "/main_menu?target=tasks"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
license_id = self.kwargs.get("license_id")
context['license_id'] = license_id
return context
def get_success_url(self):
license_id = self.kwargs.get("license_id")
success_url = f"/objects/set_watercourses/{license_id}"
return success_url
class WellDetailView(LoginRequiredMixin, DetailView):
template_name = "main/tasks/wells/index.html"
model = models.Well
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['task_id'] = self.kwargs.get("task_id")
context['layers'] = models.Layer.objects.filter(well=self.get_object()).all()
return context
class WellEditView(LoginRequiredMixin, UpdateView):
template_name = "main/tasks/wells/edit.html"
model = models.Well
form_class = forms.WellUpdateForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['task_id'] = self.kwargs.get("task_id")
return context
def get_success_url(self):
success_url = "/wells/%s/%s"%(self.kwargs.get("task_id"), self.get_object().pk)
return success_url
class WellTaskCreateView(LoginRequiredMixin, CreateView):
template_name = "main/tasks/wells/well_tasks/new.html"
model = models.WellTask
form_class = forms.WellTaskCreateForm
success_url = "/main_menu?target=tasks"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
task_id = self.kwargs.get("pk")
context['task_id'] = task_id
context['task_name'] = models.License.objects.get(pk=task_id).short_name
return context
"""LAYERS CLASS-BASED VIEWS"""
class LayerCreateView(LoginRequiredMixin, CreateView):
template_name = "main/tasks/wells/layers/new.html"
model = models.Layer
form_class = forms.LayerCreateForm
def get_success_url(self):
well_id = self.request.GET.get("well")
return f"/wells/{well_id}"
class LayerDetailView(LoginRequiredMixin, DetailView):
template_name = "main/tasks/wells/layers/index.html"
model = models.Layer
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
well_id = self.request.GET.get("well")
task_id = self.request.GET.get("task")
context['back_url'] = f"/wells/{task_id}/{well_id}"
context['well_id'] = well_id
return context
class LayerUpdateView(LoginRequiredMixin, UpdateView):
template_name = "main/tasks/wells/layers/edit.html"
model = models.Layer
form_class = forms.LayerCreateForm
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
well_id = self.request.GET.get("well")
context['back_url'] = f"/wells/{well_id}"
return context
def get_success_url(self):
well_id = self.request.GET.get("well")
return f"/wells/{well_id}"
"""DOCUMENTATION CLASS-BASED VIEWS"""
class DocumentationCreateView(LoginRequiredMixin, CreateView):
template_name = "main/documents/new.html"
model = models.Documentation
form_class = forms.DocumentsCreateForm
success_url = "/main_menu?target=documents"
def form_valid(self, form):
license = form.cleaned_data.get('license')
watercourse = form.cleaned_data.get('watercourse')
line = form.cleaned_data.get('line')
well = form.cleaned_data.get('well')
watercourse_bound = models.LicenseWaterCourse.objects.get(watercourse = watercourse)
export_service = export_to_excel_service.ExportToExcelService()
export_service.build_document(license=license, watercourse=watercourse, watercourse_bound=watercourse_bound, line=line, well=well)
return super().form_valid(form)
def get_success_url(self):
url = f'/documents/{self.object.id}'
return url
class DocumentationDetailView(LoginRequiredMixin, DetailView):
template_name = "main/documents/index.html"
model = models.Documentation
success_url = "/main_menu?target=documents"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['file'] = '/media/example.xlsx'
return context
class DocumentationUpdateView(LoginRequiredMixin, UpdateView):
template_name = "main/documents/edit.html"
model = models.Documentation
form_class = forms.DocumentsCreateForm
success_url = "/main_menu?target=documents"
"""MINE CLASS-BASED VIEWS"""
class MineCreateView(LoginRequiredMixin, CreateView):
    """Create a Mine record."""
    template_name = "main/mine/new.html"
    model = models.Mine
    form_class = forms.MineCreateForm
    success_url = "/main_menu?target=mine"

    # Sketch of a planned bulk-import JSON payload, kept for reference
    # (Russian annotations translated to English):
    # def post(self, request, *args, **kwargs):
    #     json = [
    #         {
    #             'slice_number': str (line number),
    #             'river': str (watercourse name),
    #             'borehole_number': str (well number),
    #             'gps': [float, float, float] (coordinates),
    #             'layers_power': [float, float, ..., float] (interval thicknesses),
    #             'layers_id': [float, float, ..., float] (material types),
    #         },
    #         ...,
    #         {
    #         }
    #     ]
    #     return super().post(request, *args, **kwargs)
class MineDetailView(LoginRequiredMixin, DetailView):
    """Show one Mine record."""
    template_name = "main/mine/index.html"
    model = models.Mine
    # NOTE: DetailView never reads success_url; kept for parity with siblings.
    success_url = "/main_menu?target=mine"
class MineUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing Mine record."""
    template_name = "main/mine/edit.html"
    model = models.Mine
    form_class = forms.MineCreateForm
    # On success, return to the mine tab of the main menu.
    success_url = "/main_menu?target=mine"
# class MineImageCreateView(CreateView):
| Lifanna/geology_proj | geology_proj/main/views.py | views.py | py | 19,256 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.views.LoginView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 25,
"usage_type": "call"
},... |
21181509403 | from mpl_toolkits.mplot3d import axes3d
import numpy as np
import matplotlib.pyplot as plt
def read(filename, delimiter=','):
    """Load a delimited text file into a NumPy array (bad fields become NaN)."""
    array = np.genfromtxt(filename, delimiter=delimiter)
    return array
def plot(array):
    """Render an Nx3 array of XYZ points as a 3-D line plot.

    Blocks on plt.show() until the window is closed.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')  # 111 means "1x1 grid, first subplot"
    p = ax.plot(array[:, 0], array[:, 1], array[:, 2], label='target')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    plt.legend()
    plt.show()
def main():
    """Accumulate VO translations from vo.csv and plot the resulting path."""
    import transformations
    data = read('vo.csv')
    # Drop the header row; keep columns 2..7 (presumably tx, ty, tz,
    # roll, pitch, yaw -- confirm against the CSV producer).
    data = data[1:len(data), 2:8]
    current = np.array([0., 0., 0.])  # .transpose()
    # current = np.matrix(np.identity(4))
    num_examples = len(data)
    ts = np.empty((num_examples, 3))
    poses = np.empty((num_examples, 12))
    i = 0
    for t in data:
        # Returns a 4x4 homogeneous matrix.
        # t[3] = roll, t[4] = pitch, t[5] = yaw
        T = transformations.euler_matrix(t[3], t[4], t[5], 'sxyz')
        T[0:3, 3] = t[0:3]
        # Naive accumulation: translations are summed without applying rotation.
        current = t[0:3] + current  # np.linalg.inv(T) *current #np.linalg.inv(T) * current
        ts[i] = current  # [0:3,3].transpose()
        # poses[i] = current[0:3,:].reshape(12)
        i += 1
    # NOTE(review): `poses` is never filled (the assignment above is commented
    # out), so poses.txt contains uninitialized memory -- confirm intent.
    np.savetxt("poses.txt", poses, delimiter=" ")
    plot(ts)
# Script entry point: build and plot the integrated trajectory.
if __name__ == "__main__":
    main()
| CIFASIS/wganvo | vgg_trainable/test/plot_traj.py | plot_traj.py | py | 1,301 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "numpy.genfromtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib... |
31298542819 |
'''
Read COVID-19 case data from HDX and store as a set of json files.
This can be used to provide a no-backend API if the files are saved
in the DocumentRoot of a server. For example:
http://some.host/all.json # global data, plus manifest of other countries
http://some.host/CAN.json # a specific country
Usage:
cvapi.py TARGET_DIR
'''
import requests
import pandas as pd
import numpy as np
from wbgapi.economy import coder
from datetime import datetime
import os
import json
from docopt import docopt
# docopt parses the module docstring above; TARGET_DIR is the only argument.
options = docopt(__doc__)

config = {
    'hdx_url': 'https://data.humdata.org/api/3/action/package_show?id=novel-coronavirus-2019-ncov-cases',
    # Build timestamp captured once at startup (UTC, ISO-8601, no zone suffix).
    'build_date': datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%M:%S'),
    'build_dir': options['TARGET_DIR'],
}
def to_json(c, d, r, **kwargs):
    """Merge aligned confirmed/death/recovery series into a JSON-ready dict.

    c, d, r are pandas Series indexed by 'm/d/yy' date strings (the HDX
    column format); kwargs are merged into the metadata block (iso, name,
    lat, lon, ...).  NaN values are published as 0.
    """
    # `config` is only read here, so the original `global config` statement
    # was unnecessary and has been removed.
    data = {
        'meta': {
            'build_date': config['build_date'],
            'update_date': config['update_date'],
        },
        'data': []
    }
    data['meta'].update(kwargs)
    for i in c.index:
        ts = datetime.strptime(i, '%m/%d/%y')  # e.g., 3/12/20
        # NaN means "no report that day"; publish it as zero.
        confirmed, deaths, recovered = int(np.nan_to_num(c[i])), int(np.nan_to_num(d[i])), int(np.nan_to_num(r[i]))
        key = datetime.strftime(ts, '%Y/%m/%d')
        data['data'].append({'date': key, 'confirmed': confirmed, 'deaths': deaths, 'recovered': recovered})
    return data
# get the latest data resource links
response = requests.get(config['hdx_url'])
ckan = response.json()
meta_mod = datetime.strptime(ckan['result']['metadata_modified'], '%Y-%m-%dT%H:%M:%S.%f')
config['update_date'] = datetime.strftime(meta_mod, '%Y-%m-%dT%H:%M:%S')
# assume that Confirmed, Deaths, and Recoveries are the 1st-3rd datasets
confirmed_url, deaths_url, recovery_url = map(lambda x: x['url'], ckan['result']['resources'][0:3])
manifest = {}
# Treat zeros as missing, then drop date columns that are empty everywhere.
c = pd.read_csv(confirmed_url).replace(0, np.nan).dropna(how='all', axis=1)
d = pd.read_csv(deaths_url).replace(0, np.nan).dropna(how='all', axis=1)
r = pd.read_csv(recovery_url).replace(0, np.nan).dropna(how='all', axis=1)
date_columns = list(filter(lambda x: x not in ['Lat', 'Long', 'Province/State', 'Country/Region'], c.columns))
# this is the file name for subnational estimates
# NOTE(review): str.replace is given a regex pattern without regex=True --
# relies on the old pandas default; newer pandas treats the pattern literally.
c['stp_key'] = c['Province/State'].fillna('').str.replace(r'\W','').str.upper()
# c, d & r aren't always in the same order, so we need to create a common index
c['geokey'] = c['Province/State'].fillna('_') + ':' + c['Country/Region'].fillna('_')
d['geokey'] = d['Province/State'].fillna('_') + ':' + d['Country/Region'].fillna('_')
r['geokey'] = r['Province/State'].fillna('_') + ':' + r['Country/Region'].fillna('_')
c.set_index('geokey', inplace=True)
d.set_index('geokey', inplace=True)
r.set_index('geokey', inplace=True)
# Global aggregate across every row.
data = to_json(c.sum()[date_columns], d.sum()[date_columns], r.sum()[date_columns], iso='WLD', name='World')
with open(os.path.join(config['build_dir'], 'world.json'), 'w') as fd:
    json.dump(data, fd)
# aggregate by country
aggs = c.groupby('Country/Region').agg([np.min, np.max, 'count'])
c2 = c.groupby('Country/Region').sum()[date_columns]
d2 = d.groupby('Country/Region').sum()[date_columns]
r2 = r.groupby('Country/Region').sum()[date_columns]
for key in c2.index:
    iso = coder(key)
    if iso:
        manifest[iso] = {'name': key, 'locales': []}
        with open(os.path.join(config['build_dir'], iso + '.json'), 'w') as fd:
            meta = dict(iso=iso, name=key)
            # Countries with no subnational rows: pass coordinates through.
            if aggs.loc[key]['Province/State']['count'] == 0:
                meta['lon'] = aggs.loc[key]['Long']['amin']
                meta['lat'] = aggs.loc[key]['Lat']['amin']
            data = to_json(c2.loc[key], d2.loc[key], r2.loc[key], **meta)
            json.dump(data, fd)
# now write subnational data
for key in c.dropna(subset=['Province/State']).index:
    row = c.loc[key]
    iso = coder(row['Country/Region'])
    # we skip rows where the latest day is empty. This eliminates county-level records
    # in the US where a handful of cases were recorded but later counted at the state level
    if iso and not np.isnan(row[date_columns[-1]]):
        manifest[iso]['locales'].append(os.path.join(iso, row['stp_key']))
        try:
            os.mkdir(os.path.join(config['build_dir'], iso))
        except:
            pass
        with open(os.path.join(config['build_dir'], iso, row['stp_key'] + '.json'), 'w') as fd:
            data = to_json(c.loc[key][date_columns], d.loc[key][date_columns], r.loc[key][date_columns], iso=iso, name=row['Province/State'], lat=row['Lat'], lon=row['Long'])
            json.dump(data, fd)
with open(os.path.join(config['build_dir'], 'manifest.json'), 'w') as fd:
    json.dump(manifest, fd)
| hkashiwase/decdg-covid19 | python/cvapi.py | cvapi.py | py | 4,710 | python | en | code | null | github-code | 1 | [
{
"api_name": "docopt.docopt",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "datetime.d... |
5053673486 | from application.Models.models import User
from flask import escape
from base64 import b64decode, b64encode
import json
from datetime import datetime
from application import app
import os
from geopy.distance import geodesic
# Canned JSON payloads shared by the API views.  Plain dict literals replace
# the redundant dict({...}) wrapping of the original (same values, no copy).

# Request carried no valid session/token.
notLoggedIn = {
    "isLoggedIn": False,
    'message': 'Your are not logged in',  # NOTE(review): typo kept -- clients may match on it
}

found = {
    "isLoggedIn": True,
}

dataUpdateResponse = {
    "isLoggedIn": True,
    'isUpdated': True,
}

dataNotUpdateResponse = {
    "isLoggedIn": True,
    'isUpdated': False,
}

dataSavedResponse = {
    "isLoggedIn": True,
    'isSaved': True,
}

dataNotSavedResponse = {
    "isLoggedIn": True,
    'isSaved': False,
}

invalidArgsResponse = {
    "isLoggedIn": True,
    "isError": True,
    'message': 'Invalid data',
}
def AuthorizeRequest(headers):
    """Resolve the Authorization header to a User, or return False.

    The header value is expected to be the base64 encoding of the token
    stored on the User row.  Returns the matching User instance, or False
    when the header is absent, malformed, or matches no user.
    """
    if not 'Authorization' in headers:
        return False
    token = headers['Authorization']
    # escape() guards against template injection if the value is re-rendered.
    # NOTE(review): non-ASCII header text would raise UnicodeEncodeError on
    # the encode below -- presumably filtered upstream; confirm.
    token = escape(token)
    token_str = str(token).encode('ascii')
    # Valid base64 text is a multiple of 4 chars; reject anything else early.
    missing_padding = len(token_str) % 4
    if missing_padding:
        return False
    token = b64decode(token_str)
    user = User.query.filter_by(token=token)
    if not user.count() > 0:
        return False
    return user.first()
def isBase64(s):
    """Return the decoded JSON payload if *s* round-trips as base64, else False.

    Bug fix: the original compared b64encode(...) (always bytes) against the
    raw argument, so a str argument could never compare equal in Python 3 and
    the function always returned False for text input.  The comparison is now
    done bytes-to-bytes for both str and bytes arguments.
    """
    try:
        raw = s.encode('ascii') if isinstance(s, str) else s
        if b64encode(b64decode(raw)) == raw:
            # Delegate JSON parsing/validation to get_decoded (defined below).
            return get_decoded(s)
        return False
    except Exception:
        return False
def get_decoded(data):
    """Base64-decode *data* and parse it as JSON; False on any failure."""
    encoded = str(data).encode('ascii')
    if len(encoded) % 4 != 0:
        # Not a multiple of 4 chars -> cannot be well-padded base64.
        return False
    try:
        return json.loads(b64decode(encoded))
    except:
        return False
def b64_to_data(data):
    """Base64-decode *data* to raw bytes; False if it cannot be decoded."""
    encoded = str(data).encode('ascii')
    if len(encoded) % 4 != 0:
        # Reject obviously mis-padded input before attempting to decode.
        return False
    try:
        return b64decode(encoded)
    except:
        return False
def uploadPostImage(image, user):
    """Save an uploaded post image under static/posts with a unique name.

    Returns (True, filename) on success, (False, None) on any error.  The
    filename is <fullname-without-spaces><user_id><timestamp-ms>.jpg.

    Bug fix: the original re-parsed str(datetime.now()) with a '%f' format
    directive; str() omits the fractional part when microseconds are exactly
    0, so strptime raised ValueError roughly once per million calls.  The
    datetime object is now used directly -- same resulting filename.
    """
    dt_obj = datetime.now()
    millisec = str(dt_obj.timestamp() * 1000)
    time = millisec.replace(".", "")
    imageName = user.fullname.replace(" ", "") + str(user.user_id) + time + ".jpg"
    try:
        folder = os.path.join(app.root_path, 'static/posts')
        file_path = os.path.join(folder, imageName)
        image.save(file_path)
        return True, imageName
    except Exception as e:
        # Best-effort: log and report failure instead of crashing the request.
        print(e)
        return False, None
def get_location_distance(location_1, location_2):
    """Geodesic distance in kilometres between two (lat, lon) pairs."""
    return geodesic(location_1, location_2).km
| theirfanirfi/flask-book-exchange-apis | application/API/utils.py | utils.py | py | 2,536 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.escape",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "application.Models.models.User.query.filter_by",
"line_number": 55,
"usage_type": "call"
},
{
"api_... |
4614264680 | import pygame
import os
pygame.init()

# Pre-built fonts, largest to smallest; DEFAULT_FONT indexes into this list.
FONTS = [
    pygame.font.Font(pygame.font.get_default_font(), font_size) for font_size in [48, 36, 16, 12]
]
DEFAULT_FONT = 2

# Cell/line colors keyed by role name; "<kind>-text" keys color the counters.
COLORS = {
    "bg": (200, 200, 200),  # background color
    "select": (0, 139, 139),
    "current": (255, 192, 203),
    "line": (175, 175, 175),
    "wall": (50, 50, 50),
    "start": (65, 105, 225),  # RoyalBlue
    "end": (0, 128, 0),
    "visited":(153, 50, 204),
    "current-text": (255,255,255),
    "visited-text": (255,255,255),
    "visit-line": (0, 255, 0),
    "cost-text": (255, 255, 255),
    "heuristic-text": (255, 255, 255),
    "priority-text": (255,255,0),
    "path": (0, 0, 255),
}
# Later tweaks override the palette above.
COLORS["current"] = (135, 206, 250)
COLORS["visited"] = (100, 100, 125)
COLORS["wall"] = (251, 114, 153)

# 4-connected neighbourhood as (dcol, drow) deltas.
DIRECTIONS = {
    "UP": (0, -1),
    "RIGHT": (1, 0),
    "DOWN": (0, 1),
    "LEFT": (-1, 0),
}

# Desktop position of the pygame window (pixels).
pos_x, pos_y = 40, 50
class NodeValueError(Exception):
    """Raised when a maze file contains a cell other than '0' or '1'."""
    pass
def read_graph_from_txt(filename):
    """Parse a maze file into a list of rows of '0'/'1' strings.

    Spaces are ignored, blank lines are dropped, and any other character
    raises NodeValueError.
    """
    with open(filename, 'r') as fh:
        text = fh.read()
    grid = []
    for raw_line in text.split("\n"):
        cells = []
        for ch in raw_line.replace(" ", ""):
            if ch not in ("0", "1"):
                raise NodeValueError("Invalid cell value: %s" % ch)
            cells.append(ch)
        if cells:
            grid.append(cells)
    return grid
class BasicAnimation():
    """Grid-world drawing surface: window setup, cells, grid lines, event pump."""

    def __init__(self, graph_list, size, title, fps):
        self.clock = pygame.time.Clock()
        self.r = len(graph_list)      # number of rows
        self.c = len(graph_list[0])   # number of columns
        self.cell_size = size
        # Set the window position on the desktop.
        os.environ["SDL_VIDEO_WINDOW_POS"] = "%d, %d" % (pos_x, pos_y)
        width, height = self.c * self.cell_size, self.r * self.cell_size
        self.win = pygame.display.set_mode((width, height))
        pygame.display.set_caption(title)
        self.graph = graph_list
        self.fps = fps
        self.win.fill(COLORS["bg"])
        self.start = None
        self.end = None
        self.last_points = None
        self.count = 0

    def init(self):
        # First frame: paint the walls.
        self.display(self.draw_graph)

    def draw_cell(self, ci, ri, color):
        # (ci, ri) are column/row indices; `color` is a key into COLORS.
        rect = (ci * self.cell_size, ri * self.cell_size, self.cell_size, self.cell_size)
        pygame.draw.rect(self.win, COLORS[color], rect)

    def draw_graph(self):
        # Paint every wall cell ("1"); free cells keep the background color.
        for ri, row in enumerate(self.graph):
            for ci, cell in enumerate(row):
                if cell == "1":
                    self.draw_cell(ci, ri, "wall")

    def draw_line(self, line_color="line"):  # draw the grid lines
        for ci in range(self.c):
            cx = self.cell_size * ci
            pygame.draw.line(self.win, COLORS[line_color], (cx, 0), (cx, self.r * self.cell_size))
        for ri in range(self.r):
            ry = self.cell_size * ri
            pygame.draw.line(self.win, COLORS[line_color], (0, ry), (self.c * self.cell_size, ry))

    def draw_start_end(self, start, end):
        # Remember and paint the endpoints; both are (column, row) tuples.
        self.start = start
        self.end = end
        sc, sr = self.start
        self.draw_cell(sc, sr, "start")
        ec, er = self.end
        self.draw_cell(ec, er, "end")

    def get_neighbours(self, point):
        # In-bounds, non-wall 4-neighbours of `point` (column, row).
        neigh = []
        pc, pr = point
        for d in DIRECTIONS:
            dc, dr = DIRECTIONS[d]
            nc, nr = pc + dc, pr + dr
            if nc < 0 or nc >= self.c or nr < 0 or nr >= self.r:
                continue
            if self.graph[nr][nc] == "0":
                neigh.append((nc, nr))
        return neigh

    def display(self, func=None, *args, **kwargs):
        """Run one frame: drain events, optionally call func, redraw grid."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # The user clicked the window close button.
                pygame.quit()
        if func:
            func(*args, **kwargs)
        self.draw_line()
        pygame.display.update()
        self.clock.tick(self.fps)

    def update(self):
        pygame.display.update()
        self.clock.tick(self.fps)

    def delay(self, tc):
        # Hold the current frame for `tc` ticks at self.fps.
        for _ in range(tc):
            self.clock.tick(self.fps)
            pygame.display.update()

    def done(self):
        # Block until the user closes the window.
        while True:
            # Drain all pending events.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    # The user clicked the window close button.
                    pygame.quit()
                    return
            self.clock.tick(self.fps)
            pygame.display.update()
class Animation(BasicAnimation):
    """BasicAnimation plus per-cell BFS bookkeeping (predecessor + depth)."""

    def __init__(self, graph_list, size=32, title="Path-finding Animation by BigShuang",
                 fps=30):
        # Bug fix: the original called super().__init__(self, ...) with an
        # explicit `self` argument, which raises TypeError (one argument too
        # many) on any instantiation.
        super().__init__(graph_list, size, title, fps)
        # depth_map[row][col] is None or (prev_vertex, depth-from-start).
        self.depth_map = [
            [None for i in range(self.c)] for j in range(self.r)
        ]

    def draw_start_end(self, start, end):
        # Bug fix: the original called super().draw_cell(self, start, end)
        # (wrong method, wrong arguments) and then read undefined sr/sc.
        super().draw_start_end(start, end)
        sc, sr = start
        self.depth_map[sr][sc] = (None, 0)

    def draw_count(self, ci, ri, kind, size=2):
        # Render the cell's depth counter centered in cell (ci, ri).
        text = FONTS[size].render("%s" % self.depth_map[ri][ci][1], True, COLORS[kind + "-text"])
        cx, cy = ci * self.cell_size + self.cell_size // 2, ri * self.cell_size + self.cell_size // 2
        text_rect = text.get_rect(center=(cx, cy))
        self.win.blit(text, text_rect)

    def draw_vline(self, v, prev_v):
        # Line between the centers of vertex v and its predecessor.
        end_pos = (v[0] * self.cell_size + self.cell_size // 2,
                   v[1] * self.cell_size + self.cell_size // 2)
        start_pos = (prev_v[0] * self.cell_size + self.cell_size // 2,
                     prev_v[1] * self.cell_size + self.cell_size // 2)
        pygame.draw.line(self.win, COLORS["visit-line"], start_pos, end_pos, width=3)

    def set_prev(self, v, prev_v):
        # Record v's predecessor and derive its depth from prev_v's depth.
        vc, vr = v
        pvc, pvr = prev_v
        depth = self.depth_map[pvr][pvc][1] + 1
        self.depth_map[vr][vc] = (prev_v, depth)

    def draw_points(self, *points, **kwargs):
        """Repaint the previous frontier as 'visited', then paint `points` as 'current'."""
        size = kwargs.get("size", DEFAULT_FONT)
        draw_line = kwargs.get("line", False)
        # Demote the previous frontier.
        if self.last_points:
            for point in self.last_points:
                pc, pr = point
                self.draw_cell(pc, pr, "visited")
                if draw_line:
                    prev_v, depth = self.depth_map[pr][pc]
                    if prev_v:
                        self.draw_vline(point, prev_v)
                else:
                    self.draw_count(pc, pr, "visited", size)
        # Paint the new frontier.
        self.count += 1
        for point in points:
            pc, pr = point
            self.draw_cell(pc, pr, "current")
            if self.depth_map[pr][pc] is not None:
                if draw_line:
                    prev_v, depth = self.depth_map[pr][pc]
                    if prev_v:
                        self.draw_vline(point, prev_v)
                else:
                    self.draw_count(pc, pr, "current", size)
        self.last_points = points
class AStarAnimation(BasicAnimation):
    """BasicAnimation specialised for A*: shows cost/heuristic/priority per cell."""

    def __init__(self, graph_list, size=48, title="Path-finding Animation by BigShuang",
                 fps=30):
        super().__init__(graph_list, size, title, fps)
        self.prev_map = {
            # cur_vertex: prev_vertex
        }
        width, height = self.c * self.cell_size, self.r * self.cell_size
        # vertex -> (cost, heuristic, priority) for expanded nodes ...
        self.explored = {}
        # ... and for the current frontier of neighbour candidates.
        self.neigh = {}

    def draw_count(self, ci, ri, kind, values, size=2):
        # Render cost (top-left), heuristic (top-right) and priority
        # (bottom-left) inside cell (ci, ri).
        tlst = [
            FONTS[size].render("%s" % values[0], True, COLORS["cost-text"]),
            FONTS[size].render("%s" % values[1], True, COLORS["heuristic-text"]),
            FONTS[size].render("%s" % values[2], True, COLORS["priority-text"])
        ]
        top, left = ri * self.cell_size, ci * self.cell_size
        quarter = self.cell_size // 4
        cxy_list = [
            (left + quarter, top + quarter),
            (left + 3 * quarter, top + quarter),
            (left + quarter, top + 3 * quarter),
        ]
        for i in range(3):
            cxy = cxy_list[i]
            text_rect = tlst[i].get_rect(center=cxy)
            self.win.blit(tlst[i], text_rect)

    def draw_astar_points(self, point, values, **kwargs):
        size = kwargs.get("size", DEFAULT_FONT)
        kind = kwargs.get("kind")
        draw_line = kwargs.get("line", False)  # NOTE(review): read but never used
        # Paint the cell and its three counters.
        self.count += 1
        pc, pr = point
        self.draw_cell(pc, pr, kind)
        self.draw_count(pc, pr, kind, values, size=2)

    def set_prev(self, next_v, current):
        self.prev_map[next_v] = current

    def draw_vline(self, v, prev_v):
        # Arrowed segment from prev_v toward v, trimmed to the middle half.
        sx = prev_v[0] * self.cell_size + self.cell_size // 2
        sy = prev_v[1] * self.cell_size + self.cell_size // 2
        ex = v[0] * self.cell_size + self.cell_size // 2
        ey = v[1] * self.cell_size + self.cell_size // 2
        dx, dy = (ex - sx), (ey - sy)
        start_pos = (sx + dx // 4, sy + dy // 4)
        end_pos = (ex - dx // 4, ey - dy // 4)
        pygame.draw.line(self.win, COLORS["visit-line"], start_pos, end_pos, width=3)
        arrow_hw = 5
        if dy == 0:
            # Horizontal move: arrow head points along x.
            x1 = ex - dx // 8
            x2 = ex - dx * 3 // 8
            arrow_points = [(x1, sy), (x2, sy - arrow_hw), (x2, sy + arrow_hw)]
            pygame.draw.polygon(self.win, COLORS["visit-line"], arrow_points)
        if dx == 0:
            # Vertical move: arrow head points along y.
            y1 = ey - dy // 8
            y2 = ey - dy * 3 // 8
            arrow_points = [(sx, y1), (sx - arrow_hw, y2), (sx + arrow_hw, y2)]
            pygame.draw.polygon(self.win, COLORS["visit-line"], arrow_points)

    def add_node_data(self, point, data, kind):
        if kind == "neigh":
            self.neigh[point] = data
        elif kind == "used":
            # Promoting a node to "explored" clears the neighbour overlay.
            self.explored[point] = data
            self.neigh = {}

    def draw_path(self):
        # Walk predecessors from the goal back to the start, painting "path".
        vertex = self.end
        while vertex:
            self.draw_astar_points(vertex, self.explored[vertex], kind="path")
            vertex = self.prev_map.get(vertex)
        for v in self.prev_map:
            pv = self.prev_map[v]
            if pv:
                vl = self.draw_vline(v, pv)
        pygame.display.update()
        self.clock.tick(self.fps)

    def update(self):
        """Redraw the whole frame: grid, explored set, frontier, arrows."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # The user clicked the window close button.
                pygame.quit()
        self.draw_line()
        for p in self.explored:
            # print(p, self.explored[p])
            self.draw_astar_points(p, self.explored[p], kind="current")
        for q in self.neigh:
            self.draw_astar_points(q, self.neigh[q], kind="visited")
        for v in self.prev_map:
            pv = self.prev_map[v]
            if pv:
                vl = self.draw_vline(v, pv)
        pygame.display.update()
        self.clock.tick(self.fps)
if __name__ == '__main__':
    # Demo: load a maze, draw it, mark start/end, and wait for window close.
    filename = "txt/big_shuang.txt"
    graph = read_graph_from_txt(filename)
    anima = Animation(graph)
    anima.display(anima.init)
    start = (2, 2)
    end = (37, 14)
    anima.display(anima.draw_start_end, start, end)
    anima.done()
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.font.Font",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_fo... |
73619854753 | from bs4 import BeautifulSoup
import time
from openpyxl import Workbook
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
# URL of the site to fetch data
siteUrl = 'https://leetcode.com/problemset/all/'

# Scrape accumulators: filled by fetchPageData(), consumed by xcelSheet().
questionNameList = []
questionUrlList = []
questionDifficultyList = []
def xcelSheet():
    """Dump the scraped question lists into LeetCode.xlsx.

    Reads the module-level questionNameList / questionUrlList /
    questionDifficultyList accumulators and writes them, with a header row,
    into a worksheet named 'LeetCode Problems'.
    """
    excelFileName = 'LeetCode.xlsx'
    sheetName = 'LeetCode Problems'
    df = pd.DataFrame({
        'Question Name': questionNameList,
        'Question Url': questionUrlList,
        'Question Difficulty': questionDifficultyList
    })
    wb = Workbook()
    sheet1 = wb.create_sheet(sheetName)
    # Header row (openpyxl cells are 1-indexed).
    sheet1.cell(1, 1, 'Question Name')
    sheet1.cell(1, 2, 'Question URL')
    sheet1.cell(1, 3, 'Question Difficulty')
    # len(df) instead of the original df.__len__() -- same value, idiomatic.
    for i in range(len(df)):
        sheet1.cell(i + 2, 1, df['Question Name'][i])
        sheet1.cell(i + 2, 2, df['Question Url'][i])
        sheet1.cell(i + 2, 3, df['Question Difficulty'][i])
    wb.save(excelFileName)
    wb.close()
    print(" ****Excel sheet created***** ")
def openBrowser(url):
    """Start a headless incognito Chrome, navigate to `url`, return the driver."""
    options = webdriver.ChromeOptions()
    options.add_argument('--ignore-certificate-errors')
    options.add_experimental_option('excludeSwitches', ['enable-logging'])
    options.add_argument('--incognito')
    options.add_argument('--headless')
    # driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
    # headless browser
    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()),
                              options=options)
    driver.get(url)
    # Maximizing has no visible effect in headless mode but is harmless.
    driver.maximize_window()
    return driver
def closeBrowser(driver):
    """Close the current browser window (driver.quit() would end the session)."""
    driver.close()
def fetchPageData(pageUrl):
    """Scrape one paginated problem-list page into the module-level lists.

    Opens a fresh headless browser, waits for the client-side render, and
    appends each question's name, URL and difficulty to the accumulators.
    """
    sleepTime = 3
    # print("Page URL: ", pageUrl)
    browser = openBrowser(pageUrl)
    time.sleep(sleepTime)  # allow client-side rendering to finish
    pageSource = browser.page_source
    WebDriverWait(browser, 10).until(EC.title_contains("Problems - LeetCode"))
    # print(f"title is: {browser.title}")
    soup = BeautifulSoup(pageSource, 'lxml')
    if (browser.title == "Problems - LeetCode"):
        # page to fetch data
        newSoup = BeautifulSoup(pageSource, 'lxml')
        # fetch the block of questions div
        questionBlock = newSoup.find('div', role='rowgroup')
        # fetch all the questions
        questionList = questionBlock.find_all('div', role='row')
        for question in questionList:
            row = question.find_all('div', role='cell')
            # Cell layout: [status, title, solution, acceptance, difficulty, ...]
            questionName = row[1].find('a').text.split(". ")[1]
            questionUrl = row[1].find('a')['href']
            questionUrl = 'https://leetcode.com' + questionUrl
            questionDifficulty = row[4].find('span').text
            questionNameList.append(questionName)
            questionUrlList.append(questionUrl)
            questionDifficultyList.append(questionDifficulty)
            # print(questionName, questionUrl, questionDifficulty)
        print("********Done*********")
        closeBrowser(browser)
    else:
        # Bug fix: the original printed soup.status_code here, but
        # BeautifulSoup objects have no status_code attribute, so this
        # branch raised AttributeError instead of reporting the failure.
        print("Page does not exist o connection Failed")
    return
def getData():
    """Entry point: discover the page count, scrape every page, write the sheet."""
    try:
        # Opening browser with Headless mode and wait for 2 seconds for page to load
        browser = openBrowser(siteUrl)
        time.sleep(2)
        # Fetching the first page data and util the title is "Problems - LeetCode"
        pageSource = browser.page_source
        WebDriverWait(browser, 10).until(EC.title_contains("Problems - LeetCode"))
        soup = BeautifulSoup(pageSource, 'lxml')
        # If title is "Problems - LeetCode" then fetch the data
        if (browser.title == "Problems - LeetCode"):
            # Fetching total number of pages
            # NOTE(review): this Tailwind class list is brittle -- it breaks
            # whenever LeetCode restyles the paginator.
            totalPage = soup.find_all(class_ = "flex items-center justify-center px-3 h-8 rounded select-none focus:outline-none bg-fill-3 dark:bg-dark-fill-3 text-label-2 dark:text-dark-label-2 hover:bg-fill-2 dark:hover:bg-dark-fill-2")
            totalPage = totalPage[-2].text
            totalPage = int(totalPage)
            print(f"Total {totalPage} pages available")
            closeBrowser(browser)
            # Fetching data from each page
            for page in range(1, totalPage + 1):
                print(f"\n********Fetching Page {page}********")
                pageUrl = siteUrl + '?page=' + str(page)
                fetchPageData(pageUrl)
            # All fetched data and now creating excel sheet with the data
            print("*****Done all pages*****")
            print(f"Total {questionNameList.__len__()} questions fetched")
            xcelSheet()
        else:
            print("Connection Failed")
            return
    except Exception as e:
        print("Some error occured, error: ", e)
        return
# Script entry point: scrape all problem pages and export the spreadsheet.
if __name__ == "__main__":
    getData()
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "openpyxl.Workbook",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "s... |
72963427555 | from __future__ import division
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
def load_dataset():
    """Load 3 classes x 1000 face images as (flattened_pixels, label) pairs.

    Expects ./data/Data_Train/Class{1..3}/faceTrain{c}_{i}.bmp files;
    each image is flattened to a 1-D vector, e.g. (30, 30) -> (900,).
    """
    CLASS_NUM = 3
    FILE_NUM = 1000
    dataset = list()
    for itr_class in range(CLASS_NUM):
        file_dir = "./data/Data_Train/Class{:d}/".format(itr_class + 1)
        for idx in range(FILE_NUM):
            file_name = "faceTrain{:d}_{:d}.bmp".format(itr_class + 1, idx + 1)
            # load image from directory and transfer into 1-d vector (30, 30) -> (900)
            tmp_img = np.array(Image.open(file_dir + file_name))
            tmp_img = tmp_img.reshape(tmp_img.shape[0]*tmp_img.shape[1])
            # Labels are 0-based class indices.
            label = itr_class
            dataset.append((tmp_img, label))
    return dataset
def normalize_preliminary(data):
    """Return the per-column (feature-wise) max and min of a 2-D array.

    The unused `dimension` local of the original has been removed.
    """
    x_max = data.max(axis=0)
    x_min = data.min(axis=0)
    return x_max, x_min
def normalize_dataset(data, x_max, x_min, scaling):
    """Min-max rescale `data` column-wise into the range [0, scaling]."""
    span = x_max - x_min
    return (data - x_min) / span * scaling
def decision_boundary(model, X, model_name):
    """Plot a 3-class decision surface for `model` over 2-D features X.

    `model.predict` must accept rows of [x, y, 1] (a bias column is
    appended).  Blocks on plt.show() until the figure is closed.
    """
    # create a mesh to plot in
    h = 0.2
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    fig, ax = plt.subplots()
    feature = np.c_[xx.ravel(), yy.ravel()]
    feature = np.concatenate((feature, np.ones((feature.shape[0], 1))), axis=1)
    Z = model.predict(feature)
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    ax.contourf(xx, yy, Z, cmap=plt.cm.Set3)
    ax.axis('off')
    # Plot also the training points, colored by the model's own prediction.
    X = np.concatenate((X, np.ones((X.shape[0], 1))), axis=1)
    prediction = model.predict(X)
    for i in range(0, X.shape[0]):
        if(prediction[i] == 0):
            plt.scatter(X[i][0], X[i][1], c='r', label='0', s=3)
        elif(prediction[i] == 1):
            plt.scatter(X[i][0], X[i][1], c='g', label='1', s=3)
        else:
            plt.scatter(X[i][0], X[i][1], c='b', label='2', s=3)
    ax.set_title(model_name + " decision boundary")
    plt.show()
def one_hot(a):
    """One-hot encode an integer label vector; width is max(label) + 1."""
    labels = np.asarray(a)  # accept lists as well as arrays
    encoded = np.zeros((labels.size, labels.max() + 1))
    encoded[np.arange(labels.size), labels] = 1
    return encoded
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.meshgrid",
"line_numbe... |
6086114417 | import torch.nn as nn
import torch.distributed as dist
def initialize_weights(model):
    """Xavier-init every Linear layer; reset BatchNorm1d to weight=1, bias=0."""
    for module in model.modules():
        if isinstance(module, nn.Linear):
            nn.init.xavier_normal_(module.weight)
            # m.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm1d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
def get_rank() -> int:
    """Distributed rank of this process; 0 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
| jinxixiang/low_rank_wsi | mil/models/model_utils.py | model_utils.py | py | 515 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "torch.nn.Linear",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.xavier_normal_",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn.init... |
43472299216 | from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import Group
from django.db import transaction
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import *
from django.http import HttpResponse, JsonResponse
from apps.backEnd import nombre_empresa
from apps.cliente.forms import ClienteForm
from apps.cliente.models import Cliente
from django.http import HttpResponseRedirect
import json
from django.db.models import Q
from apps.mixins import ValidatePermissionRequiredMixin
from apps.user.models import User
from apps.proveedor.models import Proveedor
# Shared view configuration for this module.
opc_icono = 'fa fa-user'   # FontAwesome icon shown in the templates
opc_entidad = 'Clientes'   # entity label used in page titles
crud = '/cliente/nuevo'
empresa = nombre_empresa() # resolved once at import time
class lista(ValidatePermissionRequiredMixin, ListView):
    """Client list page plus its AJAX endpoints.

    POST actions: 'list' returns every client (tipo=0) as JSON; 'search'
    returns up to 10 clients matching name/cedula for select2-style widgets.
    """
    model = User
    template_name = "front-end/cliente/cliente_list.html"
    permission_required = 'cliente.view_cliente'

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        data = {}
        try:
            action = request.POST['action']
            if action == 'list':
                data = []
                for c in self.model.objects.filter(tipo=0):
                    data.append(c.toJSON())
            elif action == 'search':
                data = []
                term = request.POST['term']
                query = self.model.objects.filter(Q(first_name__icontains=term) | Q(last_name__icontains=term) |
                                                  Q(cedula__icontains=term), tipo=0)[0:10]
                for a in query:
                    item = a.toJSON()
                    item['text'] = a.get_full_name()
                    data.append(item)
            else:
                data['error'] = 'No ha seleccionado una opcion'
        except Exception as e:
            # NOTE(review): the real exception is discarded; clients always
            # see the generic "no option selected" message.
            data['error'] = 'No ha seleccionado una opcion'
        return JsonResponse(data, safe=False)

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        data['icono'] = opc_icono
        data['entidad'] = opc_entidad
        data['boton'] = 'Nuevo Cliente'
        data['titulo'] = 'Listado de Clientes'
        data['form'] = ClienteForm
        data['nuevo'] = '/cliente/nuevo'
        data['empresa'] = empresa
        return data
class CrudView(ValidatePermissionRequiredMixin, TemplateView):
    """AJAX create/edit/delete endpoint for clients.

    POST 'action': 'add' creates a User (tipo=0) from ClienteForm data,
    'edit' updates an existing User, 'delete' removes a Cliente row.
    """
    form_class = ClienteForm
    template_name = 'front-end/cliente/cliente_form.html'

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        data = {}
        action = request.POST['action']
        try:
            print(action)
            if action == 'add':
                f = ClienteForm(request.POST)
                datos = request.POST
                data = self.save_data(f, datos)
            elif action == 'edit':
                pk = request.POST['id']
                cliente = User.objects.get(pk=int(pk))
                f = ClienteForm(request.POST, instance=cliente)
                if f.is_valid():
                    f.edit()
                else:
                    data['error'] = f.errors
            elif action == 'delete':
                pk = request.POST['id']
                cli = Cliente.objects.get(pk=pk)
                cli.delete()
                data['resp'] = True
            else:
                # NOTE(review): mojibake in this message ("opciรณn") -- the
                # file was re-encoded with the wrong charset at some point.
                data['error'] = 'No ha seleccionado ninguna opciรณn'
        except Exception as e:
            data['error'] = str(e)
        return HttpResponse(json.dumps(data), content_type='application/json')

    def save_data(self, f, datos):
        """Validate the form, verify the cedula, create the User + group link."""
        data = {}
        if f.is_valid():
            if verificar(f.data['cedula']):
                use = User()
                use.username = datos['cedula']
                use.cedula = datos['cedula']
                use.first_name = datos['first_name']
                use.last_name = datos['last_name']
                use.sexo = datos['sexo']
                use.email = datos['email']
                use.telefono = datos['telefono']
                use.celular = datos['celular']
                use.direccion = datos['direccion']
                use.tipo = 0
                # Initial password is the (hashed) cedula itself.
                use.password = make_password(datos['cedula'])
                use.save()
                data['resp'] = True
                data['cliente'] = use.toJSON()
                # Attach the new user to the "cliente" permission group.
                grupo = Group.objects.get(name__icontains='cliente')
                usersave = User.objects.get(id=use.id)
                usersave.groups.add(grupo)
                usersave.save()
            else:
                f.add_error("cedula", "Numero de Cedula no valido para Ecuador")
                data['error'] = f.errors
        else:
            data['error'] = f.errors
        return data
class report(ValidatePermissionRequiredMixin, ListView):
    """Client report page; POST 'report' returns clients, optionally date-ranged."""
    model = Cliente
    template_name = 'front-end/cliente/cliente_report.html'
    permission_required = 'cliente.view_cliente'

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)

    def get_queryset(self):
        # The page itself renders no rows; data arrives via the POST below.
        return Cliente.objects.none()

    def post(self, request, *args, **kwargs):
        data = {}
        action = request.POST['action']
        if action == 'report':
            data = []
            start_date = request.POST.get('start_date', '')
            end_date = request.POST.get('end_date', '')
            try:
                if start_date == '' and end_date == '':
                    # NOTE(review): the unfiltered branch queries User while
                    # the date-filtered branch queries Cliente -- confirm intent.
                    query = User.objects.filter(tipo=0)
                else:
                    query = Cliente.objects.filter(tipo=0, fecha__range=[start_date, end_date])
                for p in query:
                    data.append(p.toJSON())
            except:
                # Swallow bad dates / query errors and return what we have.
                pass
        return JsonResponse(data, safe=False)

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        data['icono'] = opc_icono
        data['entidad'] = opc_entidad
        data['titulo'] = 'Reporte de Clientes'
        data['empresa'] = empresa
        return data
def verificar(nro):
    """Validate an Ecuadorian cedula (10 digits) or RUC (13 digits).

    Returns True when *nro* passes the structural checks and its check
    digit, False otherwise.

    Fix: the original returned ``False and error`` on every failure
    branch, which always evaluates to False and silently discards the
    error text -- the failure reason is now a comment and the function
    returns a plain False.  Guard clauses replace the nested ifs.
    """
    l = len(nro)
    if l != 10 and l != 13:
        # Longitud incorrecta del numero ingresado
        return False
    cp = int(nro[0:2])
    if not (1 <= cp <= 22):
        # Codigo de provincia incorrecto
        return False
    tercer_dig = int(nro[2])
    if 0 <= tercer_dig < 6:
        if l == 10:
            return __validar_ced_ruc(nro, 0)
        # RUC of a natural person: the last three digits must not be '000'.
        return __validar_ced_ruc(nro, 0) and nro[10:13] != '000'
    elif tercer_dig == 6:
        return __validar_ced_ruc(nro, 1)  # sociedades publicas
    elif tercer_dig == 9:
        return __validar_ced_ruc(nro, 2)  # sociedades privadas
    # Tercer digito invalido
    return False
def __validar_ced_ruc(nro, tipo):
    """Check-digit validation shared by cedulas and RUCs.

    tipo 0: cedula / natural-person RUC (modulus 10)
    tipo 1: public-institution RUC      (modulus 11)
    tipo 2: private/juridical RUC       (modulus 11)

    Fix: any other `tipo` previously fell through with `base`, `d_ver`
    and `multip` unbound, crashing with UnboundLocalError; raise a clear
    ValueError instead.
    """
    total = 0
    if tipo == 0:  # cedula y r.u.c persona natural
        base = 10
        d_ver = int(nro[9])  # digito verificador
        multip = (2, 1, 2, 1, 2, 1, 2, 1, 2)
    elif tipo == 1:  # r.u.c. publicos
        base = 11
        d_ver = int(nro[8])
        multip = (3, 2, 7, 6, 5, 4, 3, 2)
    elif tipo == 2:  # r.u.c. juridicos y extranjeros sin cedula
        base = 11
        d_ver = int(nro[9])
        multip = (4, 3, 2, 7, 6, 5, 4, 3, 2)
    else:
        raise ValueError("tipo must be 0, 1 or 2, got %r" % (tipo,))
    for i in range(0, len(multip)):
        p = int(nro[i]) * multip[i]
        if tipo == 0:
            # For cedulas, two-digit products contribute their digit sum.
            total += p if p < 10 else int(str(p)[0]) + int(str(p)[1])
        else:
            total += p
    mod = total % base
    val = base - mod if mod != 0 else 0
    return val == d_ver
| chrisstianandres/pagos | apps/cliente/views.py | views.py | py | 8,331 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "apps.backEnd.nombre_empresa",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "apps.mixins.ValidatePermissionRequiredMixin",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "apps.user.models.User",
"line_number": 29,
"usage_type": "name"
... |
22147146770 | import json
import os
import time
from flask import Flask, jsonify, make_response
from flask import request
from flask_cors import CORS
import logging
import requests
from models.reqdb import Request, Base
from models.model import Model
from models.container import Container
from models.configurations import RequestsStoreConfiguration
import sqlalchemy as db
from sqlalchemy.orm import sessionmaker
from sqlalchemy import and_
from prometheus_client import make_wsgi_app, Counter, Gauge, generate_latest
app = Flask(__name__)
CORS(app)

# Module-level state shared by all request handlers.
active = False           # becomes True once configure() succeeds
config = None            # RequestsStoreConfiguration instance
config_filename = 'config.json'
status = None            # human-readable component status
db_engine = None
db_session = None
MAX_RESP_REQS = 1000     # default cap on rows returned by the /requests endpoints
models = []              # Model objects fetched from config.models_endpoint
containers = []          # Container objects fetched from config.containers_endpoint

# Prometheus metrics
metrics_prefix = "nodemanager_"
m_completed = Gauge(metrics_prefix + "completed", "Completed requests", ["model", "version"])
m_created = Gauge(metrics_prefix + "created", "Created requests", ["model", "version"])
m_input_reqs = Gauge(metrics_prefix + "input_reqs", "Input requests", ["model", "version"])
m_on_gpu = Gauge(metrics_prefix + "on_gpu", "Number of requests completed by the GPU", ["model", "version"])
m_on_cpu = Gauge(metrics_prefix + "on_cpu", "Number of requests completed by the CPU", ["model", "version"])
m_rt_avg = Gauge(metrics_prefix + "avg", "Mean response time", ["model", "version"])
m_process_avg = Gauge(metrics_prefix + "avg_process", "Mean processing time", ["model", "version"])
m_rt_dev = Gauge(metrics_prefix + "rt_dev", "Standard deviation response time", ["model", "version"])
m_rt_min = Gauge(metrics_prefix + "rt_min", "Minimum response time", ["model", "version"])
m_rt_max = Gauge(metrics_prefix + "rt_max", "Maximum response time", ["model", "version"])

# Timestamp of the last Prometheus scrape (see get_prometheus_metrics).
last_ts = 0
@app.route('/', methods=['GET'])
def get_status():
    """Report the component's current status string."""
    payload = {"status": status}
    return payload
@app.route('/requests', methods=['DELETE'])
def delete_requests():
    """Remove every stored request record from the database."""
    if not active and not configure():
        return {'error': 'component not configured'}
    session = db_session
    session.query(Request).delete()
    session.commit()
    return {"result": "ok"}
@app.route('/requests', methods=['POST'])
def post_requests():
    """Insert a new request record, or update an existing one (upsert by id)."""
    if not active and not configure():
        return {'error': 'component not configured'}
    rs = request.get_json()
    req = db_session.query(Request).get(rs["id"])
    # Fields that evolve while a request is being processed.
    mutable_fields = ("ts_wait", "ts_out", "process_time", "resp_time",
                      "node", "container", "container_id", "device", "state")
    if req:
        # Known id: refresh only the mutable fields.
        for field in mutable_fields:
            setattr(req, field, rs[field])
    else:
        # Unknown id: create a complete record.
        req = Request(id=rs["id"],
                      model=rs["model"],
                      version=rs["version"],
                      ts_in=rs["ts_in"],
                      **{field: rs[field] for field in mutable_fields})
        db_session.add(req)
    db_session.commit()
    return jsonify(rs)
@app.route('/requests', methods=['GET'])
def get_requests():
    """Return the most recent requests, newest first (capped by `max_reqs`)."""
    if not active and not configure():
        return {'error': 'component not configured'}
    cap = int(request.args.get('max_reqs') or MAX_RESP_REQS)
    recent = db_session.query(Request).order_by(Request.ts_in.desc()).limit(cap)
    return jsonify([item.to_json() for item in recent])
@app.route('/requests/<node>', methods=['GET'])
def get_requests_by_node(node):
    """Return the most recent requests handled by *node*, newest first.

    Fix: the original called `.order_by()` after `.limit()`; SQLAlchemy
    raises InvalidRequestError when ordering a Query that already has a
    LIMIT applied, so this endpoint always failed.  Order first, then
    cap -- matching `get_requests` above.
    """
    if not active and not configure():
        return {'error': 'component not configured'}
    max_reqs = int(request.args.get('max_reqs') or MAX_RESP_REQS)
    reqs = db_session.query(Request)\
        .filter(Request.node == node)\
        .order_by(Request.ts_in.desc())\
        .limit(max_reqs)
    return jsonify([req.to_json() for req in reqs])
# # Add prometheus wsgi middleware to route /metrics requests
# app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
# '/metrics': make_wsgi_app()
# })
@app.route('/metrics/model', methods=['GET'])
def get_metrics_by_model():
    """Compute per-model request metrics, optionally from a given timestamp."""
    if not active and not configure():
        return {'error': 'component not configured'}
    from_ts = request.args.get('from_ts')
    metrics = []
    for model in models:
        # All requests recorded for this model/version, newest first.
        model_reqs = db_session.query(Request)\
            .filter(and_(Request.model == model.name, Request.version == model.version))\
            .order_by(Request.ts_in.desc())
        entry = {"model": model.name, "version": model.version}
        if from_ts is not None:
            entry["metrics_from_ts"] = Request.metrics(model_reqs, from_ts)
        else:
            entry["metrics"] = Request.metrics(model_reqs)
        metrics.append(entry)
    return jsonify(metrics)
@app.route('/metrics')
def get_prometheus_metrics():
    """Refresh the Prometheus gauges from requests seen since the last scrape."""
    global last_ts
    # Gauge for each metric key; None values are exported as 0.
    gauge_by_key = {
        "completed": m_completed,
        "created": m_created,
        "input_reqs": m_input_reqs,
        "on_gpu": m_on_gpu,
        "on_cpu": m_on_cpu,
        "avg": m_rt_avg,
        "avg_process": m_process_avg,
        "dev": m_rt_dev,
        "min": m_rt_min,
        "max": m_rt_max,
    }
    # update the metrics
    for model in models:
        # filter the reqs associated with the model
        reqs = db_session.query(Request) \
            .filter(and_(Request.model == model.name, Request.version == model.version)) \
            .order_by(Request.ts_in.desc())
        metrics = Request.metrics(reqs, last_ts)
        for key, gauge in gauge_by_key.items():
            value = metrics[key]
            gauge.labels(model=model.name, version=model.version).set(
                value if value is not None else 0)
    response = make_response(generate_latest(), 200)
    response.mimetype = "text/plain"
    last_ts = time.time()
    return response
@app.route('/metrics/container', methods=['GET'])
def get_metrics_by_container():
    """Compute per-container request metrics, optionally from a given timestamp."""
    if not active and not configure():
        return {'error': 'component not configured'}
    from_ts = request.args.get('from_ts')
    metrics = []
    for container in containers:
        # Requests this container served for its model, newest first.
        container_reqs = db_session.query(Request)\
            .filter(and_(Request.model == container.model, Request.container_id == container.container_id))\
            .order_by(Request.ts_in.desc())
        if from_ts is None:
            entry = {"container": container.to_json(),
                     "metrics": Request.metrics(container_reqs)}
        else:
            entry = {"container": container.to_json(),
                     "metrics_from_ts": Request.metrics(container_reqs, from_ts)}
        metrics.append(entry)
    return jsonify(metrics)
@app.route('/metrics/container/model', methods=['GET'])
def get_metrics_by_container_model():
    """Break metrics down by container and, within each container, by model."""
    if not active and not configure():
        return {'error': 'component not configured'}
    from_ts = request.args.get('from_ts')
    if from_ts is None:
        from_ts = 0
    metrics = {}
    for container in containers:
        # Requests that entered this container after `from_ts`.
        container_reqs = db_session.query(Request) \
            .filter(and_(Request.container_id == container.container_id, Request.ts_in > from_ts))\
            .order_by(Request.ts_in.desc())
        metrics[container.container_id] = {
            model.name: Request.metrics(
                [r for r in container_reqs if r.model == model.name], from_ts)
            for model in models
        }
    return jsonify(metrics)
@app.route('/metrics/container/model/created', methods=['GET'])
def get_created_by_container_model():
    """Count still-open requests (no ts_out yet) per container and model."""
    if not active and not configure():
        return {'error': 'component not configured'}
    metrics = {}
    for container in containers:
        reqs_by_model = {}
        for model in models:
            # Open requests: created on this container for this model and
            # not yet completed (ts_out IS NULL).  `.is_(None)` is the
            # idiomatic SQL NULL test (the original `== None` works but
            # trips linters), and the ORDER BY before COUNT was wasted
            # work, so it is dropped.
            open_count = db_session.query(Request)\
                .filter(and_(Request.container_id == container.container_id,
                             Request.model == model.name,
                             Request.ts_out.is_(None)))\
                .count()
            reqs_by_model[model.name] = {"created": open_count}
        metrics[container.container_id] = reqs_by_model
    return jsonify(metrics)
def configure():
    """Load the configuration and fetch models/containers from its endpoints.

    Returns True on success and False on failure.

    Fix: the success path used to return a Flask-style `({"result": "ok"},
    200)` tuple even though `configure()` is a plain helper whose callers
    only truth-test it (`if not active and not configure()`); return a
    plain boolean so both outcomes have the same type.
    """
    global status, config, models, containers, active
    if not config:
        logging.info("reading config from file")
        if not read_config_from_file():
            logging.error("configuration reading error")
            return False
        logging.info("configuration read from file")
    logging.info("configuration read: " + str(config.__dict__))
    # get models information
    models = [Model(json_data=json_model) for json_model in get_data(config.models_endpoint)]
    logging.info("Models: %s", [model.to_json() for model in models])
    # get containers information
    containers = [Container(json_data=json_container) for json_container in get_data(config.containers_endpoint)]
    logging.info("Containers: %s", [container.to_json() for container in containers])
    status = "active"
    active = True
    logging.info(status)
    return True
@app.route('/configuration', methods=['GET'])
def get_configuration():
    """Return the current configuration, loading it from file if necessary."""
    global config, status
    logging.info("get configuration")
    # Fall back to the persisted file when nothing is loaded yet.
    logging.info("read configuration from file")
    if not config and not read_config_from_file():
        logging.warning("configuration not found")
        return {"configuration": "not found"}, 404
    status = "configured"
    return {"configuration": config.__dict__}, 200
@app.route('/configuration', methods=['POST'])
def post_configuration():
    """Accept a new configuration, persist it to disk, mark as configured."""
    global status, config
    logging.info("configuration started...")
    payload = request.get_json()
    config = RequestsStoreConfiguration(json_data=payload)
    logging.info("configuration: " + str(config.__dict__))
    logging.info("Getting models from: %s", config.models_endpoint)
    logging.info("Getting containers from: %s", config.containers_endpoint)
    # Persist so the component can self-configure after a restart.
    with open(config_filename, 'w') as config_file:
        json.dump(config.__dict__, config_file)
    status = "configured"
    logging.info(status)
    return {"result": "ok"}, 200
def read_config_from_file():
    """Load the persisted configuration file; return True on success.

    Fix: besides a missing/unreadable file (IOError), a corrupt file must
    not crash the caller -- `json.load` raises JSONDecodeError (a
    ValueError subclass), which the original `except IOError` let
    propagate into `configure()`.
    """
    global config
    try:
        with open(config_filename) as json_file:
            data = json.load(json_file)
        config = RequestsStoreConfiguration(json_data=data)
        return True
    except (IOError, ValueError) as e:
        logging.error("configuration error: %s", e)
        return False
def get_data(url):
    """GET *url* and return the decoded JSON body, or [] on request failure.

    Fix: on a failed request the original set `response = []` and then
    fell through to `return response.json()`, raising AttributeError on
    the list; return the empty list directly instead.  The leftover
    debug `print(response)` is removed.
    """
    try:
        response = requests.get(url)
    except Exception as e:
        logging.warning(e)
        return []
    return response.json()
def create_app(db_echo=False, delete_config=True):
    """Initialise logging, the database and a clean state; return the Flask app.

    :param db_echo: echo SQL statements (forwarded to SQLAlchemy's engine).
    :param delete_config: remove any persisted config file so the
        component starts unconfigured.
    """
    global status, db_engine, db_session
    # init log
    log_format = "%(asctime)s:%(levelname)s:%(name)s:" \
                 "%(filename)s:%(lineno)d:%(message)s"
    logging.basicConfig(level='DEBUG', format=log_format)
    status = "inactive"
    logging.info(status)
    # delete config file
    if delete_config:
        logging.info("deleting config file")
        try:
            os.remove(config_filename)
        except FileNotFoundError as e:
            logging.info("file not found")
    # NOTE(review): DB credentials are hard-coded here -- consider moving
    # the DSN to configuration/environment.
    db_engine = db.create_engine('postgresql://postgres:romapwd@localhost/postgres', echo=db_echo)
    Base.metadata.create_all(db_engine)
    Session = sessionmaker(bind=db_engine)
    db_session = Session()
    # clean db: start every run with an empty requests table.
    logging.info("cleaning db")
    db_session.query(Request).delete()
    db_session.commit()
    return app
if __name__ == '__main__':
    # Dev entry point: build/initialise the app (serving is done elsewhere).
    create_app()
| NicholasRasi/ROMA2 | components/requests_store/main.py | main.py | py | 13,696 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.reqdb",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "prometheus_client.Gauge",
... |
71552736995 | import numpy as np
import cv2
import tqdm
import argparse
import os
def parse_args():
    """Parse CLI options: required --image_path, optional --patch_size."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_path", required=True, help="path to the image")
    parser.add_argument("--patch_size", default="15", help="patch size")
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    # Load the image as single-channel grayscale.
    img = cv2.imread(args.image_path, 0)
    patch_size = int(args.patch_size)
    # White canvas the sine "waves" are drawn onto.
    img_wave = np.ones(img.shape) * 255
    for i in tqdm.tqdm(range(0, img.shape[0]-patch_size, patch_size)):
        for j in range(0, img.shape[1]-patch_size, patch_size):
            patch = img[i:i+patch_size, j:j+patch_size]
            # Mean intensity of the patch (255 = white, 0 = black).
            blackness = np.sum(patch) / patch_size**2
            e = 0.0000000001  # tiny epsilon so log() never sees 0
            # Darker patches -> larger amplitude; frequency tracks intensity.
            frequency = (2*np.pi)/patch_size * np.log(np.sqrt(blackness)+e)
            amplitude = (1*patch_size) * (1-blackness/255)
            x = np.arange(0, patch_size, 1)
            y = amplitude * np.sin(frequency * x) - patch_size
            # Shift the wave into this patch's position on the canvas.
            x += j
            y = np.int32(np.abs(y/2)) + i
            # Draw the wave as short connected line segments.
            for k in range(x.shape[0]-1):
                cv2.line(img_wave, (x[k], y[k]), (x[k+1], y[k+1]), 0, 1)
    extension = '.'+args.image_path.split(".")[-1]
    img_name = os.path.basename(args.image_path)
    # Save next to the input as "<name>_waves.png".
    save_path = os.path.join(os.path.dirname(args.image_path), img_name.replace(extension, "_waves.png"))
    cv2.imwrite(save_path, img_wave)
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_nu... |
73546122274 | #!python
import json
import argparse
import sys
from datetime import datetime
class Hypothesis:
    '''
    A single recorded guess (name, hypothesis text, confidence level,
    free-form notes and a timestamp), plus helpers to persist guesses
    to JSON/YAML files.
    '''

    def __init__(self, name, hypothesis, confidence, notes, dtime):
        self.name = name
        self.hypothesis = hypothesis
        self.confidence = confidence
        self.notes = notes
        self.dtime = dtime

    def write_to_yml(self, page, data):
        """Dump *data* to the YAML file *page*."""
        # Fix: the module never imports yaml at top level, so this method
        # raised NameError whenever it was called; import it locally.
        import yaml
        with open(page, 'w') as file:
            yaml.dump(data, file)

    def read_the_yml(self):
        """Load and return the contents of guesses.yml."""
        import yaml  # see write_to_yml
        with open('guesses.yml', 'r') as stream:
            data = yaml.load(stream, Loader=yaml.Loader)
        return data

    @classmethod
    def write_to_json(cls, page, data):
        """Dump *data* to the JSON file *page*."""
        # Fix: the first classmethod parameter was misleadingly named
        # `self`; it receives the class, so name it `cls`.
        with open(page, 'w') as file:
            json.dump(data, file)

    @classmethod
    def read_the_json(cls):
        """Load and return the contents of guesses.json."""
        with open('guesses.json', 'r') as stream:
            data = json.load(stream)
        return data
usage = '''
validate [-h|--help]
[--debug]
'''

if __name__ == '__main__':
    # noinspection PyTypeChecker
    #breakpoint()
    # The parser is built for --help output; actual input is read from
    # sys.argv directly and via interactive prompts below.
    parser = argparse.ArgumentParser(
        prog="validate",
        usage=usage,
        description="Run syntax validation on all Janus specs in the .janus directory.")
    parser.add_argument(
        "--name",
        type=str,
        required=False,
        help="Give it a name.")
    parser.add_argument(
        "--hypothesis",
        type=str,
        required=False,
        help="Make your guess.")
    parser.add_argument(
        "--confidence",
        type=str,
        required=False,
        help="How confident are you?.")
    parser.add_argument(
        "--notes",
        type=str,
        required=False,
        help="What's the deets?")
    #args = parser.parse_args()
    # Entry name: last CLI argument, or prompt interactively when absent.
    if len(sys.argv) < 2:
        name = input("Name of entry : ")
    else:
        name = sys.argv[-1]
    if not "-d" in sys.argv:
        # Record a new guess, prompting for its fields.
        hypothesis = input("Enter your hypothesis : ")
        confidence = input("What's your confidence level? : ")
        notes = input("any notes, links, etc? : ")
        #date = input("(Optional) date: ")
        #dt_object = datetime.datetime.now()
        #if not date:
        #    date = "_".join([str(i) for i in [dt_object.year, dt_object.month, dt_object.day]])
        #time = ":".join([str(i) for i in [dt_object.hour, dt_object.minute]])
        thedatetime = datetime.now().isoformat(timespec='minutes')
        h = Hypothesis(name=name, hypothesis=hypothesis, confidence=confidence, notes=notes, dtime=thedatetime)
        data = h.read_the_json()
        data[h.name] = {'hypothesis': h.hypothesis, 'confidence': h.confidence, 'notes': h.notes, 'dtime': h.dtime}
    else:
        # "-d" flag: delete the entry named by the last CLI argument.
        h = Hypothesis
        data = h.read_the_json()
        data.pop(sys.argv[-1])
    h.write_to_json('guesses.json', data)
| josh-mcq/hypothesis | guess.py | guess.py | py | 2,813 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.dump",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_numb... |
26979634933 | import math
import numpy
from numpy.typing import ArrayLike
from search import embedding
from sklearn.cluster import KMeans
from tenseal.tensors.ckksvector import CKKSVector
class Index:
    """
    Index for efficient searching in a corpus using clustering and a
    matrix representation.

    Parameters:
    - model (embedding.Model): The embedding model for encoding text.
    - corpus (list[str]): List of texts to build the index upon.

    Attributes:
    - corpus (ArrayLike): Numerical vectors representing the encoded corpus.
    - clusters (int): The number of clusters formed during initialization.
    - centroids (ArrayLike): The centroids of clusters.
    - matching (list[tuple[int, ArrayLike]]): Pairs of cluster labels and
      corresponding embeddings.
    - matrix (ArrayLike): Matrix representation of the clustered embeddings.
    """

    def __init__(self, model: embedding.Model, corpus: list[str]):
        self.corpus = model.encode(corpus)
        self.embedding_size = len(self.corpus[0])

        self._clusterize()
        self._index()

    def _clusterize(self) -> None:
        """Cluster the encoded corpus with KMeans using sqrt(n) clusters."""
        n_clusters = math.ceil(math.sqrt(len(self.corpus)))
        clustering = KMeans(n_clusters=n_clusters, n_init="auto").fit(self.corpus)

        self.clusters = n_clusters
        self.centroids = clustering.cluster_centers_
        self.matching = list(zip(clustering.labels_, self.corpus))

    def _index(self) -> None:
        """Build the padded (clusters x max_size x dim) matrix index.

        Fix: the per-item loop variable was named ``embedding``, which
        shadowed the imported ``search.embedding`` module inside this
        method; it is renamed to avoid the collision.
        """
        index = [[] for _ in range(self.clusters)]

        for cluster, vector in self.matching:
            index[cluster].append(vector)

        # Pad ragged clusters with a large-negative filler so every row of
        # the final matrix has the same length.
        filler = numpy.ones(self.embedding_size) * -10_000
        self.max_size = max([len(val) for val in index])

        for cluster, vectors in enumerate(index):
            cluster_size = len(vectors)
            index[cluster].extend([filler] * (self.max_size - cluster_size))

        self.matrix = numpy.array(index)

    def search(self, query: CKKSVector) -> ArrayLike:
        """Score the encrypted *query* against every cluster's flattened block.

        Returns the result of the (encrypted) matrix product; the caller
        decrypts and interprets it.
        """
        matrix = self.matrix.reshape(self.clusters, self.max_size * self.embedding_size)
        return query @ matrix
| fpiedrah/private-search | search/index.py | index.py | py | 3,120 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "search.embedding.Model",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "search.embedding",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
... |
8027377031 | # -*- coding: utf-8 -*-
'''
'''
#############
## LOGGING ##
#############
import logging
from fitsbits import log_sub, log_fmt, log_date_fmt
DEBUG = False

# Module-wide verbosity: DEBUG-level output only when the flag is set.
level = logging.DEBUG if DEBUG else logging.INFO

LOGGER = logging.getLogger(__name__)
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)

# Short aliases so module code can call LOGINFO(...) etc. directly.
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
| waqasbhatti/fitsbits | fitsbits/_modtemplate.py | _modtemplate.py | py | 544 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.DEBUG",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.basicC... |
11514219682 | # Released under the MIT License. See LICENSE for details.
#
"""Tools related to ios development."""
from __future__ import annotations
import pathlib
import subprocess
import sys
from dataclasses import dataclass
from efrotools import getprojectconfig, getlocalconfig
# Maps the user-facing mode name to its XCode build settings.
MODES = {
    'debug': {'configuration': 'Debug'},
    'release': {'configuration': 'Release'},
}
@dataclass
class Config:
    """Configuration values for this project (from the project config)."""

    # Same as XCode setting.
    product_name: str

    # Project relative xcodeproj path ('MyAppName/MyAppName.xcodeproj').
    projectpath: str

    # App bundle name ('MyAppName.app').
    # app_bundle_name: str

    # Base name of the ipa archive to be pushed ('myappname').
    # archive_name: str

    # Scheme to build ('MyAppName iOS').
    scheme: str
@dataclass
class LocalConfig:
    """Configuration values specific to the machine (from the local config)."""

    # Sftp host ('myuserid@myserver.com').
    sftp_host: str

    # Path to push ipa to ('/home/myhome/dir/where/i/want/this/).
    sftp_dir: str
def push_ipa(
    root: pathlib.Path, modename: str, signing_config: str | None
) -> None:
    """Construct ios IPA and push it to staging server for device testing.

    This takes some shortcuts to minimize turnaround time;
    It doesn't recreate the ipa completely each run, uses rsync
    for speedy pushes to the staging server, etc.

    The use case for this is quick build iteration on a device
    that is not physically near the build machine.

    :param root: project root directory.
    :param modename: key into MODES ('debug' or 'release').
    :param signing_config: optional '-baSigningConfig' value forwarded
        to xcodebuild, or None.
    """
    from efrotools.xcodebuild import project_build_path

    # Load both the local and project config data.
    # FIXME: switch this to use dataclassio.
    cfg = Config(**getprojectconfig(root)['push_ipa_config'])
    lcfg = LocalConfig(**getlocalconfig(root)['push_ipa_local_config'])

    if modename not in MODES:
        raise RuntimeError(f'invalid mode: "{modename}"')
    mode = MODES[modename]

    xcprojpath = pathlib.Path(root, cfg.projectpath)
    # Resolve where XCode placed the last build of our scheme/configuration.
    app_dir = project_build_path(
        projroot=str(root),
        project_path=str(xcprojpath),
        scheme=cfg.scheme,
        configuration=mode['configuration'],
        executable=False,
    )
    built_app_path = pathlib.Path(app_dir, f'{cfg.product_name}.app')

    workdir = pathlib.Path(root, 'build', 'push_ipa')
    workdir.mkdir(parents=True, exist_ok=True)
    pathlib.Path(root, 'build').mkdir(parents=True, exist_ok=True)
    exportoptionspath = pathlib.Path(root, workdir, 'exportoptions.plist')
    ipa_dir_path = pathlib.Path(root, workdir, 'ipa')
    ipa_dir_path.mkdir(parents=True, exist_ok=True)

    # Inject our latest build into an existing xcarchive (creating if needed).
    archivepath = _add_build_to_xcarchive(
        workdir, xcprojpath, built_app_path, cfg, signing_config
    )

    # Export an IPA from said xcarchive.
    ipa_path = _export_ipa_from_xcarchive(
        archivepath, exportoptionspath, ipa_dir_path, cfg, signing_config
    )

    # And lastly sync said IPA up to our staging server.
    print('Pushing to staging server...')
    sys.stdout.flush()
    subprocess.run(
        [
            'rsync',
            '--verbose',
            ipa_path,
            '-e',
            'ssh -oBatchMode=yes -oStrictHostKeyChecking=yes',
            f'{lcfg.sftp_host}:{lcfg.sftp_dir}',
        ],
        check=True,
    )
    print('iOS Package Updated Successfully!')
def _add_build_to_xcarchive(
    workdir: pathlib.Path,
    xcprojpath: pathlib.Path,
    built_app_path: pathlib.Path,
    cfg: Config,
    ba_signing_config: str | None,
) -> pathlib.Path:
    """Copy the freshly built .app into a cached xcarchive.

    A full archive build is only performed when no cached archive exists;
    afterwards the .app inside it is simply replaced, which is much
    faster.  Returns the path to the xcarchive.
    """
    archivepathbase = pathlib.Path(workdir, cfg.product_name)
    archivepath = pathlib.Path(workdir, cfg.product_name + '.xcarchive')

    # Rebuild a full archive if one doesn't exist.
    if not archivepath.exists():
        print('Base archive not found; doing full build (can take a while)...')
        sys.stdout.flush()
        args = [
            'tools/pcommand',
            'xcodebuild',
            'archive',
            '-project',
            str(xcprojpath),
            '-scheme',
            cfg.scheme,
            '-configuration',
            MODES['debug']['configuration'],
            '-archivePath',
            str(archivepathbase),
            '-allowProvisioningUpdates',
        ]
        if ba_signing_config is not None:
            args += ['-baSigningConfig', ba_signing_config]
        subprocess.run(args, check=True, capture_output=False)

    # Now copy our just-built app into the archive.
    print('Copying build to archive...')
    sys.stdout.flush()
    archive_app_path = pathlib.Path(
        archivepath, f'Products/Applications/{cfg.product_name}.app'
    )
    subprocess.run(['rm', '-rf', archive_app_path], check=True)
    subprocess.run(['cp', '-r', built_app_path, archive_app_path], check=True)
    return archivepath
def _export_ipa_from_xcarchive(
    archivepath: pathlib.Path,
    exportoptionspath: pathlib.Path,
    ipa_dir_path: pathlib.Path,
    cfg: Config,
    signing_config: str | None,
) -> pathlib.Path:
    """Write an exportOptions plist and export a development-method IPA.

    Returns the path of the exported .ipa inside *ipa_dir_path*.
    """
    import textwrap

    print('Exporting IPA...')
    # NOTE(review): the 'thinning' value '<none>' is written as raw XML;
    # confirm xcodebuild accepts it unescaped (vs '&lt;none&gt;').
    exportoptions = textwrap.dedent(
        """
        <?xml version="1.0" encoding="UTF-8"?>
        <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
        "https://www.apple.com/DTDs/PropertyList-1.0.dtd">
        <plist version="1.0">
        <dict>
        <key>compileBitcode</key>
        <false/>
        <key>destination</key>
        <string>export</string>
        <key>method</key>
        <string>development</string>
        <key>signingStyle</key>
        <string>automatic</string>
        <key>stripSwiftSymbols</key>
        <true/>
        <key>teamID</key>
        <string>G7TQB7SM63</string>
        <key>thinning</key>
        <string><none></string>
        </dict>
        </plist>
        """
    ).strip()
    with exportoptionspath.open('w') as outfile:
        outfile.write(exportoptions)
    sys.stdout.flush()
    args = [
        'tools/pcommand',
        'xcodebuild',
        '-allowProvisioningUpdates',
        '-exportArchive',
        '-archivePath',
        str(archivepath),
        '-exportOptionsPlist',
        str(exportoptionspath),
        '-exportPath',
        str(ipa_dir_path),
    ]
    if signing_config is not None:
        args += ['-baSigningConfig', signing_config]
    try:
        subprocess.run(args, check=True, capture_output=True)
    except Exception:
        # Code-signing commonly fails when the keychain is locked.
        print(
            'Error exporting code-signed archive; '
            ' perhaps try running "security unlock-keychain login.keychain"'
        )
        raise
    ipa_path_exported = pathlib.Path(ipa_dir_path, cfg.product_name + '.ipa')
    # ipa_path = pathlib.Path(ipa_dir_path, cfg.archive_name + '.ipa')
    # subprocess.run(['mv', ipa_path_exported, ipa_path], check=True)
    return ipa_path_exported
| efroemling/ballistica | tools/efrotools/ios.py | ios.py | py | 6,959 | python | en | code | 468 | github-code | 1 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "efrotoo... |
9539722024 | from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize
from gensim import corpora
import gensim
import gensim.downloader as api
from gensim.matutils import softcossim
#from gensim import fasttext_model300
from gensim import *
import fasttext
import gensim.downloader as api
#import csv
'''data = ["I love machine learning. Its awesome.",
"I love coding in python",
"I love building chatbots",
"they chat amagingly well"]
'''
# Load the corpus: one document per non-empty line of out_mod.txt.
data =[]
with open('out_mod.txt','r') as f:
    docs=f.readlines()
    for l in docs:
        st=l.strip('\n')
        if st!='':
            data.append(st)
#print(data)

# Whitespace-tokenised documents used to build the gensim dictionary.
documents=[]
for d in data:
    documents.append(d.split())

dictionary= corpora.Dictionary(documents)
# Term similarity matrix (fasttext embeddings) for soft cosine similarity.
fasttext_model300 = api.load('fasttext-wiki-news-subwords-300')
similarity_matrix = fasttext_model300.similarity_matrix(dictionary, tfidf=None, threshold=0.0, exponent=2.0, nonzero_limit=100)

tagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(data)]

# Doc2Vec hyper-parameters.
max_epochs = 100
vec_size = 20
alpha = 0.025

# NOTE(review): `size=` and `model.iter` are pre-gensim-4 names; newer
# gensim uses `vector_size=` and `model.epochs` -- confirm the pinned
# gensim version before upgrading.
model = Doc2Vec(size=vec_size,
                alpha=alpha,
                min_alpha=0.00025,
                min_count=1,
                dm =1)

model.build_vocab(tagged_data)

for epoch in range(max_epochs):
    #print('iteration {0}'.format(epoch))
    model.train(tagged_data,
                total_examples=model.corpus_count,
                epochs=model.iter)
    # decrease the learning rate
    model.alpha -= 0.0002
    # fix the learning rate, no decay
    model.min_alpha = model.alpha

model.save("d2v.model")
print("Model Saved")
model= Doc2Vec.load("d2v.model")
# to find the vector of a document which is not in training data
print("Model loaded Input the document")
s= input()
s=s.lower()
test_data = word_tokenize(s)
v1 = model.infer_vector(test_data)
print("V1_infer", v1)

# Find the corpus document with the highest soft cosine similarity to the
# query.  Fixes: each similarity is computed once per document instead of
# twice (softcossim is expensive), the query bag-of-words is hoisted out
# of the loop, and the accumulator no longer shadows the builtin `max`.
query_bow = dictionary.doc2bow(s.lower().split())
best_score = 0
idx = 0
for d in data:
    score = softcossim(query_bow, dictionary.doc2bow((d.lower()).split()), similarity_matrix)
    if score > best_score:
        best_score = score
        idx = data.index(d)
print(best_score)
print(data[idx])

# to find most similar doc using tags
#similar_doc = model.docvecs.most_similar('1')
#print("work")
#print(similar_doc)
| kungfumas/similaritas-dokumen | Doc2Vec/train.py | train.py | py | 2,421 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gensim.corpora.Dictionary",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "gensim.corpora",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "gensim.downloader.load",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "gensi... |
19074849435 | import logging
from odoo.addons.base_rest import restapi
from odoo.addons.base_rest.components.service import to_int
from odoo.addons.base_rest_datamodel.restapi import Datamodel
from odoo.addons.component.core import Component
_logger = logging.getLogger(__name__)
class CyclosService(Component):
    """REST service exposing Cyclos-related endpoints (credit, contact)."""

    _inherit = "base.rest.service"
    _name = "cyclos.service"
    _usage = "cyclos"
    _collection = "lokavaluto.private.services"
    _description = """
        Ping Services
        Access to the ping services is allowed to everyone
    """

    @restapi.method(
        [(["/credit"], "POST")],
        input_param=Datamodel("cyclos.credit.info"),
        output_param=Datamodel("cyclos.credit.response"),
    )
    def credit(self, params):
        """
        Credit user account with amount, and generate accounting entry
        """
        partner = self.env.user.partner_id
        base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url")
        _logger.debug("PARTNER ?: %s(%s)" % (partner.name, partner.id))
        owner_id = params.owner_id
        amount = params.amount
        CyclosCreditResponse = self.env.datamodels["cyclos.credit.response"]
        cyclos_response = CyclosCreditResponse(partial=True)
        if owner_id and amount:
            # Create the backing order and expose its portal URL so the
            # caller can complete the payment flow.
            new_order = partner.cyclosCreateOrder(owner_id, amount)
            cyclos_response.order_url = base_url + new_order.get_portal_url()
        return cyclos_response

    @restapi.method(
        [(["/contact"], "POST")],
        input_param=Datamodel("cyclos.partners.info"),
    )
    def contact(self, params):
        """Return public name for contact matching comchain addresses"""
        partner = self.env["res.partner"]
        # Partners whose Cyclos backend id matches any of the requested
        # addresses; keyed by cyclos_id in the response.
        partner_ids = partner.search(
            [("lcc_backend_ids.cyclos_id", "in", params.addresses)]
        )
        res = {}
        for partner in partner_ids:
            backend_data = partner._cyclos_backend()
            res[backend_data.cyclos_id] = {
                "partner_id": partner.id,
                "public_name": partner.public_name,
            }
        return res
| Lokavaluto/lokavaluto-addons | lcc_cyclos_base/services/cyclos_services.py | cyclos_services.py | py | 2,113 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "odoo.addons.component.core.Component",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "odoo.addons.base_rest.restapi.method",
"line_number": 20,
"usage_type": "call"
},... |
833867895 | #coding:utf-8
import requests
import threading
from bs4 import BeautifulSoup
import re
import os
import time
import sys
# Table-of-contents page of the novel to scrape on biquge.
content_url = "http://www.biquge.com.tw/12_12603/"
# Identify as a browser.  BUG FIX: the header key used to be 'user_agent',
# which is not a real HTTP header name and was silently ignored by servers;
# the standard header is 'User-Agent'.
kv = {'User-Agent': 'Mozilla/5.0'}
try:
    r = requests.get(content_url, headers=kv)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    soup = BeautifulSoup(r.text, 'html.parser')
    # Book title and introduction from the page header.
    article_name = soup.select('#wrapper .box_con #maininfo #info h1')[0].text
    # article_author = soup.select('#wrapper .box_con #maininfo #info p')[0].text
    article_intro = soup.select('#wrapper .box_con #maininfo #intro p')[0].text.strip()
    print('article name:'+article_name)
    # print('article author:'+article_author)
    print('article intro:'+article_intro)
    content_list = soup.find(id='list')
    chapter_list = soup.find_all('dd')
    # BUG FIX: use a context manager so the output file is closed even when a
    # chapter request below raises (the original leaked the handle).
    with open(article_name+'.txt', "ab+") as fo:
        fo.write((article_name+"\r\n").encode('UTF-8'))
        fo.write(("*******简介*******\r\n").encode('UTF-8'))
        fo.write(("\t"+article_intro + "\r\n").encode('UTF-8'))
        fo.write(("******************\r\n").encode('UTF-8'))
        # Fetch every chapter in order and append it to the text file.
        for count, chapter in enumerate(chapter_list):
            print('this count is:'+str(count))
            print(chapter.find('a').text)
            # Chapter page id comes from the third path segment of the link.
            zhangval = chapter.find('a')['href'].split('/')[2]
            urlll = content_url+str(zhangval)
            print('url:'+urlll)
            res = requests.get(content_url+str(zhangval), headers=kv)
            res.encoding = 'gb18030'
            soups = BeautifulSoup(res.text, "html.parser")
            section_text = soups.select('#wrapper .content_read .box_con #content')[0]
            # mytxt = re.sub( '\s+', '\r\n\t', section_text.text).strip('\r\n')
            fo.write(('\r'+chapter.find('a').text+'\r\n').encode('UTF-8'))
            fo.write((section_text.text).encode('UTF-8'))
except Exception as exc:
    # BUG FIX: the original bare `except:` swallowed every error silently
    # and printed only 'error'; at least report what went wrong.
    print('error:', exc)
| smilepasta/PythonDemo | basic/note.py | note.py | py | 1,893 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
29147395643 | import matplotlib.image as mpimg
from tensorflow.keras.utils import img_to_array, load_img
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
# Load the trained Keras model from disk (HDF5 file).
model = load_model('model12.h5')
# Convert the model to a quantized TFLite model.
# NOTE(review): Optimize.OPTIMIZE_FOR_SIZE is a deprecated alias of
# Optimize.DEFAULT in recent TensorFlow releases -- confirm intended mode.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
quantized_model = converter.convert()
# Save the quantized model
with open("quantized_model.tflite", "wb") as f:
    f.write(quantized_model)
# Load the quantized model
interpreter = tf.lite.Interpreter(model_content=quantized_model)
interpreter.allocate_tensors()
# Get input and output tensors.
# NOTE(review): the interpreter and these tensor details are set up but never
# used below -- the prediction is made with the original Keras model.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Read one chest X-ray and preprocess it: resize to 224x224 and add a leading
# batch axis before calling predict().
image_path = 'chest_xray\\test\\PNEUMONIA\\test_pneu_130.jpg'
image = mpimg.imread(image_path)
test_image = load_img(image_path, target_size = (224, 224))
test_image = img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
# Class index 0 is reported as normal, anything else as pneumonia-positive.
pred = model.predict(test_image)
predict = np.argmax(pred, axis=-1)
if predict == 0:
    prediction = 'Normal'
else:
    prediction = 'Pneumonia +VE'
plt.imshow(image);plt.suptitle(prediction, fontsize = 20);plt.axis("off");plt.show() | maazjamshaid123/early_detection_pneumonia | detect.py | detect.py | py | 1,337 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.models.load_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.lite.TFLiteConverter.from_keras_model",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.lite",
"line_number": 12,
"usage_type": "attribute"
... |
15133768093 | """
Benjamin Granat
ITP 449
Assginment 9
Trains and tests a logistic regression based on diabetes classification data
Produces confusion matrix visualization
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix as cmatrix
from sklearn.metrics import accuracy_score as accscore
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import ConfusionMatrixDisplay as cms
# Function: remove_outliers
# Parameters: dataframe, columns
# Creates lower and upper quartile bounds for columns in dataframe.
# Drops values outside of the lower and upper bounds in the dataframe
# Returns updated dataframe
def remove_outliers(data, columns):
    """Return a copy of *data* with IQR outliers dropped, column by column.

    For each column in *columns*, rows outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]
    are removed before the next column is processed.
    """
    for col in columns:
        first_q = data[col].quantile(0.25)
        third_q = data[col].quantile(0.75)
        spread = third_q - first_q
        low_cut = first_q - 1.5 * spread
        high_cut = third_q + 1.5 * spread
        keep = (data[col] >= low_cut) & (data[col] <= high_cut)
        data = data[keep]
    return data
def main():
    """Train a logistic regression on the diabetes data set and save a
    confusion-matrix plot whose title includes the test accuracy."""
    # Reading file into dataframe
    df = pd.read_csv('diabetes.csv')
    # Sorting columns most correlated with 'Outcome' column in descending order
    corr = df.corr()['Outcome'].abs().sort_values(ascending=False)
    # Isolates the three attributes most correlated with the target
    corr_attrs = corr[1:4].index
    # Outcome column
    outcome = corr[0:1].index
    # Drop duplicates and null values.  BUG FIX: drop_duplicates()/dropna()
    # return new frames rather than mutating in place, so the results must be
    # assigned back (the original discarded them, making these calls no-ops).
    df = df.drop_duplicates()
    df = df.dropna()
    # Removes outliers using correlated columns and outcome column.
    # BUG FIX: remove_outliers returns the filtered frame; re-assign it
    # (the original discarded both return values).
    df = remove_outliers(df, corr_attrs)
    df = remove_outliers(df, outcome)
    # Creates feature vector and target vector; ravel() yields the 1-D target
    # shape sklearn expects (avoids a DataConversionWarning).
    X = df[corr_attrs].values
    Y = df[outcome].values.ravel()
    # Partitions data into training and testing subsets
    X_train, X_test, Y_train, Y_test = tts(X, Y, random_state=42)
    # Runs logistic regression
    model = LogisticRegression()
    model.fit(X_train, Y_train)
    predict_Y = model.predict(X_test)
    # Creates confusion matrix based on predictions
    matrix = cmatrix(Y_test, predict_Y)
    # Calculates accuracy score and concatenates into a string
    accuracy = accscore(Y_test, predict_Y)
    accuracy_string = "Accuracy is " + str(accuracy)
    # Displaying the confusion matrix
    cm_disp = cms(confusion_matrix=matrix, display_labels=model.classes_)
    fig, axes = plt.subplots()
    cm_disp.plot(ax=axes)
    axes.set(title="Diabetes Logistic Regression Confusion Matrix" + "\n" + accuracy_string)
    plt.savefig('Diabetes Logistic Regression Confusion Matrix.png')
if __name__ == '__main__':
main() | bengranat/ITP449 | Diabetes Classification.py | Diabetes Classification.py | py | 2,658 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 56,
"usage_type": "call... |
35914507591 | #! /usr/bin/env python3
# (re)construit les fichiers README.md de description des challenges
import json
import glob
import os
import io
from collections import namedtuple
import yaml
# Record describing one challenge row in a generated README index.
Slug = namedtuple('Slug', ['order', # sequence number used to preserve ordering
                           'link', # markdown link(s) to the local solution file(s)
                           'track', # markdown link to the sub-section of the HackerRank site
                           'domain', # markdown link to the domain on www.hackerrank.com
                           'main_track', # domain identifier (used to look up its description)
                           'url']) # url of the challenge on the hackerrank web site
# globals
models = {} # challenge definitions indexed by (contest, slug)
descriptions = {} # contest/domain name and description, indexed by slug
playlists = {} # playlist definitions, indexed by slug
def get_models():
    """Load the challenge definitions into the module-level ``playlists``,
    ``descriptions`` and ``models`` dicts from the offline JSON dumps."""
    # The playlists.
    for i in glob.iglob(os.path.join("offline", "playlists", "*.json")):
        with open(i, "r") as f:
            data = json.load(f)
        playlists[data['slug']] = data
    # The contests (including master_<domain>).
    order = 0
    for i in glob.iglob(os.path.join("offline", "contests", "*.json")):
        with open(i, "r") as f:
            data = json.load(f)
        # The description of a contest (text up to the first <br tag).
        if 'name' in data:
            desc = (data['description'] or '').partition('<br')[0]
            descriptions[data['slug']] = {'name': data['name'],
                                          'description': desc}
        # For every challenge in the contest.
        for m in data['models']:
            if 'contest_slug' not in m:
                continue
            order += 1
            m['order'] = order # sequence number used to keep chapter ordering
            if m['contest_slug'] == 'projecteuler':
                m['order'] -= 10000 # put ProjectEuler+ ahead of the other contests
            models[(m['contest_slug'], m['slug'])] = m
def do_domain(domain):
    """Generate the README.md challenge index for *domain* (recursively).

    Scans the directory tree for solution files, groups them per challenge
    into one Slug each (possibly with several language links), renders a
    markdown index ordered by Slug.order, and rewrites README.md only when
    its content changed.
    """
    slugs = {}
    #
    # STEP 1 : scan the directory for challenges (recursively)
    #
    for i in glob.iglob(os.path.join(domain, "**/*"), recursive=True):
        # no elegant way found yet to exclude the solution directories
        if "/js10-create-a-button/" in i or "/js10-buttons-container/" in i or '/js10-binary-calculator/' in i: # noqa
            continue
        if os.path.isdir(i):
            # also create the README.md files in every sub-domain
            do_domain(i)
        if not os.path.isfile(i):
            continue
        name = os.path.basename(i)
        if name == 'CMakeLists.txt':
            continue
        if name == 'README.md':
            continue
        # File extension determines the display name of the language.
        contest_challenge, lang = os.path.splitext(name)
        langs = {'.hs': 'Haskell',
                 '.erl': 'Erlang',
                 '.py': 'Python',
                 '.c': 'C',
                 '.cpp': 'C++',
                 '.sh': 'bash',
                 '.sql': 'SQL',
                 '.txt': 'text',
                 '.java': 'Java',
                 '.js': 'Javascript',
                 '.html': 'HTML',
                 '.pl': 'Perl'}
        lang = langs.get(lang)
        if not lang:
            # nominal: file to ignore
            # print("LANG NOT FOUND:", name, os.path.splitext(name))
            continue
        contest = 'master' # by default
        zz = os.path.split(os.path.dirname(i))
        if zz[0] == "contests":
            contest = zz[1]
        if (contest, contest_challenge) not in models:
            print("SLUG NOT FOUND:", name, contest_challenge, lang, i, domain)
            continue
        source = os.path.relpath(os.path.realpath(i), start=domain)
        # A symlinked solution is unexpected: report it and abort the run.
        if os.path.islink(source):
            print(source)
            exit()
        r = slugs.get((contest, contest_challenge))
        if r is None:
            # First file seen for this challenge: build its Slug record.
            m = models[(contest, contest_challenge)]
            m['onboarding'] = None
            if contest != "master":
                url = 'https://www.hackerrank.com/contests/{}/challenges/{}'.format(contest, contest_challenge) # noqa
            else:
                url = 'https://www.hackerrank.com/challenges/{}'.format(contest_challenge)
            # Placeholder values, normally overwritten by the playlist branch
            # below when the kit's playlist data is available.
            if zz[0] == "interview-preparation-kit":
                # print("--->", zz)
                title = "title"
                track = "track"
                main_track = "main_track"
            if zz[0] in playlists:
                playlist = playlists[zz[0]]
                chapter = None
                # NOTE(review): the loop variable `i` shadows the outer file
                # path `i`; harmless because the glob loop re-assigns it.
                for i, c in enumerate(playlist['playlists']):
                    if c['slug'] == zz[1]:
                        chapter = c
                        m['order'] = i + 100000000
                        break
                title = "[{}]({})".format(
                    playlist['name'],
                    "https://www.hackerrank.com/interview/{}".format(zz[0]))
                track = "[{}]({})".format(
                    chapter['name'],
                    "https://www.hackerrank.com/interview/{}/{}/challenges".format(zz[0], zz[1])) # noqa
                url = "https://www.hackerrank.com/challenges/{}/problem?h_l=playlist&slugs%5B%5D=interview&slugs%5B%5D={}&slugs%5B%5D={}".format( # noqa
                    contest_challenge,
                    zz[0],
                    zz[1])
            elif m['track'] is not None:
                title = "[{}]({})".format(
                    m['track']['track_name'],
                    "https://www.hackerrank.com/domains/" + m['track']['track_slug'])
                # NOTE(review): this first `track` assignment is immediately
                # overwritten by the shorter form below.
                track = "[{}]({}) > [{}]({})".format(
                    m['track']['track_name'],
                    "https://www.hackerrank.com/domains/" + m['track']['track_slug'],
                    m['track']['name'],
                    "https://www.hackerrank.com/domains/" +
                    m['track']['track_slug'] + "/" + m['track']['slug'])
                track = "[{}]({})".format(
                    m['track']['name'],
                    "https://www.hackerrank.com/domains/" +
                    m['track']['track_slug'] + "/" + m['track']['slug'])
                main_track = m['track']['track_slug']
            else:
                x = descriptions.get(m['contest_slug'])['name']
                title = "[{}]({})".format(x, "https://www.hackerrank.com/contests/" +
                                          m['contest_slug'])
                track = ""
                main_track = m['contest_slug']
            r = Slug(order=m['order'],
                     link=['[{}]({})'.format(lang, source)],
                     domain=title,
                     main_track=main_track,
                     track=track,
                     url=url)
            slugs[(contest, contest_challenge)] = r
        else:
            # Challenge already known: just add another language link.
            r.link.append('[{}]({})'.format(lang, source))
    order = [(v.order, contest_challenge) for contest_challenge, v in slugs.items()]
    order.sort()
    #
    # STEP 2 : build the challenge index, respecting the ordering
    #
    with io.StringIO() as out:
        # Optional hand-written preamble for this domain.
        if os.path.exists(os.path.join(domain, "README.md.in")):
            with open(os.path.join(domain, "README.md.in")) as f:
                out.write(f.read())
        prev_contest = None
        prev_domain = None
        prev_track = None
        for _, contest_challenge in order:
            m = models[contest_challenge]
            s = slugs[contest_challenge]
            # New domain section: emit its heading and description once.
            if prev_domain != s.domain:
                prev_domain = s.domain
                print("", file=out)
                print("### " + prev_domain, file=out)
                if s.main_track in descriptions:
                    print(descriptions[s.main_track]['description'], file=out)
                    print("", file=out)
            # New track (or new contest): emit sub-heading and table header.
            if prev_track != s.track or prev_contest != contest_challenge[0]:
                prev_contest = contest_challenge[0]
                prev_track = s.track
                if prev_track != "":
                    print("", file=out)
                    print("#### " + prev_track, file=out)
                    print("", file=out)
                print("Name | Preview | Code | Difficulty", file=out)
                print("---- | ------- | ---- | ----------", file=out)
            links = ' '.join(sorted(s.link))
            preview = m['preview']
            if not preview:
                preview = m['name']
            preview = preview.replace("\n", " ").strip()
            print('[%s](%s)|%s|%s|%s' % (m['name'], s.url, preview, links,
                                         m['difficulty_name']), file=out)
            print("", file=out)
        md = out.getvalue()
    #
    # STEP 3 : update the README.md file (only when its content changed)
    #
    fn = os.path.join(domain, "README.md")
    if len(md.strip()) == 0:
        if os.path.exists(fn):
            print("delete", fn)
            os.unlink(fn)
    elif not os.path.exists(fn) or md != open(fn, "rt").read():
        print("rewrite", fn)
        open(fn, "wt").write(md)
def main():
    """Load the domain list from .hr_conf.yaml and regenerate every README."""
    conf_path = os.path.join(os.path.dirname(__file__), ".hr_conf.yaml")
    # BUG FIX: yaml.load() without an explicit Loader is deprecated and unsafe
    # on untrusted input; safe_load is the right call for a plain config file.
    # The context manager also closes the handle the original leaked.
    with open(conf_path) as f:
        domains = yaml.safe_load(f)["domains"]
    # Work relative to the directory containing this script.
    os.chdir(os.path.dirname(__file__))
    get_models()
    for domain in domains:
        do_domain(domain)
    do_domain("coding-dojo")
# Script entry point: rebuild all README.md index files.
if __name__ == '__main__':
    main()
| rene-d/hackerrank | hr_table.py | hr_table.py | py | 9,670 | python | en | code | 72 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "glob.iglob",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
29861647037 | import streamlit as st
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# Page title shown at the top of the Streamlit app.
st.title("""
EXPLORE DIFFERENT CLASSIFIERS
""")
# Sidebar drop-downs for choosing the data set and the classifier.
# NOTE(review): get_dataset() only handles the first four entries; the
# remaining data-set choices fall back to the Digits data.
dataset_name=st.sidebar.selectbox("select Data Set",("Iris","Breast Cancer","Wine","Digits","DIABETES","BOSTON","LINNERUD"))
classifier_name=st.sidebar.selectbox("Select Classifier",("KNN","SVM","RANDOM FOREST","DECISION TREE","GAUSSION NB","MLP","ADABoost","QUADRATIC DISCRIMINANT"))
def get_dataset(dataset_name):
    """Return (features, target) numpy arrays for the selected toy dataset.

    Only Iris, Breast Cancer, Wine and Digits are actually loaded; every
    other menu entry (DIABETES, BOSTON, LINNERUD) falls back to Digits.
    """
    if dataset_name=="Iris":
        data=datasets.load_iris()
    elif dataset_name=="Breast Cancer":
        data=datasets.load_breast_cancer()
    elif dataset_name=="Wine":
        data=datasets.load_wine()
    else:
        # BUG FIX: this branch used to read `elif dataset_name:`, a truthy
        # test that matched every remaining name.  The behaviour (load Digits
        # for anything else) is preserved but made explicit; the regression
        # sets DIABETES/BOSTON/LINNERUD offered in the sidebar are NOT
        # handled here.
        data=datasets.load_digits()
    x=data.data
    y=data.target
    return x,y
# Load the chosen data set and display its feature-matrix shape.
x,y=get_dataset(dataset_name)
st.write("SHAPE OF DATASET",x.shape)
def add_parameter(clf_name):
    """Render sidebar sliders for the chosen classifier and collect its
    hyper-parameters into a dict keyed by the names get_classifier expects."""
    params = {}
    if clf_name == "KNN":
        params["K"] = st.sidebar.slider("K", 1, 15)
    elif clf_name == "SVM":
        params["C"] = st.sidebar.slider("C", 0.01, 10.00)
    elif clf_name == "RANDOM FOREST":
        params["M_D"] = st.sidebar.slider("M_D", 2, 15)
        params["N_E"] = st.sidebar.slider("N_E", 1, 100)
    elif clf_name == "DECISION TREE":
        params["M_DD"] = st.sidebar.slider("M_DD", 2, 15)
    elif clf_name == "MLP":
        # Fixed regularisation strength; no slider for the MLP.
        params["A"] = 1
    return params
# Collect the hyper-parameters for the selected classifier from the sidebar.
p=add_parameter(classifier_name)
def get_classifier(clf_name, p):
    """Instantiate the sklearn estimator named by *clf_name*, configured from
    the hyper-parameter dict *p* built by add_parameter()."""
    if clf_name == "KNN":
        model = KNeighborsClassifier(n_neighbors=p["K"])
    elif clf_name == "SVM":
        model = SVC(C=p["C"])
    elif clf_name == "RANDOM FOREST":
        model = RandomForestClassifier(max_depth=p["M_D"], n_estimators=p["N_E"], random_state=900)
    elif clf_name == "DECISION TREE":
        model = DecisionTreeClassifier(max_depth=p["M_DD"])
    elif clf_name == "GAUSSION NB":
        model = GaussianNB()
    elif clf_name == "MLP":
        model = MLPClassifier(alpha=p["A"], max_iter=1000)
    elif clf_name == "ADABoost":
        model = AdaBoostClassifier()
    elif clf_name == "QUADRATIC DISCRIMINANT":
        model = QuadraticDiscriminantAnalysis()
    return model
# Build the estimator, fit it on a train/test split and report test accuracy.
clf=get_classifier(classifier_name,p)
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.33,random_state=900)
clf.fit(x_train,y_train)
y_p = clf.predict(x_test)
a=accuracy_score(y_test,y_p)
st.write(f"CLASSIFIER={classifier_name}")
st.write(f"ACCURACY={a}")
# Project the features onto their first two principal components for a 2-D
# scatter plot coloured by class label.
pca=PCA(2)
x_project=pca.fit_transform(x)
x1=x_project[:,0]
x2=x_project[:,1]
fig=plt.figure()
plt.scatter(x1,x2,c=y,alpha=0.8,cmap="viridis")
plt.xlabel("principle component 1")
plt.ylabel("principle component 2")
plt.colorbar()
st.pyplot(bbox_inches='tight')
st.set_option('deprecation.showPyplotGlobalUse', False) | yaswanth2802/machine-learning-web-app | app.py | app.py | py | 3,258 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.title",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "st... |
74480044513 | import main
import alg_cluster
import random
import matplotlib.pyplot as plt
import time
def get_random_clusters(num_clusters):
    """Create num_clusters singleton clusters placed uniformly at random in
    the square [-1, 1] x [-1, 1] (population and risk fields zeroed)."""
    return [
        alg_cluster.Cluster(set([idx]),
                            random.random() * 2 - 1,
                            random.random() * 2 - 1,
                            0, 0)
        for idx in range(num_clusters)
    ]
# Timing comparison: run both closest-pair implementations on increasingly
# large random cluster lists and record the wall-clock time of each call.
x_data = []   # cluster counts for slow_closest_pair
y_data = []   # elapsed seconds for slow_closest_pair
x1_data = []  # cluster counts for fast_closest_pair
y1_data = []  # elapsed seconds for fast_closest_pair
for n in range(2,201):
    cluster_list = get_random_clusters(n)
    # Time the slow implementation.
    start_time = time.time()
    main.slow_closest_pair(cluster_list)
    stop_time = time.time()
    elapsed_time = stop_time - start_time
    x_data.append(n)
    y_data.append(elapsed_time)
    # Time the fast implementation on the same input.
    start_time = time.time()
    main.fast_closest_pair(cluster_list)
    stop_time = time.time()
    elapsed_time = stop_time - start_time
    x1_data.append(n)
    y1_data.append(elapsed_time)
# Plot both timing curves on a single figure and save it to disk.
plt.plot(x_data,y_data, "g-", label = "Slow Closest Pair")
plt.plot(x1_data,y1_data, "r-", label = "Fast Closest Pair")
plt.legend()
plt.title("Running Time Comparison of Closest Pair Algorithms")
plt.suptitle("Desktop Python (PyCharm)")
plt.xlabel("Number of Initial Clusters")
plt.ylabel("Running time (seconds)")
plt.savefig("performanceTime.png")
plt.show()
| pakzaban/Clustering_Algorithmic_Thinking_Project_3 | myPlots.py | myPlots.py | py | 1,189 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alg_cluster.Cluster",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "main.slow_closest_pair",
... |
29376346831 | from django.urls import path
from .views import solicitar_turno, turnos_cliente, turnos_veterinario, VerTurnoVeterinario, ver_turno_cliente
urlpatterns = [
path('solicitar_turno', solicitar_turno, name='solicitar_turno'),
path('turnos_cliente', turnos_cliente, name='turnos_cliente'), # No me gusta el nombre, despuรฉs lo discutimos
path('turnos_veterinario', turnos_veterinario, name='turnos_veterinario'),
path('ver_turno_veterinario/<int:turno_id>', VerTurnoVeterinario.as_view(), name="ver_turno_veterinario"),
path('ver_turno_cliente/<int:turno_id>', ver_turno_cliente, name='ver_turno_cliente')
] | bautimercado/oh-my-dog | ohmydog/turnos/urls.py | urls.py | py | 624 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "views.solicitar_turno",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.turnos... |
36937078898 | import bpy
import os
import logging
from pathlib import Path
log = logging.getLogger(__name__)
# in future remove_prefix should be renamed to rename prefix and a target prefix should be specifiable via ui
def fixBones(remove_prefix=False, name_prefix="mixamorig:"):
    """Apply the armature's transforms and optionally strip the Mixamo name
    prefix from vertex groups, pose bones and action F-curve data paths.

    :param remove_prefix: when True, remove ``name_prefix`` from every bone,
        vertex-group and F-curve name of the selected armature(s).
    :param name_prefix: prefix to strip (Mixamo rigs use "mixamorig:").
    """
    bpy.ops.object.mode_set(mode = 'OBJECT')
    # NOTE(review): bpy.ops.object is an operator category and always truthy,
    # so this warning can never fire -- the intended check was probably
    # `bpy.context.object is None`.  TODO confirm.
    if not bpy.ops.object:
        log.warning('[Mixamo Root] Could not find amature object, please select the armature')
    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
    bpy.context.object.show_in_front = True
    if remove_prefix:
        for rig in bpy.context.selected_objects:
            if rig.type == 'ARMATURE':
                # Rename the vertex groups of every child mesh and keep the
                # matching pose-bone names in sync.
                for mesh in rig.children:
                    for vg in mesh.vertex_groups:
                        new_name = vg.name
                        new_name = new_name.replace(name_prefix,"")
                        rig.pose.bones[vg.name].name = new_name
                        vg.name = new_name
                for bone in rig.pose.bones:
                    bone.name = bone.name.replace(name_prefix,"")
                # Point every action's F-curves at the renamed bones.
                for action in bpy.data.actions:
                    fc = action.fcurves
                    for f in fc:
                        f.data_path = f.data_path.replace(name_prefix,"")
def scaleAll():
    """Scale the value axis of all selected pose bones' Location F-curves by
    0.01 through the Graph Editor (presumably converting Mixamo centimeter
    translation to Blender meters -- TODO confirm)."""
    bpy.ops.object.mode_set(mode='OBJECT')
    # NOTE(review): the previous editor type is saved here but never restored.
    prev_context=bpy.context.area.type
    bpy.ops.object.mode_set(mode='POSE')
    bpy.ops.pose.select_all(action='SELECT')
    # Graph Editor setup: show only Location channels, pivot on the 2D cursor.
    bpy.context.area.type = 'GRAPH_EDITOR'
    bpy.context.space_data.dopesheet.filter_text = "Location"
    bpy.context.space_data.pivot_point = 'CURSOR'
    bpy.context.space_data.dopesheet.use_filter_invert = False
    bpy.ops.anim.channels_select_all(action='SELECT')
    # Resize by 0.01 on the key-value axis only (Y constrained).
    bpy.ops.transform.resize(value=(1, 0.01, 1), orient_type='GLOBAL',
                            orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)),
                            orient_matrix_type='GLOBAL',
                            constraint_axis=(False, True, False),
                            mirror=True, use_proportional_edit=False,
                            proportional_edit_falloff='SMOOTH',
                            proportional_size=1,
                            use_proportional_connected=False,
                            use_proportional_projected=False)
def copyHips(root_bone_name="Root", hip_bone_name="mixamorig:Hips", name_prefix="mixamorig:"):
    """Move the hip bone's Location F-curves of the active action onto the
    root bone, turning the hips' translation into root motion."""
    bpy.context.area.ui_type = 'FCURVES'
    # Select our root-motion bone.
    bpy.ops.pose.select_all(action='DESELECT')
    bpy.context.object.pose.bones[name_prefix + root_bone_name].bone.select = True
    # Set the 2D cursor to frame 0 / value 0.
    bpy.ops.graph.cursor_set(frame=0.0, value=0.0)
    # Add a new Location keyframe for the root bone.
    bpy.ops.anim.keyframe_insert_menu(type='Location')
    # Select only the hips and their Location graph data.
    bpy.ops.pose.select_all(action='DESELECT')
    bpy.context.object.pose.bones[hip_bone_name].bone.select = True
    bpy.context.area.ui_type = 'DOPESHEET'
    bpy.context.space_data.dopesheet.filter_text = "Location"
    bpy.context.area.ui_type = 'FCURVES'
    # Copy the location values of the hips, then delete their curves.
    bpy.ops.graph.copy()
    bpy.ops.graph.select_all(action='DESELECT')
    myFcurves = bpy.context.object.animation_data.action.fcurves
    # NOTE(review): removing elements from a collection while iterating it is
    # fragile -- confirm all three hip location curves are actually removed.
    for i in myFcurves:
        hip_bone_fcvurve = 'pose.bones["'+hip_bone_name+'"].location'
        if str(i.data_path)==hip_bone_fcvurve:
            myFcurves.remove(i)
    # Paste the copied curves onto the root bone.
    bpy.ops.pose.select_all(action='DESELECT')
    bpy.context.object.pose.bones[name_prefix + root_bone_name].bone.select = True
    bpy.ops.graph.paste()
    bpy.context.area.ui_type = 'VIEW_3D'
    bpy.ops.object.mode_set(mode='OBJECT')
def fix_bones_nla(remove_prefix=False, name_prefix="mixamorig:"):
    """Apply the selected armature's transforms (NLA variant of fixBones).

    NOTE(review): unlike fixBones, ``remove_prefix`` and ``name_prefix`` are
    accepted but never used here.
    """
    bpy.ops.object.mode_set(mode = 'OBJECT')
    # NOTE(review): bpy.ops.object is always truthy, so this warning can
    # never fire -- the intended check was probably on bpy.context.object.
    if not bpy.ops.object:
        log.warning('[Mixamo Root] Could not find amature object, please select the armature')
    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
    bpy.context.object.show_in_front = True
def scale_all_nla(armature):
    """Scale the Location F-curves of every NLA strip on *armature* by 0.01
    on the value axis (per-strip variant of scaleAll).

    NOTE(review): assigning bpy.context.active_nla_track/active_nla_strip is
    unusual -- Blender's context attributes are normally read-only -- TODO
    confirm this works outside this add-on's environment.
    """
    bpy.ops.object.mode_set(mode='OBJECT')
    # prev_context=bpy.context.area.type
    for track in [x for x in armature.animation_data.nla_tracks]:
        bpy.context.active_nla_track = track
        for strip in track.strips:
            bpy.context.active_nla_strip = strip
            print(bpy.context.active_nla_strip)
            bpy.ops.object.mode_set(mode='POSE')
            bpy.ops.pose.select_all(action='SELECT')
            # Graph Editor setup: Location channels only, cursor pivot.
            bpy.context.area.type = 'GRAPH_EDITOR'
            bpy.context.space_data.dopesheet.filter_text = "Location"
            bpy.context.space_data.pivot_point = 'CURSOR'
            bpy.context.space_data.dopesheet.use_filter_invert = False
            bpy.ops.anim.channels_select_all(action='SELECT')
            # Resize by 0.01 on the key-value axis only.
            bpy.ops.transform.resize(value=(1, 0.01, 1), orient_type='GLOBAL',
                                    orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)),
                                    orient_matrix_type='GLOBAL',
                                    constraint_axis=(False, True, False),
                                    mirror=True, use_proportional_edit=False,
                                    proportional_edit_falloff='SMOOTH',
                                    proportional_size=1,
                                    use_proportional_connected=False,
                                    use_proportional_projected=False)
def copy_hips_nla(root_bone_name="Root", hip_bone_name="mixamorig:Hips", name_prefix="mixamorig:"):
    """For every NLA strip, move the hip bone's Location F-curves onto the
    root bone and clamp the root's Z curve (NLA variant of copyHips).

    NOTE(review): the ``hip_bone_name`` parameter is immediately overridden
    with "Ctrl_Hips" below, so the argument has no effect -- confirm whether
    that hard-coding is intentional (control-rig workflow?).
    """
    hip_bone_name="Ctrl_Hips"
    bpy.ops.object.mode_set(mode='POSE')
    previous_context = bpy.context.area.ui_type
    bpy.ops.pose.select_all(action='DESELECT')
    # Dead code retained from an earlier single-action implementation; the
    # `while False:` guard means it never runs.
    while False:
        #SELECT OUR ROOT MOTION BONE
        # bpy.context.object.pose.bones[name_prefix + root_bone_name].bone.select = True
        # bpy.ops.nla.tweakmode_enter()
        # bpy.context.area.ui_type = 'FCURVES'
        # # SET FRAME TO ZERO
        # bpy.ops.graph.cursor_set(frame=0.0, value=0.0)
        # #ADD NEW KEYFRAME
        # bpy.ops.anim.keyframe_insert_menu(type='Location')
        # #SELECT ONLY HIPS AND LOCTAIUON GRAPH DATA
        # bpy.ops.pose.select_all(action='DESELECT')
        # bpy.context.object.pose.bones[hip_bone_name].bone.select = True
        # bpy.context.area.ui_type = 'DOPESHEET'
        # bpy.context.space_data.dopesheet.filter_text = "Location"
        # bpy.context.area.ui_type = 'FCURVES'
        # #COPY THE LOCATION VALUES OF THE HIPS AND DELETE THEM
        # bpy.ops.graph.copy()
        # bpy.ops.graph.select_all(action='DESELECT')
        # myFcurves = bpy.context.object.animation_data.action.fcurves
        # for i in myFcurves:
        #     hip_bone_fcvurve = 'pose.bones["'+hip_bone_name+'"].location'
        #     if str(i.data_path)==hip_bone_fcvurve:
        #         myFcurves.remove(i)
        # bpy.ops.pose.select_all(action='DESELECT')
        # bpy.context.object.pose.bones[name_prefix + root_bone_name].bone.select = True
        # bpy.ops.graph.paste()
        # for animation data in object
        # for
        pass
    for track in bpy.context.object.animation_data.nla_tracks:
        bpy.context.object.animation_data.nla_tracks.active = track
        for strip in track.strips:
            # Enter tweak mode on the strip with the root bone selected.
            bpy.context.object.pose.bones[name_prefix + root_bone_name].bone.select = True
            bpy.context.area.ui_type = 'NLA_EDITOR'
            bpy.ops.nla.tweakmode_enter()
            bpy.context.area.ui_type = 'FCURVES'
            # NOTE(review): pose-bone data paths begin with 'pose.bones', so
            # startswith('location') likely never matches and hip_curves stays
            # empty -- confirm the intended filter.
            hip_curves = [fc for fc in strip.fcurves if hip_bone_name in fc.data_path and fc.data_path.startswith('location')]
            # Copy Hips to root
            ## Insert keyframe for root bone
            start_frame = strip.action.frame_range[0]
            # frame sets the x axis cursor (determines the frame, and value the y axis cursor, which is the amplitude of the curve)
            bpy.ops.graph.cursor_set(frame=start_frame, value=0.0)
            bpy.ops.anim.keyframe_insert_menu(type='Location')
            bpy.ops.pose.select_all(action='DESELECT')
            ## Copy Location fcurves of the hips
            bpy.context.object.pose.bones[hip_bone_name].bone.select = True
            bpy.context.area.ui_type = 'DOPESHEET'
            bpy.context.space_data.dopesheet.filter_text = "Location"
            bpy.context.area.ui_type = 'FCURVES'
            bpy.ops.graph.copy()
            bpy.ops.graph.select_all(action='DESELECT')
            ## We want to delete the hips locations
            allFcurves = strip.fcurves
            for fc in hip_curves:
                allFcurves.remove(fc)
            ## Paste location fcurves to the root bone
            bpy.ops.pose.select_all(action='DESELECT')
            bpy.context.object.pose.bones[name_prefix + root_bone_name].bone.select = True
            bpy.ops.graph.paste()
            loc_fcurves = [fc for fc in strip.fcurves if root_bone_name in fc.data_path and fc.data_path.startswith('location')]
            # Update Root Bone
            # set z of root to min 0 (not negative).
            # NOTE(review): min(0, abs(z)) is always 0 since abs(z) >= 0, so
            # this zeroes the Z curve entirely; also F-Curve keyframe `co` is
            # a 2-vector (frame, value), so `.co.z` needs confirming.
            for fc in loc_fcurves:
                # Z axis location curve
                if fc.array_index == 2:
                    for kp in fc.keyframe_points:
                        kp.co.z = min(0, abs(kp.co.z))
            # Delete rotation curves for x(0) and y(1) axis. Should we delet Z rotation too?
            # rot_fcurves = [fc for fc in strip.fcurves if root_bone_name in fc.data_path and fc.data_path.startswith('rotation') and (fc.array_index == 0 or fc.array_index == 1)]
            # for fc in rot_fcurves:
            #     strip.fcurves.remove(fc)
            # while(rot_fcurves):
            #     fc = rot_fcurves.pop()
            #     strip.fcurves.remove(fc)
            bpy.context.area.ui_type = 'NLA_EDITOR'
            bpy.ops.nla.tweakmode_exit()
    bpy.context.area.ui_type = previous_context
    bpy.ops.object.mode_set(mode='OBJECT')
def deleteArmature(imported_objects=None):
    """Delete a set of previously imported objects, then restore the active
    object.

    :param imported_objects: objects (typically one armature plus its meshes)
        to remove from the scene; ``None`` or an empty collection means there
        is nothing to delete.  BUG FIX: the default used to be the mutable
        literal ``set()``; replaced with ``None`` to avoid the shared
        mutable-default pitfall (behaviour for callers is unchanged).
    """
    if imported_objects is None:
        imported_objects = set()
    # Remember the currently selected armature so it can be re-activated
    # after the deletion clears the selection.
    armature = None
    if bpy.context.selected_objects:
        armature = bpy.context.selected_objects[0]
    if not imported_objects:
        log.warning("[Mixamo Root] No armature imported, nothing to delete")
    else:
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.select_all(action='DESELECT')
        for obj in imported_objects:
            bpy.data.objects[obj.name].select_set(True)
        bpy.ops.object.delete(use_global=False, confirm=False)
    if bpy.context.selected_objects:
        bpy.context.view_layer.objects.active = armature
def import_armature(filepath, root_bone_name="Root", hip_bone_name="mixamorig:Hips", remove_prefix=False, name_prefix="mixamorig:", insert_root=False, delete_armatures=False):
    """Import one Mixamo FBX file, name its action after the file stem, and
    optionally insert a root-motion bone afterwards (see add_root_bone).

    NOTE(review): ``delete_armatures`` is accepted but unused here -- the
    caller (get_all_anims) performs the deletion itself.
    """
    # Snapshot the scene so the freshly imported objects can be identified.
    old_objs = set(bpy.context.scene.objects)
    if insert_root:
        # Apply transforms before importing so the root insertion works on
        # clean object transforms.
        bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
        bpy.ops.import_scene.fbx(filepath = filepath)#, automatic_bone_orientation=True)
    else:
        bpy.ops.import_scene.fbx(filepath = filepath)#, automatic_bone_orientation=True)
    imported_objects = set(bpy.context.scene.objects) - old_objs
    imported_actions = [x.animation_data.action for x in imported_objects if x.animation_data]
    print("[Mixamo Root] Now importing: " + str(filepath))
    # NOTE(review): raises IndexError if the FBX contains no animated object
    # -- confirm that failing here is acceptable.
    imported_actions[0].name = Path(filepath).resolve().stem # Only reads the first animation associated with an imported armature
    if insert_root:
        add_root_bone(root_bone_name, hip_bone_name, remove_prefix, name_prefix)
def add_root_bone(root_bone_name="Root", hip_bone_name="mixamorig:Hips", remove_prefix=False, name_prefix="mixamorig:"):
    """Add a root bone to the selected armature, parent the hips to it, then
    run the fixBones/scaleAll/copyHips pipeline to bake hip translation into
    the new root."""
    armature = bpy.context.selected_objects[0]
    bpy.ops.object.mode_set(mode='EDIT')
    # Create the new root bone and make it the hips' parent.
    root_bone = armature.data.edit_bones.new(name_prefix + root_bone_name)
    root_bone.tail.y = 30
    armature.data.edit_bones[hip_bone_name].parent = armature.data.edit_bones[name_prefix + root_bone_name]
    bpy.ops.object.mode_set(mode='OBJECT')
    fixBones(remove_prefix=remove_prefix, name_prefix=name_prefix)
    scaleAll()
    copyHips(root_bone_name=root_bone_name, hip_bone_name=hip_bone_name, name_prefix=name_prefix)
def add_root_bone_nla(root_bone_name="Root", hip_bone_name="mixamorig:Hips", name_prefix="mixamorig:"):#remove_prefix=False, name_prefix="mixamorig:"):
    """NLA variant of add_root_bone: insert a root bone, parent the hips to
    it, then move the hip motion of every NLA strip onto it."""
    armature = bpy.context.selected_objects[0]
    bpy.ops.object.mode_set(mode='EDIT')
    # Add root bone to edit bones
    root_bone = armature.data.edit_bones.new(name_prefix + root_bone_name)
    root_bone.tail.z = .25
    armature.data.edit_bones[hip_bone_name].parent = armature.data.edit_bones[name_prefix + root_bone_name]
    bpy.ops.object.mode_set(mode='OBJECT')
    # fix_bones_nla(remove_prefix=remove_prefix, name_prefix=name_prefix)
    # scale_all_nla()
    copy_hips_nla(root_bone_name=root_bone_name, hip_bone_name=hip_bone_name, name_prefix=name_prefix)
def push(obj, action, track_name=None, start_frame=0):
    """Emulate the NLA 'Push Down' operator.

    A fresh NLA track is created on *obj*, *action* is placed on it as a new
    strip starting at *start_frame*, and the object's active action slot is
    cleared.  When *track_name* is given, the new track is renamed to it.
    """
    nla_tracks = obj.animation_data.nla_tracks
    track = nla_tracks.new(prev=None)
    if track_name:
        track.name = track_name
    track.strips.new(action.name, start_frame, action)
    # Detach the action from the object now that it lives on the NLA track.
    obj.animation_data.action = None
def get_all_anims(source_dir, root_bone_name="Root", hip_bone_name="mixamorig:Hips", remove_prefix=False, name_prefix="mixamorig:", insert_root=False, delete_armatures=False):
    """Import every animation file found in *source_dir* via import_armature().

    Aborts and returns -1 on the first file that raises; otherwise returns
    None after restoring the UI context and resetting the start frame to 0.

    :param delete_armatures: if True, delete each imported armature except
        presumably the last one (num_files acts as a countdown) — confirm.
    """
    # NOTE(review): os.listdir also returns sub-directories; a non-animation
    # entry will raise inside the loop and abort the whole run via return -1.
    files = os.listdir(source_dir)
    num_files = len(files)
    current_context = bpy.context.area.ui_type
    # Snapshot of pre-existing objects so newly imported ones can be identified.
    old_objs = set(bpy.context.scene.objects)
    for file in files:
        print("file: " + str(file))
        try:
            # NOTE(review): os.path.join would be more portable than "/".
            filepath = source_dir+"/"+file
            import_armature(filepath, root_bone_name, hip_bone_name, remove_prefix, name_prefix, insert_root, delete_armatures)
            imported_objects = set(bpy.context.scene.objects) - old_objs
            if delete_armatures and num_files > 1:
                deleteArmature(imported_objects)
                num_files -= 1
        except Exception as e:
            log.error("[Mixamo Root] ERROR get_all_anims raised %s when processing %s" % (str(e), file))
            return -1
    bpy.context.area.ui_type = current_context
    bpy.context.scene.frame_start = 0
    bpy.ops.object.mode_set(mode='OBJECT')
def apply_all_anims(delete_applied_armatures=False, control_rig=None, push_nla=False):
    """Retarget every imported armature's action onto *control_rig*.

    For each armature in the scene (other than the control rig itself), the
    mr.import_anim_to_rig operator is run, the resulting action is renamed
    with a 'ctrl_' prefix, optionally pushed onto a new NLA track, and the
    source armature is optionally deleted.

    :param delete_applied_armatures: delete each source armature once applied.
    :param control_rig: target armature object; must be of type 'ARMATURE'.
    :param push_nla: if True, push each retargeted action via push().
    """
    # No-op unless a valid control rig was supplied.
    if control_rig and control_rig.type == 'ARMATURE':
        bpy.ops.object.mode_set(mode='OBJECT')
        imported_objects = set(bpy.context.scene.objects)
        imported_armatures = [x for x in imported_objects if x.type == 'ARMATURE' and x.name != control_rig.name]
        for obj in imported_armatures:
            action_name = obj.animation_data.action.name
            # The mr.import_anim_to_rig operator reads its source from this
            # scene property and writes onto the active object.
            bpy.context.scene.mix_source_armature = obj
            bpy.context.view_layer.objects.active = control_rig
            bpy.ops.mr.import_anim_to_rig()
            bpy.context.view_layer.objects.active = control_rig
            selected_action = control_rig.animation_data.action
            selected_action.name = 'ctrl_' + action_name
            # created_actions.append(selected_action)
            if push_nla:
                push(control_rig, selected_action, None, int(selected_action.frame_start))
            if delete_applied_armatures:
                bpy.context.view_layer.objects.active = control_rig
                deleteArmature(set([obj]))
if __name__ == "__main__":
    # Entry point when the script is run directly (e.g. pasted into
    # Blender's text editor) rather than installed as an add-on.
    dir_path = "" # If using script in place please set this before running.
    # NOTE(review): with dir_path left empty, get_all_anims('') will fail in
    # os.listdir — set dir_path first, as the comment above says.
    get_all_anims(dir_path)
    print("[Mixamo Root] Run as plugin, or copy script in text editor while setting parameter defaults.")
| RichardPerry/Mixamo-Root | mixamoroot.py | mixamoroot.py | py | 15,617 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bpy.ops.object.mode_set",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops",
"... |
25093423154 | from __future__ import print_function
import argparse
import codecs
import numpy as np
import json
import requests
"""
This file is part of the computer assignments for the course DD1418/DD2418 Language engineering at KTH.
Created 2017 by Johan Boye and Patrik Jonell.
"""
"""
This module computes the minimum-cost alignment of two strings.
"""
"""
When printing the results, only print BREAKOFF characters per line.
"""
BREAKOFF = 60
def compute_backpointers(s0, s1):
    """
    Compute the backpointer array (Jurafsky and Martin, Fig 3.27) arising
    from the minimal edit distance computation for strings s0 and s1.

    The returned array has three dimensions: the first two are the row and
    column of the distance table, and the third holds the [row, column] of
    the cell the backpointer points at. For example, if the backpointer
    from cell (5,5) leads to cell (5,4), then backptr[5][5] == [5, 4].

    Insertions and deletions cost 1; substitutions cost 2; matches cost 0.
    Ties are resolved in favour of the diagonal move, then the vertical one.

    :param s0: The first string.
    :param s1: The second string.
    :return: The backpointer array.
    """
    if s0 is None or s1 is None:
        raise Exception('Both s0 and s1 have to be set')
    rows, cols = len(s0) + 1, len(s1) + 1
    backptr = [[[0, 0] for _ in range(cols)] for _ in range(rows)]
    dist = [[0] * cols for _ in range(rows)]
    # First row: s0 is empty there, so each cell is reached by an insertion.
    for col in range(1, cols):
        dist[0][col] = col
        backptr[0][col] = [0, col - 1]
    # First column: s1 is empty there, so each cell is reached by a deletion.
    for row in range(1, rows):
        dist[row][0] = row
        backptr[row][0] = [row - 1, 0]
    # Fill the interior of the table.
    for row in range(1, rows):
        for col in range(1, cols):
            insertion = dist[row][col - 1] + 1
            deletion = dist[row - 1][col] + 1
            diagonal = dist[row - 1][col - 1]
            if s0[row - 1] != s1[col - 1]:
                diagonal += 2
            if diagonal <= insertion and diagonal <= deletion:
                dist[row][col] = diagonal
                backptr[row][col] = [row - 1, col - 1]
            elif deletion <= insertion:
                dist[row][col] = deletion
                backptr[row][col] = [row - 1, col]
            else:
                dist[row][col] = insertion
                backptr[row][col] = [row, col - 1]
    return backptr
def subst_cost(c0, c1):
    """
    Return the substitution cost for a pair of characters: 0 when they are
    identical (no actual substitution), 2 otherwise.
    """
    if c0 == c1:
        return 0
    return 2
def align(s0, s1, backptr):
    """
    Produce the best alignment of s0 and s1 by walking the backpointer
    array from the bottom-right corner back to the origin.

    The alignment is expressed by padding the inputs with spaces: for
    instance, "around" and "rounded" become "around " and " rounded".
    The two returned strings are built while walking backwards, so their
    characters are in REVERSE order relative to the original strings
    (print_alignment expects exactly this).

    :param s0: The first string.
    :param s1: The second string.
    :param backptr: Backpointer array as produced by compute_backpointers.
    :return: A list of exactly two strings: s0 padded with spaces at index
             0 and s1 padded with spaces at index 1, both reversed.
    """
    padded0 = []
    padded1 = []
    row, col = len(s0), len(s1)
    while (row, col) != (0, 0):
        prev_row, prev_col = backptr[row][col]
        if prev_row == row - 1 and prev_col == col - 1:
            # Diagonal move: match or substitution — emit both characters.
            padded0.append(s0[row - 1])
            padded1.append(s1[col - 1])
        elif prev_col == col - 1:
            # Horizontal move: s1 has an extra character, pad s0.
            padded0.append(' ')
            padded1.append(s1[col - 1])
        elif prev_row == row - 1:
            # Vertical move: s0 has an extra character, pad s1.
            padded0.append(s0[row - 1])
            padded1.append(' ')
        row, col = prev_row, prev_col
    return [''.join(padded0), ''.join(padded1)]
def print_alignment(s, breakoff=None):
    """
    Print two aligned strings (= strings padded with spaces).

    The padded strings are assumed to be in REVERSE order compared to the
    original strings (they are built while following backpointers from the
    end of the strings), so each chunk of up to *breakoff* columns is
    printed back-to-front to restore the original reading order. Between
    the two strings a line of '|' markers flags matching columns.

    :param s: An array of two equally long strings, representing
              the alignment of the two original strings.
    :param breakoff: Number of alignment columns per printed block;
                     defaults to the module-level BREAKOFF constant.
    """
    if s[0] is None or s[1] is None:
        return None
    if breakoff is None:
        breakoff = BREAKOFF
    start_index = len(s[0]) - 1
    # '>=' (not '>') so that column 0 is printed too: with '>' the final
    # column was silently dropped whenever len(s[0]) % breakoff == 1, and
    # single-character alignments printed nothing at all.
    while start_index >= 0:
        end_index = max(0, start_index - breakoff + 1)
        print_list = ['', '', '']
        for i in range(start_index, end_index - 1, -1):
            print_list[0] += s[0][i]
            # '|' marks columns where the two strings agree.
            print_list[1] += '|' if s[0][i] == s[1][i] else ' '
            print_list[2] += s[1][i]
        for x in print_list:
            print(x)
        start_index -= breakoff
def main():
    """
    Entry point: parse command line arguments, align the two inputs
    (either two literal strings or the contents of two files), and print
    the alignment.

    With --check, the alignment is also posted to the course grading
    server and compared against the server's reference alignment.
    """
    parser = argparse.ArgumentParser(description='Aligner')
    group = parser.add_mutually_exclusive_group(required=True)
    # Fix: the help texts for --file and --string were swapped — --file
    # takes two file paths and --string takes two literal strings.
    group.add_argument('--file', '-f', type=str, nargs=2, help='align the contents of two files')
    group.add_argument('--string', '-s', type=str, nargs=2, help='align two strings')
    parser.add_argument('--check', action='store_true', help='check if your alignment is correct')
    arguments = parser.parse_args()
    if arguments.file:
        f1, f2 = arguments.file
        # Read both files as UTF-8 and strip newlines so each input is one line.
        with codecs.open(f1, 'r', 'utf-8') as f:
            s1 = f.read().replace('\n', '')
        with codecs.open(f2, 'r', 'utf-8') as f:
            s2 = f.read().replace('\n', '')
    elif arguments.string:
        s1, s2 = arguments.string
    if arguments.check:
        # Submit the computed alignment to the grading server.
        payload = json.dumps({
            's1': s1,
            's2': s2,
            'result': align(s1, s2, compute_backpointers(s1, s2))
        })
        response = requests.post(
            'https://language-engineering.herokuapp.com/correct',
            data=payload,
            headers={'content-type': 'application/json'}
        )
        response_data = response.json()
        if response_data['correct']:
            print_alignment(align(s1, s2, compute_backpointers(s1, s2)))
            print('Success! Your results are correct')
        else:
            print('Your results:\n')
            print_alignment(align(s1, s2, compute_backpointers(s1, s2)))
            print("The server's results\n")
            print_alignment(response_data['result'])
            print("Your results differ from the server's results")
    else:
        print_alignment(align(s1, s2, compute_backpointers(s1, s2)))


if __name__ == "__main__":
    main()
| aljica/spraktek | assignment-1/Aligner/Aligner.py | Aligner.py | py | 8,035 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"... |
20176565752 | #!/usr/bin/env python
# -----------------------
# Supplementary Material for Deith and Brodie 2020; โPredicting defaunation โ accurately mapping bushmeat hunting pressure over large areasโ
# doi: 10.1098/rspb.2019-2677
#------------------------
# Code to iterate through GFLOW results files, modify the outputs based on
# human population density, and then sum this into a final cumulative
# accessibility map.
#
# This script uses maps created with 'MSYBorneo_ResistanceMapLayers_Preparation.py'
# and GFLOW.
# Created by Mairin Deith on Nov12 2017
# Last edited on Nov15 2019
#------------------------
# Import libraries
import os, sys
import subprocess
import gdal
import re
import glob
import rasterio as rio
import numpy as np
from pathlib import Path
from datetime import datetime
from collections import defaultdict
### HELPER FUNCTIONS
# From https://stackoverflow.com/questions/5419204/index-of-duplicates-items-in-a-python-list
# This function passes through a list and keeps a list of locations seen for each item,
# and returns items seen more than once
def list_duplicates(seq):
    """Yield (item, positions) pairs for every item of *seq* that occurs
    more than once, where positions is the list of its indices."""
    positions = defaultdict(list)
    for index, item in enumerate(seq):
        positions[item].append(index)
    return ((item, locs) for item, locs in positions.items() if len(locs) > 1)
def list_singles(seq):
    """Yield (item, positions) pairs for every item of *seq* that occurs
    exactly once, where positions is the one-element list of its index."""
    positions = defaultdict(list)
    for index, item in enumerate(seq):
        positions[item].append(index)
    return ((item, locs) for item, locs in positions.items() if len(locs) == 1)
### GLOBAL INFO
# File paths/gdalcalc paths
basedir = os.path.abspath('/home/mairin/Documents/GradSchool/Research/CircuitTheory_Borneo/Revised_CTMapCreation/')
basefname = 'ClusterOutput_Summation'
walk_types = ['AllNodes','VillageSabah','VillageSabahOther'] # types of nodes to identify from gazetteers
# For each walk type / river-passability scenario: locate the per-population
# GFLOW rasters (untarring if needed), weight each by expected hunter
# numbers, and accumulate everything into one cumulative .asc map.
for w in walk_types:
    if w == 'AllNodes':
        rivers = ['impassableRivers', 'passableRivers'] #, 'passableRivers'] # uncomment after untarred
    else:
        rivers = ['impassableRivers']
    for r in rivers:
        print(w+", " + r)
        src_dir = os.path.join(basedir, 'SourceSinks_WalkingOnly_'+w, r,'NodesTSV')
        out_dir = os.path.join(basedir, 'GFlowOutputs','ClusterFlexOutputs', 'SepTmpOutputs_Walking'+w, r)
        asc_dir = out_dir
        output_fname = basefname + '_Walking' + w + '_' + r + '.asc'
        cum_outmap = os.path.join(asc_dir, output_fname)
        if os.path.exists(cum_outmap): # cumulative map exists: ask to delete (restart) or resume
            exists = True
            delete = str(input("\n\nShould the existing map (%s) be deleted?\n'Y' for yes, 'N' for no > " %(os.path.basename(cum_outmap))))
            delete = delete.upper()
            while delete!='N' and delete!='Y':
                delete = str(input("Sorry, I was expecting either 'Y' or 'N'. \nShould the existing map %s be deleted?\n'Y' for yes, 'N' for no >" %(os.path.basename(cum_outmap))))
                delete = delete.upper()
            if delete=='Y':
                os.remove(cum_outmap)
                exists = False
            if delete=='N':
                exists = True
        else:
            exists = False
        ### WORK ON FILES TO GENERATE CALCULATION INFORMATION
        asc_list = glob.glob(asc_dir+"/output_temp_*")
        # Untar the files if they have not already been extracted
        if len(asc_list)==0:
            tar_file = glob.glob(asc_dir+"/*.tar.gz")
            print(len(tar_file))
            if len(tar_file) == 0:
                print("!!! \n No .tar or .asc files in %s, moving on...\n!!!" %(asc_dir))
                continue
            if len(tar_file) == 1:
                print("...Untarring " + tar_file[0])
                with open(os.devnull, 'wb') as devnull:
                    subprocess.check_call(['tar','-C',asc_dir,'-zxvf', tar_file[0]], stdout=devnull, stderr=subprocess.STDOUT)
                # os.system('tar -C ' + asc_dir + ' -zxvf ' + asc_dir + '/*.tar.gz')
                asc_list = glob.glob(asc_dir+"/output_temp_*")
            if len(tar_file) > 1:
                print("!!!\nMore than one .tar file in %s, moving on... \n!!!" %(asc_dir))
                continue
        # Modify file names to isolate just numbers
        iterpop = [a.replace('output_temp_', '').replace('_nodes', '').replace('.asc','') for a in asc_list]
        # Separate the number of iterations from population size (for lookups)
        niter = []
        p = []
        p_mult = []
        p_mult_tmp = []
        for a in iterpop:
            # File stems look like "<niter>_<population>..."; keep parallel lists.
            ntmp, ptmp = re.split("_", os.path.basename(a))[0:2]
            niter.append(int(ntmp))
            p.append(float(ptmp))
            multtmp = np.log10(float(ptmp)/78.54)*-0.054357+0.179789
            if multtmp < 0:
                multtmp = 0.0001
            p_mult_tmp.append(multtmp)
            p_mult.append(multtmp*float(ptmp))
        # Calculates the expected number of hunters based on mini lit review
        # Each population density was calculated within a 5km buffer; so divide
        # by 78.54 (approximate area of a 5km-radius circle) for density per km2
        # Find singles/duplicates:
        single_src = sorted(list_singles(p))
        dup_src = sorted(list_duplicates(p))
        ### CREATE EMPTY MAP OR READ EXISTING
        # Use same dimensions/transformation as asc_list[0]
        if exists == False:
            with rio.open(os.path.join(asc_dir,asc_list[0]), 'r') as ds:
                tmp_meta = ds.profile
                tmp_shape = ds.shape # read in height and width
            intmap = np.empty(tmp_shape, dtype=np.float32)
            print("Creating blank output map: %s" %(os.path.basename(cum_outmap)))
            with rio.open(cum_outmap, 'w', **tmp_meta) as ds:
                ds.write_band(1, intmap)
        elif exists == True:
            print("Reading existing cumulative map %s" %(os.path.basename(cum_outmap)))
            with rio.open(cum_outmap, 'r') as ds:
                intmap = ds.read()
            with rio.open(os.path.join(asc_dir,asc_list[0]), 'r') as ds:
                tmp_meta = ds.profile
        ### BEGIN CALCULATIONS
        print("Calculating single-source file maps...")
        save_counter = 0 # checkpoint the cumulative map every 10 processed files
        singles_counter = 0
        total = len(single_src)
        for s in single_src:
            singles_counter += 1
            print("\n...Processing map %s of %s...\n" %(singles_counter, total))
            ntmp = niter[int(s[1][0])]
            pmtmp = p_mult[int(s[1][0])]
            # Find corresponding source TSV file:
            # NOTE(review): `ptmp` here is stale — it still holds the value
            # from the LAST iteration of the parsing loop above; p[int(s[1][0])]
            # (== s[0]) was probably intended. Verify against the TSV names.
            src_f = glob.glob(src_dir+"/*%s*" %(str(ptmp)+"_"))[0]
            with open(src_f) as file:
                head = [next(file) for x in range(1)]
            # Number of sources is just the first # of the second column minus 1
            nsource = int(re.split("\t", head[0])[1].replace("\n", ""))-1
            asc_tmp = os.path.join(asc_dir, asc_list[int(s[1][0])])
            print("......Opening file: %s" %(os.path.basename(asc_tmp)))
            with rio.open(asc_tmp, 'r') as ds:
                tmp = ds.read()
            tmp[tmp == -9999] = 0
            # Weight the raster by source count and hunter multiplier, average over iterations.
            intmap = intmap + ((tmp * nsource * pmtmp) / ntmp)
            # NOTE(review): asc_tmp[0] is the first *character* of the path, so
            # this touches a one-letter marker file; os.path.basename(asc_tmp)
            # was probably intended.
            Path(os.path.join(asc_dir, "Processed", os.path.basename(asc_tmp[0]))).touch()
            save_counter += 1
            if save_counter == 10:
                with rio.open(cum_outmap, 'w', **tmp_meta) as ds:
                    ds.write(intmap)
                save_counter = 0
            with open(os.devnull, 'wb') as devnull:
                subprocess.check_call(['rm',asc_tmp],stdout=devnull, stderr=subprocess.STDOUT)
        if total!=0:
            with rio.open(cum_outmap, 'w', **tmp_meta) as ds:
                ds.write(intmap)
        total = len(dup_src)
        dup_counter = 0
        for d in dup_src:
            dup_counter += 1
            print("\n...Processing map %s of %s...\n" %(dup_counter, total))
            # Population at 0th index, location at 1st
            ptmp = d[0]
            pmtmp = p_mult[int(d[1][0])]
            for f in range(len(d[1])):
                ntmp = niter[int(d[1][f])]
                src_f = glob.glob(src_dir+"/*%s*" %(str(ptmp)+"_"))[f]
                with open(src_f) as file:
                    head = [next(file) for x in range(1)]
                nsource = int(re.split("\t", head[0])[1].replace("\n", ""))-1
                asc_tmp = os.path.join(asc_dir, asc_list[int(d[1][f])])
                print("......Opening file: %s" %(os.path.basename(asc_tmp)))
                with rio.open(asc_tmp, 'r') as ds:
                    tmp = ds.read()
                tmp[tmp == -9999] = 0
                intmap = intmap + ((tmp * nsource * pmtmp) / ntmp)
                # NOTE(review): same asc_tmp[0] issue as in the singles loop above.
                Path(os.path.join(asc_dir, "Processed", os.path.basename(asc_tmp[0]))).touch()
                save_counter += 1
                if save_counter >= 10:
                    with rio.open(cum_outmap, 'w', **tmp_meta) as ds:
                        ds.write(intmap)
                    save_counter = 0
                with open(os.devnull, 'wb') as devnull:
                    subprocess.check_call(['rm',asc_tmp],stdout=devnull, stderr=subprocess.STDOUT)
        # Final write of the cumulative map for this scenario.
        with rio.open(cum_outmap, 'w', **tmp_meta) as ds:
            ds.write(intmap)
| mairindeith/DeithBrodie2020_PredictingDefaunationBorneo | Circuit-theory simulations/GFLOWOutput_Summation.py | GFLOWOutput_Summation.py | py | 9,286 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.pa... |
29990285621 | ## The wext merged datafile
import sys
# Command line: <input_csv> <merged_pairs_tsv> <output_fragment_file> <score_cutoff>
input_file = sys.argv[1]
data_file = sys.argv[2]
output_file = sys.argv[3]
cutoff = float(sys.argv[4])
#cutoff = 5
import pandas as pd
from sklearn.metrics import precision_recall_curve # NOTE(review): unused in this script
from random import random
import math
from scipy.stats import chi2
import numpy as np
import logging
#filename =
# NOTE(review): sys.argv[0].split('/')[0] is the FIRST component of the
# script path (often '' for absolute paths); [-1] was probably intended
# to get the script's base name.
logging.basicConfig(filename="log/"+ sys.argv[0].split('/')[0] + ".log", level=logging.INFO)
#logging.getLogger().setLevel('DEBUG')
logging.debug("Data file: "+data_file)
logging.debug("Output file: "+output_file)
logging.debug("cuttoff: "+str(cutoff))
# Load the merged WExT result table (tab separated).
data = pd.read_csv(data_file, delimiter='\t')
logging.debug(data.columns)
data = data[(data['Index1'] != "Index1")] # get rid of extra headers from cat
data['Distance'] = data['Distance'].apply(int)
data['P-Value'] = data['P-Value'].apply(float)
data = data[data['Distance'] > 0]
data = data.drop_duplicates()
logging.debug("Imported data. DF size:"+str(data.shape))
# Clean p-values (fill NaN/-inf with 1, drop non-positive values), then derive:
#   'True' — the two gene names end with the same trailing character
#   'Pair' — the gene names with that trailing character stripped, joined by ':'
data_sorted = data.fillna(1).replace(float('-inf'), 1).sort_values(by = 'P-Value')
data_sorted = data_sorted[data_sorted['P-Value'] > 0]
data_sorted['True'] = data_sorted.apply(lambda x: x['Gene1'][-1] == x['Gene2'][-1], axis=1)
data_sorted['Pair'] = data_sorted.apply(lambda x: x['Gene1'][:-1] + ':' + x['Gene2'][:-1], axis = 1)
data_sorted['Index1'] = data_sorted['Index1'].apply(int)
data_sorted['Index2'] = data_sorted['Index2'].apply(int)
data_sorted['log p'] = data_sorted['P-Value'].apply(math.log)
logging.debug("Sorted data. DF size:"+str(data_sorted.shape))
def p_value(x):
    """Combine p-values Fisher-style: return the log survival probability
    of the statistic -2*x under a chi-squared distribution with 4 degrees
    of freedom (x is expected to be a sum of two log p-values)."""
    degrees_of_freedom = 4
    return chi2.logsf(-2 * x, degrees_of_freedom)
def is_correct(x):
    """Classify a signed score: negative means correct; an exact zero is
    broken with a fair coin flip (nondeterministic)."""
    return x < 0 if x != 0 else random() < 0.5
# Aggregate the log p-values per (Pair, Distance, True) group and combine
# each group's total with p_value() (Fisher's method, 4 df). 'True' rows
# keep the (negative) combined log p, mismatched rows get its negation.
diff = data_sorted
diff = diff[['Pair', 'Distance', 'log p', 'True']].groupby(['Pair', 'Distance', 'True']).sum()
diff.reset_index(inplace=True)
diff['log p joint'] = diff['log p'].apply(p_value)
diff['log p sum'] = diff.apply(lambda x: x['log p joint'] if x['True'] else -x['log p joint'], axis=1)
logging.debug("Created diff. Diff size:" + str(diff.shape))
# Net signed score per (Pair, Distance): a negative total means the
# same-tag ('True') evidence dominates, hence 'correct' below.
div = diff[['Pair', 'Distance', 'log p sum']].groupby(by = ['Pair','Distance']).sum()
div = div[div['log p sum'] != 0]
div['log p abs'] = div['log p sum'].apply(abs)
div['correct'] = div['log p sum'].apply(lambda x: x < 0)
div['correct2'] = div['log p sum'].apply(is_correct)
div.reset_index(inplace=True)
logging.debug("Created div. Div size:" + str(div.shape))
logging.debug("Score span: " + str(div['log p sum'].max()) +"\t"+ str(div['log p sum'].min()))
import vcf # NOTE(review): imported but unused in this script
# Build a map from variant position ('loc' column of the input CSV) to the
# 1-based row index, used when writing fragments below.
pos_index_map = {}
data = pd.read_csv(input_file)
pos_index_map = {} # NOTE(review): redundant re-initialisation (also set two lines above)
def make_pos_index_map(x):
    # x.name is the row's positional index; +1 makes it 1-based.
    pos_index_map[x['loc']] = x.name + 1
data.apply(make_pos_index_map, axis=1)
def write_fragment(cutoff, output_file):
    """Write a HapCUT-style fragment file for every (Pair, Distance) row of
    the module-level `div` table whose absolute combined score exceeds
    *cutoff*.

    For each retained pair a pseudo-fragment covering the two variant
    positions is emitted: a negative score ('correct') yields equal
    alleles (00 or 11, chosen at random), otherwise opposite alleles
    (10 or 01). Adjacent variants (index distance 1) are written as a
    single-block fragment, all others as a two-block fragment.

    Also reads the module-level `pos_index_map` for variant indices.
    """
    with open(output_file,'w') as out:
        dclip = div[div['log p abs'] > cutoff]
        #dclip = div
        for i in range(len(dclip)):
            d = dclip.iloc[i]
            # 'Pair' is "pos1:pos2"; order the two positions numerically.
            v1 = min(map(int, d['Pair'].split(':')))
            v2 = max(map(int, d['Pair'].split(':')))
            i1 = pos_index_map[v1]
            i2 = pos_index_map[v2]
            a1 = "0" # NOTE(review): a1/a2 are assigned but never used below
            a2 = "0"
            if d['log p sum'] < 0:
                if random() > 0.5: allele = "00"
                else: allele = "11"
            else:
                if random() > 0.5: allele = "10"
                else: allele = "01"
            # q: per-allele quality characters — TODO confirm fragment format.
            if abs(i1 - i2) == 1:
                q = '++'
                line = "1 {0} {1} {2} {3}\n".format(d['Pair'], i1, allele, q)
            else:
                q = '++'
                line = "2 {0} {1} {2} {3} {4} {5}\n".format(d['Pair'], i1, allele[0], i2, allele[1], q)
            out.write(line)
write_fragment(cutoff, output_file)
| raphael-group/SC-hap | scripts/create_hapcut_input_fishers.py | create_hapcut_input_fishers.py | py | 3,765 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": ... |
73550094432 | import numpy as np
from sympy import symbols, pi, sin, cos, atan2, sqrt, simplify
from sympy.matrices import Matrix
import tf
"""
Test file for building the Kuka 6 DoF manipulator's forward and inverse
kinematic code.
FK(thetas) -> pose
IK(pose) -> thetas
"""
def build_mod_dh_matrix(s, theta, alpha, d, a):
    """Build the modified-DH transformation matrix for one joint and
    substitute in the DH parameter values from *s*.

    :param s: Dictionary of DH parameters for the manipulator
    :param theta: joint angle (sympy symbol)
    :param alpha: link twist (sympy symbol)
    :param d: link offset (sympy symbol)
    :param a: link length (sympy symbol)
    :return: 4x4 sympy Matrix of the homogeneous DH transform
    """
    ct, st = cos(theta), sin(theta)
    ca, sa = cos(alpha), sin(alpha)
    transform = Matrix([
        [ct,      -st,     0,   a],
        [st * ca, ct * ca, -sa, -sa * d],
        [st * sa, ct * sa, ca,  ca * d],
        [0,       0,       0,   1],
    ])
    # Substitute the numeric DH parameters into the symbolic template.
    return transform.subs(s)
def rot_x(q):
    """Elementary 3x3 rotation matrix about the x-axis by angle q."""
    c, s = cos(q), sin(q)
    return Matrix([[1, 0, 0],
                   [0, c, -s],
                   [0, s, c]])
def rot_y(q):
    """Elementary 3x3 rotation matrix about the y-axis by angle q."""
    c, s = cos(q), sin(q)
    return Matrix([[c, 0, s],
                   [0, 1, 0],
                   [-s, 0, c]])
def rot_z(q):
    """Elementary 3x3 rotation matrix about the z-axis by angle q."""
    c, s = cos(q), sin(q)
    return Matrix([[c, -s, 0],
                   [s, c, 0],
                   [0, 0, 1]])
# Conversion factors between radians and degrees
# NOTE(review): rtd is defined but never used below.
rtd = 180 / pi
dtr = pi / 180
# Define DH parameter symbols
theta1, theta2, theta3, theta4, theta5, theta6, theta7 = symbols('theta1:8')
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8') # link offsets
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7') # link lengths
# Modified DH params for KUKA KR210
s = {alpha0: 0, d1: 0.75, a0: 0,
     alpha1: -pi/2, d2: 0, a1: 0.35, theta2: theta2 - pi/2,
     alpha2: 0, d3: 0, a2: 1.25,
     alpha3: -pi/2, d4: 1.50, a3: -0.054,
     alpha4: pi/2, d5: 0, a4: 0,
     alpha5: -pi/2, d6: 0, a5: 0,
     alpha6: 0, d7: 0.303, a6: 0, theta7: 0,}
# EE location and orientation
#px = req.poses[x].position.x
#py = req.poses[x].position.y
#pz = req.poses[x].position.z
#(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(
#    [req.poses[x].orientation.x, req.poses[x].orientation.y,
#     req.poses[x].orientation.z, req.poses[x].orientation.w])
# Test pose and orientation of the end effector with all angles at 0 degrees
px = 2.1529
py = 0.0
pz = 1.9465
roll = 0.0
pitch = 0.0
yaw = 0.0
##############################################################################
# Step 1: Convert pose and orientation into a transformation matrix to
# compute the wrist center
# Build EE rotation matrix from the roll/pitch/yaw angles
Rrpy = rot_z(yaw) * rot_y(pitch) * rot_x(roll)
# First column of Rrpy (presumably the gripper approach axis — confirm).
lx = Rrpy[0, 0]
ly = Rrpy[1, 0]
lz = Rrpy[2, 0]
# Calculate the wrist center (test should be (1.85, 0, 1.947)) by stepping
# back from the EE position along that axis by d6 + d7.
wx = px - (s[d7] + s[d6]) * lx
wy = py - (s[d7] + s[d6]) * ly
wz = pz - (s[d7] + s[d6]) * lz
#print('WC location: (%s, %s, %s)' % (wx, wy, wz))
##############################################################################
# Step 2: Calculate thetas for joint 1, 2 and 3 (determines EE position)
# Determine the angle for joint 1
# NOTE(review): from here on theta1..theta6 are rebound from sympy symbols
# to numeric values; the subs dict in the R0_3 evalf below therefore only
# maps values to themselves.
theta1 = atan2(wy, wx).evalf()
theta1 = np.clip(theta1, -185*dtr, 185*dtr)
# Find the coordinates of x2, y2 and z2 considering theta 1
x2 = s[a1] * cos(theta1)
y2 = s[a1] * sin(theta1)
z2 = s[d1]
# Find the x, y and z distances between joint 2 and the wrist center
X2_WC = wx - x2
Y2_WC = wy - y2
Z2_WC = wz - z2
# Find the distances between joint 2 and the wrist center
L2_WC = sqrt(X2_WC**2 + Y2_WC**2 + Z2_WC**2)
# Find the distance between joint 2 and the wrist center
L3_4 = 0.96 # Distance from joint 3 to joint 4
L4_5 = 0.54 # Distance from joint 4 to joint 5 (WC)
L3_4_x = sqrt(L3_4**2 + abs(s[a3])**2) # X distance from joint 3 to joint 4
phi1 = pi - atan2(abs(s[a3]), L3_4_x)
# Law of cosines across the joint-3 elbow.
L3_WC = sqrt(L3_4**2 + L4_5**2 - 2 * L3_4 * L4_5 * cos(phi1))
# Determine the angle for joint 3
cos_phi2 = (L2_WC**2 - L3_WC**2 - s[a2]**2) / (-2 * L3_WC * s[a2])
# Clamp numerically out-of-range cosines (unreachable pose) to 1.
if abs(cos_phi2) > 1:
    cos_phi2 = 1
    print('cos_phi2 is greater than 1')
phi2 = atan2(sqrt(1 - cos_phi2**2), cos_phi2)
theta3 = (pi/2 - phi2).evalf()
theta3 = np.clip(theta3, -210*dtr, (155-90)*dtr)
# Determine the angle for joint 2
L2_WC_xy = sqrt(X2_WC**2 + Y2_WC**2)
phi3 = atan2(Z2_WC, L2_WC_xy)
cos_phi4 = (L3_WC**2 - L2_WC**2 - s[a2]**2) / (-2 * L2_WC * s[a2])
if abs(cos_phi4) > 1:
    cos_phi4 = 1
    print('cos_phi4 is greater than 1')
phi4 = atan2(sqrt(1 - cos_phi4**2), cos_phi4)
theta2 = (pi/2 - (phi3 + phi4)).evalf()
theta2 = np.clip(theta2, -45*dtr, 85*dtr)
##############################################################################
# Step 3: Determine the rotation matrix for the spherical wrist joints
# Build the transformation matrices of the first 3 joints
T0_1 = build_mod_dh_matrix(s=s, theta=theta1, alpha=alpha0, d=d1, a=a0)
T1_2 = build_mod_dh_matrix(s=s, theta=theta2, alpha=alpha1, d=d2, a=a1)
T2_3 = build_mod_dh_matrix(s=s, theta=theta3, alpha=alpha2, d=d3, a=a2)
# Rotation matrix of the first three joints
R0_3 = (T0_1 * T1_2 * T2_3).evalf(subs={theta1: theta1,
                                        theta2: theta2,
                                        theta3: theta3})[0:3, 0:3]
# Correction to account for orientation difference between definition of
# gripper link in the URDF file and the DH convention.
# (rotation around Z axis by 180 deg and Y axis by -90 deg)
R_corr = simplify(rot_z(pi) * rot_y(-pi/2))
# Calculate the symbolic rotation matrix of the spherical wrist joints
R3_6 = R0_3.T * Rrpy * R_corr
##############################################################################
# Step 4: Calculate the spherical wrist joint angles by converting the
# the rotation matrix to Euler angles
# tf requires a numpy matrix instead of a sympy matrix
R3_6_np = np.array(R3_6).astype(np.float64)
# Convert the rotation matrix to Euler angles using tf
alpha, beta, gamma = tf.transformations.euler_from_matrix(
    R3_6_np, axes='rxyz') # xyx, yzx, xyz
theta4 = alpha
theta5 = beta
theta6 = gamma
# NOTE(review): empirical +/- pi/2 offsets between the 'rxyz' Euler
# convention and the joint zero positions — confirm against the rig.
theta4 = np.pi/2 + theta4
theta5 = np.pi/2 - theta5
#theta6 = theta6 - 2*np.pi
#r11 = R3_6[0, 0]
#r12 = R3_6[0, 1]
#r13 = R3_6[0, 2]
#r21 = R3_6[1, 0]
#r31 = R3_6[2, 0]
#r32 = R3_6[2, 1]
#r33 = R3_6[2, 2]
#
## Pitch angle; rotation around the y-axis
#theta5 = atan2(-r31, sqrt(r11**2 + r21**2)).evalf()
#theta5 = np.clip(theta5, -125*dtr, 125*dtr)
#
#if r31 == 1:
#    # Gimbal lock at pitch = -90
#    theta4 = 0 # yaw = 0
#    theta6 = atan2(-r12, -r13).evalf() # roll
#    print('Gimbal lock at pitch = -90')
#elif r31 == -1:
#    # Gimal lock at pitch = 90
#    theta4 = 0 # yaw = 0
#    theta6 = atan2(r12, r13).evalf() # roll
#    print('Gimbal lock at pitch = 90')
#else:
#    # General orientation
#
#    # Yaw angle; rotation around the z-axis
#    theta4 = (atan2(r21, r11)).evalf()
#    theta4 = np.clip(theta4, -350*dtr, 350*dtr)
#
#    # Roll angle; rotation around the x-axis
#    theta6 = (atan2(r32, r33)).evalf()
#    theta6 = np.clip(theta6, -350*dtr, 350*dtr)
print('Theta 1: %s' % theta1)
print('Theta 2: %s' % theta2)
print('Theta 3: %s' % theta3)
print('Theta 4: %s' % theta4)
print('Theta 5: %s' % theta5)
print('Theta 6: %s' % theta6)
| camisatx/RoboticsND | projects/kinematics/kuka_kr210/kuka_ik.py | kuka_ik.py | py | 7,730 | python | en | code | 57 | github-code | 1 | [
{
"api_name": "sympy.matrices.Matrix",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sympy.cos",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sympy.sin",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sympy.sin",
"line_numbe... |
354278636 | from html_parser import MyHTMLParser
import urllib.request
from bs4 import BeautifulSoup
import requests
from language_detecter import LanguageDetector
parser = MyHTMLParser() # NOTE(review): created but never used below
#url = "https://www.vpnverbinding.nl/beste-vpn/netflix/"
url = "https://www.vpnconexion.es/blog/mejor-vpn-para-netflix/?_ga=2.224715098.1306859094.1600959792-1235625754.1600959792"
# Fetch the page with a browser-like User-Agent header.
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
response = urllib.request.urlopen(req)
html = response.read() # NOTE(review): unused — the page is fetched again with requests below
page = requests.get(url).text
soup = BeautifulSoup(page, "html.parser")
print("Analyzing....")
print(soup.title.string)
# Get the webpage language from the <html lang="..."> attribute,
# normalised to locale form (e.g. "es-ES" -> "es_ES").
language = soup.html["lang"].replace("-","_")
print("The language webpage is: "+language)
lang_validate = LanguageDetector(language)
print("-----------")
# Find the titles h3, h2, h1 plus the text in p, div and span inside the divs.
contentTable = soup.find('div')
rows = contentTable.find_all(['h3', 'h2', 'h1', 'p', 'div', 'span', 'img', 'li', 'ul'])
for row in rows:
    if not (row.string is None):
        #print(row.string)
        # Collect the text blocks found in the page (a set avoids repeats).
        lang_validate.html_blocks.add(row)
# Check each collected block against the detected language's word set.
for block in lang_validate.html_blocks:
    lang_validate.is_in_setlanguage(block.string.strip())
print("-----# blocks ------> "+str(len(lang_validate.html_blocks)))
print("=========================")
print("Words Not translated....")
print("=========================")
#lang_validate.clear_not_translated_words()
for i in lang_validate.not_translated:
    print(i)
{
"api_name": "html_parser.MyHTMLParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.Request",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 12,
"usage_type": "attribute"
},
{
... |
12287855696 | import cv2
import numpy as np
from calibrate_frame import *
from socket import gethostname
class Camera(object):
    """
    Camera access wrapper.

    Wraps an OpenCV VideoCapture on the given port; with test == 1 a
    static pitch image is returned instead of a live frame.
    """

    def __init__(self, pitch=0, port=0, test = 0):
        self.capture = cv2.VideoCapture(port)
        self.pitch = pitch  # pitch index, forwarded to the calibration step()
        self.test = test    # 0 = live capture, 1 = static test image

    def get_frame(self, radial_dist=0):
        """
        Retrieve a frame from the camera.

        Returns the (calibrated) frame if available, otherwise returns None.

        :param radial_dist: unused; kept for interface compatibility.
        """
        if self.test == 0:
            status, frame = self.capture.read()
            # Fix: a failed read yields status == False and an invalid frame;
            # return None as the docstring promises instead of passing the
            # invalid frame to step().
            if not status:
                return None
            frame = step(frame, self.pitch)
        elif self.test == 1:
            frame = cv2.imread('pitch0.png')
        return frame

    def close(self):
        """Release the underlying capture device."""
        self.capture.release()
| pbsinclair42/SDP-2016 | vision/camera.py | camera.py | py | 757 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 28,
"usage_type": "call"
}
] |
27178702909 | from flask import Flask, request, render_template
# In-memory stand-in for a student database.
students = [
    {'studentNo': '10001', 'studentName': 'Student 1'},
    {'studentNo': '10002', 'studentName': 'Student 2'},
]
app = Flask(__name__)
@app.route('/')
def index():
    """Home page: render the student list with the index.html template."""
    return render_template('index.html', students=students)
# NOTE(review): app.run() executes at import time; wrapping it in an
# `if __name__ == '__main__':` guard is the usual pattern.
app.run(debug=True)
| pytutorial/flask_students1 | app.py | app.py | py | 320 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
}
] |
26205226971 | #!/usr/bin/env python3
import json
import logging
from watchdog.events import FileSystemEventHandler, FileModifiedEvent
from watchdog.observers import Observer
import xml.etree.ElementTree as ET
logger = logging.getLogger(__name__)
class IoMBianAvahiServicesFileHandler(FileSystemEventHandler):
    """Watch the IoMBian Avahi service file and report advertised services.

    The file is an Avahi XML service definition whose ``<txt-record>``
    entries have the form ``name=<json>``.  Whenever the file changes the
    records are re-parsed and handed to the registered callback as a dict
    mapping service name to its decoded JSON info.
    """
    def __init__(self, file_path="/etc/avahi/services/iombian.service"):
        self.file_path = file_path
        self.tree = None  # parsed XML tree of the service file
        self.services_discovered_callback = None
        self.observer = None
        self.load_file()
    def start(self):
        """Start watching the file and emit the currently listed services."""
        logger.debug("Starting Avahi Service File Handler")
        if self.observer:
            logger.error("Service already started")
            return
        self.observer = Observer()
        self.observer.schedule(self, self.file_path)
        self.observer.start()
        self.load_services()
    def stop(self):
        """Stop the underlying watchdog observer, if running."""
        logger.debug("Stopping Avahi Service File Handler")
        if self.observer:
            self.observer.stop()
            self.observer.join()
    def add_services_discovered_callback(self, callback):
        """Register the single callback invoked with the services dict."""
        if self.services_discovered_callback:
            # logging.warn() is deprecated; use warning().
            logger.warning("Services discovered callback already set")
            return
        self.services_discovered_callback = callback
    def load_file(self):
        """(Re-)parse the XML service file into ``self.tree``."""
        self.tree = ET.parse(self.file_path)
    def on_modified(self, event):
        if isinstance(event, FileModifiedEvent):
            logger.debug(f"Avahi file ('{self.file_path}') has been modified")
            # Fix: re-parse the file first — the tree cached at construction
            # time would otherwise never reflect the modification that
            # triggered this event.
            self.load_file()
            self.load_services()
    def load_services(self):
        """Parse all txt-records and notify the callback, if one is set."""
        services = {}
        txt_records_elements = self.tree.findall(".//txt-record")
        for txt_record_element in txt_records_elements:
            txt_record = txt_record_element.text
            service_name, service_info = txt_record.split("=")
            services[service_name] = json.loads(service_info)
        if self.services_discovered_callback:
            self.services_discovered_callback(services)
| Tknika/iombian-services-uploader | src/iombian_avahi_services_file_handler.py | iombian_avahi_services_file_handler.py | py | 1,997 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "watchdog.events.FileSystemEventHandler",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "watchdog.observers.Observer",
"line_number": 26,
"usage_type": "call"
},
{
... |
12047781262 | import argparse
import json
from pyspark.sql import SparkSession
def main(input_hfs_path,
         outliers_output_hfs_path,
         clean_output_hfs_path,
         config):
    """Run the configured chain of outlier filters over a text corpus.

    Reads lines from ``input_hfs_path``, applies each filter from
    ``config["filters"]`` in order, writes the filtered-out (outlier) lines
    with their original index and filter index to
    ``outliers_output_hfs_path``, and optionally writes the surviving lines
    to ``clean_output_hfs_path``.
    """
    # Imported lazily so Spark workers resolve the package at job time.
    from filters.api import resolve_filter
    spark = SparkSession \
        .builder \
        .appName("TextOutlier") \
        .getOrCreate()
    sc = spark.sparkContext
    raw_data = sc.textFile(input_hfs_path)
    print("Read from {}".format(input_hfs_path))
    filtered_rdds = []
    # Key each line by its original position so outliers can be joined back
    # to their source text after filtering: (index, line).
    original_data = raw_data \
        .zipWithIndex() \
        .map(lambda _: (_[1], _[0]))
    data = original_data
    if config["show_counts"]:
        # Counting forces an extra pass over the data, hence the toggle.
        total_count = data.count()
        remaining_count = total_count
    else:
        total_count = remaining_count = -1
    total_filtered_count = 0
    for filter_index, filter_config in enumerate(config["filters"]):
        filter_instance = resolve_filter(filter_config)
        print("Running [{}] {}".format(
            filter_index, filter_instance.short_name
        ))
        # Each filter splits the stream into (kept, rejected) RDDs.
        data, filtered_data = filter_instance.run_filter(
            data=data,
            filter_index=filter_index,
        )
        filtered_rdds.append(filtered_data)
        if config["show_counts"]:
            filtered_count = filtered_data.count()
            total_filtered_count += filtered_count
            print("    Filtered out {} observation{} -- "
                  "{:.2f}% of total, {:.2f}% of remainder".format(
                filtered_count,
                "" if filtered_count == 1 else "s",
                filtered_count / total_count * 100,
                filtered_count / remaining_count * 100
            ))
            remaining_count -= filtered_count
    if config["show_counts"]:
        print("ORIGINAL:  {} ({:.2f}%)".format(
            total_count, 100,
        ))
        print("FILTERED:  {} ({:.2f}%)".format(
            total_filtered_count, total_filtered_count / total_count * 100,
        ))
        print("REMAINING: {} ({:.2f}%)".format(
            remaining_count, remaining_count / total_count * 100,
        ))
    filtered_rdd = sc.union(filtered_rdds)
    # Re-attach the original text to every rejected index:
    # (index, original_line, filter_index).
    outliers = filtered_rdd \
        .leftOuterJoin(original_data) \
        .map(lambda _: (_[0], _[1][1], _[1][0]))
    print("Writing outliers to {}".format(outliers_output_hfs_path))
    outliers.saveAsTextFile(outliers_output_hfs_path)
    if clean_output_hfs_path:
        print("Writing clean output to {}".format(clean_output_hfs_path))
        data.saveAsTextFile(clean_output_hfs_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments, load the JSON config, run main().
    arg_parser = argparse.ArgumentParser(description='Textual Outlier Detection')
    arg_parser.add_argument("--input_hfs_path", type=str, required=True)
    arg_parser.add_argument("--outliers_output_hfs_path", type=str, required=True)
    arg_parser.add_argument("--clean_output_hfs_path",
                            type=str, required=False, default="")
    arg_parser.add_argument("--config_json_path", type=str, required=True)
    cli_args = arg_parser.parse_args()
    with open(cli_args.config_json_path, "r") as config_file:
        loaded_config = json.loads(config_file.read())
    main(
        input_hfs_path=cli_args.input_hfs_path,
        outliers_output_hfs_path=cli_args.outliers_output_hfs_path,
        clean_output_hfs_path=cli_args.clean_output_hfs_path,
        config=loaded_config,
    )
| zphang/big_data_proj | main.py | main.py | py | 3,316 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 12,
"usage_type"... |
72593883233 | """ pretrain a word2vec on the corpus"""
import argparse
import os
from os.path import join, exists
from time import time
from datetime import timedelta
import gensim
class Sentences(object):
    """Iterable of tokenised report texts, as required by gensim word2vec.

    Each input line has the shape ``report_id|,|text|,|classes``; iterating
    yields the lower-cased, whitespace-split token list of the text field.
    """
    def __init__(self, data_path):
        with open(data_path, 'r') as handle:
            self.lines = handle.readlines()
    def __iter__(self):
        for record in self.lines:
            _report_id, txt, _classes = record.strip('\n').split('|,|')
            yield txt.lower().split()
def main(args):
    """Train a skip-gram word2vec model on the corpus and save it.

    Saves both the full gensim model (``.bin``) and the plain word2vec
    vector format (``.w2v``) under ``args.path``, with the vector dimension
    and vocabulary size encoded in the file names.
    """
    start = time()
    save_dir = args.path
    if not exists(save_dir):
        os.makedirs(save_dir)
    sentences = Sentences(args.data_path)
    # NOTE(review): `size=` and `model.iter` are gensim < 4.0 API names
    # (renamed to `vector_size` / `model.epochs` in 4.x) — confirm the
    # pinned gensim version before upgrading.
    model = gensim.models.Word2Vec(
        size=args.dim, min_count=5, workers=16, sg=1)
    model.build_vocab(sentences)
    print('vocab built in {}'.format(timedelta(seconds=time()-start)))
    model.train(sentences,
                total_examples=model.corpus_count, epochs=model.iter)
    model.save(join(save_dir, 'word2vec.{}d.{}.bin'.format(
        args.dim, len(model.wv.vocab))))
    model.wv.save_word2vec_format(join(
        save_dir,
        'word2vec.{}d.{}.w2v'.format(args.dim, len(model.wv.vocab))
    ))
    print('word2vec trained in {}'.format(timedelta(seconds=time()-start)))
if __name__ == '__main__':
    # CLI: configure corpus path, output root and vector dimension.
    cli = argparse.ArgumentParser(
        description='train word2vec embedding used for model initialization'
    )
    cli.add_argument("--data_path", type=str, default='./tc_data/track1_round1_train_20210222.csv')
    cli.add_argument('--path', type=str, default='./user_data/model_data/', help='root of the model')
    cli.add_argument('--dim', action='store', type=int, default=256)
    main(cli.parse_args())
| behome/tianchi | code/train_word2vec.py | train_word2vec.py | py | 1,821 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec",
"li... |
25476349860 | # -*- coding: utf-8 -*-
import datetime
from pathlib import Path
import emoji
import os
import re
from logzero import logger as log
from peewee import fn
from telegram import (
ForceReply,
InlineKeyboardButton,
InlineKeyboardMarkup,
KeyboardButton,
ReplyKeyboardMarkup,
TelegramError,
)
from telegram.ext import ConversationHandler, DispatcherHandlerStop, Job, run_async
from typing import Dict
from botlistbot import appglobals
from botlistbot import captions
from botlistbot import helpers
from botlistbot import mdformat
from botlistbot import settings
from botlistbot import util
from botlistbot.appglobals import db
from botlistbot.components.lookup import lookup_entity
from botlistbot.const import *
from botlistbot.const import BotStates, CallbackActions
from botlistbot.custemoji import Emoji
from botlistbot.dialog import messages, emojis
from botlistbot.models import Bot, Category, Revision, Statistic, Suggestion, User, track_activity
from botlistbot.util import restricted
@run_async
@track_activity("menu", "Administration", Statistic.ANALYSIS)
@restricted
def menu(bot, update):
    """Show the administration menu keyboard and enter the ADMIN_MENU state.

    Admins additionally get the send-botlist and activity-log buttons.
    """
    uid = update.effective_user.id
    is_admin = uid in settings.ADMINS
    buttons = _admin_buttons(send_botlist_button=is_admin, logs_button=is_admin)
    txt = "๐ Administration menu. Current revision: {}".format(
        Revision.get_instance().nr
    )
    bot.formatter.send_message(
        uid, txt, reply_markup=ReplyKeyboardMarkup(buttons, resize_keyboard=True)
    )
    return BotStates.ADMIN_MENU
def _admin_buttons(send_botlist_button=False, logs_button=False):
    """Build the admin-menu reply keyboard.

    Rows for pending approvals/suggestions/updates only appear when the
    corresponding count is non-zero; the count is shown on the button.
    """
    n_unapproved = len(Bot.select().where(Bot.approved == False, Bot.disabled == False))
    n_suggestions = len(Suggestion.select_all())
    n_pending = len(Bot.select_pending_update())
    second_row = list()
    if n_unapproved > 0:
        second_row.append(
            KeyboardButton(
                captions.APPROVE_BOTS
                + " {}๐".format(mdformat.number_as_emoji(n_unapproved))
            )
        )
    if n_suggestions > 0:
        second_row.append(
            KeyboardButton(
                captions.APPROVE_SUGGESTIONS
                + " {}โ๏ธ".format(mdformat.number_as_emoji(n_suggestions))
            )
        )
    buttons = [
        [KeyboardButton(captions.EXIT), KeyboardButton(captions.REFRESH)],
        [
            KeyboardButton(captions.FIND_OFFLINE),
            KeyboardButton(captions.SEND_CONFIG_FILES),
        ],
    ]
    update_row = list()
    if n_pending > 0:
        update_row.append(
            KeyboardButton(
                captions.PENDING_UPDATE
                + " {}{}".format(
                    mdformat.number_as_emoji(n_pending),
                    captions.SUGGESTION_PENDING_EMOJI,
                )
            )
        )
    if send_botlist_button:
        update_row.append(KeyboardButton(captions.SEND_BOTLIST))
    if logs_button:
        update_row.append(KeyboardButton(captions.SEND_ACTIVITY_LOGS))
    # Insert the optional rows between the first row and the offline/config
    # row, keeping the update row above the approval row.
    if len(update_row) > 0:
        buttons.insert(1, update_row)
    if len(second_row) > 0:
        buttons.insert(1, second_row)
    return buttons
@restricted
def _input_failed(bot, update, chat_data, text):
    """Report invalid admin-menu input to the user and record the failure."""
    chat_id = util.uid_from_update(update)
    bot.formatter.send_failure(chat_id, text)
    Statistic.of(
        update,
        "error",
        "input failed in admin menu for {}".format(text),
        Statistic.ANALYSIS,
    )
    # Clear the pending "add bot" prompt so the flow can restart cleanly.
    chat_data["add_bot_message"] = None
def _add_bot_to_chatdata(chat_data, category=None):
    """Stash a fresh, unsaved Bot (optionally pre-categorised) in chat_data."""
    chat_data["add_bot"] = Bot(category=category)
def format_pending(text):
    """Prefix *text* with the pending-suggestion emoji."""
    return f"{captions.SUGGESTION_PENDING_EMOJI} {text}"
def _edit_bot_buttons(to_edit: Bot, pending_suggestions: Dict, is_moderator):
    """Build the inline keyboard for the bot-editing menu.

    Every editable property gets a button; properties that have a pending
    suggestion are rendered with the pending emoji and the suggested value
    instead of the current one.  Moderators additionally get a delete
    button and, when suggestions exist, an "apply all changes" footer.
    """
    bid = {"id": to_edit.id}
    def is_pending(action):
        # `action` may be a single suggestion key or a list of keys.
        if isinstance(action, str):
            return action in pending_suggestions
        else:
            return any(a in pending_suggestions for a in action)
    def pending_or_caption(action, caption):
        # Prefer the pending suggested value (marked as such) over `caption`.
        return (
            format_pending(str(pending_suggestions[action]))
            if is_pending(action)
            else str(caption)
        )
    buttons = [
        InlineKeyboardButton(
            pending_or_caption("name", to_edit.name or "Set Name"),
            callback_data=util.callback_for_action(CallbackActions.EDIT_BOT_NAME, bid),
        ),
        InlineKeyboardButton(
            pending_or_caption("username", to_edit.username),
            callback_data=util.callback_for_action(
                CallbackActions.EDIT_BOT_USERNAME, bid
            ),
        ),
        InlineKeyboardButton(
            # remove bulletin from category
            pending_or_caption(
                "category",
                str(pending_suggestions.get("category") or to_edit.category)[1:]
                if to_edit.category
                else "Choose a category",
            ),
            callback_data=util.callback_for_action(
                CallbackActions.EDIT_BOT_SELECT_CAT, bid
            ),
        ),
        InlineKeyboardButton(
            pending_or_caption(
                "description",
                "Change description" if to_edit.description else "Write a description",
            ),
            callback_data=util.callback_for_action(
                CallbackActions.EDIT_BOT_DESCRIPTION, bid
            ),
        ),
        InlineKeyboardButton(
            pending_or_caption(
                "country",
                to_edit.country.emojized if to_edit.country else "Set country/language",
            ),
            callback_data=util.callback_for_action(
                CallbackActions.EDIT_BOT_COUNTRY, bid
            ),
        ),
        InlineKeyboardButton(
            pending_or_caption(
                "extra", "Change extra text" if to_edit.extra else "Add an extra text"
            ),
            callback_data=util.callback_for_action(CallbackActions.EDIT_BOT_EXTRA, bid),
        ),
        InlineKeyboardButton(
            format_pending("Set keywords")
            if is_pending(["add_keyword", "remove_keyword"])
            else "Set keywords",
            callback_data=util.callback_for_action(
                CallbackActions.EDIT_BOT_KEYWORDS, bid
            ),
        ),
    ]
    # Boolean flags rendered as on/off toggle buttons.
    toggleable_properties = [
        ("inlinequeries", "๐", CallbackActions.EDIT_BOT_INLINEQUERIES),
        ("official", "๐น", CallbackActions.EDIT_BOT_OFFICIAL),
        # ('offline', '๐ค', CallbackActions.EDIT_BOT_OFFLINE),
        ("spam", "๐ฎ", CallbackActions.EDIT_BOT_SPAM),
    ]
    def toggle_button(property_name, emoji, callback_action):
        # A pending suggestion overrides the stored value for display, and
        # the callback always carries the inverse of the shown state.
        is_pending = property_name in pending_suggestions.keys()
        pending_emoji = captions.SUGGESTION_PENDING_EMOJI + " " if is_pending else ""
        active = (
            bool(pending_suggestions[property_name])
            if is_pending
            else bool(getattr(to_edit, property_name))
        )
        active_emoji = "โ๏ธ" if active else Emoji.HEAVY_MULTIPLICATION_X
        caption = "{}{} {}".format(pending_emoji, emoji, active_emoji)
        return InlineKeyboardButton(
            caption,
            callback_data=util.callback_for_action(
                callback_action, {"id": to_edit.id, "value": not active}
            ),
        )
    for toggle in toggleable_properties:
        buttons.append(toggle_button(*toggle))
    if is_moderator:
        buttons.append(
            InlineKeyboardButton(
                "Delete",
                callback_data=util.callback_for_action(
                    CallbackActions.CONFIRM_DELETE_BOT, bid
                ),
            )
        )
    header = []
    if to_edit.category:
        header.append(
            InlineKeyboardButton(
                captions.BACK_TO_CATEGORY,
                callback_data=util.callback_for_action(
                    CallbackActions.SELECT_BOT_FROM_CATEGORY,
                    {"id": to_edit.category.id},
                ),
            )
        )
    header.append(
        InlineKeyboardButton(
            captions.REFRESH,
            callback_data=util.callback_for_action(
                CallbackActions.EDIT_BOT, {"id": to_edit.id}
            ),
        )
    )
    footer = list()
    if is_moderator and len(pending_suggestions) > 0:
        footer.append(
            InlineKeyboardButton(
                "๐ Apply all changes",
                callback_data=util.callback_for_action(
                    CallbackActions.APPLY_ALL_CHANGES, {"id": to_edit.id}
                ),
            )
        )
    return util.build_menu(
        buttons, n_cols=2, header_buttons=header, footer_buttons=footer
    )
@track_activity("menu", "bot editing", Statistic.ANALYSIS)
def edit_bot(bot, update, chat_data, to_edit=None):
    """Display the edit menu for a bot.

    The target bot is either passed in directly or resolved from an
    ``/edit<id>`` / ``/approve<id>`` command in the incoming message.
    """
    uid = util.uid_from_update(update)
    message_id = util.mid_from_update(update)
    user = User.from_update(update)
    if not to_edit:
        if update.message:
            command = update.message.text
            if "edit" in command:
                b_id = re.match(r"^/edit(\d+)$", command).groups()[0]
            elif "approve" in command:
                b_id = re.match(r"^/approve(\d+)$", command).groups()[0]
            else:
                raise ValueError("No 'edit' or 'approve' in command.")
            try:
                to_edit = Bot.get(id=b_id)
            except Bot.DoesNotExist:
                update.message.reply_text(util.failure("No bot exists with this id."))
                return
        else:
            bot.formatter.send_failure(uid, "An unexpected error occured.")
            return
    # if not to_edit.approved:
    #     return approve_bots(bot, update, override_list=[to_edit])
    pending_suggestions = Suggestion.pending_for_bot(to_edit, user)
    reply_markup = InlineKeyboardMarkup(
        _edit_bot_buttons(to_edit, pending_suggestions, uid in settings.MODERATORS)
    )
    # Non-moderators are told their changes await approval.
    pending_text = (
        "\n\n{} Some changes are pending approval{}.".format(
            captions.SUGGESTION_PENDING_EMOJI,
            "" if user.chat_id in settings.MODERATORS else " by a moderator",
        )
        if pending_suggestions
        else ""
    )
    meta_text = (
        "\n\nDate added: {}\nMember since revision {}\n"
        "Submitted by {}\nApproved by {}".format(
            to_edit.date_added,
            to_edit.revision,
            to_edit.submitted_by,
            to_edit.approved_by,
        )
    )
    bot.formatter.send_or_edit(
        uid,
        "๐ Edit {}{}{}".format(
            to_edit.detail_text,
            meta_text if user.id in settings.MODERATORS else "",
            pending_text,
        ),
        to_edit=message_id,
        reply_markup=reply_markup,
    )
@restricted(strict=True)
def prepare_transmission(bot, update, chat_data):
    """Show the pending update and ask how to broadcast the new botlist.

    Offers notifying subscribers, a silent update, or a full re-send of
    all channel messages.
    """
    chat_id = util.uid_from_update(update)
    pending_update(bot, update)
    text = mdformat.action_hint("Notify subscribers about this update?")
    reply_markup = InlineKeyboardMarkup(
        [
            [
                InlineKeyboardButton(
                    "โ Notifications",
                    callback_data=util.callback_for_action(
                        CallbackActions.SEND_BOTLIST, {"silent": False}
                    ),
                ),
                InlineKeyboardButton(
                    "Silent",
                    callback_data=util.callback_for_action(
                        CallbackActions.SEND_BOTLIST, {"silent": True}
                    ),
                ),
            ],
            [
                InlineKeyboardButton(
                    "Re-send all Messages",
                    callback_data=util.callback_for_action(
                        CallbackActions.SEND_BOTLIST, {"silent": True, "re": True}
                    ),
                )
            ],
        ]
    )
    # # TODO
    # text = "Temporarily disabled"
    # reply_markup = None
    util.send_md_message(bot, chat_id, text, reply_markup=reply_markup)
@track_activity("menu", "approve suggestions", Statistic.ANALYSIS)
@restricted
def approve_suggestions(bot, update, page=0):
    """Show a paginated accept/reject menu for all pending suggestions.

    Textual suggestions get an extra edit option before acceptance.
    """
    uid = util.uid_from_update(update)
    suggestions = Suggestion.select_all()
    if page * settings.PAGE_SIZE_SUGGESTIONS_LIST >= len(suggestions):
        # old item deleted, list now too small
        page = page - 1 if page > 0 else 0
    start = page * settings.PAGE_SIZE_SUGGESTIONS_LIST
    end = start + settings.PAGE_SIZE_SUGGESTIONS_LIST
    has_prev_page = page > 0
    has_next_page = (page + 1) * settings.PAGE_SIZE_SUGGESTIONS_LIST < len(suggestions)
    suggestions = suggestions[start:end]
    if len(suggestions) == 0:
        bot.formatter.send_or_edit(
            uid, "No more suggestions available.", to_edit=util.mid_from_update(update)
        )
        return
    buttons = []
    count = 1
    text = "Please choose suggestions to accept.\n"
    for x in suggestions:
        number = str(count) + "."
        text += "\n{} {}".format(number, str(x))
        row = []
        # Should the suggestion be editable and is it too long?
        if x.action in Suggestion.TEXTUAL_ACTIONS:
            row.append(
                InlineKeyboardButton(
                    "{} {}๐".format(number, Emoji.WHITE_HEAVY_CHECK_MARK),
                    callback_data=util.callback_for_action(
                        CallbackActions.CHANGE_SUGGESTION, {"id": x.id, "page": page}
                    ),
                )
            )
        else:
            row.append(
                InlineKeyboardButton(
                    "{} {}".format(number, Emoji.WHITE_HEAVY_CHECK_MARK),
                    callback_data=util.callback_for_action(
                        CallbackActions.ACCEPT_SUGGESTION, {"id": x.id, "page": page}
                    ),
                )
            )
        row.append(
            InlineKeyboardButton(
                "{} {}".format(number, Emoji.CROSS_MARK),
                callback_data=util.callback_for_action(
                    CallbackActions.REJECT_SUGGESTION, {"id": x.id, "page": page}
                ),
            )
        )
        buttons.append(row)
        count += 1
    page_arrows = list()
    if has_prev_page:
        page_arrows.append(
            InlineKeyboardButton(
                Emoji.LEFTWARDS_BLACK_ARROW,
                callback_data=util.callback_for_action(
                    CallbackActions.SWITCH_SUGGESTIONS_PAGE, {"page": page - 1}
                ),
            )
        )
    if has_next_page:
        page_arrows.append(
            InlineKeyboardButton(
                Emoji.BLACK_RIGHTWARDS_ARROW,
                callback_data=util.callback_for_action(
                    CallbackActions.SWITCH_SUGGESTIONS_PAGE, {"page": page + 1}
                ),
            )
        )
    buttons.append(page_arrows)
    reply_markup = InlineKeyboardMarkup(buttons)
    bot.formatter.send_or_edit(
        uid,
        util.action_hint(text),
        reply_markup=reply_markup,
        to_edit=util.mid_from_update(update),
        disable_web_page_preview=True,
    )
    return CallbackStates.APPROVING_BOTS
@track_activity("menu", "approve bots", Statistic.ANALYSIS)
@restricted
def approve_bots(bot, update, page=0, override_list=None):
    """Show a paginated menu of unapproved bots with per-bot actions.

    Actions: accept, reject with/without notifying the submitter, and
    recommend the submission to another moderator.  ``override_list``
    replaces the database query (used to review a single bot).
    """
    chat_id = util.uid_from_update(update)
    if override_list:
        unapproved = override_list
    else:
        unapproved = (
            Bot.select()
            .where(Bot.approved == False, Bot.disabled == False)
            .order_by(Bot.date_added)
        )
    if page < 0:
        page = 0
    last_page = int((len(unapproved) - 1) / settings.PAGE_SIZE_BOT_APPROVAL)
    if page * settings.PAGE_SIZE_BOT_APPROVAL >= len(unapproved):
        # old item deleted, list now too small
        page = last_page
    start = page * settings.PAGE_SIZE_BOT_APPROVAL
    end = start + settings.PAGE_SIZE_BOT_APPROVAL
    has_prev_page = page > 0
    has_next_page = (page + 1) * settings.PAGE_SIZE_BOT_APPROVAL < len(unapproved)
    unapproved = unapproved[start:end]
    if len(unapproved) == 0:
        bot.formatter.send_or_edit(
            chat_id,
            "No more unapproved bots available. "
            "Good job! (Is this the first time? ๐)",
            to_edit=util.mid_from_update(update),
        )
        return
    buttons = list()
    for x in unapproved:
        # Link row is only shown when reviewing multiple bots at once.
        first_row = [
            InlineKeyboardButton(
                x.username, url="http://t.me/{}".format(x.username[1:])
            )
        ]
        second_row = [
            InlineKeyboardButton(
                "๐",
                callback_data=util.callback_for_action(
                    CallbackActions.ACCEPT_BOT, {"id": x.id}
                ),
            ),
            InlineKeyboardButton(
                "๐",
                callback_data=util.callback_for_action(
                    CallbackActions.REJECT_BOT, {"id": x.id, "page": page, "ntfc": True}
                ),
            ),
            InlineKeyboardButton(
                "๐",
                callback_data=util.callback_for_action(
                    CallbackActions.REJECT_BOT,
                    {"id": x.id, "page": page, "ntfc": False},
                ),
            ),
            InlineKeyboardButton(
                emojis.RECOMMEND_MODERATOR,
                callback_data=util.callback_for_action(
                    CallbackActions.RECOMMEND_MODERATOR, {"id": x.id, "page": page}
                ),
            ),
        ]
        if len(unapproved) > 1:
            buttons.append(first_row)
        buttons.append(second_row)
    page_arrows = list()
    if has_prev_page:
        page_arrows.append(
            InlineKeyboardButton(
                "โฎ",
                callback_data=util.callback_for_action(
                    CallbackActions.SWITCH_APPROVALS_PAGE, {"page": -1}
                ),
            )
        )
        page_arrows.append(
            InlineKeyboardButton(
                Emoji.LEFTWARDS_BLACK_ARROW,
                callback_data=util.callback_for_action(
                    CallbackActions.SWITCH_APPROVALS_PAGE, {"page": page - 1}
                ),
            )
        )
    if has_prev_page or has_next_page:
        page_arrows.append(
            InlineKeyboardButton(
                "ยท{}ยท".format(page + 1),
                callback_data=util.callback_for_action(
                    CallbackActions.SWITCH_APPROVALS_PAGE, {"page": page}
                ),
            )
        )
    if has_next_page:
        page_arrows.append(
            InlineKeyboardButton(
                Emoji.BLACK_RIGHTWARDS_ARROW,
                callback_data=util.callback_for_action(
                    CallbackActions.SWITCH_APPROVALS_PAGE, {"page": page + 1}
                ),
            )
        )
        page_arrows.append(
            InlineKeyboardButton(
                "โญ",
                callback_data=util.callback_for_action(
                    CallbackActions.SWITCH_APPROVALS_PAGE, {"page": last_page}
                ),
            )
        )
    buttons.append(page_arrows)
    reply_markup = InlineKeyboardMarkup(buttons)
    text = (
        "What to do with {}?".format(util.escape_markdown(unapproved[0].username))
        if len(unapproved) == 1
        else messages.SELECT_BOT_TO_ACCEPT
    )
    bot.formatter.send_or_edit(
        chat_id,
        util.action_hint(text),
        reply_markup=reply_markup,
        to_edit=util.mid_from_update(update),
    )
    return CallbackStates.APPROVING_BOTS
@track_activity("menu", "recommend moderator", Statistic.DETAILED)
def recommend_moderator(bot, update, bot_in_question, page):
    """Let a moderator forward a bot submission to another moderator.

    Shows one button per moderator (excluding the current user) plus a
    back button returning to the approvals page.
    """
    uid = update.effective_user.id
    mid = util.mid_from_update(update)
    moderators = User.select().where(
        (User.chat_id << settings.MODERATORS) & (User.chat_id != uid)
    )
    buttons = [
        InlineKeyboardButton(
            u.first_name,
            callback_data=util.callback_for_action(
                CallbackActions.SELECT_MODERATOR,
                {"bot_id": bot_in_question.id, "uid": u.id, "page": page},
            ),
        )
        for u in moderators
    ]
    buttons.insert(
        0,
        InlineKeyboardButton(
            captions.BACK,
            callback_data=util.callback_for_action(
                CallbackActions.SWITCH_APPROVALS_PAGE, {"page": page}
            ),
        ),
    )
    reply_markup = InlineKeyboardMarkup(util.build_menu(buttons, 1))
    text = mdformat.action_hint(
        "Select a moderator you think is better suited to evaluate the submission of {}.".format(
            str(bot_in_question)
        )
    )
    bot.formatter.send_or_edit(uid, text, to_edit=mid, reply_markup=reply_markup)
def share_with_moderator(bot, update, bot_in_question, moderator):
    """Send a bot submission to another moderator and confirm to the sender."""
    user = User.from_update(update)
    buttons = [
        [
            InlineKeyboardButton(
                "Yea, let me take this one!",
                callback_data=util.callback_for_action(
                    CallbackActions.APPROVE_REJECT_BOTS, {"id": bot_in_question.id}
                ),
            )
        ]
    ]
    reply_markup = InlineKeyboardMarkup(buttons)
    text = "{} thinks that you have the means to inspect this bot submission:\nโถ๏ธ {}".format(
        user.markdown_short, bot_in_question
    )
    try:
        util.send_md_message(
            bot,
            moderator.chat_id,
            text,
            reply_markup=reply_markup,
            disable_web_page_preview=True,
        )
        answer_text = mdformat.success(
            "I will ask {} to have a look at this submission.".format(
                moderator.plaintext
            )
        )
    except Exception as e:
        # Delivery may fail (e.g. moderator blocked the bot); report it
        # back to the requesting moderator instead of raising.
        answer_text = mdformat.failure(f"Could not contact {moderator.plaintext}: {e}")
    if update.callback_query:
        update.callback_query.answer(text=answer_text)
    Statistic.of(
        update,
        "share",
        "submission {} with {}".format(bot_in_question.username, moderator.plaintext),
    )
@track_activity("menu", "edit bot category", Statistic.DETAILED)
def edit_bot_category(bot, update, for_bot, callback_action=None):
    """Show a two-column category picker for *for_bot*.

    The chosen category is routed through *callback_action*
    (defaults to EDIT_BOT_CAT_SELECTED).
    """
    if callback_action is None:
        callback_action = CallbackActions.EDIT_BOT_CAT_SELECTED
    uid = util.uid_from_update(update)
    categories = Category.select().order_by(Category.name.asc()).execute()
    buttons = util.build_menu(
        [
            InlineKeyboardButton(
                "{}{}".format(emoji.emojize(c.emojis, use_aliases=True), c.name),
                callback_data=util.callback_for_action(
                    callback_action, {"cid": c.id, "bid": for_bot.id}
                ),
            )
            for c in categories
        ],
        2,
    )
    return bot.formatter.send_or_edit(
        uid,
        util.action_hint(
            "Please select a category" + (" for {}".format(for_bot) if for_bot else "")
        ),
        to_edit=util.mid_from_update(update),
        reply_markup=InlineKeyboardMarkup(buttons),
    )
@restricted
def accept_bot_submission(bot, update, of_bot: Bot, category):
    """Approve a submitted bot, assign its category and notify the submitter.

    Marks the bot approved by the acting user, offers an edit button to the
    approver and sends a private acceptance message to the submitter (unless
    they are the approver themselves).
    """
    uid = util.uid_from_update(update)
    message_id = util.mid_from_update(update)
    user = User.from_update(update)
    try:
        of_bot.category = category
        of_bot.date_added = datetime.date.today()
        of_bot.approved = True
        of_bot.approved_by = user
        of_bot.save()
        buttons = [
            [
                InlineKeyboardButton(
                    "Edit {} details".format(of_bot.username),
                    callback_data=util.callback_for_action(
                        CallbackActions.EDIT_BOT, {"id": of_bot.id}
                    ),
                )
            ]
        ]
        reply_markup = InlineKeyboardMarkup(buttons)
        bot.formatter.send_or_edit(
            uid,
            "{} has been accepted to the Botlist. ".format(
                of_bot
            ),
            to_edit=message_id,
            reply_markup=reply_markup,
        )
        log_msg = "{} accepted by {}.".format(of_bot.username, uid)
        # notify submittant
        if of_bot.submitted_by != user:
            try:
                bot.sendMessage(
                    of_bot.submitted_by.chat_id,
                    util.success(
                        messages.ACCEPTANCE_PRIVATE_MESSAGE.format(
                            of_bot.username, of_bot.category
                        )
                    ),
                )
                log_msg += "\nUser {} was notified.".format(str(of_bot.submitted_by))
            except TelegramError:
                log_msg += "\nUser {} could NOT be contacted/notified in private.".format(
                    str(of_bot.submitted_by)
                )
        log.info(log_msg)
    except Exception:
        # Fix: was a bare `except:` that silently swallowed every error
        # (including SystemExit/KeyboardInterrupt) — narrow it and log the
        # traceback so failed approvals are diagnosable.
        log.exception("Accepting bot submission failed")
        bot.formatter.send_failure(uid, "An error has occured. Bot not added.")
@track_activity("request", "list of offline bots")
def send_offline(bot, update):
    """Send the list of offline bots (oldest response first) with edit links."""
    chat_id = util.uid_from_update(update)
    offline = (
        Bot.select()
        .where(Bot.offline == True, Bot.disabled == False)
        .order_by(Bot.last_response.asc())
    )
    def offline_since(b):
        # Humanize the downtime, dropping the trailing " ago".
        if not b.last_response:
            return "a long time"
        slanged_time = helpers.slang_datetime(b.last_response)
        return slanged_time.replace(" ago", "")
    if len(offline) > 0:
        text = "Offline Bots:\n\n"
        text += "\n".join(
            [
                "{}{} โ /edit{}".format(
                    str(b), " (for {})".format(offline_since(b)), b.id
                )
                for b in offline
            ]
        )
    else:
        text = "No bots are offline."
    bot.formatter.send_message(chat_id, text)
@restricted
def reject_bot_submission(
    bot,
    update,
    args=None,
    to_reject=None,
    verbose=True,
    notify_submittant=True,
    reason=None,
):
    """Reject and delete a bot submission, optionally notifying the submitter.

    When *to_reject* is not given, the bot is resolved from the @username
    found in the message the admin replied to, and *args* (or *reason*)
    becomes the rejection reason.  Already-approved bots cannot be rejected.
    """
    uid = util.uid_from_update(update)
    user = User.from_update(update)
    if to_reject is None:
        if not update.message.reply_to_message:
            bot.send_message(
                update.effective_user.id,
                util.failure("You must reply to a message of mine."),
            )
            return
        text = update.message.reply_to_message.text
        reason = reason if reason else (" ".join(args) if args else None)
        try:
            # Best effort: the admin's command message may already be gone.
            update.message.delete()
        except:
            pass
        username = helpers.find_bots_in_text(text, first=True)
        if not username:
            bot.send_message(
                update.effective_user.id,
                util.failure("No username in the message that you replied to."),
            )
            return
        try:
            to_reject = Bot.by_username(username)
        except Bot.DoesNotExist:
            bot.send_message(
                update.effective_user.id,
                util.failure(
                    "Rejection failed: {} is not present in the "
                    "database.".format(username)
                ),
            )
            return
        if to_reject.approved is True:
            msg = "{} has already been accepted, so it cannot be rejected anymore.".format(
                username
            )
            bot.sendMessage(uid, util.failure(msg))
            return
    Statistic.of(update, "reject", to_reject.username)
    text = notify_submittant_rejected(bot, user, notify_submittant, reason, to_reject)
    to_reject.delete_instance()
    if verbose:
        bot.sendMessage(uid, text)
    if update.callback_query:
        update.callback_query.answer(text=text)
def notify_submittant_rejected(bot, admin_user, notify_submittant, reason, to_reject):
    """Notify the submitter that their bot was rejected.

    Returns the log/confirmation line shown to the rejecting moderator,
    including whether the submitter could be reached in private.
    """
    msg = "{} rejected by {}.".format(to_reject.username, admin_user)
    if notify_submittant or reason:
        try:
            if reason:
                bot.send_message(
                    to_reject.submitted_by.chat_id,
                    util.failure(
                        messages.REJECTION_WITH_REASON.format(
                            to_reject.username, reason=reason
                        )
                    ),
                )
            else:
                bot.sendMessage(
                    to_reject.submitted_by.chat_id,
                    util.failure(
                        messages.REJECTION_PRIVATE_MESSAGE.format(to_reject.username)
                    ),
                )
            msg += "\nUser {} was notified.".format(str(to_reject.submitted_by))
        except TelegramError:
            msg += "\nUser {} could NOT be contacted/notified in private.".format(
                str(to_reject.submitted_by)
            )
    # NOTE(review): a second summary string ("<bot> rejected. ...") used to
    # be assembled here via a `notification_successful` flag but was never
    # returned — that dead code has been removed; `msg` is the value callers
    # actually use.
    return msg
@restricted
def ban_handler(bot, update, args, chat_data, ban_state: bool):
    """Dispatch a /ban or /unban command to the user- or bot-specific handler.

    With no arguments, prompts for a search term via ForceReply instead.
    """
    if args:
        query = " ".join(args) if isinstance(args, list) else args
        entity_to_ban = lookup_entity(query, exact=True)
        if isinstance(entity_to_ban, User):
            ban_user(bot, update, entity_to_ban, ban_state)
        elif isinstance(entity_to_ban, Bot):
            ban_bot(bot, update, chat_data, entity_to_ban, ban_state)
        else:
            update.message.reply_text(mdformat.failure("Can only ban users and bots."))
    else:
        # no search term
        update.message.reply_text(
            messages.BAN_MESSAGE if ban_state else messages.UNBAN_MESSAGE,
            reply_markup=ForceReply(selective=True),
        )
    return ConversationHandler.END
@restricted
def ban_user(_bot, update, user: User, ban_state: bool):
    """Ban or unban *user*; banning also purges their pending contributions.

    Raises DispatcherHandlerStop when the requested state is already set,
    so no further handlers run.
    """
    if user.banned and ban_state is True:
        update.message.reply_text(
            mdformat.none_action("User {} is already banned.".format(user)),
            parse_mode="markdown",
        )
        raise DispatcherHandlerStop
    if not user.banned and ban_state is False:
        update.message.reply_text(
            mdformat.none_action("User {} is not banned.".format(user)),
            parse_mode="markdown",
        )
        raise DispatcherHandlerStop
    user.banned = ban_state
    if ban_state is True:
        # Purge everything the user contributed that is not yet live.
        with db.atomic():
            user_submissions = Bot.select().where(
                (Bot.approved == False)
                & (Bot.submitted_by == user)
                # TODO: does this need to include `Bot.deleted == True`?
            )
            for b in user_submissions:
                b.delete_instance()
            users_suggestions = Suggestion.select().where(
                (Suggestion.executed == False) & (Suggestion.user == user)
            )
            for s in users_suggestions:
                s.delete_instance()
        update.message.reply_text(
            mdformat.success(
                "User {} banned, all bot submissions and suggestions removed.".format(
                    user
                )
            ),
            parse_mode="markdown",
        )
        Statistic.of(update, "ban", user.markdown_short)
    else:
        update.message.reply_text(
            mdformat.success("User {} unbanned.".format(user)), parse_mode="markdown"
        )
        Statistic.of(update, "unban", user.markdown_short)
    user.save()
@restricted
def ban_bot(bot, update, chat_data, to_ban: Bot, ban_state: bool):
    """Ban (disable) or unban (re-enable) a bot, then show its detail view."""
    if to_ban.disabled and ban_state is True:
        update.message.reply_text(
            mdformat.none_action("{} is already banned.".format(to_ban)),
            parse_mode="markdown",
        )
        return
    if not to_ban.disabled and ban_state is False:
        update.message.reply_text(
            mdformat.none_action("{} is not banned.".format(to_ban)),
            parse_mode="markdown",
        )
        return
    if ban_state:
        to_ban.disable(Bot.DisabledReason.banned)
        update.message.reply_text("Bot was banned.")
    else:
        to_ban.enable()
        update.message.reply_text("Bot was unbanned.")
    to_ban.save()
    # Imported here to avoid a circular import with the explore component.
    from botlistbot.components.explore import send_bot_details
    return send_bot_details(bot, update, chat_data, to_ban)
def last_update_job(bot, job: Job):
    """Nag all admins when the channel has not been updated for over 10 days.

    Currently disabled via the early return below.
    """
    return  # make admins happy :)
    last_update = helpers.get_channel().last_update
    if last_update:
        today = datetime.date.today()
        delta = datetime.timedelta(days=10)
        difference = today - last_update
        if difference > delta:
            for admin in settings.ADMINS:
                try:
                    bot.sendMessage(
                        admin,
                        f"Last @BotList update was {difference.days} days ago. "
                        f"UPDATE NOW YOU CARNT! /admin",
                    )
                except TelegramError:
                    pass
@restricted
def apply_all_changes(bot, update, chat_data, to_edit):
    """Apply every pending suggestion of the acting user and refresh the menu."""
    user = User.from_update(update)
    user_suggestions = Suggestion.select_all_of_user(user)
    for suggestion in user_suggestions:
        suggestion.apply()
    # Reload from the database so the edit menu reflects the applied changes.
    refreshed_bot = Bot.get(id=to_edit.id)
    edit_bot(bot, update, chat_data, refreshed_bot)
    Statistic.of(update, "apply", refreshed_bot.username)
@track_activity("menu", "pending bots for next update", Statistic.ANALYSIS)
def pending_update(bot, update):
    """List the bots queued for the next botlist update.

    Moderators in a private chat additionally get /edit links per bot.
    """
    uid = update.effective_chat.id
    bots = Bot.select_pending_update()
    if len(bots) == 0:
        update.message.reply_text("No bots pending for update.")
        return
    txt = "Bots pending for next Update:\n\n"
    if uid in settings.MODERATORS and util.is_private_message(update):
        # append admin edit buttons
        txt += "\n".join(["{} โ /edit{}".format(b, b.id) for b in bots])
    else:
        txt += "\n".join([str(b) for b in bots])
    bot.formatter.send_message(uid, txt)
@track_activity("request", "runtime files", Statistic.ANALYSIS)
@restricted
def send_runtime_files(bot, update):
    """Send the bot's bundled text files and log files to the requesting admin.

    Delivery is best-effort: a missing file or a failed upload is skipped so
    the remaining files are still sent.
    """

    def send_file(path: Path):
        # One document per file; failures are intentionally swallowed.
        path = str(path)
        try:
            uid = update.effective_user.id
            # ``with`` closes the handle deterministically — the original
            # leaked one open file object per call.
            with open(path, "rb") as file_handle:
                bot.sendDocument(uid, file_handle, filename=os.path.split(path)[-1])
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
            # still propagate.
            pass

    root = Path(appglobals.ROOT_DIR) / "botlistbot"
    send_file(root / "files/intro_en.txt")
    send_file(root / "files/intro_es.txt")
    send_file(root / "files/new_bots_list.txt")
    send_file(root / "files/category_list.txt")
    send_file(root / "files/commands.txt")
    send_file(root / "error.log")
    send_file(root / "debug.log")
# def _merge_statistic_logs(statistic, file, level):
# all_logs = {s.date: s for s in statistic}
# handle = open(file, 'r')
# lines = handle.readlines()
#
# pattern = re.compile(r'\[(.*)\] .* (INFO|DEBUG|WARNING|ERROR|EXCEPTION) - (.*)')
# for l in lines:
# reg = re.match(pattern, l)
# groups = reg.groups()
# lvl = logging.getLevelName(groups[1])
# if level < lvl:
# continue
# date = dateutil.parser.parse(groups[0])
# message = groups[2]
#
# all_logs[date] = message
# # sorted(all_logs, key=lambda x: ) # TODO
# return all_logs
@track_activity("request", "activity logs", Statistic.ANALYSIS)
@restricted
def send_activity_logs(bot, update, args=None, level=Statistic.INFO):
    """Send the most recent Statistic entries to the requesting admin.

    ``args``: optional command arguments whose first element is the number of
    entries to show (default 200, capped at 500).  Entries are sent oldest
    first, in chunks of 30 per message.
    """
    num = 200
    if args:
        try:
            num = int(args[0])
            num = min(num, 500)
        except (ValueError, TypeError, IndexError):
            # Narrowed from a bare ``except``: a malformed argument just
            # keeps the default count.
            pass
    uid = update.effective_user.id
    recent_statistic = Statistic.select().order_by(Statistic.date.desc()).limit(num)
    recent_statistic = list(reversed(recent_statistic))

    step_size = 30
    for i in range(0, len(recent_statistic), step_size):
        items = recent_statistic[i : i + step_size]
        text = "\n".join(x.md_str() for x in items)
        bot.formatter.send_message(uid, text)
@restricted
def send_statistic(bot, update):
    """Post a per-action/per-entity usage summary to the current chat."""
    interesting_actions = [
        "explore",
        "menu",
        "command",
        "request",
        "made changes to their suggestion:",
        "issued deletion of conversation in BotListChat",
    ]
    stats = (
        Statistic.select(Statistic, fn.COUNT(Statistic.entity).alias("count"))
        .where(Statistic.action << interesting_actions)
        .group_by(Statistic.action, Statistic.entity)
    )
    # default=0 keeps max() from raising ValueError when there are no rows yet
    # (the original crashed on an empty statistics table).
    maxlen = max((len(str(x.count)) for x in stats), default=0)
    text = "\n".join(
        "`{}โช๏ธ` {} {}".format(str(s.count).ljust(maxlen), s.action.title(), s.entity)
        for s in stats
    )
    bot.formatter.send_message(update.effective_chat.id, text, parse_mode="markdown")
@track_activity("menu", "short approve list", Statistic.ANALYSIS)
def short_approve_list(bot, update):
    """Post a compact list of bots still awaiting approval."""
    uid = update.effective_chat.id
    bots = Bot.select_unapproved()
    if len(bots) == 0:
        update.message.reply_text("No bots to be approved.")
        return

    txt = "Bots pending approval:\n\n"
    if uid in settings.MODERATORS and util.is_private_message(update):
        # Moderators in a private chat get tappable /approve links per bot.
        entries = ["{} โ /approve{}".format(b, b.id) for b in bots]
    else:
        entries = [str(b) for b in bots]
    txt += "\n".join(entries)
    bot.formatter.send_message(uid, txt)
@track_activity("menu", "manybots", Statistic.ANALYSIS)
@restricted
def manybots(bot, update):
    """List approved, non-disabled botbuilder ("manybots") entries."""
    uid = update.effective_chat.id
    # Each comparison must be parenthesized: ``&`` binds tighter than ``==``,
    # so the original unparenthesized chain did not express this filter at
    # all.  This matches the parenthesized style used elsewhere in the file.
    bots = Bot.select().where(
        (Bot.approved == True)
        & (Bot.botbuilder == True)
        & (Bot.disabled == False)
    )

    txt = "Manybots in the BotList:\n\n"
    # if uid in settings.MODERATORS and util.is_private_message(update):
    #     # append admin edit buttons
    #     txt += '\n'.join(["{} โ /approve{}".format(b, b.id) for b in bots])
    # else:
    txt += "\n".join([str(b) for b in bots])
    bot.formatter.send_message(uid, txt)
| JosXa/BotListBot | botlistbot/components/admin.py | admin.py | py | 38,333 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "botlistbot.settings.ADMINS",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "botlistbot.settings",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "botlistbot.models.Revision.get_instance",
"line_number": 47,
"usage_type": "call"
}... |
28228061323 | import openai
import os
import random
import json
def get_json(path):
    """Load a question file that may contain either a Python-literal dict
    (single quotes, True/None, ...) or JSON with doubled backslashes.

    Returns the parsed object.
    """
    import ast  # local import: safe literal parser, replaces eval()

    with open(path, 'r') as f:
        d = f.read()
    try:
        # ast.literal_eval parses Python literals WITHOUT executing code —
        # the original used eval(), which runs arbitrary code from the file.
        return ast.literal_eval(d)
    except (ValueError, SyntaxError):
        # Not a Python literal: treat as JSON, un-escaping doubled backslashes.
        return json.loads(d.replace("\\\\", "\\"))
def json_to_prompt(question_json):
    """Wrap the raw question structure in a one-shot instruction prompt;
    the model is expected to parse the embedded JSON itself."""
    return ("Here is a json of a question, choose the best answer "
            f"{question_json}. ONLY RESPOND WITH THE LETTER CHOICE")
def json_to_prompt2(question_json):
    """Prompt variant 2: background text plus the raw answers structure.

    Note: the 4-space indentation of the continuation lines is part of the
    returned string.
    """
    # chatgpt can handle parsing the json
    return f"""Answer the following question.
    Background: {question_json['background']}.
    Choices: {question_json['answers']}
    ONLY RESPOND WITH THE LETTER CHOICE"""
def json_to_prompt3(question_json):
    """Prompt variant 3: background plus a lettered "A) answer" line per choice.

    The 4-space indentation and ``\\n`` escapes are part of the returned text.
    """
    choices_text = "".join(
        f"{option['choice']}) {option['answer']}\n"
        for option in question_json['answers']
    )
    return f"""Answer the following question. \n
    Background: {question_json['background']}. \n
    Choices: {choices_text}\n
    ONLY RESPOND WITH THE LETTER CHOICE"""
def json_to_prompt4(question_json):
    """Prompt variant 4 (for completion models): like variant 3 but with an
    explicit response-format instruction and a trailing answer cue."""
    choices_text = "".join(
        f"{option['choice']}) {option['answer']}\n"
        for option in question_json['answers']
    )
    return f"""Answer the following question. \n
    Background: {question_json['background']}. \n
    Choices: {choices_text}\n
    Respond in the format: "The correct answer is X.", only choices are "A", "B", "C", "D", or "E"\n
    The following is the letter answer:\n"""
def get_answer_chatgpt(question_json):
    """Ask gpt-3.5-turbo for the letter answer to one question (temperature 0,
    at most 2 tokens) and return the stripped reply text."""
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "user",
                "content": json_to_prompt3(question_json)
            }
        ],
        temperature=0.0,
        n=1,
        max_tokens=2,
    )
    return response["choices"][0]["message"]["content"].strip()
def get_answer_davinci(question_json):
    """Ask the davinci completion model for an answer and return the first
    character left after stripping the prompt's boilerplate phrases."""
    response = openai.Completion.create(
        model="davinci",
        prompt=json_to_prompt3(question_json),
        temperature=0.1,
        n=1,
        max_tokens=26,
    )
    text = response["choices"][0]["text"].strip()
    text = text.replace("The correct answer is", "")
    text = text.replace("The following is the letter answer:", "")
    return text.strip()[0]
def letter_to_number(letter):
    """Map an answer letter 'A'..'E' to its zero-based index.

    Raises KeyError for anything else, like the original literal mapping.
    """
    return {ch: idx for idx, ch in enumerate("ABCDE")}[letter]
def do_test(test_number, part_number, get_answer=get_answer_chatgpt, intermediate_print=False):
    """Run one part of a practice test and return (correct_count, percent).

    Finds the question file and its matching answers file for the given part
    under ``lsat/practice_test_<test_number>/``, asks ``get_answer`` for each
    question and grades the results against the expected answers.
    """
    fs = os.listdir(f"lsat/practice_test_{test_number}/")
    question, answer = None, None
    # Pick out the question file and the answers file for this part.
    for f in fs:
        if f"part_{part_number}" not in f:
            continue
        if 'answers' in f:
            answer = f
        else:
            question = f
    questions = get_json(f"lsat/practice_test_{test_number}/{question}")
    expected_answers = get_json(f"lsat/practice_test_{test_number}/{answer}")
    actual_answers = {}
    # NOTE: ``question`` and ``answer`` are reused as loop variables below.
    for question in questions:
        answer = get_answer(question)
        if answer not in ['A', 'B', 'C', 'D', 'E']:
            # Model reply was unusable; fall back to a random guess.
            print("random guess because we got", answer)
            answer = random.choice(['A', 'B', 'C', 'D', 'E'])
        actual_answers[question['question_number']] = answer
    total_correct = 0
    for i, (key, actual_answer) in enumerate(actual_answers.items()):
        if letter_to_number(actual_answer) == expected_answers[key]:
            total_correct += 1
        if intermediate_print:
            print(key, letter_to_number(actual_answer), expected_answers[key], round(total_correct / (i+1) * 100, 2))
    # NOTE(review): relies on ``i`` leaking from the loop above — raises
    # NameError when there were no questions; confirm inputs are non-empty.
    return total_correct, round(total_correct / (i+1) * 100, 2)
if __name__ == '__main__':
    # Run all four sections of practice test 2 and report per-part accuracy.
    for test in ["2"]:
        for part in ["one", "two", "three", "four"]:
            total_correct, percent = do_test(test, part)
            print(f"Test {test}, part {part}, Got {total_correct} right, {percent}%")
| kennethgoodman/llm_take_tests | lsat/chat_gpt_takes_lsat.py | chat_gpt_takes_lsat.py | py | 3,382 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "openai.ChatCompletion.create",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "openai.ChatCompletion",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "op... |
23259752199 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from lmfit import Model
import scienceplots
# Element symbols and the emission-line energies used as initial Gaussian
# centres (keV; presumably K-alpha lines — TODO confirm).  mpos/Mpos are the
# start/end indices of each spectrum's fit window (Mpos are negative
# end-relative indices).
elements=['Al','Mo','Ni','Ti','Zn']
alphas=[1.486,17.480,7.480,4.512,8.637]
mpos=[200,1600,800,500,900]
Mpos=[-3800,-2100,-3200,-3525,-3100]
# Fitted peak widths (sigma) and their uncertainties, filled per element below.
resolutions=[]
res_unc=[]
def gaussian(x, amp, cen, sig):
    """Gaussian of amplitude ``amp``, centre ``cen`` and width ``sig``."""
    exponent = -((x - cen) ** 2) / (2 * sig ** 2)
    return amp * np.exp(exponent)
# Shared Gaussian model: amp/sig get bounded starting values; the centre is
# re-seeded per element inside the loop below.
fit_mod=Model(gaussian)
params_gauss=fit_mod.make_params()
params_gauss['amp'].set(min=0,value=10000)
params_gauss['sig'].set(min=0,value=0.01,max=10)
print(params_gauss.pretty_print())
for i in range(len(elements)):
    # Column 0 is energy, column 1 counts; only the mpos:Mpos window is fit.
    df=np.array(pd.read_csv('res_spec/'+elements[i]+'.csv'))
    params_gauss['cen'].set(value=alphas[i])
    result=fit_mod.fit(df[mpos[i]:Mpos[i],1],params=params_gauss,x=df[mpos[i]:Mpos[i],0])
    print(result.fit_report())
    fig,ax=plt.subplots(1,1)
    ax.plot(df[mpos[i]:Mpos[i],0],result.best_fit,label='Fit')
    ax.plot(df[mpos[i]:Mpos[i],0],df[mpos[i]:Mpos[i],1],label='Data')
    # 2.35 ~ 2*sqrt(2*ln 2): converts sigma to FWHM for the figure title.
    fig.suptitle(elements[i]+' '+str(result.params['sig']*2.35))
    resolutions.append(result.params['sig'].value)
    res_unc.append(result.params['sig'].stderr)
    ax.legend()
plt.style.use(['science','nature'])
fig,ax=plt.subplots(1,1)
def linear_res(x, dec, inter):
    """Straight line: slope ``dec`` times ``x`` plus intercept ``inter``."""
    slope_term = dec * x
    return slope_term + inter
# Fit a straight line to fitted resolution (sigma) versus line energy.
lin_Mod=Model(linear_res)
param_lin = lin_Mod.make_params()
print(param_lin.pretty_print())
param_lin['dec'].set(min=0,value=0.5)
param_lin['inter'].set(value=0)
result =lin_Mod.fit(resolutions,params=param_lin,x=alphas)
print(result.fit_report())
ax.plot(alphas,result.best_fit)
ax.plot(alphas,resolutions,'.')
plt.show()
{
"api_name": "numpy.exp",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "lmfit.Model",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number"... |
4911617378 | import json
from django.core.management.base import BaseCommand
from domain.policies.models import Policy
class Command(BaseCommand):
    """Management command that seeds the database from ``seed.json``."""

    help = "seeds the database with default data from a JSON file"

    def handle(self, *args, **options):
        with open("seed.json", "r") as json_file:
            seed = json.load(json_file)

        # One Policy row per entry; each entry is a kwargs dict for create().
        for policy_kwargs in seed.get("policies", []):
            Policy.objects.create(**policy_kwargs)

        self.stdout.write(self.style.SUCCESS("successfully seeded the database"))
| antoniopataro/decision-engine | config_backend/api/management/commands/seed.py | seed.py | py | 548 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "domain.policies.models.Policy.objects.create",
"line_number": 16,
"usage_type": "call"
... |
74934217314 | import logging
from datetime import timedelta
from typing import Optional
_LOGGER = logging.getLogger(__name__)
class WorkInterval:
    """Splits a duty cycle of ``duration`` into fixed-size ticks and decides,
    per tick, whether the radiator should be heating.

    All durations are converted to tick counts using
    ``tick_duration.total_seconds()``.  The original used ``.seconds``, which
    drops the days component and truncates sub-second ticks (a one-day tick
    became 0 and crashed with ZeroDivisionError).
    """

    def __init__(self, duration: timedelta, minimum: timedelta, maximum: timedelta,
                 warmup: Optional[timedelta], tick_duration: timedelta):
        # total_seconds() (not .seconds) so day-scale and fractional-second
        # tick durations convert correctly.
        self._tick_duration = tick_duration.total_seconds()
        self._total_cycles = round(duration.total_seconds() / self._tick_duration)
        self._minimum_work_cycles = round(minimum.total_seconds() / self._tick_duration)
        self._warmup_cycles = 0 if warmup is None else round(warmup.total_seconds() / self._tick_duration)
        if maximum is None:
            # No cap configured: allow working through the whole interval.
            self._maximum_work_cycles = self._total_cycles
        else:
            self._maximum_work_cycles = min(round(maximum.total_seconds() / self._tick_duration), self._total_cycles)

    def should_work(self, tick: int, deviation: float, should_warmup: bool):
        """Return True when tick ``tick`` should heat.

        ``deviation`` is the demand as a fraction of the maximum work window;
        the resulting cycle count is clamped into [minimum, maximum].
        ``should_warmup`` extends the window by the warmup cycles.
        """
        warmup = (self._warmup_cycles if should_warmup else 0)
        if deviation > 0 and tick < self._maximum_work_cycles + warmup and tick < self._total_cycles:
            calculate_cycles = round(self._maximum_work_cycles * deviation)
            limited = min(max(calculate_cycles, self._minimum_work_cycles), self._maximum_work_cycles)
            return tick < limited + warmup
        else:
            return False

    def should_restart(self, tick):
        """True once ``tick`` has run past the end of the interval."""
        return not tick < self._total_cycles

    def __repr__(self):
        return f"WorkInterval(" \
               f"tick_duration = {self._tick_duration}, " \
               f"_total_cycles = {self._total_cycles}, " \
               f"_minimum_work_cycles = {self._minimum_work_cycles}) "
| yanoosh/home-assistant-heating-radiator | custom_components/heating_radiator/WorkInterval.py | WorkInterval.py | py | 1,660 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 9,
"usage_type": "name"
}
] |
17436198272 | from flask import Flask, request, render_template
app = Flask(__name__)
## Q1. Create a Flask application that displays "Hello, World!" on the homepage.
@app.route("/")
def index():
    """Homepage: return the plain-text greeting."""
    message = "Hello World"
    return message
## Q2. Write a Flask route that takes a name parameter and returns "Hello, [name]!" as plain text.
@app.route("/query")
def input_function():
    """Greet the caller using the ``x`` query-string parameter (?x=name)."""
    visitor = request.args.get('x')
    return "Hello {}".format(visitor)
## Q3. Write a Flask route that takes a number parameter and returns the square of that number as plain text.
@app.route("/square/<int:num>")
def square(num):
    """Return ``num`` squared, rendered as text."""
    squared = num * num
    return str(squared)
## Q4. Write a Flask route that displays a simple HTML form that asks for a name and returns "Hello, [name]!" when submitted.
@app.route('/formname', methods = ['GET', 'POST'])
def names():
    # NOTE(review): a GET request falls through and implicitly returns None,
    # which Flask reports as an error — the form page itself is never
    # rendered here; confirm where the form template is served from.
    if request.method == "POST":
        name = request.form['clientname']
        ourname = name
        return render_template('displayname.html', name=ourname)
if __name__=="__main__":
    # 0.0.0.0 binds every interface — convenient for a demo, not production.
    app.run(host='0.0.0.0')
| abhisunny2610/Data-Science | Python Practice Set/Practice Solution 11/app.py | app.py | py | 1,022 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
15212267498 | import pandas_profiling
from pathlib import Path
import glob
import argparse
import matplotlib.pyplot as plt
import pandas as pd
import os.path as osp
import xml.etree.ElementTree as ET
import numpy as np
from collections import Counter
# Column order for the rows produced by parse_single_voc and the output CSV.
title =['filename',
        'img_width',
        'img_height',
        'img_depth',
        'bbox_width',
        'bbox_height',
        'label',
        'xmin',
        'ymin',
        'xmax',
        'ymax',
        'ignore',
        'difficult']
def parse_args():
    """Parse CLI arguments: a positional input glob and an optional --output."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--output')
    return parser.parse_args()
def parse_single_voc(ann_file, root=None, min_size=None):
    """Parse one Pascal-VOC XML annotation file into a list of rows.

    Row fields follow the module-level ``title`` column order.  Objects whose
    box is smaller than ``min_size`` on either side are kept but flagged with
    ``ignore=True``.  The ``root`` parameter is currently unused (the join
    below stays commented out) and is rebound to the XML root element.
    """
    # if root is not None:
    #     ann_file = osp.join(root, ann_file)
    tree = ET.parse(ann_file)
    root = tree.getroot()
    filename = root.find('filename').text
    size = root.find('size')
    width = int(size.find('width').text)
    height = int(size.find('height').text)
    depth = int(size.find('depth').text)
    table = []
    for obj in root.findall('object'):
        name = obj.find('name').text
        difficult = int(obj.find('difficult').text)
        bnd_box = obj.find('bndbox')
        bbox = [
            int(bnd_box.find('xmin').text),
            int(bnd_box.find('ymin').text),
            int(bnd_box.find('xmax').text),
            int(bnd_box.find('ymax').text)
        ]
        ignore = False
        w = bbox[2] - bbox[0]
        h = bbox[3] - bbox[1]
        if min_size and (w < min_size or h < min_size):
            ignore = True
        row = {
            'filename': filename,
            'img_width': width,
            'img_height': height,
            'img_depth': depth,
            'bbox_width': w,
            'bbox_height':h,
            'label': name,
            'xmin': bbox[0],
            'ymin': bbox[1],
            'xmax': bbox[2],
            'ymax': bbox[3],
            'ignore': ignore,
            'difficult': difficult
        }
        # Dicts preserve insertion order, so values() lines up with ``title``.
        table.append(list(row.values()))
    return table
def analyze(table: pd.DataFrame, export):
    """Render a pandas-profiling HTML report of ``table`` into ``export``."""
    # table.plot.scatter(x='img_width', y='img_height', alpha=0.25)
    # table.plot.scatter(x='bbox_width', y='bbox_height', alpha=0.25)
    #
    # table.plot.scatter(x='xmin', y='ymin', alpha=0.25)
    # table.plot.scatter(x='xmax', y='ymax', alpha=0.25)
    # plt.show()
    pfr = pandas_profiling.ProfileReport(table)
    pfr.to_file(export)
def main():
    """Collect all VOC annotations matched by the input glob, dump them to a
    CSV beside the input folder and write a pandas-profiling HTML report."""
    args = parse_args()
    table = []
    # build the dataloader
    # Both outputs land two levels above the glob path (../analyze.csv/.html).
    # NOTE(review): the --output CLI flag is parsed but never used here.
    output = str(Path(args.input).parent.parent / 'analyze.csv')
    output_html = str(Path(args.input).parent.parent / 'analyze.html')
    for ann_folder in glob.glob(args.input):
        rows = parse_single_voc(str(ann_folder))
        table.extend(rows)
    table = np.asarray(table)
    table = pd.DataFrame(table, columns=title)
    # print(table.head())
    table.to_csv(output, index=False)
    # Round-trip through CSV so column dtypes are re-inferred before profiling.
    table = pd.read_csv(output)
    analyze(table, output_html)
if __name__ == '__main__':
    main()
| fanqie03/mmdetection.bak | tools/analyze_voc.py | analyze_voc.py | py | 3,061 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 42,
"usage_type": "name"
},
{
"api_nam... |
29867234262 | import pytest
import requests
import json
def test_product():
    """POST a sku-id list to the product endpoint and pretty-print the JSON
    response (non-ASCII left unescaped)."""
    url = 'http://commdity-develop.kapeixi.cn/product/PPI1001001'
    headers = {"content-type": "application/json"}
    payload = {'skuIdList': [773, 778, 788]}
    response = requests.post(url, json=payload, headers=headers)
    print(json.dumps(response.json(), indent=2, ensure_ascii=False))
if __name__ == '__main__':
    # Run this module through pytest when executed directly.
    pytest.main(["-vs","test_api.py"])
{
"api_name": "requests.post",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.main",
"line_number": 15,
"usage_type": "call"
}
] |
73033974433 | # -*- coding: utf-8 -*-
'''
Management of PostgreSQL extensions (e.g.: postgis)
===================================================
The postgres_extensions module is used to create and manage Postgres extensions.
.. code-block:: yaml
adminpack:
postgres_extension.present
.. versionadded:: 2014.7.0
'''
from __future__ import absolute_import
# Import Python libs
import logging
# Import salt libs
from salt.modules import postgres
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if the postgres module is present
    '''
    # ``__salt__`` is injected by the Salt loader; the key exists only when
    # the postgres execution module loaded successfully.
    return 'postgres.create_extension' in __salt__
def present(name,
            if_not_exists=None,
            schema=None,
            ext_version=None,
            from_version=None,
            user=None,
            maintenance_db=None,
            db_password=None,
            db_host=None,
            db_port=None,
            db_user=None):
    '''
    Ensure that the named extension is present with the specified privileges
    name
        The name of the extension to manage
    if_not_exists
        Add a if_not_exists switch to the ddl statement
    schema
        Schema to install the extension into
    from_version
        Old extension version if already installed
    ext_version
        version to install
    user
        System user all operations should be performed on behalf of
    maintenance_db
        Database to act on
    db_user
        database username if different from config or default
    db_password
        user password if any password for a specified user
    db_host
        Database host if different from config or default
    db_port
        Database port if different from config or default
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Extension {0} is already present'.format(name)}
    # Connection/run parameters forwarded to every postgres module call.
    db_args = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    # check if extension exists
    mode = 'create'
    # create_metadata returns a set of flags describing what (if anything)
    # must change to reach the requested schema/version.
    mtdata = __salt__['postgres.create_metadata'](
        name,
        schema=schema,
        ext_version=ext_version,
        **db_args)
    # The extension is not present, install it!
    toinstall = postgres._EXTENSION_NOT_INSTALLED in mtdata
    if toinstall:
        mode = 'install'
    toupgrade = False
    if postgres._EXTENSION_INSTALLED in mtdata:
        # Already installed: an ALTER EXTENSION is needed if it has to be
        # moved to another schema or bumped to a newer version.
        for flag in [
            postgres._EXTENSION_TO_MOVE,
            postgres._EXTENSION_TO_UPGRADE
        ]:
            if flag in mtdata:
                toupgrade = True
                mode = 'upgrade'
    if __opts__['test']:
        # Dry-run mode: report the pending action without touching the DB.
        ret['result'] = None
        if mode:
            # 'create'/'upgrade' + 'ed' doubles the 'e'; the replace() turns
            # 'createed'/'upgradeed' into 'created'/'upgraded'.
            ret['comment'] = 'Extension {0} is set to be {1}ed'.format(
                name, mode).replace('eed', 'ed')
        return ret
    cret = None
    if toinstall or toupgrade:
        cret = __salt__['postgres.create_extension'](
            name=name,
            if_not_exists=if_not_exists,
            schema=schema,
            ext_version=ext_version,
            from_version=from_version,
            **db_args)
    if cret:
        if mode.endswith('e'):
            suffix = 'd'
        else:
            suffix = 'ed'
        ret['comment'] = 'The extension {0} has been {1}{2}'.format(name, mode, suffix)
    elif cret is not None:
        # create_extension returned a falsey non-None value: the DDL failed.
        ret['comment'] = 'Failed to {1} extension {0}'.format(name, mode)
        ret['result'] = False
    else:
        # cret is None: nothing needed doing; keep the default comment.
        ret['result'] = True
    return ret
def absent(name,
           if_exists=None,
           restrict=None,
           cascade=None,
           user=None,
           maintenance_db=None,
           db_password=None,
           db_host=None,
           db_port=None,
           db_user=None):
    '''
    Ensure that the named extension is absent
    name
        Extension name of the extension to remove
    cascade
        Drop on cascade
    if_exists
        Add if exist slug
    restrict
        Add restrict slug
    maintenance_db
        Database to act on
    user
        System user all operations should be performed on behalf of
    db_user
        database username if different from config or default
    db_password
        user password if any password for a specified user
    db_host
        Database host if different from config or default
    db_port
        Database port if different from config or default
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Connection/run parameters forwarded to every postgres module call.
    db_args = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    # check if extension exists and remove it
    exists = __salt__['postgres.is_installed_extension'](name, **db_args)
    if exists:
        if __opts__['test']:
            # Dry-run mode: report without dropping anything.
            ret['result'] = None
            ret['comment'] = 'Extension {0} is set to be removed'.format(name)
            return ret
        if __salt__['postgres.drop_extension'](name,
                                               if_exists=if_exists,
                                               restrict=restrict,
                                               cascade=cascade,
                                               **db_args):
            ret['comment'] = 'Extension {0} has been removed'.format(name)
            ret['changes'][name] = 'Absent'
            return ret
        else:
            ret['result'] = False
            ret['comment'] = 'Extension {0} failed to be removed'.format(name)
            return ret
    else:
        # Already absent: nothing to do.
        ret['comment'] = 'Extension {0} is not present, so it cannot ' \
                         'be removed'.format(name)
    return ret
| shineforever/ops | salt/salt/states/postgres_extension.py | postgres_extension.py | py | 5,852 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "salt.modules.postgres._EXTENSION_NOT_INSTALLED",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "salt.modules.postgres",
"line_number": 102,
"usage_type": "name"
... |
32244769321 | from tkinter import *
import tkinter as tk
from tkinter import ttk
import tkinter.messagebox as messagebox
import sqlite3
from PIL import Image,ImageTk
from OperationUI.OperationCommandGUI import *
from OperationUI.Colors import *
if __name__ == "__main__":
    # Create the main window:
    root.geometry("1440x826")
    # NOTE(review): "mangament" is a typo for "management" in this runtime
    # string (left unchanged in this documentation-only pass).
    app_label = Label(root, text= "FUIYOOHAYA mobile phone shop mangament", background="#FFCAD4", font=("Comic Sans MS", 26, "bold"))
    app_label.grid(row=0, column=1, padx=10, pady=20)
    root.config(background="#FFCAD4")
    root.grid_columnconfigure((0,2), weight=1)
    root.grid_rowconfigure((0, 11), weight=1)
    image = Image.open("./Image/mobile_store.png")
    # NOTE(review): assigning ``geometry`` on a PIL Image is a no-op —
    # presumably a resize to 800x750 was intended; confirm and use resize().
    image.geometry = "800x750"
    # Keeping ``image_logo`` in a variable prevents the PhotoImage from
    # being garbage-collected (which would blank the label).
    image_logo = ImageTk.PhotoImage(image)
    image_label = tk.Label(image=image_logo, background="#FFCAD4")
    image_label.place(x=400,y=220)
    """Product buttons"""
    # Left column: phone-inventory operations.
    add_phone_button = Button(
        root,
        text = "Add Phone",
        command = ProductOperation.open_add_phone_window,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    add_phone_button.grid(row=4, column=0, pady=10)
    remove_button = Button(
        root,
        text = "Remove Phone",
        command = ProductOperation.remove_phone,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    remove_button.grid(row=5, column=0, pady=10)
    search_button = Button(
        root,
        text = "Search Phone",
        command = ProductOperation.search_phone,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    search_button.grid(row=6, column=0, pady=10)
    restock_button = Button(
        root,
        text = "Restock Phone",
        command = ProductOperation.restock_phone,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white", font = ("VNI-Vari", 12, "bold"))
    restock_button.grid(row=7, column=0, pady=10)
    list_button = Button(
        root,
        text = "List Phones",
        command = ProductOperation.list_phones,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    list_button.grid(row=8, column=0, pady=10)
    """Customer buttons"""
    # Right column: customer-record operations.
    add_customer_button = Button(
        root,
        text = "Add Customer",
        command = CustomerOperation.add_customer,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    add_customer_button.grid(row=4, column=2, pady=10)
    remove_customer_button = Button(
        root,
        text = "Remove Customer",
        command = CustomerOperation.remove_customer,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    remove_customer_button.grid(row=5, column=2,columnspan=1, pady=10)
    search_customer_button = Button(
        root,
        text = "Search Customer",
        command = CustomerOperation.search_customer,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    search_customer_button.grid(row=6, column=2, pady=10)
    edit_customer_button = Button(
        root,
        text = "Edit Customer",
        command = CustomerOperation.edit_customer,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    edit_customer_button.grid(row=7, column=2, pady=10)
    list_customer_button = Button(
        root,
        text = "List Customers",
        command = CustomerOperation.list_customer,
        bg = MINUS_PINK,
        borderwidth = 3,
        fg = "white",
        font = ("VNI-Vari", 12, "bold"))
    list_customer_button.grid(row=8, column=2, pady=10)
    """EXIT button"""
    exit_button = Button(
        root,
        text = "Exit Program",
        command = Exit.exit_program,
        bg = RED,
        fg = "white",
        borderwidth = 3,
        font = ("VNI-Vari", 12, "bold"))
    exit_button.grid(row=10, column=1,columnspan=1, pady=10)
    # Run the main loop:
    root.mainloop()
    # Close the database connection when the program is done:
    conn.close()
{
"api_name": "PIL.Image.open",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"li... |
1024252645 | import csv
import mysql.connector
import argparse
from matplotlib import pyplot as plt
def query(sql, cursor):
    """Execute *sql* on *cursor* and return every result row as a list."""
    cursor.execute(sql)
    # fetchall() drains the cursor exactly like the original fetchone() loop.
    return list(cursor.fetchall())
def query_result_to_parrellel_list(query_result):
    """Split (word, freq) pairs into two parallel lists ``(words, freqs)``."""
    pairs = list(query_result)
    words = [word for word, _ in pairs]
    freqs = [freq for _, freq in pairs]
    return words, freqs
def main():
    """Plot the top spam and ham word frequencies (capped below 6000)."""
    # NOTE(review): database credentials are hard-coded and committed to the
    # repo — move them to environment variables and rotate the password.
    mydb = mysql.connector.connect(
        host="35.226.180.173",
        user="root",
        password="Password1!",
        database="final",
    )
    cursor = mydb.cursor()
    # Top-10 words under the 6000-occurrence cap, most frequent first.
    top_spam_sql = f"SELECT * from spam where frequency < 6000 order by frequency desc limit 10"
    top_ham_sql = f"select * from ham where frequency < 6000 order by frequency desc limit 10"
    top_spam = query(top_spam_sql, cursor)
    top_ham = query(top_ham_sql, cursor)
    words, freqs = query_result_to_parrellel_list(top_spam)
    plt.figure(1)
    plt.title("Spam < 6000")
    plt.bar(words, freqs)
    words, freqs = query_result_to_parrellel_list(top_ham)
    plt.figure(2)
    plt.title("Ham < 6000")
    plt.bar(words, freqs)
    plt.show()
if __name__ == '__main__':
    main()
| dmaahs2017/Se413-final | graph_datalake_data.py | graph_datalake_data.py | py | 1,276 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 26,
"usage_type": "name"
},
{
"... |
3090309836 | #!/usr/bin/python
"""
Script used to connect to the edX MongoDB produce a file with the course
content nicely printed to it.
"""
import argparse
import json
import os
import re
def is_id(string):
    """Return True when *string* is exactly 32 hex characters (a UUID-style id).

    ``re.match`` anchors the start and ``\\Z`` the very end (unlike ``$`` it
    rejects a trailing newline); ``bool()`` collapses the Match-or-None result,
    replacing the original if/return-True/return-False chain.  A raw string is
    used for the pattern and the module-level regex cache makes the per-call
    ``re.compile`` of the original unnecessary.
    """
    return bool(re.match(r'[0-9a-f]{32}\Z', string, re.I))
def parse_id(string):
    """Return the trailing path component of *string* (the block's UUID)."""
    # rpartition returns ('', '', string) when there is no '/', matching the
    # original split('/')[-1] behavior.
    return string.rpartition('/')[-1]
def customize_discussion(course_data, block_data):
    """Name a Discussion block from its metadata, falling back to the
    category and finally to the literal 'Discussion' when the name is empty."""
    meta = course_data.get('metadata', {})
    if 'discussion_target' in meta:
        block_data['name'] = meta['discussion_target']
    elif 'discussion_category' in meta:
        block_data['name'] = meta['discussion_category']
    elif not block_data['name']:
        block_data['name'] = 'Discussion'
def customize_html(course_data, block_data):
    """Replace an empty or raw-UUID name on an HTML block with 'HTML Page'."""
    current = block_data['name']
    if not current or is_id(current):
        block_data['name'] = 'HTML Page'
def customize_openassessment(course_data, block_data):
    """Replace a raw-UUID name on an Open Assessment block."""
    current = block_data['name']
    if is_id(current):
        block_data['name'] = 'Open Assessment'
def customize_problem(course_data, block_data):
    """Name an unnamed Problem block and copy its markdown source.

    ``block_data['markdown']`` is set to None when the metadata carries no
    markdown entry.
    """
    if not block_data['name']:
        block_data['name'] = 'Problem'
    try:
        block_data['markdown'] = course_data['metadata']['markdown']
    except KeyError:
        # Narrowed from a bare ``except``: only a missing key means
        # "no markdown"; any other error should surface.
        block_data['markdown'] = None
def customize_video(course_data, block_data):
    """Copy video metadata (youtube id, start/end time) onto the block.

    Missing entries become None — the same outcome as the original three
    try/except-KeyError blocks, expressed as dict.get lookups.
    """
    meta = course_data.get('metadata', {})
    block_data['youtube_id'] = meta.get('youtube_id_1_0')
    block_data['start_time'] = meta.get('start_time')
    block_data['end_time'] = meta.get('end_time')
def customize_by_type(course_data, block_data):
    """Master customizer: dispatch to the per-type customizer, if any.

    Unknown block types are left untouched, as before.
    """
    handlers = {
        'discussion': customize_discussion,
        'html': customize_html,
        'openassessment': customize_openassessment,
        'problem': customize_problem,
        'video': customize_video,
    }
    handler = handlers.get(block_data['type'])
    if handler is not None:
        handler(course_data, block_data)
def add_children(course_data, block_data):
    """Attach the child-descriptor list ({child_id, child_type}) to the block;
    child_type is filled in elsewhere."""
    block_data['children'] = [
        {'child_id': parse_id(child), 'child_type': None}
        for child in course_data['children']
    ]
def build_course_map(course_content):
    """Parse out the data for each block.

    Returns one dict per course item carrying id, type, display name
    (post-processed per block type) and the child-descriptor list.
    """
    course_blocks = []
    for key, course_data in course_content.items():
        block_data = {}
        block_data['id'] = parse_id(key)
        block_data['type'] = course_data['category']
        try:
            block_data['name'] = course_data['metadata']['display_name']
        except KeyError:
            # Fall back to the raw id; the customizers may replace it below.
            block_data['name'] = block_data['id']
        customize_by_type(course_data, block_data)
        add_children(course_data, block_data)
        course_blocks.append(block_data)
    return course_blocks
def main(filename):
"""Print each published couse content to a file."""
with open(filename) as json_file:
data = json.load(json_file)
course_dict = {}
course_dict['course_id'] = str(os.path.split(filename.strip('/'))[-1])
course_dict['blocks'] = build_course_map(data)
filename = '%s' % course_dict['course_id']
filepath = os.path.join('../input/', filename)
with open(filepath, 'w') as outfile:
json.dump(course_dict, outfile, indent=4)
if __name__ == "__main__":
PARSER = argparse.ArgumentParser()
PARSER.add_argument('filename', help='JSON file to parse.')
ARGS = PARSER.parse_args()
main(ARGS.filename)
| powersj/ocv | src/edx_course_json.py | edx_course_json.py | py | 4,356 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 13... |
1473194817 | import random
import math
import string
from django.shortcuts import render,HttpResponseRedirect, HttpResponse
from main.models import *
def home(request):
    """Render the employee dashboard landing page."""
    return render(request, "Employee/home.html")
def approval(request):
    """List all enrollments still awaiting an approval decision."""
    enrollments = Enrollment.objects.filter(status="pending")
    return render(request, "Employee/approval.html", {"enrolls":enrollments})
def approval_details(request, id):
    """Show the detail page for enrollment *id*.

    NOTE(review): filter(pk=id).last() yields None for an unknown id;
    the template must tolerate a None enrollment -- confirm.
    """
    enroll = Enrollment.objects.filter(pk=id).last()
    return render(request, "Employee/approval_details.html", {"enrollment":enroll})
def reject(request, id):
    """Mark enrollment *id* as rejected, then return to the approval list."""
    enroll = Enrollment.objects.filter(pk=id).last()
    # filter(...).last() returns None when no row matches; guard against
    # an AttributeError (HTTP 500) on a stale or invalid id.
    if enroll is not None:
        enroll.status = "rejected"
        enroll.save()
    return HttpResponseRedirect("/employee_dashboard/approval")
def approve(request, id):
    """Mark enrollment *id* as approved, then return to the approval list."""
    enroll = Enrollment.objects.filter(pk=id).last()
    # filter(...).last() returns None when no row matches; guard against
    # an AttributeError (HTTP 500) on a stale or invalid id.
    if enroll is not None:
        enroll.status = "approved"
        enroll.save()
    return HttpResponseRedirect("/employee_dashboard/approval")
def dealers(request):
    """List the dealers created by the signed-in employee."""
    current_employee = Employee.objects.filter(user=request.user).last()
    dealer_qs = Dealer.objects.filter(employee=current_employee)
    return render(request, "Employee/dealers.html", {"dealers": dealer_qs})
def create_dealer(request):
    """Create a dealer account (User + UserDetails + Dealer) from the form.

    GET renders the form with a freshly generated 6-digit dealer id;
    POST reads applicant, shop, document and bank fields, creates the
    auth user and dealer records, then re-renders with a success flag.
    """
    # 6-digit numeric dealer id.
    # NOTE(review): uniqueness is not checked against existing dealers,
    # so a collision is possible -- confirm Dealer.dealer_id constraints.
    id_created = ''.join(random.choice(string.digits) for _ in range(6))
    if request.method == 'POST':
        # Applicant personal details.
        first_name = request.POST['first_name']
        middle_name = request.POST['middle_name']
        last_name = request.POST['last_name']
        dob = request.POST['dob']
        gender = request.POST['gender']
        father_name = request.POST['father_name']
        marital_status = request.POST['marital_status']
        spouse_name = request.POST['spouse_name']
        shop_name = request.POST['shop_name']
        shop_location = request.POST['shop_location']
        qualification = request.POST['qualification']
        password = request.POST['password']
        # Uploaded documents (multipart files).
        applicant_photo = request.FILES['applicant_photo']
        shop_photo = request.FILES['shop_photo']
        aadhaar_back = request.FILES['aadhaar_back']
        aadhaar = request.FILES['aadhaar']
        voter_id = request.FILES['voter_id']
        pan_card = request.FILES['pan_card']
        gst = request.FILES['gst']
        bill_book = request.FILES['bill_book']
        # Bank account details.
        ifsc_code = request.POST['ifsc_code']
        account_holder_first_name = request.POST['account_holder_first_name']
        account_holder_last_name = request.POST['account_holder_last_name']
        account_holder_middle_name = request.POST['account_holder_middle_name']
        bank_name = request.POST['bank_name']
        account_number = request.POST['account_number']
        # NOTE(review): the auth username "first last" can collide for
        # people with the same name -- confirm intended behaviour.
        username = first_name + ' ' + last_name
        employee = Employee.objects.filter(user=request.user).last()
        user_created = User.objects.create_user(username=username, password=password)
        user_detail = UserDetails.objects.create(user=user_created, is_dealer=True)
        dealer_created = Dealer.objects.create(dealer_id=id_created, password=password,employee=employee, aadhaar_back=aadhaar_back, user=user_created, first_name=first_name, last_name=last_name, middle_name=middle_name, dob=dob, gender=gender, father_name=father_name, marital_status=marital_status, spouse_name=spouse_name, shop_name=shop_name, shop_location=shop_location, qualification=qualification, applicant_photo=applicant_photo, shop_photo=shop_photo, aadhaar=aadhaar, voter_id=voter_id, pan_card=pan_card, gst=gst, bill_book=bill_book, ifsc_code=ifsc_code, account_holder_first_name=account_holder_first_name, account_holder_last_name=account_holder_last_name, account_holder_middle_name=account_holder_middle_name, account_number=account_number, bank_name=bank_name, created_by="Employee")
        return render(request, "Employee/create_dealer.html", {"success":True})
    return render(request, "Employee/create_dealer.html", {"id":id_created})
def dealer_details(request, id):
    """Show the detail page for dealer *id*.

    NOTE(review): filter(pk=id).last() yields None for an unknown id;
    the template must tolerate a None dealer -- confirm.
    """
    dealer = Dealer.objects.filter(pk=id).last()
    return render(request, "Employee/dealer_details.html", {"dealer":dealer})
{
"api_name": "django.shortcuts.render",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 16,
"usage_type": "call"
},
{
"api_name":... |
23784084308 | # coding=utf-8
from django import forms
from django.urls import reverse
from .models import Ad
from app.models import City, Metro
from categories.models import Category
class SearchForm(forms.Form):
    """Ad search panel: a free-text query plus city, metro and category
    drop-downs whose options are loaded from the database per instance."""

    search_word = forms.CharField(max_length=255, widget=forms.TextInput(attrs={
        'type': 'search',
        'placeholder': 'ะะพะธัะบ ะฟะพ ะพะฑััะฒะปะตะฝะธัะผ...'
    }), required=False)
    city = forms.ChoiceField(widget=forms.Select(attrs={'class': 'city-choice', 'id': 'search_city'}), required=False)
    metro = forms.CharField(widget=forms.Select(), required=False)
    categories = forms.ChoiceField(widget=forms.Select(), required=False)

    def __init__(self, *args, **kwargs):
        super(SearchForm, self).__init__(*args, **kwargs)
        # Build drop-down options at instantiation time so newly added
        # cities/categories appear without a restart.
        town_options = (('', 'ะะพัะพะด'),)
        for town in City.objects.all():
            town_options += ((str(town.id), town.title),)
        category_options = (('', 'ะะฐัะตะณะพัะธั'),)
        for top_level in Category.objects.filter(parent=None):
            category_options += ((str(top_level.id), top_level.title),)
            # Sub-categories are indented under their parent with '--'.
            for child in Category.objects.filter(parent=top_level):
                category_options += ((str(child.id), '--' + child.title),)
        self.fields['categories'].choices = category_options
        self.fields['city'].choices = town_options
        # The JS city widget fetches metro stations from this endpoint.
        self.fields['city'].widget.attrs.update({'data-url': reverse('get_metro_by_city')})
        self.fields['metro'].widget.choices = (('', 'ะะตััะพ'), ('', 'ะัะฑะตัะธัะต ะณะพัะพะด'))
class AdCreationForm(forms.ModelForm):
    """Model form for creating an Ad; select choices are filled per
    instance from the Category and City tables."""

    # Hidden/auxiliary fields driven by the frontend JS uploader.
    images = forms.CharField(widget=forms.TextInput(attrs={'style': 'display: none;'}), required=False)
    location = forms.CharField(widget=forms.HiddenInput, required=False)
    removed_images = forms.CharField(required=False)

    class Meta:
        model = Ad
        fields = ('category', 'metro', 'title', 'price', 'city', 'description', 'phone')

    def __init__(self, *args, **kwargs):
        super(AdCreationForm, self).__init__(*args, **kwargs)
        # Categories: parents first, children indented with '--'.
        category_choices = (('', 'ะัะฑะตัะธัะต ะบะฐัะตะณะพัะธั'),)
        for item in Category.objects.filter(parent=None):
            category_choices += ((str(item.id), item.title),)
            for sub in Category.objects.filter(parent=item):
                category_choices += ((str(sub.id), '--' + sub.title),)
        # Metro stays a placeholder until a city is picked client-side.
        metro_choices = (('', 'ะะตััะพ'), ('', 'ะัะฑะตัะธัะต ัะฝะฐัะฐะปะฐ ะณะพัะพะด'),)
        city_choices = (('', 'ะัะฑะตัะธัะต ะณะพัะพะด'),)
        for item in City.objects.all():
            city_choices += ((str(item.id), item.title),)
        self.fields['category'].choices = category_choices
        self.fields['metro'].choices = metro_choices
        self.fields['city'].choices = city_choices
        # data-url feeds the JS that reloads metro options per city.
        self.fields['city'].widget.attrs.update({'class': 'city-choice', 'data-url': reverse('get_metro_by_city'), 'id': 'ad_creation_city'})
| asmuratbek/tumar24 | ad_app/forms.py | forms.py | py | 2,916 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.Form",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.for... |
# The app owns its views, so this module maps the URL routes within the
# app itself, keeping the project modular.
# The category URL passes the category_name_slug parameter to its view.
from django.conf.urls import url

from rango import views

# URL namespace for the rango app.
app_name = 'rango'

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^about/$', views.about, name='about'),
    url(r'^add_category/$', views.add_category, name='add_category'),
    url(r'^category/(?P<category_name_slug>[\w\-]+)/$', views.show_category, name='show_category'),
    url(r'^category/(?P<category_name_slug>[\w\-]+)/add_page/$', views.add_page, name='add_page'),
]
| alehpineda/tango_with_django_project | rango/urls.py | urls.py | py | 748 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rango.views.index",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "rango.views",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.conf.u... |
17065761069 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 7 20:13:28 2020
@author: Neha Shinkre
"""
import requests
url = 'http://localhost:5000/predict_api'

# POST a JSON payload to the local prediction endpoint.
r = requests.post(url, json={'Age': 18, 'EstimatedSalary': 9000})

# Response.json is a method and must be called to decode the body; the
# original `print(r.json)` printed the bound-method object instead of
# the prediction result.
print(r.json())
| Nehaprog/IEEE-codersweek | new/request.py | request.py | py | 229 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.post",
"line_number": 10,
"usage_type": "call"
}
] |
6033578794 | import asyncio
"""
WRAPPING COROS INTO TASKS
Wrapping coros into tasks, so that they could be run concurrently
.ensure_future() = .create_task()
"""
async def say_after(delay: int, what: str) -> int:
    """Sleep for *delay* seconds, print *what*, and return the delay."""
    print(f"Sleeping {delay}. Word: {what}")
    await asyncio.sleep(delay)
    print(what)
    return delay
async def main() -> None:
    """Wrap two say_after coroutines in tasks so they run concurrently,
    then await both and print the returned delays."""
    greeting_task = asyncio.create_task(say_after(1, "Hello"))
    subject_task = asyncio.create_task(say_after(1, "World"))
    print("Tasks created")
    # Both tasks are already running; awaiting collects their results.
    first_delay = await greeting_task
    second_delay = await subject_task
    print(first_delay, second_delay)
if __name__ == "__main__":
    # asyncio.run creates the event loop, drives main() to completion,
    # and closes the loop.
    asyncio.run(main())
| EvgeniiTitov/coding-practice | coding_practice/concurrency/asyncio/chapter_presentation/example_2.py | example_2.py | py | 656 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "asyncio.sleep",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "asyncio.create_task",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "asyncio.create_task",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
... |
34559411929 | import os
os.environ['TOKENIZERS_PARALLELISM']='false'
import sys
import torch
import time
import math
import shutil
import pandas as pd
from dataclasses import dataclass
from collections import defaultdict
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from transformers import get_constant_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup
from transformers import AutoTokenizer
from retrieval.loss import InfoNCE
from retrieval.model import Net
from retrieval.trainer import train
from retrieval.utils import setup_system, Logger
from retrieval.evaluate import evaluate_val, evaluate_train
from retrieval.dataset import EqualDatasetTrain, EqualDatasetEval
@dataclass
class Configuration:
    """Hyper-parameters and runtime switches for retrieval training.

    Attributes without a type annotation (e.g. ``proj``, ``pool``,
    ``debug``) are plain class attributes, not dataclass fields.
    """
    #--------------------------------------------------------------------------
    # Candidate transformer backbones:
    #--------------------------------------------------------------------------
    # 'sentence-transformers/LaBSE'
    # 'microsoft/mdeberta-v3-base'
    # 'sentence-transformers/stsb-xlm-r-multilingual'
    # 'sentence-transformers/paraphrase-multilingual-mpnet-base-v2'
    # 'sentence-transformers/xlm-r-100langs-bert-base-nli-mean-tokens'
    #--------------------------------------------------------------------------

    # Transformer (student model)
    transformer: str = 'sentence-transformers/LaBSE'
    pooling: str = 'cls'               # 'mean' | 'cls' | 'pooler'
    hidden_dropout_prob: float = 0.1
    attention_dropout_prob: float = 0.1
    proj = None                        # None | int for lower output dimension
    margin: float = 0.16

    # Reduction of model size
    layers_to_keep = None              # None -> org. model | (1,2,...,11) layers to keep

    # Model destillation
    transformer_teacher: str ='sentence-transformers/LaBSE'
    use_teacher: bool = False          # use destillation
    pooling_teacher: str = 'cls'       # 'mean' | 'cls' | 'pooler'
    proj_teacher = None                # None | int for lower dimension

    # Language sampling (index 0 = original data, others = translations)
    init_pool = 0
    pool = (0,1,2,3)                   # (0,) for only train on original data without translation
    epoch_stop_switching: int = 36     # epochs after which no language switching is used

    # Debugging
    debug = None                       # None/False -> full data | e.g. 10000 for a fast test

    # Training
    seed: int = 42
    epochs: int = 40
    batch_size: int = 512
    mixed_precision: bool = True       # fp16 via autocast + GradScaler
    gradient_accumulation: int = 1
    gradient_checkpointing: bool = False
    verbose: bool = True               # show progressbar
    gpu_ids: tuple = (0,1,2,3)         # GPU ids for training

    # Eval
    eval_every_n_epoch: int = 1
    normalize_features: bool = True
    zero_shot: bool = False            # eval before first epoch

    # Optimizer
    clip_grad = 100.                   # None | float
    decay_exclue_bias: bool = False

    # Loss
    label_smoothing: float = 0.1

    # Learning rate
    lr: float = 0.0002
    scheduler: str = 'polynomial'      # 'polynomial' | 'constant' | None
    warmup_epochs: int = 2
    lr_end: float = 0.00005            # only for 'polynomial'

    # Data
    language: str = 'all'              # 'all' | 'en', 'es', 'pt', 'fr', ...
    fold: int = 0                      # eval on fold x
    train_on_all: bool = False         # train on all data incl. data of fold x
    max_len: int = 96                  # max token length for topic and content

    # Sampling
    max_wrong: int = 128               # limit for sampling of wrong content for specific topic
    custom_sampling: bool = True       # custom shuffle to avoid related content in one batch
    sim_sample: bool = True            # upsample missing and combine hard negatives in batch
    sim_sample_start: int = 1          # if > 1 skip first n epochs for sim_sampling

    # Save folder for model checkpoints
    model_path: str = './checkpoints'

    # Checkpoints to start from
    checkpoint_start = None            # pre-trained checkpoint for the model we train
    checkpoint_teacher = None          # pre-trained checkpoint for the teacher

    # set num_workers to 0 if on Windows
    num_workers: int = 0 if os.name == 'nt' else 4

    # train on GPU if available
    device: str = 'cuda' if torch.cuda.is_available() else 'cpu'

    # cuDNN flags: benchmark for performance, deterministic for repeatability
    cudnn_benchmark: bool = True
    cudnn_deterministic: bool = False
#-----------------------------------------------------------------------------#
# Config                                                                      #
#-----------------------------------------------------------------------------#
# Single shared configuration instance used throughout the script.
config = Configuration()
if __name__ == '__main__':
model_path = '{}/{}/{}'.format(config.model_path,
config.transformer,
time.strftime('%H%M%S'))
if not os.path.exists(model_path):
os.makedirs(model_path)
shutil.copyfile(os.path.basename(__file__), '{}/train.py'.format(model_path))
# Redirect print to both console and log file
sys.stdout = Logger(os.path.join(model_path, 'log.txt'))
setup_system(seed=config.seed,
cudnn_benchmark=config.cudnn_benchmark,
cudnn_deterministic=config.cudnn_deterministic)
#-----------------------------------------------------------------------------#
# Model #
#-----------------------------------------------------------------------------#
print('\n{}[Model: {}]{}'.format(20*'-', config.transformer, 20*'-'))
model = Net(transformer_name=config.transformer,
gradient_checkpointing=config.gradient_checkpointing,
hidden_dropout_prob=config.hidden_dropout_prob,
attention_dropout_prob=config.attention_dropout_prob,
pooling=config.pooling,
projection=config.proj)
print(model.transformer.config)
# load pretrained Checkpoint
if config.checkpoint_start is not None:
print('Start from:', config.checkpoint_start)
model_state_dict = torch.load(config.checkpoint_start)
model.load_state_dict(model_state_dict, strict=True)
#-----------------------------------------------------------------------------#
# Drop Transformer Layers #
#-----------------------------------------------------------------------------#
if config.layers_to_keep is not None:
print('Remove layers from model. Only keep these layers: {}'.format(config.layers_to_keep))
new_layers = torch.nn.ModuleList([layer_module for i, layer_module in enumerate(model.transformer.encoder.layer) if i in config.layers_to_keep])
model.transformer.encoder.layer = new_layers
model.transformer.config.num_hidden_layers = len(config.layers_to_keep)
print('\n{}[Reduced Model: {}]{}'.format(17*'-', config.transformer, 17*'-'))
print(model.transformer.config)
#-----------------------------------------------------------------------------#
# DP and model to device #
#-----------------------------------------------------------------------------#
    # Data parallel: replicate the model across the configured GPUs when
    # more than one is available and requested.
    print('GPUs available:', torch.cuda.device_count())
    if torch.cuda.device_count() > 1 and len(config.gpu_ids) > 1:
        model = torch.nn.DataParallel(model, device_ids=config.gpu_ids)

    # Model to device
    model = model.to(config.device)
#-----------------------------------------------------------------------------#
# Model destillation #
#-----------------------------------------------------------------------------#
# Teacher for destillation
if config.use_teacher:
teacher = Net(transformer_name=config.transformer_teacher,
gradient_checkpointing=False,
hidden_dropout_prob=0.0,
attention_dropout_prob=0.0,
pooling=config.pooling_teacher,
projection=config.proj_teacher)
print('\n{}[Teacher: {}]{}'.format(23*'-', config.transformer , 23*'-'))
print(teacher.transformer.config)
if config.checkpoint_teacher is not None:
print('Load Teacher-Checkpoint:', config.checkpoint_teacher)
model_state_dict = torch.load(config.checkpoint_teacher)
teacher.load_state_dict(model_state_dict, strict=True)
else:
print('You are using a checkpoint for the Teacher-Model that was not trained on that task!!!')
for name, p in teacher.named_parameters():
p.requires_grad = False
if torch.cuda.device_count() > 1 and len(config.gpu_ids) > 1:
teacher = torch.nn.DataParallel(teacher, device_ids=config.gpu_ids)
teacher = teacher.to(config.device)
else:
teacher = None
#-----------------------------------------------------------------------------#
# Tokenizer #
#-----------------------------------------------------------------------------#
tokenizer = AutoTokenizer.from_pretrained(config.transformer)
#-----------------------------------------------------------------------------#
# Data #
#-----------------------------------------------------------------------------#
df_correlations = pd.read_csv('./data/correlations.csv')
topics = df_correlations['topic_id'].values
content = df_correlations['content_ids'].values
# GT dict for eval
gt_dict = dict()
for i in range(len(topics)):
content_tmp = content[i].split(' ')
topic_tmp = topics[i]
gt_dict[topic_tmp] = content_tmp
# split if not train on all data
if config.train_on_all:
df_correlations_train = df_correlations
else:
df_correlations_train = df_correlations[df_correlations['fold'] != config.fold]
if config.debug:
print(f'DEBUG MODE: use only {config.debug} topics for training')
#df_correlations = df_correlations.sample(n=config.debug)
topics = df_correlations_train['topic_id'].values
content = df_correlations_train['content_ids'].values
content2topic = defaultdict(set)
for i in range(len(topics)):
content_tmp = content[i].split(' ')
topic_tmp = topics[i]
for c in content_tmp:
content2topic[c].add(topic_tmp)
#-----------------------------------------------------------------------------#
# DataLoader #
#-----------------------------------------------------------------------------#
# Train
train_dataset = EqualDatasetTrain(df_correlations=df_correlations_train,
fold=config.fold,
tokenizer=tokenizer,
max_len=config.max_len,
shuffle_batch_size=config.batch_size,
pool=config.pool,
init_pool=config.init_pool,
train_on_all=config.train_on_all,
language=config.language,
debug=config.debug)
train_loader = DataLoader(dataset=train_dataset,
batch_size=config.batch_size,
shuffle=not config.custom_sampling,
num_workers=config.num_workers,
pin_memory=True,
collate_fn=train_dataset.smart_batching_collate
)
print('\nTrain Pairs:', len(train_dataset ))
# Eval
val_dataset_topic = EqualDatasetEval(mode='topic',
typ='val',
fold=config.fold,
tokenizer=tokenizer,
max_len=config.max_len,
pool=config.pool,
init_pool=config.init_pool,
train_on_all=config.train_on_all,
language=config.language,
debug=config.debug)
val_dataset_content = EqualDatasetEval(mode='content',
typ='val',
fold=config.fold,
tokenizer=tokenizer,
max_len=config.max_len,
pool=config.pool,
init_pool=config.init_pool,
train_on_all=config.train_on_all,
language=config.language,
debug=config.debug)
val_loader_topic = DataLoader(dataset=val_dataset_topic,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers,
pin_memory=True,
collate_fn=val_dataset_topic.smart_batching_collate
)
val_loader_content = DataLoader(dataset=val_dataset_content,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers,
pin_memory=True,
collate_fn=val_dataset_content.smart_batching_collate
)
print('\nTopics Val:', len(val_dataset_topic))
print('Content Val:', len(val_dataset_content))
#-----------------------------------------------------------------------------#
# Sim Sample #
#-----------------------------------------------------------------------------#
train_dataset_topic = EqualDatasetEval(mode='topic',
typ='train',
fold=config.fold,
tokenizer=tokenizer,
max_len=config.max_len,
pool=config.pool,
init_pool=config.init_pool,
train_on_all=config.train_on_all,
language=config.language,
debug=config.debug)
train_dataset_content = EqualDatasetEval(mode='content',
typ='train',
fold=config.fold,
tokenizer=tokenizer,
max_len=config.max_len,
pool=config.pool,
init_pool=config.init_pool,
train_on_all=config.train_on_all,
language=config.language,
debug=config.debug)
train_loader_topic = DataLoader(dataset=train_dataset_topic,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers,
pin_memory=True,
collate_fn=train_dataset_topic.smart_batching_collate
)
train_loader_content = DataLoader(dataset=train_dataset_content,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers,
pin_memory=True,
collate_fn=train_dataset_content.smart_batching_collate
)
print('\nTopics Train:', len(train_dataset_topic))
print('Content Train:', len(train_dataset_content))
#-----------------------------------------------------------------------------#
# Loss #
#-----------------------------------------------------------------------------#
loss_fn = torch.nn.CrossEntropyLoss(label_smoothing=config.label_smoothing)
loss_function = InfoNCE(loss_function=loss_fn,
device=config.device,
)
    # GradScaler is only needed for fp16 training; init_scale=2**10
    # starts the loss-scaling window low to avoid early overflows.
    if config.mixed_precision:
        scaler = GradScaler(init_scale=2.**10)
    else:
        scaler = None
#-----------------------------------------------------------------------------#
# optimizer #
#-----------------------------------------------------------------------------#
if config.decay_exclue_bias:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias']
optimizer_parameters = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01,
},
{
'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay': 0.0,
},
]
optimizer = torch.optim.AdamW(optimizer_parameters, lr=config.lr)
else:
optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr)
#-----------------------------------------------------------------------------#
# Scheduler #
#-----------------------------------------------------------------------------#
train_steps = math.floor((len(train_loader) * config.epochs) / config.gradient_accumulation)
warmup_steps = len(train_loader) * config.warmup_epochs
if config.scheduler == 'polynomial':
print('\nScheduler: polynomial - max LR: {} - end LR: {}'.format(config.lr, config.lr_end))
scheduler = get_polynomial_decay_schedule_with_warmup(optimizer,
num_training_steps=train_steps,
lr_end = config.lr_end,
power=1.5,
num_warmup_steps=warmup_steps)
elif config.scheduler == 'constant':
print('\nScheduler: constant - max LR: {}'.format(config.lr))
scheduler = get_constant_schedule_with_warmup(optimizer,
num_warmup_steps=warmup_steps)
else:
scheduler = None
print('Warmup Epochs: {} - Warmup Steps: {}'.format(str(config.warmup_epochs).ljust(2), warmup_steps))
print('Train Epochs: {} - Train Steps: {}'.format(config.epochs, train_steps))
#-----------------------------------------------------------------------------#
# Zero Shot #
#-----------------------------------------------------------------------------#
if config.zero_shot:
print('\n{}[{}]{}'.format(30*'-', 'Zero Shot', 30*'-'))
f2, precision, recall = evaluate_val(config,
model,
reference_dataloader=val_loader_content,
query_dataloader=val_loader_topic,
gt_dict=gt_dict,
cleanup=True)
#-----------------------------------------------------------------------------#
# Shuffle #
#-----------------------------------------------------------------------------#
# Initial values no sim_sampling for first or first n epochs
missing_pairs, topic2wrong = None, None
if config.custom_sampling:
train_loader.dataset.shuffle(missing_list=missing_pairs,
wrong_dict=topic2wrong,
max_wrong=config.max_wrong)
#-----------------------------------------------------------------------------#
# Train #
#-----------------------------------------------------------------------------#
t_train_start = time.time()
start_epoch = 0
best_score = 0
# language switch pool without original position 0
pools = config.pool[1:]
current_pool_pointer = 0
for epoch in range(1, config.epochs+1):
print('\n{}[Epoch: {}]{}'.format(30*'-', epoch, 30*'-'))
train_loss = train(config,
model,
dataloader=train_loader,
loss_function=loss_function,
optimizer=optimizer,
scheduler=scheduler,
scaler=scaler,
teacher=teacher)
print('Epoch: {}, Train Loss = {:.3f}, Lr = {:.6f}'.format(epoch,
train_loss,
optimizer.param_groups[0]['lr']))
print('\n{}[{}]{}'.format(30*'-', 'Evaluate (Val)', 30*'-'))
f2, precision, recall = evaluate_val(config,
model,
reference_dataloader=val_loader_content,
query_dataloader=val_loader_topic,
gt_dict=gt_dict,
cleanup=True)
        # Keep the best checkpoint by validation F2; if the score collapses
        # below 80% of the best, assume training diverged and roll the
        # weights back to the best checkpoint before continuing.
        if f2 > best_score:
            best_score = f2
            best_checkpoint = '{}/weights_e{}_{:.4f}.pth'.format(model_path, epoch, f2)
            # Under DataParallel the real weights live in model.module.
            if torch.cuda.device_count() > 1 and len(config.gpu_ids) > 1:
                torch.save(model.module.state_dict(), best_checkpoint)
            else:
                torch.save(model.state_dict(), best_checkpoint)
        elif f2 < (0.8 * best_score):
            print('Something went wrong:')
            print(f'Resett to: {best_checkpoint} -> and continue training' )
            model_state_dict = torch.load(best_checkpoint)
            if torch.cuda.device_count() > 1 and len(config.gpu_ids) > 1:
                model.module.load_state_dict(model_state_dict, strict=True)
            else:
                model.load_state_dict(model_state_dict, strict=True)
if config.sim_sample:
print('\n{}[{}]{}'.format(30*'-', 'Evaluate (Train)', 30*'-'))
# Set pool for next epoch -> sim sample for that pool
if len(config.pool) > 1:
if epoch < config.epoch_stop_switching:
# come back to original pool 0 every uneven epoch
if epoch % 2 == 0:
next_pool = 0
else:
next_pool = pools[current_pool_pointer % len(pools)]
current_pool_pointer += 1
# set train data for next epoch
train_loader_content.dataset.set_pool(next_pool)
train_loader_topic.dataset.set_pool(next_pool)
train_loader.dataset.set_pool(next_pool)
else:
train_loader_content.dataset.set_pool(0)
train_loader_topic.dataset.set_pool(0)
train_loader.dataset.set_pool(0)
if epoch >= config.sim_sample_start:
missing_pairs, topic2wrong = evaluate_train(config=config,
model=model,
reference_dataloader=train_loader_content,
query_dataloader=train_loader_topic,
gt_dict=gt_dict,
content2topic=train_loader.dataset.content2topic,
cleanup=True)
if config.custom_sampling:
train_loader.dataset.shuffle(missing_list=missing_pairs,
wrong_dict=topic2wrong,
max_wrong=config.max_wrong)
if torch.cuda.device_count() > 1 and len(config.gpu_ids) > 1:
torch.save(model.module.state_dict(), '{}/weights_end.pth'.format(model_path))
else:
torch.save(model.state_dict(), '{}/weights_end.pth'.format(model_path))
| KonradHabel/learning_equality | train.py | train.py | py | 26,975 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "os.name",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
22690451449 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
import json
from .models import Notes
from .api import get_all_list
@login_required
def create_note(request):
    """Create a note for the signed-in user from a JSON POST body.

    Expects ``{"title": ..., "textContent": ...}``; responds with a JSON
    status object, or a plain HttpResponse for malformed requests.
    """
    if request.method == 'POST':
        try:
            note_data = json.loads(request.body.decode('utf-8'))  # Parse the JSON data
        except json.JSONDecodeError:
            return HttpResponse("Invalid JSON data")

        userName = request.user.username
        textContent = note_data.get("textContent")
        title = note_data.get("title")
        # .get returns None when "title" is absent; the original
        # `len(title) == 0` check raised TypeError in that case.
        if not title:
            title = "Untitled Note"

        note = Notes(userName=userName, title=title, textContent=textContent)
        try:
            note.save()
            response_data = {"status": "success", "message": "Note saved to the database"}
        except Exception as e:
            response_data = {"status": "failure", "message": str(e)}
        return JsonResponse(response_data)
    return HttpResponse("Invalid Request")
@login_required
def edit_note(request):
    """Update the text of an existing note from a JSON POST body."""
    if request.method == "POST":
        try:
            edit_note = json.loads(request.body.decode('utf-8')) # Parse the JSON data
        except json.JSONDecodeError:
            return HttpResponse("Invalid JSON data request")
        noteId = edit_note.get('id')
        # NOTE(review): the key here is 'textcontent' while create_note
        # uses 'textContent' -- confirm which one the frontend sends.
        textContent = edit_note.get('textcontent')
        if noteId:
            try:
                note = Notes.objects.get(noteId=noteId)
                note.textContent = textContent
                note.save()
                return JsonResponse({'message': 'Note Updated successfully'})
            except Notes.DoesNotExist:
                return JsonResponse({'error': 'Note not found'}, status=404)
        return JsonResponse({'error': 'Note ID not provided'}, status=400)
@login_required
def delete_note(request):
    """Delete the note named by the "id" field of a JSON DELETE body."""
    if request.method != "DELETE":
        return JsonResponse({'error': 'Not Valid request'}, status=400)
    # Guard clauses: bad JSON, then missing id, then unknown note.
    try:
        target_id = json.loads(request.body.decode('utf-8')).get("id")
    except json.JSONDecodeError:
        return HttpResponse("Invalid JSON data request")
    if not target_id:
        return JsonResponse({'error': 'Note ID not provided'}, status=400)
    try:
        Notes.objects.get(noteId=target_id).delete()
        return JsonResponse({'message': 'Note deleted successfully'})
    except Notes.DoesNotExist:
        return JsonResponse({'error': 'Note not found'}, status=404)
@login_required
def notes_list(request):
    """Render all notes belonging to the signed-in user."""
    username = request.user.username
    return render(request, 'notes.html',{"notes":get_all_list(username)})
{
"api_name": "json.loads",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.... |
33403336012 |
from subprocess import call
import math
# S1 = 500
# S2 = 250
import sys
import numpy as np
import os
from joblib import Parallel, delayed
import multiprocessing
# def run(Para1, Para2, Para3, S2_amp):
def run(Para1, Popul_ID):
    """Launch one population-member simulation via the compiled binary.

    argv layout: executable, the module-level settings (BCL, ISO,
    CaMKII flags) with the member id, then every parameter from Para1.
    stdout is redirected to the module-level log-file handle ``f``.
    """
    argv = ['./main_HAM_Signalling_cvode_new', str(BCL), str(Popul_ID),
            str(ISO), str(CaMKII_inhb), str(CaMKII_db)]
    argv.extend(str(p) for p in Para1)
    call(argv, stdout=f)
# Parameter matrix: one row of model parameters per population member.
Popul_params = np.loadtxt('para_large_sigma.log.20000')
# f2 = open("para_log.dat", "w+")
# Simulation settings read from the command line.
BCL = float(sys.argv[1])
ISO = float(sys.argv[2])
CaMKII_inhb = int(sys.argv[3])
CaMKII_db = int(sys.argv[4])
# run(BCL,"Normal", 0,0,0,0);
# Mode="SimDrug"
# Shared log file that every simulation's stdout is appended to.
f = open("AP.log.dat.20000", "w+")
# sys.stdout = open('file', 'w')
IDs_to_run = list(range(len(Popul_params)))
run_parallel = True #False
# Serial path: run members one by one and archive each output file.
if not run_parallel:
    for i in IDs_to_run: #range(600):
        index = str(i)
        print ('processing ID ' + index)
        run(Popul_params[i], i);
        call('mv HAM_wrap_out.dat AP.BCL.1000.ID.'+index, shell=True);
        call('mv Restart_ICs/ICs.bin Restart_ICs/ICs.bin.'+index, shell=True);
# Parallel path: fan the subprocess launches out over worker threads.
if run_parallel:
    # NOTE(review): worker count is hard-coded to 40, not derived from
    # the CPU count -- confirm this matches the target machine.
    num_cores = 40#multiprocessing.cpu_count() - 2
    results=Parallel(n_jobs=num_cores, prefer="threads")(
        delayed(run)
        (Popul_params[PopulID], PopulID)
        for PopulID in IDs_to_run)
f.close()
{
"api_name": "subprocess.call",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_numb... |
11725823156 | import numpy as np
from PIL import Image
from sys import argv
import side_by_side
L = 256
def histogram(im):
    # Thin wrapper: delegate to side_by_side's per-channel RGB histogram.
    # The caller unpacks (hist_r, accum_r, hist_g, accum_g, hist_b, accum_b).
    return side_by_side.histogram_rgb(im)
def uniform_hist(im):
    """Histogram-equalize an RGB image channel by channel.

    Each channel value v is remapped to int(cdf(v) * L - 0.5), where cdf is
    that channel's cumulative histogram from side_by_side.histogram_rgb.
    Assumes pixel values index the accumulated histograms (0..L-1 range --
    TODO confirm against side_by_side).

    Bug fix: removed the per-pixel debug print() that ran once for every
    pixel and dominated the runtime.

    Args:
        im: HxWx3 RGB array.

    Returns:
        A new array of the same shape with equalized channels.
    """
    histogram_r, accum_r, histogram_g, accum_g, histogram_b, accum_b = histogram(im)
    accums = (accum_r, accum_g, accum_b)

    def remap(pixel):
        # Map one (r, g, b) pixel through the per-channel CDFs.
        return [int(accums[c][pixel[c]] * L - 0.5) for c in range(3)]

    ret = im.copy()
    for i in range(ret.shape[0]):
        for j in range(ret.shape[1]):
            ret[i][j] = remap(ret[i][j])
    return ret
im1 = np.asarray(Image.open(argv[1]).convert('RGB'))
side_by_side.sbys_histogram([im1, uniform_hist(im1)], ['rgb', 'rgb'],argv=argv[2] if len(argv)>2 else None)
| gciruelos/imagenes-practicas | practica2/ej01-b.py | ej01-b.py | py | 759 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "side_by_side.histogram_rgb",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
... |
34813097933 | import requests
from bs4 import BeautifulSoup as bs
import time
import sqlite3
'''
็ฑไบ็ฝ็ซๅๆ่ฎพ็ฝฎ๏ผๆญค่ๆฌไป
่ฝ็ฌๅ้จๅ็ซ ่
Summary:
soup.get_text("|", strip=True) ่ทๅtagๅ
่ฃน็ๅ
ๅฎนๅนถๅป้คๅๅ็็ฉบๆ ผ
a['href'] ่ฟๅaๆ ็ญพไธhrefๅฑๆง็ๅผ
ๅฟซๆท้ฎ๏ผ่พๅ
ฅmainๆฒๅ่ฝฆๅณๅฏๅฟซ้่ฎพ็ฝฎไธปๅฝๆฐ
re.findall()ๅ ไธre.Sๅๆฐๅฏไปฅๅน้
ๅฐๆข่ก็ฌฆ๏ผๅณๆๆข่ก็ฌฆๅ
ๅซ่ฟๅป
for key, value in urlst.items():ๅฏไปฅ่ฟญไปฃๅญๅ
ธ็keyๅvalue
ๅคๆญ่ฏญๅฅl == []็ญไปทไบnot l
'''
def get_html(url):
    """Fetch `url` with a desktop browser UA and return the decoded body."""
    ua = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}
    response = requests.get(url, headers=ua)
    # Let requests guess the charset so Chinese pages decode correctly.
    response.encoding = response.apparent_encoding
    return response.text
def get_urlst():
    """Scrape the book's table of contents.

    Returns:
        dict mapping chapter title -> absolute chapter URL, in page order.
    """
    index_url = 'http://www.xbiquge.la/10/10489/'
    soup = bs(get_html(index_url), 'html.parser')
    base_url = 'http://www.xbiquge.la'
    chapters = {}
    # Every <dd> holds exactly one <a> with the chapter title and a
    # site-relative href.
    for dd in soup.find_all('dd'):
        anchor = dd.find_all('a')[0]
        chapters[anchor.get_text()] = base_url + anchor['href']
    return chapters
def get_content(url):
    """Download one chapter page and return the text of its #content node.

    Falls back to the placeholder string when the node is missing.
    """
    soup = bs(get_html(url), 'html.parser')
    matches = soup.find_all(id='content')
    if not matches:
        return 'ๆๆ '
    return matches[0].get_text()
def save2db(data, dbpath):
    """Insert one chapter into the acrj table.

    Args:
        data: 2-tuple/list of (title, content) strings.
        dbpath: path of the sqlite database file.

    Bug fix: the original built the INSERT by string concatenation, wrapping
    the values in double quotes.  That broke on any text containing a quote
    and was SQL-injectable; a parameterized query fixes both.
    """
    conn = sqlite3.connect(dbpath)
    try:
        cur = conn.cursor()
        cur.execute('insert into acrj(title, content) values(?, ?)',
                    (data[0], data[1]))
        conn.commit()
        cur.close()
    finally:
        # Close the connection even if the insert raises.
        conn.close()
def init_db(dbpath):
    """Create the acrj table if it does not exist yet.

    Bug fix: uses CREATE TABLE IF NOT EXISTS so re-running the scraper
    against an existing database no longer raises OperationalError; also
    closes the cursor, which the original leaked.
    """
    sql = '''
        create table if not exists acrj
        (
            id integer primary key autoincrement,
            title text,
            content text
        )
    '''
    conn = sqlite3.connect(dbpath)
    try:
        cursor = conn.cursor()
        cursor.execute(sql)
        conn.commit()
        cursor.close()
    finally:
        conn.close()
def main():
    # Scrape every chapter of the book and persist it, sleeping 1 s between
    # requests to avoid hammering the site.
    urlst = get_urlst()
    path = 'bqg.db'
    init_db(path)
    for key, value in urlst.items():
        content = get_content(value)
        data = (key, content)
        save2db(data, path)
        time.sleep(1)
        print('%s๏ผๅฎๆ๏ผ' % key)
if __name__ == '__main__':
main()
| mediew/pynote | spyder/biquge/biquge.py | biquge.py | py | 2,674 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
... |
11910443233 | from flask import jsonify, request
from app.models import Clinical_info, Token
from app import db
def deleteClinicalInfo(id):
    '''Delete the clinical info record with the given id.

    Requires an active TOKEN header.  Returns a (json, status) pair for
    every path.

    Bug fixes: the original crashed with AttributeError when the token was
    unknown (``t`` is None), and returned None on a successful delete,
    which makes Flask raise a 500 for the happy path.
    '''
    token = request.headers['TOKEN']
    t = Token.query.filter_by(token=token).first()
    if id is not None:
        # An unknown token (t is None) is treated like an expired one.
        if t is not None and t.status == 'active':
            clinicalInfo = Clinical_info.query.filter_by(id=id).first()
            if clinicalInfo is not None:
                db.session.delete(clinicalInfo)
                db.session.commit()
                return jsonify('Clinical info record deleted'), 200
            else:
                return jsonify('Clinical info record specified does not exist'), 500
        else:
            return jsonify('no token provided or token has expired'), 500
    else:
        return jsonify('No clinical info id provided'), 500
| the1Prince/drug_repo | app/deletes/deleteClinicalInfo.py | deleteClinicalInfo.py | py | 899 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.headers",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.models.Token.query.filter_by",
"line_number": 12,
"usage_type": "call"
},
{
"api_name"... |
44697268734 | import discord
import os
import requests
import json
import random
from replit import db
from keep_alive import keep_alive
from discord.ext import commands,tasks
from pytube import YouTube
from pytube import Search
import pafy
import asyncio
from discord import FFmpegPCMAudio
bot = commands.Bot(command_prefix = '//')
check = False
@bot.command(name = "hello")
async def hello(ctx):
  """Greet the invoking user by their Discord tag."""
  greeting = f"Hello {ctx.author}"
  await ctx.send(greeting)
@bot.command(name = "kill")
async def kill(ctx, args):
  # Announce that the named target was eliminated.
  # Bug fix: corrected the "beeen" typo in the user-facing message.
  await ctx.send("{} has been eliminated".format(args))
@bot.command(name = "speechless")
async def speechless(ctx):
  # Post the local speechless.gif file into the invoking channel.
  await ctx.send(file=discord.File('speechless.gif'))
@bot.command(name = "UPcm")
async def upcm(ctx):
  # Post the local upcm.jpg file into the invoking channel.
  await ctx.send(file=discord.File('upcm.jpg'))
@bot.command(name = "play" ,aliases=["p"])
async def play(ctx, *args):
  """Two-phase music command driven by the module-level `check` flag.

  Phase 1 (check is False): treat args as a search query, post the top 5
  YouTube results and flip `check`.  Phase 2 (check is True): treat args[0]
  as a 1-based index into the previous search and start playback.

  NOTE(review): the state (`check`, `s`, `message`) is global, so two users
  searching concurrently will clobber each other's results.
  """
  global check
  global s
  voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
  if voice == None:
    if not ctx.message.author.voice:
      await ctx.send("{} is not connected to a voice channel".format(ctx.message.author.name))
      return
    else:
      channel = ctx.message.author.voice.channel
      await channel.connect()
  server = ctx.message.guild
  vc = server.voice_client
  if not check:
    # Phase 1: search and list up to five candidate tracks.
    check = not check
    s = Search(' '.join(args))
    j = 1
    output = "**Please select a track with the** `//play 1-5` **command:**\n"
    for i in s.results:
      # encode/decode round-trip keeps non-ASCII titles intact.
      title = (i.title).encode('utf8')
      output += '**' + str(j) + ': **'
      output += (title.decode('utf8')) + '\n'
      if j == 5:
        break
      j += 1
    global message
    message = await ctx.send(output)
  else:
    # Phase 2: play the selected result.
    if not args[0].isnumeric():
      await ctx.send("Choose index number of song.")
      return
    check = not check
    if vc.is_playing():
      vc.stop()
    # NOTE(review): eval() on user input; only reached after isnumeric(),
    # so it can only evaluate a plain integer literal here.
    vdo_index = eval(args[0])
    vdo_id = s.results[vdo_index-1].video_id
    p = pafy.new(vdo_id)
    ba = p.getbestaudio()
    try :
      async with ctx.typing():
        FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}
        vc.play(FFmpegPCMAudio(ba.url, **FFMPEG_OPTIONS))
        vc.is_playing()
        await message.edit(content ='**Now playing:** {}'.format(p.title))
    except:
      # NOTE(review): bare except hides real errors (bad index, pafy
      # failures) behind a misleading "not connected" message.
      await ctx.send("The bot is not connected to a voice channel.")
@bot.command(name='pause', help='This command pauses the song')
async def pause(ctx):
  """Pause playback on this guild's voice client, if anything is playing."""
  vc = ctx.message.guild.voice_client
  if not vc.is_playing():
    await ctx.send("The bot is not playing anything at the moment.")
    return
  vc.pause()
  await ctx.send("Paused. Use resume command to resume.")
@bot.command(name='resume', help='Resumes the song')
async def resume(ctx):
  """Resume a paused track on this guild's voice client."""
  vc = ctx.message.guild.voice_client
  if not vc.is_paused():
    await ctx.send("The bot was not playing anything before this. Use play_song command")
    return
  vc.resume()
  await ctx.send("Resumed")
@bot.command(name='stop', help='Stops the song')
async def stop(ctx):
  """Stop playback entirely on this guild's voice client."""
  vc = ctx.message.guild.voice_client
  if not vc.is_playing():
    await ctx.send("The bot is not playing anything at the moment.")
    return
  vc.stop()
  await ctx.send("Stopped")
@bot.command(name = 'guessmybday', help = "Guesses your birthday")
async def guessmybday(ctx):
  """Guess the user's day-of-month with five yes/no questions.

  Set k (k = 0..4) lists every day 1-31 whose k-th bit is set; answering
  "yes" adds 2**k to the running total, so after five answers the total
  equals the day.  An answer other than "yes"/"no" skips the remaining
  questions (matching the original control flow, which then reports the
  partial total); a 60 s timeout cancels the game.

  Refactor: the original repeated the ask/wait/score block five times with
  hard-coded grids; the grids are now generated from the bit pattern.
  """
  def bit_grid(bit):
    # Days 1-31 whose `bit`-th bit is set, 4 per row; only the first number
    # of each row is right-aligned to width 2 (reproduces the original
    # grid strings exactly).
    days = [d for d in range(1, 32) if d & (1 << bit)]
    rows = [days[i:i + 4] for i in range(0, len(days), 4)]
    body = "\n".join(
        "{:>2}".format(r[0]) + "".join(" {}".format(d) for d in r[1:])
        for r in rows)
    return "**Is your birthday in the following set?**\n" + body + "\n"
  check = lambda m: m.author == ctx.author and m.channel == ctx.channel
  count = 0
  msg = await ctx.send(bit_grid(0))
  for bit in range(5):
    if bit:  # the first grid was already sent above
      await msg.edit(content=bit_grid(bit))
    try:
      confirm = await bot.wait_for("message", check=check, timeout=60)
    except asyncio.TimeoutError:
      await msg.edit(content="Guessing cancelled, timed out.")
      return
    if confirm.content not in ("yes", "no"):
      break
    if confirm.content == "yes":
      count += 1 << bit
  await msg.edit(content="Your Birthday is on **" + str(count) + "**")
  return
# Start the keep-alive web server (Replit uptime trick), then run the bot
# with the token taken from the environment.
keep_alive()
bot.run(os.getenv('TOKEN'))
{
"api_name": "discord.ext.commands.Bot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "discord.File",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "discord.Fi... |
38745175764 | import cgi
import logging
import os
import random
import string
from google.appengine.api import images
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
KEY_RANGE = range(random.randint(24,32))
KEY_CHARS = string.ascii_letters;
# Database containing all labels to be appended to the user's photo. The label
# id must be called image_id so that DbFunctions can work on both Labels and
# ImageDB.
class LabelsDb (db.Model):
  # MIME type of the stored label image (set from the upload's content type).
  image_type = db.StringProperty()
  # Label name used as the lookup key; called image_id so DbFunctions can
  # query Labels and Images with the same GQL.
  image_id = db.StringProperty()
  # Raw image bytes.
  content = db.BlobProperty()
# Database containing all images uploaded by users. The date field is used by
# cronjobs to clean up the table. (TBD)
class ImageDb(db.Model):
  # MIME type of the stored image.
  image_type = db.StringProperty()
  # Random key generated by DbFunctions.addImage.
  image_id= db.StringProperty()
  # Raw image bytes.
  content = db.BlobProperty()
  # Creation time, intended for cronjob cleanup (see comment above class).
  date = db.DateTimeProperty(auto_now_add=True)
# Helper functions for accessing DB data.
class DbFunctions:
  """Helper methods shared by the LabelsDb and ImageDb kinds."""
  def getImage(self, database, image_id):
    """Return the first entity of `database` with the given image_id, or None."""
    imageDb = db.GqlQuery('SELECT * FROM %s WHERE image_id = :1' % database,
                          image_id)
    # We return the first image available with the given image_id.
    for image in imageDb:
      return image
    return None
  def getImages(self, database):
    """Return all entities of `database`, ordered by image_id."""
    imageDb = db.GqlQuery('SELECT * FROM %s ORDER BY image_id ASC' % database)
    return imageDb
  def addImage(self, image_content, image_type = 'image/jpeg', key = None):
    """Store image_content in ImageDb under `key` (random if omitted).

    Returns the key used.  Bug fix: the stored image_type used to be
    hard-coded to "image/jpeg", silently ignoring the image_type argument.
    """
    if key == None:
      key = (''.join(random.choice(KEY_CHARS) for x in KEY_RANGE))
    imageDb = ImageDb()
    imageDb.image_id = key
    imageDb.image_type = image_type
    imageDb.content = db.Blob(image_content)
    imageDb.put()
    return key
# Serves photos given ids and the database to read from
class GetPhoto(webapp.RequestHandler):
  """Serves a photo by image_id from either ImageDb or LabelsDb."""
  def get(self):
    image_id = self.request.get('image_id')
    image_db = self.request.get('image_db')
    # Default to ImageDb when no database is given.  Bug fix: the original
    # assigned the misspelled variable `imabe_db`, so the default never took
    # effect; it also compared against None although request.get returns ''.
    if not image_db:
      image_db = 'ImageDb'
    if image_db == 'LabelsDb' or image_db == 'ImageDb':
      image = DbFunctions().getImage(image_db, image_id)
      if image is not None:
        # Fall back to PNG when the entity predates type tracking.
        image_type = 'image/png'
        if image.image_type is not None:
          image_type = image.image_type
        self.response.headers['Content-Type'] = image_type
        self.response.out.write(image.content)
        return
    # Unknown database or missing image.
    self.response.headers['Content-Type'] = 'text/html'
    self.response.set_status(404, 'Not Found')
    self.response.out.write('Imagem nao encontrada')
# Main page rendering function. You can always do the following to render the
# main page:
# self.response.out.write((RenderMainPage(error_message='Could not do this')))
# or
# self.response.out.write((RenderMainPage(success_content=my_success_content)))
def RenderMainPage(success_content = '', error_message = ''):
  """Render index.template with the list of available labels.

  Args:
    success_content: payload shown on success (e.g. result image ids).
    error_message: message shown on failure.

  Returns:
    The rendered HTML string.
  """
  labels = DbFunctions().getImages('LabelsDb')
  template_values = {
    'labels': [],
    'error_message': error_message,
    'success_content': success_content
  }
  # Perf fix: the original also decoded every label with images.Image() in
  # this loop and never used the result, so that per-label work is dropped.
  for i, label in enumerate(labels):
    template_values['labels'].append({ 'name': 'label%d' % i,
                                       'id': label.image_id })
  path = os.path.join(os.path.dirname(__file__), 'index.template')
  return template.render(path, template_values)
# Simple URL handler for the main page.
class MainPage(webapp.RequestHandler):
  # GET /: render the main page with no success or error message.
  def get(self):
    self.response.out.write(RenderMainPage())
# Given the content type return the corresponding appengine type.
def getImageTypeFromContentType(content_type):
  """Map a MIME content type to the appengine images output encoding.

  Returns None for unsupported types.  GIF uploads are deliberately served
  back as JPEG (appengine supports gif type as jpeg).
  """
  encodings = {
      'image/gif': images.JPEG,
      'image/jpeg': images.JPEG,
      'image/png': images.PNG,
  }
  return encodings.get(content_type)
# Handler that adds a label to the user photo.
class Legendario(webapp.RequestHandler):
  def post(self):
    """Append the chosen label below the uploaded photo.

    Validates the upload (presence, type, <= 1 MB) and the label, then
    builds three composites -- uncropped, cropped to the photo height, and
    padded to a white square -- stores each in ImageDb and renders the main
    page with their keys.
    """
    self.response.headers['Content-Type'] = 'text/html'
    uploaded_image = self.request.POST['source_image']
    if uploaded_image == None or uploaded_image == '':
      self.response.out.write(RenderMainPage(error_message='Selecione uma foto.'))
      return;
    # extracts the photo type from the uploaded image
    content_type = uploaded_image.type
    image_type = getImageTypeFromContentType(content_type)
    if image_type == None:
      self.response.out.write(
          RenderMainPage(error_message='Tipo de imagem desconhecido. Use imagens JPEG, PNG ou GIF'))
      return;
    image_content = self.request.get('source_image')
    if len(image_content) > (1 << 20): # 1M
      self.response.out.write(RenderMainPage(
          error_message='Sua foto deve ter menos de 1 MB.'))
      return;
    label_name = self.request.get('label_name')
    if label_name == None or label_name == '':
      self.response.out.write(RenderMainPage(error_message='Escolha um dos labels.'))
      return;
    label = DbFunctions().getImage('LabelsDb', label_name)
    if label == None:
      self.response.out.write(RenderMainPage(
          error_message='Label \'%s\' nao encontrado' % label_name))
      return;
    # NOTE(review): this ImageDb instance is never used in this method.
    imageDb = ImageDb()
    image = images.Image(image_content)
    label_img = images.Image(label.content)
    # There is this limitation on the appengine images library that doesn't
    # allow tranformations whose height or width is > 4000, so lets reduce image
    # right away. The label width and height is always guaranteed to be < than
    # 1000 pixels so, if we need to resize something, this thing is the user
    # height. The width will never exceed this limitation because we always
    # scale down the bigger photo and label width is always < than 1000.
    if image.height + label_img.height > 4000:
      # Since we know that label height size is not the reason for the 4000
      # exceed, lets resize image down.
      image.resize(height=(4000 - label_img.height))
      image = images.Image(image.execute_transforms(image_type))
    # Make image and label to have the same width. Scale down the bigger one.
    if label_img.width > image.width:
      label_img.resize(width=image.width)
      label_img = images.Image(label_img.execute_transforms(
          getImageTypeFromContentType(label.image_type)))
    else:
      image.resize(width=label_img.width)
      image = images.Image(image.execute_transforms(image_type))
    # now images have the same width. Height will never exceed the 4000 limit.
    # Composite 1: photo with the label appended below (no cropping).
    no_crop_image = images.composite([(image, 0, 0, 1.0, images.TOP_RIGHT),
                                      (label_img, 0, image.height, 1.0,
                                       images.TOP_RIGHT) ], image.width,
                                     image.height + label_img.height, 0,
                                     images.JPEG)
    # Composite 2: label drawn over the photo's bottom, photo height kept.
    crop_image = images.composite([(image, 0, 0, 1.0, images.TOP_RIGHT),
                                   (label_img, 0, image.height - label_img.height, 1.0,
                                    images.TOP_RIGHT) ], image.width,
                                  image.height, 0, images.JPEG)
    # Composite 3: photo+label centered on a white square canvas.
    squared_width = image.width;
    squared_height = image.height + label_img.height;
    if (squared_width > squared_height): squared_height = squared_width
    else: squared_width = squared_height
    woffset = (squared_width - image.width) / 2
    squared_xpos = -1 * woffset;
    hoffset = (squared_height - (image.height + label_img.height))/2
    # NOTE(review): squared_ypos is computed but never used below.
    squared_ypos = hoffset
    squared_image = images.composite(inputs=[(image, squared_xpos , hoffset, 1.0, images.TOP_RIGHT),
                                             (label_img, squared_xpos, image.height + hoffset, 1.0,
                                              images.TOP_RIGHT) ], width=squared_width,
                                     height=squared_height, color=0xffffffff,
                                     output_encoding=images.JPEG)
    results = [ no_crop_image, crop_image, squared_image ]
    # Due to some weird behaviour of the transformation library, it may be the
    # case that the result is bigger than the len(label_img) + len(image). Why,
    # why, why??
    for result in results:
      if len(result) > (1 << 20):
        self.response.out.write(RenderMainPage(error_message='''Sua imagem ficou
        muito grande depois de acrescentar a legenda. Reduza o tamanho da sua
        imagem original. Se isso nao resolver, tente reduzir suas dimensoes ou
        sua resolucao. Se nada funcionar, mande um email para ademirao@gmail.com'''))
        return;
    no_crop_image_key = DbFunctions().addImage(no_crop_image)
    crop_image_key = DbFunctions().addImage(crop_image)
    squared_image_key = DbFunctions().addImage(squared_image)
    self.response.out.write(RenderMainPage({
        'images': [ {
            'image_id': no_crop_image_key,
            'image_descr': 'Sem Cortes',
        }, {
            'image_id': crop_image_key,
            'image_descr': 'Cortada',
        }, {
            'image_id': squared_image_key,
            'image_descr': 'Quadrada Sem Cortes'
        }] }))
class AddLabel(webapp.RequestHandler):
  def post(self):
    """Store an uploaded label image in LabelsDb under the given name.

    The label is scaled down to at most 1000px wide and 500px tall so the
    composite in Legendario never exceeds appengine's 4000px transform
    limit (see the comment there).
    """
    labelDb = LabelsDb()
    uploaded_label = self.request.POST['source_label']
    content_type = uploaded_label.type
    label_type = getImageTypeFromContentType(content_type)
    if label_type == None:
      self.response.out.write('Tipo de imagem desconhecido. Use imagens JPEG, PNG ou GIF')
      return;
    logging.info(content_type)
    label_img = images.Image(self.request.get('source_label'))
    # Keep the raw upload bytes; replaced below only if a resize happens.
    label_data = self.request.get('source_label')
    # Make sure label width is < than 1000
    if label_img.width > 1000:
      label_img.resize(width=1000)
      label_data = label_img.execute_transforms(label_type)
    if label_img.height > 500 :
      label_img.resize(height=500)
      label_data = label_img.execute_transforms(label_type)
    labelDb.image_id = self.request.POST['label_name']
    labelDb.content = db.Blob(label_data)
    labelDb.image_type = content_type
    labelDb.put();
    self.response.out.write('Label added! Name %s' % labelDb.image_id)
# URL routing for the WSGI app; debug=True shows tracebacks in responses.
application = webapp.WSGIApplication(
    [('/', MainPage),
     ('/add_label', AddLabel),
     ('/legendame', Legendario),
     ('/photo', GetPhoto)], debug=True)
def main():
  # Entry point used by the (Python 2) appengine runtime.
  run_wsgi_app(application)
if __name__ == '__main__':
  main()
| ademirao/legendario | legendario.py | legendario.py | py | 10,488 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db.Model",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_n... |
1393116208 | import collections
class Solution:
    """
    @param formula: a string
    @return: return a string
    """
    def countOfAtoms(self, formula):
        """Return the atom counts of a chemical formula, elements sorted
        alphabetically, with counts of 1 omitted (LeetCode 726)."""
        if not formula:
            return ""
        counts = [collections.Counter()]
        n = len(formula)
        pos = 0
        while pos < n:
            ch = formula[pos]
            if ch == '(':
                # New nesting level.
                counts.append(collections.Counter())
                pos += 1
            elif ch == ')':
                # Close the level and fold it into the parent, scaled by the
                # trailing multiplier (default 1).
                pos += 1
                start = pos
                while pos < n and formula[pos].isdigit():
                    pos += 1
                mult = int(formula[start:pos] or 1)
                inner = counts.pop()
                for elem, cnt in inner.items():
                    counts[-1][elem] += cnt * mult
            else:
                # Element name: one leading char plus any lowercase tail,
                # then an optional count.
                start = pos
                pos += 1
                while pos < n and formula[pos].islower():
                    pos += 1
                elem = formula[start:pos]
                start = pos
                while pos < n and formula[pos].isdigit():
                    pos += 1
                counts[-1][elem] += int(formula[start:pos] or 1)
        top = counts[-1]
        return "".join(
            elem + (str(top[elem]) if top[elem] > 1 else "")
            for elem in sorted(top))
{
"api_name": "collections.Counter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 14,
"usage_type": "call"
}
] |
14443118585 | import dash
from dash import dcc
from dash import html
from dash import dash_table
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
from flask import Flask
from flask import render_template, Response
import pandas as pd
import edgeiq
import cv2
import time
# edgeIQ
camera = edgeiq.WebcamVideoStream(cam=0)
obj_detect = edgeiq.ObjectDetection("alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
obj_detect.load(engine=edgeiq.Engine.DNN)
# Data
data = pd.DataFrame()
START_TIME = time.time()
# functions for rendering frame and performing object detection
def gen_video_feed():
    """Generator yielding multipart MJPEG chunks from the camera stream.

    Each chunk is an annotated JPEG produced by perform_object_detection,
    framed for a multipart/x-mixed-replace response.
    """
    while True:
        frame = camera.read()
        if frame is not None:
            frame = perform_object_detection(frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def perform_object_detection(frame):
    """Run object detection on a frame, update the global log table, and
    return the annotated frame.

    Args:
        frame (numpy array): The frame from the camera stream.

    Returns:
        bytes: JPEG-encoded annotated frame, or None when frame is None.
    """
    global data
    if frame is None:
        return frame
    results = obj_detect.detect_objects(frame, confidence_level=.5)
    frame = edgeiq.markup_image(
        frame, results.predictions, colors=obj_detect.colors)
    frame = edgeiq.resize(frame, width=800, height=300)
    frame = cv2.imencode('.jpg', frame)[1].tobytes()

    # Append one row (elapsed seconds, detected labels) to the shared table.
    objects = {
        'timestamp': str(round((time.time() - START_TIME), 0)),
        'labels': ", ".join([p.label for p in results.predictions])
    }
    row = pd.DataFrame({k: [v] for k, v in objects.items()})
    # Bug fix: DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported replacement.  (The old `data is None` branch was dead code
    # because `data` is initialized to an empty DataFrame above.)
    data = pd.concat([data, row], ignore_index=True).drop_duplicates()
    return frame
# Flask app
app = Flask(__name__, instance_relative_config=False)
# Flask routes (add as needed)
@app.route('/video_feed')
def video_feed():
    # Stream the camera as multipart MJPEG; the <img src="/video_feed"> in
    # the Dash layout consumes this endpoint.
    return Response(gen_video_feed(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route("/")
def home():
    # Plain Flask landing page; the dashboard itself lives under /dash/.
    return render_template("index.html")
# Dash Setup
dash_app = dash.Dash(
__name__,
server=app, # associate Flask
assets_folder="./static",
url_base_pathname='/dash/',
external_stylesheets=[dbc.themes.LUX]
)
# Dash Layout
dash_app.layout = dbc.Container(fluid=True, children=[
# body
dbc.Row([
dbc.Col(
# streamer content
html.Img(
src="/video_feed",
style={'position': 'center', 'width': 600, 'height': 350}
)
),
]),
dash_table.DataTable(
id="logs",
data=[],
columns=[],
style_as_list_view=False,
page_action="native",
page_size=10,
export_format="csv",
style_header={
'backgroundColor': 'rgba(0,0,0,0.2)',
'border': '1px solid white',
'font-family': 'Nunito Sans'
},
style_cell={
'backgroundColor': 'rgba(0,0,0,0.2)',
'color': 'black',
'text-align': 'left',
'font-size': '14px',
'font-family': 'Nunito Sans'
},
style_data={
'border': '1px solid white'
},
sort_by={
'column_id': 'timestamp',
'direction': 'desc'
}),
# automatically update periodically
dcc.Interval(
id='interval-component',
interval=1*5000, # in milliseconds
n_intervals=0
)
])
# Dash Callbacks
@dash_app.callback(
    output=[Output("logs", "data"), Output("logs", "columns")],
    inputs=[Input('interval-component', 'n_intervals')])
def render_log_table(n_intervals):
    # Periodic refresh of the detections table from the shared DataFrame;
    # n_intervals is unused beyond triggering the callback.
    df = data
    return df.to_dict('records'), [{"name": i, "id": i} for i in df.columns]
if __name__ == "__main__":
    # Start the camera before serving and always release it on shutdown.
    camera.start()
    try:
        app.run(host='localhost', port=5001, debug=False)
    except Exception as e:
        print(e)
    finally:
        camera.stop()
{
"api_name": "edgeiq.WebcamVideoStream",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "edgeiq.ObjectDetection",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "edgeiq.Engine",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pa... |
19092172950 | import torch.nn as nn
import torch.nn.functional as F
import torch
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  class_weight=None):
    """Calculate the CrossEntropy loss.
    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        label (torch.Tensor): The gt label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (torch.Tensor, optional): The weight for each class with
            shape (C), C is the number of classes. Default None.
    Returns:
        torch.Tensor: The calculated loss
    """
    # element-wise losses
    loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
    # apply weights and do the reduction
    if weight is not None:
        # weight_reduce_loss expects a float mask, not bool/int.
        weight = weight.float()
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
    return loss
def semantic_exactly_one(pred):
    """Exactly-one semantic loss (negative log weighted model count).

    Treating each sigmoid output p_i as an independent on-probability, the
    probability that exactly class i is on is p_i * prod_{j != i}(1 - p_j);
    the loss is -log of the sum of those terms (definition 1).

    Args:
        pred (torch.Tensor): Logits with shape (N, C).

    Returns:
        torch.Tensor: Per-sample loss with shape (N,).
    """
    # Fix: F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
    prob = torch.sigmoid(pred)
    one_minus = 1.0 - prob
    wmc_tmp = torch.zeros_like(prob)
    for i in range(pred.shape[1]):
        # p_i * prod_{j != i}(1 - p_j); all factors are non-negative since
        # sigmoid outputs lie in (0, 1), so the original abs() is redundant.
        factors = one_minus.clone()
        factors[:, i] = prob[:, i]
        wmc_tmp[:, i] = factors.prod(dim=1)
    return -torch.log(wmc_tmp.sum(dim=1))
@LOSSES.register_module()
class CESemanticLoss(nn.Module):
    """Cross entropy plus semantic (exactly-one) loss.

    `label` is a multi-hot tensor of shape (N, C): one-hot rows are labelled
    samples, all-zero rows are unlabelled -- this is what
    ``label.sum(dim=1)`` relies on to build the masks.

    Args:
        loss_weight (float): Weight of the CE term for labelled samples.
            Defaults to 1.0; the no-argument constructor keeps working.
    """
    def __init__(self, loss_weight=1.0):
        super(CESemanticLoss, self).__init__()
        # Bug fix: forward referenced self.loss_weight (and a non-existent
        # self.cross_entropy) without these ever being defined, so the loss
        # crashed with AttributeError on the first call.
        self.loss_weight = loss_weight

    def forward(self, cls_score, label):
        """Compute the combined loss.

        Args:
            cls_score (torch.Tensor): Logits with shape (N, C).
            label (torch.Tensor): Multi-hot targets with shape (N, C).

        Returns:
            torch.Tensor: Scalar loss summed over the batch.
        """
        labelled_examples = label.sum(dim=1)            # 1 for labelled rows
        unlabelled_examples = 1.0 - labelled_examples   # 1 for unlabelled rows
        # Per-sample CE with probabilistic (one-hot) targets; requires
        # torch >= 1.10 for float targets in F.cross_entropy.
        per_sample_ce = F.cross_entropy(cls_score, label, reduction='none')
        CE = torch.multiply(labelled_examples, self.loss_weight * per_sample_ce)
        semantic = 0.0005 * torch.multiply(labelled_examples, semantic_exactly_one(cls_score)) + \
                   0.0005 * torch.multiply(unlabelled_examples, semantic_exactly_one(cls_score))
        # mean(sum(x)) over a vector is just sum(x).
        return torch.sum(torch.add(CE, semantic))
| jichengyuan/semantic_loss_detection | mmdet/models/losses/semantic_loss.py | semantic_loss.py | py | 2,907 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "utils.weight_reduce_loss",
"line_number": 35,
"usage_type": "call"
},
{
"ap... |
1858956859 | import cv2
import numpy as np
import random
#########################################################
# FUNCTION TO FIND THE CONNECTED COMPONENTS
#########################################################
def drawComponents(image, adj, block_size):
    """Find connected components block-by-block and return a hue-colored map.

    Splits the binary image into block_size x block_size windows, labels each
    window with cv2.connectedComponents, randomly scales the labels for color
    variety, reassembles them into a full-size label image, and converts that
    to an RGB visualization via an HSV hue trick.

    NOTE(review): cv2.connectedComponents's second positional parameter is
    the output `labels` buffer, not the connectivity -- passing `adj` here
    likely does not select 4-/8-adjacency as intended; confirm against the
    OpenCV API.
    """
    #ret, labels = cv2.connectedComponents(image)
    #print(ret)
    #print(labels)
    #cv2.imshow('test1', labels.astype(np.uint8))
    image = image.astype('uint8')
    #print (image.shape)
    block_w = block_size
    block_h = block_size
    nb = 0
    comp = []
    # Label each window independently; comp collects the per-window label
    # arrays in row-major order.
    for r in range(0, image.shape[0] - block_w, block_h):
        for c in range(0, image.shape[1] - block_w, block_h):
            window = image[r:r+block_w, c:c+block_h]
            x = list(cv2.connectedComponents(window, adj))
            nb += x[0]
            # Random scaling so neighboring blocks get distinct hues later.
            x[1] = x[1] * random.randint(1, 16) * random.randint(1, 16)
            comp.append(x[1])
    bc = image.shape[0]//block_size
    br = image.shape[1]//block_size
    img = np.zeros(image.shape)
    #print (img.shape)
    # NOTE(review): this fill writes every block of `img` with each comp[i]
    # in turn, so only the last component survives; the element-wise loop
    # below then overwrites img anyway, making this pass redundant work.
    for r in range(0, img.shape[0] - block_w, block_h):
        for c in range(0, img.shape[1] - block_w, block_h):
            for i in range(len(comp)):
                img[r:r+block_w, c:c+block_h] = comp[i]*255
    # Scatter each window's labels back to its position in the full image.
    for k in range(len(comp)):
        for i in range(block_size):
            for j in range(block_size):
                if k%br == 0 and k!=0:
                    c = (((k+1)*block_size)//img.shape[1])*block_size + j
                else:
                    c = ((k*block_size)//img.shape[1])*block_size + j
                r = (k*block_size + i) % (br*block_size)
                img[c][r] = comp[k][j][i]
    cv2.imshow('Test Image', img)
    #image = image.astype('uint8')
    #nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, adj)
    #label_hue = (107*output%np.max(output)).astype(np.uint8)
    # Map labels to hues, fix saturation/value at max, then convert to RGB;
    # background (hue 0) is forced to black.
    label_hue = (107*img%np.max(img)).astype(np.uint8)
    blank_ch = 255*np.ones_like(label_hue)
    labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
    labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2RGB)
    labeled_img[label_hue==0] = 0
    '''
    img2 = np.zeros(output.shape)
    img2[output == max_label] = 255
    img2 = img2 + output
    '''
    return labeled_img
#########################################################
# INPUTS
#########################################################
# Input loop: flag encodes what still needs re-asking (15 = everything,
# 1 = adjacency only, 2 = threshold only, 0 = done).
flag = 15
while flag != 0:
    block = int(input("Please enter block size (m X m): "))
    if flag == 1 or flag == 15:
        adj = int(input("Enter the adjacency for detection (4 or 8): "))
    if flag == 2 or flag == 15:
        thresh = list(map(int, input("Enter the range of threshold separated by space(Example: 150 200): ").split(" ")))
    if adj != 4 and adj != 8:
        flag = 1
        print("Inoperable value for adjacency. Please enter 4 or 8")
        continue
    elif len(thresh) != 2:
        print("Please input exactly 2 numbers in the given format.")
        flag = 2
        continue
    elif thresh[0] > thresh [1]:
        # Swap so thresh[0] <= thresh[1].
        # NOTE(review): this branch does not set flag to 0, so a valid but
        # reversed range makes the loop re-prompt for everything once more.
        thresh[0], thresh[1] = thresh[1], thresh[0]
    else:
        flag = 0
    if thresh[0] < 0 or thresh[1] > 255:
        print("Values are beyond limits. Please enter values between 0 and 255")
        flag = 2
#########################################################
# READING IMAGE
#########################################################
# Load the test image and display it.
# NOTE(review): cv2.imread returns BGR, but the conversions below use the
# RGB2GRAY / RGB2HSV constants -- channel order is swapped; confirm intent.
img_orig = cv2.imread('../../Images/2.jpg')
cv2.imshow('Original', img_orig)
#im = cv2.UMat(Image.fromarray(img_orig).convert("L"))
#Image.fromarray(img_orig)
bw = cv2.cvtColor(img_orig, cv2.COLOR_RGB2GRAY)
#cv2.imshow("BW", bw)
#cv2.imwrite("./Outputs/Grayscale.jpg", bw)
# Binarize using the user-supplied limits collected earlier; pixels that
# survived thresholding at value x are then forced to pure white (255).
x, img = cv2.threshold(bw, thresh[0], thresh[1], cv2.THRESH_BINARY) #ensuring binary
img[img==x] = 255
cv2.imshow("Binary", img)
#cv2.imwrite("./Outputs/Binary Image {V=("+str(thresh[0])+", "+str(thresh[1])+"), adj="+str(adj)+"}.jpg", img)
# drawComponents (defined earlier in this file) colours each connected
# component with the chosen adjacency (4 or 8).
img2 = drawComponents(img, adj, block) # calling implementation function
#print(img2.shape)
cv2.imshow('Connected Components', img2)
#cv2.imwrite("./Outputs/Paths{V=("+str(thresh[0])+", "+str(thresh[1])+"), adj="+str(adj)+"}.jpg", img2)
#########################################################
# PRINTING OUTPUT
#########################################################
#img3 = bw * (img2.reshape(img2.shape[0],img2.shape[1]))
# Using the hues from img2 and the saturation and luminosity from the original image to get proper results.
cvt = cv2.cvtColor(img_orig, cv2.COLOR_RGB2HSV)
img4 = np.zeros(cvt.shape)
img2 = cv2.cvtColor(img2.astype(np.uint8), cv2.COLOR_RGB2HSV)
# Per-pixel blend: component hue dominates, original saturation dominates,
# original brightness is kept.  Background (hue 0) pixels are blacked out.
# NOTE(review): the HUE blend mixes in cvt[i][j][1] (the SATURATION channel);
# cvt[i][j][0] may have been intended -- confirm.
for i in range(img2.shape[0]):
for j in range(img2.shape[1]):
img4[i][j][0] = (img2[i][j][0]*9 + cvt[i][j][1]*1)//10 # HUE
img4[i][j][1] = (img2[i][j][1]*2 + cvt[i][j][1]*8)//10 # SATURATION
img4[i][j][2] = cvt[i][j][2] # LIGHT VALUE
if img2[i][j][0] == 0:
img4[i][j] = 0
img4 = cv2.cvtColor(img4.astype(np.uint8), cv2.COLOR_HSV2RGB)
#img3 = bw + (img2.reshape(img2.shape[0],img2.shape[1]))
#img4 = [[[i, i, i] for i in j] for j in img2]
#img5 = img_orig * img4
cv2.imshow('Result', img4.astype(np.uint8))
#cv2.imwrite("./Outputs/Result{V=("+str(thresh[0])+", "+str(thresh[1])+"), adj="+str(adj)+"}.jpg", img4.astype(np.uint8))
print ("Job done!")
# Block until a key is pressed, then tear down all OpenCV windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
| AgilePlaya/Image-Processing-Basics | Codes/Connected-Components/connected.py | connected.py | py | 5,497 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.connectedComponents",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"... |
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
import time
import math
# Stepik training task: wait until the book price becomes $100, click "book",
# then evaluate the maths formula shown on the page and submit the result.
browser = webdriver.Chrome()
try:
# Helpers referenced by name inside the page formula evaluated via eval().
def ln(x):
return math.log(x)
def sin(x):
return math.sin(x)
browser.get("http://suninjuly.github.io/explicit_wait2.html")
# Selenium will Wait until price be 100$
button = browser.find_element(By.ID, "book")
# Return value unused; the call blocks (up to 10 s) until "$100" appears.
price_text = WebDriverWait(browser, 10).until(EC.text_to_be_present_in_element((By.ID, "price"), "$100"))
button.click()
x = int(browser.find_element(By.ID, "input_value").text)
# extract clean formula from text
formula_element = browser.find_element(By.CSS_SELECTOR, "label > :nth-child(1)")
formula = formula_element.text.split()[2].replace(",", "")
# NOTE(review): eval() on page text is unsafe for untrusted input; acceptable
# here only because the page is a fixed training exercise.
result_formula = eval(formula)
form_element = browser.find_element(By.ID, "answer")
form_element.send_keys(result_formula)
submit_buttom = browser.find_element(By.ID, "solve")
submit_buttom.click()
finally:
# Keep the browser visible briefly so the result can be observed, then quit.
time.sleep(10)
browser.quit()
# add an empty line for unix system
| utkin7890/stepik_auto_tests_course | part2_lesson4_step8.py | part2_lesson4_step8.py | py | 1,193 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "math.log",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "math.sin",
... |
39604455911 | import multiprocessing
import os
import glob
import sys
import json
from tqdm import tqdm
from extractors.default import *
def main():
    """Prepare output directories, build (or reload) the cached file list for
    the tree given in ``sys.argv[1]``, then parse every file in parallel.

    Relies on ``availableParsers`` and ``parseRecipe`` star-imported from
    ``extractors.default``.
    """
    # Ensure one output directory per available parser exists.
    if not os.path.exists('../finished'):
        os.makedirs('../finished')
    for parser in availableParsers:
        if not os.path.exists('../finished/%s' % parser):
            os.makedirs('../finished/%s' % parser)

    mypath = sys.argv[1]

    # Reuse the cached file list if present; otherwise walk the tree and
    # cache it.  The cache file is read with a context manager so the handle
    # is always closed (the original leaked it via json.load(open(...))).
    fs = []
    if os.path.exists('files.json'):
        with open('files.json', 'r') as fh:
            fs = json.load(fh)
    else:
        for root, directories, filenames in os.walk(mypath):
            for filename in filenames:
                fs.append(os.path.join(root, filename))
        with open('files.json', 'w') as f:
            f.write(json.dumps(fs))

    # Process all files, two batches of work per core at a time.  The pool is
    # a context manager so workers are always terminated and joined (the
    # original never called close()/join(), leaking worker processes).
    batch = 2 * multiprocessing.cpu_count()
    with multiprocessing.Pool(multiprocessing.cpu_count()) as p:
        print("Processing %d files..." % len(fs))
        for i in tqdm(range(0, len(fs), batch)):
            p.map(parseRecipe, fs[i:i + batch])


if __name__ == "__main__":
    main()
| schollz/parseingredient | src/parseHTML.py | parseHTML.py | py | 1,163 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_nu... |
from facenet_pytorch import MTCNN, InceptionResnetV1
import torch
from torchvision import datasets
from torch.utils.data import DataLoader
import datetime
# Initialise the pretrained PyTorch face-detection model (MTCNN) and the
# pretrained face-recognition model (InceptionResnetV1, VGGFace2 weights).
# (Original comments were mojibake-damaged Chinese; translated from context.)
mtcnn = MTCNN(image_size=240, margin=0, keep_all=False, min_face_size=40)
resnet = InceptionResnetV1(pretrained='vggface2').eval()
# Read the training photos: one sub-folder per person (ImageFolder layout).
dataset = datasets.ImageFolder('/Users/zhengrongkai/PycharmProjects/Face-Recognition-PyTorch-main/images_pytorch')
# Map each class index back to its folder (person) name.
idx_to_class = {i:c for c,i in dataset.class_to_idx.items()}
print('ๅผๅงๆถ้ด :',datetime.datetime.now())
print('Training..')
def collate_fn(x):
# Bypass default collation: yield the single (image, class-index) sample.
return x[0]
loader = DataLoader(dataset, collate_fn=collate_fn)
# Person names, parallel to embedding_list below.
name_list = []
# Face-embedding tensors produced by the recognition model.
embedding_list = []
# Detect each face with MTCNN; if the detection is confident (prob > 0.92),
# compute its embedding with InceptionResnet and record the person's name.
for img, idx in loader:
face, prob = mtcnn(img, return_prob=True)
if face is not None and prob > 0.92:
emb = resnet(face.unsqueeze(0))
embedding_list.append(emb.detach())
name_list.append(idx_to_class[idx])
# Persist the embeddings and names for later recognition runs.
data = [embedding_list, name_list]
torch.save(data, '/Users/zhengrongkai/PycharmProjects/Face-Recognition-PyTorch/model.pt')
print('่ฎญ็ปๅฎๆ')
print('ๅฎๆๆถ้ด :',datetime.datetime.now())
{
"api_name": "facenet_pytorch.MTCNN",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "facenet_pytorch.InceptionResnetV1",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.ImageFolder",
"line_number": 12,
"usage_type": "call"
},
{... |
24537225227 | from pywinauto import Desktop
import time, requests, os, threading
import pyautogui
from pywinauto import timings
# Server and polling configuration.
BASEURL = 'http://127.0.0.1:8000/'
PING_TIMEOUT = 45  # seconds; not referenced in this file -- TODO confirm use
PING_FREQUENCY = 45  # main-loop iterations (10 s each) before the counter resets
QUEUE_LIMIT = 10  # not referenced in this file -- TODO confirm use
QUEUE_FREQUENCY = 5  # seconds to sleep when the job queue is empty
# Global handle to the running QProcessor; assigned in the __main__ block.
q_processor = None
def exit_gracefully():
    """Stop the background queue processor (if one is running), then exit."""
    if q_processor:
        q_processor.stop()
    exit(0)
class QProcessor():
    """Background worker that polls the job queue and dispatches each item."""

    def __init__(self):
        self.thread = threading.Thread(target=self.process_queue)
        # Daemon thread so it can never block interpreter shutdown.
        # (Thread.setDaemon() is deprecated; assign the attribute instead.)
        self.thread.daemon = True
        self.stopping = False

    def start(self):
        """Begin polling in the background thread."""
        self.thread.start()

    def process_queue(self):
        """Poll loop: fetch queued jobs and process them until stopped."""
        while not self.stopping:
            items = self.get_queue()
            if items:
                for item in items:
                    self.process(item['id'], item['component_id'], item['access_url'])
                    time.sleep(0.1)
            else:
                # Nothing queued: back off before polling again.
                time.sleep(QUEUE_FREQUENCY)

    def get_queue(self):
        """Return pending job items from the server; empty list on any error."""
        data = []
        try:
            r = requests.get("{}{}".format(BASEURL, "job-queue/"), timeout=10)
            if r.status_code == 200:
                d = r.json()
                data = d['data_items'] if d['count'] > 0 else []
        except Exception as e:
            # Best-effort: report and fall through with an empty queue.
            print(e)
        return data

    def process(self, id, component_id, access_url):
        """Submit one job to the processor endpoint.

        On failure (typically a hung conversion), close any orphaned
        'OrCAD Capture CIS - Lite' windows so the next job can proceed.
        """
        try:
            r = requests.post("{}{}".format(BASEURL, "job-processor/"), data={
                'id': id,
                'component_id': component_id,
                'access_url': access_url,
            }, timeout=45)
            print(r.json())
        except Exception:
            # Narrowed from bare ``except:`` so Ctrl-C / SystemExit still work.
            try:
                for win in Desktop(backend="uia").windows():
                    try:
                        if 'OrCAD Capture CIS - Lite' in win.__str__():
                            win.close()
                    except Exception:
                        continue
            except Exception:
                pass

    def stop(self):
        """Request shutdown and wait for the polling thread to finish."""
        print('STOPPING TASK HANDLER.....')
        self.stopping = True
        while self.thread.is_alive():
            print('waiting for thread to finish.....')
            time.sleep(1)
if __name__ == '__main__':
# Start the queue processor, then keep the main thread alive, waking every
# 10 s.  ping_check counts wake-ups and is reset after PING_FREQUENCY
# iterations; the actual ping action appears to have been removed -- confirm.
try:
q_processor = QProcessor()
q_processor.start()
ping_check = 0
while True:
time.sleep(10)
print('.')
ping_check += 1
if ping_check >= PING_FREQUENCY:
ping_check = 0
except KeyboardInterrupt:
print("exiting..using keyboard")
exit_gracefully()
except SystemExit as se:
print("system existing..{}".format(se))
exit_gracefully()
except Exception as e:
print("Error happen..{}".format(e))
{
"api_name": "threading.Thread",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_numbe... |
72122261475 | import uuid
from random import randint
class Producto:
    """A store product: description, barcode, price and supplier, plus a
    random UUID and a random internal key (1..200) assigned on creation."""

    def __init__(self, descripcion, codigoBarras, precio, proveedor):
        self.id = uuid.uuid4()
        self.descripcion = descripcion
        self.clave = randint(1, 200)
        self.codigoBarras = codigoBarras
        self.precio = precio
        self.proveedor = proveedor

    def __str__(self):
        """Comma-separated rendering of every field, in declaration order."""
        campos = (self.id, self.descripcion, self.clave,
                  self.codigoBarras, self.precio, self.proveedor)
        return ', '.join(str(campo) for campo in campos)
class Carrito:
    """A shopping cart: a list of [product, quantity] pairs and an owner name."""

    def __init__(self):
        self.listadoProductos = []
        self.usuario = ""

    def cargarProducto(self, prod, cant):
        """Append a product with the requested quantity."""
        self.listadoProductos.append([prod, cant])

    def mostrarProductos(self):
        """Print a 1-based numbered list of the product descriptions."""
        for posicion, entrada in enumerate(self.listadoProductos, start=1):
            print(str(posicion) + " - " + str(entrada[0].descripcion) + "\n")
class ListaProductos:
    """Catalogue of available products."""

    def __init__(self):
        self.listadoProductos = []

    def cargarProducto(self, prod):
        """Append a product to the catalogue."""
        self.listadoProductos.append(prod)

    def mostrarProductos(self):
        """Print a 0-based numbered list of the product descriptions.

        (Numbering starts at 0 here, unlike the cart display which starts at 1.)
        """
        for posicion, producto in enumerate(self.listadoProductos):
            print(str(posicion) + " - " + str(producto.descripcion) + "\n")
# Manzana = Producto("Fruta",1231241231,120,"Moรฑo Azul")
# Carrito1 = Carrito()
# Carrito1.cargarProducto(Manzana,2)
# print(Carrito1.listadoProductos[0][0].descripcion)
# print(Carrito1.listadoProductos[0][1])
# print(Carrito1.listadoProductos)
menu = '''### MENร ###
- 1 Agregar Producto
- 2 Agregar al Carrito
- 3 Salir'''
# Interactive console loop: option 1 registers a product in the catalogue,
# option 2 moves a catalogue product into the cart, option 3 exits.
opcion = True
listadoProductosObjeto = ListaProductos()
carritoProductosObjeto = Carrito()
while opcion == True :
print(menu)
op = int (input("Ingrese una Opciรณn\n"))
if op == 1:
# Collect product fields from the user and store the new Producto.
descripcion = input("Descripcion\n")
codigoBarras = int (input("Codigo de Barras\n"))
precio = int (input("Precio\n"))
proveedor = input("Proveedor\n")
objetoTransitorio = Producto(descripcion, codigoBarras, precio, proveedor)
listadoProductosObjeto.cargarProducto(objetoTransitorio)
print("Se agrego el Producto",objetoTransitorio)
#listadoProductosObjeto(Producto(descripcion,codigoBarras,precio,proveedor))
elif op == 2:
# Pick a catalogue product by its 0-based displayed index and add it to
# the cart with the requested quantity.
listadoProductosObjeto.mostrarProductos()
indice = int (input("Ingrese el numero del producto\n"))
cantidad = int (input("cantidad\n"))
productoTransitorio = listadoProductosObjeto.listadoProductos[indice]
carritoProductosObjeto.cargarProducto(productoTransitorio,cantidad)
carritoProductosObjeto.mostrarProductos()
elif op == 3:
opcion=False
| arcaex/TUP-Programacion-I | Python/POO/Prรกctica_Parcial.py | Prรกctica_Parcial.py | py | 2,625 | python | es | code | 5 | github-code | 1 | [
{
"api_name": "uuid.uuid4",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 8,
"usage_type": "call"
}
] |
10786424127 | #
# Create on 4/17/2018
#
# Author: Sylvia
#
"""
202. Happy Number
A happy number is a number defined by the following process: Starting with any positive integer, replace the number by
the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops
endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers.
Example: 19 is a happy number
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1
"""
class Solution(object):
    """LeetCode 202: decide whether a positive integer is a happy number."""

    def isHappy(self, n):
        """Return True if repeatedly replacing ``n`` by the sum of the
        squares of its digits reaches 1, False if the process cycles.

        :type n: int
        :rtype: bool
        """
        seen = set()  # values already visited; revisiting one means a cycle
        while n != 1:
            if n in seen:
                return False
            seen.add(n)
            # Digit-square sum via a generator; the original accumulated into
            # a local named ``sum``, shadowing the builtin.
            n = sum(int(d) ** 2 for d in str(n))
        return True
import pytest
# Pytest-based checks for Solution.isHappy (run with ``pytest``).
class Test:
# Each tuple is (input number, expected happiness).
@pytest.mark.parametrize('num, expect', [(19, True), (38, False), (1, True)])
def test_normal(self, num, expect):
res = Solution()
assert res.isHappy(num) is expect
| missweetcxx/fragments | leetcode/happy_number.py | happy_number.py | py | 1,107 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytest.mark.parametrize",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 44,
"usage_type": "attribute"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.