hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
28e8716a746cdb6acac715e98c28629d494e1a7a | 1,720 | py | Python | Reporting/urllib2_report_data.py | hbuter-rubrik/rubrik-scripts-for-python | 0e434854b778ff0f857425173e5cb7d6b83dddec | [
"MIT"
] | 5 | 2019-10-04T18:09:24.000Z | 2020-08-25T04:46:01.000Z | Reporting/urllib2_report_data.py | hbuter-rubrik/rubrik-scripts-for-python | 0e434854b778ff0f857425173e5cb7d6b83dddec | [
"MIT"
] | 2 | 2020-01-07T18:25:11.000Z | 2021-10-14T11:48:27.000Z | Reporting/urllib2_report_data.py | hbuter-rubrik/rubrik-scripts-for-python | 0e434854b778ff0f857425173e5cb7d6b83dddec | [
"MIT"
] | 6 | 2019-04-25T10:26:30.000Z | 2021-11-18T08:20:50.000Z | import urllib2
import json
import base64
import time
import ssl
# Python 2 script: pulls the "Protection Tasks Details" canned report from a
# Rubrik cluster over basic-auth REST and collects every failed backup task.
# NOTE(review): urllib2 / base64.encodestring are Python 2 only.
"""
define our rubrik credentials
"""
RUBRIK_IP='rubrik.demo.com'
RUBRIK_USER='admin'
RUBRIK_PASS='mypassword123!'
"""
ignore self-signed certs
"""
# WARNING: certificate verification is disabled entirely — acceptable only
# for lab clusters with self-signed certs.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# get the report ID
BASE_RUBRIK_URL=("https://"+RUBRIK_IP)
request = urllib2.Request(BASE_RUBRIK_URL+"/api/internal/report?report_template=ProtectionTasksDetails&report_type=Canned")
base64string = base64.encodestring('%s:%s' % (RUBRIK_USER, RUBRIK_PASS)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request, context=ctx)
REPORT_ID = json.load(result.fp)['data'][0]['id']
# get the report data
# filter the canned report down to failed backup tasks only
PAYLOAD = {
    'requestFilters': {
        'taskType': 'Backup',
        'taskStatus': 'Failed'
    }
}
output = []
HAS_MORE = True
cursor = None
# page through the report table until the API reports no more rows
while HAS_MORE:
    # set the cursor if we have one
    if cursor:
        PAYLOAD['cursor'] = cursor
    request = urllib2.Request(BASE_RUBRIK_URL+"/api/internal/report/"+REPORT_ID+"/table")
    base64string = base64.encodestring('%s:%s' % (RUBRIK_USER, RUBRIK_PASS)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    request.add_header('Content-Type', 'application/json')
    result = urllib2.urlopen(request, json.dumps(PAYLOAD), context=ctx)
    REPORT_DATA = json.load(result.fp)
    HAS_MORE = REPORT_DATA['hasMore']
    cursor = REPORT_DATA['cursor']
    # zip each row with the column names into a dict per task
    for entry in REPORT_DATA['dataGrid']:
        this_entry = {}
        for i in range(len(REPORT_DATA['columns'])):
            this_entry[REPORT_DATA['columns'][i]] = entry[i]
        output.append(this_entry)
| 33.076923 | 123 | 0.699419 |
28e92acc31d96b35a53502cfb20ad7033a7cf662 | 2,476 | py | Python | f_net/main.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 2 | 2021-09-04T09:08:38.000Z | 2021-09-04T09:08:44.000Z | f_net/main.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | null | null | null | f_net/main.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [
"Apache-2.0"
] | 5 | 2021-11-25T07:40:17.000Z | 2022-03-22T11:13:39.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file for pre-training or fine-tuning models."""
from absl import app
from absl import flags
from absl import logging
from clu import platform
import jax
from ml_collections import config_flags
import tensorflow as tf
from f_net import run_classifier
from f_net import run_pretraining
from f_net.configs.base import TrainingMode
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.mark_flags_as_required(["config"])
flags.DEFINE_string("workdir", None, "Work unit directory.", required=True)
flags.DEFINE_string(
"vocab_filepath",
None,
"Absolute path to SentencePiece vocab model.",
required=True)
flags.DEFINE_integer("random_seed", 0, "Integer for PRNG random seed.")
FLAGS = flags.FLAGS
def main(argv):
  """Entry point: pre-trains or fine-tunes per FLAGS.config.mode."""
  del argv  # unused; absl passes the leftover argv here

  # Hide any GPUs form TensorFlow. Otherwise TF might reserve memory and make
  # it unavailable to JAX.
  tf.config.experimental.set_visible_devices([], "GPU")

  logging.info("JAX process: %d / %d", jax.process_index(), jax.process_count())
  logging.info("JAX devices: %r", jax.devices())

  # Add a note so that we can tell which task is which JAX process.
  platform.work_unit().set_task_status(
      f"process_index: {jax.process_index()}, process_count: {jax.process_count()}"
  )
  platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
                                       FLAGS.workdir, "workdir")

  # Dispatch to the matching training driver based on the config's mode.
  train_mode = FLAGS.config.mode
  if train_mode == TrainingMode.PRETRAINING:
    train_lib = run_pretraining
  elif train_mode == TrainingMode.CLASSIFICATION:
    train_lib = run_classifier
  else:
    raise ValueError("Unknown training mode: %s" % train_mode)

  train_lib.train_and_evaluate(FLAGS.config, FLAGS.workdir,
                               FLAGS.vocab_filepath, FLAGS.random_seed)
if __name__ == "__main__":
app.run(main)
| 32.578947 | 83 | 0.735057 |
28e9489a4ce0811a2281acebf64dae5129d76367 | 18,341 | py | Python | mypyext/ml.py | VolkiTheDreamer/PythonRocks | f7b6cdf335687c6d111bf08387965ca3ecddd504 | [
"Apache-2.0"
] | null | null | null | mypyext/ml.py | VolkiTheDreamer/PythonRocks | f7b6cdf335687c6d111bf08387965ca3ecddd504 | [
"Apache-2.0"
] | null | null | null | mypyext/ml.py | VolkiTheDreamer/PythonRocks | f7b6cdf335687c6d111bf08387965ca3ecddd504 | [
"Apache-2.0"
] | 2 | 2019-10-04T10:56:14.000Z | 2022-03-06T18:18:59.000Z | import numpy as np
import pandas as pd
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score,roc_auc_score,roc_curve
from sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.pipeline import Pipeline
import os, sys, site
import itertools
from numpy.random import uniform
from random import sample
from math import isnan
from multiprocessing import Pool
from scipy.spatial import distance
from sklearn.metrics.pairwise import cosine_similarity
def printAlgorithm(algo):
    """Print the spreadsheet row describing ``algo`` (transposed).

    You need to change the hard-coded path below to wherever
    Algorithms.xlsx lives on your machine.

    Args:
        algo: value matched against the 'Algorithm' column.
    """
    p = os.getcwd()
    os.chdir(r"E:\OneDrive\Dökümanlar\GitHub\PythonRocks")
    try:
        df = pd.read_excel("Algorithms.xlsx", skiprows=1)
        print(df[df.Algorithm == algo].T)
    finally:
        # restore the caller's working directory even if reading fails
        # (the original left the cwd changed on any exception)
        os.chdir(p)
def adjustedr2(R_sq, y, y_pred, x):
    """Return the adjusted R^2 for a fit that used ``x.shape[1]`` features.

    Penalizes the plain R^2 (``R_sq``) for model size, with
    len(y) observations and len(y_pred) predictions.
    """
    n_obs = len(y)
    n_features = x.shape[1]
    penalty = (n_obs - 1) / (len(y_pred) - n_features - 1)
    return 1 - (1 - R_sq) * penalty
def calculate_aic_bic(n, mse, num_params):
    """Return the (AIC, BIC) information criteria for a fitted model.

    Args:
        n: number of instances in y.
        mse: mean squared error of the fit.
        num_params: number of estimated model parameters.

    Returns:
        Tuple ``(aic, bic)``; both are based on n*log(mse), differing
        only in the complexity penalty (2k vs k*log(n)).
    """
    log_mse = np.log(mse)
    aic = n * log_mse + 2 * num_params
    bic = n * log_mse + num_params * np.log(n)
    return aic, bic
def printScores(y_test,y_pred,x=None,*, alg_type='c'):
    """Print and return evaluation scores for a fitted model.

    Args:
        y_test: ground-truth target values.
        y_pred: predicted target values.
        x: feature matrix; only used by the regression branch
            (adjusted R2, AIC, BIC).
        alg_type: 'c' for classfication, 'r' for regressin.

    Returns:
        (accuracy, recall, precision, f1) for classification, or
        (rmse, mae, r2, adjusted_r2, aic, bic) for regression,
        all regression values rounded to 2 decimals.
    """
    if alg_type=='c':
        # NOTE(review): recall/precision/f1 use sklearn's default (binary)
        # averaging — multi-class targets would raise; confirm this is only
        # used for binary classification.
        acc=accuracy_score(y_test,y_pred)
        print("Accuracy:",acc)
        recall=recall_score(y_test,y_pred)
        print("Recall:",recall)
        precision=precision_score(y_test,y_pred)
        print("Precision:",precision)
        f1=f1_score(y_test,y_pred)
        print("F1:",f1)
        return acc,recall,precision,f1
    else:
        mse=mean_squared_error(y_test,y_pred) # squared=False would give RMSE directly, but the raw MSE is also needed for AIC/BIC below
        rmse=round(np.sqrt(mse),2)
        print("RMSE:",rmse)
        mae=round(mean_absolute_error(y_test,y_pred),2)
        print("MAE:",mae)
        r2=round(r2_score(y_test,y_pred),2)
        print("r2:",r2)
        adjr2=round(adjustedr2(r2_score(y_test,y_pred),y_test,y_pred,x),2)
        print("Adjusted R2:",adjr2)
        # NOTE(review): len(x) is the number of ROWS, while calculate_aic_bic
        # expects the number of model parameters — confirm intent.
        aic, bic=calculate_aic_bic(len(y_test),mse,len(x))
        print("AIC:",round(aic,2))
        print("BIC:",round(bic,2))
        return (rmse,mae,r2,adjr2,round(aic,2),round(bic,2))
def draw_siluet(range_n_clusters,data,isbasic=True,printScores=True):
    """
    Used for K-means: silhouette analysis over a range of cluster counts.

    Args:
        range_n_clusters: iterable of cluster counts (k values) to try.
        data: samples to cluster (numpy array).
        isbasic: True plots a simple score-vs-k line; False draws the full
            per-cluster silhouette diagrams plus a 2-D scatter of the
            clustering (assumes the first two columns are plottable).
        printScores: when isbasic is True, also print each (k, score).

    NOTE(review): the ``printScores`` flag shadows the module-level
    printScores() function inside this scope.
    """
    if isbasic==False:
        for n_clusters in range_n_clusters:
            # Create a subplot with 1 row and 2 columns
            fig, (ax1, ax2) = plt.subplots(1, 2)
            fig.set_size_inches(12,4)

            ax1.set_xlim([-1, 1])
            # The (n_clusters+1)*10 is for inserting blank space between silhouette
            # plots of individual clusters, to demarcate them clearly.
            ax1.set_ylim([0, len(data) + (n_clusters + 1) * 10])

            # Initialize the clusterer with n_clusters value and a random generator
            # seed of 10 for reproducibility.
            clusterer = KMeans(n_clusters=n_clusters, random_state=10)
            cluster_labels = clusterer.fit_predict(data)

            # The silhouette_score gives the average value for all the samples.
            # This gives a perspective into the density and separation of the formed
            # clusters
            silhouette_avg = silhouette_score(data, cluster_labels)
            print("For n_clusters =", n_clusters,
                  "The average silhouette_score is :", silhouette_avg)

            # Compute the silhouette scores for each sample
            sample_silhouette_values = silhouette_samples(data, cluster_labels)

            y_lower = 10
            for i in range(n_clusters):
                # Aggregate the silhouette scores for samples belonging to
                # cluster i, and sort them
                ith_cluster_silhouette_values = \
                    sample_silhouette_values[cluster_labels == i]

                ith_cluster_silhouette_values.sort()

                size_cluster_i = ith_cluster_silhouette_values.shape[0]
                y_upper = y_lower + size_cluster_i

                color = cm.nipy_spectral(float(i) / n_clusters)
                ax1.fill_betweenx(np.arange(y_lower, y_upper),
                                  0, ith_cluster_silhouette_values,
                                  facecolor=color, edgecolor=color, alpha=0.7)

                # Label the silhouette plots with their cluster numbers at the middle
                ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

                # Compute the new y_lower for next plot
                y_lower = y_upper + 10  # 10 for the 0 samples

            ax1.set_title("The silhouette plot for the various clusters.")
            ax1.set_xlabel("The silhouette coefficient values")
            ax1.set_ylabel("Cluster label")

            # The vertical line for average silhouette score of all the values
            ax1.axvline(x=silhouette_avg, color="red", linestyle="--")

            ax1.set_yticks([])  # Clear the yaxis labels / ticks
            ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])

            # 2nd Plot showing the actual clusters formed
            colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
            ax2.scatter(data[:, 0], data[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                        c=colors, edgecolor='k')

            # Labeling the clusters
            centers = clusterer.cluster_centers_
            # Draw white circles at cluster centers
            ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
                        c="white", alpha=1, s=200, edgecolor='k')

            # number each centroid with its cluster index
            for i, c in enumerate(centers):
                ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
                            s=50, edgecolor='k')

            ax2.set_title("The visualization of the clustered data.")
            ax2.set_xlabel("Feature space for the 1st feature")
            ax2.set_ylabel("Feature space for the 2nd feature")

            plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                          "with n_clusters = %d" % n_clusters),
                         fontsize=14, fontweight='bold')

            plt.show()
    else:
        # basic mode: one average silhouette score per k, drawn as a line
        ss = []
        for n in range_n_clusters:
            kmeans = KMeans(n_clusters=n)
            kmeans.fit_transform(data)
            labels = kmeans.labels_
            score = silhouette_score(data, labels)
            ss.append(score)
            if printScores==True:
                print(n,score)
        plt.plot(range_n_clusters,ss)
def drawEpsilonDecider(data, n):
    """Plot sorted nearest-neighbour distances to help choose DBSCAN's eps.

    Args:
        data: numpy array of samples.
        n: # of neighbours used in the k-NN search.
    """
    knn = NearestNeighbors(n_neighbors=n).fit(data)
    dists, _ = knn.kneighbors(data)
    # sort column-wise and keep the distance to the nearest other point
    nearest = np.sort(dists, axis=0)[:, 1]
    plt.ylabel("eps")
    plt.plot(nearest)
def draw_elbow(ks, data):
    """Plot WCSS (inertia) against cluster count — the elbow method."""
    inertias = []
    for k in ks:
        # k-means++ seeding avoids the random initialization trap
        model = KMeans(n_clusters=k, init='k-means++', max_iter=300,
                       n_init=10, random_state=0)
        model.fit(data)
        inertias.append(model.inertia_)
    plt.plot(ks, inertias)
    plt.title('Elbow Method')
    plt.xlabel('# of clusters')
    plt.ylabel('WCSS')
    plt.show()
#PCA biplot
def biplot(score,coeff,y,variance,labels=None):
    """
    Draw a PCA biplot: samples projected onto the first two components,
    plus one arrow per original feature showing its loading.

    found here: https://stackoverflow.com/questions/39216897/plot-pca-loadings-and-loading-in-biplot-in-sklearn-like-rs-autoplot

    Args:
        score: projected data; only the first two columns are used.
        coeff: loadings, one row per original feature, two columns.
        y: per-sample values used to colour the scatter points.
        variance: explained-variance values for PC1/PC2 (axis labels).
        labels: optional feature names for the arrows; defaults to Var1..VarN.
    """
    xs = score[:,0]
    ys = score[:,1]
    n = coeff.shape[0]
    # rescale scores into [-1, 1] so they share an axis with the loadings
    scalex = 1.0/(xs.max() - xs.min())
    scaley = 1.0/(ys.max() - ys.min())
    plt.scatter(xs * scalex,ys * scaley, c = y)
    for i in range(n):
        plt.arrow(0, 0, coeff[i,0], coeff[i,1],color = 'r',alpha = 0.5)
        # place the label slightly beyond the arrow tip
        if labels is None:
            plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, "Var"+str(i+1), color = 'g', ha = 'center', va = 'center')
        else:
            plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, labels[i], color = 'g', ha = 'center', va = 'center')

    plt.xlim(-1,1)
    plt.ylim(-1,1)
    plt.xlabel("PC{},Variance:{}".format(1,variance[0]))
    plt.ylabel("PC{},Variance:{}".format(2,variance[1]))
    plt.grid()
def PCAChart(X_pca, alpha=0.2):
    """Scatter-plot a 2- or 3-component PCA projection.

    Args:
        X_pca: array of shape (n_samples, n_components); n_components
            must be 2 or 3, otherwise a message is printed and nothing
            is drawn.
        alpha: point transparency.
    """
    n = X_pca.shape[1]  # second dimension is the number of components
    if n == 2:
        plt.scatter(X_pca[:, 0], X_pca[:, 1], alpha=alpha)
    elif n == 3:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # call scatter through the axes instance instead of the unbound
        # Axes3D.scatter(ax, ...) form the original used
        ax.scatter(xs=X_pca[:, 0], ys=X_pca[:, 1], zs=X_pca[:, 2], alpha=alpha)
    else:
        print("n should be either 2 or 3")
def getfullitemsforOHE(wholedf, featlist, sort=True):
    """Collect the unique values of each listed feature, e.g. for OneHotEncoder.

    Args:
        wholedf: dataframe including BOTH train and test set rows, so no
            category is missed.
        featlist: column names whose categories should be collected.
        sort: when True each category collection is returned sorted.

    Returns:
        A list with one entry per feature, in ``featlist`` order.
    """
    uniques_per_feature = (wholedf[feat].unique() for feat in featlist)
    if sort:
        return [sorted(values) for values in uniques_per_feature]
    return [values for values in uniques_per_feature]
def getfeaturenames(ct,dataframe):
    """Recover output feature names from a fitted ColumnTransformer ``ct``.

    One-hot-encoded outputs get an "OHE_" prefix so they can later be
    re-aggregated per source feature (see featureImportanceEncoded);
    remainder columns are mapped from indices back to ``dataframe``'s
    column names.

    NOTE(review): get_feature_names was renamed get_feature_names_out in
    scikit-learn >= 1.0 — this helper targets the older API.
    """
    final_features=[]

    for trs in ct.transformers_:
        trName=trs[0]
        trClass=trs[1]
        features=trs[2]
        if isinstance(trClass,Pipeline):
            n,tr=zip(*trClass.steps)
            for t in tr: #t is a transformator object, tr is the list of all transoformators in the pipeline
                if isinstance(t,OneHotEncoder):
                    for f in t.get_feature_names(features):
                        final_features.append("OHE_"+f)
                    break
            else: #if not found onehotencoder, add the features directly
                for f in features:
                    final_features.append(f)
        elif isinstance(trClass,OneHotEncoder): #?type(trClass)==OneHotEncoder:
            for f in trClass.get_feature_names(features):
                final_features.append("OHE_"+f)
        else:
            # remainders: the transformer reports column INDICES, so map
            # them back to names via the original dataframe
            if trName=="remainder":
                for i in features:
                    final_features.append(list(dataframe.columns)[i])
            #all the others
            else:
                for f in features:
                    final_features.append(f)

    return final_features
def featureImportanceEncoded(importance,feature_names,figsize=(8,6)):
    """Bar-plot feature importances, re-aggregating one-hot-encoded columns.

    Columns named with the "OHE_" prefix (as produced by getfeaturenames)
    are summed back into their source feature before plotting.

    Args:
        importance: importances aligned with ``feature_names``.
        feature_names: encoded feature names.
        figsize: matplotlib figure size.
    """
    plt.figure(figsize=figsize)
    dfimp=pd.DataFrame(importance.reshape(-1,1).T,columns=feature_names).T
    dfimp.index.name="Encoded"
    dfimp.rename(columns={0: "Importance"},inplace=True)
    dfimp.reset_index(inplace=True)
    # strip "OHE_" and keep the text before the first "_" to recover the
    # original (pre-encoding) feature name
    dfimp["Feature"]=dfimp["Encoded"].apply(lambda x:x[4:].split('_')[0] if "OHE" in x else x)
    dfimp.groupby(by='Feature')["Importance"].sum().sort_values().plot(kind='barh');
def compareClassifiers(gs,tableorplot='plot',figsize=(10,5)):
    """Summarize a fitted GridSearchCV that searched over a 'clf' step.

    Groups cv_results_ by classifier class and reports, per classifier,
    the best mean test score and the fastest mean fit time.

    Args:
        gs: fitted GridSearchCV whose parameter grid includes 'clf'.
        tableorplot: 'table' returns the summary DataFrame; anything else
            draws a dual-axis bar (score) / line (fit time) chart.
        figsize: figure size used when plotting.
    """
    cvres = gs.cv_results_
    cv_results = pd.DataFrame(cvres)
    # keep just the class name: "LogisticRegression(...)" -> "LogisticRegression"
    cv_results['param_clf']=cv_results['param_clf'].apply(lambda x:str(x).split('(')[0])
    cols={"mean_test_score":"MAX of mean_test_score","mean_fit_time":"MIN of mean_fit_time"}
    summary=cv_results.groupby(by='param_clf').agg({"mean_test_score":"max", "mean_fit_time":"min"}).rename(columns=cols)
    summary.sort_values(by='MAX of mean_test_score', ascending=False,inplace=True)
    if tableorplot=='table':
        return summary
    else:
        fig, ax1 = plt.subplots(figsize=figsize)

        # left axis: best score as red bars
        color = 'tab:red'
        ax1.set_xticklabels('Classifiers', rotation=45,ha='right')
        ax1.set_ylabel('MAX of mean_test_score', color=color)
        ax1.bar(summary.index, summary['MAX of mean_test_score'], color=color)
        ax1.tick_params(axis='y', labelcolor=color)

        # right axis: fastest fit time as a blue line, sharing the x axis
        ax2 = ax1.twinx()
        color = 'tab:blue'
        ax2.set_ylabel('MIN of mean_fit_time', color=color)
        ax2.plot(summary.index, summary['MIN of mean_fit_time'], color=color)
        ax2.tick_params(axis='y', labelcolor=color)
        plt.show()
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm: confusion matrix (e.g. from sklearn.metrics.confusion_matrix).
        classes: tick labels for both axes, in matrix order.
        normalize: when True, show row-normalized fractions instead of counts.
        title: plot title.
        cmap: matplotlib colormap.
    """
    if normalize:
        # Normalize BEFORE drawing so the image, the colorbar and the cell
        # texts all show the same (row-normalized) values. The original
        # normalized after imshow, so the image showed raw counts while
        # the cell texts showed fractions.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # annotate each cell; white text on dark cells for contrast
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def CheckForClusterinTendencyWithHopkins(df):
    """
    Hopkins statistic for cluster tendency.

    taken from https://matevzkunaver.wordpress.com/2017/06/20/hopkins-test-for-cluster-tendency/
    the closer to 1, the higher probability of clustering tendency
    """
    d = df.shape[1]
    #d = len(vars) # columns
    n = len(df) # rows
    m = int(0.1 * n) # heuristic from article [1]

    # nearest-neighbour searcher over the real data
    nbrs = NearestNeighbors(n_neighbors=1).fit(df.values)

    # m random row indices sampled (without replacement) from the data
    rand_X = sample(range(0, n, 1), m)

    ujd = []  # distances from uniform-random synthetic points to the data
    wjd = []  # distances from sampled real points to their nearest neighbour
    for j in range(0, m):
        # synthetic point drawn uniformly inside the data's bounding box;
        # NOTE(review): index [0][1] takes the SECOND-nearest sample even
        # though the synthetic point is not in the dataset — this matches
        # the blog source, but confirm it is intended.
        u_dist, _ = nbrs.kneighbors(uniform(np.amin(df,axis=0),np.amax(df,axis=0),d).reshape(1, -1), 2, return_distance=True)
        ujd.append(u_dist[0][1])
        # real sample: index 0 is the point itself at distance 0, so [1]
        # is its nearest *other* sample
        w_dist, _ = nbrs.kneighbors(df.iloc[rand_X[j]].values.reshape(1, -1), 2, return_distance=True)
        wjd.append(w_dist[0][1])

    H = sum(ujd) / (sum(ujd) + sum(wjd))
    if isnan(H):
        # degenerate case (e.g. all distances zero): report and fall back
        print(ujd, wjd)
        H = 0

    return H
def getNumberofCatsAndNumsFromDatasets(path, size=10_000_000):
    """Count the features by main type (numeric/categorical/datetime) for
    every csv/xlsx file found in ``path``.

    Args:
        path: directory the files reside in.
        size: only files smaller than this many bytes are read
            (default ~10MB); if chosen larger it will take longer.

    Returns:
        DataFrame indexed by filename with columns
        ``numeric``/``categorical``/``datettime``.
    """
    previous_dir = os.getcwd()
    results = []
    try:
        os.chdir(path)
        for name in os.listdir():
            try:
                if not os.path.isfile(name) or os.path.getsize(name) >= size:
                    continue
                ext = os.path.splitext(name)[1]
                if ext == ".csv":
                    df = pd.read_csv(name, encoding="ISO-8859-1")
                elif ext == ".xlsx":
                    df = pd.read_excel(name)
                else:
                    continue
                nums = len(df.select_dtypes("number").columns)
                date = len(df.select_dtypes(include=[np.datetime64]).columns)
                # object columns minus datetimes = categorical count
                cats = len(df.select_dtypes("O").columns) - date
                results.append((name, nums, cats, date))
            except Exception:
                # skip unreadable/corrupt files instead of aborting the
                # whole scan (was a silent bare ``except: pass``)
                continue
    finally:
        # restore the caller's working directory (the original leaked the chdir)
        os.chdir(previous_dir)
    dffinal = pd.DataFrame(results, columns=["filename", "numeric", "categorical", "datettime"])
    # set_index returns a new frame — the original discarded the result,
    # leaving the index unset; return it so the frame is filename-indexed
    return dffinal.set_index("filename")
#Functions to run before and during modelling
def checkIfNumberOfInstanceEnough(df):
    """
    Placeholder checklist item (not implemented yet):
    - If there are very few rows, more data should be collected.
    - If there are extremely many rows, partial sampling can be applied.
    - If unsure whether the data volume is enough, try all of it first;
      if training takes too long, reduce it gradually.
    """
def checkIfNumberOFeatures(df):
    """
    Placeholder checklist item (not implemented yet):
    - If there are few columns (features), try to obtain new ones.
    - If there are many columns, apply dimensionality-reduction and
      important-feature-selection methods.
    - As with rows, if unsure whether there are too many columns, model
      with all of them first; if training is slow or overfits, apply
      feature-reduction methods.
    Reducing the column count not only shortens training time but also
    prevents overfitting.
    """
def checkForImbalancednessForLabels(df):
    """
    (If imbalanced, oversample AFTER the train/test split.)
    """
def remindForSomeProcesses():
    """
    Reminder stub: prints which steps need no transformation.
    """
    print("transformasyon gerektirmeyen kısımlar: feature extraction, feaute selection, feature elimination")
def remindForDiscreteization():
    """
    In which situations should discretization be applied to numerics
    with high cardinality?
    """
# X and y are determined manually in between
def traintest(X,y,testsize):
    # first run the train/test split; take other parameters too if needed
    print("dont touch test set")
def remindForStep2FE():
    print("transformasyon gerektiren işlemler step 2, hangileri?????????")
# meanwhile the first line in the explanation below can be run
def buildModel(train,test):
    """
    Should we build multiple models here? VotingClassifier. Maybe take the
    pipelines as parameters — if so, remind about that in the previous step
    too, and of course tellWhatAlgorithmsToUse should also run.
    fit / transform
    predict
    check the score; if it is very low look at underfitting causes; if
    there is no obvious cause try new features + new data (if scarce),
    or a new model.
    if the score is good, check with cross-validation.
    then evaluate on the test set.
    """
def tellWhatAlgorithmsToUse(df,type):
    """
    Separately for supervised and unsupervised?
    """
28eb9384d1558fa0c10861f56ac8ad811737befd | 845 | py | Python | src/isaw.theme/isaw/theme/browser/viewlets/zotero.py | isawnyu/isaw.web | 604499f9fa55d1ce9698ca05f85ddb54a88f1cab | [
"CC-BY-3.0"
] | null | null | null | src/isaw.theme/isaw/theme/browser/viewlets/zotero.py | isawnyu/isaw.web | 604499f9fa55d1ce9698ca05f85ddb54a88f1cab | [
"CC-BY-3.0"
] | 405 | 2015-03-12T18:20:25.000Z | 2022-03-07T18:44:16.000Z | src/isaw.theme/isaw/theme/browser/viewlets/zotero.py | isawnyu/isaw.web | 604499f9fa55d1ce9698ca05f85ddb54a88f1cab | [
"CC-BY-3.0"
] | 1 | 2016-11-07T21:18:49.000Z | 2016-11-07T21:18:49.000Z | import re
from urlparse import urlparse
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
ZOTERO_JSON_BASE = 'https://api.zotero.org{}?v=3&format=json'
Z_MATCH = re.compile(r'^/(groups|users)/\d+/items/[A-Z1-9]+$')
class PublicationZoteroViewlet(ViewletBase):
    """Viewlet exposing a publication's Zotero record to the template.

    Reads the context's ``bibliographic_uri`` and, when it points at a
    valid www.zotero.org item, publishes both the HTML page URL and the
    corresponding JSON API URL.
    """

    render = ViewPageTemplateFile('zotero.pt')
    # populated by update() only when the URI matches Z_MATCH
    html_ref = None
    json_ref = None

    def update(self):
        # bibliographic_uri is optional on the context
        zotero_url = getattr(self.context, 'bibliographic_uri', None)
        if not zotero_url:
            return
        parsed = urlparse(zotero_url)
        zotero_path = parsed.path
        domain = parsed.netloc
        # only accept canonical zotero.org item paths, e.g.
        # /groups/<id>/items/<KEY> (see the Z_MATCH regex above)
        if domain == 'www.zotero.org' and Z_MATCH.match(zotero_path):
            self.html_ref = zotero_url
            self.json_ref = ZOTERO_JSON_BASE.format(zotero_path)
| 33.8 | 71 | 0.695858 |
28ec2d89ad8ce29a9ec68a6cf207b6114836df8c | 1,079 | py | Python | descender.py | illBeRoy/tldr-of-the-world-data | 06d581eb117bdc79ebbe7af4f8ae4b26190d7231 | [
"MIT"
] | null | null | null | descender.py | illBeRoy/tldr-of-the-world-data | 06d581eb117bdc79ebbe7af4f8ae4b26190d7231 | [
"MIT"
] | null | null | null | descender.py | illBeRoy/tldr-of-the-world-data | 06d581eb117bdc79ebbe7af4f8ae4b26190d7231 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import json
import jinja2
import webbrowser
import graph
if __name__ == '__main__':
    # Render an HTML report of the joint neighbours of each seed group.
    parser = argparse.ArgumentParser()
    parser.add_argument('groups', help='json file describing seed groups')
    args = parser.parse_args()

    # load groups from file
    with open(args.groups, 'rb') as f:
        groups = json.loads(f.read())

    # load template from file
    with open('descender.html.jinja', 'rb') as f:
        template = jinja2.Template(f.read())

    # load graph from file (the local name is kept distinct from the
    # imported ``graph`` module, which the original shadowed)
    people_graph = graph.Graph()
    people_graph.load('./graph.pickle')

    # find neighbours using the given groups and weight vector
    for group in groups:
        group['neighbours'] = people_graph.get_joint_neighbours(group['members'], group_size=50)
        # keep ASCII characters only so the rendered HTML stays safe
        group['neighbours'] = [''.join([c for c in x if ord(c) < 128]) for x in group['neighbours']]

    # generate output file
    with open('/tmp/descender.results.html', 'wb') as f:
        f.write(template.render({'groups': groups}))

    # open it
    webbrowser.open('file:///tmp/descender.results.html')
28ee6695911d76e64bcea52634f3ac6a4cf51dda | 268 | py | Python | picompile/types.py | pfalcon/picompile | 7724e629e3ffc48b066c938d7f02b5f4e8276051 | [
"MIT"
] | 3 | 2020-01-28T03:54:06.000Z | 2021-02-25T14:38:50.000Z | picompile/types.py | pfalcon/picompile | 7724e629e3ffc48b066c938d7f02b5f4e8276051 | [
"MIT"
] | null | null | null | picompile/types.py | pfalcon/picompile | 7724e629e3ffc48b066c938d7f02b5f4e8276051 | [
"MIT"
] | null | null | null | "Actual (primitive) types instanitated in the type system."
import sys
from .typesys import *
class np:
# Dummy
class ndarray:
pass
int32 = TCon("Int32")
int64 = TCon("Int64")
float32 = TCon("Float")
double64 = TCon("Double")
void = TCon("Void")
| 14.105263 | 59 | 0.652985 |
28f158e00bdef100d0ce90ce07f81579244e0faa | 2,735 | py | Python | tests/fraction_tests/test_neg.py | davideguidobene/cfractions | 2fc33d3ddf972e07787bdc99443868137999e114 | [
"MIT"
] | 3 | 2022-01-18T21:17:17.000Z | 2022-01-23T21:49:52.000Z | tests/fraction_tests/test_neg.py | davideguidobene/cfractions | 2fc33d3ddf972e07787bdc99443868137999e114 | [
"MIT"
] | 3 | 2021-06-28T13:30:58.000Z | 2022-01-16T19:05:00.000Z | tests/fraction_tests/test_neg.py | davideguidobene/cfractions | 2fc33d3ddf972e07787bdc99443868137999e114 | [
"MIT"
] | 1 | 2021-10-22T02:12:06.000Z | 2021-10-22T02:12:06.000Z | import sys
from numbers import Real
from hypothesis import given
from cfractions import Fraction
from tests.utils import (equivalence,
is_fraction_valid,
skip_reference_counter_test)
from . import strategies
@given(strategies.fractions)
def test_basic(fraction: Fraction) -> None:
result = -fraction
assert isinstance(result, Fraction)
assert is_fraction_valid(result)
@given(strategies.fractions)
def test_involution(fraction: Fraction) -> None:
result = -fraction
assert fraction == -result
@given(strategies.fractions)
def test_fixed_point(fraction: Fraction) -> None:
result = -fraction
assert equivalence(fraction == result, not fraction)
@given(strategies.fractions)
def test_involution(fraction: Fraction) -> None:
result = -fraction
assert fraction == -result
@given(strategies.fractions, strategies.finite_builtin_reals)
def test_add_operand(first: Fraction, second: Real) -> None:
assert -(first + second) == (-first) + (-second)
@given(strategies.finite_builtin_non_fractions, strategies.fractions)
def test_radd_operand(first: Real, second: Fraction) -> None:
assert -(first + second) == (-first) + (-second)
@given(strategies.fractions, strategies.finite_builtin_reals)
def test_sub_operand(first: Fraction, second: Real) -> None:
assert -(first - second) == (-first) - (-second)
@given(strategies.finite_builtin_non_fractions, strategies.fractions)
def test_rsub_operand(first: Real, second: Fraction) -> None:
assert -(first - second) == (-first) - (-second)
@given(strategies.fractions, strategies.finite_builtin_reals)
def test_mul_operand(first: Fraction, second: Real) -> None:
assert -(first * second) == (-first) * second == first * (-second)
@given(strategies.finite_builtin_non_fractions, strategies.fractions)
def test_rmul_operand(first: Real, second: Fraction) -> None:
assert -(first * second) == (-first) * second == first * (-second)
@given(strategies.fractions, strategies.finite_non_zero_reals)
def test_truediv_operand(first: Fraction, second: Real) -> None:
assert -(first / second) == (-first) / second == first / (-second)
@given(strategies.finite_builtin_non_fractions, strategies.non_zero_fractions)
def test_rtruediv_operand(first: Real, second: Fraction) -> None:
assert -(first / second) == (-first) / second == first / (-second)
@skip_reference_counter_test
@given(strategies.fractions)
def test_reference_counter(fraction: Fraction) -> None:
fraction_refcount_before = sys.getrefcount(fraction)
result = -fraction
fraction_refcount_after = sys.getrefcount(fraction)
assert fraction_refcount_after == fraction_refcount_before
| 30.054945 | 78 | 0.729433 |
28f37ae0b38a0ca393af612ee52d0d305a27311a | 336 | py | Python | flag/parser.py | danielchatfield/flag | e4c2bea2a74a428b9fdf7bf350e218c8cd71fdc5 | [
"MIT"
] | 2 | 2016-06-26T22:16:08.000Z | 2020-08-15T12:20:18.000Z | flag/parser.py | danielchatfield/flag | e4c2bea2a74a428b9fdf7bf350e218c8cd71fdc5 | [
"MIT"
] | null | null | null | flag/parser.py | danielchatfield/flag | e4c2bea2a74a428b9fdf7bf350e218c8cd71fdc5 | [
"MIT"
] | 1 | 2020-08-15T12:21:12.000Z | 2020-08-15T12:21:12.000Z | # -*- coding: utf-8 -*-
"""
flag.parser
~~~~~~~~~~~
"""
import argparse
from . import registry
def parse():
    """Build an argparse parser from every registered flag, parse the
    command line, and push each parsed value back into its flag object."""
    parser = argparse.ArgumentParser()
    # let every registered flag contribute its own argument definition
    for flag in registry.iter():
        flag.add_to_parser(parser)
    args = vars(parser.parse_args())
    # write the parsed values back onto the flag objects
    for flag in registry.iter():
        flag.update(args[flag.name])
| 16 | 38 | 0.592262 |
28f476813b8879c19e3170513ebfab33d088e25a | 93 | py | Python | ror/number_utils.py | jakub-tomczak/ror | cf9ab38a2d66f4816a1289b9726911960059fce7 | [
"MIT"
] | null | null | null | ror/number_utils.py | jakub-tomczak/ror | cf9ab38a2d66f4816a1289b9726911960059fce7 | [
"MIT"
] | null | null | null | ror/number_utils.py | jakub-tomczak/ror | cf9ab38a2d66f4816a1289b9726911960059fce7 | [
"MIT"
] | null | null | null | def format_number(number: float, precision: int) -> str:
return f'{number:.{precision}f}' | 46.5 | 56 | 0.698925 |
28f54f1fb9cdd4025290b813dd74c2874e584666 | 14,375 | py | Python | 4_Model_Updater/train_new_model.py | kshahnazari1998/SmartDota-Public | 270ddabfd353c57e754c00b7a5365d99f4d5902f | [
"MIT"
] | null | null | null | 4_Model_Updater/train_new_model.py | kshahnazari1998/SmartDota-Public | 270ddabfd353c57e754c00b7a5365d99f4d5902f | [
"MIT"
] | null | null | null | 4_Model_Updater/train_new_model.py | kshahnazari1998/SmartDota-Public | 270ddabfd353c57e754c00b7a5365d99f4d5902f | [
"MIT"
] | null | null | null | import json
import pandas as pd
import numpy as np
import random
from Sqldatabasehandler import sqlhandler
from datetime import datetime
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import PolynomialFeatures
import pickle
import torch
from torch import nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
class ModelUpdater:
    def __init__(self, host, user, passwd, database):
        """
        The constructor for the Class.

        Opens a SQL handle (see Sqldatabasehandler.sqlhandler) to the
        match database that all other methods query.
        """
        self.sqlhand = sqlhandler(host, user, passwd, database)
    def update_model(self, batchsize=100000):
        """Incrementally retrain on games newer than the last checkpoint.

        Trains only when at least ``batchsize`` new rows exist; persists
        the refreshed torch and linear models (both the "current" files
        and timestamped copies under ./old_models/), advances the
        checkpoint, then recurses to drain any remaining full batches.

        Returns:
            -1 on query failure, 0 when fewer than batchsize new games
            exist; otherwise recurses until a terminal case is hit.
        """
        last_seq = self.get_last_game()
        res = self.sqlhand.SqlQueryExec(
            "SELECT count(*) FROM DotaMatches WHERE GameSEQ> %s",
            True,
            [
                last_seq,
            ],
        )
        if res == -1:
            return -1
        new_games_count = self.sqlhand.get_row_result()
        if new_games_count >= batchsize:
            # one ordered batch of unseen games
            games = self.sqlhand.get_all_select_rows(
                "SELECT * FROM DotaMatches WHERE GameSEQ>%s order by GameSEQ Limit %s",
                [
                    last_seq,
                    batchsize,
                ],
            )
            # fetch the column names for the raw tuples
            cols = self.sqlhand.get_all_select_rows(
                "SHOW columns FROM DotaMatches",
            )
            cols = [x[0] for x in cols]
            games = pd.DataFrame(games)
            games.columns = cols
            # drop rows with missing pick data
            games = games.dropna(subset=["Pick1Rad"])
            model, linear = self.train_new_model(games)
            now = datetime.now()
            date_time = now.strftime("%m_%d_%Y_%H")
            # advance the checkpoint before writing the model files
            max_game_seq = games["GameSEQ"].max()
            self.update_last_game(max_game_seq)
            torch.save(model.state_dict(), "./model.pth")
            torch.save(
                model.state_dict(),
                "./old_models/model_" + date_time + "_" + str(max_game_seq) + ".pth",
            )
            pickle.dump(linear, open(f"linear_model", "wb"))
            pickle.dump(
                linear,
                open(
                    "./old_models/linear_model_"
                    + date_time
                    + "_"
                    + str(max_game_seq)
                    + ".pth",
                    "wb",
                ),
            )
            del games
            # NOTE(review): the recursive call falls back to the DEFAULT
            # batchsize, dropping any caller-supplied value — confirm intent.
            self.update_model()
        else:
            return 0
def get_last_game(self):
try:
filepath = "last_game_seq.txt"
fp = open(filepath)
last_seq = int(fp.read())
fp.close()
return last_seq
except:
return -1
def update_last_game(self, lastseq):
try:
filepath = "last_game_seq.txt"
fp = open(filepath, "w")
fp.write(str(lastseq))
fp.close()
return 0
except:
return -1
def train_new_model(self, df):
df_no_leavers = df.query("Leavers==0")
class game_datasets(Dataset):
def __init__(self, rawdata):
X = rawdata.loc[:, "Pick1Rad":"Pick5Dir"]
y = rawdata["RadiantWin"]
self.x = torch.tensor(X.values)
self.y = torch.tensor(y.values)
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return len(self.y)
class GamePredictor_final(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(150, 50)
self.l2 = nn.Linear(50, 50)
self.l3 = nn.Linear(50, 1)
def forward(self, x):
# Pass the input tensor through each of our operations
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = self.l3(x)
return torch.sigmoid(x)
net = GamePredictor_final()
net.load_state_dict(torch.load("model.pth"))
net.train()
optimizer = optim.Adam(net.parameters(), lr=0.001)
Epochs = 1
for epoch in range(0, Epochs):
train_data_set = game_datasets(df_no_leavers)
train_data_loader = DataLoader(train_data_set, batch_size=10000)
train_data_iter = iter(train_data_loader)
for data in train_data_iter:
x, y = data
net.zero_grad()
x = self.game_datasets_transform_X(x, 10)
# print(x[100])
y = self.game_datasets_transform_Y(y, 10)
x = x.view(-1, 150).float()
y = y.view(-1, 1).float()
output = net(x)
loss_func = nn.MSELoss()
loss = loss_func(output, y)
loss.backward()
optimizer.step()
print("Done Training")
# Training SGD
train_data_set = game_datasets(df_no_leavers)
train_data_loader = DataLoader(train_data_set, batch_size=2500)
train_data_iter = iter(train_data_loader)
poly = PolynomialFeatures(degree=2)
loaded_model = pickle.load(open(f"linear_model", "rb"))
del train_data_set
for data in train_data_iter:
x, y = data
x = self.game_datasets_transform_X_SGD(x, 5)
y = self.game_datasets_transform_Y(y, 5)
x = x.view(-1, 300).float()
y = y.view(-1, 1).float()
x = x.numpy()
x = poly.fit_transform(x)
y = y.numpy().ravel()
loaded_model.partial_fit(x, y, [0, 1])
print("Done Training")
return net, loaded_model
def game_datasets_transform_X(self, data_X, mode=None, device="cpu"):
# If mode is none only the 10 picks are added.
# If mode is equal to 10 all possible combinations are added aswell.
# If mode is either 1,2,3,4,5 the picks with those scenarios are only added.
if mode is not None:
picks = data_X.t()
picks = picks.to(device)
# 1st picks
picks_rad = torch.zeros(data_X.shape[0], 150, device=device)
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[0].long())] = -1
picks_dire = torch.zeros(data_X.shape[0], 150, device=device)
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[5].long())
] = 1
if mode == 10:
res = torch.cat([picks_rad, picks_dire], dim=0)
if mode == 1:
return torch.cat([picks_rad, picks_dire], dim=0)
# 2nd picks
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[1].long())] = -1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[6].long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 2:
return torch.cat([picks_rad, picks_dire], dim=0)
# 3rd picks
picks_rad[
range(picks_rad.shape[0]), torch.LongTensor(picks[5:7].long())
] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[0:2].long())
] = -1
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[2].long())] = -1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[7].long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 3:
return torch.cat([picks_rad, picks_dire], dim=0)
# 4th picks
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[3].long())] = -1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[8].long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 4:
return torch.cat([picks_rad, picks_dire], dim=0)
# 5th picks
picks_rad[
range(picks_rad.shape[0]), torch.LongTensor(picks[7:9].long())
] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[2:4].long())
] = -1
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[4].long())] = -1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[9].long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 5:
return torch.cat([picks_rad, picks_dire], dim=0)
# All picks (Only for mode 10)
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[9].long())] = 1
res = torch.cat([res, picks_rad], dim=0)
return res
else:
picks = data_X.t()
picks = picks.to(device)
picks_all = torch.zeros(data_X.shape[0], 150, device=device)
picks_all[range(picks_all.shape[0]), picks[0:5]] = -1
picks_all[range(picks_all.shape[0]), picks[5:10]] = 1
return picks_all
def game_datasets_transform_X_SGD(self, data_X, mode=None, device="cpu"):
# If mode is none only the 10 picks are added.
# If mode is equal to 10 all possible combinations are added aswell.
# If mode is either 1,2,3,4,5 the picks with those scenarios are only added.
if mode is not None:
picks = data_X.t()
picks = picks.to(device)
# picks = data_X
# 1st picks
picks_rad = torch.zeros(data_X.shape[0], 300, device=device)
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[0].long())] = 1
picks_dire = torch.zeros(data_X.shape[0], 300, device=device)
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[5] + 150).long())
] = 1
if mode == 10:
res = torch.cat([picks_rad, picks_dire], dim=0)
if mode == 1:
return torch.cat([picks_rad, picks_dire], dim=0)
# 2nd picks
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[1].long())] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[6] + 150).long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 2:
return torch.cat([picks_rad, picks_dire], dim=0)
# 3rd picks
picks_rad[
range(picks_rad.shape[0]), torch.LongTensor((picks[5:7] + 150).long())
] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[0:2].long())
] = 1
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[2].long())] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[7] + 150).long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 3:
return torch.cat([picks_rad, picks_dire], dim=0)
# 4th picks
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[3].long())] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[8] + 150).long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 4:
return torch.cat([picks_rad, picks_dire], dim=0)
# 5th picks
picks_rad[
range(picks_rad.shape[0]), torch.LongTensor((picks[7:9] + 150).long())
] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[2:4].long())
] = 1
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[4].long())] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[9] + 150).long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 5:
return torch.cat([picks_rad, picks_dire], dim=0)
# All picks (Only for mode 10)
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[9].long())] = 1
res = torch.cat([res, picks_rad], dim=0)
return res
else:
picks = data_X.t()
picks = picks.to(device)
picks_all = torch.zeros(data_X.shape[0], 150, device=device)
picks_all[range(picks_all.shape[0]), picks[0:5]] = -1
picks_all[range(picks_all.shape[0]), picks[5:10]] = 1
return picks_all
def game_datasets_transform_Y(self, data_Y, mode=None):
# y_trans = []
if mode == None:
return data_Y
y = data_Y.numpy()
# for i, y in enumerate(data_Y.numpy()):
if mode < 10:
# y_trans.append(y)
# y_trans.append(y)
res = np.tile(y, 2)
else:
res = np.tile(y, 11)
# res = np.concatenate([y,y])
# for _ in range(10):
# # y_trans.append(y)
# res = np.concatenate([res,y])
return torch.tensor(res)
if __name__ == "__main__":
    # Define Dota game scraper and create database connection
    try:
        # Define Dota game scraper and create database connection
        # Credentials are read from keys.json next to the script.
        with open("keys.json") as f:
            keys = json.load(f)
        host = keys["database"]["host"]
        print(host)
        something = ModelUpdater(
            host=keys["database"]["host"],
            user=keys["database"]["user"],
            passwd=keys["database"]["passwd"],
            database=keys["database"]["database"],
        )
        something.update_model()
    # Broad catch so a missing/invalid keys.json or DB failure is reported
    # instead of crashing with a traceback.
    except Exception as e:
        print(f"Error in Dota_skill_scraper.py. Can't start script. Error is {e}")
| 36.209068 | 88 | 0.517704 |
28f6f0c2610028a27b78a080b28387f6adc1ab80 | 2,227 | py | Python | meshrcnn/structures/mask.py | MAYURGAIKWAD/meshrcnn | b47ecd47ca7de7055b7d141e63ddab286c5245f3 | [
"BSD-3-Clause"
] | 1,028 | 2020-01-23T23:30:54.000Z | 2022-03-27T22:33:50.000Z | meshrcnn/structures/mask.py | MAYURGAIKWAD/meshrcnn | b47ecd47ca7de7055b7d141e63ddab286c5245f3 | [
"BSD-3-Clause"
] | 103 | 2020-01-24T05:29:48.000Z | 2022-03-08T13:04:24.000Z | meshrcnn/structures/mask.py | MAYURGAIKWAD/meshrcnn | b47ecd47ca7de7055b7d141e63ddab286c5245f3 | [
"BSD-3-Clause"
] | 179 | 2020-01-24T08:14:30.000Z | 2022-03-19T00:34:05.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch.nn import functional as F
def crop_mask_within_box(mask, box, mask_size):
    """
    Crop the mask content in the given box.
    The cropped mask is resized to (mask_size, mask_size).

    This function is used when generating training targets for mask head in Mask R-CNN.
    Given original ground-truth masks for an image, new ground-truth mask
    training targets in the size of `mask_size x mask_size`
    must be provided for each predicted box. This function will be called to
    produce such targets.

    Args:
        mask (Tensor): A tensor mask image.
        box: 4 elements (x0, y0, x1, y1)
        mask_size (int):

    Returns:
        Tensor: tensor of shape (mask_size, mask_size) with values in {0, 1}
    """
    # Truncate box coordinates to integers and slice out the region.
    x0, y0, x1, y1 = (int(v) for v in box)
    patch = mask[y0:y1, x0:x1]
    # interpolate expects a (N, C, H, W) input, hence the two extra dims.
    patch = F.interpolate(
        patch[None, None], size=(mask_size, mask_size), mode="bilinear"
    )[0, 0]
    # Binarize the interpolated values.
    return (patch > 0).float()
def batch_crop_masks_within_box(masks, boxes, mask_side_len):
    """
    Batched version of :func:`crop_mask_within_box`.

    Args:
        masks (Masks): store N masks for an image in 2D array format.
        boxes (Tensor): store N boxes corresponding to the masks.
        mask_side_len (int): the size of the mask.

    Returns:
        Tensor: A tensor of shape (N, mask_side_len, mask_side_len), where
            N is the number of predicted boxes for this image.
    """
    target_device = boxes.device
    # Cropping runs on the CPU: mask storage is inefficient to handle on GPU
    # (possibly several small tensors per instance mask).
    cpu = torch.device("cpu")
    masks_cpu = masks.to(cpu)
    boxes_cpu = boxes.to(cpu)
    cropped = [
        crop_mask_within_box(m, b, mask_side_len)
        for m, b in zip(masks_cpu, boxes_cpu)
    ]
    if not cropped:
        return torch.empty(0, dtype=torch.float32, device=target_device)
    return torch.stack(cropped).to(device=target_device)
| 34.796875 | 97 | 0.68388 |
28f7bcf0fe258f3b0b26915576f9434aa6d0a9ec | 1,727 | py | Python | app/controller/org.py | Jimmy-Xu/fastapi_demo | f19c629cc7fa0e0e47e73e8688cd019bc74aa982 | [
"MIT"
] | 12 | 2020-09-01T09:19:41.000Z | 2022-03-17T05:48:50.000Z | app/controller/org.py | Jimmy-Xu/fastapi_demo | f19c629cc7fa0e0e47e73e8688cd019bc74aa982 | [
"MIT"
] | null | null | null | app/controller/org.py | Jimmy-Xu/fastapi_demo | f19c629cc7fa0e0e47e73e8688cd019bc74aa982 | [
"MIT"
] | 3 | 2021-04-26T02:53:04.000Z | 2021-11-01T14:32:38.000Z | from fastapi import APIRouter, Depends
from fastapi_plus.schema.base import ListArgsSchema, RespListSchema, RespIdSchema, RespBaseSchema
from fastapi_plus.utils.auth import get_auth_data
from fastapi_plus.utils.custom_route import CustomRoute
from ..schema.org import OrgInfoSchema, OrgRespDetailSchema
from ..service.org import OrgService
router = APIRouter(route_class=CustomRoute)
@router.post('/list', response_model=RespListSchema)
# NOTE(review): handler name shadows the builtin `list`; harmless for FastAPI
# routing but worth renaming if it is ever called directly.
async def list(*, args: ListArgsSchema, auth_data: dict = Depends(get_auth_data)):
    """
    Read the list of organization records.

    :param args: request argument set; ``user_id`` is overwritten from the
        authenticated user before querying
    :return: organization list structure
    """
    args.user_id = auth_data.get('user_id')
    return OrgService(auth_data).list(args)
@router.get('/{id}', response_model=OrgRespDetailSchema)
async def read(id: int, auth_data: dict = Depends(get_auth_data)):
    """
    Read the details of one organization.

    :param id: organization id
    :return: organization detail structure
    """
    resp = OrgRespDetailSchema()
    resp.detail = OrgService(auth_data).read(id)
    return resp
@router.post('', response_model=RespIdSchema, response_model_exclude_none=True)
async def create(*, info: OrgInfoSchema, auth_data: dict = Depends(get_auth_data)):
    """
    Create an organization record.

    :param info: organization data
    :return: response containing the new record's id
    """
    return OrgService(auth_data).create(info)
@router.put('/{id}', response_model=RespBaseSchema)
async def update(*, info: OrgInfoSchema, auth_data: dict = Depends(get_auth_data)):
    """
    Update an organization record.

    :param info: organization data (the path ``id`` is not used directly;
        presumably the id inside ``info`` drives the update — confirm)
    :return: base response structure
    """
    return OrgService(auth_data).update(info)
@router.delete("/{id}", response_model=RespBaseSchema)
async def delete(id: int, auth_data: dict = Depends(get_auth_data)):
    """
    Delete an organization record.

    :param id: organization id
    :return: base response structure
    """
    return OrgService(auth_data).delete(id)
| 27.412698 | 97 | 0.712218 |
28f8e1f5d1a46da294aab0598c860ad545a8ca7d | 652 | py | Python | gds/blueprint/api/__init__.py | UniqueHK/pholcus | bf14621cf1fcd3517f2bbca7f9a7e73ee06cbf4a | [
"MIT"
] | 4 | 2015-12-15T06:06:40.000Z | 2016-06-20T07:52:06.000Z | gds/blueprint/api/__init__.py | UniqueHK/pholcus | bf14621cf1fcd3517f2bbca7f9a7e73ee06cbf4a | [
"MIT"
] | 1 | 2016-02-02T04:35:16.000Z | 2016-02-02T04:35:16.000Z | gds/blueprint/api/__init__.py | UniqueHK/pholcus | bf14621cf1fcd3517f2bbca7f9a7e73ee06cbf4a | [
"MIT"
] | 1 | 2015-07-22T12:17:08.000Z | 2015-07-22T12:17:08.000Z | #!/usr/bin/env python
# coding=utf8
import os
# Project root (current working directory) and the exe upload target.
TOP = os.path.abspath('.')
STATIC = os.path.join(os.path.abspath('.'), 'static', 'exe')
# File extensions accepted for upload; set literal instead of set([...]).
ALLOWED_EXTENSIONS = {'py'}


def allowed(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive, so 'SCRIPT.PY' is accepted too.
    """
    # rsplit with maxsplit=1 takes only the final extension component.
    return '.' in filename and filename.rsplit('.', 1)[-1].lower() in ALLOWED_EXTENSIONS
def exepath(filename):
    """Return the path for *filename* under STATIC, ensuring its parent dir exists.

    :param filename: relative file name (may include subdirectories)
    :return: absolute path under STATIC
    """
    filepath = os.path.join(STATIC, filename)
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    return filepath
def modelpath(filename):
    """Return the path for *filename* relative to the project root (TOP)."""
    return os.path.join(TOP, filename)
def store(filepath, filedata):
    """Write *filedata* (text) to *filepath*, overwriting existing content.

    :param filepath: destination file path
    :param filedata: string content to write
    """
    # Context manager guarantees the handle is closed even if write() raises,
    # unlike the previous open()/write()/close() sequence.
    with open(filepath, 'w') as fi:
        fi.write(filedata)
28f8fe78a081d406b2ab32ded6242237cf670fed | 1,896 | py | Python | tests/refarm_redirects/test_views.py | fidals/blog | 2815b43d6724b142cc663a8cdc59d2afc9cad657 | [
"MIT"
] | 3 | 2017-05-05T12:01:54.000Z | 2018-03-06T21:33:04.000Z | tests/refarm_redirects/test_views.py | fidals/blog | 2815b43d6724b142cc663a8cdc59d2afc9cad657 | [
"MIT"
] | 172 | 2016-11-30T18:18:41.000Z | 2022-03-11T23:20:58.000Z | tests/refarm_redirects/test_views.py | fidals/blog | 2815b43d6724b142cc663a8cdc59d2afc9cad657 | [
"MIT"
] | 3 | 2017-09-25T17:11:46.000Z | 2020-04-17T09:14:43.000Z | import unittest
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site
from django.db.utils import IntegrityError
from django.test import TestCase
class Redirects(TestCase):
    """Tests for the `refarm-site.redirects` app's URL redirect behavior."""

    def test_redirect_from_existing_page(self):
        """`refarm-site.redirects` app should redirect from existing url too."""
        # take some existing `url_from`
        # @todo #360:30m Remove hardcoded fixture data.
        #  Replace `url_from` and `url_to` with urls, generated from db.
        #  It'll be much more short and clear.
        url_from = '/catalog/categories/category-0/tags/6-v/'
        # create redirect from `url_from` to another existing one - `url_to`
        url_to = '/catalog/categories/category-0/'
        Redirect.objects.create(
            site=Site.objects.first(),
            old_path=url_from,
            new_path=url_to
        )
        # `url_from` should redirect to `url_to`
        # 301 = permanent redirect, issued by django.contrib.redirects.
        response = self.client.get(url_from)
        self.assertEqual(response.status_code, 301)

    # @todo #360:60m Add db constraint for looped redirect.
    #  Example of looped redirect:
    #  `/news/one-two/ --> /news/one-two/`
    #  `60m` because schema and data migrations are needed.
    #  And fix test.
    # expectedFailure: the constraint does not exist yet, so the create()
    # below currently succeeds instead of raising.
    @unittest.expectedFailure
    def test_looped_redirect(self):
        """
        Redirect like `/news/one-two/ --> /news/one-two/` should fail.

        It should meet db constraint while adding.
        """
        # hardcoded fixtures will be fixed with task in test ahead.
        url_from = url_to = '/catalog/categories/category-0/tags/6-v/'
        # should raise exception, but not. Pdd task ahead will fix it.
        with self.assertRaises(IntegrityError):
            Redirect.objects.create(
                site=Site.objects.first(),
                old_path=url_from,
                new_path=url_to
            )
| 37.176471 | 80 | 0.639241 |
28f9b230456479c535d2d51167428a69e99f40fe | 1,448 | py | Python | src/super_gradients/training/models/__init__.py | karndeepsingh/super-gradients | bfed440ecaf485af183570bf965eb5b74cb9f832 | [
"Apache-2.0"
] | 1 | 2022-01-15T19:33:06.000Z | 2022-01-15T19:33:06.000Z | src/super_gradients/training/models/__init__.py | karndeepsingh/super-gradients | bfed440ecaf485af183570bf965eb5b74cb9f832 | [
"Apache-2.0"
] | null | null | null | src/super_gradients/training/models/__init__.py | karndeepsingh/super-gradients | bfed440ecaf485af183570bf965eb5b74cb9f832 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa # FIXME: find sol for F403 error (caused by import *), most likely need to import everything by hand
from .sg_module import *
from super_gradients.training.models.classification_models.densenet import *
from super_gradients.training.models.classification_models.dpn import *
from super_gradients.training.models.classification_models.googlenet import *
from super_gradients.training.models.classification_models.lenet import *
from super_gradients.training.models.classification_models.mobilenet import *
from super_gradients.training.models.classification_models.mobilenetv2 import *
from super_gradients.training.models.classification_models.pnasnet import *
from super_gradients.training.models.classification_models.preact_resnet import *
from super_gradients.training.models.classification_models.resnet import *
from super_gradients.training.models.classification_models.resnext import *
from super_gradients.training.models.classification_models.senet import *
from super_gradients.training.models.classification_models.shufflenet import *
from super_gradients.training.models.classification_models.shufflenetv2 import *
from super_gradients.training.models.classification_models.vgg import *
from super_gradients.training.models.segmentation_models.shelfnet import *
from super_gradients.training.models.classification_models.efficientnet import *
from super_gradients.training.models.all_architectures import ARCHITECTURES
| 68.952381 | 115 | 0.870166 |
28fa10ec4cb7ea617432d1a843efa65bb4d46c15 | 2,327 | py | Python | nerodia/alert.py | harsh183/nerodia | 69c5e4408432e85b5af0b2da03015f729809dac4 | [
"MIT"
] | 83 | 2017-11-20T08:41:09.000Z | 2022-02-09T21:01:47.000Z | nerodia/alert.py | harsh183/nerodia | 69c5e4408432e85b5af0b2da03015f729809dac4 | [
"MIT"
] | 28 | 2017-11-21T02:25:03.000Z | 2021-04-15T15:26:30.000Z | nerodia/alert.py | harsh183/nerodia | 69c5e4408432e85b5af0b2da03015f729809dac4 | [
"MIT"
] | 14 | 2017-11-29T06:44:12.000Z | 2021-09-06T04:53:44.000Z | from selenium.common.exceptions import NoAlertPresentException
import nerodia
from .exception import UnknownObjectException
from .wait.wait import Waitable, TimeoutError
class Alert(Waitable):
    """Interface to the browser's native alert, confirm and prompt dialogs."""

    def __init__(self, browser):
        self.browser = browser
        self.alert = None

    @property
    def selector_string(self):
        # Used in error/wait messages instead of an element selector.
        return 'alert'

    @property
    def text(self):
        """
        Text currently displayed by the alert.

        :rtype: str

        :Example:

        browser.alert.text #=> 'ok'
        """
        self.wait_for_exists()
        return self.alert.text

    def ok(self):
        """
        Accepts the dialog: closes alerts, confirms prompts/confirms.

        :Example:

        browser.alert.ok
        browser.alert.exists #=> False
        """
        self.wait_for_exists()
        self.alert.accept()
        self.browser.after_hooks.run()

    def close(self):
        """
        Dismisses the dialog: closes alerts, cancels prompts/confirms.

        :Example:

        browser.alert.close()
        browser.alert.exists #=> False
        """
        self.wait_for_exists()
        self.alert.dismiss()
        self.browser.after_hooks.run()

    def set(self, value):
        """
        Types the given value into a prompt dialog.

        :param value: keys to send

        :Example:

        browser.alert.set('Text for prompt')
        browser.alert.ok()
        """
        self.wait_for_exists()
        self.alert.send_keys(value)

    @property
    def exists(self):
        """
        Whether an alert, confirm, or prompt is currently present.

        :rtype: bool

        :Example:

        browser.alert.exists #=> True
        """
        try:
            self.assert_exists()
        except UnknownObjectException:
            return False
        return True

    # Alias kept for API compatibility with element-style checks.
    present = exists

    def assert_exists(self):
        # Switching to the alert raises if no dialog is open.
        try:
            self.alert = self.browser.driver.switch_to.alert
        except NoAlertPresentException:
            raise UnknownObjectException('unable to locate alert')

    def wait_for_exists(self):
        # Without relaxed locating, fail immediately instead of polling.
        if not nerodia.relaxed_locate:
            return self.assert_exists()
        try:
            return self.wait_until(lambda a: a.exists, message='waiting for alert')
        except TimeoutError:
            raise UnknownObjectException('unable to locate alert')
| 23.039604 | 83 | 0.581006 |
28fa6ae216b4aa0d88457aec32b09566f1611604 | 1,448 | py | Python | finetwork/distance_calculator/distance_calculator.py | annakuchko/FinNetwork | 4566ff96b33fb5668f9b28f41a94791d1cf9249c | [
"MIT"
] | 5 | 2021-12-07T22:14:10.000Z | 2022-03-30T14:09:15.000Z | finetwork/distance_calculator/distance_calculator.py | annakuchko/FinNetwork | 4566ff96b33fb5668f9b28f41a94791d1cf9249c | [
"MIT"
] | null | null | null | finetwork/distance_calculator/distance_calculator.py | annakuchko/FinNetwork | 4566ff96b33fb5668f9b28f41a94791d1cf9249c | [
"MIT"
] | null | null | null | from finetwork.distance_calculator import _distance_metrics
import pandas as pd
class CalculateDistance:
    """Build a per-period distance matrix from nested log-return data."""

    def __init__(self, data, method='pearson', scaled=False, sigma=0.5):
        """
        :param data: nested mapping period -> group -> key -> {'log_return': series}
        :param method: correlation/distance method forwarded to ``_Metrics``
        :param scaled: whether ``_Metrics`` should scale the distances
        :param sigma: kernel width forwarded to ``_Metrics``
        """
        self.data = data
        self.method = method
        self.scaled = scaled
        self.sigma = sigma

    def transform(self):
        """Return {period: distance_matrix} for every period in ``self.data``."""
        dist_dict = {}
        for period, period_data in self.data.items():
            # Flatten the two inner levels into a (group, key)-indexed frame
            # of log-return series.
            tmp = pd.DataFrame.from_dict(
                {(v, k): period_data[v][k]['log_return']
                 for v in period_data
                 for k in period_data[v]},
                orient='index')
            # from_dict yields a plain Index of tuples; promote it so
            # reset_index exposes 'level_0'/'level_1' columns.
            tmp.index = pd.MultiIndex.from_tuples(tmp.index)
            # Keyword args are mandatory: pandas 2.0 removed positional
            # index/columns for DataFrame.pivot.
            tmp = tmp.reset_index().pivot(index='level_1', columns='level_0')
            distance_matrix = _distance_metrics._Metrics(
                tmp,
                method=self.method,
                scaled=self.scaled, sigma=self.sigma
            )._calculate()
            distance_matrix.index = distance_matrix.index.get_level_values(
                'level_0'
            )
            dist_dict[period] = distance_matrix
        return dist_dict
| 34.47619 | 78 | 0.462017 |
28fc0673c7bc0e68a3641dedb06915366e9c6c39 | 27,818 | py | Python | aprastreioWin.py | Alexsussa/aprastreio | 1159861edd932f61a849f63f9dc7e5d34b2f272b | [
"MIT"
] | null | null | null | aprastreioWin.py | Alexsussa/aprastreio | 1159861edd932f61a849f63f9dc7e5d34b2f272b | [
"MIT"
] | null | null | null | aprastreioWin.py | Alexsussa/aprastreio | 1159861edd932f61a849f63f9dc7e5d34b2f272b | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
__version__ = 1.2
from tkinter.ttk import *
from tkinter.messagebox import *
from tkinter.scrolledtext import *
from tkinter import *
from bs4 import BeautifulSoup
from urllib.request import urlopen
from mailcomposer import MailComposer
from threading import Thread
import os
import sys
import sqlite3
import webbrowser
import ttips
import subprocess
import socket
# Module-level caches of tracking entries; filled elsewhere and used to feed
# the UI combobox values (listaTodos is cleared in arquivarRastreio).
listaRastreio = []
listaPendentes = []
listaEntregues = []
listaTodos = []
listaSepararEntregues = []
listaSepararPendentes = []
# Single-instance guard: exit if another copy of the program is already open.
# NOTE(review): the pidfile is never removed on exit, so a crash leaves a
# stale guard; also /tmp is POSIX-specific despite the Win file name — confirm.
pid = os.getpid()
pidfile = '/tmp/aprastreio.pid'
if not os.path.isfile(pidfile):
    # Write the pid directly instead of shelling out to `touch`/`echo`.
    with open(pidfile, 'w') as fp:
        fp.write(f'{pid}\n')
else:
    sys.exit(-1)
# Create the database directory and schema if they do not exist yet.
# (The original if/else ran an identical block in both branches; makedirs
# with exist_ok collapses that duplication into one code path.)
db = os.path.expanduser('~/Dropbox/aprastreio/banco/')
os.makedirs(db, exist_ok=True)
banco = os.path.join(os.path.dirname(db), 'rastreios.db')
conexao = sqlite3.connect(banco, check_same_thread=False)
c = conexao.cursor()
# All three tables share the same layout: id + unique 13-char tracking code
# + human-readable object description. Table names come from a fixed tuple,
# so the f-string below is not an injection risk.
for _tabela in ('rastreio', 'entregues', 'pendentes'):
    c.execute(f'CREATE TABLE IF NOT EXISTS {_tabela} (id INTEGER PRIMARY KEY AUTOINCREMENT,'
              'codrastreio TEXT VARCHAR(13) UNIQUE NOT NULL, objeto TEXT VARCHAR(50) NOT NULL)')
# Procura novas versões do software
def CheckUpdates(event=None):
    """Compare the published version number with __version__ and, if newer,
    notify the user and open the releases page.

    Safe to call from Tk event bindings: network failures are swallowed so a
    missing connection does not raise inside the event loop.
    """
    janela.unbind('<Enter>')
    try:
        # Context manager closes the HTTP response; timeout prevents a hung
        # request from blocking the worker thread indefinitely.
        with urlopen('https://www.dropbox.com/s/61rpf1xg8qr1vh1/version_linux.txt?dl=true',
                     timeout=10) as resp:
            nova_versao = float(resp.read())
    except (OSError, ValueError):
        # Offline or malformed payload: skip the check silently.
        return
    if nova_versao > float(__version__):
        subprocess.call(
            ['notify-send', 'AP - Rastreio Correios', 'Há uma nova versão disponível. Baixe agora!'])
        showinfo(title='Atualização', message='Há uma nova versão disponível. Baixe agora!')
        webbrowser.open('https://github.com/Alexsussa/aprastreio/releases/')
class Rastreio:
    def __init__(self, master=None, rastreio='', objeto=''):
        """Build the whole main window: containers, menu bar, widgets, bindings.

        :param master: parent Tk container for the frames
        :param rastreio: initial tracking code (stored, not displayed)
        :param objeto: initial object description (stored, not displayed)
        """
        self.rastreio = rastreio
        self.objeto = objeto
        # Stacked container frames; c1 holds the input row, c2 the output area.
        self.c1 = Frame(master)
        self.c1['padx'] = 5
        self.c1['pady'] = 3
        self.c1.pack()
        self.c2 = Frame(master)
        self.c2.pack()
        self.c3 = Frame(master)
        self.c3.pack()
        self.c4 = Frame(master)
        self.c4.pack()
        self.c5 = Frame(master)
        self.c5.pack()
        # Top menu bar ("Arquivo" = File menu).
        menubar = Menu(janela)
        arquivo = Menu(menubar, tearoff=0)
        menubar.add_cascade(label='Arquivo', menu=arquivo)
        menubar.add_separator()
        # Long-running actions run on worker threads to keep the UI responsive.
        arquivo.add_command(label='Sincronizar rastreios...',
                            command=lambda: Thread(target=self.NotifAltStatus).start(), accelerator='Ctrl+R')
        # arquivo.add_command(label='Arquivar entregues', command=lambda: Thread(target=self.arquivarEntregues).start(), accelerator='Ctrl+B')
        arquivo.add_command(label='Mover para entregues', command=lambda: Thread(target=self.arquivarRastreio).start(),
                            accelerator='Ctrl+B')
        arquivo.add_command(label='Salvar', command=lambda: Thread(target=self.Cadastrar).start(), accelerator='Ctrl+S')
        arquivo.add_command(label='Atualizar', command=lambda: Thread(target=self.Atualizar).start(),
                            accelerator='Ctrl+U')
        arquivo.add_command(label='Deletar', command=lambda: Thread(target=self.Deletar).start(), accelerator='Ctrl+D')
        arquivo.add_separator()
        # Filter entries: repopulate the combobox and rebind its selection
        # handler to the matching lookup method.
        arquivo.add_command(label='Mostrar todos os rastreios',
                            command=lambda: {self.txtObjeto.config(values=self.listaTodos(event='<Button-1>')),
                                             janela.bind('<<ComboboxSelected>>', self.BuscaTodos)})
        arquivo.add_command(label='Mostar apenas os entregues',
                            command=lambda: {self.txtObjeto.config(values=self.listaEntregues(event='<Button-1>')),
                                             janela.bind('<<ComboboxSelected>>', self.BuscaEntregues)})
        """arquivo.add_command(label='Mostar apenas os pendentes',
                            command=lambda: {self.txtObjeto.config(values=self.listaPendentes(event='<Button-1>')),
                                             janela.bind('<<ComboboxSelected>>', self.BuscaPendentes)})"""
        arquivo.add_separator()
        arquivo.add_command(label='Sair', command=janela.destroy, accelerator='Ctrl+Q')
        janela.bind('<Control-q>', self.JanExit)
        janela.bind('<Control-Q>', self.JanExit)
        # Help menu ("Ajuda").
        ajuda = Menu(menubar, tearoff=0)
        menubar.add_cascade(label='Ajuda', menu=ajuda)
        # NOTE(review): Thread(target=self.NavLink(url)) calls NavLink
        # immediately and passes its None result as target — the browser opens
        # at menu-construction/bind time, not on click. Likely a bug.
        ajuda.add_command(label='GitHub AP Rastreio...', command=lambda: Thread(
            target=self.NavLink('https://github.com/Alexsussa/aprastreio/')).start(), accelerator='Ctrl+G')
        ajuda.add_command(label='Checar atualizações...', command=lambda: Thread(target=CheckUpdates).start(),
                          accelerator='Ctrl+K')
        ajuda.add_separator()
        ajuda.add_command(label='Sobre', command=self.Sobre, accelerator='Ctrl+H')
        janela.bind('<Control-h>', self.Sobre)
        janela.bind('<Control-H>', self.Sobre)
        janela.bind('<Control-g>', lambda e: Thread(target=self.NavLink('https://github.com/Alexsussa/aprastreio/')))
        janela.bind('<Control-G>', lambda e: Thread(target=self.NavLink('https://github.com/Alexsussa/aprastreio/')))
        janela.bind('<Control-k>', CheckUpdates)
        janela.bind('<Control-K>', CheckUpdates)
        janela.bind('<Control-b>', lambda e: Thread(target=self.arquivarRastreio).start())
        janela.bind('<Control-B>', lambda e: Thread(target=self.arquivarRastreio).start())
        janela.config(menu=menubar)
        # Main window layout: tracking-code entry, object combobox, buttons.
        self.lbRastreio = Label(self.c1, text='RASTREIO:', fg='black')
        self.lbRastreio.pack(side=LEFT)
        self.txtRastreio = Entry(self.c1, width=14, bg='white', fg='black', selectbackground='blue',
                                 selectforeground='white')
        self.txtRastreio.pack(side=LEFT, padx=2)
        self.lbObjeto = Label(self.c1, text='OBJETO:', fg='black')
        self.lbObjeto.pack(side=LEFT)
        self.txtObjeto = Combobox(self.c1, width=32, background='white', foreground='black',
                                  values=self.listaTodos(event='<Button-1>'))
        self.txtObjeto.pack(side=LEFT, padx=2)
        janela.bind('<<ComboboxSelected>>', self.BuscaTodos)
        self.btnRastrear = Button(self.c1, text='RASTREAR', fg='black',
                                  command=lambda: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
        self.btnRastrear.pack(side=LEFT, padx=2)
        janela.bind('<Return>', lambda e: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
        janela.bind('<KP_Enter>', lambda e: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
        # Read-only output area for tracking results.
        self.campo = ScrolledText(self.c2, width=77, height=30, bg='lightgray', fg='black', state='disable',
                                  selectbackground='blue', font=('sans-serif', '10'))
        self.campo.pack(fill='both', expand=True, pady=5)
        # Button icons; references are kept on self so Tk does not GC them.
        self.whatsappimg = PhotoImage(file='imagens/WhatsApp.png')
        self.emailimg = PhotoImage(file='imagens/Email.png')
        self.salvarimg = PhotoImage(file='imagens/Salvar.png')
        self.atualizarimg = PhotoImage(file='imagens/Atualizar.png')
        self.deletarimg = PhotoImage(file='imagens/Lixeira.png')
        self.btnWhatsapp = Button(image=self.whatsappimg, command=lambda: Thread(target=self.WhatsApp).start())
        self.btnWhatsapp.pack(side=RIGHT)
        ttips.Create(self.btnWhatsapp, text='Enviar por WhatsApp, Ctrl+W')
        janela.bind('<Control-w>', lambda e: Thread(target=self.WhatsApp).start())
        janela.bind('<Control-W>', lambda e: Thread(target=self.WhatsApp).start())
        self.btnEmail = Button(image=self.emailimg, command=lambda: Thread(target=self.Email).start())
        self.btnEmail.pack(side=RIGHT)
        ttips.Create(self.btnEmail, text='Enviar por Email, Ctrl+E')
        janela.bind('<Control-e>', lambda e: Thread(target=self.Email).start())
        janela.bind('<Control-E>', lambda e: Thread(target=self.Email).start())
        self.btnSalvar = Button(image=self.salvarimg, command=lambda: [self.RastreioExiste(), self.Cadastrar()])
        self.btnSalvar.pack(side=LEFT, padx=1)
        ttips.Create(self.btnSalvar, text='Salvar, Ctrl+S')
        janela.bind('<Control-s>', lambda e: Thread(target=self.Cadastrar).start())
        janela.bind('<Control-S>', lambda e: Thread(target=self.Cadastrar).start())
        self.btnAtualizar = Button(image=self.atualizarimg, command=self.Atualizar)
        self.btnAtualizar.pack(side=LEFT, padx=1)
        ttips.Create(self.btnAtualizar, text='Atualizar, Ctrl+U')
        janela.bind('<Control-u>', lambda e: Thread(target=self.Atualizar).start())
        janela.bind('<Control-U>', lambda e: Thread(target=self.Atualizar).start())
        self.btnDeletar = Button(image=self.deletarimg, command=self.Deletar)
        self.btnDeletar.pack(side=LEFT, padx=1)
        ttips.Create(self.btnDeletar, text='Deletar, Ctrl+D')
        janela.bind('<Control-d>', lambda e: Thread(target=self.Deletar).start())
        janela.bind('<Control-D>', lambda e: Thread(target=self.Deletar).start())
        # NOTE(review): the second assignment to self.lbCreditos overwrites the
        # first reference; both labels stay packed, only one stays reachable.
        self.lbCreditos = Label(text='AP Correios - 2020')
        self.lbCreditos.pack(side=TOP)
        self.lbCreditos = Label(text='Software criado por Alex Pinheiro')
        self.lbCreditos.pack(side=BOTTOM)
        # Context menu (entries have no command attached yet).
        self.mouseMenu = Menu(janela, tearoff=0)
        self.mouseMenu.add_command(label='Recortar')
        self.mouseMenu.add_command(label='Copiar')
        self.mouseMenu.add_command(label='Colar')
        janela.bind('<Control-L>', self.Limpar)
        # NOTE(review): Thread(...).start() runs CheckUpdates NOW and binds its
        # None return value to <Enter>; probably meant a lambda like the others.
        janela.bind('<Enter>', Thread(target=CheckUpdates).start())
        janela.bind('<Control-r>', lambda e: Thread(target=self.NotifAltStatus).start())
        janela.bind('<Control-R>', lambda e: Thread(target=self.NotifAltStatus).start())
    # Move a tracking entry to the "delivered" (entregues) list.
    def arquivarRastreio(self):
        """Move the selected tracking entry from `rastreio` to `entregues`.

        Warns if no entry is selected; otherwise copies the row, deletes the
        original, refreshes the combobox and clears the form.
        """
        rastreio = self.txtRastreio.get()
        objeto = self.txtObjeto.get()
        if rastreio == '' or objeto == '':
            showwarning(title='Aviso', message='Selecione um rastreio para mover.')
        else:
            # NOTE(review): this SELECT's result is never read — dead statement.
            c.execute(f'SELECT codrastreio FROM rastreio WHERE codrastreio = "{rastreio}"')
            # SECURITY(review): user input interpolated into SQL via f-strings;
            # should use parameterized queries (c.execute(..., (rastreio,))).
            c.execute(f'INSERT INTO entregues SELECT * FROM rastreio WHERE codrastreio = "{rastreio}"')
            c.execute(f'DELETE FROM rastreio WHERE codrastreio = "{rastreio}"')
            conexao.commit()
            listaTodos.clear()
            self.txtObjeto.config(values=self.listaTodos())
            self.Limpar()
            showinfo(title='Status', message=f'Rastreio {rastreio} arquivado.')
# Closes the main application.
def JanExit(self, event=None):
    """Destroy the root Tk window, ending the main loop.

    `event` is accepted (and ignored) so this can be bound directly
    as a Tk event callback.
    """
    janela.destroy()
def NavLink(self, url):
    """Open *url* in a new browser tab (used by the clickable labels)."""
    webbrowser.open_new_tab(url)
def Sobre(self, event=None):
    """Show the modal 'About' window with logo, version label and
    clickable license / GitHub links."""
    popup = Toplevel()
    sobre = Label(popup, text='AP - Rastreios v1.2')
    sobre.pack(pady=20)
    logo = PhotoImage(file='imagens/sobre.png')
    bgimg = Label(popup, image=logo)
    bgimg.pack()
    bgimg.image = logo  # keep a reference so Tk does not garbage-collect the image
    mit = Label(popup, text='Licença\n', fg='blue', cursor='hand2')
    mit.pack()
    github = Label(popup, text='GitHub\n', fg='blue', cursor='hand2')
    github.pack()
    popup.title('Sobre')
    popup.geometry('400x300')
    popup.resizable(False, False)
    popup.grab_set()
    popup.focus_force()
    popup.transient(janela)
    # Bug fix: the original passed self.NavLink(url) -- i.e. the *result* of
    # calling NavLink -- as the Thread target, so the URL was opened on the
    # Tk thread and the started Thread had target=None. Pass the callable
    # and its argument instead.
    mit.bind('<Button-1>', lambda e: Thread(
        target=self.NavLink,
        args=('https://github.com/Alexsussa/aprastreio/blob/master/LICENSE',)).start())
    github.bind('<Button-1>', lambda e: Thread(
        target=self.NavLink,
        args=('https://github.com/Alexsussa/aprastreio/',)).start())
# Desktop notification when a tracked object's status changes.
def NotifAltStatus(self, event=None):
    """Poll every stored tracking code and surface status changes.

    After user confirmation, re-schedules itself hourly via janela.after,
    scrapes linkcorreios.com.br for each code and shows every object that
    is NOT yet delivered in the main text area plus a `notify-send`
    desktop notification (Linux only).
    """
    try:
        info = askyesno(title='ATUALIZANDO RASTREIOS',
                        message='Atualizando status dos rastreios...',
                        detail='Clique em SIM e aguarde até os objetos não entregues aparecerem na tela principal\nou clique em NÃO para atualizar manualmente mais tarde.')
        if info == False:
            pass
        else:
            # Re-arm the hourly (3 600 000 ms) background refresh.
            janela.after(3600000, lambda: Thread(target=self.NotifAltStatus).start())
            subprocess.call(['notify-send', 'AP - Rastreio Correios',
                             'Atualizando status dos rastreios...\n\nPor favor, aguarde...'])
            c.execute('SELECT * FROM rastreio ORDER BY codrastreio')
            self.Limpar()
            for cod in c:
                # cod[1] = tracking code, cod[2] = object name (table layout
                # inferred from usage below -- confirm against the schema).
                linkcorreios = urlopen(f'https://www.linkcorreios.com.br/?id={cod[1]}')
                soup = BeautifulSoup(linkcorreios, 'html.parser')
                lastStatus = soup.find('ul', attrs={'class': 'linha_status'})
                last = lastStatus.text.strip().upper()
                self.campo.delete(1.0, END)
                # First 39 chars of the latest status line == delivered marker.
                if last[0:39] != 'STATUS: OBJETO ENTREGUE AO DESTINATÁRIO':
                    self.campo.config(state='normal')
                    self.campo.insert(INSERT, '-' * 80)
                    self.campo.insert(INSERT, '\n\nALTERAÇÃO DE STATUS')
                    self.campo.insert(INSERT, f'\n\n{cod[2]}\n{cod[1]}\n\n{last}\n\n', '-' * 80)
                    self.campo.config(state='disable')
                    subprocess.call(
                        ['notify-send', 'AP - Rastreio Correios', f'ALTERAÇÂO DE STATUS\n\n{cod[2]}\n\n{last}\n\n'])
            subprocess.call(['notify-send', 'AP - Rastreio Correios',
                             'Todos os objetos não entregues estão na tela principal.'])
    except socket.error:
        # No connectivity / server timeout: notify and show a dialog.
        subprocess.call(['notify-send', 'AP - Rastreio Correios',
                         'Tempo de resposta do servidor execedido.\n\nSem conexão com a internet.'])
        showerror(title='AVISO', message='Tempo de resposta do servidor execedido.\n\nSem conexão com a internet.')
def MenuMouse(self, event):
    """Point the right-click context menu at the widget under the cursor
    and pop it up at the mouse position."""
    target = event.widget
    for label, virtual_event in (('Recortar', '<<Cut>>'),
                                 ('Copiar', '<<Copy>>'),
                                 ('Colar', '<<Paste>>')):
        self.mouseMenu.entryconfigure(
            label,
            command=lambda w=target, ev=virtual_event: w.event_generate(ev))
    self.mouseMenu.tk_popup(event.x_root, event.y_root)
def Rastrear(self, event=None):
    """Scrape linkcorreios.com.br for the code in the form and show the
    full status history in the main text area, plus a desktop
    notification with the most recent status line.
    """
    rastreio = self.txtRastreio.get()
    objeto = self.txtObjeto.get()
    if rastreio == '':
        showwarning(title='AVISO', message='Digite um código de rastreio para rastrear.')
    elif len(rastreio) != 13:
        # Correios codes are always 13 chars: 2 letters + 9 digits + 2 letters.
        showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
                                           'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
    else:
        try:
            subprocess.call(['notify-send', 'AP - Rastreio Correios', 'Rastreando encomenda...'])
            linkcorreios = urlopen(f'https://www.linkcorreios.com.br/?id={rastreio}', timeout=20)
            soup = BeautifulSoup(linkcorreios, 'html.parser')
            # Full history lives in the 'singlepost' div.
            status = soup.find('div', attrs={'class': 'singlepost'})
            retorno = ''
            if status:
                retorno = status.text.strip().upper()
            else:
                retorno = 'O rastreamento não está disponível no momento:\n\n' \
                          '- Verifique se o código do objeto está correto;\n' \
                          '- O objeto pode demorar até 24 horas (após postagem) para ser rastreado no\nsistema dos Correios.'.strip().upper()
            # print(retorno)
            self.campo.config(state='normal')
            self.campo.delete(1.0, END)
            self.campo.insert(INSERT, retorno)
            self.campo.config(state='disable')
            # Most recent status line lives in the 'linha_status' list.
            lastStatus = soup.find('ul', attrs={'class': 'linha_status'})
            if lastStatus:
                last = lastStatus.text.strip().upper()
            else:
                last = 'O rastreamento não está disponível no momento:\n\n' \
                       '- Verifique se o código do objeto está correto;\n' \
                       '- O objeto pode demorar até 24 horas (após postagem) para ser rastreado no sistema dos Correios.'.strip().upper()
            subprocess.call(['notify-send', 'AP - Rastreio Correios', f'{objeto}\n\n{last}'])
        except socket.error:
            subprocess.call(['notify-send', 'AP - Rastreio Correios',
                             'Tempo de resposta do servidor execedido.\n\nSem conexão com a internet.'])
            showerror(title='AVISO',
                      message='Tempo de resposta do servidor execedido.\n\nSem conexão com a internet.')
    # NOTE(review): the string below is dead code (a bare string literal,
    # effectively a commented-out except clause) kept as in the original.
    """except socket.timeout:
    subprocess.call(
        ['notify-send', 'AP - Rastreio Correios', 'Tempo de resposta do servidor execedido.'])
    showerror(title='AVISO', message='Tempo de resposta do servidor execedido.')"""
def WhatsApp(self):
    """Open WhatsApp Web pre-filled with a tracking link for the code
    currently in the form (after validating it)."""
    codigo = self.txtRastreio.get().strip().upper()
    if not codigo:
        showerror(title='AVISO', message='Para fazer o envio pelo WhatsApp, primeiro busque pelo rastreio.')
        return
    if len(codigo) != 13:
        showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
                                           'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
        return
    rastreio = self.txtRastreio.get()
    webbrowser.open(
        f'https://web.whatsapp.com/send?phone=&text=Ol%c3%a1.%20Clique%20no%20link%20para%20rastrear%20o%20objeto%20c%c3%b3digo%20{rastreio}%0ahttps%3a%2f%2fwww.linkcorreios.com.br%2f{rastreio}%3fw%3d1&source=&data=')
def Email(self):
    """Compose an email containing a tracking link for the current code.

    Requires Thunderbird or Evolution to be installed (checked by path,
    so this is Linux-specific); uses mailcomposer's MailComposer to open
    the draft in the default client.
    """
    rastreio = self.txtRastreio.get().strip().upper()
    if not os.path.exists('/usr/bin/thunderbird') and not os.path.exists('/usr/bin/evolution'):
        showwarning(title='AVISO', message='Nenhum cliente de email está instalado em seu computador.')
    else:
        rastreio = self.txtRastreio.get().strip().upper()
        if rastreio == '':
            showerror(title='AVISO', message='Para fazer o envio pelo Email, primeiro busque pelo rastreio.')
        elif len(rastreio) != 13:
            showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
                                               'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
        else:
            mc = MailComposer()
            rastreio = self.txtRastreio.get()
            mc.subject = f'Código de Rastreio ({rastreio})'
            mc.body = f'Boa tarde!\n\n Segue código de rastreio para acompanhamento do seu pedido:\n\n https://www.linkcorreios.com.br/?id={rastreio}.\n\n'
            mc.display('AP - Rastreio Correios')
            # webbrowser.open(f'https://www.linkcorreios.com.br/?id={rastreio}#envie_por_email')
def Cadastrar(self):
    """Validate the form fields and insert a new tracking code into the
    'rastreio' table, then clear the form and refresh the combobox."""
    rastreio = self.txtRastreio.get().strip().upper()
    if self.txtRastreio.get() == '' or self.txtObjeto.get() == '':
        showwarning(title='AVISO', message='Para salvar digite o rastreio e o nome do objeto.')
    elif len(rastreio) != 13:
        showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
                                           'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
    else:
        objeto = self.txtObjeto.get().strip().upper()
        # Parameterized query: the original interpolated raw form input
        # into the SQL string (SQL injection; breaks on quotes).
        c.execute('INSERT INTO rastreio (codrastreio, objeto) VALUES (?, ?)', (rastreio, objeto))
        conexao.commit()
        self.txtRastreio.delete(0, END)
        self.txtObjeto.delete(0, END)
        listaPendentes.clear()
        self.txtObjeto.config(values=self.listaPendentes())
        showinfo(title='STATUS', message=f'Rastreio {rastreio} cadastrado com sucesso.')
def Atualizar(self):
    """After user confirmation, update the stored code/object pair for
    the tracking code currently in the form."""
    rastreio = self.txtRastreio.get().strip().upper()
    objeto = self.txtObjeto.get().strip().upper()
    if self.txtRastreio.get() == '' or self.txtObjeto.get() == '':
        showerror(title='AVISO', message='Para atualizar os dados procure pelo rastreio primeiro.')
    else:
        aviso = askyesno(title='AVISO', message='Você deseja atualizar os dados desse rastreio?')
        if aviso == False:
            pass
        elif aviso == True:
            # Parameterized query (was an injectable f-string).
            c.execute('UPDATE rastreio SET codrastreio = ?, objeto = ? WHERE codrastreio = ?',
                      (rastreio, objeto, rastreio))
            conexao.commit()
            self.txtRastreio.delete(0, END)
            self.txtObjeto.delete(0, END)
            listaPendentes.clear()
            self.txtObjeto.config(values=self.listaPendentes())
            showinfo(title='STATUS', message=f'Rastreio {rastreio} atualizado com sucesso.')
        else:
            return None
def Deletar(self):
    """After user confirmation, delete the tracking code currently in
    the form from the 'rastreio' table."""
    rastreio = self.txtRastreio.get().strip().upper()
    if self.txtRastreio.get() == '' or self.txtObjeto.get() == '':
        showerror(title='AVISO', message='Para deletar os dados procure pelo rastreio primeiro.')
    else:
        aviso = askyesno(title='AVISO', message='Você realmente deseja DELETAR os dados desse rastreio?\n'
                                                'Esta ação não poderá ser desfeita.')
        if aviso == False:
            pass
        elif aviso == True:
            # Parameterized query (was an injectable f-string).
            c.execute('DELETE FROM rastreio WHERE codrastreio = ?', (rastreio,))
            conexao.commit()
            self.txtRastreio.delete(0, END)
            self.txtObjeto.delete(0, END)
            listaPendentes.clear()
            self.txtObjeto.config(values=self.listaPendentes())
            showinfo(title='STATUS', message=f'Rastreio {rastreio} deletado com sucesso.')
        else:
            return None
def listaTodos(self, event=None):
    """Refresh the module-level listaTodos cache from the 'rastreio'
    table and return it newest-first as a tuple."""
    c.execute('SELECT objeto FROM rastreio ORDER BY id')
    for (nome,) in c:
        if nome not in listaTodos:
            listaTodos.append(nome)
    return tuple(reversed(listaTodos))
def listaPendentes(self, event=None):
    """Refresh the module-level listaPendentes cache from the
    'pendentes' table and return it newest-first as a tuple."""
    self.txtObjeto.insert(INSERT, 'Mostrando apenas objetos pendentes')
    self.Limpar()
    c.execute('SELECT objeto FROM pendentes ORDER BY id')
    for (nome,) in c:
        if nome not in listaPendentes:
            listaPendentes.append(nome)
    return tuple(reversed(listaPendentes))
def listaEntregues(self, event=None):
    """Refresh the module-level listaEntregues cache from the
    'entregues' table and return it newest-first as a tuple."""
    self.Limpar()
    c.execute('SELECT objeto FROM entregues ORDER BY id')
    for (nome,) in c:
        if nome not in listaEntregues:
            listaEntregues.append(nome)
    return tuple(reversed(listaEntregues))
def ListaRastreio(self, event=None):
    """Cache every tracking code from 'rastreio' (sorted by code) and
    return the cache as a tuple."""
    c.execute('SELECT codrastreio FROM rastreio ORDER BY codrastreio')
    for (codigo,) in c:
        if codigo not in listaRastreio:
            listaRastreio.append(codigo)
    return tuple(listaRastreio)
def BuscaPendentes(self, event=None):
    """Look up the typed object name in 'pendentes' and fill both entry
    widgets with the matching code/name pair."""
    objeto = self.txtObjeto.get().strip().upper()
    # Parameterized query (was an injectable f-string).
    c.execute('SELECT * FROM pendentes WHERE objeto = ?', (objeto,))
    for linha in c:
        self.rastreio = linha[1]
        self.objeto = linha[2]
        self.txtRastreio.delete(0, END)
        self.txtRastreio.insert(INSERT, self.rastreio)
        self.txtObjeto.delete(0, END)
        self.txtObjeto.insert(INSERT, self.objeto)
def BuscaTodos(self, event=None):
    """Look up the typed object name in 'rastreio' and fill both entry
    widgets with the matching code/name pair."""
    objeto = self.txtObjeto.get().strip().upper()
    # Parameterized query (was an injectable f-string).
    c.execute('SELECT * FROM rastreio WHERE objeto = ?', (objeto,))
    for linha in c:
        self.rastreio = linha[1]
        self.objeto = linha[2]
        self.txtRastreio.delete(0, END)
        self.txtRastreio.insert(INSERT, self.rastreio)
        self.txtObjeto.delete(0, END)
        self.txtObjeto.insert(INSERT, self.objeto)
def BuscaEntregues(self, event=None):
    """Look up the typed object name in 'entregues' and fill both entry
    widgets with the matching code/name pair."""
    objeto = self.txtObjeto.get().strip().upper()
    # Parameterized query (was an injectable f-string).
    c.execute('SELECT * FROM entregues WHERE objeto = ?', (objeto,))
    for linha in c:
        self.rastreio = linha[1]
        self.objeto = linha[2]
        self.txtRastreio.delete(0, END)
        self.txtRastreio.insert(INSERT, self.rastreio)
        self.txtObjeto.delete(0, END)
        self.txtObjeto.insert(INSERT, self.objeto)
def BuscaRastreio(self, event=None):
    """Look up the typed tracking code in 'rastreio' and fill both entry
    widgets with the matching code/name pair."""
    rastreio = self.txtRastreio.get().strip().upper()
    # Parameterized query (was an injectable f-string).
    c.execute('SELECT * FROM rastreio WHERE codrastreio = ?', (rastreio,))
    for linha in c:
        self.rastreio = linha[1]
        self.objeto = linha[2]
        self.txtRastreio.delete(0, END)
        self.txtRastreio.insert(INSERT, self.rastreio)
        self.txtObjeto.delete(0, END)
        self.txtObjeto.insert(INSERT, self.objeto)
def RastreioExiste(self):
    """Warn the user if the tracking code in the form is already
    registered in the 'rastreio' table."""
    rastreio = self.txtRastreio.get().strip().upper()
    # Parameterized query (was an injectable f-string); the unused
    # assignment of showinfo's return value was also dropped.
    c.execute('SELECT * FROM rastreio WHERE codrastreio = ?', (rastreio,))
    for item in c:
        if rastreio == item[1]:
            showinfo(title='STATUS',
                     message='Código já cadastrado.\nTecle ENTER para\nbuscar o nome do objeto.')
def Limpar(self, event=None):
    """Clear both entry widgets and the (normally read-only) status
    text area, restoring its disabled state afterwards."""
    self.txtRastreio.delete(0, END)
    self.txtObjeto.delete(0, END)
    self.campo.config(state='normal')
    self.campo.delete(1.0, END)
    self.campo.config(state='disable')
# Application bootstrap: build the root window, attach the Rastreio UI and
# run the Tk main loop.
janela = Tk()
iconejanela = PhotoImage(file='imagens/iconejanela.png')
janela.tk.call('wm', 'iconphoto', janela._w, iconejanela)
janela.resizable(False, False)
janela.geometry('630x610')
Rastreio(janela)
janela.title('AP - RASTREIO CORREIOS v1.2')
janela.update()
janela.mainloop()
# Remove the PID lock file on shutdown. os.remove() is portable and does not
# spawn a shell; the original `os.system(f'rm {pidfile}')` was shell-injectable
# if the path contained metacharacters. (The dead `if janela.destroy or
# janela.quit: pass` guard, which was always true and did nothing, was removed.)
try:
    os.remove(pidfile)
except FileNotFoundError:
    pass
| 46.910624 | 225 | 0.60752 |
28fcf00920c199ce0f0b62aba120f4cb4d0c324d | 5,480 | py | Python | samples/attributes.py | DavidJohnGee/clicrud | f1f178ac44649efe7b7681d37e97d2632b8971b2 | [
"Apache-2.0"
] | 9 | 2015-12-07T23:00:24.000Z | 2021-06-23T21:31:47.000Z | samples/attributes.py | DavidJohnGee/clicrud | f1f178ac44649efe7b7681d37e97d2632b8971b2 | [
"Apache-2.0"
] | 8 | 2016-04-05T12:36:54.000Z | 2017-05-15T16:00:08.000Z | samples/attributes.py | DavidJohnGee/clicrud | f1f178ac44649efe7b7681d37e97d2632b8971b2 | [
"Apache-2.0"
] | 7 | 2016-06-02T23:39:05.000Z | 2021-03-25T20:52:46.000Z | #!/usr/bin/env python
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
class _attributes(dict):
def __init__(self):
# This is the dictionary that is generated with the attributes
self.devices = {}
def get_attributes(self, **kwargs):
"""This method gets all attributes in the associated list.
I've tried to avoid 'custom' work, but it's CLI. Tough.
If you want to have more attributes, build it in to this method.
"""
# Figure out how many devices in the stack and what
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show version | inc Management Module')
# Get the count of devices
_ndevices = len(_tmp)
logging.info("[attributes.py] Detected stack devices %s" % _ndevices)
# This section fills in the device type and number
_devcount = 1
for dev in (_tmp):
_tmp2 = dev.strip()
_tmp2 = _tmp2.split(" ")
self.devices[_devcount] = {'model': _tmp2[4]}
if _devcount < _ndevices:
_devcount += 1
# This section fills in the version of code
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show version | inc SW: Version')
_devcount = 1
for dev in (_tmp):
_tmp2 = dev.strip()
_tmp2 = _tmp2.split(" ")
self.devices[_devcount].update({'version': _tmp2[2]})
if _devcount < _ndevices:
_devcount += 1
logging.info("[attributes.py] Detected version of code %s" % _tmp2)
# This section fills in the uptime per device
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show version | inc uptime')
_devcount = 1
for dev in (_tmp):
_tmp2 = dev.strip()
_tmp2 = _tmp2.split(" ")
_tmp3 = ' '.join(_tmp2[6:])
self.devices[_devcount].update({'uptime': _tmp3})
if _devcount < _ndevices:
_devcount += 1
logging.info("[attributes.py] Detected uptime %s" % _tmp3)
# This section fills in the hostname
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show running-config | inc hostname')
if _tmp:
_devcount = 1
_tmp2 = str(_tmp)
_tmp2 = _tmp2.strip()
_tmp2 = _tmp2.split(" ")
for dev in range(_ndevices):
self.devices[_devcount].update({'hostname': _tmp2[1]})
if _devcount < _ndevices:
_devcount += 1
logging.info("[attributes.py] Detected hostname %s" % _tmp2[1])
if not _tmp:
self.devices[_devcount].update({'hostname': 'Not set'})
logging.info("[attributes.py] No hostname detected")
# This section fills in the serial
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show version | inc Serial')
_devcount = 1
for dev in (_tmp):
_tmp2 = dev.strip()
_tmp2 = _tmp2.split(" ")
self.devices[_devcount].update({'serial': _tmp2[3]})
if _devcount < _ndevices:
_devcount += 1
logging.info("[attributes.py] Detected serial number %s"
% _tmp2[3])
def set_attribute(self, **kwargs):
"""This method sets and can override each attribute.
Requires KWs: device (integer)
parameter (string)
value (anything)
"""
_device = kwargs.get('device')
_parameter = kwargs.get('parameter')
_value = kwargs.get('value')
self.devices[_device].update({_parameter: _value})
logging.info("[attributes.py] Manually set attribute: %s: %s",
_parameter, _value)
def _transport_converter(self, transport, instance, command):
"""This method converts between SSH and Telnet.
Ultimately abstracting away the differences between the two.
"""
if transport is 'telnet':
_output = instance.read(command)
return _output
if transport is 'ssh':
_output = instance.read(command)
return _output
| 37.793103 | 77 | 0.534672 |
28fd8a0aa5ca53d8cc4ae5edc75046373f2c1af3 | 1,929 | py | Python | Q36_reversePairs.py | FreesiaLikesPomelo/-offer | 14ac73cb46d13c7f5bbc294329a14f3c5995bc7a | [
"Apache-2.0"
] | null | null | null | Q36_reversePairs.py | FreesiaLikesPomelo/-offer | 14ac73cb46d13c7f5bbc294329a14f3c5995bc7a | [
"Apache-2.0"
] | null | null | null | Q36_reversePairs.py | FreesiaLikesPomelo/-offer | 14ac73cb46d13c7f5bbc294329a14f3c5995bc7a | [
"Apache-2.0"
] | null | null | null | '''
面试题51. 数组中的逆序对
在数组中的两个数字,如果前面一个数字大于后面的数字,则这两个数字组成一个逆序对。输入一个数组,求出这个数组中的逆序对的总数。
示例 1:
输入: [7,5,6,4]
输出: 5
限制:
0 <= 数组长度 <= 50000
https://leetcode-cn.com/problems/shu-zu-zhong-de-ni-xu-dui-lcof/
执行用时 :1564 ms, 在所有 Python3 提交中击败了85.67%的用户
内存消耗 :18.5 MB, 在所有 Python3 提交中击败了100.00%的用户
'''
# merge-sort
# test cases:
# 1. input [] or [int]:return 0
# 2. function test: input sorted array: return
class Solution:
    """Count inversion pairs (i < j with nums[i] > nums[j]) via merge sort.

    Fixes: the original annotated parameters with ``List[int]`` without ever
    importing ``typing.List``, which raises NameError when the class body is
    executed; plain ``list`` annotations are used instead. ``int(len/2)`` was
    replaced with floor division.
    """

    def merge(self, left: list, right: list):
        """Merge two sorted lists back-to-front.

        Returns (merged_sorted_list, cross_inversions), where
        cross_inversions counts pairs (l, r) with l in *left*, r in
        *right* and l > r.
        """
        lidx = len(left) - 1
        ridx = len(right) - 1
        idx = lidx + ridx + 1
        result = [0] * (idx + 1)
        inversions = 0
        while lidx >= 0 and ridx >= 0:
            if left[lidx] > right[ridx]:
                # left[lidx] exceeds every remaining element of right.
                inversions += ridx + 1
                result[idx] = left[lidx]
                lidx -= 1
            else:
                result[idx] = right[ridx]
                ridx -= 1
            idx -= 1
        # Drain whichever side is left over.
        while ridx >= 0:
            result[idx] = right[ridx]
            idx -= 1
            ridx -= 1
        while lidx >= 0:
            result[idx] = left[lidx]
            idx -= 1
            lidx -= 1
        return result, inversions

    def mergeSort(self, nums: list):
        """Return (sorted copy of *nums*, number of inversions in *nums*)."""
        if len(nums) <= 1:
            return nums, 0
        mid = len(nums) // 2
        left, left_inv = self.mergeSort(nums[:mid])
        right, right_inv = self.mergeSort(nums[mid:])
        merged, cross_inv = self.merge(left, right)
        return merged, left_inv + right_inv + cross_inv

    def reversePairs(self, nums: list) -> int:
        """Return the number of inversion pairs in *nums* (0 for empty/1-element)."""
        if not nums:
            return 0
        _, inversions = self.mergeSort(nums)
        return inversions
| 26.424658 | 64 | 0.524624 |
28fd8b4c1c5abdea704fd69e0b99370a0f6f8997 | 21,954 | py | Python | Apps/phgreynoise/greynoise_connector.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 2 | 2021-07-23T03:51:30.000Z | 2021-08-12T14:13:04.000Z | Apps/phgreynoise/greynoise_connector.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 4 | 2021-10-04T09:22:02.000Z | 2021-11-01T12:00:04.000Z | Apps/phgreynoise/greynoise_connector.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 2 | 2021-05-15T17:31:24.000Z | 2021-07-23T03:51:42.000Z | # File: greynoise_connector.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
# Python 3 Compatibility imports
from __future__ import print_function, unicode_literals
# Phantom App imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from greynoise_consts import *
import requests
import json
from requests.utils import requote_uri
from six.moves.urllib.parse import urljoin as _urljoin
import urllib.parse
def urljoin(base, url):
    """Join *base* and *url*, guaranteeing exactly one '/' between them."""
    normalized_base = "%s/" % base.rstrip("/")
    return _urljoin(normalized_base, url.lstrip("/"))
class GreyNoiseConnector(BaseConnector):
"""Connector for GreyNoise App."""
def __init__(self):
    """Set up empty connector state; configuration is read later."""
    super(GreyNoiseConnector, self).__init__()
    self._session = None      # cached requests.Session (built lazily in get_session)
    self._app_version = None
    self._api_key = None
def validate_parameters(self, param):
    """Override BaseConnector's parameter validation with a no-op.

    This app supports unicode domains, which the stock validation
    routines reject, so validation is intentionally disabled.
    """
    # Disable BaseConnector's validate functionality, since this App supports unicode domains and the
    # validation routines don't
    return phantom.APP_SUCCESS
def _get_error_message_from_exception(self, e):
    """ This method is used to get appropriate error messages from the exception.
    :param e: Exception object
    :return: error message
    """
    try:
        if e.args:
            if len(e.args) > 1:
                error_code = e.args[0]
                error_msg = e.args[1]
            elif len(e.args) == 1:
                error_code = ERR_CODE_MSG
                error_msg = e.args[0]
            else:
                # NOTE(review): unreachable -- a truthy e.args always has len >= 1.
                error_code = ERR_CODE_MSG
                error_msg = ERR_MSG_UNAVAILABLE
    except:
        error_code = ERR_CODE_MSG
        error_msg = ERR_MSG_UNAVAILABLE
    # NOTE(review): if e.args is empty, error_code/error_msg are never bound
    # above; the NameError below is then swallowed by the bare except and
    # PARSE_ERR_MSG is returned.
    try:
        # NOTE(review): `in` performs a substring/membership test against
        # ERR_CODE_MSG -- looks like it should be `==`; confirm against the
        # constants in greynoise_consts.py. A non-string error_code makes
        # this raise TypeError, which falls through to PARSE_ERR_MSG.
        if error_code in ERR_CODE_MSG:
            error_text = "Error Message: {0}".format(error_msg)
        else:
            error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
    except:
        self.debug_print(PARSE_ERR_MSG)
        error_text = PARSE_ERR_MSG
    return error_text
def _validate_integer(self, action_result, parameter, key):
    """Validate that *parameter* is a non-negative integer.

    Returns (status, value): (APP_SUCCESS, int) on success, or
    (APP_ERROR-with-message, None) on failure. *key* names the parameter
    in the error message.

    NOTE(review): falsy inputs (None, '', 0) skip validation entirely and
    are returned unchanged with APP_SUCCESS -- so '' passes through as a
    string. Presumably intentional for optional parameters; confirm.
    """
    if parameter:
        try:
            # Reject non-integral numerics like 2.5 ("2.5" -> float -> check).
            if not float(parameter).is_integer():
                return action_result.set_status(phantom.APP_ERROR, VALID_INTEGER_MSG.format(key=key)), None
            parameter = int(parameter)
        except:
            return action_result.set_status(phantom.APP_ERROR, VALID_INTEGER_MSG.format(key=key)), None
        if parameter < 0:
            return action_result.set_status(phantom.APP_ERROR, NON_NEGATIVE_INTEGER_MSG.format(key=key)), None
    return phantom.APP_SUCCESS, parameter
def get_session(self):
    """Return the shared requests.Session, creating it on first use.

    The API key is attached as a default query parameter so every
    request made through the session is authenticated.
    """
    if self._session is None:
        session = requests.Session()
        session.params.update({"api-key": self._api_key})
        self._session = session
    return self._session
def _make_rest_call(self, action_result, method, *args, error_on_404=True, **kwargs):
    """Issue an HTTP request via the shared session and parse JSON.

    Returns (ret_val, response_json, status_code). On any failure,
    ret_val is an APP_ERROR status set on *action_result* and
    response_json may be None. When error_on_404 is False, a 404 is not
    treated as an HTTP error and its body is still parsed.
    """
    session = self.get_session()
    response_json = None
    status_code = None
    try:
        r = session.request(method, *args, **kwargs)
        # Raise for 4xx/5xx, except a 404 when the caller opted out.
        if r.status_code != 404 or error_on_404:
            r.raise_for_status()
        status_code = r.status_code
    except requests.exceptions.HTTPError as e:
        err_msg = self._get_error_message_from_exception(e)
        # Unquote: the URL (and any percent-encoding) may appear in the message.
        err_msg = urllib.parse.unquote(err_msg)
        ret_val = action_result.set_status(phantom.APP_ERROR,
                                           "HTTP error occurred while making REST call: {0}".format(err_msg))
    except Exception as e:
        err_msg = self._get_error_message_from_exception(e)
        ret_val = action_result.set_status(phantom.APP_ERROR,
                                           "General error occurred while making REST call: {0}".format(err_msg))
    else:
        # Request succeeded; JSON parse failures are reported separately.
        try:
            response_json = r.json()
            ret_val = phantom.APP_SUCCESS
        except Exception as e:
            err_msg = self._get_error_message_from_exception(e)
            ret_val = action_result.set_status(phantom.APP_ERROR,
                                               "Unable to parse JSON response. Error: {0}".format(err_msg))
    return (ret_val, response_json, status_code)
def _check_apikey(self, action_result):
    """Validate the configured API key against the GreyNoise ping endpoint.

    Returns APP_SUCCESS when the API answers {"message": "pong"};
    otherwise an APP_ERROR status set on *action_result*.
    """
    self.save_progress("Testing API key")
    ret_val, response_json, status_code = self._make_rest_call(
        action_result,
        "get",
        API_KEY_CHECK_URL,
        headers=self._headers
    )
    if phantom.is_fail(ret_val):
        self.save_progress("API key check Failed")
        return ret_val
    if response_json is None:
        self.save_progress("No response from API")
        return action_result.set_status(phantom.APP_ERROR, "No response from API")
    elif response_json.get("message") == "pong":
        self.save_progress("Validated API Key")
        self.debug_print("Validated API Key")
        return phantom.APP_SUCCESS
    else:
        self.save_progress("Invalid response from API")
        # Include the raw payload in the error when it serializes cleanly.
        try:
            response_json = json.dumps(response_json)
        except:
            return action_result.set_status(phantom.APP_ERROR, "Invalid response from API")
        return action_result.set_status(phantom.APP_ERROR, "Invalid response from API: %s" % response_json)
def _test_connectivity(self, param):
    """Handle 'test connectivity': succeed iff the API key validates."""
    action_result = self.add_action_result(ActionResult(dict(param)))
    status = self._check_apikey(action_result)
    if phantom.is_fail(status):
        self.save_progress("Test Connectivity Failed")
        return status
    self.save_progress("Test Connectivity Passed")
    return action_result.set_status(phantom.APP_SUCCESS)
def _lookup_ip(self, param):
    """Handle 'lookup ip': query the GreyNoise quick-lookup endpoint and
    attach the enriched result (visualization URL, code meaning)."""
    action_result = self.add_action_result(ActionResult(dict(param)))
    status = self._check_apikey(action_result)
    if phantom.is_fail(status):
        return status
    status, response_json, _ = self._make_rest_call(
        action_result,
        "get",
        LOOKUP_IP_URL.format(ip=param["ip"]),
        headers=self._headers,
    )
    if phantom.is_fail(status):
        return status
    result_data = {}
    action_result.add_data(result_data)  # mutated in place below
    result_data.update(response_json)
    try:
        result_data["visualization"] = VISUALIZATION_URL.format(ip=result_data["ip"])
        result_data["code_meaning"] = CODES.get(result_data["code"], "This code is unmapped")
    except KeyError:
        # Response lacked an expected field ("ip" or "code").
        return action_result.set_status(phantom.APP_ERROR, "Error occurred while processing API response")
    return action_result.set_status(phantom.APP_SUCCESS)
def _ip_reputation(self, param):
    """Handle 'ip reputation': query the GreyNoise context endpoint and
    attach the result with a visualization URL."""
    action_result = self.add_action_result(ActionResult(dict(param)))
    status = self._check_apikey(action_result)
    if phantom.is_fail(status):
        return status
    status, response_json, _ = self._make_rest_call(
        action_result,
        "get",
        IP_REPUTATION_URL.format(ip=param["ip"]),
        headers=self._headers,
    )
    if phantom.is_fail(status):
        return status
    result_data = {}
    action_result.add_data(result_data)  # mutated in place below
    result_data.update(response_json)
    try:
        result_data["visualization"] = VISUALIZATION_URL.format(ip=result_data["ip"])
    except KeyError:
        return action_result.set_status(phantom.APP_ERROR, "Error occurred while processing API response")
    return action_result.set_status(phantom.APP_SUCCESS)
def _gnql_query(self, param, is_poll=False, action_result=None):
    """Run a GNQL query, following scroll pagination until 'size' results
    are collected (or the API reports completion / no results).

    When is_poll is True the caller supplies *action_result* and a
    (status, result_data) tuple is returned; otherwise a fresh
    action_result is created and only the status is returned.
    """
    if not is_poll:
        action_result = self.add_action_result(ActionResult(dict(param)))
    ret_val = self._check_apikey(action_result)
    if phantom.is_fail(ret_val):
        if is_poll:
            return ret_val, None
        else:
            return ret_val
    first_flag = True               # first page uses no scroll token
    remaining_results_flag = True   # loop control: more pages to fetch
    scroll_token = ""
    full_response = {}              # accumulated across pages
    size = param["size"]
    # Validate 'size' action parameter
    ret_val, size = self._validate_integer(action_result, size, SIZE_ACTION_PARAM)
    if phantom.is_fail(ret_val):
        if is_poll:
            return action_result.get_status(), None
        else:
            return action_result.get_status()
    while remaining_results_flag:
        if first_flag:
            # Initial page: no scroll token yet.
            ret_val, response_json, status_code = self._make_rest_call(
                action_result,
                "get",
                GNQL_QUERY_URl,
                headers=self._headers,
                params=(('query', param["query"]),
                        ('size', size))
            )
            full_response.update(response_json)
            if "scroll" in full_response:
                scroll_token = full_response["scroll"]
            # Stop when the API says it is done or we have enough rows.
            if "complete" in full_response or len(full_response["data"]) >= size:
                remaining_results_flag = False
            elif "message" in full_response:
                if full_response["message"] == "no results":
                    remaining_results_flag = False
            first_flag = False
        if remaining_results_flag:
            # Subsequent pages: pass the scroll token and merge the data.
            ret_val, response_json, status_code = self._make_rest_call(
                action_result,
                "get",
                GNQL_QUERY_URl,
                headers=self._headers,
                params=(('query', param["query"]),
                        ('size', size),
                        ('scroll', scroll_token))
            )
            full_response["complete"] = response_json["complete"]
            if "scroll" in response_json:
                full_response["scroll"] = response_json["scroll"]
            for item in response_json["data"]:
                full_response["data"].append(item)
            if "scroll" in full_response:
                scroll_token = full_response["scroll"]
            if "complete" in full_response or len(full_response["data"]) >= size:
                remaining_results_flag = False
            elif "message" in full_response:
                if full_response["message"] == "no results":
                    remaining_results_flag = False
            else:
                remaining_results_flag = True
        if phantom.is_fail(ret_val):
            if is_poll:
                return ret_val, None
            else:
                return ret_val
    result_data = {}
    action_result.add_data(result_data)  # mutated in place below
    try:
        # Attach a GreyNoise visualizer link to every returned entry.
        for entry in full_response["data"]:
            entry["visualization"] = VISUALIZATION_URL.format(ip=entry["ip"])
    except KeyError:
        error_msg = "Error occurred while processing API response"
        if is_poll:
            return action_result.set_status(phantom.APP_ERROR, error_msg), None
        else:
            return action_result.set_status(phantom.APP_ERROR, error_msg)
    result_data.update(full_response)
    if is_poll:
        return ret_val, result_data
    else:
        return action_result.set_status(phantom.APP_SUCCESS)
def _lookup_ips(self, param):
    """Handle 'lookup ips': bulk quick-lookup for a comma-separated list.

    The 'ips' parameter is split, blank entries dropped, re-joined and
    URL-quoted before the multi-IP endpoint is called. Each returned
    record is enriched with a human-readable code meaning and a
    visualization URL.
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    ret_val = self._check_apikey(action_result)
    if phantom.is_fail(ret_val):
        return ret_val
    try:
        ips = [x.strip() for x in param["ips"].split(",")]
        ips = list(filter(None, ips))  # drop empty items ("a,,b")
        if not ips:
            return action_result.set_status(phantom.APP_ERROR, INVALID_COMMA_SEPARATED_VALUE_ERR_MSG.format(key='ips'))
        ips = ",".join(ips)
        ips_string = requote_uri(ips)
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        err_msg = "Error occurred while processing 'ips' action parameter. {0}".format(err)
        return action_result.set_status(phantom.APP_ERROR, err_msg)
    ret_val, response_json, status_code = self._make_rest_call(
        action_result,
        "get",
        LOOKUP_IPS_URL.format(ips=ips_string),
        headers=self._headers
    )
    if phantom.is_fail(ret_val):
        return ret_val
    result_data = []
    action_result.add_data(result_data)  # appended to in place below
    try:
        for result in response_json:
            if result["code"] in CODES:
                result["code_meaning"] = CODES[result["code"]]
            else:
                result["code_meaning"] = "This code is unmapped"
            result["visualization"] = VISUALIZATION_URL.format(ip=result["ip"])
            result_data.append(result)
        return action_result.set_status(phantom.APP_SUCCESS)
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        err_msg = "Error occurred while processing results: {0}".format(err)
        return action_result.set_status(phantom.APP_ERROR, err_msg)
def _process_query(self, data):
    """Create one Phantom container (with one artifact) per GNQL result.

    *data* is the result_data dict produced by _gnql_query. Returns
    APP_SUCCESS when every entry was processed (or there were none),
    APP_ERROR if processing raised.
    """
    # spawn container for every item returned
    if data["count"] > 0:
        try:
            for entry in data["data"]:
                ip = entry["ip"]
                self.save_progress("Processing IP address {}".format(ip))
                container = {
                    "custom_fields": {},
                    "data": {},
                    "name": "",
                    "description": "Container added by GreyNoise",
                    "label": self.get_config().get("ingest", {}).get("container_label"),
                    "sensitivity": "amber",
                    "source_data_identifier": "",
                    "tags": entry["tags"],
                }
                # Severity mirrors the GreyNoise classification.
                if entry["classification"] == "malicious":
                    container["severity"] = "high"
                else:
                    container["severity"] = "low"
                artifact_cef = {
                    'ip': entry['ip'],
                    'classification': entry['classification'],
                    'first_seen': entry['first_seen'],
                    'last_seen': entry['last_seen'],
                    'actor': entry['actor'],
                    'organization': entry['metadata']['organization'],
                    'asn': entry['metadata']['asn']
                }
                # Geo fields are optional in the API response.
                if entry['metadata']['country']:
                    artifact_cef['country'] = entry['metadata']['country']
                if entry['metadata']['city']:
                    artifact_cef['city'] = entry['metadata']['city']
                container["artifacts"] = [{
                    "cef": artifact_cef,
                    "description": "Artifact added by GreyNoise",
                    "label": container["label"],
                    "name": "GreyNoise Query Language Entry",
                    "source_data_identifier": container["source_data_identifier"],
                    "severity": container["severity"]
                }]
                container["name"] = "GreyNoise Query Language Entry"
                ret_val, container_creation_msg, container_id = self.save_container(container)
                if phantom.is_fail(ret_val):
                    # Skip this entry but keep processing the rest.
                    self.save_progress("Error occurred while saving the container")
                    self.debug_print(container_creation_msg)
                    continue
                self.save_progress("Created %s" % container_id)
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            err_msg = "Error occurred while processing query data. {}".format(err)
            self.debug_print(err_msg)
            return phantom.APP_ERROR
        return phantom.APP_SUCCESS
    else:
        self.save_progress("No results matching your GNQL query were found")
        return phantom.APP_SUCCESS
def _on_poll(self, param):
    """Ingestion entry point: run the configured GNQL query and create containers.

    :param param: poll parameters supplied by the platform (mutated with
                  ``query``/``size`` before the query is issued).
    :return: an action-result status (APP_SUCCESS / APP_ERROR).
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    poll_now = self.is_poll_now()  # cached once; plain platform predicate
    if poll_now:
        self.save_progress('Due to the nature of the API, the '
                           'artifact limits imposed by POLL NOW are '
                           'ignored. As a result POLL NOW will simply '
                           'create a container for each artifact.')
    config = self.get_config()
    param["query"] = config.get("on_poll_query")
    if poll_now:
        param["size"] = param.get(phantom.APP_JSON_CONTAINER_COUNT, 25)
    else:
        # Validate the 'on_poll_size' config parameter before using it.
        ret_val, on_poll_size = self._validate_integer(
            action_result, config.get("on_poll_size", 25), ONPOLL_SIZE_CONFIG_PARAM)
        if phantom.is_fail(ret_val):
            return action_result.get_status()
        param["size"] = on_poll_size
    # Refuse to poll with the placeholder query shipped in the default config.
    if param["query"] == "Please refer to the documentation":
        self.save_progress("Default on poll query unchanged, please enter a valid GNQL query")
        return action_result.set_status(phantom.APP_ERROR, "Default on poll query unchanged")
    ret_val, data = self._gnql_query(param, is_poll=True, action_result=action_result)
    if phantom.is_fail(ret_val):
        return action_result.get_status()
    if phantom.is_fail(self._process_query(data)):
        return action_result.set_status(phantom.APP_ERROR, "Failed to process the query")
    return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
    """Route *param* to the handler that matches the current action identifier.

    Unknown identifiers fall through and return ``phantom.APP_SUCCESS``,
    matching the original if/elif chain's default.
    """
    dispatch = {
        "test_connectivity": self._test_connectivity,
        "lookup_ip": self._lookup_ip,
        "ip_reputation": self._ip_reputation,
        "gnql_query": self._gnql_query,
        "lookup_ips": self._lookup_ips,
        "on_poll": self._on_poll,
    }
    handler = dispatch.get(self.get_action_identifier())
    return handler(param) if handler is not None else phantom.APP_SUCCESS
def initialize(self):
    """Load persisted state and assemble the GreyNoise API headers for this run."""
    self._state = self.load_state()
    self._api_key = self.get_config()['api_key']
    self._app_version = self.get_app_json()["app_version"]
    # The User-Agent carries the app version so GreyNoise can track integrations.
    self._headers = {
        "Accept": "application/json",
        "key": self._api_key,
        "User-Agent": "greynoise-phantom-integration-v{0}".format(self._app_version)
    }
    return phantom.APP_SUCCESS
def finalize(self):
"""Finalize the Phantom integration: persist state before the action ends."""
# Save the state, this data is saved across actions and app upgrades
self.save_state(self._state)
return phantom.APP_SUCCESS
if __name__ == "__main__":
# Debug harness: run a single action from a JSON test file against a local
# Phantom instance, optionally authenticating first to obtain a session id.
import pudb
import argparse
# Drops into the pudb debugger immediately on launch (developer tool).
pudb.set_trace()
argparser = argparse.ArgumentParser()
argparser.add_argument("input_test_json", help="Input Test JSON file")
argparser.add_argument("-u", "--username", help="username", required=False)
argparser.add_argument("-p", "--password", help="password", required=False)
args = argparser.parse_args()
session_id = None
username = args.username
password = args.password
if username is not None and password is None:
# User specified a username but not a password, so ask
import getpass
password = getpass.getpass("Password: ")
if username and password:
# Log in to the platform UI to fetch CSRF token and session cookie.
login_url = BaseConnector._get_phantom_base_url() + "login"
try:
print("Accessing the Login page")
r = requests.get(login_url, verify=False)
csrftoken = r.cookies["csrftoken"]
data = dict()
data["username"] = username
data["password"] = password
data["csrfmiddlewaretoken"] = csrftoken
headers = dict()
headers["Cookie"] = "csrftoken=" + csrftoken
headers["Referer"] = login_url
print("Logging into Platform to get the session id")
r2 = requests.post(login_url, verify=False, data=data, headers=headers)
session_id = r2.cookies["sessionid"]
except Exception as e:
print("Unable to get session id from the platform. Error: " + str(e))
exit(1)
with open(args.input_test_json) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = GreyNoiseConnector()
connector.print_progress_message = True
if session_id is not None:
# Inject the authenticated session so the connector can call platform REST APIs.
in_json["user_session_token"] = session_id
connector._set_csrf_info(csrftoken, headers["Referer"])
ret_val = connector._handle_action(json.dumps(in_json), None)
print(json.dumps(json.loads(ret_val), indent=4))
exit(0)
| 38.856637 | 123 | 0.579029 |
28ff0f386d7b1dca0a607e85432309e87a77eb9e | 1,044 | py | Python | nntransfer/trainer/main_loop_modules/main_loop_module.py | sinzlab/nntransfer | 78622feb568b1cc50ac0f73c8297f9785876a659 | [
"MIT"
] | null | null | null | nntransfer/trainer/main_loop_modules/main_loop_module.py | sinzlab/nntransfer | 78622feb568b1cc50ac0f73c8297f9785876a659 | [
"MIT"
] | null | null | null | nntransfer/trainer/main_loop_modules/main_loop_module.py | sinzlab/nntransfer | 78622feb568b1cc50ac0f73c8297f9785876a659 | [
"MIT"
] | 2 | 2021-07-30T08:39:04.000Z | 2022-03-01T16:26:11.000Z | class MainLoopModule(object):
def __init__(self, trainer):
    """Cache commonly used trainer attributes and reset per-epoch bookkeeping.

    :param trainer: object exposing ``data_loaders``, ``config``, ``device``,
                    ``seed`` and ``tracker`` (shape assumed from usage here —
                    confirm against the trainer implementation).
    """
    self.trainer = trainer
    self.train_loader = trainer.data_loaders["train"]
    self.config = trainer.config
    self.device = trainer.device
    self.seed = trainer.seed
    self.tracker = trainer.tracker
    # Per-epoch / per-batch state, populated by the pre_* hooks below.
    self.criterion = None
    self.mode = None
    self.train_mode = False
    self.task_key = ""
    self.epoch = -1
    self.options = {}
def pre_epoch(self, model, mode, **options):
    """Record the epoch context (mode string, epoch index, extra options)."""
    self.mode = mode
    self.train_mode = (mode == "Training")
    self.epoch = self.tracker.epoch
    self.options = options
def pre_forward(self, model, inputs, task_key, shared_memory):
    """Remember the active task key; pass model and inputs through unchanged."""
    self.task_key = task_key
    return (model, inputs)
def post_forward(self, outputs, loss, targets, **shared_memory):
    """Return the forward results untouched (override point for subclasses)."""
    return (outputs, loss, targets)
def post_backward(self, model):
    """Hook invoked after the backward pass; intentionally a no-op here."""
def post_optimizer(self, model):
    """Hook invoked after the optimizer step; intentionally a no-op here."""
def post_epoch(self, model):
    """Hook invoked at the end of an epoch; intentionally a no-op here."""
| 28.216216 | 68 | 0.612069 |
28ff68d107d4e01cf5ece21ad9bb66128f102b8f | 373 | py | Python | src/pgbackup/pgdump.py | narbutas/pgbackup | 2bc65dc9c4cdba135e0ae68c71d034de50fddda8 | [
"Apache-2.0"
] | null | null | null | src/pgbackup/pgdump.py | narbutas/pgbackup | 2bc65dc9c4cdba135e0ae68c71d034de50fddda8 | [
"Apache-2.0"
] | null | null | null | src/pgbackup/pgdump.py | narbutas/pgbackup | 2bc65dc9c4cdba135e0ae68c71d034de50fddda8 | [
"Apache-2.0"
] | null | null | null | import subprocess
import sys
def dump(url):
    """Launch ``pg_dump`` for *url* and return the process handle (stdout piped).

    Exits the interpreter with status 1 when the binary cannot be started
    (FileNotFoundError is an OSError subclass, so a missing pg_dump lands here).
    """
    command = ['pg_dump', url]
    try:
        return subprocess.Popen(command, stdout=subprocess.PIPE)
    except OSError as err:
        print(f"Error: {err}")
        sys.exit(1)
def dump_file_name(url, timestamp=None):
    """Derive a ``.sql`` file name from the database name embedded in *url*.

    The database name is the last path segment with any query string stripped;
    a truthy *timestamp* is appended as ``-<timestamp>`` before the suffix.
    """
    db_name = url.split('/')[-1].split('?')[0]
    marker = f"-{timestamp}" if timestamp else ""
    return f"{db_name}{marker}.sql"
e900f1fbad104966ef7247511d53bc745e2f6385 | 1,190 | py | Python | e2_s13.py | iansantana00/Python-Course | 43852aa64c93099342ab4765b0fe8729a959449e | [
"MIT"
] | 2 | 2022-01-13T15:55:58.000Z | 2022-02-11T23:18:34.000Z | e2_s13.py | iansantana00/Python-Course | 43852aa64c93099342ab4765b0fe8729a959449e | [
"MIT"
] | null | null | null | e2_s13.py | iansantana00/Python-Course | 43852aa64c93099342ab4765b0fe8729a959449e | [
"MIT"
] | null | null | null | numero_vogal = 0
# Console exercise: collect user-typed lines into a text file, then count the
# lines, vowels and consonants of the stored (Portuguese) text.
espaço = 0
# NOTE(review): `espaço` is never used below — presumably leftover; confirm.
numero_consoante = 0
contador = 0
escrita = 0
arquivo = input('Digite o nome do seu arquivo (.txt): ')
with open(arquivo, 'w', encoding='utf-8') as texto:
# Collect entries until the user types 'sair'; the sentinel itself is also
# written to the file, which the "- 1"/"- 2" offsets below appear to compensate.
while escrita != 'sair':
escrita = input('Digite: ')
texto.write(escrita)
texto.write('\n')
contador += 1
with open(arquivo, encoding='utf-8') as texto:
file = texto.read()
# NOTE(review): the split result is discarded — this statement has no effect;
# the loops below walk the raw string character by character.
file.split('\n')
for vogal in file:
if vogal in ('a', 'A', 'e', 'E', 'i', 'I', 'o', 'O', 'u', 'U', 'á', 'Á', 'é', 'É', 'í', 'Í', 'ó', 'Ó', 'ú',
'Ú', 'Â', 'â', 'ã', 'Ã', 'Õ', 'õ', 'ô', 'Ô', 'ê', 'Ê'):
numero_vogal += 1
for consoante in file:
# NOTE(review): lowercase 'd' is missing from this tuple although 'D' is
# present, so lowercase d's are never counted — likely a bug; confirm.
if consoante in ('Q', 'q', 'W', 'w', 'R', 'r', 'T', 't', 'Y', 'y', 'P', 'p', 'S', 's', 'D', 'F', 'f', 'g',
'G', 'h', 'H', 'J', 'j', 'K', 'k', 'L', 'l', 'Ç', 'ç', 'Z', 'z', 'X', 'x', 'C', 'c', 'V', 'v',
'B', 'b', 'N', 'n', 'M', 'm'):
numero_consoante += 1
# The offsets presumably discount the trailing 'sair' entry (one line containing
# two vowels and two consonants) — TODO confirm.
print(f'O número de linhas do texto é {contador - 1}')
print(f'O número de vogais é {numero_vogal - 2}')
print(f'O número de consoantes é {numero_consoante - 2}')
| 30.512821 | 117 | 0.443697 |
e903d12f8a04e71c8c2140ed40144f3db69c7ec2 | 7,249 | py | Python | app/sso/tests/providers/test_oauth2_google_views.py | getmetamapper/metamapper | 0b2f67eec03fbf7ece35ff9f58ea9bb2dde4d85f | [
"BSD-2-Clause"
] | 53 | 2020-07-01T23:11:59.000Z | 2022-03-31T19:10:28.000Z | app/sso/tests/providers/test_oauth2_google_views.py | metamapper-io/metamapper | 376716e72bcaca62f1ec09ca9a13a0346e5502f9 | [
"BSD-2-Clause"
] | 5 | 2020-11-25T19:48:57.000Z | 2022-02-27T23:50:18.000Z | app/sso/tests/providers/test_oauth2_google_views.py | metamapper-io/metamapper | 376716e72bcaca62f1ec09ca9a13a0346e5502f9 | [
"BSD-2-Clause"
] | 5 | 2020-08-29T16:43:59.000Z | 2022-01-17T19:05:30.000Z | # -*- coding: utf-8 -*-
from base64 import b64encode, b64decode
from unittest import mock
from django.conf import settings
from django.http import HttpResponseRedirect
from django.test import TestCase, Client
from django.urls import reverse
from testutils import factories
from urllib.parse import urlparse, parse_qs
class GoogleOAuth2ViewSetupTests(TestCase):
"""Test cases for `OAuth2googleView.setup_pipeline` method.
"""
def setUp(self):
# One HTTP client plus a read-only member of a fresh workspace per test.
self.client = Client(HTTP_HOST='example.com')
self.user = factories.UserFactory()
self.workspace = factories.WorkspaceFactory()
self.workspace.grant_membership(self.user, 'READONLY')
@mock.patch('app.sso.providers.oauth2.google.views.GoogleClient')
def test_when_valid(self, google_client):
"""It should redirect to the SSO setup page carrying the detected Google domain.
"""
# (Docstring corrected: the assertions below verify the happy-path setup
# redirect, not an error redirect as the original docstring claimed.)
domain = 'metamapper.io'
google_client.return_value.get_user_domain.return_value = domain
google_client.return_value.refresh_token = 'meowmeowmeow'
# `state` encodes pipeline selection (login=0 => setup) plus workspace/user ids.
response = self.client.get(reverse('sso-oauth2-google'), {
'code': 'meowmeowmeow',
'state': b64encode((f'login=0&wksp={self.workspace.pk}&uid={self.user.pk}').encode('utf-8')),
})
self.user.refresh_from_db()
self.assertTrue(isinstance(response, (HttpResponseRedirect,)))
self.assertEqual(
response.url,
f'{settings.WEBSERVER_ORIGIN}/{self.workspace.pk}/settings/authentication/setup/google?domain={domain}',
)
class GoogleOAuth2ViewLoginTests(TestCase):
"""Test cases for `OAuth2googleView.login_pipeline` method.
"""
def setUp(self):
# One HTTP client plus a read-only member of a fresh workspace per test.
self.client = Client(HTTP_HOST='example.com')
self.user = factories.UserFactory()
self.workspace = factories.WorkspaceFactory()
self.workspace.grant_membership(self.user, 'READONLY')
@mock.patch('app.sso.providers.oauth2.google.views.GoogleClient')
def test_when_connection_does_not_exist(self, google_client):
"""It should redirect with an error.
"""
# A connection id that matches nothing in the database.
response = self.client.get(reverse('sso-oauth2-google'), {
'code': 'meowmeowmeow',
'state': b64encode(('connection=betDse4R4gus').encode('utf-8')),
})
# The view reports failures via a base64-encoded `error` query parameter.
urlparams = parse_qs(urlparse(response.url).query)
self.assertTrue(isinstance(response, (HttpResponseRedirect,)))
self.assertEqual(
b64decode(urlparams['error'][0]).decode('utf-8'),
'The workspace does not exist or does not have SSO enabled.',
)
@mock.patch('app.sso.providers.oauth2.google.views.GoogleClient')
def test_when_connection_is_disabled(self, google_client):
"""It should redirect with an error.
"""
# The connection exists but has been explicitly disabled.
connection = factories.SSOConnectionFactory(workspace=self.workspace, is_enabled=False)
response = self.client.get(reverse('sso-oauth2-google'), {
'code': 'meowmeowmeow',
'state': b64encode(('connection=%s' % connection.pk).encode('utf-8')),
})
urlparams = parse_qs(urlparse(response.url).query)
self.assertTrue(isinstance(response, (HttpResponseRedirect,)))
self.assertEqual(
b64decode(urlparams['error'][0]).decode('utf-8'),
'The workspace does not exist or does not have SSO enabled.',
)
@mock.patch('app.sso.providers.oauth2.google.views.GoogleClient')
def test_with_not_part_of_google_organization(self, google_client):
"""It should redirect with an error.
"""
# The Google account's domain differs from the connection's allowed domain.
domain = 'metamapper.io'
google_client.return_value.get_user_domain.return_value = domain
google_client.return_value.get_user.return_value = {
"sub": "1234",
"email": self.user.email,
"given_name": self.user.fname,
"family_name": self.user.lname,
"email_verified": True,
}
connection = factories.SSOConnectionFactory(
workspace=self.workspace,
is_enabled=True,
extras={'domain': 'metamapper.dev'},
)
response = self.client.get(reverse('sso-oauth2-google'), {
'code': 'meowmeowmeow',
'state': b64encode(('connection=%s' % connection.pk).encode('utf-8')),
})
urlparams = parse_qs(urlparse(response.url).query)
self.assertTrue(isinstance(response, (HttpResponseRedirect,)))
self.assertEqual(
b64decode(urlparams['error'][0]).decode('utf-8'),
f'The domain for your Google account ({domain}) is not allowed to authenticate with this provider.',
)
@mock.patch('app.sso.providers.oauth2.google.views.GoogleClient')
def test_when_valid(self, google_client):
"""It should authenticate the user.
"""
# Domain matches the connection AND is a verified SSO domain -> success.
domain = self.user.email.split("@")[-1]
google_client.return_value.get_user_domain.return_value = domain
google_client.return_value.get_user.return_value = {
"sub": "1234",
"email": self.user.email,
"given_name": self.user.fname,
"family_name": self.user.lname,
"email_verified": True,
}
connection = factories.SSOConnectionFactory(
workspace=self.workspace,
is_enabled=True,
extras={'domain': domain},
)
sso_domain = factories.SSODomainFactory(
workspace=self.workspace,
domain=domain,
)
sso_domain.mark_as_verified()
response = self.client.get(reverse('sso-oauth2-google'), {
'code': 'meowmeowmeow',
'state': b64encode(('connection=%s' % connection.pk).encode('utf-8')),
})
self.user.refresh_from_db()
self.assertTrue(isinstance(response, (HttpResponseRedirect,)))
# Success redirects to the workspace SSO landing URL with an access token.
self.assertEqual(
response.url,
f'{settings.WEBSERVER_ORIGIN}/{self.workspace.slug}/sso/{self.user.pk}/{self.user.sso_access_token}',
)
@mock.patch('app.sso.providers.oauth2.google.views.GoogleClient')
def test_when_domain_is_not_verified(self, google_client):
"""It should redirect with an error.
"""
# Same as the valid case, but the SSO domain was never verified.
domain = self.user.email.split("@")[-1]
google_client.return_value.get_user_domain.return_value = domain
google_client.return_value.get_user.return_value = {
"sub": "1234",
"email": self.user.email,
"given_name": self.user.fname,
"family_name": self.user.lname,
"email_verified": True,
}
connection = factories.SSOConnectionFactory(
workspace=self.workspace,
is_enabled=True,
extras={'domain': domain},
)
response = self.client.get(reverse('sso-oauth2-google'), {
'code': 'meowmeowmeow',
'state': b64encode(('connection=%s' % connection.pk).encode('utf-8')),
})
urlparams = parse_qs(urlparse(response.url).query)
self.assertTrue(isinstance(response, (HttpResponseRedirect,)))
self.assertEqual(
b64decode(urlparams['error'][0]).decode('utf-8'),
'Domain is not authorized for the provided workspace.',
)
| 36.611111 | 116 | 0.626983 |
e903dc912d5bbd81aedab3090527461e1da894a1 | 2,843 | py | Python | mammoth/ensembl.py | hbc/mammoth_code | 2e6909514e8ff232981ea2cb03f078257bc5c847 | [
"MIT"
] | 1 | 2017-05-22T01:18:13.000Z | 2017-05-22T01:18:13.000Z | mammoth/ensembl.py | hbc/mammoth_code | 2e6909514e8ff232981ea2cb03f078257bc5c847 | [
"MIT"
] | null | null | null | mammoth/ensembl.py | hbc/mammoth_code | 2e6909514e8ff232981ea2cb03f078257bc5c847 | [
"MIT"
] | null | null | null | """ensembl interaction function"""
import os
import requests, sys
import yaml
import logging
import gffutils
from collections import defaultdict
import mammoth.logger as mylog
# Ensembl REST API endpoint templates; `server` wraps the path fragments below.
server = "http://rest.ensembl.org{ext}"
# CDS / protein sequence lookups by Ensembl feature id.
ext = "/sequence/id/{id}?type=cds"
prot = "/sequence/id/{id}?type=protein"
# Region sequence lookup, hard-wired to the elephant species.
sequence = "/sequence/region/elephant/{chr}:{start}..{end}:{strand}?"
def query_sequence(chr, start, end, strand):
    """Fetch the genomic sequence for an elephant region from the Ensembl REST API.

    Parameter names must stay as-is: the URL template is filled via ``locals()``.
    Raises for non-OK HTTP responses; returns the parsed response body otherwise.
    """
    endpoint = sequence.format(**locals())
    response = requests.get(server.format(ext=endpoint),
                            headers={"Content-Type": "text/plain"})
    if not response.ok:
        response.raise_for_status()
        return None  # unreachable when raise_for_status raises; kept for parity
    # NOTE(review): yaml.load without an explicit Loader is deprecated upstream.
    return yaml.load(response.text)
def query_exon(id):
    """Return the coding sequence (CDS) payload for an Ensembl feature id."""
    response = requests.get(server.format(ext=ext.format(id=id)),
                            headers={"Content-Type": "application/json"})
    if not response.ok:
        response.raise_for_status()
        return None  # unreachable when raise_for_status raises; kept for parity
    return yaml.load(response.text)
def query_prot(id):
    """Return the protein sequence payload for an Ensembl feature id."""
    response = requests.get(server.format(ext=prot.format(id=id)),
                            headers={"Content-Type": "application/json"})
    if not response.ok:
        response.raise_for_status()
        return None  # unreachable when raise_for_status raises; kept for parity
    return yaml.load(response.text)
def _get_db(db):
    """Open an existing gffutils database file and return its FeatureDB handle.

    :param db: path to a gffutils database file.

    Bug fix: the original referenced the undefined name ``db_file`` instead of
    the ``db`` parameter, raising NameError on every call.
    """
    return gffutils.FeatureDB(db)
def _convert_to_db(db):
    """Return a FeatureDB for *db*, building the ``<db>.db`` file if needed."""
    out = "%s.db" % db
    if not os.path.exists(out):
        # First run: materialize the database from the GFF/GTF source file.
        gffutils.create_db(db, disable_infer_transcripts=True, disable_infer_genes=True, dbfn=out)
    return gffutils.FeatureDB(out)
def get_genes(db):
"""Index protein-coding genes from a GFF/GTF database.

Returns a pair:
  * genome: {gene_name: {transcript_id: {'size': int, 'exons': {exon_number: exon_id}}}}
  * exons_pos: {exon_id: {'chrom', 'start', 'end', 'strand'}}
"""
db = _convert_to_db(db)
genome = defaultdict(dict)
exons_pos = defaultdict(dict)
for gene in db.features_of_type("gene"):
# Genes without a name cannot be keyed; skip them.
if "gene_name" not in gene.attributes:
continue
if gene.attributes["gene_biotype"][0] == "protein_coding":
# Dedup set: an exon shared by several transcripts of this gene is only
# recorded under the first transcript that mentions it.
exon_seen = set()
for tx in db.children(gene, featuretype='transcript', order_by='start'):
if tx.attributes["transcript_biotype"][0] == "protein_coding":
# txs.add(tx["transcript_id"])
exons = dict()
for e in db.children(tx, featuretype='exon', order_by='start'):
if e.attributes['exon_id'][0] not in exon_seen:
exons.update({int(e.attributes['exon_number'][0]): e.attributes['exon_id'][0]})
exons_pos.update({e.attributes['exon_id'][0]: {'chrom': e.chrom,
'start': e.start,
'end': e.end,
'strand': e.strand}})
exon_seen.add(e.attributes['exon_id'][0])
# 'size' is the genomic span of the transcript, not the spliced length.
genome[gene.attributes["gene_name"][0]].update({tx.attributes["transcript_id"][0]: {'size': abs(tx.end-tx.start),
'exons': exons}})
return genome, exons_pos
| 38.418919 | 133 | 0.570172 |
e904784d726457730b96e531625f80ef01e860f9 | 906 | py | Python | test/top.py | persianpros/transmissionrpc | 5e6a8487ca7684459ef9d3b375b207535ae2b9dd | [
"MIT"
] | null | null | null | test/top.py | persianpros/transmissionrpc | 5e6a8487ca7684459ef9d3b375b207535ae2b9dd | [
"MIT"
] | null | null | null | test/top.py | persianpros/transmissionrpc | 5e6a8487ca7684459ef9d3b375b207535ae2b9dd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# 2013-03, Erik Svensson <erik.public@gmail.com>
# Licensed under the MIT license.
import unittest
import transmissionrpc
class TopTest(unittest.TestCase):
    """Sanity checks on the transmissionrpc package-level metadata constants."""

    def testConstants(self):
        """Each dunder constant has the expected type; the version string matches major.minor."""
        expectations = (
            (transmissionrpc.__author__, str),
            (transmissionrpc.__version_major__, int),
            (transmissionrpc.__version_minor__, int),
            (transmissionrpc.__version__, str),
            (transmissionrpc.__copyright__, str),
            (transmissionrpc.__license__, str),
        )
        for value, expected_type in expectations:
            self.assertTrue(isinstance(value, expected_type))
        composed = '{0}.{1}'.format(transmissionrpc.__version_major__, transmissionrpc.__version_minor__)
        self.assertEqual(composed, transmissionrpc.__version__)
def suite():
    """Build a TestSuite containing every test from TopTest.

    Cleanup: the original bound the result to a local named ``suite``,
    shadowing the function itself; the intermediate is unnecessary.
    """
    return unittest.TestLoader().loadTestsFromTestCase(TopTest)
| 39.391304 | 142 | 0.738411 |
e908268a83f96abbf4d4753eae66db6c612a092e | 389 | py | Python | sglib/__init__.py | mvinyard/guideID | fec14c8713960428fc31f68ece7ac36a7b2bcab9 | [
"MIT"
] | null | null | null | sglib/__init__.py | mvinyard/guideID | fec14c8713960428fc31f68ece7ac36a7b2bcab9 | [
"MIT"
] | null | null | null | sglib/__init__.py | mvinyard/guideID | fec14c8713960428fc31f68ece7ac36a7b2bcab9 | [
"MIT"
] | null | null | null | # __init__.py
from ._sgRNA_Library import _sgRNA_Library as Library
# functions
from ._gene_annotation_functions._merge_reduce_gene_regions import _merge_reduce_gene_regions as merge_reduce
from ._guide_annotation_functions._annotate_biochemistry import _annotate_biochemistry as annotate_biochemistry, _annotate_GC_content as annotate_GC, _annotate_homopolymer as annotate_homopolymer
| 43.222222 | 195 | 0.894602 |
e90a508ef0e30d0bdd4948ae4a031308ac6c728e | 10,317 | py | Python | pag_demo.py | Topaz1618/MeowFile | 33878abfb552128368ad6bbf5396d45f21906ce3 | [
"MIT"
] | null | null | null | pag_demo.py | Topaz1618/MeowFile | 33878abfb552128368ad6bbf5396d45f21906ce3 | [
"MIT"
] | null | null | null | pag_demo.py | Topaz1618/MeowFile | 33878abfb552128368ad6bbf5396d45f21906ce3 | [
"MIT"
] | null | null | null | __copyright__ = """ Copyright (c) 2021 HangYan. """
__license__ = 'MIT license'
__version__ = '1.0'
__author__ = 'topaz1668@gmail.com'
from models import conn_db, UploadFiles
from sqlalchemy import func, distinct, or_, and_
import datetime
from datetime import timedelta
import time
import math
def string_to_ts(str_time):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string to a local-time POSIX timestamp.

    Non-string inputs are coerced with ``str()`` first. Returns 0 (after
    printing the error) when the value does not match the expected format.
    """
    text = str_time if isinstance(str_time, str) else str(str_time)
    try:
        return time.mktime(time.strptime(text, "%Y-%m-%d %H:%M:%S"))
    except ValueError as e:
        print("Catch error: ", e)
        return 0
def ts_to_string(ts):
    """Render a POSIX timestamp as 'YYYY-MM-DD HH:MM:SS' in local time.

    Non-float inputs (ints, numeric strings) are coerced with ``float()``.
    """
    value = ts if isinstance(ts, float) else float(ts)
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(value))
# def page_limit():
# """ 10+ ms """
# session = conn_db()
# print(f"Before: {time.time()}")
# order_obj = session.query(ShopOrder).filter(
# ShopOrder.user_id == 1,
# ShopOrder.is_deleted == 0,
# ).all()
# print(f"After1: {time.time()}")
#
# print(len(order_obj))
# print(f"After2: {time.time()}")
#
#
# def page_limit_scalar():
# """ 5 ms """
# session = conn_db()
# print(f"Before: {time.time()}")
# total = session.query(func.count(distinct(ShopOrder.id))).filter(
# ShopOrder.user_id == 1,
# ShopOrder.is_deleted == 0,
# ).scalar()
#
# print(f"After: {time.time()} {total}")
#
# total_page = total / PAGE_LIMIT
# total_page = math.ceil(total_page)
#
# print("Total page: ", total_page)
# return total, total_page
#
#
# def slice_data(total, current_page=1):
# print(f"Current page: {current_page}")
# session = conn_db()
# start = (current_page -1) * PAGE_LIMIT
# end = total if PAGE_LIMIT * current_page > total else PAGE_LIMIT * current_page
# order_obj_list = session.query(ShopOrder).filter(
# ShopOrder.user_id == 1,
# ShopOrder.is_deleted == 0,
# )[start:end]
#
# for i in order_obj_list:
# print(i.id)
#
#
# def get_all():
# session = conn_db()
# order_obj_list = session.query(ShopOrder).filter(
# ShopOrder.user_id == 1,
# ).all()
#
# for i in order_obj_list:
# print(i.id)
#
#
# def order_by_colum():
# session = conn_db()
# results = session.query(ShopGoods).filter(ShopGoods.is_delete==0).order_by(ShopGoods.goods_price.desc()).all() # 高到低
# # results = session.query(ShopGoods).filter(ShopGoods.is_delete==0).order_by(ShopGoods.goods_price).all() # 低到高
#
# for i in results:
# print(i.goods_price)
#
# print(results)
#
#
# def order_by_join():
# session = conn_db()
# before = time.time()
# total = session.query(func.count(distinct(ShopGoods.id))).filter(
# or_(*[ShopGoods.menu_path == name for name in ["Actor"]]),
# ).scalar()
# print("Total: ", total)
#
# results = session.query(ShopGoods).filter(
# or_(*[ShopGoods.menu_path == name for name in ["Clothes", ]]),
# ).order_by(ShopGoods.goods_price.desc())[0:3] # 高到低
#
# # goods_list_obj = session.query(ShopGoods).filter(
# # or_(*[ShopGoods.goods_name == name for name in filter_list])).order_by(
# # ShopGoods.goods_price.desc())[start:end]
#
# after = time.time()
# for i in results:
# print(i.goods_price, i.goods_name)
#
# print(results, after - before)
#
#
# def order_by_or():
# session = conn_db()
# results = session.query(ShopMainMenu).filter(
# or_(
# ShopMainMenu.id == 1,
# ShopMainMenu.id == 2)).all()
#
# for i in results:
# print(i.name)
#
# print(results)
#
#
# def get_by_negate():
# # TEST_USER = ["15600803270", "15612345678", "15600000000", "15600809876", "15600800080","15600801111","15611111111","15612111111","15711111111","15600000001","15600000002","15600000003","15600802222","15611119999", "18310703270", "18310700909", "18434471028", "17747121395", "18622606402", "18610404330", "18582045352", "18262676236" ]
# # TEST_USER = ["15600803270", "15612345678", "18310703270", "18434471028",]
# session = conn_db()
# # total = session.query(func.count(distinct(ShopUser.id))).filter(
# # *[ShopUser.phonenum != name for name in TEST_USER]
# # ).scalar()
# #
# # session.close()
# # print("all data", total)
#
#
# def get_avg():
# TEST_USER = [
# "15600803270",
# "15612345678",
# "18310703270",
# "18434471028",
# "15600801111",
# "17747121395",
# "15600802222",
# "18622606402",
# # "18610404330",
# # "18582045352",
# # "18262676236",
# ]
#
# session = conn_db()
# access_sum = session.query(func.sum(distinct(ShopUser.access_times))).filter(
# *[ShopUser.phonenum != name for name in TEST_USER]
# ).scalar()
#
# total = session.query(func.count(distinct(ShopUser.id))).filter(
# *[ShopUser.phonenum != name for name in TEST_USER]
# ).scalar()
#
# access_time_avg = 0
# if total != 0:
# access_time_avg = round(access_sum / total, 2)
#
#
# session.close()
# print("all data", total)
#
#
# def test_about_cut_value():
# session = conn_db()
# start = 0
# end = 2
# uid = 1
# myitems_list_obj = session.query(ShopPersonalItems).filter(
# ShopPersonalItems.uid == 1,
# )[start:end]
# # print(myitems_list_obj)
#
# for myitems_obj in myitems_list_obj:
# print(myitems_obj.id)
#
#
# def or_and_toghter():
# TEST_USER = [
# "15600803270",
# "15612345678",
# ]
# old_users_list = ["15612345678", "15101231234", "15101231236"]
# session = conn_db()
# usage_amount = session.query(func.count(distinct(ShopUser.id))).filter(
# and_(
# *[ShopUser.phonenum != name for name in TEST_USER],
# or_(
# *[ShopUser.phonenum == name for name in old_users_list],
# ShopUser.access_times > 0,
# ))
# ).scalar()
#
# statistics_users_obj= session.query(ShopUser).filter(
# and_(*[ShopUser.phonenum != name for name in TEST_USER],
# or_(
# *[ShopUser.phonenum == name for name in old_users_list],
# ShopUser.access_times > 0,
# ))
# ).all()
#
# # for statistics_obj in statistics_users_obj:
# # print(statistics_obj.id, type(statistics_obj.access_times))
# # print("!!!!!", usage_amount)
#
# day_time = datetime.date.today()
#
# today_usage_amount = session.query(func.count(distinct(ShopUser.id))).filter(
# *[ShopUser.phonenum != name for name in TEST_USER],
# ShopUser.last_access_time > day_time
# ).scalar()
#
# print(">>>", today_usage_amount)
#
# today_usage_amount = session.query(ShopUser).filter(
# *[ShopUser.phonenum != name for name in TEST_USER],
# ShopUser.last_access_time > day_time
# ).all()
#
# for i in today_usage_amount:
# print("!!!", i.last_access_time)
#
#
# def test_about_or():
# session = conn_db()
# TEST_USER = ["15600803270"]
# utc_time = datetime.datetime.utcnow()
#
# # internal_user_amount = session.query(func.count(ShopMember.id)).filter(
# # ShopMember.senior_expire_time >= utc_time + timedelta(days=100*12*30),
# # ).scalar()
#
#
# internal_user_amount = session.query(ShopMember.id).filter(
# ShopMember.senior_expire_time >= utc_time + timedelta(days=1 * 12 * 30),
# ).join(ShopUser).filter(
# or_(*[ShopUser.phonenum == name for name in TEST_USER])
# ).scalar()
#
# member_list_obj = session.query(ShopMember).filter(
# ShopMember.senior_expire_time >= utc_time + timedelta(days=130 * 12 * 30)
# ).all()
#
# uid_list = []
# for member_obj in member_list_obj:
# uid_list.append(member_obj.id)
#
# user_list_obj = session.query(ShopUser).filter(
# or_(
# *[ShopUser.phonenum == name for name in TEST_USER],
# *[ShopUser.id == id for id in uid_list],
# )
# ).all()
#
#
# # print("!!!", user_list_obj)
#
# for i in user_list_obj:
# print(">>> ", i)
#
# # internal_user_amount = session.query(ShopMember).filter(
# # ShopMember.senior_expire_time > utc_time + timedelta(days=10*12*30),
# # ).all()
# #
# # for i in internal_user_amount:
# # print(i.uid, i.senior_expire_time)
#
# print("count", internal_user_amount)
#
# def tog():
# session = conn_db()
# TEST_USER = ["15600803270"]
# utc_time = datetime.datetime.utcnow()
#
# user_list_obj = session.query(ShopUser).filter(
# *[ShopUser.phonenum != name for name in TEST_USER]
# ).join(ShopMember).filter(
# ShopMember.senior_expire_time >= utc_time + timedelta(days=30 * 12 * 100),
# ).order_by(ShopUser.id.desc())[0:10]
#
# for i in user_list_obj:
# print(i.phonenum)
def show_all_data():
    """Print intranet, non-deleted uploads that are more than one day old (UTC)."""
    session = conn_db()
    records = session.query(UploadFiles).filter(
        UploadFiles.is_intranet == True,
        UploadFiles.is_delete == False,
    ).all()
    now_utc = datetime.datetime.utcnow()
    for record in records:
        if now_utc - record.upload_time > timedelta(days=1):
            print(f"file name: {record.filename} Time: {record.upload_time}")
def show_desc_data():
    """Print intranet, non-deleted uploads, newest id first."""
    session = conn_db()
    records = session.query(UploadFiles).filter(
        UploadFiles.is_intranet == True,
        UploadFiles.is_delete == False,
    ).order_by(UploadFiles.id.desc()).all()
    # Kept for parity with the original, which computed this for a now-disabled
    # age filter; it is not used by the live code path.
    utc_time = datetime.datetime.utcnow()
    for record in records:
        print(f"file name: {record.filename} Time: {record.upload_time}")
if __name__ == "__main__":
    PAGE_LIMIT = 12
    # Earlier experiments (pagination, joins, averages, slicing, time helpers)
    # are preserved in the commented block above; only the descending listing
    # of uploads is exercised when the module is run directly.
    show_desc_data()
e90aae947ee6b59303ae1471afa7007b7d9e535a | 4,490 | py | Python | test/orb.py | Tythos/oyb | 0653c4fa24c73f4f2cb2d1c1a29d318f6e9cbd79 | [
"MIT"
] | 1 | 2017-08-05T16:16:32.000Z | 2017-08-05T16:16:32.000Z | test/orb.py | Tythos/oyb | 0653c4fa24c73f4f2cb2d1c1a29d318f6e9cbd79 | [
"MIT"
] | null | null | null | test/orb.py | Tythos/oyb | 0653c4fa24c73f4f2cb2d1c1a29d318f6e9cbd79 | [
"MIT"
] | null | null | null | """
"""
import datetime
import unittest
import numpy
from math import pi
import oyb
from oyb import earth, anomaly
class ClassTests(unittest.TestCase):
# Construction smoke tests plus worked textbook examples for oyb.Orbit.
def test_default(self):
# Default construction must not raise.
o = oyb.Orbit()
def test_args(self):
# Full keyword construction must not raise.
o = oyb.Orbit(a_m=1.064e7, e=0.42607, i_rad=39.687*pi/180, O_rad=130.32*pi/180, w_rad=42.373*pi/180, M_rad=4.2866)
def test_example4p3(self):
# Orbit determination from an ECI state vector; expected values are the
# textbook's (Curtis example 4.3), checked to 0.1% relative tolerance.
rEci_m = numpy.array([-6.045e6, -3.490e6, 2.5e6])
vEci_mps = numpy.array([-3.457e3, 6.618e3, 2.533e3])
o = oyb.Orbit.fromRV(rEci_m, vEci_mps)
h_m2ps = o.getAngMom()
tht_rad = anomaly.mean2true(o.M_rad, o.e)
T_s = o.getPeriod()
self.assertTrue(abs(h_m2ps - 5.831e10) / h_m2ps < 1e-3)
self.assertTrue(abs(o.i_rad - 153.2 * pi / 180) / o.i_rad < 1e-3)
self.assertTrue(abs(o.O_rad - 255.3 * pi / 180) / o.O_rad < 1e-3)
self.assertTrue(abs(o.e - 0.1712) / o.e < 1e-3)
self.assertTrue(abs(o.w_rad - 20.07 * pi / 180) / o.w_rad < 1e-3)
self.assertTrue(abs(tht_rad - 28.45 * pi / 180) / tht_rad < 1e-3)
self.assertTrue(abs(T_s - 2.278 * 3600) / T_s < 1e-3)
def test_example2p8(self):
# Orbit from two altitude/true-anomaly pairs (Curtis example 2.8).
o = oyb.Orbit.fromHTht(1.545e6, 126 * pi / 180, 8.52e5, 58 * pi / 180)
hPer_m, hApo_m = o.getShape()
T_s = o.getPeriod()
self.assertTrue(abs(o.a_m - 7.593e6) / o.a_m < 1e-3)
self.assertTrue(abs(o.e - 0.08164) / o.e < 1e-3)
self.assertTrue(abs(hPer_m - 5.955e5) / hPer_m < 1e-3)
self.assertTrue(abs(T_s - 1.829 * 3600) / T_s < 1e-3)
class FrameTests(unittest.TestCase):
# Smoke tests for frame transformations (no numeric assertions; these only
# verify the calls complete without raising).
def test_pqw(self):
# Perifocal (PQW) position from mean anomaly.
o = oyb.Orbit(e=0.5, M_rad=0.5*pi)
rPqw_m = o.getRpqw()
def test_example4p7mod(self):
# ECI position from classical elements (modified Curtis example 4.7);
# semi-major axis is back-computed from a target angular momentum.
e = 0.4
a_m = 8e10 / (earth.mu_m3ps2 * (1 - e**2))
M_rad = anomaly.true2mean(30 * pi / 180, e)
o = oyb.Orbit(a_m=a_m, e=e, i_rad=30*pi/180, O_rad=40*pi/180, w_rad=60*pi/180, M_rad=M_rad)
rEci_m = o.getReci()
class J2Tests(unittest.TestCase):
# J2-perturbed mean-element behaviors: nodal/apsidal drift rates and the
# sun-synchronous / frozen-perigee constructors (textbook reference values,
# 0.1% relative tolerance throughout).
def test_raan(self):
# RAAN drift rate converted to deg/day for an ISS-like orbit.
o = oyb.MeanJ2(a_m=6.718e6, e=8.931e-3, i_rad=51.43*pi/180)
dRaan_degpday = o.getRaanRate() * 180/pi * 86400
self.assertTrue(abs(dRaan_degpday - 5.181) / dRaan_degpday < 1e-3)
def test_aop(self):
# Argument-of-perigee drift rate in deg/day for the same orbit.
o = oyb.MeanJ2(a_m=6.718e6, e=8.931e-3, i_rad=51.43*pi/180)
dAop_degpday = o.getAopRate() * 180/pi * 86400
self.assertTrue(abs(dAop_degpday - 3.920) / dAop_degpday < 1e-3)
def test_example4p9(self):
# Sun-synchronous orbit from a 100-minute period (Curtis example 4.9).
o = oyb.MeanJ2.fromSunSync(100 * 60)
self.assertTrue(abs(o.a_m - (7.5863e5 + earth.eqRad_m)) / o.a_m < 1e-3)
self.assertTrue(abs(o.i_rad - 98.43 * pi / 180) / o.i_rad < 1e-3)
def test_example4p10(self):
# Constant-AoP ("frozen") orbit from a 3-hour period (Curtis example 4.10).
o = oyb.MeanJ2.fromConstAop(3 * 3600)
shape = o.getShape()
self.assertTrue(abs(shape[0] - 5.215e5) / shape[0] < 1e-3)
self.assertTrue(abs(shape[1] - 7.842e6) / shape[1] < 1e-3)
def test_example4p11(self):
# Four-day J2 propagation from a state vector (Curtis example 4.11);
# the relative position error against the textbook answer must be <0.1%.
rEci_m = numpy.array([-3.67e6, -3.87e6, 4.4e6])
vEci_mps = numpy.array([4.7e3, -7.4e3, 1e3])
o = oyb.MeanJ2.fromRV(rEci_m, vEci_mps)
rEciNew_m = o.getReci(o.tEpoch_dt + datetime.timedelta(4))
rNew_m = rEciNew_m.dot(rEciNew_m)**0.5
drEci_m = rEciNew_m - numpy.array([9.672e6, 4.32e6, -8.691e6])
self.assertTrue(drEci_m.dot(drEci_m)**0.5 / rNew_m < 1e-3)
class PropertyTests(unittest.TestCase):
    # Derived-property checks for a single 400 x 4000 km orbit shared by all
    # tests via setUp().
    def setUp(self):
        # Build the reference orbit from perigee/apogee altitudes in metres.
        hPer_km = 400
        hApo_km = 4000
        self.o = oyb.Orbit()
        self.o.setShape(1e3 * hPer_km, 1e3 * hApo_km)
    def test_a(self):
        # Eccentricity.
        self.assertTrue(abs(self.o.e - 0.2098) / self.o.e < 1e-3)
    def test_b(self):
        # Specific angular momentum [m^2/s].
        h_m2ps = self.o.getAngMom()
        self.assertTrue(abs(h_m2ps - 5.7172e10) / h_m2ps < 1e-3)
    def test_cd(self):
        # Speeds at perigee and apogee.
        vPer_mps, vApo_mps = self.o.getShapeVel()
        self.assertTrue(abs(vPer_mps - 8.435e3) / vPer_mps < 1e-3)
        self.assertTrue(abs(vApo_mps - 5.509e3) / vApo_mps < 1e-3)
    def test_e(self):
        # Semi-major axis.
        self.assertTrue(abs(self.o.a_m - 8.578e6) / self.o.a_m < 1e-3)
    def test_f(self):
        # Orbital period.
        T_s = self.o.getPeriod()
        self.assertTrue(abs(T_s - 2.196 * 3600) / T_s < 1e-3)
    def test_g(self):
        # Radius at the true-anomaly-averaged point.
        rTaa_m = self.o.getTaaRad()
        self.assertTrue(abs(rTaa_m - 8.387e6) / rTaa_m < 1e-3)
if __name__ == '__main__':
    # Run the full suite when this module is executed directly.
    unittest.main()
| 38.376068 | 122 | 0.57951 |
e90be1f05a4443793696e6d766c9b0e422e47832 | 11,656 | py | Python | src/python/WMComponent/JobArchiver/JobArchiverPoller.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 1 | 2015-02-05T13:43:46.000Z | 2015-02-05T13:43:46.000Z | src/python/WMComponent/JobArchiver/JobArchiverPoller.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 1 | 2016-10-13T14:57:35.000Z | 2016-10-13T14:57:35.000Z | src/python/WMComponent/JobArchiver/JobArchiverPoller.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
The actual jobArchiver algorithm
"""
import logging
import os
import os.path
import shutil
import tarfile
import threading
from Utils.IteratorTools import grouper
from Utils.Timers import timeFunction
from WMCore.DAOFactory import DAOFactory
from WMCore.JobStateMachine.ChangeState import ChangeState
from WMCore.Services.ReqMgrAux.ReqMgrAux import isDrainMode
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Job import Job
from WMCore.WMException import WMException
from WMCore.WorkQueue.WorkQueueExceptions import WorkQueueNoMatchingElements
from WMCore.WorkQueue.WorkQueueUtils import queueFromConfig
from WMCore.WorkerThreads.BaseWorkerThread import BaseWorkerThread
class JobArchiverPollerException(WMException):
    """
    _JobArchiverPollerException_

    The Exception handler for the job archiver.
    Raised for unrecoverable errors inside JobArchiverPoller (bad logDir,
    tarball failures, cache-dir removal failures).
    """
class JobArchiverPoller(BaseWorkerThread):
    """
    Polls for Error Conditions, handles them

    Each polling cycle: moves finished jobs (success / exhausted / killed)
    into the 'cleanout' state while tarring up and deleting their cache
    directories, closes closable filesets, and marks fully-injected
    workflows as injected.
    """

    def __init__(self, config):
        """
        Initialise class members
        """
        BaseWorkerThread.__init__(self)
        self.config = config
        self.changeState = ChangeState(self.config)
        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        self.loadAction = self.daoFactory(classname="Jobs.LoadFromIDWithWorkflow")
        # Variables
        # numberOfJobsToCluster: jobs per on-disk JobCluster_<n> directory;
        # numberOfJobsToArchive: per-state DB fetch limit per cycle.
        self.numberOfJobsToCluster = getattr(self.config.JobArchiver,
                                             "numberOfJobsToCluster", 1000)
        self.numberOfJobsToArchive = getattr(self.config.JobArchiver,
                                             "numberOfJobsToArchive", 10000)
        try:
            self.logDir = getattr(config.JobArchiver, 'logDir',
                                  os.path.join(config.JobArchiver.componentDir, 'logDir'))
            if not os.path.isdir(self.logDir):
                os.makedirs(self.logDir)
        except Exception as ex:
            msg = "Unhandled exception while setting up logDir!\n"
            msg += str(ex)
            logging.exception(msg)
            raise JobArchiverPollerException(msg)
        self.tier0Mode = hasattr(config, "Tier0Feeder")
        try:
            if not self.tier0Mode:
                self.workQueue = queueFromConfig(self.config)
        except Exception as ex:
            # Deliberately non-fatal: without a workqueue, markInjected()
            # simply has nothing to check.
            msg = "Could not load workQueue"
            msg += str(ex)
            logging.error(msg)
            # raise JobArchiverPollerException(msg)
        return

    def setup(self, parameters):
        """
        Load DB objects required for queries
        """
        return

    def terminate(self, params):
        """
        _terminate_

        This function terminates the job after a final pass
        """
        logging.debug("terminating. doing one more pass before we die")
        self.algorithm(params)
        return

    @timeFunction
    def algorithm(self, parameters=None):
        """
        Performs the archiveJobs method, looking for each type of failure
        And deal with it as desired.
        """
        try:
            self.archiveJobs()
            self.pollForClosable()
            self.markInjected()
        except WMException:
            # Roll back any open transaction before re-raising WMCore errors.
            myThread = threading.currentThread()
            if getattr(myThread, 'transaction', None) is not None \
                    and getattr(myThread.transaction, 'transaction', None) is not None:
                myThread.transaction.rollback()
            raise
        except Exception as ex:
            # Wrap unexpected errors in the component exception type after
            # rolling back, so the harness sees a JobArchiver failure.
            myThread = threading.currentThread()
            msg = "Caught exception in JobArchiver\n"
            msg += str(ex)
            msg += "\n\n"
            if getattr(myThread, 'transaction', None) is not None \
                    and getattr(myThread.transaction, 'transaction', None) is not None:
                myThread.transaction.rollback()
            raise JobArchiverPollerException(msg)
        return

    def archiveJobs(self):
        """
        _archiveJobs_

        archiveJobs will handle the master task of looking for finished jobs,
        and running the code that cleans them out.
        """
        doneList = self.findFinishedJobs()
        logging.info("Found %i finished jobs to archive", len(doneList))
        jobCounter = 0
        for slicedList in grouper(doneList, 10000):
            # Remove cache dirs first, then flip job states in one
            # transaction per 10k-job slice.
            self.cleanWorkArea(slicedList)
            successList = []
            failList = []
            killList = []
            for job in slicedList:
                if job["outcome"] == "success":
                    successList.append(job)
                elif job["outcome"] == "killed":
                    killList.append(job)
                else:
                    failList.append(job)
            myThread = threading.currentThread()
            myThread.transaction.begin()
            self.changeState.propagate(successList, "cleanout", "success")
            self.changeState.propagate(failList, "cleanout", "exhausted")
            self.changeState.propagate(killList, "cleanout", "killed")
            myThread.transaction.commit()
            jobCounter += len(slicedList)
            logging.info("Successfully archived %d jobs out of %d.", jobCounter, len(doneList))

    def findFinishedJobs(self):
        """
        _findFinishedJobs_

        Will actually, surprisingly, find finished jobs (i.e., jobs either
        exhausted, successful or killed), capped per state by
        numberOfJobsToArchive, returned as loaded WMBS Job objects.
        """
        jobList = []
        jobListAction = self.daoFactory(classname="Jobs.GetAllJobs")
        jobList1 = jobListAction.execute(state="success", limitRows=self.numberOfJobsToArchive)
        jobList2 = jobListAction.execute(state="exhausted", limitRows=self.numberOfJobsToArchive)
        jobList3 = jobListAction.execute(state="killed", limitRows=self.numberOfJobsToArchive)
        jobList.extend(jobList1)
        jobList.extend(jobList2)
        jobList.extend(jobList3)
        if len(jobList) == 0:
            # Then nothing is ready
            return []
        # Put together a list of job IDs
        binds = []
        for jobID in jobList:
            binds.append({"jobid": jobID})
        results = self.loadAction.execute(jobID=binds)
        if not isinstance(results, list):
            results = [results]
        doneList = []
        for entry in results:
            # One job per entry
            tmpJob = Job(id=entry['id'])
            tmpJob.update(entry)
            doneList.append(tmpJob)
        return doneList

    def cleanWorkArea(self, doneList):
        """
        _cleanWorkArea_

        Upon workQueue realizing that a subscriptions is done, everything
        regarding those jobs is cleaned up.
        """
        for job in doneList:
            # print "About to clean cache for job %i" % (job['id'])
            self.cleanJobCache(job)
        return

    def cleanJobCache(self, job):
        """
        _cleanJobCache_

        Clears out any files still sticking around in the jobCache,
        tars up the contents and sends them off
        """
        cacheDir = job['cache_dir']
        if not cacheDir or not os.path.isdir(cacheDir):
            # Nothing to archive; log and move on rather than failing the cycle.
            msg = "Could not find jobCacheDir %s" % (cacheDir)
            logging.error(msg)
            return
        cacheDirList = os.listdir(cacheDir)
        if cacheDirList == []:
            os.rmdir(cacheDir)
            return
        # Now we need to set up a final destination
        try:
            # Label all directories by workflow
            # Workflow better have a first character
            # Layout: <logDir>/<first-char>/<workflow>/JobCluster_<id/N>/
            workflow = job['workflow']
            firstCharacter = workflow[0]
            jobFolder = 'JobCluster_%i' \
                        % (int(job['id'] / self.numberOfJobsToCluster))
            logDir = os.path.join(self.logDir, firstCharacter,
                                  workflow, jobFolder)
            if not os.path.exists(logDir):
                os.makedirs(logDir)
        except Exception as ex:
            msg = "Exception while trying to make output logDir\n"
            msg += str("logDir: %s\n" % (logDir))
            msg += str(ex)
            logging.error(msg)
            raise JobArchiverPollerException(msg)
        # Otherwise we have something in there
        try:
            tarName = 'Job_%i.tar.bz2' % (job['id'])
            with tarfile.open(name=os.path.join(logDir, tarName), mode='w:bz2') as tarball:
                for fileName in cacheDirList:
                    fullFile = os.path.join(cacheDir, fileName)
                    try:
                        tarball.add(name=fullFile, arcname='Job_%i/%s' % (job['id'], fileName))
                    except IOError:
                        # Unreadable files are skipped so one bad file does not
                        # lose the whole job archive.
                        logging.error('Cannot read %s, skipping', fullFile)
        except Exception as ex:
            msg = "Exception while opening and adding to a tarfile\n"
            msg += "Tarfile: %s\n" % os.path.join(logDir, tarName)
            msg += str(ex)
            logging.error(msg)
            logging.debug("cacheDirList: %s", cacheDirList)
            raise JobArchiverPollerException(msg)
        try:
            shutil.rmtree('%s' % (cacheDir), ignore_errors=True)
        except Exception as ex:
            msg = "Error while removing the old cache dir.\n"
            msg += "CacheDir: %s\n" % cacheDir
            msg += str(ex)
            logging.error(msg)
            raise JobArchiverPollerException(msg)
        return

    def markInjected(self):
        """
        _markInjected_

        Mark any workflows that have been fully injected as injected
        """
        if self.tier0Mode:
            logging.debug("Component will not check workflows for injection status")
            return
        myThread = threading.currentThread()
        getAction = self.daoFactory(classname="Workflow.GetInjectedWorkflows")
        markAction = self.daoFactory(classname="Workflow.MarkInjectedWorkflows")
        result = getAction.execute()
        # Check each result to see if it is injected:
        injected = []
        for name in result:
            try:
                if self.workQueue.getWMBSInjectionStatus(name, isDrainMode(self.config)):
                    injected.append(name)
            except WorkQueueNoMatchingElements:
                # workflow not known - free to cleanup
                injected.append(name)
            except Exception as ex:
                logging.exception("Injection status checking failed, investigate: %s", str(ex))
        logging.info("Found %d workflows to mark as injected", len(injected))
        # Now, mark as injected those that returned True
        if len(injected) > 0:
            myThread.transaction.begin()
            markAction.execute(names=injected, injected=True)
            myThread.transaction.commit()
        return

    def pollForClosable(self):
        """
        _pollForClosable_

        Search WMBS for filesets that can be closed and mark them as closed.
        """
        myThread = threading.currentThread()
        myThread.transaction.begin()
        closableFilesetDAO = self.daoFactory(classname="Fileset.ListClosable")
        closableFilesets = closableFilesetDAO.execute()
        logging.info("Found %d filesets to be closed", len(closableFilesets))
        for closableFileset in closableFilesets:
            openFileset = Fileset(id=closableFileset)
            openFileset.load()
            logging.debug("Closing fileset %s", openFileset.name)
            openFileset.markOpen(False)
        myThread.transaction.commit()
| 34.081871 | 99 | 0.591455 |
e90c4cbbc181386f952de337a177c966ed5084da | 1,795 | py | Python | src/aqm/run_create_views.py | layadelcarmen/aqm | ee300b0064b78d73e2020c1e50300e65a7fb727e | [
"MIT"
] | null | null | null | src/aqm/run_create_views.py | layadelcarmen/aqm | ee300b0064b78d73e2020c1e50300e65a7fb727e | [
"MIT"
] | null | null | null | src/aqm/run_create_views.py | layadelcarmen/aqm | ee300b0064b78d73e2020c1e50300e65a7fb727e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import psycopg2
def executeScriptsFromFile(filename, cursor):
    '''Execute SQL create views

    Reads `filename`, splits it on ';' and executes each statement with
    `cursor`, printing each statement first.  A failing statement is
    reported ("Command skipped") and execution continues.
    '''
    # Context manager guarantees the file handle is closed even if read fails
    # (the original leaked the handle on an exception between open and close).
    with open(filename, 'r') as fd:
        sqlFile = fd.read()
    sqlCommands = sqlFile.split(';')
    for command in sqlCommands:
        try:
            print(command)
            cursor.execute(command)
        except psycopg2.Error as e:
            # Best-effort mode: report and keep running the remaining commands.
            print("Command skipped: ", e.pgerror)
def main():
    """Parse CLI connection options and run the ';'-separated SQL script
    against the configured PostgreSQL database."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file-script-sql",
                        help="File with SQL comands ; separated")
    parser.add_argument("-u", "--db-user",
                        default="postgres",
                        help="DB user")
    parser.add_argument("-a", "--db-password",
                        default="postgres",
                        help="DB password")
    parser.add_argument("-s", "--db-host",
                        default="localhost",
                        help="Host")
    parser.add_argument("-p", "--db-port",
                        default="5432",
                        help="Connexion port")
    parser.add_argument("-d", "--db-name",
                        default="Tests",
                        help="DB name")
    args = parser.parse_args()
    conn = psycopg2.connect(
        database=args.db_name, user=args.db_user, password=args.db_password, host=args.db_host, port=int(args.db_port)
    )
    # With autocommit on, each statement takes effect immediately; the later
    # conn.commit() is then a harmless no-op.
    conn.autocommit = True
    cursor_c = conn.cursor()
    executeScriptsFromFile(args.file_script_sql, cursor_c)
    conn.commit()
    conn.close()
if __name__ == '__main__':
    # CLI entry point.
    main()
e90c9d97a14172ce328d8d9a5973b099b111668f | 5,127 | py | Python | mnist/mnist_dist.py | vibhatha/PytorchExamples | df356f120d6eef69a94586af93bff75af307582d | [
"Apache-2.0"
] | 3 | 2021-04-11T05:09:00.000Z | 2021-08-11T09:58:53.000Z | mnist/mnist_dist.py | vibhatha/PytorchExamples | df356f120d6eef69a94586af93bff75af307582d | [
"Apache-2.0"
] | 4 | 2021-03-12T21:51:01.000Z | 2021-03-14T16:03:13.000Z | mnist/mnist_dist.py | vibhatha/PytorchExamples | df356f120d6eef69a94586af93bff75af307582d | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import argparse
from math import ceil
from random import Random
from socket import socket
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import os
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import numpy as np
class Net(nn.Module):
    """Small CNN for MNIST: two conv layers, dropout, two fully-connected
    layers, log-softmax output over the 10 digit classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Layer names/order kept stable so checkpoints remain compatible.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # Convolutional feature extractor: 28x28 -> 26x26 -> 24x24 -> 12x12.
        features = F.relu(self.conv1(x))
        features = F.max_pool2d(self.conv2(features), 2)
        features = self.dropout1(features)
        # Classifier head on the flattened 64*12*12 = 9216 features.
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
""" Dataset partitioning helper """
class Partition(object):
    """A read-only view of `data` restricted to the positions in `index`."""

    def __init__(self, data, index):
        self.data = data
        self.index = index

    def __len__(self):
        return len(self.index)

    def __getitem__(self, index):
        # Map the partition-local position onto the underlying dataset index.
        return self.data[self.index[index]]
class DataPartitioner(object):
    """Splits a dataset into reproducible random partitions of given fractions."""

    def __init__(self, data, sizes=[0.7, 0.2, 0.1], seed=1234):
        self.data = data
        self.partitions = []
        # Seeded RNG makes every rank compute the same shuffle.
        rng = Random()
        rng.seed(seed)
        remaining = [pos for pos in range(len(data))]
        rng.shuffle(remaining)
        # Peel off a contiguous slice of the shuffled indices per fraction.
        for frac in sizes:
            count = int(frac * len(data))
            self.partitions.append(remaining[0:count])
            remaining = remaining[count:]

    def use(self, partition):
        # Wrap the chosen index list in a Partition view over the data.
        return Partition(self.data, self.partitions[partition])
""" Partitioning MNIST """
def partition_dataset():
    """Download MNIST (train split) and return this rank's DataLoader shard.

    The global batch of 128 is divided by the world size so the effective
    batch stays constant regardless of rank count."""
    print("Data Loading")
    dataset = datasets.MNIST('./data', train=True, download=True,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.1307,), (0.3081,))
                             ]))
    #print(type(dataset), dataset.data)
    size = dist.get_world_size()
    bsz = int(128 / float(size))
    # Equal fractions per rank; the seeded partitioner gives every rank the
    # same shuffle, so shards are disjoint.
    partition_sizes = [1.0 / size for _ in range(size)]
    print("Partition Sizes {}".format(partition_sizes))
    partition = DataPartitioner(dataset, partition_sizes)
    partition = partition.use(dist.get_rank())
    train_set = torch.utils.data.DataLoader(partition,
                                            batch_size=bsz,
                                            shuffle=True)
    return train_set, bsz
""" Gradient averaging. """
def average_gradients(model):
    """Average every parameter gradient across all ranks (synchronous SGD).

    Must be called after backward() and before optimizer.step() on every rank.
    """
    size = float(dist.get_world_size())
    for param in model.parameters():
        # dist.reduce_op was a long-deprecated alias removed from recent
        # PyTorch releases; ReduceOp is the supported spelling.
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
        param.grad.data /= size
""" Distributed Synchronous SGD Example """
def run(rank, size):
    """Train the CNN for 10 epochs with synchronous SGD across `size` ranks."""
    if (rank == 0):
        print("Run Fn")
    torch.manual_seed(1234)
    train_set, bsz = partition_dataset()
    print("Data Points Per Rank {} of Size {}".format(len(train_set.dataset), size))
    model = Net()
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01, momentum=0.5)
    num_batches = ceil(len(train_set.dataset) / float(bsz))
    if (rank == 0):
        print("Started Training")
    total_data = len(train_set)
    epochs = 10
    total_steps = epochs * total_data
    for epoch in range(10):
        epoch_loss = 0.0
        count = 0
        for data, target in train_set:
            # print(
            #     "Data Size {}({},{}) of Rank {} : target {}, {}".format(data.shape, (data[0].numpy().dtype), type(data),
            #                                                             rank, target, len(target)))
            #print(data[0],target[0])
            count = count + 1
            # NOTE(review): `count` resets every epoch while total_steps spans
            # all epochs, so this progress figure never reaches 100% -- verify
            # whether a running step counter was intended.
            result = '{0:.4g}'.format((count / float(total_steps)) * 100.0)
            print("Progress {}% \r".format(result), end='\r')
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            epoch_loss += loss.item()
            #print(epoch_loss)
            loss.backward()
            # All-reduce the gradients so every rank applies the same update.
            average_gradients(model)
            optimizer.step()
        if (rank == 0):
            print('Rank ', dist.get_rank(), ', epoch ',
                  epoch, ': ', epoch_loss / num_batches)
def init_processes(rank, size, fn, backend='tcp'):
    """ Initialize the distributed environment. """
    # NOTE(review): the 'tcp' default is a legacy backend name and may be
    # rejected by recent torch.distributed; callers below pass 'mpi'.
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size)
if __name__ == "__main__":
    # Rank and world size come from Open MPI's environment (launch via mpirun).
    world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
    world_rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    print(world_rank, world_size)
    init_processes(world_rank, world_size, run, backend='mpi')
| 29.635838 | 122 | 0.577921 |
e90e71723fb83c3e9db45cf94c16cac0b3962eb2 | 1,218 | py | Python | common/es/one_scripts.py | ltxhh/course | 45c8e4e436d9f20effccc7ed0844dfd07d8348b1 | [
"Apache-2.0"
] | null | null | null | common/es/one_scripts.py | ltxhh/course | 45c8e4e436d9f20effccc7ed0844dfd07d8348b1 | [
"Apache-2.0"
] | null | null | null | common/es/one_scripts.py | ltxhh/course | 45c8e4e436d9f20effccc7ed0844dfd07d8348b1 | [
"Apache-2.0"
] | null | null | null | # -*- codeing = utf-8 -*-
# @Time : 2022/4/12 13:43
# @Author : linyaxuan
# @File : one_scripts.py
# @Software : PyCharm
"""
将数据库数据导入es
"""
import pymysql
import traceback
from elasticsearch import Elasticsearch
def get_db_data():
    """Return every row of tb_course from the local MySQL database."""
    # Open the connection (host / user / password / database).
    # Bug fix: pymysql takes the port as a separate keyword; embedding it in
    # the host string ("127.0.0.1:3306") makes the hostname lookup fail.
    db = pymysql.connect(host="127.0.0.1", port=3306, user="root", password="linyaxuan666",
                         database="course", charset='utf8')
    # Create a cursor object for executing the query.
    cursor = db.cursor()
    sql = "SELECT * FROM tb_course"
    cursor.execute(sql)
    # Fetch all rows before closing the connection.
    results = cursor.fetchall()
    db.close()
    return results
def insert_data_to_es():
    """Recreate the 'course' index and index id/title/desc for each MySQL row."""
    es = Elasticsearch("http://47.94.58.100:9200/")
    # NOTE(review): delete() raises if the index does not exist yet -- verify
    # against the target cluster or ignore 404s explicitly.
    es.indices.delete(index='course')
    try:
        i = -1
        for row in get_db_data():
            print(row)
            print(row[1], row[2])
            i += 1
            es.index(index='course', body={
                'id': i,
                'title': row[1],
                'desc': row[2],
            })
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; failures are still reported with a traceback.
        error = traceback.format_exc()
        print("Error: unable to fecth data", error)
if __name__ == "__main__":
    # Script entry point: mirror the MySQL table into Elasticsearch.
    insert_data_to_es()
e90efb1a09f98ac8736cd0e451c9792361d21cef | 1,048 | py | Python | core/services/tests.py | ojengwa/reportr | ca66184e2b7f28cb3818bb5d3a931232566bff99 | [
"MIT"
] | 2 | 2015-01-26T14:43:20.000Z | 2015-02-13T10:18:44.000Z | core/services/tests.py | ojengwa/reportr | ca66184e2b7f28cb3818bb5d3a931232566bff99 | [
"MIT"
] | null | null | null | core/services/tests.py | ojengwa/reportr | ca66184e2b7f28cb3818bb5d3a931232566bff99 | [
"MIT"
] | null | null | null | from django.test import TestCase
from core.services.freckle import Freckle
from datetime import datetime
class TestServices(TestCase):
    """docstring for TestServices"""
    # Integration tests hitting the live Freckle API for the 'andela' account.
    # WARNING(review): a live API token is hard-coded below -- rotate it and
    # load credentials from the environment rather than source control.
    def setUp(self):
        # One client shared per test: users, projects, entries over a fixed
        # date window, and the aggregated per-project times.
        self.client = Freckle('8zh8ny1wlym4ljyi68p0je410s1aj8b', 'andela')
        self.users = self.client.get_users()
        self.projects = self.client.get_projects()
        start_date = '2014-10-27'
        end_date = '2015-02-27'
        self.entries = self.client.get_entries(self.projects, start_date, end_date)
        self.project_times = self.client.get_project_times(self.projects, self.entries)
    def tearDown(self):
        pass
    def test_freckle_get_users(self):
        assert 'christina@andela.co' in self.users[0]['user']['email']
    def test_freckle_get_projects(self):
        assert 'Udacity' in self.projects[0]['project']['name']
    def test_freckle_get_entries(self):
        assert 'Nadayar' in self.entries[0]['entry']['user_name']
    def test_freckle_get_project_times(self):
        # NOTE(review): Python 2 print statement and no assertion -- this test
        # only dumps the computed times.
        print self.project_times
| 32.75 | 87 | 0.69084 |
e910c8bb7a93d643dfe5883064380eb1ced0913d | 1,343 | py | Python | doubleRedirect.py | ebraminio/DeltaBot | 14d427ca644c4e842f72802a0e07adcaecda7097 | [
"CC0-1.0"
] | 10 | 2016-08-09T21:28:27.000Z | 2021-12-23T17:22:04.000Z | doubleRedirect.py | ebraminio/DeltaBot | 14d427ca644c4e842f72802a0e07adcaecda7097 | [
"CC0-1.0"
] | 9 | 2016-12-31T10:48:11.000Z | 2020-07-22T20:52:06.000Z | doubleRedirect.py | ebraminio/DeltaBot | 14d427ca644c4e842f72802a0e07adcaecda7097 | [
"CC0-1.0"
] | 11 | 2017-01-24T15:51:57.000Z | 2022-02-10T00:35:18.000Z | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# licensed under CC-Zero: https://creativecommons.org/publicdomain/zero/1.0
import pywikibot
from pywikibot.data import api
import re
# Module-level session: logging in at import time keeps the helpers below
# simple, but it means importing this module has network side effects.
site = pywikibot.Site('wikidata', 'wikidata')
site.login()
repo = site.data_repository()
def redirect(fromId, toId):
    """Turn Wikidata item `fromId` into a redirect pointing at item `toId`."""
    # get token
    params = {
        'action': 'query',
        'meta': 'tokens'
    }
    req = api.Request(site=site, **params)
    data = req.submit()
    # create redirect
    params3 = {
        'action': 'wbcreateredirect',
        'from': fromId,
        'to': toId,
        'bot': 1,
        'token': data['query']['tokens']['csrftoken']
    }
    req3 = api.Request(site=site, **params3)
    # Response kept for parity with the token request; its content is unused.
    data3 = req3.submit()
def main():
    """Scan the DoubleRedirects special page and collapse each chain
    A -> B -> C into a direct redirect A -> C (main namespace only)."""
    params = {
        'action': 'query',
        'list': 'querypage',
        'qppage': 'DoubleRedirects',
        'qplimit': 5000
    }
    req = api.Request(site=site, **params)
    data = req.submit()
    for m in data['query']['querypage']['results']:
        try:
            if m['ns'] == 0:
                item1 = pywikibot.ItemPage(repo, m['title'])
                item2 = item1.getRedirectTarget().getRedirectTarget().getID()
                redirect(m['title'], item2)
        except Exception:
            # Best-effort: skip items that fail to resolve, but no longer
            # swallow KeyboardInterrupt/SystemExit as the bare `except:` did.
            pass
if __name__ == "__main__":
    # Script entry point.
    main()
| 24.87037 | 78 | 0.527923 |
e910d43c7dfc7b7187050331c0e77e2a387d6e5f | 271 | py | Python | Dataset/Leetcode/train/66/44.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/66/44.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/66/44.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, digits: List[int]) -> List[int]:
num = 0
ss = []
for i in range(len(digits)):
num += digits[i]*(pow(10,len(digits)-i-1))
for i in str(num+1):
ss.append(int(i))
return ss
| 24.636364 | 54 | 0.472325 |
e912162e98a071826f62ad98af891120325dbaa8 | 2,327 | py | Python | lab1/domanda3.py | D33pBlue/Algoritmi | 76088f40b49d51416919515a274ba076ae6a197e | [
"MIT"
] | null | null | null | lab1/domanda3.py | D33pBlue/Algoritmi | 76088f40b49d51416919515a274ba076ae6a197e | [
"MIT"
] | 1 | 2018-03-17T19:15:41.000Z | 2018-03-17T19:18:24.000Z | lab1/domanda3.py | D33pBlue/Algoritmi | 76088f40b49d51416919515a274ba076ae6a197e | [
"MIT"
] | null | null | null | from domanda1 import *
import random
import time
class DPATrial:
def __init__(self,m):
self.numNodes = m
self.nodeNumbers = list()
for i in range(m):
for _ in range(m):
self.nodeNumbers.append(i)
def runTrial(self,m):
V = []
random.shuffle(self.nodeNumbers)
for i in range(m):
u = self.nodeNumbers.pop()
V.append(u)
self.nodeNumbers.append(self.numNodes)
for v in V:
self.nodeNumbers.append(v)
self.numNodes = self.numNodes+1
return V
"""
Implementazione dell'algoritmo DPA per la generazione di un grafo casuale
"""
def DPA_graph(m,n):
    # Start from a complete directed graph on the first m nodes, then add the
    # remaining n-m nodes one at a time, each choosing m out-neighbors with
    # probability proportional to current in-degree (preferential attachment).
    # NOTE(review): runTrial may return the same neighbor more than once, so
    # adjacency lists can contain duplicates -- confirm this is intended.
    graph = dict()
    for v in range(m):
        graph[v] = []
        for u in range(m):
            if u!=v:
                graph[v].append(u)
    trial = DPATrial(m)
    for u in range(m,n):
        V = trial.runTrial(m)
        graph[u] = []
        for v in V:
            graph[u].append(v)
    return graph
"""
Restituisce la distribuzione del grado uscente del grafo, come
dizionario avente per chiavi il grado e per valori la probabilita'
"""
def outdegree_dist(graph):
    """Return the out-degree distribution of `graph` as a dict mapping
    degree -> probability (fraction of vertices with that out-degree)."""
    total = float(len(graph.keys()))
    distribution = dict()
    for node in graph.keys():
        degree = len(graph[node])
        # Accumulate 1/total per vertex at this degree.
        distribution[degree] = distribution.get(degree, 0.0) + 1.0 / total
    return distribution
"""
Genera il grafico delle due distribuzioni, mostrandole assieme per
poterle confrontare
"""
def compare_dists(dist1,dist2):
    # Scatter both degree distributions on shared log-log axes so their tails
    # can be compared visually.  `plt` is assumed to come from
    # `from domanda1 import *` -- TODO confirm domanda1 exposes pyplot.
    xs = dist1.keys()
    ys = [dist1[v] for v in xs]
    plt.xscale('log')
    plt.yscale('log')
    plt.scatter(xs,ys,label="dist1")
    xs = dist2.keys()
    ys = [dist2[v] for v in xs]
    plt.scatter(xs,ys,label="dist2")
    # NOTE(review): labels are set but plt.legend() is never called.
    plt.show()
if __name__ == '__main__':
    # Fit a DPA graph to the citation network: m is the rounded mean
    # out-degree, n the vertex count; then plot the generated in-degree
    # distribution.  (Python 2 print statements kept as-is.)
    graph_cit = load_adj_list('Cit-HepTh.txt',directed=True)
    # inddist1 = indegree_dist(graph_cit)
    outdist1 = outdegree_dist(graph_cit)
    n = len(graph_cit.keys())
    m = 0.0
    # Expected out-degree = sum over degrees of degree * probability.
    for o in outdist1.keys():
        m += o*outdist1[o]
    m = int(round(m))
    print "m=",m,"n=",n
    t = time.time()
    graph_dpa = DPA_graph(m,n)
    print "Grafo generato in",time.time()-t,"s"
    inddist2 = indegree_dist(graph_dpa)
    # compare_dists(inddist1,inddist2)
    plot_dist(inddist2)
| 25.855556 | 73 | 0.588311 |
e914dc16d8c9fee0bbb11e912b41acdddd08ad05 | 1,237 | py | Python | leetcode/permutation.py | huynonstop/solving-everything | 21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39 | [
"MIT"
] | null | null | null | leetcode/permutation.py | huynonstop/solving-everything | 21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39 | [
"MIT"
] | null | null | null | leetcode/permutation.py | huynonstop/solving-everything | 21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
return permute_unique(nums)
# https://leetcode.com/problems/permutations-ii/discuss/18602/9-line-python-solution-with-1-line-to-handle-duplication-beat-99-of-others-%3A-)
def permute_unique(nums):
    """Return all unique permutations of `nums` (which may hold duplicates).

    Sorts `nums` in place so equal values are adjacent, then runs a DFS that
    skips a value already tried at the same recursion depth.
    """
    rs = []
    nums.sort()

    def dfs(left_nums, path):
        # Base case: nothing left to place, `path` is a complete permutation.
        if not left_nums:
            rs.append(path)
            return
        for i in range(len(left_nums)):
            # Bug fix: the duplicate check must compare within the *remaining*
            # elements; comparing against the outer `nums` list wrongly pruned
            # valid branches (e.g. [1,2,1] was lost for input [1,1,2]).
            if i > 0 and left_nums[i] == left_nums[i - 1]:
                continue
            dfs(left_nums[:i] + left_nums[i+1:],
                path + [left_nums[i]])
    dfs(nums, [])
    return rs
def permute_unique(nums):
    """Return all unique permutations of `nums` via backtracking over a
    used[] mask; duplicates are pruned by skipping an element equal to its
    (already used) predecessor at the same position."""
    n = len(nums)
    nums.sort()
    results = []
    used = [False] * n
    current = []

    def backtrack():
        # A complete permutation has been assembled; snapshot it.
        if len(current) == n:
            results.append(current[:])
            return
        for i, value in enumerate(nums):
            if used[i]:
                continue
            # Same pruning rule as before, with the i > 0 guard first so the
            # i == 0 case never inspects used[-1].
            if i > 0 and value == nums[i - 1] and used[i - 1]:
                continue
            used[i] = True
            current.append(value)
            backtrack()
            current.pop()
            used[i] = False

    backtrack()
    return results
# Module-level smoke call (runs on import); consider guarding it with
# `if __name__ == "__main__":`.
permute_unique([1, 1, 2])
| 22.089286 | 142 | 0.501213 |
e91854ceacf0d5585096c1554b24da88785c0f89 | 14,051 | py | Python | labtoys/Tektronix/MSO5x.py | ppudo/labtoys_python | c8ca27637602b8aac0574e92da370a4a97e9fcad | [
"MIT"
] | null | null | null | labtoys/Tektronix/MSO5x.py | ppudo/labtoys_python | c8ca27637602b8aac0574e92da370a4a97e9fcad | [
"MIT"
] | null | null | null | labtoys/Tektronix/MSO5x.py | ppudo/labtoys_python | c8ca27637602b8aac0574e92da370a4a97e9fcad | [
"MIT"
] | null | null | null | #MSO5x.py
# Created on: 2020.11.18
# Author: ppudo
# e-mail: ppudo@outlook.com
#
# Project: labtoys
# Description: Class representing Tektronix MSO5 osciloscope series
#
#
# Changelog:
# -2021.11.18 version: 0.1.0
# - Initial class
#
#----------------------------------------------------------------------------------------------------------------------------------------------------
# Idea and changes proposal:
#
#
# Usefull information and links:
# Programing manual: https://download.tek.com/manual/5-Series-MSO54-MSO56-MSO58-MSO58L-Programmer-Manual_EN-US_077130501.pdf (2021.11.18)
#
from ..scpi import SCPI_Socket
from enum import Enum
class MSO5x:
    def __init__( self, ip, port=4000 ):
        # Open a raw-socket SCPI link to the scope (Tek default port 4000) and
        # throttle writes by 1 ms.  NOTE(review): the `sendDalay` spelling
        # comes from SCPI_Socket -- confirm the attribute name there.
        self.__device = SCPI_Socket( ip, port )
        self.__device.sendDalay = 0.001
#------------------------------------------------------------------------------------------------------------------------------------------------
# BASIC COMMANDS
#------------------------------------------------------------------------------------------------------------------------------------------------
def Clear( self ) -> bool:
self.__device.SendCommand( "CLEAR" ) == 0 #CLEAR
#----------------------------------------------------------------------------------------------
def Autoset( self ) -> bool:
self.__device.SendCommand( "AUTO" ) == 0 #AUTOset
#------------------------------------------------------------------------------------------------------------------------------------------------
# ACQUISITION COMMANDS
#------------------------------------------------------------------------------------------------------------------------------------------------
    class ACQUIRE_STATE( Enum ):
        # Run state accepted/reported by the ACQuire:STATE helpers below.
        RUN = 'RUN'
        STOP = 'STOP'
        ERROR = 'ERROR' #do not use, only for indication of results
#--------------------------------------------
    def SetAcquireState( self, state: ACQUIRE_STATE=ACQUIRE_STATE.RUN ) -> bool:
        # Start or stop waveform acquisition; True when the write succeeded.
        return self.__device.SendCommand( "ACQ:STATE " + state.value ) == 0 #ACQuire:STATE
#--------------------------------------------
def GetAcquireState( self ) -> ACQUIRE_STATE:
ans = self.__device.SendCommandGetAns( "TRIG:A:MOD?" ) #TRIGger:A:MODe?
if( ans == "" ): return self.ACQUIRE_STATE.ERROR
if( ans == '1' ):
return self.ACQUIRE_STATE.RUN
elif( ans == '0' ):
return self.ACQUIRE_STATE.STOP
else:
return self.ACQUIRE_STATE.ERROR
#----------------------------------------------------------------------------------------------
    class ACQUIRE_STOP_AFTER( Enum ):
        # Whether acquisition free-runs or stops after one sequence
        # (ACQuire:STOPAfter).
        RUN_STOP = 'RUNSTOP'
        SEQUENCE = 'SEQUENCE'
        ERROR = 'ERROR' #do not use, only for indication of results
#--------------------------------------------
    def SetAcquireStopAfter( self, mode: ACQUIRE_STOP_AFTER=ACQUIRE_STOP_AFTER.RUN_STOP ) -> bool:
        # Select free-run vs. single-sequence acquisition; True on a clean write.
        return self.__device.SendCommand( "ACQ:STOPA " + mode.value ) == 0 #ACQuire:STOPAfter
#--------------------------------------------
def GetAcquireStopAfter( self ) -> ACQUIRE_STOP_AFTER:
ans = self.__device.SendCommandGetAns( "TRIG:A:MOD?" ) #TRIGger:A:MODe?
if( ans == "" ): return self.ACQUIRE_STOP_AFTER.ERROR
try:
return self.ACQUIRE_STOP_AFTER( ans )
except ValueError:
return self.ACQUIRE_STOP_AFTER.ERROR
#----------------------------------------------------------------------------------------------
    def Single( self ):
        # Arm a one-shot acquisition: stop after one sequence, then run.
        self.SetAcquireStopAfter( self.ACQUIRE_STOP_AFTER.SEQUENCE )
        self.SetAcquireState( self.ACQUIRE_STATE.RUN )
#------------------------------------------------------------------------------------------------------------------------------------------------
# TRIGGER COMMANDS
#------------------------------------------------------------------------------------------------------------------------------------------------
    class TRIGGER_MODE( Enum ):
        # A-trigger mode values for TRIGger:A:MODe.
        AUTO = 'AUTO'
        NORMAL = 'NORMAL'
        ERROR = 'ERROR' #do not use, only for indication of results
#--------------------------------------------
    def SetTrigger_A_Mode( self, mode: TRIGGER_MODE=TRIGGER_MODE.AUTO ) -> bool:
        # Set the A-trigger mode; True when the SCPI write succeeded.
        return self.__device.SendCommand( "TRIG:A:MOD " + mode.value ) == 0 #TRIGger:A:MODe
#--------------------------------------------
    def GetTrigger_A_Mode( self ) -> TRIGGER_MODE:
        # Query the A-trigger mode; ERROR for an empty or unrecognised reply.
        ans = self.__device.SendCommandGetAns( "TRIG:A:MOD?" ) #TRIGger:A:MODe?
        if( ans == "" ): return self.TRIGGER_MODE.ERROR
        try:
            return self.TRIGGER_MODE( ans )
        except ValueError:
            return self.TRIGGER_MODE.ERROR
#----------------------------------------------------------------------------------------------
    class TRIGGER_STATE( Enum ):
        # Trigger subsystem states reported by TRIGger:STATE?.
        ARMED = 'ARMED'
        AUTO = 'AUTO'
        READY = 'READY'
        SAVE = 'SAVE'
        TRIGGER = 'TRIGGER'
        ERROR = 'ERROR' #do not use, only for indication of results
#--------------------------------------------
    def TriggerForce( self ) -> bool:
        # Force a trigger event; True when the SCPI write succeeded.
        return self.__device.SendCommand( "TRIG FORC" ) == 0 #TRIGger FORCe
#--------------------------------------------
    def GetTriggerState( self ) -> TRIGGER_STATE:
        # Query the trigger state; ERROR for an empty or unrecognised reply.
        ans = self.__device.SendCommandGetAns( "TRIG:STATE?" ) #TRIGger:STATE?
        if( ans == "" ): return self.TRIGGER_STATE.ERROR
        try:
            return self.TRIGGER_STATE( ans )
        except ValueError:
            return self.TRIGGER_STATE.ERROR
#------------------------------------------------------------------------------------------------------------------------------------------------
# General Instructions
#------------------------------------------------------------------------------------------------------------------------------------------------
def GetIDN( self ) -> list:
ans = self.__device.SendCommandGetAns( "*IDN?" )
if( len(ans) == 0 ): return []
return ans.split( ',' )
#------------------------------------------------------------------------------------------------------------------------------------------------
# MEASUREMENTS
#------------------------------------------------------------------------------------------------------------------------------------------------
    def __GetMeasurementsMax( self, id: int, connIdx: int ) -> float:
        # All-acquisitions maximum of measurement <id>; NaN on empty reply.
        command = "MEASU:MEAS" + str(id) + ":RESU:ALLA:MAX?" #MEASUrement:MEAS<x>:RESUlts:ALLAcqs:MAXimum?
        meas = self.__device.SendCommandGetAns( command, connIdx=connIdx )
        if( len(meas) == 0 ): return float( 'nan' )
        return float( meas )
def GetMeasurementsMax( self, id: int ) -> float:
return self.__GetMeasurementsMax( id, 0 )
#----------------------------------------------------------------------------------------------
def __GetMeasurementsMean( self, id: int, connIdx: int ) -> float:
command = "MEASU:MEAS" + str(id) + ":RESU:ALLA:MEAN?" #MEASUrement:MEAS<x>:RESUlts:ALLAcqs:MEAN?
meas = self.__device.SendCommandGetAns( command, connIdx=connIdx )
if( len(meas) == 0 ): return float( 'nan' )
return float( meas )
def GetMeasurementsMean( self, id: int, connIdx=0 ) -> float:
return self.__GetMeasurementsMean( id, 0 )
#----------------------------------------------------------------------------------------------
def __GetMeasurementsMin( self, id: int, connIdx: int ) -> float:
command = "MEASU:MEAS" + str(id) + ":RESU:ALLA:MIN?" #MEASUrement:MEAS<x>:RESUlts:ALLAcqs:MINimum?
meas = self.__device.SendCommandGetAns( command, connIdx=connIdx )
if( len(meas) == 0 ): return float( 'nan' )
return float( meas )
def GetMeasurementsMin( self, id: int, connIdx=0 ) -> float:
return self.__GetMeasurementsMin( id, 0 )
#----------------------------------------------------------------------------------------------
def __GetMeasurementsPK2PK( self, id: int, connIdx: int ) -> float:
command = "MEASU:MEAS" + str(id) + ":RESU:ALLA:PK2PK?" #MEASUrement:MEAS<x>:RESUlts:ALLAcqs:PK2PK?
meas = self.__device.SendCommandGetAns( command, connIdx=connIdx )
if( len(meas) == 0 ): return float( 'nan' )
return float( meas )
def GetMeasurementsPK2PK( self, id: int ) -> float:
return self.__GetMeasurementsPK2PK( id, 0 )
#----------------------------------------------------------------------------------------------
def __GetMeasurementsPopulation( self, id: int, connIdx: int ) -> float:
command = "MEASU:MEAS" + str(id) + ":RESU:ALLA:POPU?" #MEASUrement:MEAS<x>:RESUlts:ALLAcqs:POPUlation?
meas = self.__device.SendCommandGetAns( command, connIdx=connIdx )
if( len(meas) == 0 ): return float( 'nan' )
return float( meas )
def GetMeasurementsPopulation( self, id: int ) -> float:
return self.__GetMeasurementsPopulation( id, 0 )
#----------------------------------------------------------------------------------------------
def __GetMeasurementsStdDev( self, id: int, connIdx: int ) -> float:
command = "MEASU:MEAS" + str(id) + ":RESU:ALLA:STDD?" #MEASUrement:MEAS<x>:RESUlts:ALLAcqs:STDDev?
meas = self.__device.SendCommandGetAns( command, connIdx=connIdx )
if( len(meas) == 0 ): return float( 'nan' )
return float( meas )
def GetMeasurementsStdDev( self, id: int ) -> float:
return self.__GetMeasurementsStdDev( id, 0 )
#------------------------------------------------------------------------------------------------------------------------------------------------
class Measurements:
def __init__( self, max: float, mean: float, min: float, pk2pk: float, pop: float, stdDev: float ):
self.__maximum = max
self.__mean = mean
self.__minimum = min
self.__peak2peak = pk2pk
self.__population = pop
self.__standardDeviation = stdDev
#------------------------------------------------------------------------------------------
@property
def maximum( self ):
return self.__maximum
#-----------------------------------------------------------------
@property
def mean( self ):
return self.__mean
#-----------------------------------------------------------------
@property
def minimum( self ):
return self.__minimum
#-----------------------------------------------------------------
@property
def peak2peak( self ):
return self.__peak2peak
#-----------------------------------------------------------------
@property
def population( self ):
return self.__population
#-----------------------------------------------------------------
@property
def standardDeviation( self ):
return self.__standardDeviation
#----------------------------------------------------------------------------------------------
def GetMeasurements( self, id: int ) -> Measurements:
connIdx = self.__device.Connect()
if( connIdx == -1 ): return None
meas = MSO5x.Measurements( max = self.__GetMeasurementsMax( id, connIdx ),
mean = self.__GetMeasurementsMean( id, connIdx ),
min = self.__GetMeasurementsMin( id, connIdx ),
pk2pk = self.__GetMeasurementsPK2PK( id, connIdx ),
pop = self.__GetMeasurementsPopulation( id, connIdx ),
stdDev = self.__GetMeasurementsStdDev( id, connIdx ) )
self.__device.Close( connIdx )
return meas
#------------------------------------------------------------------------------------------------------------------------------------------------
# SAVE FUNCTIONS
#------------------------------------------------------------------------------------------------------------------------------------------------
def SetCurrentWorkingDirectory( self, path: str ) -> bool:
#path - with "/" as separator
command = "FILES:CWD \"" + path + "\"" #FILESystem:CWD
return self.__device.SendCommand( command ) == 0
#--------------------------------------------
def GetCurrentWorkingDirectory( self ) -> str:
directory = self.__device.SendCommandGetAns( "FILES:CWD?" ) #FILESystem:CWD?
if( len(directory) == 0 ): return ""
return directory
#----------------------------------------------------------------------------------------------
def SaveSession( self, path: str ) -> bool:
#path - file with *.tss extension and with "/" as separator
command = "SAVE:SESSION " #SAVE:SESSION <path>
command += "\"" + path + "\""
return self.__device.SendCommand( command ) == 0
| 50.725632 | 153 | 0.38951 |
e91d0411a8febadb09ca20268e15414ccded8163 | 1,543 | py | Python | pedurma/proofreading.py | Esukhia/pedurma | 334b5957db30f514d396bd9defc9e9381f5b290b | [
"MIT"
] | null | null | null | pedurma/proofreading.py | Esukhia/pedurma | 334b5957db30f514d396bd9defc9e9381f5b290b | [
"MIT"
] | null | null | null | pedurma/proofreading.py | Esukhia/pedurma | 334b5957db30f514d396bd9defc9e9381f5b290b | [
"MIT"
] | 1 | 2021-11-04T07:04:05.000Z | 2021-11-04T07:04:05.000Z | from pedurma.pecha import ProofreadNotePage
from pedurma.utils import from_yaml
def get_note_page_img_link(text_id, pg_num, repo_path):
text_meta = from_yaml((repo_path / text_id / "meta.yml"))
image_grp_id = text_meta.get("img_grp_id", "")
img_link = f"https://iiif.bdrc.io/bdr:{image_grp_id}::{image_grp_id}{int(pg_num):04}.jpg/full/max/0/default.jpg"
return img_link
def get_note_page(text_id, cur_pg_num, repo_path=None):
manual_note = (
repo_path / text_id / "manual_notes" / f"{cur_pg_num:04}.txt"
).read_text(encoding="utf-8")
google_note = (
repo_path / text_id / "google_notes" / f"{cur_pg_num:04}.txt"
).read_text(encoding="utf-8")
img_link = get_note_page_img_link(text_id, cur_pg_num, repo_path)
page = ProofreadNotePage(
manual=manual_note, google=google_note, img_link=img_link, page_num=cur_pg_num
)
return page
def get_note_pages(text_id, repo_path):
note_pages = []
page_paths = list((repo_path / text_id / "google_notes").iterdir())
page_paths.sort()
for page_path in page_paths:
page_num = int(page_path.stem)
note_pages.append(get_note_page(text_id, page_num, repo_path))
return note_pages
def update_note_page(text_id, page: ProofreadNotePage, repo_path=None):
new_manual_note_page = page.manual
cur_pg_num = page.page_num
(repo_path / text_id / "manual_notes" / f"{cur_pg_num:04}.txt").write_text(
new_manual_note_page, encoding="utf-8"
)
print(f"INFO: {cur_pg_num} updated")
| 35.068182 | 116 | 0.706416 |
e91e11b03c50d75698f208a10f1b310af5a8ffcc | 4,043 | py | Python | authors/apps/articles/tests/test_likes_dislikes.py | andela/ah-backend-prime | 0708463d4565a4977a5a5dcb839f1dfed52fdc90 | [
"BSD-3-Clause"
] | 1 | 2019-09-19T14:30:05.000Z | 2019-09-19T14:30:05.000Z | authors/apps/articles/tests/test_likes_dislikes.py | e-ian/authors-haven-frontend | 05829c8088ca49ef2cf0863dc87ec55b44b13534 | [
"BSD-3-Clause"
] | 22 | 2019-03-25T16:10:53.000Z | 2022-03-11T23:44:21.000Z | authors/apps/articles/tests/test_likes_dislikes.py | e-ian/authors-haven-frontend | 05829c8088ca49ef2cf0863dc87ec55b44b13534 | [
"BSD-3-Clause"
] | 6 | 2019-03-25T09:39:39.000Z | 2021-03-11T23:54:12.000Z | import json
from rest_framework import status, response
from django.urls import reverse
from .base import ArticlesBaseTest
from .test_data import VALID_ARTICLE
from authors.apps.authentication.tests.test_data import (
VALID_USER_DATA
)
from rest_framework.test import APIClient, APITestCase
from .base import BaseTest
class TestLikeDislikeArticle(ArticlesBaseTest):
    '''Test likes and dislikes functionality.

    Every test creates a user and an article, then exercises the
    like/dislike endpoints and checks the returned counters. The repeated
    setup/request code of the original tests is factored into the private
    helpers below; the test methods and their assertions are unchanged.
    '''

    def _create_article(self, token):
        """Create an article for the authenticated user and return its slug."""
        response = self.client.post(
            self.create_articles,
            HTTP_AUTHORIZATION=token,
            data=VALID_ARTICLE,
            format='json'
        )
        return response.data['article']['slug']

    def _react(self, token, slug, reaction):
        """POST a 'like' or 'dislike' reaction for the given article slug."""
        return self.client.post(
            f'/api/v1/articles/{slug}/{reaction}/',
            HTTP_AUTHORIZATION=token
        )

    def test_like_article(self):
        '''Liking an article increments its like counter'''
        token = self.create_user(VALID_USER_DATA)
        slug = self._create_article(token)
        response = self._react(token, slug, 'like')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['likes'], 1)

    def test_dislike_article(self):
        '''Disliking an article increments its dislike counter'''
        token = self.create_user(VALID_USER_DATA)
        slug = self._create_article(token)
        response = self._react(token, slug, 'dislike')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['dislikes'], 1)

    def test_like_article_twice(self):
        '''Liking an article twice toggles the like off again'''
        token = self.create_user(VALID_USER_DATA)
        slug = self._create_article(token)
        self._react(token, slug, 'like')
        response = self._react(token, slug, 'like')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['likes'], 0)

    def test_dislike_article_twice(self):
        '''Disliking an article twice toggles the dislike off again'''
        token = self.create_user(VALID_USER_DATA)
        slug = self._create_article(token)
        self._react(token, slug, 'dislike')
        response = self._react(token, slug, 'dislike')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['dislikes'], 0)

    def test_like_disliked_article_twice(self):
        '''Disliking a liked article removes the like and adds a dislike'''
        token = self.create_user(VALID_USER_DATA)
        slug = self._create_article(token)
        self._react(token, slug, 'like')
        response = self._react(token, slug, 'dislike')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['likes'], 0)
        self.assertEqual(response.data['dislikes'], 1)
e91ff99a3728e01c9518fdfe79d256b14ae28af1 | 353 | py | Python | DataBase Sqlite3/NoteMeilheur.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | [
"Apache-2.0"
] | 3 | 2021-12-08T10:34:55.000Z | 2022-01-17T21:02:40.000Z | NoteMeilheur.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | [
"Apache-2.0"
] | null | null | null | NoteMeilheur.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 3 04:38:07 2021
@author: User
"""
import sqlite3
# Print the student with the single highest CF mark.
connexion = sqlite3.connect("dbM2IQL.db")
try:
    curseur = connexion.cursor()
    curseur.execute("""SELECT e.Nom, c.note FROM Etudiant as e INNER JOIN
                    CF as c ON e.id = c.fk_etudiant
                    ORDER BY c.note DESC LIMIT 1""")
    # fetchone() returns a (Nom, note) tuple, or None when the table is empty.
    print(curseur.fetchone())
finally:
    # BUG FIX: the original never closed the connection.
    connexion.close()
e921b1bc0cceec8b113f393fdb06b057357e8848 | 24,410 | py | Python | packages/robotframework-test-assistant/robotframework-test-assistant.py | jg8481/leon | b94a6c753cee79f4568ab7a83932351f7c949791 | [
"MIT"
] | 3 | 2020-01-15T20:49:42.000Z | 2020-11-22T01:41:33.000Z | packages/robotframework-test-assistant/robotframework-test-assistant.py | jg8481/leon | b94a6c753cee79f4568ab7a83932351f7c949791 | [
"MIT"
] | null | null | null | packages/robotframework-test-assistant/robotframework-test-assistant.py | jg8481/leon | b94a6c753cee79f4568ab7a83932351f7c949791 | [
"MIT"
] | 3 | 2020-01-18T17:06:56.000Z | 2020-12-16T16:03:57.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import utils
import os
import os.path
import sys
import subprocess
import re
import time
# Absolute directory of this module; used to locate robotframework-runner.sh.
filepath = os.path.dirname(os.path.realpath(__file__))
# User-tunable delays (seconds) for the generic time-delayed runners below.
small_time_delay = 5 ##--> Use this to set up your small time delay. This time delay is in seconds.
medium_time_delay = 20 ##--> Use this to set up your medium time delay. This time delay is in seconds.
large_time_delay = 600 ##--> Use this to set up your large time delay. This time delay is in seconds.
def _call_runner(task):
    """Run robotframework-runner.sh with a single task name (blocking)."""
    subprocess.call(filepath + '/robotframework-runner.sh ' + task, shell=True)

def _run_task(task, dialog):
    """Run one runner task, then answer Leon with the given dialog key.

    Every Leon action below is a thin wrapper over this helper: the original
    module repeated the same subprocess.call + utils.output pair 50+ times.
    """
    _call_runner(task)
    return utils.output('end', dialog, utils.translate(dialog))

def Clean_Up_Results(string, entities):
    """Leon will clean up the results folder"""
    return _run_task('Clean-Up-Results', 'clean_up_results_ran')

def Check_One(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Check-One', 'single_check_ran')

def Check_Two(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Check-Two', 'single_check_ran')

def Check_Three(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Check-Three', 'single_check_ran')

def Check_Four(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Check-Four', 'single_check_ran')

def Set_Up_Runner_One(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Up-Runner-One', 'finished_setting_up')

def Set_Check_One(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Check-One', 'finished_setting_up')

def Set_Check_Two(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Check-Two', 'finished_setting_up')

def Set_Check_Three(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Check-Three', 'finished_setting_up')

def Set_Check_Four(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Check-Four', 'finished_setting_up')

def Custom_Runner_One(string, entities):
    """Leon will start a custom Robot Framework automated check run"""
    return _run_task('Custom-Runner-One', 'multiple_checks_ran')

def Display_Runner_One(string, entities):
    """Leon will display the results of the Robot Framework automated check run"""
    return _run_task('Display-Runner-One', 'display_results')

def Group_One(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Group-One', 'parallel_checks_ran')

def Group_Two(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Group-Two', 'parallel_checks_ran')

def Group_Three(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Group-Three', 'parallel_checks_ran')

def Group_Four(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Group-Four', 'parallel_checks_ran')

def Set_Up_Runner_Two(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Up-Runner-Two', 'finished_setting_up')

def Set_Group_One(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Group-One', 'finished_setting_up')

def Set_Group_Two(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Group-Two', 'finished_setting_up')

def Set_Group_Three(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Group-Three', 'finished_setting_up')

def Set_Group_Four(string, entities):
    """Leon will set up a custom automated check run"""
    return _run_task('Set-Group-Four', 'finished_setting_up')

def Custom_Runner_Two(string, entities):
    """Leon will start a custom Robot Framework automated check run"""
    return _run_task('Custom-Runner-Two', 'multiple_checks_ran')

def Display_Runner_Two(string, entities):
    """Leon will display the results of the Robot Framework automated check run"""
    return _run_task('Display-Runner-Two', 'display_results')

def Slack_Notification_Send_All(string, entities):
    """Leon will send the console log results of the Robot Framework automated check runs to Slack"""
    return _run_task('Slack-Notification-Send-All', 'notify_the_team')

def Build_Docker_Containers(string, entities):
    """Leon will build Docker Containers for running Robot Framework scripts"""
    return _run_task('Build-Docker-Containers', 'finished_setting_up')

def Clean_Up_Docker_Containers(string, entities):
    """Leon will stop and remove Docker Containers"""
    return _run_task('Clean-Up-Docker-Containers', 'finished_setting_up')

def Robot_Framework_Docker_API_Checks(string, entities):
    """Leon will run Robot Framework scripts from within Docker Containers that run locally"""
    return _run_task('Robot-Framework-Docker-API-Checks', 'multiple_checks_ran')

def Robot_Framework_Docker_Random_Order_API_Checks(string, entities):
    """Leon will run Robot Framework scripts from within Docker Containers that run locally"""
    return _run_task('Robot-Framework-Docker-Random-Order-API-Checks', 'multiple_checks_ran')

def Robot_Framework_Docker_MBT_Graphwalker_Checks(string, entities):
    """Leon will run Robot Framework scripts from within Docker Containers that run locally"""
    return _run_task('Robot-Framework-Docker-MBT-Graphwalker-Checks', 'multiple_checks_ran')

def Display_Current_MBT_Graphwalker_Path(string, entities):
    """Leon will display the results of the current Graphwalker Path generated by the Robot Framework Docker Container"""
    return _run_task('Display-Current-MBT-Graphwalker-Results', 'display_results')

def Run_Same_Robot_Framework_Docker_MBT_Graphwalker_Checks_Again(string, entities):
    """Leon will run Robot Framework scripts from within Docker Containers that run locally"""
    return _run_task('Run-Same-Robot-Framework-Docker-MBT-Graphwalker-Checks-Again', 'multiple_checks_ran')

def Robot_Framework_Selenium_Desktop_Web_Checks(string, entities):
    """Leon will run Robot Framework through the script runner"""
    return _run_task('Robot-Framework-Selenium-Desktop-Web-Checks', 'multiple_checks_ran')

def Start_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
    """Leon will start a Docker Container for running remote Robot Framework scripts triggered by a webhook"""
    return _run_task('Start-Remote-API-Check-Process-Webhook-Docker-Container', 'multiple_checks_ran')

def Start_Remote_Selenium_Process_Webhook_Container(string, entities):
    """Leon will start a Docker Container for running remote Robot Framework scripts triggered by a webhook"""
    return _run_task('Start-Remote-Selenium-Process-Webhook-Docker-Container', 'multiple_checks_ran')

def Trigger_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
    """Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
    return _run_task('Trigger-Remote-API-Check-Process-Webhook-Docker-Container', 'multiple_checks_ran')

def Trigger_Remote_Selenium_Process_Webhook_Container(string, entities):
    """Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
    return _run_task('Trigger-Remote-Selenium-Process-Webhook-Docker-Container', 'multiple_checks_ran')

def Trigger_Both_Webhook_Docker_Containers_For_Parallel_Run(string, entities):
    """Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
    return _run_task('Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run', 'multiple_checks_ran')

def Custom_Tasks_And_Suites_Runner(string, entities):
    """Leon will run a previously configured custom tasks and suites run"""
    # BUG FIX: the original answered with code 'finished_setting_up' while
    # speaking the 'multiple_checks_ran' dialog; use the same key for both,
    # matching Custom_Runner_One / Custom_Runner_Two.
    return _run_task('Custom-Tasks-And-Suites-Runner', 'multiple_checks_ran')

def Set_Up_Custom_Tasks_And_Suites_Runner(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Up-Custom-Tasks-And-Suites-Runner', 'finished_setting_up')

def Set_Slack_Notification_Send_All(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Slack-Notification-Send-All', 'finished_setting_up')

def Set_Build_Docker_Containers(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Build-Docker-Containers', 'finished_setting_up')

def Set_Clean_Up_Docker_Containers(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Clean-Up-Docker-Containers', 'finished_setting_up')

def Set_Start_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Start-Remote-API-Check-Process-Webhook-Docker-Container', 'finished_setting_up')

def Set_Start_Remote_Selenium_Process_Webhook_Docker_Container(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Start-Remote-Selenium-Process-Webhook-Docker-Container', 'finished_setting_up')

def Set_Trigger_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Trigger-Remote-API-Check-Process-Webhook-Docker-Container', 'finished_setting_up')

def Set_Trigger_Remote_Selenium_Process_Webhook_Docker_Container(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Trigger-Remote-Selenium-Process-Webhook-Docker-Container', 'finished_setting_up')

def Set_Trigger_Both_Webhook_Docker_Containers_For_Parallel_Run(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run', 'finished_setting_up')

def Set_Generate_Bug_Risk_Prediction_Scores_For_A_GitHub_Repo(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    return _run_task('Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo', 'finished_setting_up')

def Display_Custom_Tasks_And_Suites_Runner(string, entities):
    """Leon will display the results of the Robot Framework automated RPA tasks run"""
    return _run_task('Display-Custom-Tasks-And-Suites-Runner', 'display_results')

def Generic_Customizable_Time_Delayed_Runner_One(string, entities):
    """Leon will set up a time delayed generic task runner"""
    # Suggestion: swap in small_time_delay / medium_time_delay / large_time_delay.
    time.sleep(small_time_delay)
    # Suggestion: chain any of the runner tasks here; this example triggers a
    # single time-delayed check.
    return _run_task('Check-One', 'generic_time_delayed_task_ran')

def Generic_Customizable_Time_Delayed_Runner_Two(string, entities):
    """Leon will set up a time delayed generic task runner"""
    # Suggestion: swap in small_time_delay / medium_time_delay / large_time_delay.
    time.sleep(small_time_delay)
    # Example: rebuild a Custom_Runner_One plan from a previously created .csv
    # file, execute it, and display the results.
    for task in ('Set-Up-Runner-One', 'Set-Check-Three', 'Set-Check-Two',
                 'Set-Check-Three', 'Set-Check-One', 'Custom-Runner-One',
                 'Display-Runner-One'):
        _call_runner(task)
    return utils.output('end', 'generic_time_delayed_task_ran', utils.translate('generic_time_delayed_task_ran'))

def Generic_Customizable_Time_Delayed_Runner_Three(string, entities):
    """Leon will set up a time delayed generic task runner"""
    # Suggestion: swap in small_time_delay / medium_time_delay / large_time_delay.
    time.sleep(small_time_delay)
    # Example: configure the custom tasks-and-suites plan, run it and display
    # the results.
    for task in ('Set-Up-Custom-Tasks-And-Suites-Runner',
                 'Set-Clean-Up-Docker-Containers',
                 'Set-Build-Docker-Containers',
                 'Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo',
                 'Custom-Tasks-And-Suites-Runner',
                 'Display-Custom-Tasks-And-Suites-Runner'):
        _call_runner(task)
    return utils.output('end', 'generic_time_delayed_task_ran', utils.translate('generic_time_delayed_task_ran'))
def Generic_Customizable_On_Demand_Runner(string, entities):
    """Set up and immediately run a generic, fully customizable task chain."""
    # Each entry is an argument handed to robotframework-runner.sh; the
    # sequence chains every custom runner, gathers the results dashboard and
    # finishes by notifying the team. Edit the tuple to customize the chain.
    task_sequence = (
        'Set-Up-Custom-Tasks-And-Suites-Runner',
        'Set-Clean-Up-Docker-Containers',
        'Set-Build-Docker-Containers',
        'Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo',
        'Custom-Tasks-And-Suites-Runner',
        'Set-Up-Runner-One',
        'Set-Check-Three',
        'Set-Check-Two',
        'Set-Check-Four',
        'Set-Check-One',
        'Custom-Runner-One',
        'Robot-Framework-Selenium-Desktop-Web-Checks',
        'Set-Up-Runner-Two',
        'Set-Group-Two',
        'Set-Group-One',
        'Set-Group-Four',
        'Set-Group-Three',
        'Custom-Runner-Two',
        'Robot-Framework-Docker-MBT-Graphwalker-Checks',
        'Set-Up-Custom-Tasks-And-Suites-Runner',
        'Set-Clean-Up-Docker-Containers',
        'Set-Build-Docker-Containers',
        'Set-Start-Remote-API-Check-Process-Webhook-Docker-Container',
        'Set-Start-Remote-Selenium-Process-Webhook-Docker-Container',
        'Set-Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run',
        'Set-Trigger-Remote-Selenium-Process-Webhook-Docker-Container',
        'Set-Trigger-Remote-API-Check-Process-Webhook-Docker-Container',
        'Custom-Tasks-And-Suites-Runner',
        'Gather-All-Robot-Framework-Test-Results-And-Deploy-Dashboard-To-Heroku',
        'Slack-Notification-Send-All',
    )
    for task_name in task_sequence:
        subprocess.call(filepath + '/robotframework-runner.sh ' + task_name, shell=True)
    return utils.output('end', 'generic_on_demand_task_ran', utils.translate('generic_on_demand_task_ran'))
def Gather_All_Robot_Framework_Test_Results_And_Deploy_Dashboard_To_Heroku(string, entities):
    """Gather all Robot Framework results (ReBot) and deploy the dashboard to Heroku."""
    runner_cmd = filepath + '/robotframework-runner.sh Gather-All-Robot-Framework-Test-Results-And-Deploy-Dashboard-To-Heroku'
    subprocess.call(runner_cmd, shell=True)
    answer_key = 'gathered_test_results_and_deployed_dashboard_to_heroku'
    return utils.output('end', answer_key, utils.translate(answer_key))
def Help_Confused_Users(string, entities):
    """Offer usage guidance to users unsure how to drive this leon-ai package."""
    answer_key = 'help_confused_users'
    return utils.output('end', answer_key, utils.translate(answer_key))
def Jira_Task_Runner(string, entities):
    """Run the Jira task through the Robot Framework script runner."""
    runner_cmd = filepath + '/robotframework-runner.sh Jira-Task-Runner'
    subprocess.call(runner_cmd, shell=True)
    answer_key = 'single_check_ran'
    return utils.output('end', answer_key, utils.translate(answer_key))
| 70.549133 | 292 | 0.7712 |
e9225ac8234cba226c9c33772de98e2d065d77b6 | 349 | py | Python | chapter2/bandit.py | mtrazzi/understanding-rl | 83a9b7608c805189a39b4ef81893f6ebe982f9e1 | [
"MIT"
] | 95 | 2020-04-26T12:36:07.000Z | 2020-05-02T13:23:47.000Z | chapter2/bandit.py | 3outeille/rl-book-challenge | b02595b0aec3e9632ef5d9814e925384931089bd | [
"MIT"
] | 2 | 2020-09-24T20:29:29.000Z | 2021-11-27T11:17:45.000Z | chapter2/bandit.py | 3outeille/rl-book-challenge | b02595b0aec3e9632ef5d9814e925384931089bd | [
"MIT"
] | 15 | 2020-04-27T04:10:02.000Z | 2020-04-30T21:42:04.000Z | import numpy as np
class Bandit:
    """A k-armed Gaussian bandit.

    Each arm's true value is drawn once from N(mean, 1); pulling an arm
    samples a reward from a unit-variance normal centred on that value.
    """

    def __init__(self, k=10, mean=0):
        # k: number of arms; mean: offset added to every arm's true value.
        self.k = k
        self.q = np.random.randn(k) + mean
        # Snapshot of the initial arm values so the bandit can be reset.
        self.old_q = [q_val for q_val in self.q]  # copy

    def max_action(self):
        """Return the index of the optimal arm (highest true value)."""
        return np.argmax(self.q)

    def reward(self, action):
        """Sample a reward for `action` from N(q[action], 1)."""
        return np.random.normal(self.q[action])

    def reset(self):
        """Restore the arm values saved at construction time.

        Fixed to copy instead of aliasing: the old `self.q = self.old_q`
        made `q` and the snapshot the same list, so any later in-place edit
        of `q` silently corrupted `old_q` (and `q` stopped being an ndarray).
        """
        self.q = np.array(self.old_q)
| 19.388889 | 52 | 0.638968 |
e92450a33cbfd0332cf6d8991a025ba1f22e0f12 | 1,765 | py | Python | Sublime_Plugin/EditFormat.py | dtysky/Gal2Renpy | 59a70c5d336394155dedaf82d17bd99297f92d1a | [
"MIT"
] | 36 | 2015-04-19T05:03:10.000Z | 2022-03-29T08:12:38.000Z | Sublime_Plugin/EditFormat.py | dtysky/Gal2Renpy | 59a70c5d336394155dedaf82d17bd99297f92d1a | [
"MIT"
] | 2 | 2016-05-05T07:24:09.000Z | 2017-11-01T05:32:11.000Z | Sublime_Plugin/EditFormat.py | dtysky/Gal2Renpy | 59a70c5d336394155dedaf82d17bd99297f92d1a | [
"MIT"
] | 2 | 2016-12-01T02:12:33.000Z | 2020-03-09T02:27:19.000Z | #coding:utf-8
#########################
#Copyright(c) 2014 dtysky
#########################
def EditFormat(US, UT):
    """Build the per-command argument-format table.

    Each entry maps a command name to a 5-element list:
    [flag, required-arg spec, required-arg defaults,
     optional-arg names, optional-arg defaults].
    One extra 'ch-<name>' entry is generated for every character listed in
    UT.Args['ch']['m']. `US` is kept for interface compatibility.
    """
    # Note: 'sc' deliberately carries the bare string 'k' (the original
    # wrote ('k'), which is not a tuple).
    formats = {
        'sc': [
            0, 'k', {'k': 'None'},
            ('cp', 'sc'), {'cp': 'None', 'sc': 'None'}
        ],
        'sw': [
            0, (), {},
            ('s',), {'s': 'None'}
        ],
        'chrlast': [
            1, ('l', 't'), {'l': 'None', 't': 'None'},
            ('m', 'p', 'c', 'f', 'd'), {'m': 'None', 'p': 'None', 'c': 'None', 'f': 'None', 'd': 'None'}
        ],
        'bg': [
            0, ('l', 't'), {'l': 'None', 't': 'None'},
            ('m', 's', 'w'), {'m': 'None', 's': 'None', 'w': 'None'}
        ],
        'cg': [
            0, ('l', 't'), {'l': 'None', 't': 'None'},
            ('m', 's'), {'m': 'None', 's': 'None'}
        ],
        'bgm': [
            0, (), {},
            ('m',), {'m': 'None'}
        ],
        'movie': [
            0, (), {},
            ('m', 'k'), {'m': 'None', 'k': 'None'}
        ],
        'sound': [
            0, (), {},
            ('m', 'k'), {'m': 'None', 'k': 'None'}
        ],
        'date': [
            0, ('m',), {'m': 'None'},
            ('s',), {'s': 'None'}
        ],
        'vd': [
            0, (), {},
            ('m',), {'m': 'None'}
        ],
        'ef': [
            1, ('e', 'args'), {'e': 'None', 'args': 'None'},
            ('c',), {'c': 'None'}
        ],
        'gf': [
            0, ('l', 't'), {'l': 'None', 't': 'None'},
            ('m',), {'m': 'None'}
        ],
        'key': [
            0, ('m',), {'m': 'None'},
            ('s', 'n'), {'s': 'None', 'n': 'None'}
        ],
        'mode': [
            0, (), {},
            ('m',), {'m': 'None'}
        ],
        'pause': [
            0, (), {},
            ('p',), {'p': 'None'}
        ],
        'view': [
            0, (), {},
            ('m',), {'m': 'None'}
        ],
        'chc': [
            0, (), {},
            ('a', 'b'), {'a': 'None', 'b': 'None'}
        ],
        'renpy': [
            0, (), {},
            ('m',), {'m': 'None'}
        ],
        'test': [
            0, (), {},
            ('m',), {'m': 'None'}
        ]
    }
    # One dedicated entry per declared character name.
    for char_name in UT.Args['ch']['m']:
        formats['ch-' + char_name] = [
            1, ('l', 't'), {'l': 'None', 't': 'None'},
            ('m', 'p', 'c', 'f', 'd'), {'m': char_name, 'p': 'None', 'c': 'None', 'f': 'None', 'd': 'None'}
        ]
    return formats
e924c56b0295f7f4a6d78dab18bd9428b1fe0209 | 272 | py | Python | wesgame.py | WestenPy/Curso_em_video | 9f6a9775d27e1b86d54b381aba5da69b2ae21b27 | [
"MIT"
] | null | null | null | wesgame.py | WestenPy/Curso_em_video | 9f6a9775d27e1b86d54b381aba5da69b2ae21b27 | [
"MIT"
] | null | null | null | wesgame.py | WestenPy/Curso_em_video | 9f6a9775d27e1b86d54b381aba5da69b2ae21b27 | [
"MIT"
] | null | null | null | from random import randint
def ataque():
    """Roll a d20 and print the damage category for the result.

    18-20: critical, 8-17: normal, 3-7: reduced, 1-2: miss.
    """
    r = randint(1, 20)
    if r >= 18:
        print('DANO CRÍTICO!')
    elif r >= 8:
        # Fixed: the original condition `r >= 8 or r < 18` was always true
        # at this point, making the two branches below unreachable.
        print('DANO NORMAL!')
    elif r >= 3:
        print('DANO REDUZIDO!')
    else:
        print('ERRO!')


ataque()
| 16 | 31 | 0.5 |
e924f0db03f4f2a8c126f7c109a518852a2aa24a | 6,850 | py | Python | ProcessingData/get_gp-bias.py | gomes-lab/SARA_ScienceAdvances | 61848d1c92a66bd58c8c195e5b2bb250ef8efb51 | [
"MIT"
] | 1 | 2022-01-13T12:17:29.000Z | 2022-01-13T12:17:29.000Z | ProcessingData/get_gp-bias.py | gomes-lab/SARA_ScienceAdvances | 61848d1c92a66bd58c8c195e5b2bb250ef8efb51 | [
"MIT"
] | null | null | null | ProcessingData/get_gp-bias.py | gomes-lab/SARA_ScienceAdvances | 61848d1c92a66bd58c8c195e5b2bb250ef8efb51 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Script to extract the gp bias features from microscopy images
"""
import sys
import json
import os
import copy as cp
import numpy as np
import glob
import matplotlib.pyplot as plt
import matplotlib
from numpy.polynomial import polynomial
import offsets as GS
from probability_dist import *
import data_storage as ds
import zone as LSA_Zone
from os import listdir
from matplotlib import cm
from collections import OrderedDict
import seaborn as sns
import itertools
#Set color schemes
# Catalogue of qualitative matplotlib colormap names (only 'Qualitative' is
# populated here); 'Set1' is installed as the default image colormap and
# axes color cycle.
cmaps = OrderedDict()
cmaps['Qualitative'] = ['Pastel1', 'Pastel2', 'Paired', 'Accent',
                        'Dark2', 'Set1', 'Set2', 'Set3',
                        'tab10', 'tab20', 'tab20b', 'tab20c']
plt.rcParams["image.cmap"] = "Set1"
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Set1.colors)
# NOTE(review): `palette` is assigned twice -- the cycle iterator built on
# the first line is immediately discarded; the plotting code below indexes
# the plain list. Confirm the cycling version was meant to be dropped.
palette = itertools.cycle(sns.color_palette("muted"))
palette = sns.color_palette("muted")
def list_files(directory, extension):
    """Return the names of entries in `directory` ending in '.<extension>'."""
    suffix = '.' + extension
    return [entry for entry in listdir(directory) if entry.endswith(suffix)]
def convert_bias_parameters(bias_parameters, center):
    """Re-express sum-of-Gaussians parameters in the form the GP expects.

    Each input entry (b0, b1, b2, b3, b4) is mapped to
    (b0 * b4, b1 - center, b2 * 0.5 * sqrt(2 ln 2), b3).
    """
    half_width_to_std = 0.5 * np.sqrt(2. * np.log(2.))
    return [
        (params[0] * params[4],
         params[1] - center,
         params[2] * half_width_to_std,
         params[3])
        for params in bias_parameters
    ]
def get_img_filename(pos, image_error, bx=1., by=1.):
    """Derive the 9-character image filename prefix for a stripe at `pos`.

    When `image_error` is true the coordinates are first quantised to the
    (bx, by) grid via round(). Dwell and Tpeak are zeroed: only the position
    contributes to the prefix.
    """
    lsa = ds.LSA()
    x_coord, y_coord = pos[0], pos[1]
    if image_error:
        x_coord = round(pos[0] / bx)
        y_coord = round(pos[1] / by)
    stripe = {'x': x_coord, 'y': y_coord, 'dwell': 0., 'Tpeak': 0.}
    return lsa.image_name(stripe)[:9]
def get_filename(pos, img_dir, bx=1., by=1.):
    """Locate and load the first .bmp matching the stripe at `pos`.

    Returns (PIL image, filename), with RGB images converted to BGR channel
    order. NOTE: if no file matches, the function falls through and
    implicitly returns None -- callers that unpack two values will then
    raise; kept for backward compatibility with the original behaviour.
    """
    pattern = get_img_filename(pos, image_error=True, bx=bx, by=by)
    pattern += "*.bmp"
    # `img_dir` is a required parameter, so the original
    # `if 'img_dir' in locals():` guard was always true; join unconditionally.
    pattern = os.path.join(img_dir, pattern)
    matches = glob.glob(pattern)
    if len(matches) > 0:
        img_fn = sorted(matches)[0]
        img = Image.open(img_fn)
        mode = img.mode
        if mode == "RGB":
            # Swap to BGR channel order.
            r, g, b = img.split()
            img = Image.merge("RGB", (b, g, r))
        return img, img_fn
# Batch job: for every stripe image, locate the peaks, convert them into GP
# bias (rescaling) parameters, save a diagnostic plot next to the image and
# finally dump all parameters to bias.json.
rescaling_datas = []
img_dir = "Bi2O3/Images/"
files = list_files(img_dir, "bmp")
# Filenames added to this list are skipped entirely.
exclude = []
for f in files[:]:
    if f in exclude:
        continue
    rescaling_data = {}
    #Parse information from the filename
    meta_img = {}
    fn_meta = f.split("_")
    #The last part is the temperature in C
    meta_img["Tpeak"] = float(fn_meta[-1].split(".")[0])
    #The second last part is the dwell time in microsec
    meta_img["dwell"] = float(fn_meta[-2])
    meta_img["logtau"] = np.log10(float(fn_meta[-2]))
    # First two fields encode grid indices; the (2, 5) factors match the
    # bx/by passed to get_filename below -- presumably the stripe grid
    # pitch in mm (TODO confirm).
    meta_img["pos"] = [float(fn_meta[0][1:])*2, float(fn_meta[1])*5]
    meta_img["filename"] = f
    pos = meta_img["pos"]
    img, img_fn = get_filename(pos, img_dir, bx = 2., by = 5.)
    # NOTE(review): str.replace is global -- this rewrites every lowercase
    # 'b' in the whole path to 'aa', not just a filename prefix. Confirm.
    plt_out = img_fn.replace("bmp", "png").replace("b", "aa")
    zone = LSA_Zone.zone()
    img_spec_offset = GS.img_spec_offset()
    img_spec_offset.scale = 0.00092 #Scaling of pixels in mm
    img_spec_offset.scale_imgcam = 0.0006680932 #Scaling of pixels in mm for imaging camera
    img_spec_offset.offset = 0 #Offset of the spectrometer with respect to the image center in pixels.
    img_spec_offset.offsety = 0 #Offset of the spectrometer with respect to the image center in pixels.
    img_spec_offset.img_shift = img_spec_offset.offset * img_spec_offset.scale #The amount of shift along the x-axis in mm of the spectrum with respect to image
    img_spec_offset.offset_global = [0., 0.]
    zone.pos = pos
    pd = probability_dist()
    img, img_center_px, img_info, img_data, img_peaks = zone.image_from_file(img_fn, img_spec_offset)
    # Fall back to the geometric centre when the detected centre is more
    # than 10% of the image width away from it.
    if abs(img_center_px - zone.img_width * 0.5) > zone.img_width*0.1:
        img_center_px = 0.5 * zone.img_width
    # Convert the centre from pixels to the zone's x-domain coordinates.
    img_center = zone.img_xdomain[0] + img_center_px/zone.img_width * (zone.img_xdomain[1] - zone.img_xdomain[0])
    spec_center = img_center
    peaks = np.array(img_peaks)
    n_dense = 800
    # Dense column-vector x-grid spanning +/-1.75 around the centre.
    zone.spec_xdomain = [img_center-1.75, img_center+1.75]
    x_plot = np.linspace(zone.spec_xdomain[0], zone.spec_xdomain[1], n_dense).reshape(-1,1)
    dist_peaks, dist_lsa, dist_peaks_lsa, bias_parameters, LSA_width = pd.get_img_bias(peaks, img_center, spec_center, x_plot, lsa_frac = 1.)
    bias_parameter_centered = convert_bias_parameters(bias_parameters, img_center)
    #Convolve the uncertainty and the prior distribution
    dist_sum_peaks = pd.sum(dist_peaks,"SumPeaks",1.)
    dist_sum_peaks_lsa = pd.sum(dist_peaks_lsa,"SumPeaks",1.)
    # Plot on three separate axes
    fig, axes = plt.subplots(nrows=2, sharex=True)
    axes = axes.tolist()
    axes[0].set_ylabel("Rescaling (a.u.)")
    axes[1].set_ylabel("y pos (mm)")
    axes[1].set_xlabel("x pos (mm)")
    # Image extents re-centred so the detected centre sits at x = 0 and the
    # vertical midline at y = 0.
    w1 = zone.img_xdomain[0] - img_center
    w2 = zone.img_xdomain[1] - img_center
    h1 = zone.img_ydomain[0] - 0.5 * (zone.img_ydomain[0] + zone.img_ydomain[1])
    h2 = zone.img_ydomain[1] - 0.5 * (zone.img_ydomain[0] + zone.img_ydomain[1])
    l1, = axes[0].plot(x_plot - img_center, dist_lsa, color=palette[3], label = "LSA bias")
    axes[0].yaxis.set_ticks([])
    # Second curve shares the x-axis on a twinned y-axis (appended as axes[2]).
    axes.append(axes[0].twinx())
    l2, = axes[2].plot(x_plot - img_center, dist_sum_peaks['dist'], color=palette[4], label = "RGB bias")
    axes[2].yaxis.set_ticks([])
    plt.legend([l1, l2],["LSA bias", "RGB bias"], loc = 'upper right', frameon=False)
    # Size of the image in pixels (size of original image)
    width, height = img.size
    # Setting the points for cropped image (keep only the bottom half)
    left = 0
    top = height/2
    right = width
    bottom = height
    # Cropped image of above dimension
    img = img.crop((left, top, right, bottom))
    width, height = img.size
    im = axes[1].imshow(img, extent=[w1,w2,h1,h2], aspect = 'auto')
    axes[1].set_xlim([-0.55, 0.55])
    # Mark every fitted peak position (last entry excluded) on the image.
    for bias_i in bias_parameter_centered[:-1]:
        axes[1].axvline(x=bias_i[1], ymin = (h2), ymax = 2.2*h2,
                color=palette[8], linewidth = 1.0)
    title_str = "Dwell "+str(meta_img["dwell"])+"\u03bcs, Tpeak "+str(meta_img["Tpeak"])+"℃"
    plt.title(title_str)
    plt.savefig(plt_out, format='png')
    plt.close(fig)
    rescaling_data["meta_data"] = meta_img
    rescaling_data["rescaling_parameters"] = bias_parameter_centered
    rescaling_datas.append(rescaling_data)
# Serializing json
json_object = json.dumps(rescaling_datas, indent = 4)
# Writing to json
with open("bias.json", "w") as outfile:
    outfile.write(json_object)
| 35.492228 | 163 | 0.646277 |
e925522b3d3915457215980e5bca266c8fd2ff38 | 2,448 | py | Python | monitoring/automation/monitor.py | shane0/flask-website-monitor | 39031b9207c97baef4b10a792e038f241bcdc857 | [
"MIT"
] | 1 | 2017-04-13T05:29:15.000Z | 2017-04-13T05:29:15.000Z | monitoring/automation/monitor.py | shane0/flask-website-monitor | 39031b9207c97baef4b10a792e038f241bcdc857 | [
"MIT"
] | 1 | 2017-04-12T23:44:58.000Z | 2017-04-12T23:44:58.000Z | monitoring/automation/monitor.py | shane0/flask-website-monitor | 39031b9207c97baef4b10a792e038f241bcdc857 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A website monitor.
"""
import sys
import traceback
import requests
import re
import json
import datetime
# Path of the JSON config mapping site names to URLs, read by check().
DEFAULT_CONFIG_FILE = 'config.json'
def check():
    """Poll every configured website and return an HTML status table.

    Loads the site map from DEFAULT_CONFIG_FILE (expects a top-level
    "websites" mapping of name -> url), issues a GET per site, prints each
    result to stdout and returns the assembled HTML fragment. On a broken
    or missing config it returns the plain string 'Fix your config.'.
    """
    # Browser-like headers so sites that block obvious bots still answer.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,ja;q=0.2',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
    }
    try:
        config_file = DEFAULT_CONFIG_FILE
        with open(config_file) as config_data:
            config = json.load(config_data)
    # Narrowed from a bare `except:` -- only I/O and JSON-decoding errors
    # mean "bad config"; anything else should surface.
    except (OSError, ValueError):
        return ('Fix your config.')

    websites = config["websites"]
    results = []

    class Result:
        """One site's probe outcome."""

        def __init__(self, site, url, status):
            self.site = site
            self.url = url
            self.status = status

        def __str__(self):
            # Fixed to read the instance's own fields: the original read the
            # enclosing loop variables (status/site/url) and only produced
            # the right text because it was printed in the same iteration.
            return '%-8s %-25s %-45s' % (self.status, self.site, self.url)

        def to_html(self):
            color = 'green' if self.status == 'OK' else 'red'
            return '''<tr style="height: 30px;">
            <td style="text-align: center; color: %s">%s</td>
            <td>%s</td>
            <td><a href="%s">%s</a></td>
            </tr>''' % (color, self.status, self.site, self.url, self.url)

    now = datetime.datetime.now()
    print(now)
    for site in sorted(websites):
        url = websites[site]
        try:
            res = requests.get(websites[site], headers=headers)
            # Any non-200 code is reported verbatim.
            status = 'OK' if res.status_code == 200 else res.status_code
        # Narrowed from a bare `except:`; every requests failure (DNS,
        # connection, timeout, ...) derives from RequestException.
        except requests.RequestException:
            status = 'TIMEOUT'
        result = Result(site, url, status)
        results.append(result)
        print(result)

    body = "<h3>Site Monitor - %s</h3>" % now
    body += '<table class="table" >'
    body += '''<thead><tr>
    <th style="width: 15%%">STATUS</th>
    <th style="width: 30%%">SITE</th>
    <th style="width: 55%%">URL</th>
    </tr></thead>'''
    body_str = ''.join([r.to_html() for r in sorted(results, key=lambda rst: rst.site)])
    body += '<tbody>%s</tbody>' % body_str
    body += '</table>'
    print(body)
    return body
| 28.137931 | 126 | 0.541258 |
e9268aab7efb78626ab35cbb0daf3f9adf12bcb0 | 253 | py | Python | tests/test_MicropythonBoards.py | dwighthubbard/micropython-cloudmanager | 1b41eeaf7f5a34a622826bf0030a9f5c45d1aefc | [
"MIT"
] | 1 | 2017-02-22T03:18:48.000Z | 2017-02-22T03:18:48.000Z | tests/test_MicropythonBoards.py | dwighthubbard/micropython-cloudmanager | 1b41eeaf7f5a34a622826bf0030a9f5c45d1aefc | [
"MIT"
] | null | null | null | tests/test_MicropythonBoards.py | dwighthubbard/micropython-cloudmanager | 1b41eeaf7f5a34a622826bf0030a9f5c45d1aefc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import sys
sys.path.insert(0, '.')
from cloudmanager.board import MicropythonBoards
# Broadcast an `os.uname()` query to every registered MicroPython board and
# print each board's response, stripped of surrounding whitespace.
for result in MicropythonBoards().execute("import os;print(os.uname())"):
    print(result.read().strip())
| 25.3 | 73 | 0.754941 |
e92912ace35fc868f85b6a3bdb13260570590334 | 412 | py | Python | Chapter03/c3_27_datadotworld_1.py | andrewjcoxon/Hands-On-Data-Science-with-Anaconda | 82504a059ecd284b3599fa9af2b3eb6bbd6e28f3 | [
"MIT"
] | 25 | 2018-06-25T16:21:09.000Z | 2022-02-08T09:28:29.000Z | Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter03/c3_27_datadotworld_1.py | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | null | null | null | Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter03/c3_27_datadotworld_1.py | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | 17 | 2018-06-15T02:55:30.000Z | 2022-03-09T15:24:42.000Z | """
Name : c3_27_datadotworld_1.py
Book : Hands-on Data Science with Anaconda)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 1/15/2018
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import datadotworld as dw
dataset = 'jonloyens/an-intro-to-dataworld-dataset'
data = dw.load_dataset(dataset, force_update=True)
# Fixed: `dataset` is only the id string; the object returned by
# load_dataset (`data`) is what exposes the .dataframes mapping.
list(data.dataframes)
e92a3ce5abab1bfe02516472d0fc6c56a482d48d | 15,964 | py | Python | strutil.py | IloveKanade/k3fmt | 13a81562b9fc706dbf7fc05fcae130260bc2551d | [
"MIT"
] | null | null | null | strutil.py | IloveKanade/k3fmt | 13a81562b9fc706dbf7fc05fcae130260bc2551d | [
"MIT"
] | 3 | 2021-08-06T07:24:40.000Z | 2022-03-23T06:58:36.000Z | strutil.py | IloveKanade/k3fmt | 13a81562b9fc706dbf7fc05fcae130260bc2551d | [
"MIT"
] | 1 | 2021-08-04T08:41:33.000Z | 2021-08-04T08:41:33.000Z | import re
import os
import errno
import string
import subprocess
import k3color
listtype = (tuple, list)
invisible_chars = ''.join(map(chr, list(range(0, 32))))
invisible_chars_re = re.compile('[%s]' % re.escape(invisible_chars))
def break_line(linestr, width):
lines = linestr.splitlines()
rst = []
space = ' '
if isinstance(linestr, k3color.Str):
space = k3color.Str(' ')
for line in lines:
words = line.split(' ')
buf = words[0]
for word in words[1:]:
if len(word) + len(buf) + 1 > width:
rst.append(buf)
buf = word
else:
buf += space + word
if buf != '':
rst.append(buf)
return rst
def line_pad(linestr, padding=''):
"""
:param linestr: multiple line string with `\n` as line separator.
:param padding: left padding string to add before each line.
It could also be a callable object that returns a string.
This is useful when creating dynamic padding.
:return: multiple line string with `\n` as line separator, with left padding added.
"""
lines = linestr.split("\n")
if type(padding) in (str, bytes):
lines = [padding + x for x in lines]
elif callable(padding):
lines = [padding(x) + x for x in lines]
lines = "\n".join(lines)
return lines
def _to_str(y):
if isinstance(y, k3color.Str):
pass
elif isinstance(y, int):
y = str(y)
elif isinstance(y, listtype):
y = str(y)
return y
def struct_repr(data, key=None):
"""
Render primitive or composite data to a structural representation string list.
:param data: a number, string, list or dict to render to a structural representation.
:param key: is a callable that is used to sort dict keys. It is used in sort: `keys.sort(key=key)`.
:return: a list of string.
Render a data to a multi-line structural(yaml-like) representation.
a = {
1: 3,
'x': {1:4, 2:5},
'l': [1, 2, 3],
}
for l in struct_repr(a):
print l
"""
# Output:
# 1 : 3
# l : - 1
# - 2
# - 3
# x : 1 : 4
# 2 : 5
if type(data) in listtype:
if len(data) == 0:
return ['[]']
max_width = 0
elt_lines = []
for elt in data:
sublines = struct_repr(elt)
sublines_max_width = max([len(x) for x in sublines])
if max_width < sublines_max_width:
max_width = sublines_max_width
elt_lines.append(sublines)
lines = []
for sublines in elt_lines:
# - subline[0]
# subline[1]
# ...
lines.append('- ' + sublines[0].ljust(max_width))
for l in sublines[1:]:
lines.append(' ' + l.ljust(max_width))
return lines
elif type(data) == dict:
if len(data) == 0:
return ['{}']
max_k_width = 0
max_v_width = 0
kvs = []
for k, v in data.items():
k = utf8str(k)
sublines = struct_repr(v)
sublines_max_width = max([len(x) for x in sublines])
if max_k_width < len(k):
max_k_width = len(k)
if max_v_width < sublines_max_width:
max_v_width = sublines_max_width
kvs.append((k, sublines))
kvs.sort(key=key)
lines = []
for k, sublines in kvs:
# foo : sub-0
# sub-1
# b : sub-0
# sub-0
lines.append(k.rjust(max_k_width) + ' : ' +
sublines[0].ljust(max_v_width))
for l in sublines[1:]:
lines.append(' '.rjust(max_k_width) +
' ' + l.ljust(max_v_width))
return lines
else:
data = filter_invisible_chars(data)
return [utf8str(data)]
def filter_invisible_chars(data):
"""
Filters invisible characters in a string or a unicode object
:param data: a string or unicode object to filter invisible characters
:return: a filtered string or unicode object
"""
# from pykit.strutil import filter_invisible_chars
# cases = [
# "1273883926293937729\000\001\031",
# "\x00\x01\x02\x03\x04\005",
# u"1122299299299299292",
# u"\x00\x01\x02\x03\x04\005",
# ]
#
# rst = []
# for case in cases:
# rst.append(strutil.filter_invisible_chars(case))
#
# for r in rst:
# print(r)
# '1273883926293937729'
# ''
# u'1122299299299299292'
# u''
if type(data) not in (bytes, str):
return data
return invisible_chars_re.sub('', data)
def _get_key_and_headers(keys, rows):
if keys is None:
if len(rows) == 0:
keys = []
else:
r0 = rows[0]
if type(r0) == dict:
keys = list(r0.keys())
keys.sort()
elif type(r0) in listtype:
keys = [i for i in range(len(r0))]
else:
keys = ['']
_keys = []
column_headers = []
for k in keys:
if type(k) not in listtype:
k = [k, k]
_keys.append(k[0])
column_headers.append(str(k[1]))
return _keys, column_headers
def utf8str(s):
if isinstance(s, bytes):
return str(s, "utf-8")
return str(s)
def format_line(items, sep=' ', aligns=''):
"""
It formats a list in a multi row manner.
It is compatible with colored string such as those created with `strutil.blue("blue-text")`.
:param items: elements in a line.
Each element could be a `string` or a `list` of `string`.
If it is a `list` of `string`, it would be rendered as a multi-row
element.
:param sep: specifies the separator between each element in a line.
By default it is a single space `" "`.
:param aligns: specifies alignment for each element.
- `l` for left-align.
- `r` for right-align.
If no alignment specified for i-th element, it will be aligned to right by default.
:return: formatted string.
format a line with multi-row columns.
"""
# items = [ 'name:',
# [ 'John',
# 'j is my nick'],
# [ 'age:' ],
# [ 26, ],
# [ 'experience:' ],
# [ '2000 THU',
# '2006 sina',
# '2010 other'
# ],
# ]
# format_line(items, sep=' | ', aligns = 'llllll')
#
# outputs:
# name: | John | age: | 26 | experience: | 2000 THU
# | j is my nick | | | | 2006 sina
# | | | | | 2010 other
aligns = [x for x in aligns] + [''] * len(items)
aligns = aligns[:len(items)]
aligns = ['r' if x == 'r' else x for x in aligns]
items = [(x if type(x) in listtype else [x])
for x in items]
items = [[_to_str(y)
for y in x]
for x in items]
maxHeight = max([len(x) for x in items] + [0])
def max_width(x):
return max([y.__len__()
for y in x] + [0])
widths = [max_width(x) for x in items]
items = [(x + [''] * maxHeight)[:maxHeight]
for x in items]
lines = []
for i in range(maxHeight):
line = []
for j in range(len(items)):
width = widths[j]
elt = items[j][i]
actualWidth = elt.__len__()
elt = utf8str(elt)
if actualWidth < width:
padding = ' ' * (width - actualWidth)
if aligns[j] == 'l':
elt = elt + padding
else:
elt = padding + elt
line.append(elt)
line = sep.join(line)
lines.append(line)
return "\n".join(lines)
def format_table(rows,
keys=None,
colors=None,
sep=' | ',
row_sep=None):
"""
Render a list of data into a table.
Number of rows is `len(rows)`.
Number of columns is `len(rows[0])`.
:param rows: list of items to render.
Element of list can be number, string, list or dict.
:param keys: specifies indexes(for list) or keys(for dict) to render.
It is a list.
Indexes or keys those are not in this list will not be rendered.
It can also be used to specify customized column headers, if element in
list is a 2-element tuple or list:
:param colors: specifies the color for each column.
It is a list of color values in number or color name strings.
If length of `colors` is smaller than the number of columns(the number of
indexes of a list, or keys of a dict), the colors are repeated for columns
after.
:param sep: specifies char to separate rows.
By default it is None, it means do not add line separator.
:param row_sep: specifies column separator char.
By default it is `" | "`.
:return: a list of string.
"""
keys, column_headers = _get_key_and_headers(keys, rows)
colors = _get_colors(colors, len(keys))
# element of lns is a mulit-column line
# lns = [
# # line 1
# [
# # column 1 of line 1
# ['name:', # row 1 of column 1 of line 1
# 'foo', # row 2 of column 1 of line 1
# ],
#
# # column 2 of line 1
# ['school:',
# 'foo',
# 'bar',
# ],
# ],
# ]
# headers
lns = [
[[a + ': ']
for a in column_headers]
]
for row in rows:
if row_sep is not None:
lns.append([[None] for k in keys])
if type(row) == dict:
ln = [struct_repr(row.get(k, ''))
for k in keys]
elif type(row) in listtype:
ln = [struct_repr(row[int(k)])
if len(row) > int(k) else ''
for k in keys]
else:
ln = [struct_repr(row)]
lns.append(ln)
def get_max_width(cols):
return max([len(utf8str(c[0]))
for c in cols] + [0])
max_widths = [get_max_width(cols) for cols in zip(*lns)]
rows = []
for row in lns:
ln = []
for i in range(len(max_widths)):
color = colors[i]
w = max_widths[i]
ln.append([k3color.Str(x.ljust(w), color)
if x is not None else row_sep * w
for x in row[i]])
rows.append(format_line(ln, sep=sep))
return rows
def _get_colors(colors, col_n):
if colors is None:
colors = []
colors = colors or ([None] * col_n)
while len(colors) < col_n:
colors.extend(colors)
colors = colors[:col_n]
return colors
def _findquote(line, quote):
if len(quote) == 0:
return -1, -1, []
i = 0
n = len(line)
escape = []
while i < n:
if line[i] == '\\':
escape.append(i)
i += 2
continue
if line[i] in quote:
quote_s = i - len(escape)
j = i
i += 1
while i < n and line[i] != line[j]:
if line[i] == '\\':
escape.append(i)
i += 2
continue
i += 1
if i < n:
quote_e = i - len(escape)
return quote_s, quote_e, escape
else:
return quote_s, -1, escape
i += 1
return -1, -1, escape
def tokenize(line, sep=None, quote='"\'', preserve=False):
"""
:param line: the line to tokenize.
:param sep: is None or a non-empty string separator to tokenize with.
If sep is None, runs of consecutive whitespace are regarded as a single
separator, and the result will contain no empty strings at the start or end
if the string has leading or trailing whitespace. Consequently, splitting
an empty string or a string consisting of just whitespace with a None
separator returns `[]`. Just like `str.split(None)`.
By default, `sep` is None.
:param quote:Every character in `quote` is regarded as a quote. Add a `\` prefix to make
an exception. Segment between the same quotes is preserved.
By default, `quote` is `'"\''`.
:param preserve: preserve the quote itself if `preserve` is `True`.
By default, `preserve` is `False`.
:return: a list of string.
"""
if sep == quote:
raise ValueError('diffrent sep and quote is required')
if sep is None:
if len(line) == 0:
return []
line = line.strip()
rst = ['']
n = len(line)
i = 0
while i < n:
quote_s, quote_e, escape = _findquote(line[i:], quote)
if len(escape) > 0:
lines = []
x = 0
for e in escape:
lines.append(line[x:i + e])
x = i + e + 1
lines.append(line[x:])
line = ''.join(lines)
n = len(line)
if quote_s < 0:
sub = n
else:
sub = i + quote_s
if i < sub:
sub_rst = line[i:sub].split(sep)
if sep is None:
if line[sub - 1] in string.whitespace:
sub_rst.append('')
if line[i] in string.whitespace:
sub_rst.insert(0, '')
head = rst.pop()
sub_rst[0] = head + sub_rst[0]
rst += sub_rst
if quote_s < 0:
break
# discard incomplete
# 'a b"c' -> ['a']
if quote_e < 0:
rst.pop()
break
head = rst.pop()
if preserve:
head += line[i + quote_s:i + quote_e + 1]
else:
head += line[i + quote_s + 1:i + quote_e]
rst.append(head)
i += quote_e + 1
return rst
def parse_colon_kvs(data):
data = tokenize(data, quote='"\'')
ret = {}
for buf in data:
if ':' not in buf:
raise ValueError('invalid arguments, arguments'
'need key-val like: "k:v"')
k, v = buf.split(':', 1)
ret[k] = v
return ret
def page(lines, max_lines=10, control_char=True, pager=('less',)):
"""
Display `lines` of string in console, with a pager program (`less`) if too many
lines.
It could be used in a interactive tool to display large content.
It output strings directly to stdout.
:param lines: is `list` of lines to display.
:param max_lines: specifies the max lines not to use a pager.
By default it is 10 lines.
:param control_char: specifies if to interpret controlling chars, such as color char in terminal.
:param pager: specifies the program as a pager.
It is a list of command and argument.
By default it is `('less',)`.
:return: Nothing
"""
if len(lines) > max_lines:
pp = {'stdin': subprocess.PIPE,
'stdout': None,
'stderr': None}
cmd_pager = list(pager)
if control_char:
if pager == ('less',):
cmd_pager += ['-r']
subproc = subprocess.Popen(cmd_pager,
close_fds=True,
cwd='./',
**pp)
try:
out, err = subproc.communicate(bytes('\n'.join(lines).encode("utf-8")))
except IOError as e:
if e[0] == errno.EPIPE:
pass
else:
raise
subproc.wait()
else:
os.write(1, bytes(('\n'.join(lines) + "\n").encode("utf-8")))
| 26.084967 | 103 | 0.5057 |
e92ba6f82fbd7b5de0f238a51cd87521f2ccd146 | 16,920 | py | Python | camera.py | Euclideon/udSDKPython | a82157ab6382fda6291bdcca9ec2a51203b95b2a | [
"MIT"
] | 4 | 2020-09-03T05:35:15.000Z | 2021-11-08T04:31:55.000Z | camera.py | Euclideon/udSDKPython | a82157ab6382fda6291bdcca9ec2a51203b95b2a | [
"MIT"
] | 1 | 2020-08-18T06:49:21.000Z | 2020-08-18T06:49:21.000Z | camera.py | Euclideon/udSDKPython | a82157ab6382fda6291bdcca9ec2a51203b95b2a | [
"MIT"
] | 1 | 2020-09-11T07:52:32.000Z | 2020-09-11T07:52:32.000Z | import logging
import math
import numpy as np
import pyglet
import udSDK
logger = logging.getLogger(__name__)
class Camera():
    """
    Base camera class for the Euclideon udSDK Python sample.

    Sets the default behaviour for a perspective camera: stores the camera
    state (pose, projection, input flags) and provides functions for
    modifying that state.

    Key/button input is passed from the UDViewport object via the
    set_{}Pressed functions (for mapped functions); mouse input is passed
    through the on_mouse_drag function.

    This is intended to be subclassed for custom camera behaviour.
    """
    def __init__(self, renderTarget: udSDK.udRenderTarget):
        # Movement speed in world units; fastSpeed is used while shift is held.
        self.normalSpeed = 0.3
        self.fastSpeed = 1
        self.moveSpeed = self.normalSpeed
        self.moveVelocity = [0, 0, 0]
        # 4x4 camera pose matrix; look_direction() fills its rows as
        # (tangent, forward, axis, position).
        self.matrix = np.identity(4)
        self._view = renderTarget
        self.position = [0, 0, 0]
        # Clip plane distances and vertical field of view (degrees).
        self.nearPlane = 0.01
        self.farPlane = 2
        self.FOV = 60
        #booleans indicating button activation
        self.forwardPressed = False
        self.backPressed = False
        self.rightPressed = False
        self.leftPressed = False
        self.upPressed = False
        self.downPressed = False
        self.shiftPressed = False
        self.ctrlPressed = False
        self.zoomInPressed = False
        self.zoomOutPressed = False
        # Spherical-coordinate cache of the facing direction (updated by rotate_polar).
        self.theta = 0
        self.phi = 0
        self.zoom = 1
        self.mouseSensitivity = 1 / 100
        self.camRotation = [0, 0, 0]
        # Point used by look_at(); orbit-style subclasses move this around.
        self.lookAtTarget = [0, 0, 0]
        self.rotationMatrix = np.array([[1, 0, 0],
                                        [0, 1, 0],
                                        [0, 0, 1]])
        self.facingDirection = [0, 1, 0]
        self.rotationAxis = np.array([0,0,1])
        self.tangentVector = np.array([0,1,0])
        self._projectionMatrix = []
        self.controlString = """
    W,S,A,D: Move\n
    E: Move up\n
    C: Move Down\n
    Click + drag: Look around\n
    Shift (Hold): Increase speed\n
    O: Zoom in\n
    P: Zoom out\n
    """
    def on_cast(self):
        """
        To be called when this class is converted to from another Camera
        derived class; ensures that appropriate variables are set in lieu of
        __init__ being called, without resetting all variables.
        Returns
        -------
        """
        pass
    @property
    def position(self):
        return self.__position
    @position.setter
    def position(self, newposition):
        # Store as an immutable tuple, mirror it into the pose matrix
        # translation row, and push the new pose to the render target.
        self.__position = tuple(newposition)
        self.matrix[3, :3] = newposition
        self._view.SetMatrix(udSDK.udRenderTargetMatrix.Camera, self.matrix.flatten())
    def get_controls_string(self):
        # Human-readable key bindings for display in the UI.
        return self.controlString
    def get_view_vertices(self):
        """
        Returns
        -------
        the extents of the viewing volume projected onto 2d space
        """
        #TODO make this correctly display the location of near and far plane
        rat = np.tan(self.FOV/2/180*np.pi)/self.farPlane
        nearLeft = [-self.nearPlane * rat, self.nearPlane/self.farPlane]
        farLeft = [-self.farPlane * rat, self.farPlane/self.farPlane]
        nearRight = [self.nearPlane * rat, self.nearPlane/self.farPlane]
        farRight = [self.farPlane * rat, self.farPlane/self.farPlane]
        return [farLeft, nearLeft, nearRight, farRight]
    # The set_*Pressed methods below are bound to key events by the viewport;
    # they just latch the button state for the next update_move_direction().
    def set_forwardPressed(self, val:bool):
        self.forwardPressed = val
    def set_backPressed(self, val):
        self.backPressed = val
    def set_rightPressed(self, val):
        self.rightPressed = val
    def set_leftPressed(self, val):
        self.leftPressed = val
    def set_upPressed(self, val):
        self.upPressed = val
    def set_downPressed(self, val):
        self.downPressed = val
    def set_shiftPressed(self, val):
        self.shiftPressed = val
    def set_ctrlPressed(self, val):
        self.ctrlPressed = val
    def set_zoomInPressed(self, val):
        self.zoomInPressed = val
    def set_zoomOutPressed(self, val):
        self.zoomOutPressed = val
    def reset_projection(self):
        # Restore the default perspective projection (subclasses override).
        self.set_projection_perspective()
    def on_key_press(self, symbol, modifiers):
        """
        Defined for passing key presses not mapped using the key bindings in
        the view port; override in subclasses.
        Parameters
        ----------
        symbol
        modifiers
        Returns
        -------
        """
        pass
    def on_key_release(self, symbol, modifiers):
        pass
    def rotate_polar(self, vec, dtheta, dphi):
        """
        Takes a change in polar coordinates and updates the camera rotation
        based on it.
        Returns
        -------
        a copy of vector vec rotated by dtheta in the xy plane and dphi in
        inclination
        """
        r = math.sqrt(vec[0]**2+vec[1]**2+vec[2]**2)
        theta = math.atan2(vec[1], vec[0])
        phi = math.acos(vec[2]/r)
        #prevent rotation such that the vector is pointing directly up or down
        thresh = 0.1
        if abs(phi + dphi) < thresh or abs(phi + dphi - math.pi) < thresh:
            dphi = 0
        xprime = r * math.sin(phi+dphi)*math.cos(theta+dtheta)
        yprime = r * math.sin(phi+dphi) * math.sin(theta + dtheta)
        zprime = r * math.cos(phi+dphi)
        # Cache the (pre-rotation) angles for later reference.
        self.phi = phi
        self.theta = theta
        return [xprime, yprime, zprime]
    def set_projection_perspective(self, near=None, far=None, FOV=None):
        # Missing arguments fall back to the stored clip planes / FOV;
        # an explicit FOV also updates the stored value.
        if near is None:
            near = self.nearPlane
        if far is None:
            far = self.farPlane
        if FOV is None:
            FOV = self.FOV
        else:
            self.FOV = FOV
        FOV = FOV/180*np.pi
        e = 1/np.tan(FOV/2)
        a = self._view.height/self._view.width
        # Row-major perspective matrix; the y and z rows are swapped relative
        # to the classic GL layout (presumably a z-up convention -- confirm
        # against udSDK's expectations).
        self._projectionMatrix = \
            [
                e*a, 0, 0, 0,
                0, 0, (far+near)/(far-near), 1,
                0, e, 0, 0,
                0, 0, -(2*far*near)/(far-near), 0
            ]
        self._view.SetMatrix(udSDK.udRenderTargetMatrix.Projection, self._projectionMatrix)
    def set_projection_ortho(self, left, right, top, bottom, near, far):
        # Orthographic projection with the same swapped y/z row layout as
        # set_projection_perspective.
        self._projectionMatrix = \
            [
                2/(right-left), 0, 0, 0,
                0, 0, 2/(far-near), 0,
                0, 2/(top - bottom), 0, 0,
                -(right+left)/(right-left), -(top+bottom)/(top-bottom), -(far+near)/(far-near), 1
            ]
        self._view.SetMatrix(udSDK.udRenderTargetMatrix.Projection, self._projectionMatrix)
    def set_rotation(self, x=0, y=-5, z=0, roll=0, pitch=0, yaw=0):
        """
        Sets the camera matrix to position (x, y, z) with a rotation of
        yaw, pitch, roll (radians).
        Parameters
        ----------
        x
        y
        z
        roll
        pitch
        yaw
        Returns
        -------
        """
        sy = math.sin(yaw)
        cy = math.cos(yaw)
        sp = math.sin(pitch)
        cp = math.cos(pitch)
        sr = math.sin(roll)
        cr = math.cos(roll)
        # Standard yaw-pitch-roll rotation with the translation in the last row.
        self.matrix = np.array([
            [cy*cp, cy*sp*sr-sy*cr, cy*sp*cr+sy*sr, 0],
            [sy*cp, sy*sp*sr+cy*cr, sy*sp*cr-cy*sr, 0],
            [-sp, cp*sr, cp*cr, 0],
            [x, y, z, 1]
        ])
        self.rotationMatrix = self.matrix[:3, :3]
        self._view.SetMatrix(udSDK.udRenderTargetMatrix.Camera, self.matrix.flatten())
    def axisAngle(self, axis, theta):
        # Build a rotation of `theta` radians about `axis` (axis-angle form),
        # keeping the current position in the translation row.
        # NOTE(review): unlike set_rotation/look_direction this neither pushes
        # the matrix to the render target nor refreshes rotationMatrix --
        # confirm callers handle that.
        #cTheta = np.dot(np.array([0,1,0]), dPoint) / np.linalg.norm(dPoint)
        #theta = np.arccos(cTheta)
        cTheta = np.cos(theta)
        sTheta = np.sin(theta)
        self.matrix = np.array(
            [
                [cTheta + axis[0] ** 2 * (1 - cTheta), axis[0] * axis[1] * (1 - cTheta) - axis[2] * sTheta, axis[0] * axis[2] * (1 - cTheta), 0],
                [axis[1] * axis[0] * (1 - cTheta) + axis[2] * sTheta, cTheta + axis[1] ** 2 * (1 - cTheta), axis[1] * axis[2] * (1 - cTheta) - axis[0] * sTheta, 0],
                [axis[2] * axis[0] * (1 - cTheta) - axis[1] * sTheta, axis[2] * axis[1] * (1 - cTheta) + axis[0] * sTheta, cTheta + axis[2] ** 2 * (1 - cTheta), 0],
                [self.position[0], self.position[1], self.position[2], 1]
            ]
        )
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # Dragging rotates the facing direction in spherical coordinates.
        vec = self.rotate_polar(self.facingDirection,dx/100,dy/100)
        self.look_direction(np.array(vec))
    def look_at(self, lookAtPoint=None, cameraPosition=None):
        """
        Faces the camera at lookAtPoint, positioning the camera at
        cameraPosition.
        Parameters
        ----------
        cameraPosition: position of the camera
        lookAtPoint: x, y, z tuple to face the camera towards
        """
        if cameraPosition is None:
            cameraPosition = self.position
        else:
            self.position = cameraPosition
        if lookAtPoint is None:
            lookAtPoint = self.lookAtTarget
        if not np.array_equal(lookAtPoint, cameraPosition):
            #calculate our axis of rotation based on the distance between these points
            dPoint = np.array(lookAtPoint) - np.array(cameraPosition)
        else:
            # Degenerate case (target == position): fall back to a fixed direction.
            dPoint = np.array([1, 1, 0])
        self.look_direction(dPoint)
    def look_direction(self, dPoint: np.array):
        """
        Points the camera in the direction vector dPoint.
        Assumes that the tangent vector has a z value of zero (i.e. no roll).
        Parameters
        ----------
        dPoint
        Returns
        -------
        """
        # Solve for a horizontal tangent (right) vector perpendicular to dPoint.
        tangent = [0, 0, 0]
        if dPoint[1] != 0:
            tangent[0] = (dPoint[0]-np.sqrt(dPoint[0]**2+4*dPoint[1]**2))/(2*dPoint[1])
        elif dPoint[2]>0:
            tangent[0] = 1
        else:
            tangent[0] = -1
        tangent[1] = 1-tangent[0]**2
        tangent = -np.array(tangent)
        tangent = tangent / np.sqrt(tangent.dot(tangent))
        forward = dPoint/np.sqrt(dPoint.dot(dPoint))
        # Up-ish axis completes the (tangent, forward, axis) frame.
        axis = np.cross(tangent, forward)
        axis = axis / np.sqrt(axis.dot(axis))
        self.matrix = np.array(
            [
                [tangent[0], tangent[1], tangent[2], 0],
                [forward[0], forward[1], forward[2], 0],
                [axis[0], axis[1], axis[2], 0],
                [self.position[0], self.position[1], self.position[2], 1]
            ]
        )
        # Cache the frame vectors for subclasses (orbit/pan behaviour).
        self.rotationAxis = axis
        self.tangentVector = tangent
        self.rotationMatrix = self.matrix[:3, :3]
        self.facingDirection = np.array([0,1,0]).dot(self.rotationMatrix).tolist()
        self._view.SetMatrix(udSDK.udRenderTargetMatrix.Camera, self.matrix.flatten())
    def update_move_direction(self):
        """
        updates the velocity and projection based on what keys have been pressed since the last call
        """
        self.moveVelocity = [0, 0, 0]# in local coordinates
        if self.shiftPressed:
            self.moveSpeed = self.fastSpeed
        else:
            self.moveSpeed = self.normalSpeed
        if self.forwardPressed:
            self.moveVelocity[1] += self.moveSpeed
        if self.backPressed:
            self.moveVelocity[1] -= self.moveSpeed
        if self.rightPressed:
            self.moveVelocity[0] += self.moveSpeed
        if self.leftPressed:
            self.moveVelocity[0] -= self.moveSpeed
        if self.upPressed:
            self.moveVelocity[2] += self.moveSpeed
        if self.downPressed:
            self.moveVelocity[2] -= self.moveSpeed
        if self.zoomInPressed:
            self.zoom += 1
        if self.zoomOutPressed and self.zoom>1:
            self.zoom -= 1
        self.mouseSensitivity = 0.1/self.zoom
        # NOTE(review): self.zoom is passed as the FOV argument (degrees), so
        # zooming works by narrowing the field of view; zoom == 1 yields a
        # 1-degree FOV -- confirm this is intended.
        self.set_projection_perspective(self.nearPlane, self.farPlane, self.zoom)
        # Transform the local-space velocity into world space.
        self.moveVelocity = np.array(self.moveVelocity).dot(self.rotationMatrix).tolist()
    def update_position(self, dt):
        # Integrate the velocity over the elapsed time dt (seconds).
        self.update_move_direction()
        newposition = [0, 0, 0]
        newposition[0] = self.position[0] + self.moveVelocity[0] * dt
        newposition[1] = self.position[1] + self.moveVelocity[1] * dt
        newposition[2] = self.position[2] + self.moveVelocity[2] * dt
        self.position = newposition
class OrthoCamera(Camera):
    """Camera variant that renders through an orthographic projection."""
    def __init__(self, renderTarget):
        super().__init__(renderTarget)
        self.FOV = 90
    def on_cast(self):
        # Reinitialise ortho-specific state when cast from another camera class.
        self.controlString = """
    Ortho Camera (experimental):
    W,S,A,D: Move\n
    E: Move up\n
    C: Move Down\n
    Click + drag: Look around\n
    Shift (Hold): Increase speed\n
    O: Zoom in\n
    P: Zoom out\n
    """
        self.FOV = 90
    def update_move_direction(self):
        super().update_move_direction()
        # Normalise the horizontal component of the velocity, then re-apply
        # the vertical component unscaled.
        self.moveVelocity[2] = 0
        v = np.array(self.moveVelocity)
        mag = np.sqrt(v.dot(v))
        if mag != 0:
            self.moveVelocity = (v/mag).tolist()
        if self.upPressed:
            self.moveVelocity[2] += self.moveSpeed
        if self.downPressed:
            self.moveVelocity[2] -= self.moveSpeed
    def update_position(self, dt):
        super().update_position(dt)
        ar = self._view.width/self._view.height
        zoom = np.exp(self.zoom)
        # NOTE(review): the view width scales by self.zoom but the mouse
        # sensitivity scales by exp(self.zoom) -- confirm the mismatch is
        # intentional.
        viewWidth = 100/self.zoom
        self.mouseSensitivity = 0.1/ zoom
        self.set_projection_ortho(-ar/2*viewWidth, ar/2*viewWidth, 1/ar/2*viewWidth, -1/ar/2*viewWidth, self.nearPlane, self.farPlane)
    def reset_projection(self):
        # Intentionally a no-op: update_position() rebuilds the ortho
        # projection every frame.
        pass
class MapCamera(OrthoCamera):
    """
    Orthographic camera that follows a target and remains a set height above it
    """
    def __init__(self, renderTarget, target, elevation):
        super().__init__(renderTarget)
        # `target` is any object exposing a .position sequence; `elevation`
        # is the fixed height maintained above it.
        self.target = target
        self.elevation = elevation
    class DefaultTarget(object):
        # Stand-in target pinned at the origin, for when no real target exists.
        def __init__(self):
            self.position = [0, 0, 0]
    def on_cast(self):
        pass
    #here we override the default control behaviour of the camera
    def update_move_direction(self):
        pass
    def on_mouse_drag(self, *args, **kwargs):
        pass
    def update_position(self, dt):
        # Hover `elevation` units above the target, looking straight down.
        self.position = [self.target.position[0], self.target.position[1], self.target.position[2]+self.elevation]
        self.look_direction(np.array([0, 0, -1]))
        ar = self._view.width/self._view.height
        zoom = self.zoom
        # Ortho extents grow with altitude so the visible map footprint stays
        # proportional to the camera height.
        self.set_projection_ortho(-ar/2*self.position[2]/zoom, ar/2*self.position[2]/zoom, 1/ar/2*self.position[2]/zoom, -1/ar/2*self.position[2]/zoom,self.nearPlane,self.farPlane)
class OrbitCamera(Camera):
    """
    Movement of this camera is relative to a fixed point in space
    (self.lookAtTarget).
    """
    def on_cast(self):
        self.controlString = """
    Orbit Camera (experimental):
    W,S,A,D: Move\n
    E: Move up\n
    C: Move Down\n
    Click + drag: Move rotation Centre\n
    Shift (Hold): Increase speed\n
    O: Zoom in\n
    P: Zoom out\n
    """
    def update_move_direction(self):
        # Re-aim at the orbit centre before computing the world-space velocity.
        self.look_at()
        super(OrbitCamera, self).update_move_direction()
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # Dragging pans in the camera's screen plane (tangent + up axis):
        # without ctrl it moves the orbit centre, with ctrl the camera itself.
        horiz = dx * self.tangentVector * self.mouseSensitivity
        vert = dy * self.rotationAxis * self.mouseSensitivity
        if not self.ctrlPressed:
            self.lookAtTarget = self.lookAtTarget + horiz + vert
        else:
            self.position = self.position - horiz - vert
class PerspectiveCamera(OrbitCamera):
    """Free-look variant of the orbit camera: the orbit centre is re-anchored
    directly in front of the camera (one facing-vector away) every frame."""
    def update_position(self, dt):
        # Glue the look-at point to the spot straight ahead of the lens before
        # the base class integrates movement.
        for axis, (pos, heading) in enumerate(zip(self.position, self.facingDirection)):
            self.lookAtTarget[axis] = pos + heading
        super().update_position(dt)
class TrackCamera(Camera):
    """Camera whose look-at target creeps along +y each tick; the camera is
    re-aimed at it after every positional update."""
    def update_position(self, dt):
        self.lookAtTarget[1] = self.lookAtTarget[1] + 0.0001
        super().update_position(dt)
        self.look_at()
class RecordCamera(Camera):
    """
    A camera class for manual generation and replay of flythroughs of models.

    The user defines waypoints by pressing space when the camera is at the
    desired locations; pressing enter replays the path; backspace deletes the
    most recently added waypoint.
    """
    def __init__(self, *args, **kwargs):
        # BUG FIX: this previously called super().__init(...) (missing the
        # trailing underscore), which raised AttributeError on construction.
        super().__init__(*args, **kwargs)
        self.on_cast()
    def on_cast(self):
        self.controlString = """
    Recording Camera:
    W,S,A,D: Move\n
    E: Move up\n
    C: Move Down\n
    Click + drag: Look around\n
    Shift (Hold): Increase speed\n
    O: Zoom in\n
    P: Zoom out\n
    Space: Record Position as Waypoint\n
    Backspace: Remove Last Waypoint\n
    Enter: Play back recorded path\n"""
        # Keep previously recorded waypoints when cast from another camera class.
        try:
            self.waypoints
        except AttributeError:
            self.waypoints = []
        self.replayInd = 0
        self.replaying = False
    def on_key_press(self, symbol, modifiers):
        if symbol == pyglet.window.key.SPACE:
            # position is stored as an immutable tuple, so no copy is needed.
            self.waypoints.append(self.position)
        if symbol == pyglet.window.key.ENTER:
            try:
                self.position = self.waypoints[0]
            except IndexError:
                return
            if len(self.replaying and []) or len(self.waypoints) > 1:
                pass
            if len(self.waypoints) > 1:
                # A replay needs a second waypoint to fly towards; previously
                # starting a replay with one waypoint crashed with IndexError
                # in update_move_direction.
                self.replaying = True
                self.replayInd = 1
        if symbol == pyglet.window.key.BACKSPACE:
            if self.waypoints:
                # Guard: pop() on an empty waypoint list raised IndexError.
                self.waypoints.pop()
    def update_move_direction(self):
        # Instances cast from other camera classes may not have this attribute.
        try:
            self.replaying
        except AttributeError:
            self.replaying = False
        if not self.replaying:
            super().update_move_direction()
            return
        # Linearly interpolate towards the current waypoint while easing the
        # camera's facing direction towards the direction of travel.
        dir = np.array(self.waypoints[self.replayInd]) - np.array(self.position)
        mag = np.linalg.norm(dir)  # distance remaining to the waypoint
        if mag > 0:
            # Guard against division by zero when already on the waypoint.
            ddir = dir/mag - np.array(self.facingDirection)
            dir = dir/mag * self.moveSpeed  # velocity towards the waypoint
            self.look_direction(np.array(self.facingDirection) + ddir / 10)
            self.moveVelocity = dir.tolist()
        if abs(mag) < self.moveSpeed:
            # As close as we can get in a single step: advance to the next
            # waypoint or end the replay.
            if self.replayInd+1 < len(self.waypoints):
                self.replayInd += 1
            else:
                self.replaying = False
                self.moveVelocity = [0, 0, 0]
                return
| 29.32409 | 176 | 0.642317 |
e930d65f391b7723982c2721df59191c1d9d3a9f | 316 | py | Python | src/menu.py | simonenicf/Escape-from-Narlilia | cd512860e74d5df81504cccbcb667fdf794c8dda | [
"MIT"
] | null | null | null | src/menu.py | simonenicf/Escape-from-Narlilia | cd512860e74d5df81504cccbcb667fdf794c8dda | [
"MIT"
] | 1 | 2020-11-03T13:41:49.000Z | 2020-11-03T14:05:22.000Z | src/menu.py | simonenicf/Escape-from-Narlilia | cd512860e74d5df81504cccbcb667fdf794c8dda | [
"MIT"
] | null | null | null | import sys
class Menu():
    """Console menu for the text-based adventure."""
    @staticmethod
    def print_menu():
        """Write the top-level menu options to stdout."""
        divider = 30 * "-"
        print (divider, "Text based adventure", divider)
        for option in ("1. start adventure (intro)",
                       "2. start adventure (no intro)",
                       "3. help",
                       "4. quit"):
            print (option)
        print (67 * "-")
        print(" ")
e9321bfef23bb209db9bf3ff382e024a2aab02ee | 337 | py | Python | envipyarclib/gptool/parameter/templates/ulong64array.py | envi-idl/envipyarclib | 90135652510c3d53c5f51177252c1fea2639bf22 | [
"MIT"
] | 1 | 2019-08-03T05:10:18.000Z | 2019-08-03T05:10:18.000Z | envipyarclib/gptool/parameter/templates/ulong64array.py | envi-idl/envipyarclib | 90135652510c3d53c5f51177252c1fea2639bf22 | [
"MIT"
] | null | null | null | envipyarclib/gptool/parameter/templates/ulong64array.py | envi-idl/envipyarclib | 90135652510c3d53c5f51177252c1fea2639bf22 | [
"MIT"
] | 1 | 2020-02-25T14:12:50.000Z | 2020-02-25T14:12:50.000Z | """
Defines the parameter template for the specified data type.
"""
from .basicarray import BASICARRAY
class ULONG64ARRAY(BASICARRAY):
    """GP parameter template for unsigned 64-bit integer arrays.

    All behaviour is inherited from BASICARRAY; this subclass exists so the
    data type maps to a distinct template class.
    """
def template():
    """Factory method returning a fresh instance of this parameter template class."""
    instance = ULONG64ARRAY('GPLong')
    return instance
| 19.823529 | 63 | 0.706231 |
e9324103dd727dbfbcd73f1ba4bae58a0ea2e051 | 41,336 | py | Python | Dataflow/full_executer_wordshop.py | Smurf-maker/WordShop | ac0095ee28207f23744337d4cb35c5ca764d7e26 | [
"MIT"
] | null | null | null | Dataflow/full_executer_wordshop.py | Smurf-maker/WordShop | ac0095ee28207f23744337d4cb35c5ca764d7e26 | [
"MIT"
] | null | null | null | Dataflow/full_executer_wordshop.py | Smurf-maker/WordShop | ac0095ee28207f23744337d4cb35c5ca764d7e26 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Full Executer WordShop.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kGSQWNtImJknauUN9L8ZRRwIzAdwbmo_
First, we load the pegasus paraphraser.
"""
# Commented out IPython magic to ensure Python compatibility.
!git clone https://github.com/google-research/pegasus
# %cd pegasus
!export PYTHONPATH=.
!pip3 install -r requirements.txt
!pip install transformers==3.5.0
import torch
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
# Pegasus checkpoint fine-tuned for paraphrasing.
model_name = 'tuner007/pegasus_paraphrase'
# Run on GPU when available, otherwise CPU.
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = PegasusTokenizer.from_pretrained(model_name)
model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
def get_response(input_text, num_return_sequences, num_beams):
    """Generate paraphrases of ``input_text`` with the module-level Pegasus model.

    Returns a list of ``num_return_sequences`` decoded paraphrase strings,
    produced by beam search with ``num_beams`` beams.
    """
    encoded = tokenizer.prepare_seq2seq_batch(
        [input_text], truncation=True, padding='longest', max_length=60).to(torch_device)
    generated = model.generate(
        **encoded, max_length=60, num_beams=num_beams,
        num_return_sequences=num_return_sequences, temperature=1.5)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)
import pandas as pd
import nltk
# Fetch the corpora required by the language context below:
# CMUdict for phoneme lookups, WordNet for synonym/hypernym/hyponym queries.
nltk.download('cmudict')
nltk.download('wordnet')
"""Next, we import the procrustean alliteration paraphraser"""
class RuleBoundsInterface:
    """Interface describing a rhetorical-figure generating algorithm.

    A concrete subclass encodes the ruleset for one rhetorical figure so the
    engine can produce results matching the user's request."""
    def evaluate(self, tokenlist, replacementquota):
        """Return the best application of this rule to ``tokenlist`` using the
        requested proportion, or nothing when no application satisfies the
        conditions."""
        pass
class LanguageBoundsInterface:
    """Interface describing a natural language's properties and implementation.

    A concrete subclass supplies the ruleset for one language; that ruleset is
    the boundary an algorithm must work within for the given context."""

    ########## Variables ##########
    _NULL_PHENOME_INDICATOR = None  # phenome stand-in used when a token's phenome is unknown
    _SIMILARITY_THRESHOLD = 0.2     # minimum 0-1 score for two words to count as similar
    MULTI_TOKEN_INDICATOR = None    # corpus-specific character marking multi-word tokens
    vowelphenomes = None            # every phenome producing a vowel sound in this language
    ###############################

    ########## Constructor ##########
    def __init__(self, sensitivity):
        # Callers tune how strict similarity matching is per instance.
        self._SIMILARITY_THRESHOLD = sensitivity
    #################################

    def getphenomes(self, arg):
        """Return every phenome list associated with the token."""
        pass
    def hypernyms(self, arg):
        """Return every hypernym of the token (given the surrounding phrase as context)."""
        pass
    def hyponyms(self, arg):
        """Return every hyponym of the token (given the surrounding phrase as context)."""
        pass
    def messagefail(self, input):
        """Build the user-facing failure message for when no result can be produced."""
        pass
    def messageonlyresult(self, arg):
        """Build the message shown when the input yields exactly one result."""
        pass
    def messagetopresult(self, resultlen, requestedresultcount):
        """Build the 'top x results' header shown when multiple results exist."""
        pass
    def similarity(self, arg, arg2):
        """Score token similarity with language-specific weights, for picking
        the optimal replacement in context."""
        pass
    def split(self, arg):
        """Return an ordered token list, split at this language's delimiters."""
        pass
from enum import Enum
from nltk import RegexpTokenizer
from nltk.corpus import cmudict
from nltk.corpus import wordnet
from nltk.wsd import lesk
class AmericanEnglishLangContext(LanguageBoundsInterface):
    """Defines the properties and implementation of standard American English."""
    ########## Variables ##########
    _cmu = cmudict.dict() # Pretrained phenome generation model. Created outside of methods because it is used over iteration(s) and is expensive to generate; TREAT THIS VALUE AS AN IMMUTABLE.
    _MULTI_TOKEN_INDICATOR = "_" # Character used to identify when a token has multiple words. This functionality is specific to a corpus. Must be changed if corpus is changed.
    _NULL_PHENOME_INDICATOR = "*NONE*" # Used by algorithm to indicate if a corresponding phenome could not be found for a token
    _SIMILARITY_THRESHOLD = 0.2 # The threshold that must be passed for a word to be considered similar. Scaled from 0-1.
    vowelphenomes = ["AA", "AE", "AH", "AO", "AW", "AY",
                     "AX", "AXR", "EH", "ER", "EY", "IH",
                     "IX", "IY", "OW", "OY","UH", "UW", "UX"] # Contains all phenomes that produce vowel-related sounds for this language.
    ###############################
    def _getproperformattype(self, unformattoken):
        """Parse a Wordnet synset and return only the lemma word (synset names
        look like "dog.n.01"). In future implementations this function may not
        be needed if the corpus can return only the word as a string."""
        name, junk = unformattoken.name().split(".", 1)
        return name
    def _getproperhandlemissingphenome(self, unknowntoken):
        """Takes an unknown token (one which could not be evaluated by CMUdict)
        and attempts to generate a phenome. If the CMUdict or Wordnet
        implementation is changed this function MUST be changed."""
        finaleval = []
        # After various testing, it has been determined that calculating for
        # two letters yields the most consistent results for unknown phenomes.
        tokenlen = len(unknowntoken)
        # BUG FIX: comparisons were `tokenlen is 0/is 1`; identity tests on int
        # literals are a CPython implementation detail -- use equality.
        if tokenlen == 0:
            finaleval.append([self._NULL_PHENOME_INDICATOR])
        elif tokenlen == 1:
            finaleval.append([unknowntoken.upper()]) # The letter IS the phenome
        else:
            relevant = unknowntoken[:2] # get first two chars
            finalattempt = self._cmu.get(relevant, None)
            if finalattempt is None: # No possible phenome can be generated by this algorithm
                finaleval.append([self._NULL_PHENOME_INDICATOR])
            else:
                # CMUdict values are list[list[str]] (one inner list per
                # pronunciation), so extend to flatten one level and keep the
                # result parsable. (The original `finalattempt is list` branch
                # compared an instance to the type and could never be true, so
                # it has been removed.)
                finaleval.extend(finalattempt)
        return finaleval
    def _getproperhandlemultitoken(self, multitoken):
        """Takes a multi-word token (words separated by '_' by Wordnet) and
        breaks it down into a format that can be evaluated by CMUdict. If the
        CMUdict or Wordnet implementation is changed this function MUST be
        changed."""
        finaleval = []
        individualtokens = multitoken.split(self._MULTI_TOKEN_INDICATOR)
        for token in individualtokens: # evaluate each token's phenome individually
            phenome = self._cmu.get(token.lower(), None)
            if phenome is None:
                phenome = self._getproperhandlemissingphenome(token)
            # flatten list; this step is necessary to maintain parsability
            # (the dead `phenome is list` branch from the original was removed)
            finaleval.extend(phenome)
        return finaleval
    def getphenomes(self, arg):
        """Returns all phenome-lists related to the token.

        Uses CMUdict as the core processing algorithm. If CMUdict fails to
        find a match, a possible phenome is predicted for the token. This
        function is guaranteed to return a value."""
        generatephenome = self._cmu.get(arg.lower(), None) # _cmu is defined in the "Variables" section above. Treat as an immutable.
        if generatephenome is None:
            if self._MULTI_TOKEN_INDICATOR in arg:  # idiomatic membership test (was arg.__contains__(...))
                generatephenome = self._getproperhandlemultitoken(arg)
            else: # token is unknown by CMUdict
                generatephenome = self._getproperhandlemissingphenome(arg)
        # When multiple phenomes exist for same word, a list[list[str]] is generated
        return generatephenome
    def hypernyms(self, context, arg):
        """Returns all hypernyms related to the token. ('context' is the representation of the phrase in collection form.)"""
        # This function assumes the use of Wordnet. If Wordnet implementation changes, this function MUST change.
        result = None  # renamed from `eval`, which shadowed the builtin
        interpretation = lesk(context, arg)
        if interpretation is not None:
            result = map(self._getproperformattype, interpretation.hypernyms())
        return result
    def hyponyms(self, context, arg):
        """Returns all hyponyms related to the token. ('context' is the representation of the phrase in collection form.)"""
        # This function assumes the use of Wordnet. If Wordnet implementation changes, this function MUST change.
        result = None  # renamed from `eval`, which shadowed the builtin
        interpretation = lesk(context, arg)
        if interpretation is not None:
            result = map(self._getproperformattype, interpretation.hyponyms())
        return result
    def messagefail(self, input):
        """Produces the fail message to print to users in this language if the process cannot return a value."""
        built = " ".join(input)
        return ("Your input: '" + built + "' was not able to be parsed under the conditions you desired. Please try new conditions or try a new phrase.")
    def messageonlyresult(self, arg):
        """Produces a indicator message if only one result was possible from the input parameters given."""
        return ("This is the only result processed from the given input:\n" + arg)
    def messagetopresult(self, resultlen, requestedresultcount):
        """Produces the top 'x' results message to users in this language if the process has multiple results."""
        if resultlen < requestedresultcount:
            return ("Top " + str(resultlen) + " result(s):\n")
        else:
            return ("Top " + str(requestedresultcount) + " result(s):\n")
    def similarity(self, contextclues, arg1, arg2):
        """Returns a key-value pair for scoring similarity. [0] a bool that determines if the word is similar enough to satisfy language criteria
        and [1] the score associated with the evaluation."""
        # This function assumes the use of Wordnet. If Wordnet implementation changes, this function MUST change.
        evaluation = False
        score = 0
        # BUG FIX: was `arg1 is arg2`; identity misses equal-but-distinct
        # strings, defeating the self-paraphrase penalty described below.
        if arg1 == arg2:
            evaluation = True
            score = self._SIMILARITY_THRESHOLD # Penalizing score to prevent paraphrases from returning themselves
        else:
            contextA = lesk(contextclues, arg1)
            contextB = lesk(contextclues, arg2)
            if contextA and contextB: # Otherwise score will stay zero
                score = contextA.path_similarity(contextB)
                if score is not None and self._SIMILARITY_THRESHOLD <= score:
                    evaluation = True
        return (evaluation, score)
    def split(self, arg):
        # Returns all non-whitespace tokens. (Raw string avoids invalid-escape
        # warnings for \w, \d, \S in newer Python versions.)
        return RegexpTokenizer(r'\w+|\$[\d\.]+|\S+').tokenize(arg)
class Mapper:
    """Chunks a user-input string into an ordered token list carrying the data
    needed by the downstream rule processors."""

    def maptolist(self, arg, langbound):
        """Tokenize ``arg`` according to the language context's delimiter rules."""
        return self._internalmap(arg, langbound)

    def _internalmap(self, arg, langbound):
        # Delegate the actual splitting to the language-specific tokenizer.
        return langbound.split(arg)
class AlliterationRuleContext(RuleBoundsInterface):
    """Defines the properties and rules of the alliteration rhetorical figure."""
    def _applyrule(self, sourcedata, tokentargetcount, langbound):
        """Trim the internal-map token list to only retain tokens that conform to the alliteration ruleset."""
        phenomeselect = []
        for phenomeset in sourcedata:
            # Alliteration concerns consonant onsets, so vowel phenomes are skipped.
            # NOTE(review): CMUdict vowel phenomes usually carry stress digits
            # (e.g. "AH0") while langbound.vowelphenomes entries do not --
            # confirm this membership test actually filters them.
            if phenomeset not in langbound.vowelphenomes:
                #run proportion algorithm
                setrep = sourcedata[phenomeset]
                # Keep only phenome groups covering at least the requested
                # number of token positions.
                if tokentargetcount <= len(setrep):
                    phenomeselect.append(sourcedata[phenomeset])
        # Return selection
        if not phenomeselect:
            return None
        else:
            return phenomeselect
    def _applyscan(self, sourcematrix, langbound):
        """Scan a token-matrix and return a dataset that holds information on the phenome frequency of alliteration
        in the matrix."""
        dataset = {} # Dictionary is of type 'phenome str' -> dict{int -> list[str]}
        for index, item in enumerate(sourcematrix): # going through each token
            for content in item: # going through synonym content of each token
                phenomelists = self._getsourcephenome(content, langbound) # generate phenomes for alliteration evaluation
                for phenomes in phenomelists: # going through each phenome list for token (some tokens may have multiple pronunciations)
                    relevantphenome = phenomes[0] # use the FIRST phenome because this is alliteration
                    if dataset.get(relevantphenome, None) is None: # if the phenome has NOT been scanned previously, create an entry
                        dataset[ relevantphenome ] = {} # Dictionary will contain key-value pairs corresponding to the index and the list of words available.
                        dataset[ relevantphenome ] [index] = [content]
                    else:
                        if dataset[ relevantphenome ].get(index, None) is None: # if an entry for THIS index has NOT been created, create one.
                            dataset[ relevantphenome ] [index] = [content]
                        else:
                            if content not in dataset[ relevantphenome ] [index]:
                                dataset[ relevantphenome ] [index].append(content)
        return dataset
    def _getsourcephenome(self, evaltoken, langbound):
        """Returns a phenome value for a string-token."""
        phenomeform = langbound.getphenomes(evaltoken)
        return phenomeform
    def _getrelevantsynonyms(self, tokenlist, sourcetoken, langbound):
        """Returns a token-list of the original context and synonyms that are relevant, if applicable."""
        relevant = [sourcetoken] # original token is always first within the collection
        # Add all relevant synonyms to evaluation list as strings
        hypernyms = langbound.hypernyms(tokenlist, sourcetoken)
        hyponyms = langbound.hyponyms(tokenlist, sourcetoken)
        if hypernyms is not None:
            relevant.extend(hypernyms)
        if hyponyms is not None:
            relevant.extend(hyponyms)
        return relevant
    def _internalmap(self, tokenlist, langbound):
        """Map relevant replacement tokens to a matrix. The returned
        token-matrix has a one-to-one correspondence with the passed
        tokenlist argument."""
        replacements = []
        for token in tokenlist:
            similar = self._getrelevantsynonyms(tokenlist, token, langbound)
            replacements.append(similar)
        return replacements
    def evaluate(self, tokenlist, replacementquota, langbound):
        # Entry point: returns the rule-satisfying phenome groups, or None.
        if replacementquota < 1: # Nothing will be applied to the target list. Do not process.
            return None
        # Map and chart data for rule application
        preprocess = self._internalmap(tokenlist, langbound)
        process = self._applyscan(preprocess, langbound)
        # Apply rule and return data
        postprocess = self._applyrule(process, replacementquota, langbound)
        return postprocess
class AssonanceRuleContext(RuleBoundsInterface):
    """Defines the properties and rules of the assonance rhetorical figure.

    Assonance is charted on the FIRST phoneme of each word's pronunciation and
    restricted to vowel phonemes.
    """
    def _applyrule(self, sourcedata, tokentargetcount, langbound):
        """Keep only vowel-phoneme groups that cover at least *tokentargetcount* slots."""
        selection = [sourcedata[phoneme] for phoneme in sourcedata
                     if phoneme in langbound.vowelphenomes
                     and tokentargetcount <= len(sourcedata[phoneme])]
        # An empty selection is reported as None, matching the evaluate() contract.
        return selection or None
    def _applyscan(self, sourcematrix, langbound):
        """Chart assonance frequency: {first phoneme -> {token index -> candidate words}}."""
        dataset = {}
        for index, candidates in enumerate(sourcematrix):
            for word in candidates:
                # Some words carry several pronunciations; chart every one.
                for pronunciation in self._getsourcephenome(word, langbound):
                    first = pronunciation[0]  # assonance keys on the FIRST phoneme
                    slot = dataset.setdefault(first, {}).setdefault(index, [])
                    if word not in slot:
                        slot.append(word)
        return dataset
    def _getsourcephenome(self, evaltoken, langbound):
        """Return phoneme lists for *evaltoken* via the language context (CMUdict-backed,
        with a predicted fallback so a value is always returned)."""
        return langbound.getphenomes(evaltoken)
    def _getrelevantsynonyms(self, tokenlist, sourcetoken, langbound):
        """Return *sourcetoken* plus relevant hypernyms/hyponyms, original token first."""
        collected = [sourcetoken]
        for related in (langbound.hypernyms(tokenlist, sourcetoken),
                        langbound.hyponyms(tokenlist, sourcetoken)):
            if related is not None:
                collected.extend(related)
        return collected
    def _internalmap(self, tokenlist, langbound):
        """Replacement matrix with one candidate list per token (one-to-one with input)."""
        return [self._getrelevantsynonyms(tokenlist, token, langbound)
                for token in tokenlist]
    def evaluate(self, tokenlist, replacementquota, langbound):
        """Run map -> scan -> rule; None when the quota would apply nothing."""
        if replacementquota < 1:
            return None
        mapped = self._internalmap(tokenlist, langbound)
        charted = self._applyscan(mapped, langbound)
        return self._applyrule(charted, replacementquota, langbound)
class RhymeRuleContext(RuleBoundsInterface):
    """Defines the properties and rules of the rhyme rhetorical figure.

    Rhyme is charted on the LAST phoneme of each word's pronunciation; unlike
    assonance, no vowel filter is applied.
    """
    def _applyrule(self, sourcedata, tokentargetcount, langbound):
        """Keep every phoneme group that covers at least *tokentargetcount* slots."""
        selection = [sourcedata[phoneme] for phoneme in sourcedata
                     if tokentargetcount <= len(sourcedata[phoneme])]
        # An empty selection is reported as None, matching the evaluate() contract.
        return selection or None
    def _applyscan(self, sourcematrix, langbound):
        """Chart rhyme frequency: {last phoneme -> {token index -> candidate words}}."""
        dataset = {}
        for index, candidates in enumerate(sourcematrix):
            for word in candidates:
                # Some words carry several pronunciations; chart every one.
                for pronunciation in self._getsourcephenome(word, langbound):
                    last = pronunciation[-1]  # rhyme keys on the LAST phoneme
                    slot = dataset.setdefault(last, {}).setdefault(index, [])
                    if word not in slot:
                        slot.append(word)
        return dataset
    def _getsourcephenome(self, evaltoken, langbound):
        """Return phoneme lists for *evaltoken* via the language context."""
        return langbound.getphenomes(evaltoken)
    def _getrelevantsynonyms(self, tokenlist, sourcetoken, langbound):
        """Return *sourcetoken* plus relevant hypernyms/hyponyms, original token first."""
        collected = [sourcetoken]
        for related in (langbound.hypernyms(tokenlist, sourcetoken),
                        langbound.hyponyms(tokenlist, sourcetoken)):
            if related is not None:
                collected.extend(related)
        return collected
    def _internalmap(self, tokenlist, langbound):
        """Replacement matrix with one candidate list per token (one-to-one with input)."""
        return [self._getrelevantsynonyms(tokenlist, token, langbound)
                for token in tokenlist]
    def evaluate(self, tokenlist, replacementquota, langbound):
        """Run map -> scan -> rule; None when the quota would apply nothing."""
        if replacementquota < 1:
            return None
        mapped = self._internalmap(tokenlist, langbound)
        charted = self._applyscan(mapped, langbound)
        return self._applyrule(charted, replacementquota, langbound)
import heapq
class Analyzer:
    """Takes a rule-output and applies it to the target-input while attempting to
    retain the original meaning of the input as much as possible."""
    def _checkaccuracy(self, bestfitdata):
        """Return the bestfitted output-slice with the highest summed accuracy.

        Each slice is a list of (accuracy, index, token) tuples.  Returns None
        for empty input.
        """
        if not bestfitdata:
            return None
        if len(bestfitdata) == 1:  # BUG FIX: was `is 1` — identity comparison on an int literal
            return bestfitdata[0]  # only one possible return
        # Pick the slice with the highest summed accuracy directly.  The old
        # heap + nlargest(len-1) dance computed the same maximum, but could
        # fall back to comparing whole collections on accuracy ties.
        return max(bestfitdata,
                   key=lambda collection: sum(item[0] for item in collection))
    def _checkbestfit(self, input, outputslice, langbound, replacementquota):
        """For each replacement index pick the candidate retaining the most meaning.

        Returns the `replacementquota` highest-scoring (score, index, token)
        tuples, or None when fewer than `replacementquota` indexes had any
        valid candidate.
        """
        bestfitinorder = []  # heapq: ordered lowest-to-highest by score
        for index in outputslice:  # each replacement index in dict
            tokenselect = None
            highestscore = 0  # resets on every changed index
            compareto = input[index]  # hoisted: corresponding index in original input
            for token in outputslice[index]:  # each candidate word for this index
                # score is expected as (satisfies_criteria: bool, value: float)
                score = langbound.similarity(input, compareto, token)
                if score[0] and highestscore < score[1]:
                    tokenselect = token
                    highestscore = score[1]
            if tokenselect:  # any valid token for this slot?
                heapq.heappush(bestfitinorder, (highestscore, index, tokenselect))
        if len(bestfitinorder) < replacementquota:  # not enough valid replacements
            return None
        return heapq.nlargest(int(replacementquota), bestfitinorder)
    def _construct(self, originalinput, tokenchain, langbound, requestedresults):
        """Convert a token list and bestfit data into a readable string.

        NOTE(review): `langbound` and `requestedresults` are currently unused;
        kept for interface compatibility with callers.
        """
        if tokenchain is None:
            # No valid replacement chain: return an empty string.
            result = ""
        else:
            result = " ".join(self._replacetokens(originalinput, tokenchain))
        # Multi-word tokens are stored with underscores; restore spaces for output.
        return result.replace("_", " ")
    def _internalmap(self, originalinput, ruleoutput, langbound, replacementquota):
        """Map each rule-output dictionary through bestfit; drop slices with no valid fit."""
        mapping = []
        if ruleoutput is not None:
            for outs in ruleoutput:  # iterate through each output dict available
                trimmed = self._checkbestfit(
                    originalinput, outs, langbound, replacementquota)
                if trimmed is not None:
                    mapping.append(trimmed)
        return mapping
    def _replacetokens(self, originalinput, tokenchain):
        """Replace tokens at targeted indices from *tokenchain* (a list of
        (score, index, token) tuples).

        Mutates *originalinput* in place and returns it; indices were marked
        from the beginning of the process, so they stay valid.
        """
        for _score, index, token in tokenchain:
            originalinput[index] = token
        return originalinput
    def analyze(self, originalinput, ruleoutput, langbound, replacementquota, topresults = 1):
        """Apply a rule-output to a target input and attempt to retain input meaning.

        Rule output is expected in list[dict(int -> list[str])] format; returns
        the reconstructed sentence string ('' when no valid chain exists).
        """
        preprocess = self._internalmap(
            originalinput, ruleoutput, langbound, replacementquota)
        process = self._checkaccuracy(preprocess)
        return self._construct(originalinput, process, langbound, topresults)
"""We define three methods for use in the final integrated function"""
def alliteration(arg, proportion, sensitivity):
    """Paraphrase *arg* toward alliteration, replacing roughly *proportion* of its tokens."""
    langcontext = AmericanEnglishLangContext(sensitivity)
    tokens = Mapper().maptolist(arg, langcontext)
    # Proportions below 1 are treated as a fraction of the token count.
    quota = len(tokens)
    if proportion < 1:
        quota = round(quota * proportion)
    ruled = AlliterationRuleContext().evaluate(tokens, quota, langcontext)
    return Analyzer().analyze(tokens, ruled, langcontext, quota)
def assonance(arg, proportion, sensitivity):
    """Paraphrase *arg* toward assonance, replacing roughly *proportion* of its tokens."""
    langcontext = AmericanEnglishLangContext(sensitivity)
    tokens = Mapper().maptolist(arg, langcontext)
    # Proportions below 1 are treated as a fraction of the token count.
    quota = len(tokens)
    if proportion < 1:
        quota = round(quota * proportion)
    ruled = AssonanceRuleContext().evaluate(tokens, quota, langcontext)
    return Analyzer().analyze(tokens, ruled, langcontext, quota)
def rhyming(arg, proportion, sensitivity):
    """Paraphrase *arg* toward rhyme, replacing roughly *proportion* of its tokens."""
    langcontext = AmericanEnglishLangContext(sensitivity)
    tokens = Mapper().maptolist(arg, langcontext)
    # Proportions below 1 are treated as a fraction of the token count.
    quota = len(tokens)
    if proportion < 1:
        quota = round(quota * proportion)
    ruled = RhymeRuleContext().evaluate(tokens, quota, langcontext)
    return Analyzer().analyze(tokens, ruled, langcontext, quota)
"""Next, we define the sentence curator"""
!pip install fuzzywuzzy[speedup]
# Load clustering and Levenshtein distance
import scipy.cluster.hierarchy as h
from fuzzywuzzy import fuzz
import scipy.spatial.distance as d
# Load word2vec prerequisites for correlation distance between words
!wget -P /root/input/ -c "https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz"
!pip3 install gensim
from gensim.models import KeyedVectors
vecmod = KeyedVectors.load_word2vec_format('/root/input/GoogleNews-vectors-negative300.bin.gz', binary=True)
# Load Google's pre-trained Word2Vec model.
import numpy as np
from scipy import spatial
from nltk.tokenize import word_tokenize
from nltk import download
download('stopwords')
download('punkt')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
index2word_set = set(vecmod.wv.index2word)
def curate(input, candidates, max_clusters):
    """Cluster candidate paraphrases and return the best-scoring sentence per cluster.

    Candidates are hierarchically clustered by normalized Levenshtein similarity
    (at most *max_clusters* clusters); within each cluster, every sentence whose
    word2vec correlation distance to *input* equals the cluster maximum is kept.
    Prints a message and returns None when there are no candidates.
    """
    def _SentenceVec(sentence):
        """Mean word2vec vector of the sentence's non-stopword alphabetic tokens."""
        tokens = word_tokenize(sentence)
        words = [word.lower() for word in tokens if word.isalpha()]
        words = [word for word in words if word not in stop_words]
        sentence_vec = np.zeros((300, ), dtype='float32')
        n_words = 0
        for word in words:
            if word in index2word_set:
                n_words += 1
                sentence_vec = np.add(sentence_vec, vecmod[word])
        if n_words > 0:
            sentence_vec = np.divide(sentence_vec, n_words)
        return sentence_vec
    def _cor_dist(s1, s2):
        """Correlation distance between two sentence vectors."""
        return spatial.distance.correlation(s1, s2)
    def _cluster(cands, max_clusters):
        """Hierarchically cluster sentences by normalized Levenshtein similarity."""
        X = [[c] for c in cands]
        # BUG FIX: the comprehension variable used to be named `d`, shadowing
        # the module alias `d` (scipy.spatial.distance) — it only worked by an
        # accident of comprehension scoping.
        levenshtein = [(100 - ratio) / 100 for ratio in d.pdist(X, fuzz.ratio)]
        Z = h.linkage(levenshtein)
        return h.fcluster(Z, t=max_clusters, criterion="maxclust")
    def _score_paraphrases(cands, original):
        """Correlation distance of each candidate's vector to the original sentence."""
        t = _SentenceVec(original)
        return [_cor_dist(_SentenceVec(c), t) for c in cands]
    if not candidates:
        # Explicit early exit (the old trailing `if len(candidates) == 0`
        # branch returned None implicitly).
        print("There were no results")
        return None
    clust = _cluster(candidates, max_clusters)
    score = _score_paraphrases(candidates, input)
    # Group (score, candidate) pairs by their cluster label.
    group = {key: [] for key in set(clust)}
    for i in range(len(candidates)):
        group[clust[i]].append([score[i], candidates[i]])
    top_results = []
    for key in group:
        best = max(entry[0] for entry in group[key])
        for entry in group[key]:
            # Ties on the best score keep every tied sentence, as before.
            if entry[0] == best:
                top_results.append(entry[1])
    return top_results
"""Next, we integrate the Pegasus sample generator, the Procrustean paraphraser and the paraphrase curator."""
def paraphrase(input, proportion, sensitivity, max_output, sample_size, method):
# Generate sample with Pegasus
num_return_sequences = sample_size
num_beams = 4*sample_size
sample = get_response(input,num_return_sequences,num_beams)
sample.append(input)
# Generate paraphrase list with given method
candidates = []
for i in range(len(sample)):
candidates.append(method(sample[i],proportion,sensitivity))
for x in candidates:
if x == "":
candidates.remove(x)
# curate the list
result = curate(input, candidates, max_output)
return result
x = "the cat jumped over the moon and ate the planet jupyter"
paraphrase(x,0.4,0.15,2,25,rhyming)
x = "the cat jumped over the moon and ate the planet jupyter"
paraphrase(X,0.3,0.5,5,20,assonance)
x = "the cat jumped over the moon and landed on planet jupyter"
proportion = 0.4
sensitivity = 0.3
max_output = 3
sample_size = 20
paraphrase(x,proportion,sensitivity,max_output,sample_size,alliteration)
"""Lastly, we test the algorithm on a sample of eight excerpts from classic works.
We have 3 methods and we set the max output to 3. Thus we have a maximum of 72 possible sentences to evaluate.
"""
Dickens = "My meaning simply is, that whatever I have tried to do in life, I have tried with all my heart to do well"
Twain = "Persons attempting to find a motive in this narrative will be prosecuted; persons attempting to find a moral in it will be banished; persons attempting to find a plot in it will be shot."
Forster = "Most of life is so dull that there is nothing to be said about it, and the books and talk that would describe it as interesting are obliged to exaggerate, in the hope of justifying their own existence."
Grahame = "The Mole was a good listener, and Toad, with no one to check his statements or to criticize in an unfriendly spirit, rather let himself go."
Joyce = "A certain pride, a certain awe, withheld him from offering to God even one prayer at night, though he knew it was in God’s power to take away his life while he slept and hurl his soul hellward ere he could beg for mercy."
London = "When, on the still cold nights, he pointed his nose at a star and howled long and wolf-like, it was his ancestors, dead and dust, pointing nose at star and howling down through the centuries and through him."
Fitzgerald = "And so with the sunshine and the great bursts of leaves growing on the trees, just as things grow in fast movies, I had that familiar conviction that life was beginning over again with the summer."
Eliot = "For years after Lydgate remembered the impression produced in him by this involuntary appeal—this cry from soul to soul."
test_set = [Dickens,Twain,Forster,Grahame,Joyce,London,Fitzgerald,Eliot]
Dickens_alliteration = paraphrase(Dickens,0.3,0.3,3,30,alliteration)
print(Dickens)
for sentence in Dickens_alliteration:
print(sentence)
Twain_alliteration = paraphrase(Twain,0.2,0.2,3,30,alliteration)
print(Twain)
for sentence in Twain_alliteration:
print(sentence)
Forster_alliteration = paraphrase(Forster,0.2,0.6,3,30,alliteration)
print(Forster)
for sentence in Forster_alliteration:
print(sentence)
Grahame_alliteration = paraphrase(Grahame,0.3,0.3,3,30,alliteration)
print(Grahame)
for sentence in Grahame_alliteration:
print(sentence)
Joyce_alliteration = paraphrase(Joyce,0.2,0.3,3,30,alliteration)
print(Joyce)
for sentence in Joyce_alliteration:
print(sentence)
London_alliteration = paraphrase(London,0.3,0.3,3,30,alliteration)
print(London)
for sentence in London_alliteration:
print(sentence)
Fitzgerald_alliteration = paraphrase(Fitzgerald,0.2,0.3,3,20,alliteration)
print(Fitzgerald)
for sentence in Fitzgerald_alliteration:
print(sentence)
Eliot_alliteration = paraphrase(Eliot,0.2,0.5,3,30,alliteration)
print(Eliot)
for sentence in Eliot_alliteration:
print(sentence)
Dickens_rhyming = paraphrase(Dickens,0.4,0.5,3,30,rhyming)
print(Dickens)
for sentence in Dickens_rhyming:
print(sentence)
Twain_rhyming = paraphrase(Twain,0.3,0.5,3,30,rhyming)
print(Twain)
for sentence in Twain_rhyming:
print(sentence)
Forster_rhyming = paraphrase(Forster,0.3,0.3,3,30,rhyming)
print(Forster)
for sentence in Forster_rhyming:
print(sentence)
Grahame_rhyming = paraphrase(Grahame,0.3,0.25,3,30,rhyming)
for sentence in Grahame_rhyming:
print(sentence)
Joyce_rhyming = paraphrase(Joyce,0.33,0.5,3,30,rhyming)
print(Joyce)
for sentence in Joyce_rhyming:
print(sentence)
London_rhyming = paraphrase(London,0.3,0.3,3,30,rhyming)
print(London)
for sentence in London_rhyming:
print(sentence)
Fitzgerald_rhyming = paraphrase(Fitzgerald,0.3,0.3,3,30,rhyming)
for sentence in Fitzgerald_rhyming:
print(sentence)
Eliot_rhyming = paraphrase(Eliot,0.3,0.3,3,30,rhyming)
print(Eliot)
for sentence in Eliot_rhyming:
print(sentence)
Dickens_assonance = paraphrase(Dickens,0.1,0.15,3,30,assonance)
for sentence in Dickens_assonance:
print(sentence)
Twain_assonance = paraphrase(Twain,0.2,0.1,3,30,assonance)
for sentence in Twain_assonance:
print(sentence)
Forster_assonance = paraphrase(Forster,0.1,0.1,3,30,assonance)
for sentence in Forster_assonance:
print(sentence)
Grahame_assonance = paraphrase(Grahame,0.2,0.1,3,30,assonance)
for sentence in Grahame_assonance:
print(sentence)
Joyce_assonance = paraphrase(Joyce,0.2,0.1,3,30,assonance)
for sentence in Joyce_assonance:
print(sentence)
London_assonance = paraphrase(London,0.2,0.1,3,30,assonance)
for sentence in London_assonance:
print(sentence)
Fitzgerald_assonance = paraphrase(Fitzgerald,0.2,0.1,3,30,assonance)
for sentence in Fitzgerald_assonance:
print(sentence)
Eliot_assonance = paraphrase(Eliot,0.2,0.1,3,30,assonance)
for sentence in Eliot_assonance:
print(sentence) | 42.483042 | 230 | 0.682287 |
e932fb4ec343373146508adfa905b3c8915cb66b | 4,831 | py | Python | train.py | ppujol76/-Pere_Transformers | e267bcc6559c998accaed647cacbff253031f8b0 | [
"MIT"
] | null | null | null | train.py | ppujol76/-Pere_Transformers | e267bcc6559c998accaed647cacbff253031f8b0 | [
"MIT"
] | null | null | null | train.py | ppujol76/-Pere_Transformers | e267bcc6559c998accaed647cacbff253031f8b0 | [
"MIT"
] | 1 | 2021-06-21T08:40:18.000Z | 2021-06-21T08:40:18.000Z | import torch
import os
from model.visualization import Visualization
from panel.main import tensorboard_panel
from torch.utils.data.dataset import Subset
import random
import numpy as np
def write_on_tensorboard(epoch:int, loss:int, bleu:int, image, expected_captions, generated_captions):
    """Log one training step (caption comparison, loss, BLEU, and image) to the
    tensorboard panel."""
    # Only the first caption of each batch is visualised.
    expected = expected_captions[0]
    generated = generated_captions[0]
    tensorboard_panel.add_sentences_comparison(epoch, expected, generated)
    tensorboard_panel.add_loss(epoch, loss)
    tensorboard_panel.add_bleu(epoch, bleu)
    tensorboard_panel.add_image(epoch, image, expected, generated)
def split_subsets(dataset, train_percentage=0.8, all_captions=True):
    """Split *dataset* into train/test Subsets.

    The dataset is assumed to hold 5 consecutive captions per image.  When
    *all_captions* is True, indexes are shuffled in chunks of 5 so all captions
    of one image land in the same split; otherwise only the first caption of
    each image is used.

    Args:
        dataset: indexable dataset with len(dataset) a multiple of 5.
        train_percentage: fraction of images assigned to the train split.
        all_captions: keep all 5 captions per image or only the first.

    Returns:
        (train_split, test_split): torch Subsets over the shuffled indexes.
    """
    if all_captions:  # idiomatic truth test (was `== True`)
        # Reshape to (n_images, 5) so shuffling rows keeps captions together.
        index_matrix = np.arange(len(dataset)).reshape(-1, 5)
        np.random.shuffle(index_matrix)
        shuffled = index_matrix.flatten()
        # Number of whole images that go to train; the rest are test.
        num_train_imgs = int(len(shuffled) / 5 * train_percentage)
        train_split = Subset(dataset, shuffled[0:num_train_imgs * 5].tolist())
        test_split = Subset(dataset, shuffled[num_train_imgs * 5:].tolist())
    else:
        # Only the first caption (every 5th index) of each image.
        first_indexes = [*range(0, len(dataset), 5)]
        random.shuffle(first_indexes)
        num_train_imgs = int(len(first_indexes) * train_percentage)
        train_split = Subset(dataset, first_indexes[0:num_train_imgs])
        test_split = Subset(dataset, first_indexes[num_train_imgs:])
    return train_split, test_split
def train_single_epoch(epoch, model, train_loader, optimizer, criterion, device,scheduler):
    """Train the captioning model for one epoch.

    For each batch: forward pass, cross-entropy-style loss against the
    shifted target captions, backprop with gradient clipping, and a printed
    comparison of the generated vs. reference caption of the first sample.

    NOTE(review): `scheduler` is unused here (only referenced from
    commented-out code); it is stepped per-epoch by the caller.
    """
    model.train()
    for i, batch in enumerate(iter(train_loader)):
        # If we want to train with only one batch (debugging aid):
        # if i==0:
        #     batch1 = batch
        # img, target = batch1
        img, target = batch
        img, target = img.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(img, target)
        # Permute so the class/vocab dimension is second, as the criterion
        # expects — presumably (batch, vocab, seq); confirm the model output
        # layout.
        output = output.permute(1,2,0)
        # Drop the last prediction and the first target token (shifted
        # teacher-forcing alignment).
        loss = criterion(output[:,:,:-1], target[:,1:]) # target[:,1:])
        print(i, loss.item())
        loss.backward()
        # Clip gradients to stabilise training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.25)
        optimizer.step()
        # This is for using the Exponential scheduler, which must be stepped
        # every time you want to lower the gamma:
        # if (i+1)%10 == 0:
        #     scheduler.step()
        #     print(optimizer.param_groups[0]['lr'])
        # Decode the first sample of the batch for a human-readable progress check.
        candidate_corpus = [model.vocab.generate_caption(torch.argmax(output[0].transpose(1, 0), dim=-1))]
        reference_corpus = [model.vocab.generate_caption(target[0, 1:])]
        bleu = 0
        # bleu = bleu_score(candidate_corpus, reference_corpus)
        print('--------------------------------------------------------------------------------------------------')
        print('--------------------------------------------------------------------------------------------------')
        print(f'Epoch {epoch} batch: {i} loss: {loss.item()}')
        print('--------------------------------------------------------------------------------------------------')
        print(candidate_corpus[0])
        print(reference_corpus[0])
        print('--------------------------------------------------------------------------------------------------')
        # Commented out because it raises a CUDA error:
        # write_on_tensorboard(i+(epoch*len(train_loader)),loss.item(),bleu,img[0],reference_corpus,candidate_corpus)
def evaluate(model, test_loader, vocab, device, criterion):
    """Evaluate *model* on *test_loader*; plots attention per image and returns
    (average per-element loss, last generated caption).

    NOTE(review): `criterion(sentence, target)` is called with the decoded word
    list rather than logits — this looks wrong but is preserved as-is; confirm
    against the criterion's expected inputs.
    """
    model.eval()
    total_loss = 0.
    # BUG FIX: `n` was never initialised, so the first `n += ...` raised
    # NameError; `caption` was likewise unbound when the loader was empty.
    n = 0
    caption = None
    with torch.no_grad():
        for idx, batch in enumerate(iter(test_loader)):
            img, target = batch
            img = img.to(device)
            target = target.to(device)
            for i in range(img.shape[0]):
                sentence = model.inference(image=img[i].unsqueeze(0), vocab=vocab)
                alphas = model.forward(image=img[i].unsqueeze(0), vocab=vocab)[1]
                caption = ' '.join(sentence)
                # Show expected caption and plot the attention maps.
                Visualization.plot_attention((img[0]), sentence, alphas)
                total_loss += target.numel() * criterion(sentence, target).item()
                n += target.numel()
    if n == 0:
        # Empty loader: avoid ZeroDivisionError.
        return 0.0, caption
    return total_loss / n, caption
def save_model(model, epoch):
    """Save the model state dict (tagged with *epoch*) to
    model/checkpoints/Epoch_<epoch>_model_state.pth."""
    checkpoint_dir = os.path.join('model', 'checkpoints')
    # ROBUSTNESS FIX: create the checkpoint directory if it does not exist,
    # otherwise torch.save fails with FileNotFoundError on a fresh clone.
    os.makedirs(checkpoint_dir, exist_ok=True)
    filename = os.path.join(
        checkpoint_dir, 'Epoch_' + str(epoch) + '_model_state.pth')
    model_state = {
        'epoch': epoch,
        'model': model.state_dict()
    }
    torch.save(model_state, filename)
def train(num_epochs, model, train_loader, test_loader, optimizer, criterion, device, log_interval, vocab, scheduler):
    """Run the full training loop: one pass per epoch, stepping the scheduler
    after each epoch and checkpointing the model every 5th epoch.

    NOTE(review): `test_loader`, `log_interval` and `vocab` are currently
    unused; kept for interface compatibility.
    """
    for epoch in range(1, num_epochs + 1):
        train_single_epoch(epoch, model, train_loader, optimizer, criterion, device, scheduler)
        scheduler.step()
        if not epoch % 5:
            save_model(model, epoch)
| 35.262774 | 114 | 0.673981 |
e933799d41eabf2ce3d0578ad558fcf9ab8d220d | 2,251 | py | Python | views/probabilidade.py | pxcx/ambar-backend | 350baabb492e4fbc1002ea851d1cef4fc999b81a | [
"MIT"
] | null | null | null | views/probabilidade.py | pxcx/ambar-backend | 350baabb492e4fbc1002ea851d1cef4fc999b81a | [
"MIT"
] | null | null | null | views/probabilidade.py | pxcx/ambar-backend | 350baabb492e4fbc1002ea851d1cef4fc999b81a | [
"MIT"
] | null | null | null | from flask import jsonify
from sqlalchemy import func
from datetime import datetime, date
from models.previsao import Previsao, db
def configure(app):
    """Register the /probabilidade endpoint on the given Flask *app*."""
    # /probabilidade - returns the overall probability of rain
    # - inicio (YYYY-MM-DD)
    # - fim (YYYY-MM-DD)
    @app.route('/probabilidade/<inicio>/<fim>', methods=['GET'])
    def probabilidade(inicio, fim):
        """Aggregate rain probability for forecasts between *inicio* and *fim*
        (inclusive), returned as JSON {'probabilidade_chuva': ...}."""
        try:
            # Convert the URL parameters to datetimes (format YYYY-MM-DD).
            inicio = datetime.strptime(inicio,'%Y-%m-%d')
            fim = datetime.strptime(fim,'%Y-%m-%d')
            # Total number of registered cities.
            # NOTE(review): this counts rows of the FIRST city group within the
            # range (count per cidade, .first()), i.e. forecasts for one city,
            # not the number of distinct cities — verify the intent.
            totalCidades = db.session.query(func.count(Previsao.cidade).label('total_cidades')).\
                filter(Previsao.date >= date(inicio.year, inicio.month, inicio.day)).\
                filter(Previsao.date <= date(fim.year, fim.month, fim.day)).\
                group_by(Previsao.cidade).\
                first()
            totalCidades = totalCidades.total_cidades
            # Fetch the rain probability per forecast day in the range.
            probabilidadeList = db.session.query(Previsao.date, Previsao.chuva_probabilidade).\
                filter(Previsao.date >= date(inicio.year, inicio.month, inicio.day)).\
                filter(Previsao.date <= date(fim.year, fim.month, fim.day)).\
                all()
            # Combine per-day probabilities.
            # NOTE(review): `pb*(pb*pa)/pa` reduces algebraically to pb**2 — if
            # a Bayes-style update was intended, this formula needs revisiting.
            pa = 1/totalCidades
            aux = {}
            for i in probabilidadeList:
                pb = i.chuva_probabilidade/100  # stored as a percentage
                if str(i.date) in aux:
                    aux[str(i.date)] = aux[str(i.date)] + pb*(pb*pa)/pa
                else:
                    aux[str(i.date)] = pb*(pb*pa)/pa
            # Fold the per-day values into a single output probability.
            out = 0
            for key,val in aux.items():
                if out > 0:
                    out = out + val*(val*pa)/pa
                else:
                    out = val*(val*pa)/pa
            return jsonify({'probabilidade_chuva': out})
        except KeyError as e:
            # Missing parameter (message kept in Portuguese: user-facing text).
            return jsonify({'error': 'O paramêtro "'+str(e)+'" não foi enviado.'})
        except Exception as e:
            # Catch-all boundary: surface the error message as JSON.
            return jsonify({'error': str(e)})
if __name__ == "__main__":
app.run(debug=True) | 39.491228 | 97 | 0.52821 |
e937f0e5ec885071b7daceb7fa5456d999a1e95f | 293 | py | Python | scripts/makeNegativesList.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 8 | 2016-11-20T19:43:45.000Z | 2020-12-09T04:58:05.000Z | scripts/makeNegativesList.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 45 | 2015-05-04T20:41:05.000Z | 2017-07-17T12:04:13.000Z | scripts/makeNegativesList.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 9 | 2016-11-20T19:43:46.000Z | 2020-09-01T21:01:54.000Z | import sys,os
import utils as cu
params = cu.loadParams('fullList positivesList output')
full = [x for x in open(params['fullList'])]
positives = [x for x in open(params['positivesList'])]
out = open(params['output'],'w')
for r in full:
if r not in positives:
out.write(r)
out.close()
| 22.538462 | 55 | 0.692833 |
e93a77efc359563f0911c10f45a8c7e3f5ed8fd4 | 1,354 | py | Python | tests/test_model.py | alexdawn/rollinghub | 6043c12520d7e0b0596f28c166616c1014e1f870 | [
"MIT"
] | null | null | null | tests/test_model.py | alexdawn/rollinghub | 6043c12520d7e0b0596f28c166616c1014e1f870 | [
"MIT"
] | 11 | 2019-08-18T21:37:28.000Z | 2022-03-21T22:17:37.000Z | tests/test_model.py | alexdawn/rollinghub | 6043c12520d7e0b0596f28c166616c1014e1f870 | [
"MIT"
] | null | null | null | import pytest
from rollinghub.db import get_db
def test_index(client, auth):
response = client.get('/')
assert b"Log In" in response.data
assert b"Register" in response.data
auth.login()
response = client.get('/')
assert b'Log Out' in response.data
assert b'test title' in response.data
assert b'by testman on 1900-01-01' in response.data
assert b'href="/1/update"' in response.data
@pytest.mark.parametrize('path', (
    '/create',
    '/1/update',
    '/1/delete',
))
def test_login_required(client, path):
    """Unauthenticated POSTs to edit endpoints redirect to the login page."""
    redirect_target = client.post(path).headers['Location']
    assert redirect_target == 'http://localhost/auth/login'
def test_author_required(app, client, auth):
    """A logged-in user may not modify (or see edit links for) another author's post."""
    # Reassign post 1 to a different author directly in the database.
    with app.app_context():
        db, cur = get_db()
        cur.execute('UPDATE model SET author_id = 2 WHERE id = 1')
        db.commit()
    auth.login()
    # The current user can't modify the other user's post...
    for path in ('/1/update', '/1/delete'):
        assert client.post(path).status_code == 403
    # ...and doesn't see the edit link on the index page.
    assert b'href="/1/update"' not in client.get('/').data
@pytest.mark.parametrize('path', (
    '/2/update',
    '/2/delete',
))
def test_exists_required(client, auth, path):
    """Operating on a nonexistent post id yields 404 even when logged in."""
    auth.login()
    response = client.post(path)
    assert response.status_code == 404
| 27.08 | 72 | 0.656573 |
e93be486b0635edc83619c16da55bfa370ed7c0e | 19,672 | py | Python | openpype/hosts/unreal/plugins/load/load_camera.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | [
"MIT"
] | 1 | 2022-02-08T15:40:41.000Z | 2022-02-08T15:40:41.000Z | openpype/hosts/unreal/plugins/load/load_camera.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | null | null | null | openpype/hosts/unreal/plugins/load/load_camera.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Load camera from FBX."""
from pathlib import Path
import unreal
from unreal import EditorAssetLibrary
from unreal import EditorLevelLibrary
from unreal import EditorLevelUtils
from openpype.pipeline import (
AVALON_CONTAINER_ID,
legacy_io,
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
class CameraLoader(plugin.Loader):
"""Load Unreal StaticMesh from FBX"""
families = ["camera"]
label = "Load Camera"
representations = ["fbx"]
icon = "cube"
color = "orange"
def _get_data(self, asset_name):
asset_doc = legacy_io.find_one({
"type": "asset",
"name": asset_name
})
return asset_doc.get("data")
    def _set_sequence_hierarchy(
        self, seq_i, seq_j, min_frame_j, max_frame_j
    ):
        """Attach level sequence ``seq_j`` as a subscene of ``seq_i``.

        Creates the MovieSceneSubTrack on ``seq_i`` and a subscene section
        pointing at ``seq_j`` if they do not exist yet, then sets the
        section's frame range to [min_frame_j, max_frame_j] (inclusive).
        """
        # Find (or create) the subscene track on the parent sequence.
        tracks = seq_i.get_master_tracks()
        track = None
        for t in tracks:
            if t.get_class() == unreal.MovieSceneSubTrack.static_class():
                track = t
                break
        if not track:
            track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
        # Find an existing section already bound to seq_j, if any.
        subscenes = track.get_sections()
        subscene = None
        for s in subscenes:
            if s.get_editor_property('sub_sequence') == seq_j:
                subscene = s
                break
        if not subscene:
            subscene = track.add_section()
            subscene.set_row_index(len(track.get_sections()))
            subscene.set_editor_property('sub_sequence', seq_j)
            # set_range's end is exclusive, hence the +1 on the max frame.
            subscene.set_range(
                min_frame_j,
                max_frame_j + 1)
def _import_camera(
self, world, sequence, bindings, import_fbx_settings, import_filename
):
ue_version = unreal.SystemLibrary.get_engine_version().split('.')
ue_major = int(ue_version[0])
ue_minor = int(ue_version[1])
if ue_major == 4 and ue_minor <= 26:
unreal.SequencerTools.import_fbx(
world,
sequence,
bindings,
import_fbx_settings,
import_filename
)
elif (ue_major == 4 and ue_minor >= 27) or ue_major == 5:
unreal.SequencerTools.import_level_sequence_fbx(
world,
sequence,
bindings,
import_fbx_settings,
import_filename
)
else:
raise NotImplementedError(
f"Unreal version {ue_major} not supported")
    def load(self, context, name, namespace, data):
        """
        Load and containerise representation into Content Browser.
        This is two step process. First, import FBX to temporary path and
        then call `containerise()` on it - this moves all content to new
        directory and then it will create AssetContainer there and imprint it
        with metadata. This will mark this path as container.
        Args:
            context (dict): application context
            name (str): subset name
            namespace (str): in Unreal this is basically path to container.
                             This is not passed here, so namespace is set
                             by `containerise()` because only then we know
                             real path.
            data (dict): Those would be data to be imprinted. This is not used
                         now, data are imprinted by `containerise()`.
        Returns:
            list(str): list of container content
        """
        # Create directory for asset and avalon container
        hierarchy = context.get('asset').get('data').get('parents')
        root = "/Game/OpenPype"
        hierarchy_dir = root
        hierarchy_dir_list = []
        # Build the cumulative content-browser path for each hierarchy level
        # (e.g. /Game/OpenPype/ep01, /Game/OpenPype/ep01/sq01, ...).
        for h in hierarchy:
            hierarchy_dir = f"{hierarchy_dir}/{h}"
            hierarchy_dir_list.append(hierarchy_dir)
        asset = context.get('asset').get('name')
        suffix = "_CON"
        if asset:
            asset_name = "{}_{}".format(asset, name)
        else:
            asset_name = "{}".format(name)
        tools = unreal.AssetToolsHelpers().get_asset_tools()
        # Create a unique name for the camera directory
        unique_number = 1
        if EditorAssetLibrary.does_directory_exist(f"{hierarchy_dir}/{asset}"):
            asset_content = EditorAssetLibrary.list_assets(
                f"{root}/{asset}", recursive=False, include_folder=True
            )
            # Get highest number to make a unique name
            folders = [a for a in asset_content
                       if a[-1] == "/" and f"{name}_" in a]
            f_numbers = []
            for f in folders:
                # Get number from folder name. Splits the string by "_" and
                # removes the last element (which is a "/").
                f_numbers.append(int(f.split("_")[-1][:-1]))
            f_numbers.sort()
            if not f_numbers:
                unique_number = 1
            else:
                unique_number = f_numbers[-1] + 1
        asset_dir, container_name = tools.create_unique_asset_name(
            f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="")
        asset_path = Path(asset_dir)
        asset_path_parent = str(asset_path.parent.as_posix())
        container_name += suffix
        EditorAssetLibrary.make_directory(asset_dir)
        # Create map for the shot, and create hierarchy of map. If the maps
        # already exist, we will use them.
        h_dir = hierarchy_dir_list[0]
        h_asset = hierarchy[0]
        master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map"
        if not EditorAssetLibrary.does_asset_exist(master_level):
            EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map")
        level = f"{asset_path_parent}/{asset}_map.{asset}_map"
        if not EditorAssetLibrary.does_asset_exist(level):
            EditorLevelLibrary.new_level(f"{asset_path_parent}/{asset}_map")
            # Stream the freshly created shot level into the master level.
            EditorLevelLibrary.load_level(master_level)
            EditorLevelUtils.add_level_to_world(
                EditorLevelLibrary.get_editor_world(),
                level,
                unreal.LevelStreamingDynamic
            )
        EditorLevelLibrary.save_all_dirty_levels()
        EditorLevelLibrary.load_level(level)
        # Get all the sequences in the hierarchy. It will create them, if
        # they don't exist.
        sequences = []
        frame_ranges = []
        i = 0
        for h in hierarchy_dir_list:
            root_content = EditorAssetLibrary.list_assets(
                h, recursive=False, include_folder=False)
            existing_sequences = [
                EditorAssetLibrary.find_asset_data(asset)
                for asset in root_content
                if EditorAssetLibrary.find_asset_data(
                    asset).get_class().get_name() == 'LevelSequence'
            ]
            if not existing_sequences:
                scene = tools.create_asset(
                    asset_name=hierarchy[i],
                    package_path=h,
                    asset_class=unreal.LevelSequence,
                    factory=unreal.LevelSequenceFactoryNew()
                )
                asset_data = legacy_io.find_one({
                    "type": "asset",
                    "name": h.split('/')[-1]
                })
                id = asset_data.get('_id')
                start_frames = []
                end_frames = []
                elements = list(
                    legacy_io.find({"type": "asset", "data.visualParent": id}))
                # Breadth-first walk of the asset hierarchy: children are
                # appended to `elements` while it is being iterated.
                for e in elements:
                    start_frames.append(e.get('data').get('clipIn'))
                    end_frames.append(e.get('data').get('clipOut'))
                    elements.extend(legacy_io.find({
                        "type": "asset",
                        "data.visualParent": e.get('_id')
                    }))
                min_frame = min(start_frames)
                max_frame = max(end_frames)
                scene.set_display_rate(
                    unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
                scene.set_playback_start(min_frame)
                scene.set_playback_end(max_frame)
                sequences.append(scene)
                frame_ranges.append((min_frame, max_frame))
            else:
                for e in existing_sequences:
                    sequences.append(e.get_asset())
                    frame_ranges.append((
                        e.get_asset().get_playback_start(),
                        e.get_asset().get_playback_end()))
            i += 1
        EditorAssetLibrary.make_directory(asset_dir)
        cam_seq = tools.create_asset(
            asset_name=f"{asset}_camera",
            package_path=asset_dir,
            asset_class=unreal.LevelSequence,
            factory=unreal.LevelSequenceFactoryNew()
        )
        # Add sequences data to hierarchy
        for i in range(0, len(sequences) - 1):
            self._set_sequence_hierarchy(
                sequences[i], sequences[i + 1],
                frame_ranges[i + 1][0], frame_ranges[i + 1][1])
        data = self._get_data(asset)
        cam_seq.set_display_rate(
            unreal.FrameRate(data.get("fps"), 1.0))
        # Camera sequence playback is local (starts at 0); the parent
        # subscene section maps it back onto [clipIn, clipOut].
        cam_seq.set_playback_start(0)
        cam_seq.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1)
        self._set_sequence_hierarchy(
            sequences[-1], cam_seq,
            data.get('clipIn'), data.get('clipOut'))
        settings = unreal.MovieSceneUserImportFBXSettings()
        settings.set_editor_property('reduce_keys', False)
        if cam_seq:
            self._import_camera(
                EditorLevelLibrary.get_editor_world(),
                cam_seq,
                cam_seq.get_bindings(),
                settings,
                self.fname
            )
        # Create Asset Container
        unreal_pipeline.create_container(
            container=container_name, path=asset_dir)
        data = {
            "schema": "openpype:container-2.0",
            "id": AVALON_CONTAINER_ID,
            "asset": asset,
            "namespace": asset_dir,
            "container_name": container_name,
            "asset_name": asset_name,
            "loader": str(self.__class__.__name__),
            "representation": context["representation"]["_id"],
            "parent": context["representation"]["parent"],
            "family": context["representation"]["context"]["family"]
        }
        unreal_pipeline.imprint(
            "{}/{}".format(asset_dir, container_name), data)
        EditorLevelLibrary.save_all_dirty_levels()
        EditorLevelLibrary.load_level(master_level)
        asset_content = EditorAssetLibrary.list_assets(
            asset_dir, recursive=True, include_folder=True
        )
        for a in asset_content:
            EditorAssetLibrary.save_asset(a)
        return asset_content
    def update(self, container, representation):
        """Swap the loaded camera for a new representation version.

        Deletes the old camera actors and level sequence, creates a fresh
        sequence with the same playback settings, re-imports the camera FBX
        from the new representation and re-imprints container metadata.
        """
        ar = unreal.AssetRegistryHelpers.get_asset_registry()
        root = "/Game/OpenPype"
        asset_dir = container.get('namespace')
        context = representation.get("context")
        hierarchy = context.get('hierarchy').split("/")
        h_dir = f"{root}/{hierarchy[0]}"
        h_asset = hierarchy[0]
        master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map"
        EditorLevelLibrary.save_current_level()
        filter = unreal.ARFilter(
            class_names=["LevelSequence"],
            package_paths=[asset_dir],
            recursive_paths=False)
        sequences = ar.get_assets(filter)
        filter = unreal.ARFilter(
            class_names=["World"],
            package_paths=[str(Path(asset_dir).parent.as_posix())],
            recursive_paths=True)
        maps = ar.get_assets(filter)
        # There should be only one map in the list
        EditorLevelLibrary.load_level(maps[0].get_full_name())
        level_sequence = sequences[0].get_asset()
        # Preserve playback settings so the replacement sequence matches.
        display_rate = level_sequence.get_display_rate()
        playback_start = level_sequence.get_playback_start()
        playback_end = level_sequence.get_playback_end()
        sequence_name = f"{container.get('asset')}_camera"
        # Get the actors in the level sequence.
        objs = unreal.SequencerTools.get_bound_objects(
            unreal.EditorLevelLibrary.get_editor_world(),
            level_sequence,
            level_sequence.get_bindings(),
            unreal.SequencerScriptingRange(
                has_start_value=True,
                has_end_value=True,
                inclusive_start=level_sequence.get_playback_start(),
                exclusive_end=level_sequence.get_playback_end()
            )
        )
        # Delete actors from the map
        for o in objs:
            if o.bound_objects[0].get_class().get_name() == "CineCameraActor":
                actor_path = o.bound_objects[0].get_path_name().split(":")[-1]
                actor = EditorLevelLibrary.get_actor_reference(actor_path)
                EditorLevelLibrary.destroy_actor(actor)
        # Remove the Level Sequence from the parent.
        # We need to traverse the hierarchy from the master sequence to find
        # the level sequence.
        root = "/Game/OpenPype"
        namespace = container.get('namespace').replace(f"{root}/", "")
        ms_asset = namespace.split('/')[0]
        filter = unreal.ARFilter(
            class_names=["LevelSequence"],
            package_paths=[f"{root}/{ms_asset}"],
            recursive_paths=False)
        sequences = ar.get_assets(filter)
        master_sequence = sequences[0].get_asset()
        # Breadth-first search of the subscene tree for the camera sequence;
        # visited subsequences are appended to `sequences` while iterating.
        sequences = [master_sequence]
        parent = None
        sub_scene = None
        for s in sequences:
            tracks = s.get_master_tracks()
            subscene_track = None
            for t in tracks:
                if t.get_class() == unreal.MovieSceneSubTrack.static_class():
                    subscene_track = t
                    break
            if subscene_track:
                sections = subscene_track.get_sections()
                for ss in sections:
                    if ss.get_sequence().get_name() == sequence_name:
                        parent = s
                        sub_scene = ss
                        # subscene_track.remove_section(ss)
                        break
                    sequences.append(ss.get_sequence())
                # Update subscenes indexes.
                i = 0
                for ss in sections:
                    ss.set_row_index(i)
                    i += 1
            if parent:
                break
        assert parent, "Could not find the parent sequence"
        EditorAssetLibrary.delete_asset(level_sequence.get_path_name())
        settings = unreal.MovieSceneUserImportFBXSettings()
        settings.set_editor_property('reduce_keys', False)
        tools = unreal.AssetToolsHelpers().get_asset_tools()
        new_sequence = tools.create_asset(
            asset_name=sequence_name,
            package_path=asset_dir,
            asset_class=unreal.LevelSequence,
            factory=unreal.LevelSequenceFactoryNew()
        )
        new_sequence.set_display_rate(display_rate)
        new_sequence.set_playback_start(playback_start)
        new_sequence.set_playback_end(playback_end)
        # Re-point the existing subscene section at the new sequence.
        sub_scene.set_sequence(new_sequence)
        self._import_camera(
            EditorLevelLibrary.get_editor_world(),
            new_sequence,
            new_sequence.get_bindings(),
            settings,
            str(representation["data"]["path"])
        )
        data = {
            "representation": str(representation["_id"]),
            "parent": str(representation["parent"])
        }
        unreal_pipeline.imprint(
            "{}/{}".format(asset_dir, container.get('container_name')), data)
        EditorLevelLibrary.save_current_level()
        asset_content = EditorAssetLibrary.list_assets(
            asset_dir, recursive=True, include_folder=False)
        for a in asset_content:
            EditorAssetLibrary.save_asset(a)
        EditorLevelLibrary.load_level(master_level)
    def remove(self, container):
        """Remove the loaded camera container from the project.

        Destroys the CineCamera actors in the shot level, detaches the
        camera sequence from its parent subscene track, and deletes the
        container directory (and its parent, if left empty).
        """
        path = Path(container.get("namespace"))
        parent_path = str(path.parent.as_posix())
        ar = unreal.AssetRegistryHelpers.get_asset_registry()
        filter = unreal.ARFilter(
            class_names=["LevelSequence"],
            package_paths=[f"{str(path.as_posix())}"],
            recursive_paths=False)
        sequences = ar.get_assets(filter)
        if not sequences:
            raise Exception("Could not find sequence.")
        # Remember the currently loaded world so it can be restored later.
        world = ar.get_asset_by_object_path(
            EditorLevelLibrary.get_editor_world().get_path_name())
        filter = unreal.ARFilter(
            class_names=["World"],
            package_paths=[f"{parent_path}"],
            recursive_paths=True)
        maps = ar.get_assets(filter)
        # There should be only one map in the list
        if not maps:
            raise Exception("Could not find map.")
        map = maps[0]
        EditorLevelLibrary.save_all_dirty_levels()
        EditorLevelLibrary.load_level(map.get_full_name())
        # Remove the camera from the level.
        actors = EditorLevelLibrary.get_all_level_actors()
        for a in actors:
            if a.__class__ == unreal.CineCameraActor:
                EditorLevelLibrary.destroy_actor(a)
        EditorLevelLibrary.save_all_dirty_levels()
        EditorLevelLibrary.load_level(world.get_full_name())
        # There should be only one sequence in the path.
        sequence_name = sequences[0].asset_name
        # Remove the Level Sequence from the parent.
        # We need to traverse the hierarchy from the master sequence to find
        # the level sequence.
        root = "/Game/OpenPype"
        namespace = container.get('namespace').replace(f"{root}/", "")
        ms_asset = namespace.split('/')[0]
        filter = unreal.ARFilter(
            class_names=["LevelSequence"],
            package_paths=[f"{root}/{ms_asset}"],
            recursive_paths=False)
        sequences = ar.get_assets(filter)
        master_sequence = sequences[0].get_asset()
        # Breadth-first search of the subscene tree; found section is
        # removed from its track before the search stops.
        sequences = [master_sequence]
        parent = None
        for s in sequences:
            tracks = s.get_master_tracks()
            subscene_track = None
            for t in tracks:
                if t.get_class() == unreal.MovieSceneSubTrack.static_class():
                    subscene_track = t
                    break
            if subscene_track:
                sections = subscene_track.get_sections()
                for ss in sections:
                    if ss.get_sequence().get_name() == sequence_name:
                        parent = s
                        subscene_track.remove_section(ss)
                        break
                    sequences.append(ss.get_sequence())
                # Update subscenes indexes.
                i = 0
                for ss in sections:
                    ss.set_row_index(i)
                    i += 1
            if parent:
                break
        assert parent, "Could not find the parent sequence"
        EditorAssetLibrary.delete_directory(str(path.as_posix()))
        # Check if there isn't any more assets in the parent folder, and
        # delete it if not.
        asset_content = EditorAssetLibrary.list_assets(
            parent_path, recursive=False, include_folder=True
        )
        if len(asset_content) == 0:
            EditorAssetLibrary.delete_directory(parent_path)
| 35.509025 | 79 | 0.573861 |
e93d157cf7aab5c1bcb7bfeee8e1f4209c714ad6 | 2,862 | py | Python | recommander-lib/src/main.py | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | [
"MIT"
] | 1 | 2021-04-29T04:15:13.000Z | 2021-04-29T04:15:13.000Z | recommander-lib/src/main.py | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | [
"MIT"
] | null | null | null | recommander-lib/src/main.py | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Main application file
"""
__author__ = "Armend Ukehaxhaj"
__version__ = "1.0.0"
__license__ = "MIT"
from logzero import logger
import numpy as np
import pandas as pd
import csv
import pickle
from word2vec import word2vec
from preprocessor import preprocessor
import json
# Input corpora paths; only `amazon_sample` is referenced by main() below.
primary_data_filename = "input/GoTrainedData.txt"
sample_data_filename = "input/Sample.txt"
amazon_sample = "input/amazon_co-ecommerce_sample.csv"
def open_input_file(filename):
    """Build the text corpus for word2vec training.

    NOTE(review): the ``filename`` argument is currently IGNORED - the data
    is always read from ``./input/initial-data.json`` (one JSON object per
    line). Kept as-is to preserve existing call sites; confirm before
    switching to the passed-in path.

    Args:
        filename (str): unused legacy parameter.

    Returns:
        list[str]: one "<title> <brand>" string per input line; missing
        keys default to "" (so a record may start/end with a space).
    """
    canvas = []
    # Stream the file line by line instead of readlines() to avoid holding
    # the whole file in memory.
    with open('./input/initial-data.json') as json_file:
        for line in json_file:
            # Skip blank lines so a trailing newline doesn't crash json.loads.
            if not line.strip():
                continue
            obj = json.loads(line)
            title = obj.get("title", "")
            brand = obj.get("brand", "")
            canvas.append(title + " " + brand)
    return canvas
def main():
    """Entry point: set hyper-parameters, preprocess the corpus, and run a
    similarity query against a previously trained (pickled) word2vec model.

    The training steps are currently commented out; only the preprocessing
    and the pickled-model query are executed.
    """
    logger.info("Starting app")
    settings = {}
    settings['n'] = 5  # dimension of word embeddings
    settings['window_size'] = 3  # context window +/- center word
    settings['min_count'] = 0  # minimum word count
    settings['epochs'] = 3  # 5000 # number of training epochs
    # number of negative words to use during training
    settings['neg_samp'] = 10
    settings['learning_rate'] = 0.01  # learning rate
    np.random.seed(0)  # set the seed for reproducibility
    # corpus = [['the', 'quick', 'brown', 'fox',
    #            'jumped', 'over', 'the', 'lazy', 'dog']]
    # logger.info("Retrieving corpus")
    corpus = open_input_file(amazon_sample)
    # Pre process data
    logger.info("Preprocess the data")
    pp = preprocessor()
    corpus = pp.preprocess(corpus)
    # for row in new_corpus:
    #     for word in row:
    #         logger.info(word)
    # logger.info("Preprocessed data: ")
    # logger.info(corpus)
    # INITIALIZE W2V MODEL
    # w2v = word2vec(settings)
    # generate training data
    # logger.info("Training")
    # training_data = w2v.generate_training_data(settings, new_corpus)
    # train word2vec model
    # w2v.train(training_data)
    model_filename = 'models/finalized_model-refactored.sav'
    # save the model to disk
    # pickle.dump(w2v, open(model_filename, 'wb'))
    # Load the pickled model.
    # SECURITY: pickle.load executes arbitrary code from the file; only
    # load model files from a trusted source.
    w2v_from_pickle = pickle.load(open(model_filename, 'rb'))
    # Use the loaded pickled model to make predictions
    w2v_from_pickle.word_sim("microphone", 6)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 27.519231 | 74 | 0.615653 |
e93d7534c6c036af381481b03aad9004f87feec7 | 27 | py | Python | tests/models/__init__.py | Stevenjin8/song2vec | 908de06881d0598dcd2869d89709a2d50654a7fe | [
"MIT"
] | null | null | null | tests/models/__init__.py | Stevenjin8/song2vec | 908de06881d0598dcd2869d89709a2d50654a7fe | [
"MIT"
] | null | null | null | tests/models/__init__.py | Stevenjin8/song2vec | 908de06881d0598dcd2869d89709a2d50654a7fe | [
"MIT"
] | null | null | null | """Tests for ML models."""
| 13.5 | 26 | 0.592593 |
e93dd26357433b7e319a7cf157df9046ce5be7e6 | 2,378 | py | Python | spark_auto_mapper/data_types/datetime.py | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper/data_types/datetime.py | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper/data_types/datetime.py | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | [
"Apache-2.0"
] | null | null | null | from typing import Optional, List
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import coalesce, to_timestamp
from spark_auto_mapper.data_types.column import AutoMapperDataTypeColumn
from spark_auto_mapper.data_types.data_type_base import AutoMapperDataTypeBase
from spark_auto_mapper.data_types.literal import AutoMapperDataTypeLiteral
from spark_auto_mapper.helpers.value_parser import AutoMapperValueParser
from spark_auto_mapper.type_definitions.defined_types import AutoMapperDateInputType
class AutoMapperDateTimeDataType(AutoMapperDataTypeBase):
    # Wraps a value and converts it to a Spark timestamp column, optionally
    # trying a list of explicit datetime formats (first match wins).
    def __init__(
        self,
        value: AutoMapperDateInputType,
        formats: Optional[List[str]] = None
    ) -> None:
        """
        Converts the value to a timestamp type in Spark

        :param value: value
        :param formats: (Optional) formats to use for trying to parse the value otherwise uses Spark defaults
        """
        super().__init__()
        # Normalize plain python values into AutoMapper wrappers so
        # get_column_spec() can be called uniformly below.
        self.value: AutoMapperDataTypeBase = value \
            if isinstance(value, AutoMapperDataTypeBase) \
            else AutoMapperValueParser.parse_value(value)
        self.formats: Optional[List[str]] = formats
    def get_column_spec(
        self, source_df: Optional[DataFrame], current_column: Optional[Column]
    ) -> Column:
        """Return the Spark Column expression converting the value to timestamp."""
        # if column is not of type date then convert it to date
        # Build one to_timestamp() expression per requested format (or a
        # single default-parse expression); coalesce() then yields the first
        # format that successfully parses.
        formats_column_specs: List[Column] = [
            to_timestamp(
                self.value.get_column_spec(
                    source_df=source_df, current_column=current_column
                ),
                format=format_
            ) for format_ in self.formats
        ] if self.formats else [
            to_timestamp(
                self.value.get_column_spec(
                    source_df=source_df, current_column=current_column
                )
            )
        ]
        # Columns already typed as timestamp are passed through unchanged;
        # everything else (non-timestamp columns, literals) gets converted.
        if source_df is not None and isinstance(self.value, AutoMapperDataTypeColumn) \
                and not dict(source_df.dtypes)[self.value.value] == "timestamp":
            return coalesce(*formats_column_specs)
        elif isinstance(self.value, AutoMapperDataTypeLiteral):
            return coalesce(*formats_column_specs)
        else:
            column_spec = self.value.get_column_spec(
                source_df=source_df, current_column=current_column
            )
            return column_spec
| 38.983607 | 109 | 0.670311 |
e93e7a9f148352765158065775751a4ec95c81cf | 1,425 | py | Python | backend/registry/migrations/0002_auto_20220105_1336.py | mrmap-community/MrMap | 5dc05b7a5339b967047cd207755718f670a1d7cd | [
"MIT"
] | 10 | 2021-03-12T17:46:38.000Z | 2022-03-11T10:59:01.000Z | backend/registry/migrations/0002_auto_20220105_1336.py | mrmap-community/mrmap | 5dc05b7a5339b967047cd207755718f670a1d7cd | [
"MIT"
] | 214 | 2021-03-10T19:24:17.000Z | 2022-03-15T07:34:24.000Z | backend/registry/migrations/0002_auto_20220105_1336.py | mrmap-community/MrMap | 5dc05b7a5339b967047cd207755718f670a1d7cd | [
"MIT"
] | 9 | 2021-03-16T19:47:54.000Z | 2022-03-11T11:01:22.000Z | # Generated by Django 3.2.9 on 2022-01-05 12:36
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (3.2.9). Resets the default
    # managers on the two service models and adds unique-together
    # (method, operation) constraints to the three operation-URL models.
    # Do not edit by hand beyond comments.
    dependencies = [
        ('registry', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='webfeatureservice',
            managers=[
                ('capabilities', django.db.models.manager.Manager()),
            ],
        ),
        migrations.AlterModelManagers(
            name='webmapservice',
            managers=[
                ('capabilities', django.db.models.manager.Manager()),
            ],
        ),
        migrations.AddConstraint(
            model_name='catalougeserviceoperationurl',
            constraint=models.UniqueConstraint(fields=('method', 'operation'), name='registry_catalougeserviceoperationurl_unique_together_method_id_operation'),
        ),
        migrations.AddConstraint(
            model_name='webfeatureserviceoperationurl',
            constraint=models.UniqueConstraint(fields=('method', 'operation'), name='registry_webfeatureserviceoperationurl_unique_together_method_id_operation'),
        ),
        migrations.AddConstraint(
            model_name='webmapserviceoperationurl',
            constraint=models.UniqueConstraint(fields=('method', 'operation'), name='registry_webmapserviceoperationurl_unique_together_method_id_operation'),
        ),
    ]
| 36.538462 | 162 | 0.65193 |
e93e898e14d862c8186e0e63f6ce2ac5ff75423c | 15,524 | py | Python | relah.py | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | [
"WTFPL"
] | 3 | 2020-01-31T08:22:49.000Z | 2021-01-10T20:02:37.000Z | relah.py | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | [
"WTFPL"
] | null | null | null | relah.py | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | [
"WTFPL"
] | null | null | null | # Python implementation of DBS PayLah!
# By ttwj - 2017
import base64
import random
import string
#remember to install pycryptodome!
import datetime
from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.PublicKey import RSA
import lxml.etree, json
from lxml import html
from pprint import pprint
from io import StringIO
import requests
import re
import time
import warnings
import requests
import contextlib
from api.models import PayLahAPISource
# Local HTTP(S) debugging proxy (e.g. Charles/mitmproxy listening on 8888);
# only used when the commented-out `proxies=proxyDict` lines are enabled.
http_proxy = "http://localhost:8888"
https_proxy = "https://localhost:8888"
# PayLah app version string sent with every API request.
app_ver = '4.0.0'
proxyDict = {
    "http": http_proxy,
    'https': https_proxy
}
try:
    from functools import partialmethod
except ImportError:
    # Python 2 fallback: https://gist.github.com/carymrobbins/8940382
    # functools.partialmethod only exists on Python 3; emulate it with a
    # partial that binds itself to the instance on attribute access.
    from functools import partial
    class partialmethod(partial):
        def __get__(self, instance, owner):
            # Accessed on the class -> return the descriptor unchanged.
            if instance is None:
                return self
            # Accessed on an instance -> prepend it as the first argument.
            return partial(self.func, instance, *(self.args or ()), **(self.keywords or {}))
@contextlib.contextmanager
def no_ssl_verification():
    """Context manager that globally disables TLS certificate verification
    for `requests` by monkeypatching ``requests.Session.request``.

    SECURITY: disabling verification exposes traffic to man-in-the-middle
    attacks; this is intended only for local debugging through a proxy.
    """
    old_request = requests.Session.request
    requests.Session.request = partialmethod(old_request, verify=False)
    warnings.filterwarnings('ignore', 'Unverified HTTPS request')
    try:
        yield
    finally:
        # Fix: the original restored the patch only on the success path, so
        # an exception inside the `with` body left verification disabled
        # process-wide. try/finally guarantees restoration.
        warnings.resetwarnings()
        requests.Session.request = old_request
from Crypto.Cipher import AES
from Crypto import Random
class AESCipher:
    """AES-CBC helper with PKCS#5/7 padding and a FIXED IV.

    SECURITY NOTE(review): a constant IV makes identical plaintexts produce
    identical ciphertexts; it is kept because the PayLah API presumably
    expects this exact scheme - confirm before changing.
    """

    BLOCK_SIZE = 16  # AES block size in bytes
    _IV = b'1234567898765432'  # fixed IV, must match the server side

    def __init__(self, key):
        """*key* is the key string; its UTF-8 bytes are used directly."""
        self.key = key.encode()

    def pkcs5_pad(self, s):
        """
        padding to blocksize according to PKCS #5
        calculates the number of missing chars to BLOCK_SIZE and pads with
        ord(number of missing chars)
        @see: http://www.di-mgt.com.au/cryptopad.html
        @param s: string to pad
        @type s: string
        @rtype: string
        """
        pad_len = self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE
        return s + pad_len * chr(pad_len)

    def _pkcs5_unpad(self, b):
        """Strip PKCS#5 padding from bytes *b* (inverse of pkcs5_pad)."""
        return b[:-b[-1]]

    def encrypt(self, raw):
        """Encrypt str *raw*; returns the RAW ciphertext bytes (not hex).

        Behavior is unchanged from the original implementation (same fixed
        IV, same padding), since callers base64-encode this result for the
        PayLah API.
        """
        raw = self.pkcs5_pad(raw)
        cipher = AES.new(self.key, AES.MODE_CBC, self._IV)
        return cipher.encrypt(raw.encode('utf-8'))

    def decrypt(self, enc):
        """Decrypt ciphertext produced by :meth:`encrypt`.

        Accepts a hex-encoded str (the documented legacy input) or raw
        bytes; returns the decoded plaintext str.

        Fixes the original implementation, which was broken on Python 3:
        it called ``str.decode("hex")`` (removed in Py3), referenced an
        undefined ``unpad`` helper, and stripped a 16-byte IV prefix that
        ``encrypt`` never writes.
        """
        if isinstance(enc, str):
            enc = bytes.fromhex(enc)
        cipher = AES.new(self.key, AES.MODE_CBC, self._IV)
        return self._pkcs5_unpad(cipher.decrypt(enc)).decode('utf-8')
class DBSPayLahTransaction(object):
rand = ''
public_key_bin = ''
cipher = None
def updatePayLahAPISource(self):
self.payLahAPISource.api_random = self.rand
self.payLahAPISource.api_base64_public_key = self.base64_public_key
def __init__(self, payLahAPISource):
self.payLahAPISource = payLahAPISource
"""
api_random = models.CharField(max_length=20)
api_base64_public_key = models.TextField()
api_deviceID = models.CharField(max_length=100)
api_phoneID = models.CharField(max_length=100)
api_encryptedPasscode = models.TextField()
api_unencryptedPasscodeLength = models.IntegerField()
api_cookiesJSON = JSONField()
"""
self.ipAddress = payLahAPISource.api_ipAddress
self.rand = payLahAPISource.api_random
self.base64_public_key = payLahAPISource.api_base64_public_key
self.deviceID = payLahAPISource.api_deviceID
self.phoneID = payLahAPISource.api_phoneID
self.encryptedPasscode = payLahAPISource.api_encryptedPasscode
self.public_key_bin = base64.b64decode(payLahAPISource.api_base64_public_key.encode('utf-8'))
self.unencryptedPasscodeLength = str(payLahAPISource.api_unencryptedPasscodeLength)
self.cipher = AESCipher(self.rand)
self.r = requests.session()
self.r.cookies = requests.utils.cookiejar_from_dict(payLahAPISource.api_cookiesJSON)
#def __init__(self):
# self.r = requests.Session()
def ran_generator(size=16, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def get_server(self):
payload = {
'appID': 'DBSMobileWallet',
'appver': app_ver,
'channel': 'rc',
'ipAddress': self.ipAddress,
'platform': 'iPhone',
'serviceID': 'getServer'
}
data = self.requestLah(payload)
self.public_key_bin = base64.b64decode(data['base64EncodedString'].encode('utf-8'))
print(data)
def requestLah(self, payload):
import requests
import logging
# These two lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body which is not logged.
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
#http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
#logging.basicConfig()
#logging.getLogger().setLevel(logging.DEBUG)
#requests_log = logging.getLogger("requests.packages.urllib3")
#requests_log.setLevel(logging.DEBUG)
#requests_log.propagate = True
with no_ssl_verification():
r = self.r.post("https://p2pcweb.dbs.com/services/DBSMobileWalletService0/" + payload['serviceID'], data=payload,
#proxies=proxyDict,
headers={
'user-agent': 'PayLah/7 CFNetwork/808.2.16 Darwin/16.3.0',
})
data = json.loads(r.text)
return data
def encrypt(self, text):
return base64.b64encode(self.cipher.encrypt(text))
def prelogin(self):
payload = {
'appID': 'DBSMobileWallet',
'appver': app_ver,
'channel': 'rc',
'ipAddress': self.ipAddress,
'deviceId': self.encrypt(self.deviceID),
'loginType': 'wallet',
'platform': 'iPhone',
'serviceID': 'prelogin',
}
print(payload)
self.requestLah(payload)
def generate_paylah_url(self, amount, reservation_id, retry=False):
payload = {
'appID': 'DBSMobileWallet',
'appver': app_ver,
'channel': 'rc',
'channelIndicator': 'P2P',
'count': self.encrypt('20'),
'ipAddress': self.ipAddress,
'deviceId': self.encrypt(self.deviceID),
'isOneTimeOnly': self.encrypt('Y'),
'payment_name': self.encrypt('BeepPay PayLah ' + reservation_id),
'periodOfSale': self.encrypt('7'),
'price': self.encrypt(amount),
'phoneId': self.encrypt(self.phoneID),
'phoneModel': 'iPhone 5s',
'platform': 'iPhone',
'serviceID': 'generatePaylahURL',
}
print(payload)
data = self.requestLah(payload)
if data['statusCode'] != '0000':
if retry is False:
print("PayLah expired, regenerating")
# TODO: save this particulars somewhere in the model :-)
self.retry_paylah_login()
return self.generate_paylah_url(amount, reservation_id, retry=True)
else:
raise Exception('Exceeded login retries')
print(data)
return data
def retry_paylah_login(self):
'''
self.rand = payLahAPISource.api_random
self.base64_public_key = payLahAPISource.api_base64_public_key
self.deviceID = payLahAPISource.api_deviceID
self.phoneID = payLahAPISource.api_phoneID
self.encryptedPasscode = payLahAPISource.api_encryptedPasscode
self.public_key_bin = base64.b64decode(payLahAPISource.api_base64_public_key.encode('utf-8'))
self.unencryptedPasscodeLength = str(payLahAPISource.api_unencryptedPasscodeLength)
self.cipher = AESCipher(self.rand)
self.r = requests.session()
self.r.cookies = requests.utils.cookiejar_from_dict(payLahAPISource.api_cookiesJSON)
'''
self.get_server()
# transaction.public_key_bin = base64.b64decode("MIICqDCCAZACCGNAYXyIwSRhMA0GCSqGSIb3DQEBBQUAMBUxEzARBgNVBAMMCkRCUyBQYXlsYWgwHhcNMTcxMDI3MTczMTEyWhcNMTkxMDI4MTczMTEyWjAYMRYwFAYDVQQDDA1EQlMgTWVyY2hhbnRzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAhQ2CljVoM6GrAWrxN0qh9dgVLwpTcFsC2C3uKecRFCDODZY3Qv/DL8ta8+ZN+UWmvHCt/tWjt7FCCIolfn1iXyPuldngsey/JKTSmhPL1imufPUJjbUZaTSwpP1y7DWWJGLqqZMdtyaq0KkpxDM8rBgmXm9eC+YQ+woDux2SQp4PlCpnjxXpYoXG55CWjLsQLx1AaVOFjH38do13OIvEMJWucfmDgY4k6l8TT9gxKoGXTN7p9rHK57dVDOLTScspjuOazU6nLM0U5obsQAvjEzMzKo4wDESremQYWlcaKT4gOliSwbOy4EF6XBrtU+JC7jGPWAOpx/evRUecfKgR9wIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAyjzuSQB2LMLe24TsNHO7zqpdxl4TQ1fd6B7+E1klKPn/pcWgVQZkgsOyjH+LY7P+yk5LVUyGBukuH8qQUY2LWqo8Si1dJeSJzRSKfMElZj1idMx04X9YiJHvpq4eRaqjXtmsXRgc7bD3TlE6ZZa1GwVWux67IdfhCb/9pnfY37d9G6xM0Tk2UkxTc+WfXLG8k1RX6HhjQ8vTNJhkMTb/TwZfLQ89owPKzSahCpk9qKj9TU4uuDJXmAmiuf6IKCXL+mvGeltc/NDGetvsSwUCkBfkpuRoiS4mHkdGn+4w3izgobByjAgQMNpK4l7qLuonmHLDFkE92tX/yn4bJxqGy".encode('utf-8'))
self.wallet_launch()
self.prelogin()
self.wallet_login_new()
self.payLahAPISource.api_random = self.rand
self.payLahAPISource.api_base64_public_key = self.base64_public_key
self.payLahAPISource.api_deviceID = self.deviceID
self.payLahAPISource.api_phoneID = self.phoneID
self.payLahAPISource.api_encryptedPasscode = self.encryptedPasscode
self.payLahAPISource.api_unencryptedPasscodeLength = self.unencryptedPasscodeLength
self.payLahAPISource.api_cookiesJSON = requests.utils.dict_from_cookiejar(self.r.cookies)
self.payLahAPISource.save()
def get_paymentlink_expired_history(self):
payload = {
'appID': 'DBSMobileWallet',
'appver': app_ver,
'channel': 'rc',
'channelIndicator': 'P2P',
'count': self.encrypt('50'),
'ipAddress': self.ipAddress,
'deviceId': self.encrypt(self.deviceID),
'index': self.encrypt('0'),
'phoneId': self.encrypt(self.phoneID),
'phoneModel': 'iPhone 5s',
'platform': 'iPhone',
'serviceID': 'getPaymentLinkHistoryExpired',
}
return self.requestLah(payload)
def get_transaction_history(self, retry=False):
payload = {
'appID': 'DBSMobileWallet',
'appver': app_ver,
'channel': 'rc',
'channelIndicator': 'P2P',
'count': self.encrypt('80'),
'ipAddress': self.ipAddress,
'deviceId': self.encrypt(self.deviceID),
'index': self.encrypt('1'),
'loginType': '02',
'phoneId': self.encrypt(self.phoneID),
'phoneModel': 'iPhone 5s',
'platform': 'iPhone',
'serviceID': 'getTransactionHistory',
}
print(payload)
data = self.requestLah(payload)
if data['statusCode'] != '0000':
if retry is False:
print("PayLah expired, regenerating")
# TODO: save this particulars somewhere in the model :-)
self.retry_paylah_login()
return self.get_transaction_history(retry=True)
else:
raise Exception('Exceeded login retries')
print(json.dumps(data))
return data
def force_paylink_expire(self, transactionRef):
payload = {
'appID': 'DBSMobileWallet',
'appver': app_ver,
'channel': 'rc',
'channelIndicator': 'P2P',
'deviceId': self.encrypt(self.deviceID),
'expiryDays': self.encrypt('EXPIRY'),
'ipAddress': self.ipAddress,
'isOneTime': self.encrypt('Y'),
'status': self.encrypt('E'),
'transactionRefNumber': self.encrypt(transactionRef),
'platform': 'iPhone',
'serviceID': 'updatePaymentLink',
'isOnetime': self.encrypt('Y'),
}
print(payload)
return self.requestLah(payload)
def wallet_login_new(self):
payload = {
'appID': 'DBSMobileWallet',
'appver': app_ver,
'channel': 'rc',
'channelIndicator': 'P2P',
'count': self.encrypt('10'),
'ipAddress': self.ipAddress,
'deviceId': self.encrypt(self.deviceID),
'encryptedPassCode': self.encryptedPasscode,
'index': self.encrypt('1'),
'loginType': '02',
'phoneId': self.encrypt(self.phoneID),
'phoneModel': 'iPhone 5s',
'platform': 'iPhone',
'serviceID': 'walletloginNew',
'touchIDStatus': 'Active',
'unencryptedPasscodelength': self.unencryptedPasscodeLength
}
print(payload)
return self.requestLah(payload)
    def wallet_launch(self):
        """Perform the 'walletLaunch' handshake.

        Generates a fresh random key, stores an ``AESCipher`` built from it on
        ``self.cipher`` (presumably for encrypting subsequent traffic — TODO
        confirm), RSA-encrypts the key with the server's public key
        (PKCS#1 v1.5) and sends it base64-encoded as ``inputParam``.
        """
        # Fresh random session key.
        self.rand = DBSPayLahTransaction.ran_generator()
        #self.rand = "QCos1rgim225kkrE"
        self.cipher = AESCipher(self.rand)
        public_key = RSA.import_key(self.public_key_bin)
        cipher_rsa = PKCS1_v1_5.new(public_key)
        # RSA-encrypt the session key so only the server can recover it.
        cipher_text = cipher_rsa.encrypt(self.rand.encode())
        print(cipher_text)
        #print("random " + self.rand)
        #print(self.public_key_bin)
        encoded = base64.b64encode(cipher_text)
        # (A hard-coded sample ciphertext used for debugging was removed here.)
        print("encoded " + str(encoded))
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'deviceId': self.encrypt(self.deviceID),
            'encryptedAES128Key': '',
            'encryptedDeviceModel': self.encrypt('iPhone 5s'),
            'encryptedOs': self.encrypt('iPhone'),
            'fromWalletType': self.encrypt('02'),
            'inputParam': encoded,
            'ipAddress': self.ipAddress,
            'phoneId': self.encrypt(self.phoneID),
            'platform': 'iPhone',
            'searchCriteria': 'deviceID',
            'searchParam': self.encrypt(self.deviceID),
            'serviceID': 'walletLaunch',
            'subscriptionId': '',
            'timeStamp': timestamp,
            'toWalletType': self.encrypt('00')
        }
        print(payload)
        self.requestLah(payload)
#paylah_api_source = PayLahAPISource.objects.get(pk=1)
#txn = DBSPayLahTransaction(paylah_api_source)
#txn.get_transaction_history()
| 33.67462 | 987 | 0.626578 |
e93fa44d8c8e89fa596f6f1e1b5862803b660a31 | 13,090 | py | Python | st_dashboard.py | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | [
"MIT"
] | null | null | null | st_dashboard.py | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | [
"MIT"
] | null | null | null | st_dashboard.py | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | [
"MIT"
] | null | null | null | import streamlit as st
import dill
import pandas as pd
import plotly.express as px
from datetime import date
import statsmodels
# Load the pre-compiled sentiment/price-action DataFrame (pickled with dill).
with open('compiled-sentiment-history.pkd', 'rb') as f:
    df_compiled = dill.load(f)
df_compiled.drop_duplicates(inplace=True)
# The second level of the MultiIndex holds the trading date; collect the
# unique dates and sort them by their '-'-separated components (works because
# the components are zero-padded ISO-style strings — TODO confirm format).
dates = list({idx[1] for idx in df_compiled.index})
dates = sorted(dates, key=lambda dt: (str(dt).split('-')))
# date_ = '2021-06-01'
st.title('The Data Incubator Capstone Project')
st.subheader('*Title*: **Wallstreetbets Gossip vs. Market Price Action**')
st.subheader('*Created by*: Amir A. Taghavey - Summer, 2021')
st.markdown('*Email*: a [dot] taghavey @ gmail [dot] com')
''' '''
st.markdown(
    'This App was developed as main deliverable of thecapstone project requirement of [**the Data Incubator**](https://www.thedataincubator.com/) fellowship program.')
# Sidebar navigation: the selected page drives the if/elif dispatch below.
st.sidebar.title('Options Dashboard:')
page = st.sidebar.selectbox('Select field:',
                            (
                                'Synopsis',
                                'App structure',
                                'VIZ: Reddit hot_10 vs. time',
                                'VIZ: Gossip vs. Action',
                                'ML analysis summary',
                                'Acknowledgments')
                            , 0)
if page == 'Synopsis':
st.markdown(
'''
**Background**: The short-squeeze of GameStop and AMC stocks in early 2021 was impacted in great part by the massive-scale coordinated action of the subreddit ***wallstreetbets*** ants army of retail investors.
Many of the early ants realized remarkable gains on their investment enabling them to payoff their student loans or home mortgages at the demise of a few hedge funds such as the London-based White Square Capital.
These events motivated new swarms of retail investors to join in the movement with their hard-earned savings, and for many this game has offered its ugly face!
**Objective**: Motivated by the story above, this project aimed at finding an objective answer to one question: ***Is safety in being a part of the herd when it comes to navigating the US Stock Market?***
**Methods**: To achieve this, I (i) scanned popular social media platforms to identify and characterize how the retail investors percieved the market performance for the most frequently talked about stocks on New York Stock Exchange before each trading session and (ii) compiled the actual market action data at the end of each trading session on a daily basis over the time period of 6/1/2021-9/1/2021, and performed an extensive amount of analysis to extract possible underlying correlartions.
**Summary**: NO correlation (and hence NO basis for meaningful predictions) was found betweem the market price action and any of the prior (i) PRE-market gossip / sentiment, (ii) stock price action, or (iii) stock options activity from the previous trading session.
**Conclusion**: Moral of the story, objectively and in a nutshell, is that ***No evidence was found to support ANY consistent forward temporal correlation bwteen market gossip and price action!***
'''
)
elif page == 'App structure':
st.markdown(
'''
App Structure:
\n
A. *reddit's PRE-market hot_20* (9:00 AM ET), the 20 most talked about NYSE stocks are identified
B. recent posts from *stocktwits* and *twitter* APIs for the hot_20 list of the day are compiled
C. vader sentiment intensity analyzer is implemented to extract investor sentiment from compiled text
D. price action data are collected from *yahoo_fin* API at the close of market (4:00 PM ET)
E. investor sentiment - market performance data are analyzed, modeled, and visualized
''')
img = 'CodeStructure.png'
st.image(img, clamp=True,
caption='Schematic of the logical code structure and inter-connections between modules \
(i) compiling market talk data from social media platforms, \
(ii) performing sentiment intensity analysis, \
(iii) gathering financial data, and \
(iv) conducting data analytics on compiled market gossip - price action data.')
elif page == 'ML analysis summary':
st.subheader('**Machine Learning Correlation Analysis**')
st.markdown('''
\n
***Summary:*** An extensive correlation analysis study of the compiled data was conducted
with the *objective* to find underlying forward temporal correlations (if any) between
(a) post-market price action and (b.1) pre-market sentiment nalysis data, (b.2) pre-market
stock options activity data (e.g., contract volume, change in open interest, change in percent ITM / OTM, etc.),
and/or (b.3) previous trading session post-market price action data for reddit's hot stock list.
\n
***Approach***: Target (i.e. lable) was to predict the change in stock price, $$\Delta$$P.
Price change was defined as price quote at market close less price quote at market open normalized to
price quote at market open for a given ticker on reddit hot list. Two types of approaches were implemented
to model $$\Delta$$P: **A. Regressive Approach**, and **B. Binary Classification Approach**.
In the latter approach, price action signal was reduced to upward / downward trends.
\n
***Transformations***: All quantitative features were scaled using standard scaler, and dimensionality
reduction was carried out using TrauncatedSVD method.
\n
***Modeling***: Cross validation score was used to compare modeling performance of the tested models.
Model comparisons among regressors and classifiers were done separately using $$r^{2}$$ and accuracy
metrics, respectively.
\n
Models implemented include:
\n
| Model | Regression | Classification |
| :--- | :--------: | :------------: |
| Linear Regression | ✔ | |
| Logistic Regression | | ✔ |
| Ridge with cross-validation | ✔ | ✔ |
| Decision Tree | ✔ | ✔ |
| Random Forest | ✔ | ✔ |
| K-Nearest-Neighbors | ✔ | ✔ |
| Support Vector Machine | ✔ | ✔ |
| Multi-layer Perceptron Network | ✔ | ✔ |
\n
.
\n
***Results***: All regressors returned an $$r^{2}$$-value equal to zero (0) consistent with no detectable correlation
between any of (i) sentiment, (ii) stock options, or (iii) previous-day stock data and the response
variable (i.e. $$\Delta$$P). This was further corroborated with the slighly higher than the null-model
classification accuracy score yielded by the KNN classifier of 0.54 (versus 0.53 classification
accuracy corresponding to the null hypothesis).
The modeling results could extract no correlation between (signal) price action data for the
reddit hotlist and the sentiment extracted from the market talks, option activities or prior
trading-session data.
''')
elif page == 'Acknowledgments':
st.markdown('''
- Reddit hotlist sentiment intensity analysis in this project was done by implementing an exising
[reddit-sentiment_analyis](https://github.com/asad70/reddit-sentiment-analysis) github repository
developed by [**asad70**](https://github.com/asad70). It was modified to expend search scope
to additional financial sub-reddits, provide human-guided training to Vader Sentiment Intensity
Analyzer, and to fit the required i/o structure of this project.
- I would like to thank and acknowledge Dr. [Robert Schroll](robert@thedataincubator.com),
my instructor and TDI capstone project advisor, for the instrumental feedback I received from him
during the design, development and execution of this project.
''')
elif page == 'VIZ: Gossip vs. Action':
trendline_on = st.sidebar.checkbox('add linear trendline:', False)
date_idx = st.sidebar.slider('Select date index:',
min_value=0,
max_value=len(dates)-1,
value=0)
date_ = dates[date_idx]
df = df_compiled.loc[(slice(None), date_),:]
df.sort_values('counts', ascending=False, inplace=True)
df.reset_index(inplace=True)
# plt = sentiment_visualizer_date(c_df,'2021-06-01')
plt=px.scatter(df,
x='bull_bear_ratio',
y='change_sn',
color='neutral',
size='counts', #text='ticker',
size_max=20,
color_continuous_scale=px.colors.sequential.BuPu_r,
hover_data=['ticker', 'volume'],
labels={'bull_bear_ratio': 'Investor Bullishness [-]',
'change_sn': 'Price Change [-]'},
trendline='ols' if trendline_on else None,
title=f"As of {date.strftime(date_, r'%B, %d %Y')}:"
)
plt.update_layout(plot_bgcolor='white', # #ceced0
title_font={'size':16, 'family':'Arial Black'},
yaxis={'showgrid':False, 'zeroline':False, 'linecolor': 'black',
'zerolinecolor': 'grey', 'tickfont':{'size':12},
'titlefont':{'size':14, 'family':'Arial Black'},
'range':[-0.2,0.2]},
xaxis={'showgrid':False, 'zeroline':False, 'linecolor': 'black',
'tickfont':{'size':12}, 'titlefont':{'size':14, 'family':'Arial Black'},
'range':[.75,1.75]},
height=600, width=700, #'ylorrd'
coloraxis_colorbar={'title':"Neutrality",
'tickvals': [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] ,
'tick0': 0.4,
# 'cmin':0.5,
# 'cmax': 1.0,
#'tickvals':[5,6,7,8,9], 'ticktext': ['0.1M', '1M', '10M', '100M', '1B']
},
hovermode="x unified"
)
plt.update_traces(textposition='top center',
textfont={'size':10, 'color':'grey'},
marker={'line':{'color':'#ceced0'}},
#hovertemplate=None,
)
st.plotly_chart(plt, use_container_width=True)
st.subheader('Sentiment')
st.dataframe(df[['ticker', 'bearish', 'bullish',
'neutral', 'bull_bear_ratio',
'change_sn', 'volume']])
elif page == 'VIZ: Reddit hot_10 vs. time':
st.subheader('All-time (since the Memorial Day weekend!) HOT-10 stocks on Reddit:')
hot_10_inds = df_compiled.reset_index().groupby(by='ticker') \
.count()[['date']].sort_values('date', ascending=False)[:10].index
df_ = df_compiled.reset_index()
hot10_counts = df_[df_.ticker.isin(hot_10_inds)] \
.groupby('ticker') \
.sum()[['counts']] \
.reindex(hot_10_inds) \
.reset_index()
fig = px.pie(hot10_counts, values='counts', names='ticker', hole=0.3,
color_discrete_sequence=px.colors.sequential.RdBu)
fig.update_traces(textposition='inside', textinfo='percent+label')
st.plotly_chart(fig)
hot10 = [f'{i+1}. {ticker}' for i, ticker in enumerate(hot_10_inds)]
picked_hot = st.sidebar.selectbox('choose ticker to plot:', options=hot10, index=0)
picked_hot = picked_hot.split(' ')[1]
st.markdown(f'Bar chart of daily intra-session change in stock price for **${picked_hot}**:')
df = df_compiled.loc[picked_hot].drop(columns=['counts'])
plt = px.bar(df, y='change_sn', text='volume', color='bull_bear_ratio',
color_continuous_scale=px.colors.sequential.RdBu_r)
plt.update_traces(texttemplate='%{text:.2s}', textposition='outside')
plt.update_layout(uniformtext_minsize=8)
plt.update_layout(xaxis_tickangle=-45,
yaxis={'showgrid':False,
'title': 'session change [-]',
'range':[-0.1, 0.1]},
coloraxis_colorbar={'title':"Investor\nBullishness",
'tickmode': 'array',
'tickvals': [0.8, 0.9, 1, 1.1, 1.2],
'tick0': 0.8,})
st.plotly_chart(plt, use_container_width=True)
st.dataframe(df)
| 55.940171 | 504 | 0.58793 |
e940349493488e9c1525d630923a4b14c70fd2d8 | 745 | py | Python | hoofball/migrations/0002_comment.py | leo-holanda/Hoofball | ccf4399d33a6381acd2ff41efce3dbf0dca6a092 | [
"MIT"
] | 1 | 2021-07-30T10:05:43.000Z | 2021-07-30T10:05:43.000Z | hoofball/migrations/0002_comment.py | leo-holanda/Hoofball | ccf4399d33a6381acd2ff41efce3dbf0dca6a092 | [
"MIT"
] | null | null | null | hoofball/migrations/0002_comment.py | leo-holanda/Hoofball | ccf4399d33a6381acd2ff41efce3dbf0dca6a092 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-01 20:26
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the ``Comment`` model (table populated by the hoofball app)."""

    dependencies = [
        ('hoofball', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=50)),
                # 280 chars — presumably mirroring a tweet-length limit.
                ('text', models.TextField(max_length=280)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('source', models.CharField(max_length=15)),
            ],
        ),
    ]
e9417a482c1a501cccd1b38f496ec064d6eb9c78 | 627 | py | Python | clock/clock/lib.py | litheblas/blasrummet-rpi | 5300e61c0d0d93fd77cd489eb02165793453b99f | [
"MIT"
] | null | null | null | clock/clock/lib.py | litheblas/blasrummet-rpi | 5300e61c0d0d93fd77cd489eb02165793453b99f | [
"MIT"
] | null | null | null | clock/clock/lib.py | litheblas/blasrummet-rpi | 5300e61c0d0d93fd77cd489eb02165793453b99f | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import logging
from time import sleep
from datetime import datetime
from .settings import PULSE_DURATION
from .io import pull, release
logger = logging.getLogger(__name__)
def pulse():
    """Emit one clock pulse: assert the output, hold for PULSE_DURATION, release."""
    logger.debug('Pulsing at {}'.format(datetime.now().time()))
    pull()
    sleep(PULSE_DURATION)
    release()
def sleep_until_next_minute():
    """Block until the wall clock reaches the next whole minute (xx:xx:00).

    Accuracy is good enough for clock driving; the error is usually in the
    order of 10-100 milliseconds.
    """
    current = datetime.now().time()
    elapsed_in_minute = current.second + current.microsecond / 1000000.0
    sleep(60.0 - elapsed_in_minute)
e9425f8008305f97cda9a9d9e3075c0d79dde033 | 38 | py | Python | CH_03_pythonic_syntax/T_17_mypy.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | CH_03_pythonic_syntax/T_17_mypy.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | CH_03_pythonic_syntax/T_17_mypy.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | some_number: int
some_number = 'test'
| 12.666667 | 20 | 0.763158 |
e9432f57bd7e613914b0ff79424dc9823a1f7a75 | 2,513 | py | Python | crowdgezwitscher/users/migrations/0004_auto_20170402_1358.py | Strassengezwitscher/Crowdgezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 4 | 2016-07-22T07:20:31.000Z | 2016-11-13T18:13:34.000Z | crowdgezwitscher/users/migrations/0004_auto_20170402_1358.py | Strassengezwitscher/Strassengezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 402 | 2016-04-26T08:38:17.000Z | 2022-03-11T23:26:49.000Z | crowdgezwitscher/users/migrations/0004_auto_20170402_1358.py | Strassengezwitscher/Crowdgezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 1 | 2018-01-14T16:58:57.000Z | 2018-01-14T16:58:57.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-02 13:58
from django.db import migrations
def forwards_func(apps, schema_editor):
    """Create add/change/delete/view permissions for ``TwitterAccount`` and
    attach them to the admin and moderator groups.

    The 'view' permission does not exist by default and is created here.
    """
    # Historical model versions must be used inside migrations; the live
    # models may be newer than what this migration expects.
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    TwitterAccount = apps.get_model('twitter', 'TwitterAccount')
    alias = schema_editor.connection.alias
    target_groups = (
        Group.objects.using(alias).get(name="Administratoren"),
        Group.objects.using(alias).get(name="Moderatoren"),
    )
    ct = ContentType.objects.get_for_model(TwitterAccount)
    for action in ('add', 'change', 'delete', 'view'):
        perm, _created = Permission.objects.get_or_create(
            codename='%s_twitteraccount' % action,
            name='Can %s twitter account' % action,
            content_type=ct,
        )
        for group in target_groups:
            group.permissions.add(perm)
def reverse_func(apps, schema_editor):
    """Removes permissions for TwitterAccount from admins and mods.

    Includes deleting (in addition to unassigning) the view permission,
    since 'view' was created by ``forwards_func`` rather than by Django.
    """
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    TwitterAccount = apps.get_model('twitter', 'TwitterAccount')
    db_alias = schema_editor.connection.alias
    admins = Group.objects.using(db_alias).get(name="Administratoren")
    mods = Group.objects.using(db_alias).get(name="Moderatoren")
    content_type = ContentType.objects.get_for_model(TwitterAccount)
    for perm_type in ['add', 'change', 'delete', 'view']:
        permission = Permission.objects.get(codename='%s_twitteraccount' % perm_type)
        admins.permissions.remove(permission)
        mods.permissions.remove(permission)
    # Only the custom 'view' permission is deleted outright; the built-in
    # add/change/delete permissions are merely detached from the groups.
    Permission.objects.using(db_alias).filter(content_type=content_type, codename__contains='view').delete()
class Migration(migrations.Migration):
    """Data migration: grant/revoke TwitterAccount permissions for the
    administrator and moderator groups (see forwards_func/reverse_func)."""

    dependencies = [
        ('users', '0003_auto_20160927_1456'),
        ('twitter', '0005_auto_20170226_1348'),
    ]
    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
| 36.955882 | 108 | 0.702746 |
e9451ca12bd25e2853456158cde9221357093674 | 544 | py | Python | solutions/python3/267.py | sm2774us/amazon_interview_prep_2021 | f580080e4a6b712b0b295bb429bf676eb15668de | [
"MIT"
] | 42 | 2020-08-02T07:03:49.000Z | 2022-03-26T07:50:15.000Z | solutions/python3/267.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | null | null | null | solutions/python3/267.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | 40 | 2020-02-08T02:50:24.000Z | 2022-03-26T15:38:10.000Z | class Solution:
def generatePalindromes(self, s):
cnt, n = collections.Counter(s), len(s) // 2
odd, s, q = [c for c in cnt if cnt[c] % 2], "".join(k * (cnt[k] // 2) for k in cnt), {"#" * n}
if len(odd) > 1: return []
for c in s:
new = set()
for w in q:
for i in range(n):
if w[i] == "#":
new.add(w[:i] + c + w[i + 1:])
q = new
return [w + odd[0] + w[::-1] for w in q] if odd else [w + w[::-1] for w in q] | 41.846154 | 102 | 0.397059 |
e945ff9db15d3f14ca3c606adc1612355944457e | 909 | py | Python | gallery/03_sp/plot_wavelets.py | RandallBalestriero/TheanoXLA | d8778c2eb3254b478cef4f45d934bf921e695619 | [
"Apache-2.0"
] | 67 | 2020-02-21T21:26:46.000Z | 2020-06-14T14:25:42.000Z | gallery/03_sp/plot_wavelets.py | RandallBalestriero/TheanoXLA | d8778c2eb3254b478cef4f45d934bf921e695619 | [
"Apache-2.0"
] | 8 | 2020-02-22T14:45:56.000Z | 2020-06-07T16:56:47.000Z | gallery/03_sp/plot_wavelets.py | RandallBalestriero/TheanoXLA | d8778c2eb3254b478cef4f45d934bf921e695619 | [
"Apache-2.0"
] | 4 | 2020-02-21T17:34:46.000Z | 2020-05-30T08:30:14.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# flake8: noqa
"""
Morlet Wavelet in time and Fourier domain
=========================================
This example shows how to generate a wavelet filter-bank.
"""
import symjax
import symjax.tensor as T
import matplotlib.pyplot as plt
import numpy as np
J = 5
Q = 4
scales = T.power(2, T.linspace(0.1, J - 1, J * Q))
scales = scales[:, None]
wavelet = symjax.tensor.signal.complex_morlet(5 * scales, np.pi / scales)
waveletw = symjax.tensor.signal.fourier_complex_morlet(
5 * scales, np.pi / scales, wavelet.shape[-1]
)
f = symjax.function(outputs=[wavelet, waveletw])
wavelet, waveletw = f()
plt.subplot(121)
for i in range(J * Q):
plt.plot(2 * i + wavelet[i].real, c="b")
plt.plot(2 * i + wavelet[i].imag, c="r")
plt.subplot(122)
for i in range(J * Q):
plt.plot(i + waveletw[i].real, c="b")
plt.plot(i + waveletw[i].imag, c="r")
| 22.170732 | 73 | 0.628163 |
3a5d52f7066df721bcc6a4454c0e49f976cabd83 | 39 | py | Python | kfdata/__main__.py | kylef-archive/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | 1 | 2015-11-08T13:23:39.000Z | 2015-11-08T13:23:39.000Z | kfdata/__main__.py | kylef/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | null | null | null | kfdata/__main__.py | kylef/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | null | null | null | from kfdata.manage import main
main()
| 9.75 | 30 | 0.769231 |
3a5f1a224d28494c27bf5124ac4e6a08b36bb55e | 240 | py | Python | com/sujoym/basic/sys.py | sujoym/python-examples | ee49a6aeb50749611341b2850587b30c38b34509 | [
"Apache-2.0"
] | null | null | null | com/sujoym/basic/sys.py | sujoym/python-examples | ee49a6aeb50749611341b2850587b30c38b34509 | [
"Apache-2.0"
] | null | null | null | com/sujoym/basic/sys.py | sujoym/python-examples | ee49a6aeb50749611341b2850587b30c38b34509 | [
"Apache-2.0"
] | null | null | null | import sys
#sys.stderr.write('Stderr text\n');
#sys.stderr.flush()
#sys.stdout.write('Stdout text\n');
#print(sys.argv[1])
#if len(sys.argv)>1:
# print(float(sys.argv[1])*5);
def main(arg1):
    """Multiply *arg1* (an integer literal as a string) by 4.

    The product is printed, and — as a backward-compatible improvement over
    the original (which returned None) — also returned so callers can use it.

    :param arg1: string convertible by :func:`int`, e.g. ``sys.argv[1]``.
    :return: ``int(arg1) * 4``.
    :raises ValueError: if *arg1* is not a valid integer literal.
    """
    result = int(arg1) * 4
    print(result)
    return result
main(sys.argv[1]);
| 17.142857 | 35 | 0.641667 |
3a5f95c4dd3189822a688ab6608502a352c54b4b | 167 | py | Python | slackforms/handlers/__init__.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 1 | 2019-06-20T00:11:58.000Z | 2019-06-20T00:11:58.000Z | slackforms/handlers/__init__.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 3 | 2020-02-11T23:46:14.000Z | 2021-06-10T21:10:37.000Z | slackforms/handlers/__init__.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 3 | 2019-12-13T06:53:18.000Z | 2021-06-04T07:12:56.000Z | # flake8: noqa
from .form import FormHandler
from .slash import SlashHandler
from .manual import ManualHandler
from .interactions import ActionHandler, MessageHandler
| 27.833333 | 55 | 0.838323 |
3a5fa540aac3f4f6c53d13b040d514473b579d4b | 779 | py | Python | zensearch/exceptions.py | suryaavala/zen_search | 51a2a32a5963b8cda878b356c1225abe6aae6304 | [
"MIT"
] | null | null | null | zensearch/exceptions.py | suryaavala/zen_search | 51a2a32a5963b8cda878b356c1225abe6aae6304 | [
"MIT"
] | null | null | null | zensearch/exceptions.py | suryaavala/zen_search | 51a2a32a5963b8cda878b356c1225abe6aae6304 | [
"MIT"
] | null | null | null | class PrimaryKeyNotFoundError(Exception):
def __init__(self, *args):
if args:
self.message = args[0]
else:
self.message = None
self.default_message = "Cannot find primary key in the data point. Every data point should at least have primary key"
def __str__(self):
if self.message:
return f"{self.message}"
else:
return self.default_message
class DuplicatePrimaryKeyError(PrimaryKeyNotFoundError):
    """Raised when a data point's primary key duplicates an existing one.

    Reuses the base class's message handling but swaps in its own default
    message text.
    """
    def __init__(self, *args):
        super().__init__(*args)
        self.default_message = "Duplicate primary key value found in the data point. It's been assumed that every entity should have a unique set of primary keys"

    def __str__(self):
        # NOTE(review): this override is redundant — it only delegates to the
        # inherited implementation and could be removed.
        return super().__str__()
3a61c5a4f7f2b0b08f169681bdd4f9538e9142c6 | 13,902 | py | Python | RocMethod.py | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | [
"MIT"
] | null | null | null | RocMethod.py | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | [
"MIT"
] | null | null | null | RocMethod.py | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding:utf-8
"""
Author : Yuanqing Mei
Date : 2021/4/8
Time: 15:42
File: RocMethod.py
HomePage : http://github.com/yuanqingmei
Email : dg1533019@smail.nju.edu.cn
This script find out the cutoff of a metric value by maximizing the AUC value and ROC、BPP、MFM、GM methods.
References:
[1] Bender, R. Quantitative risk assessment in epidemiological studies investigating threshold effects.
Biometrical Journal, 41 (1999), 305-319.(计算VARL的SE(标准误)的参考文献P310)
[2] Zhou, Y., et al. "An in-depth study of the potentially confounding effect of class size in fault prediction."
ACM Trans. Softw. Eng. Methodol. (2014) 23(1): 1-51. (计算BPP、MFM(F1)值为阈值)
[3] Shatnawi, R. (2018). Identifying Threshold Values of Change-Prone Modules.
(计算sum(Sensitivity+Specificity)=sum(TPR+TNR)值为阈值)
"""
import time
def roc_threshold(working_dir="F:\\NJU\\MTmeta\\experiments\\supervised\\trainingData\\",
result_dir="F:\\NJU\\MTmeta\\experiments\\supervised\\",
training_list="List.txt"):
import os
import csv
import numpy as np
import pandas as pd
import statsmodels.api as sm
# from sklearn import metrics
from sklearn.metrics import recall_score, precision_score, f1_score, roc_curve, auc, roc_auc_score, confusion_matrix
# 显示所有列
pd.set_option('display.max_columns', None)
# 显示所有行
pd.set_option('display.max_rows', None)
# the item of row of dataframe
pd.set_option('display.width', 5000)
working_directory = working_dir
result_directory = result_dir
os.chdir(working_directory)
with open(working_dir + training_list) as l:
lines = l.readlines()
for line in lines:
file = line.replace("\n", "")
print('the file is ', file)
# 分别处理每一个项目: f1取出要被处理的项目;
# f2:用于存储每一个项目的阈值信息,f2用csv.writer写数据时没有newline参数,会多出一空行;
# deletedList: 用于存储项目中某个度量logistic回归时,系数不显著或系数为零的度量及该项目名
with open(working_directory + file, 'r', encoding="ISO-8859-1") as f1, \
open(result_directory + "RocThreshold\\ROC_Thresholds.csv", 'a+', encoding="utf-8", newline='') as f2, \
open(result_directory + "RocThreshold\\deletedList.csv", 'a+', encoding="utf-8") as deletedList:
reader = csv.reader(f1)
writer = csv.writer(f2)
writer_deletedList = csv.writer(deletedList)
# receives the first line of a file and convert to dict generator
fieldnames = next(reader)
# exclude the non metric fields (12 items) and metric values including undef and undefined (17 items)
non_metric = ["relName", "className", "bug"]
# metric_data stores the metric fields (102 items)
def fun_1(m):
return m if m not in non_metric else None
metric_data = filter(fun_1, fieldnames)
df = pd.read_csv(file)
# drop all rows that have any NaN values,删除表中含有任何NaN的行,并重新设置行号
df = df.dropna(axis=0, how='any', inplace=False).reset_index(drop=True)
if os.path.getsize(result_directory + "RocThreshold\\ROC_Thresholds.csv") == 0:
writer.writerow(["fileName", "metric", "Corr_metric_bug", "B_0", "B_0_pValue", "B_1", "B_1_pValue",
"cov11", "cov12", "cov22", "BaseProbability_1",
"auc_threshold", "auc_threshold_variance", "auc_max_value", "i_auc_max",
"gm_threshold", "gm_threshold_variance", "gm_max_value", "i_gm_max",
"bpp_threshold", "bpp_threshold_variance", "bpp_max_value", "i_bpp_max",
"mfm_threshold", "mfm_threshold_variance", "f1_max_value", "i_f1_max",
"roc_threshold", "roc_threshold_variance", "roc_max_value", "i_roc_max",
"varl_threshold", "varl_threshold_variance"])
if os.path.getsize(result_directory + "RocThreshold\\deletedList.csv") == 0:
writer_deletedList.writerow(["fileName", "metric", "B_0_pValue", "B_0",
"auc_max_value", "i_auc_max", "gm_max_value", "i_gm_max", "bpp_max_value",
"i_bpp_max", "f1_max_value", "i_f1_max", "roc_max_value", "i_roc_max"])
for metric in metric_data:
print("the current file is ", file)
print("the current metric is ", metric)
# 由于bug中存储的是缺陷个数,转化为二进制存储,若x>2,则可预测bug为3个以上的阈值,其他类推
df['bugBinary'] = df.bug.apply(lambda x: 1 if x > 0 else 0)
# 依次用该度量的每一个值作为阈值计算出auc和GM,然后选择auc最大值的那个度量值作为阈值,即断点回归的cutoff
# 同时计算BPP(Balanced-pf-pd)、MFM(F1)和ROC(Sensitivity+Specificity)=(TPR+TNR)值,
# 分别定义存入五个值list,最大值和取最大值的下标值
AUCs = []
GMs = []
BPPs = []
MFMs = []
ROCs = []
auc_max_value = 0
gm_max_value = 0
bpp_max_value = 0
f1_max_value = 0
roc_max_value = 0
i_auc_max = 0
i_gm_max = 0
i_bpp_max = 0
i_f1_max = 0
i_roc_max = 0
# 判断每个度量与bug之间的关系,因为该关系会影响到断点回归时,相关系数大于零,则LATE估计值大于零,反之,则LATE估计值小于零
Corr_metric_bug = df.loc[:, [metric, 'bug']].corr('spearman')
# the i value in this loop, is the subscript value in the list of AUCs, GMs etc.
for i in range(len(df)):
t = df.loc[i, metric]
if Corr_metric_bug[metric][1] < 0:
df['predictBinary'] = df[metric].apply(lambda x: 1 if x <= t else 0)
else:
df['predictBinary'] = df[metric].apply(lambda x: 1 if x >= t else 0)
# confusion_matrix()函数中需要给出label, 0和1,否则该函数算不出TP,因为不知道哪个标签是poistive.
c_matrix = confusion_matrix(df["bugBinary"], df['predictBinary'], labels=[0, 1])
tn, fp, fn, tp = c_matrix.ravel()
if (tn + fp) == 0:
tnr_value = 0
else:
tnr_value = tn / (tn + fp)
if (fp + tn) == 0:
fpr = 0
else:
fpr = fp / (fp + tn)
# fpr, tpr, thresholds = roc_curve(df['bugBinary'], df['predictBinary'])
# AUC = auc(fpr, tpr)
auc_value = roc_auc_score(df['bugBinary'], df['predictBinary'])
recall_value = recall_score(df['bugBinary'], df['predictBinary'], labels=[0, 1])
precision_value = precision_score(df['bugBinary'], df['predictBinary'], labels=[0, 1])
f1_value = f1_score(df['bugBinary'], df['predictBinary'], labels=[0, 1])
gm_value = (recall_value * tnr_value) ** 0.5
pfr = recall_value
pdr = fpr # fp / (fp + tn)
bpp_value = 1 - (((0 - pfr) ** 2 + (1 - pdr) ** 2) * 0.5) ** 0.5
roc_value = recall_value + tnr_value
AUCs.append(auc_value)
GMs.append(gm_value)
BPPs.append(bpp_value)
MFMs.append(f1_value)
ROCs.append(roc_value)
# 求出上述五个list中最大值,及对应的i值,可能会有几个值相同,且为最大值,则取第一次找到那个值(i)为阈值
if auc_value > auc_max_value:
auc_max_value = auc_value
i_auc_max = i
if gm_value > gm_max_value:
gm_max_value = gm_value
i_gm_max = i
if bpp_value > bpp_max_value:
bpp_max_value = bpp_value
i_bpp_max = i
if f1_value > f1_max_value:
f1_max_value = f1_value
i_f1_max = i
if roc_value > roc_max_value:
roc_max_value = roc_value
i_roc_max = i
print("auc_max_value is ", auc_max_value)
print("gm_max_value is ", gm_max_value)
print("bpp_max_value is ", bpp_max_value)
print("f1_max_value is ", f1_max_value)
print("roc_max_value is ", roc_max_value)
print("i_auc_max is ", i_auc_max)
print("i_gm_max is ", i_gm_max)
print("i_bpp_max is ", i_bpp_max)
print("i_f1_max is ", i_f1_max)
print("i_roc_max is ", i_roc_max)
df['intercept'] = 1.0
# 通过 statsmodels.api 逻辑回归分类; 指定作为训练变量的列,不含目标列`bug`
logit = sm.Logit(df['bugBinary'], df.loc[:, [metric, 'intercept']])
# 拟合模型,disp=1 用于显示结果
result = logit.fit(method='bfgs', disp=0)
print(result.summary())
pValueLogit = result.pvalues
if pValueLogit[0] > 0.05: # 自变量前的系数
writer_deletedList.writerow(
[file, metric, pValueLogit[0], B[0], auc_max_value, i_auc_max, gm_max_value,
i_gm_max, bpp_max_value, i_bpp_max, f1_max_value, i_f1_max, roc_max_value,
i_roc_max])
continue # 若训练数据LOGIT回归系数的P值大于0.05,放弃该数据。
B = result.params # logit回归系数
if B[0] == 0: # 自变量前的系数
writer_deletedList.writerow(
[file, metric, pValueLogit[0], B[0], auc_max_value, i_auc_max, gm_max_value,
i_gm_max, bpp_max_value, i_bpp_max, f1_max_value, i_f1_max, roc_max_value,
i_roc_max])
continue # 若训练数据LOGIT回归系数等于0,放弃该数据。
# 计算auc阈值及标准差,包括其他四个类型阈值
auc_threshold = df.loc[i_auc_max, metric]
gm_threshold = df.loc[i_gm_max, metric]
bpp_threshold = df.loc[i_bpp_max, metric]
mfm_threshold = df.loc[i_f1_max, metric]
roc_threshold = df.loc[i_roc_max, metric]
# 计算LOGIT回归系数矩阵的协方差矩阵,因为计算aucThreshold的标准差要用到,见参考文献[1],
# 此处借鉴VARL方法,本质上VARL也是度量值中的一个
cov = result.cov_params()
cov11 = cov.iloc[0, 0]
cov12 = cov.iloc[0, 1]
cov22 = cov.iloc[1, 1]
auc_threshold_se = ((cov.iloc[0, 0] + 2 * auc_threshold * cov.iloc[0, 1]
+ auc_threshold * auc_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
auc_threshold_variance = auc_threshold_se ** 2
gm_threshold_se = ((cov.iloc[0, 0] + 2 * gm_threshold * cov.iloc[0, 1]
+ gm_threshold * gm_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
gm_threshold_variance = gm_threshold_se ** 2
bpp_threshold_se = ((cov.iloc[0, 0] + 2 * bpp_threshold * cov.iloc[0, 1]
+ bpp_threshold * bpp_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
bpp_threshold_variance = bpp_threshold_se ** 2
mfm_threshold_se = ((cov.iloc[0, 0] + 2 * mfm_threshold * cov.iloc[0, 1]
+ mfm_threshold * mfm_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
mfm_threshold_variance = mfm_threshold_se ** 2
roc_threshold_se = ((cov.iloc[0, 0] + 2 * roc_threshold * cov.iloc[0, 1]
+ roc_threshold * roc_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
roc_threshold_variance = roc_threshold_se ** 2
# 求VARL作为阈值,此处未用10折交叉验证的方法 VARL.threshold = (log(Porbability[1]/Porbability[2])-B[1])/B[2]
valueOfbugBinary = df["bugBinary"].value_counts() # 0 和 1 的各自的个数
print("the value of valueOfbugBinary[0] is ", valueOfbugBinary[0])
print("the value of valueOfbugBinary[1] is ", valueOfbugBinary[1])
# 用缺陷为大于0的模块数占所有模块之比
BaseProbability_1 = valueOfbugBinary[1] / (valueOfbugBinary[0] + valueOfbugBinary[1])
# 计算VARL阈值及标准差
varl_threshold = (np.log(BaseProbability_1 / (1 - BaseProbability_1)) - B[1]) / B[0]
varl_threshold_se = ((cov.iloc[0, 0] + 2 * varl_threshold * cov.iloc[0, 1]
+ varl_threshold * varl_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
varl_threshold_variance = varl_threshold_se ** 2
# 输出每一度量的结果
writer.writerow([file, metric, Corr_metric_bug[metric][1], B[0], pValueLogit[0], B[1], pValueLogit[1],
cov11, cov12, cov22, BaseProbability_1,
auc_threshold, auc_threshold_variance, auc_max_value, i_auc_max,
gm_threshold, gm_threshold_variance, gm_max_value, i_gm_max,
bpp_threshold, bpp_threshold_variance, bpp_max_value, i_bpp_max,
mfm_threshold, mfm_threshold_variance, f1_max_value, i_f1_max,
roc_threshold, roc_threshold_variance, roc_max_value, i_roc_max,
varl_threshold, varl_threshold_variance])
# break
if __name__ == '__main__':
    # Run the full ROC-threshold pipeline and report wall-clock duration.
    s_time = time.time()
    roc_threshold()
    e_time = time.time()
    execution_time = e_time - s_time
    # NOTE(review): the message says "Bender.py" although this file is
    # RocMethod.py — presumably a copy-paste leftover worth fixing.
    print("The __name__ is ", __name__, ". This is end of RocMethod.py!\n",
          "The execution time of Bender.py script is ", execution_time)
| 47.447099 | 120 | 0.535319 |
3a6257117bb5b39b2295a1e04bfebd82a73a4f27 | 1,372 | py | Python | app/models.py | jonathankamau/note-taking-app | f7315dbde34d1a06aa10bc0548fc85770fa4c142 | [
"MIT"
] | 1 | 2020-07-18T16:35:42.000Z | 2020-07-18T16:35:42.000Z | app/models.py | jonathankamau/note-taking-app | f7315dbde34d1a06aa10bc0548fc85770fa4c142 | [
"MIT"
] | 32 | 2020-07-19T18:19:24.000Z | 2021-06-04T23:45:34.000Z | app/models.py | jonathankamau/note-taking-app | f7315dbde34d1a06aa10bc0548fc85770fa4c142 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class BaseModel(models.Model):
    """Abstract base adding created/modified timestamps to concrete models."""

    # Set once when the row is inserted.
    date_created = models.DateTimeField(auto_now_add=True)
    # Refreshed on every save().
    date_modified = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True
class UserProfile(BaseModel, User):
    """Application user: Django auth ``User`` plus BaseModel timestamps."""

    class Meta:
        db_table = 'users'

    def __str__(self):
        # BUGFIX: this model inherits directly from ``User`` (multi-table
        # inheritance), so the username is an attribute of ``self``; there
        # is no ``self.user`` relation and the old ``self.user.username``
        # raised AttributeError.
        return "User: {}".format(self.username)
class Note(BaseModel):
    """A meeting note taken by a user, tagged with a meeting category name."""

    title = models.CharField(max_length=50)
    org_name = models.CharField(max_length=50)
    purpose = models.TextField(blank=False)
    content = models.TextField(blank=False)
    total_attendance = models.PositiveIntegerField(blank=False)
    # Owner of the note; deleting the user cascades to their notes.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # NOTE(review): free-text category name, not a FK to MeetingCategory —
    # confirm this denormalization is intentional.
    meeting_category = models.CharField(max_length=50)

    class Meta:
        db_table = 'notes'

    def __str__(self):
        return "My Notes: {}".format(self.id)
class MeetingCategory(models.Model):
    """Lookup table of meeting categories.

    NOTE(review): defines its own timestamp fields instead of inheriting
    ``BaseModel``, and uses ``default=timezone.now`` rather than
    ``auto_now_add``/``auto_now`` (so ``date_modified`` is NOT refreshed on
    save) — confirm whether this divergence is intentional.
    """

    date_created = models.DateTimeField(default=timezone.now, editable=False)
    date_modified = models.DateTimeField(default=timezone.now, editable=False)
    name = models.CharField(max_length=50)
    description = models.TextField(blank=True)

    class Meta:
        db_table = 'meeting_categories'

    def __str__(self):
        return "Meeting Categories: {}".format(self.id)
| 29.826087 | 78 | 0.715015 |
3a63a86305fa3e3ced908249d69f673dd8d16d58 | 717 | py | Python | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 3cbc86a0a9d7
Revises: 77894fcde804
Create Date: 2018-09-27 12:25:31.893545
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3cbc86a0a9d7'
down_revision = '77894fcde804'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the composite uniqueness guarantee over
    # (item_id, order_id, lot_number) on order_items.
    op.drop_constraint('unique_order_item', 'order_items', type_='unique')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the composite unique constraint removed in upgrade().
    op.create_unique_constraint('unique_order_item', 'order_items', ['item_id', 'order_id', 'lot_number'])
    # ### end Alembic commands ###
| 24.724138 | 106 | 0.707113 |
3a640b59523119016904d7053ed1bc557df19331 | 2,685 | py | Python | mp_roguelike/ai.py | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | [
"MIT"
] | null | null | null | mp_roguelike/ai.py | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | [
"MIT"
] | null | null | null | mp_roguelike/ai.py | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | [
"MIT"
] | null | null | null | import random
from .util import sign
class AI:
    """Base behaviour for an entity: follows a queued path one step per turn."""

    def __init__(self, entity):
        # Entity this brain controls; all movement is issued through it.
        self.entity = entity
        # Remaining (x, y) waypoints to visit, in order.
        self.queued_path = []

    def think(self):
        """Advance one step along the queued path, if any."""
        if self.queued_path:
            x, y = self.queued_path.pop(0)
            self.move(x - self.entity.x, y - self.entity.y)

    def move(self, dx, dy):
        # Delegate the relative step to the entity itself.
        self.entity.queue_move(dx, dy)

    def move_to(self, x, y):
        """Replace the current path with a walk to (x, y).

        Moves diagonally while both coordinates differ, then straight;
        each intermediate position becomes one waypoint.
        """
        self.queued_path = []
        at = [self.entity.x, self.entity.y]
        while at != [x, y]:
            at[0] += sign(x - at[0])
            at[1] += sign(y - at[1])
            self.queued_path.append((*at,))

    def is_enemy(self, entity):
        # By default, player-controlled entities are the enemies.
        return isinstance(entity.ai, ControlledAI)

    def attack(self, entity):
        """Walk toward ``entity``; no-op when there is no target."""
        if entity is not None:
            self.move_to(entity.x, entity.y)
class ControlledAI(AI):
    """AI stub for player-controlled entities: never acts on its own."""

    def think(self):
        # Player input drives the entity; the AI takes no turn action.
        pass

    def is_enemy(self, entity):
        # From the player's perspective, non-player entities are the enemies.
        return not super().is_enemy(entity)
class AggressiveAI(AI):
    """Hostile AI: each turn, walks toward the nearest player-controlled entity."""

    def think(self):
        """Pick a target, head for it, take one step, and end the turn."""
        target = self.find_closest_enemy()
        self.attack(target)
        super().think()
        self.entity.turn_done = True

    def is_enemy(self, entity):
        # Only player-controlled entities count as enemies here.
        return super().is_enemy(entity) and isinstance(entity.ai, ControlledAI)

    def find_closest_enemy(self):
        """Return the visible enemy nearest to this entity, or None."""
        nearest = None
        for candidate in self.entity.get_visible_entities():
            if not self.is_enemy(candidate):
                continue
            # Keep the first enemy seen, then any strictly closer one.
            if nearest is None or candidate.dist(self.entity) < nearest.dist(self.entity):
                nearest = candidate
        return nearest
class SpawnerAI(AI):
    """AI that periodically spawns new entities adjacent to its own entity."""

    def __init__(self, entity, spawn_fun, max_spawn=5, spawn_cooldown=15):
        super().__init__(entity)
        # Factory producing a fresh entity each time a spawn is due.
        self.spawn_fun = spawn_fun
        # Cap on simultaneously-alive spawned entities.
        self.max_spawn = max_spawn
        # Minimum number of turns between two spawns.
        self.spawn_cooldown = spawn_cooldown
        # Spawned entities that are still alive.
        self.spawned = []
        self.turns_since_last_spawn = 0

    def position(self, entity):
        """Place ``entity`` on a random unoccupied tile next to the spawner.

        NOTE(review): loops forever if every neighbouring tile is occupied —
        assumes at least one of the nine candidate tiles is free; confirm.
        """
        while True:
            entity.x = self.entity.x + random.randint(-1, 1)
            entity.y = self.entity.y + random.randint(-1, 1)
            if not self.entity.world.is_occupied(entity.x, entity.y):
                return

    def think(self):
        """Spawn a new entity when below the cap and off cooldown, then end turn."""
        if len(self.spawned) < self.max_spawn \
                and self.turns_since_last_spawn >= self.spawn_cooldown:
            entity = self.spawn_fun()
            self.spawned.append(entity)
            # Position once the entity joins the world; forget it on death.
            entity.added += lambda: self.position(entity)
            entity.dead += lambda: self.spawned.remove(entity)
            self.entity.world.add_entity(entity)
            self.turns_since_last_spawn = 0
        self.turns_since_last_spawn += 1
        self.entity.turn_done = True
| 26.584158 | 79 | 0.582495 |
3a640e6170ae4b45fbad29d7cf0c3f5b49ab9f01 | 83 | py | Python | mach_cad/model_obj/materials/__init__.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 6 | 2021-11-02T20:12:32.000Z | 2021-11-13T10:50:35.000Z | mach_cad/model_obj/materials/__init__.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 18 | 2021-11-29T20:14:55.000Z | 2022-03-02T07:17:37.000Z | mach_cad/model_obj/materials/__init__.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 1 | 2022-01-29T00:52:38.000Z | 2022-01-29T00:52:38.000Z |
from .material_generic import *
__all__ = []
__all__ += material_generic.__all__ | 13.833333 | 35 | 0.759036 |
3a6432af138a6ef234f6f37cbbda6934b7bb3c37 | 7,657 | py | Python | hpc-historias-clinicas/settings.py | btenaglia/hpc-historias-clinicas | 649d8660381381b1c591667760c122d73071d5ec | [
"BSD-3-Clause"
] | null | null | null | hpc-historias-clinicas/settings.py | btenaglia/hpc-historias-clinicas | 649d8660381381b1c591667760c122d73071d5ec | [
"BSD-3-Clause"
] | null | null | null | hpc-historias-clinicas/settings.py | btenaglia/hpc-historias-clinicas | 649d8660381381b1c591667760c122d73071d5ec | [
"BSD-3-Clause"
] | null | null | null | import os
PROJECT_ROOT = os.path.dirname(__file__)
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Buenos_Aires'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-es'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static_ files should be collected to.
# Don't put anything in this directory yourself; store your static_ files
# in apps' "static_/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static_/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static_ files.
# Example: "http://media.lawrence.com/static_/"
STATIC_URL = '/static/'
# Additional locations of static_ files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'staticfiles'),
)
# List of finder classes that know how to find static_ files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY: a hard-coded SECRET_KEY in source control is a credential leak.
# Read it from the environment when available; fall back to the historical
# value so existing deployments keep working unchanged.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'e89^m%8%qx)qfj^m8@*=pp9wyg=sujhy*z9xty4f^x)tzq7_&m'
)
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'pagination.middleware.PaginationMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
#'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processers go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# END TEMPLATE CONFIGURATION
ROOT_URLCONF = 'hpc-historias-clinicas.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'hpc-historias-clinicas.wsgi.application'
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'pagination', # paginacion
'easy_pdf',
'easy_thumbnails',
'djrill',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'hpc-historias-clinicas.users', # Usuarios
'hpc-historias-clinicas.medicos', # Medicos
'hpc-historias-clinicas.ayudantes', # Ayudantes
'hpc-historias-clinicas.pacientes', # Pacientes
'hpc-historias-clinicas.anamnesis', # Anamnesis
'hpc-historias-clinicas.antecedentes_familiares', # Antecedentes Familiares
'hpc-historias-clinicas.antecedentes_personales', # Antecedentes Personales
'hpc-historias-clinicas.diagnosticos', # Diagnosticos
'hpc-historias-clinicas.habitos', # Habitos
'hpc-historias-clinicas.aparatos', # Aparatos
'hpc-historias-clinicas.examen_fisico', # Examen Fisico
'hpc-historias-clinicas.planteos', # PLanteos diagnosticos
'hpc-historias-clinicas.metodologias', # Metodologias de estudio y tratamiento
'hpc-historias-clinicas.historias', # Historias clinicas
'hpc-historias-clinicas.inter_consultas', # Inter Consultas
'hpc-historias-clinicas.evoluciones', # Evoluciones
'hpc-historias-clinicas.fojas_quirurgicas', # Fojas quirurgicas
'hpc-historias-clinicas.procedimientos_quirurgicos', # Procedimientos quirurgicos
'hpc-historias-clinicas.epicrisis', # Epicrisis
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# END Custom user app defaults
APPEND_SLASH = False
THUMBNAIL_BASEDIR = 'thumbs'
# --------------------------
# IMPORT LOCAL SETTINGS
# --------------------------
try:
from settings_local import *
except ImportError:
pass
| 38.094527 | 87 | 0.750947 |
3a66f861ec173370f50a0b31924da0bccb5e1872 | 2,661 | py | Python | romanyh/transposition.py | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | [
"BSD-3-Clause"
] | null | null | null | romanyh/transposition.py | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | [
"BSD-3-Clause"
] | 5 | 2020-12-08T04:37:21.000Z | 2021-01-06T03:36:30.000Z | romanyh/transposition.py | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | [
"BSD-3-Clause"
] | null | null | null | import re
import sys
from music21.interval import Interval
from music21.key import Key
def findKeysInRomanTextString(rntxt):
    """Extract every key token (e.g. " C: ", " g: ", " Bb: ") from RomanText.

    Returns the key names, in order of appearance, without the colon.
    Uppercase letters denote major keys, lowercase minor; an optional
    '#' or 'b' accidental may follow the letter.
    """
    key_token = re.compile(r" ([a-gA-G][#b]?): ")
    return key_token.findall(rntxt)
def transposeKeys(keys, newTonic):
    """Transpose a list of keys relative to a new tonic.

    The first key is the reference: the interval from its tonic to
    ``newTonic`` (in the reference key's mode) is applied to every key.
    A transposed key at or beyond 7 sharps/flats is respelled
    enharmonically.  Flats come back as 'b' rather than music21's '-'.
    """
    referenceKey = Key(keys[0])
    newTonicKey = Key(newTonic, mode=referenceKey.mode)
    intervalDiff = Interval(referenceKey.tonic, newTonicKey.tonic)
    transposedKeys = [newTonicKey.tonicPitchNameWithCase]
    for k in keys[1:]:
        localKey = Key(k)
        newLocalTonic = localKey.tonic.transpose(intervalDiff)
        newLocalKey = Key(newLocalTonic, mode=localKey.mode)
        if abs(newLocalKey.sharps) >= 7:
            # Avoid extreme key signatures: prefer the enharmonic spelling.
            newLocalKey = Key(
                newLocalTonic.getEnharmonic(), mode=localKey.mode
            )
        transposedKeys.append(newLocalKey.tonicPitchNameWithCase)
    # music21 spells flats with '-'; the RomanText convention uses 'b'.
    transposedKeys = [k.replace("-", "b") for k in transposedKeys]
    return transposedKeys
def transposeRomanText(f, newTonic="C"):
    """Transposes a RomanText file into a different key.

    The transposition is performed in the following way:
    - The first key in the file is taken as the reference key
    - An interval between the reference key and new tonic is computed
    - Every transposed key respects that interval, unless it becomes
    or exceeds a key signature with 7 sharps or 7 flats
    - In that case, the enharmonic spelling is preferred

    The mode of the original key is always respected. That is,
    attempting to transpose an annotation in the key of C Major
    with a newTonic of `a` will result in a transposition to
    A Major. Change of mode is not trivial and it is not addressed
    in this code.
    """
    with open(f) as fd:
        rntxt = fd.read()
    keys = findKeysInRomanTextString(rntxt)
    transposedKeys = transposeKeys(keys, newTonic)
    # " X: " tokens as they appear in the text, old and new, index-aligned.
    keysString = [f" {k}: " for k in keys]
    transposedKeysString = [f" {k}: " for k in transposedKeys]
    transposedRntxt = ""
    # Consume the text left to right: split at each successive key token,
    # emit the text before it plus the replacement token, then continue
    # with the remainder so earlier occurrences are never revisited.
    for original, transposed in zip(keysString, transposedKeysString):
        solved, replace, remainder = rntxt.partition(original)
        transposedRntxt += solved + transposed
        rntxt = remainder
    transposedRntxt += rntxt
    return transposedRntxt
if __name__ == "__main__":
    # CLI: transposition.py <romantext-file> [new-tonic (default "C")]
    inputFile = sys.argv[1]
    newTonic = sys.argv[2] if len(sys.argv) == 3 else "C"
    transposedRntxt = transposeRomanText(inputFile, newTonic)
    print(transposedRntxt)
| 36.452055 | 70 | 0.693724 |
3a67795832eb29853a6ccb60a0d65c013b0a8f82 | 4,847 | py | Python | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | LizaShak/AzureTRE | b845eb4b73439ef7819565aaadb36f43b6484ad9 | [
"MIT"
] | 2 | 2021-11-14T16:57:16.000Z | 2022-03-13T15:14:26.000Z | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | anatbal/AzureTRE | d1d4891657c737092e761c4aaf80b04ff0f03fc7 | [
"MIT"
] | null | null | null | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | anatbal/AzureTRE | d1d4891657c737092e761c4aaf80b04ff0f03fc7 | [
"MIT"
] | null | null | null | import json
import pytest
import uuid
from mock import AsyncMock, patch
from db.errors import EntityDoesNotExist
from models.domain.resource import Status
from models.domain.workspace import Workspace
from models.domain.resource import Deployment
from resources import strings
from service_bus.deployment_status_update import receive_message_and_update_deployment
pytestmark = pytest.mark.asyncio
test_data = [
'bad',
'{"good": "json", "bad": "message"}'
]
test_sb_message = {
"id": "59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76",
"status": Status.Deployed,
"message": "test message"
}
class ServiceBusReceivedMessageMock:
    """Minimal stand-in for an Azure Service Bus received message.

    Stores the payload serialized to JSON and exposes the fixed correlation
    id the code under test reads; ``str(mock)`` yields the raw JSON body.
    """

    def __init__(self, message: dict):
        self.message = json.dumps(message)
        self.correlation_id = "test_correlation_id"

    def __str__(self):
        return self.message
def create_sample_workspace_object(workspace_id):
    """Build a not-yet-deployed Workspace fixture with the given id."""
    return Workspace(
        id=workspace_id,
        description="My workspace",
        resourceTemplateName="tre-workspace-vanilla",
        resourceTemplateVersion="0.1.0",
        resourceTemplateParameters={},
        deployment=Deployment(status=Status.NotDeployed, message="")
    )
@pytest.mark.parametrize("payload", test_data)
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_receiving_bad_json_logs_error(app, sb_client, logging_mock, payload):
    """Malformed queue payloads are logged as errors and still completed."""
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(payload)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    await receive_message_and_update_deployment(app)
    # The logged message must identify the bad-format condition...
    error_message = logging_mock.call_args.args[0]
    assert error_message.startswith(strings.DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT)
    # ...and the message is still completed so it is not redelivered.
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)
@patch('service_bus.deployment_status_update.WorkspaceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_receiving_good_message(app, sb_client, logging_mock, repo):
    """A well-formed status message updates the matching workspace with no errors."""
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    expected_workspace = create_sample_workspace_object(test_sb_message["id"])
    repo().get_workspace_by_workspace_id.return_value = expected_workspace
    await receive_message_and_update_deployment(app)
    # Lookup by the id from the message, then persist the updated workspace.
    repo().get_workspace_by_workspace_id.assert_called_once_with(uuid.UUID(test_sb_message["id"]))
    repo().update_workspace.assert_called_once_with(expected_workspace)
    logging_mock.assert_not_called()
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)
@patch('service_bus.deployment_status_update.WorkspaceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_when_updating_non_existent_workspace_error_is_logged(app, sb_client, logging_mock, repo):
    """An unknown workspace id is logged with the id-not-found message."""
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    # Repository lookup fails: no workspace with this id.
    repo().get_workspace_by_workspace_id.side_effect = EntityDoesNotExist
    await receive_message_and_update_deployment(app)
    expected_error_message = strings.DEPLOYMENT_STATUS_ID_NOT_FOUND.format(test_sb_message["id"])
    logging_mock.assert_called_once_with(expected_error_message)
    # The message is still completed: retrying would not make the id exist.
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)
@patch('service_bus.deployment_status_update.WorkspaceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_when_updating_and_state_store_exception(app, sb_client, logging_mock, repo):
    """On a state-store failure the error is logged and the message is NOT completed."""
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    # Any unexpected repository failure is treated as a state-store outage.
    repo().get_workspace_by_workspace_id.side_effect = Exception
    await receive_message_and_update_deployment(app)
    logging_mock.assert_called_once_with(strings.STATE_STORE_ENDPOINT_NOT_RESPONDING + " ")
    # Leaving the message uncompleted allows redelivery once the store recovers.
    sb_client().get_queue_receiver().complete_message.assert_not_called()
3a67c5e3bdd0bcd555184047e8d52728b4026b70 | 1,384 | py | Python | fms-python/face_core.py | seanarwa/firm | 774965004766c2ce59e17bb08692370280b3e95c | [
"Apache-2.0"
] | null | null | null | fms-python/face_core.py | seanarwa/firm | 774965004766c2ce59e17bb08692370280b3e95c | [
"Apache-2.0"
] | null | null | null | fms-python/face_core.py | seanarwa/firm | 774965004766c2ce59e17bb08692370280b3e95c | [
"Apache-2.0"
] | null | null | null | import os
import time
import logging as log
import numpy as np
from scikit-learn.preprocessing import normalize
# local modules
import config
def process(encoding):
    """Quantize a face-encoding vector and return the result."""
    return get_quantized_features(encoding)
def get_median_values_for_bins(bins):
    """Map each digitize bin index (1..len(bins)) to that bin's lower edge.

    Index ``i`` (1-based, as returned by ``np.digitize``) maps to
    ``bins[i - 1]``; the overflow index ``len(bins)`` maps to the last edge.
    """
    n_bins = bins.shape[0]
    lower_edges = {idx + 1: bins[idx] for idx in range(n_bins - 1)}
    lower_edges[n_bins] = bins[n_bins - 1]
    return lower_edges
def get_quantized_features(features, quantization_factor=30):
    """L2-normalize ``features`` row-wise, shift them non-negative, and
    quantize each value to the lower edge of one of ``quantization_factor``
    equally spaced bins.

    Parameters
    ----------
    features : array-like of shape (n_samples, n_features)
        Raw feature vectors (e.g. face encodings).
    quantization_factor : int, optional
        Number of quantization bins (default 30).

    Returns
    -------
    numpy.ndarray
        Same shape as the input; each entry is
        floor(quantization_factor * lower_bin_edge).
    """
    normalized_features = normalize(features, axis=1)
    # Shift so every value is non-negative before binning.
    offset = np.abs(np.min(normalized_features))
    offset_features = normalized_features + offset
    min_val = np.min(offset_features)
    max_val = np.max(offset_features)
    bins = np.linspace(start=min_val, stop=max_val, num=quantization_factor)
    median_values = get_median_values_for_bins(bins)
    # Every value lies in [bins[0], bins[-1]], so np.digitize returns
    # indices in 1..quantization_factor (never 0).
    original_quantized_features = np.digitize(offset_features, bins)
    # BUGFIX: the previous implementation passed a lazy Python-3 ``map``
    # object to np.apply_along_axis, which never evaluates to numbers.
    # Use an explicit lookup table indexed by bin number instead.
    lookup = np.empty(quantization_factor + 1)
    lookup[0] = bins[0]  # index 0 is unreachable; filled defensively
    for idx in range(1, quantization_factor + 1):
        lookup[idx] = median_values[idx]
    quantized_features = lookup[original_quantized_features]
    return np.floor(quantization_factor * quantized_features)
3a695ae89ca40a6004f7716018ec39b583cbbbfd | 1,587 | py | Python | tests/sms/models/test_reschedule_sms_messages.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | [
"MIT"
] | null | null | null | tests/sms/models/test_reschedule_sms_messages.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | [
"MIT"
] | null | null | null | tests/sms/models/test_reschedule_sms_messages.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | [
"MIT"
] | null | null | null | from datetime import date, datetime, timedelta
import pytest
from pydantic.error_wrappers import ValidationError
from infobip_channels.sms.models.body.reschedule_sms_messages import (
RescheduleSMSMessagesMessageBody,
)
from infobip_channels.sms.models.query_parameters.reschedule_messages import (
RescheduleSMSMessagesQueryParameters,
)
@pytest.mark.parametrize("bulk_id", [{}, None])
def test_when_bulk_id_is_invalid__validation_error_is_raised(bulk_id):
    """Non-string bulk ids must be rejected by the query-parameter model."""
    with pytest.raises(ValidationError):
        RescheduleSMSMessagesQueryParameters(
            **{
                "bulk_id": bulk_id,
            }
        )
def test_when_input_data_is_valid_query__validation_error_is_not_raised():
    """A plain string bulk id is accepted by the query-parameter model."""
    try:
        RescheduleSMSMessagesQueryParameters(
            **{
                "bulk_id": "BulkId-xyz-123",
            }
        )
    except ValidationError:
        pytest.fail("Unexpected ValidationError raised")
@pytest.mark.parametrize(
    "send_at",
    [{}, "Test", "22-03-2022", date.today(), datetime.now() + timedelta(days=181)],
)
def test_when_send_at_is_invalid__validation_error_is_raised(send_at):
    """Reject non-datetimes, bare dates, and datetimes beyond ~180 days out."""
    with pytest.raises(ValidationError):
        RescheduleSMSMessagesMessageBody(
            **{
                "sendAt": send_at,
            }
        )
def test_when_input_data_is_valid_body__validation_error_is_not_raised():
    """The current time is a valid sendAt value for the message body."""
    try:
        RescheduleSMSMessagesMessageBody(
            **{
                "sendAt": datetime.now(),
            }
        )
    except ValidationError:
        pytest.fail("Unexpected ValidationError raised")
| 27.842105 | 83 | 0.672968 |
3a69ee7bd76a61928b4ca3a0383eeeac9e541646 | 8,349 | py | Python | python/mxnet/device.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | 1 | 2021-11-09T01:40:17.000Z | 2021-11-09T01:40:17.000Z | python/mxnet/device.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | null | null | null | python/mxnet/device.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | 1 | 2018-07-19T00:43:30.000Z | 2018-07-19T00:43:30.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Device management API of mxnet."""
import contextvars
import ctypes
from .base import _LIB
from .base import check_call
class Device:
    """Describes the device (CPU or a specific GPU) computation runs on.

    MXNet can run operations on CPU and different GPUs; a ``Device`` holds
    a device type and a device id.  The ``mx.cpu`` and ``mx.gpu`` helpers
    are shorthands for constructing one.

    Parameters
    ----------
    device_type : {'cpu', 'gpu', 'cpu_pinned', 'cpu_shared'} or Device
        Device type string, or another ``Device`` to copy.
    device_id : int (default=0)
        Device id; only meaningful for GPUs.

    Note
    ----
    ``Device`` is also a context manager that temporarily changes the
    default device:

    >>> with mx.Device(mx.gpu(2)):
    ...     gpu_array = mx.np.ones((2, 3))
    >>> gpu_array.device
    gpu(2)

    One can also explicitly specify the device when creating an array:

    >>> gpu_array = mx.np.ones((2, 3), mx.gpu(1))
    >>> gpu_array.device
    gpu(1)
    """
    # Mapping between the numeric type ids used by the C API and their names.
    devtype2str = {1: 'cpu', 2: 'gpu', 3: 'cpu_pinned', 5: 'cpu_shared'}
    devstr2type = {'cpu': 1, 'gpu': 2, 'cpu_pinned': 3, 'cpu_shared': 5}

    def __init__(self, device_type, device_id=0):
        if isinstance(device_type, Device):
            # Copy constructor: duplicate type and id from the given device.
            self.device_typeid = device_type.device_typeid
            self.device_id = device_type.device_id
        else:
            self.device_typeid = Device.devstr2type[device_type]
            self.device_id = device_id
        self._old_ctx = None

    @property
    def device_type(self):
        """Device type name of this device, e.g. ``'cpu'`` or ``'gpu'``.

        Examples
        -------
        >>> mx.current_device().device_type
        'cpu'
        """
        return Device.devtype2str[self.device_typeid]

    def __hash__(self):
        """Hash on (type id, device id) so devices can be dictionary keys."""
        return hash((self.device_typeid, self.device_id))

    def __eq__(self, other):
        """Devices are equal when both the type id and the device id match."""
        if not isinstance(other, Device):
            return False
        return (self.device_typeid, self.device_id) == \
               (other.device_typeid, other.device_id)

    def __str__(self):
        return f'{self.device_type}({self.device_id:d})'

    def __repr__(self):
        return str(self)

    def __enter__(self):
        # Token can't be pickled and Token.old_value is Token.MISSING when
        # _current.get() used the default value, so remember the previous
        # device ourselves instead of keeping the Token around.
        self._old_ctx = _current.get()
        _current.set(self)
        return self

    def __exit__(self, ptype, value, trace):
        _current.set(self._old_ctx)

    def empty_cache(self):
        """Release unreferenced memory held in this device's memory pool.

        MXNet keeps a memory pool to avoid excessive allocations; this
        frees the part of the pool that is no longer referenced.

        Examples
        -------
        >>> ctx = mx.gpu(0)
        >>> arr = mx.np.ones((200,200), ctx=ctx)
        >>> del arr
        >>> ctx.empty_cache() # forces release of memory allocated for arr
        """
        kind = ctypes.c_int(self.device_typeid)
        index = ctypes.c_int(self.device_id)
        check_call(_LIB.MXStorageEmptyCache(kind, index))
def cpu(device_id=0):
    """Return a CPU device; shorthand for ``Device('cpu', device_id)``.

    When no device is specified, most operations default to ``cpu()``.

    Parameters
    ----------
    device_id : int, optional
        Ignored for CPU; accepted for interface symmetry with ``gpu``.

    Returns
    -------
    device : Device
        The corresponding CPU device.

    Examples
    ----------
    >>> with mx.cpu():
    ...     cpu_array = mx.np.ones((2, 3))
    >>> cpu_array.device
    cpu(0)
    """
    return Device('cpu', device_id)
def cpu_pinned(device_id=0):
    """Return a CPU pinned-memory device; shorthand for
    ``Device('cpu_pinned', device_id)``.

    Copying from CPU pinned memory to GPU is faster than from normal
    (pageable) CPU memory.

    Parameters
    ----------
    device_id : int, optional
        Ignored for CPU; accepted for interface symmetry with ``gpu``.

    Returns
    -------
    device : Device
        The corresponding CPU pinned memory device.

    Examples
    ----------
    >>> with mx.cpu_pinned():
    ...     cpu_array = mx.np.ones((2, 3))
    >>> cpu_array.device
    cpu_pinned(0)
    """
    return Device('cpu_pinned', device_id)
def gpu(device_id=0):
    """Return a GPU device; shorthand for ``Device('gpu', device_id)``.

    The K GPUs on a node are typically numbered 0,...,K-1.

    Parameters
    ----------
    device_id : int, optional
        The device id of the GPU.

    Returns
    -------
    device : Device
        The corresponding GPU device.

    Examples
    ----------
    >>> with mx.gpu(1):
    ...     gpu_array = mx.np.ones((2, 3))
    >>> gpu_array.device
    gpu(1)
    >>> gpu_array = mx.np.ones((2, 3), ctx=mx.gpu(1))
    >>> gpu_array.device
    gpu(1)
    """
    return Device('gpu', device_id)
def num_gpus():
    """Query CUDA for the number of GPUs present.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    count : int
        The number of GPUs.
    """
    gpu_count = ctypes.c_int()
    check_call(_LIB.MXGetGPUCount(ctypes.byref(gpu_count)))
    return gpu_count.value
def gpu_memory_info(device_id=0):
    """Query CUDA for the free and total bytes of GPU global memory.

    Parameters
    ----------
    device_id : int, optional
        The device id of the GPU device.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    (free, total) : (int, int)
        Free and total global memory on the device, in bytes.
    """
    dev = ctypes.c_int(device_id)
    # Out-parameters filled by the C backend.
    free_bytes = ctypes.c_uint64()
    total_bytes = ctypes.c_uint64()
    check_call(_LIB.MXGetGPUMemoryInformation64(
        dev, ctypes.byref(free_bytes), ctypes.byref(total_bytes)))
    return (free_bytes.value, total_bytes.value)
# Context-local holder for the active device; defaults to cpu(0).
# Presumably swapped by Device's context-manager protocol elsewhere in this
# module (see the `with mx.Device(...)` examples) — TODO confirm.
_current = contextvars.ContextVar('namemanager', default=Device('cpu', 0))


def current_device():
    """Returns the current device.

    By default, `mx.cpu()` is used for all the computations
    and it can be overridden by using `with mx.Device(x)` statement where
    x can be cpu(device_id) or gpu(device_id).

    Examples
    -------
    >>> mx.current_device()
    cpu(0)
    >>> with mx.Device('gpu', 1):  # Device changed in `with` block.
    ...     mx.current_device()  # Computation done here will be on gpu(1).
    ...
    gpu(1)
    >>> mx.current_device()  # Back to default device.
    cpu(0)

    Returns
    -------
    default_device : Device
    """
    return _current.get()
| 28.016779 | 108 | 0.618517 |
3a6a4945d24f523a66e8dd1cc3a18e4d3749558b | 5,578 | py | Python | _pkg_KuFunc/mod_SetLabel.py | tianlunjiang/_NukeStudio_v2 | 5ed9b9217aff16d903bdcda5c2f1e1cd3bebe367 | [
"CNRI-Python"
] | 6 | 2019-08-27T01:30:15.000Z | 2020-11-17T00:40:01.000Z | _pkg_KuFunc/mod_SetLabel.py | tianlunjiang/_NukeMods | 47861bfc273262abba55b9f9a61782a5d89479b1 | [
"CNRI-Python"
] | 2 | 2019-01-22T04:09:28.000Z | 2019-01-23T15:11:39.000Z | _pkg_KuFunc/mod_SetLabel.py | tianlunjiang/_NukeMods | 47861bfc273262abba55b9f9a61782a5d89479b1 | [
"CNRI-Python"
] | 1 | 2020-08-03T22:43:23.000Z | 2020-08-03T22:43:23.000Z |
# ------------------------------------------------------------------------------
# Module Import
# ------------------------------------------------------------------------------
import nuke, nukescripts
import platform
from Qt import QtWidgets, QtGui, QtCore
#------------------------------------------------------------------------------
#-Header: module metadata used for tool titles / about info
#------------------------------------------------------------------------------
__VERSION__ = '2.0'
__OS__ = platform.system()
__AUTHOR__ = "Tianlun Jiang"
__WEBSITE__ = "jiangovfx.com"
__COPYRIGHT__ = "copyright (c) %s - %s" % (__AUTHOR__, __WEBSITE__)
__TITLE__ = "SetLabel v%s" % __VERSION__
def _version_():
ver="""
version 2.0
- Add preset buttons for frames and knob values
- Add Node Context support
version 1.0
- Basically working, when run(), prompt a frameless popup with line edit field
- replace with Qt
"""
# ------------------------------------------------------------------------------
# Global Variables
# ------------------------------------------------------------------------------
# Knobs present on (almost) every node that would be useless as label sources;
# filterKnobs() removes these before populating the knob dropdown.
KNOB_IGNORE = set(['layer', 'invert_mask', 'help',
                   'dope_sheet', 'hide_input', 'xpos',
                   'crop', 'channels', 'note_font_color',
                   'onCreate', 'quality', 'updateUI',
                   'knobChanged', 'note_font', 'tile_color',
                   'bookmark', 'selected', 'autolabel',
                   'process_mask', 'label', 'onDestroy',
                   'inject', 'indicators', 'icon',
                   'channel', 'maskFrom', 'maskChannelMask',
                   'enable', 'maskChannelInput', 'Mask',
                   'ypos', 'postage_stamp_frame', 'postage_stamp',
                   'lifetimeStart', 'maskChannel', 'panel',
                   'lifetimeEnd', 'maskFromFlag',
                   'name', 'cached', 'fringe',
                   'mask', 'note_font_size', 'filter',
                   'useLifetime', 'gl_color'])
# Any knob whose name contains one of these substrings is also ignored.
KNOB_IGNORE_KEYWORDS = ['_panelDropped', 'enable', 'unpremult', 'clamp']
# ------------------------------------------------------------------------------
# Core Class
# ------------------------------------------------------------------------------
class Core_SetLabel(QtWidgets.QDialog):
    """Frameless popup dialog that edits the label of the selected Nuke nodes.

    Offers a free-text field (Enter applies it) plus two presets: stamp the
    current frame number, or reference a knob's value via a TCL expression.
    """

    def __init__(self):
        super(Core_SetLabel, self).__init__()
        # Free-text label input; Enter commits the label to all selected nodes.
        self.lineInput = QtWidgets.QLineEdit()
        self.lineInput.setAlignment(QtCore.Qt.AlignCenter)
        self.lineInput.returnPressed.connect(self.onPressed)
        self.title = QtWidgets.QLabel("<b>Set Label</b>")
        self.title.setAlignment(QtCore.Qt.AlignHCenter)
        # Preset: label with the frame number at the moment of clicking.
        self.btn_frame = QtWidgets.QPushButton("Current Frame")
        self.btn_frame.clicked.connect(self.onPreset)
        # Preset: label with a live knob-value expression; the combo box is
        # editable so any knob name can be typed in.
        self.knoblist = QtWidgets.QComboBox()
        self.knoblist.setEditable(True)
        self.btn_knob = QtWidgets.QPushButton("Knob Value")
        self.btn_knob.clicked.connect(self.onPreset)
        # Layout: title / input / frame button / (knob list + knob button).
        self.layout = QtWidgets.QVBoxLayout()
        self.layout_knobs = QtWidgets.QHBoxLayout()
        self.layout.addWidget(self.title)
        self.layout.addWidget(self.lineInput)
        self.layout.addWidget(self.btn_frame)
        self.layout_knobs.addWidget(self.knoblist)
        self.layout_knobs.addWidget(self.btn_knob)
        self.layout.addLayout(self.layout_knobs)
        self.setLayout(self.layout)
        self.resize(200, 50)
        self.setWindowTitle("Set Label")
        # Frameless, always-on-top popup so it behaves like a quick HUD.
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.Popup)
        # self.setDefault()

    def onPressed(self):
        """Apply the typed label to every selected node when Enter is pressed."""
        newLabel = self.lineInput.text()
        for n in self.sel_nodes:
            n['label'].setValue(newLabel)
        self.close()

    def onPreset(self):
        """Handle either preset button; dispatch on which button sent the signal."""
        _sender = self.sender()
        if _sender is self.btn_frame:
            # Stamp the current frame, e.g. "x1001", on every selected node.
            for n in self.sel_nodes:
                n['label'].setValue('x%s' % nuke.frame())
        elif _sender is self.btn_knob:
            # TCL expression that displays the knob's live value.
            # NOTE(review): only the first selected node is labelled here,
            # while the frame preset labels all selected nodes — confirm the
            # asymmetry is intentional.
            sel_knob = self.knoblist.currentText()
            n = self.sel_nodes[0]
            n['label'].setValue('[value %s]' % sel_knob)
        self.close()

    def setDefault(self):
        """Read the current selection and seed the UI from its first node."""
        # Resolve which node graph (root or group) is under the cursor so the
        # selection is read from the correct context.
        context = get_dag()
        with context:
            self.sel_nodes = nuke.selectedNodes()
        if self.sel_nodes != []:
            self.lineInput.show()
            self.title.setText("<b>Set Label</b>")
            self.lineInput.setText(self.sel_nodes[0]['label'].value())
            # Populate the knob dropdown from the first selected node.
            n = self.sel_nodes[0]
            knobs = filterKnobs(n.knobs())
            self.knoblist.clear()
            self.knoblist.addItems(knobs)
        else:
            # Nothing selected: hide the input and show an inline error.
            self.lineInput.hide()
            self.title.setText("<b>Error:<br>No Node Selected</b>")

    def run(self):
        """Refresh state and pop the dialog up next to the mouse cursor."""
        self.setDefault()
        # Offset so the input field lands under the cursor; pre-select the
        # existing label text for quick overwrite.
        self.move(QtGui.QCursor.pos() + QtCore.QPoint(-100, -12))
        self.raise_()
        self.lineInput.setFocus()
        self.lineInput.selectAll()
        self.show()
# ------------------------------------------------------------------------------
# Supporting Fucntions
# ------------------------------------------------------------------------------
def filterKnobs(knobs):
    """Filter knob names down to those useful as label sources.

    @knobs: (list) list of knob names
    return: (list) sorted list with common/internal knobs removed
    """
    # Drop the exact-name ignore set first, then any knob whose name
    # contains one of the ignore keywords.
    candidates = set(knobs) - KNOB_IGNORE
    kept = [knob for knob in candidates
            if all(keyword not in knob for keyword in KNOB_IGNORE_KEYWORDS)]
    return sorted(kept)
def get_dag():
    """Return the Nuke node-graph context under the mouse cursor.

    Resolves which DAG (the root graph or a group's graph) the cursor is
    over, so node selection can be read from the correct context.

    return: (Node) ``nuke.root()`` for the main graph, otherwise the group
        node whose Node Graph panel is under the cursor.
    """
    app = QtWidgets.QApplication
    pos = QtGui.QCursor.pos()
    widget = app.widgetAt(pos)
    # Panel titles look like "<group name> Node Graph"; an empty prefix
    # means the main (root) graph. (Stray debug prints removed.)
    context = widget.parent().windowTitle().split('Node Graph')[0].strip()
    return nuke.root() if context == '' else nuke.toNode(context)
# ------------------------------------------------------------------------------
# Instancing
# ------------------------------------------------------------------------------
SetLabel = Core_SetLabel()
| 25.126126 | 103 | 0.573682 |
3a6bbdcced64c871e2fdd5b7e14da08f29defe31 | 897 | py | Python | account/migrations/0003_auto_20161110_2135.py | fitahol/fitahol | ce84dc909aa98f2dc7594ef26568e015cbfe0e94 | [
"MIT"
] | 2 | 2017-02-20T14:11:30.000Z | 2017-06-11T16:10:33.000Z | account/migrations/0003_auto_20161110_2135.py | fitahol/fitahol | ce84dc909aa98f2dc7594ef26568e015cbfe0e94 | [
"MIT"
] | null | null | null | account/migrations/0003_auto_20161110_2135.py | fitahol/fitahol | ce84dc909aa98f2dc7594ef26568e015cbfe0e94 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-10 21:35
# NOTE: auto-generated schema migration — avoid editing the operations by hand.
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('account', '0002_auto_20161103_0100'),
    ]

    operations = [
        # Update meta options (ordering + verbose names) of the
        # relation-confirmation model.
        migrations.AlterModelOptions(
            name='userrelationconfirm',
            options={'ordering': ('-id',), 'verbose_name': '用户关联请求', 'verbose_name_plural': '用户关联请求'},
        ),
        # Drop device-specific columns from the WeChat profile table.
        migrations.RemoveField(
            model_name='wechatinfo',
            name='apple_id',
        ),
        migrations.RemoveField(
            model_name='wechatinfo',
            name='device',
        ),
        # Make the subscription flag a plain boolean defaulting to False.
        migrations.AlterField(
            model_name='wechatinfo',
            name='wechat_subscribed',
            field=models.BooleanField(default=False, verbose_name='微信关注'),
        ),
    ]
3a6c6afbecc178b754f00e36139090ce170c777c | 780 | py | Python | imgur_stuff.py | djs2022/DataEntrySite | aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d | [
"MIT"
] | null | null | null | imgur_stuff.py | djs2022/DataEntrySite | aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d | [
"MIT"
] | null | null | null | imgur_stuff.py | djs2022/DataEntrySite | aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d | [
"MIT"
] | null | null | null | import requests
import os
class Imgur():
    """Minimal Imgur API client for image uploads via a client ID."""

    # Class-level defaults; client_id is set per instance in __init__.
    client_id = None
    remCredits = None

    def __init__(self, clientID):
        self.client_id = clientID

    def uploadImage(self, file, title, description):
        """Upload an uploaded-file object (e.g. Flask/werkzeug FileStorage) to Imgur.

        The upload is staged through a temporary file on disk (``file.save``
        needs a path); the staging file is always removed, even when the
        HTTP request raises — the original version leaked it on failure.

        Returns the hosted image URL on success, otherwise ``None``.
        """
        file.save(file.filename)
        try:
            with open(file.filename, 'rb') as f:
                data = f.read()
            url = "https://api.imgur.com/3/image"
            payload = {'image': data, 'title': title, 'description': description}
            headers = {
                "authorization": f"Client-ID {self.client_id}"
            }
            res = requests.request("POST", url, headers=headers, data=payload)
        finally:
            # Clean up the staging file whether or not the request succeeded.
            os.remove(file.filename)
        response = res.json()
        # .get() avoids a KeyError on unexpected error payloads.
        if response.get('success'):
            return response['data']['link']
        else:
            return None
3a6d77c44f6c1309b10cae742c418b58169828c7 | 4,489 | py | Python | roles/tox/library/tox_parse_output.py | g-chauvel/zuul-jobs | 7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f | [
"Apache-2.0"
] | null | null | null | roles/tox/library/tox_parse_output.py | g-chauvel/zuul-jobs | 7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f | [
"Apache-2.0"
] | null | null | null | roles/tox/library/tox_parse_output.py | g-chauvel/zuul-jobs | 7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2018 Red Hat
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: tox_parse_output
short_description: Parses the output of tox looking for per-line comments
author: Monty Taylor (@mordred)
description:
- Looks for output from the tox command to find content that could be
returned as inline comments.
requirements:
- "python >= 3.5"
options:
tox_output:
description:
- Output from the tox command run
required: true
type: str
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
# ANSI escape sequences (colors etc.) that tox tools may emit.
ANSI_RE = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
# pep8/flake8 style: "path:line:col: message"
PEP8_RE = re.compile(r"^(.*):(\d+):(\d+): (.*)$")
# sphinx style: "path:line:message"
SPHINX_RE = re.compile(r"^([^:]*):([\d]+):(\w.+)$")


def simple_matcher(line, regex, file_path_group, start_line_group,
                   message_group):
    """Match *line* against *regex* and extract (path, line, message).

    Returns ``(None, None, None)`` when the line does not match.
    """
    match = regex.match(line)
    if not match:
        return None, None, None
    return (match.group(file_path_group),
            match.group(start_line_group),
            match.group(message_group))


def pep8_matcher(line):
    """Parse a pep8/flake8 "path:line:col: message" line."""
    return simple_matcher(line, PEP8_RE, 1, 2, 4)


def sphinx_matcher(line):
    """Parse a sphinx "path:line:message" line."""
    return simple_matcher(line, SPHINX_RE, 1, 2, 3)


# Tried in order; the first matcher that yields a file path wins.
matchers = [
    pep8_matcher,
    sphinx_matcher,
]


def extract_line_comment(line):
    """
    Extracts line comment data from a line using multiple matchers.
    """
    path = None
    line_no = None
    text = None
    for matcher in matchers:
        path, line_no, text = matcher(line)
        if path:
            # Strip ANSI color codes so review comments stay readable.
            text = ANSI_RE.sub('', text)
            break
    return path, line_no, text
def extract_file_comments(tox_output, workdir, tox_envlist=None):
    """Build a zuul ``file_comments`` mapping from raw tox output.

    tox_output: full stdout of the tox run.
    workdir: repository checkout that reported paths are relative to.
    tox_envlist: optional env name used to prefix each comment message.

    Returns a dict mapping a repo-relative file path to a list of
    ``{"line": int, "message": str}`` entries.

    NOTE(review): the chdir below is never restored, so calling this
    function permanently changes the process working directory — confirm
    that is acceptable for all callers.
    """
    os.chdir(workdir)
    ret = {}
    for line in tox_output.split('\n'):
        if not line:
            continue
        # Indented lines are continuations/tracebacks, not lint results.
        if line[0].isspace():
            continue
        file_path, start_line, message = extract_line_comment(line)
        if not file_path:
            continue
        # Clean up the file path if it has a leading ./
        if file_path.startswith('./'):
            file_path = file_path[2:]
        # Don't report if the file path isn't valid
        if not os.path.isfile(file_path):
            continue
        # Strip current working dir to make absolute paths relative
        cwd = os.getcwd() + '/'
        if file_path.startswith(cwd):
            file_path = file_path[len(cwd):]
        # After stripping we don't allow absolute paths anymore since they
        # cannot be linked to a file in the repo in zuul.
        if file_path.startswith('/'):
            continue
        # We should only handle files that are in under version control.
        # For now, skip .tox directory, we can enhance later.
        if file_path.startswith('.tox'):
            continue
        ret.setdefault(file_path, [])
        if tox_envlist:
            # Prefix so reviewers know which tox env produced the comment.
            message = "{envlist}: {message}".format(
                envlist=tox_envlist,
                message=message,
            )
        ret[file_path].append(dict(
            line=int(start_line),
            message=message,
        ))
    return ret
def main():
    """Ansible module entry point: parse args, emit ``file_comments``."""
    module = AnsibleModule(
        argument_spec=dict(
            # no_log: lint output can echo source lines; keep it out of logs.
            tox_output=dict(required=True, type='str', no_log=True),
            tox_envlist=dict(required=True, type='str'),
            workdir=dict(required=True, type='str'),
        )
    )
    tox_output = module.params['tox_output']
    tox_envlist = module.params['tox_envlist']
    file_comments = extract_file_comments(
        tox_output, module.params['workdir'], tox_envlist)
    module.exit_json(changed=False, file_comments=file_comments)


if __name__ == '__main__':
    main()
| 28.775641 | 74 | 0.642237 |
3a6f8d05144479257560ebcab7dfac73539e7dff | 4,199 | py | Python | rpyc/utils/authenticators.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | rpyc/utils/authenticators.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | rpyc/utils/authenticators.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | """
authenticators: the server instance accepts an authenticator object,
which is basically any callable (i.e., a function) that takes the newly
connected socket and "authenticates" it.
the authenticator should return a socket-like object with its associated
credentials (a tuple), or raise AuthenticationError if it fails.
a very trivial authenticator might be
def magic_word_authenticator(sock):
if sock.recv(5) != "Ma6ik":
raise AuthenticationError("wrong magic word")
return sock, None
s = ThreadedServer(...., authenticator = magic_word_authenticator)
your authenticator can return any socket-like object. for instance, it may
authenticate the client and return a TLS/SSL-wrapped socket object that
encrypts the transport.
the credentials returned alongside with the new socket can be any object.
it will be stored in the rpyc connection configruation under the key
"credentials", and may be used later by the service logic. if no credentials
are applicable, just return None as in the example above.
rpyc includes integration with tlslite, a TLS/SSL library:
the VdbAuthenticator class authenticates clients based on username-password
pairs.
"""
import os
import sys
import anydbm
from rpyc.lib import safe_import
tlsapi = safe_import("tlslite.api")
ssl = safe_import("ssl")
class AuthenticationError(Exception):
    """Raised by an authenticator when it fails to authenticate a connection."""
    pass
class SSLAuthenticator(object):
    """Authenticator that wraps accepted sockets with server-side SSL/TLS.

    Returns the wrapped socket together with the peer certificate as the
    connection's credentials.
    """

    def __init__(self, keyfile, certfile, ca_certs=None, ssl_version=None):
        self.keyfile = keyfile
        self.certfile = certfile
        self.ca_certs = ca_certs
        # A client certificate can only be required when a CA bundle is
        # available to validate it against.
        self.cert_reqs = ssl.CERT_REQUIRED if ca_certs else ssl.CERT_NONE
        self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1

    def __call__(self, sock):
        try:
            wrapped = ssl.wrap_socket(
                sock,
                keyfile=self.keyfile,
                certfile=self.certfile,
                server_side=True,
                ssl_version=self.ssl_version,
                ca_certs=self.ca_certs,
                cert_reqs=self.cert_reqs,
            )
        except ssl.SSLError:
            # py2/py3-compatible exception retrieval, matching file style.
            ex = sys.exc_info()[1]
            raise AuthenticationError(str(ex))
        return wrapped, wrapped.getpeercert()
class TlsliteVdbAuthenticator(object):
    """Authenticator backed by a tlslite SRP verifier database.

    Incoming sockets are wrapped in a TLS connection and authenticated via
    SRP username/password against the verifier DB; the returned credentials
    are the alleged SRP username.
    """
    __slots__ = ["vdb"]
    # SRP verifier strength, in bits.
    BITS = 2048

    def __init__(self, vdb):
        self.vdb = vdb

    @classmethod
    def from_dict(cls, users):
        # Build an in-memory verifier DB from a {username: password} mapping.
        # NOTE: uses dict.iteritems(), i.e. this code path is Python 2 only.
        inst = cls(tlsapi.VerifierDB())
        for username, password in users.iteritems():
            inst.set_user(username, password)
        return inst

    @classmethod
    def _load_vdb_with_mode(cls, vdb, mode):
        """taken from tlslite/BaseDB.py -- patched for file mode"""
        # {{  (begin copied/patched tlslite code)
        db = anydbm.open(vdb.filename, mode)
        try:
            if db["--Reserved--type"] != vdb.type:
                raise ValueError("Not a %s database" % (vdb.type,))
        except KeyError:
            raise ValueError("Not a recognized database")
        vdb.db = db
        # }}  (end copied code)

    @classmethod
    def from_file(cls, filename, mode="w"):
        # Open an existing verifier DB file (with the requested dbm mode)
        # or create a new one when the file does not exist.
        vdb = tlsapi.VerifierDB(filename)
        if os.path.exists(filename):
            cls._load_vdb_with_mode(vdb, mode)
        else:
            if mode not in "ncw":
                raise ValueError("%s does not exist but mode does not allow "
                                 "writing (%r)" % (filename, mode))
            vdb.create()
        return cls(vdb)

    def sync(self):
        # Flush pending writes to the underlying dbm file.
        self.vdb.db.sync()

    def set_user(self, username, password):
        # Store an SRP verifier (never the plaintext password) for the user.
        self.vdb[username] = self.vdb.makeVerifier(username, password, self.BITS)

    def del_user(self, username):
        del self.vdb[username]

    def list_users(self):
        return self.vdb.keys()

    def __call__(self, sock):
        sock2 = tlsapi.TLSConnection(sock)
        # tlslite's TLSConnection omits fileno(); patch one in so the
        # connection still works with select()-style APIs.
        sock2.fileno = lambda fd=sock.fileno(): fd
        try:
            sock2.handshakeServer(verifierDB=self.vdb)
        except Exception:
            ex = sys.exc_info()[1]
            raise AuthenticationError(str(ex))
        return sock2, sock2.allegedSrpUsername
| 32.550388 | 92 | 0.639676 |
3a71c85b49b297a21ae96cfc5a938c33c9b45b83 | 1,156 | py | Python | kratos/apps/configuration/migrations/0001_initial.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | 1 | 2020-11-30T09:53:40.000Z | 2020-11-30T09:53:40.000Z | kratos/apps/configuration/migrations/0001_initial.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | null | null | null | kratos/apps/configuration/migrations/0001_initial.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-09-21 06:44
# Generated by Django 3.0.5 — auto-generated initial migration for the
# configuration app; avoid editing the operations by hand.
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        # Create the t_configuration table, one row per config file version
        # attached to an App.
        migrations.CreateModel(
            name='Configuration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version', models.CharField(blank=True, max_length=50, null=True)),
                ('name', models.CharField(max_length=100)),
                ('path', models.CharField(blank=True, max_length=200, null=True)),
                ('content', models.TextField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # Deleting an App cascades to its configurations.
                ('app', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='configuration', to='app.App')),
            ],
            options={
                'db_table': 't_configuration',
            },
        ),
    ]
3a734bc1e70aa19debbc9af66f403f4c7634a66a | 2,823 | py | Python | events/migrations/0002_auto_20180325_0035.py | eforsell/eurovisiontippning | 1a26dac0e06c5eef9a752ea6f68ad9b9567b6261 | [
"MIT"
] | null | null | null | events/migrations/0002_auto_20180325_0035.py | eforsell/eurovisiontippning | 1a26dac0e06c5eef9a752ea6f68ad9b9567b6261 | [
"MIT"
] | null | null | null | events/migrations/0002_auto_20180325_0035.py | eforsell/eurovisiontippning | 1a26dac0e06c5eef9a752ea6f68ad9b9567b6261 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.3 on 2018-03-24 23:35
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Competition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('competition_type', models.IntegerField(choices=[(1, 'Semifinal 1'), (1, 'Semifinal 2'), (3, 'Grand final')], default=1)),
('start_time', models.DateTimeField()),
('published', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='CompetitionEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_order', models.PositiveIntegerField(blank=True, null=True)),
('points', models.PositiveIntegerField(blank=True, null=True)),
('rank', models.PositiveIntegerField(blank=True, null=True)),
('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Competition')),
],
),
migrations.AlterModelOptions(
name='entry',
options={'verbose_name_plural': 'entries'},
),
migrations.RemoveField(
model_name='entry',
name='points',
),
migrations.RemoveField(
model_name='entry',
name='rank',
),
migrations.RemoveField(
model_name='entry',
name='start_order',
),
migrations.RemoveField(
model_name='event',
name='event_type',
),
migrations.RemoveField(
model_name='event',
name='published',
),
migrations.RemoveField(
model_name='event',
name='start_time',
),
migrations.AddField(
model_name='event',
name='start_date',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.DeleteModel(
name='EventType',
),
migrations.AddField(
model_name='competitionentry',
name='entry',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Entry'),
),
migrations.AddField(
model_name='competition',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Event'),
),
]
| 34.426829 | 139 | 0.557563 |
3a737b4d0699668e68dfd11d0393dc995f8e0e88 | 574 | py | Python | python-code/transformer-sample/basic/sentiment_analysis.py | 87-midnight/NewbieInJava | ba84153c6b3a382e620c4df7892d653be2e1a607 | [
"MIT"
] | null | null | null | python-code/transformer-sample/basic/sentiment_analysis.py | 87-midnight/NewbieInJava | ba84153c6b3a382e620c4df7892d653be2e1a607 | [
"MIT"
] | 2 | 2019-10-22T08:21:09.000Z | 2019-10-22T08:21:09.000Z | python-code/transformer-sample/basic/sentiment_analysis.py | 87-midnight/NewbieInJava | ba84153c6b3a382e620c4df7892d653be2e1a607 | [
"MIT"
] | null | null | null | # 使用情绪分析流水线
import torch
from transformers import BertTokenizer, BertForSequenceClassification
torch.manual_seed(0)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", problem_type="multi_label_classification", num_labels=2)
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss
outputs = model(**inputs, labels=labels)
loss = outputs.loss
logits = outputs.logits
list(logits.shape) | 41 | 131 | 0.801394 |
3a750f402f6cc67161071bf3b54785b45c55a45d | 1,293 | py | Python | examples/tutorial/parallel_amuse_script.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 131 | 2015-06-04T09:06:57.000Z | 2022-02-01T12:11:29.000Z | examples/tutorial/parallel_amuse_script.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 690 | 2015-10-17T12:18:08.000Z | 2022-03-31T16:15:58.000Z | examples/tutorial/parallel_amuse_script.py | rieder/amuse | 3ac3b6b8f922643657279ddee5c8ab3fc0440d5e | [
"Apache-2.0"
] | 102 | 2015-01-22T10:00:29.000Z | 2022-02-09T13:29:43.000Z | import time
import numpy
from amuse.lab import Huayno
from amuse.lab import Hermite
from amuse.lab import nbody_system
from amuse.lab import new_king_model
from matplotlib import pyplot
def gravity_minimal(bodies, t_end, nproc):
    """Integrate *bodies* to *t_end* with Hermite using *nproc* workers.

    Prints diagnostics (model time, total energy, virial ratio Q, relative
    energy error) and returns the wall-clock integration time in seconds.
    """
    gravity = Hermite(number_of_workers=nproc)
    gravity.particles.add_particles(bodies)
    Etot_init = gravity.kinetic_energy + gravity.potential_energy
    # Time only the integration itself, not setup/teardown.
    start_time = time.time()
    gravity.evolve_model(t_end)
    dtime = time.time() - start_time
    Ekin = gravity.kinetic_energy
    Epot = gravity.potential_energy
    Etot = Ekin + Epot
    # Relative energy drift; should stay small for a trustworthy run.
    dE = (Etot_init - Etot) / Etot
    print()
    print("T =", gravity.get_time(), " CPU time:", dtime, "[s]")
    print("M =", bodies.mass.sum(), " E = ", Etot, " Q = ", -Ekin / Epot)
    print("dE =", dE)
    gravity.stop()
    return dtime
if __name__ == '__main__':
    # Fixed: the original guard was ``if __name__ in ('__main__'):`` —
    # ``('__main__')`` without a comma is a plain string, so ``in`` did an
    # accidental substring test instead of tuple membership.
    N = 1024
    W0 = 7.0
    t_end = 0.1 | nbody_system.time
    bodies = new_king_model(N, W0)
    bodies.scale_to_standard()
    # Benchmark the same cluster over 1..nproc worker processes.
    nproc = 6
    proc = numpy.arange(1, nproc + 1, 1)
    tcpu = []
    for npi in proc:
        tcpu.append(gravity_minimal(bodies, t_end, npi))
    pyplot.scatter(proc, tcpu)
    pyplot.xlabel("n proc")
    pyplot.ylabel("CPU time [s]")
    pyplot.savefig("fig_parallel_performance_N1k_Hermite.pdf")
| 25.352941 | 71 | 0.664346 |
3a75e62e27fdd3a634c7ec673852b4fb62407232 | 311 | py | Python | modules/random_cat.py | ChaseBosman/chatbot | a39e655e6d586fa596471cd20617dff5f9795a96 | [
"Unlicense"
] | 3 | 2019-10-19T12:07:06.000Z | 2020-10-05T17:24:56.000Z | modules/random_cat.py | ChaseBosman/chatbot | a39e655e6d586fa596471cd20617dff5f9795a96 | [
"Unlicense"
] | 17 | 2019-10-05T12:30:17.000Z | 2021-07-25T20:06:33.000Z | modules/random_cat.py | ChaseBosman/chatbot | a39e655e6d586fa596471cd20617dff5f9795a96 | [
"Unlicense"
] | 26 | 2018-10-19T05:43:12.000Z | 2020-10-02T05:27:48.000Z | import requests
import json
def random_cat_pic():
    """Fetch a random cat picture URL from random.cat.

    Returns a chat-ready message containing the image URL, or an error
    message when the request or response parsing fails.
    """
    try:
        url = 'http://aws.random.cat/meow'
        response = requests.get(url)
        response_json = json.loads(response.text)
        return "Here's a super cute cat pic: " + response_json.get('file')
    except Exception:
        # Was a bare ``except:``; ``except Exception`` keeps the best-effort
        # behaviour without swallowing KeyboardInterrupt/SystemExit.
        return "Error meow"
| 25.916667 | 74 | 0.614148 |
3a788b9c9eab36584491247515f283acec64a519 | 407 | py | Python | Pycharm_Project/0414/2.py | duanjiefei/Python-Study | 88e17a3eab9112a2515f09b2bcf4e032059cc28b | [
"Apache-2.0"
] | null | null | null | Pycharm_Project/0414/2.py | duanjiefei/Python-Study | 88e17a3eab9112a2515f09b2bcf4e032059cc28b | [
"Apache-2.0"
] | null | null | null | Pycharm_Project/0414/2.py | duanjiefei/Python-Study | 88e17a3eab9112a2515f09b2bcf4e032059cc28b | [
"Apache-2.0"
] | null | null | null | #装饰器的结构
import time
def timer(func):
    """Decorator that prints how long *func* takes to run.

    Fixed: the original wrapper discarded ``func``'s return value; it is
    now captured and propagated so decorated functions keep working.
    (Original comments translated from Chinese: "decorator structure";
    "*args collects positionals, **kwargs collects keyword arguments".)
    """
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print("the function run time is %s" % (end_time - start_time))
        return result
    return wrapper
@timer
def test():
    # Demo target: sleep ~1s so the timer has something visible to measure.
    time.sleep(1)
    print("hello python")
# The @timer decoration above is equivalent to: test = timer(test)
test()
3a794a8245df8781f9b1fc1f4f5373e8c9e7411d | 3,255 | py | Python | docs/source/conf.py | DanielWarfield1/nClusterFramework | 21b68226ab1ffa810281a261feabe1d360a5146d | [
"MIT"
] | null | null | null | docs/source/conf.py | DanielWarfield1/nClusterFramework | 21b68226ab1ffa810281a261feabe1d360a5146d | [
"MIT"
] | 2 | 2021-12-15T05:36:01.000Z | 2021-12-20T01:15:45.000Z | docs/source/conf.py | DanielWarfield1/nClusterFramework | 21b68226ab1ffa810281a261feabe1d360a5146d | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'nClusterFramework'
copyright = '2021, Daniel Warfield'
author = 'Daniel Warfield'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', # Core Sphinx library for auto html doc generation from docstrings
'sphinx.ext.autosummary', # Create neat summary tables for modules/classes/methods etc
'sphinx.ext.intersphinx', # Link to other project's documentation (see mapping below)
'sphinx.ext.viewcode', # Add a link to the Python source code for classes, functions etc.
]
autosummary_generate = True # Turn on sphinx.ext.autosummary
autoclass_content = "both" # Add __init__ doc (ie. params) to class summaries
html_show_sourcelink = False # Remove 'view source code' from top of page (for html, not python)
autodoc_inherit_docstrings = True # If no docstring, inherit from base class
set_type_checking_flag = True # Enable 'expensive' imports for sphinx_autodoc_typehints
nbsphinx_allow_errors = True # Continue through Jupyter errors
add_module_names = False # Remove namespaces from class/method signatures
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# Readthedocs theme
# on_rtd is whether on readthedocs.org, this line of code grabbed from docs.readthedocs.org...
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_css_files = ["readthedocs-custom.css"] # Override some CSS settings
# Pydata theme
#html_theme = "pydata_sphinx_theme"
#html_logo = "_static/logo-company.png"
#html_theme_options = { "show_prev_next": False}
#html_css_files = ['pydata-custom.css']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 44.589041 | 97 | 0.710906 |