blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dbb9dfef04bde38e63f84dfddf9bbc7d5b6ad1a2 | c404b7f9d30cd47550b621f8f243dc4b1c2bdf8a | /a_byte_of_python/chapter10_backup_ver3.py | ffed5450115aa74531d2ba191a0cde7ab2edb502 | [] | no_license | ilxsh/python_learning | 768d7857cece0a48f05524393eb12e985b174302 | efbaa88d0339c21eb78cf96a81dd163ae377759f | refs/heads/master | 2021-01-10T10:19:12.965879 | 2015-12-24T18:32:07 | 2015-12-24T18:32:07 | 48,186,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: chapter10_backup_ver3.py
# Back up a list of directories into a timestamped zip archive using the
# Unix/Linux `zip` command.
import os
import time
# 1. The files and directories to be backed up are specified in a list.
source = ['/home/test/backup1', '/home/test/backup2']
# If you are using Windows, use raw strings, e.g. source = [r'C:\Documents', r'D:\Work']
# 2. The backup must be stored in a main backup directory
target_dir = '/home/bluewind/bak/'  # Remember to change this to a directory that exists
# 3. The files are backed up into a zip file.
# 4. The name of the archive's subdirectory is the current date and time
today = target_dir + time.strftime('%Y%m%d%H%M%S')
# The current time is the name of the zip archive
now = time.strftime('%H%M%S')
# Take a comment from the user to create the name of the zip file
comment = input('Enter a comment --> ')
if len(comment) == 0:
    target = today + os.sep + now + '.zip'
else:
    # BUG FIX: the expression was split across two lines without a line
    # continuation, which is a SyntaxError; parenthesize it instead.
    target = (today + os.sep + now + '_' +
              comment.replace(' ', '_') + '.zip')
# Create the subdirectory if it isn't already there
if not os.path.exists(today):
    os.mkdir(today) # make directory
    print('Successfully created direcotry', today)
# 5. We use the zip command (in Unix/Linux) to put the files in a zip archive
zip_command = "zip -qr '%s' %s" % (target, ' '.join(source))
# Run the backup
if os.system(zip_command) == 0:
    print('Successful backup to', target)
else:
    print('Backup FAILED')
| [
"David@jdsu.com"
] | David@jdsu.com |
fa3dc1be669911bfd6a1f491293bb3b01b26b80d | 288a4eaad55d56f86d61d8354e4d35ca98b75308 | /Python /is_number_a_prime.py | a36ab6e29bd6d218fd640f74ae2dc840edae11d4 | [] | no_license | KaniahDunn/codewars-solutions | 003a9e40654203bf2bbf89a33164a7b6057de1e8 | 141cc2c9dcdba04641528208e3df20d50abef1a7 | refs/heads/master | 2021-06-25T23:25:47.658294 | 2021-03-16T01:24:50 | 2021-03-16T01:24:50 | 217,607,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | """
IS NUMBER A PRIME ?
Define a function that takes one integer argument and returns logical value true or false depending on if the integer is a prime.
Per Wikipedia, a prime number (or a prime) is a natural number greater than 1 that has no positive divisors other than 1 and itself.
Requirements
You can assume you will be given an integer input.
You can not assume that the integer will be only positive. You may be given negative numbers as well (or 0).
NOTE on performance: There are no fancy optimizations required, but still the most trivial solutions might time out. Numbers go up to 2^31 (or similar, depends on language version). Looping all the way up to n, or n/2, will be too slow.
Example
is_prime(1) /* false */
is_prime(2) /* true */
is_prime(-1) /* false */
"""
def is_prime(num):
    """Return True if *num* is prime, False otherwise.

    Trial division only needs to check divisors up to sqrt(num): any factor
    larger than the square root pairs with a cofactor smaller than it, which
    would already have been found.  This keeps the check fast for inputs near
    2**31 (the kata explicitly warns that looping all the way to num, or
    num/2, will time out).  The original also had an unreachable ``break``
    after ``return False``.

    Handles 0, 1 and negative inputs (all non-prime).
    """
    if num < 2:
        return False
    # 2 is the only even prime; after this, only odd divisors matter.
    if num % 2 == 0:
        return num == 2
    for i in range(3, int(num ** 0.5) + 1, 2):
        if num % i == 0:
            return False
    return True
"kaniahdunn@gmail.com"
] | kaniahdunn@gmail.com |
a31faa28ea7fa887dcbc8ad53795258aa189f931 | 498e792e16ab1a74ac034c53177c4cccbeef2749 | /classification/resnet/train.py | 662ceca52750777835c1b05e25f7eaacf8d247aa | [] | no_license | ydwisroad/imageprocessingpytorch | f97bec4469c087f6bbbca5d42da180c95be8b13f | bd8d1af228619c9c6c9c1a2b880422f7d5048dd5 | refs/heads/master | 2023-07-29T05:05:11.145832 | 2022-02-21T23:32:03 | 2022-02-21T23:32:03 | 284,976,501 | 7 | 3 | null | 2023-07-24T01:08:22 | 2020-08-04T12:43:24 | Jupyter Notebook | UTF-8 | Python | false | false | 4,052 | py | import torch
import torch.nn as nn
from torchvision import transforms, datasets
import json
import matplotlib.pyplot as plt
import os
import torch.optim as optim
from model import resnet34, resnet101
# Training script: fine-tune a ResNet-34 on a 5-class flower dataset.
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Standard ImageNet preprocessing: random crop/flip for training,
# deterministic resize + center crop for validation.
data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
    "val": transforms.Compose([transforms.Resize(256),
                               transforms.CenterCrop(224),
                               transforms.ToTensor(),
                               transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}
data_root = os.path.abspath(os.path.join(os.getcwd(), "../../data"))  # get data root path
image_path = data_root + "/flower_photos_simple/"  # flower data set path
train_dataset = datasets.ImageFolder(root=image_path+"train",
                                     transform=data_transform["train"])
train_num = len(train_dataset)
# {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
flower_list = train_dataset.class_to_idx
# Invert class->index so that predictions can be mapped back to names.
cla_dict = dict((val, key) for key, val in flower_list.items())
# write dict into json file (consumed later at inference time)
json_str = json.dumps(cla_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)
batch_size = 4
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size, shuffle=True,
                                           num_workers=0)
validate_dataset = datasets.ImageFolder(root=image_path + "val",
                                        transform=data_transform["val"])
val_num = len(validate_dataset)
validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                              batch_size=batch_size, shuffle=False,
                                              num_workers=0)
net = resnet34()
# load pretrain weights (disabled -- training from scratch here)
#model_weight_path = "./resnet34-pre.pth"
#missing_keys, unexpected_keys = net.load_state_dict(torch.load(model_weight_path), strict=False)
# for param in net.parameters():
#     param.requires_grad = False
# Replace the final fully-connected layer with a 5-way head
# (5 flower classes -- presumably matches the dataset folders; verify).
inchannel = net.fc.in_features
net.fc = nn.Linear(inchannel, 5)
net.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0001)
best_acc = 0.0
save_path = './resNet34.pth'
for epoch in range(10):
    # train: enable dropout/batch-norm training behaviour
    net.train()
    running_loss = 0.0
    for step, data in enumerate(train_loader, start=0):
        images, labels = data
        optimizer.zero_grad()
        logits = net(images.to(device))
        loss = loss_function(logits, labels.to(device))
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        # print train progress bar (carriage return keeps it on one line)
        rate = (step+1)/len(train_loader)
        a = "*" * int(rate * 50)
        b = "." * int((1 - rate) * 50)
        print("\rtrain loss: {:^3.0f}%[{}->{}]{:.4f}".format(int(rate*100), a, b, loss), end="")
    print()
    # validate: switch to eval mode and disable gradient tracking
    net.eval()
    acc = 0.0  # accumulate accurate number / epoch
    with torch.no_grad():
        for val_data in validate_loader:
            val_images, val_labels = val_data
            outputs = net(val_images.to(device))  # eval model only have last output layer
            # loss = loss_function(outputs, test_labels)
            predict_y = torch.max(outputs, dim=1)[1]
            acc += (predict_y == val_labels.to(device)).sum().item()
        val_accurate = acc / val_num
        # Checkpoint only when validation accuracy improves.
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)
        print('[epoch %d] train_loss: %.3f  test_accuracy: %.3f' %
              (epoch + 1, running_loss / step, val_accurate))
print('Finished Training')
| [
"wandf12345@163.com"
] | wandf12345@163.com |
5bf7470e827eea42e7c8955e6c2fb564dbc45de9 | f453f183834e3bf587a120023615ed2ddd38c157 | /tsa/lib/encoders.py | 969cdf1f6c1712d900097659bf0862df709f2d35 | [
"MIT"
] | permissive | chbrown/topic-sentiment-authorship | 72c21638eb72888c370cd3b1b5f06504df09ce2e | e8cacf11b06583d9ed85ff790e1d5322e59f2fd6 | refs/heads/master | 2022-07-05T22:58:24.456139 | 2020-03-29T16:12:21 | 2020-03-29T16:12:21 | 13,025,589 | 0 | 0 | MIT | 2020-03-29T16:13:35 | 2013-09-23T02:53:40 | Jupyter Notebook | UTF-8 | Python | false | false | 492 | py | import json
from datetime import datetime
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that also serializes objects exposing ``__json__()``
    and :class:`datetime.datetime` instances (as ``YYYY-MM-DDTHH:MM:SS``)."""

    def default(self, obj):
        """Return a JSON-serializable representation of *obj*.

        Unsupported types are delegated to the base implementation, which
        raises TypeError.  BUG FIX: the previous ``return obj`` fallback fed
        the same unserializable object straight back into the encoder,
        producing infinite recursion instead of a clear error.
        """
        if hasattr(obj, '__json__'):
            return obj.__json__()
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%S')
        return super().default(obj)
# encoder = JSONEncoder()
# def json(obj):
# return encoder.encode(obj)
# c'mon, just DIY
def csv(obj):
    """Render *obj* as a comma-separated string, applying str() to each item."""
    return ','.join(str(item) for item in obj)
| [
"io@henrian.com"
] | io@henrian.com |
701bd093bcd667f743047b4f0a4a5aac7dc88001 | 9b5d7dde000f5619ff4a752fc453f8ed5b2618e0 | /.history/main_20210924153614.py | d47094295983741c4ba3c32169bb49e2b9386307 | [] | no_license | Luyzr/EE7207_A1 | 4a2031221c5264ef70da516e370fbd9d6a068616 | 1c8592a0f8c7629848e494dd9dda8d14421a26a4 | refs/heads/master | 2023-08-14T10:10:51.113766 | 2021-09-29T10:58:00 | 2021-09-29T10:58:00 | 411,636,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,831 | py | import numpy as np
import math
import scipy.io
from sklearn.model_selection import KFold
# data_train.shape => (330, 33)
# label_train.shape => (330, 1)
# data_test.shape => (21, 33)
def Gaussian(x, c, t):
    """Gaussian RBF activation: exp(-||x - c||^2 / (2 * t^2))."""
    squared_distance = np.sum(np.square(x - c))
    return math.exp(-squared_distance / (2 * t ** 2))
def EstimateC(data, label, pn=30, pretrained=False):
    """Greedily select up to *pn* centre vectors for the RBF network.

    Each round adds the training sample whose inclusion minimizes the
    squared training error; stops early when the error stops improving
    (or improves by less than 0.15).  The selected centres are cached to
    ``{m}_center_vector.npy``; with pretrained=True, a previously saved
    file ``{pn}_center_vector.npy`` is loaded instead.
    """
    print('Getting center vector...')
    if pretrained:
        return np.load('{}_center_vector.npy'.format(pn))
    n, d = data.shape
    e = np.zeros(n)
    # NOTE(review): 330 is hard-coded to the assignment's training-set size
    # (data_train has 330 rows) -- presumably should be `n`; confirm.
    candi = [i for i in range(0, 330)]
    # Score every single sample as a one-centre network; pick the best.
    for i in range(0, n):
        c = data[i]
        # NOTE(review): here `c` is a 1-D vector, while EstimateOW/getO index
        # c.shape[0]; this relies on that shape being handled -- verify.
        o, w = EstimateOW(data, c, 0.707, label)
        f = np.dot(o, w)
        e[i] = 1/2 * np.sum(np.square(f - label))
    first = np.argmin(e)
    err = e[first]
    old_err = np.Inf
    c = data[first].reshape((1,-1))
    candi.pop(first)
    m = 1
    # print('round:{} error:{:.2f}\n'.format(m, err))
    # Keep adding centres while the error keeps dropping meaningfully.
    while m < pn and err <= old_err and np.abs(err - old_err) > 0.15:
        m += 1
        old_err = err
        e = np.Inf * np.ones(n)
        # Try each remaining candidate as the next centre.
        for k in range(0, n - m):
            i = candi[k]
            nc = np.concatenate((c, data[i].reshape(1,-1)), axis=0)
            t = EstimateT(nc, m)
            o, w = EstimateOW(data, nc, t, label)
            f = np.dot(o, w)
            e[i] = 1/2 * np.sum(np.square(f - label))
        first = np.argmin(e)
        err = e[first]
        c = np.concatenate((c, data[first].reshape(1,-1)), axis=0)
        candi.pop(candi.index(first))
        # print('round:{} error:{:.2f}\n'.format(m, err))
    print('Number of center vector:{}, saving'.format(m))
    np.save('{}_center_vector.npy'.format(m), c)
    return c
def EstimateT(c, m):
    """Heuristic Gaussian width for the RBF layer.

    Returns d_max / sqrt(2 * m), where d_max is the largest pairwise
    Euclidean distance among the m centre vectors in *c*.
    """
    d_max = 0.0
    for a in range(m):
        for b in range(a + 1, m):
            d_max = max(d_max, np.sqrt(np.sum(np.square(c[a] - c[b]))))
    return d_max / np.sqrt(2 * m)
def getO(data, c, t):
    """Return the (n x m) RBF activation matrix O with
    O[i][j] = Gaussian(data[i], c[j], t)."""
    n = data.shape[0]
    m = c.shape[0]
    activations = [[Gaussian(sample, centre, t) for centre in c] for sample in data]
    return np.array(activations).reshape(n, m)
def EstimateOW(data, c, t, label):
    """Solve the linear output layer via the pseudo-inverse normal
    equations, w = pinv(O^T O) O^T y.  Returns (O, w)."""
    o = getO(data, c, t)
    w = np.linalg.pinv(o.T @ o) @ o.T @ np.array(label)
    return o, w
def LinearRBF(data, label, pn, pretrained=False):
    """Fit the RBF network end to end: select centres, estimate the
    Gaussian width, then solve the linear output weights.

    Returns (centres, weights, width).
    """
    centres = EstimateC(data, label, pn=pn, pretrained=pretrained)
    width = EstimateT(centres, centres.shape[0])
    _, weights = EstimateOW(data, centres, width, label)
    return centres, weights, width
def Dataloader():
    """Load the assignment's MATLAB files from the working directory.

    Returns (train_data, train_label, test_data), each taken from the
    variable of the same name inside ``<name>.mat``.
    """
    def load(name):
        return scipy.io.loadmat('{}.mat'.format(name))[name]
    return load('data_train'), load('label_train'), load('data_test')
def Train(data_train, label_train, pn=4, pretrained=False):
    """Fit the RBF network and report training accuracy.

    Labels and network outputs are binarized with a Heaviside step
    (threshold at 0) before comparing.  Returns (centres, weights, width).
    """
    c, w, t = LinearRBF(data_train, label_train, pn=pn , pretrained=pretrained)
    m, d = c.shape
    o = getO(data_train, c, t)
    f = np.dot(o, w)
    # Map {-1, +1}-style labels and real-valued outputs to {0, 1}.
    label_train = np.heaviside(label_train, 0.5)
    f = np.heaviside(f, 0.5)
    err = 0
    n, _ = label_train.shape
    for i in range(0, n):
        if label_train[i] != f[i]:
            err += 1
    print('Train accuracy is {:.2f}%'.format(100 * (1 - err/n)))
    return c, w, t
def Evaluate(data_test, label_test, c, w, t, mode='t'):
    """Run the fitted RBF network on *data_test*.

    mode='t' (test): compare against *label_test*, print and return accuracy.
    mode='e' (evaluate): labels unavailable; just print predictions, return None.
    Any other mode falls through and implicitly returns None.
    """
    o = getO(data_test, c, t)
    f = np.dot(o, w)
    # Binarize the real-valued network output at threshold 0.
    f = np.heaviside(f, 0.5)
    err = 0
    if mode == 't':
        label_test = np.heaviside(label_test, 0.5)
        print('Truth is {}'.format(label_test.reshape(1, -1)))
        print('Result is {}'.format(f.reshape(1,-1)))
        n, _ = label_test.shape
        for i in range(0, n):
            if label_test[i] != f[i]:
                err += 1
        print('Test accuracy is {:.2f}%'.format(100 * (1 - err/n)))
        return 1 - err/n
    if mode == 'e':
        print('Result is {}'.format(f.reshape(1,-1)))
        return
if __name__ == "__main__":
'''
参数都是一脉相承的
pn 表示设定的CV的数量
m 是计算过程中实际用到的CV的数量
c 是CV
w 是权重
t 是高斯参数
'''
print('Loading data')
train_data, train_label, test_data = Dataloader()
print('1:{}'.format(train_label.count(1)))
# 调参用这个
getCV = False
pn = 2
# # 得结果用这个(你得先调过参才行)
# getCV = True
# pn = 15
kf = KFold(5, shuffle=True, random_state=42)
rr = 1
if getCV:
best_pn = 0
best_score = 0
for pn in range(2, 20):
scores = []
rr = 1
for train_index, test_index in kf.split(train_data):
print('========================== The {}th experiment with pn={} =========================='.format(rr, pn))
rr += 1
data_train, label_train = train_data[train_index], train_label[train_index]
data_test, label_test = train_data[test_index], train_label[test_index]
print('Start Training...')
c, w, t = Train(data_train, label_train, pn, pretrained=False)
print('Start Evaluating..')
score = Evaluate(data_test, label_test, c, w, t, mode='t')
scores.append(score)
mean_score = np.mean(np.array(scores))
print('The mean score with pn={} is {}\n'.format(pn, mean_score))
if mean_score > best_score:
best_pn = pn
best_score = mean_score
print('The best pn is {}, with the best score: {}'.format(best_pn, best_score))
else:
c, w, t = Train(train_data, train_label, pn, pretrained=True)
print('pn is: {}; t is: {:.4f}'.format(pn, t))
Evaluate(test_data, None, c, w, t, mode='e') | [
"gentlelu@outlook.com"
] | gentlelu@outlook.com |
0c1a3d07a07d072f99c7a29312d13587a4198ea3 | f049ed97c00301ac9400bad7d53ad35909837ec2 | /doc_extractor/extractor/views_19.1.2021.py | c9dddd4be533a18412baf89a396f71f07555c397 | [] | no_license | vijay867777/sgk_git | 54afb4f826e65c8ef3f04698fc6fdda29e01587a | cc8856ebfd3639266bc2260f3d6a63047b50ff8a | refs/heads/main | 2023-02-24T22:08:55.310228 | 2021-02-03T07:12:17 | 2021-02-03T07:12:17 | 335,535,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,854 | py | # Django Libraries
# import concurrent.futures
from django.shortcuts import render
from django.http import HttpResponse , JsonResponse
from .models import *
# Custom Libraries
import pandas as pd
import numpy as np
import joblib
from laserembeddings import Laser
# from sentence_transformers import SentenceTransformer , models
from sklearn.neural_network import MLPClassifier # works great -- neural network
from langid import classify
from langdetect import detect
# from fastlangid.langid import LID
import os
import re
import pdfplumber
from docx2json import convert
import json
from docx import Document
import smbclient
import mammoth
from bs4 import BeautifulSoup
# Rest framework import
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view,permission_classes,authentication_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication, BasicAuthentication , TokenAuthentication
# Other file import
from environment import MODE
if MODE == 'local':
from .local_constants import *
else:
from .dev_constants import *
from .excel_processing import *
from .msd_processing import *
# Label categories kept in the food-label extractor's output
# (predictions outside this list are filtered out before rendering).
categories = ['nutrition','ingredients','allergen statement','shelf_life_statement',
              'storage instruction','address',
              # 'warning statement',
              "gtin_number","serial_number","lot_number","expiry_date",'form_content',
              'usage instruction','pc_number','general classification',"eu_number"]
# Categories produced by the MSD (pharma leaflet) classifier.
msd_categories = ['name','active_substance','excipients','form_content','method_route','warning','expiry_date',
                  'storage_instructions','precautions','marketing_company','unique_identifier','classification',
                  'usage_instruction','braille_info','mfg_date','manufacturer','packing_site','appearance',
                  'product_info','label_dosage','box_info']
# Initialize Laser
# model_path = r"/Users/VIJAYKANAGARAJ/PycharmProjects/Schawk_document_xml/labse"
# model = SentenceTransformer(model_path)
# Initialize LASER multilingual sentence-embedding model (paths come from
# the constants module selected by MODE).
laser = Laser(path_to_bpe_codes,path_to_bpe_vocab,path_to_encoder)
# langid = LID()
# @authentication_classes([SessionAuthentication, BasicAuthentication])
# @api_view()
# @permission_classes([IsAuthenticated])
# @authentication_classes([TokenAuthentication])
# def extractor(request):
# content = {'message': 'Hello, World!'}
# return Response(content)
# return render(request,'extractor/index.html')
# Create your views here.
def model_training():
    """Train the food-label sentence classifier and cache it to disk.

    Reads the labelled sentences from ``input_excel``, embeds them with
    LASER (English tokenizer), fits an MLP, and dumps the model to
    ``model_location`` via joblib.  Returns the fitted classifier.
    """
    df = pd.read_excel(input_excel)
    # Shuffle the rows so training order does not bias the MLP.
    df = df.sample(frac=1)
    X_train_laser = laser.embed_sentences(df['text'], lang='en')
    # X_train_laser = model.encode(df['text'])
    # mlp = MLPClassifier(hidden_layer_sizes=(125,), solver='adam', activation='tanh', random_state=0, shuffle=True)
    mlp = MLPClassifier(hidden_layer_sizes=(70,),solver='adam',max_iter=500,activation='tanh',random_state=0,shuffle=True)
    mlp.fit(X_train_laser, df['category'])
    joblib.dump(mlp,model_location)
    return mlp
def classifier(request):
    """Django view: classify a single sentence passed as ?text=...

    Loads (or trains) the cached food-label model, embeds the text with
    LASER using the detected language, and renders the predicted category
    with its probability.  Without ?text=, renders the input form.
    """
    text = request.GET.get('text','')
    if text:
        pass
    else:
        return render(request, 'extractor/index_classifier.html')
    model = None
    if os.path.exists(model_location):
        model = joblib.load(model_location)
    else:
        model = model_training()
    # lang_detected = detect(text)
    lang_detected = classify(text)[0]
    # print('lang----->',lang_detected)
    # print(text)
    prediction = model.predict(laser.embed_sentences([text],lang=lang_detected))
    probability = model.predict_proba(laser.embed_sentences([text],lang=lang_detected))
    # Sort the class probabilities so the top two can be compared.
    probability[0].sort()
    max_probability = max(probability[0])
    # Accept the prediction only when it clearly beats the runner-up.
    if (max_probability-0.35) > probability[0][-2]:
        pred_output = prediction[0]
    else:
        pred_output = 'None'
    # print(probability)
    print('{}-------------->{}'.format(max(probability[0]),pred_output))
    result = {'probability':max(probability[0]),'output':pred_output,'actual_output':prediction[0],'text':text}
    # return HttpResponse(pred_output)
    # return render(request,'extractor/doc_result.html',{'result':dict})
    return render(request,'extractor/index_result.html',result)
def prediction(text):
    """Classify one sentence with the cached food-label model.

    Returns a dict with the top-class probability, the thresholded output
    ('None' when confidence <= 0.63), and the raw predicted class.
    """
    if os.path.exists(model_location):
        model = joblib.load(model_location)
    else:
        model = model_training()
    # lang_detected = detect(text)
    lang_detected = classify(text)[0]
    print(text)
    prediction = model.predict(laser.embed_sentences([text],lang=lang_detected))
    # prediction = model.predict(model.encode([text]))
    probability = model.predict_proba(laser.embed_sentences([text],lang=lang_detected))
    # probability = model.predict_proba(model.encode['text'])
    probability[0].sort()
    max_probability = max(probability[0])
    # if (max_probability-0.35) > probability[0][-2]:
    # Hard confidence threshold: below it the sentence is left unlabelled.
    if max_probability > 0.63:
        pred_output = prediction[0]
    else:
        pred_output = 'None'
    print('{}-------------->{}'.format(max(probability[0]),pred_output))
    return ({'probability': max(probability[0]), 'output': pred_output, 'actual_output': prediction[0]})
def doc_extractor(request):
    """Django view: extract labelled content from a PDF or DOCX document.

    Query param ``file`` names a document under ``document_location``.
    Each sentence is scanned for unique identifiers (regex) and classified
    with the sentence model; results whose category is in ``categories``
    are rendered via ``doc_result.html``.  Without ``file``, renders the
    upload form.

    BUG FIXES: ``Regex_parsers`` takes (text, regex_patterns) but was
    called with only the text in both PDF branches (TypeError); the
    'Nutrition' trim assumed the key always exists (KeyError).
    """
    final = {}
    file_name = request.GET.get('file', 'no file')
    if file_name == 'no file':
        return render(request, 'extractor/index.html')
    file = document_location + file_name
    doc_format = os.path.splitext(file_name)[1].lower()
    if doc_format == ".pdf":
        if os.path.exists(file):
            pdf = pdfplumber.open(file)
        else:
            return HttpResponse('File not found')
        no_of_pages = len(pdf.pages)
        tables = len(pdf.pages[0].extract_tables())
        if tables > 2:
            # Type 1: table-heavy layout -- read table rows as sentences.
            print('type 1 --- tables')
            for page_no in range(no_of_pages):
                page = pdf.pages[page_no]
                extracted_table = page.extract_tables()
                text = [" ".join(list(filter(None, content))).replace('\n', ' ')
                        for table in extracted_table for content in table]
                for sentence in text:
                    # BUG FIX: pass the pattern dict (second required arg).
                    unique_identifiers = Regex_parsers(sentence, regex_patterns)
                    if unique_identifiers:
                        final = {**final, **unique_identifiers}
                    result = prediction(sentence)['output']
                    if result != 'None':
                        final.setdefault(result, []).append(sentence)
            # BUG FIX: guard against a missing 'Nutrition' key.
            # Drop the trailing nutrition row (usually a table footer).
            if len(final.get('Nutrition', [])) > 1:
                final['Nutrition'] = final['Nutrition'][:-1]
            extracted_categories = {key: val for key, val in final.items() if key.lower() in categories}
            # return JsonResponse(extracted_categories)
            return render(request, 'extractor/doc_result.html', {'result': extracted_categories})
        else:
            # Type 2: paragraph layout -- tokenize the raw page text.
            print('type-2-paragraph')
            for page_no in range(no_of_pages):
                page = pdf.pages[page_no]
                extracted_text = page.extract_text()
                text = sentence_tokennizer(extracted_text)
                for sentence in text:
                    # BUG FIX: pass the pattern dict (second required arg).
                    unique_identifiers = Regex_parsers(sentence, regex_patterns)
                    if unique_identifiers:
                        final = {**final, **unique_identifiers}
                    result = prediction(sentence)['output']
                    final.setdefault(result, []).append(sentence)
            extracted_categories = {key: val for key, val in final.items() if key.lower() in categories}
            return render(request, 'extractor/doc_result.html', {'result': extracted_categories})
    elif (doc_format == '.docx') or (doc_format == '.doc'):
        doc = convert(file, sepBold=True)
        doc_to_json = json.loads(doc)
        # Prefer the non-bold body text; fall back to the full text.
        text = doc_to_json['nonbold']
        if not text:
            text = doc_to_json['text']
        for sentence in text:
            unique_identifiers = Regex_parsers(sentence, regex_patterns)
            if unique_identifiers:
                final = {**final, **unique_identifiers}
            result = prediction(sentence)['output']
            final.setdefault(result, []).append(sentence)
        # print(final)
        extracted_categories = {key: val for key, val in final.items() if key.lower() in categories}
        return render(request, 'extractor/doc_result.html', {'result': extracted_categories})
    else:
        return HttpResponse('This file format not supported currently')
def sentence_tokennizer(text):
    """Split raw page text into rough sentence tokens.

    The text is first broken into segments at blank-ish separators
    (newline, one whitespace char, newline), then each segment is split
    at sentence ends followed by a newline ('.', whitespace, newline),
    and all pieces are flattened into a single list.
    """
    tokens = []
    for segment in re.split(r"\n\s\n", text):
        tokens.extend(re.split(r"\.\s\n", segment))
    return tokens
def Regex_parsers(text, regex_patterns):
    """Scan *text* with every pattern in *regex_patterns*.

    Returns ``{label: [first_match_stripped]}`` for each pattern that
    matched with a non-empty first capture; labels with no (or empty)
    matches are omitted.
    """
    print('regex---->', text)
    matches = {}
    for label, pattern in regex_patterns.items():
        hits = re.findall(pattern, text, (re.IGNORECASE | re.MULTILINE))
        if hits:
            value = str(hits[0]).strip()
        else:
            value = hits
        if value:
            print("---------************{}".format(value))
            matches[label] = [value]
    return matches
def Regex_parsers_generator(text, regex_patterns):
    """Lazily scan each string in *text* with every pattern, yielding
    (label, first_match) for every pattern that hits."""
    print('regex---->', text)
    for chunk in text:
        for label, pattern in regex_patterns.items():
            hits = re.findall(pattern, chunk, (re.IGNORECASE | re.MULTILINE))
            if hits:
                yield label, hits[0]
def msd_data_extractor(list,regex_heading_msd):
    """Group a flat list of strings into {heading: body-text} sections.

    A string matching *regex_heading_msd* starts a new section; everything
    until the next heading is accumulated as its body.

    NOTE(review): the parameter shadows the builtin ``list``; bodies closed
    by a heading are joined with '\\n' while the final section is joined
    with ' ' -- presumably unintended inconsistency, confirm before relying
    on the separator.  If the last element is itself a heading, its (empty)
    section is silently dropped.
    """
    tmp = []
    final = {}
    key = ''
    for i in range(len(list)):
        text = str(list[i])
        if re.findall(regex_heading_msd, text):
            # Heading line: flush the previous section, start a new one.
            try:
                if key != '':
                    final[key] = '\n'.join(tmp)
                else:
                    pass
                key = text
                tmp.clear()
            except:
                pass
        else:
            if i == len(list) - 1:
                # Last element: append it and flush the final section.
                tmp.append(text)
                final[key] = ' '.join(tmp)
            else:
                tmp.append(text)
    return final
def msd_prediction(text):
    """Classify one pharma-leaflet heading/sentence with the MSD model.

    Loads the cached classifier from ``msd_model_location`` (training it
    on first use), embeds the text with LASER using the detected language,
    and returns a dict with the top probability, the thresholded output
    ('None' when confidence <= 0.60) and the raw predicted class.
    """
    model = None
    if os.path.exists(msd_model_location):
        model = joblib.load(msd_model_location)
    else:
        model = msd_model_training()
        print('new model trained')
    # lang_detected = detect(text)
    lang_detected = classify(text)[0]
    # print('lang----->',lang_detected)
    print(text)
    prediction = model.predict(laser.embed_sentences([text],lang=lang_detected))
    probability = model.predict_proba(laser.embed_sentences([text],lang=lang_detected))
    probability[0].sort()
    max_probability = max(probability[0])
    # if (max_probability-(max_probability/2)) > probability[0][-2]:
    # Hard confidence threshold: below it the text is left unlabelled.
    if max_probability > 0.60:
        pred_output = prediction[0]
    else:
        pred_output = 'None'
    print('{}-------------->{}'.format(max(probability[0]),pred_output))
    return ({'probability': max(probability[0]), 'output': pred_output, 'actual_output': prediction[0]})
def msd_model_training():
    """Train the MSD (pharma leaflet) classifier and cache it to disk.

    Reads labelled rows from ``msd_input_excel``, embeds them with LASER
    (English tokenizer), fits an MLP, dumps it to ``msd_model_location``
    via joblib, and returns the fitted classifier.
    """
    df = pd.read_excel(msd_input_excel)
    # Shuffle rows so training order does not bias the MLP.
    df = df.sample(frac=1)
    X_train_laser = laser.embed_sentences(df['text'], lang='en')
    # mlp = MLPClassifier(hidden_layer_sizes=(125,), solver='adam', activation='tanh', random_state=0, shuffle=True)
    mlp = MLPClassifier(hidden_layer_sizes=(70,),solver='adam',max_iter=500,activation='tanh',random_state=0,shuffle=True)
    # mlp = MLPClassifier(hidden_layer_sizes=(70,),solver='adam',max_iter=300,activation='relu',learning_rate='constant',learning_rate_init=0.001,random_state=0,shuffle=True)
    mlp.fit(X_train_laser, df['category'])
    joblib.dump(mlp,msd_model_location)
    return mlp
# @api_view()
# @permission_classes([IsAuthenticated])
# @authentication_classes([TokenAuthentication])
def msd(request):
    """Django view: extract categorized, language-tagged content from one
    or more MSD .docx leaflets.

    Query param ``file`` may repeat.  Each document is converted to
    {heading: texts} by text_extraction(), headings are classified with
    msd_prediction(), unique identifiers are pulled out by regex, and
    multi-language passages (separated by '$$') are split and tagged with
    langdetect.  Returns a JSON object keyed by file index; non-.docx
    files yield {'status': '0'}.
    """
    final_json = {}
    # getting value from query string
    file_name_list = request.GET.getlist('file','no file')
    print('file_list',file_name_list)
    if file_name_list == 'no file':
        return render(request, 'extractor/index_msd.html')
        # return Response({'status':'0'})
    else:
        pass
    for file_index , file_name in enumerate(file_name_list):
        final = {}
        cate_tmp = {}
        lang_final = set()
        doc_format = os.path.splitext(file_name)[1].lower()
        if doc_format == '.docx':
            # Reading file from storage (local disk or SMB share, per MODE)
            if MODE == 'local':
                file = document_location + file_name
                extracted = text_extraction(file)
            else:
                file = file_name
                extracted = text_extraction(file,method='SMB')
                # file = get_file_smb(r"{}".format(file_name))
            # Classify each heading; bucket its body text under the
            # predicted category (newlines flattened to spaces).
            for key,value in extracted.items():
                if "".join(value).strip() != '':
                    result = msd_prediction(key)['output'] # classifier
                    if result != 'None':
                        if result in final.keys():
                            final[result].extend([val.replace("\n",' ').strip() for val in value])
                        else:
                            final[result] = [val.replace("\n",' ').strip() for val in value]
                    else:
                        pass
            # Pull GTIN/lot/serial/etc. out of the 'unique_identifier'
            # bucket with the configured regexes, then drop the bucket.
            unique = {}
            if 'unique_identifier' in final:
                # unique = Regex_parsers(str(final['unique_identifier']),regex_patterns)
                for key , identifier in Regex_parsers_generator(final['unique_identifier'],regex_patterns):
                    unique[key] = [str(identifier).strip()]
                final.pop('unique_identifier')
            else:
                pass
            # Language-tag each passage.  '$$' separates paragraphs that
            # came from distinct <p>/<li> elements; short fragments
            # (<= 2 words) are treated as topic prefixes and glued onto
            # the following passage before detection.
            for cate , value in final.items():
                if cate in msd_categories_lang:
                    for t in value:
                        if '$$' in t:
                            list_text = t.split('$$')
                            topic = ''
                            for index, text in enumerate(list_text):
                                text = text.replace('$$',' ')
                                if len(str(text).split()) > 2:
                                    text = ' '.join((topic,text)).strip()
                                    topic = ''
                                    lang = detect(text)
                                    lang_final.add(lang)
                                    if cate in cate_tmp:
                                        cate_tmp[cate].append({lang: text})
                                    else:
                                        cate_tmp[cate] = [{lang: text}]
                                else:
                                    topic = ' '.join((topic,text)).strip()
                                    # Flush a trailing short fragment so it
                                    # is not silently dropped.
                                    if index == len(list_text)-1:
                                        lang = detect(topic)
                                        lang_final.add(lang)
                                        if cate in cate_tmp:
                                            cate_tmp[cate].append({lang: topic})
                                        else:
                                            cate_tmp[cate] = [{lang: topic}]
                                        topic = ''
                                    else:
                                        pass
                        else:
                            lang = detect(t)
                            lang_final.add(lang)
                            if cate in cate_tmp:
                                cate_tmp[cate].append({lang: t})
                            else:
                                cate_tmp[cate] = [{lang: t}]
                elif cate in msd_categories_lang_exception:
                    # print('^^^^$$$$', value)
                    # These categories are tagged whole, without '$$' splitting.
                    for t in value:
                        t = t.replace('$$',' ')
                        lang = detect(t)
                        lang_final.add(lang)
                        if cate in cate_tmp:
                            cate_tmp[cate].append({lang: t})
                        else:
                            cate_tmp[cate] = [{lang: t}]
                else:
                    # print('cate------>',cate)
                    # No language tagging for the remaining categories.
                    cate_tmp[cate] = value
            status = {'status':'1','language': list(lang_final),'file_name':[file_name]}
            extracted_categories = {**status,**cate_tmp,**unique}
            final_json[file_index] = extracted_categories
            # return render(request, 'extractor/doc_result.html', {'result': extracted_categories})
        else:
            status = {'status': '0','file_name': [file_name]}
            final_json[file_index] = status
            # return JsonResponse(status)
    # return Response(final_json)
    return JsonResponse(final_json)
def get_file_smb(file_name):
    """Open *file_name* (binary) over SMB, retrying once after a cache reset.

    Returns the open file object.  BUG FIXES: the original's
    ``finally: return data`` swallowed any exception from the retry and
    handed callers an empty string instead of a file; the bare ``except:``
    also caught KeyboardInterrupt/SystemExit.  Now the first failure
    triggers one cache-reset retry, and a failing retry propagates.
    """
    try:
        data = smbclient.open_file(r"{}".format(file_name), mode='rb', username=smb_username, password=smb_password)
        print('file found')
    except Exception:
        # Stale/broken SMB session: drop cached connections and retry once.
        smbclient.reset_connection_cache()
        data = smbclient.open_file(r"{}".format(file_name), mode='rb', username=smb_username, password=smb_password)
    return data
def text_extraction(file,method=None):
    """Convert a .docx leaflet to {heading: ['para$$para...', ...]}.

    The document is rendered to HTML with mammoth (read locally, or over
    SMB when method='SMB'), then <p>/<li> elements are walked: elements
    matching ``regex_heading_msd`` become section keys (tags stripped),
    and the paragraphs under each heading are joined with '$$'.  Bold
    markup is normalized to <b>; most other tags are stripped; paragraphs
    like 'Panel 1' are filtered out.
    """
    tmp = []
    final = {}
    key = ''
    if method == 'SMB':
        try:
            with smbclient.open_file(r"{}".format(file), mode='rb', username=smb_username, password=smb_password) as f:
                html = mammoth.convert_to_html(f).value
            print('file found')
        except:
            # Stale SMB session: reset cached connections and retry once.
            smbclient.reset_connection_cache()
            with smbclient.open_file(r"{}".format(file), mode='rb', username=smb_username, password=smb_password) as f:
                html = mammoth.convert_to_html(f).value
            print('file found')
    else:
        html = mammoth.convert_to_html(file).value
    '''
    soup = BeautifulSoup(html,'html.parser')
    paragraphs = soup.find_all('p')
    # list = [ele.text for ele in paragraphs]
    list = [ele.next for ele in paragraphs]
    '''
    soup = BeautifulSoup(html, 'html.parser')
    paragraphs = soup.find_all(['p','li'])
    # -----
    for i, text in enumerate(paragraphs):
        text = str(text)
        # Drop empty fragments accumulated so far.
        if '' in tmp:
            tmp.remove('')
        if re.findall(regex_heading_msd, text):
            # Heading element: flush the section collected so far, then
            # start a new one keyed by the tag-stripped heading text.
            try:
                if key and (key not in final):
                    if tmp:
                        final[key] = ['$$'.join(tmp)]
                elif key in final:
                    # Repeated heading: append another '$$'-joined chunk.
                    if tmp:
                        final[key].append('$$'.join(tmp))
                else:
                    pass
                key = re.sub(r'<.*?>', '', text)
                # print(key)
                tmp.clear()
            except:
                pass
        else:
            if i == len(paragraphs) - 1:
                # Last element: collect it, then flush the final section.
                text = text.strip()
                tmp = [t for t in tmp if t]
                if text and not re.findall(r"Panel\s\d", text):
                    text = text.replace('<strong>', '<b>').replace('</strong>', '</b>')
                    # Strip all tags except those starting with b/e/m/s
                    # (keeps <b>, <em>, ... -- confirm intent of the class).
                    text = re.sub(r"<(\/?[^/bems]).*?>", '', text)
                    tmp.append(text)
                if key not in final:
                    if tmp:
                        final[key] = ['$$'.join(tmp)]
                elif key in final:
                    if tmp:
                        final[key].append('$$'.join(tmp))
                else:
                    pass
            else:
                text = text.strip()
                tmp = [t for t in tmp if t]
                if text and not re.findall(r"Panel\s\d", text): # filter out heading like 'big panel 1'
                    text = text.replace('<strong>', '<b>').replace('</strong>', '</b>')
                    text = re.sub(r"<(\/?[^/bems]).*?>", '', text)
                    tmp.append(text)
    # return final , max(lang,key=lang.count)
    # print(final)
    return final
def extractor(request):
    """Django view: dispatch each ?file=... to the matching extractor.

    .xlsx files go to Excel_extraction, .docx files to msd_extraction;
    other extensions are skipped (their index is absent from the output).
    Returns a JSON object keyed by file index.  Without ``file``, renders
    the MSD upload form.
    """
    final_json = {}
    # getting value from query string (``file`` may repeat)
    file_name_list = request.GET.getlist('file','no file')
    print('file_list',file_name_list)
    if file_name_list == 'no file':
        return render(request, 'extractor/index_msd.html')
        # return Response({'status':'0'})
    else:
        pass
    for file_index , file_name in enumerate(file_name_list):
        doc_format = os.path.splitext(file_name)[1].lower()
        if doc_format == '.xlsx':
            output = Excel_extraction(file_name).main()
            final_json[file_index] = output
        elif doc_format == '.docx':
            output = msd_extraction().main(file_name)
            final_json[file_index] = output
        else:
            # Unsupported extension: logged only, no entry in the response.
            print('format not supported')
    return JsonResponse(final_json)
# def dataset_to_mangodb(request):
# from pymongo import MongoClient
# client = MongoClient('172.28.42.150',27017)
# db = client['dataset']
# collection = db['msd']
# data = [msd_dataset(category=i['category'], text=i['text'], language_code=i['language_code'],
# language=i['language'],
# type=i['type']) for i in collection.find({})]
# if data:
# msd_dataset.objects.bulk_create(data)
# return HttpResponse('success')
# else:
# return HttpResponse('Failure')
def dataset_to_mangodb(request, django_model, mongo_table):
    """Copy every document of MongoDB collection *mongo_table* into the
    Django model class *django_model* via a single bulk_create.

    request: unused, kept so the function can be wired as a Django view.
    Returns HttpResponse 'success' when rows were inserted, 'Failure' when
    the collection was empty.
    """
    from pymongo import MongoClient
    client = MongoClient('172.28.42.150', 27017)
    db = client['dataset']
    collection = db[mongo_table]
    data = [django_model(category=i['category'], text=i['text'], language_code=i['language_code'],
                         language=i['language'],
                         category_actual=i['category_actual']) for i in collection.find({})]
    if data:
        # BUGFIX: was hardcoded to msd_dataset.objects.bulk_create, which
        # ignored the django_model parameter (left over from the old
        # single-model version above); use the passed-in model class.
        django_model.objects.bulk_create(data)
        return HttpResponse('success')
    else:
        return HttpResponse('Failure')
# NOTE(review): dataset_to_mangodb is defined with three parameters
# (request, django_model, mongo_table) but is called here with only two
# arguments -- this raises TypeError at import time.  It was probably
# written against the older single-model version commented out above;
# confirm intended usage before enabling.
dataset_to_mangodb(msd_contents,'msd_contents')
| [
"VIJAYKANAGARAJ@CHENMACL16.local"
] | VIJAYKANAGARAJ@CHENMACL16.local |
714db4e6784f72403c33549680bae507b0d5bb17 | b951b28a8957cc788ea297a294766117b6ee9e04 | /task_2.py | a6aef9d9e9ef01ef8c7cbd86d178b29c6a4962f5 | [] | no_license | goodprogrammist/module3 | ada4b757a1020614b86a6e36eca9718aa6aeb546 | 4dd60fbf7b948dfa4398406960a73a25d3eb6ec6 | refs/heads/master | 2023-04-20T22:37:58.072734 | 2021-05-15T15:26:08 | 2021-05-15T15:26:08 | 367,620,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | print('Задача 2. Финансовый отчёт')
# Наде дали задание сформировать финансовый отчёт за последние 20 лет по полугодиям.
# Нужно сумму дохода первых двух кварталов поделить на сумму последних двух кварталов,
# чтобы понять динамику роста или падения дохода. И так за каждый год.
#
# Надя решила,
# что быстрее будет написать простую программу, которая сделает всё за неё.
#
#
# Запросите у пользователя четыре числа.
# Отдельно сложите первые два и отдельно вторые два.
# Разделите первую сумму на вторую.
# Выведите результат на экран.
| [
""
] | |
6b340d1d189577e17193f55e983367c6b759bdae | 459cc494fc3555b52c12b1861db0abfbd07a54b5 | /0x07-python-test_driven_development/2-main.py | 5af2d947114baf0589dcb6a498621a89a69bd5d4 | [] | no_license | ymcastellar/holbertonschool-higher_level_programming | a5dd910d9b1c52d21a2bed557147c3c9a5aa3ccf | f5bd4d7e6b7ada991b5d81cea2bb14212abb8888 | refs/heads/master | 2022-12-22T13:47:07.154798 | 2020-09-24T21:17:20 | 2020-09-24T21:17:20 | 259,443,832 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/python3
# Holberton checker-style demo for the 2-matrix_divided task; the module
# name starts with a digit, hence the __import__ call.
matrix_divided = __import__('2-matrix_divided').matrix_divided

# 2x3 integer fixture matrix
matrix = [
    [1, 2, 3],
    [4, 5, 6]
]
# divide every element by 3 and print the resulting new matrix
print(matrix_divided(matrix, 3))
# the original matrix must be printed unchanged (no in-place mutation)
print(matrix)
print(matrix_divided(matrix, 3))
"yoycas@hotmail.com"
] | yoycas@hotmail.com |
68f576c1e6ca803266988f8b0b0c5c830237888e | 6a2c43da788a27910bb11881e0e32b734b700e8e | /src/components/elements/groups_element.py | c1a3fbb027a3e7e22ddbb5efd5cf7daf6d68fa00 | [] | no_license | pitikdmitry/homework-4 | fa5ae5cd1e153dd98ccbbf299ee9ce0463efa275 | f456669dca4987b384f09bf4f00a1dcbc8e16467 | refs/heads/master | 2020-03-17T21:04:53.197276 | 2018-05-25T11:58:30 | 2018-05-25T11:58:30 | 133,942,398 | 0 | 0 | null | 2018-05-25T11:58:31 | 2018-05-18T10:53:20 | Python | UTF-8 | Python | false | false | 479 | py | from src.components.base_element import BaseElement
class GroupsElement(BaseElement):
    """Page object for the 'groups' section of the left navigation menu."""

    # XPath of the nav-bar entry for the user's alternative groups when it
    # carries the 'active' CSS class (i.e. when it is the selected item)
    MARKED_ITEM_NAV_BAR = '//a[@hrefattrs="st.cmd=userAltGroup&st._aid=NavMenu_User_AltGroups"]' \
                          '[@class="mctc_navMenuSec mctc_navMenuActiveSec"]'

    def is_marked(self):
        """
        Check for the existence of the marked groups item in the nav bar
        :return: Bool
        """
        return self.existence_of_element_by_xpath(self.MARKED_ITEM_NAV_BAR)
| [
"ya.zubarevanton@yandex.ru"
] | ya.zubarevanton@yandex.ru |
da82a7c906d4c100176b5994979f58c4b7d290da | ebc356d79226ffbb56438436b4abd13dc0594767 | /2018/pset6/cash/cash.py | e0e3f0d9757e1fe739dc7b043414089485a2c071 | [] | no_license | KevinStoneCode/CS50_2018 | 2f7aafe5b7f21bbecd3f91c3fa9b2827bc646df4 | 4030b9c9d884e30350d77466d82443a9de955840 | refs/heads/master | 2022-08-29T08:11:22.732539 | 2018-09-06T10:12:59 | 2018-09-06T10:12:59 | 145,199,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | from cs50 import get_float
# CS50 "cash": prompt until a non-negative dollar amount is entered
while True:
    change = get_float("Change owed: ")
    if change >= 0:
        break

# work in whole cents; round() guards against float representation error
cents = round(change * 100)
coins = 0
# greedy coin counting: quarters, dimes, nickels, then pennies
coins += cents // 25
cents %= 25
coins += cents // 10
cents %= 10
coins += cents // 5
cents %= 5
coins += cents
print(coins)
"kwshih0212@gmail.com"
] | kwshih0212@gmail.com |
f18101feaea2825e198453f972be02107ee83e77 | ed0a3ebb8d26ea8451e5fab3af65aa37fe343c13 | /joins/forms.py | 0cb18bfebd20b82af03de008bb8afb8ba08e7236 | [] | no_license | alisaleh65/first_app | 7fbf516bae300a11ab31f36c14d5002750c3d3fb | 90e2c8a4a44e1f8e62a5f49215bfc4d4c49d8c4b | refs/heads/master | 2021-01-20T22:19:58.806751 | 2016-06-26T08:54:29 | 2016-06-26T08:54:29 | 61,530,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from django import forms
from .models import Join
class EmailForm(forms.Form):
name = forms.CharField(required=False)
email = forms.EmailField()
class JoinForm(forms.ModelForm):
class Meta:
model = Join
fields = ["email"]
| [
"alisaleh65@yahoo.com"
] | alisaleh65@yahoo.com |
1eafdd1f445b525cf93c63c5472861c04502650d | 8bfdfde9886c85e5354bd97c9c754b821249c803 | /lib/OpenTokSDK.py | 1249027fddc8935ef371b1780db4885930e7a462 | [] | no_license | merrypuck/hotpot | 89587a715e4968c613f5fb67894f9f4985d1175c | 1debfa76ca206257c08a00710ba8c96b8c1b0635 | refs/heads/master | 2021-05-27T01:52:54.682419 | 2012-05-12T04:29:17 | 2012-05-12T04:29:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,821 | py |
"""
OpenTok Python Library v0.90.0
http://www.tokbox.com/
Copyright 2010, TokBox, Inc.
Last modified: 2011-10-12
"""
import urllib
import urllib2
import datetime
import calendar
import time
import hmac
import hashlib
import base64
import random
# Socket timeout (seconds) used for HTTP requests to the OpenTok REST API
TIMEOUT = 10
class OpenTokException(BaseException):
    """Generic OpenTok Error. All other errors extend this."""
class RequestError(OpenTokException):
    """Indicates an error during the request. Most likely an error connecting to the OpenTok API servers. (HTTP 500 error)"""
class AuthError(OpenTokException):
    """Indicates that the problem was likely with credentials. Check your API key and API secret and try again"""
class SessionProperties(object):
    """Optional groups-API settings for create_session.

    Attributes left as None are still sent; iterating an instance yields
    (api-parameter-name, value) pairs so it can be passed to dict.update().
    NOTE: Python 2 only -- __iter__ relies on dict.iteritems().
    """
    # class-level defaults; set per-instance before passing to create_session
    echoSuppression_enabled = None
    multiplexer_numOutputStreams = None
    multiplexer_switchType = None
    multiplexer_switchTimeout = None
    p2p_preference = None

    def __iter__(self):
        # map attribute names to the dotted parameter names the REST API expects
        d= {'echoSuppression.enabled' : self.echoSuppression_enabled,
            'multiplexer.numOutputStreams' : self.multiplexer_numOutputStreams,
            'multiplexer.switchType' : self.multiplexer_switchType,
            'multiplexer.switchTimeout' : self.multiplexer_switchTimeout,
            'p2p.preference' : self.p2p_preference,
        }
        return d.iteritems()
class RoleConstants:
    "List of valid roles for a token"
    SUBSCRIBER = "subscriber" #Can only subscribe
    PUBLISHER = "publisher" #Can publish, subscribe, and signal
    MODERATOR = "moderator" #Can do the above along with forceDisconnect and forceUnpublish
class OpenTokSession(object):
    """Lightweight holder for the session_id returned by the OpenTok API."""

    def __init__(self, session_id):
        self.session_id = session_id
class OpenTokSDK(object):
    """
    Use this SDK to create tokens and interface with the server-side portion of the Opentok API.

    NOTE: Python 2 only (uses `long`, `except X, e` syntax, urllib2).
    """
    # every token string starts with this sentinel so clients can recognize it
    TOKEN_SENTINEL = "T1=="
    SDK_VERSION = "tbpy-v0.91.2011-10-12"
    API_URL = "https://staging.tokbox.com/hl"
    # Uncomment this line when you launch your app
    # (the second assignment deliberately overrides staging with production)
    API_URL = "https://api.opentok.com/hl";

    def __init__(self, api_key, api_secret):
        # api_secret is stripped because the dashboard tends to add whitespace
        self.api_key = api_key
        self.api_secret = api_secret.strip()

    def generate_token(self, session_id=None, role=None, expire_time=None, connection_data=None, **kwargs):
        """
        Generate a token which is passed to the JS API to enable widgets to connect to the Opentok api.
        session_id: Specify a session_id to make this token only valid for that session_id.
        role: One of the constants defined in RoleConstants. Default is publisher, look in the documentation to learn more about roles.
        expire_time: Integer timestamp. You can override the default token expire time of 24h by choosing an explicit expire time. Can be up to 7d after create_time.

        Raises OpenTokException on invalid expire_time or oversized
        connection_data. Returns the signed token string.
        """
        create_time = datetime.datetime.utcnow()
        if session_id is None:
            session_id = ''

        if not role:
            role = RoleConstants.PUBLISHER

        data_params = dict(session_id=session_id,
                           create_time=calendar.timegm(create_time.timetuple()),
                           role=role,
                           )
        if expire_time is not None:
            if isinstance(expire_time, datetime.datetime):
                data_params['expire_time'] = calendar.timegm(expire_time.timetuple())
            else:
                data_params['expire_time'] = expire_time
            if type(data_params['expire_time']) != int and \
               type(data_params['expire_time']) != long and \
               type(data_params['expire_time']) != float:
                raise OpenTokException("Expire time must be a number")
            if data_params['expire_time'] < time.time():
                raise OpenTokException("Expire time must be in the future")
            if data_params['expire_time'] > time.time() + 604800:
                raise OpenTokException("Expire time must be in the next 7 days")
        if connection_data is not None:
            if len(connection_data) > 1000:
                raise OpenTokException("Connection data must be less than 1000 characters")
            data_params['connection_data'] = connection_data
        # NOTE(review): random.randint is not cryptographically strong; the
        # nonce only deduplicates tokens, but consider os.urandom if it ever
        # becomes security-relevant.
        data_params['nonce'] = random.randint(0,999999)

        data_string = urllib.urlencode(data_params, True)

        # token = sentinel + base64(partner id, sdk version, HMAC-SHA1 sig, params)
        sig = self._sign_string(data_string, self.api_secret)
        token_string = "%s%s" % (self.TOKEN_SENTINEL, base64.b64encode("partner_id=%s&sdk_version=%s&sig=%s:%s" % (self.api_key, self.SDK_VERSION, sig, data_string)))
        return token_string

    def create_session(self, location='', properties={}, **kwargs):
        """
        Create a new session in the OpenTok API. Returns an OpenTokSession object with a session_id property.

        location: IP address of the user requesting the session. This is used for geolocation to choose which datacenter the session will live on.
        properties: An instance of the SessionProperties object. Fill in the fields that you are interested in to use features of the groups API. Look in the documentation for more details. Also accepts any dict-like object.

        NOTE(review): mutable default `properties={}` is safe only because it
        is never mutated here (params.update copies from it) -- keep it that way.
        """
        #ip_passthru is a deprecated argument and has been replaced with location
        if 'ip_passthru' in kwargs:
            location = kwargs['ip_passthru']
        params = dict(api_key=self.api_key)
        params['location'] = location
        params.update(properties)

        dom = ''
        try:
            dom = self._do_request("/session/create", params)
        except RequestError:
            raise
        except Exception, e:
            raise RequestError("Failed to create session: %s" % str(e) )

        try:
            # the API reports failures inside the XML body, not via HTTP status
            error = dom.getElementsByTagName('error')
            if error:
                error = error[0]
                raise AuthError("Failed to create session (code=%s): %s" % (error.attributes['code'].value, error.firstChild.attributes['message'].value))

            session_id = dom.getElementsByTagName('session_id')[0].childNodes[0].nodeValue
            return OpenTokSession(session_id)
        except Exception, e:
            raise OpenTokException("Failed to generate session: %s" % str(e))

    def _sign_string(self, string, secret):
        """HMAC-SHA1 hex digest of *string* keyed with *secret*."""
        return hmac.new(secret, string.encode("utf-8"), hashlib.sha1).hexdigest()

    def _do_request(self, url, params):
        """POST (or GET when params is empty) to API_URL+url and return the
        parsed XML DOM. Raises RequestError on HTTP errors."""
        import xml.dom.minidom as xmldom

        if '_token' in params: #Do token auth if _token is present, partner auth normally
            auth_header = ('X-TB-TOKEN-AUTH', params['_token'])
            del params['_token']
        else:
            auth_header = ('X-TB-PARTNER-AUTH', "%s:%s" % (self.api_key, self.api_secret))

        method = "POST" if params else "GET"
        data_string = urllib.urlencode(params, True)

        context_source = [
            ('method', method),
            ('Content-Type', 'application-xml'),
            ('Content-Length', len(data_string)),
            auth_header
        ]

        req_string = self.API_URL + url
        try:
            opener = urllib2.build_opener()
            opener.addheaders = context_source
            if data_string:
                request = urllib2.Request(url=req_string, data=data_string)
            else: #GET if no data_string
                request = urllib2.Request(url=req_string)

            try:
                response = opener.open(request, timeout=TIMEOUT)
            except TypeError: #Python2.6 added the timeout keyword, if it doesn't get accepted, try without it
                response = opener.open(request)

            dom = xmldom.parseString(response.read())
            response.close()
        except urllib2.HTTPError, e:
            raise RequestError("Failed to send request: %s" % str(e))

        return dom
| [
"t@tinabeans.com"
] | t@tinabeans.com |
0f48da39bcd7577cbe908aaee672175a9921e808 | fc5e2a7873d39f93300f030eeff7678b7d09918c | /auth_middleware.py | 69158f54dc426f440f2aac2d3e5ce55a33f614c0 | [] | no_license | kubamvictor/mlflow-tracking-template | 357d01d82dd789bfab6fbda5e9ef70bdf58858c2 | 1990459979be28fb0d6495cc22ddc364f19dd2e8 | refs/heads/master | 2022-11-12T00:52:46.453922 | 2020-07-01T18:34:08 | 2020-07-01T18:34:08 | 276,577,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | class AuthMiddleware():
"""
Middleware for authenticating requests sent to a flask app
"""
def __init__(self, app):
self.app = app
self.token = 'YHON36336hhngEYEY'
def __call__(self, environ, start_response):
# Validate token
if self._authenticated(environ.get('HTTP_AUTHORIZATION')):
return self.app(environ, start_response)
return self._login(environ, start_response)
def _authenticated(self, header):
"""
Function to extract and validate user token from request header
argument:
header: request header
output:
boolean: if user is validated this is set to true, otherwise false
"""
try:
if not header:
return False
_, token = header.split(None, 1)
print("User has been validated? {}".format(token == self.token))
return token == self.token
except Exception:
return False
def _login(self, environ, start_response):
start_response(
'401 Authentication Required',
[('Content-Type', 'text/html'), ('WWW-Authenticate', 'Bearer realm="mlflow server"')])
return [b'Authentication Failed! Please check your authorization token or contact system admins.']
| [
"kubamvictor@gmail.com"
] | kubamvictor@gmail.com |
c2527e5e639b13cb1d28b6c112b7544a225ffc45 | d538b6bae84256e241ebdc976bbe1a0d0b4ec7ee | /productDetails/forms.py | 23c107d05e7a99e435f01291b0f3565e22609a3c | [] | no_license | Sangeethsajan/WarehouseManagement | 871b3661c74922f7b524232867e5dc5d7b3dee59 | 30731a7538fa6278e9744c3060aa93f7e37fed79 | refs/heads/master | 2023-02-10T03:59:19.142042 | 2021-01-08T15:23:03 | 2021-01-08T15:23:03 | 327,938,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | from django import forms
from .models import productDetails, productPath
class productForm(forms.ModelForm):
    """ModelForm over productDetails exposing all fields."""
    class Meta:
        model = productDetails
        fields ="__all__"
        # NOTE(review): label keys should be model field names; 'Product Name'
        # looks like a verbose label used as a key -- verify against the model.
        labels ={
            'Product Name':'Name of Product',
        }
    def __init__(self, *args, **kwargs):
        super(productForm,self).__init__(*args, **kwargs)
        # replace the default '---------' placeholder on both FK dropdowns
        self.fields['category_Id'].empty_label ="Select"
        self.fields['manufacturer_Id'].empty_label ="Select"
class pathForm(forms.ModelForm):
    """ModelForm over productPath exposing all fields."""
    class Meta:
        model = productPath
        fields="__all__"
"31395590+Sangeethsajan@users.noreply.github.com"
] | 31395590+Sangeethsajan@users.noreply.github.com |
3dbdb608cd2de3f1278d8f0339287fd5ce40c676 | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/0844-Backspace-String-Compare/soln-1.py | cf9b6afca02d5b7deeaed1a8aa8d927a70cbd4e0 | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 600 | py | class Solution:
def backspaceCompare(self, S, T):
"""
:type S: str
:type T: str
:rtype: bool
"""
i, j = len(S) - 1, len(T) - 1
bs, bt = 0, 0
while True:
while i >= 0 and (bs or S[i] == '#'):
bs = bs + 1 if S[i] == '#' else bs - 1
i -= 1
while j >= 0 and (bt or T[j] == '#'):
bt = bt + 1 if T[j] == '#' else bt - 1
j -= 1
if not(i >= 0 and j >= 0 and S[i] == T[j]):
return i == j == -1
i, j = i - 1, j - 1 | [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
08ab74257fcfe8e582694e17d8f70578c069d383 | f15449e438b0b799a3866ba21243924ce0e4fa2d | /survey/models.py | e6565f3535ec711e92f3831b062f00dd86ac58f5 | [] | no_license | xmduhan/qisite | 46af79d0e4d1af814298862cfaa18c6f7ddf3a74 | 2c9d7513c3e0cd483341dc457a8d289e5e174f20 | refs/heads/master | 2021-01-17T08:44:29.826082 | 2020-02-07T11:22:29 | 2020-02-07T11:22:29 | 14,419,020 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,025 | py | # -*- coding: utf-8 -*-
from __future__ import division
from django.db import models
from django.db.models import F
import account.models
from datetime import datetime
from numstyle import NumStyle, defaultQuestionNumStyle, defaultBranchNumStyle
from django.core.exceptions import ValidationError
from django.core.signing import Signer
import copy
from dateutil.relativedelta import relativedelta
import operator
import re
from jieba.analyse import extract_tags
from qisite.definitions import MAX_TEXT_CONTENT_LENGTH
phonePattern = re.compile(r'^((13[0-9])|(15[^4,\D])|(14[57])|(17[0])|(18[0,0-9]))\d{8}$')
def validate_phone(phone):
if not phonePattern.match(phone):
raise ValidationError(u'phone:手机号码的格式不正确')
class TimeModel(models.Model):
createTime = models.DateTimeField("创建时间", default=datetime.now)
modifyTime = models.DateTimeField("修改时间", default=datetime.now)
class Meta:
abstract = True
class Paper(TimeModel):
def __unicode__(self):
return self.title
# PAPER_STYLE = ( ('F', '平展'), ('P', '分页'))
QUESTION_NUM_STYLE = (('123', '1.2.3.……'), ('(1)(2)(3)', '(1).(2).(3).……'), ('Q1Q2Q3', 'Q1.Q2.Q3.……'))
PAPER_TYPE = (('T', '模板'), ('I', '实例'))
code = models.CharField('编码', max_length=100, blank=True, null=True, default=None) # 用于在测试中找到对象
title = models.CharField('问卷标题', max_length=500)
description = models.CharField('问卷说明', max_length=500, blank=True)
# 题目集 question_set (ok) (已在Question中设置外键引用)
inOrder = models.BooleanField('顺序答题', default=False)
questionNumStyle = models.CharField(
'问题标号样式', max_length=50, choices=QUESTION_NUM_STYLE, default=defaultQuestionNumStyle)
lookBack = models.BooleanField('返回修改', default=False)
# style = models.CharField('展现方式', max_length=5, choices=PAPER_STYLE) #使用paging字段取代
# paging = models.BooleanField('分页答题', default=True) # 正在考虑用step字段取代
step = models.BooleanField('分步答题', default=False)
type = models.CharField('问题类型', choices=PAPER_TYPE, max_length=10, default='T')
survey = models.ForeignKey('Survey', related_name='paperReversed_set', verbose_name="调查", null=True,
blank=True) # 执行调查的反向链接,用于自动删除
createBy = models.ForeignKey(
account.models.User, verbose_name="创建者", related_name='paperCreated_set', blank=True, null=True)
modifyBy = models.ForeignKey(
account.models.User, verbose_name="修改者", related_name='paperModified_set', blank=True, null=True)
# 样本集 sample_set (ok) (已在sample中设置外键引用)
previewSurvey = models.ForeignKey(
'Survey', related_name='paperPreview_set', verbose_name="预览对象", null=True, blank=True, on_delete=models.SET_NULL)
def clean(self):
'''
说明:
1、createBy和modifyBy不能为空的校验放在这里,主要是考虑到我们经常需要创建一些测试用的Paper,如果这两个字段在
定义时就限定死成不能为空,则每次我们都还要多创建一个User,比较麻烦。
'''
if self.createBy is None:
raise ValidationError(u'创建者信息不能为空')
if self.modifyBy is None:
raise ValidationError(u'修改者信息不能为空')
# 处理那些向前跳转的选项
invalidBranchSet = Branch.objects.filter(
question__paper=self, question__ord__gte=F('nextQuestion__ord'))
invalidBranchSet.update(nextQuestion=None)
class Meta:
verbose_name = "问卷"
verbose_name_plural = "[01].问卷"
ordering = ["title"]
def getQuestionSetInOrder(self):
return self.question_set.order_by('ord')
def getNumStyleAvailable(self):
return Paper.QUESTION_NUM_STYLE
def getIdSigned(self):
signer = Signer()
return signer.sign(self.id)
def copy(self, user=None):
'''
拷贝问卷信息
'''
# 拷贝问题对象本身的信息
newPaper = copy.copy(self)
newPaper.createTime = datetime.now()
newPaper.modifyTime = datetime.now()
if user:
newPaper.createBy = user
newPaper.modifyBy = user
newPaper.id = None
newPaper.save()
# 号码问卷的所有问题
questionContrast = {}
for question in self.question_set.all():
newQuestion = question.copy(user)
newQuestion.paper = newPaper
newQuestion.save()
questionContrast[question] = newQuestion
# 将选项指向新拷贝出来的问题
for question in newPaper.question_set.all():
for branch in question.branch_set.all():
if branch.nextQuestion in questionContrast:
branch.nextQuestion = questionContrast[branch.nextQuestion]
branch.save()
return newPaper
def getSampleCount(self):
"""
获取文件采集到的样本数量
"""
return self.sample_set.count()
def createPaperInstance(self, user):
'''
通过一个模板paper创建调查问卷的实例
'''
if self.type != 'T':
raise Exception('非模板Paper对象不能创建Instance')
newPaper = self.copy(user)
newPaper.type = 'I'
newPaper.save()
return newPaper
def isStepNeed(self):
"""
检查文件是否需要分步
"""
count = Branch.objects.filter(question__paper=self, nextQuestion__isnull=False).count()
return count != 0
class PaperCatalog(TimeModel):
    """Hierarchical catalog node grouping papers (self-referencing tree)."""
    name = models.CharField("目录名称", max_length=100)
    code = models.CharField("目录编码", max_length=50, unique=True)
    parent = models.ForeignKey('self', verbose_name="上级目录", blank=True, null=True)
    ord = models.IntegerField("排序号")
    paper_set = models.ManyToManyField(Paper, verbose_name='包含问卷', through='PaperCatalogPaper')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='paperCatalogCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='paperCatalogModified_set')

    class Meta:
        verbose_name = "问卷目录"
        verbose_name_plural = "[02].问卷目录"
class PaperCatalogPaper(TimeModel):
    """Through-model of PaperCatalog<->Paper carrying an ordering number."""
    paperCatalog = models.ForeignKey(PaperCatalog, verbose_name='对应的目录')
    paper = models.ForeignKey(Paper, verbose_name='对应的问卷')
    ord = models.IntegerField("排序号")
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='paperCatalogPaperCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='paperCatalogPaperModified_set')

    class Meta:
        verbose_name = "问卷目录-问卷"
        verbose_name_plural = "[03].问卷目录-问卷"
class Question(TimeModel):
    """One question of a Paper: single/multiple choice, free text or score."""

    QUESTION_TYPE = (
        ('Single', '单选题'), ('Multiple', '多选题'), ('Text', '问答题'), ('Score', '评分题'),
        ('EndValid', '有效结束'), ('EndInvalid', '无效结束')
    )
    # only these types may be persisted; the End* entries are pseudo-questions
    QUESTION_TYPE_AVAILABLE = ('Single', 'Multiple', 'Text', 'Score')
    BRANCH_NUM_STYLE = (('ABC', 'A.B.C.……'), ('abc.', 'a.b.c.……'), ('123.', '1.2.3……'))
    text = models.CharField('文字', max_length=300)
    type = models.CharField('题型', max_length=100, choices=QUESTION_TYPE)
    ord = models.IntegerField("排序号")
    # contentLength = models.IntegerField('内容长度', default=MAX_TEXT_CONTENT_LENGTH) # text questions only; could double as option count limit for multiple choice
    contentLength = models.IntegerField('内容长度', default=0)  # text questions only
    valueMin = models.FloatField('最小值', null=True, blank=True, default=0)  # score questions only
    valueMax = models.FloatField('最大值', null=True, blank=True, default=10)  # score questions only
    # branches: reverse FK branch_set (declared on Branch)
    confused = models.BooleanField('乱序', default=False)
    branchNumStyle = models.CharField('标号样式', max_length=50, choices=BRANCH_NUM_STYLE, default=defaultBranchNumStyle)
    # nextQuestion may be redundant -- kept for compatibility
    nextQuestion = models.ForeignKey('self', verbose_name='下一题', blank=True, null=True, on_delete=models.SET_NULL)
    paper = models.ForeignKey(Paper, verbose_name='所属问卷', null=True, blank=True)
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='questionCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='questionModified_set')

    def clean(self):
        '''
        Model validation: type must be one of the available types and the
        selection-only fields must be consistent with the question type.
        '''
        if self.type not in Question.QUESTION_TYPE_AVAILABLE:
            raise ValidationError(u'无效的问题类型')
        if self.type in ('Single', 'Multiple') and self.contentLength != 0:
            raise ValidationError(u'选择题不能有填写值长度')
        if self.type not in ('Single', 'Multiple') and self.confused:
            raise ValidationError(u'非选择题不能指定乱序选项')

    def setOrd(self, newOrd):
        """
        Move this question to position newOrd; sibling questions are
        renumbered automatically and invalid branch jump targets are
        cleaned up via paper.clean().

        Parameter: newOrd -- the question's new 0-based position.
        """
        paper = Paper.objects.select_for_update().get(id=self.paper.id)
        ord = self.ord
        # lock every question of the paper before reordering
        questionList = list(paper.question_set.select_for_update().order_by('ord'))
        questionCount = len(questionList)
        if newOrd == ord:
            return
        if (newOrd > questionCount - 1) or (newOrd < 0):
            # FIX: raise a meaningful exception instead of a bare Exception()
            # (ValueError is still caught by callers catching Exception)
            raise ValueError(u'newOrd out of range: %s' % newOrd)
        questionList.insert(newOrd, questionList.pop(ord))
        for i, q in enumerate(questionList):
            if q.ord != i:
                q.ord = i
                q.save()
        paper.clean()

    def getStemText(self):
        '''
        Text of the question stem.
        '''
        return self.text

    getStemText.short_description = '题干信息'

    def getBranchSetInOrder(self):
        # branches sorted by their ord field
        return self.branch_set.order_by('ord')

    def getNum(self):
        # the End* pseudo-types display their type label instead of a number
        if self.type in ('EndValid', 'EndInvalid'):
            return self.get_type_display()
        else:
            numStyle = NumStyle(self.paper.questionNumStyle)
            return numStyle.getNum(self.ord)

    def __unicode__(self):
        return u"(%d)(%s)%s" % (self.ord, self.type, unicode(self.text))

    class Meta:
        verbose_name = "问题"
        verbose_name_plural = "[04].问题"
        ordering = ["ord"]

    def getIdSigned(self):
        # tamper-proof id for use in URLs
        signer = Signer()
        return signer.sign(self.id)

    def getScoreStat(self, max=10):
        """
        Score distribution: the `max` most frequent score values and their
        counts, as two parallel tuples (scores, counts).
        """
        querySet = SampleItem.objects.filter(question=self)
        r1 = querySet.values('score').annotate(count=models.Count('score'))
        r2 = {i['score']: i['count'] for i in r1}
        # BUGFIX: the slice was hard-coded to [:10], silently ignoring the
        # `max` parameter; honour it instead.
        r3 = sorted(r2.items(), key=operator.itemgetter(1), reverse=True)[:max]
        r4 = zip(*r3)
        return r4

    def getTextKeywords(self, n=10):
        """
        Top-n keywords extracted (via jieba) from the text answers.
        NOTE(review): assumes every SampleItem.content is a string; a NULL
        content would break the join -- confirm against the save path.
        """
        querySet = SampleItem.objects.filter(question=self)
        text = ' '.join([rec['content'] for rec in querySet.values('content')])
        tags = extract_tags(text, topK=n)
        return tags

    def copy(self, user=None):
        '''
        Copy this question together with all of its branches.
        '''
        # copy the question object itself
        newQuestion = copy.copy(self)
        newQuestion.createTime = datetime.now()
        newQuestion.modifyTime = datetime.now()
        if user:
            newQuestion.createBy = user
            newQuestion.modifyBy = user
        newQuestion.id = None
        newQuestion.save()

        # copy the branches belonging to the question
        for branch in self.branch_set.all():
            newBranch = branch.copy(user)
            newBranch.question = newQuestion
            newBranch.save()

        return newQuestion
class QuestionCatalog(TimeModel):
    """Hierarchical catalog node grouping reusable questions."""
    name = models.CharField("目录名称", max_length=100)
    code = models.CharField("目录编码", max_length=50, unique=True)
    parent = models.ForeignKey('self', blank=True, null=True, verbose_name="上级目录")
    ord = models.IntegerField("排序号")
    question_set = models.ManyToManyField(Question, verbose_name='包含问题', through='QuestionCatalogQuestion')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='questionCatalogCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者",
                                 related_name='questionCatalogModified_set')

    class Meta:
        verbose_name = "问题目录"
        verbose_name_plural = "[05].问题目录"

    def __unicode__(self):
        return '%s(%s)' % (self.name, self.code)
class QuestionCatalogQuestion(TimeModel):
    """Through-model of QuestionCatalog<->Question carrying an ordering number."""
    questionCatalog = models.ForeignKey(QuestionCatalog, verbose_name='对应的目录')
    question = models.ForeignKey(Question, verbose_name='对应的问题')
    ord = models.IntegerField("排序号")
    createBy = models.ForeignKey(
        account.models.User, verbose_name="创建者", related_name='questionCatalogQuestionCreated_set')
    modifyBy = models.ForeignKey(
        account.models.User, verbose_name="修改者", related_name='questionCatalogQuestionModified_set')

    class Meta:
        verbose_name = "问题目录-问题"
        verbose_name_plural = "[06].问题目录-问题"
class Resource(TimeModel):
    """Media resource (picture/audio/video) attached to a question."""
    RESOURCE_TYPE = (('Picture', '图片'), ('Audio', '音频'), ('Video', '视频'))
    # NOTE(review): verbose_name '文字' on the two fields below looks
    # copy-pasted from a text field -- confirm intended labels.
    resourceType = models.CharField('文字', max_length=50, choices=RESOURCE_TYPE)
    resourceUrl = models.CharField('文字', max_length=1000)
    width = models.FloatField("资源宽度")
    height = models.FloatField("资源高度")
    question = models.ForeignKey(Question, verbose_name="对应问题")
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='resourceCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='resourceModified_set')

    class Meta:
        verbose_name = "资源"
        verbose_name_plural = "[08].资源"
class Branch(TimeModel):
    """One selectable option of a question; may define a jump target."""
    text = models.CharField('文字', max_length=200)
    ord = models.IntegerField('排序号')
    nextQuestion = models.ForeignKey(
        # TODO: how to encode outcome info here (end-invalid / end-valid paper)?
        'Question', verbose_name='下个问题', related_name='fromBranch', null=True, blank=True, on_delete=models.SET_NULL)
    question = models.ForeignKey(Question, verbose_name="问题")
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='branchCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='branchModified_set')

    class Meta:
        verbose_name = "题支"
        verbose_name_plural = "[09].题支"

    def getNum(self):
        # render this branch's label in the question's numbering style
        numStyle = NumStyle(self.question.branchNumStyle)
        return numStyle.getNum(self.ord)

    def getReachableQuestionList(self):
        # all questions that come after this branch's question in the paper
        question = self.question
        paper = question.paper
        reachableQuestion = list(paper.question_set.filter(ord__gt=question.ord).order_by('ord'))
        return reachableQuestion

    def getSystemPredefined(self):
        # questions from the system-predefined catalog
        # NOTE(review): [0] raises IndexError if the catalog is missing
        systemPredefinedCatalog = QuestionCatalog.objects.filter(code='SystemPredefined')[0]
        systemPredefined = list(systemPredefinedCatalog.question_set.order_by('ord'))
        return systemPredefined

    def getIdSigned(self):
        # tamper-proof id for use in URLs
        signer = Signer()
        return signer.sign(self.id)

    def copy(self, user=None):
        # shallow-copy this branch as a new row (FKs keep pointing as-is)
        newBranch = copy.copy(self)
        newBranch.createTime = datetime.now()
        newBranch.modifyTime = datetime.now()
        if user:
            newBranch.createBy = user
            newBranch.modifyBy = user
        newBranch.id = None
        newBranch.save()
        return newBranch

    def getSelectedCount(self):
        """
        Number of sample items that selected this branch.
        """
        return self.sampleitem_set.count()

    def getSelectedPct(self):
        """
        Selection percentage of this branch, in the range 0-100;
        None when the paper has no samples yet.
        """
        sampleCount = self.question.paper.sample_set.count()
        if sampleCount == 0:
            return None
        else:
            return self.getSelectedCount() / sampleCount * 100
def oneYearLater():
    """Default Survey end time: exactly one calendar year from now."""
    now = datetime.now()
    return now + relativedelta(years=1)
class Survey(TimeModel):
    """A running survey: a Paper instance plus publishing/participation rules."""
    code = models.CharField('编码', max_length=100, blank=True, null=True, default=None)  # used to locate the object in tests
    paper = models.ForeignKey('Paper', related_name='survey_set', verbose_name="问卷", null=True, blank=True)
    # target customers: reverse FK targetCust_set (declared on TargetCust)
    targetOnly = models.BooleanField('定向调查', default=False)
    custList = models.ForeignKey('CustList', verbose_name='客户清单', blank=True, null=True, default=None)
    state = models.CharField("状态", max_length=5, default='A')
    paused = models.BooleanField('暂停', default=False)
    shared = models.BooleanField('是否分享', default=False)
    viewResult = models.BooleanField('查看结果', default=True)
    # NOTE(review): verbose_name '查看结果' on anonymous (and on pay below)
    # looks copy-pasted from viewResult -- confirm intended labels.
    anonymous = models.BooleanField('查看结果', default=False)
    resubmit = models.BooleanField('是否允许重填', default=True)
    password = models.CharField("参与密码", max_length=10, blank=True)
    ipLimit = models.IntegerField("IP限制", default=5)
    macLimit = models.IntegerField("MAC限制", default=5)
    publishTime = models.DateTimeField("发布时间", default=datetime.now)
    endTime = models.DateTimeField("结束时间", default=oneYearLater)
    # participant constraints: constraints object set (on hold)
    pay = models.BooleanField('查看结果', default=True)
    hardCost = models.FloatField('调查费', default=0)
    bonus = models.FloatField('奖金', default=0)
    fee = models.FloatField('手续费', default=0)
    validSampleLimit = models.IntegerField("有效样本上限", default=0)  # 0 means unlimited
    lastSmsSendTime = models.DateTimeField("最后一次推送短信时间", blank=True, null=True, default=None)
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='surveyCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='surveyModified_set')

    # display helpers: yes/no labels for boolean flags
    def getResubmitText(self):
        return u'是' if self.resubmit else u'否'

    def getVeiwResultText(self):
        return u'是' if self.viewResult else u'否'

    def getAnonymousText(self):
        return u'是' if self.anonymous else u'否'

    def getSharedText(self):
        return u'是' if self.shared else u'否'

    class Meta:
        verbose_name = "调查"
        verbose_name_plural = "[10].调查"

    def getIdSigned(self):
        # tamper-proof id for use in URLs
        signer = Signer()
        return signer.sign(self.id)

    def __unicode__(self):
        if self.custList:
            name = self.custList.name
        else:
            name = 'None'
        return '<%s,%s>' % (self.paper.title, name)
class TargetCust(TimeModel):
    """A targeted respondent of a survey, reachable via a personal token."""
    name = models.CharField('姓名', max_length=50)
    phone = models.CharField('手机号码', max_length=50)
    email = models.CharField('电子邮件', max_length=100)
    defineInfo_set = models.ManyToManyField('DefineInfo', verbose_name='附件信息', blank=True, null=True)
    # sample = models.ForeignKey('Sample', verbose_name='样本')  -- the one-to-one relation is declared on Sample
    token = models.CharField('访问令牌', max_length=50)
    survey = models.ForeignKey(Survey, verbose_name="所属调查", related_name='targetCust_set')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='targetCustCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='targetCustModified_set')

    class Meta:
        verbose_name = "目标客户"
        verbose_name_plural = "[11].目标客户"

    def __unicode__(self):
        return u'<%s,%s>' % (self.name, self.phone)

    def getIdSigned(self):
        # tamper-proof id for use in URLs
        signer = Signer()
        return signer.sign(self.id)
class Sample(TimeModel):
    """One respondent's answer sheet for a Paper (its answers live in SampleItem rows)."""
    # Sample item set: `sampleItems` reverse relation (ok) (foreign key is defined on SampleItem)
    targetCust = models.ForeignKey('TargetCust', verbose_name='清单项', null=True, blank=True)
    # The session field stores the client identity for non-targeted (open) surveys.
    session = models.CharField('客户端会话标识', max_length=40, null=True, blank=True)
    user = models.ForeignKey(account.models.User, verbose_name="参与用户", null=True,
                             blank=True)  # TODO(review): should a related_name be set here? (original note)
    ipAddress = models.CharField('受访IP', max_length=50)
    # macAddress = models.CharField('受访MAC', max_length=50)  -- the web client cannot actually obtain a MAC address
    finished = models.BooleanField('是否完成', default=True)
    # lastQuestion was used for step-by-step answering, recording the last question
    # answered so the session could resume later:
    # lastQuestion = models.ForeignKey('Question', verbose_name='下一题', null=True, blank=True, on_delete=models.SET_NULL)
    # nextQuestion serves the same purpose but stores the *next* question instead;
    # with lastQuestion the answer page had to re-check the previous answer to know
    # where to resume, which was not intuitive.
    nextQuestion = models.ForeignKey('Question', verbose_name='下一题', null=True, blank=True, on_delete=models.SET_NULL)
    isValid = models.BooleanField('是否有效', default=True)
    paper = models.ForeignKey(Paper, verbose_name='所属问卷')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='sampleCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='sampleModified_set')
    class Meta:
        verbose_name = "样本"
        verbose_name_plural = "[12].样本"
class SampleItem(TimeModel):
    """A single answer within a Sample: one question plus the chosen branches and/or free text."""
    question = models.ForeignKey('Question', verbose_name='问题')
    branch_set = models.ManyToManyField(Branch, verbose_name='已选')  # selected answer branches
    content = models.CharField('内容', max_length=MAX_TEXT_CONTENT_LENGTH, blank=True, null=True)  # free-text answer
    score = models.FloatField('得分', default=0)
    sample = models.ForeignKey(Sample, verbose_name='所属样本')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='sampleItemCreated_set',
                                 null=True, blank=True)
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='sampleItemModified_set',
                                 null=True, blank=True)
    class Meta:
        verbose_name = "样本项"
        verbose_name_plural = "[13].样本项"
class CustList(TimeModel):
    """A named, reusable list of customers that a survey can be targeted at."""
    name = models.CharField('清单名称', max_length=50)
    # NOTE(review): 'descrition' is a typo for 'description'; renaming would change
    # the DB column name, so it is preserved.
    descrition = models.CharField('清单说明', max_length=200, blank=True, null=True, default='')
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='custListCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='custListModified_set')
    class Meta:
        verbose_name = "客户清单"
        verbose_name_plural = "[14].客户清单"
    def getIdSigned(self):
        # Tamper-evident id for use in URLs.
        signer = Signer()
        return signer.sign(self.id)
    def __unicode__(self):
        return self.name
class CustListItem(TimeModel):
    """One customer entry within a CustList (name + contact details + optional custom info)."""
    name = models.CharField('客户名称', max_length=50)
    phone = models.CharField('手机号', max_length=50, validators=[validate_phone])  # validated phone number
    email = models.CharField('电子邮件', max_length=100, blank=True, null=True, default='')
    custList = models.ForeignKey(CustList, verbose_name='所属清单', related_name="custListItem_set")
    # NOTE(review): null=True is ignored on ManyToManyField by Django; kept to avoid a migration.
    defineInfo_set = models.ManyToManyField('DefineInfo', verbose_name='附件信息', blank=True, null=True)
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='custListItemCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='custListItemModified_set')
    class Meta:
        verbose_name = "客户清单项"
        verbose_name_plural = "[15].客户清单项"
    def getIdSigned(self):
        # Tamper-evident id for use in URLs.
        signer = Signer()
        return signer.sign(self.id)
    def __unicode__(self):
        return self.name
class DefineInfo(TimeModel):
    """A user-defined name/value attribute that can be attached to customers."""
    name = models.CharField('信息名称', max_length=100)
    value = models.CharField('信息值', max_length=200)
    # NOTE(review): `ord` shadows the builtin; harmless as a class attribute, and
    # renaming would change the DB column, so it is kept.
    ord = models.IntegerField('排序号')  # sort order
    createBy = models.ForeignKey(account.models.User, verbose_name="创建者", related_name='defineInfoCreated_set')
    modifyBy = models.ForeignKey(account.models.User, verbose_name="修改者", related_name='defineInfoModified_set')
    class Meta:
        verbose_name = "自定义信息"
        verbose_name_plural = "[16].自定义信息"
| [
"xmduhan@gmail.com"
] | xmduhan@gmail.com |
4b2f6eff5f5301a631de8ff075d91eec3458cd2e | 7f43804828706cfd3635c1f1d8cfff1c7fc33beb | /2019/day08/day8.py | c42c2ca2f84644ceb98f510cc13bac1a3db0501b | [] | no_license | aldewereld/AventOfCode | b9149a0161d89cd550c53b20a3a42f069b520e41 | 2da755700593e98445736d23b16dfb10483df049 | refs/heads/master | 2020-09-23T08:44:25.553145 | 2019-12-12T13:43:14 | 2019-12-12T13:43:14 | 225,455,849 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | from typing import List, Tuple
from functools import reduce
# Read the puzzle input: one line of digit characters, split into a list of
# single-character strings.
with open("input.txt") as file:
    line = list(file.readline().strip())
def split_layers(input_message: List[str], width: int = 25, height: int = 6) -> List[List[str]]:
    """Split a flat list of pixel characters into image layers.

    Args:
        input_message: flat sequence of pixel characters.
        width: layer width in pixels (default 25, the puzzle image width).
        height: layer height in pixels (default 6, the puzzle image height).

    Returns:
        A list of layers, each a list of width*height pixels.  A trailing
        partial layer is returned as-is when the input length is not an
        exact multiple of width*height (same as the original behavior).
    """
    step = width * height
    # Slice directly at each layer boundary; the original kept redundant
    # start/end counters alongside an unused loop index.
    return [input_message[i:i + step] for i in range(0, len(input_message), step)]
def count(object: int, list: List[int]) -> int:
    """Return the number of occurrences of *object* in *list*.

    Replaces the previous hand-rolled recursion, which re-implemented
    ``list.count`` and could exhaust the recursion limit on long inputs
    (one frame per element).  The parameter names shadow builtins; they
    are kept unchanged for interface compatibility with existing callers.
    """
    return list.count(object)
# Part 1
# Find the layer with the fewest '0' pixels, then report the product of its
# '1' and '2' pixel counts (the puzzle checksum).
layers = split_layers(line)
counted = list(map(lambda x: count('0', x), layers))
least_zeros = min(counted)
index = counted.index(least_zeros)
ones, twos = count('1', layers[index]), count('2', layers[index])
print(ones * twos)
# Part 2
#layers = split_layers(list("0222112222120000"), 2, 2)
def decode_layers(layer1: List[int], layer2: List[int]) -> List[int]:
    """Merge two layers pixel-by-pixel, with *layer1* on top of *layer2*."""
    return [decode_pixel(pair) for pair in zip(layer1, layer2)]
def decode_pixel(pixel_pair: Tuple[int, int]) -> int:
    """Resolve one pixel pair: the upper value wins unless it is transparent ('2')."""
    upper, lower = pixel_pair
    return lower if upper == '2' else upper
# Part 2: collapse all layers top-to-bottom; the first non-transparent pixel wins.
image = reduce(decode_layers, layers)
# Print the decoded image row by row (25 pixels per row, 6 rows).
start = 0
end = 25
for _ in range(6):
    print(image[start:end])
    start, end = end, end+25
"huib.aldewereld@hu.nl"
] | huib.aldewereld@hu.nl |
a9f7a5d7afa2ce9694901a23dc8266394a8b4b54 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/iptvrange_b15e81483d5be2cf30e042843e79b969.py | 1574bc8306f8407f267c617e318f6311dba42fcd | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,452 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class IptvRange(Base):
"""
The IptvRange class encapsulates a list of iptvRange resources that are managed by the user.
A list of resources can be retrieved from the server using the IptvRange.find() method.
The list can be managed by using the IptvRange.add() and IptvRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'iptvRange'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'GeneralQueryResponseMode': 'generalQueryResponseMode',
'ImmediateResponse': 'immediateResponse',
'InterStbStartDelay': 'interStbStartDelay',
'JoinLatencyThreshold': 'joinLatencyThreshold',
'JoinLeaveMultiplier': 'joinLeaveMultiplier',
'LeaveLatencyThreshold': 'leaveLatencyThreshold',
'LogFailureTimestamps': 'logFailureTimestamps',
'Name': 'name',
'ObjectId': 'objectId',
'ReportFrequency': 'reportFrequency',
'RouterAlert': 'routerAlert',
'SpecificQueryResponseMode': 'specificQueryResponseMode',
'StbLeaveJoinDelay': 'stbLeaveJoinDelay',
'UnsolicitedResponseMode': 'unsolicitedResponseMode',
'Version': 'version',
'ViewingProfile': 'viewingProfile',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(IptvRange, self).__init__(parent, list_op)
@property
def IptvChannels(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iptvchannels_7305b62e9ac2aa9f13637cc3a90a716f.IptvChannels): An instance of the IptvChannels class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iptvchannels_7305b62e9ac2aa9f13637cc3a90a716f import IptvChannels
if self._properties.get('IptvChannels', None) is not None:
return self._properties.get('IptvChannels')
else:
return IptvChannels(self)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def GeneralQueryResponseMode(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, responds to General Query messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'])
@GeneralQueryResponseMode.setter
def GeneralQueryResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'], value)
@property
def ImmediateResponse(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
"""
return self._get_attribute(self._SDM_ATT_MAP['ImmediateResponse'])
@ImmediateResponse.setter
def ImmediateResponse(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['ImmediateResponse'], value)
@property
def InterStbStartDelay(self):
# type: () -> int
"""
Returns
-------
- number: Time in milliseconds between Join messages from clients within the same range.
"""
return self._get_attribute(self._SDM_ATT_MAP['InterStbStartDelay'])
@InterStbStartDelay.setter
def InterStbStartDelay(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InterStbStartDelay'], value)
@property
def JoinLatencyThreshold(self):
# type: () -> int
"""
Returns
-------
- number: The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
"""
return self._get_attribute(self._SDM_ATT_MAP['JoinLatencyThreshold'])
@JoinLatencyThreshold.setter
def JoinLatencyThreshold(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['JoinLatencyThreshold'], value)
@property
def JoinLeaveMultiplier(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: The number of times a host sends every Join or Leave message.
"""
return self._get_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'])
@JoinLeaveMultiplier.setter
def JoinLeaveMultiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'], value)
@property
def LeaveLatencyThreshold(self):
# type: () -> int
"""
Returns
-------
- number: The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
"""
return self._get_attribute(self._SDM_ATT_MAP['LeaveLatencyThreshold'])
@LeaveLatencyThreshold.setter
def LeaveLatencyThreshold(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LeaveLatencyThreshold'], value)
@property
def LogFailureTimestamps(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, the timestamps for Join and Leave failures are saved to a log file.
"""
return self._get_attribute(self._SDM_ATT_MAP['LogFailureTimestamps'])
@LogFailureTimestamps.setter
def LogFailureTimestamps(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['LogFailureTimestamps'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def ReportFrequency(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
"""
return self._get_attribute(self._SDM_ATT_MAP['ReportFrequency'])
@ReportFrequency.setter
def ReportFrequency(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['ReportFrequency'], value)
@property
def RouterAlert(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, sets the Send Router Alert bit in the IP header.
"""
return self._get_attribute(self._SDM_ATT_MAP['RouterAlert'])
@RouterAlert.setter
def RouterAlert(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['RouterAlert'], value)
@property
def SpecificQueryResponseMode(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, responds to Group-Specific Query messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'])
@SpecificQueryResponseMode.setter
def SpecificQueryResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'], value)
@property
def StbLeaveJoinDelay(self):
# type: () -> int
"""
Returns
-------
- number: Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
"""
return self._get_attribute(self._SDM_ATT_MAP['StbLeaveJoinDelay'])
@StbLeaveJoinDelay.setter
def StbLeaveJoinDelay(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['StbLeaveJoinDelay'], value)
@property
def UnsolicitedResponseMode(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
"""
return self._get_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'])
@UnsolicitedResponseMode.setter
def UnsolicitedResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'], value)
@property
def Version(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str: IGMP/MLD protocol version.
"""
return self._get_attribute(self._SDM_ATT_MAP['Version'])
@Version.setter
def Version(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Version'], value)
@property
def ViewingProfile(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile): Template describing the behavior of how clients view the lists of channels.
"""
return self._get_attribute(self._SDM_ATT_MAP['ViewingProfile'])
@ViewingProfile.setter
def ViewingProfile(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['ViewingProfile'], value)
def update(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
# type: (bool, bool, bool, int, int, int, int, bool, str, int, bool, bool, int, bool, str, str) -> IptvRange
"""Updates iptvRange resource on the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
- JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
- LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
- Name (str): Name of range
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
- ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
# type: (bool, bool, bool, int, int, int, int, bool, str, int, bool, bool, int, bool, str, str) -> IptvRange
"""Adds a new iptvRange resource on the server and adds it to the container.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
- JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
- LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
- Name (str): Name of range
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
- ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.
Returns
-------
- self: This instance with all currently retrieved iptvRange resources using find and the newly added iptvRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained iptvRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ObjectId=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
# type: (bool, bool, bool, int, int, int, int, bool, str, str, int, bool, bool, int, bool, str, str) -> IptvRange
"""Finds and retrieves iptvRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve iptvRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all iptvRange resources from the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
- JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
- LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
- Name (str): Name of range
- ObjectId (str): Unique identifier for this object
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
- ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.
Returns
-------
- self: This instance with matching iptvRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of iptvRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the iptvRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
def IptvStart(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the iptvStart operation on the server.
Start IPTV on selected plugins and ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
iptvStart(async_operation=bool)
-------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
iptvStart(Arg2=enum, async_operation=bool)
------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/iptv,/vport/protocolStack/atm/dhcpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/iptv,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/iptv,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/iptv,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/iptv,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/atm/ipEndpoint/iptv,/vport/protocolStack/atm/ipEndpoint/range/iptvRange,/vport/protocolStack/atm/p
ppox/dhcpoPppClientEndpoint/range/iptvRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/iptvRange,/vport/protocolStack/atm/pppox/iptv,/vport/protocolStack/atm/pppoxEndpoint/iptv,/vport/protocolStack/atm/pppoxEndpoint/range/iptvRange,/vport/protocolStack/ethernet/dhcpEndpoint/iptv,/vport/protocolStack/ethernet/dhcpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/iptv,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/iptv,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/iptv,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/iptv,/vport/protocolStack/ethernet/ip/l2tp/dh
cpoLacEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ipEndpoint/iptv,/vport/protocolStack/ethernet/ipEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/iptv,/vport/protocolStack/ethernet/pppoxEndpoint/iptv,/vport/protocolStack/ethernet/pppoxEndpoint/range/iptvRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('iptvStart', payload=payload, response_object=None)
def IptvStop(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the iptvStop operation on the server.

    Stop IPTV on selected plugins and ranges.

    The IxNetwork model allows several signatures for iptvStop while Python
    does not: extra positional arguments are forwarded to the server as
    Arg2..ArgN, and keyword arguments (e.g. async_operation=bool, Arg2=enum)
    are forwarded by name.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {"Arg1": self}
    # Positional arguments map onto the server-side Arg2, Arg3, ... slots.
    for slot, value in enumerate(args, start=2):
        payload['Arg%s' % slot] = value
    payload.update(kwargs)
    return self._execute('iptvStop', payload=payload, response_object=None)
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
987811a6ffc6d66edfc20e22d3a6be53f16d4372 | 0b6dade21d327773ab958db6b85af3ef68dfbbe0 | /hello.py | bad42797591e2329f10c59a41eb9f9c0fd568a41 | [
"Apache-2.0"
] | permissive | maharsh-cmput404/cgi-lab | 8e10ba1762dee5650d2911c032b94e1dd3decd46 | b8b5a2bf15b7e689d7507f89e57506ab718e6863 | refs/heads/master | 2020-12-15T19:20:40.657703 | 2020-01-21T00:49:43 | 2020-01-21T00:49:43 | 235,226,103 | 0 | 0 | null | 2020-01-21T00:39:14 | 2020-01-21T00:39:13 | null | UTF-8 | Python | false | false | 202 | py | #!/usr/bin/env python3
import os
import cgi
import cgitb
# Render detailed tracebacks in the browser instead of a bare 500 error.
cgitb.enable()
# CGI response header; the embedded "\n" plus print's newline ends the header
# section, and the extra print() below adds one more (harmless) blank line.
print("Content-Type: text/plain\n")
print()
# print("<!doctype html><title>Hello</title><h2>Hello World</h2>")
# Body: dump the CGI environment mapping for inspection.
print(os.environ)
| [
"patel@maharsh.net"
] | patel@maharsh.net |
9dea79ebe2acef41d229a77657e6b1cf232caf43 | 5215715a4cbcf9ce065b1542db224a2b1997c760 | /T3/t3.py | 5523731288ed9eb2d6e7a4f542b5a35d71a18b89 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | Cgipson06/reddit | 02deac29ead779890e42d48400d2233ce888e5a0 | deb1da398840bbd311a79eec25ef2a8b5a8ed5b1 | refs/heads/master | 2021-01-08T23:19:55.245559 | 2014-12-28T20:52:26 | 2014-12-28T20:52:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,034 | py | #/u/GoldenSights
import praw
import time
import sqlite3
import datetime
import random
# Identifies this script to the reddit API (required by praw).
USERAGENT = """
/u/GoldenSights T3 data collection: Gathering Submission data for
statistical analysis.
More info at https://github.com/voussoir/reddit/tree/master/T3
"""
r = praw.Reddit(USERAGENT)
print('Connected to reddit.')
# Local sqlite store for everything collected; NOTE(review): hard-coded
# Windows path — adjust per machine.
sql = sqlite3.connect('D:/T3/t3.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS meta(label TEXT, data TEXT)')
# 13 columns, in the order documented by the index-map comments below and
# consumed positionally by the Post class.
cur.execute(('CREATE TABLE IF NOT EXISTS posts(idint INT, idstr TEXT, '
             'created INT, self INT, nsfw INT, author TEXT, title TEXT, '
             'url TEXT, selftext TEXT, score INT, subreddit TEXT, distinguish INT, '
             'textlen INT)'))
# Distinguished-state codes stored in the db, and the reverse mapping used
# when converting praw submissions into rows.
DISTINGUISHMAP = {0:"user", 1:"moderator", 2:"admin"}
DISTINGUISHMAP_R = {"user":0, "moderator":1, "admin":2}
# Scan window of submission ids (decimal); base-36 forms noted alongside.
LOWERBOUND = 9999000
# 5yba0
UPPERBOUND = 164790958
# 2q41im
# 2q41im
# 1,679,616 = 10000
# 9,999,000 = 5yba0
# 60,466,176 = 100000
# 120,932,352 = 200000
# 164,790,958 = 2q41im
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
class Post:
    """Wraps one row of the `posts` table so columns can be read by name."""

    # Column order of the `posts` table (see the CREATE TABLE statement).
    _COLUMNS = ('idint', 'idstr', 'created_utc', 'is_self', 'over_18',
                'author', 'title', 'url', 'selftext', 'score',
                'subreddit', 'distinguished', 'textlen')

    def __init__(self, data):
        for position, field in enumerate(self._COLUMNS):
            setattr(self, field, data[position])
        # Integer flags become booleans; the distinguish code becomes its name.
        self.is_self = self.is_self == 1
        self.over_18 = self.over_18 == 1
        self.distinguished = DISTINGUISHMAP[self.distinguished]
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
    """Converts an integer to a base36 string."""
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    prefix = ''
    if number < 0:
        prefix, number = '-', -number
    # Single-digit fast path (also covers 0).
    if number < len(alphabet):
        return prefix + alphabet[number]
    digits = []
    while number:
        number, remainder = divmod(number, len(alphabet))
        digits.append(alphabet[remainder])
    return prefix + ''.join(reversed(digits))
def base36decode(number):
    """Parse a base-36 string back into an int."""
    return int(number, 36)
def b36(i):
    """Two-way base-36 converter: int -> base-36 string, str -> int.

    Raises TypeError for any other argument type instead of silently
    falling through and returning None (the original `type(i) == int`
    checks dropped unsupported inputs without a diagnostic).
    """
    if isinstance(i, int):
        return base36encode(i)
    if isinstance(i, str):
        return base36decode(i)
    raise TypeError('b36 expects an int or a base-36 string')
def human(timestamp):
    """Render a unix timestamp as a human-readable UTC string."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return datetime.datetime.strftime(moment, "%b %d %Y %H:%M:%S UTC")
def process(itemid, log=True, kill=True):
    """Fetch submissions by fullname and store them in the posts table.

    `itemid` may be a single id string or a list of id strings; ids are
    normalised to 't3_' fullnames and already-stored ids are dropped.
    When `log` is False the converted rows are returned instead of written.
    When `kill` is True, ids reddit cannot return are recorded as
    placeholder "dead" rows so they are skipped on retry.
    """
    if isinstance(itemid, str):
        itemid = [itemid]
    if isinstance(itemid, list):
        if isinstance(itemid[0], str):
            itemid = verify_t3(itemid)
    try:
        itemid = remove_existing(itemid)
        # Keep a copy of the requested ids so we can detect partial results.
        temp = itemid[:]
    except Exception:
        # remove_existing raises when nothing new remains — nothing to do.
        return
    itemid = r.get_info(thing_id=itemid)
    try:
        # get_info returns a non-sized object when the lookup failed.
        len(itemid)
    except:
        print(temp, "DEAD")
        if kill:
            # Record the first requested id as dead so the retry below
            # (via remove_existing) skips it instead of looping forever.
            logdead(temp[0])
        process(temp, kill=kill)
        return
    for index in range(len(itemid)):
        item = itemid[index]
        # Convert the praw submission into the 13-column row format.
        item.idint = b36(item.id)
        item.idstr = item.id
        if item.distinguished is None:
            item.distinguished = 0
        else:
            item.distinguished = DISTINGUISHMAP_R[item.distinguished]
        item.url = "self" if item.is_self else item.url
        item.created_utc = int(item.created_utc)
        item.is_self = 1 if item.is_self else 0
        item.over_18 = 1 if item.over_18 else 0
        item.sub = item.subreddit.display_name
        item.textlen = len(item.selftext)
        try:
            item.auth = item.author.name
        except AttributeError:
            # Deleted accounts have no author object.
            item.auth = "[deleted]"
        item = [item.idint, item.idstr, item.created_utc,
                item.is_self, item.over_18, item.auth, item.title,
                item.url, item.selftext, item.score, item.sub,
                item.distinguished, item.textlen]
        itemid[index] = item
    if log:
        logdb(itemid)
    else:
        return itemid
    if len(itemid) < len(temp):
        # Some requested ids were missing from the response — retry them
        # (the ones just stored are filtered out by remove_existing).
        process(temp)
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - self
# 4 - nsfw
# 5 - author
# 6 - title
# 7 - url
# 8 - selftext
# 9 - score
# 10 - subreddit
# 11 - distinguished
# 12 - textlen
def logdb(items):
    """Insert each 13-column row into the posts table, committing once."""
    statement = 'INSERT INTO posts VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
    for row in items:
        cur.execute(statement, row)
    sql.commit()
def logdead(i):
    """Record an unfetchable id as a placeholder row so it is not retried."""
    # If an ID is dead, let's at least add it to the db.
    bare = i.replace('t3_', '')
    placeholder = [b36(bare), bare, 0, 0, 0, '?', '?', '?', '?', 0, '?', 0, 0]
    logdb([placeholder])
def verify_t3(items):
    """Ensure every id in `items` carries the 't3_' fullname prefix.

    The list is modified in place and also returned for convenience.
    """
    for position, name in enumerate(items):
        if 't3_' not in name:
            items[position] = 't3_' + name
    return items
def remove_existing(items):
    """Drop ids that are already stored in the posts table.

    Raises a plain Exception("Nothing new") when every id was already
    present. Returns the (in-place filtered) list otherwise.
    """
    done = False
    items = verify_t3(items)
    # Restart-the-scan pattern: removing from a list while iterating it is
    # unsafe, so after each removal the for-loop is broken and begun again.
    while not done:
        done = True
        for item in items:
            # item[3:] strips the 't3_' prefix guaranteed by verify_t3.
            cur.execute('SELECT * FROM posts WHERE idint=?', [b36(item[3:])])
            f = cur.fetchone()
            if f:
                items.remove(item)
                done = False
                break
        if len(items) == 0:
            raise Exception("Nothing new")
    return items
def processrange(lower, upper, kill=True):
    """Fetch and store every submission id in [lower, upper), 100 at a time.

    Bounds may be base-36 strings or ints; an int `upper` is treated as an
    offset (count) from `lower`.
    """
    if isinstance(lower, str):
        lower = b36(lower)
    if isinstance(upper, int):
        upper = lower + upper
    if isinstance(upper, str):
        upper = b36(upper)
    if upper <= lower:
        print("Upper must be higher than lower")
        return
    pending = [b36(value) for value in range(lower, upper)]
    while len(pending) > 0:
        batch = pending[:100]
        print("%s >>> %s (%d)" % (batch[0], batch[-1], len(pending)))
        pending = pending[100:]
        process(batch, kill=kill)
def lastitem():
    """Return the base-36 id (idstr column) of the highest-id stored post."""
    cur.execute('SELECT * FROM posts ORDER BY idint DESC LIMIT 1')
    return cur.fetchone()[1]
def show():
    """Write collection statistics to show/*.txt and refresh README.md.

    Produces: show/missing.txt (ids recorded as dead), show/stats.txt
    (per-subreddit counts), and rewrites README.md lines 4-5 with totals.
    """
    filea = open('show/missing.txt', 'w')
    fileb = open('show/stats.txt', 'w')
    cur.execute('SELECT Count(*) FROM posts')
    count = cur.fetchone()
    count = count[0]
    # '{0:,}' renders with thousands separators, e.g. 1,234,567.
    counts = '{0:,}'.format(count)
    mainstats = '%s posts collected; ' % counts
    print('Current total:', counts)
    print('Counting dead posts')
    # Dead placeholder rows are stored with created=0 (see logdead).
    cur.execute('SELECT * FROM posts WHERE created=0')
    dead = cur.fetchall()
    dead = [x[1] for x in dead]
    deadcount = len(dead)
    deadcount = '{0:,}'.format(deadcount)
    mainstats += '%s dead.\n' % deadcount
    for deaditem in dead:
        print(deaditem, file=filea)
    filea.close()
    print('Counting selfposts')
    cur.execute('SELECT * FROM posts WHERE self=1')
    # NOTE(review): `self` here is just a (poorly named) local variable;
    # this is a module-level function, not a method.
    self = cur.fetchall()
    self = len(self)
    link = count-self
    selfs = '{0:,}'.format(self)
    links = '{0:,}'.format(link)
    selfstats = '%s linkposts; %s selfposts\n' % (links, selfs)
    readmefile = open('README.md', 'r')
    readmelines = readmefile.readlines()
    readmefile.close()
    # README lines 4 and 5 (0-indexed 3 and 4) hold the stat lines.
    readmelines[3] = mainstats
    readmelines[4] = selfstats
    readmefile = open('README.md', 'w')
    readmefile.write(''.join(readmelines))
    readmefile.close()
    #STATS TIME
    print('Writing subreddit stats')
    cur.execute('SELECT * FROM posts')
    subredditcounts = {}
    # Stream rows one at a time to avoid loading the whole table at once.
    while True:
        fetch = cur.fetchone()
        if fetch:
            fetch = Post(fetch)
            try:
                subredditcounts[fetch.subreddit] += 1
            except KeyError:
                subredditcounts[fetch.subreddit] = 1
        else:
            break
    subkeys = list(subredditcounts.keys())
    subkeys.sort(key=subredditcounts.get, reverse=True)
    # Dot-padded columns: name left-padded to 25, count right-aligned to 14.
    for key in subkeys:
        out = key
        out += '.'*(25-len(key))
        num = '{0:,}'.format(subredditcounts[key])
        out += '.'*(14-len(num))
        out += num
        print(out, file=fileb)
    fileb.close()
"edalool@yahoo.com"
] | edalool@yahoo.com |
668403015d3558f567d49b9ab11c48be8202d6a0 | 69aaa71852bde5b2a4d297b6814ca0bf607053c8 | /jarvis.py | 6d46a86f112682ac4f06fe08a9938d84af0c382b | [] | no_license | sckzy/JarvisAI | cf36bf827a06faeb05a2a26d4f3a21dfd0eaa44c | 147b62cb2002b099865c10b2be023e0147fd96a1 | refs/heads/master | 2023-01-22T09:34:33.070981 | 2020-12-03T04:39:40 | 2020-12-03T04:39:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,217 | py | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
def wishMe():
    """Greet the user by time of day and announce the assistant's status."""
    hour = datetime.datetime.now().hour
    if hour < 12:
        speak("Good Morning!")
    elif hour < 18:
        speak("Good Afternoon!")
    else:
        speak("Good Evening!")
    greeting = "I am Jarvis Sir. Please tell me how may I help you"
    print(greeting)
    speak(greeting)
    print(
        "System Info: system-started\nConnection to Internet: Connection Succeeded\nConnecting to Stelite No. 74: Done\n"
    )
    clock = datetime.datetime.now().strftime("%H:%M:%S")
    print("Status: time = ", clock)
    print("System Speed: 1Tb/s")
    print("Internet Speed: 1Gbps")
# Initialise the text-to-speech engine on the Windows SAPI5 backend.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# print(voices[0].id)
# Select voice 0 — presumably the system default voice; TODO confirm.
engine.setProperty('voice', voices[0].id)
def speak(audio):
    """Speak the given text synchronously through the module-level TTS engine."""
    engine.say(audio)
    engine.runAndWait()
def closecmd():
    """Force-kill every Chrome process (Windows-only; shells out to TASKKILL)."""
    os.system('TASKKILL /F /IM chrome.exe')
def takeCommand():
    """Listen on the microphone and return the recognised utterance.

    Uses Google's speech recognition with the 'en-in' locale. Returns the
    recognised text, or the literal string "None" when recognition fails
    (callers test the returned string, not the None object).
    """
    #It takes microphone input from the user and returns string output
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Waiting for command Input...")
        # Seconds of silence that mark the end of a phrase.
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Identifying Command Input...")
        query = r.recognize_google(audio, language='en-in')
        print(f"Voice Input : {query}\n")
    except Exception as e:
        # print(e)
        print("Sir please say that again I couldnt get It")
        return "None"
    return query
def sendEmail(to, content):
    """Send `content` to address `to` via Gmail SMTP (STARTTLS, port 587).

    Credentials are placeholders and must be replaced before use.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.login('your_email_addreess@gmail.com', 'password')
    # Fixed: the sender was written as bare identifiers
    # (`dhangarjayashree51 @ gmail.com`), which raised NameError at runtime;
    # it must be a string, matching the login address.
    server.sendmail('your_email_addreess@gmail.com', to, content)
    server.close()
def multiplication():
    """Ask for two numbers by voice and speak their product.

    Raises ValueError if the recognised speech is not an integer.
    """
    speak("PLease Enter the first Number")
    print("PLease Enter the first Number")
    a = int(takeCommand())
    speak("PLease Enter the second Number")
    print("PLease Enter the Second Number")
    b = int(takeCommand())
    product = a * b
    # Fixed misspelled output message: was "The prduct is".
    print("The product is ", product)
    speak(f"The answer is {product}")
def add():
    """Ask for two numbers by voice and speak their sum.

    Raises ValueError if the recognised speech is not an integer.
    """
    speak("PLease Enter the first Number")
    print("PLease Enter the first Number")
    a = int(takeCommand())
    speak("PLease Enter the second Number")
    print("PLease Enter the Second Number")
    b = int(takeCommand())
    # Renamed local from `sum` (shadowed the builtin) to `total`.
    total = a + b
    # Fixed output message: it previously announced "The prduct is" for a sum.
    print("The sum is ", total)
    speak(f"The answer is {total}")
def minus():
    """Ask for two numbers by voice and speak their difference.

    Raises ValueError if the recognised speech is not an integer.
    """
    speak("Please Enter first number")
    print("Please Enter first number")
    a = int(takeCommand())
    speak("Please Enter second number")
    print("Please Enter second number")
    # Fixed: the second number was never read, so `a - b` raised NameError.
    b = int(takeCommand())
    diff = a - b
    print("The difference of the numbers is ", diff)
    # Fixed misspelled spoken message: was "The differnce is".
    speak(f"The difference is {diff}")
def devide():
    """Ask for two numbers by voice and speak their quotient.

    Raises ValueError on non-integer speech and ZeroDivisionError if the
    second number is zero.
    """
    speak("Please Enter first number")
    print("Please Enter first number")
    a = int(takeCommand())
    speak("Please Enter second number")
    print("Please Enter second number")
    # Fixed: the second number was never read, so `a / b` raised NameError.
    b = int(takeCommand())
    que = a / b
    # Fixed output messages: they previously said "difference" for a quotient.
    print("The quotient of the numbers is ", que)
    speak(f"The quotient is {que}")
if __name__ == "__main__":
    wishMe()
    # Main command loop: listen, match a keyword, dispatch. All paths are
    # Windows-specific (os.startfile, TASKKILL, hard-coded user paths).
    while True:
        query = takeCommand().lower()
        #Logic for executing commands
        if 'wikipedia' in query:
            speak('Searching Wikipedia...')
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=2)
            speak("According to Wikipedia")
            speak(results)
        elif 'open youtube' in query:
            webbrowser.open("youtube.com")
        elif 'open google' in query:
            webbrowser.open("google.com")
        elif 'open facebook' in query:
            webbrowser.open("facebook.com")
        elif 'open my website' in query:
            webbrowser.open("https://codewithdhruva.github.io/MyWebsite/")
        elif 'play music' in query:
            music_dir = 'C:\\Users\\Prakash Dhangar\\Desktop\\My Projects\\Jarvis AI\\music'
            songs = os.listdir(music_dir)
            print(songs)
            os.startfile(os.path.join(music_dir, songs[0]))
        elif 'close' in query:
            speak("What should I close")
            print("Waiting for the answer")
            # NOTE(review): the target is matched against the SAME utterance
            # (e.g. "close browser"); no fresh command is captured here.
            if 'command prompt' in query:
                os.system('TASKKILL /F /IM cmd.exe')
            elif 'browser' in query:
                os.system('TASKKILL /F /IM chrome.exe')
            elif 'Browser' in query:
                # Unreachable: query was lower-cased above; kept for parity.
                os.system('TASKKILL /F /IM chrome.exe')
            elif 'code' in query:
                # Fixed: was os.systen(...), which raised AttributeError.
                os.system('TASKKILL /F /IM code.exe')
        elif 'the time' in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"Sir the time is {strTime}")
        elif 'open code' in query:
            print("Opening Code")
            speak("Opening Code")
            codePath = "C:\\Users\\Prakash Dhangar\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
            os.startfile(codePath)
        elif 'open idea' in query:
            print("Opening IntelliJ Idea")
            speak("Opening IntelliJ Idea")
            ideaPath = "C:\\Program Files\\JetBrains\\IntelliJ IDEA Community Edition 2020.1.2\\bin\\idea64.exe"
            os.startfile(ideaPath)
        elif 'open studio' in query:
            print("Opening Android Studio")
            speak("Opening Android Studio")
            studioPath = "C:\\Program Files\\Android\\Android Studio\\bin\\studio64.exe"
            os.startfile(studioPath)
        elif 'play upgrade' in query:
            upgradePath = "C:\\Users\\Prakash Dhangar\\Desktop\\Programms\\Upgrade.mkv"
            speak("Playing Upgrade")
            os.startfile(upgradePath)
        elif 'send mail to prakash' in query:
            try:
                speak("What should I say?")
                content = takeCommand()
                to = "itworldcs@gmail.com"
                sendEmail(to, content)
                speak("Email has been sent!")
            except Exception as e:
                print(e)
                speak("Sorry I couldnt send this Email")
        elif 'thank you' in query:
            speak(
                "At your service I am always there. I am glad that I could help you."
            )
        elif 'open teams' in query:
            # Fixed shortcut extension: was ".ink"; Windows shortcuts use ".lnk".
            teamsPath = "C:\\Users\\Prakash Dhangar\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Microsoft Teams.lnk"
            speak("Starting Teams")
            os.startfile(teamsPath)
        elif 'play old music' in query:
            webbrowser.open("https://youtu.be/kJIidWqWjUs?t=115")
        elif 'multiply' in query:
            multiplication()
        elif 'sum of' in query:
            add()
        elif 'devide' in query:
            devide()
        elif 'minus' in query:
            minus()
        elif 'sleep' in query:
            break
"noreply@github.com"
] | sckzy.noreply@github.com |
829fb7ef3ac2065f715fbf5de042422456c84678 | 0fba7a95a16bd8351ccfdf806dde5f48f1e0b137 | /localsite/migrations/0031_auto__add_field_seatlocation_hallscheme.py | 2af37f723707ba51878ebeb9ab87092cba79eda7 | [] | no_license | yanchenko-igor/shop-ticket | 3e36c2c664241391fc02e90d239ff9458e601209 | 0093a3c1eb8025f480ecf67692ad1340ddf38296 | refs/heads/master | 2020-05-21T12:13:18.652894 | 2011-12-02T15:00:44 | 2011-12-02T15:00:44 | 3,631,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,138 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the `hallscheme` FK column to localsite_seatlocation.

        default=1 backfills existing rows with HallScheme pk 1;
        keep_default=False drops that default from the schema afterwards.
        """
        # Adding field 'SeatLocation.hallscheme'
        db.add_column('localsite_seatlocation', 'hallscheme', self.gf('django.db.models.fields.related.ForeignKey')(default=1, related_name='seats', to=orm['localsite.HallScheme']), keep_default=False)
    def backwards(self, orm):
        """Revert: drop the `hallscheme` FK column from localsite_seatlocation."""
        # Deleting field 'SeatLocation.hallscheme'
        db.delete_column('localsite_seatlocation', 'hallscheme_id')
models = {
'localsite.announcement': {
'Meta': {'ordering': "['ordering', 'begin']", 'object_name': 'Announcement'},
'begin': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'announcements'", 'to': "orm['localsite.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'localsite.city': {
'Meta': {'ordering': "['ordering', 'name']", 'object_name': 'City'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '25', 'db_index': 'True'})
},
'localsite.event': {
'Meta': {'ordering': "['min_date', 'max_date']", 'object_name': 'Event'},
'hallscheme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': "orm['localsite.HallScheme']"}),
'max_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'max_price': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'min_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'}),
'tags': ('tagging.fields.TagField', [], {})
},
'localsite.eventdate': {
'Meta': {'ordering': "['datetime', 'event']", 'unique_together': "(('event', 'datetime'),)", 'object_name': 'EventDate'},
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dates'", 'to': "orm['localsite.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'localsite.hall': {
'Meta': {'ordering': "['city', 'name']", 'unique_together': "(('name', 'city'),)", 'object_name': 'Hall'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'halls'", 'to': "orm['localsite.City']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
'localsite.hallscheme': {
'Meta': {'ordering': "['hall', 'name']", 'unique_together': "(('name', 'hall'),)", 'object_name': 'HallScheme'},
'hall': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'schemes'", 'to': "orm['localsite.Hall']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'substrate': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'localsite.seatgroup': {
'Meta': {'ordering': "['hallscheme', 'name']", 'unique_together': "(('hallscheme', 'name'), ('hallscheme', 'slug'))", 'object_name': 'SeatGroup'},
'hallscheme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['localsite.HallScheme']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'localsite.seatgroupprice': {
'Meta': {'ordering': "['event', 'group']", 'unique_together': "(('group', 'event'),)", 'object_name': 'SeatGroupPrice'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': "orm['localsite.Event']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': "orm['localsite.SeatGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'localsite.seatlocation': {
'Meta': {'ordering': "['section', 'row', 'col']", 'unique_together': "(('section', 'row', 'col'),)", 'object_name': 'SeatLocation'},
'col': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seats'", 'to': "orm['localsite.SeatGroup']"}),
'hallscheme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seats'", 'to': "orm['localsite.HallScheme']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'row': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seats'", 'to': "orm['localsite.SeatSection']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'localsite.seatsection': {
'Meta': {'ordering': "['hallscheme', 'name']", 'unique_together': "(('hallscheme', 'name'),)", 'object_name': 'SeatSection'},
'hallscheme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['localsite.HallScheme']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'localsite.ticket': {
'Meta': {'ordering': "['datetime', 'event']", 'unique_together': "(('product', 'datetime', 'seat'), ('event', 'datetime', 'seat'))", 'object_name': 'Ticket'},
'datetime': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tickets'", 'to': "orm['localsite.EventDate']"}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tickets'", 'to': "orm['localsite.Event']"}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'}),
'seat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localsite.SeatLocation']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'freely'", 'max_length': '8'})
},
'product.category': {
'Meta': {'ordering': "['site', 'parent__id', 'ordering', 'name']", 'unique_together': "(('site', 'slug'),)", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child'", 'null': 'True', 'to': "orm['product.Category']"}),
'related_categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_categories_rel_+'", 'null': 'True', 'to': "orm['product.Category']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'product.product': {
'Meta': {'ordering': "('site', 'ordering', 'name')", 'unique_together': "(('site', 'sku'), ('site', 'slug'))", 'object_name': 'Product'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'also_purchased': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'also_purchased_rel_+'", 'null': 'True', 'to': "orm['product.Product']"}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'height_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_in_stock': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
'length': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'length_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_items_rel_+'", 'null': 'True', 'to': "orm['product.Product']"}),
'shipclass': ('django.db.models.fields.CharField', [], {'default': "'DEFAULT'", 'max_length': '10'}),
'short_description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'taxClass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.TaxClass']", 'null': 'True', 'blank': 'True'}),
'taxable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'total_sold': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
'weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'weight_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'width_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
'product.taxclass': {
'Meta': {'object_name': 'TaxClass'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['localsite']
| [
"yanchenko.igor@gmail.com"
] | yanchenko.igor@gmail.com |
79697ef0b2a734acad1ab51ba409899d1f512b1f | ad314542195e2eb8ae2ba343a47a27a530f5969d | /utils/label_plot.py | 5202c7398090e2de686bc1961839e000b2d73a44 | [] | no_license | vivianyaya/SHL2021 | 183ad722f8aa34dab93c4fb5cb07f7a1f1473a35 | d610ba2b2134a915e617c81d5e24c31acac3b3c3 | refs/heads/main | 2023-06-04T03:10:05.535868 | 2021-06-25T03:19:10 | 2021-06-25T03:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import seaborn as sns
plt.style.use("dark_background")
label_dic = {1: 'Still', 2: 'Walking', 3: 'Run', 4: 'Bike', 5: 'Car', 6: 'Bus', 7: 'Train', 8: 'Subway'}
cmap = mpl.cm.get_cmap('viridis', 8)
label_cols = cmap(np.linspace(0, 1, 8))
label_cols = [mpl.colors.to_hex(i) for i in label_cols]
def plot_label_each(df, col_name, this_label):
label_index = np.where(df.label == this_label)[0]
p = plt.scatter(df.index[label_index], df[col_name][label_index], c = label_cols[this_label - 1], label = label_dic[this_label])
return p
def plot_label(df, col_name):
plt.figure(figsize = [12, 8])
for this_label in np.unique(list(df.label)):
plot_label_each(df, col_name, this_label)
plt.legend(loc = 'best')
def box_label(df, col_name, use_log = False, violin = False):
plt.figure(figsize = [12, 8])
y = np.log(df[col_name]+1) if use_log else df[col_name]
if violin:
sns.violinplot(x = df['label'], y = y)
else:
sns.boxplot(x = df['label'], y = y)
plt.xticks(range(8), list(label_dic.values())) | [
"1609532549@qq.com"
] | 1609532549@qq.com |
50eb91f0fbd18a59b7eb5e494dc487cac5caadee | 821985c62bf469b72a7d451560a455aefe2c7bd0 | /A、机器学习/机器学习实践/RandomForest/4.RandomForestClassifier_Enum_iris.py | bc292c16243930d4ddca12758d85363a76bc9f66 | [] | no_license | FangChao1086/Machine_Learning | 625106c977055328368100dae30f00909aff590c | 603900b510a9a64f4bb6fcc7753546b34828e022 | refs/heads/master | 2020-04-16T13:20:53.675722 | 2020-03-14T03:33:21 | 2020-03-14T03:33:21 | 165,622,654 | 14 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,770 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.ensemble import RandomForestClassifier
def iris_type(s):
it = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
return it[s]
# 'sepal length', 'sepal width', 'petal length', 'petal width'
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'
if __name__ == "__main__":
mpl.rcParams['font.sans-serif'] = [u'SimHei'] # 黑体 FangSong/KaiTi
mpl.rcParams['axes.unicode_minus'] = False
path = 'DATASET_iris.data' # 数据文件路径
data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type})
x_prime, y = np.split(data, (4,), axis=1)
feature_pairs = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
plt.figure(figsize=(10, 9), facecolor='#FFFFFF')
for i, pair in enumerate(feature_pairs):
# 准备数据
x = x_prime[:, pair]
# 随机森林
clf = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=3)
rf_clf = clf.fit(x, y.ravel())
# 画图
N, M = 50, 50 # 横纵各采样多少个值
x1_min, x1_max = x[:, 0].min(), x[:, 0].max() # 第0列的范围
x2_min, x2_max = x[:, 1].min(), x[:, 1].max() # 第1列的范围
t1 = np.linspace(x1_min, x1_max, N)
t2 = np.linspace(x2_min, x2_max, M)
x1, x2 = np.meshgrid(t1, t2) # 生成网格采样点
x_test = np.stack((x1.flat, x2.flat), axis=1) # 测试点
# 训练集上的预测结果
y_hat = rf_clf.predict(x)
y = y.reshape(-1)
c = np.count_nonzero(y_hat == y) # 统计预测正确的个数
print('特征: ', iris_feature[pair[0]], ' + ', iris_feature[pair[1]],)
print('预测正确数目:', c,)
print('准确率: %.2f%%' % (100 * float(c) / float(len(y))))
# 显示
cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
y_hat = rf_clf.predict(x_test) # 预测值
y_hat = y_hat.reshape(x1.shape) # 使之与输入的形状相同
plt.subplot(2, 3, i+1)
plt.pcolormesh(x1, x2, y_hat, cmap=cm_light) # 预测值
plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', cmap=cm_dark) # 样本
plt.xlabel(iris_feature[pair[0]], fontsize=14)
plt.ylabel(iris_feature[pair[1]], fontsize=14)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.grid()
plt.tight_layout(2.5)
plt.subplots_adjust(top=0.92)
plt.suptitle(u'随机森林对鸢尾花数据的两特征组合的分类结果', fontsize=18)
plt.show()
| [
"“fangchao1086@163.com"
] | “fangchao1086@163.com |
09e6ba57f63d8ca9d88d39ff34881ab5d048bc96 | e823bc36af457f229f6879d6e6a3ef6247c129aa | /virtualenv/Lib/site-packages/pyasn1_modules/rfc2560.py | e41994abf64813e9eb5f70b1dd1b03519a320ed1 | [
"MIT"
] | permissive | William-An/DFB_Final | e772fa979c41f2f83a4bf657cde499456215fb3b | 49a9244c98116574676992ebecd1d9435e1d5b1e | refs/heads/master | 2022-11-07T15:47:36.189057 | 2017-07-22T01:01:37 | 2017-07-22T01:01:43 | 97,426,562 | 1 | 1 | MIT | 2022-10-15T02:45:57 | 2017-07-17T02:21:42 | Python | UTF-8 | Python | false | false | 8,251 | py | #
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
# OCSP request/response syntax
#
# Derived from a minimal OCSP library (RFC2560) code written by
# Bud P. Bruegger <bud@ancitel.it>
# Copyright: Ancitel, S.p.a, Rome, Italy
# License: BSD
#
#
# current limitations:
# * request and response works only for a single certificate
# * only some values are parsed out of the response
# * the request does't set a nonce nor signature
# * there is no signature validation of the response
# * dates are left as strings in GeneralizedTime format -- datetime.datetime
# would be nicer
#
from pyasn1.type import tag, namedtype, namedval, univ, useful
from pyasn1_modules import rfc2459
# Start of OCSP module definitions
# This should be in directory Authentication Framework (X.509) module
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8),
('privilegeWithdrawn', 9),
('aACompromise', 10)
)
# end of directory Authentication Framework (X.509) module
# This should be in PKIX Certificate Extensions module
class GeneralName(univ.OctetString):
pass
# end of PKIX Certificate Extensions module
id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
class AcceptableResponses(univ.SequenceOf):
componentType = univ.ObjectIdentifier()
class ArchiveCutoff(useful.GeneralizedTime):
pass
class UnknownInfo(univ.Null):
pass
class RevokedInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class CertID(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('issuerNameHash', univ.OctetString()),
namedtype.NamedType('issuerKeyHash', univ.OctetString()),
namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
)
class CertStatus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('good',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('revoked',
RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('unknown',
UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class SingleResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certID', CertID()),
namedtype.NamedType('certStatus', CertStatus()),
namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class KeyHash(univ.OctetString):
pass
class ResponderID(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('byName',
rfc2459.Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('byKey',
KeyHash().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class Version(univ.Integer):
namedValues = namedval.NamedValues(('v1', 0))
class ResponseData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('responderID', ResponderID()),
namedtype.NamedType('producedAt', useful.GeneralizedTime()),
namedtype.NamedType('responses', univ.SequenceOf(SingleResponse())),
namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class BasicOCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsResponseData', ResponseData()),
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(rfc2459.Certificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class ResponseBytes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseType', univ.ObjectIdentifier()),
namedtype.NamedType('response', univ.OctetString())
)
class OCSPResponseStatus(univ.Enumerated):
namedValues = namedval.NamedValues(
('successful', 0),
('malformedRequest', 1),
('internalError', 2),
('tryLater', 3),
('undefinedStatus', 4), # should never occur
('sigRequired', 5),
('unauthorized', 6)
)
class OCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseStatus', OCSPResponseStatus()),
namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Request(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('reqCert', CertID()),
namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Signature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(rfc2459.Certificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class TBSRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('requestList', univ.SequenceOf(Request())),
namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class OCSPRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsRequest', TBSRequest()),
namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
| [
"China_Aisa@live.com"
] | China_Aisa@live.com |
1dd9830ae74fa5d06a572e1f0b7f6445fd3ae66c | bb27630e7af7f1bccbb5cfc892d0b0e6445fe874 | /05_django/01_djangoIntro/django_formTest/formTestApp/apps.py | f318035ab1e56dfebb9f0f7f047dd9a3a89e5c53 | [] | no_license | MrBreakIT/pythonStack | fe8cd9418ee1060ada8cd1c446332d81facecf4e | f615436dbff581c50ded70dec6532f6339977c1d | refs/heads/main | 2023-02-06T14:37:05.688885 | 2020-12-27T22:37:09 | 2020-12-27T22:37:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class FormtestAppConfig(AppConfig):
name = 'formTestApp'
| [
"johnpike1022@gmail.com"
] | johnpike1022@gmail.com |
4b26d689090b162798e9ad7184c3ae1a58f20af4 | 67113383dcc2a9484a82cfc6de3e1546d1000a13 | /accounts/filter.py | da704192a0f60f8d439fdbe3b7eee7521b809bba | [] | no_license | Yair-BD/Customer-Managment | 4ef703f6562b88875086425d8509ba8aa363c0cd | a92fb7341d5383da2e8f15818133b955174ce05b | refs/heads/master | 2023-06-26T07:16:41.855959 | 2021-07-27T11:48:06 | 2021-07-27T11:48:06 | 382,893,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | import django_filters
from .models import *
from django_filters import DateFilter, CharFilter
class OrderFilter(django_filters.FilterSet):
start_date = DateFilter(field_name='date_create', lookup_expr='gte')
end_date = DateFilter(field_name='date_create', lookup_expr='lte')
note = CharFilter(field_name='note', lookup_expr='icontains') # נון את היכולת לחפש ביותר מדוייק
class Meta:
model = Order
fields = '__all__'
exclude = ['customer', 'date_create', 'date_update' ] # ברירת משתתפים בחיפוש | [
"79565335+Yair-BD@users.noreply.github.com"
] | 79565335+Yair-BD@users.noreply.github.com |
bb146c14c39b06484ad348ceeed461644b34d13c | df3853b41ed05d86f5bcd992fcc265f637c67784 | /graph/blini1.py | a90284faa70677f0bfa9cbcff1a51bda97d6ec7c | [] | no_license | KseniaMIPT/Adamasta | 6ab0121519581dbbbf6ae788d1da85f545f718d1 | e91c34c80834c3f4bf176bc4bf6bf790f9f72ca3 | refs/heads/master | 2021-01-10T16:48:31.141709 | 2016-11-23T21:02:25 | 2016-11-23T21:02:25 | 43,350,507 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | def check():
for i in range(5):
yield i+1
print(check())
for i in check():
print(i)
| [
"ksenia22.11@yandex.ru"
] | ksenia22.11@yandex.ru |
13c52a06183456edede58129114517b459640d65 | 879e9f1d5ef7a48a33dc7851c7ebf8bddf8cfc2b | /apistar/backends/django_orm.py | f2ddd2f99439b61c8b22148bbced29eaa47f06a9 | [
"BSD-3-Clause"
] | permissive | mehrdadmhd/apistar | ed3dcd1f1a86ddfc602421f5f720009a535cd841 | 5cd30d380457b870961c32fad394745778d08a1c | refs/heads/master | 2021-04-09T16:04:22.655897 | 2018-03-18T11:09:40 | 2018-03-18T11:09:40 | 125,716,342 | 0 | 0 | BSD-3-Clause | 2018-03-18T11:06:28 | 2018-03-18T11:06:28 | null | UTF-8 | Python | false | false | 2,275 | py | import contextlib
import sys
import typing
import django
from django.apps import apps
from django.conf import settings as django_settings
from django.core.management import call_command
from django.db import connections, transaction
from apistar import Command, Component, Settings
class DjangoORM(object):
def __init__(self, settings: Settings) -> None:
config = {
'INSTALLED_APPS': settings.get('INSTALLED_APPS', []),
'DATABASES': settings.get('DATABASES', {}),
'AUTH_USER_MODEL': settings.get('AUTH_USER_MODEL', 'auth.User')
}
django_settings.configure(**config)
django.setup()
self.models = {
model.__name__: model
for model in apps.get_models()
}
class Session(object):
def __init__(self, orm: DjangoORM) -> None:
for name, model in orm.models.items():
setattr(self, name, model)
@contextlib.contextmanager
def get_session(backend: DjangoORM) -> typing.Generator[Session, None, None]:
"""
Create a new context-managed database session, which automatically
handles atomic rollback or commit behavior.
Args:
backend: The configured database backend.
"""
for conn in connections.all():
conn.queries_log.clear()
conn.close_if_unusable_or_obsolete()
atomic = transaction.Atomic(using=None, savepoint=True)
atomic.__enter__()
try:
yield Session(backend)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
atomic.__exit__(exc_type, exc_value, exc_traceback)
raise
exc_type, exc_value, exc_traceback = (None, None, None)
atomic.__exit__(exc_type, exc_value, exc_traceback)
def flush(): # pragma: nocover
call_command('flush', '--no-input')
def makemigrations(): # pragma: nocover
call_command('makemigrations')
def migrate():
call_command('migrate')
def showmigrations(): # pragma: nocover
call_command('showmigrations')
components = [
Component(DjangoORM),
Component(Session, init=get_session, preload=False)
]
commands = [
Command('flush', flush),
Command('makemigrations', makemigrations),
Command('migrate', migrate),
Command('showmigrations', showmigrations)
]
| [
"noreply@github.com"
] | mehrdadmhd.noreply@github.com |
d62cb9ed15fdf25fbcf76191f5229784b9ee13e5 | cf3ef8f3eca858bd3c64ba6159a2ba7cdb1722ad | /studygroups/views/organizer.py | 06e748fc35dd2c27b9b26843e6b0676e4ae6d0d6 | [] | no_license | alvarmaciel/learning-circles | 2ff956dcbe0b5a42f64036c33613644115063a8d | 3ac444fd6f5a81f655face733e7d41786e085cd4 | refs/heads/master | 2021-01-11T00:45:04.513019 | 2016-10-05T14:13:16 | 2016-10-05T14:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,094 | py | import datetime
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.mail import EmailMultiAlternatives, send_mail
from django.contrib import messages
from django.conf import settings
from django import http
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.generic.base import View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic import ListView
from studygroups.models import Course
from studygroups.models import StudyGroup
from studygroups.models import TeamMembership
from studygroups.models import Facilitator
from studygroups.models import StudyGroupMeeting
from studygroups.models import report_data
from studygroups.models import generate_all_meetings
from studygroups.models import get_team_users
from studygroups.models import get_user_team
from studygroups.forms import StudyGroupForm
from studygroups.forms import FacilitatorForm
from studygroups.decorators import user_is_organizer
@user_is_organizer
def organize(request):
today = datetime.datetime.now().date()
two_weeks_ago = today - datetime.timedelta(weeks=2, days=today.weekday())
two_weeks = today - datetime.timedelta(days=today.weekday()) + datetime.timedelta(weeks=3)
study_groups = StudyGroup.objects.active()
facilitators = Facilitator.objects.all()
courses = []# TODO Remove courses until we implement course selection for teams
team = None
if not request.user.is_staff:
team = get_user_team(request.user)
team_users = get_team_users(request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
facilitators = facilitators.filter(user__in=team_users)
active_study_groups = study_groups.filter(
id__in=StudyGroupMeeting.objects.active().filter(meeting_date__gte=two_weeks_ago).values('study_group')
)
meetings = StudyGroupMeeting.objects.active()\
.filter(study_group__in=study_groups, meeting_date__gte=two_weeks_ago)\
.exclude(meeting_date__gte=two_weeks)
context = {
'team': team,
'courses': courses,
'meetings': meetings,
'study_groups': study_groups,
'active_study_groups': active_study_groups,
'facilitators': facilitators,
'today': timezone.now(),
}
return render_to_response('studygroups/organize.html', context, context_instance=RequestContext(request))
class StudyGroupList(ListView):
model = StudyGroup
def get_queryset(self):
study_groups = StudyGroup.objects.active()
if not self.request.user.is_staff:
team_users = get_team_users(self.request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
return study_groups
class StudyGroupMeetingList(ListView):
model = StudyGroupMeeting
def get_queryset(self):
study_groups = StudyGroup.objects.active()
if not self.request.user.is_staff:
team_users = get_team_users(self.request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
meetings = StudyGroupMeeting.objects.active().filter(study_group__in=study_groups)
return meetings
class TeamMembershipDelete(DeleteView):
model = TeamMembership
success_url = reverse_lazy('studygroups_organize')
template_name = 'studygroups/confirm_delete_membership.html'
def get_object(self, queryset=None):
if queryset == None:
queryset = TeamMembership.objects
return queryset.get(user_id=self.kwargs.get('user_id'), team_id=self.kwargs.get('team_id'))
class CourseUpdate(UpdateView):
model = Course
fields = [
'title',
'provider',
'link',
'start_date',
'duration',
'prerequisite',
'time_required',
'caption',
]
success_url = reverse_lazy('studygroups_organize')
class CourseDelete(DeleteView):
model = Course
success_url = reverse_lazy('studygroups_organize')
template_name = 'studygroups/confirm_delete.html'
class StudyGroupCreate(CreateView):
model = StudyGroup
form_class = StudyGroupForm
success_url = reverse_lazy('studygroups_organize')
def form_valid(self, form):
self.object = form.save()
generate_all_meetings(self.object)
return http.HttpResponseRedirect(self.get_success_url())
@user_is_organizer
def report(request):
# TODO - remove this view
study_groups = StudyGroup.objects.active()
for study_group in study_groups:
study_group.laptop_stats = {}
context = {
'study_groups': study_groups,
}
return render_to_response('studygroups/report.html', context, context_instance=RequestContext(request))
@user_is_organizer
def weekly_report(request, year=None, month=None, day=None ):
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
if month and day and year:
today = today.replace(year=int(year), month=int(month), day=int(day))
start_time = today - datetime.timedelta(days=today.weekday())
end_time = start_time + datetime.timedelta(days=7)
context = {
'start_time': start_time,
'end_time': end_time,
}
# get team for current user
team = None
membership = TeamMembership.objects.filter(user=request.user, role=TeamMembership.ORGANIZER).first()
if membership:
team = membership.team
context.update(report_data(start_time, end_time, team))
return render_to_response('studygroups/weekly-update.html', context, context_instance=RequestContext(request))
| [
"dirkcuys@gmail.com"
] | dirkcuys@gmail.com |
a4016f8a6330e19ed8a96f4454f25d67f80f281e | b8ec3e360f091240304afcbebb0860730ac3d835 | /mpu6050.py | c18ac2caf048a629782f454ffde99e9326f409a3 | [] | no_license | brbza/motion | 89a8a9a2ede6906ecb9bf25edd9b90d2c4cd94cf | d903e346e941e5d3e96363a97eaf27fd0ba04019 | refs/heads/master | 2023-05-11T15:03:43.891032 | 2019-06-24T08:05:20 | 2019-06-24T08:05:20 | 193,440,400 | 0 | 0 | null | 2023-05-01T20:35:16 | 2019-06-24T05:35:16 | Python | UTF-8 | Python | false | false | 3,946 | py | import machine
class MPU6050(object):
SELF_TEST_X = 0x0D
SELF_TEST_Y = 0x0E
SELF_TEST_Z = 0x0F
SELF_TEST_A = 0x10
SMPLRT_DIV = 0x19
CONFIG = 0x1A
GYRO_CONFIG = 0x1B
ACCEL_CONFIG = 0x1C
FIFO_EN = 0x23
I2C_MST_CTRL = 0x24
I2C_SLV0_ADDR = 0x25
I2C_SLV0_REG = 0x26
I2C_SLV0_CTRL = 0x27
I2C_SLV1_ADDR = 0x28
I2C_SLV1_REG = 0x29
I2C_SLV1_CTRL = 0x2A
I2C_SLV2_ADDR = 0x2B
I2C_SLV2_REG = 0x2C
I2C_SLV2_CTRL = 0x2D
I2C_SLV3_ADDR = 0x2E
I2C_SLV3_REG = 0x2F
I2C_SLV3_CTRL = 0x30
I2C_SLV4_ADDR = 0x31
I2C_SLV4_REG = 0x32
I2C_SLV4_DO = 0x33
I2C_SLV4_CTRL = 0x34
I2C_SLV4_DI = 0x35
I2C_MST_STATUS = 0x36
INT_PIN_CFG = 0x37
INT_ENABLE = 0x38
INT_STATUS = 0x3A
ACCEL_XOUT_H = 0x3B
ACCEL_XOUT_L = 0x3C
ACCEL_YOUT_H = 0x3D
ACCEL_YOUT_L = 0x3E
ACCEL_ZOUT_H = 0x3F
ACCEL_ZOUT_L = 0x40
TEMP_OUT_H = 0x41
TEMP_OUT_L = 0x42
GYRO_XOUT_H = 0x43
GYRO_XOUT_L = 0x44
GYRO_YOUT_H = 0x45
GYRO_YOUT_L = 0x46
GYRO_ZOUT_H = 0x47
GYRO_ZOUT_L = 0x48
EXT_SENS_DATA_00 = 0x49
EXT_SENS_DATA_01 = 0x4A
EXT_SENS_DATA_02 = 0x4B
EXT_SENS_DATA_03 = 0x4C
EXT_SENS_DATA_04 = 0x4D
EXT_SENS_DATA_05 = 0x4E
EXT_SENS_DATA_06 = 0x4F
EXT_SENS_DATA_07 = 0x50
EXT_SENS_DATA_08 = 0x51
EXT_SENS_DATA_09 = 0x52
EXT_SENS_DATA_10 = 0x53
EXT_SENS_DATA_11 = 0x54
EXT_SENS_DATA_12 = 0x55
EXT_SENS_DATA_13 = 0x56
EXT_SENS_DATA_14 = 0x57
EXT_SENS_DATA_15 = 0x58
EXT_SENS_DATA_16 = 0x59
EXT_SENS_DATA_17 = 0x5A
EXT_SENS_DATA_18 = 0x5B
EXT_SENS_DATA_19 = 0x5C
EXT_SENS_DATA_20 = 0x5D
EXT_SENS_DATA_21 = 0x5E
EXT_SENS_DATA_22 = 0x5F
EXT_SENS_DATA_23 = 0x60
I2C_SLV0_DO = 0x63
I2C_SLV1_DO = 0x64
I2C_SLV2_DO = 0x65
I2C_SLV3_DO = 0x66
I2C_MST_DELAY_CTRL = 0x67
SIGNAL_PATH_RESET = 0x68
USER_CTRL = 0x6A
PWR_MGMT_1 = 0x6B
PWR_MGMT_2 = 0x6C
FIFO_COUNTH = 0x72
FIFO_COUNTL = 0x73
FIFO_R_W = 0x74
WHO_AM_I = 0x75
def __init__(self, i2c, addr=0x68):
self.iic = i2c
self.addr = addr
self.iic.start()
self.iic.writeto(self.addr, bytearray([107, 0]))
self.iic.stop()
def _session(self):
self.iic.start()
yield
self.iic.stop()
def _read_from_mem(self, address, num_bytes):
self._session()
return self.iic.readfrom_mem(self.addr, address, num_bytes)
def _write_to_mem(self, address, values):
self._session()
self.iic.writeto_mem(self.addr, address, values)
def get_raw_values(self):
self.iic.start()
a = self.iic.readfrom_mem(self.addr, 0x3B, 14)
self.iic.stop()
return a
def get_ints(self):
b = self.get_raw_values()
c = []
for i in b:
c.append(i)
return c
def bytes_toint(self, firstbyte, secondbyte):
if not firstbyte & 0x80:
return firstbyte << 8 | secondbyte
return - (((firstbyte ^ 255) << 8) | (secondbyte ^ 255) + 1)
def get_values(self):
raw_ints = self.get_raw_values()
vals = {}
vals["AcX"] = self.bytes_toint(raw_ints[0], raw_ints[1])
vals["AcY"] = self.bytes_toint(raw_ints[2], raw_ints[3])
vals["AcZ"] = self.bytes_toint(raw_ints[4], raw_ints[5])
vals["Tmp"] = self.bytes_toint(raw_ints[6], raw_ints[7]) / 340.00 + 36.53
vals["GyX"] = self.bytes_toint(raw_ints[8], raw_ints[9])
vals["GyY"] = self.bytes_toint(raw_ints[10], raw_ints[11])
vals["GyZ"] = self.bytes_toint(raw_ints[12], raw_ints[13])
return vals # returned in range of Int16
# -32768 to 32767
def val_test(self): # ONLY FOR TESTING! Also, fast reading sometimes crashes IIC
from time import sleep
while 1:
print(self.get_values())
sleep(0.05)
| [
"carlos.barboza@gmail.com"
] | carlos.barboza@gmail.com |
74c507fea182bc755c7aeb7685aba03bf3b95d65 | 29592ead4965e1152a1558351b3c60005438c85f | /v0.97/dc/models/batches.py | 2969e64c06b0fa446afe5c7ef5bdd6d41c5d9319 | [
"MIT"
] | permissive | iaiting/Flask-and-pywebview-followup-application-gui | 5e489eda889c6d02ffb8efd40098a380dedd0558 | b665334403b4a8471b5f28054ee2dc7adda7d9fc | refs/heads/master | 2020-04-04T11:30:57.294114 | 2018-10-29T09:14:34 | 2018-10-29T09:14:34 | 155,893,938 | 0 | 1 | MIT | 2018-11-02T16:36:19 | 2018-11-02T16:36:18 | null | UTF-8 | Python | false | false | 13,049 | py | import os
import re
import pandas as pd
from lxml import etree
from dc.utils.commun import Commun
from dc.utils.dbwrap import Dbwrap
from dc.utils.folders import Folders
from dc.models.users import User
class Batches:
func = Commun()
conf = func.config_info()
db = Dbwrap(conf["path_to_database"])
folder = Folders()
user = User()
def process_files_paths(self, filesInfodict):
"""Extract files names, set and id for each file path"""
id_lentgh = int(self.conf["IDlentgh"])
new_opfiles_path = self.conf["path_to_new_opfiles"]
paths_to_files = list(filesInfodict.keys())
original_files_names = []
new_files_names = []
for file_path in paths_to_files:
fid = self.func.generate_id(id_lentgh)
file_name = self.func.get_file_name(file_path)
original_files_names.append(file_name)
new_file_name = os.path.join("{} {}".format(fid, file_name))
new_files_names.append(new_file_name)
files = ", ".join(original_files_names)
new_files = ", ".join(new_files_names)
return files, new_files
def get_paths_for_unassigned_prepared(self, bid_info):
"""Create the paths to move the folders from new to unassigned and prepared"""
org_path = bid_info["OriginalFilesPath"]
opfolder = "{} {}".format(bid_info["Operator"], bid_info["Aircraft"])
new_path = os.path.join(org_path, opfolder)
bid_opfolder = "{} _{}".format(opfolder, bid_info["BatchID"])
unassigned_path = os.path.join(self.conf["path_to_batches_unassigned"], bid_opfolder)
prepared_path = os.path.join(self.conf["path_to_batches_prepfiles"], bid_opfolder)
return new_path, unassigned_path, prepared_path, bid_opfolder
def add_id_to_prepfiles(self, bid_info, bid_opfolder):
"""Add the new files names to files in prepared/unassigned files"""
org_files = self.func.listify_string(bid_info["OriginalFilesName"])
new_files = self.func.listify_string(bid_info["FilesID"])
prep_path = self.conf["path_to_batches_prepfiles"]
unassg_path = self.conf["path_to_batches_unassigned"]
prep_bid_path = os.path.join(prep_path, bid_opfolder)
unassg_bid_path = os.path.join(unassg_path, bid_opfolder)
for org, new in zip(org_files, new_files):
#Paths for prepared files
preporg_path = os.path.join(prep_bid_path, org)
prepnew_path = os.path.join(prep_bid_path, new)
#Paths for unassigned files
unassgorg_path = os.path.join(unassg_bid_path, org)
unassgnew_path = os.path.join(unassg_bid_path, new)
self.func.move_folder(preporg_path, prepnew_path)
self.func.move_folder(unassgorg_path, unassgnew_path)
def copy_files_to_unassigned_prepared_dirs(self, bid_info):
"""Copy files from new folder to unassigned and prepared path folder"""
new_path, unassigned_path, prepared_path, bid_opfolder = self.get_paths_for_unassigned_prepared(bid_info)
#Raise error if a dir is found in new operator files
self.func.accept_only_files(new_path)
#Copy from new to unassigned
self.func.copy_dirs(new_path, unassigned_path)
#Copy from unassigned to prepared
self.func.copy_dirs(unassigned_path, prepared_path)
#Rename files from prepared folder
self.add_id_to_prepfiles(bid_info, bid_opfolder)
def check_file_history(self, file_info):
"""Check if an operator file was added before based on name, size and modification date"""
dfgen, conn = self.db.read_table("fileshistory", chunk_size=50000)
for df in dfgen:
df_name = df[df["FileName"] == file_info["FileName"]]
if df_name.shape[0] > 0:
df_size = df[df["FileSize"] == int(file_info["FileSize"])]
df_date = df[df["ModificationDate"] == file_info["ModificationDate"]]
if df_size.shape[0] > 0 and df_date.shape[0] > 0:
conn.close()
raise Exception("File '{}' was added before in batch '{}'".format(df_name["FileName"].tolist()[0], df_name["AddedInBatch"].tolist()[0]))
conn.close()
def prepare_fileshistory_info(self, bid_info, files_info):
"""Get rows to insert in fileshistory and check if was previously added"""
rows = []
for file_path, info in files_info["FilesInfo"].items():
file_name = self.func.get_file_name(file_path)
file_info = {"FileName": file_name,
"AddedInBatch": bid_info["BatchID"],
"FileSize": info["FileSize"],
"ModificationDate": info["ModificationDate"]}
self.check_file_history(file_info)
rows.append(file_info)
return rows
def add_batch(self):
    """Register a new batch of operator files.

    Generates a batch id, snapshots the incoming files, copies them into
    the unassigned/prepared directories, then inserts the followup row
    and one fileshistory row per file.  Returns the new batch id.
    """
    # NOTE: "IDlentgh" reproduces the (misspelled) key used in the config file.
    bid = self.func.generate_id(int(self.conf["IDlentgh"]))
    files_info = self.folder.new_opfiles_info()
    files_names, new_files_names = self.process_files_paths(files_info["FilesInfo"])
    userdata = self.user.session_info()
    # Followup row: a freshly added batch has no responsible/proofreader yet.
    batch_info_followup = {"BatchID": bid,
                           "Aircraft": files_info["Aircraft"],
                           "Operator": files_info["Operator"],
                           "OriginalFilesName": files_names,
                           "OriginalFilesPath": self.conf["path_to_new_opfiles"],
                           "FilesID": new_files_names,
                           "AddedDate": self.func.current_date(),
                           "Responsible": "UNASSIGNED",
                           "Proofreader": "UNASSIGNED",
                           "ResponsibleStatus": "",
                           "ProofreaderStatus": "",
                           "OverallStatus": "UNASSIGNED",
                           "ChangesLog": "Batch added by {},".format(userdata["User"])}
    # Also validates that none of the files was added in an earlier batch
    # (prepare_fileshistory_info raises on duplicates before any copy/insert).
    batch_info_fileshistory = self.prepare_fileshistory_info(batch_info_followup, files_info)
    # Data for db is prepared; now copy the files.
    self.copy_files_to_unassigned_prepared_dirs(batch_info_followup)
    # Files are copied; insert the data into the database:
    # the batch row first, then one history row per file.
    self.db.create_row("followup", batch_info_followup)
    for file_history in batch_info_fileshistory:
        self.db.create_row("fileshistory", file_history)
    return bid
def followup_reversed(self):
    """Return the followup table, newest row first, as a dict of column lists."""
    table = self.db.read_table("followup")
    newest_first = table.reindex(index=table.index[::-1])
    return newest_first.to_dict('list')
def followup_for_responsible(self):
    """Return the followup rows assigned to the logged-in user, newest first,
    as a dict of column lists."""
    current_user = self.user.session_info()["User"]
    table = self.db.select_row("followup", "Responsible", current_user, asdict=False)
    return table.reindex(index=table.index[::-1]).to_dict('list')
def get_batch(self, bid=""):
    """Return the followup row for *bid* flattened to a {column: value} dict."""
    row = self.db.select_row("followup", "BatchID", bid)
    # select_row yields one-element lists per column; unwrap them.
    return {column: values[0] for column, values in row.items()}
def bid_options(self):
    """Collect the option lists for the batch form.

    Merges the status/aircraft choices from config.json, the current
    batch data from batch.json, the user-context flags, and the user and
    proofreader lists.
    """
    listify = self.func.listify_string
    options = {
        "status_user": listify(self.conf["batch_status_options_responsible"]),
        "status_proofreader": listify(self.conf["batch_status_options_proofreader"]),
        "status_overall": listify(self.conf["batch_status_options_overall"]),
        "aircrafts": listify(self.conf["aircrafts"]),
        "split_batch_factor": listify(self.conf["split_batch_factor"]),
    }
    options.update(self.func.read_json("batch.json"))
    options.update(self.user.context_disable())
    options["users"] = self.user.get_users()
    options["proofreaders"] = self.user.get_proofreaders()
    return options
def set_default_proofreader(self, data):
    """When a responsible is being assigned, fill in the dependent fields.

    Looks up the responsible's default proofreader, resets both status
    columns, marks the batch ONGOING, stamps the start date and appends
    an assignment entry to the change log.  Returns *data* (mutated) —
    untouched when no "Responsible" key is present.
    """
    if "Responsible" not in data:
        return data
    user_data = self.db.select_row("users", "User", data["Responsible"])
    data["Proofreader"] = user_data["Proofreader"][0]
    data["ResponsibleStatus"] = ""
    data["ProofreaderStatus"] = ""
    data["OverallStatus"] = "ONGOING"
    data["StartDate"] = self.func.current_date()
    bid_info = self.func.read_json("batch.json")
    data["ChangesLog"] = bid_info["ChangesLog"] + "\nAssigned to {} on {},".format(data["Responsible"], data["StartDate"])
    return data
def clear_start_date(self, data):
    """Blank the progress fields when a batch leaves active work.

    If the submitted ProofreaderStatus is STANDBY or UNRECORDABLE, the
    start/import dates and all row-count statistics are reset to empty
    strings so the batch no longer counts as started or imported.
    Returns *data* (mutated in place).
    """
    # dict.get avoids the old "in list(data.keys())" scan and the
    # two-branch equality chain; absent key -> None -> no match.
    if data.get("ProofreaderStatus") in ("UNRECORDABLE", "STANDBY"):
        for field in ("StartDate", "ImportedDateISAIM", "TotalRowsNbr",
                      "MPDTaskRowsNbr", "OperatorRowsNbr", "FindingsRowsNbr",
                      "EstimatedTaskNbr", "EstimatedFdgNbr"):
            data[field] = ""
    return data
def dcs_info(self, dcspath):
    """Parse the DCS xml export (extracted from FE) and return row counters.

    Sums the <sum> elements overall, under mpdTask, under opeTask, and
    under activated findings.  Returns a dict keyed like the followup
    statistics columns.
    """
    def _xpath_total(tree, expr):
        # Sum the integer text of every node matching the xpath expression.
        return sum(int(node.text) for node in tree.xpath(expr))

    # Fix: the file handle was previously opened and never closed (leak).
    with open(dcspath) as xml_file:
        tree = etree.parse(xml_file)
    return {
        "TotalRowsNbr": _xpath_total(tree, '//sum'),
        "MPDTaskRowsNbr": _xpath_total(tree, '//mpdTask//sum'),
        "OperatorRowsNbr": _xpath_total(tree, '//opeTask//sum'),
        "FindingsRowsNbr": _xpath_total(tree, "//finding[@activated='true']//sum"),
    }
def update_ifstatus_finished(self, data):
    """Stamp ImportedDateISAIM with today's date once the batch is FINISHED.

    Returns *data* (mutated in place); a no-op for any other status.
    """
    if data.get("ProofreaderStatus") == "FINISHED":
        data["ImportedDateISAIM"] = self.func.current_date()
    return data
def update_ifstatus_toimport(self, data):
    """Merge the DCS row counters into *data* when it is marked TO BE IMPORTED.

    Looks for exactly one file in the DCS-info directory whose name
    contains the BatchID and merges its counters (see dcs_info) into
    *data*.  Raises if no unique DCS file exists.

    Fix: every non-raising path now returns *data*; previously the branch
    where ProofreaderStatus was present but not "TO BE IMPORTED" could
    fall through and return None, breaking the caller chain in
    process_status_batch_form.
    """
    if data.get("ProofreaderStatus") != "TO BE IMPORTED":
        return data
    dcs_files = os.listdir(self.conf["path_to_dcs_info"])
    matches = [f for f in dcs_files if data["BatchID"] in f]
    if len(matches) != 1:
        raise Exception("DCS file for batch '{}' not found!".format(data["BatchID"]))
    dcs_file_path = os.path.join(self.conf["path_to_dcs_info"], matches[0])
    data.update(self.dcs_info(dcs_file_path))
    return data
def process_status_batch_form(self, data):
    """Apply a status-update form submitted from the update_status page.

    Cleans the submitted fields, derives the dependent columns
    (proofreader assignment, start/import dates, DCS row counters),
    moves the batch folders according to the new status and persists the
    result in the followup table.
    """
    data = self.func.remove_null(data)
    data = self.set_default_proofreader(data)
    data = self.clear_start_date(data)
    data = self.update_ifstatus_toimport(data)
    data = self.update_ifstatus_finished(data)
    # Move the batch folders between the status directories.
    # NOTE(review): presumably each helper only acts when the new status
    # triggers that particular transition — confirm in the folder module.
    self.folder.move_prepfile_in_assigned(data)        # assign
    self.folder.move_assigned_in_tbchecked(data)       # to be checked
    self.folder.move_tobchecked_in_tbimported(data)    # to be imported
    self.folder.move_tbimported_in_finished(data)      # finished
    self.folder.move_tbchecked_in_assigned(data)       # rework
    self.folder.move_tobchecked_in_standby(data)       # standby
    self.folder.move_tobchecked_in_unrecordable(data)  # unrecordable
    return self.db.update_row("followup", data, "BatchID")
"noreply@github.com"
] | iaiting.noreply@github.com |
219af0b72ccdda85c00c48773350f2e417059536 | a9d079617071855dfbd183cac5a9588ab6e30d84 | /models/test_model.py | 791316b6416c23091bf5fa7b3878ea7ab5ee4ec2 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | yunyanxing/pairwise_xray_augmentation | 1ecb1a1114688b307673e5346a27f6300dd2874c | 057927cf247b3854e311fbdf52b74001175915d8 | refs/heads/master | 2020-08-09T13:32:21.899166 | 2020-01-22T00:58:49 | 2020-01-22T00:58:49 | 214,098,075 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | from .base_model import BaseModel
from . import networks
from .cycle_gan_model import CycleGANModel
class TestModel(BaseModel):
    """Inference-only wrapper around a single CycleGAN generator.

    Loads generator G[model_suffix] and runs it one way only
    (real_A -> fake_B); no discriminators, losses or optimizers.
    """

    def name(self):
        return 'TestModel'

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Reuse CycleGAN's test-time options plus a generator-name suffix."""
        assert not is_train, 'TestModel cannot be used in train mode'
        parser = CycleGANModel.modify_commandline_options(parser, is_train=False)
        parser.set_defaults(dataset_mode='single')
        parser.add_argument('--model_suffix', type=str, default='',
                            help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will'
                                 ' be loaded as the generator of TestModel')
        return parser

    def initialize(self, opt):
        """Build the generator network and register the names BaseModel expects."""
        assert (not opt.isTrain)
        BaseModel.initialize(self, opt)
        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = []
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        self.visual_names = ['real_A', 'fake_B']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        self.model_names = ['G' + opt.model_suffix]
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
                                      opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        # assigns the model to self.netG_[suffix] so that it can be loaded
        # please see BaseModel.load_networks
        setattr(self, 'netG' + opt.model_suffix, self.netG)

    def set_input(self, input):
        # we need to use single_dataset mode (one-direction input only)
        self.real_A = input['A'].to(self.device)
        self.image_paths = input['A_paths']

    def forward(self):
        # Single generator pass; fake_B is exposed through visual_names.
        self.fake_B = self.netG(self.real_A)
| [
"yunyan.xing@monash.edu"
] | yunyan.xing@monash.edu |
148610beec78d8585a9828b6d141e4d2459acfd4 | 77f47db313b4f018d64f0f5095ac10ae12655507 | /campaign/urls.py | 48cbb5118bac3376b0b1526b54854552694d9177 | [] | no_license | idlivada/campaign | e03ef58cb9fac05513734625ce2b94d4a430da4f | d85ad505c420b71ca4bf2de754b53e8ec0d7629e | refs/heads/master | 2020-04-16T16:40:49.666387 | 2017-06-27T12:49:58 | 2017-06-27T12:49:58 | 19,208,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
admin.autodiscover()

# Django 1.x-style URL configuration for the campaign app.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    # Fix: was r'^^$' — the duplicated '^' anchor was a typo (behaviorally
    # equivalent for the empty path, but misleading).
    url(r'^$', 'core.views.home', name='home'),
    url(r'^locator/', 'core.views.locator', name='locator'),
    url(r'^call/', 'core.views.call', name='call'),
    url(r'^dial-callback/', 'core.views.dial_callback', name='dial-callback'),
    url(r'^s/(?P<slug>[a-zA-Z0-9_.-]+)/', 'core.views.single_campaign', name='single-campaign'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"idlivada@gmail.com"
] | idlivada@gmail.com |
3f1407aeca182aa219f12783beac7371b4dd6ad1 | 3c3df48ff6239c5387847200c25d67c8c1b59491 | /alien_invasion.py | e53683fdd6c8210596db0118bfe98acfcdf4cf9c | [] | no_license | sdkcouto/coronapython-game | cf8930721883f9ec7786d61959989bcdf2a381d0 | 5e11f35d6ae8035d640d4e768432074f7a80f427 | refs/heads/master | 2022-11-19T03:47:27.375952 | 2020-07-21T18:44:25 | 2020-07-21T18:44:25 | 278,443,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | import pygame
from pygame.sprite import Group
from settings import Settings
from game_stats import GameStats
from scoreboard import Scoreboard
from button import Button
from ship import Ship
from alien import Alien
import game_functions as gf
def run_game():
    """Initialize pygame, build the game objects, and run the main loop."""
    pygame.init()
    ai_settings = Settings()
    screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
    pygame.display.set_caption("Alien Invasion")
    # Play button shown until the player starts a game.
    play_button = Button(ai_settings, screen, "Play")
    # Statistics and their on-screen scoreboard.
    stats = GameStats(ai_settings)
    sb = Scoreboard(ai_settings, screen, stats)
    ship = Ship(ai_settings, screen)
    aliens = Group()
    bullets = Group()
    gf.create_fleet(ai_settings, screen, ship, aliens)
    # Main loop: always poll events and redraw, but only advance the
    # world (ship, bullets, aliens) while a game is active.
    while True:
        gf.check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets)
        if stats.game_active:
            ship.update()
            bullets.update()
            gf.update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets)
            gf.update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets)
        gf.update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button)


run_game()
"matheuscouto.eng@gmail.com"
] | matheuscouto.eng@gmail.com |
29fb1dc505940b341e55979260d1600d60d2fd10 | 6fad4c35342a27bd9882b3a55e99da461df8f6c3 | /scripts/token_check.py | 81bcb49afee511a7d3b182e99de819cba6c8a4f2 | [
"MIT"
] | permissive | tonychew1986/erc20-token | 137bdb06dbbd275165cf6f9c6703b6927c9c4cb3 | dd206c2e2282f0fce57c4da05111210102bbfbde | refs/heads/main | 2023-03-28T01:12:14.142687 | 2021-03-28T08:54:10 | 2021-03-28T08:54:10 | 352,282,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #!/usr/bin/python3
from brownie import Token, accounts
#etherscan api
#Q9JWR2ZC58AGT4MTM7CEFI8UD8IH3VQ2WS
def main():
    """Publish the Token contract's source via Brownie (presumably for
    Etherscan verification — see the API-key comment above; confirm)."""
    # user_account = accounts.load('xxx_shared_1')
    # user_account = accounts.load('xxx_shared_mainnet')
    token_contract = Token.at("0xe63d6B308BCe0F6193AeC6b7E6eBa005f41e36AB")  # use the address where the contract is deployed
    Token.publish_source(token_contract)
"tonychew1986@gmail.com"
] | tonychew1986@gmail.com |
89b88fec9ac2985d22a7fca2da8155088f1e5567 | 219389652f57204b5d942ae8cdb7647650cc495f | /src/metricas.py | a92b9e13999190a65cc88303917aeaa402cc4b70 | [] | no_license | vsantsal/DesafioFolhaSistemaEstelarPython | dcc5bfb519ab1af3d022c642320511ef8a143115 | 19fa1e2102ec2348b2d5b7319efb2fd4efb66ddc | refs/heads/master | 2023-07-09T12:56:00.349140 | 2021-08-19T00:04:47 | 2021-08-19T00:04:47 | 267,566,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | #!/usr/bin/env python3.9
from functools import reduce
from operator import mul
from typing import (
Any,
Dict,
List,
Protocol,
TypeVar,
)
"""
Classe responsável em que se a regra de negócio RN1 para descobrir o destino
da espaçonave está definida
RN1:O local de chegada pode ser conhecido sabendo que as vogais do nome da estrela
são atribuídas à uma sequência Fibonacci que começa em 1 e termina em 8,
onde A = 1, E = 2, I = 3, O = 5 e U = 8.
Se a multiplicação das vogais der o mesmo número
que a quantidade de engenheiros, a estrela de destino será conhecida.
"""
class Comparavel(Protocol):
    """Structural type for any object that supports equality comparison."""
    def __eq__(self, other: Any) -> bool: ...


# Type variable bound to comparable types; used to annotate Balanca.compara.
CT = TypeVar('CT', bound=Comparavel)
class Balanca:
    """Equality "scale": weighs a computed value against an expected one."""

    @staticmethod
    def compara(valor: CT, peso: CT):
        # Compare the two values and return the boolean of the comparison.
        return valor == peso
class Calculadora:
    """Computes the RN1 vowel product of a star name.

    Per RN1, vowels map to a Fibonacci sequence (A=1, E=2, I=3, O=5, U=8)
    and the name's value is the product of its letters; any non-vowel
    character multiplies as the neutral element 1.
    """

    # Case-insensitive vowel -> Fibonacci value table (RN1).
    _MAPA_DE_VALORES: Dict[str, int] = {
        'a': 1,
        'e': 2,
        'i': 3,
        'o': 5,
        'u': 8,
    }

    def calcula(self, nome):
        """Return the product of the mapped values of all letters in *nome*.

        An empty name yields 1.  (Fix: ``reduce(mul, [])`` previously
        raised TypeError; the initializer 1 handles the empty case.)
        """
        valores = self._converte_nome_em_lista_de_inteiros(nome)
        return reduce(mul, valores, 1)

    def _converte_nome_em_lista_de_inteiros(self, nome: str) -> List[int]:
        # Lower-case lookup so upper-case vowels count; non-vowels map to 1.
        return [self._MAPA_DE_VALORES.get(letra.lower(), 1) for letra in nome]
| [
"vsantsal@gmail.com"
] | vsantsal@gmail.com |
d57deec55818ba052668da21c0a7d21e7d37f27c | 3365209124a099d96b7397012865a544c60fbab3 | /PyPoll/PyPoll_IGR.py | 3f65ff292c2d4a187c26ced1618990ca64bd6531 | [] | no_license | igr1993/python-challenge | 317f651c154edc862907998e543e11ceaab6f07e | 857977490b622c6f7bf4edf0e0f0065c91149959 | refs/heads/main | 2023-03-07T06:04:02.207958 | 2021-02-21T01:41:07 | 2021-02-21T01:41:07 | 340,744,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py |
#lets go PyPoll!
import os
import csv
candidates = {}
election_csv = os.path.join('.','Resources','election_data.csv')
with open(election_csv, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
header = next(csvreader)
for row in csvreader:
if row[2] in candidates.keys():
candidates[row[2]]+=1
else:
candidates[row[2]] = 1
total = candidates.values()
total_votes = sum(total)
list_candidates = candidates.keys()
votes_per = [f'{(x/total_votes)*100:.3f}%' for x in candidates.values()]
winner = list(candidates.keys())[list(candidates.values()).index(max(candidates.values()))]
winner
print("Election results")
print("--------------------------------")
print(f" Total votes: {int(total_votes)}")
print("---------------------------------")
i = 0
for candidate, vote in candidates.items():
print(f'{candidate}, {vote} , {votes_per[i]}')
i+=1
print("------------------------------")
print(f" Winner: {winner}")
print("------------------------------") | [
"igr@Ians-MacBook-Pro.local"
] | igr@Ians-MacBook-Pro.local |
ca99b5cf3ba81bd26679882f2f553c50dc8dabe1 | eb40dce4039d528b9cd06dbeda75da09d09d7fc5 | /need_install/Django-1.8.17/django/db/models/fields/__init__.py | 8bba4cb3216e4eef1a0fcb5822bd70cfc3c685c8 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MulticsYin/MulticsSH | 39b62189446787c7f0f037b1640c9c780bd1dddd | 5837a0bff0e7da0e8535e4e0b31ef6baf24274b4 | refs/heads/master | 2021-08-28T07:53:51.759679 | 2017-12-11T15:31:03 | 2017-12-11T15:31:03 | 82,428,902 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 89,179 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import uuid
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.apps import apps
from django.db import connection
from django.db.models.lookups import default_lookups, RegisterLookupMixin
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators, checks
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time, parse_duration
from django.utils.duration import duration_string
from django.utils.functional import cached_property, curry, total_ordering, Promise
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import (smart_text, force_text, force_bytes,
python_2_unicode_compatible)
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
from django.utils.itercompat import is_iterable
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty',
'Field', 'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
'UUIDField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
class_lookups = default_lookups.copy()
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.is_relation = self.rel is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
errors.extend(self._check_deprecation_details())
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
hint=None,
obj=self,
id='fields.E003',
)
]
else:
return []
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
hint=None,
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
("'choices' must be an iterable containing "
"(actual value, human readable name) tuples."),
hint=None,
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
hint=None,
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
return connection.validation.check_field(self, **kwargs)
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be used
by Django.
"""
return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"choices": "_choices",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
something else than None, then the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
# Some validators can't be created at field initialization time.
# This method provides a way to delay their creation until required.
return self.default_validators + self._validators
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError. Subclasses should override
        this to provide validation logic.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        if self._choices and value not in self.empty_values:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for
                    # options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            # Value matched none of the declared choices.
            raise exceptions.ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'], code='null')
        if not self.blank and value in self.empty_values:
            raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
    def db_type(self, connection):
        """
        Returns the database column data type for this field, for the provided
        connection.
        """
        # The default implementation of this method looks at the
        # backend-specific data_types dictionary, looking up the field by its
        # "internal type".
        #
        # A Field class can implement the get_internal_type() method to specify
        # which *preexisting* Django Field class it's most similar to -- i.e.,
        # a custom field might be represented by a TEXT column type, which is
        # the same as the TextField Django field type, which means the custom
        # field's get_internal_type() returns 'TextField'.
        #
        # But the limitation of the get_internal_type() / data_types approach
        # is that it cannot handle database column types that aren't already
        # mapped to one of the built-in Django field types. In this case, you
        # can implement db_type() instead of get_internal_type() to specify
        # exactly which wacky database column type you want to use.
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.data_types[self.get_internal_type()] % data
        except KeyError:
            # Returning None means the field has no database column.
            return None
    def db_parameters(self, connection):
        """
        Extension of db_type(), providing a range of different return
        values (type, checks).
        This will look at db_type(), allowing custom model fields to override it.
        """
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        type_string = self.db_type(connection)
        try:
            # Optional backend-specific CHECK constraint for this type.
            check_string = connection.data_type_check_constraints[self.get_internal_type()] % data
        except KeyError:
            check_string = None
        return {
            "type": type_string,
            "check": check_string,
        }
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
    @property
    def unique(self):
        # Primary keys are implicitly unique.
        return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
    def contribute_to_class(self, cls, name, virtual_only=False):
        """
        Register this field with the model class ``cls`` under ``name``.
        Also installs a get_<name>_display() helper when choices are defined.
        """
        self.set_attributes_from_name(name)
        self.model = cls
        if virtual_only:
            cls._meta.add_field(self, virtual=True)
        else:
            cls._meta.add_field(self)
        if self.choices:
            setattr(cls, 'get_%s_display' % self.name,
                    curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
    def get_prep_value(self, value):
        """
        Perform preliminary non-db specific value checks and conversions.
        """
        # Resolve lazy objects (e.g. lazy translations) to their real value.
        if isinstance(value, Promise):
            value = value._proxy____cast()
        return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save``and
`get_db_prep_lookup```
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
    def get_prep_lookup(self, lookup_type, value):
        """
        Perform preliminary non-db specific lookup checks and conversions
        """
        # QuerySet-like values know how to prepare themselves.
        if hasattr(value, '_prepare'):
            return value._prepare()
        if lookup_type in {
            'iexact', 'contains', 'icontains',
            'startswith', 'istartswith', 'endswith', 'iendswith',
            'month', 'day', 'week_day', 'hour', 'minute', 'second',
            'isnull', 'search', 'regex', 'iregex',
        }:
            # These lookups use the raw value unchanged.
            return value
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return self.get_prep_value(value)
        elif lookup_type in ('range', 'in'):
            # Prepare each member of the iterable individually.
            return [self.get_prep_value(v) for v in value]
        elif lookup_type == 'year':
            try:
                return int(value)
            except ValueError:
                raise ValueError("The __year lookup type requires an integer "
                                 "argument")
        # Any other lookup type falls back to the generic preparation.
        return self.get_prep_value(value)
    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        """
        Returns field's value prepared for database lookup.
        """
        if not prepared:
            value = self.get_prep_lookup(lookup_type, value)
            prepared = True
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
            # If the value has a relabeled_clone method it means the
            # value will be handled later on.
            if hasattr(value, 'relabeled_clone'):
                return value
            if hasattr(value, 'as_sql'):
                sql, params = value.as_sql()
            else:
                sql, params = value._as_sql(connection=connection)
            return QueryWrapper(('(%s)' % sql), params)
        # Plain values: the return value is a list of SQL parameters.
        if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
                           'second', 'search', 'regex', 'iregex', 'contains',
                           'icontains', 'iexact', 'startswith', 'endswith',
                           'istartswith', 'iendswith'):
            return [value]
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return [self.get_db_prep_value(value, connection=connection,
                                           prepared=prepared)]
        elif lookup_type in ('range', 'in'):
            return [self.get_db_prep_value(v, connection=connection,
                                           prepared=prepared) for v in value]
        elif lookup_type == 'isnull':
            # IS (NOT) NULL takes no SQL parameters.
            return []
        elif lookup_type == 'year':
            if isinstance(self, DateTimeField):
                return connection.ops.year_lookup_bounds_for_datetime_field(value)
            elif isinstance(self, DateField):
                return connection.ops.year_lookup_bounds_for_date_field(value)
            else:
                return [value]  # this isn't supposed to happen
        else:
            return [value]
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
    def get_default(self):
        """
        Returns the default value for this field.
        """
        if self.has_default():
            # Callable defaults are evaluated on every call.
            if callable(self.default):
                return self.default()
            return self.default
        # No explicit default: fields that can't hold empty strings (or whose
        # backend treats '' as NULL) default to None, everything else to "".
        if (not self.empty_strings_allowed or (self.null and
                   not connection.features.interprets_empty_strings_as_nulls)):
            return None
        return ""
    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
        """Returns choices with a default blank choices included, for use
        as SelectField choices for this field."""
        blank_defined = False
        choices = list(self.choices) if self.choices else []
        named_groups = choices and isinstance(choices[0][1], (list, tuple))
        if not named_groups:
            # Don't add another blank option when the declared choices
            # already contain one.
            for choice, __ in choices:
                if choice in ('', None):
                    blank_defined = True
                    break
        first_choice = (blank_choice if include_blank and
                        not blank_defined else [])
        if self.choices:
            return first_choice + choices
        # No static choices: this is a relation, so build the list from the
        # related model's default manager, restricted by limit_choices_to.
        rel_model = self.rel.to
        limit_choices_to = limit_choices_to or self.get_limit_choices_to()
        if hasattr(self.rel, 'get_related_field'):
            lst = [(getattr(x, self.rel.get_related_field().attname),
                   smart_text(x))
                   for x in rel_model._default_manager.complex_filter(
                       limit_choices_to)]
        else:
            lst = [(x._get_pk_val(), smart_text(x))
                   for x in rel_model._default_manager.complex_filter(
                       limit_choices_to)]
        return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = blank_choice if include_blank else []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
    def _get_choices(self):
        # When choices was given as a one-shot iterator, split it with tee()
        # so the stored copy can still be consumed on later accesses.
        if isinstance(self._choices, collections.Iterator):
            choices, self._choices = tee(self._choices)
            return choices
        else:
            return self._choices
    choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
    def formfield(self, form_class=None, choices_form_class=None, **kwargs):
        """
        Returns a django.forms.Field instance for this database Field.
        """
        defaults = {'required': not self.blank,
                    'label': capfirst(self.verbose_name),
                    'help_text': self.help_text}
        if self.has_default():
            if callable(self.default):
                # Pass the callable itself so the form evaluates it lazily.
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        if self.choices:
            # Fields with choices get special treatment.
            include_blank = (self.blank or
                             not (self.has_default() or 'initial' in kwargs))
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python
            if self.null:
                defaults['empty_value'] = None
            if choices_form_class is not None:
                form_class = choices_form_class
            else:
                form_class = forms.TypedChoiceField
            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            for k in list(kwargs):
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial'):
                    del kwargs[k]
        defaults.update(kwargs)
        if form_class is None:
            form_class = forms.CharField
        return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
class AutoField(Field):
    """
    An integer field whose value is assigned automatically; must be used as
    the model's primary key.
    """
    description = _("Integer")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }
    def __init__(self, *args, **kwargs):
        # blank is always forced on: the value is never user-supplied.
        kwargs['blank'] = True
        super(AutoField, self).__init__(*args, **kwargs)
    def check(self, **kwargs):
        errors = super(AutoField, self).check(**kwargs)
        errors.extend(self._check_primary_key())
        return errors
    def _check_primary_key(self):
        # An AutoField is only valid as the primary key (fields.E100).
        if not self.primary_key:
            return [
                checks.Error(
                    'AutoFields must set primary_key=True.',
                    hint=None,
                    obj=self,
                    id='fields.E100',
                ),
            ]
        else:
            return []
    def deconstruct(self):
        name, path, args, kwargs = super(AutoField, self).deconstruct()
        # blank=True is implied by __init__; primary_key is always True.
        del kwargs['blank']
        kwargs['primary_key'] = True
        return name, path, args, kwargs
    def get_internal_type(self):
        return "AutoField"
    def to_python(self, value):
        # Accept anything int() can convert; None passes through unchanged.
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )
    def validate(self, value, model_instance):
        # The database assigns the value; nothing to validate here.
        pass
    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
            # Let the backend reject explicit PK values it can't store.
            value = connection.ops.validate_autopk_value(value)
        return value
    def get_prep_value(self, value):
        value = super(AutoField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)
    def contribute_to_class(self, cls, name, **kwargs):
        assert not cls._meta.has_auto_field, \
            "A model can't have more than one AutoField."
        super(AutoField, self).contribute_to_class(cls, name, **kwargs)
        cls._meta.has_auto_field = True
        cls._meta.auto_field = self
    def formfield(self, **kwargs):
        # AutoFields are never rendered on forms.
        return None
class BooleanField(Field):
    """
    A True/False field. null=True is rejected by the checks framework
    (NullBooleanField exists for that purpose).
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either True or False."),
    }
    description = _("Boolean (Either True or False)")
    def __init__(self, *args, **kwargs):
        # blank is always forced on for BooleanField.
        kwargs['blank'] = True
        super(BooleanField, self).__init__(*args, **kwargs)
    def check(self, **kwargs):
        errors = super(BooleanField, self).check(**kwargs)
        errors.extend(self._check_null(**kwargs))
        return errors
    def _check_null(self, **kwargs):
        # null=True is an error (fields.E110).
        if getattr(self, 'null', False):
            return [
                checks.Error(
                    'BooleanFields do not accept null values.',
                    hint='Use a NullBooleanField instead.',
                    obj=self,
                    id='fields.E110',
                )
            ]
        else:
            return []
    def deconstruct(self):
        name, path, args, kwargs = super(BooleanField, self).deconstruct()
        # blank=True is implied by __init__, so don't record it.
        del kwargs['blank']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "BooleanField"
    def to_python(self, value):
        if value in (True, False):
            # if value is 1 or 0 than it's equal to True or False, but we want
            # to return a true bool for semantic reasons.
            return bool(value)
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(BooleanField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        value = super(BooleanField, self).get_prep_value(value)
        if value is None:
            return None
        return bool(value)
    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank.
        if self.choices:
            include_blank = not (self.has_default() or 'initial' in kwargs)
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            defaults = {'form_class': forms.BooleanField}
        defaults.update(kwargs)
        return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
    """
    A string field constrained by a required, positive ``max_length``.
    """
    description = _("String (up to %(max_length)s)")
    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        # Enforce max_length at validation time as well as at the DB level.
        self.validators.append(validators.MaxLengthValidator(self.max_length))
    def check(self, **kwargs):
        errors = super(CharField, self).check(**kwargs)
        errors.extend(self._check_max_length_attribute(**kwargs))
        return errors
    def _check_max_length_attribute(self, **kwargs):
        # max_length must be present (fields.E120) and a positive integer
        # (fields.E121).
        try:
            max_length = int(self.max_length)
            if max_length <= 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "CharFields must define a 'max_length' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E120',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'max_length' must be a positive integer.",
                    hint=None,
                    obj=self,
                    id='fields.E121',
                )
            ]
        else:
            return []
    def get_internal_type(self):
        return "CharField"
    def to_python(self, value):
        # Strings and None pass through; anything else is coerced to text.
        if isinstance(value, six.string_types) or value is None:
            return value
        return smart_text(value)
    def get_prep_value(self, value):
        value = super(CharField, self).get_prep_value(value)
        return self.to_python(value)
    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length}
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
    """A CharField whose contents must be a comma-separated list of integers."""
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")
    def formfield(self, **kwargs):
        """Use a form-level error message tailored to comma-separated digits."""
        messages = {'invalid': _('Enter only digits separated by commas.')}
        defaults = {'error_messages': messages}
        defaults.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateTimeCheckMixin(object):
    """
    Mixin adding shared system checks for date/time fields.
    """
    def check(self, **kwargs):
        errors = super(DateTimeCheckMixin, self).check(**kwargs)
        errors.extend(self._check_mutually_exclusive_options())
        errors.extend(self._check_fix_default_value())
        return errors
    def _check_mutually_exclusive_options(self):
        """auto_now, auto_now_add and default may not be combined (fields.E160)."""
        options = (self.auto_now_add, self.auto_now, self.has_default())
        enabled = sum(1 for option in options if option not in (None, False))
        if enabled <= 1:
            return []
        return [
            checks.Error(
                "The options auto_now, auto_now_add, and default "
                "are mutually exclusive. Only one of these options "
                "may be present.",
                hint=None,
                obj=self,
                id='fields.E160',
            )
        ]
    def _check_fix_default_value(self):
        """Hook for subclasses to warn about fixed default values."""
        return []
class DateField(DateTimeCheckMixin, Field):
    """
    A datetime.date field. auto_now / auto_now_add replace the stored value
    with today's date on save (see pre_save).
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid date format. It must be "
                     "in YYYY-MM-DD format."),
        'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
                          "but it is an invalid date."),
    }
    description = _("Date (without time)")
    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        # Auto-updated fields can't be edited and are never required on forms.
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(DateField, self).__init__(verbose_name, name, **kwargs)
    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        date or datetime value is probably wrong; it's only being evaluated on
        server start-up.
        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            if not timezone.is_naive(value):
                value = timezone.make_naive(value, timezone.utc)
            value = value.date()
        elif isinstance(value, datetime.date):
            # Nothing to do, as dates don't have tz information
            pass
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        # Only warn when the default falls within a day of "now": that
        # pattern strongly suggests the author meant "current date".
        offset = datetime.timedelta(days=1)
        lower = (now - offset).date()
        upper = (now + offset).date()
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []
    def deconstruct(self):
        name, path, args, kwargs = super(DateField, self).deconstruct()
        if self.auto_now:
            kwargs['auto_now'] = True
        if self.auto_now_add:
            kwargs['auto_now_add'] = True
        # editable/blank are implied by auto_now/auto_now_add (see __init__).
        if self.auto_now or self.auto_now_add:
            del kwargs['editable']
            del kwargs['blank']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "DateField"
    def to_python(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            if settings.USE_TZ and timezone.is_aware(value):
                # Convert aware datetimes to the default time zone
                # before casting them to dates (#17742).
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_naive(value, default_timezone)
            return value.date()
        if isinstance(value, datetime.date):
            return value
        try:
            parsed = parse_date(value)
            if parsed is not None:
                return parsed
        except ValueError:
            # Well-formed string, but an impossible date (e.g. 2014-02-30).
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def pre_save(self, model_instance, add):
        # auto_now: refresh on every save; auto_now_add: only on creation.
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateField, self).pre_save(model_instance, add)
    def contribute_to_class(self, cls, name, **kwargs):
        super(DateField, self).contribute_to_class(cls, name, **kwargs)
        # Install get_next_by_<name>/get_previous_by_<name> helpers.
        if not self.null:
            setattr(cls, 'get_next_by_%s' % self.name,
                    curry(cls._get_next_or_previous_by_FIELD, field=self,
                          is_next=True))
            setattr(cls, 'get_previous_by_%s' % self.name,
                    curry(cls._get_next_or_previous_by_FIELD, field=self,
                          is_next=False))
    def get_prep_lookup(self, lookup_type, value):
        # For dates lookups, convert the value to an int
        # so the database backend always sees a consistent type.
        if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
            return int(value)
        return super(DateField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        value = super(DateField, self).get_prep_value(value)
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_date(value)
    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
    """
    A datetime.datetime field. Inherits auto_now/auto_now_add handling from
    DateField but stores full timestamps and is time-zone aware when
    settings.USE_TZ is enabled.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
        'invalid_date': _("'%(value)s' value has the correct format "
                          "(YYYY-MM-DD) but it is an invalid date."),
        'invalid_datetime': _("'%(value)s' value has the correct format "
                              "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
                              "but it is an invalid date/time."),
    }
    description = _("Date (with time)")
    # __init__ is inherited from DateField
    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        date or datetime value is probably wrong; it's only being evaluated on
        server start-up.
        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            # Warn when the default is within 10 seconds of "now".
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc)
        elif isinstance(value, datetime.date):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            lower = datetime.datetime(lower.year, lower.month, lower.day)
            upper = now + second_offset
            upper = datetime.datetime(upper.year, upper.month, upper.day)
            value = datetime.datetime(value.year, value.month, value.day)
        else:
            # No explicit date / datetime value -- no checks necessary
            return []
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []
    def get_internal_type(self):
        return "DateTimeField"
    def to_python(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            # Promote a bare date to midnight of that day.
            value = datetime.datetime(value.year, value.month, value.day)
            if settings.USE_TZ:
                # For backwards compatibility, interpret naive datetimes in
                # local time. This won't work during DST change, but we can't
                # do much about it, so we let the exceptions percolate up the
                # call stack.
                warnings.warn("DateTimeField %s.%s received a naive datetime "
                              "(%s) while time zone support is active." %
                              (self.model.__name__, self.name, value),
                              RuntimeWarning)
                default_timezone = timezone.get_default_timezone()
                value = timezone.make_aware(value, default_timezone)
            return value
        try:
            parsed = parse_datetime(value)
            if parsed is not None:
                return parsed
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_datetime'],
                code='invalid_datetime',
                params={'value': value},
            )
        # Fall back to parsing a plain date string.
        try:
            parsed = parse_date(value)
            if parsed is not None:
                return datetime.datetime(parsed.year, parsed.month, parsed.day)
        except ValueError:
            raise exceptions.ValidationError(
                self.error_messages['invalid_date'],
                code='invalid_date',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def pre_save(self, model_instance, add):
        # auto_now: refresh on every save; auto_now_add: only on creation.
        if self.auto_now or (self.auto_now_add and add):
            value = timezone.now()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateTimeField, self).pre_save(model_instance, add)
    # contribute_to_class is inherited from DateField, it registers
    # get_next_by_FOO and get_prev_by_FOO
    # get_prep_lookup is inherited from DateField
    def get_prep_value(self, value):
        value = super(DateTimeField, self).get_prep_value(value)
        value = self.to_python(value)
        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            # For backwards compatibility, interpret naive datetimes in local
            # time. This won't work during DST change, but we can't do much
            # about it, so we let the exceptions percolate up the call stack.
            try:
                name = '%s.%s' % (self.model.__name__, self.name)
            except AttributeError:
                name = '(unbound)'
            warnings.warn("DateTimeField %s received a naive datetime (%s)"
                          " while time zone support is active." %
                          (name, value),
                          RuntimeWarning)
            default_timezone = timezone.get_default_timezone()
            value = timezone.make_aware(value, default_timezone)
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts datetimes into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_datetime(value)
    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateTimeField}
        defaults.update(kwargs)
        return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
    """
    A fixed-precision decimal field backed by decimal.Decimal, configured by
    ``max_digits`` and ``decimal_places``.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a decimal number."),
    }
    description = _("Decimal number")
    def __init__(self, verbose_name=None, name=None, max_digits=None,
                 decimal_places=None, **kwargs):
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(verbose_name, name, **kwargs)
    def check(self, **kwargs):
        errors = super(DecimalField, self).check(**kwargs)
        # Validate each option on its own first; only compare them against
        # each other when both are individually valid.
        digits_errors = self._check_decimal_places()
        digits_errors.extend(self._check_max_digits())
        if not digits_errors:
            errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
        else:
            errors.extend(digits_errors)
        return errors
    def _check_decimal_places(self):
        # decimal_places must be present (fields.E130) and a non-negative
        # integer (fields.E131).
        try:
            decimal_places = int(self.decimal_places)
            if decimal_places < 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'decimal_places' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E130',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'decimal_places' must be a non-negative integer.",
                    hint=None,
                    obj=self,
                    id='fields.E131',
                )
            ]
        else:
            return []
    def _check_max_digits(self):
        # max_digits must be present (fields.E132) and a positive integer
        # (fields.E133).
        try:
            max_digits = int(self.max_digits)
            if max_digits <= 0:
                raise ValueError()
        except TypeError:
            return [
                checks.Error(
                    "DecimalFields must define a 'max_digits' attribute.",
                    hint=None,
                    obj=self,
                    id='fields.E132',
                )
            ]
        except ValueError:
            return [
                checks.Error(
                    "'max_digits' must be a positive integer.",
                    hint=None,
                    obj=self,
                    id='fields.E133',
                )
            ]
        else:
            return []
    def _check_decimal_places_and_max_digits(self, **kwargs):
        # fields.E134: the fractional part can't exceed the total digits.
        if int(self.decimal_places) > int(self.max_digits):
            return [
                checks.Error(
                    "'max_digits' must be greater or equal to 'decimal_places'.",
                    hint=None,
                    obj=self,
                    id='fields.E134',
                )
            ]
        return []
    def deconstruct(self):
        name, path, args, kwargs = super(DecimalField, self).deconstruct()
        if self.max_digits is not None:
            kwargs['max_digits'] = self.max_digits
        if self.decimal_places is not None:
            kwargs['decimal_places'] = self.decimal_places
        return name, path, args, kwargs
    def get_internal_type(self):
        return "DecimalField"
    def to_python(self, value):
        if value is None:
            return value
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )
    def _format(self, value):
        # Strings are assumed to be pre-formatted; numbers are formatted to
        # the field's configured precision.
        if isinstance(value, six.string_types):
            return value
        else:
            return self.format_number(value)
    def format_number(self, value):
        """
        Formats a number into a string with the requisite number of digits and
        decimal places.
        """
        # Method moved to django.db.backends.utils.
        #
        # It is preserved because it is used by the oracle backend
        # (django.db.backends.oracle.query), and also for
        # backwards-compatibility with any external code which may have used
        # this method.
        from django.db.backends import utils
        return utils.format_number(value, self.max_digits, self.decimal_places)
    def get_db_prep_save(self, value, connection):
        return connection.ops.value_to_db_decimal(self.to_python(value),
                self.max_digits, self.decimal_places)
    def get_prep_value(self, value):
        value = super(DecimalField, self).get_prep_value(value)
        return self.to_python(value)
    def formfield(self, **kwargs):
        defaults = {
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'form_class': forms.DecimalField,
        }
        defaults.update(kwargs)
        return super(DecimalField, self).formfield(**defaults)
class DurationField(Field):
    """Stores timedelta objects.
    Uses interval on postgres, INTERVAL DAY TO SECOND on Oracle, and bigint
    of microseconds on other databases.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "[DD] [HH:[MM:]]ss[.uuuuuu] format.")
    }
    description = _("Duration")
    def get_internal_type(self):
        return "DurationField"
    def to_python(self, value):
        """
        Convert strings (via parse_duration) or timedeltas to
        datetime.timedelta; raise ValidationError for anything else.
        """
        if value is None:
            return value
        if isinstance(value, datetime.timedelta):
            return value
        try:
            parsed = parse_duration(value)
        except ValueError:
            pass
        else:
            if parsed is not None:
                return parsed
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def get_db_prep_value(self, value, connection, prepared=False):
        """
        Return the value to store: the timedelta itself on backends with a
        native duration type, otherwise its total microseconds as an int.
        """
        if connection.features.has_native_duration_field:
            return value
        if value is None:
            return None
        # Use exact integer arithmetic on the timedelta components.
        # Going through value.total_seconds() returns a float and silently
        # loses microsecond precision for large timedeltas.
        return (value.days * 86400 + value.seconds) * 1000000 + value.microseconds
    def get_db_converters(self, connection):
        converters = []
        if not connection.features.has_native_duration_field:
            # Stored as microseconds; convert back to timedelta on read.
            converters.append(connection.ops.convert_durationfield_value)
        return converters + super(DurationField, self).get_db_converters(connection)
    def value_to_string(self, obj):
        """Serialize as a duration string ('' for None)."""
        val = self._get_val_from_obj(obj)
        return '' if val is None else duration_string(val)
    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.DurationField,
        }
        defaults.update(kwargs)
        return super(DurationField, self).formfield(**defaults)
class EmailField(CharField):
    """A CharField that validates its value as an email address."""
    default_validators = [validators.validate_email]
    description = _("Email address")
    def __init__(self, *args, **kwargs):
        # max_length=254 to be compliant with RFCs 3696 and 5321
        kwargs.setdefault('max_length', 254)
        super(EmailField, self).__init__(*args, **kwargs)
    def deconstruct(self):
        name, path, args, kwargs = super(EmailField, self).deconstruct()
        # max_length is kept even when it equals the current default because
        # the default may change in the future.
        return name, path, args, kwargs
    def formfield(self, **kwargs):
        # As with CharField, this will cause email validation to be performed
        # twice.
        defaults = {
            'form_class': forms.EmailField,
        }
        defaults.update(kwargs)
        return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
    """Field whose value is the path of a file (or folder) under ``path``."""
    description = _("File path")

    def __init__(self, verbose_name=None, name=None, path='', match=None,
                 recursive=False, allow_files=True, allow_folders=False, **kwargs):
        self.path = path
        self.match = match
        self.recursive = recursive
        self.allow_files = allow_files
        self.allow_folders = allow_folders
        kwargs.setdefault('max_length', 100)
        super(FilePathField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        errors = super(FilePathField, self).check(**kwargs)
        errors.extend(self._check_allowing_files_or_folders(**kwargs))
        return errors

    def _check_allowing_files_or_folders(self, **kwargs):
        # At least one of allow_files / allow_folders must be enabled,
        # otherwise the field can never match anything.
        if self.allow_files or self.allow_folders:
            return []
        return [
            checks.Error(
                "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
                hint=None,
                obj=self,
                id='fields.E140',
            )
        ]

    def deconstruct(self):
        name, path, args, kwargs = super(FilePathField, self).deconstruct()
        # Only serialize options that differ from their defaults.
        if self.path != '':
            kwargs['path'] = self.path
        if self.match is not None:
            kwargs['match'] = self.match
        if self.recursive is not False:
            kwargs['recursive'] = self.recursive
        if self.allow_files is not True:
            kwargs['allow_files'] = self.allow_files
        if self.allow_folders is not False:
            kwargs['allow_folders'] = self.allow_folders
        if kwargs.get("max_length", None) == 100:
            del kwargs["max_length"]
        return name, path, args, kwargs

    def get_prep_value(self, value):
        value = super(FilePathField, self).get_prep_value(value)
        if value is None:
            return None
        return six.text_type(value)

    def formfield(self, **kwargs):
        options = {
            'path': self.path,
            'match': self.match,
            'recursive': self.recursive,
            'form_class': forms.FilePathField,
            'allow_files': self.allow_files,
            'allow_folders': self.allow_folders,
        }
        options.update(kwargs)
        return super(FilePathField, self).formfield(**options)

    def get_internal_type(self):
        return "FilePathField"
class FloatField(Field):
    """Field storing a Python float."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be a float."),
    }
    description = _("Floating point number")

    def get_prep_value(self, value):
        prepared = super(FloatField, self).get_prep_value(value)
        return None if prepared is None else float(prepared)

    def get_internal_type(self):
        return "FloatField"

    def to_python(self, value):
        """Convert *value* to a float, or raise ValidationError."""
        if value is None:
            return value
        try:
            return float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )

    def formfield(self, **kwargs):
        options = {'form_class': forms.FloatField}
        options.update(kwargs)
        return super(FloatField, self).formfield(**options)
class IntegerField(Field):
    """Field storing a Python int; DB range validators are backend-dependent."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be an integer."),
    }
    description = _("Integer")
    def check(self, **kwargs):
        errors = super(IntegerField, self).check(**kwargs)
        errors.extend(self._check_max_length_warning())
        return errors
    def _check_max_length_warning(self):
        # max_length has no effect on integer columns; warn if it was passed.
        if self.max_length is not None:
            return [
                checks.Warning(
                    "'max_length' is ignored when used with IntegerField",
                    hint="Remove 'max_length' from field",
                    obj=self,
                    id='fields.W122',
                )
            ]
        return []
    @cached_property
    def validators(self):
        # These validators can't be added at field initialization time since
        # they're based on values retrieved from `connection` (the backend's
        # supported integer range for this column type).
        range_validators = []
        internal_type = self.get_internal_type()
        min_value, max_value = connection.ops.integer_field_range(internal_type)
        if min_value is not None:
            range_validators.append(validators.MinValueValidator(min_value))
        if max_value is not None:
            range_validators.append(validators.MaxValueValidator(max_value))
        return super(IntegerField, self).validators + range_validators
    def get_prep_value(self, value):
        value = super(IntegerField, self).get_prep_value(value)
        if value is None:
            return None
        return int(value)
    def get_prep_lookup(self, lookup_type, value):
        # For integer columns a float bound in a gte/lt lookup is equivalent
        # to its ceiling: col >= 3.2 matches the same rows as col >= 4, and
        # col < 3.2 the same rows as col < 4.
        if ((lookup_type == 'gte' or lookup_type == 'lt')
            and isinstance(value, float)):
            value = math.ceil(value)
        return super(IntegerField, self).get_prep_lookup(lookup_type, value)
    def get_internal_type(self):
        return "IntegerField"
    def to_python(self, value):
        # Raises ValidationError (not TypeError/ValueError) for bad input.
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'value': value},
            )
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IntegerField}
        defaults.update(kwargs)
        return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
    """IntegerField stored in a signed 64-bit (8 byte) column."""
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")
    MAX_BIGINT = 9223372036854775807

    def get_internal_type(self):
        return "BigIntegerField"

    def formfield(self, **kwargs):
        # Bound the form field to the signed 64-bit integer range.
        options = {
            'min_value': -BigIntegerField.MAX_BIGINT - 1,
            'max_value': BigIntegerField.MAX_BIGINT,
        }
        options.update(kwargs)
        return super(BigIntegerField, self).formfield(**options)
class IPAddressField(Field):
    """Deprecated IPv4-only address field; use GenericIPAddressField."""
    empty_strings_allowed = False
    description = _("IPv4 address")
    system_check_deprecated_details = {
        'msg': (
            'IPAddressField has been deprecated. Support for it (except in '
            'historical migrations) will be removed in Django 1.9.'
        ),
        'hint': 'Use GenericIPAddressField instead.',
        'id': 'fields.W900',
    }

    def __init__(self, *args, **kwargs):
        # Dotted-quad IPv4 addresses are at most 15 characters long.
        kwargs['max_length'] = 15
        super(IPAddressField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(IPAddressField, self).deconstruct()
        # max_length is forced in __init__, so it is never serialized.
        del kwargs['max_length']
        return name, path, args, kwargs

    def get_prep_value(self, value):
        value = super(IPAddressField, self).get_prep_value(value)
        return None if value is None else six.text_type(value)

    def get_internal_type(self):
        return "IPAddressField"

    def formfield(self, **kwargs):
        options = {'form_class': forms.IPAddressField}
        options.update(kwargs)
        return super(IPAddressField, self).formfield(**options)
class GenericIPAddressField(Field):
    """Field storing an IPv4 or IPv6 address (``protocol`` selects which)."""
    empty_strings_allowed = False
    description = _("IP address")
    default_error_messages = {}
    def __init__(self, verbose_name=None, name=None, protocol='both',
                 unpack_ipv4=False, *args, **kwargs):
        self.unpack_ipv4 = unpack_ipv4
        self.protocol = protocol
        self.default_validators, invalid_error_message = \
            validators.ip_address_validators(protocol, unpack_ipv4)
        # NOTE(review): this assignment mutates the *class-level*
        # default_error_messages dict, so the 'invalid' message is shared by
        # every instance of this field class (last one initialized wins) --
        # confirm that is intended before restructuring.
        self.default_error_messages['invalid'] = invalid_error_message
        # 39 chars covers the longest textual IPv6 representation.
        kwargs['max_length'] = 39
        super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
                                                    **kwargs)
    def check(self, **kwargs):
        errors = super(GenericIPAddressField, self).check(**kwargs)
        errors.extend(self._check_blank_and_null_values(**kwargs))
        return errors
    def _check_blank_and_null_values(self, **kwargs):
        # blank values are stored as NULL, so blank=True requires null=True.
        if not getattr(self, 'null', False) and getattr(self, 'blank', False):
            return [
                checks.Error(
                    ('GenericIPAddressFields cannot have blank=True if null=False, '
                     'as blank values are stored as nulls.'),
                    hint=None,
                    obj=self,
                    id='fields.E150',
                )
            ]
        return []
    def deconstruct(self):
        name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
        # Only serialize options that differ from their defaults.
        if self.unpack_ipv4 is not False:
            kwargs['unpack_ipv4'] = self.unpack_ipv4
        if self.protocol != "both":
            kwargs['protocol'] = self.protocol
        if kwargs.get("max_length", None) == 39:
            del kwargs['max_length']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "GenericIPAddressField"
    def to_python(self, value):
        # Only IPv6 values (they contain ':') are normalized here; IPv4
        # strings pass through unchanged.
        if value and ':' in value:
            return clean_ipv6_address(value,
                self.unpack_ipv4, self.error_messages['invalid'])
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_ipaddress(value)
    def get_prep_value(self, value):
        value = super(GenericIPAddressField, self).get_prep_value(value)
        if value is None:
            return None
        if value and ':' in value:
            try:
                # Normalize IPv6; an invalid address is stored as given.
                return clean_ipv6_address(value, self.unpack_ipv4)
            except exceptions.ValidationError:
                pass
        return six.text_type(value)
    def formfield(self, **kwargs):
        defaults = {
            'protocol': self.protocol,
            'form_class': forms.GenericIPAddressField,
        }
        defaults.update(kwargs)
        return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
    """Boolean field that also allows NULL (None) values."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")

    def __init__(self, *args, **kwargs):
        # null/blank are implied by the field type and always forced on.
        kwargs['null'] = True
        kwargs['blank'] = True
        super(NullBooleanField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
        # __init__ always sets these, so they never need serializing.
        del kwargs['null']
        del kwargs['blank']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "NullBooleanField"

    def to_python(self, value):
        """Map None/bool/int and a few string spellings to True/False/None."""
        if value is None:
            return None
        if value in (True, False):
            # 1 == True and 0 == False, so plain ints take this branch too.
            return bool(value)
        if value == 'None':
            return None
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )

    def get_prep_lookup(self, lookup_type, value):
        # Special-case scalar filter values coming from a Web request (e.g.
        # the admin interface). Lists must already contain proper booleans --
        # make things the right type when constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(NullBooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        value = super(NullBooleanField, self).get_prep_value(value)
        return None if value is None else bool(value)

    def formfield(self, **kwargs):
        options = {
            'form_class': forms.NullBooleanField,
            'required': not self.blank,
            'label': capfirst(self.verbose_name),
            'help_text': self.help_text,
        }
        options.update(kwargs)
        return super(NullBooleanField, self).formfield(**options)
class PositiveIntegerField(IntegerField):
    """IntegerField restricted to non-negative values at the form level."""
    description = _("Positive integer")

    def get_internal_type(self):
        return "PositiveIntegerField"

    def formfield(self, **kwargs):
        options = {'min_value': 0}
        options.update(kwargs)
        return super(PositiveIntegerField, self).formfield(**options)
class PositiveSmallIntegerField(IntegerField):
    """Small-integer column restricted to non-negative values at the form level."""
    description = _("Positive small integer")

    def get_internal_type(self):
        return "PositiveSmallIntegerField"

    def formfield(self, **kwargs):
        options = {'min_value': 0}
        options.update(kwargs)
        return super(PositiveSmallIntegerField, self).formfield(**options)
class SlugField(CharField):
    """CharField validated as a slug; the column is indexed by default."""
    default_validators = [validators.validate_slug]
    description = _("Slug (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('max_length', 50)
        # Index the column unless the caller explicitly opted out.
        kwargs.setdefault('db_index', True)
        super(SlugField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(SlugField, self).deconstruct()
        if kwargs.get("max_length", None) == 50:
            del kwargs['max_length']
        # db_index defaults to True here, so only an explicit False is kept.
        if self.db_index is False:
            kwargs['db_index'] = False
        else:
            del kwargs['db_index']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "SlugField"

    def formfield(self, **kwargs):
        options = {'form_class': forms.SlugField}
        options.update(kwargs)
        return super(SlugField, self).formfield(**options)
class SmallIntegerField(IntegerField):
    """IntegerField stored in a small (typically 16-bit) integer column."""
    description = _("Small integer")
    def get_internal_type(self):
        return "SmallIntegerField"
class TextField(Field):
    """Field for unbounded text."""
    description = _("Text")

    def get_internal_type(self):
        return "TextField"

    def get_prep_value(self, value):
        value = super(TextField, self).get_prep_value(value)
        if value is None or isinstance(value, six.string_types):
            return value
        return smart_text(value)

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means the value's length is
        # validated twice. This is considered acceptable since we want the
        # value in the form field (to pass into the widget, for example).
        options = {'max_length': self.max_length, 'widget': forms.Textarea}
        options.update(kwargs)
        return super(TextField, self).formfield(**options)
class TimeField(DateTimeCheckMixin, Field):
    """Field storing a datetime.time, with optional auto-now behavior."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("'%(value)s' value has an invalid format. It must be in "
                     "HH:MM[:ss[.uuuuuu]] format."),
        'invalid_time': _("'%(value)s' value has the correct format "
                          "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
    }
    description = _("Time")
    def __init__(self, verbose_name=None, name=None, auto_now=False,
                 auto_now_add=False, **kwargs):
        # auto_now / auto_now_add fields are filled in by pre_save(), so
        # they are hidden from forms (editable=False) and may be blank.
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(TimeField, self).__init__(verbose_name, name, **kwargs)
    def _check_fix_default_value(self):
        """
        Adds a warning to the checks framework stating, that using an actual
        time or datetime value is probably wrong; it's only being evaluated on
        server start-up.
        For details see ticket #21905
        """
        if not self.has_default():
            return []
        now = timezone.now()
        if not timezone.is_naive(now):
            now = timezone.make_naive(now, timezone.utc)
        value = self.default
        if isinstance(value, datetime.datetime):
            # The default counts as "fixed now" if it is within +/-10s of
            # the server start-up time.
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc)
        elif isinstance(value, datetime.time):
            second_offset = datetime.timedelta(seconds=10)
            lower = now - second_offset
            upper = now + second_offset
            # Promote the bare time onto today's date so it can be compared.
            value = datetime.datetime.combine(now.date(), value)
            # NOTE(review): if the combined value is aware, value becomes a
            # time again here while lower/upper stay datetimes, so the range
            # comparison below would raise TypeError -- confirm upstream.
            if timezone.is_aware(value):
                value = timezone.make_naive(value, timezone.utc).time()
        else:
            # No explicit time / datetime value -- no checks necessary
            return []
        if lower <= value <= upper:
            return [
                checks.Warning(
                    'Fixed default value provided.',
                    hint='It seems you set a fixed date / time / datetime '
                         'value as default for this field. This may not be '
                         'what you want. If you want to have the current date '
                         'as default, use `django.utils.timezone.now`',
                    obj=self,
                    id='fields.W161',
                )
            ]
        return []
    def deconstruct(self):
        name, path, args, kwargs = super(TimeField, self).deconstruct()
        if self.auto_now is not False:
            kwargs["auto_now"] = self.auto_now
        if self.auto_now_add is not False:
            kwargs["auto_now_add"] = self.auto_now_add
        if self.auto_now or self.auto_now_add:
            # __init__ forces these on for auto fields; don't serialize them.
            del kwargs['blank']
            del kwargs['editable']
        return name, path, args, kwargs
    def get_internal_type(self):
        return "TimeField"
    def to_python(self, value):
        """Convert *value* to a datetime.time, or raise ValidationError."""
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()
        try:
            parsed = parse_time(value)
            if parsed is not None:
                return parsed
        except ValueError:
            # Well-formed string but out-of-range components (e.g. 25:00).
            raise exceptions.ValidationError(
                self.error_messages['invalid_time'],
                code='invalid_time',
                params={'value': value},
            )
        raise exceptions.ValidationError(
            self.error_messages['invalid'],
            code='invalid',
            params={'value': value},
        )
    def pre_save(self, model_instance, add):
        # Auto fields get the current (naive, local) time when saved.
        if self.auto_now or (self.auto_now_add and add):
            value = datetime.datetime.now().time()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(TimeField, self).pre_save(model_instance, add)
    def get_prep_value(self, value):
        value = super(TimeField, self).get_prep_value(value)
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_time(value)
    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.TimeField}
        defaults.update(kwargs)
        return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
    """CharField validated as a URL."""
    default_validators = [validators.URLValidator()]
    description = _("URL")

    def __init__(self, verbose_name=None, name=None, **kwargs):
        kwargs.setdefault('max_length', 200)
        super(URLField, self).__init__(verbose_name, name, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(URLField, self).deconstruct()
        # Drop max_length from the serialized form when it is the default.
        if kwargs.get("max_length", None) == 200:
            del kwargs['max_length']
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        # As with CharField, URL validation runs twice (model + form).
        options = {'form_class': forms.URLField}
        options.update(kwargs)
        return super(URLField, self).formfield(**options)
class BinaryField(Field):
    """Field storing raw binary data; never editable through forms."""
    description = _("Raw binary data")
    empty_values = [None, b'']

    def __init__(self, *args, **kwargs):
        kwargs['editable'] = False
        super(BinaryField, self).__init__(*args, **kwargs)
        if self.max_length is not None:
            self.validators.append(validators.MaxLengthValidator(self.max_length))

    def deconstruct(self):
        name, path, args, kwargs = super(BinaryField, self).deconstruct()
        # editable is forced off in __init__, so it is never serialized.
        del kwargs['editable']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "BinaryField"

    def get_default(self):
        if self.has_default() and not callable(self.default):
            return self.default
        default = super(BinaryField, self).get_default()
        # Normalize the empty-string default to empty bytes.
        return b'' if default == '' else default

    def get_db_prep_value(self, value, connection, prepared=False):
        value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
        if value is None:
            return value
        return connection.Database.Binary(value)

    def value_to_string(self, obj):
        """Binary data is serialized as base64"""
        return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')

    def to_python(self, value):
        # A text value is assumed to be base64-encoded data.
        if isinstance(value, six.text_type):
            return six.memoryview(b64decode(force_bytes(value)))
        return value
class UUIDField(Field):
    """Field storing a uuid.UUID, as a native uuid column or 32-char hex."""
    default_error_messages = {
        'invalid': _("'%(value)s' is not a valid UUID."),
    }
    description = 'Universally unique identifier'
    empty_strings_allowed = False

    def __init__(self, verbose_name=None, **kwargs):
        # Stored as 32 hex characters on backends without a native uuid type.
        kwargs['max_length'] = 32
        super(UUIDField, self).__init__(verbose_name, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(UUIDField, self).deconstruct()
        # max_length is forced in __init__, so it is never serialized.
        del kwargs['max_length']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "UUIDField"

    def get_db_prep_value(self, value, connection, prepared=False):
        if isinstance(value, six.string_types):
            value = uuid.UUID(value.replace('-', ''))
        if isinstance(value, uuid.UUID):
            if connection.features.has_native_uuid_field:
                return value
            return value.hex
        return value

    def to_python(self, value):
        """Convert *value* to a uuid.UUID, or raise ValidationError.

        AttributeError is caught as well as ValueError so that non-string
        input (e.g. an int, which makes uuid.UUID call .replace on it)
        raises a proper ValidationError instead of leaking an unrelated
        exception from the uuid constructor.
        """
        if value and not isinstance(value, uuid.UUID):
            try:
                return uuid.UUID(value)
            except (AttributeError, ValueError):
                raise exceptions.ValidationError(
                    self.error_messages['invalid'],
                    code='invalid',
                    params={'value': value},
                )
        return value

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.UUIDField,
        }
        defaults.update(kwargs)
        return super(UUIDField, self).formfield(**defaults)
| [
"multics_luo@163.com"
] | multics_luo@163.com |
ebc550669115d5534bc61b8f5cd01e7b0b38ed40 | c32d6a87858139a66dde152097e333eee92a08de | /csrv/model/actions/play_event_action.py | a57f67402f82243d5b293609aa208f7bb54e38fa | [
"Apache-2.0"
] | permissive | cpjolicoeur/CentralServer | 79b8b5a3a3ed4aa29167ecdcb4e40983a1d5e598 | e377c65d8f3adf5a2d3273acd4f459be697aea56 | refs/heads/master | 2020-12-29T00:12:31.245099 | 2014-03-08T07:33:38 | 2014-03-08T07:33:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | """Base actions for the players to take."""
from csrv.model.actions import action
from csrv.model import cost
from csrv.model import errors
from csrv.model import events
from csrv.model import game_object
from csrv.model import parameters
class PlayEventAction(action.Action):
  """Action: pay the costs for an event card, then play and trash it."""
  DESCRIPTION = '[click]: Play an event'
  COST_CLASS = cost.EventCost
  def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
    """Pay costs via the base class, then resolve the event from the grip."""
    action.Action.resolve(
        self,
        ignore_clicks=ignore_clicks,
        ignore_all_costs=ignore_all_costs)
    # Order matters: the card leaves the grip, is revealed, takes effect,
    # and is then trashed before the play is logged.
    self.player.grip.remove(self.card)
    self.card.is_faceup = True
    self.card.play()
    self.card.trash()
    self.game.log('The runner plays %s' % self.card.NAME, self.card.game_id)
  @property
  def description(self):
    # Human-readable label for this action in menus/logs.
    return 'Play %s' % self.card.NAME
| [
"mrroach@okmaybe.com"
] | mrroach@okmaybe.com |
de74d6e820b635b6bbf1e43e970cb33fe5450482 | 0bfb74494054dc9e0774618687b5d234e6db6a92 | /ch12/gen_throw.py | fa2bf2ecf95d8d411c7a56100871ebf2f7390a2c | [] | no_license | FSSlc/AdvancePython | dddee32524cf1071a36eb4316efc2b53185645e3 | 61ed98f951fc877cc01e43a50488fc91b775c0f0 | refs/heads/master | 2022-11-06T10:53:07.812770 | 2020-06-25T23:43:24 | 2020-06-25T23:43:24 | 273,885,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | #!/usr/bin/env python
# coding: utf-8
def gen_func():
    """Generator demo: an exception thrown into the first yield is swallowed,
    the remaining values are still produced, and the return value becomes
    the StopIteration value."""
    try:
        yield "user"
    except Exception:
        # Swallow whatever was thrown in via gen.throw() and resume normally.
        pass
    for item in (2, 3):
        yield item
    return 'user'
if __name__ == '__main__':
    gen = gen_func()
    print(next(gen))  # "user" -- paused at the first yield
    # Thrown at the first yield: caught inside gen_func, which resumes and
    # yields 2 (the return value of throw(), not printed here).
    gen.throw(Exception, "download error")
    print(next(gen))  # 3
    # Thrown at the last yield: nothing catches it there, so this raises and
    # the traceback propagates out of the script (intentional demo).
    gen.throw(Exception, "download error")
| [
"fsslc235@gmail.com"
] | fsslc235@gmail.com |
8a5602d811f96072614c0fb9de6a9ce93ee0dc75 | 53095b893d0a80614aa43ab5a50b07d929fa4cf8 | /HRC_archive/LETG/extract_pha_file.py | 1cc120ec5884157a1a500717ef8a97b01372c3bf | [] | no_license | chandra-mta/HRC | e4cd297373e5e8f6dfb0e014275f5cee82b326b6 | aafb1f552394efd3367635a968f8df1e26b6f1c8 | refs/heads/main | 2021-10-23T23:06:45.964300 | 2021-10-21T10:10:09 | 2021-10-21T10:10:09 | 154,149,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,854 | py | #!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# extract_pha_file.py: create a pha2 file and a tg directory for LETG observation #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Apr 16, 2021 #
# #
#########################################################################################
import sys
import os
import string
import re
import math
import time
import astropy.io.fits as pyfits
#
#--- from ska
#
from Ska.Shell import getenv, bash
#
#--- set ciao environment
#
ciaoenv = getenv('source /soft/ciao/bin/ciao.csh -o', shell='tcsh')
#
#--- reading directory list: each line of dir_list is "<value> : <name>" and
#--- is turned into a module-level variable below (e.g. house_keeping,
#--- mta_dir, bin_dir are expected to come from this file)
#
path = '/data/aschrc6/wilton/isobe/Project9/Scripts/LETG/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec trusts the contents of dir_list completely; any
    # Python expression in that file would be evaluated here.
    exec("%s = %s" %(var, line))
#
#--- append path to a private folders
#
sys.path.append(mta_dir)
sys.path.append(bin_dir)
import mta_common_functions as mcf
#
#--- temp writing file name (unique-ish per run, based on the epoch second)
#
rtail = int(time.time())
zspace = '/tmp/zspace' + str(rtail)
#
#--- a couple of other settings
#
acmd = '/usr/bin/env PERL5LIB="" ;'
ilist = ['i', 's']
#------------------------------------------------------------------------------------
#-- extract_pha_file: create a pha2 file and a tg directory for LETG observation --
#------------------------------------------------------------------------------------
def extract_pha_file():
    """
    create a pha2 file and a tg directory for LETG observation
    input: none, but read from /data/hrc/<inst>
    output: /data/hrc/<inst>/<obsid>/repro/*pha2.fits
    /data/hrc/<inst>/<obsid>/repro/tg/*
    side effects: copies each obsid into the current working directory,
    runs chandra_repro there, then removes the local copy;
    failed obsids are appended to <house_keeping>/no_repro_list
    """
#
#--- get obsid list; d_list = [hrci_list, hrcs_list]
#
    d_list = make_process_list()
    for k in range(0, 2):
        dlen = len(d_list[k])
        for j in range(0, dlen):
            obsid = d_list[k][j]
            if j > 0:
                # clean up the previous obsid's local working copy
                cmd = 'rm -rf ./' + d_list[k][j-1]
                os.system(cmd)
            print("OBSID: " + str(obsid))
#
#--- original data directory name
#
            d_dir = '/data/hrc/' + ilist[k] + '/' + str(obsid)
#
#--- copy the data to the local directory
#
            cmd = 'cp -r ' + d_dir + ' ./' + str(obsid)
            os.system(cmd)
#
#--- remove the files with "new" in the file names
#
            cmd = 'rm -f ' + str(obsid) + '/secondary/*new*'
            os.system(cmd)
#
#--- extract_pha_file chandra_repro
#
            try:
                pcmd = 'chandra_repro indir=' + str(obsid) + ' outdir='
                pcmd = pcmd + str(obsid) + '/new cleanup=no'
                cmd = acmd + pcmd
                bash(cmd, env=ciaoenv)
            except:
#
#--- if failed, keep the obsid in the record
#--- NOTE(review): the bare except also hides the underlying chandra_repro
#--- error; consider logging the exception before continuing
#
                ofile = house_keeping + 'no_repro_list'
                with open(ofile, 'a') as fo:
                    eline = mcf.add_leading_zero(obsid, 5) + '\n'
                    fo.write(eline)
                continue
#
#--- move pha2 file and tg directroy to the repro directory
#
            outdir = d_dir + '/repro/'
            cmd = 'mkdir -p ' + outdir #--- just in a case analysis dir does not exist
            os.system(cmd)
            cmd = 'mv -f ' + str(obsid) + '/new/*_pha2.fits* ' + outdir + '/.'
            os.system(cmd)
            cmd = 'mv -f ' + str(obsid) + '/new/tg ' + outdir + '/.'
            os.system(cmd)
#
#--- change permission etc
#
            cmd = 'chmod -R 775 ' + outdir
            os.system(cmd)
            cmd = 'chgrp -R hat ' + outdir
            os.system(cmd)
#
#--- remove the copied data
#
            cmd = 'rm -rf ./' + str(obsid)
            os.system(cmd)
#
#--- fix naming to 5 digit obsid
#
            correct_naming(obsid, ilist[k])
#
#--- send email
#
#    line = 'HRC pha process finished\n'
#    with open(zspace, 'w') as fo:
#        fo.write(line)
#
#    cmd = 'cat ' + zspace + '|mailx -s "Subject: HRC PHA finished" tisobe@cfa.harvard.edu'
#    os.system(cmd)
#    cmd = 'rm -rf ' + zspace
#    os.system(cmd)
#------------------------------------------------------------------------------------
#-- make_process_list: create a list of unprocessed obsid lists --
#------------------------------------------------------------------------------------
def make_process_list():
    """
    create a list of unprocessed obsid lists
    input: none (reads <house_keeping>/no_repro_list and /data/hrc/<i|s>/)
    output: a list of lists of [<hrc_i obsids>, <hrc_s obsids>] -- LETG
    observations that have an evt1 file but no pha2 product yet,
    excluding obsids that previously failed repro
    """
#
#--- create a dict: obsid <---> grating
#
    [obs_list, dict_inst, dict_grat] = make_inst_dict()
#
#--- read failed repro obsid list
#
    ifile = house_keeping + 'no_repro_list'
    out = mcf.read_data_file(ifile)
    rfailed = []
    for ent in out:
        rfailed.append(ent)
    save = []
    for inst in ['i', 's']:
        hdir = '/data/hrc/' + inst + '/'
        olist = []
#
#--- choose data with evt1 exists in the directory
#
        cmd = 'ls -d ' + hdir + '*/secondary/*evt1.fits* > ' + zspace + ' 2>/dev/null'
        os.system(cmd)
        out = mcf.read_data_file(zspace, remove=1)
        for ent in out:
            atemp = re.split('\/', ent)
            obsid = atemp[-3]
#
#--- check whether this obsid was previously checked, but failed to get the data
#
            test = mcf.add_leading_zero(obsid, 5)
            if test in rfailed:
                continue
#
#--- check whether the pha2 file already exists
#
            cmd = 'ls ' + hdir + obsid + '/repro/* > ' + zspace + ' 2>/dev/null'
            os.system(cmd)
            with open(zspace, 'r') as f:
                ochk = f.read()
            cmd = 'rm -rf ' + zspace
            os.system(cmd)
            mc = re.search('pha2', ochk)
#
#--- check whether it is an grating observation
#
            if mc is None:
                try:
                    iobsid = str(int(float(obsid)))
                    grat = dict_grat[iobsid]
                except:
                    # obsid not in the ocat dictionary (or malformed)
                    grat = 'NONE'
#
#--- special treatment for 6**** level calib observations: the ocat entry
#--- is unreliable, so read the grating from the evt1 header instead
#
                if obsid[0] == '6':
                    try:
                        grat = check_grating_from_header(inst, obsid)
                    except:
                        grat = 'NONE'
                if grat == 'LETG':
                    olist.append(obsid)
        save.append(olist)
    return save
#------------------------------------------------------------------------------------
#-- make_inst_dict: create obsid <---> inst, obsid <---> grating dictionaries -
#------------------------------------------------------------------------------------
def make_inst_dict():
    """
    create obsid <---> inst, obsid <---> grating dictionaries
    input: none, but read from /data/mta4/obs_ss/sot_ocat.out
    output: a list of <obsid list>, <dict of instruments>, <dict of grating>
    note: only letg is taken as grating. hetg is ignored
    (any non-LETG entry is recorded as 'NONE')
    """
    ifile = '/data/mta4/obs_ss/sot_ocat.out'
    data = mcf.read_data_file(ifile)
    obs_list = []
    dict_inst = {}
    dict_grat = {}
    for ent in data:
        # keep only HRC observations that are archived or observed
        mc1 = re.search('HRC', ent)
        if mc1 is None:
            continue
        mc2 = re.search('archived', ent)
        mc3 = re.search('observed', ent)
        if (mc2 is None) and (mc3 is None):
            continue
        mc4 = re.search('LETG', ent)
        if mc4 is not None:
            grat = 'LETG'
        else:
            grat = 'NONE'
        # ocat lines are '^'-separated; field 1 is the obsid, field 12 the
        # instrument (positions assumed from the sot_ocat.out format)
        atemp = re.split('\^', ent)
        obsid = atemp[1].strip()
        obsid = str(int(float(obsid)))
        inst = atemp[12].strip()
        if inst in ['HRC-I', 'HRC-S']:
            obs_list.append(obsid)
            dict_inst[obsid] = inst
            dict_grat[obsid] = grat
    return [obs_list, dict_inst, dict_grat]
#------------------------------------------------------------------------------------
#-- check_grating_from_header: checking grating from a header of the evt1 file of obsid
#------------------------------------------------------------------------------------
def check_grating_from_header(hrc, obsid):
    """
    checking grating from a header of the evt1 file of obsid
    input: hrc --- either i or s
    obsid --- obsid
    output: grat --- grating, such as LETG, HETG, or NONE
    """
    cmd = ' ls /data/hrc/' + hrc + '/' + obsid + '/secondary/*evt1.fits* > ' + zspace + ' 2>/dev/null'
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)
    try:
        fits = data[0].strip()
    except:
        # no evt1 file was found (empty or missing listing)
        return 'NONE'
    flist = pyfits.open(fits)
    try:
        grat = flist[1].header['GRATING']
    except:
        # GRATING keyword missing from the header -- treat as no grating
        grat = 'NONE'
    flist.close()
    return grat
#------------------------------------------------------------------------------------------------
#-- correct_naming: check repro directory and correct wrongly named fits and par file
#------------------------------------------------------------------------------------------------
def correct_naming(obsid, inst):
    """
    check repro directory and correct wrongly named fits and par file
    input: obsid --- obsid
    inst --- instrument. either "i" or "s"
    output: mis-named files under the obsid's repro directory are renamed
    to use the 5-digit (zero-padded) obsid, and fits files gzipped
    """
#
#--- nothing to do if the obsid is already 5 digits long
#
    cobsid = str(int(float(obsid)))
    if len(cobsid) == 5:
        return

    lobsid = mcf.add_leading_zero(obsid, 5)

    cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/repro/hrcf* >' + zspace
    os.system(cmd)

    data = mcf.read_data_file(zspace, remove=1)
    for ent in data:
        atemp = re.split('\/', ent)
        fname = atemp[-1]
        mc = re.search(lobsid, fname)
        if mc is not None:
            continue
        else:
            atemp = re.split('hrcf', fname)
            btemp = re.split('_', atemp[1])
            sobs = btemp[0]
            new = fname.replace(sobs, lobsid)
#
#--- bug fix: the original referenced an undefined name `sdir` here
#--- (NameError at runtime); the files listed above live in the obsid's
#--- "repro" directory, so rename them in place there
#
            full = '/data/hrc/' + inst + '/' + lobsid + '/repro/' + new
            cmd = 'mv ' + ent + ' ' + full
            os.system(cmd)
#
#--- compress fits files
#
    cmd = 'gzip /data/hrc/' + inst + '/' + lobsid + '/repro/*fits'
    os.system(cmd)
#------------------------------------------------------------------------------------
if __name__ == "__main__":
extract_pha_file()
| [
"bspitzbart@cfa.harvard.edu"
] | bspitzbart@cfa.harvard.edu |
b23b322eb6d1bfaf868d64b5b3e0f1fc530701a6 | d783f5e6b02d91990e30b7e9edb41b9725f540cf | /intro-cs/intro-to-programming/exercises/ex_06_02.py | f7ca5b3c1d5f30d22db7facaa3d2df4d7888af4c | [
"MIT"
] | permissive | solanyn/cs-courses | 1c4ac7ea146b816731f7016c988423a8a5c7d180 | db10f8d9ad9a4f00a661c58eed7d2423ff9871bc | refs/heads/main | 2023-07-05T16:51:49.384749 | 2021-08-06T04:01:38 | 2021-08-06T04:01:38 | 332,115,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | """Exercise 2: Given that fruit is a string, what does fruit[:] mean?"""
# fruit[:] is a slice covering the whole string: a copy of fruit
# containing every character (equivalent to fruit itself, since strings
# are immutable)
| [
"andrewchen1520@gmail.com"
] | andrewchen1520@gmail.com |
27bb5b5457ce8249495d9fcc5263dd01e827aed6 | e7d2c2c7fbcffc3b4e8976f01b354f794fc3b71d | /bmga/utils/formatting.py | 6ab11a6dfcbe02e87b651ba6371989e3ca2c1403 | [
"BSD-3-Clause"
] | permissive | vituocgia/boxme-api | 41da50fcec12089e59a29786b3bcff6c9b169d99 | 10c8054a223f124a85e70669d17313e3a2991226 | refs/heads/master | 2020-03-08T19:18:21.829490 | 2018-04-06T09:11:30 | 2018-04-06T09:11:30 | 128,347,542 | 0 | 0 | null | 2018-04-27T04:43:38 | 2018-04-06T05:24:05 | Python | UTF-8 | Python | false | false | 822 | py | from __future__ import unicode_literals
from django.utils import dateformat
from bmga.utils.timezone import make_naive, aware_datetime
from dateutil.parser import parse as mk_datetime # flake8: noqa
def format_datetime(dt):
"""
RFC 2822 datetime formatter
"""
return dateformat.format(make_naive(dt), 'r')
def format_date(d):
"""
RFC 2822 date formatter
"""
# workaround because Django's dateformat utility requires a datetime
# object (not just date)
dt = aware_datetime(d.year, d.month, d.day, 0, 0, 0)
return dateformat.format(dt, 'j M Y')
def format_time(t):
"""
RFC 2822 time formatter
"""
# again, workaround dateformat input requirement
dt = aware_datetime(2000, 1, 1, t.hour, t.minute, t.second)
return dateformat.format(dt, 'H:i:s O')
| [
"dotiendiep@gmail.com"
] | dotiendiep@gmail.com |
fe36ebadacdc7dccccf286e4f14226a17838d061 | e5b4d9e3240b3302db47d770260dd1bcff93d5ef | /BatchShell/batchshell/batchrun.py | e7b2149a60d35511889eb259d80525464d279240 | [
"MIT"
] | permissive | fczj/python-S14 | a28f255b13c15f17d632cf920bca53ed5035752a | 53a1350dae7c797134463c8c42edae44bb5427cb | refs/heads/master | 2021-01-02T09:23:48.195642 | 2017-10-30T12:35:57 | 2017-10-30T12:35:57 | 99,205,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,829 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-8-26 下午10:08
# @Author : xiongzhibiao
# @Email : 158349411@qqcom
# @File : batchrun.py
# @Software: PyCharm
import sys
sys.path.append('/home/xiongzhibiao/github/python-S14/Common/common')
import paramiko
from multiprocessing import Process,Pool,Lock
import threading
import yaml
import argparse
import sys
import time
from utility import run_time
GROUP_CONF = './../conf/group_conf'
HOST_CONF = './../conf/host_conf'
class Conf():
def __init__(self):
with open(GROUP_CONF,'r') as f:
self.groups = yaml.load(f)
with open(HOST_CONF,'r') as f:
self.hosts= yaml.load(f)
self.all_group = self.groups.keys()
def get_host(self,host_name):
return self.hosts[host_name]
def get_group_all_host(self,group_name):
return self.groups.get(group_name)
class RunCmd():
def __init__(self,user,host,password,port=22):
self.user = user
self.port = port
self.host = host
self.password = password
def run_cmd(self,cmd,lock=''):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=self.host, port=self.port,
username=self.user,password=self.password)
stdin, stdout, stderr = ssh.exec_command(cmd)
res,err = stdout.read(),stderr.read()
result = res if res else err
ssh.close()
if not lock == '':
lock.acquire()
print ('='*80)
print (self.host)
print(result.decode())
if not lock == '':
lock.release()
def put(self,local,remote):
pass
def get(self,remote,local):
pass
def parse():
parser = argparse.ArgumentParser()
parser.add_argument("-host", "--hostname", nargs='*',dest="host", help="hostname")
parser.add_argument("-g", "--group", nargs='*',dest="group", help="groupname")
parser.add_argument("-cmd", "--command", dest="cmd", help="command")
parser.add_argument("-a", "--action", dest="action", help="action")
args = parser.parse_args()
return args
@run_time
def thread(all_host,cmd):
#多线程版本
lock = threading.Lock()
thread_lst = []
for host in all_host:
conn_info = conf.get_host(host)
cmd_obj = RunCmd(**conn_info)
t = threading.Thread(target=cmd_obj.run_cmd,args=(cmd,lock,))
t.start()
thread_lst.append(t)
for i in thread_lst:
i.join()
def print_log(*args):
print ('='*80)
for i in args:
print (i)
@run_time
def process(all_host,cmd):
lock = Lock()
process_lst = []
for host in all_host:
conn_info = conf.get_host(host)
cmd_obj = RunCmd(**conn_info)
p = Process(target=cmd_obj.run_cmd,args=(cmd,lock,))
p.start()
process_lst.append(p)
for pro in process_lst:
pro.join()
@run_time
def serial(all_host,cmd):
#串行版本
for host in all_host:
conn_info = conf.get_host(host)
cmd_obj = RunCmd(**conn_info)
cmd_obj.run_cmd(cmd)
if __name__ == "__main__":
conf = Conf()
args = parse()
all_host = []
if args.host is not None:
all_host = args.host
if args.group is not None:
groups = args.group
for i in groups:
hosts = conf.get_group_all_host(i)
all_host.extend(hosts)
if args.host is None and args.group is None:
print ('没有可操作对象')
sys.exit(9)
all_host = list(set(all_host))
cmd = args.cmd
#serial(all_host,cmd) #三台主机-1.1秒
thread(all_host,cmd) #三台主机-0.4秒
# process(all_host,cmd) #多线程和多进程时间差不多
| [
"158349411@qq.com"
] | 158349411@qq.com |
814949dee15c29811a0f3cb2131cabed7e0f87f0 | 52b17383556c615f6e9ea8888efbfc3c0bfbe726 | /tests/__init__.py | 2aac0df30f37c2d7f9d7ec10bada80f300ecf814 | [
"Apache-2.0"
] | permissive | nerdfiles/bloom-filter | 725f164bd5b99339f128d34f6b355f4efdbdc0f0 | affd787cec3804fb806146eb464f77f37ed5b85e | refs/heads/master | 2021-09-15T03:16:02.775833 | 2018-05-25T00:02:02 | 2018-05-25T00:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | #-----------------------------------------------------------------------------#
# __init__.py #
# #
# Copyright (c) 2017-2018, Rajiv Bakulesh Shah, original author. #
# All rights reserved. #
#-----------------------------------------------------------------------------#
| [
"noreply@github.com"
] | nerdfiles.noreply@github.com |
c2f5d321adc23f137909c4b7b15ea48ad477e908 | d9874a4d69027dd401314828a9d19e686a62b31d | /stamps.py | 799b1f7b60be0575dc81351a41ef953ca79ab61a | [] | no_license | 09o/ProgrammingBasis | e9612b7ac59aadf6cf406dbbb7bb42c5d9502e42 | 6fdd6b69653cc6d89b6841883e6795191a742847 | refs/heads/master | 2020-04-03T13:03:30.455309 | 2018-11-14T08:44:54 | 2018-11-14T08:44:54 | 155,272,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | # Define a procedure, stamps, which takes as its input a positive integer in
# pence and returns the number of 5p, 2p and 1p stamps (p is pence) required
# to make up that value. The return value should be a tuple of three numbers
# (that is, your return statement should be followed by the number of 5p,
# the number of 2p, and the nuber of 1p stamps).
#
# Your answer should use as few total stamps as possible by first using as
# many 5p stamps as possible, then 2 pence stamps and finally 1p stamps as
# needed to make up the total.
#
# (No fair for USians to just say use a "Forever" stamp and be done with it!)
#
def stamps(n):
i = j = k = 0
if n != 0:
while i*5 <= n:
i += 1
i -= 1
n -= i*5
while j*2 <= n:
j += 1
j -= 1
n -= j*2
if n == 1:
k = 1
else:
k = 0
else:
return i,j,k
return i,j,k
# FOR TEST
print(stamps(8))
#>>> (1, 1, 1) # one 5p stamp, one 2p stamp and one 1p stamp
print(stamps(5))
#>>> (1, 0, 0) # one 5p stamp, no 2p stamps and no 1p stamps
print(stamps(29))
#>>> (5, 2, 0) # five 5p stamps, two 2p stamps and no 1p stamps
print(stamps(0))
#>>> (0, 0, 0) # no 5p stamps, no 2p stamps and no 1p stamps
| [
"igsober@gmail.com"
] | igsober@gmail.com |
e16ebb8614032c72852153165835f46bd16e6965 | 787407f3afc659a75915352368374670c265134f | /3dcnn_gwak/network.py | fa19e211c452b485ec440f4b9c7fd24b26478e2a | [] | no_license | swilso421/shiny-barnacle | 39c189d380488718cb56084225be705c7dce52fb | 01285a82fefa32f6ec06cd299c8bb994d593ab10 | refs/heads/master | 2020-03-18T04:09:26.326083 | 2018-07-18T20:29:17 | 2018-07-18T20:29:17 | 134,272,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,445 | py | import tensorflow as tf
NUM_CLASSES = 18
def CNN_3D(input_node, stddev = 0.03, name = '3DCNN'):
filter1 = (3, 3, 3, 1, 32)
filter2 = (3, 3, 3, 32, 64)
filter3 = (3, 3, 3, 64, 128)
filter4 = (3, 3, 3, 128, 256)
weights1 = tf.Variable(tf.truncated_normal(filter1, stddev = stddev), name = name + '_W1')
weights2 = tf.Variable(tf.truncated_normal(filter2, stddev = stddev), name = name + '_W2')
weights3 = tf.Variable(tf.truncated_normal(filter3, stddev = stddev), name = name + '_W3')
weights4 = tf.Variable(tf.truncated_normal(filter4, stddev = stddev), name = name + '_W4')
weights5 = tf.Variable(tf.truncated_normal([2048, 1024], stddev = stddev), name = name + '_W5')
weights6 = tf.Variable(tf.truncated_normal([1024, 1024], stddev = stddev), name = name + '_W6')
weights7 = tf.Variable(tf.truncated_normal([1024, NUM_CLASSES], stddev = stddev), name = name + '_W7')
bias1 = tf.Variable(tf.truncated_normal(32), name = name + '_B1')
bias2 = tf.Variable(tf.truncated_normal(64), name = name + '_B2')
bias3 = tf.Variable(tf.truncated_normal(128), name = name + '_B3')
bias4 = tf.Variable(tf.truncated_normal(256), name = name + '_B4')
bias5 = tf.Variable(tf.truncated_normal(1024), name = name + '_B5')
bias6 = tf.Variable(tf.truncated_normal(1024), name = name + '_B6')
bias7 = tf.Variable(tf.truncated_normal(NUM_CLASSES), name = name + '_B7')
window = [1, 2, 2, 2, 1]
#Convolutional Layer #1
conv1 = tf.nn.conv3d(input_node,
filter = weights1,
strides = [1, 1, 1, 1, 1],
padding = 'SAME'
name = name + '_CONV1')
conv1 += bias1
relu1 = tf.nn.relu(conv1, name = name + '_RELU1')
pool1 = tf.nn.max_pool3d(relu1,
ksize = window,
strides = window,
padding = 'SAME',
name = name + '_POOL1')
#Convolutional Layer #2
conv2 = tf.nn.conv3d(pool1,
filter = weights2,
strides = [1, 1, 1, 1, 1],
padding = 'SAME'
name = name + '_CONV2')
conv2 += bias2
relu2 = tf.nn.relu(conv2, name = name + '_RELU2')
pool2 = tf.nn.max_pool3d(relu2,
ksize = window,
strides = window,
padding = 'SAME',
name = name + '_POOL2')
#Convolutional Layer #3
conv3 = tf.nn.conv3d(pool2,
filter = weights3,
strides = [1, 1, 1, 1, 1],
padding = 'SAME'
name = name + '_CONV3')
conv3 += bias3
relu3 = tf.nn.relu(conv3, name = name + '_RELU3')
pool3 = tf.nn.max_pool3d(relu3,
ksize = window,
strides = window,
padding = 'SAME',
name = name + '_POOL3')
#Convolutional Layer #4
conv4 = tf.nn.conv3d(pool3,
filter = weights4,
strides = [1, 1, 1, 1, 1],
padding = 'SAME'
name = name + '_CONV4')
conv4 += bias4
relu4 = tf.nn.relu(conv4, name = name + '_RELU4')
pool4 = tf.nn.max_pool3d(relu4,
ksize = window,
strides = window,
padding = 'SAME',
name = name + '_POOL4')
flattened = tf.reshape(pool4, [-1, 2048])
fc5 = tf.nn.matmul(flattened, weights5) + bias5
fc5_relu = tf.nn.relu(fc5, name = name + '_FC5')
if 0.0 < dropout < 1.0:
fc5_relu = tf.nn.dropout(fc5_relu, dropout, name = name + '_DROP1')
fc6 = tf.nn.matmul(fc5_relu, weights6) + bias6
fc6_relu = tf.nn.relu(fc6, name = name + '_FC6')
if 0.0 < dropout < 1.0:
fc6_relu = tf.nn.dropout(fc6_relu, dropout, name = name + '_DROP2')
fc7 = tf.nn.matmul(fc6_relu, weights7) + bias7
return fc7
| [
"swilso421@gmail.com"
] | swilso421@gmail.com |
dede7897aa89f5106650a0cc96628e0b6108d67e | 9ee537601cbca4be70870d60910ce808da4f766e | /PROJECT-WORKING VERSION-issue.py | bcf71964dd23b6d934705ca27ceb5766335b8449 | [] | no_license | sanjanab003/Library-Management | 063b98e98d50bbd16e358329b7683707561965c6 | eafcbc7814a7933c6243ef6cec02b803c4810a2d | refs/heads/master | 2020-08-01T01:51:46.160900 | 2019-10-20T04:48:08 | 2019-10-20T04:48:08 | 210,818,939 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,914 | py | c=0
areMem=()
c1=0
t=()
u=""
p=""
q=""
b=0
a=0
user=""
pas=""
z=0
t1=()
import mysql.connector
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="0025",
database="library"
)
mycursor = mydb.cursor()
def regName():
print("Please register")
global u
global p
u=input("Enter your name: ")
p=input("Enter a password: ")
# function to register
def reg():
add_member = ("INSERT INTO member "
"(Name, M_Pass) "
"VALUES (%s, %s)")
data_member = (u,p)
mycursor.execute(add_member, data_member)
mydb.commit()
# function to loop through members' table
def loopMem():
mycursor.execute("select * from member")
global c
global t1
row = mycursor.fetchone()
while row is not None:
if(row[0]==pas and row[1]==user):
c=c+1
t1=t1+(row,)
row=mycursor.fetchone()
else:
row = mycursor.fetchone()
def areMem():
global areMem
areMem=input("Are you a already a member of Sanchansu(Type in 'y' for yes and 'n' for no): ")
def ask():
global user
global pas
user=input("Enter username: ")
pas=input("Enter password: ")
# function to loop through the books
def loopBook(d):
mycursor.execute("select * from books")
global c1
row=mycursor.fetchone()
while row is not None:
if(row[1]==d):
global t
t=t+(row,)
c1=c1+1
row=mycursor.fetchone()
else:
row=mycursor.fetchone()
# function to ask for user's choice
def inp():
global q
q=input("Enter the name of the book you want to borrow: ")
# function to issue book
def iss(e,f):
op= """ update books set Qty = %s where Book_Name = %s """
op3= """ update member set Books_borrowed = %s where M_Pass = %s"""
dat=(e,f)
dat3=(f,pas)
mycursor.execute(op,dat)
mycursor.execute(op3,dat3)
if(e==0):
op2= """ update books set Status = %s where Book_Name= %s """
dat2= ("Not available", f)
mycursor.execute(op2,dat2)
else:
pass;
mydb.commit()
print("Book borrowed successfully")
def tup():
global a
global b
for x in t:
a=x[1]
b=x[4]-1
def tup1():
global z
for p in t1:
z=p[2]
def issue(r):
inp()
loopBook(q)
tup()
if(c1>0):
print(t)
if(b+1>0):
q2=input("Do you want to borrow this book: ")
if(q2=='y'):
if(r=="NULL"):
iss(b,a)
else:
print("Please return book before borrowing")
else:
pass;
else:
print("Sorry Book currently not available")
else:
print("Sorry no matching results")
# checking for membership
areMem()
if(areMem=="y"):
ask()
loopMem()
tup1()
if(c==1):
print("Welcome,", user)
print("Books borrowed by you: ", z)
print("Books to be returned: ", z)
issue(z)
else:
print("Incorrect password or username")
elif(areMem=="n"):
regName()
loopMem()
if(c==1):
print("Username or password is already taken")
else:
reg()
print("You are now a member of Sanchansu.")
print()
issue()
else:
print("Please give a valid answer")
| [
"noreply@github.com"
] | sanjanab003.noreply@github.com |
8a7e729fa2cc60c00b630bcd1e18b05703782c80 | 88d33f643f0116f0557a60d409183f66f528cb53 | /blogs/migrations/0043_auto_20190603_2054.py | ea9de7e0b4c9625696d3006dd57c7d5843a0b532 | [
"MIT"
] | permissive | vikrantgoutam17/my_blogs | 13e8e531a35173fbb22c07456b72444ba8391ee3 | 64b48eb8c466f46ac21e7ee137bbe8033f6a28ab | refs/heads/master | 2022-04-15T17:28:48.982593 | 2020-04-11T07:46:09 | 2020-04-11T07:46:09 | 254,733,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | # Generated by Django 2.2.1 on 2019-06-03 15:24
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blogs', '0042_auto_20190603_1452'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 3, 20, 54, 4, 48767), verbose_name='date published'),
),
migrations.CreateModel(
name='likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('blog_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blogs.blog')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"vikrantgoutam17@gmail.com"
] | vikrantgoutam17@gmail.com |
17f9e6c0e56e8413c26b56d50634d33d99988d02 | 0dcb743d6409478d01706f48616df6a1c089c0f0 | /Project/code/loadbalancer-server.py | 097411dbfc2d4e9664c83cfd5f70331a2709c1ef | [] | no_license | marcotcr/systems | 063b2b78b99e9e0a7733760125a999106d7fc6ba | b35358cbc83daa7900f049bf544bd4df500b0749 | refs/heads/master | 2021-01-20T10:50:28.518802 | 2014-03-14T16:42:32 | 2014-03-14T16:42:32 | 15,811,702 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,224 | py | from __future__ import division
import sys
sys.path.append('gen-py/autoscale')
sys.path.append('gen-py/')
import LoadBalancer
from autoscale.ttypes import *
from autoscale import LoadBalancer
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import collections
import socket
import time
import argparse
import numpy as np
import os
import random
import threading
from itertools import cycle
class LoadBalancerHandler:
def __init__(self):
self.node_times = {}
self.nodelist = []
self.n_cycle = cycle(range(0))
self.n = 0
self.n_threads = 10
pass
def Finish(self, node):
time.sleep(self.node_times[node])
self.n_running[node] -= 1
def GetNode(self):
# print 'GetNode'
node = self.nodelist[self.n_cycle.next()]
while self.n_running[node] >= self.n_threads:
node = self.nodelist[self.n_cycle.next()]
self.n_running[node] += 1
self.n += 1
t = threading.Thread(target=self.Finish, args=(node,))
t.start()
return node
def SetNodes(self, state):
print 'SetNodes', state
self.node_times = {}
self.previous_state = state
for node, quota in state.iteritems():
self.node_times[node] = float(quota)
self.nodelist = self.node_times.keys()
self.n_running = collections.defaultdict(lambda: 0)
self.n_cycle = cycle(range(len(self.nodelist)))
def NumRequests(self):
return self.n
def main():
parser = argparse.ArgumentParser(description='TODO')
parser.add_argument('-p', '--port', type=int, required=True, help="My port")
args = parser.parse_args()
port = args.port
handler = LoadBalancerHandler()
processor = LoadBalancer.Processor(handler)
transport = TSocket.TServerSocket(port=port)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TThreadPoolServer(processor, transport, tfactory, pfactory)
server.setNumThreads(10)
# server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
print "Starting python server..."
server.serve()
print "done!"
if __name__ == '__main__':
main()
| [
"marcotcr@gmail.com"
] | marcotcr@gmail.com |
7617b75c7a76b5009e6dca9b3e54dd53393f66d3 | b76876afc024f33491c0e9838366eb8934ae1e65 | /blog/tests.py | 8aae2a6a15cdd63918dad96cb0ab16cb648e644d | [] | no_license | wiremine/djang-modelfun | 03fb2ffc68a01822c7e767f9e51913be938d8a8f | a0188b53b8355839b78306ab9a8d7058cd00bafd | refs/heads/master | 2020-05-16T03:37:14.994587 | 2011-01-20T03:30:21 | 2011-01-20T03:30:21 | 977,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | from django.test import TestCase
from django.test.client import Client
from django.core import mail
# mail.outbox will contain emails during tests
from django.http import HttpResponse
from django.core.urlresolvers import clear_url_caches
from django.conf import settings
class ControllerTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_controller_urls(self):
response = self.client.get('/blog/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, "Hello World")
response = self.client.get('/blog2/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, "Hello World")
| [
"wiremine@gmail.com"
] | wiremine@gmail.com |
3ffdef70c90547fe0a5a46765f97abf944b606a2 | 2ebbeef48fc7094f2089c5a82119d02af65a82dc | /codegolf/temperatures/solution.py | 1131f415d0ff594a97c6a114b11d5ce49eea0708 | [] | no_license | marty-Wallace/CodinGame | b93812c253795a3fba8ecc3bfa01f6f6b5047d26 | 6eeb5b5d1cff15c475cc2a7b6b8bcac68367b9fd | refs/heads/master | 2020-07-01T22:07:44.289785 | 2017-03-01T04:59:45 | 2017-03-01T04:59:45 | 74,251,457 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | input()
print(min(map(int,input().split()),key=lambda x:x*x-x/9,default=0)) | [
"martin.v.wallace@ieee.org"
] | martin.v.wallace@ieee.org |
e7573bed0af5548993771609353cbf19e6c6588c | 9dbd215e72f410f810606109632db30556d07f74 | /BuildingSystems/Resources/Scripts/JModelica/Buildings/Examples/BuildingTherma1Zone1DCylinder.py | b5b655006868817fcaba5def05d7357c4c6a71aa | [
"BSD-3-Clause"
] | permissive | TonyJava/BuildingSystems | a7bc822e2277ef87fd7dbce2f2dd685bd311b013 | 56ccd43d1a8de8e2f6dfaed3a2cef9e3edf27b7f | refs/heads/master | 2020-04-24T21:48:34.956614 | 2019-02-22T13:22:34 | 2019-02-22T13:22:34 | 172,290,652 | 1 | 0 | BSD-3-Clause | 2019-02-24T03:18:32 | 2019-02-24T03:18:31 | null | UTF-8 | Python | false | false | 1,817 | py | # paths and info
import os, sys
homeDir = os.environ['HOMEPATH']
jmodDir = os.environ['JMODELICA_HOME']
workDir = "Desktop" # has to be adapted by the user !!!
moLiDir = os.path.join(homeDir, workDir, "BuildingSystems")
# give the path to directory where package.mo is stored
moLibs = [os.path.join(jmodDir, "ThirdParty\MSL\Modelica"),
os.path.join(moLiDir,"BuildingSystems"),
]
print(sys.version)
print(all(os.path.isfile(os.path.join(moLib, "package.mo")) for moLib in moLibs))
print(os.getcwd())
# compile model to fmu
from pymodelica import compile_fmu
model_name = 'BuildingSystems.Buildings.Examples.BuildingThermal1Zone1DCylinder'
my_fmu = compile_fmu(model_name, moLibs)
# simulate the fmu and store results
from pyfmi import load_fmu
myModel = load_fmu(my_fmu)
opts = myModel.simulate_options()
opts['solver'] = "CVode"
opts['ncp'] = 8760
opts['result_handling']="file"
opts["CVode_options"]['discr'] = 'BDF'
opts['CVode_options']['iter'] = 'Newton'
opts['CVode_options']['maxord'] = 5
opts['CVode_options']['atol'] = 1e-5
opts['CVode_options']['rtol'] = 1e-5
res = myModel.simulate(start_time=0.0, final_time=31536000, options=opts)
# plotting of the results
import pylab as P
fig = P.figure(1)
P.clf()
# building
# temperatures
y1 = res['ambient.TAirRef']
y2 = res['building.zone.TAir']
y3 = res['building.zone.TOperative']
t = res['time']
P.subplot(2,1,1)
P.plot(t, y1, t, y2, t, y3)
P.legend(['ambient.TAirRef','building.zone.TAir','building.zone.TOperative'])
P.ylabel('Temperature (K)')
P.xlabel('Time (s)')
# Heating and cooling load
y1 = res['building.zone.Q_flow_heating']
y2 = res['building.zone.Q_flow_cooling']
P.subplot(2,1,2)
P.plot(t, y1, t, y2)
P.legend(['building.zone.Q_flow_heating','building.zone.Q_flow_cooling'])
P.ylabel('power (W)')
P.xlabel('Time (s)')
P.show()
| [
"nytsch@udk-berlin.de"
] | nytsch@udk-berlin.de |
76d236d6b4d1a10f3a74eee09e3478109205972e | 830db802b4cce9d4b5a34c8351caf6573bf5f385 | /basic_functions.py | 2da3b0a3d1d10dea785c7a1b0ddb5a0be4d7addb | [] | no_license | 317002/Normal-Modes | 856e33dbe103c45ab17e9c0c417a7f8bbd64aed8 | dc599e1c4abc035bae8abdced600119ec42fc7df | refs/heads/master | 2020-08-31T22:59:41.116273 | 2019-11-12T01:46:38 | 2019-11-12T01:46:38 | 218,807,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,471 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 8 14:38:11 2019
@author: Nathan Richard
"""
import numpy as np
from math import floor
def gen_k(n):
index = []
ind = []
for i in range(0,n+2):
ind += [0]
for i in range(0,n+2):
index += [ind]
for i in range(0,len(index)):
for j in range(0,len(index)):
index[i][j] = n**2
index = np.array(index)
c = 0
for i in range(1,n+1):
for j in range(1,n+1):
index[i][j] = c
c += 1
K = []
for i in range(1,n+1):
for j in range(1,n+1):
m = []
for l in range(0,n**2+1):
m += [0]
m[index[i][j]] = -4
m[index[i+1][j]] = 1
m[index[i-1][j]] = 1
m[index[i][j+1]] = 1
m[index[i][j-1]] = 1
K += [m[:-1]]
K = np.array(K)
return(-K)
def gen_square_posistion_matrix(N):
if N%2 != 1:
#checking if the value is odd
raise Exception('N must be odd')
'''So the matrix can be centered around zero'''
if N == 1:
a = 1
return np.asarray([1]),np.asarray([1])
else:
a = floor(N/2)#gives me the exstent of the x vector
x = np.linspace(-a,a,N)
y = x.copy()
x = np.zeros((x.shape[0],1),dtype=x.dtype) + x
y = x.transpose()
return (x,y) | [
"ndr1005@wildcats.unh.edu"
] | ndr1005@wildcats.unh.edu |
ae79c1df65cf49985b7a5950ad1c6f7c40f0cad6 | 3c80d771e2ca592a54db7b7a07e1d75180cf423d | /TestFolder/TestClasses.py | d033c998f4426f1cf07f305b681d683ff6e0310b | [] | no_license | TobiasTiedtke/Projekt_dsd_gui | 4873c759739f1011fd473eb69d1426225f123d21 | 83c02efd8776353db003f23404b25dc52df8bb33 | refs/heads/master | 2020-08-26T16:49:35.754859 | 2020-01-29T10:21:43 | 2020-01-29T10:21:43 | 217,078,904 | 0 | 0 | null | 2020-01-29T13:05:13 | 2019-10-23T14:32:50 | Python | UTF-8 | Python | false | false | 436 | py | import os, sys
def SingleBrowse():
#browsing for a folder and changing it to a string
filePath = "/informatik2/students/home/6kornell/Schreibtisch/bitbots_behavior-master/bitbots_head_behavior/src/bitbots_head_behavior/actions/look_at.py"
f = open(filePath, 'r')
for line in f:
line = str(line)
if line.startswith("class"):
line = line.split(" ")[1]
line = line.split("(")[0]
print(line)
SingleBrowse()
| [
"fwwx652@informatik.uni-hamburg.de"
] | fwwx652@informatik.uni-hamburg.de |
84c66116e8584f0c2d0ad42b6fdb367629748331 | a51453f9d5f7a93d5ce7f909e96507d24a34b40c | /small_knowledge.py/extract_num.py | e8159d0681311895eb5599a7cf0383cf6312cf4b | [] | no_license | justkissme/crawler | 99a228019836700c4cdb548d6ab76f3e3b1c0ddb | b0a6964d657486f6af90519f885352945e107409 | refs/heads/master | 2021-03-22T15:57:46.195433 | 2020-03-15T01:49:10 | 2020-03-15T01:49:10 | 247,380,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | link = 'https://www.xslou.com/yuedu/9356/'
id_list = list(filter(str.isdigit,link))
book_id = ''.join(id_list)
print(type(book_id))
print(book_id) | [
"noreply@github.com"
] | justkissme.noreply@github.com |
780ef530a2c311710a82743db16d99d91edf5d3b | 35387a94b884466188c525a11cf7bf4dbf3c291f | /deepspeed/pt/deepspeed_constants.py | 1ba3dd482e25b1782b88a86ceab5d7a308f7bf4d | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | liuyq47/DeepSpeed | 6808653e9adf490799fe9fe630d8cf20bc4a51b2 | cd68e6e55a2155ffaea681b18e012aafd686dce4 | refs/heads/master | 2022-12-12T07:35:00.362124 | 2020-08-10T17:09:40 | 2020-08-10T17:09:40 | 278,738,026 | 0 | 0 | MIT | 2020-08-26T16:14:54 | 2020-07-10T21:37:45 | Python | UTF-8 | Python | false | false | 8,682 | py | """
Copyright (c) Microsoft Corporation
Licensed under the MIT license.
"""
#############################################
# Routes
#############################################
ROUTE_TRAIN = "train"
ROUTE_EVAL = "eval"
ROUTE_PREDICT = "predict"
ROUTE_ENCODE = "encode"
#############################################
# Batch size
#############################################
TRAIN_BATCH_SIZE = "train_batch_size"
TRAIN_BATCH_SIZE_DEFAULT = None
#############################################
# Optimizer and lr scheduler
#############################################
OPTIMIZER = "optimizer"
OPTIMIZER_TYPE_DEFAULT = None
OPTIMIZER_PARAMS = "params"
TYPE = "type"
LEGACY_FUSION = "legacy_fusion"
LEGACY_FUSION_DEFAULT = False
SCHEDULER = "scheduler"
SCHEDULER_TYPE_DEFAULT = None
SCHEDULER_PARAMS = "params"
MAX_GRAD_NORM = 'max_grad_norm'
#############################################
# Optimizer and lr scheduler
#############################################
ZERO_ALLOW_UNTESTED_OPTIMIZER = "zero_allow_untested_optimizer"
ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT = False
#############################################
# Torch distributed constants
#############################################
TORCH_DISTRIBUTED_DEFAULT_PORT = "29500"
# Steps
STEPS_PER_PRINT = "steps_per_print"
STEPS_PER_PRINT_DEFAULT = 10
#########################################
# Training micro batch size per GPU
#########################################
# Batch size for one training step. This is used when the
# TRAIN_BATCH_SIZE cannot fit in GPU memory to determine
# the number of gradient accumulation steps. By default, this
# is set to None. Users can configure in ds_config.json as below example:
TRAIN_MICRO_BATCH_SIZE_PER_GPU = '''
TRAIN_MICRO_BATCH_SIZE_PER_GPU is defined in this format:
"train_micro_batch_size_per_gpu": 1
'''
TRAIN_MICRO_BATCH_SIZE_PER_GPU = "train_micro_batch_size_per_gpu"
TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = None
#########################################
# Gradient Accumulation
#########################################
# Gradient accumulation feature. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
GRADIENT_ACCUMULATION_FORMAT = '''
Gradient Accumulation should be of the format:
"gradient_accumulation_steps": 1
'''
GRADIENT_ACCUMULATION_STEPS = "gradient_accumulation_steps"
GRADIENT_ACCUMULATION_STEPS_DEFAULT = None
# DeepSpeed CSR gradient sparsity
SPARSE_GRADIENTS = "sparse_gradients"
SPARSE_GRADIENTS_DEFAULT = False
#########################################
# FP16 support
#########################################
# FP16 feature. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
FP16_FORMAT = '''
FP16 parameters should be of the format:
"fp16": {
"enabled": true,
"loss_scale": 0,
"initial_scale_power": 32,
"loss_scale_window": 1000,
"hysteresis": 2,
"min_loss_scale": 1
}
'''
FP16 = "fp16"
FP16_ENABLED = "enabled"
FP16_ENABLED_DEFAULT = False
# FP16 loss scale, zero means using dynamic scaling
FP16_LOSS_SCALE = "loss_scale"
FP16_LOSS_SCALE_DEFAULT = 0
# FP16 initial dynamic scale loss power
FP16_INITIAL_SCALE_POWER = "initial_scale_power"
FP16_INITIAL_SCALE_POWER_DEFAULT = 32
# FP16 loss scale window
FP16_LOSS_SCALE_WINDOW = "loss_scale_window"
FP16_LOSS_SCALE_WINDOW_DEFAULT = 1000
# FP16 hysteresis
FP16_HYSTERESIS = "hysteresis"
FP16_HYSTERESIS_DEFAULT = 2
# FP16 min loss scale
FP16_MIN_LOSS_SCALE = "min_loss_scale"
FP16_MIN_LOSS_SCALE_DEFAULT = 1
#########################################
# Apex AMP support
#########################################
# Use Apex AMP for mixed precision support, all parameters (other than 'enabled') will be passed to
# amp.initialize(model, optimizer, **amp_params)
# See apex documentation for supported parameters/features: https://nvidia.github.io/apex/amp.html#apex.amp.initialize
AMP_FORMAT = '''
"amp" {
"enabled: true,
"opt_level": "O1",
...
}
'''
AMP = "amp"
AMP_ENABLED = "enabled"
AMP_ENABLED_DEFAULT = False
#########################################
# Gradient clipping
#########################################
# Gradient clipping. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
GRADIENT_CLIPPING_FORMAT = '''
Gradient clipping should be enabled as:
"gradient_clipping": 1.0
'''
GRADIENT_CLIPPING = 'gradient_clipping'
GRADIENT_CLIPPING_DEFAULT = 0.
#########################################
# ZeRO optimization
#########################################
# ZeRO optimization. By default, this optimization is not enabled.
# Users have to configure the desired optimization (0 means disabled) in params.json as below example:
ZERO_FORMAT = '''
ZeRO optimization should be enabled as:
"session_params": {
"zero_optimization": [0|1|2],
"zero_all_gather_size": 200
}
'''
ZERO_OPTIMIZATION = 'zero_optimization'
ZERO_OPTIMIZATION_DEFAULT = 0
ZERO_OPTIMIZATION_OPTIMIZER_STATES = 1
ZERO_OPTIMIZATION_GRADIENTS = 2
ZERO_OPTIMIZATION_WEIGHTS = 3
MAX_STAGE_ZERO_OPTIMIZATION = ZERO_OPTIMIZATION_GRADIENTS
ZERO_REDUCE_SCATTER = "zero_reduce_scatter"
ZERO_REDUCE_SCATTER_DEFAULT = True
ZERO_MAX_ELEMENTS_PER_COMM = "zero_max_elements_per_comm"
ZERO_MAX_ELEMENTS_PER_COMM_DEFAULT = 5e8
ALLGATHER_SIZE = 'allgather_size'
ALLGATHER_SIZE_DEFAULT = 500000000
#########################################
# FP32 AllReduce
#########################################
# FP32 All reduce. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
FP32_ALLREDUCE_FORMAT = '''
FP32 Allreduce should be enabled as:
"fp32_allreduce": true
'''
FP32_ALLREDUCE = "fp32_allreduce"
FP32_ALLREDUCE_DEFAULT = False
#########################################
# Scale/predivide gradients before allreduce
#########################################
# Prescale gradients. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
PRESCALE_GRADIENTS_FORMAT = '''
Gradient prescaling should be enabled as:
"prescale_gradients": true
'''
PRESCALE_GRADIENTS = "prescale_gradients"
PRESCALE_GRADIENTS_DEFAULT = False
GRADIENT_PREDIVIDE_FACTOR_FORMAT = '''
Gradient predivide factor should be enabled as:
"gradient_predivide_factor": 1.0
'''
GRADIENT_PREDIVIDE_FACTOR = "gradient_predivide_factor"
GRADIENT_PREDIVIDE_FACTOR_DEFAULT = 1.0
#########################################
# Disable AllGather
#########################################
# Disable AllGather. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
DISABLE_ALLGATHER_FORMAT = '''
Disable AllGather should be enabled as:
"disable_allgather": true
'''
DISABLE_ALLGATHER = "disable_allgather"
DISABLE_ALLGATHER_DEFAULT = False
#########################################
# Dump DeepSpeed state
#########################################
# Dump State. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
DUMP_STATE_FORMAT = '''
Dump state should be enabled as:
"dump_state": true
'''
DUMP_STATE = 'dump_state'
DUMP_STATE_DEFAULT = False
#########################################
# Vocabulary size
#########################################
# Vocabulary size.
# Users can configure in ds_config.json as below example:
VOCABULARY_SIZE_FORMAT = '''
Vocabulary size can be specified as:
"vocabulary_size": 1024
'''
VOCABULARY_SIZE = 'vocabulary_size'
VOCABULARY_SIZE_DEFAULT = None
#########################################
# Wall block breakdown
#########################################
# Wall clock breakdown. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
WALL_CLOCK_BREAKDOWN_FORMAT = '''
Wall block breakdown should be enabled as:
"wall_clock_breakdown": true
'''
WALL_CLOCK_BREAKDOWN = 'wall_clock_breakdown'
WALL_CLOCK_BREAKDOWN_DEFAULT = False
MEMORY_BREAKDOWN = 'memory_breakdown'
MEMORY_BREAKDOWN_DEFAULT = False
#########################################
# Tensorboard
#########################################
# Tensorboard. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
TENSORBOARD_FORMAT = '''
Tensorboard can be specified as:
"tensorboard": {
"enabled": true,
"output_path": "/home/myname/foo",
"job_name": "model_lr2e-5_epoch3_seed2_seq64"
}
'''
TENSORBOARD = "tensorboard"
# Tensorboard enable signal
TENSORBOARD_ENABLED = "enabled"
TENSORBOARD_ENABLED_DEFAULT = False
# Tensorboard output path
TENSORBOARD_OUTPUT_PATH = "output_path"
TENSORBOARD_OUTPUT_PATH_DEFAULT = ""
# Tensorboard job name
TENSORBOARD_JOB_NAME = "job_name"
TENSORBOARD_JOB_NAME_DEFAULT = "DeepSpeedJobName"
| [
"noreply@github.com"
] | liuyq47.noreply@github.com |
343ee30c9eeda3d263414b6da3a2aaaee932a506 | 74bd3a29425a6dae085af292d71b0bcbee5a0744 | /music_eda_STFT.py | b1246e477fffb8439fddcd019a0936dc6ce1720e | [] | no_license | tacyi/graduation_p | 3d42746d3e87f08754203a0a9fec19df95297324 | ce9112537165f8cd40f4253d938ffef8cd1b4728 | refs/heads/master | 2020-11-27T17:10:57.098501 | 2019-12-22T08:08:13 | 2019-12-22T08:08:13 | 229,536,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/12/3 18:38
# @Author : tacyi
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
from scipy.io import wavfile
import os
# 画短时傅里叶声谱图并保存图片,代码相关参考链接https://blog.csdn.net/qq7835144/article/details/88887576,
# 最主要的疑问是我不确定
# (sample_rate, x) = wavfile.read(rd)中的总采样数x是否是
# signal.stft(x, **params)的参数x;官方给了例子我看的很懵懂。
# 官方链接https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.stft.html#scipy.signal.stft
# 下面是我的实现
def stft_specgram(g, n, **params):
    """Compute and save the STFT spectrogram image for one audio track.

    g: genre name (sub-directory of the dataset, e.g. 'blues').
    n: track index within the genre (zero-padded to 5 digits).
    Extra keyword arguments are forwarded to scipy.signal.stft.
    """
    rd = 'C:/Users/tacyi/Desktop/genres/' + g + '/' + g + '.' + str(n).zfill(5) + '.wav'
    sd = './picture/STFT/' + g
    sp = './picture/STFT/' + g + '/' + g + '.' + str(n).zfill(5) + '.png'
    # Skip the expensive STFT + plotting entirely when the image already
    # exists (the original computed everything and only then checked).
    if os.path.exists(sp):
        return
    (sample_rate, x) = wavfile.read(rd)
    # f: frequency-bin array; t: time-frame array; zxx: complex STFT matrix.
    f, t, zxx = signal.stft(x, **params)
    plt.pcolormesh(t, f, np.abs(zxx))
    plt.colorbar()
    plt.title('STFT Magnitude')
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    plt.tight_layout()
    if not os.path.exists(sd):
        os.makedirs(sd)
    plt.savefig(sp)  # save the spectrogram image
    plt.clf()  # clear the figure so the next track starts from a blank canvas
# The ten genres of the GTZAN-style dataset; 100 tracks are processed per genre.
genre_list = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock']
for g in genre_list:
    for n in range(100):
        stft_specgram(g, n)
| [
"tacyi@139.com"
] | tacyi@139.com |
55eda8146943aa974f2b0b70a0e47c6a5dbd6c54 | da013048a61c4e34419c0b7ec721203ab1081173 | /large_shirts.py | f01231ceec2dadb643abac4273e53785d9a5964c | [] | no_license | makeTaller/Crash_Course_Excercises | 2c3bd3ac7c50a1830ce087b8187b93d93fab771e | 66b50906e6b33e6ba0d15a8a98ee17ca689392c4 | refs/heads/master | 2020-03-18T04:17:42.342365 | 2018-07-17T22:02:45 | 2018-07-17T22:02:45 | 134,280,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | size = [ "small","medium","large"]
def large_shirts(message="I love python",size="large"):
print("Your shirt will be " + size + " and will read " + message)
large_shirts()
large_shirts("I Love the world!","medium")
| [
"kirktolliver28@gmail.com"
] | kirktolliver28@gmail.com |
0cb89a8adfed56ffbd5bc7113d6cc6a65c390491 | a0c130ab410d2ceebe8b8b28a80cb95a3f33f35c | /aei2/locweat.py | d0cf84e76d6ca9467740c4c393a9c69e6987fe09 | [] | no_license | zkevinfu/emotion-simulator | 1180dd7f5694ceae1c38ad291d49143950fdbce0 | 46679fbc18d7640c808eff388da56700cad07c24 | refs/heads/master | 2020-03-30T02:36:06.748842 | 2018-09-28T00:23:51 | 2018-09-28T00:23:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | import requests
import Queue
import json
def get_loc():
    """Return this machine's (latitude, longitude) via the freegeoip service."""
    loc_url = 'http://freegeoip.net/json'
    r = requests.get(loc_url)
    j = json.loads(r.text)
    return (j['latitude'], j['longitude'])
def get_weat():
    """Return the Dark Sky 'currently' dict for the current location."""
    ds_url_base = 'https://api.darksky.net/forecast'
    # NOTE(review): API key is hard-coded in source; should live in config/env.
    ds_api_key = '12caf1dbd09b0e555c8b4a27339abb75'
    # Only current conditions are needed; exclude the other forecast blocks.
    exclude = '?exclude=minutely,hourly,daily,flags,alerts'
    lat_lon = get_loc()
    latitude, longitude = lat_lon[0], lat_lon[1]
    ds_request = '%s/%s/%s,%s%s'%(ds_url_base, ds_api_key,latitude,longitude,exclude)
    ds_r = requests.get(ds_request)
    ds_j = json.loads(ds_r.text)
    return ds_j['currently']
def get_stim():
    """Map current weather conditions to a queue of (emotion, weight) stimuli.

    Returns a Queue.Queue holding up to two tuples: one derived from the
    weather icon, one from the temperature.
    """
    ds_j = get_weat()
    weat = ds_j['icon']
    temp = ds_j['temperature']
    # Icon groups as reported by the Dark Sky API.
    clear = ['clear-day', 'clear-night', 'wind']
    p_cloudy = ['partly-cloudy-day', 'partly-cloudy-night']
    cloudy = ['cloudy', 'fog']
    rain = ['rain', 'sleet']
    snow = ['snow']
    e_queue = Queue.Queue()
    #TODO finish this
    # Plain membership tests replace the original any(...) generator scans.
    if weat in clear:
        e_queue.put(('joy', 1))
    elif weat in p_cloudy:
        e_queue.put(('sadness', 1))
    elif weat in cloudy:
        e_queue.put(('sadness', 1.5))
    elif weat in rain:
        e_queue.put(('sadness', 1.5))
    elif weat in snow:
        e_queue.put(('joy', 1))
    # Temperature contribution (degrees Fahrenheit, presumably -- TODO confirm).
    if 40 <= temp <= 80:
        e_queue.put(('joy', 1))
    elif temp < 40:
        e_queue.put(('sadness', 1.5))
    elif temp > 80:
        e_queue.put(('anger', 1.5))
    return e_queue
| [
"zkevinfu@gmail.com"
] | zkevinfu@gmail.com |
5c9d3d6c5f48bd56c8f41c7340f23a939dd6540c | 7bf59a25a66578cff650b0435ac95485ff51b43f | /model/couresDB.py | 4e1b59b80a6807a10bccdb4571b6ca3412fc9c41 | [] | no_license | amekaki/cmajorMiniProgram | a02ee5e9fcf93922e13c15ce9d2504211fc4324f | 15517e6d5c8514f8ac262265b77f2e60a0b3b9b9 | refs/heads/master | 2023-03-10T04:10:20.036776 | 2020-12-11T02:49:59 | 2020-12-11T02:49:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # -*- coding: UTF-8 -*-
from model.modelDB import Course
def getAllCourses():
    """Return every course as a list of {"cId": ..., "cname": ...} dicts."""
    res = Course.query.all()
    # A comprehension is clearer than the original list(map(lambda ...)).
    return [{"cId": c.cId, "cname": c.cname} for c in res]
| [
"1150774341@qq.com"
] | 1150774341@qq.com |
03fbeb1450ccc44bd26fc126ce64cfd378980fa0 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_117/1246.py | 70e284873ba26f9f9d0eb663271fb9c4b2097cdc | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | __author__ = 'joranvar'
__problem__ = 'B'
class Field(object):
    """A height-by-width grid of mowed lawn heights (Code Jam "Lawnmower")."""

    def __init__(self, data, height, width):
        self.data = data
        self.height = height
        self.width = width

    def is_cuttable(self):
        """Return True iff every cell reaches the peak of its row or column.

        A cell strictly below both its row maximum and its column maximum
        cannot result from full-row / full-column mower passes.
        """
        row_peak = [max(self.data[r][c] for c in range(self.width))
                    for r in range(self.height)]
        col_peak = [max(self.data[r][c] for r in range(self.height))
                    for c in range(self.width)]
        return all(self.data[r][c] >= min(row_peak[r], col_peak[c])
                   for r in range(self.height)
                   for c in range(self.width))
def read_field(f_in, width, height):
    """Read `height` lines of whitespace-separated ints and build a Field."""
    rows = [[int(token) for token in f_in.readline().split()]
            for _ in range(height)]
    return Field(rows, height, width)
def solve(case, f_in):
    """Solve one test case read from f_in; return the output line as a list.

    case is the 0-based case index (printed 1-based per Code Jam convention).
    """
    # First line of the case: N rows, M columns.
    N, M = list(map(int, f_in.readline().split()))
    field = read_field(f_in, M, N)
    if field.is_cuttable(): return ['Case #{}: YES\n'.format(case + 1)]
    return ['Case #{}: NO\n'.format(case + 1)]
def open_last_file():
    """Open the first existing input file, trying large -> small -> sample.

    Returns (problem_type_suffix, open_file); raises FileNotFoundError when
    none of the candidate inputs exists.
    """
    for problem_type in ['-large', '-small-attempt1', '-sample']:
        try:
            return problem_type, open(__problem__ + problem_type + '.in', 'r')
        except FileNotFoundError:
            pass
    raise FileNotFoundError("No input file found!")
if __name__ == '__main__':
    # Pick the newest available input file and mirror its suffix for output.
    problem_type, f_in = open_last_file()
    print (problem_type)
    f_out = open(__problem__ + problem_type + '.out', 'w')
    # First line of the input is the number of test cases.
    T = int(f_in.readline())
    for case in range(T):
        f_out.writelines(solve(case, f_in))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
486782999f26b2d806dfe00600731c63363f0469 | 5f9d274602d630173ac36227d8a70ed163d7a8d6 | /quickroute/utils.py | aeda2a4660fbeb2a7cdd6960a6d3538fc3826cc8 | [
"Apache-2.0"
] | permissive | jvtm/python-quickroute | 88ef1f594f6da99e1300bf36e6bdc1b16d6ab752 | 758e97cf38200efd649cb797c64c51b608db5de5 | refs/heads/master | 2020-03-30T23:06:24.348614 | 2014-08-14T20:37:54 | 2014-08-14T20:37:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | '''
Python QuickRoute utility functions
'''
from datetime import datetime, timedelta
import json
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that also serializes datetime (ISO 8601) and timedelta (str)."""

    def default(self, obj):
        # datetime -> "2014-08-14T20:37:54"; timedelta -> "1:00:00".
        if isinstance(obj, datetime):
            return obj.isoformat()
        if isinstance(obj, timedelta):
            return str(obj)
        # Anything else falls through to the base class (which raises TypeError).
        return super(DateTimeEncoder, self).default(obj)
| [
"jvtm@kruu.org"
] | jvtm@kruu.org |
f5b01891fa5b5a79a702ba2de7a0f3e064675000 | 6d86d4e07eeed66fdc08f68560dce9696bb35c01 | /app/main/errors.py | d787733609d6910457a6556500d9d042d3fa9ec0 | [] | no_license | Moxikai/law_show | 5d4828a85a2bf1ae530a7e1f3d87e117d2fec4aa | 9819b263a7c22680695d8a04a3992a627a73f585 | refs/heads/master | 2021-01-13T10:30:58.666067 | 2016-10-13T08:47:31 | 2016-10-13T08:47:31 | 68,830,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | #_*_coding:utf-8_*_
from flask import render_template
from . import main
#使用app_errorhandler修饰器,处理全局错误
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'),404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'),500
| [
"zhu-hero@qq.com"
] | zhu-hero@qq.com |
f9184c9bc8feb760a6b06e8e4065cacd924722c5 | 24223cba74ca8c293aaa4381fecf80acd237fc94 | /jensen_comparison/bin/jupyter-kernelspec | 048319db09ad921800f7139bc93ab93c2745267a | [] | no_license | Aequivinius/jensen-comparison | df0f4424160d5c4d37f29eb8939518c96b446b9c | 04910a40869103a618dd4cea5035ff2c9c6797bf | refs/heads/master | 2020-04-30T00:44:18.755131 | 2019-03-19T12:48:10 | 2019-03-19T12:48:10 | 176,510,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | #!/Users/sazerac/jensen_comparison/jensen_comparison/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelspecapp import KernelSpecApp
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(KernelSpecApp.launch_instance())
| [
"ncolic@gmail.com"
] | ncolic@gmail.com | |
7d0942492c486ab43f4c39a5adee4453c034f50e | c1fe97208afe479b7ae1ee67d69866a6911564ca | /AdvCBV/basicapp/admin.py | 046528c9de692ae1d3f199606430ad8437e9c4a1 | [] | no_license | jaindhairyahere/Python_Django | a0a46c57b6ca60d0942ae181fe28ea56bb1ee948 | f170a2e38b78df698a02821a454a3baea0c358a6 | refs/heads/master | 2020-06-18T09:17:56.364928 | 2019-11-02T18:34:12 | 2019-11-02T18:34:12 | 196,249,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from django.contrib import admin
from basicapp.models import School, Student
# Register your models here.
admin.site.register(Student)
admin.site.register(School)
| [
"jaindhairya2001@gmail.com"
] | jaindhairya2001@gmail.com |
6be8285fe34928e223d9dcc5d0e598306f563657 | c762138fc50d2ac66a3fa9314102c9f15c8e0a2e | /S3utils.py | 1029978ca960245b77350ddc4dce7bffa9444663 | [] | no_license | saivamsikayala/garbage-detection-flask | 500492d013a47196dd04acc490ec955893a0560c | 5d380f4468af5de6290e19f555cf5b1223f6495f | refs/heads/master | 2022-11-30T10:10:22.134815 | 2020-08-01T09:37:32 | 2020-08-01T09:37:32 | 283,982,797 | 0 | 1 | null | 2020-08-01T04:30:49 | 2020-07-31T08:24:24 | Python | UTF-8 | Python | false | false | 1,180 | py | import boto3
import uuid
import io
from . import settings
class S3utils():
    """Thin wrapper around a boto3 S3 client configured from project settings."""

    def __init__(self):
        # Each wrapper owns its own boto3 session/client pair.
        self._session = boto3.session.Session()
        self._client = self._session.client('s3',
                region_name = settings.AWS_S3_REGION_NAME,
                endpoint_url = settings.AWS_S3_ENDPOINT_URL,
                aws_access_key_id = settings.AWS_ACCESS_KEY_ID,
                aws_secret_access_key = settings.AWS_SECRET_ACCESS_KEY)

    def getfilename(self):
        """Return a fresh, unique object key under the smartbins/ prefix."""
        return f"smartbins/{uuid.uuid4()}.jpg"

    def upload(self, filepath):
        """Upload *filepath* as a publicly readable object; return its URL."""
        filename = self.getfilename()
        self._client.upload_file(filepath, settings.AWS_STORAGE_BUCKET_NAME, filename, ExtraArgs={'ACL': 'public-read'})
        # Return the public URL of the object that was just uploaded (the
        # previous version returned a URL that did not include the key).
        return f'{settings.AWS_S3_ENDPOINT_URL}/{settings.AWS_STORAGE_BUCKET_NAME}/{filename}'

    def delete(self, filename):
        """Delete *filename* from the bucket; return the boto3 response."""
        # NOTE(review): bucket 'SIH2020' is hard-coded here while upload()
        # uses settings.AWS_STORAGE_BUCKET_NAME -- confirm which is intended.
        return self._client.delete_object(Bucket='SIH2020', Key=filename)
S3Connection = S3utils() | [
"abhinav.dayal@hotmail.com"
] | abhinav.dayal@hotmail.com |
151fc23e1533e76eb12ce1b8bb1392755667dbab | 7f54637e347e5773dfbfded7b46b58b50544cfe5 | /7-3/chainxy/spiders/tradesecretscanada.py | dc8b30b2b1997267ec5b41a42628814c788f3cc0 | [] | no_license | simba999/all-scrapy | 5cc26fd92b1d03366b74d4fff58c4a0641c85609 | d48aeb3c00fa2474153fbc8d131cf58402976e1d | refs/heads/master | 2021-01-25T14:24:04.715550 | 2018-03-03T13:43:13 | 2018-03-03T13:43:13 | 123,695,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | import scrapy
import json
import os
from scrapy.spiders import Spider
from scrapy.http import FormRequest
from scrapy.http import Request
from chainxy.items import ChainItem
from lxml import etree
from selenium import webdriver
from lxml import html
import usaddress
import pdb
class tradesecretscanada(scrapy.Spider):
    """Scrapes store location details from the Trade Secrets (Canada) site."""
    name = 'tradesecretscanada'
    domain = ''
    history = []

    def start_requests(self):
        """Entry point: request the locations index page."""
        init_url = 'http://talk.tradesecrets.ca/locations-reviews/'
        yield scrapy.Request(url=init_url, callback=self.body)

    def body(self, response):
        """Follow every outbound store link found on the index page."""
        print("========= Checking.......")
        store_list = response.xpath('//a[@rel="noopener noreferrer"]/@href').extract()
        for store in store_list:
            yield scrapy.Request(url=store, callback=self.parse_page)

    def parse_page(self, response):
        """Parse one store page into a ChainItem (address, phone, hours)."""
        try:
            item = ChainItem()
            detail = self.eliminate_space(response.xpath('//div[contains(@class, "fusion-one-half fusion-layout-column fusion-spacing-no")]//h4//text()').extract())
            h_temp = ''
            for de in detail:
                # Lines like "(604) 555-1234" carry the phone number.
                if '(' in de and '-' in de:
                    try:
                        item['phone_number'] = self.validate('(' + de.split('(')[1])
                    except Exception:
                        item['phone_number'] = self.validate(de)
                # Lines containing ':' are opening-hours entries.
                if ':' in de:
                    h_temp += de + ', '
            # The first detail line holds the address; strip any trailing "(...)".
            if '(' in detail[0]:
                detail[0] = self.validate(detail[0].split('(')[0]).replace('|', '')
            addr = detail[0].replace('|', '').split(',')
            if len(addr) == 4:
                item['address'] = self.validate(addr[1])
                item['city'] = self.validate(addr[2])
                item['state'] = self.validate(addr[3].strip())[:2].strip()
                item['zip_code'] = self.validate(addr[3])[2:].strip()
            elif len(addr) == 3:
                item['address'] = self.validate(addr[0])
                item['city'] = self.validate(addr[1])
                item['state'] = self.validate(addr[2].strip())[:2].strip()
                item['zip_code'] = self.validate(addr[2])[2:].strip()
            else:
                # Unexpected address shape: skip the address fields rather
                # than dropping into the debugger (the original called
                # pdb.set_trace() here, which would hang a production crawl).
                pass
            item['country'] = 'Canada'
            item['store_hours'] = h_temp[:-2]
            yield item
        except Exception:
            # Best-effort scraping: malformed pages are silently skipped.
            pass

    def validate(self, item):
        """Clean a scraped string: drop en-dashes, NBSPs and pipes, trim."""
        try:
            # Work on the text directly: the original py2-era round-trip via
            # encode('raw-unicode-escape') produced bytes, making every
            # str.replace below raise (and so always return '') on Python 3.
            return item.replace('\u2013', '').replace('\xa0', '').replace('|', '').strip()
        except Exception:
            return ''

    def eliminate_space(self, items):
        """Validate each entry, dropping blanks and 'try'/'http' boilerplate."""
        tmp = []
        for item in items:
            cleaned = self.validate(item)
            if cleaned != '' and 'try' not in cleaned.lower() and 'http' not in cleaned.lower():
                tmp.append(cleaned)
        return tmp
"oliverking8985@yahoo.com"
] | oliverking8985@yahoo.com |
8bbd1f616dd60b38607f42fb53b0bed2badfe9b5 | 583f91afcb377707c830ad739b3d22d38f7eb4bf | /venv/bin/easy_install | d37d504868e9c4dc0754e20af384f4987ab2648d | [] | no_license | wjwalcher/CSP | 1544556606ea83f54aaceff9ad701c8eb4bedb7a | 1bddfd6dc5d70350323cd60224df804ef67dd581 | refs/heads/master | 2020-04-01T12:49:25.271546 | 2018-10-16T05:03:35 | 2018-10-16T05:03:35 | 153,225,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | #!/Users/williamwalcher/PycharmProjects/322CSP/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"williamwalcher@Williams-MacBook-Pro-3.local"
] | williamwalcher@Williams-MacBook-Pro-3.local | |
7728d77ed32221009bb1df33c077dfc8ef3d3b5f | 8dcd3ee098b4f5b80879c37a62292f42f6b2ae17 | /venv/Lib/site-packages/pandas/tests/frame/test_api.py | 1cfbd5961177ae6b74b0110ac71414299c5047fe | [] | no_license | GregVargas1999/InfinityAreaInfo | 53fdfefc11c4af8f5d2b8f511f7461d11a3f7533 | 2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a | refs/heads/master | 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,965 | py | import datetime
import pydoc
from copy import deepcopy
import numpy as np
import pandas as pd
import pandas._testing as tm
import pytest
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark
class TestDataFrameMisc:
    """Assorted DataFrame API tests: attributes, iteration protocols,
    conversion (values/to_numpy/transpose), emptiness, and misc behaviors.
    Fixtures such as float_frame / float_string_frame are supplied by the
    pandas test suite's conftest.
    """
    def test_copy_index_name_checking(self, float_frame):
        # don't want to be able to modify the index stored elsewhere after
        # making a copy
        for attr in ("index", "columns"):
            ind = getattr(float_frame, attr)
            ind.name = None
            cp = float_frame.copy()
            getattr(cp, attr).name = "foo"
            assert getattr(float_frame, attr).name is None
    def test_getitem_pop_assign_name(self, float_frame):
        s = float_frame["A"]
        assert s.name == "A"
        s = float_frame.pop("A")
        assert s.name == "A"
        s = float_frame.loc[:, "B"]
        assert s.name == "B"
        s2 = s.loc[:]
        assert s2.name == "B"
    def test_get_value(self, float_frame):
        for idx in float_frame.index:
            for col in float_frame.columns:
                result = float_frame._get_value(idx, col)
                expected = float_frame[col][idx]
                tm.assert_almost_equal(result, expected)
    def test_add_prefix_suffix(self, float_frame):
        with_prefix = float_frame.add_prefix("foo#")
        expected = pd.Index(["foo#{c}".format(c=c) for c in float_frame.columns])
        tm.assert_index_equal(with_prefix.columns, expected)
        with_suffix = float_frame.add_suffix("#foo")
        expected = pd.Index(["{c}#foo".format(c=c) for c in float_frame.columns])
        tm.assert_index_equal(with_suffix.columns, expected)
        # '%' must not be treated as a format placeholder by add_prefix/add_suffix
        with_pct_prefix = float_frame.add_prefix("%")
        expected = pd.Index(["%{c}".format(c=c) for c in float_frame.columns])
        tm.assert_index_equal(with_pct_prefix.columns, expected)
        with_pct_suffix = float_frame.add_suffix("%")
        expected = pd.Index(["{c}%".format(c=c) for c in float_frame.columns])
        tm.assert_index_equal(with_pct_suffix.columns, expected)
    def test_get_axis(self, float_frame):
        f = float_frame
        assert f._get_axis_number(0) == 0
        assert f._get_axis_number(1) == 1
        assert f._get_axis_number("index") == 0
        assert f._get_axis_number("rows") == 0
        assert f._get_axis_number("columns") == 1
        assert f._get_axis_name(0) == "index"
        assert f._get_axis_name(1) == "columns"
        assert f._get_axis_name("index") == "index"
        assert f._get_axis_name("rows") == "index"
        assert f._get_axis_name("columns") == "columns"
        assert f._get_axis(0) is f.index
        assert f._get_axis(1) is f.columns
        with pytest.raises(ValueError, match="No axis named"):
            f._get_axis_number(2)
        with pytest.raises(ValueError, match="No axis.*foo"):
            f._get_axis_name("foo")
        with pytest.raises(ValueError, match="No axis.*None"):
            f._get_axis_name(None)
        with pytest.raises(ValueError, match="No axis named"):
            f._get_axis_number(None)
    def test_keys(self, float_frame):
        getkeys = float_frame.keys
        assert getkeys() is float_frame.columns
    def test_column_contains_raises(self, float_frame):
        with pytest.raises(TypeError, match="unhashable type: 'Index'"):
            float_frame.columns in float_frame
    def test_tab_completion(self):
        # DataFrame whose columns are identifiers shall have them in __dir__.
        df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
        for key in list("ABCD"):
            assert key in dir(df)
        assert isinstance(df.__getitem__("A"), pd.Series)
        # DataFrame whose first-level columns are identifiers shall have
        # them in __dir__.
        df = pd.DataFrame(
            [list("abcd"), list("efgh")],
            columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
        )
        for key in list("ABCD"):
            assert key in dir(df)
        for key in list("EFGH"):
            assert key not in dir(df)
        assert isinstance(df.__getitem__("A"), pd.DataFrame)
    def test_not_hashable(self):
        empty_frame = DataFrame()
        df = DataFrame([1])
        msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
        with pytest.raises(TypeError, match=msg):
            hash(df)
        with pytest.raises(TypeError, match=msg):
            hash(empty_frame)
    def test_new_empty_index(self):
        df1 = DataFrame(np.random.randn(0, 3))
        df2 = DataFrame(np.random.randn(0, 3))
        df1.index.name = "foo"
        assert df2.index.name is None
    def test_array_interface(self, float_frame):
        with np.errstate(all="ignore"):
            result = np.sqrt(float_frame)
        assert isinstance(result, type(float_frame))
        assert result.index is float_frame.index
        assert result.columns is float_frame.columns
        tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
    def test_get_agg_axis(self, float_frame):
        cols = float_frame._get_agg_axis(0)
        assert cols is float_frame.columns
        idx = float_frame._get_agg_axis(1)
        assert idx is float_frame.index
        msg = r"Axis must be 0 or 1 \(got 2\)"
        with pytest.raises(ValueError, match=msg):
            float_frame._get_agg_axis(2)
    def test_nonzero(self, float_frame, float_string_frame):
        empty_frame = DataFrame()
        assert empty_frame.empty
        assert not float_frame.empty
        assert not float_string_frame.empty
        # corner case
        df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
        del df["A"]
        assert not df.empty
    def test_iteritems(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
        for k, v in df.items():
            assert isinstance(v, DataFrame._constructor_sliced)
    def test_items(self):
        # GH 17213, GH 13918
        cols = ["a", "b", "c"]
        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
        for c, (k, v) in zip(cols, df.items()):
            assert c == k
            assert isinstance(v, Series)
            assert (df[k] == v).all()
    def test_iter(self, float_frame):
        assert tm.equalContents(list(float_frame), float_frame.columns)
    def test_iterrows(self, float_frame, float_string_frame):
        for k, v in float_frame.iterrows():
            exp = float_frame.loc[k]
            tm.assert_series_equal(v, exp)
        for k, v in float_string_frame.iterrows():
            exp = float_string_frame.loc[k]
            tm.assert_series_equal(v, exp)
    def test_iterrows_iso8601(self):
        # GH 19671
        s = DataFrame(
            {
                "non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
                "iso8601": date_range("2000-01-01", periods=4, freq="M"),
            }
        )
        for k, v in s.iterrows():
            exp = s.loc[k]
            tm.assert_series_equal(v, exp)
    def test_iterrows_corner(self):
        # gh-12222
        df = DataFrame(
            {
                "a": [datetime.datetime(2015, 1, 1)],
                "b": [None],
                "c": [None],
                "d": [""],
                "e": [[]],
                "f": [set()],
                "g": [{}],
            }
        )
        expected = Series(
            [datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
            index=list("abcdefg"),
            name=0,
            dtype="object",
        )
        _, result = next(df.iterrows())
        tm.assert_series_equal(result, expected)
    def test_itertuples(self, float_frame):
        for i, tup in enumerate(float_frame.itertuples()):
            s = DataFrame._constructor_sliced(tup[1:])
            s.name = tup[0]
            expected = float_frame.iloc[i, :].reset_index(drop=True)
            tm.assert_series_equal(s, expected)
        df = DataFrame(
            {"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
        )
        for tup in df.itertuples(index=False):
            assert isinstance(tup[1], int)
        df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
        dfaa = df[["a", "a"]]
        assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
        # repr with int on 32-bit/windows
        if not (compat.is_platform_windows() or compat.is_platform_32bit()):
            assert (
                repr(list(df.itertuples(name=None)))
                == "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]"
            )
        tup = next(df.itertuples(name="TestName"))
        assert tup._fields == ("Index", "a", "b")
        assert (tup.Index, tup.a, tup.b) == tup
        assert type(tup).__name__ == "TestName"
        df.columns = ["def", "return"]
        tup2 = next(df.itertuples(name="TestName"))
        assert tup2 == (0, 1, 4)
        assert tup2._fields == ("Index", "_1", "_2")
        df3 = DataFrame({"f" + str(i): [i] for i in range(1024)})
        # will raise SyntaxError if trying to create namedtuple
        tup3 = next(df3.itertuples())
        assert isinstance(tup3, tuple)
        if PY37:
            assert hasattr(tup3, "_fields")
        else:
            assert not hasattr(tup3, "_fields")
        # GH 28282
        df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}])
        result_254_columns = next(df_254_columns.itertuples(index=False))
        assert isinstance(result_254_columns, tuple)
        assert hasattr(result_254_columns, "_fields")
        df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}])
        result_255_columns = next(df_255_columns.itertuples(index=False))
        assert isinstance(result_255_columns, tuple)
        # Dataframes with >=255 columns will fallback to regular tuples on python < 3.7
        if PY37:
            assert hasattr(result_255_columns, "_fields")
        else:
            assert not hasattr(result_255_columns, "_fields")
    def test_sequence_like_with_categorical(self):
        # GH 7839
        # make sure can iterate
        df = DataFrame(
            {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
        )
        df["grade"] = Categorical(df["raw_grade"])
        # basic sequencing testing
        result = list(df.grade.values)
        expected = np.array(df.grade.values).tolist()
        tm.assert_almost_equal(result, expected)
        # iteration
        for t in df.itertuples(index=False):
            str(t)
        for row, s in df.iterrows():
            str(s)
        for c, col in df.items():
            str(s)
    def test_len(self, float_frame):
        assert len(float_frame) == len(float_frame.index)
    def test_values_mixed_dtypes(self, float_frame, float_string_frame):
        frame = float_frame
        arr = frame.values
        frame_cols = frame.columns
        for i, row in enumerate(arr):
            for j, value in enumerate(row):
                col = frame_cols[j]
                if np.isnan(value):
                    assert np.isnan(frame[col][i])
                else:
                    assert value == frame[col][i]
        # mixed type
        arr = float_string_frame[["foo", "A"]].values
        assert arr[0, 0] == "bar"
        df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
        arr = df.values
        assert arr[0, 0] == 1j
        # single block corner case
        arr = float_frame[["A", "B"]].values
        expected = float_frame.reindex(columns=["A", "B"]).values
        tm.assert_almost_equal(arr, expected)
    def test_to_numpy(self):
        df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
        expected = np.array([[1, 3], [2, 4.5]])
        result = df.to_numpy()
        tm.assert_numpy_array_equal(result, expected)
    def test_to_numpy_dtype(self):
        df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
        expected = np.array([[1, 3], [2, 4]], dtype="int64")
        result = df.to_numpy(dtype="int64")
        tm.assert_numpy_array_equal(result, expected)
    def test_to_numpy_copy(self):
        arr = np.random.randn(4, 3)
        df = pd.DataFrame(arr)
        assert df.values.base is arr
        assert df.to_numpy(copy=False).base is arr
        assert df.to_numpy(copy=True).base is None
    def test_transpose(self, float_frame):
        frame = float_frame
        dft = frame.T
        for idx, series in dft.items():
            for col, value in series.items():
                if np.isnan(value):
                    assert np.isnan(frame[col][idx])
                else:
                    assert value == frame[col][idx]
        # mixed type
        index, data = tm.getMixedTypeDict()
        mixed = DataFrame(data, index=index)
        mixed_T = mixed.T
        for col, s in mixed_T.items():
            assert s.dtype == np.object_
    def test_swapaxes(self):
        df = DataFrame(np.random.randn(10, 5))
        tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
        tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
        tm.assert_frame_equal(df, df.swapaxes(0, 0))
        msg = (
            "No axis named 2 for object type"
            r" <class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>"
        )
        with pytest.raises(ValueError, match=msg):
            df.swapaxes(2, 5)
    def test_axis_aliases(self, float_frame):
        f = float_frame
        # reg name
        expected = f.sum(axis=0)
        result = f.sum(axis="index")
        tm.assert_series_equal(result, expected)
        expected = f.sum(axis=1)
        result = f.sum(axis="columns")
        tm.assert_series_equal(result, expected)
    def test_class_axis(self):
        # GH 18147
        # no exception and no empty docstring
        assert pydoc.getdoc(DataFrame.index)
        assert pydoc.getdoc(DataFrame.columns)
    def test_more_values(self, float_string_frame):
        values = float_string_frame.values
        assert values.shape[1] == len(float_string_frame.columns)
    def test_repr_with_mi_nat(self, float_string_frame):
        df = DataFrame(
            {"X": [1, 2]}, index=[[pd.NaT, pd.Timestamp("20130101")], ["a", "b"]]
        )
        result = repr(df)
        expected = "              X\nNaT        a  1\n2013-01-01 b  2"
        assert result == expected
    def test_items_names(self, float_string_frame):
        for k, v in float_string_frame.items():
            assert v.name == k
    def test_series_put_names(self, float_string_frame):
        series = float_string_frame._series
        for k, v in series.items():
            assert v.name == k
    def test_empty_nonzero(self):
        df = DataFrame([1, 2, 3])
        assert not df.empty
        df = DataFrame(index=[1], columns=[1])
        assert not df.empty
        df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
        assert df.empty
        assert df.T.empty
        empty_frames = [
            DataFrame(),
            DataFrame(index=[1]),
            DataFrame(columns=[1]),
            DataFrame({1: []}),
        ]
        for df in empty_frames:
            assert df.empty
            assert df.T.empty
    def test_with_datetimelikes(self):
        df = DataFrame(
            {
                "A": date_range("20130101", periods=10),
                "B": timedelta_range("1 day", periods=10),
            }
        )
        t = df.T
        result = t.dtypes.value_counts()
        expected = Series({np.dtype("object"): 10})
        tm.assert_series_equal(result, expected)
    def test_values(self, float_frame):
        float_frame.values[:, 0] = 5.0
        assert (float_frame.values[:, 0] == 5).all()
    def test_deepcopy(self, float_frame):
        cp = deepcopy(float_frame)
        series = cp["A"]
        series[:] = 10
        for idx, value in series.items():
            assert float_frame["A"][idx] != value
    def test_transpose_get_view(self, float_frame):
        dft = float_frame.T
        dft.values[:, 5:10] = 5
        assert (float_frame.values[5:10] == 5).all()
    def test_inplace_return_self(self):
        # GH 1893
        data = DataFrame(
            {"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]}
        )
        # Each inplace mutator must return None rather than self.
        def _check_f(base, f):
            result = f(base)
            assert result is None
        # -----DataFrame-----
        # set_index
        f = lambda x: x.set_index("a", inplace=True)
        _check_f(data.copy(), f)
        # reset_index
        f = lambda x: x.reset_index(inplace=True)
        _check_f(data.set_index("a"), f)
        # drop_duplicates
        f = lambda x: x.drop_duplicates(inplace=True)
        _check_f(data.copy(), f)
        # sort
        f = lambda x: x.sort_values("b", inplace=True)
        _check_f(data.copy(), f)
        # sort_index
        f = lambda x: x.sort_index(inplace=True)
        _check_f(data.copy(), f)
        # fillna
        f = lambda x: x.fillna(0, inplace=True)
        _check_f(data.copy(), f)
        # replace
        f = lambda x: x.replace(1, 0, inplace=True)
        _check_f(data.copy(), f)
        # rename
        f = lambda x: x.rename({1: "foo"}, inplace=True)
        _check_f(data.copy(), f)
        # -----Series-----
        d = data.copy()["c"]
        # reset_index
        f = lambda x: x.reset_index(inplace=True, drop=True)
        _check_f(data.set_index("a")["c"], f)
        # fillna
        f = lambda x: x.fillna(0, inplace=True)
        _check_f(d.copy(), f)
        # replace
        f = lambda x: x.replace(1, 0, inplace=True)
        _check_f(d.copy(), f)
        # rename
        f = lambda x: x.rename({1: "foo"}, inplace=True)
        _check_f(d.copy(), f)
    @async_mark()
    async def test_tab_complete_warning(self, ip):
        # GH 16409
        pytest.importorskip("IPython", minversion="6.0.0")
        from IPython.core.completer import provisionalcompleter
        code = "import pandas as pd; df = pd.DataFrame()"
        await ip.run_code(code)
        # TODO: remove it when Ipython updates
        # GH 33567, jedi version raises Deprecation warning in Ipython
        import jedi
        if jedi.__version__ < "0.17.0":
            warning = tm.assert_produces_warning(None)
        else:
            warning = tm.assert_produces_warning(
                DeprecationWarning, check_stacklevel=False
            )
        with warning:
            with provisionalcompleter("ignore"):
                list(ip.Completer.completions("df.", 1))
    def test_attrs(self):
        df = pd.DataFrame({"A": [2, 3]})
        assert df.attrs == {}
        df.attrs["version"] = 1
        result = df.rename(columns=str)
        assert result.attrs == {"version": 1}
| [
"44142880+GregVargas1999@users.noreply.github.com"
] | 44142880+GregVargas1999@users.noreply.github.com |
d4a5a2155aa71f6f81e1301fb6dea5d302b0742f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_greens.py | ce3b23baa678edd94ee5bf830fa189133e5ffadb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.nouns._green import _GREEN
# class header
class _GREENS(_GREEN, ):
	'''Generated word entry for the plural noun "greens"; inherits behaviour
	from the singular _GREEN and only overrides the identifying fields.'''
	def __init__(self,):
		_GREEN.__init__(self)
		self.name = "GREENS"
		self.specie = 'nouns'
		self.basic = "green"
		self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8fe101007d836f2e2cedb46604f7ca5e40fe18f5 | 3b8deb008d8bb5199e4ea37a0715886bf02559c0 | /examples/fakerstc.py | 0f4eaa7793229cdf198523acd71dec790b001314 | [] | no_license | ryanmwhitephd/ADD | 759be1b8afd4433ff9d76606ad3deead64814977 | 60ad72f0e4360cf66e204451efbc9a5baa0732fa | refs/heads/master | 2021-09-18T14:40:17.398413 | 2018-07-16T06:57:01 | 2018-07-16T06:57:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,834 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 25 12:24:32 2018
@author: ryanwhi
"""
import logging
import unittest
import argparse
import json
import functools
# Library-style logger: a NullHandler keeps "No handlers found" warnings away
# unless the importing application configures logging itself.
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from faker.providers import BaseProvider
from faker import Faker
# Use functools.partial
# https://stackoverflow.com/questions/29832410/python-use-dictionary-keys-as-function-names
class ModelType(object):
    """Base class for fake-data models.

    Stores a mapping of Faker provider names to their optional arguments
    and exposes it through the ``meta`` property; ``provider`` looks up a
    single entry.
    """

    def __init__(self, mdata):
        # Name-mangled storage; read and written through ``meta``.
        self.__meta = mdata

    @property
    def meta(self):
        """The provider-name -> provider-argument mapping."""
        return self.__meta

    @meta.setter
    def meta(self, mdata):
        self.__meta = mdata

    def provider(self, name):
        """Return the configured argument for provider ``name``."""
        return self.__meta[name]
class MetaModel(object):
    """Instantiate a user-defined model class by name.

    Looks the class up in this module's globals; the ``model`` property
    setter receives the class object and stores a fresh instance of it.
    """

    def __init__(self, name):
        # Hands the class object to the setter, which instantiates it.
        self.model = globals()[name]

    @property
    def model(self):
        """The constructed model instance."""
        return self.__model

    @model.setter
    def model(self, ctor):
        # ``ctor`` is a class; keep a newly constructed instance.
        self.__model = ctor()

    def providers(self):
        """Return the model's provider mapping (its ``meta`` dict)."""
        return self.model.meta
class MyModel(ModelType):
    """Example model: a person name plus an EAN-13 barcode.

    Maps each Faker provider name to its optional argument; ``None`` means
    the provider takes no argument.

    TODO(review): header/column-name information is still missing, as the
    original author noted.
    """

    def __init__(self):
        providers = {'name': None, 'ean': 13}
        super(MyModel, self).__init__(providers)
class AltModel(ModelType):
    """Alternative model: identity, location and payment providers."""

    def __init__(self):
        providers = {
            'name': None,
            'address': None,
            'longitude': None,
            'iban': None,
            'credit_card_full': 'visa16',
        }
        super(AltModel, self).__init__(providers)
def fakerstc(*args):
    """Generate and print fake data rows for the named model.

    args[0] -- name of a ModelType subclass defined in this module
    args[1] -- number of rows to generate
    """
    meta_model = MetaModel(args[0])
    print(meta_model.model.meta)
    print(meta_model.providers())
    print(meta_model.model.provider('name'))
    fake = Faker('en_CA')
    for _ in range(args[1]):
        row = []
        for key, parms in meta_model.providers().items():
            # Each key names a Faker provider on the generator instance.
            provider = getattr(fake, key)
            if parms is None:
                row.append(provider())
            else:
                row.append(provider(parms))
        print(row)
if __name__ == '__main__':
    # Demo run: print ten generated rows for the example model.
    fakerstc('MyModel',10)
fakerstc('AltModel',10) | [
"noreply@github.com"
] | ryanmwhitephd.noreply@github.com |
62f1f7e28e890ada8f842b1295e4c295bd93ce6b | 3d14e2430b696a21661ee1b5fc000aa031a81a67 | /locators/books_page_locators.py | 403e152b6985de415825fb662971e8422cf916ea | [] | no_license | a-soliman/books-toscrap | 53defb67df7167917a53fa1193fff5f3bccd998c | 958c347eeb881178346f228326b4ccfe47b7acd2 | refs/heads/master | 2020-04-25T17:25:36.492827 | 2019-02-28T16:22:59 | 2019-02-28T16:22:59 | 172,947,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | """
extracts books from a page
"""
class BooksPageLocators:
    """CSS selectors for elements on the books listing page."""
    # One book card in the results grid (Bootstrap column classes).
    BOOK = "li.col-xs-6.col-sm-4.col-md-3.col-lg-3"
| [
"ahmed.soliman@programmer.net"
] | ahmed.soliman@programmer.net |
6e2274d661b06ca37349193c0aebe92de6199296 | e7a79c157fe0edde0cabb4cebc707ce1530b5b7e | /answers/cartridge_base_state.py | e281bddc7ae481e400eea459831529801ab1ff55 | [] | no_license | Logsod/noti_rest_server | 1684d1d03c67aea46b4c6888d0951477de2888af | 529e9a0c66a6c7021224a2daf60f378f01164ca5 | refs/heads/master | 2023-08-10T16:39:43.000835 | 2021-09-29T13:19:41 | 2021-09-29T13:19:41 | 408,777,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | import falcon
from flask import json
import db
class CartridgeBaseState:
    """Falcon resource managing the stock ("base state") of cartridges."""

    def on_post(self, req, resp):
        """Dispatch a stock mutation based on the integer ``action`` field.

        0 -- add cartridges (requires ``cartridge_id`` and ``amount``)
        1 -- change the stored amount
        2 -- take one cartridge into service
        3 -- delete a stock record (requires ``id``)
        """
        payload = req.media
        action = int(payload['action'])
        store = db.Db()
        if action == 0:
            # Only answer when both required fields are present; otherwise
            # the response keeps falcon's defaults, as in the original.
            if "cartridge_id" in payload and "amount" in payload:
                created = store.addCartridgesToBaseState(payload)
                resp.status = falcon.HTTP_200
                resp.content_type = falcon.MEDIA_TEXT
                resp.text = created
        elif action == 1:
            store.changeBaseStateCartridgeAmount(payload)
            resp.status = falcon.HTTP_200
            resp.content_type = falcon.MEDIA_TEXT
        elif action == 2:
            # Inserts into the state table list and marks status 1 ("in work").
            store.takeOneCartridge(payload)
            resp.status = falcon.HTTP_200
            resp.content_type = falcon.MEDIA_TEXT
        elif action == 3:
            store.deleteCartridgeFromBaseState(payload['id'])
            resp.status = falcon.HTTP_200
            resp.content_type = falcon.MEDIA_TEXT

    def on_get(self, req, resp):
        """Return the full stock listing as plain text."""
        store = db.Db()
        listing = store.getAllBaseStateCartridges()
        print(listing)
        resp.status = falcon.HTTP_200
        resp.content_type = falcon.MEDIA_TEXT
        resp.text = listing
| [
"wwwref@gmail.com"
] | wwwref@gmail.com |
5c6e5146bb6f11b1d58dd88282e817ec96737c23 | 6051b1df3a84ca6dc3d4c4ce90202e64a31da22b | /test_studentlists.py | 8d714b81118ded8f1a951b467ba98d2ec7583061 | [] | no_license | omarmohamud23/Labweek4--studentlist | e4c614ee93c0cf2e911210aaeeb368ef37916953 | 8b11a661ffdc226c049d48b4d4e0479926e004a4 | refs/heads/master | 2023-02-26T22:32:06.700044 | 2021-02-05T21:59:53 | 2021-02-05T21:59:53 | 336,397,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,545 | py | '''
Practice using
assertTrue
assertFalse
assertIsNone
assertIsNotNone
assertIn
assertNotIn
'''
from studentlists import ClassList, StudentError
from unittest import TestCase
class TestStudentLists(TestCase):
    """Unit tests for ClassList: add/remove, enrollment queries, string
    conversion, student indexing and the is_class_full check."""

    def test_add_student_check_student_in_list(self):
        test_class = ClassList(2)
        test_class.add_student('Test Student')
        self.assertIn('Test Student', test_class.class_list)
        test_class.add_student('Another Test Student')
        self.assertIn('Test Student', test_class.class_list)
        self.assertIn('Another Test Student', test_class.class_list)

    def test_add_student_already_in_list(self):
        test_class = ClassList(2)
        test_class.add_student('Test Student')
        with self.assertRaises(StudentError):
            test_class.add_student('Test Student')

    def test_add_remove_student_ensure_removed(self):
        test_class = ClassList(2)
        test_class.add_student('Test Student')
        test_class.remove_student('Test Student')
        self.assertNotIn('Test Student', test_class.class_list)

    def test_add_students_remove_student_not_in_list(self):
        test_class = ClassList(4)
        test_class.add_student('Student One')
        test_class.add_student('Student Two')
        test_class.add_student('Student Three')
        with self.assertRaises(StudentError):
            test_class.remove_student('Student Four')

    def test_removes_student_in_empty_list(self):
        test_class = ClassList(3)
        with self.assertRaises(StudentError):
            test_class.remove_student('Abdi')

    def test_is_enrolled_when_student_present(self):
        test_class = ClassList(2)
        test_class.add_student('Snoop Dogg')
        test_class.add_student('Martha Stewart')
        self.assertTrue(test_class.is_enrolled('Snoop Dogg'))
        self.assertTrue(test_class.is_enrolled('Martha Stewart'))

    def test_is_enrolled_empty_class_list(self):
        test_class = ClassList(2)
        self.assertFalse(test_class.is_enrolled('Snoop Dogg'))

    # BUG FIX: this method was named without the ``test_`` prefix, so
    # unittest never discovered or ran it.
    def test_add_student_examples_check_student_not_enrolled(self):
        test_class = ClassList(2)
        test_class.add_student('Alhandro')
        test_class.add_student('Mutina')
        self.assertFalse(test_class.is_enrolled('Abdi'))

    def test_string_with_students_enrolled(self):
        test_class = ClassList(2)
        test_class.add_student('Taylor Swift')
        test_class.add_student('Kanye West')
        self.assertEqual('Taylor Swift, Kanye West', str(test_class))

    def test_string_empty_class(self):
        test_class = ClassList(2)
        self.assertEqual('', str(test_class))

    def test_index_of_student_student_present(self):
        test_class = ClassList(3)
        test_class.add_student('Harry')
        test_class.add_student('Hermione')
        test_class.add_student('Ron')
        self.assertEqual(1, test_class.index_of_student('Harry'))
        self.assertEqual(2, test_class.index_of_student('Hermione'))
        self.assertEqual(3, test_class.index_of_student('Ron'))
        # Redundant guard: the assertEqual above already fails on None.
        self.assertIsNotNone(test_class.index_of_student('Harry'))

    def test_index_of_student_in_empty_list_return_None(self):
        test_class = ClassList(1)
        self.assertIsNone(test_class.index_of_student('Hamze'))

    def test_index_of_student_list_not_empty_search_student_not_in_list(self):
        test_class = ClassList(2)
        test_class.add_student('Mohamud')
        test_class.add_student('Muhumed')
        self.assertIsNone(test_class.index_of_student('Mary'))

    def test_is_class_full(self):
        test_class = ClassList(4)
        test_class.add_student('Farmaajo')
        test_class.add_student('Mukami')
        test_class.add_student('Abdi')
        test_class.add_student('Bisharo')
        self.assertTrue(test_class.is_class_full())

    def test_is_class_full_empty(self):
        test_class = ClassList(2)
        self.assertFalse(test_class.is_class_full())
def test_is_class_full_when_not_full(self):
test_class = ClassList(4)
test_class.add_student('Otieno')
test_class.add_student('odinga')
self.assertFalse(test_class.is_class_full()) | [
"Omar"
] | Omar |
2e7cbcd4794cba57103a59e55b86743311629cf7 | 9ed58ef096d558a024aef9927e7f53fdf8805f85 | /applications/expensive_seq/expensive_seq.py | a599aede02737774b47914d10e7b5686d1e4d3e6 | [] | no_license | squashgray/Hash-Tables | 11e6c4e0cb9aa0dd3a2592fd04918311e5a23346 | d97ab7dad3dd34277b8084badcd27f19cb848ee6 | refs/heads/master | 2022-10-26T11:24:43.664825 | 2020-06-10T04:34:42 | 2020-06-10T04:34:42 | 261,280,162 | 1 | 0 | null | 2020-05-07T03:31:35 | 2020-05-04T19:46:26 | null | UTF-8 | Python | false | false | 223 | py | def expensive_seq(x, y, z):
# Implement me
if __name__ == "__main__":
for i in range(10):
x = expensive_seq(i*2, i*3, i*4)
print(f"{i*2} {i*3} {i*4} = {x}")
print(expensive_seq(150, 400, 800))
| [
"beej@beej.us"
] | beej@beej.us |
16609f41774f606706d680afcec9bb61b9b40b3d | e540673e49a0ea897b0df0d15c2c5df38e54e8b0 | /TF_polynomial_regression_2.py | 85025c0668bb062ec899d2646b6dc3697620fdbc | [] | no_license | rottendoom001/PYTHON_TF | ec97a287ec71cfb24e4bf94cc27df36c8d451a1a | 0de5489b08f60c91bb71145c16836bbf91174409 | refs/heads/master | 2021-09-03T17:56:04.114074 | 2018-01-10T22:06:01 | 2018-01-10T22:06:01 | 109,761,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | """Simple tutorial for using TensorFlow to compute polynomial regression.
Parag K. Mital, Jan. 2016"""
# %% Imports
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Normalizar datos
def normalize (xs):
ln = []
maxValue = max(xs)
minValue = min(xs)
for e in xs :
# Formula para nomalizar
# (x - min(x)) / (max(x) - min(x))
r = ( e - minValue ) / ( maxValue - minValue )
ln.append(r)
return ln
# Construir el vector de valores para X
def build_x_vector(x, grade):
x_new = np.zeros([x.size, grade])
for i in range(grade):
x_new[:,i] = np.power(x,(i+1))
return x_new
# %% Let's create some toy data
plt.ion()
fig, ax = plt.subplots(1, 1)
#xs = np.linspace(-3, 3, n_observations)
#ys = np.sin(xs) + np.random.uniform(-0.5, 0.5, n_observations)
xs = [1,2,3,4,5,6,7,8,9,10,11,12,13]
xs = np.array(xs)
#ys = [449,525,412,161,639,732]
ysn = [15, 14, 13, 6, 19, 16, 16, 11, 17, 15, 12, 25, 14]
#ysn = normalize(ys)
print (ysn)
ax.scatter(xs, ysn)
fig.show()
plt.draw()
# Grado de la ecuación polinomial
n = 15
W = tf.Variable(tf.random_normal([n,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
X_new = build_x_vector(xs, n)
print("X_new : \n%s"%(X_new))
Y_pred = tf.add(tf.matmul(X,W),b)
loss = tf.reduce_mean(tf.square(Y_pred - Y))
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
n_epochs = 1000
with tf.Session() as sess:
for step in range(n_epochs):
sess.run(tf.global_variables_initializer())
_, c = sess.run([optimizer, loss], feed_dict={X: X_new, Y: ysn})
if step % 10 == 0:
print("COSTO: %s"%(c))
print ("W: %s"%(sess.run(W)))
print ("b: %s"%(sess.run(b)))
y_test = sess.run(Y_pred, feed_dict={X: X_new})
ax.scatter(xs, y_test)
fig.show()
plt.draw()
plt.waitforbuttonpress()
| [
"alanorlando.cruz.manrique@bbva.com"
] | alanorlando.cruz.manrique@bbva.com |
7548f99c00f871df535a5e2a2a86c932211f256d | 3609f3da79af9ad44f047c58edd92da590afa17e | /PythonExerciseFolder/WearwolfOutTheDoor.py | 11961bf0da3d0167ce6cc75f3e556955080e28fc | [] | no_license | Poludzku/master | bcfaab3d4e8e20d4cb5b6e31ea349342c67a7f8f | 8fbc44b30b35f9b0e7608782da4d61e673d68bab | refs/heads/master | 2020-12-25T18:22:24.650293 | 2012-07-28T12:25:13 | 2012-07-28T12:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | # -*- coding: utf-8 *-*
from WearwolfZones import mountines
from WearwolfZones import spring_oasis
from WearwolfZones import forest
def steping_out_the_door():
print """As you take a step out the door the fresh air fills your lungs,
you can feel that tonight it will be a good kill, but where will you hunt?
In the mountines which Anuk calls his home and the moste elite predators
reside, the well spring oasis where many fine elks make there wattering hole
or the forest benethe the mountines where old hunters claim they have seen
the mystical blue fox"""
next = raw_input('> ')
if next == "mountines":
mountines()
elif next == "spring oasis":
spring_oasis()
elif next == "forest":
forest()
else:
print """You can go hunt in the 'mountines', the great forest or the
spring_oasis"""
steping_out_the_door()
| [
"jacek@mint"
] | jacek@mint |
92e1f7036fa5a7c131b843407e94744c4c5e5dee | 67212da14d34fe69de64dcc14301a6d3f2004b89 | /main.py | 95789d840eaf50d28773ee383491fa71b7346331 | [] | no_license | seiya-tsukada/my_gae_sample | 7212b60bc48da4a06a7ff3d83c2c0a1db4dbc28f | 4223923fb00c2b75fac0520f5938bfdeb9b3db92 | refs/heads/master | 2020-04-01T23:45:36.970914 | 2018-10-19T15:47:17 | 2018-10-19T15:47:17 | 153,772,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #! /usr/bin/env python
# coding: utf-8
from flask import Flask, jsonify
app = Flask(__name__)
# Emit non-ASCII characters unescaped in JSON responses.
app.config['JSON_AS_ASCII'] = False
@app.route("/")
def index():
    """Return a fixed JSON greeting (simple health-check endpoint)."""
    return jsonify({"message": "hello world"})
if __name__ == "__main__":
app.run() | [
"seiya.tsukada@accenture.com"
] | seiya.tsukada@accenture.com |
a8b4064eeb4bb83cbf8897edf30fd9ef90b99913 | 88940cbee65e15f8d9dce61ea313a03d38131e85 | /fbproj.py | 59263db4e13a12a883d849aa33f66cb8d17624a3 | [] | no_license | hanslee95/206FB | 025fd961deffbab868238addd276177cc3839e5a | f79640b84c9f47a75b98cadded6228ec0784afac | refs/heads/master | 2021-08-28T21:38:44.846473 | 2017-12-13T06:44:25 | 2017-12-13T06:44:25 | 112,550,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,275 | py | import urllib3
import facebook
import requests
import json
import datetime
import sqlite3
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
from facepy import GraphAPI
#NOTE: I have gotten 100 interactions extracting 100 posts, however, the final data table will have 7 items (rows) in each column.
#as it's information on the 7 days of the week. Specifically, in each post of the 100, I collected how many times I either posted a story or message to see how active I am on the certain day.
# ############## CACHING THE DATA ###########################################################################################################################################################
# Placeholder token -- replace with a real Facebook Graph API access token.
access_token = 'access_token'
graph = GraphAPI(access_token)
# ############## CACHING THE DATA ###########################################################################################################################################################
CACHE_FNAME = "Posts_cache.json"
# Load the cache from disk if present; otherwise start with an empty dict.
try:
    cache_file = open(CACHE_FNAME, 'r') # Try to read the data from the file
    cache_contents = cache_file.read() # If it's there, get it into a string
    CACHE_DICTION = json.loads(cache_contents) # And then load it into a dictionary
    cache_file.close() # Close the file, we're good, we got the data in a dictionary.
except:
    CACHE_DICTION = {}
#checking the cache of a particular user and returns that data or retrieves that cache'd data.
def get_posts():
    """Return the user's posts, serving from the JSON cache when possible.

    On a cache miss the Graph API is queried, the result stored under the
    'posts' key, and the cache file rewritten.
    """
    if 'posts' not in CACHE_DICTION:
        print('getting data from internet')
        post_results = graph.get('me?fields=posts.limit(118)')
        CACHE_DICTION['posts'] = post_results['posts']
        cache_file = open(CACHE_FNAME, 'w')
        #json.dumps prints out the string of dictionary in a json file in one line
        cache_file.write(json.dumps(CACHE_DICTION))
        cache_file.close()
    else:
        print('using cache')
    return CACHE_DICTION['posts']
# Fetch (or load cached) posts once at import time.
posts = get_posts()
########## HELPER FUNCTIONS #####################################################################################################################
#To convert the datetime to a day of the week
def get_day_of_week(date):
    """Convert a 'YYYY-MM-DD' string to its abbreviated weekday name."""
    parsed = datetime.datetime.strptime(date, '%Y-%m-%d')
    return parsed.strftime('%a')
#stripping the created_time value to only get the year-month-day
def strip_time(time_str):
    """Drop the trailing 'THH:MM:SS+ZZZZ' part (14 characters) of a
    Facebook created_time value, leaving just 'YYYY-MM-DD'."""
    return time_str[:-14]
#iterate over the weeks to compare when each element was posted. Then iterate over the dictionary and if that post happened on
#first day (mon), then check and add the stories and messages separately. Then Repeat. Returns dictionary with week day as keys and
#list of tuples (activites)
def calculate_activity(week_l, p):
    """Group posts by weekday: {day: [(story_count, message_count), ...]}.

    Each post contributes one (story, message) tuple; each count is 0 or 1
    depending on which keys the post carries.

    Fixes over the original:
    * The first post seen for a day only created the empty list and was
      never counted; now every matching post is tallied.
    * The branch ``key == 'story' and key == 'message'`` could never be
      true (one key cannot equal both strings) and has been removed --
      posts carrying both keys are already counted once in each counter.
    """
    week_dict = {}
    for day in week_l:
        for elem in p['data']:
            if get_day_of_week(strip_time(elem['created_time'])) != day:
                continue
            if day not in week_dict:
                week_dict[day] = []
            s_count = 0
            m_count = 0
            for key in elem:
                if key == 'message':
                    m_count = m_count + 1
                if key == 'story':
                    s_count = s_count + 1
            week_dict[day].append((s_count, m_count))
    return week_dict
#adding up all the elements in list of tuples for story and message then creating 3 element tuple to insert to table.
def insert_tup3(d):
    """Collapse each day's (story, message) tuples into totals and insert
    one (day, story_total, message_total) row per day into WeekDay.

    Mutates ``d`` in place and relies on the module-level ``cur``/``conn``
    defined later in this script (called only after they exist).
    """
    for key in d:
        add_s = sum(i[0] for i in d[key])
        add_m = sum(i[1] for i in d[key])
        tup = add_s, add_m
        # Replace the per-post tuples with the single summed tuple.
        d[key] = []
        d[key].append(tup)
        for x in d[key]:
            tup3 = key, x[0], x[1]
            cur.execute('INSERT INTO WeekDay (created_time, story, message) VALUES (?, ?, ?)', tup3)
            conn.commit()
########### CREATING AND LOADING IN DATA INTO DATABASE #################################################################################################
conn = sqlite3.connect('Posts_Day.sqlite', timeout = 10)
cur = conn.cursor()
# Rebuild the WeekDay table from scratch on every run.
cur.execute('DROP TABLE IF EXISTS WeekDay')
cur.execute("CREATE TABLE WeekDay (created_time TEXT, story NUMBER, message NUMBER)")
week_lst = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
#calling helper functions to insert data in database
insert_tup3(calculate_activity(week_lst, posts))
#getting data for x-axis
cur.execute('SELECT created_time FROM WeekDay')
lst_x = cur.fetchall()
lst_day = [elem[0] for elem in lst_x]
#getting data for y-axis (story)
cur.execute('SELECT story FROM WeekDay')
lst_y1 = cur.fetchall()
lst_activity = [elem[0] for elem in lst_y1]
#getting data for y-axis (message)
cur.execute('SELECT message FROM WeekDay')
lst_y2 = cur.fetchall()
lst_message = [elem[0] for elem in lst_y2]
########### CREATE DATA VIZ USING PLOTLY #################################################################################################
#importing the data to create graph
trace_high = go.Scatter(
    x=lst_day,
    y=lst_activity,
    name = "Activity",
    line = dict(color = '#03B9DF'),
    opacity = 0.8)
trace_low = go.Scatter(
    x=lst_day,
    y=lst_message,
    name = "Message",
    line = dict(color = '#00E264'),
    opacity = 0.8)
data = [trace_high,trace_low]
#labeling graph with title, x/y axis.
layout = dict(
    title = "FACEBOOK LIFE",
    xaxis=dict(
        title='Day of Week',
        titlefont=dict(
            family='Courier New, monospace',
            size=18,
            color='#7f7f7f'
        )
    ),
    yaxis=dict(
        title='Number of Posts',
        titlefont=dict(
            family='Courier New, monospace',
            size=18,
            color='#7f7f7f'
        )
    )
)
fig = dict(data=data, layout=layout)
# Uploads the figure to the user's Plotly account (requires credentials).
py.iplot(fig, filename = "FACEBOOK LIFE")
| [
"hanheum@umich.edu"
] | hanheum@umich.edu |
2370e7452bcc9e77a37e5853184a510e1184341d | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingMedian_NoCycle_LSTM.py | 52f0b5d941ab48694348dbd8ae8a86fd89845917 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 155 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingMedian'] , ['NoCycle'] , ['LSTM'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
b119a388c312e6aa8b39e003c88c95a830239c8a | 39e1f30888267253a0e2976c05e364c001f386df | /validate-user.py | 81499137b2e28d5885e4b4ca891499f25dc7672b | [] | no_license | atomWeb/facegnition-back-end | e1ce12aced40bc9fc902fa3d86695746f811814e | f6ef3ec256c05f5db64d25e12c006aef12f5848d | refs/heads/main | 2023-08-17T01:05:32.200992 | 2021-10-08T10:32:33 | 2021-10-08T10:32:33 | 403,296,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,631 | py | import json
import boto3
from boto3.dynamodb.conditions import Key
import os
from utils import jsonify, get_str_timestamp_by_zone
S3_BUCKET = os.environ["IMAGE_BUCKET"]
REGION = os.environ["REGION"]
USERS_TABLE = os.environ["USERS_TABLE"]
VALIDATIONS_TABLE = os.environ["VALIDATIONS_TABLE"]
COLLECT_NAME = os.environ["REKOGN_COLLECTION"]
ZONE = os.environ["TIME_ZONE"]
s3_client = boto3.client("s3", region_name=REGION)
dynamo_resource = boto3.resource("dynamodb", region_name=REGION)
users_table = dynamo_resource.Table(USERS_TABLE)
validations_table = dynamo_resource.Table(VALIDATIONS_TABLE)
rekog = boto3.client('rekognition', region_name=REGION)
def validate(event, context):
    """Lambda handler: match the uploaded image against the Rekognition
    collection and record/return the recognised user.

    Expects ``event["body"]`` JSON with an ``imgname`` S3 key.  Responds
    with the matched face id ("None" when no match) and the user's name,
    plus HTTP status 200 (match), 404 (no match) or 500 (error).
    """
    print(event)
    jresp = {'data': ''}
    statusCode=200
    faceId = "None"
    usr_name = ""
    try:
        # Payload for this invocation.
        data = json.loads(event["body"])
        s3path = data["imgname"]
        # Search the collection for faces matching the uploaded image.
        response = rekog.search_faces_by_image(
            CollectionId=COLLECT_NAME,
            Image={
                'S3Object': {
                    'Bucket': S3_BUCKET,
                    'Name': s3path
                }
            },
            MaxFaces=5,
            FaceMatchThreshold=97.0,
            QualityFilter='AUTO'
        )
        print("Response rekognition buscar cara: ", response)
        if not response['FaceMatches']:
            statusCode = 404
        else:
            faceId = response['FaceMatches'][0]['Face']['FaceId']
            timestamp = get_str_timestamp_by_zone(ZONE)
            # Persist a record of this validation in DynamoDB.
            response = validations_table.put_item(
                Item={
                    'faceid': faceId,
                    'timest': timestamp,
                    's3path': s3path
                }
            )
            print("Response Dynamo tabla validations: ", response)
            # Note: writing this record triggers the lambda that checks the
            # user's notify flag and sends the notification e-mail.
            usrs_resp = users_table.query(
                KeyConditionExpression=Key("faceid").eq(faceId),
                Limit=1
            )
            items = usrs_resp.get('Items', [])
            if items:
                usr_name = items[0]["name"]
    except Exception as e:
        msgError = "An exception occurred " + str(e) + "."
        print(msgError)
        jresp = {'Error': msgError}
        statusCode = 500
    jresp = {"face-id": faceId, "name": usr_name}
    return jsonify(jresp, statusCode)
| [
"cristianjose@gmail.com"
] | cristianjose@gmail.com |
be6628d9dba373743c57c916d5222277a1ceab7e | e4284c03268b662eb6d121fedd32aedbabd2ae4b | /test/FileIoTest.py | 59794fc60bae2af560a73de5df088c4f42da0105 | [] | no_license | rennX/FunctionPointParser | 99af5588158163f78b07dd3b4bfb8196db71b557 | 65fce373fae5baf1b34e8a2863c7358981139fa6 | refs/heads/master | 2016-09-09T23:42:56.676573 | 2014-04-20T14:02:36 | 2014-04-20T14:02:36 | 16,235,300 | 1 | 0 | null | 2014-01-26T22:53:35 | 2014-01-25T17:05:10 | null | UTF-8 | Python | false | false | 474 | py | #! /usr/bin/env python
"""This class reads in and processes
input text from a file"""
import unittest
class FileIoTest(unittest.TestCase):
det setUp(self):
self.fileIo = FileIo("input.txt")
def tearDown(self):
self.fileIo = None
def toStringTest(self):
"""Prints out the contents of the input file"""
assert
print str(self.fileIo.inputFile())
def getFileTest(self):
"""Returns the contents of the input file as a string"""
return self.inputFile
| [
"renn.xa@gmail.com"
] | renn.xa@gmail.com |
34612b6bfe10f23da813bdc293d433a1103a70bd | ad9eaab5b382190ce76fb921c1a12f1c3adc4e09 | /main.py | 5993a711e30fa1537e727e5512bebe83a5847a92 | [] | no_license | dex1cre/asistent_bot | 9e7bc1ed3a293e1eea77fea258aafcafd469531e | 9b93166117a600815d45ab2b05940fdd9e065747 | refs/heads/master | 2021-01-21T10:04:34.188968 | 2017-03-07T19:30:36 | 2017-03-07T19:30:36 | 83,360,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,185 | py | #!/usr/bin/python3
#-*- coding: utf-8 -*-
#главный(здесь) модуль для работы с telegram
import telebot as tlb
#модуль для определения дня недели
from datetime import datetime
#модуль для многопоточности
import threading as potok
#модуль для работы с базами данных sqlite3
import sqlite3
#модуль с основными константами
import config
#constant
import string
#for pause
import time
bot = tlb.TeleBot(config.token)
def pr_mn(tk):
con = sqlite3.connect(config.url)
cur = con.cursor()
sql = "SELECT * FROM users WHERE id_user=" + str(tk)
try:
st = cur.execute(sql)
except sqlite3.DatabaseError as err:
print("Ошибка: " + err)
else:
try:
st = st.fetchone()
t = st[1]
except:
return "bad"
else:
return "good"
cur.close()
con.close()
def plans(text):
first = 0
second = 0
text = text[::-1]
#first symbol is number?
try:
first = int(text[0])
except:
return False
else:
#try to get second number
st = ""
for i in text:
try:
g = int(i)
except:
if i == "\n":
second = int(st[::-1])
st = ""
ind = text.index(i) + 1
for i in range(ind, len(text)):
try:
g = int(text[i])
except:
if st != "":
first = int(st[::-1])
return (first, second, -1 * i)
else:
return False
else:
st = st + text[i]
else:
return False
else:
st = st + i
def mn():
try:
@bot.message_handler(commands=["start"])
def send_to_start(message):
if pr_mn(message.from_user.id) == "good":
print(message.from_user.id, "перезапустил сессию")
else:
print(message.from_user.id, "создал первую сессию")
con = sqlite3.connect(config.url)
cur = con.cursor()
sql = "INSERT INTO users(id_user) VALUES('" + str(message.from_user.id) + "')"
try:
cur.execute(sql)
except sqlite3.DatabaseError as err:
print("Ошибка ", err)
else:
print(message.from_user.id, " добавлен в базу данных")
con.commit()
cur.close()
con.close()
um = tlb.types.ReplyKeyboardMarkup(True)
um.row("/start", "/now", "/stop")
um.row("/new", "/snew", "/week")
bot.send_message(message.from_user.id, "Начнём!", reply_markup=um)
#Завершение сессии
@bot.message_handler(commands=["stop"])
def send_to_stop(message):
config.wt = False
hm = tlb.types.ReplyKeyboardRemove()
bot.send_message(message.from_user.id, "До встречи!", reply_markup=hm)
#what's now?
@bot.message_handler(commands=["now"])
def send_to_stop(message):
print("The user with id: " + str(message.from_user.id) + " use the command NOW")
idd = message.from_user.id
now = datetime.now()
id_day = now.weekday()
print(id_day)
sql = "SELECT * FROM plans WHERE id_user=" + str(idd) + " AND id_day=" + str(id_day)
con = sqlite3.connect(config.url)
cur = con.cursor()
try:
t = con.execute(sql).fetchall()
except sqlite3.DatabaseError as err:
print(err)
bot.send_message(message.from_user.id, "some Error, ssory =)\nnow is " + str(id_day+1))
else:
st = ""
for i in t:
st = st + str(i[1]) + "time start: " + str(i[3]) + "\ntime stop: " + str(i[4]) + "\n---------------\n"
bot.send_message(message.from_user.id, st)
cur.close()
con.close()
#new
@bot.message_handler(commands=["new"])
def send_to_new(message):
config.wt = True
print("The user with id: " + str(message.from_user.id) + " use the command NEW")
st = "Чтобы добавить новые задачи напишите номер дня недели от 1 до 7"
bot.send_message(message.from_user.id, st)
#stop plans
@bot.message_handler(commands=["snew"])
def send_to_stop(message):
print("The user with id: " + str(message.from_user.id) + " use the command SNEW")
config.wt = False
bot.send_message(message.from_user.id, "Вы закончили писать задания,\n вы молодец!")
#text with commands
@bot.message_handler(content_types=["text"])
def send_to_text(message):
print(config.number)
if config.wt and message.text in config.numbers:
config.number = int(message.text)
bot.send_message(message.from_user.id, "Вы выбрали " + config.days[int(message.text)-1] + config.write_plans)
elif config.wt:
bl = plans(message.text)
tp = type(bl)
if tp == tuple:
idd = message.from_user.id
text = message.text
start = bl[0]
stop = bl[1]
ms = text[:bl[2]]
con = sqlite3.connect(config.url)
cur = con.cursor()
number = config.number-1
st = (ms, idd, start, stop, number)
sql = "INSERT INTO plans(what, id_user, dtime_start, dtime_stop, id_day) VALUES(?, ?, ?, ?, ?)"
try:
cur.execute(sql, st)
except sqlite3.DatabaseError as err:
print(err)
else:
con.commit()
print(ms)
bot.send_message(message.from_user.id, config.write_more)
elif tp == bool and not bl:
bot.send_message(message.from_user.id, "Что-то с вашим заданием не так! Проверьте, правильно ли вы вводите время начала и конца!")
else:
bot.send_message(message.from_user.id, "Nope, 1 2 3 4 5 6 7")
except:
print("Warning!")
#clear plans
def clear_plans():
now = datetime.now()
day = now.weekday()
while True:
if day == 6 and not config.week_none:
config.week_none = True
if day == 0 and config.week_none:
config.week_none = False
con = sqlite3.connect(config.url)
sql = "SELECT * FROM users"
t = cur.execute(t).fetchall()
for i in t:
bot.send_message(i[1], "Через минуту будет произведена отчистка данных прошлой недели, \nпожалуйста не вводите новых данных, иначе они будут утеряны")
time.sleep(60)
cur = con.cursor()
sql = "DELETE FROM plans"
cur.execute(sql)
con.commit()
cur.close()
con.close()
print("plans are cleared")
time.sleep(60)
t1 = potok.Thread(target=mn)
t2 = potok.Thread(target=clear_plans)
t1.daemon = True
t2.daemon = True
t1.start()
t2.start()
bot.polling(none_stop=True, interval=0)
| [
"dex1cre@mail.ru"
] | dex1cre@mail.ru |
e3a17a5121943d355e6b723092ef64da003de344 | 40b52661769a803f470b84872d692a388095cca6 | /lovelace/migrations/0013_auto_20190319_2002.py | 67687e3fc0868d8e75c8be7e5a20dd96cdf83b91 | [] | no_license | leilesca/demodayfinalizado | 4ff2e3fc4fabfa85c96cd4778b257cb20d00dd71 | edb117f3d73bcda53f6db0625335e3bbd688f6f8 | refs/heads/master | 2020-04-30T06:55:57.077625 | 2019-03-20T15:14:00 | 2019-03-20T15:14:00 | 176,668,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # Generated by Django 2.1.7 on 2019-03-19 23:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lovelace', '0012_perfilusuario'),
]
operations = [
migrations.AddField(
model_name='estabelecimento',
name='imagem',
field=models.CharField(default='', max_length=250),
),
migrations.AlterField(
model_name='estabelecimento',
name='telefone_estabelecimento',
field=models.CharField(max_length=20),
),
]
| [
"alessandra.mitie@gmail.com"
] | alessandra.mitie@gmail.com |
b7c679595c429b8800f1d4f502e2a7f308771f09 | c478f4c1c14a250633666b9eac9835b36f86f704 | /ckcore/core/task/task_handler.py | dbfabe44bf93545706eaeb0b91447a503a52210a | [
"Apache-2.0"
] | permissive | yasinai/cloudkeeper | 10d49df5e9a69b4106ca16fff6bb04e150dd743d | 7d1708deaf237b4175006852d87a2ecce1372779 | refs/heads/main | 2023-08-14T07:10:33.071714 | 2021-10-13T08:37:23 | 2021-10-13T08:37:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,786 | py | from __future__ import annotations
import asyncio
import logging
import re
from asyncio import Task, CancelledError
from datetime import timedelta
from io import TextIOWrapper
from typing import Optional, Any, Callable, Union, Sequence, Dict, List, Tuple
import argparse
from aiostream import stream
from argparse import ArgumentParser, Namespace
from functools import reduce
from copy import copy
from core.cli.cli import CLI
from core.db.jobdb import JobDb
from core.db.runningtaskdb import RunningTaskData, RunningTaskDb
from core.error import ParseError, CLIParseError
from core.message_bus import MessageBus, Event, Action, ActionDone, Message, ActionError
from core.task.job_handler import JobHandler
from core.task.model import Subscriber
from core.task.scheduler import Scheduler
from core.task.start_workflow_on_first_subscriber import wait_and_start
from core.task.subscribers import SubscriptionHandler
from core.task.task_description import (
Workflow,
RunningTask,
EventTrigger,
TimeTrigger,
TaskSurpassBehaviour,
PerformAction,
Step,
TaskDescription,
Job,
ExecuteCommand,
TaskCommand,
SendMessage,
ExecuteOnCLI,
StepErrorBehaviour,
RestartAgainStepAction,
)
from core.util import first, Periodic, group_by, uuid_str, utc_str
log = logging.getLogger(__name__)
class TaskHandler(JobHandler):
# region init
@staticmethod
def add_args(arg_parser: ArgumentParser) -> None:
arg_parser.add_argument(
"--jobs",
nargs="*",
type=argparse.FileType("r"),
help="Read job definitions from given file.",
)
arg_parser.add_argument(
"--start-collect-on-subscriber-connect",
default=False,
action="store_true",
help="Start the collect workflow, when the first handling actor connects to the system.",
)
def __init__(
self,
running_task_db: RunningTaskDb,
job_db: JobDb,
message_bus: MessageBus,
subscription_handler: SubscriptionHandler,
scheduler: Scheduler,
cli: CLI,
args: Namespace,
):
self.running_task_db = running_task_db
self.job_db = job_db
self.message_bus = message_bus
self.subscription_handler = subscription_handler
self.scheduler = scheduler
self.cli = cli
self.args = args
# Step1: define all workflows and jobs in code: later it will be persisted and read from database
self.task_descriptions: Sequence[TaskDescription] = [*self.known_workflows(), *self.known_jobs()]
self.tasks: Dict[str, RunningTask] = {}
self.message_bus_watcher: Optional[Task] = None # type: ignore # pypy
self.initial_start_workflow_task: Optional[Task] = None # type: ignore # pypy
self.timeout_watcher = Periodic("task_timeout_watcher", self.check_overdue_tasks, timedelta(seconds=10))
self.registered_event_trigger: List[Tuple[EventTrigger, TaskDescription]] = []
self.registered_event_trigger_by_message_type: Dict[str, List[Tuple[EventTrigger, TaskDescription]]] = {}
# endregion
# region startup and teardown
async def update_trigger(self, desc: TaskDescription, register: bool = True) -> None:
# safeguard: unregister all event trigger of this task
for existing in (tup for tup in self.registered_event_trigger if desc.id == tup[1].id):
self.registered_event_trigger.remove(existing)
# safeguard: unregister all schedule trigger of this task
for job in self.scheduler.list_jobs():
if str(job.id).startswith(desc.id):
job.remove()
# add all triggers
if register:
for trigger in desc.triggers:
if isinstance(trigger, EventTrigger):
self.registered_event_trigger.append((trigger, desc))
if isinstance(trigger, TimeTrigger):
uid = f"{desc.id}_{trigger.cron_expression}"
name = f"Trigger for task {desc.id} on cron expression {trigger.cron_expression}"
self.scheduler.cron(uid, name, trigger.cron_expression, self.time_triggered, desc, trigger)
# recompute the lookup table
self.registered_event_trigger_by_message_type = group_by(
lambda t: t[0].message_type, self.registered_event_trigger
)
# task descriptors can hold placeholders (e.g. @NOW@)
# which should be replaced, when the task is started (or restarted).
def evaluate_task_definition(self, descriptor: TaskDescription, **env: str) -> TaskDescription:
def evaluate(step: Step) -> Step:
if isinstance(step.action, ExecuteCommand):
update = copy(step)
update.action = ExecuteCommand(self.cli.replace_placeholder(step.action.command, **env))
return update
else:
return step
updated = copy(descriptor)
updated.steps = [evaluate(step) for step in descriptor.steps]
return updated
async def start_task(self, desc: TaskDescription) -> Optional[RunningTask]:
existing = first(lambda x: x.descriptor.id == desc.id and x.is_active, self.tasks.values()) # type: ignore
if existing:
if desc.on_surpass == TaskSurpassBehaviour.Skip:
log.info(
f"Task {desc.name} has been triggered. Since the last job is not finished, "
f"the execution will be skipped, as defined by the task"
)
return None
elif desc.on_surpass == TaskSurpassBehaviour.Replace:
log.info(f"New task {desc.name} should replace existing run: {existing.id}.")
existing.end()
await self.store_running_task_state(existing)
elif desc.on_surpass == TaskSurpassBehaviour.Parallel:
log.info(f"New task {desc.name} will race with existing run {existing.id}.")
else:
raise AttributeError(f"Surpass behaviour not handled: {desc.on_surpass}")
updated = self.evaluate_task_definition(desc)
wi, commands = RunningTask.empty(updated, self.subscription_handler.subscribers_by_event)
log.info(f"Start new task: {updated.name} with id {wi.id}")
# store initial state in database
await self.running_task_db.insert(wi)
self.tasks[wi.id] = wi
await self.execute_task_commands(wi, commands)
return wi
async def start_interrupted_tasks(self) -> List[RunningTask]:
descriptions = {w.id: w for w in self.task_descriptions}
def reset_state(wi: RunningTask, task_data: RunningTaskData) -> RunningTask:
# reset the received messages
wi.received_messages = task_data.received_messages # type: ignore
# move the fsm into the last known state
wi.machine.set_state(task_data.current_state_name)
# import state of the current step
wi.current_state.import_state(task_data.current_state_snapshot)
# reset times
wi.task_started_at = task_data.task_started_at
wi.step_started_at = task_data.step_started_at
# ignore all messages that would be emitted
wi.move_to_next_state()
return wi
instances: List[RunningTask] = []
async for data in self.running_task_db.all():
descriptor = descriptions.get(data.task_descriptor_id)
if descriptor:
# we have captured the timestamp when the task has been started
updated = self.evaluate_task_definition(descriptor, now=utc_str(data.task_started_at))
rt = RunningTask(data.id, updated, self.subscription_handler.subscribers_by_event)
instance = reset_state(rt, data)
if isinstance(instance.current_step.action, RestartAgainStepAction):
log.info(f"Restart interrupted action: {instance.current_step.action}")
await self.execute_task_commands(instance, instance.current_state.commands_to_execute())
instances.append(instance)
else:
log.warning(f"No task description with this id found: {data.task_descriptor_id}. Remove instance data.")
await self.running_task_db.delete(data.id)
return instances
async def __aenter__(self) -> TaskHandler:
log.info("TaskHandler is starting up!")
# load job descriptions from configuration files
file_jobs = [await self.parse_job_file(file) for file in self.args.jobs] if self.args.jobs else []
jobs: List[Job] = reduce(lambda r, l: r + l, file_jobs, [])
# load job descriptions from database
db_jobs = [job async for job in self.job_db.all()]
self.task_descriptions = [*self.task_descriptions, *jobs, *db_jobs]
# load and restore all tasks
self.tasks = {wi.id: wi for wi in await self.start_interrupted_tasks()}
await self.timeout_watcher.start()
for descriptor in self.task_descriptions:
await self.update_trigger(descriptor)
if self.args.start_collect_on_subscriber_connect:
filtered = [wf for wf in self.known_workflows() if wf.id == "collect"]
self.initial_start_workflow_task = wait_and_start(filtered, self, self.message_bus)
async def listen_to_message_bus() -> None:
async with self.message_bus.subscribe("task_handler") as messages:
while True:
message = None
try:
message = await messages.get()
if isinstance(message, Event):
await self.handle_event(message)
elif isinstance(message, Action):
await self.handle_action(message)
elif isinstance(message, (ActionDone, ActionError)):
log.info(f"Ignore message via event bus: {message}")
except asyncio.CancelledError as ex:
# if we outer task is cancelled, give up
raise ex
except Exception as ex:
log.error(f"Could not handle event {message} - give up.", exc_info=ex)
self.message_bus_watcher = asyncio.create_task(listen_to_message_bus())
return self
async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
log.info("Tear down task handler")
# deregister from all triggers
for descriptor in self.task_descriptions:
await self.update_trigger(descriptor, register=False)
# stop timeout watcher
await self.timeout_watcher.stop()
# stop event listener
if self.message_bus_watcher:
self.message_bus_watcher.cancel()
try:
await self.message_bus_watcher
except CancelledError:
log.info("task has been cancelled")
# wait for all running commands to complete
for task in list(self.tasks.values()):
if task.update_task:
await task.update_task
del self.tasks[task.id]
# in case the task is not done
if self.initial_start_workflow_task and not self.initial_start_workflow_task.done():
self.initial_start_workflow_task.cancel()
# endregion
# region job handler
async def running_tasks(self) -> List[RunningTask]:
return list(self.tasks.values())
async def start_task_by_descriptor_id(self, uid: str) -> Optional[RunningTask]:
td = first(lambda t: t.id == uid, self.task_descriptions) # type: ignore # pypy
if td:
return await self.start_task(td)
else:
raise NameError(f"No task with such id: {uid}")
async def list_jobs(self) -> List[Job]:
return [job for job in self.task_descriptions if isinstance(job, Job)]
async def add_job(self, job: Job) -> None:
descriptions = list(self.task_descriptions)
existing = first(lambda td: td.id == job.id, descriptions) # type: ignore # pypy
if existing:
if not existing.mutable:
raise AttributeError(f"There is an existing job with this {job.id} which can not be deleted!")
log.info(f"Job with id {job.id} already exists. Update this job.")
descriptions.remove(existing)
# store in database
await self.job_db.update(job)
descriptions.append(job)
self.task_descriptions = descriptions
await self.update_trigger(job)
async def delete_running_task(self, task: RunningTask) -> None:
task.descriptor_alive = False
# remove tasks from list of running tasks
self.tasks.pop(task.id, None)
if task.update_task and not task.update_task.done():
task.update_task.cancel()
# mark step as error
task.end()
# remove from database
await self.running_task_db.delete(task.id)
async def delete_job(self, job_id: str) -> Optional[Job]:
job: Job = first(lambda td: td.id == job_id and isinstance(td, Job), self.task_descriptions) # type: ignore
if job:
if not job.mutable:
raise AttributeError(f"Can not delete job: {job.id} - it is defined in a system file!")
# delete all running tasks of this job
for task in list(filter(lambda x: x.descriptor.id == job.id, self.tasks.values())):
log.info(f"Job: {job_id}: delete running task: {task.id}")
await self.delete_running_task(task)
await self.job_db.delete(job_id)
descriptions = list(self.task_descriptions)
descriptions.remove(job)
self.task_descriptions = descriptions
await self.update_trigger(job, register=False)
return job
# endregion
# region maintain running tasks
async def time_triggered(self, descriptor: TaskDescription, trigger: TimeTrigger) -> None:
log.info(f"Task {descriptor.name} triggered by time: {trigger.cron_expression}")
await self.start_task(descriptor)
async def check_for_task_to_start_on_message(self, msg: Message) -> None:
# check if this event triggers any new task
for trigger, descriptor in self.registered_event_trigger_by_message_type.get(msg.message_type, []):
if msg.message_type == trigger.message_type:
comp = trigger.filter_data
if {key: msg.data.get(key) for key in comp} == comp if comp else True:
log.info(f"Event {msg.message_type} triggers task: {descriptor.name}")
await self.start_task(descriptor)
async def handle_event(self, event: Event) -> None:
# check if any running task want's to handle this event
for wi in list(self.tasks.values()):
handled, commands = wi.handle_event(event)
if handled:
await self.execute_task_commands(wi, commands, event)
# check if this event triggers any new task
await self.check_for_task_to_start_on_message(event)
# noinspection PyMethodMayBeStatic
async def handle_action(self, action: Action) -> None:
await self.check_for_task_to_start_on_message(action)
async def handle_action_result(
self, done: Union[ActionDone, ActionError], fn: Callable[[RunningTask], Sequence[TaskCommand]]
) -> None:
wi = self.tasks.get(done.task_id)
if wi:
commands = fn(wi)
return await self.execute_task_commands(wi, commands, done)
else:
log.warning(
f"Received an ack for an unknown task={done.task_id} "
f"event={done.message_type} from={done.subscriber_id}. Ignore."
)
async def handle_action_done(self, done: ActionDone) -> None:
return await self.handle_action_result(done, lambda wi: wi.handle_done(done))
async def handle_action_error(self, err: ActionError) -> None:
log.info(f"Received error: {err.error} {err.step_name}:{err.message_type} of {err.task_id}")
return await self.handle_action_result(err, lambda wi: wi.handle_error(err))
async def execute_task_commands(
self, wi: RunningTask, commands: Sequence[TaskCommand], origin_message: Optional[Message] = None
) -> None:
async def execute_commands() -> None:
# execute and collect all task commands
results: Dict[TaskCommand, Any] = {}
for command in commands:
if isinstance(command, SendMessage):
await self.message_bus.emit(command.message)
results[command] = None
elif isinstance(command, ExecuteOnCLI):
# TODO: instead of executing it in process, we should do an http call here to a worker core.
result = await self.cli.execute_cli_command(command.command, stream.list, **command.env)
results[command] = result
else:
raise AttributeError(f"Does not understand this command: {wi.descriptor.name}: {command}")
# The descriptor might be removed in the mean time. If this is the case stop execution.
if wi.descriptor_alive:
active_before_result = wi.is_active
# before we move on, we need to store the current state of the task (or delete if it is done)
await self.store_running_task_state(wi, origin_message)
# inform the task about the result, which might trigger new tasks to execute
new_commands = wi.handle_command_results(results)
if new_commands:
# note: recursion depth is defined by the number of steps in a job description and should be safe.
await self.execute_task_commands(wi, new_commands)
elif active_before_result and not wi.is_active:
# if this was the last result the task was waiting for, delete the task
await self.store_running_task_state(wi, origin_message)
async def execute_in_order(task: Task) -> None: # type: ignore # pypy
# make sure the last execution is finished, before the new execution starts
await task
await execute_commands()
# start execution of commands in own task to not block the task handler
# note: the task is awaited finally in the timeout handler or context handler shutdown
wi.update_task = asyncio.create_task(execute_in_order(wi.update_task) if wi.update_task else execute_commands())
async def store_running_task_state(self, wi: RunningTask, origin_message: Optional[Message] = None) -> None:
if wi.is_active:
await self.running_task_db.update_state(wi, origin_message)
elif wi.id in self.tasks:
await self.running_task_db.delete(wi.id)
async def list_all_pending_actions_for(self, subscriber: Subscriber) -> List[Action]:
pending = map(lambda x: x.pending_action_for(subscriber), self.tasks.values())
return [x for x in pending if x]
# endregion
# region periodic task checker
async def check_overdue_tasks(self) -> None:
"""
Called periodically by the system.
In case there is an overdue task, an action error is injected into the task.
"""
for task in list(self.tasks.values()):
if task.is_active: # task is still active
if task.current_state.check_timeout():
if task.current_step.on_error == StepErrorBehaviour.Continue:
current_step = task.current_step.name
commands = task.move_to_next_state()
log.warning(
f"Task {task.id}: {task.descriptor.name} timed out in step "
f"{current_step}. Moving on to step: {task.current_step.name}."
)
await self.execute_task_commands(task, commands)
else:
log.warning(
f"Task {task.id}: {task.descriptor.name} timed out "
f"in step {task.current_step.name}. Stop the task."
)
task.end()
await self.store_running_task_state(task)
# check again for active (might have changed for overdue tasks)
if not task.is_active:
if task.update_task:
await task.update_task
del self.tasks[task.id]
await self.running_task_db.delete(task.id)
# endregion
# region parse job data
async def parse_job_file(self, file: TextIOWrapper) -> List[Job]:
"""
Parse a file with job definitions.
Every line is either a blank line, a comment or a job definition.
Example
# cron based trigger
0 5 * * sat : reported name="foo" | desire name="bla"
# cron based + event based trigger
0 5 * * sat event_name : reported name="foo" | desire name="bla"
:param file: the file handle to parse.
:return: all parsed jobs.
:raises: ParseError if the job can not be parsed
"""
jobs = []
with file as reader:
for line in reader:
stripped = line.strip()
if stripped and not stripped.startswith("#"):
job = await self.parse_job_line(f"file {file.name}", stripped, mutable=False)
jobs.append(job)
return jobs
async def parse_job_line(self, source: str, line: str, mutable: bool = True) -> Job:
"""
Parse a single job line.
:param source: the source of this line (just for naming purposes)
:param line: the line of text
:param mutable: defines if the resulting job is mutable or not.
:return: the parsed jon
"""
stripped = line.strip()
uid = uuid_str(stripped)[0:8]
timeout = timedelta(hours=1)
wait_timeout = timedelta(hours=24)
async def parse_with_cron() -> Job:
parts = re.split("\\s+", stripped, 5)
if len(parts) != 6:
raise ValueError(f"Invalid job {stripped}")
wait: Optional[Tuple[EventTrigger, timedelta]] = None
trigger = TimeTrigger(" ".join(parts[0:5]))
command = parts[5]
# check if we also need to wait for an event: name_of_event : command
if self.event_re.match(command):
event, command = re.split("\\s*:\\s*", command, 1)
wait = EventTrigger(event), wait_timeout
await self.cli.evaluate_cli_command(command, replace_place_holder=False)
return Job(uid, ExecuteCommand(command), trigger, timeout, wait, mutable)
async def parse_event() -> Job:
event, command = re.split("\\s*:\\s*", stripped, 1)
await self.cli.evaluate_cli_command(command, replace_place_holder=False)
return Job(uid, ExecuteCommand(command), EventTrigger(event), timeout, mutable=mutable)
try:
return await (parse_event() if self.event_re.match(stripped) else parse_with_cron())
except CLIParseError as ex:
raise ex
except Exception as ex:
raise ParseError(f"Can not parse job command line: {stripped}") from ex
event_re = re.compile("^[A-Za-z][A-Za-z0-9_\\-]*\\s*:")
# endregion
# region known task descriptors
@staticmethod
def known_jobs() -> List[Job]:
return [
Job(
"example-job",
ExecuteCommand("echo hello"),
EventTrigger("run_job"),
timedelta(seconds=10),
mutable=False,
),
Job(
"example-wait-job",
ExecuteCommand("sleep 10; echo I was started at @NOW@"),
EventTrigger("run_job"),
timedelta(seconds=10),
(EventTrigger("wait"), timedelta(seconds=30)),
mutable=False,
),
]
@staticmethod
def known_workflows() -> List[Workflow]:
collect_steps = [
Step("pre_collect", PerformAction("pre_collect"), timedelta(seconds=10)),
Step("collect", PerformAction("collect"), timedelta(seconds=10)),
Step("post_collect", PerformAction("post_collect"), timedelta(seconds=10)),
]
cleanup_steps = [
Step("pre_plan", PerformAction("pre_cleanup_plan"), timedelta(seconds=10)),
Step("plan", PerformAction("cleanup_plan"), timedelta(seconds=10)),
Step("post_plan", PerformAction("post_cleanup_plan"), timedelta(seconds=10)),
Step("pre_clean", PerformAction("pre_cleanup"), timedelta(seconds=10)),
Step("clean", PerformAction("cleanup"), timedelta(seconds=10)),
Step("post_clean", PerformAction("post_cleanup"), timedelta(seconds=10)),
]
metrics_steps = [
Step("pre_metrics", PerformAction("pre_generate_metrics"), timedelta(seconds=10)),
Step("metrics", PerformAction("generate_metrics"), timedelta(seconds=10)),
Step("post_metrics", PerformAction("post_generate_metrics"), timedelta(seconds=10)),
]
return [
Workflow(
uid="collect",
name="collect",
steps=collect_steps + metrics_steps,
triggers=[EventTrigger("start_collect_workflow")],
),
Workflow(
uid="cleanup",
name="cleanup",
steps=cleanup_steps + metrics_steps,
triggers=[EventTrigger("start_cleanup_workflow")],
),
Workflow(
uid="metrics",
name="metrics",
steps=metrics_steps,
triggers=[EventTrigger("start_metrics_workflow")],
),
Workflow(
uid="collect_and_cleanup",
name="collect_and_cleanup",
steps=collect_steps + cleanup_steps + metrics_steps,
triggers=[EventTrigger("start_collect_and_cleanup_workflow"), TimeTrigger("0 * * * *")],
),
]
# endregion
| [
"noreply@github.com"
] | yasinai.noreply@github.com |
efafbc94b23a7a62be62c4ca61b6578234ec47c0 | 92e2145b2424134112ef0d7b1b5cf1c1220dbc1e | /Sith/wsgi.py | fe015aa0f796905984ddf362e66f7fe3c8aba921 | [] | no_license | rustam66637/SithRecruitTest | 9c52725d99df8ae074068d17e7412565a12d9f2d | 1f8fd5da3b71d8b1af59b6d8214807ec37941dc5 | refs/heads/master | 2023-08-06T17:02:51.966481 | 2021-02-07T15:48:20 | 2021-02-07T15:48:20 | 265,617,598 | 0 | 0 | null | 2021-09-22T19:03:30 | 2020-05-20T16:04:42 | Python | UTF-8 | Python | false | false | 385 | py | """
WSGI config for Sith project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Sith.settings')
application = get_wsgi_application()
| [
"rust66637@gmail.com"
] | rust66637@gmail.com |
f236d30755d31dd21adfeefd650ed057583684a9 | 084191aab1a6f975515fc33b7c7e55f7e2fb90fb | /DataRace/DataFountain/消费金融场景下的用户购买预测/git方案学习/rank1/extract_feature.py | ecfd7818044085ce984ee27e3eae86d562e76d44 | [] | no_license | whoami-zy/StudyML | 481829be126a789faa79524a2cc5acee28aa3174 | 5f12d6cb1b97ba4b2dbabe83062c6435b7729fd4 | refs/heads/master | 2020-04-05T20:30:05.789389 | 2018-11-13T07:14:33 | 2018-11-13T07:14:33 | 157,183,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,700 | py | import pandas as pd
import numpy as np
from collections import Counter
import scipy.stats as sp
import time
import datetime
def get_continue_launch_count(strs,parm):
time = strs.split(":")
time = dict(Counter(time))
time = sorted(time.items(), key=lambda x: x[0], reverse=False)
key_list = []
value_list = []
if len(time) == 1:
return -2
for key,value in dict(time).items():
key_list.append(int(key))
value_list.append(int(value))
if np.mean(np.diff(key_list, 1)) == 1:
if parm == '1':
return np.mean(value_list)
elif parm == '2':
return np.max(value_list)
elif parm == '3':
return np.min(value_list)
elif parm == '4':
return np.sum(value_list)
elif parm == '5':
return np.std(value_list)
else:
return -1
def get_time_gap(strs,parm):
time = strs.split(":")
time = list(set(time))
time = sorted(list(map(lambda x:int(x),time)))
time_gap = []
#用户只在当天活跃
if len(time) == 1:
return -20
for index, value in enumerate(time):
if index <= len(time) - 2:
gap = abs(time[index] - time[index + 1])
time_gap.append(gap)
if parm == '1':
return np.mean(time_gap)
elif parm == '2':
return np.max(time_gap)
elif parm == '3':
return np.min(time_gap)
elif parm == '4':
return np.std(time_gap)
elif parm == '5':
return sp.stats.skew(time_gap)
elif parm == '6':
return sp.stats.kurtosis(time_gap)
def get_week(day):
day = int(day)
if day >= 1 and day <= 7:
return 1
if day >= 8 and day <= 14:
return 2
if day >= 15 and day <= 21:
return 3
if day >= 22 and day <= 28:
return 4
if day >= 28:
return 5
def cur_day_repeat_count(strs):
time = strs.split(":")
time = dict(Counter(time))
time = sorted(time.items(), key=lambda x: x[1], reverse=False)
# 一天一次启动
if (len(time) == 1) & (time[0][1] == 1):
return 0
# 一天多次启动
elif (len(time) == 1) & (time[0][1] > 1):
return 1
# 多天多次启动
elif (len(time) > 1) & (time[0][1] >= 2):
return 2
else:
return 3
def get_lianxu_day(day_list):
time = day_list.split(":")
time = list(map(lambda x:int(x),time))
m = np.array(time)
if len(set(m)) == 1:
return -1
m = list(set(m))
if len(m) == 0:
return -20
n = np.where(np.diff(m) == 1)[0]
i = 0
result = []
while i < len(n) - 1:
state = 1
while n[i + 1] - n[i] == 1:
state += 1
i += 1
if i == len(n) - 1:
break
if state == 1:
i += 1
result.append(2)
else:
i += 1
result.append(state + 1)
if len(n) == 1:
result.append(2)
if len(result) != 0:
# print(result)
return np.max(result)
def load_csv():
train_agg = pd.read_csv('../orig_data/train_agg.csv',sep='\t')
train_log = pd.read_csv('../orig_data/train_log.csv', sep='\t')
train_flg = pd.read_csv('../orig_data/train_flg.csv', sep='\t')
test_agg = pd.read_csv('../orig_data/test_agg.csv', sep='\t')
test_log = pd.read_csv('../orig_data/test_log.csv', sep='\t')
return train_agg,train_log,train_flg,test_agg,test_log
def merge_table(train_agg, train_log, train_flg, test_agg, test_log):
train_log['label'] = 1
test_log['label'] = 0
data = pd.concat([train_log,test_log],axis=0)
data = extract_feature(data)
train_log = data[data.label == 1]
test_log = data[data.label == 0]
del train_log['label']
del test_log['label']
all_train = pd.merge(train_flg, train_agg, on=['USRID'], how='left')
train = pd.merge(all_train,train_log,on='USRID',how='left')
test = pd.merge(test_agg,test_log,on='USRID',how='left')
return train,test
def extract_feature(data):
data['cate_1'] = data['EVT_LBL'].apply(lambda x: int(x.split('-')[0]))
data['cate_2'] = data['EVT_LBL'].apply(lambda x: int(x.split('-')[1]))
data['cate_3'] = data['EVT_LBL'].apply(lambda x: int(x.split('-')[2]))
data['day'] = data['OCC_TIM'].apply(lambda x: int(x[8:10]))
data['hour'] = data['OCC_TIM'].apply(lambda x: int(x[11:13]))
data['week'] = data['day'].apply(get_week)
feat1 = data.groupby(['USRID'], as_index=False)['OCC_TIM'].agg({"user_count": "count"})
feat2 = data.groupby(['USRID'], as_index=False)['day'].agg({"user_act_day_count": "nunique"})
feat3 = data[['USRID', 'day']]
feat3['day'] = feat3['day'].astype('str')
feat3 = feat3.groupby(['USRID'])['day'].agg(lambda x: ':'.join(x)).reset_index()
feat3.rename(columns={'day': 'act_list'}, inplace=True)
# 用户是否多天有多次启动(均值)
feat3['time_gap_mean'] = feat3['act_list'].apply(get_time_gap,args=('1'))
# 最大
feat3['time_gap_max'] = feat3['act_list'].apply(get_time_gap,args=('2'))
# 最小
feat3['time_gap_min'] = feat3['act_list'].apply(get_time_gap,args=('3'))
# 方差
feat3['time_gap_std'] = feat3['act_list'].apply(get_time_gap,args=('4'))
# 锋度
feat3['time_gap_skew'] = feat3['act_list'].apply(get_time_gap, args=('5'))
# 偏度
feat3['time_gap_kurt'] = feat3['act_list'].apply(get_time_gap, args=('6'))
# 平均行为次数
feat3['mean_act_count'] = feat3['act_list'].apply(lambda x: len(x.split(":")) / len(set(x.split(":"))))
# 平均行为日期
feat3['act_mean_date'] = feat3['act_list'].apply(lambda x: np.sum([int(ele) for ele in x.split(":")]) / len(x.split(":")))
# 活动天数占当月的比率
# feat3['act_rate'] = feat3['act_list'].apply(lambda x: len(list(set(x.split(":")))) / 31)
# 用户是否当天有多次启动
feat3['cur_day_repeat_count'] = feat3['act_list'].apply(cur_day_repeat_count)
# 连续几天启动次数的均值,
feat3['con_act_day_count_mean'] = feat3['act_list'].apply(get_continue_launch_count, args=('1'))
# 最大值,
feat3['con_act_day_count_max'] = feat3['act_list'].apply(get_continue_launch_count, args=('2'))
# 最小值
feat3['con_act_day_count_min'] = feat3['act_list'].apply(get_continue_launch_count, args=('3'))
# 次数
feat3['con_act_day_count_total'] = feat3['act_list'].apply(get_continue_launch_count, args=('4'))
# 方差
feat3['con_act_day_count_std'] = feat3['act_list'].apply(get_continue_launch_count, args=('5'))
feat3['con_act_max'] = feat3['act_list'].apply(get_lianxu_day)
del feat3['act_list']
# 用户发生行为的天数
feat4 = data.groupby(['USRID'], as_index=False)['cate_1'].agg({'user_cate_1_count': "count"})
feat5 = data.groupby(['USRID'], as_index=False)['cate_2'].agg({'user_cate_2_count': "count"})
feat6 = data.groupby(['USRID'], as_index=False)['cate_3'].agg({'user_cate_3_count': "count"})
# 判断时期是否为高峰日
higt_act_day_list = [7, 14, 21, 28]
feat8 = data[['USRID', 'day']]
feat8['is_higt_act'] = feat8['day'].apply(lambda x: 1 if x in higt_act_day_list else 0)
feat8 = feat8.drop_duplicates(subset=['USRID'])
feat10 = data.groupby(['USRID','day'], as_index=False)['TCH_TYP'].agg({'user_per_count': "count"})
feat10_copy = feat10.copy()
# 用户平均每天启动次数
feat11 = feat10_copy.groupby(['USRID'],as_index=False)['user_per_count'].agg({"user_per_count_mean":"mean"})
# 用户启动次数最大值
feat12 = feat10_copy.groupby(['USRID'], as_index=False)['user_per_count'].agg({"user_per_count_max": "max"})
# 用户启动次数最小值
feat13 = feat10_copy.groupby(['USRID'], as_index=False)['user_per_count'].agg({"user_per_count_min": "min"})
# 用户每天启动次数的众值
feat14 = feat10_copy.groupby(['USRID'], as_index=False)['user_per_count'].agg({"user_mode_count":lambda x: x.value_counts().index[0]})
# 方差
feat15 = feat10_copy.groupby(['USRID'], as_index=False)['user_per_count'].agg({"user_std_count":np.std})
# 峰度
feat16 = feat10_copy.groupby(['USRID'], as_index=False)['user_per_count'].agg({"user_skew_count": sp.stats.skew})
# 偏度
feat17 = feat10_copy.groupby(['USRID'], as_index=False)['user_per_count'].agg({"user_kurt_count": sp.stats.kurtosis})
# 中位数
feat18 = feat10_copy.groupby(['USRID'], as_index=False)['user_per_count'].agg({"user_median_count": np.median})
feat27 = data[['USRID', 'OCC_TIM']]
feat27['OCC_TIM'] = feat27['OCC_TIM'].apply(lambda x: time.mktime(time.strptime(x, "%Y-%m-%d %H:%M:%S")))
log = feat27.sort_values(['USRID', 'OCC_TIM'])
log['next_time'] = log.groupby(['USRID'])['OCC_TIM'].diff(-1).apply(np.abs)
log = log.groupby(['USRID'], as_index=False)['next_time'].agg({
'next_time_mean': np.mean,
'next_time_std': np.std,
'next_time_min': np.min,
'next_time_max': np.max
})
# 每周的平均消费次数
feat28_sp = data.groupby(['USRID','week'], as_index=False)['TCH_TYP'].agg({'user_per_week_count': "count"})
feat28_sp_copy = feat28_sp.copy()
# 用户平均每天启动次数
feat11_sp = feat28_sp_copy.groupby(['USRID'], as_index=False)['user_per_week_count'].agg({"user_per_week_count_mean": "mean"})
# 用户启动次数最大值
feat12_sp = feat28_sp_copy.groupby(['USRID'], as_index=False)['user_per_week_count'].agg({"user_per_week_count_max": "max"})
# 用户启动次数最小值
feat13_sp = feat28_sp_copy.groupby(['USRID'], as_index=False)['user_per_week_count'].agg({"user_per_week_count_min": "min"})
# 用户每天启动次数的众值
feat14_sp = feat28_sp_copy.groupby(['USRID'], as_index=False)['user_per_week_count'].agg({"user_per_week_count_mode": lambda x: x.value_counts().index[0]})
# 方差
feat15_sp = feat28_sp_copy.groupby(['USRID'], as_index=False)['user_per_week_count'].agg({"user_per_week_count_std": np.std})
# 峰度
feat16_sp = feat28_sp_copy.groupby(['USRID'], as_index=False)['user_per_week_count'].agg({"user_per_week_count_skew": sp.stats.skew})
# 偏度
feat17_sp = feat28_sp_copy.groupby(['USRID'], as_index=False)['user_per_week_count'].agg({"user_per_week_count_kurt": sp.stats.kurtosis})
# 中位数
feat18_sp = feat28_sp_copy.groupby(['USRID'], as_index=False)['user_per_week_count'].agg({"user_per_week_count_median": np.median})
# 离周末越近,越消费的可能性比较大,统计前2天的特征
before_three = data[(data.day >= 28) & (data.day <= 31)]
before_three_copy = before_three.copy()
feat1_before = before_three_copy.groupby(['USRID'], as_index=False)['OCC_TIM'].agg({"user_count_before": "count"})
feat2_before = before_three_copy.groupby(['USRID'], as_index=False)['day'].agg({"user_act_day_count_before": "nunique"})
feat3_before = before_three_copy[['USRID', 'day']]
feat3_before['day'] = feat3_before['day'].astype('str')
feat3_before = feat3_before.groupby(['USRID'])['day'].agg(lambda x: ':'.join(x)).reset_index()
feat3_before.rename(columns={'day': 'act_list'}, inplace=True)
# 用户是否多天有多次启动(均值)
feat3_before['before_time_gap_mean'] = feat3_before['act_list'].apply(get_time_gap, args=('1'))
# 最大
feat3_before['before_time_gap_max'] = feat3_before['act_list'].apply(get_time_gap, args=('2'))
# 最小
feat3_before['before_time_gap_min'] = feat3_before['act_list'].apply(get_time_gap, args=('3'))
# 方差
feat3_before['before_time_gap_std'] = feat3_before['act_list'].apply(get_time_gap, args=('4'))
# 锋度
feat3_before['before_time_gap_skew'] = feat3_before['act_list'].apply(get_time_gap, args=('5'))
# 偏度
feat3_before['before_time_gap_kurt'] = feat3_before['act_list'].apply(get_time_gap, args=('6'))
# 平均行为次数
feat3_before['before_mean_act_count'] = feat3_before['act_list'].apply(lambda x: len(x.split(":")) / len(set(x.split(":"))))
# 平均行为日期
feat3_before['before_act_mean_date'] = feat3_before['act_list'].apply(lambda x: np.sum([int(ele) for ele in x.split(":")]) / len(x.split(":")))
# 用户是否当天有多次启动
feat3_before['before_cur_day_repeat_count'] = feat3_before['act_list'].apply(cur_day_repeat_count)
# 连续几天启动次数的均值,
feat3_before['before_con_act_day_count_mean'] = feat3_before['act_list'].apply(get_continue_launch_count, args=('1'))
# 最大值,
feat3_before['before_con_act_day_count_max'] = feat3_before['act_list'].apply(get_continue_launch_count, args=('2'))
# 最小值
feat3_before['before_con_act_day_count_min'] = feat3_before['act_list'].apply(get_continue_launch_count, args=('3'))
# 次数
feat3_before['before_con_act_day_count_total'] = feat3_before['act_list'].apply(get_continue_launch_count, args=('4'))
# 方差
feat3_before['before_con_act_day_count_std'] = feat3_before['act_list'].apply(get_continue_launch_count, args=('5'))
feat3_before['before_con_act_max'] = feat3_before['act_list'].apply(get_lianxu_day)
del feat3_before['act_list']
# 用户发生行为的天数
feat4_before = before_three.groupby(['USRID'], as_index=False)['cate_1'].agg({'before_user_cate_1_count': "count"})
feat5_before = before_three.groupby(['USRID'], as_index=False)['cate_2'].agg({'before_user_cate_2_count': "count"})
feat6_before = before_three.groupby(['USRID'], as_index=False)['cate_3'].agg({'before_user_cate_3_count': "count"})
feat28 = pd.crosstab(data['USRID'],data['TCH_TYP']).reset_index()
feat29 = pd.crosstab(data.USRID,data.cate_1).reset_index()
feat30 = pd.crosstab(data.USRID, data.cate_2).reset_index()
feat31 = pd.crosstab(data.USRID, data.cate_3).reset_index()
feat32 = pd.crosstab(data.USRID,data.hour).reset_index()
feat34 = pd.crosstab(data.USRID,data.week).reset_index()
data = data[['USRID','label']]
data = data.drop_duplicates(subset='USRID')
data = pd.merge(data, feat1, on=['USRID'], how='left')
data = pd.merge(data, feat2, on=['USRID'], how='left')
data = pd.merge(data, feat3, on=['USRID'], how='left')
data = pd.merge(data, feat4, on=['USRID'], how='left')
data = pd.merge(data, feat5, on=['USRID'], how='left')
data = pd.merge(data, feat6, on=['USRID'], how='left')
data = pd.merge(data, feat8, on=['USRID'], how='left')
data = pd.merge(data, feat11, on=['USRID'], how='left')
data = pd.merge(data, feat12, on=['USRID'], how='left')
data = pd.merge(data, feat13, on=['USRID'], how='left')
data = pd.merge(data, feat14, on=['USRID'], how='left')
data = pd.merge(data, feat15, on=['USRID'], how='left')
data = pd.merge(data, feat16, on=['USRID'], how='left')
data = pd.merge(data, feat17, on=['USRID'], how='left')
data = pd.merge(data, feat18, on=['USRID'], how='left')
data = pd.merge(data, log, on=['USRID'], how='left')
data = pd.merge(data, feat28, on=['USRID'], how='left')
data = pd.merge(data, feat29, on=['USRID'], how='left')
data = pd.merge(data, feat30, on=['USRID'], how='left')
data = pd.merge(data, feat31, on=['USRID'], how='left')
data = pd.merge(data, feat32, on=['USRID'], how='left')
data = pd.merge(data, feat34, on=['USRID'], how='left')
data = pd.merge(data, feat11_sp, on=['USRID'], how='left')
data = pd.merge(data, feat12_sp, on=['USRID'], how='left')
data = pd.merge(data, feat13_sp, on=['USRID'], how='left')
data = pd.merge(data, feat14_sp, on=['USRID'], how='left')
data = pd.merge(data, feat15_sp, on=['USRID'], how='left')
data = pd.merge(data, feat16_sp, on=['USRID'], how='left')
data = pd.merge(data, feat17_sp, on=['USRID'], how='left')
data = pd.merge(data, feat18_sp, on=['USRID'], how='left')
data = pd.merge(data, feat1_before, on=['USRID'], how='left')
data = pd.merge(data, feat2_before, on=['USRID'], how='left')
data = pd.merge(data, feat3_before, on=['USRID'], how='left')
data = pd.merge(data, feat4_before, on=['USRID'], how='left')
data = pd.merge(data, feat5_before, on=['USRID'], how='left')
data = pd.merge(data, feat6_before, on=['USRID'], how='left')
return data
def main():
train_agg, train_log, train_flg, test_agg, test_log = load_csv()
train, test = merge_table(train_agg, train_log, train_flg, test_agg, test_log)
train.to_csv('../fea/train.csv',sep='\t',index=None)
test.to_csv('../fea/test.csv', sep='\t', index=None)
if __name__ == '__main__':
main() | [
"wantong.sun@foxmail.com"
] | wantong.sun@foxmail.com |
cc43440bf8eef90f605cad03be1c751c39264596 | dbc5bfb2b3eec582c58dc3e76c1e92450250b371 | /app.py | a39b092802bf44862baf5865cdc4c07b315988d4 | [] | no_license | olawrence799/webScraping | ac36a0bfa5ce15fe506e9ebbfbfcebc625014e17 | 50e608426ebfe1efbc237de16ba30c21f5b19553 | refs/heads/master | 2021-09-09T16:38:54.013295 | 2018-03-18T05:03:49 | 2018-03-18T05:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from flask import Flask, render_template, jsonify, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
mongo = PyMongo(app)
@app.route('/')
def index():
mars = mongo.db.mars.find_one()
return render_template('index.html', mars=mars)
@app.route('/scrape')
def scrape():
mars = mongo.db.mars
data = scrape_mars.scrape()
mars.update({}, data, upsert=True)
return redirect("http://localhost:5000/", code=302)
if __name__ == "__main__":
app.run(debug=True) | [
"owen@Owens-MacBook-Pro.local"
] | owen@Owens-MacBook-Pro.local |
c797e1ec5b3e5955a867418fed9a26431bd4212c | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/redis/v1/redis-v1-py/google/cloud/redis_v1/services/cloud_redis/pagers.py | ea1c2287e22e2c73eb752e030a4919c860621449 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,709 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.redis_v1.types import cloud_redis
class ListInstancesPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.redis_v1.types.ListInstancesResponse` object, and
provides an ``__iter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.redis_v1.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., cloud_redis.ListInstancesResponse],
request: cloud_redis.ListInstancesRequest,
response: cloud_redis.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.redis_v1.types.ListInstancesRequest):
The initial request object.
response (google.cloud.redis_v1.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = cloud_redis.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[cloud_redis.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[cloud_redis.Instance]:
for page in self.pages:
yield from page.instances
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListInstancesAsyncPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.redis_v1.types.ListInstancesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.redis_v1.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[cloud_redis.ListInstancesResponse]],
request: cloud_redis.ListInstancesRequest,
response: cloud_redis.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.redis_v1.types.ListInstancesRequest):
The initial request object.
response (google.cloud.redis_v1.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = cloud_redis.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[cloud_redis.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[cloud_redis.Instance]:
async def async_generator():
async for page in self.pages:
for response in page.instances:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
ffb81ac3e424652e5a48fdb526bf4d905bbd1018 | cfb61b4a2fbe0f7a162e71208b8b772e4300b66b | /read_record/utils.py | 2dae7fcfe0490756c835e00583026a19f3b23923 | [] | no_license | zy421612918/blog | 4c18ca6364fc1d4425dbba82fedb42d762e93ffe | f5b92452b468a54108c23a4a45b6e8c504a3f301 | refs/heads/master | 2020-03-22T12:38:56.977149 | 2018-07-07T04:35:41 | 2018-07-07T04:35:41 | 140,053,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Zhangye"
# Date: 18-6-6
import datetime
from django.contrib.contenttypes.models import ContentType
from .models import ReadNum,ReadDetail
from django.utils import timezone
from django.db.models import Sum
from Blog.models import Blog
def check_read_record(request,obj):
"""
检查是否阅读过
:param request:
:param obj: blog对象
:return: cookie标记
"""
ct = ContentType.objects.get_for_model(obj)
key = '%s_%s_read'%(ct.model, obj.pk) # blog_35_read
if not request.COOKIES.get(key):
# 总阅读数+1
readnum, created = ReadNum.objects.get_or_create(content_type=ct, object_id=obj.pk)
readnum.read_num += 1
readnum.save()
# 当日计数+1
date = timezone.now().date()
readDetail,created = ReadDetail.objects.get_or_create(content_type=ct, object_id=obj.pk, date=date)
readDetail.read_num+=1
readDetail.save()
return key
def get_sevendays_read(content_type):
today = timezone.now().date()
read_list=[]
date_list=[]
print(today)
for i in range(7,0,-1):
date = today - datetime.timedelta(days=i)
date_list.append(date.strftime('%m/%d'))
read_details=ReadDetail.objects.filter(content_type=content_type, date=date)
res = read_details.aggregate(read_nums=Sum('read_num'))
read_list.append(res['read_nums'] or 0)
return read_list,date_list
def get_hot_data(content_type):
today = timezone.now().date()
read_details = ReadDetail.objects.filter(content_type=content_type, date=today).order_by('-read_num')
return read_details[:3]
def yesterday_hot_data(content_type):
today = timezone.now().date()
yesterday = today- datetime.timedelta(days=1)
read_details = ReadDetail.objects.filter(content_type=content_type, date=yesterday).order_by('-read_num')
return read_details[:3]
def get_7days_hot_date():
today = timezone.now().date()
date = today - datetime.timedelta(days=7)
blogs = Blog.objects.filter(read_details__date__lt=today,
read_details__date__gte=date).\
values('id','title')\
.annotate(read_num_sum=Sum('read_details__read_num')).order_by('-read_num_sum')
return blogs[:7]
| [
"421612918"
] | 421612918 |
c413c1f3310160c4a5734f569e47321d1e3bd9a6 | 36989b4bcd070b0889e9be921f8142df9cec67f5 | /PYTHON/Questao10.py | 850eed441aa1b2f2d9c2637f0aca160fa34890d1 | [] | no_license | larissacsf/Exercises-1 | 61dfc46d594c3a20673f65809606cdc814639792 | 26f39571ba870c1a584ac8ad80efe841b231018e | refs/heads/master | 2021-05-23T09:48:08.245971 | 2020-04-05T15:46:52 | 2020-04-05T15:46:52 | 253,228,979 | 0 | 0 | null | 2020-04-05T12:25:30 | 2020-04-05T12:25:29 | null | UTF-8 | Python | false | false | 498 | py | '''Elaborar um algoritmo que lê 3 valores a,b,c e os escreve. A seguir, encontre o maior
dos 3 valores e o escreva com a mensagem : "É o maior".'''
def maiorNumero(n1, n2, n3):
if n1 > n2 and n1 > n3:
aux = n1
elif n2 > n3:
aux = n2
else:
aux = n3
return aux
def main():
n1 = int(input("Digite um número: "))
n2 = int(input("Digite um número: "))
n3 = int(input("Digite um número: "))
print(maiorNumero(n1, n2, n3), "é o maior")
main() | [
"larissa_cuane10@hotmail.com"
] | larissa_cuane10@hotmail.com |
94d95349f260143004954f60708415d49af4c291 | 741d2868877fbd237a966e04899942224e36ae76 | /QT_Mouse_House/Qt_Exp_Controls.py | dba439c0336c290c9bc35c28e9d3e221e52bcfe6 | [] | no_license | TiangeLi/ArduinoCntrl | 5e7b9357a4b304e712b1046ae6d13cda05c4fc01 | f0ec2eef91d145f4807928e020857c62aaaf159b | refs/heads/master | 2021-05-04T09:41:44.942708 | 2018-03-28T18:15:44 | 2018-03-28T18:15:44 | 61,747,298 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 37,091 | py | # coding=utf-8
"""Qt Widgets for Experiment Config and Control"""
import os
from Names import *
from Misc_Classes import *
from Misc_Functions import *
from Custom_Qt_Tools import *
from copy import deepcopy
from Dirs_Settings import ArdDataContainer
from Custom_Qt_Widgets import GUI_LiveScrollingGraph, GUI_LJDataReport, GUI_StatusBars
import PyQt4.QtGui as qg
import PyQt4.QtCore as qc
class GUI_ExpControls(qg.QWidget):
"""Settings and Buttons for Configuring the Experiment"""
def __init__(self, dirs, gui_progbar):
qg.QWidget.__init__(self)
self.dirs = dirs
self.gui_progbar = gui_progbar
# Initialize Widgets
self.time_config_widget = GUI_TimeConfig(self.dirs, self.gui_progbar)
self.start_stop_btns_widget = GUI_StartStopBtns(self.dirs, self.gui_progbar)
self.save_config_widget = GUI_SaveConfig(self.dirs)
self.lj_graph_widget = GUI_LiveScrollingGraph(dirs)
self.lj_table_widget = GUI_LJDataReport()
self.lj_config_widget = GUI_LabJackConfig(self.dirs, self.lj_graph_widget)
self.ard_config_widget = GUI_ArdConfig(self.dirs, self.gui_progbar)
self.device_presets_widget = GUI_DevicePresets(self.dirs, self.gui_progbar,
self.lj_config_widget,
self.time_config_widget)
self.status_bars = GUI_StatusBars()
# Layout
self.setup_tabs()
self.grid = qg.QGridLayout()
self.setLayout(self.grid)
self.add_to_grid()
def setup_tabs(self):
"""Sets up the main and config tabs"""
self.tabs = qg.QTabWidget(self)
tab_main = qg.QWidget(self.tabs)
tab_config = qg.QWidget(self.tabs)
self.tab_grid_main = qg.QGridLayout()
self.tab_grid_config = qg.QGridLayout()
tab_main.setLayout(self.tab_grid_main)
tab_config.setLayout(self.tab_grid_config)
self.tabs.addTab(tab_main, 'Main')
self.tabs.addTab(tab_config, 'Config')
self.add_to_tab_main()
self.add_to_tab_config()
def add_to_tab_main(self):
"""Add Widgets to Main Tab"""
self.tab_grid_main.addWidget(self.lj_graph_widget, 0, 0, 1, 5)
self.tab_grid_main.addWidget(self.status_bars, 1, 0, 1, 3)
self.tab_grid_main.addWidget(self.lj_table_widget, 1, 3, 1, 2)
def add_to_tab_config(self):
"""Add widgets to Config Tab"""
self.tab_grid_config.addWidget(self.ard_config_widget, 0, 0, 1, 2)
self.tab_grid_config.addWidget(self.lj_config_widget, 1, 1)
self.tab_grid_config.addWidget(self.device_presets_widget, 1, 0)
def add_to_grid(self):
"""Add tabs and non-tabbed widgets to grid"""
self.grid.addWidget(self.start_stop_btns_widget, 0, 0)
self.grid.addWidget(self.time_config_widget, 1, 0)
self.grid.addWidget(self.save_config_widget, 2, 0)
self.grid.addWidget(self.tabs, 0, 1, 3, 1)
def enable_disable_widgets(self, exp_running):
"""Enable or Disable Widgets depending on experiment state"""
# Widgets we enable on running an exp.
enabled_on_run = [self.start_stop_btns_widget.stop_btn]
# Widgets we disable on running an exp.
disable_on_run = [self.start_stop_btns_widget.start_btn,
self.start_stop_btns_widget.name_frame,
self.save_config_widget, self.time_config_widget,
self.lj_config_widget, self.ard_config_widget,
self.device_presets_widget]
# Config Enabled/Disabled:
for widget in enabled_on_run:
if exp_running:
widget.setEnabled(True)
else:
widget.setEnabled(False)
for widget in disable_on_run:
if exp_running:
widget.setEnabled(False)
else:
widget.setEnabled(True)
# Other changes
if exp_running:
self.lj_graph_widget.frame.setTitle('LabJack Live Stream (Low Frequency Scanning) - [Recording to File]')
if not exp_running:
self.lj_graph_widget.frame.setTitle('LabJack Live Stream (Low Frequency Scanning) - [Not Recording to File]')
class GUI_LabJackConfig(qg.QWidget):
"""Configuring LabJack Options"""
def __init__(self, dirs, grapher):
qg.QWidget.__init__(self)
self.num_ch = 14
self.dirs = dirs
self.lj_proc_updated = False
self.grapher = grapher
self.proc_handler_queue = PROC_HANDLER_QUEUE
self.grid = qg.QGridLayout()
self.setLayout(self.grid)
# Setup and Layout
self.initialize()
def initialize(self):
"""Sets up Config Label and Controls"""
self.frame = qg.QGroupBox('LabJack Config')
self.grid.addWidget(self.frame)
grid = qg.QGridLayout()
self.frame.setLayout(grid)
# Create objects and Add to grid
grid.addWidget(self.init_summary_label(), 0, 0)
grid.addWidget(self.init_entry(), 1, 0)
grid.addWidget(self.init_checkboxes(), 2, 0)
self.reload_gui_info(True)
def init_summary_label(self):
"""Sets up a label for summarizing labjack settings"""
self.summ_label = qg.QLabel('')
self.summ_label.setAlignment(qc.Qt.AlignCenter)
self.summ_label.setFrameStyle(qg.QFrame.Sunken | qg.QFrame.StyledPanel)
return self.summ_label
def init_entry(self):
"""Sets up entry and button for scan freq"""
frame = qg.QFrame()
grid = qg.QGridLayout()
frame.setLayout(grid)
# Entry
freq = self.dirs.settings.lj_last_used.scan_freq
self.scan_freq_entry = GUI_IntOnlyEntry(max_digits=5, default_txt=str(freq))
self.scan_freq_entry.setText(str(freq))
# Max Freq Label
self.max_freq_label = qg.QLabel('')
self.max_freq_label.setAlignment(qc.Qt.AlignCenter)
self.max_freq_label.setFrameStyle(qg.QFrame.Sunken | qg.QFrame.StyledPanel)
self.set_max_freq_label()
# Buttons
confirm_btn = qg.QPushButton('Confirm')
confirm_btn.clicked.connect(self.save_scan_freq)
# Layout
grid.addWidget(qg.QLabel('Scan Frequency:'), 0, 0, 1, 4)
grid.addWidget(self.max_freq_label, 1, 0, 1, 5)
grid.addWidget(self.scan_freq_entry, 2, 0, 1, 4)
grid.addWidget(confirm_btn, 2, 4)
return frame
def init_checkboxes(self):
"""Sets up checkboxes for each channel"""
frame = qg.QFrame()
grid = qg.QGridLayout()
frame.setLayout(grid)
# Generate and Layout checkboxes
grid.addWidget(qg.QLabel('Channels Currently in Use:\n'), 0, 0, 1, 5)
self.checkboxes = [qg.QCheckBox('{:0>2}'.format(i)) for i in range(self.num_ch)]
[grid.addWidget(self.checkboxes[i],
i // 5 + 1,
i - (i // 5) * 5) for i in range(self.num_ch)]
[self.checkboxes[i].clicked.connect(self.save_channels) for i in list(range(self.num_ch))]
return frame
def set_max_freq_label(self):
"""Shows user the maximum scan freq allowed"""
num_ch = len(self.dirs.settings.lj_last_used.ch_num)
msg = 'Max Freq = 50000 / [{}] Channels = [{}] Hz' \
''.format(num_ch, int(50000 / num_ch))
self.max_freq_label.setText(msg)
def set_summ_label(self):
"""Sets the summary label to reflect most updated LJ settings"""
ch = self.dirs.settings.lj_last_used.ch_num
freq = self.dirs.settings.lj_last_used.scan_freq
self.summ_label.setText('Channels:\n{}\n\nScan Freq: [{} Hz]'.format(ch, freq))
def save_scan_freq(self):
"""Sets the scan frequency"""
scan_freq = self.scan_freq_entry.text().strip()
max_freq = int(50000 / len(self.dirs.settings.lj_last_used.ch_num))
if scan_freq == '' \
or int(scan_freq) > max_freq \
or int(scan_freq) == 0:
self.scan_freq_entry.visual_warning()
return
self.update_lj_last_used(scan_freq=int(deepcopy(scan_freq)), reset_gui_elements=False)
def update_lj_last_used(self, ch_num=None, scan_freq=None, send_to_proc_handler=True, reset_gui_elements=False):
"""Update dirs.settings.lj_last_used. Also notify proc_handler to update lj_proc settings"""
self.grapher.plots_are_reset = False
if not ch_num:
ch_num = deepcopy(self.dirs.settings.lj_last_used.ch_num)
if not scan_freq:
scan_freq = deepcopy(self.dirs.settings.lj_last_used.scan_freq)
# first notify proc_handler to update lj_proc settings
if not self.lj_proc_updated:
# Send Message
if send_to_proc_handler:
self.proc_handler_queue.put_nowait('{}{}|{}'.format(LJ_CONFIG, ch_num, scan_freq))
# check back every 5 ms until lj_proc has been updated
qc.QTimer.singleShot(5, lambda: self.update_lj_last_used(ch_num, scan_freq, False, reset_gui_elements))
# once proc_handler has updated lj_procs, we update the GUI
elif self.lj_proc_updated:
self.dirs.settings.lj_last_used.ch_num = deepcopy(ch_num)
self.dirs.settings.lj_last_used.scan_freq = deepcopy(scan_freq)
self.reload_gui_info(reset_gui_elements)
self.lj_proc_updated = False
self.grapher.update_graphs()
def save_channels(self):
"""Saves channels selected based on boxes checked"""
selected = [i for i in range(self.num_ch) if self.checkboxes[i].isChecked()]
max_freq = int(50000 / len(selected))
freq = self.dirs.settings.lj_last_used.scan_freq
if freq > max_freq:
freq = max_freq
self.scan_freq_entry.setText(str(freq))
self.update_lj_last_used(ch_num=selected, scan_freq=freq, reset_gui_elements=False)
def enable_disable_chkboxes(self):
"""Enable or disable check boxes depending on number of boxes checked"""
selected = [i for i in range(self.num_ch) if self.checkboxes[i].isChecked()]
# All check boxes available if selected between 1-7 channels
if 1 < len(selected) < 8:
[self.checkboxes[i].setEnabled(True) for i in range(self.num_ch)]
# Cannot select more than 8 channels
elif len(selected) == 8:
[self.checkboxes[i].setEnabled(False) for i in range(self.num_ch)
if i not in self.dirs.settings.lj_last_used.ch_num]
# Cannot select fewer than 1 channel
elif len(selected) == 1:
[self.checkboxes[i].setEnabled(False) for i in range(self.num_ch) if self.checkboxes[i].isChecked()]
def reload_gui_info(self, reset_gui_elements):
"""Changes label, entry, boxes checked based on settings file"""
if reset_gui_elements:
self.scan_freq_entry.setText(str(self.dirs.settings.lj_last_used.scan_freq))
self.set_channels()
self.set_summ_label()
self.enable_disable_chkboxes()
self.set_max_freq_label()
self.grapher.reset_plots()
def set_channels(self):
"""Sets channels based on which ones enabled in dirs.settings"""
[self.checkboxes[i].setChecked(True) for i in self.dirs.settings.lj_last_used.ch_num]
[self.checkboxes[i].setChecked(False) for i in range(self.num_ch)
if i not in self.dirs.settings.lj_last_used.ch_num]
class GUI_ArdConfig(qg.QWidget):
"""Configuring Arduino Input/Output Settings"""
def __init__(self, dirs, gui_progbar):
qg.QWidget.__init__(self)
self.dirs = dirs
self.gui_progbar = gui_progbar
self.grid = qg.QGridLayout()
self.setLayout(self.grid)
# Setup and Layout
self.initialize()
self.setMaximumHeight(120)
self.grid.addWidget(self.frame)
def initialize(self):
"""Sets up buttons for arduino configuring"""
self.frame = qg.QGroupBox('Arduino Config')
grid = qg.QGridLayout()
self.frame.setLayout(grid)
# Dynamic "Types" Label
self.types_label = qg.QLabel(' ')
self.types_label.setAlignment(qc.Qt.AlignCenter)
self.types_label.setFrameStyle(qg.QFrame.Sunken | qg.QFrame.Panel)
self.types_label.setMaximumWidth(qt_text_metrics.width('Output'))
self.types_label.setMinimumWidth(qt_text_metrics.width('Output'))
# Pin Dropdown Menu
pins = ['', tone_pin, ''] + output_pins + [''] + pwm_pins
self.pins_dropdown = qg.QComboBox()
self.pins_dropdown.activated[str].connect(self.pins_dropdown_selection)
for pin in pins:
self.pins_dropdown.addItem(str(pin))
# Other Entries and Labels
static_labels = ['Pin', 'Type', 'Start Time', 'End Time', 'Freq', 'Phase Shift', 'Duty Cycle']
self.entries = {}
for index, label in enumerate(static_labels):
# Create Entries
if label not in ['Type', 'Pin']:
self.entries[label] = GUI_IntOnlyEntry(max_digits=6)
self.entries[label].setEnabled(False) # Initialize to Disabled
# Create Column Labels
static_labels[index] = qg.QLabel(label)
static_labels[index].setAlignment(qc.Qt.AlignCenter)
# Some special restrictions on phase shift and duty cycles
self.entries['Phase Shift'].setMaxLength(3)
self.entries['Phase Shift'].set_min_max_value(0, 360)
self.entries['Duty Cycle'].setMaxLength(2)
self.entries['Duty Cycle'].set_min_max_value(1, 99)
# Add to Grid
for index, label in enumerate(static_labels):
grid.addWidget(label, 0, index)
if label.text() not in ['Type', 'Pin']:
grid.addWidget(self.entries[label.text()], 1, index)
elif label.text() == 'Type':
grid.addWidget(self.types_label, 1, index)
elif label.text() == 'Pin':
grid.addWidget(self.pins_dropdown, 1, index)
# Confirm Button
self.confirm_btn = qg.QPushButton('\nConfirm\n')
self.confirm_btn.clicked.connect(self.add_new_config)
self.confirm_btn.setEnabled(False) # Initialize to Disabled
grid.addWidget(self.confirm_btn, 0, len(static_labels), 2, 1)
def pins_dropdown_selection(self, selection):
"""Changes widget output and enabled entries depending on pin selection"""
try:
selection = int(selection)
except ValueError:
self.types_label.setText('')
self.enable_disable_entries(types=None)
else:
# First we check if our pin selection is a selected arduino progbar
progbar_selected = self.gui_progbar.scene.selectedItems()
if not (len(progbar_selected) == 1 and selection == int(progbar_selected[0].data.pin)):
self.gui_progbar.reset_selection()
# Then we set the labels and enable or disable necessary entries
self.pins_dropdown.setCurrentIndex(self.pins_dropdown.findText(str(selection)))
if selection == tone_pin:
self.types_label.setText(tone)
self.enable_disable_entries(types=tone)
elif selection in output_pins:
self.types_label.setText(output)
self.enable_disable_entries(types=output)
elif selection in pwm_pins:
self.types_label.setText(pwm)
self.enable_disable_entries(types=pwm)
def enable_disable_entries(self, types):
"""Enables or Disables Entries based on field selection"""
universal_enabled = ['Start Time', 'End Time']
self.confirm_btn.setEnabled(True)
if not types:
enabled = []
self.confirm_btn.setEnabled(False)
elif types == tone:
enabled = universal_enabled + ['Freq']
elif types == output:
enabled = universal_enabled
elif types == pwm:
enabled = universal_enabled + ['Freq', 'Phase Shift', 'Duty Cycle']
for entry in self.entries:
if entry not in enabled:
self.entries[entry].setEnabled(False)
else:
self.entries[entry].setEnabled(True)
def add_new_config(self):
"""Adds a new visual and backend config for arduino settings based on user input"""
if not self.check_entries_valid():
return
new_config = ArdDataContainer(*self.get_entry_input())
# Adds the new config and reloads the progbar
# First we check if any conflicts exist
conflicts = self.check_new_config_conflicts(new_config)
# If we have selected exactly one progbar, then we make adjustments to it
# BUT: We make adjustments if and only if there are no conflicts or the conflict is with the selected bar
if len(self.gui_progbar.scene.selectedItems()) == 1 \
and ((len(conflicts) == 1 and conflicts[0] == self.gui_progbar.scene.selectedItems()[0].data)
or len(conflicts) == 0):
# We change the old config and overwrite it to the new config
selected_item_data = self.gui_progbar.scene.selectedItems()[0].data
index = self.dirs.settings.ard_last_used.configs.index(selected_item_data)
self.dirs.settings.ard_last_used.configs[index] = new_config
# Set the background
self.gui_progbar.set_dynamic_background()
# We then reselect the item so the user knows which bar they adjusted
for bar in self.gui_progbar.ard_stim_bars():
if bar.data == new_config:
bar.setSelected(True)
self.gui_progbar.setFocus()
# If no conflicts, we add the new config
elif len(conflicts) == 0:
self.dirs.settings.ard_last_used.configs.append(new_config)
self.gui_progbar.set_dynamic_background()
# Otherwise we send a warning to the user
else:
[item.visual_warning() for item in self.gui_progbar.ard_stim_bars()
for config in conflicts if item.data == config]
def check_entries_valid(self):
"""Checks that user inputs are appropriate inputs"""
# Are entries empty?
if not self.check_entries_not_empty():
return False
# Is the segment endtime greater than the segment start time?
elif int(self.entries['End Time'].text()) <= int(self.entries['Start Time'].text()):
self.entries['End Time'].visual_warning()
self.entries['Start Time'].visual_warning()
return False
# Are tone frequencies at least 50Hz?
elif self.types_label.text() == tone and int(self.entries['Freq'].text()) < 50:
self.entries['Freq'].visual_warning()
GUI_Message('Tone Frequencies must be at least 50Hz;\n\nUse PWM pins for Low Frequencies')
return False
# Are PWM frequencies at most 100Hz?
elif self.types_label.text() == pwm and int(self.entries['Freq'].text()) > 100:
self.entries['Freq'].visual_warning()
GUI_Message('PWM Frequencies must be at most 100Hz;\n\nUse Pin 10 (Tone) for High Frequencies')
return False
else:
return True
def check_new_config_conflicts(self, new_config):
"""Checks if a new user config conflicts with previous entries"""
conflicts = []
configs = self.dirs.settings.ard_last_used.configs
# Is new_config.pin already being used in a pre-existing config?
configs = [config for config in configs if new_config.pin == config.pin]
# If new_config.pin is already being used, does the new_config intersect with any previous configs?
if len(configs) != 0:
new_start, new_stop = new_config.time_on_ms, new_config.time_off_ms
time_segments = [(config.time_on_ms, config.time_off_ms) for config in configs]
conflicts = [configs[time_segments.index(segment)] for segment in time_segments
# New timepoints should not be within previous segments
if segment[0] < new_start < segment[1]
or segment[0] < new_stop < segment[1]
# New segments should not be within or encompass previous segments
or (segment[0] <= new_start and segment[1] >= new_stop)
or (segment[0] >= new_start and segment[1] <= new_stop)]
return conflicts
def check_entries_not_empty(self):
"""Checks that entries are not empty before pulling input from them"""
enabled_but_empty = [self.entries[entry]
for entry in self.entries
if self.entries[entry].isEnabled()
and self.entries[entry].text().strip() == '']
if len(enabled_but_empty) > 0:
[entry.visual_warning() for entry in enabled_but_empty]
return False
elif len(enabled_but_empty) == 0:
return True
def get_entry_input(self):
"""Gets user input from fields and returns specific parameters depending on type specified"""
# First we get the output of all fields
types = self.types_label.text()
pin = int(self.pins_dropdown.currentText())
time_on_ms = int(self.entries['Start Time'].text().strip()) * 1000
time_off_ms = int(self.entries['End Time'].text().strip()) * 1000
freq = self.entries['Freq'].text().strip()
phase_shift = self.entries['Phase Shift'].text().strip()
duty_cycle = self.entries['Duty Cycle'].text().strip()
# Then we distribute necessary information depending on types requested
if types == tone:
return time_on_ms, time_off_ms, types, 10, freq
elif types == output:
return time_on_ms, time_off_ms, types, pin
elif types == pwm:
return time_on_ms, time_off_ms, types, pin, freq, phase_shift, duty_cycle
def load_from_ard_bar(self, data):
"""Gets data from an implemented arduino bar in gui_progbar"""
# Parse data
pin, types, freq, phase_shift, duty_cycle, time_on, time_off = ('',) * 7
if data:
types = data.types
time_on = str(int(data.time_on_ms / 1000))
time_off = str(int(data.time_off_ms / 1000))
if data.types == tone:
pin = '10'
freq = str(data.freq)
elif data.types in [output, pwm]:
pin = str(data.pin)
if data.types == pwm:
freq = str(data.freq)
phase_shift = str(data.phase_shift)
duty_cycle = str(data.duty_cycle)
# Set Widgets
self.pins_dropdown.setCurrentIndex(self.pins_dropdown.findText(pin))
self.types_label.setText(types)
self.entries['Start Time'].setText(time_on)
self.entries['End Time'].setText(time_off)
self.entries['Freq'].setText(freq)
self.entries['Phase Shift'].setText(phase_shift)
self.entries['Duty Cycle'].setText(duty_cycle)
# Enable/Disable widgets
self.enable_disable_entries(types)
class GUI_DevicePresets(qg.QWidget):
    """Selecting/Saving User Defined Presets.
    Presents one save/select panel per device (arduino and labjack) so the
    user can snapshot the current device settings under a name, or recall a
    previously saved snapshot from a dropdown."""
    def __init__(self, dirs, gui_progbar, lj_widget, time_widget):
        qg.QWidget.__init__(self)
        # dirs holds the persistent settings (preset dicts, last-used configs).
        self.dirs = dirs
        # Widgets refreshed when an arduino preset is applied.
        self.gui_progbar = gui_progbar
        self.lj_widget = lj_widget
        self.time_widget = time_widget
        self.grid = qg.QGridLayout()
        self.setLayout(self.grid)
        self.device_types = [arduino, labjack]
        self.initialize()
    def preset_names(self, device):
        """Returns list of presets depending on device;
        we use a function since the list is dynamic, makes life easier"""
        # The leading '' gives the dropdown a blank "no selection" entry.
        if device == labjack:
            return [''] + [name for name in self.dirs.settings.lj_presets]
        elif device == arduino:
            return [''] + [name for name in self.dirs.settings.ard_presets]
    def initialize(self):
        """Set up GUI elements"""
        frame = qg.QGroupBox('Device Presets')
        self.grid.addWidget(frame)
        grid = qg.QGridLayout()
        frame.setLayout(grid)
        # QWidget Containers: one entry/save-button/dropdown per device.
        self.entries = {dev: GUI_EntryWithWarning() for dev in self.device_types}
        self.btns = {dev: qg.QPushButton('Save New') for dev in self.device_types}
        self.dropdowns = {dev: qg.QComboBox() for dev in self.device_types}
        # Set frame layout with two inner frames for each device's preset GUI
        [grid.addWidget(self.init_preset_type(dev), i, 0) for i, dev in enumerate(self.device_types)]
        self.connect_widgets()
    def init_preset_type(self, dev):
        """Sets up an inner frame containing preset options depending on type passed"""
        frame = qg.QFrame()
        frame.setFrameStyle(qg.QFrame.Sunken | qg.QFrame.StyledPanel)
        grid = qg.QGridLayout()
        frame.setLayout(grid)
        # Setup Widgets
        grid.addWidget(qg.QLabel('{} Presets'.format(dev.capitalize())), 0, 0, 1, 5)
        grid.addWidget(self.entries[dev], 2, 0, 1, 5)
        grid.addWidget(self.btns[dev], 3, 2, 1, 1)
        grid.addWidget(qg.QLabel('Select: '), 4, 0, 1, 1)
        grid.addWidget(self.dropdowns[dev], 4, 1, 1, 4)
        return frame
    def connect_widgets(self):
        """Connects individual widgets to appropriate slots"""
        for dev in self.device_types:
            # Dropdown lists the preset names, sorted case-insensitively.
            [self.dropdowns[dev].addItem(name) for name in sorted(self.preset_names(dev), key=str.lower)]
            # NOTE: device=dev default-binds the loop variable so each lambda
            # keeps its own device (avoids Python's late-binding closure trap).
            self.dropdowns[dev].activated[str].connect(lambda option, device=dev: self.select_preset(option, device))
            self.btns[dev].clicked.connect(lambda cl, device=dev: self.save_preset(device))
    def select_preset(self, option, device):
        """Based on user selection in dropdowns, implement the preset"""
        # '' is the blank placeholder entry -- nothing to apply.
        if option == '':
            return
        if device == arduino:
            # deepcopy so later edits to last_used don't mutate the stored preset.
            self.dirs.settings.ard_last_used = deepcopy(self.dirs.settings.ard_presets[option])
            self.gui_progbar.set_dynamic_background()
            self.time_widget.set_text_in_entries()
        elif device == labjack:
            ch_num = self.dirs.settings.lj_presets[option].ch_num
            scan_freq = self.dirs.settings.lj_presets[option].scan_freq
            self.lj_widget.update_lj_last_used(ch_num=ch_num, scan_freq=scan_freq, reset_gui_elements=True)
    def save_preset(self, device):
        """Saves user settings to a hardcopy preset"""
        name = self.entries[device].text().strip()
        # Is the entry empty?
        if len(name) == 0:
            self.entries[device].visual_warning()
            return
        # Is the entry overwriting a previous setting?
        overwrite = None
        if name in self.preset_names(device):
            msg = '[{}]\nAlready exists as a preset!\nOverwrite anyway?'.format(name)
            overwrite = qg.QMessageBox.question(self, 'Overwrite?', msg,
                                                qg.QMessageBox.No | qg.QMessageBox.Yes,
                                                qg.QMessageBox.No)
            if overwrite == qg.QMessageBox.No:
                return
        # If not empty, and [choose to overwrite OR a new name], we proceed to save it.
        if device == arduino:
            self.dirs.settings.ard_presets[name] = deepcopy(self.dirs.settings.ard_last_used)
        elif device == labjack:
            self.dirs.settings.lj_presets[name] = deepcopy(self.dirs.settings.lj_last_used)
        # Insert new preset name in the correct location;
        # if we did an overwrite we don't need to add a new name
        if overwrite != qg.QMessageBox.Yes:
            # Index where the new name lands in the case-insensitive sort.
            ind = sorted(self.preset_names(device) + [name], key=str.lower).index(name)
            self.dropdowns[device].insertItem(ind, name)
        # Now we set the dropdown option to display our newly saved (or overwritten) option
        self.dropdowns[device].setCurrentIndex(self.dropdowns[device].findText(name))
class GUI_StartStopBtns(qg.QWidget):
    """Start and Stop buttons and associated signals/slots.
    Also contains an entry field for naming experiment runs"""
    def __init__(self, dirs, gui_progbar):
        qg.QWidget.__init__(self)
        self.dirs = dirs
        # Synchronization: messages to the process handler go through this queue.
        self.proc_handler_queue = PROC_HANDLER_QUEUE
        # GUI Objects
        self.grid = qg.QGridLayout()
        self.setLayout(self.grid)
        self.gui_progbar = gui_progbar
        # Setup
        self.setMinimumWidth(200)
        self.setMaximumWidth(200)
        self.init_btns()
        self.init_name_entry()
        self.add_to_grid()
    def init_btns(self):
        """Creates and Connects Start Stop Buttons"""
        self.start_btn = qg.QPushButton('START')
        self.start_btn.setStyleSheet('background-color: cyan')
        self.stop_btn = qg.QPushButton('STOP')
        self.stop_btn.setStyleSheet('background-color: orange')
        self.start_btn.clicked.connect(self.start_exp)
        self.stop_btn.clicked.connect(self.stop_exp)
        # Stop is only meaningful once an experiment is running.
        self.stop_btn.setEnabled(False)
    def init_name_entry(self):
        """An entry field for user naming"""
        self.name_frame = qg.QGroupBox('Trial Name:')
        grid = qg.QGridLayout()
        self.name_frame.setLayout(grid)
        self.name_entry = GUI_EntryWithWarning()
        grid.addWidget(self.name_entry)
    def get_exp_name(self):
        """Gets the experiment name from user input.
        Returns False when the field is empty; otherwise returns the name
        with any forbidden characters replaced by underscores."""
        name = str(self.name_entry.text().strip())
        if name == '':
            return False
        for i in name:
            if i in FORBIDDEN_CHARS:
                name = name.replace(i, '_')
        return name
    def add_to_grid(self):
        """Add Buttons to Grid"""
        self.grid.addWidget(self.name_frame, 0, 0)
        self.grid.addWidget(self.start_btn, 1, 0)
        self.grid.addWidget(self.stop_btn, 2, 0)
    def start_exp(self):
        """Starts the experiment"""
        # Get the Exp name; reject empty names and names already on disk.
        name = self.get_exp_name()
        if not name or name in self.dirs.list_file_names():
            self.name_entry.visual_warning()
            return
        # Save directory checks and handling
        self.dirs.check_dirs()
        if not self.dirs.created_date_stamped_dir:
            self.dirs.create_date_stamped_dir()
            # Tell the process handler where this run's output lives.
            self.proc_handler_queue.put_nowait('{}{}'.format(DIR_TO_USE_HEADER, self.dirs.date_stamped_dir))
        # Send start message to proc_handler
        self.proc_handler_queue.put_nowait('{}{}'.format(RUN_EXP_HEADER, name))
    def stop_exp(self):
        """Stops the experiment"""
        self.proc_handler_queue.put_nowait(HARDSTOP_HEADER)
        self.gui_progbar.stop_bar()
class GUI_TimeConfig(qg.QWidget):
    """Entries and Buttons for Configuring Experiment Time.
    Three 2-digit fields (hours, minutes, seconds) plus an Ok button that
    validates and broadcasts the new total experiment time."""
    def __init__(self, dirs, gui_progbar):
        qg.QWidget.__init__(self)
        self.proc_handler_queue = PROC_HANDLER_QUEUE
        self.dirs = dirs
        self.gui_progbar = gui_progbar
        self.grid = qg.QGridLayout()
        self.setLayout(self.grid)
        self.init_entries()
        self.add_to_grid()
    def init_entries(self):
        """Creates and Connects time config entries"""
        self.entries = []
        entry_types = [hh, mm, ss]
        # Entries and Buttons (Signal Emitters)
        # Seed the hh/mm/ss fields from the stored total experiment time.
        hhmmss = time_convert(ms=self.dirs.settings.ttl_time())
        for index, entry_type in enumerate(entry_types):
            self.entries.append(GUI_IntOnlyEntry(max_digits=2, default_txt=str(hhmmss[index])))
            self.entries[index].setMaximumWidth(qt_text_metrics.width('00000'))
        self.set_text_in_entries()
        self.ttl_time_confirm_btn = qg.QPushButton('\nOk\n')
        self.ttl_time_confirm_btn.clicked.connect(self.confirm_time)
        # Static Labels (the two ':' act as separators between the columns)
        static_labels = ['Hour', 'Min', 'Sec', ':', ':']
        for index, name in enumerate(static_labels):
            static_labels[index] = qg.QLabel(name)
            static_labels[index].setAlignment(qc.Qt.AlignCenter)
        # Keep time entries contained in its own frame
        self.time_entry_frame = qg.QGroupBox()
        self.time_entry_frame.setTitle('Total Experiment Time:')
        self.time_entry_frame.setMaximumWidth(200)
        grid = qg.QGridLayout()
        self.time_entry_frame.setLayout(grid)
        # Add components to frame
        grid.addWidget(self.ttl_time_confirm_btn, 2, 0, 1, 5)
        for index, entry in enumerate(self.entries):
            grid.addWidget(static_labels[index], 0, index * 2)
            grid.addWidget(entry, 1, index * 2)
            # Place a ':' separator after the first and second entries.
            if index < 2:
                grid.addWidget(static_labels[index + 3], 1, index * 2 + 1)
    def set_text_in_entries(self):
        """Sets the text based on dirs.settings"""
        hhmmss = time_convert(ms=self.dirs.settings.ttl_time())
        [self.entries[index].setText(str(hhmmss[index])) for index, entry_type in enumerate([hh, mm, ss])]
    def confirm_time(self):
        """Sets the total experiment time"""
        hhmmss = ''
        # Get User Entry: concatenate the three fields into zero-padded 'HHMMSS'.
        for entry in self.entries:
            hhmmss += '{:0>2}'.format(str(entry.text()))
        # Error Checking: enforce a 5 second minimum experiment time.
        if int(hhmmss) < 5:
            ms = 5000
        else:
            ms = time_convert(hhmmss=hhmmss)
        # Is the time in the entries lower than the times configured in the prog bars?
        if any([(ms < config.time_off_ms) for config in self.dirs.settings.ard_last_used.configs]):
            # Clamp up to the latest arduino endpoint and tell the user why.
            ms = max([ms] + [config.time_off_ms for config in self.dirs.settings.ard_last_used.configs])
            hhmmss = time_convert(ms=ms)
            msg = 'Total time cannot be less than [{}:{}:{}] ' \
                  'because one of the arduino output endpoints exceeds this value.\n\n' \
                  'Reconfigure arduino outputs to ' \
                  'reduce total time.'.format(hhmmss[0], hhmmss[1], hhmmss[2])
            GUI_Message(msg)
        # Write the (possibly clamped) time back into the entry fields.
        for index, time in enumerate(time_convert(ms=ms)):
            self.entries[index].setText(str(time))
        self.dirs.settings.set_ttl_time(ms)
        # Broadcast the new total time and redraw the progress bars.
        self.proc_handler_queue.put_nowait('{}{}'.format(TTL_TIME_HEADER, ms))
        self.gui_progbar.set_dynamic_background()
    def add_to_grid(self):
        """Add Widgets to Grid"""
        self.grid.addWidget(self.time_entry_frame, 1, 0, 1, 2)
class GUI_SaveConfig(qg.QWidget):
    """User configurable save file names.
    Shows the current save directory (word-wrapped) and a button that opens
    a directory-picker dialog."""
    def __init__(self, dirs):
        qg.QWidget.__init__(self)
        self.dirs = dirs
        self.proc_handler_queue = PROC_HANDLER_QUEUE
        self.grid = qg.QGridLayout()
        self.setLayout(self.grid)
        # Setup
        self.setMaximumWidth(200)
        self.initialize()
    def initialize(self):
        """Builds the frame showing the current save directory."""
        grid = qg.QGridLayout()
        frame = qg.QGroupBox('Current Save Directory:')
        change_dir_btn = qg.QPushButton('Change Directory')
        self.dir_label = qg.QLabel('')
        self.set_dirs_label(self.dirs.settings.last_used_save_dir)
        self.dir_label.setFrameStyle(qg.QFrame.Sunken | qg.QFrame.Panel)
        # Connect Signals
        change_dir_btn.clicked.connect(self.change_dirs_dialog)
        # Layout
        frame.setLayout(grid)
        grid.addWidget(self.dir_label, 0, 0)
        grid.addWidget(change_dir_btn, 1, 0)
        self.grid.addWidget(frame)
    def change_dirs_dialog(self):
        """Opens a dialog for user to specify save directory"""
        directory = str(qg.QFileDialog.getExistingDirectory(None, "Select Directory",
                                                            self.dirs.settings.last_used_save_dir))
        # Don't change anything if we cancelled the dialog
        if len(directory) == 0:
            return
        # Otherwise set dir to the one we selected in dialog
        self.dirs.settings.last_used_save_dir = directory
        self.set_dirs_label(directory)
        # Force creation of a fresh date-stamped dir under the new location.
        self.dirs.created_date_stamped_dir = False
    def set_dirs_label(self, dirs):
        """Sets self.dirs_label with word wrapping on '\\' markers"""
        # Wrap the path roughly every max_len chars, breaking only at '\'
        # separators (paths here are Windows style).
        max_len = 23
        lines = []
        curr_line = ''
        for i in [d for d in dirs.split('\\')]:
            # Re-attach the separator that split() consumed (not before the
            # very first component).
            if not len(curr_line) == 0:
                curr_line += '\\'
            if len(curr_line + i) < max_len:
                curr_line += i
            else:
                lines.append(curr_line)
                curr_line = '' + i
        lines.append(curr_line)  # Append the final line that hasn't been added yet
        # NOTE(review): ''.join(line) over a string is a no-op, so this is
        # just '\n'.join(lines); kept byte-identical here.
        label = '\n'.join([''.join(line) for line in lines])
        self.dir_label.setText(label)
| [
"tiange.l"
] | tiange.l |
a33ea344425501fccf20a8502fc44380fce73c76 | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /dino-master/dino/rest/resources/send.py | a63978198fcff73e1c60cefb3ad6386d3ea9a807 | [
"Apache-2.0"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,723 | py | import logging
import traceback
import eventlet
import sys
from dino import environ
from dino import utils
from dino.utils.decorators import timeit
from dino.db.manager import UserManager
from dino.rest.resources.base import BaseResource
from flask import request
logger = logging.getLogger(__name__)
def fail(error_message):
    """Build the standard failure response payload for this REST API."""
    return dict(status='FAIL', message=error_message)
class SendResource(BaseResource):
    """REST endpoint that relays a base64-encoded message to one online user."""
    def __init__(self):
        super(SendResource, self).__init__()
        self.user_manager = UserManager(environ.env)
        self.request = request
    def async_post(self, json):
        """Validate the payload and emit the message to the target user.
        Runs on a green thread spawned from do_post; raises RuntimeError on
        malformed payloads."""
        logger.debug('POST request: %s' % str(json))
        if 'content' not in json:
            raise RuntimeError('no key [content] in json message')
        msg_content = json.get('content')
        if msg_content is None or len(msg_content.strip()) == 0:
            raise RuntimeError('content may not be blank')
        if not utils.is_base64(msg_content):
            raise RuntimeError('content in json message must be base64')
        # Sender identity defaults to the admin user when not supplied.
        user_id = str(json.get('user_id', 0))
        user_name = utils.b64d(json.get('user_name', utils.b64e('admin')))
        object_type = json.get('object_type')
        target_id = str(json.get('target_id'))
        namespace = json.get('namespace', '/ws')
        target_name = json.get('target_name')
        # Build an activity-stream style envelope around the content.
        data = utils.activity_for_message(user_id, user_name)
        data['target'] = {
            'objectType': object_type,
            'id': target_id,
            'displayName': target_name,
            'url': namespace
        }
        data['object'] = {
            'content': msg_content
        }
        # Targets with no live connection get the message dropped (logged only).
        if not environ.env.cache.user_is_in_multicast(target_id):
            logger.info('user {} is offline, dropping message: {}'.format(target_id, str(json)))
            return
        try:
            environ.env.out_of_scope_emit('message', data, room=target_id, json=True, namespace='/ws', broadcast=True)
        except Exception as e:
            # Emit failures are logged and reported, never propagated.
            logger.error('could not /send message to target {}: {}'.format(target_id, str(e)))
            logger.exception(traceback.format_exc())
            environ.env.capture_exception(sys.exc_info())
    @timeit(logger, 'on_rest_send')
    def do_post(self):
        """Validate the request body, then hand it to async_post off-thread."""
        is_valid, msg, json = self.validate_json(self.request, silent=False)
        if not is_valid:
            logger.error('invalid json: %s' % msg)
            raise RuntimeError('invalid json')
        if json is None:
            raise RuntimeError('no json in request')
        if not isinstance(json, dict):
            raise RuntimeError('need a dict')
        # dict(json) copies the payload so the green thread owns its own data.
        eventlet.spawn_n(self.async_post, dict(json))
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
6c685c912dc3606c91cb0c2ed32d5fd4d5107297 | 0e2005613bfbefaa5dd423f74326cf7854f7d83c | /UbuntuScript2.0/script.py | 6d5ff2d0f2347ed7f61e7ec65e856b663e0dc466 | [] | no_license | xrchsploit/Tricks | 1f3bcf4e3f73ece23dba6ef495bdc370ae7b86e3 | 8a7753bf0c5b881cc09f4a266dee06395e37da69 | refs/heads/main | 2023-02-20T05:48:24.911850 | 2021-01-23T03:20:27 | 2021-01-23T03:20:27 | 325,204,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,149 | py | # Modules that are going to be used
import os
import time
# Colors for text
class bcolors:
    """ANSI escape sequences used to colour/emphasise terminal output."""
    RED = '\x1b[91m'
    BLUE = '\x1b[94m'
    GREEN = '\x1b[92m'
    YELLOW = '\x1b[93m'
    NORMAL = '\x1b[0m'   # reset all attributes
    BOLD = '\x1b[1m'
    FLASH = '\x1b[5m'    # slow blink
# ---------------------------------------------------------------------------
# Interactive hardening walkthrough. Runs top-to-bottom and mirrors the
# original flow: warn the operator, fix/update packaging, install tooling,
# offer to strip common hacking tools, then walk firewall, AV, PAM and audit
# steps. User inputs are lower-cased/stripped so 'Y ' works like 'y'.
# ---------------------------------------------------------------------------
# Hacking/scanning tools commonly flagged in hardening checklists; prompted
# for removal in this order (refer to the readme for what you do/don't need).
REMOVABLE_TOOLS = ('apache2', 'wireshark', 'ophcrack', 'nmap', 'zenmap',
                   'kismet', 'hashcat', 'nikto', 'hydra', 'netcat')
def prompt_remove_tool(tool):
    """Ask whether *tool* is needed; 'n' autoremoves it and its dependencies."""
    os.system("clear")
    answer = input("Is {} a critical service or do you need it? y/n ".format(tool)).strip().lower()
    if answer == "y":
        print("Leaving {} installed!".format(tool))
    elif answer == "n":
        print("Removing {} and all dependencies completely... ".format(tool))
        os.system("sudo apt autoremove {} -y".format(tool))
os.system("clear")
# Pre-flight reminders: root, apt health, Firefox privacy settings, logins.
print(bcolors.RED + bcolors.FLASH + "Ensure you ran this as root. I.E (sudo python3 script.py) " + bcolors.NORMAL)
print(bcolors.GREEN + "Ensure you did things like: fix apt and got the GUI software update settings " + bcolors.NORMAL)
print('')
print(bcolors.GREEN + "Ensure you go to firefox > privacy and security settings > enable delete cookies > disable ask to save logins" + bcolors.NORMAL)
print(bcolors.GREEN + "enable block pop ups > Block deceptive content" + bcolors.NORMAL)
print("")
print(bcolors.RED + bcolors.FLASH + "Screenshot all logins! this will do PAM and LightDM!!!" + bcolors.NORMAL)
time.sleep(15)
# Fixing package manager: any broken dependencies will be repaired first.
os.system("sudo dpkg --configure -a")
os.system("sudo apt clean")
# Updates and installing necessary tools.
os.system("sudo apt install net-tools -y")
os.system("sudo apt update -y && sudo apt upgrade -y")
for package in ('clamav', 'synaptic', 'ufw', 'gufw', 'rkhunter', 'gedit', 'terminator'):
    os.system("sudo apt install {} -y".format(package))
# Bum may not work, hit or miss (deliberately no -y so the user can confirm).
os.system("sudo apt install bum")
os.system("clear")
os.system("sudo apt install libpam-cracklib -y")
cracklib = input(bcolors.YELLOW + "*Was cracklib installed correctly? If no type 'n' and go fix it! y/n * " + bcolors.NORMAL).strip().lower()
# The PAM steps below depend on cracklib, so abort if it failed to install.
if cracklib == "n":
    print("Ok, fix it and come back!")
    quit()
print("Proceeding...")
# Tool Removal
print("Going through services and hacking tools commonly used (refer to the readme to see what you do and dont need)... ")
time.sleep(2)
for tool in REMOVABLE_TOOLS:
    prompt_remove_tool(tool)
# Going through packages manually
print(bcolors.GREEN + """Synaptic is going to open, click the button that says 'status' then click installed and check all those packages
You're looking for hacking tools and other random stuff, when you find stuff mark it and when done apply it
When you're done, make sure to exit synaptic for the script to continue. DOCUMENT WHAT YOU DELETE SOMEWHERE""" + bcolors.NORMAL)
time.sleep(7)
os.system("sudo synaptic")
time.sleep(3)
os.system("clear")
# Firewall settings
os.system("sudo ufw enable")
print(bcolors.YELLOW + "Set it to office environment and make sure to set incoming to reject it not deny" + bcolors.NORMAL)
time.sleep(3)
os.system("sudo gufw")
os.system("clear")
# Runs a clamscan (antivirus sweep)
os.system("clear")
print("Running AV scan")
time.sleep(1)
os.system("sudo clamscan")
time.sleep(4)
os.system("clear")
# Rootkit/bootkit check via rkhunter.
print("This is going to go through and check for rootkits/bootkits")
time.sleep(2)
rkhunter = input("Rkhunter scan: type 'y' to run it (recommended on the first pass of this script), 'n' to skip ").strip().lower()
if rkhunter == "y":
    os.system("sudo rkhunter --check")
else:
    print("Skipping rkhunter scan")
time.sleep(1)
os.system("clear")
# PAM Auth Setup
pamq = input(bcolors.GREEN + "Do you need to do pam? y/n" + bcolors.NORMAL).strip().lower()
if pamq == "y":
    print(bcolors.RED + "This is going to do PAM Password Complexity" + bcolors.NORMAL)
    # Copies the prepared common-password template over the live PAM config.
    os.system("sudo cat common-password > /etc/pam.d/common-password")
    print("Finished doing PAM Auth... doing pam password complexity...")
    os.system("clear")
    time.sleep(3)
    print(bcolors.YELLOW + "Doing PAM remember AUTH PASSWORD settings" + bcolors.NORMAL)
    # Lock accounts for 30 minutes after 5 failed logins.
    os.system("sudo echo 'auth required pam_tally2.so deny=5 onerr=fail unlock_time=1800' >> /etc/pam.d/common-auth")
    os.system("clear")
    print(bcolors.YELLOW + "Done with PAM" + bcolors.NORMAL)
else:
    print("Skipping PAM settings...")
# Doing file permissions on sensitive files.
print(bcolors.GREEN + "Doing file permissions for: /etc/passwd /etc/shadow" + bcolors.NORMAL)
os.system("sudo chmod 600 /etc/shadow")
os.system("sudo chmod 644 /etc/passwd")
os.system("clear")
# Check for crontabs
print(bcolors.GREEN + "Go check all these located in /var/spool, comment or delete anything that isnt supposed to be startup!" + bcolors.NORMAL)
os.system("sudo ls /var/spool")
time.sleep(7)
os.system("clear")
# Sudoers check
print(bcolors.GREEN + "Check these files in the following directory using another terminal: /etc/sudoers.d" + bcolors.NORMAL)
time.sleep(4)
os.system("sudo ls /etc/sudoers.d")
os.system("clear")
# Visudo
print(bcolors.YELLOW + "Make sure only root is the only one" + bcolors.NORMAL)
time.sleep(4)
os.system("sudo visudo")
os.system("clear")
# END SEQUENCE print("Finished! A list of what this tool has completed will be sent to a folder")
| [
"noreply@github.com"
] | xrchsploit.noreply@github.com |
fc5950f4de4608412e324da17fb6c26e9e26f0c3 | 637175e6a58817d77ce160744de1ee1b43d76cda | /tutorial/tutorial/spiders/TorDown.py | a3abf30b597692d36831756b899d4a9b6b04c9bf | [] | no_license | AgentG2015/Scrapy | 283a26f7201deaa1498a429fda77421f470f93f1 | 30687f75c578ccb5d5c0c488d639567ce8013406 | refs/heads/master | 2020-04-06T04:08:47.260255 | 2016-07-04T06:33:51 | 2016-07-04T06:33:51 | 59,291,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | import scrapy
from tutorial.items import *
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.http import Request,FormRequest
from scrapy.utils.project import get_project_settings
class TorDown(CrawlSpider):
name = "tordown"
allowed_domains = ["tp.m-team.cc"]
link_template = r"https://tp.m-team.cc/download.php?id="
items = []
start_urls = []
def __init__(self):
self.headers = HEADER
self.cookies = COOKIES
with open('items.txt', 'r') as f:
self.items = f.read().split("\n")[0:-1]
print len(self.items), "items"
def start_requests(self):
for i, id in enumerate(self.items):
#if i > 9: break
url = self.link_template + id
request = FormRequest(url,
headers = self.headers,
cookies =self.cookies,
callback = self.parse_item)
request.meta['id'] = id
yield request
def parse_item(self, response):
filename = "torrents/" + response.meta['id'] + ".torrent"
with open(filename, 'wb') as f:
f.write(response.body)
# Static browser-like request headers replayed on every download request.
HEADER={
    "Host": "tp.m-team.cc",
    "Connection": "keep-alive",
    "Cache-Control": "max-age=0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36",
    "Referer": "https://tp.m-team.cc/torrents.php?sort=5&type=desc",
    "Accept-Encoding": "gzip, deflate, sdch",
    "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6,ja;q=0.4,zh-TW;q=0.2"
    }
# SECURITY NOTE: these are live, hardcoded session cookies (account
# credentials). Anyone with this file can reuse the session -- prefer loading
# them from an untracked config file or environment variables.
COOKIES={
    'c_lang_folder':r'en',
    'c_secure_uid':r'MTU5NTI2',
    'c_secure_pass':r'd310dc816dd70e1425f042d8a6dc5b1c',
    'c_secure_ssl':r'eWVhaA%3D%3D',
    'c_secure_tracker_ssl':r'eWVhaA%3D%3D',
    'c_secure_login':r'bm9wZQ%3D%3D',
    }
| [
"AgentG2015@gmail.com"
] | AgentG2015@gmail.com |
d198cc72d424a69a2c2d0a225554d187793a8099 | 2415d7f3cc3a4cb98897e91386d861d4b672e824 | /src/puremvc/utilities/pipes/messages.py | c8189aae63b178e59f8c87c0193b09a84c06a93f | [] | no_license | Yoriz/puremvc-python-util-pipes | cc4cf60675f4e700736a6bbf6bda13374a37d87e | c8136e648683560c991c0ff359eb14e509f84ee2 | refs/heads/master | 2021-04-09T16:54:36.803883 | 2012-11-28T19:16:25 | 2012-11-28T19:16:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,646 | py | '''
Created on 26 Nov 2012
@author: Dave Wilson
'''
from puremvc.utilities.pipes import interfaces
class Message(interfaces.IPipeMessage):
    """
    Pipe Message.

    Messages travelling through a Pipeline can be filtered and queued. In a
    queue they may be sorted by priority. Based on type, they may be used as
    control messages that modify the behaviour of filter or queue fittings
    connected to the pipeline into which they are written.
    """

    # Queue sort priorities: lower numbers sort to the front of the queue.
    PRIORITY_HIGH = 1
    PRIORITY_MED = 5    # the default priority
    PRIORITY_LOW = 10

    # Base URI shared by every message-type identifier.
    BASE = 'http://puremvc.org/namespaces/pipes/messages/'
    # Normal (non-control) message type.
    NORMAL = BASE + 'normal/'

    def __init__(self, msg_type, header=None, body=None, priority=5):
        """
        @param msg_type: messages can be handled differently according to type
        @param header: meta data about the message for the recipient
        @param body: the message payload
        @param priority: queue ordering hint (lower sorts first)
        """
        self.msg_type = msg_type
        self.header = header
        self.body = body
        self.priority = priority

    def getType(self):
        """Return the type of this message."""
        return self.msg_type

    def setType(self, msg_type):
        """Set the type of this message."""
        self.msg_type = msg_type

    def getPriority(self):
        """Return the priority of this message."""
        return self.priority

    def setPriority(self, priority):
        """Set the priority of this message."""
        self.priority = priority

    def getHeader(self):
        """Return the header of this message."""
        return self.header

    def setHeader(self, header):
        """Set the header of this message."""
        self.header = header

    def getBody(self):
        """Return the body of this message."""
        return self.body

    def setBody(self, body):
        """Set the body of this message."""
        self.body = body
class FilterControlMessage(Message):
    """
    Filter Control Message.

    A special message type for controlling the behaviour of a Filter:

      - SET_PARAMS tells the Filter to retrieve the filter parameters object.
      - SET_FILTER tells the Filter to retrieve the filter function.
      - BYPASS puts the Filter into bypass mode, passing all normal messages
        through unfiltered.
      - FILTER restores filtering mode (the default), cancelling a previous
        BYPASS.

    A Filter only acts on a control message targeted at its own name;
    otherwise it writes the message through to its output unchanged.
    """

    # Message type base URI for filter control messages.
    BASE = Message.BASE + 'filter-control/'
    SET_PARAMS = BASE + 'setparams'   # set filter parameters
    SET_FILTER = BASE + 'setfilter'   # set filter function
    BYPASS = BASE + 'bypass'          # toggle to bypass mode
    FILTER = BASE + 'filter'          # toggle to filtering mode (default)

    def __init__(self, msg_type, name, msg_filter=None, params=None):
        """
        @param msg_type: control message type (one of the constants above)
        @param name: the target filter name
        @param msg_filter: the filter function
        @param params: the parameters object
        """
        super(FilterControlMessage, self).__init__(msg_type)
        self.name = name
        self.msg_filter = msg_filter
        self.params = params

    def setName(self, name):
        """Set the target filter name."""
        self.name = name

    def getName(self):
        """Return the target filter name."""
        return self.name

    def setFilter(self, msg_filter):
        """Set the filter function."""
        self.msg_filter = msg_filter

    def getFilter(self):
        """Return the filter function."""
        return self.msg_filter

    def setParams(self, params):
        """Set the parameters object."""
        self.params = params

    def getParams(self):
        """Return the parameters object."""
        return self.params
class QueueControlMessage(Message):
    """
    Queue Control Message.

    A special message for controlling the behaviour of a Queue. When written
    to a pipeline containing a Queue, the message type is interpreted and
    acted upon by the Queue. Unlike filters, queues need no name: if several
    queues are connected serially, only the first one acts on the message.
    """

    # NOTE(review): Message.BASE already ends in '/', so this URI contains a
    # double slash ('...messages//queue/'). Kept byte-identical because these
    # constants are compared by value and changing them would break any
    # existing consumer that stored them.
    BASE = Message.BASE + '/queue/'
    FLUSH = BASE + 'flush'   # flush the queue
    SORT = BASE + 'sort'     # toggle to sort-by-priority mode
    FIFO = BASE + 'fifo'     # toggle to FIFO mode (default behaviour)

    def __init__(self, msg_type):
        super(QueueControlMessage, self).__init__(msg_type)
| [
"dave.wilson1@ntlworld.com"
] | dave.wilson1@ntlworld.com |
257b7003bc935a62b341351ce28e8be2d272b6ae | bb88a96f31e12007ec99f8e64045e790831805fe | /gringotts/api/v1/sub.py | 71f38bf767d8eab916dcde5a1fd699fbbcb865f1 | [] | no_license | rogeroger-yu/ustack-gringotts | 38ed680f111c262a666f55609074a00fd6f76b7e | 75f656398c11b0dbddf99bf429994624915c3565 | refs/heads/master | 2022-09-24T03:52:46.452502 | 2017-03-30T11:03:54 | 2017-03-30T11:19:22 | 86,901,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | import calendar
import pecan
import wsme
import datetime
import decimal
from pecan import rest
from pecan import request
from wsmeext.pecan import wsexpose
from wsme import types as wtypes
from oslo_config import cfg
from gringotts import exception
from gringotts import utils as gringutils
from gringotts.api.v1 import models
from gringotts.db import models as db_models
from gringotts.openstack.common import log
from gringotts.openstack.common import timeutils
from gringotts.openstack.common import uuidutils
LOG = log.getLogger(__name__)
class SubsController(rest.RestController):
    """REST controller exposing create/update operations on subscriptions."""

    @wsexpose(models.Subscription, body=models.SubscriptionPostBody)
    def post(self, data):
        """Create a subscription from the request body.

        Returns the API model of the new subscription, or None when the
        backend does not produce one.
        """
        db_conn = pecan.request.db_conn
        created = db_conn.create_subscription(request.context,
                                              **data.as_dict())
        if not created:
            return None
        return models.Subscription.from_db_model(created)

    @wsexpose(None, body=models.SubscriptionPutBody)
    def put(self, data):
        """Update an existing subscription.

        A quantity change takes precedence; otherwise, when both the old
        and the new flavor are supplied, the flavor subscription is
        swapped. Any other combination of fields is silently ignored.
        """
        db_conn = pecan.request.db_conn
        if data.quantity != wtypes.Unset:
            db_conn.update_subscription(request.context,
                                        **data.as_dict())
        elif (data.new_flavor != wtypes.Unset
                and data.old_flavor != wtypes.Unset):
            db_conn.update_flavor_subscription(request.context,
                                               **data.as_dict())
| [
"guangyu@unitedstack.com"
] | guangyu@unitedstack.com |
d47adad75197246557075f605b07f839b2123a11 | d4f1bd5e52fe8d85d3d0263ede936928d5811bff | /Python/Problem Solving/BOJ/boj5724.py | c8ae00d9227f3f31db76e85c3ec8da1ed321f2ee | [] | no_license | ambosing/PlayGround | 37f7d071c4402599995a50cac1e7f1a85c6d10dd | 0d5262dbb2fa2128ecb3fd969244fa647b104928 | refs/heads/master | 2023-04-08T04:53:31.747838 | 2023-03-23T06:32:47 | 2023-03-23T06:32:47 | 143,112,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | lst = list()
# Read query values until a terminating 0, remembering the largest one.
idx_lst = []
max_val = -1
while True:
    n = int(input())
    if n == 0:
        break
    idx_lst.append(n)
    max_val = max(max_val, n)

# Precompute prefix sums of squares: lst[k-1] == 1^2 + 2^2 + ... + k^2.
# (lst is empty at this point, so re-binding it preserves behaviour.)
lst = []
running = 0
for i in range(1, max_val + 1):
    running += i * i
    lst.append(running)

# Answer each query from the precomputed table.
for i in idx_lst:
    print(lst[i - 1])
| [
"jiyang@student.42seoul.kr"
] | jiyang@student.42seoul.kr |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.