text stringlengths 38 1.54M |
|---|
from tkinter import *
# Main application window for the simple file-handling GUI.
root = Tk()
root.title("File Handling")
root.geometry("500x300")
def open_file():
    """Read the file named in entry_window and display its contents in text1.

    BUG FIX: the original called text1.set() once per line, so only the
    file's *last* line was ever displayed, and the file handle was never
    closed.
    """
    global text1
    text1.set("")
    if entry_window.get():
        # Context manager guarantees the handle is closed even on error.
        with open(entry_window.get(), 'r') as file:
            text1.set(file.read())
def create_file():
    """Create a new, empty file with the name typed in entry_window.

    Uses exclusive-create mode ('x'). BUG FIX: the original leaked the file
    handle and let FileExistsError propagate out of the Tk callback when the
    file already existed; now it reports the condition in the status field.
    """
    global text1
    text1.set("")
    if entry_window.get():
        try:
            with open(entry_window.get(), "x"):
                pass
        except FileExistsError:
            text1.set("File already exists")
            return
        text1.set("Create File Successfully")
def append_text():
    """Append the text from entry_window3 to the file named in entry_window.

    Shows the file's existing content first, then appends and reports
    success. BUG FIX: the original left both file handles open; `with`
    blocks close them deterministically.
    """
    global text2
    text1.set("")
    if entry_window.get():
        with open(entry_window.get(), 'r') as src:
            for each in src:
                text1.set(each)
        with open(entry_window.get(), "a") as dst:
            if entry_window3.get():
                dst.write(entry_window3.get())
        text1.set("Append Successfully")
# Shared StringVars backing the three entry widgets.
text = StringVar()
text1 = StringVar()
text2 = StringVar()
# Filename input field.
entry_window = Entry(root,justify=CENTER, width=40, borderwidth=4, textvariable=text)
entry_window.place(x=120,y=40)
btn_check = Button(root,text="Open File", command=open_file)
btn_check.place(x=210,y=80)
# Display area for file contents / status messages (bound to text1).
entry_window2 = Entry(root,justify=CENTER, width=40,borderwidth=4, textvariable=text1)
entry_window2.place(x=120,y=120, height=50)
label = Label(root,text="Enter text for apeend:")
label.place(x=120,y=175)
# Text to append to the file (bound to text2).
entry_window3 = Entry(root,justify=CENTER, width=40, borderwidth=4, textvariable=text2)
entry_window3.place(x=120,y=200)
btn_check1 = Button(root,text="Create File", command=create_file)
btn_check1.place(x=80,y=240)
btn_check2 = Button(root,text="Append Text", command=append_text)
btn_check2.place(x=210,y=240)
btn_check3 = Button(root,text="Close File", command=root.destroy)
btn_check3.place(x=350,y=240)
root.mainloop()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-12 15:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter UserWallet.UserWallet_walletPrice to an IntegerField with default 0."""

    dependencies = [
        ('logsys', '0010_auto_20170112_1741'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userwallet',
            name='UserWallet_walletPrice',
            field=models.IntegerField(default=0),
        ),
    ]
|
class Solution:
    def largestSumOfAverages(self, A: List[int], K: int) -> float:
        """Partition A into at most K contiguous groups maximizing the sum
        of the group averages (LeetCode 813).

        dp[i][j] = best score using the first i elements split into j groups;
        transition: dp[i][j] = max over k of dp[k][j-1] + avg(A[k:i]).
        O(n^2 * K) time, O(n * K) space.
        """
        n = len(A)
        # pre_sum[i] = A[0] + ... + A[i] (inclusive prefix sums).
        pre_sum = [0] * n
        pre_sum[0] = A[0]
        for i in range(1, n):
            pre_sum[i] = pre_sum[i - 1] + A[i]

        def avg(i, j):
            # Average of the slice A[i..j], inclusive.
            return (pre_sum[j] - pre_sum[i] + A[i]) / (j - i + 1)

        dp = [[0] * (K + 1) for _ in range(n + 1)]
        for i in range(1, n + 1):
            # BUG FIX: start j at 1. The original included j == 0, whose
            # else-branch read dp[k][-1] (i.e. dp[k][K]) and wrote garbage
            # into the never-read dp[i][0], wasting an O(n) inner loop per i.
            for j in range(1, min(i, K) + 1):
                if j == 1:
                    dp[i][j] = avg(0, i - 1)
                else:
                    for k in range(1, i):
                        dp[i][j] = max(dp[i][j], dp[k][j - 1] + avg(k, i - 1))
        return dp[-1][-1]
from flask import request
from flask_restful import Resource
from http import HTTPStatus
from flask_jwt_extended import get_jwt_identity, jwt_required, jwt_optional
from models.reservation import Reservation
from schemas.reservation import ReservationSchema
reservation_schema = ReservationSchema()
reservation_list_schema = ReservationSchema(many=True)
class ReservationListResource(Resource):
    """Collection endpoint: list published reservations and create new ones."""

    def get(self):
        # Public listing: only reservations flagged as published.
        reservation = Reservation.get_all_published()
        return reservation_list_schema.dump(reservation), HTTPStatus.OK

    @jwt_required
    def post(self):
        """Create a reservation owned by the authenticated user."""
        json_data = request.get_json()
        current_user = get_jwt_identity()
        data = reservation_schema.load(data=json_data)
        reservation = Reservation(**data)
        reservation.user_id = current_user
        reservation.save()
        return reservation_schema.dump(reservation), HTTPStatus.CREATED

    @jwt_required
    def patch(self, reservation_id):
        """Partially update a reservation (owner only).

        NOTE(review): this duplicates ReservationResource.patch and takes a
        reservation_id, which a list-level route normally does not supply —
        confirm the URL rule this resource is registered under.
        """
        json_data = request.get_json()
        data = reservation_schema.load(data=json_data, partial=('name',))
        reservation = Reservation.get_by_id(reservation_id=reservation_id)
        if reservation is None:
            return {'message': 'Reservation not found'}, HTTPStatus.NOT_FOUND
        current_user = get_jwt_identity()
        if current_user != reservation.user_id:
            return {'message': 'Access is not allowed'}, HTTPStatus.FORBIDDEN
        # Keep the current value when a field is absent or falsy.
        reservation.name = data.get('name') or reservation.name
        reservation.pet = data.get('pet') or reservation.pet
        reservation.service = data.get('service') or reservation.service
        reservation.duration = data.get('duration') or reservation.duration
        reservation.save()
        return reservation_schema.dump(reservation), HTTPStatus.OK
class ReservationResource(Resource):
    """Single-reservation endpoint: read, replace, patch, and delete."""

    @jwt_optional
    def get(self, reservation_id):
        """Return one reservation; unpublished ones are owner-only."""
        reservation = Reservation.get_by_id(reservation_id=reservation_id)
        if reservation is None:
            return {'message': 'Reservation not found'}, HTTPStatus.NOT_FOUND
        current_user = get_jwt_identity()
        # Unpublished reservations are visible only to their owner.
        if reservation.is_publish == False and reservation.user_id != current_user:
            return {'message': 'Access is not allowed'}, HTTPStatus.FORBIDDEN
        return reservation.data(), HTTPStatus.OK

    @jwt_required
    def put(self, reservation_id):
        """Full replacement of a reservation's fields (owner only)."""
        json_data = request.get_json()
        reservation = Reservation.get_by_id(reservation_id=reservation_id)
        if reservation is None:
            return {'message': 'Reservation not found'}, HTTPStatus.NOT_FOUND
        current_user = get_jwt_identity()
        if current_user != reservation.user_id:
            return {'message': 'Access is not allowed'}, HTTPStatus.FORBIDDEN
        # NOTE(review): direct indexing raises KeyError when a field is
        # missing from the payload — confirm callers always send all four.
        reservation.name = json_data['name']
        reservation.pet = json_data['pet']
        reservation.service = json_data['service']
        reservation.duration = json_data['duration']
        reservation.save()
        return reservation.data(), HTTPStatus.OK

    @jwt_required
    def patch(self, reservation_id):
        """Partial update (owner only); the schema allows 'name' to be omitted."""
        json_data = request.get_json()
        data = reservation_schema.load(data=json_data, partial=('name',))
        reservation = Reservation.get_by_id(reservation_id=reservation_id)
        if reservation is None:
            return {'message': 'Reservation not found'}, HTTPStatus.NOT_FOUND
        current_user = get_jwt_identity()
        if current_user != reservation.user_id:
            return {'message': 'Access is not allowed'}, HTTPStatus.FORBIDDEN
        # Keep the current value when a field is absent or falsy.
        reservation.name = data.get('name') or reservation.name
        reservation.pet = data.get('pet') or reservation.pet
        reservation.service = data.get('service') or reservation.service
        reservation.duration = data.get('duration') or reservation.duration
        reservation.save()
        return reservation_schema.dump(reservation), HTTPStatus.OK

    @jwt_required
    def delete(self, reservation_id):
        """Delete a reservation (owner only)."""
        reservation = Reservation.get_by_id(reservation_id=reservation_id)
        if reservation is None:
            return {'message': 'Reservation not found'}, HTTPStatus.NOT_FOUND
        current_user = get_jwt_identity()
        if current_user != reservation.user_id:
            return {'message': 'Access is not allowed'}, HTTPStatus.FORBIDDEN
        reservation.delete()
        return {}, HTTPStatus.NO_CONTENT
class ReservationPublishResource(Resource):
    """Publish / unpublish toggle for a single reservation (owner only)."""

    @jwt_required
    def put(self, reservation_id):
        """Mark the reservation as published."""
        record = Reservation.get_by_id(reservation_id=reservation_id)
        if record is None:
            return {'message': 'reservation not found'}, HTTPStatus.NOT_FOUND
        if get_jwt_identity() != record.user_id:
            return {'message': 'Access is not allowed'}, HTTPStatus.FORBIDDEN
        record.is_publish = True
        record.save()
        return {}, HTTPStatus.NO_CONTENT

    @jwt_required
    def delete(self, reservation_id):
        """Mark the reservation as unpublished."""
        record = Reservation.get_by_id(reservation_id=reservation_id)
        if record is None:
            return {'message': 'reservation not found'}, HTTPStatus.NOT_FOUND
        if get_jwt_identity() != record.user_id:
            return {'message': 'Access is not allowed'}, HTTPStatus.FORBIDDEN
        record.is_publish = False
        record.save()
        return {}, HTTPStatus.NO_CONTENT
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import sys
import csv
sys.path.append('../utils')
import segmentation_dataset
import input_generator
from loss import loss
from quantization import QG
from deeplab_v3 import Deeplab_v3
import scipy.misc as misc
# Experiment tag used to namespace checkpoint/summary/loss/visualization folders.
note = "std_quant"
mode = "train"#mode can be train or val
logs_dir="../save/" + note + "/ckpt/"
sums_dir="../save/" + note + "/summary/"
loss_dir="../save/" + note + "/loss/"
vis_dir="../save/" + note + "/vis/"
# Choose the GPU to run on
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
class args:
    # Runtime hyper-parameters for training and evaluation.
    display = 1  # log training loss/accuracy every N steps
    write_summary = 5000  # write a TensorBoard summary every N steps
    try_save = 1000  # validate + checkpoint every N steps
    weight_decay = 1e-5  # L2 regularization coefficient
    batch_norm_decay = 0.95
    batchSize = 8
    testBatchSize = 1# This should not change to ensure correct validation mIoU
    crop_size = [512,512]#size to crop, automatically padded
    # Evaluate at module load: training phase iff the global mode is "train".
    if mode == "train":
        is_training = True
    else:
        is_training = False
# class dataset:
# name = "ade20k"
# train = 20210 #number of training images
# trainaug = 0
# trainval = 0
# val = 2000
# classes = 151 #classes including ignore_label
# ignore_label = 0 #label that does not participate training and inference
# train_steps = 150000
# data_dir = "../../data/ADE20K/tfrecord"
# lr_schedule = [1, 0.0625, 50000, 0.03125, 100000, 0.015625, 150000, 0.0078125]#learning rate for ADE20K dataset
class dataset:
    # PASCAL VOC 2012 segmentation dataset configuration.
    name = "pascal_voc_seg"
    train = 1464 #number of training images
    trainaug = 10582
    trainval = 2913
    val = 1449
    classes = 21 #classes including ignore_label
    ignore_label = 255 #label that does not participate training and inference
    train_steps = 30000
    data_dir = "../../data/pascal_voc_seg/tfrecord"#"../../dataSet/pascal_voc_seg/tfrecord"
    # Flat [step, lr, step, lr, ...] pairs consumed destructively during training.
    lr_schedule = [1, 0.0625, 3000, 0.03125, 12000, 0.015625, 20000, 0.0078125]#learning rate for PASCAL VOC 2012 dataset
def quantize_grads(grads_and_vars, model_class, lrate):
    """Quantize each gradient with QG, leaving the paired variable untouched.

    Args:
        grads_and_vars: list of (gradient, variable) pairs from compute_gradients.
        model_class: unused; kept for signature compatibility with callers.
        lrate: learning-rate tensor forwarded to the QG quantizer.

    Returns:
        A list of [quantized_gradient, variable] pairs.
    """
    # Idiom: list comprehension with tuple unpacking replaces the manual
    # append loop and index accesses of the original.
    return [[QG(grad, lrate), var] for grad, var in grads_and_vars]
def train(loss_val, model_class, lrate):
    """Build the SGD training op: quantized gradients applied after the
    graph's update ops (batch-norm moving averages) and weight clipping."""
    optimizer = tf.train.GradientDescentOptimizer(1)
    grads_and_vars = optimizer.compute_gradients(loss_val)
    quantized = quantize_grads(grads_and_vars, model_class, lrate)
    # Ensure UPDATE_OPS and the model's weight-clip ops run before each step.
    deps = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + model_class.W_clip_op
    with tf.control_dependencies(deps):
        train_op = optimizer.apply_gradients(quantized)
    return train_op
tf.logging.set_verbosity(tf.logging.INFO)
print("Setting up dataset reader")
# Build the input pipeline on the CPU so the GPU is free for the model.
with tf.device('/cpu:0'):
    data_train = segmentation_dataset.get_dataset(
        dataset.name,"trainaug",dataset.data_dir)#train for ADE20K, trainaug for PASCAL
    data_val = segmentation_dataset.get_dataset(
        dataset.name,"val",dataset.data_dir)
    batchTrain = input_generator.get(
        data_train,
        args.crop_size,
        args.batchSize,#is_training=True,
        dataset_split="training")
    batchTest = input_generator.get(
        data_val,
        args.crop_size,
        args.testBatchSize,#is_training=False,
        dataset_split="validation")
train_images = batchTrain['image']
print(train_images)
train_annotations = batchTrain['label']
print(train_annotations)
valid_images = batchTest['image']
valid_annotations = batchTest['label']
valid_names = batchTest['image_name']
valid_height = batchTest['height']
valid_width = batchTest['width']
# Mutable flag so the train and eval phases can share one graph (flipped via assign()).
is_training = tf.Variable(initial_value=args.is_training, trainable=False, name='train_stat', dtype=tf.bool)
#setting up the network
model = Deeplab_v3(dataset.classes,batch_norm_decay=args.batch_norm_decay,is_training=is_training)
logits = model.forward_pass(train_images)
predicts = tf.argmax(logits, axis=-1, name='predicts')
variables_to_restore = tf.trainable_variables(scope='resnet_v2_50')
# finetune the resnet_v2_50 backbone parameters (block1 through block4)
restorer = tf.train.Saver(variables_to_restore)
cross_entropy = loss(logits,train_annotations,dataset.classes,ignore_label=dataset.ignore_label)
# L2 weight regularization over all trainable variables
l2_loss = args.weight_decay * tf.add_n(
    [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()])
# NOTE(review): this rebinding shadows the imported `loss` function; it is
# only called once (above), but renaming the total loss would be safer.
loss = cross_entropy + l2_loss
tf.summary.scalar("loss",loss)
lr = tf.Variable(initial_value=0., trainable=False, name='lr', dtype=tf.float32)
train_op =train(loss,model,lr)
#train benchmark
out = tf.reshape(tf.argmax(logits,axis=3),shape=[-1])#[B,H,W]
labels = tf.reshape(tf.cast(tf.squeeze(train_annotations,squeeze_dims=[3]),dtype=tf.int64), shape=[-1])
# Drop pixels carrying the ignore_label from the metric computation.
indices = tf.squeeze(tf.where(tf.logical_not(tf.equal(
    labels, dataset.ignore_label))), 1)
labels = tf.cast(tf.gather(labels, indices), tf.int64)
out = tf.gather(out, indices)
# NOTE(review): logical_not(equal(...)) makes this the *error* rate rather
# than accuracy, despite the name — confirm which is intended.
accuracy = tf.reduce_mean(tf.cast(tf.logical_not(tf.equal(out, labels)), tf.float32))
tf.summary.scalar("accuracy",accuracy)
#test benchmark
# Reuse the same weights for the validation branch of the graph.
tf.get_variable_scope().reuse_variables()
valid_logits = model.forward_pass(valid_images)
# valid_logits = test_aug(valid_images,model)
valid_output = tf.argmax(valid_logits, axis=-1, name='predicts')# used to visualize the prediciton
valid_predicts = tf.reshape(tf.argmax(valid_logits, axis=-1, name='predicts'), shape=[-1])
valid_labels = tf.reshape(tf.cast(tf.squeeze(valid_annotations,squeeze_dims=[3]),dtype=tf.int64), shape=[-1])
valid_indices = tf.squeeze(tf.where(tf.logical_not(tf.equal(
    valid_labels, dataset.ignore_label))), 1)
valid_labels = tf.cast(tf.gather(valid_labels, valid_indices), tf.int64)
valid_predicts = tf.gather(valid_predicts, valid_indices)
valid_accuracy = tf.reduce_mean(tf.cast(tf.logical_not(tf.equal(valid_predicts, valid_labels)), tf.float32))
mean_iou_val, conf_mat_val = tf.metrics.mean_iou(valid_predicts,valid_labels,dataset.classes,name="miou")
# Save all variables (translated from the original Chinese comment).
saver = tf.train.Saver(tf.all_variables())
# Local variables backing the streaming mIoU metric; re-initialized per eval run.
running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="miou")
running_vars_initializer = tf.variables_initializer(var_list=running_vars)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(sess=sess, coord=coord)#start the queue runner
    if "train" in mode:
        # Restore the fine-tuned resnet_v2_50 weights (translated comment).
        ckpt = tf.train.get_checkpoint_state(logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Last check point restored...")
        else:
            # First run: start from the ImageNet-pretrained backbone and
            # quantize the weights once after restoring.
            restorer.restore(sess, '../pretrain/resnet_v2_50.ckpt')
            sess.run([model.W_q_op])
            print("Model restored, weights quantized.")
    elif mode=="val" or mode=="vis":
        ckpt = tf.train.get_checkpoint_state(logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Last check point restored...")
        else:
            print("Model not fond!")
    else:
        print("This mode is illeagal, please check!")
    # Create the output directories on demand.
    if not os.path.exists(logs_dir): os.makedirs(logs_dir)
    if not os.path.exists(sums_dir): os.makedirs(sums_dir)
    if not os.path.exists(loss_dir): os.makedirs(loss_dir)
    if not os.path.exists(vis_dir): os.makedirs(vis_dir)
    #-------------------------------TensorBoard--------------------------------------
    #setting up tensorboard summary
    train_writer = tf.summary.FileWriter(sums_dir, sess.graph)
    merge = tf.summary.merge_all()
    if mode == "train":
        lastAccu = 1.
        thisAccu = 1.
        with open(loss_dir + 'loss_curve.csv', 'w') as f:
            writer = csv.writer(f)
            for step in range(1, dataset.train_steps):
                # update learning rate: consume (boundary_step, lr) pairs
                # from the flat schedule list as boundaries are reached.
                if len(dataset.lr_schedule) / 2:
                    if step == dataset.lr_schedule[0]:
                        dataset.lr_schedule.pop(0)
                        lr_new = dataset.lr_schedule.pop(0)
                        lr_old = sess.run(lr)
                        sess.run(lr.assign(lr_new))
                        tf.logging.info('lr: %f -> %f' % (lr_old, lr_new))
                #train and evaluate
                loss_tr, l2_loss_tr, predicts_tr, accu, _ = sess.run(
                    fetches=[cross_entropy, l2_loss, predicts, accuracy, train_op])
                #display training loss and accuracy
                if step % args.display == 0:
                    tf.logging.info('Step:%s , loss:%.4f, accuracy:%.4f' % (step,loss_tr, accu))
                #-------------------------------TensorBoard--------------------------------------
                #write summary
                if (step+1) % args.write_summary == 0:
                    summary = sess.run(merge)
                    train_writer.add_summary(summary, step)
                # Periodic validation + checkpoint.
                if (step+1) % args.try_save == 0:
                    batchNumTest = dataset.val // args.testBatchSize
                    # Switch the shared graph into inference mode for eval.
                    sess.run(is_training.assign(False))
                    sess.run(running_vars_initializer)
                    for val_step in range(dataset.val // args.testBatchSize):
                        sess.run(conf_mat_val)
                    score = sess.run(mean_iou_val)
                    # mean_score = np.sum(score)/dataset.classes
                    #tf.logging.info('mIoU on valid set:%.4f' % (mean_score))
                    tf.logging.info('mIoU on valid set:%.4f' % (score))
                    saver.save(sess, logs_dir + "model.ckpt", step)
                    sess.run(is_training.assign(True))
                writer.writerow([step,loss_tr,accu])
    #code to evaluate the model
    elif mode == "val":
        total_accu = 0.
        for step in range(dataset.val // args.testBatchSize):
            sess.run(conf_mat_val)
            mean_score,accu = sess.run([mean_iou_val,valid_accuracy])
            # mean_score = np.sum(score)/dataset.classes
            tf.logging.info('Error on step %s is:%.4f, mIoU is: %.4f' % (step,accu,mean_score))
            total_accu += accu
        total_accu /= dataset.val // args.testBatchSize
        tf.logging.info('mIoU on valid set:%.4f ,error:%.4f' % (mean_score,total_accu))
    #code to visualize the results
    elif mode == "vis":
        if not os.path.exists(vis_dir + "picture"): os.makedirs(vis_dir + "picture")
        if not os.path.exists(vis_dir + "ground"): os.makedirs(vis_dir + "ground")
        if not os.path.exists(vis_dir + "predict"): os.makedirs(vis_dir + "predict")
        for step in range(dataset.val // args.testBatchSize):
            image, anno, pred, name, height, width= sess.run([valid_images,
                valid_annotations,valid_output, valid_names,
                valid_height, valid_width])
            # Drop the trailing channel axis from the annotation tensor.
            anno = np.squeeze(anno, axis=3)
            for itr in range(args.testBatchSize):
                # Crop away the padding added by the input pipeline.
                image_save = image[itr, :height[itr], :width[itr],0:3]
                anno_save = anno[itr, :height[itr], :width[itr]]
                pred_save = pred[itr, :height[itr], :width[itr]]
                # print(image_save.shape)
                misc.imsave(os.path.join(vis_dir + "ground", name[itr] + ".png"), anno_save.astype(np.uint8))
                misc.imsave(os.path.join(vis_dir + "picture", name[itr] + ".png"), image_save.astype(np.uint8))
                misc.imsave(os.path.join(vis_dir + "predict", name[itr] + ".png"), pred_save.astype(np.uint8))
                print("Saved image: %s" % str(step*args.testBatchSize+itr))
|
import time, pygame

pygame.init()
pygame.mixer.init()
# BUG FIX: input() returns a str in Python 3 and time.sleep() requires a
# number — the original raised TypeError on the very first sleep.
n = float(input("Enter time in seconds "))


def main():
    """Wait n seconds, then play the alarm sound and let it run 5 seconds."""
    pygame.mixer.music.load("0897.wav")
    time.sleep(n)
    pygame.mixer.music.play()
    time.sleep(5)


if __name__ == '__main__':
    main()
|
# NOTE(review): removed unresolved Git conflict marker "<<<<<<< HEAD"; both
# conflict branches are retained below (their class names do not collide).
#to create database and collection
from pymongo import MongoClient
import pymongo
import time
class database:
    """Static wrapper around the libdb Mongo collections (book/user/lend)."""

    client = MongoClient('localhost', 27017)
    DB = client.libdb
    book = DB.book
    user = DB.user
    lend = DB.lend

    @staticmethod
    def indexc():
        """Create unique indexes on the id fields of all three collections."""
        database.book.create_index([("bkid", pymongo.ASCENDING)], unique=True)
        database.user.create_index([("uid", pymongo.ASCENDING)], unique=True)
        database.lend.create_index([("bkid", pymongo.ASCENDING)], unique=True)

    @staticmethod
    def insert_book(bkid, bname):
        """Insert a book document; bkid must be numeric."""
        books = {}
        books['name'] = bname
        try:
            books['bkid'] = int(bkid)
        except ValueError:
            print("id should be a number")
            return
        books['availability'] = 1
        books['count'] = 0
        database.book.insert_one(books)

    @staticmethod
    def insert_user(uname, uid, phone):
        """Insert a user document; uid and phone must be numeric."""
        users = {}
        users["name"] = uname
        try:
            users["uid"] = int(uid)
        except ValueError:
            print("id should be a number")
            return
        try:
            users["phone"] = int(phone)
        except ValueError:
            print("enter valid phone number")
            return
        users['books'] = []
        print(users)
        database.user.insert_one(users)

    @staticmethod
    def lendbook(uid, bkid):
        """Lend book bkid to user uid with a due date 15 days from now."""
        da = time.time() + 15 * 24 * 60 * 60
        da = time.gmtime(da)
        if type(bkid) == str:
            try:
                bkid = int(bkid)
            except ValueError:
                print("enter valid book id")
                return
        if type(uid) == str:
            try:
                uid = int(uid)
            except ValueError:
                print("enter valid user id")
                return
        bo = list(database.book.find({"bkid": bkid}))
        if bo == []:
            print("the book doesn't exist")
            return
        bo = bo[0]
        bo["availability"] = 0
        # BUG FIX: Collection.update() was deprecated and removed in
        # pymongo 4; replace_one() performs the same whole-document swap.
        database.book.replace_one({"bkid": bkid}, bo)
        u = list(database.user.find({"uid": uid}))
        if u == []:
            print("the user is not registered")
            return
        u = u[0]
        database.lend.insert_one({"bkid": bkid, "bookname": bo["name"], "uid": uid,
                                  "username": u["name"], "date": str(da)})

    @staticmethod
    def bookreturn(bkid):
        """Process a return: restore availability and log the book on the user."""
        l = list(database.lend.find({"bkid": bkid}))
        if l == []:
            print("this record is not in the db")
            return
        l = l[0]
        u = database.user.find_one({"uid": l["uid"]})
        bo = database.book.find_one({"bkid": bkid})
        bo["availability"] = 1
        bo["count"] += 1
        u["books"].append(bo["name"])
        # BUG FIX: remove()/update() were removed in pymongo 4.
        database.lend.delete_one({"bkid": bkid})
        database.book.replace_one({"bkid": bkid}, bo)
        database.user.replace_one({"uid": u["uid"]}, u)
if __name__ == "__main__":
    # Ad-hoc manual test driver; uncomment the calls you need.
    database.indexc();
    #database.insert_book(100,"aryabhatta")
    #database.insert_user("cibi",120,9587600345)
    #database.lendbook(120,100);
    database.bookreturn(100)
# NOTE(review): removed Git conflict separator "======="; the alternative
# branch of the merge follows.
__author__ = 'shankar'
import pymongo
class Database(object):
    """Static helper exposing a single MongoDB database ('library')."""

    URI = "mongodb://127.0.0.1:27017"
    DATABASE = None  # bound by initialize()

    @staticmethod
    def initialize():
        """Connect to Mongo and bind the 'library' database."""
        client = pymongo.MongoClient(Database.URI)
        Database.DATABASE = client['library']

    @staticmethod
    def insert(collection, data):
        """Insert one document.

        BUG FIX: Collection.insert() was deprecated and removed in
        pymongo 4; insert_one() is the supported equivalent.
        """
        Database.DATABASE[collection].insert_one(data)

    @staticmethod
    def find(collection, query):
        """Return a cursor over the documents matching *query*."""
        return Database.DATABASE[collection].find(query)

    @staticmethod
    def find_one(collection, query):
        """Return the first document matching *query*, or None."""
        return Database.DATABASE[collection].find_one(query)
# NOTE(review): removed Git conflict marker ">>>>>>> 539166723692c174104c91dda63bda1c37cce855".
|
from helpers.data_source_fixture import DataSourceFixture
def test_pandas_df(data_source_fixture: DataSourceFixture):
    """Run a Soda scan over a pandas DataFrame through an in-memory DuckDB
    connection and assert that every SodaCL check passes."""
    import duckdb
    import pandas as pd
    con = duckdb.connect(database=":memory:")
    # NOTE(review): test_df looks unused, but the SodaCL checks below query a
    # table named "test_df" — presumably resolved by DuckDB's replacement
    # scan over local Python variables. Confirm before removing.
    test_df = pd.DataFrame.from_dict({"i": [1, 2, 3, 4], "j": ["one", "two", "three", "four"]})
    scan = data_source_fixture.create_test_scan()
    scan.add_duckdb_connection(con)
    scan.add_sodacl_yaml_str(
        f"""
        checks for test_df:
          - row_count = 4
          - missing_count(i) = 0
          - missing_count(j) = 0
        """
    )
    scan.execute(allow_warnings_only=True)
    scan.assert_all_checks_pass()
    scan.assert_no_error_logs()
|
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol
import sys
from collections import deque, defaultdict
from subprocess import Popen, PIPE
from twisted.python import log
def getQueue():
    """Fresh per-channel message buffer keeping only the 50 newest entries."""
    return deque(maxlen=50)
class SedBot(irc.IRCClient):
    """IRC bot (Python 2 / Twisted) that applies "s/..." sed substitutions
    to a channel's recent message history via /bin/sed."""
    nickname = 'sedbot'
    channels = ['#avo', '#!']
    lineRate = 1  # Twisted send throttle (seconds between outgoing lines)
    # Per-channel ring buffer of the last 50 [nick, message] pairs.
    msgbuffers = defaultdict(getQueue)
    def connectionMade(self): irc.IRCClient.connectionMade(self)
    def connectionLost(self, reason): irc.IRCClient.connectionLost(self, reason)
    def signedOn(self):
        # Join every configured channel once the server accepts us.
        for channel in self.channels:
            self.join(channel)
    def privmsg(self, user, channel, msg):
        """Run "s/..." messages as a sed program over the channel buffer;
        otherwise record the message into the buffer."""
        nick = user.split('!', 1)[0]
        print nick, msg, channel
        if msg.startswith("s/"):
            # '-e tx -e d -e :x' prints only lines where the substitution
            # matched; everything before ';' is the user's sed expression.
            cmd = ['/bin/sed', '-e', msg.split(';')[0], '-e', 'tx', '-e', 'd', '-e', ':x']
            p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=PIPE, shell=False)
            for buffmsg in self.msgbuffers[channel]:
                p.stdin.write("<%s> %s\n" % (buffmsg[0], buffmsg[1]))
            out, err = p.communicate()
            if len(err) > 0:
                self.msg(channel, err)
            elif len(out) == 0:
                self.msg(channel, '/bin/sed: No match')
            else:
                # Report only the most recent matching line.
                result = out.rstrip().split("\n").pop()
                self.msg(channel, result)
                #self.msgbuffers[channel].append([self.nickname, result])
        else:
            self.msgbuffers[channel].append([nick, msg])
        return
class SedBotFactory(protocol.ClientFactory):
    """Factory that reconnects on connection loss and stops the reactor on failure."""
    protocol = SedBot
    def __init__(self): pass
    def clientConnectionLost(self, connector, reason):
        print "connection lost"
        # Immediately reconnect with the same parameters.
        connector.connect()
    def clientConnectionFailed(self, connector, reason):
        print "connection failed"
        reactor.stop()
if __name__ == '__main__':
    # Log to stdout and run the bot against the configured IRC server.
    log.startLogging(sys.stdout)
    f = SedBotFactory()
    reactor.connectTCP('irc.teamavolition.com', 6667, f)
    reactor.run()
from margot import Symbol, MargotDataFrame, Ratio, BaseAlgo, Position, BackTest
from margot import finance, cboe, alphavantage as av
# def test_algo():
# class Index(Symbol):
# close = cboe.Column(time_series='close')
# class VXBasis(MargotDataFrame):
# vixm = Index(symbol='VIX', trading_calendar='NYSE')
# class MyAlgo(BaseAlgo):
# data = VXBasis()
# myalgo = MyAlgo()
|
# Classify a user-supplied integer as odd, even, or an even multiple of 4.
number = int(input('Enter a number: '))
remainder_two = number % 2
remainder_four = number % 4
if remainder_two != 0:
    print('The number is odd')
elif remainder_four != 0:
    print('The number is even')
else:
    print('The number is a multiple of 4 and it is also even')
# Generated by Django 2.1.1 on 2018-09-06 19:38
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Topic.flag to Topic.activate."""

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='topic',
            old_name='flag',
            new_name='activate',
        ),
    ]
|
def nth_prime(k):
    """Return the k-th prime number (1-indexed) by trial division.

    Args:
        k: which prime to return; must be >= 1.

    Returns:
        The k-th prime as an int.

    Raises:
        ValueError: if k < 1.
    """
    if k < 1:
        raise ValueError("k must be >= 1")
    primes = []
    n = 2
    while len(primes) < k:
        is_prime = True
        i = 2
        # Trial-divide only up to sqrt(n): any factor pair straddles the root.
        # (i*i <= n avoids the float sqrt of the original.)
        while i * i <= n:
            if n % i == 0:
                is_prime = False
                break
            i += 1
        if is_prime:
            primes.append(n)
        n += 1
    return primes[-1]


if __name__ == "__main__":
    # Project-Euler-style query: print the 10001st prime.
    print(nth_prime(10001))
    # BUG FIX: the original (Python 2) script then printed primes[10002],
    # which is out of range for a 10001-element list and raised IndexError.
from flask import Flask, render_template, request, url_for, redirect, flash
import datetime
import csv
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker, session
import os
from werkzeug.utils import secure_filename
import os.path
# Where member photos are stored and which image types may be uploaded.
UPLOAD_FOLDER = 'd:/cs50/flask/static/uploads'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
engine = create_engine("postgresql://postgres:Cyber123@localhost:5432/postgres")
db = scoped_session(sessionmaker(bind=engine))
"""
@app.route("/")
def index():
return render_template("index.html")
"""
@app.route('/')
def hello():
    """Landing page: show the login form."""
    return render_template('login.html')


@app.route("/showForm")
def showForm():
    """Display the registration form with a banner message."""
    banner = "Python and Postgres Registration Application"
    return render_template("register.html", message=banner)
@app.route("/register", methods=["POST"])
def register():
    """Create a user account from the posted registration form.

    Validates that both fields are present, inserts the row, and re-renders
    the registration page with a status message.
    """
    username = request.form.get("username")
    password = request.form.get("password")
    # Reject blank fields up front.
    if username == "":
        message = "Please fill in your email address"
        return render_template("register.html", message=message)
    if password == "":
        message = "Please fill in your password"
        return render_template("register.html", message=message)
    # SECURITY NOTE(review): the password is stored in plain text; it should
    # be hashed (e.g. werkzeug.security.generate_password_hash) before insert.
    db.execute("INSERT INTO users (username, password) VALUES (:username, :password)",
               {"username": username, "password": password})
    try:
        db.commit()
    except Exception as e:
        # BUG FIX: the original caught psycopg2.Error, but psycopg2 is never
        # imported (NameError inside the handler), concatenated the exception
        # object into a str (TypeError), and referenced an undefined name `s`.
        message = "Database error: " + str(e)
        return render_template("register.html", message=message)
    message = "Your user account has been added. click on Login for logging in."
    return render_template("register.html", message=message)
@app.route('/index')
def index():
    """Render the main landing page."""
    return render_template('index.html')


@app.route('/login', methods=['POST'])
def login():
    """Check the posted credentials against the users table."""
    uname = request.form.get("username")
    psd = request.form.get("password")
    row = db.execute("SELECT username, password FROM users where username= :username",
                     {'username': uname}).fetchone()
    # NOTE(review): passwords are compared in plain text here.
    if row is None or row.password != psd:
        message = "username/password invalid"
        return render_template('login.html', message=message)
    message = "None"
    return render_template('index.html', message=message)


@app.route("/logout")
def logout():
    """Log out by returning to the login page."""
    return render_template("login.html")


@app.route("/airlines")
def airlines():
    """List every flight for the airlines page."""
    rows = db.execute("select * from flights").fetchall()
    return render_template("airlines.html", flights=rows)
@app.route("/book", methods=["post"])
def book():
    """Book a passenger onto a flight posted from the airlines form."""
    name = request.form.get("name")
    try:
        flight_id = int(request.form.get("flight_id"))
    except (TypeError, ValueError):
        # ROBUSTNESS: int(None) raises TypeError when the field is missing;
        # the original only caught ValueError.
        return render_template("response.html", message="invalid Flight Number")
    if db.execute("select * from flights where id = :id", {"id": flight_id}).rowcount == 0:
        return render_template("response.html", message="No such flight")
    db.execute("INSERT INTO passengers (name, flight_id) VALUES (:name, :flight_id)",
               {"name": name, "flight_id": flight_id})
    try:
        db.commit()
    except Exception as e:
        # BUG FIX: the original caught psycopg2.Error but psycopg2 is never
        # imported, and its message built on an undefined name `s`.
        message = "Database error: " + str(e)
        return render_template("response.html", message=message)
    message = "successfully Registered!!"
    return render_template("response.html", message=message)
@app.route("/flights")
def flights():
    """List all flights."""
    all_flights = db.execute("SELECT * FROM flights").fetchall()
    return render_template("flights.html", flights=all_flights)


@app.route("/flights/<int:flight_id>")
def flight(flight_id):
    """Show one flight and its passenger list."""
    found = db.execute("SELECT * FROM flights WHERE id = :id", {"id": flight_id}).fetchone()
    if found is None:
        # NOTE(review): "resopnse.html" looks like a typo for "response.html",
        # but the same name recurs elsewhere — confirm the template's actual
        # filename before changing it.
        return render_template("resopnse.html", message="No such flight.")
    passenger_rows = db.execute("SELECT name FROM passengers WHERE flight_id = :flight_id",
                                {"flight_id": flight_id}).fetchall()
    return render_template("flight.html", flight=found, passengers=passenger_rows)


@app.route("/tshirt")
def tshirt():
    """Show the t-shirt registration form."""
    return render_template("tshirt.html")
@app.route("/tshirtsize", methods=["POST"])
def tshirtsize():
    """Store a member's t-shirt size and number."""
    id = request.form.get("member_id")
    size = request.form.get("tshirtsize")
    number = request.form.get("tshirtnumber")
    db.execute("UPDATE members SET t_size = :t_size, t_number = :t_number WHERE id= :id",
               {"t_size": size, "t_number": number, "id": id})
    try:
        db.commit()
    except Exception as e:
        # BUG FIX: psycopg2 was never imported (NameError in the original
        # handler) and the message referenced an undefined name `s`.
        message = "Database error: " + str(e)
        return render_template("response.html", message=message)
    return render_template("tshirtsize.html", id=id, size=size, number=number)
@app.route("/newyear")
def newyear():
    """Report whether today is New Year's Day and the time left until the next one."""
    now = datetime.datetime.now()
    # BUG FIX: the original read `now.date` — the bound method object, not a
    # number — and compared it to 1, so new_year could never be True. The
    # day-of-month attribute is `now.day`.
    d = now.day
    m = now.month
    y = now.year
    new_year = m == 1 and d == 1
    new_year_date = datetime.datetime(y + 1, 1, 1)
    no_of_days = new_year_date - now
    # Keep only the "<N> days," prefix of the timedelta's string form (the
    # whole string when less than one day remains and there is no comma) —
    # same result as the original character-copy loop.
    head, sep, _ = str(no_of_days).partition(',')
    no_of_days_str1 = head + sep
    return render_template("newyear.html", new_year=new_year, no_of_days_str1=no_of_days_str1, now=now)
@app.route("/members")
def members():
    """List all members."""
    all_members = db.execute("SELECT * FROM members").fetchall()
    return render_template("members.html", members=all_members)


@app.route("/memberdetails/<int:member_id>")
def memberdetails(member_id):
    """Show a single member's details."""
    found = db.execute("SELECT * from members where id = :member_id",
                       {"member_id": member_id}).fetchone()
    if found is None:
        return render_template("resopnse.html", message="No Such Member.")
    return render_template("memberdetails.html", member=found)


@app.route("/newmember")
def newmember():
    """Show the new-member registration form."""
    return render_template("registermember.html")
@app.route("/registermember", methods=["post", "GET"])
def registermember():
    """Register a club member and store their uploaded photo as <id>.jpg."""
    fname = request.form.get("memberfname")
    lname = request.form.get("memberlname")
    dob = request.form.get("memberdob")
    db.execute("INSERT INTO members (firstname, lastname, dob) VALUES (:fname, :lname, :dob)",
               {"fname": fname, "lname": lname, "dob": dob})
    try:
        db.commit()
    except Exception as e:
        # BUG FIX: the original referenced un-imported psycopg2 and an
        # undefined name `s` inside this handler.
        message = "Database error: " + str(e)
        return render_template("response.html", message=message)
    member = db.execute("SELECT id from members where firstname = :fname",
                        {"fname": fname}).fetchone()
    if member is None:
        return render_template("resopnse.html", message="No Such Member.")
    mid = member.id
    if request.method == 'POST':
        if 'file' not in request.files:
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            return redirect(request.url)
        if file:
            filename = secure_filename(file.filename)
            saved_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(saved_path)
            # BUG FIX: the original renamed a garbled hard-coded path instead
            # of the upload it just saved; rename the saved file to <id>.jpg.
            dd = os.path.join(app.config['UPLOAD_FOLDER'], f"{mid}.jpg")
            os.rename(saved_path, dd)
    return render_template("registersuccess.html")
@app.route("/forupdatemember", methods=["post", "GET"])
def forupdatemember():
    """Update a member's details and optionally replace their photo."""
    fname = request.form.get("updatefname")
    lname = request.form.get("updatelname")
    dob = request.form.get("updatedob")
    uid = request.form.get("updateid")
    if request.method == 'POST':
        if 'file' not in request.files:
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            return redirect(request.url)
        if file:
            filename = secure_filename(file.filename)
            saved_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(saved_path)
            # BUG FIX: the original renamed a garbled hard-coded path instead
            # of the upload it just saved. Replace any existing photo, then
            # rename the saved file to <id>.jpg.
            target = f"D:/cs50/flask/static/uploads/{uid}.jpg"
            if os.path.exists(target):
                os.remove(target)
            os.rename(saved_path, target)
    if fname == "":
        message = "Please fill in your first name"
        return render_template("memberdetails.html", message=message)
    if lname == "":
        message = "Please fill in your last name"
        return render_template("memberdetails.html", message=message)
    if dob == "":
        message = "Please fill in your date of birth"
        return render_template("memberdetails.html", message=message)
    # BUG FIX: the UPDATE was commented out in the original (it used %S and
    # positional args), so nothing was ever persisted; re-implemented with
    # named bind parameters.
    db.execute("UPDATE members SET firstname = :fname, lastname = :lname, dob = :dob WHERE id = :uid",
               {"fname": fname, "lname": lname, "dob": dob, "uid": uid})
    try:
        db.commit()
    except Exception as e:
        # BUG FIX: psycopg2 was never imported; message built on undefined `s`.
        message = "Database error: " + str(e)
        return render_template("response.html", message=message)
    return render_template("updatesuccess.html")
from rest_framework import serializers
from .models import User, Rol
from rest_framework.validators import UniqueValidator
from django.utils.crypto import get_random_string
from django.core.mail import EmailMessage
from django.http import HttpResponseRedirect
class UserSerializer(serializers.ModelSerializer):
    """Serializer for User accounts.

    create(): builds an unverified user with a random verification code.
    update(): applies changes with username/email uniqueness checks.
    """
    username = serializers.CharField(max_length=30)

    class Meta:
        model = User
        fields = '__all__'

    def create(self, validated_data):
        # Reject duplicate usernames up front.
        if User.objects.filter(username=validated_data['username']).exists():
            raise serializers.ValidationError('This username already exist.')
        user = User(
            email=validated_data['email'],
            username=validated_data['username'],
        )
        if 'rol' in validated_data:
            user.rol = validated_data['rol']
        user.user_verified = False
        user.verified_code = get_random_string(length=10)
        user.set_password(validated_data['password'])
        user.save()
        body = 'El codigo de confirmacion de su cuenta es: '+user.verified_code
        email = EmailMessage('Codigo de confirmacion de App Users en Django', body, to=[user.email])
        #email.send()
        return user

    def update(self, instance, validated_data):
        user_old = User.objects.get(id=instance.id)
        # BUG FIX: the old code fetched the user by instance.id and then
        # compared ids (always equal), so the uniqueness check never fired.
        # Check the *requested* username against every other user instead.
        new_username = validated_data.get('username', instance.username)
        if User.objects.filter(username=new_username).exclude(id=instance.id).exists():
            raise serializers.ValidationError('This username already exist.')
        instance.username = new_username
        instance.password = validated_data.get('password', instance.password)
        # Re-hash only when the password actually changed.
        if instance.password != user_old.password:
            instance.set_password(validated_data['password'])
        instance.rol = validated_data.get('rol', instance.rol)
        try:
            user_oldd = User.objects.get(email=validated_data['email'])
        except User.DoesNotExist:  # was a bare except
            user_oldd = None
        if user_oldd and user_oldd.id != instance.id:
            raise serializers.ValidationError('This email already exist.')
        # .get() avoids a KeyError when user_verified is not supplied.
        if validated_data.get('user_verified'):
            instance.user_verified = True
        instance.email = validated_data['email']
        instance.save()
        return instance
class ForgotPasswordSerializer(serializers.ModelSerializer):
    """Resets a user's password to a random string (optionally emailed)."""

    class Meta:
        model = User
        fields = ('email',)

    def create(self, validated_data):
        """Set a new random password for the user with this email.

        Raises User.DoesNotExist if the email is unknown.
        """
        user = User.objects.get(email = validated_data['email'])
        new_pass = get_random_string(length=10)
        user.set_password(new_pass)
        # One save is enough; the original saved the user twice.
        user.save()
        body = 'Su nueva contraseña es: '+new_pass
        email = EmailMessage('Restauración de contraseña', body, to=[user.email])
        #email.send()
        return user
class RolSerializer(serializers.ModelSerializer):
    """Serializer for Rol with name-uniqueness checks and permiso updates."""

    class Meta:
        model = Rol
        fields = '__all__'

    def create(self, validated_data):
        # Check for a duplicate name before saving.
        if Rol.objects.filter(name=validated_data['name']).exists():
            raise serializers.ValidationError('This rol name already exist.')
        rol = Rol(
            name = validated_data['name']
        )
        rol.save()
        return rol

    def update(self, instance, validated_data):
        rol_old = Rol.objects.filter(name=validated_data['name']).first()
        if rol_old and rol_old.id != instance.id:
            raise serializers.ValidationError('This rol name already exist.')
        instance.name = validated_data.get('name', instance.name)
        # BUG FIX: the old code removed only the hard-coded permiso ids 1-6;
        # any other assigned permiso survived the update. set() replaces the
        # whole relation atomically.
        instance.permisos.set(validated_data['permisos'])
        instance.save()
        return instance
class RolNameSerializer(serializers.ModelSerializer):
    """Plain serializer for Rol without the custom create/update hooks.

    NOTE(review): despite the name, this exposes *all* Rol fields — confirm
    whether it should be restricted to ('name',).
    """
    class Meta:
        model = Rol
        fields = '__all__'
from collections import deque
from itertools import combinations
def find_subarray(data, target):
    """Sliding-window search for a contiguous run of data summing to target.

    Returns the matching sublist, or None when no window sums to target.
    Assumes non-negative values (the window only shrinks on overshoot).
    """
    window_sum = data[0]
    lo, hi = 0, 1
    n = len(data)
    while hi <= n:
        # Shrink from the left while the window overshoots the target.
        while window_sum > target and lo < hi - 1:
            window_sum -= data[lo]
            lo += 1
        if window_sum == target:
            return data[lo:hi]
        if hi < n:
            window_sum += data[hi]
        hi += 1
    return None
def find_sum(values, target):
    """Return True if any pair of entries in `values` sums to `target`.

    The parameter was renamed from `list`, which shadowed the builtin.
    """
    return any(a + b == target for a, b in combinations(values, 2))
def decode(data, preamble):
    """Return the first value that is not the sum of two of the previous
    `preamble` values (AoC 2020 day 9 "XMAS" cipher).

    Returns None implicitly when every value checks out.
    """
    recent = deque()
    for value in data:
        if len(recent) >= preamble:
            if not find_sum(recent, value):
                return value
            recent.popleft()
        recent.append(value)
# Part 1: first invalid number; part 2: min+max of the contiguous run
# summing to it.
with open("./test.txt") as f:
    # split() (vs split("\n")) ignores a trailing newline, which previously
    # crashed int('') on files ending with a blank line; `with` also closes
    # the file handle the original leaked.
    data = [int(x) for x in f.read().split()]
target = decode(data, 25)
print(target)
subarray = find_subarray(data, target)
subarray.sort()
print(subarray[0] + subarray[-1])
|
# -*- coding: utf-8 -*-
"""
A few utility methods used by various other files.
Created on Sat May 3 12:33:42 2014
@author: brian
"""
from datetime import datetime
import math
program_start = datetime.now()
#A convenient print statement for long runs - also includes a timestamp at the beginning of the message
#Arguments:
#msg - a string to be printed
def logMsg(msg):
    """Print msg prefixed with the elapsed time since program start.

    Python 2 print statement — this module is Python-2 only.
    """
    td = datetime.now() - program_start
    print "[" + str(td) + "] " + msg
#A print statement intended to log the percentage of completion of some task with many iterations
#Can be called many times, but only prints when the percentage is a "nice" number, rounded to a given number of digits
#Arguments
#num - the current iteration
#outof - the total number of iterations
#How many digits should the percentage be rounded to?
def logPerc(num, outof, digits):
    """Log progress as a percentage, printing only on iterations where the
    rounded fraction actually changes (so it fires on "nice" numbers).
    """
    current = round(float(num) / outof, digits)
    previous = round(float(num - 1) / outof, digits)
    if previous < current:
        logMsg(str(current * 100) + "%")
#helper function. Computes euclidean distance between two vectors
def euclideanDist(v1, v2):
    """Euclidean (L2) distance between two equal-length numeric vectors."""
    total = 0
    for i in range(len(v1)):
        diff = v1[i] - v2[i]
        total += diff * diff
    return math.sqrt(total)
EARTH_RADIUS = 3963.1676
#computes distance between two lat-lon points, assuming spherical earth
#computes distance between two lat-lon points, assuming spherical earth
def haversine(point1, point2):
    """Great-circle distance in miles between two (lat, lon) degree points.

    Rewritten from the Python-2-only tuple-parameter signature
    `def haversine((lat1,lon1),(lat2,lon2))` (removed by PEP 3113) so the
    function also runs on Python 3; callers still pass two (lat, lon) pairs.
    """
    lat1, lon1 = point1
    lat2, lon2 = point2
    [lat1, lon1, lat2, lon2] = map(math.radians, [lat1, lon1, lat2, lon2])
    lat_haversine = math.sin((lat2-lat1)/2) * math.sin((lat2-lat1)/2)
    lon_haversine = math.sin((lon2 - lon1)/2) * math.sin((lon2 - lon1)/2)
    cosine_term = math.cos(lat1) * math.cos(lat2)
    distance = 2 * EARTH_RADIUS * math.asin(math.sqrt(lat_haversine + cosine_term*lon_haversine))
    return distance
#helper function. Normalizes a vector in-place
def normalize(vector):
    """Scale `vector` in place so its entries sum to 1 (no return value)."""
    total = sum(vector)
    for i, value in enumerate(vector):
        vector[i] = float(value) / total
#A builder function - yields a squence of datetime objects
#Arguments:
#start_date - a datetime object. the first date of the sequence
#end_date - a datetime object. the end of the date sequence (non inclusive)
#delta - a timedelta object. The step size
def dateRange(start_date, end_date, delta):
    """Yield datetimes from start_date (inclusive) up to end_date
    (exclusive), stepping by the timedelta `delta`.
    """
    current = start_date
    while current < end_date:
        yield current
        current += delta
#Rounds a datetime to a given granularity (1 hour, 15 minutes, etc..)
#Arguments
#dt - a datetime object
#granularity - a timedelta object
#Returns a datetime, rounded to the given granularity
def roundTime(dt, granularity):
    """Round `dt` down to a whole multiple of the timedelta `granularity`,
    anchored at 2000-01-01 00:00.
    """
    anchor = datetime(year=2000, month=1, day=1, hour=0)
    steps = int((dt - anchor).total_seconds() / granularity.total_seconds())
    return anchor + steps * granularity
#Takes a list, which represents the header row of a table
#Returns a dictionary which maps the string column names to integer column ids
def getHeaderIds(header_row):
    """Map each column name in `header_row` to its integer column index.

    BUG FIX: the original built the mapping but never returned it, so every
    call yielded None.
    """
    mapping = {}
    for i, name in enumerate(header_row):
        mapping[name] = i
    return mapping
def time_search(time_list, time):
    """Binary search over a sorted `time_list`, returning the index `hi`
    that brackets `time` from above when the search narrows to two entries.

    Uses floor division (//) so the midpoint stays an int: plain `/` on
    ints yields a float on Python 3 and would crash the list indexing
    (behavior is identical on Python 2).
    """
    lo = 0
    hi = len(time_list) - 1
    mid = (lo + hi) // 2
    while( lo != hi and lo != mid ):
        if(time < time_list[mid]):
            hi = mid
        else:
            lo = mid
        mid = (hi + lo) // 2
    return hi
"""
if(lo == hi):
return lo
else:
lo_dist = time_dist(time_list[lo], time)
hi_dist = time_dist(time_list[hi], time)
if(lo_dist < hi_dist):
return lo
else:
return hi
"""
#Returns the magnitude in seconds of the difference between
#two datetime objects.
def time_dist(time_1, time_2):
    """Magnitude in seconds of the difference between two datetimes."""
    return abs((time_1 - time_2).total_seconds())
|
#!/usr/bin/python3
# Description: gives the sum of 2 numbers
# Author: Paul hivert
# Date: 15/10/2018
import sys
# Prompt for two integers and print their sum.
print("choisi un nombre")
value1 = input()
try:
    value1 = int(value1)
except ValueError:
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only a parse failure should trigger the exit.
    sys.exit("please enter a number")
print("choisi un autre nombre")
value2 = input()
try:
    value2 = int(value2)
except ValueError:
    sys.exit("please enter a number")
print("leurs somme est égale a ")
print(value1 + value2)
from tests.admin_test import AdminTestCase
from tests.bass_test import BassTestCase
from tests.user_test import UserTestCase
class TestAnonAdminView(BassTestCase):
    """Admin pages must look nonexistent (404) to anonymous visitors."""

    def test_denied(self):
        admin_paths = ('', 'users/', 'users/edit/1', 'files/', 'files/a')
        for path in admin_paths:
            response = self.app.get('/admin/{}'.format(path))
            assert response.status_code == 404
class TestUserAdminView(UserTestCase, TestAnonAdminView):
    # Re-runs the inherited test_denied while logged in as a regular
    # (non-admin) user: admin pages must 404 for them too.
    pass
class TestAdminView(AdminTestCase):
    def test_users_index(self):
        """The admin user list renders and links to the admin user's edit page."""
        rv = self.app.get('/admin/users/')
        assert rv.status_code == 200
        # NOTE(review): on Python 3 / modern Flask, rv.data is bytes, so this
        # `str in bytes` check would raise TypeError — confirm this suite
        # runs on Python 2 or switch to rv.get_data(as_text=True).
        assert '<a href="/admin/users/edit/1">admin</a>' in rv.data
|
from tkinter import *
from tkinter import ttk
from datetime import datetime, timedelta
class Input_Popup:
    """Toplevel popup used to collect user input for the meal planner.

    type options: new_meal, new_ingredient, new_instruction, edit_meal,
    edit_ingredient, edit_instruction, custom_meal_plan.
    After the window closes, the collected values (plus 'done' and the
    window 'position' geometry string) are available via get_rv_data().
    """

    def __init__(self, type, parent=None, data=None):
        # `type` shadows the builtin, but renaming it would break callers
        # passing it as a keyword argument, so it is kept.
        self.done = False
        self.rv_data = {}
        self.type = type
        self.popup_window = top = Toplevel(parent)
        self.popup_window.transient(parent)
        if self.type == "new_meal":
            self.label = Label(top, text="Enter New Meal Name")
            self.label.pack()
            self.entry = Entry(top)
            self.entry.pack()
            self.button = Button(top, text="Add Meal", command=self.close_window)
            self.button.pack()
            self.entry.focus()
        elif self.type == "new_ingredient":
            self.label = Label(top, text="Select ingredient")
            self.label.pack()
            self.ingredient_combo = ttk.Combobox(top, state="Normal", values=["Ingredient Name", ""] + data["ingredients"])
            self.ingredient_combo.current(0)
            self.ingredient_combo.pack()
            self.unit_type_combo = ttk.Combobox(top, state="Normal", values=["Unit Type", ""]+data["unit_types"])
            self.unit_type_combo.current(0)
            self.unit_type_combo.pack()
            self.quantity_combo = ttk.Combobox(top, state="Normal", values=["Quantity",""]+data["quantities"])
            self.quantity_combo.current(0)
            self.quantity_combo.pack()
            self.button = Button(top, text="Add Ingredient", command=self.close_window)
            self.button.pack()
            self.ingredient_combo.focus()
        elif self.type == "new_instruction":
            self.label = Label(top, text="Add instruction")
            self.label.pack()
            self.entry = Entry(top)
            self.entry.pack()
            self.button = Button(top, text="Add Instruction", command=self.close_window)
            self.button.pack()
            self.entry.focus()
        elif self.type == "edit_meal":
            self.label = Label(top, text="Edit Meal Name")
            self.label.pack()
            self.entry = Entry(top)
            self.entry.insert(END, data["meal_name"])
            self.entry.pack()
            self.button = Button(top, text="Ok", command=self.close_window)
            self.button.pack()
            self.entry.focus()
        elif self.type == "edit_ingredient":
            self.label = Label(top, text=f"Edit ingredient: {data['ingredient']}")
            self.label.pack()
            # Current unit/quantity pre-selected as the first combobox entry.
            self.unit_type_combo = ttk.Combobox(top, state="Normal", values=[data["unit"], ""] + data["unit_types"])
            self.unit_type_combo.current(0)
            self.unit_type_combo.pack()
            self.quantity_combo = ttk.Combobox(top, state="Normal", values=[data["quantity"], ""] + data["quantities"])
            self.quantity_combo.current(0)
            self.quantity_combo.pack()
            self.button = Button(top, text="Update Ingredient", command=self.close_window)
            self.button.pack()
            self.unit_type_combo.focus()
        elif self.type == "edit_instruction":
            self.label = Label(top, text="Edit instruction")
            self.label.pack()
            self.entry = Entry(top)
            self.entry.insert(END, data["current_instruction"])
            self.entry.pack()
            self.button = Button(top, text="Ok", command=self.close_window)
            self.button.pack()
            self.entry.focus()
        elif self.type == "custom_meal_plan":
            # One date label + meal combobox per day of the plan.
            self.label_packs = []
            self.combos = []
            start_date = datetime.strptime(data["start_date"], "%m/%d/%y")
            weeks = data["weeks"]
            days_needed = data["days_needed"]
            total_days = weeks * 7
            meal_names = [meal.meal_name for meal in data["meals"]]
            self.meal_data = {}
            for meal in data["meals"]:
                self.meal_data.update({
                    meal.meal_name: meal
                })
            for i in range(total_days):
                cur_date = start_date + timedelta(days=i)
                cur_date_str = f"{cur_date.strftime('%m/%d/%y')} - {cur_date.strftime('%A')}"
                cur_label = Label(top, text=cur_date_str)
                cur_label.pack()
                cur_label_pack = (cur_date_str, cur_label)
                self.label_packs.append(cur_label_pack)
                cur_combo = ttk.Combobox(top, state="readonly", values=[""]+meal_names)
                cur_combo.pack()
                self.combos.append(cur_combo)
        # Edit popups close via their Ok button; the rest get a Done button.
        if "edit" not in self.type:
            self.done_button = Button(top, text="Done", command=self.set_done_true)
            self.done_button.pack()
        # Restore a previous window position, else center over the parent.
        if data and "position" in data:
            self.popup_window.geometry(data["position"])
        else:
            self.popup_window.geometry("+%d+%d" % (parent.winfo_rootx()+(parent.winfo_reqwidth()/2)-(self.popup_window.winfo_reqwidth()/2),parent.winfo_rooty()))
        self.popup_window.bind("<Return>", self.close_window)

    def close_window(self, junk=None):
        """Collect the popup's values into rv_data and destroy the window.

        `junk` absorbs the event argument when invoked via the <Return> bind.
        (Stray no-op `pass` statements from the original branches removed.)
        """
        if self.type == "new_meal" or self.type == "edit_meal":
            self.rv_data = {"name": self.entry.get().strip()}
        elif self.type == "new_ingredient" or self.type == "edit_ingredient":
            # Only a new ingredient carries a name; edits keep the old one.
            if self.type == "new_ingredient":
                ingredient = self.ingredient_combo.get()
            else:
                ingredient = None
            self.rv_data = {
                "ingredient": ingredient,
                "unit_type": self.unit_type_combo.get(),
                "quantity": self.quantity_combo.get()
            }
        elif self.type == "new_instruction" or self.type == "edit_instruction":
            self.rv_data = {
                "instruction": self.entry.get().strip()
            }
        elif self.type == "custom_meal_plan":
            meal_dates = []
            for i, label_pack in enumerate(self.label_packs):
                # label text is "mm/dd/yy - Weekday"; keep the date part.
                cur_date_str = label_pack[0].split("-")[0].strip()
                cur_meal = self.combos[i].get()
                if not cur_meal:
                    continue
                meal_dates.append((f'{datetime.strptime(cur_date_str, "%m/%d/%y").strftime("%Y-%m-%d")} 23:00:00', self.meal_data[cur_meal]))
            self.rv_data = {
                'meal_dates': meal_dates
            }
        # Edit popups have no separate Done button; closing means done.
        if "edit" in self.type:
            self.done = True
        self.rv_data.update({
            'done': self.done,
            'position': self.popup_window.geometry()
        })
        self.popup_window.destroy()

    def set_done_true(self):
        """Done-button callback: mark the dialog confirmed and close it."""
        self.done = True
        self.close_window()

    def get_ui(self):
        """Return the underlying Toplevel widget (e.g. for wait_window)."""
        return self.popup_window

    def get_rv_data(self):
        """Return the values collected by close_window().

        BUG FIX: the original looped over string values calling
        self.rv_data.title() — a dict has no .title(), so every iteration
        raised AttributeError which a bare `except: pass` swallowed; the
        loop was a guaranteed no-op and has been removed (behavior is
        unchanged). If title-casing of string values was the intent, it
        must skip entries like 'position' whose format matters — confirm
        before enabling.
        """
        return self.rv_data
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import Optional
import attr
from marshmallow3_annotations.ext.attrs import AttrsSchema
@attr.s(auto_attribs=True, kw_only=True)
class DashboardQuery:
    # A single query attached to a dashboard; every field is optional and
    # defaults to None. kw_only means all fields must be passed by keyword.
    name: Optional[str] = attr.ib(default=None)
    url: Optional[str] = attr.ib(default=None)
    query_text: Optional[str] = attr.ib(default=None)
class DashboardQuerySchema(AttrsSchema):
    """Marshmallow schema derived from the DashboardQuery attrs class and
    registered globally by marshmallow3_annotations."""
    class Meta:
        target = DashboardQuery
        register_as_scheme = True
|
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import numpy as np
import cv2
import sys, pygame
from modules2 import *
import csv
import math
from PIL import Image
#generate a list of hex center locations in the hex coordinate sytem
def generateCubicHex(BOARD_SIZE):
    """Return every cube-coordinate hex center [x, y, z] with x+y+z == 0
    within a hex board of radius BOARD_SIZE.
    """
    axis = range(-BOARD_SIZE, BOARD_SIZE + 1)
    return [[x, y, z]
            for x in axis
            for y in axis
            for z in axis
            if x + y + z == 0]
#define cartesian location of hex centers in pixels
def generateCartHex(zone_list_hex, hex_side_length, height, width):
    """Convert cube-coordinate hex centers to cartesian pixel centers,
    offset so the board is centered in a width x height screen.
    """
    centers = []
    for cube in zone_list_hex:
        px = cube[0] * (3 / 2) * hex_side_length + width / 2
        py = math.sqrt(3) * hex_side_length * (cube[0] / 2 + cube[1]) + height / 2
        centers.append([px, py])
    return centers
#given x,y .. return all verticies of relevant hexagon
#starts at right and goes clockwise
def generateHexVerts(x, y, hex_side_length):
    """Return the 6 vertices [[x, y], ...] of a flat-top hexagon centered at
    (x, y), starting at the rightmost vertex and stepping 60 degrees at a
    time (clockwise in screen coordinates, where y grows downward).

    Replaces six copy-pasted vertex computations with a loop — same angles
    (0, 60, ..., 300), same math, same output order.
    """
    verts = []
    for deg in range(0, 360, 60):
        rad = math.radians(deg)
        verts.append([x + hex_side_length * math.cos(rad),
                      y + hex_side_length * math.sin(rad)])
    return verts
#define cartesian location of hex centers in pixels
def drawGrid(screen, zone_pixels_cart, i, hex_side_length):
    """Draw a filled hexagon at every cartesian center point; the fill color
    is derived from the frame counter i."""
    fill = (22, i * .9 / 2, i * .3 / 2)
    for center in zone_pixels_cart:
        pygame.draw.polygon(screen, fill, generateHexVerts(center[0], center[1], hex_side_length), 0)
#define cartesian location of hex centers in pixels
def drawDots(screen, zone_pixels_cart, dotSize):
    """Draw a white dot of diameter dotSize centered on each hex center."""
    radius = dotSize / 2
    for center in zone_pixels_cart:
        pygame.draw.ellipse(screen, (255, 255, 255), [center[0] - radius, center[1] - radius, dotSize, dotSize])
def backgroundSubtraction():
    """Subtract backgroundImage.jpg from foregroundImage.jpg and write the
    result to subtractedImage.jpg.

    NOTE(review): relies on PIL *private* internals (Image._new and
    im.chop_subtract), which are not public API and may break across
    Pillow versions — confirm the installed Pillow still provides them.
    """
    image1 = Image.open('foregroundImage.jpg')
    image2 = Image.open('backgroundImage.jpg')
    image1.load()
    image2.load()
    image3 = image1._new(image1.im.chop_subtract(image2.im, 1.0, 0))
    image3.save('subtractedImage.jpg')
def backgroundSubtractionControl():
    """Placeholder for the control-image subtraction pass.

    Currently a no-op: the intended implementation is kept below as a
    string/commented-out block.
    """
    """
    image1 = Image.open('foregroundImage.jpg')
    image2 = Image.open('backgroundImage.jpg')
    image1.load()
    image2.load()
    image3 = image1._new(image1.im.chop_subtract(image2.im, 1.0, 0))
    image3.save('subtractedImage.jpg')
    """
    pass
def blobDetection():
    """Run SimpleBlobDetector on subtractedImage.jpg (inverted so blobs are
    detected on the flipped intensity image), write an annotated
    detectedBlobs.png, and return (keypoints_cart, keypoints) where
    keypoints_cart is a list of [x, y] centers.
    """
    im = cv2.imread("subtractedImage.jpg", cv2.IMREAD_GRAYSCALE)
    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    # Change thresholds
    params.minThreshold = 0
    params.maxThreshold = 256
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 20
    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1
    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.5
    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.5
    # Filter by colour.
    params.filterByColor = True
    # NOTE(review): the original re-assigned maxThreshold = 256 here (an
    # exact duplicate of the value above) under a "Change colour" comment;
    # params.blobColor was probably intended — confirm. The redundant
    # assignment was dropped (no behavior change).
    # Set up the detector with the configured parameters.
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs on the inverted image.
    keypoints = detector.detect(255 - im)
    keypoints_cart = []
    print("numKeypoints= ", len(keypoints))
    for kp in keypoints:
        x = kp.pt[0]
        y = kp.pt[1]
        print("x=", x)
        print("y=", y)
        print(" ")
        keypoints_cart.append([x, y])
    im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # Save the annotated visualization instead of showing a window.
    cv2.imwrite('detectedBlobs.png', im_with_keypoints)
    return keypoints_cart, keypoints
def blobDetectionControl():
    """Same blob detection as blobDetection() but on the control image
    (subtractedImageControl.jpg -> detectedBlobsControl.png).

    NOTE(review): near-duplicate of blobDetection(); only the two filenames
    differ — consider extracting a shared helper taking the paths.
    """
    im = cv2.imread("subtractedImageControl.jpg", cv2.IMREAD_GRAYSCALE)
    # Setup SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    # Change thresholds
    params.minThreshold = 0
    params.maxThreshold = 256
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 20
    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1
    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.5
    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.5
    # Filter by colour.
    params.filterByColor = True
    # NOTE(review): redundant duplicate `maxThreshold = 256` removed here,
    # as in blobDetection(); params.blobColor may have been intended.
    # Set up the detector with the configured parameters.
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs on the inverted image.
    keypoints = detector.detect(255 - im)
    keypoints_cart = []
    print("numKeypoints= ", len(keypoints))
    for kp in keypoints:
        x = kp.pt[0]
        y = kp.pt[1]
        print("x=", x)
        print("y=", y)
        print(" ")
        keypoints_cart.append([x, y])
    im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imwrite('detectedBlobsControl.png', im_with_keypoints)
    return keypoints_cart, keypoints
#HSL Detection - given an image array in HSV, it will output the identity of any ship detected there
# for opencv For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255].
# Scale a hue from the conventional 0-360 degree range into OpenCV's
# 0-179 H range (8-bit HSV images halve the hue to fit one byte).
hue=0
hue = hue/360 *179
"""
#hues = [x for x in range(0,361,60)]
hues_list = [0,60,120,180,240,300]
def findShipId(hsvImageArray,hueRange):
out_array = [0,0,0,0,0,0]
numPixels = len(hsvImageArray)
pixel_sum = [0,0,0,0,0,0] #keeps track of the count of each hue-matching pixel
for pixx in hsvImageArray:
for hueCnt in range(0,6):
#if ( ((pixx[0] + 60) > (hues_list[hueCnt] +60 - hueRange)) and ((pixx[0] + 60) < (hues_list_looped[hueCnt] +60 + hueRange)) ):
elif ( ((pixx[0]%330) > (hues_list[hueCnt] +60 - hueRange)) and ((pixx[0] + 60) < (hues_list_looped[hueCnt] +60 + hueRange)) ):
if ( (pixx[1] > 150) and (pixx[2] > 150) ):
pixel_sum[hueCnt] +=1
for i in range(0,5):
if pixel_sum[i] > numPixels/24:
out_array[i] = 1
print("Detected shipId", out_array)
return out_array
#GENTERATE SHIP IDENTIFIERS
import itertools
from itertools import combinations
#Hifriend generate sh ip identifiers - 6 hues w/ 4colour die
def generateShipId():
in_list = [0, 1, 2, 3, 4, 5]
out_list = []
for i in range(1, 5):
out_list.extend(itertools.combinations(in_list, i))
out_list2 = []
for i in out_list:
inter_list = [0,0,0,0,0,0]
for j in range(0,6):
if j in i:
inter_list[j] = 1
out_list2.append(inter_list)
return out_list2
#CREATE SHIP COLOUR COMBINATIONS
lst1 = [0, 1, 2, 3, 4, 5, 6, 7, 8]
lst = []
for x in lst1:
lst.append(x*60)
combs = []
#aaaa = [list(x) for x in combinations(lst, 4)]
#for x in range(len(aaaa)): combs.append(aaaa[x])
aaaa = [list(x) for x in combinations(lst, 3)]
for x in range(len(aaaa)): combs.append(aaaa[x])
aaaa = [list(x) for x in combinations(lst, 2)]
for x in range(len(aaaa)): combs.append(aaaa[x])
print(len(combs), end=" ships created")
"""
|
import sys
# Redirect stdin so input() reads from the puzzle input file.
sys.stdin = open("D1_6243_input.txt", "r")
# Print the unique whitespace-separated tokens of the first line, sorted and
# comma-joined. The redundant list()/map(str, ...) wrappers were dropped:
# str.split() already returns a list of strings, and sorted() accepts a set.
print(','.join(sorted(set(input().split()))))
from naive_bayes.tokenizer import Tokenizer
from naive_bayes.email_object import EmailObject
from naive_bayes.spam_trainer import SpamTrainer

# BUG FIX: __all__ previously listed "tokenizer", a name this module never
# binds, so `from <package> import *` raised AttributeError. Export the
# classes that are actually imported above.
__all__ = ["Tokenizer", "EmailObject", "SpamTrainer"]
|
########### Python 3.5 #############
import http.client, urllib.request, urllib.parse, urllib.error, base64, json, pickle
# Read the Bing API key from the first line of secrets.txt; `with` closes
# the handle the original leaked.
with open('secrets.txt', 'r') as apiFile:
    apiKey = apiFile.readline().replace("\n", "")
headers = {
    # Request headers
    'Ocp-Apim-Subscription-Key': apiKey,
}
# 12 Months of the year
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
# Final month (last month has no data)
finalMonth = months[11]
# First year to get data from
currentYear = 2009
# Last month to get data from (inclusive)
finalYear = 2016
# Person to be looked up
personQuery = 'Obama'
# Index of the first month to query
monthIndex = 0  # January
# Json Object to be built up from queries
results = {}
# Function for getting the mm/yyyy representation of the current year/month
def getMMYYYY(monthIndex, year):
    """Return the 'mm/yyyy' representation of a 0-based month index and a
    year (month is zero-padded to two digits)."""
    return '{:02d}/{}'.format(monthIndex + 1, year)
# Iterates over the time range and adds urls for each month
# Iterate over the whole time range, fetching 10 image urls per month.
while (currentYear <= finalYear):
    # The final month of the final year has no data yet; stop before it.
    if currentYear == finalYear and months[monthIndex] == finalMonth:
        break
    # Search string to be built out of the person + month + year
    queryString = personQuery + " " + months[monthIndex] + " " + str(currentYear)
    params = urllib.parse.urlencode({
        # Request parameters
        'q': queryString,
        'count': '10',
        'offset': '0',
        'mkt': 'en-us',
        'safeSearch': 'Moderate',
    })
    try:
        conn = http.client.HTTPSConnection('api.cognitive.microsoft.com')
        conn.request("GET", "/bing/v5.0/images/search?%s" % params, "{body}", headers)
        response = conn.getresponse()
        str_response = response.read().decode('utf-8')
        json_obj = json.loads(str_response)
        # Collect the first 10 image urls for this month.
        urlList = []
        for i in range(0, 10):
            contenturl = json_obj['value'][i]['contentUrl']
            urlList.append(contenturl)
        # Adds the list to the results dictionary, keyed by "mm/yyyy".
        monthYearKey = getMMYYYY(monthIndex, currentYear)
        results[monthYearKey] = urlList
        conn.close()
    except Exception as e:
        # BUG FIX: the old handler printed e.errno / e.strerror, attributes
        # most exceptions raised here (HTTP, JSON decode, KeyError/IndexError)
        # do not define — so the error *reporting* itself crashed.
        print("Error: {0}".format(e))
    # Advance to the next month, rolling the year over after December.
    monthIndex += 1
    if monthIndex > 11:
        monthIndex = 0
        currentYear += 1
# Persist and echo the collected urls (with-block closes the pickle file).
with open("faceUrls.pkl", "wb") as pklFile:
    pickle.dump(results, pklFile)
print(json.dumps(results))
####################################
|
"""
This tests all requests made by the pyvesync library with pytest.
All tests inherit from the TestBase class which contains the fixtures
and methods needed to run the tests.
The `helpers.call_api` method is patched to return a mock response.
The method, endpoint, headers and json arguments are recorded
in YAML files in the api directory, categorized in folders by
module and files by the class name.
The default is to record requests that do not exist and compare requests
that already exist. If the API changes, set the overwrite argument to True
in order to overwrite the existing YAML file with the new request.
"""
import logging
import call_json
import call_json_outlets
import call_json_bulbs
import call_json_fans
import call_json_switches
from utils import TestBase, assert_test, parse_args
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
LOGIN_RESPONSE = call_json.LOGIN_RET_BODY
def test_device_tests():
    """Ensure every supported device is configured for testing.

    Each device type module (call_json_bulbs / _fans / _outlets / _switches)
    must define one DETAILS_RESPONSES entry per device model, holding the
    canned get_details() response. The response itself lives in the
    `{device_type}Details` class of the respective module; the class
    variable name does not matter, but the DETAILS_RESPONSES dictionary key
    must match the device type.

    Examples
    ---------
    class FanDetails:
        "Core200SResponse": {'speed': 1, 'device_status': 'on'}
    DETAILS_RESPONSES = {
        'Core200S': FanDetails.Core200SResponse
    }

    Asserts
    -------
    Number of devices for each type has a response defined in the
    respective `call_json` module.

    See Also
    --------
    src/tests/README.md - README located in the tests directory
    """
    # One configured response is required per known device model.
    assert call_json_fans.FANS_NUM == len(call_json_fans.DETAILS_RESPONSES)
    assert call_json_bulbs.BULBS_NUM == len(call_json_bulbs.DETAILS_RESPONSES)
    assert call_json_outlets.OUTLETS_NUM == len(call_json_outlets.DETAILS_RESPONSES)
    assert call_json_switches.SWITCHES_NUM == len(call_json_switches.DETAILS_RESPONSES)
class TestGeneralAPI(TestBase):
    """General API testing class for login() and get_devices()."""

    def test_login(self):
        """Test login() method request and API response."""
        print("Test Login")
        # Canned login payload returned by the patched helpers.call_api.
        self.mock_api.return_value = LOGIN_RESPONSE
        self.manager.enabled = False
        assert self.manager.login()
        all_kwargs = parse_args(self.mock_api)
        # Compare the recorded request against the stored YAML snapshot
        # (or record/overwrite it, depending on the fixture flags).
        assert assert_test(self.manager.login, all_kwargs, None,
                           self.write_api, self.overwrite)

    def test_get_devices(self):
        """Test get_devices() method request and API response."""
        print("Test Device List")
        self.mock_api.return_value = call_json.DeviceList.device_list_response()
        self.manager.get_devices()
        all_kwargs = parse_args(self.mock_api)
        assert assert_test(self.manager.get_devices, all_kwargs, None,
                           self.write_api, self.overwrite)
        # Every device in the canned list must be sorted into its category.
        assert len(self.manager.bulbs) == call_json_bulbs.BULBS_NUM
        assert len(self.manager.outlets) == call_json_outlets.OUTLETS_NUM
        assert len(self.manager.fans) == call_json_fans.FANS_NUM
        assert len(self.manager.switches) == call_json_switches.SWITCHES_NUM
|
import sys
import time
# Initialize
debugMode = False # Set to true to switch on debug mode
n = 0 # number of men (= number of women), read from the "n=" input line
allPersons = [] #array of names; men at even indices, women at odd indices
menPrefs = [] #array: menPrefs[m] = list of woman indices in preference order
womenPrefs = [] #array: womenPrefs[w][m] = rank woman w gives man m (inverse list)
# Functions
def isInt(i):
    """Return True when `i` can be parsed as an integer."""
    try:
        int(i)
    except ValueError:
        return False
    return True
# Read parameters
# Require an input file; an expected-output file is optional.
# NOTE(review): the upper bound (> 4) actually admits three arguments even
# though only <inputfile> and <testfile> are documented — confirm intent.
if ((len(sys.argv) < 2) or (len(sys.argv) > 4)):
    print "USAGE: stableMarriage.py <inputfile> [<testfile>]"
    print ""
    print " PARAMETERS"
    print " <inputfile> Filename of inputfile"
    print " <testfile> Filename of expected outputfile"
    exit()
start_time = time.clock()
# Read inputfile: an "n=" line sizes the arrays; numbered lines with ":" are
# preference lists (odd ids = men, even ids = women); other numbered lines
# name the persons. Python-2 integer division (/2) maps 1-based person ids
# to 0-based per-sex indices.
inputFilename = sys.argv[1]
inputfile = open(inputFilename, 'r')
for line in inputfile:
    if ((line.startswith("n=")) and (isInt(line.partition("=")[2]))):
        n = int(line.partition("=")[2])
        allPersons = [''] * 2 * n
        # NOTE(review): [[0]*n]*n creates n references to ONE row list; this
        # is safe here only because rows are replaced wholesale below, never
        # mutated in place — confirm before reusing this pattern.
        menPrefs = [[0] * n] * n
        womenPrefs = [[0] * n] * n
    elif ((line) and (not line.startswith("#")) and (isInt(line[0])) and (n != 0)):
        if (line.find(":") != -1):
            prefsStr = line.strip().split(": ")[1].split(" ")
            prefs = [0] * n
            index = 0
            if (int(line.split(": ")[0])%2 == 1):
                # Read men prefs (kept in preference order)
                for prefStr in prefsStr:
                    prefs[index] = (int(prefStr)-1)/2
                    index += 1
                menPrefs[(int(line.split(": ")[0])-1)/2] = prefs
            else:
                # Read women prefs (inverse: prefs[man] = rank)
                for prefStr in prefsStr:
                    prefs[(int(prefStr)-1)/2] = index
                    index += 1
                womenPrefs[(int(line.split(": ")[0])-1)/2] = prefs
        else:
            # Read all persons (id name)
            allPersons[int(line.strip().split(" ")[0])-1] = line.strip().split(" ")[1].replace("\n","")
# Reading results
if (debugMode): print "All persons: " + str(allPersons) + "\nMen Preferences: " + str(menPrefs) + "\nWomen Preferences: " + str(womenPrefs)
#Validations
# n must have been set by an "n=" line in the input file.
if (n == 0):
    print "ERROR: Bad data - n can't be empty"
    exit()
# There must be exactly 2n persons (n men + n women).
if (len(allPersons)/n != 2):
    print 'ERROR: Bad data - There is no equal number of man and woman'
    exit()
# Every person needs a preference list.
if (len(menPrefs) + len(womenPrefs) != len(allPersons)):
    print 'ERROR: Bad data - Not every person has a preference list'
    exit()
# Gale-Shapley Algorithm: men propose in preference order; a woman accepts a
# proposal if single or if she ranks the proposer above her current husband.
freeMen = range(n) #stack of man indices still unmatched
wife = [-1] * n #array: wife[man] = woman index, -1 while single
husband = [-1] * n #array: husband[woman] = man index, -1 while single
proposals = [0] * n #array: next preference index each man will propose to
while (len(freeMen) > 0):
    if (debugMode): print "------------------------------\nProposals of " + str(allPersons[freeMen[0]*2]) + ": " + str(proposals[freeMen[0]])
    if (proposals[freeMen[0]] < n):
        # Propose to the next woman on this man's list.
        woman = menPrefs[freeMen[0]][proposals[freeMen[0]]]
        proposals[freeMen[0]] += 1
        if (debugMode): print str(allPersons[freeMen[0]*2]) + " proposes to " + str(allPersons[woman*2+1])
        if (husband[woman] == -1):
            # She is single: accept immediately.
            if (debugMode): print str(allPersons[woman*2+1]) + " says yes"
            husband[woman] = freeMen[0]
            wife[freeMen[0]] = woman
            if (debugMode): print str(allPersons[freeMen[0]*2]) + " is not free anymore"
            freeMen.pop(0)
        elif (womenPrefs[woman][freeMen[0]] < womenPrefs[woman][husband[woman]]):
            # She prefers the proposer (lower rank = better): swap husbands.
            if (debugMode): print str(allPersons[woman*2+1]) + " thinks " + str(allPersons[freeMen[0]*2]) + " is better than " + str(allPersons[husband[woman]*2])
            freeMen.append(husband[woman])
            if (debugMode): print str(allPersons[husband[woman]*2]) + " is free again"
            husband[woman] = freeMen[0]
            wife[freeMen[0]] = woman
            if (debugMode): print str(allPersons[freeMen[0]*2]) + " is not free anymore"
            freeMen.pop(0)
        else:
            # She keeps her current husband; the man stays free.
            if (debugMode): print str(allPersons[woman*2+1]) + " rejects " + str(allPersons[freeMen[0]*2])
    else:
        # Exhausted his list: give up (cannot happen with complete lists).
        freeMen.pop(0)
# NOTE(review): time.clock() was removed in Python 3.8; fine for this
# Python-2 script, but use time.perf_counter() if ever ported.
elapsed_time = time.clock() - start_time
# Output: one "man -- wife" line per man.
if (debugMode): print "=============================="
result = ""
for man in range(n):
    result += allPersons[man*2] + " -- " + allPersons[wife[man]*2+1] + "\n"
print result
print "\nTime: " + str(elapsed_time) + "\n"
# Test output: compare against the expected-output file, when supplied.
if (len(sys.argv) == 3):
    testFilename = sys.argv[2]
    testFile = open(testFilename, 'r')
    if (testFile.read() == result):
        print "==============================\nOK\n=============================="
    else:
        print "==============================\nNOT OK\n=============================="
|
from django.shortcuts import render
from os.path import isdir, dirname, join
from os import mkdir
from .settings import BASE_DIR
import pandas as pd
def home(request):
    """Render the site landing page."""
    return render(request, 'index.html', {})
def upload(request):
    """Handle the class roster ('namelist') Excel upload.

    On POST: validates that an .xlsx/.xls file was supplied, saves it under
    BASE_DIR/upload/namelist, parses it with pandas (skipping 3 header
    rows, keeping the 5 roster columns) and renders the table as HTML.
    All user-facing (Chinese) messages are preserved verbatim.
    """
    if request.method == 'POST':
        uploadDir = BASE_DIR+'/upload'+'/namelist'
        if not isdir(uploadDir):
            mkdir(uploadDir)
        uploadedFile = request.FILES.get('Scores')
        if not uploadedFile:
            return render(request, 'mingdan.html', {'msg':'没有选择文件'})
        # endswith() accepts a tuple — replaces the original nested ifs.
        if not uploadedFile.name.endswith(('.xlsx', '.xls')):
            return render(request, 'mingdan.html', {'msg':'必须选择xlsx或xls文件'})
        dstFilename = join(uploadDir, uploadedFile.name)
        with open(dstFilename, 'wb') as fp:
            for chunk in uploadedFile.chunks():
                fp.write(chunk)
        pdData = pd.read_excel(dstFilename)
        # Drop the 3 title rows; keep the 5 roster columns.
        pdData = pdData[3:][['2019-2020学年第1学期点名册', 'Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4']]
        pdhtml = pdData.to_html(index=False)
        context = {}
        context['text'] = pdhtml
        context['msg'] = '上传成功'
        return render(request, 'mingdan.html', context)
    else:
        return render(request, 'mingdan.html',{'msg':None})
def onceanalyse(request):
    """Compare an uploaded homework-score sheet against the fixed class roster
    and report who did not submit, who scored well, and who scored poorly."""
    # Fixed roster workbook for this course section.
    dstFilename = BASE_DIR+'/upload/'+'namelist/'+'(2019-2020-1)-B0300121S-JSJY0001-11dmc.xls'
    Data = pd.read_excel(dstFilename)
    # Skip the four header rows; keep only the roster columns.
    Data = Data[4:][['2019-2020学年第1学期点名册', 'Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4']]
    names = Data['Unnamed: 2'].tolist()
    IDs = Data['Unnamed: 1'].tolist()
    # name -> student ID for everyone on the roster.
    students = dict(zip(names, IDs))
    if request.method == 'POST':
        uploadDir = BASE_DIR+'/upload'+'/oncescore'
        if not isdir(uploadDir):
            mkdir(uploadDir)
        uploadedFile = request.FILES.get('Scores')
        if not uploadedFile:
            return render(request, 'once.html', {'msg':'没有选择文件'})
        if not uploadedFile.name.endswith('.xlsx'):
            if not uploadedFile.name.endswith('.xls'):
                return render(request, 'once.html', {'msg':'必须选择xlsx或xls文件'})
        dstFilename = join(uploadDir, uploadedFile.name)
        # Persist the upload so pandas can read it back from disk.
        with open(dstFilename, 'wb') as fp:
            for chunk in uploadedFile.chunks():
                fp.write(chunk)
        pdData = pd.read_excel(dstFilename)
        rarenames = pdData['真实姓名'].tolist()
        # Keep only real (string) names; blank rows come back as NaN floats.
        wholenames = []
        for name in rarenames:
            if type(name)==str:
                wholenames.append(name)
        # Roster students with no submission at all.
        meijiao =[]
        for name in names:
            if not name in wholenames:
                meijiao.append(name)
        # First score found per submitting student.
        scores = {}
        for name in wholenames:
            temp=pdData['得分'][pdData['真实姓名']==name].tolist()
            scores[name]=temp[0]
        # NOTE(review): raises ValueError when nobody submitted (empty scores) — confirm intended.
        maxscore = max(scores.values())
        # >= 90% of the top score counts as "good", < 60% as "bad".
        goods = []
        bads = []
        for key, value in scores.items():
            if value >= maxscore*0.9:
                goods.append(key)
            elif value < maxscore*0.6:
                bads.append(key)
        MeiJiao = {}
        Goods = {}
        Bads = {}
        for i, j in students.items():
            if i in meijiao:
                MeiJiao.update({i:j})
            elif i in goods:
                Goods.update({i:j})
            elif i in bads:
                Bads.update({i:j})
        # Rebuild the lists as "name+ID" display strings.
        # NOTE(review): key+value assumes the ID column was read as str — confirm against the sheet.
        meijiao.clear()
        goods.clear()
        bads.clear()
        for key, value in MeiJiao.items():
            meijiao.append(key+value)
        for key, value in Goods.items():
            goods.append(key+value)
        for key, value in Bads.items():
            bads.append(key+value)
        dic1 = {'没交作业的人':meijiao,}
        df1=pd.DataFrame(data=dic1)
        dh1=df1.to_html(index=False)
        dic2 = {'做得不好的人':bads,}
        df2=pd.DataFrame(data=dic2)
        dh2=df2.to_html(index=False)
        dic3 = {'做得好的人':goods,}
        df3=pd.DataFrame(data=dic3)
        dh3=df3.to_html(index=False)
        context = {}
        context['text1'] = dh1
        context['text2'] = dh2
        context['text3'] = dh3
        context['msg'] = '上传成功'
        return render(request, 'once.html', context)
    else:
        return render(request, 'once.html',{'msg':None})
|
# Generated by Django 3.1.4 on 2021-02-28 17:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Company.employee with a ManyToManyField to Employee."""

    dependencies = [
        ('toys', '0002_auto_20210228_2220'),
    ]
    operations = [
        # Drop the old single-valued field before re-adding it as M2M.
        migrations.RemoveField(
            model_name='company',
            name='employee',
        ),
        migrations.AddField(
            model_name='company',
            # NOTE(review): null=True has no effect on ManyToManyField (Django
            # emits warning fields.W340) — harmless, but the option is ignored.
            name='employee',
            field=models.ManyToManyField(blank=True, null=True, related_name='company', to='toys.Employee'),
        ),
    ]
|
s = input()
# Collect every 10-character window that reappears later in the string.
# (Windows near the end are shorter than 10 characters, matching the
# original slice behaviour.)
found = [s[start:start + 10]
         for start in range(len(s))
         if s[start:start + 10] in s[start + 10:]]
print(found)
'''
This file computes the k size
required to tolerate against an
adversary who joins his mining
power to fill the queue of an
honest miner.
'''
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from decimal import *
getcontext().prec = 400
'''
Results from Previous Runs
0. 1x Scalability
+-----------+-----------+
| 32 | 3 |
| 40 | 4 |
| 48 | 5 |
| 56 | 6 |
| 64 | 7 |
| 72 | 8 |
| 80 | 9 |
| 88 | 10 |
| 96 | 11 |
| 104 | 12 |
| 112 | 13 |
| 120 | 14 |
| 128 | 15 |
+-----------+-----------+
1. 5x Scalability
+-----------+----------+
| 32 | 5 |
| 40 | 7 |
| 48 | 8 |
| 56 | 10 |
| 64 | 11 |
| 72 | 13 |
| 80 | 15 |
| 88 | 16 |
| 96 | 18 |
| 104 | 19 |
| 112 | 21 |
| 120 | 22 |
| 128 | 24 |
+-----------+----------+
2. 10x Scalability
+-----------+----------+
| 32 | 8 |
| 40 | 10 |
| 48 | 12 |
| 56 | 14 |
| 64 | 16 |
| 72 | 18 |
| 80 | 20 |
| 88 | 22 |
| 96 | 24 |
| 104 | 26 |
| 112 | 28 |
| 120 | 30 |
| 128 | 33 |
+-----------+----------+
3. 20x Scalability
+-----------+----------+
| 32 | 13 |
| 40 | 16 |
| 48 | 20 |
| 56 | 23 |
| 64 | 27 |
| 72 | 30 |
| 80 | 33 |
| 88 | 37 |
| 96 | 40 |
| 104 | 44 |
| 112 | 47 |
| 120 | 51 |
| 128 | 54 |
+-----------+----------+
'''
def computeProb(lambd, MAX):
    """Compute log2 queue-size probabilities for i = 0..MAX-1.

    NOTE(review): appends the index values to a module-level list ``ks``
    that is not defined anywhere in this file's visible scope — the caller
    must initialise ``ks = []`` first; confirm before use.

    :param lambd: arrival rate as a Decimal
    :param MAX: exclusive upper bound on the queue size considered
    :return: list of probabilities (entries 0 and 1 linear, the rest log2)
    """
    probs = []
    # i = 0 and i = 1 have simple closed forms.
    ks.append(0)
    probs.append(Decimal(1)-lambd)
    ks.append(1)
    probs.append((Decimal(1)-lambd)*(np.exp(lambd)-Decimal(1)))
    totalProb = probs[0] + probs[1]
    for i in range(2, MAX):
        # Alternating-series expression for P(queue == i).
        term1 = (Decimal(1)-lambd)
        term2 = np.exp(Decimal(i)*lambd)
        term3 = Decimal(0.0)
        for j in range(1,i):
            term4 = np.exp(Decimal(j)*lambd)
            term5 = np.power(-1,(Decimal(i)-Decimal(j)))
            term6 = np.power(Decimal(j)*lambd, Decimal(i)-Decimal(j))/(math.factorial(Decimal(i)-Decimal(j)))
            term7 = np.power(Decimal(j)*lambd, Decimal(i)-Decimal(j+1))/(math.factorial(Decimal(i)-Decimal(j+1)))
            term8 = term4*term5*(term6 + term7)
            term3 = term3 + term8
        result = term1*(term2 + term3)
        if result < Decimal(0):
            # Numerical cancellation can drive the sum negative; flag loudly.
            print("NEGATIVEEEEEEEEEEEEEEEEEEEEEEEE")
        ks.append(i)
        probs.append(math.log2(result))
        # totalProb = totalProb + result
        # print(i,result)
    return(probs)
    # print(totalProb)
# print(totalProb)
def computeK(lambd, MAX, threshold):
    """Smallest k such that the tail probability P(queue > k) stays below
    ``threshold``.

    Accumulates the queue-size distribution from i = MAX-ish down to 2, then
    adds the i = 1 and i = 0 terms; returns k = i-1 at the first point where
    the accumulated tail mass exceeds ``threshold``.

    :param lambd: arrival rate as a Decimal
    :param MAX: queue size to start accumulating from (downwards)
    :param threshold: tolerated tail probability (e.g. 2**-beta)
    :return: required k, or None if the whole sum never exceeds threshold
    """
    errorProb = Decimal(0)
    i = MAX
    while i > 1:
        # Alternating-series expression for P(queue == i)
        # (same formula as in computeProb above).
        term1 = (Decimal(1)-lambd)
        term2 = np.exp(Decimal(i)*lambd)
        term3 = Decimal(0.0)
        for j in range(1,i):
            term4 = np.exp(Decimal(j)*lambd)
            term5 = np.power(-1,(Decimal(i)-Decimal(j)))
            term6 = np.power(Decimal(j)*lambd, Decimal(i)-Decimal(j))/(math.factorial(Decimal(i)-Decimal(j)))
            term7 = np.power(Decimal(j)*lambd, Decimal(i)-Decimal(j+1))/(math.factorial(Decimal(i)-Decimal(j+1)))
            term8 = term4*term5*(term6 + term7)
            term3 = term3 + term8
        result = term1*(term2 + term3)
        if result < Decimal(0):
            # Numerical cancellation can drive the sum negative; flag loudly.
            print("NEGATIVEEEEEEEEEEEEEEEEEEEEEEEE")
        errorProb = errorProb + result
        if errorProb > threshold:
            return (i-1)
        i=i-1
    # P(queue == 1)
    errorProb = errorProb + (Decimal(1)-lambd)*(np.exp(lambd)-Decimal(1))
    if errorProb > threshold:
        return 1
    # P(queue == 0)
    errorProb = errorProb + (Decimal(1)-lambd)
    if errorProb > threshold:
        return 0
    return None
def computeKValuses(lambd, MAX, beta):
    """Compute the required k for each security level from ``beta`` up to 128
    in steps of 8; returns the parallel (kValues, betaValues) lists."""
    kValues = []
    betaValues = []
    for level in range(beta, 129, 8):
        kValue = computeK(lambd, MAX, math.pow(2, -1 * level))
        if kValue is None:
            print("None k Values")
            continue
        kValues.append(kValue)
        betaValues.append(level)
        print(level, ",", kValue)
    return (kValues, betaValues)
# Exclusive upper bound on queue sizes considered by computeK.
MAX = 100 + 1
# 1 second block processing time i.e 5x scalability
# lambda5 = lambdA = Decimal(1)/Decimal(15.0) + Decimal(1)/Decimal(30)
# kValues5 , betaValues5 = computeKValuses(lambda5, MAX, 32)
# # 2 second block processing time i.e 10x scalability
# lambd10 = Decimal(1)/Decimal(5.0)
# kValues10 , betaValues10 = computeKValuses(lambda10, MAX, 32)
# # 4 second block processing time i.e 20x scalability
# lambda20 = Decimal(1)/Decimal(2.5)
# kValues20 , betaValues20 = computeKValuses(lambda10, MAX, 32)
# Active run: baseline rate lambda = 1/50; prints "beta , k" pairs for
# beta = 32, 40, ..., 128 (compare against the tables in the module docstring).
lambda1 = Decimal(1)/Decimal(50)
kValues1 , betaValues1 = computeKValuses(lambda1, MAX, 32)
# plt.figure(1)
# # plt.plot(ks, probHonest, label='Probability')
# # plt.plot(ks, probAdv, label='Probability with Adv')
# plt.plot(betaValues, kValues, label='5x Ethereum Gas Limit')
# plt.plot(betaValues, kValues2, label='10x Ethereum Gas Limit')
# plt.plot(betaValues, kValues4, label='20x Ethereum Gas Limit')
# plt.grid(True)
# plt.legend(loc="upper left")
# plt.xlabel('-ve of Log2 (Probability (Q>k))')
# plt.ylabel('Required k')
# # plt.title('Gas usage and limit with increasing block height')
# plt.show()
|
import cv2
from darkflow.net.build import TFNet
import numpy as np
import time
# Darkflow configs: two custom tiny-YOLO models (fight / no-fight) plus the
# stock YOLO model used to detect persons.
option1 = {
    'model': 'cfg/tiny-yolo-voc-nofight.cfg',
    'load': 16500,
    'threshold': 0.1,
    'gpu': 1.0
}
option2 = {
    'model': 'cfg/tiny-yolo-voc-1c.cfg',
    'load': 35250,
    'threshold': 0.15,
    'gpu': 1.0
}
option3 = {
    'model': 'cfg/yolo.cfg',
    'load': 'bin/yolo.weights' ,
    'threshold': 0.45,
    'gpu': 1.0
}
tfnet1 = TFNet(option1) # no-fight
tfnet2 = TFNet(option2) # fight
tfnet3 = TFNet(option3)
# Output frame rate for the annotated video.
fc = 30.0
capture = cv2.VideoCapture('testvideo/fighttest5.mp4')
capture.set(3, 720)   # property 3 = frame width
capture.set(4, 1080)  # property 4 = frame height
codec = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
writer = cv2.VideoWriter('testvideo/fighttest5(maked).avi', codec, fc, (int(capture.get(3)), int(capture.get(4))))
count1 = 0  # frames annotated as no_fight
count2 = 0  # frames annotated as fight
# zip(colors, results) caps the drawn detections at 5 per model per frame.
colors = [tuple(255 * np.random.rand(3)) for i in range(5)]
while (capture.isOpened()):
    stime = time.time()
    ret, frame = capture.read()
    if ret:
        results1 = tfnet1.return_predict(frame)
        results2 = tfnet2.return_predict(frame)
        results3 = tfnet3.return_predict(frame)
        # Only draw fight/no-fight boxes when the stock model sees a person.
        for color, result in zip(colors, results3):
            # tl = (result['topleft']['x'], result['topleft']['y'])
            # br = (result['bottomright']['x'], result['bottomright']['y'])
            label2 = result['label']
            if label2 == 'person':
                # Inner loops reuse the names color/result/label2.
                for color, result in zip(colors, results2):
                    tl = (result['topleft']['x'], result['topleft']['y'])
                    br = (result['bottomright']['x'], result['bottomright']['y'])
                    label2 = 'fight'
                    confidence = result['confidence']
                    text = '{}: {:.0f}%'.format(label2, confidence * 100)
                    frame = cv2.rectangle(frame, tl, br, (0,0,250), 7)
                    frame = cv2.putText(frame, text,tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 250, 0), 2)
                    count2=count2+1
            # NOTE(review): the loop above rebinds label2 to 'fight' whenever
            # the fight model returned anything, so this branch then never
            # fires — confirm whether no_fight boxes were meant to be drawn too.
            if label2 == 'person':
                for color, result in zip(colors, results1):
                    tl = (result['topleft']['x'], result['topleft']['y'])
                    br = (result['bottomright']['x'], result['bottomright']['y'])
                    label1 = 'no_fight'
                    confidence = result['confidence']
                    text = '{}: {:.0f}%'.format(label1, confidence * 100)
                    frame = cv2.rectangle(frame, tl, br, (250,0,0), 7)
                    frame = cv2.putText(frame, text,tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 250, 0), 2)
                    count1=count1+1
                    # confidence = result['confidence']
                    # text = '{}: {:.0f}%'.format(label2, confidence * 100)
                    # frame = cv2.rectangle(frame, tl, br, (0,0,250), 7)
                    # frame = cv2.putText(frame, text,tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 250, 0), 2)
                    # count2=count2+1
        cv2.imshow('frame', frame)
        writer.write(frame)
        print('FPS {:.1f}'.format(1 / (time.time() - stime)))
        print(count1)
        print(count2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # End of stream: release everything and exit the loop.
        capture.release()
        writer.release()
        cv2.destroyAllWindows()
        break
|
"""Ontology converter."""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional
from linkml_runtime import SchemaView
from linkml_runtime.utils.formatutils import camelcase
from oaklib.datamodels.obograph import Graph
from oaklib.datamodels.vocabulary import IS_A
from oaklib.interfaces.obograph_interface import OboGraphInterface
from ontogpt.templates.halo import Ontology, OntologyElement
this_path = Path(__file__).parent
logger = logging.getLogger(__name__)
@dataclass
class OntologyConverter:
    """Converts an OAK ontology to an OntoGPT schema."""

    # OAK adapter supplying the source ontology graph.
    adapter: OboGraphInterface = None
    # Schema describing OntologyElement; loaded from templates/halo.yaml.
    schemaview: SchemaView = None
    # Slot name -> value applied verbatim to every generated element.
    fixed_slot_values: Dict[str, str] = field(default_factory=lambda: {})

    def __post_init__(self):
        templates_path = this_path.parent / "templates"
        path_to_template = str(templates_path / "halo.yaml")
        self.schemaview = SchemaView(path_to_template)

    def extract_seed_ontology(self, seeds: List[str], predicates: List[str]) -> Ontology:
        """Extract the sub-ontology spanned by the seeds and their ancestors.

        :param seeds: CURIEs to start from
        :param predicates: predicates to traverse
        :return: ontology restricted to the reflexive ancestor closure
        """
        ancestors = list(set(list(self.adapter.ancestors(seeds, predicates, reflexive=True))))
        seed_graph = self.adapter.extract_graph(ancestors, predicates, dangling=False)
        logger.info(len(seed_graph.nodes))
        seed_ontology = self.from_obograph(seed_graph)
        return seed_ontology

    def from_adapter(self) -> Ontology:
        """Convert the configured adapter's entire graph to an Ontology.

        :return: converted ontology
        """
        graph = self.adapter.as_obograph()
        return self.from_obograph(graph)

    def from_obograph(self, graph: Graph) -> Ontology:
        """Convert an OBO Graph to an Ontology.

        :param graph: source obograph
        :return: converted ontology
        """
        adapter = self.adapter
        ontology = Ontology()
        element_index = {}
        node_to_element_name = {}
        id2slot = {}
        inverses = {}
        # Index the schema's slots by URI and record inverse pairs both ways.
        for slot in self.schemaview.class_induced_slots(OntologyElement.__name__):
            if slot.inverse:
                inverses[slot.name] = slot.inverse
                inverses[slot.inverse] = slot.name
            if slot.slot_uri:
                id2slot[slot.slot_uri] = slot
        logger.info(list(id2slot.keys()))
        logger.info(inverses)
        # Pass 1: one OntologyElement per labelled node that has metadata.
        for node in graph.nodes:
            meta = node.meta
            if not node.lbl:
                continue
            if not meta:
                # logger.warning(f"Node {node.id} has no meta")
                continue
            element = OntologyElement(
                name=self.node_to_name(node.id, node.lbl),
                synonyms=[synonym.val for synonym in meta.synonyms],
                description=meta.definition.val if meta.definition else None,
            )
            for k, v in self.fixed_slot_values.items():
                setattr(element, k, v)
            element_index[element.name] = element
            node_to_element_name[node.id] = element.name
        # Pass 2: translate edges into slot assignments (and their inverses),
        # skipping predicates not in the schema and dangling endpoints.
        for edge in graph.edges:
            if edge.pred == "is_a":
                pred = IS_A
            else:
                try:
                    pred = adapter.uri_to_curie(edge.pred)
                except Exception:  # was a bare except: keep SystemExit/KeyboardInterrupt raisable
                    pred = edge.pred
            if pred not in id2slot:
                continue
            if edge.sub not in node_to_element_name:
                continue
            if edge.obj not in node_to_element_name:
                continue
            subject_name = node_to_element_name[edge.sub]
            object_name = node_to_element_name[edge.obj]  # renamed: was shadowing builtin `object`
            slot = id2slot[pred]
            getattr(element_index[subject_name], slot.name).append(object_name)
            if slot.name in inverses:
                inverse = inverses[slot.name]
                getattr(element_index[object_name], inverse).append(subject_name)
        # Pass 3: render logical definitions as "genus and differentia" strings.
        for ldef in adapter.logical_definitions([node.id for node in graph.nodes]):
            if ldef.definedClassId in node_to_element_name:
                element = element_index[node_to_element_name[ldef.definedClassId]]
                if not ldef.genusIds:
                    continue
                if not ldef.restrictions:
                    continue
                genus_elts = [node_to_element_name[g] for g in ldef.genusIds]
                differentia = [
                    f"{adapter.label(r.propertyId)} some {self.node_to_name(r.fillerId)}"
                    for r in ldef.restrictions
                ]
                element.equivalent_to = (
                    f"{' and '.join(genus_elts)} and {' and '.join(differentia)}"
                )
                # Use the module logger consistently (was logging.info on the root logger).
                logger.info(f"Equiv[{element.name}] = {element.equivalent_to}")
        for element in element_index.values():
            ontology.elements.append(element)
        return ontology

    def node_to_name(self, curie: str, label: Optional[str] = None) -> str:
        """Convert a node to a CamelCase element name.

        :param curie: node identifier; used for label lookup and as fallback
        :param label: pre-fetched label, if the caller already has it
        :return: camel-cased name
        """
        if label is None:
            label = self.adapter.label(curie)
        if label is None:
            logger.warning(f"Node {curie} has no label")
            label = curie
        return camelcase(label)
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.requests_native_auth import TokenAuthenticator
from pendulum import DateTime
# Basic full refresh stream
class WrikeStream(HttpStream, ABC):
    """
    Wrike API Reference: https://developers.wrike.com/overview/
    """

    primary_key = "id"
    url_base = ""

    def __init__(self, wrike_instance: str, **kwargs):
        super().__init__(**kwargs)
        # Per-account host, e.g. "app-us2.wrike.com".
        self.url_base = f"https://{wrike_instance}/api/v4/"

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        # Wrike cursor pagination: echo nextPageToken back until it disappears.
        nextPageToken = response.json().get("nextPageToken")
        if nextPageToken:
            return {"nextPageToken": nextPageToken}
        else:
            return None

    def request_params(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
    ) -> MutableMapping[str, Any]:
        # Only the pagination cursor is sent by default; subclasses add more.
        return next_page_token

    def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
        # All Wrike endpoints wrap their records in a top-level "data" array.
        for record in response.json()["data"]:
            yield record

    def path(self, **kwargs) -> str:
        """
        This one is tricky, the API path is the class name by default. Airbyte will load `url_base`/`classname` by
        default, like https://app-us2.wrike.com/api/v4/tasks if the class name is Tasks
        """
        return self.__class__.__name__.lower()
class Tasks(WrikeStream):
    """Tasks stream; the first request asks the API for an extended field set."""

    def request_params(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
    ) -> MutableMapping[str, Any]:
        if next_page_token:
            return next_page_token
        # First page (no cursor yet): request the extra task fields.
        return {"fields": "[customFields,parentIds,authorIds,responsibleIds,description,briefDescription,superTaskIds]"}
class Customfields(WrikeStream):
    """Custom fields stream (default WrikeStream behaviour; path = /customfields)."""
    pass


class Contacts(WrikeStream):
    """Contacts stream (default WrikeStream behaviour; path = /contacts)."""
    pass


class Workflows(WrikeStream):
    """Workflows stream (default WrikeStream behaviour; path = /workflows)."""
    pass
def to_utc_z(date: DateTime):
    """Format *date* as an ISO-8601 timestamp with a literal ``Z`` suffix."""
    return f"{date:%Y-%m-%dT%H:%M:%SZ}"
class Comments(WrikeStream):
    """Comments stream, sliced into 7-day windows from a configured start date."""

    def __init__(self, start_date: DateTime, **kwargs):
        # Lower bound for the first updatedDate window.
        self._start_date = start_date
        super().__init__(**kwargs)

    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
        """
        Yields a list of the beginning timestamps of each 7 days period between the start date and now,
        as the comments endpoint limits the requests for 7 days intervals.
        """
        start_date = self._start_date
        now = pendulum.now()
        while start_date <= now:
            end_date = start_date + pendulum.duration(days=7)
            # NOTE(review): only the window start is yielded; end_date is used
            # solely to advance the cursor — confirm the API defaults the end
            # of an open-ended updatedDate range to "start + 7 days".
            yield {"start": to_utc_z(start_date)}
            start_date = end_date

    def request_params(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
    ) -> MutableMapping[str, Any]:
        # Pagination cursor wins; otherwise filter by the slice's start time.
        slice_params = {"updatedDate": '{"start":"' + stream_slice["start"] + '"}'}
        return next_page_token or slice_params
class Folders(WrikeStream):
    """Folders stream (default WrikeStream behaviour; path = /folders)."""
    pass
# Source
class SourceWrike(AbstractSource):
    """Airbyte source for the Wrike REST API."""

    def check_connection(self, logger, config) -> Tuple[bool, any]:
        """
        :param config: the user-input config object conforming to the connector's spec.yaml
        :param logger: logger object
        :return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.
        """
        try:
            headers = {
                "Accept": "application/json",
            } | TokenAuthenticator(token=config["access_token"]).get_auth_header()
            resp = requests.get(f"https://{config['wrike_instance']}/api/v4/version", headers=headers)
            resp.raise_for_status()
            return True, None
        except requests.exceptions.RequestException as e:
            # e.response is None for transport-level failures (DNS, refused
            # connection, timeout); parsing it unguarded raised AttributeError.
            if e.response is None:
                return False, str(e)
            error = e.response.json()
            message = error.get("errorDescription") or error.get("error")
            return False, message

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """
        :param config: A Mapping of the user input configuration as defined in the connector spec.
        """
        # Comments default to looking back one week when no start_date is set.
        start_date = pendulum.parse(config.get("start_date")) if config.get("start_date") else pendulum.now().subtract(days=7)
        args = {"authenticator": TokenAuthenticator(token=config["access_token"]), "wrike_instance": config["wrike_instance"]}
        return [
            Tasks(**args),
            Customfields(**args),
            Contacts(**args),
            Workflows(**args),
            Folders(**args),
            Comments(start_date=start_date, **args),
        ]
|
import sys
# Fast stdin reader (competitive-programming idiom); deliberately shadows input().
input = sys.stdin.readline
# N sockets on the power strip, K appliances in usage order.
N, K = map(int, input().split())
multitap = list(map(int, input().split()))
plugs = []   # appliances currently plugged in (at most N)
count = 0    # number of unplug operations performed
for i in range(K):
    # Already plugged in: nothing to do.
    if multitap[i] in plugs:
        continue
    # Free socket available: just plug it in.
    if len(plugs) < N:
        plugs.append(multitap[i])
        continue
    # All sockets busy: find, for each plugged appliance, when it is next
    # needed; evict the one needed furthest in the future (greedy optimal).
    multitap_idxs = []
    hasplug = True
    for j in range(N):
        if plugs[j] in multitap[i:]:
            multitap_idx = multitap[i:].index(plugs[j])
        else:
            # Never needed again: sentinel beyond any real index (K <= 100).
            multitap_idx = 101
            hasplug = False
        multitap_idxs.append(multitap_idx)
        # An appliance that is never used again is the obvious victim; stop early.
        if not hasplug:
            break
    plug_out = multitap_idxs.index(max(multitap_idxs))
    del plugs[plug_out]
    plugs.append(multitap[i])
    count += 1
print(count)
#!/pkg/qct/software/python/3.5.2/bin/python
###################################################################################
# Filename: emailDiskAbusers.py
# Author: cskebriti
# Description: Standalone program, does NOT require setup/config.py
#
''' Takes the argument passed from command line and runs a check to find users
who take up the most disk space, sending notification email to them.
Ex. Usage:
./emailDiskAbusers.py /prj/qca/cores/wifi/lithium/santaclara/dev01/workspaces
'''
###################################################################################
import sys, os
import collections, operator
import subprocess
from pathlib import Path
import smtplib # Std Lib: email server
from email.mime.multipart \
import MIMEMultipart # Std Lib: email msg
from email.mime.text \
import MIMEText # Std Lib: email msg
#####################################################################################################
## Global Variables
#####################################################################################################
# Non-zero suppresses real emails and restricts recipients to the maintainer.
DEBUG = 0
SENDER = "RegressionAutomation@DoNotReply.com"
mainDirectory = Path("/prj/qca/cores/wifi/lithium/santaclara/dev01/workspaces/") #default
# Per-user quota percentage above which a user is reported.
USER_PERCENT_USAGE_THRESHOLD = 75.0
# Whole-disk capacity percentage above which the summary email is sent.
DISK_CAPACITY_THRESHOLD = 85.0
#Named Tuple
DiskUsages = collections.namedtuple('DiskUsages', 'user spaceUsed spaceLimit usedPercent')
#
records = list() #keep a list of type DiskUsages
ToList = list()  # offending users' email addresses, filled by getDiskAbusers()
CcList = ["c_ckebri@qti.qualcomm.com", "tpillai@qti.qualcomm.com", "lithium.qdisk@qca.qualcomm.com"]
##
# HTML email skeleton: header + table heading; rows are appended from
# HtmlTableTemplate and the message is closed with Signature.
HtmlBody = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Excel To HTML using codebeautify.org</title>
</head>
<body>
<style type=\"text/css\">
.one {border-collapse:collapse;table-layout:fixed;COLOR: black; width: 1000px;word-wrap: break-word;}
.b { border-style:solid; border-width:3px;border-color:#333333;padding:10px;width:10px;background-color:#2E64FE; text-align:center;COLOR: black;}
.c { border-style:solid; border-width:3px;border-color:#333333;text-align: center;padding:10px;background-color:#ffffff;width:10px;COLOR: black;}
.d { border-style:solid; border-width:3px;border-color:#333333;text-align: left;padding:10px;background-color:#79d379;width:10px;COLOR: black;}
.red { border-style:solid; border-width:3px;border-color:#333333;text-align: left; padding:10px;background-color:#ff704d;width:10px;COLOR: black;}
.yellow { border-style:solid; border-width:3px;border-color:#333333;text-align: center;padding:10px;background-color:#ffff80;width:10px;COLOR: black;}
.green { border-style:solid; border-width:3px;border-color:#333333;text-align: left;padding:10px;background-color:#66ff66;width:10px;COLOR: black;}
.grey { border-style:solid; border-width:3px;border-color:#333333;text-align: left;padding:10px;background-color:#e0e0d1;width:10px;COLOR: black;}
</style>
<H5>Hello,
<br/>Please be aware that other engineers share the same disk as you. As a consideration, please utilize this time now to remove any unused files in
your workspace to free disk space for the group.
<br/>
<br/>
</H5>
<table class=\"one\">
<tr>
<th class=\"b\">USER</th>
<th class=\"b\">SPACE USED MB</th>
<th class=\"b\">SPACE LIMIT MB</th>
<th class=\"b\">USED PERCENT</th>
</tr>
'''
# One table row per reported user; filled via str.format.
HtmlTableTemplate = '''
<tr>
<td class=\"c\">{user}</td>
<td class=\"c\">{spaceUsed}</td>
<td class=\"c\">{spaceLimit}</td>
<td class=\"c\">{usedPercent}</td>
</tr>
'''
# Closing markup appended after the last table row.
Signature = '''
</table>
<br/>
<br/>Best Regards,
<br/>Automation Team
</body>
</html>
'''
#####################################################################################################
# Function Definitions:
#####################################################################################################
#################################
def getDiskAbusers():
    """Run the site quota tool for mainDirectory and collect every user above
    USER_PERCENT_USAGE_THRESHOLD into the module-level `records`/`ToList`."""
    global records,ToList
    cmd = "/pkg/icetools/bin/quota.eng -u all -d {}".format(mainDirectory)
    output = (subprocess.check_output(cmd, shell=True, universal_newlines=True)).split('\n')
    for line in output:
        try:
            # Expected 8-column quota line; anything else raises ValueError below.
            itype,user,spaceUsed,spaceLimit,usedPercent,filesUsed,filesLimit,fUsedPercent = line.split()
            #import pdb; pdb.set_trace()
            if ( (itype == 'user') and (float(usedPercent) > USER_PERCENT_USAGE_THRESHOLD) ):
                r = DiskUsages(user,float(spaceUsed),float(spaceLimit),float(usedPercent))
                # Skip service/numeric accounts: only purely alphabetic usernames.
                if user.isalpha():
                    records.append(r)
                    ToList.append(user+'@qti.qualcomm.com')
        except ValueError:
            # Header lines / malformed rows do not split into 8 fields; ignore.
            continue
#################################
def createMsgBody(user, spaceUsed, spaceLimit, usedPercent):
    """Build the per-user HTML notification: header + one table row + signature."""
    row = HtmlTableTemplate.format(
        user=user, spaceUsed=spaceUsed, spaceLimit=spaceLimit, usedPercent=usedPercent)
    #print (HtmlMsg + "\n\n")
    return HtmlBody + row + Signature
#################################
def emailUser(record):
    """Send a single-user disk-usage warning email via the local SMTP server.

    :param record: DiskUsages namedtuple describing the offending user
    """
    assert(isinstance(record,DiskUsages)),"Internal Error"
    ##
    userEmail = record.user + "@qti.qualcomm.com" #, "tpillai@qti.qualcomm.com"
    # DEBUG builds only mail the maintainer.
    recipients = [userEmail, "c_ckebri@qti.qualcomm.com"] \
        if not DEBUG else ["c_ckebri@qti.qualcomm.com"] #list
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "HIGH Disk Usage Warning: '{user}' on {idir}".format(
        user=record.user,idir='/'.join(mainDirectory.parts[-3:]))
    msg['From'] = SENDER
    msg['To'] = ', '.join(recipients) #type string
    # Generate Html Message
    messageBody = createMsgBody(record.user,record.spaceUsed,record.spaceLimit,record.usedPercent)
    text = ""
    html = messageBody
    # Record the MIME types of both parts - text/plain and text/html
    part1 = MIMEText(text, 'plain')
    part2 = MIMEText(html, 'html')
    # Attach parts into message container.
    # According to RFC 2046, the last part of a multipart message, in this case
    # the HTML message, is best and preferred.
    msg.attach(part1)
    msg.attach(part2)
    # Send the message via local SMTP server.
    s = smtplib.SMTP('localhost')
    # sendmail function takes 3 arguments: sender's address, recipient's address
    # and message to send - here it is sent as one string.
    try:
        print ("\tSending email...")
        # sendmail returns a dict of refused recipients; empty means success.
        myDict = s.sendmail(SENDER, recipients, msg.as_string())
        if not myDict: print ("\tEmail sent!")
        else: print("Something unexpected happened while sending email")
        #else: import pdb; pdb.set_trace()
    except Exception as e:
        print ("Unexpected error sending mail:", e)
    finally:
        s.quit()
#################################
def dispatchEmails():
    """Email every recorded disk abuser individually (no-op when DEBUG)."""
    if DEBUG:
        #emailUser(records[0])
        pass
    else:
        for record in records:
            emailUser(record)
    ##
    print("\nNumber of records:", len(records))
#################################
def checkInputArgs():
    """Set the module-level mainDirectory from argv[1], or keep the default
    (with a usage hint) when no argument was given."""
    global mainDirectory
    if len(sys.argv) <= 1:
        print ("Usage: {} <DIRECTORY>".format(sys.argv[0]))
        print ("using default directory:", mainDirectory)
        # raise SystemExit
    else:
        mainDirectory = Path(sys.argv[1])
        # Fail fast on typos before running quota/df against the path.
        assert(mainDirectory.exists()),"Path '{}' does not exist".format(mainDirectory)
#################################
def createSummary():
    """Render all collected records, largest consumers first, as one HTML page."""
    ordered = sorted(records,
                     key=operator.attrgetter('spaceUsed', 'usedPercent', 'user'),
                     reverse=True)
    rows = "".join(
        HtmlTableTemplate.format(user=row.user,
                                 spaceUsed=row.spaceUsed,
                                 spaceLimit=row.spaceLimit,
                                 usedPercent=row.usedPercent)
        for row in ordered)
    ##
    return HtmlBody + rows + Signature
#################################
def emailSummary(SummaryMsg):
    """Send the aggregate disk-usage summary to all abusers plus the CC list.

    :param SummaryMsg: pre-rendered HTML body (from createSummary())
    """
    ##
    if not DEBUG:
        recipients = ToList + CcList
    else: #debug
        recipients = ["c_ckebri@qti.qualcomm.com"] #list
        print (ToList)
    #For subject of message
    diskCapacity = getDiskCapacity()
    idir = '/'.join(mainDirectory.parts[-3:])
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "HIGH Disk Usage Summary on {0} ({1}% capacity)".format(
        idir, diskCapacity)
    msg['From'] = SENDER
    msg['To'] = ', '.join(recipients) #type string
    # Generate Html Message
    messageBody = SummaryMsg
    text = ""
    html = messageBody
    # Record the MIME types of both parts - text/plain and text/html
    part1 = MIMEText(text, 'plain')
    part2 = MIMEText(html, 'html')
    # Attach parts into message container.
    # According to RFC 2046, the last part of a multipart message, in this case
    # the HTML message, is best and preferred.
    msg.attach(part1)
    msg.attach(part2)
    # Send the message via local SMTP server.
    s = smtplib.SMTP('localhost')
    # sendmail function takes 3 arguments: sender's address, recipient's address
    # and message to send - here it is sent as one string.
    try:
        print ("\tSending email...")
        # sendmail returns a dict of refused recipients; empty means success.
        myDict = s.sendmail(SENDER, recipients, msg.as_string())
        if not myDict: print ("\tEmail sent!")
        else: print("Something unexpected happened while sending email")
        #else: import pdb; pdb.set_trace()
    except Exception as e:
        print ("Unexpected error sending mail:", e)
    finally:
        s.quit()
#################################
def getDiskCapacity():
    """Return the used-capacity percentage of the filesystem holding
    mainDirectory, as a float.

    NOTE(review): parses `df --si` through xargs/tr/cut assuming the usage
    figure is whitespace field 12 and ends in '%'. This is fragile against
    df column-layout differences — confirm on the target hosts.
    """
    cmd = "df --si {0} | xargs | tr -s ' ' | cut -d ' ' -f 12".format(mainDirectory)
    output = float(subprocess.check_output(cmd, shell=True, universal_newlines=True).split('%')[0])
    return output
#################################
def checkDiskCapacity_bool():
    """Return True when the disk is above DISK_CAPACITY_THRESHOLD percent full."""
    # Direct comparison replaces the redundant if/else returning True/False.
    return getDiskCapacity() > DISK_CAPACITY_THRESHOLD
#####################################################################################################
# Main: Start of Program #
#####################################################################################################
# Quirky guard: matches because '__main__' contains the substring 'main'
# (also true for a module literally named 'main').
if 'main' in __name__: print ("Start of Program")
##
checkInputArgs()
#
getDiskAbusers()
#
# Only nag when the disk as a whole is above the capacity threshold.
if checkDiskCapacity_bool():
    #dispatchEmails() #thangam said dont run this
    emailSummary(createSummary())
#
#####################################################################################################
|
# wbite - pokazać
import numpy as np
# #- pewny pełny
# .- pewny pusty
# ?- nierozpatrzony
def all_arrangements(len_row, ch):
    """Yield every placement of the run-length description ``ch`` into a line
    of ``len_row`` cells, as strings of '#' (full) and '.' (empty)."""
    def place(cells, runs):
        # Recursively place `runs`, each run followed by one mandatory gap.
        if not runs:
            yield '.' * cells
            return
        head, tail = runs[0], runs[1:]
        slack = cells - sum(runs) - len(runs) + 1
        for offset in range(slack):
            prefix = '.' * offset + '#' * head + '.'
            for rest in place(cells - head - offset - 1, tail):
                yield prefix + rest
    # Work on a line one cell longer, then drop the trailing sentinel gap.
    for padded in place(len_row + 1, ch):
        yield padded[:-1]
def legal(arr, row):
    """True when candidate `arr` contradicts no already-decided cell of `row`
    ('?' cells in `row` match anything)."""
    for i, known in enumerate(row):
        if known == '.' and arr[i] == '#':
            return False
        if known == '#' and arr[i] == '.':
            return False
    return True
def step(image, rows, cols):
    """One propagation pass: for every row then every column, intersect all
    legal arrangements and fix cells that agree across all of them.
    Mutates and returns `image`; prints intermediate pictures via draw()."""
    # rows
    for row_num, (row, row_ch) in enumerate(zip(image, rows)):
        len_row = len(row)
        # all_full[x]/all_empty[x]: cell x is '#'/'.' in EVERY legal arrangement.
        all_full = [True for _ in range(len_row)]
        all_empty = [True for _ in range(len_row)]
        for arr in all_arrangements(len_row, row_ch):
            if legal(arr, row):
                all_full = [all_full[x] and arr[x]=='#' for x in range(len_row)]
                all_empty = [all_empty[x] and arr[x]=='.' for x in range(len_row)]
        for x in range(len_row):
            if all_full[x]:
                image[row_num][x] = '#'
            elif all_empty[x]:
                image[row_num][x] = '.'
    draw(image)
    #cols
    # Same propagation on columns, via a transposed copy; writes go back
    # into the original image.
    imageT = list(map(list, zip(*image))) #transpose
    for col_num, (col, col_ch) in enumerate(zip(imageT, cols)):
        len_col = len(col)
        all_full = [True for _ in range(len_col)]
        all_empty = [True for _ in range(len_col)]
        for arr in all_arrangements(len_col, col_ch):
            if legal(arr, col):
                all_full = [all_full[y] and arr[y]=='#' for y in range(len_col)]
                all_empty = [all_empty[y] and arr[y]=='.' for y in range(len_col)]
        for y in range(len_col):
            if all_full[y]:
                image[y][col_num] = '#'
            elif all_empty[y]:
                image[y][col_num] = '.'
    draw(image)
    return image
def nonogram(rows, cols):
    """Solve by repeated constraint propagation until no '?' cell remains."""
    height, width = len(rows), len(cols)
    image = [['?'] * width for _ in range(height)]
    while any('?' in line for line in image):
        image = step(image, rows, cols)
    return image
def draw(image):
    """Print the picture, one row per line, followed by a blank separator line."""
    for line in image:
        print(*line, sep='')
    print()
with open("zad_input.txt", "r") as in_f:
    with open("zad_output.txt", "w") as out_f:
        # First line: picture dimensions; remaining lines: run descriptions.
        w, h = [int(x) for x in next(in_f).split()]
        r = [[int(el) for el in line.split()] for line in in_f]
        # NOTE(review): splits the descriptions at index w, but nonogram()
        # treats the first list as rows (height w?) — works when the input
        # puts w row-descriptions first; confirm against the task format.
        rows, cols = r[:w], r[w:]
        image = nonogram(rows, cols)
        # Write the solved picture, one row per line.
        for y in range(len(image)):
            for x in range(len(image[y])):
                out_f.write(image[y][x])
            out_f.write("\n")
|
"""
This module deals with authorizing users for access via HTTP simple
authentication. When the password is received it is hashed using SHA-256 and
compared against the hash in the database for that user."""
from functools import wraps
import hashlib
from flask import request, Response
from chkphrase.models import User
import chkphrase.database as db
def check_auth(username, password):
    """This function is called to check if a username/password combination is
    valid."""
    session = db.db_session()
    result = session.query(User).filter(User.name == username)
    # Exactly one matching user required; zero or duplicates fail closed.
    if result.count() != 1:
        session.rollback()
        session.close()
        return False
    else:
        truePass = result[0].password
        # NOTE(review): hashlib.sha256 requires bytes under Python 3 — this
        # line only works as-is on Python 2 (str passwords); confirm runtime.
        tryPass = hashlib.sha256(password).hexdigest()
        session.close()
        return truePass == tryPass
def authenticate():
    """Sends a 401 response that enables basic auth"""
    body = ('Could not verify your access level for that URL.\n'
            'You have to login with proper credentials')
    challenge = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response(body, 401, challenge)
def requires_auth(f):
    """Decorator: reject the request with a 401 challenge unless the HTTP
    Basic credentials pass check_auth()."""
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or not check_auth(auth.username, auth.password):
            return authenticate()
        return f(*args, **kwargs)
    return decorated
|
import vtk
import numpy as np
import pydicom as dicom
import platform
import os
import time
import vtk.util.numpy_support as vtknp
import glob
import re
minValGr = 0.0    # lower bound of the grey-value scaling range
maxValGr = 255.0  # upper bound of the grey-value scaling range
diffValGr = maxValGr - minValGr  # grey range used when windowing DICOM data
countList = []  # per-slice frame counters (filled in the rendering setup below)
count = -1      # global frame counter
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def close_window(iren):
    """Tear down the render window owned by *iren* and stop its event loop."""
    window = iren.GetRenderWindow()
    window.Finalize()
    iren.TerminateApp()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def showAxes():
    """Attach an orientation-axes marker widget to the interactor.

    NOTE(review): relies on the module-level `iren` defined further down in
    this script.  Returns the (actor, widget) pair so callers can keep
    references alive.
    """
    axes_actor = vtk.vtkAxesActor()
    marker_widget = vtk.vtkOrientationMarkerWidget()
    marker_widget.SetOrientationMarker(axes_actor)
    marker_widget.SetInteractor(iren)
    marker_widget.SetEnabled(1)
    marker_widget.InteractiveOn()
    return (axes_actor, marker_widget)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def readFilesToDicomArray(path, listOfSeries):
    """Read all DICOM files of the given series folders and convert them to
    VTK float arrays.

    Files are grouped by their image position + orientation (one group per
    slice location), sorted by instance number (time frames), rescaled and
    window/levelled into the global grey range, then converted to VTK.

    Returns a 5-tuple of per-slice lists:
    (VTK data arrays per frame, pixel dims, pixel spacings, frame counts,
    patient-to-world vtkMatrix4x4 matrices).
    """
    listOfVTKDataLists = []
    listOfPixelDims = []
    listOfPixelSpacings = []
    listOfMaxCounts = []
    listOfMatrices = []
    dictFilesDCM = {}
    for series in listOfSeries:  # for each series folder
        for dirName, subdirList, fileList in os.walk(path + series):
            for filename in fileList:
                if ".dcm" in filename.lower():
                    actDs = dicom.read_file(os.path.join(dirName, filename))
                    # Group key: slice location = position + orientation.
                    pos = "{} {}".format(actDs.ImagePositionPatient, actDs.ImageOrientationPatient)
                    if (pos not in dictFilesDCM):
                        dictFilesDCM[pos] = {}
                    dictFilesDCM[pos][actDs.InstanceNumber] = os.path.join(dirName, filename)
    for actPos, actDict in dictFilesDCM.items():  # for each slice
        sortEntries = sorted(actDict)  # instance numbers in temporal order
        first = True
        actIndex = 0
        for actFile in sortEntries:  # for each single image (time frame)
            actDicom = dicom.read_file(actDict[actFile])
            if first:  # collect metadata + allocate ArrayDicom once per slice
                first = False
                winCen = actDicom.WindowCenter
                winWidth = actDicom.WindowWidth
                resIntercept = actDicom.RescaleIntercept
                resSlope = actDicom.RescaleSlope
                ConstPixelDims = (len(sortEntries),
                                  int(actDicom.Rows),
                                  int(actDicom.Columns))
                # Third spacing fixed to 1.0 (SliceThickness deliberately unused).
                ConstPixelSpacing = (float(actDicom.PixelSpacing[0]),
                                     float(actDicom.PixelSpacing[1]),
                                     1.0)
                                     #float(actDicom.SliceThickness))
                position = actDicom.ImagePositionPatient
                orientation = actDicom.ImageOrientationPatient
                # Patient frame: x/y from the DICOM orientation vectors,
                # z as their cross product; packed with the position into a
                # 4x4 transform.
                xdir = orientation[0:3]
                ydir = orientation[3:6]
                zdir = [0.0, 0.0, 0.0]
                vtk.vtkMath.Cross(xdir, ydir, zdir)
                matrix = vtk.vtkMatrix4x4()
                for i in range(3):
                    matrix.SetElement(i, 0, xdir[i])
                    matrix.SetElement(i, 1, ydir[i])
                    matrix.SetElement(i, 2, zdir[i])
                    matrix.SetElement(i, 3, position[i])
                ArrayDicom = np.zeros(ConstPixelDims, dtype = float)
            ArrayDicom[actIndex, :, :] = actDicom.pixel_array
            actIndex += 1
        # Apply DICOM rescale slope/intercept and window/level, clamped to
        # [minValGr, maxValGr], in place.
        np.clip((resSlope * diffValGr / winWidth) * ArrayDicom + (((resIntercept - winCen) / winWidth + 0.5) * diffValGr + minValGr),
                minValGr, maxValGr, out = ArrayDicom)
        VTK_dataList = []
        for actImage in range(len(ArrayDicom)):
            VTK_dataList.append(vtknp.numpy_to_vtk(ArrayDicom[actImage].ravel(),deep=True, array_type=vtk.VTK_FLOAT))
        listOfVTKDataLists.append(VTK_dataList)
        listOfMaxCounts.append(len(sortEntries))
        listOfPixelDims.append(ConstPixelDims)
        listOfPixelSpacings.append(ConstPixelSpacing)
        listOfMatrices.append(matrix)
    return (listOfVTKDataLists, listOfPixelDims, listOfPixelSpacings,
            listOfMaxCounts, listOfMatrices)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def readDynpt():
    """Read the simulation displacement file <pathIn>/simulation/x.dynpt.

    The first 1024 bytes hold a text header of "key:value" pairs; the rest
    is a flat float32 array reshaped to (t, x, 3).  Values are converted
    from micrometres to millimetres when the header declares "um" units.

    Returns (header dict, displacement array).
    NOTE(review): relies on the module-level `pathIn` defined below.
    """
    # `with` guarantees the handle is closed even if parsing fails
    # (the original leaked the file object).
    with open(pathIn + "simulation/x.dynpt", 'rb') as f:
        header = dict(re.findall(r"(\w*):(\w*)", f.read(1024).decode('utf-8')))
        data = np.fromfile(f, dtype=np.float32)
    shapeTest = [int(header['t']), int(header['x']), 3]
    if header['unites_x'] == "um":
        data /= 1000
    return (header, data.reshape(shapeTest))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def getModelPosition():
    """Return [x, y, min z] over all DICOMs under <pathIn>/segmentation.

    Walks the segmentation folder and tracks the smallest z coordinate of
    ImagePositionPatient.

    NOTE(review): x and y come from whichever file happens to be read
    *last*, not from the file holding the minimal z — only correct if all
    slices share the same x/y; confirm.
    NOTE(review): raises NameError if no .dcm file is found (actDs unbound).
    """
    minZPos = float("inf")
    for dirName, subdirList, fileList in os.walk(pathIn + "segmentation"):
        for filename in fileList:
            if ".dcm" in filename.lower():
                actDs = dicom.read_file(os.path.join(dirName, filename))
                actZPos = actDs.ImagePositionPatient[2]
                if actZPos < minZPos:
                    minZPos = actZPos
    return [actDs.ImagePositionPatient[0], actDs.ImagePositionPatient[1], minZPos]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
##########################################
##########################################
# Platform-dependent input path (Windows workstation vs. Linux desktop).
if platform.platform()[0] == "W":
    print("OS: win")
    pathIn = "c:/users/vch/desktop/Bredies/CASE01/"
else:
    print("OS: not win")
    pathIn = "/home/horakv/Desktop/Bredies/CASE01/"
##########################################
##########################################
# DICOM series folders (relative to pathIn) to load.
seriesList = []
seriesList.append("segmentation/Visit_1___MRI_Data_and_Images_14d/B0553_90_MultiLabel_seg-1.3.6.1.4.1.16787.100.1.2.20170301.9093758712.1300")
###############################################################################
# Rendering setup
###############################################################################
ren = vtk.vtkRenderer()
#ren.SetBackground(0.8, 0.8, 0.8)
ren.SetBackground(1, 1, 1)  # white background
renWin = vtk.vtkRenderWindow()
renWin.SetSize(1000, 1000)
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
###############################################################################
# Read the model mesh structure
###############################################################################
filenames = glob.glob(pathIn + 'mesh/*.vtk')
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(filenames[0])  # only the first mesh file is used
geometryFilter = vtk.vtkGeometryFilter()
geometryFilter.SetInputConnection(reader.GetOutputPort())
geometryFilter.Update()
polydata = geometryFilter.GetOutput()
scalarRange = polydata.GetScalarRange()
actMesh = vtknp.vtk_to_numpy(polydata.GetPoints().GetData())
actMesh /= 1000  # data is given in um instead of mm -> correct in place
###############################################################################
# Prepare displacements and mesh visualisation
###############################################################################
(header, displacements) = readDynpt()
maxCount = len(displacements)  # number of simulation time steps
mMapper = vtk.vtkPolyDataMapper()
mMapper.SetInputData(polydata)
mMapper.SetScalarRange(scalarRange)
mActor = vtk.vtkActor()
mActor.SetMapper(mMapper)
mActor.SetPosition(getModelPosition())
mActor.GetProperty().SetOpacity(0.2)  # semi-transparent mesh overlay
if scalarRange == (0.0, 1.0):
    # No meaningful scalar data: render the mesh in plain red instead.
    mMapper.ScalarVisibilityOff()
    mActor.GetProperty().SetColor(1,0,0)
ren.AddActor(mActor)
# =============================================================================
# (axes, widget) = showAxes()
# =============================================================================
###############################################################################
# Read the DICOMs
###############################################################################
t0 = time.time()
(listOfVTKDataLists, listOfPixelDims, listOfPixelSpacings, listOfMaxCounts,
 listOfMatrices) = readFilesToDicomArray(pathIn, seriesList)
numImages = len(listOfVTKDataLists)
t1 = time.time()
print("Zeit:", t1-t0)
###############################################################################
# Prepare the DICOM slices for visualisation
###############################################################################
# Greyscale lookup table; alpha ramps 5x faster than intensity so dark
# voxels become fully transparent.
lookupTable = vtk.vtkLookupTable()
lookupTable.SetNumberOfTableValues(256)
lookupTable.SetRange(0.0, 255.0)
for j in range(256):
    lookupTable.SetTableValue(j, j/255.0, j/255.0, j/255.0, min(j/255.0*5, 1.0))
lookupTable.Build()
images = []
for actImage in range(numImages):  # for each slice
    countList.append(-1)
    image = vtk.vtkImageData()
    image.SetDimensions(listOfPixelDims[actImage][1], listOfPixelDims[actImage][2], 1)
    image.SetSpacing(listOfPixelSpacings[actImage][0], listOfPixelSpacings[actImage][1], 0.0)
    image.AllocateScalars(vtk.VTK_FLOAT, 1)
    # Start with the first time frame of this slice.
    image.GetPointData().SetScalars(listOfVTKDataLists[actImage][0])
    images.append(image)
    mapTransparency = vtk.vtkImageMapToColors()
    mapTransparency.SetLookupTable(lookupTable)
    mapTransparency.PassAlphaToOutputOn()
    mapTransparency.SetInputData(image)
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputConnection(mapTransparency.GetOutputPort())
    mapper.SetColorModeToDirectScalars()
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetInterpolationToFlat()
    actor.GetProperty().ShadingOff()
    actor.GetProperty().LightingOff()
    # Place the slice in patient coordinates via its DICOM matrix.
    actor.SetUserMatrix(listOfMatrices[actImage])
    ren.AddActor(actor)
###############################################################################
# Remaining rendering & shutdown
###############################################################################
iren.Initialize()
renWin.Render()
print("Start")
iren.Start()
if platform.platform()[0] != "W":
    # Explicit teardown avoids hanging render windows on Linux.
    close_window(iren)
del renWin, iren
|
class Config():
    """Hyper-parameters for attention-based captioning on the RSICD dataset."""
    # dataset paths
    images_folder = './data/RSICD/RSICD_images/'
    annotations_name = './data/RSICD/dataset_rsicd.json'
    # pretrain model config
    pre_train_model = 'mobilenet'
    fix_pretrain_model = False  # freeze the pretrained encoder weights when True
    feature_size = 1280  # pretrain model's feature map number in final layer
    # Attention layer config
    attention_size = 1280
    # LSTM config
    embed_size = 1280
    # NOTE(review): embed_size + feature_size = 2560, not 1280 as the
    # original comment claimed.
    input_size = embed_size + feature_size
    hidden_size = 1280  # 4096
    num_layers = 1
    # training config
    batch_size = 100  # 64
    train_iter = 60001  # 100000
    encoder_learning_rate = 1e-4
    decoder_learning_rate = 1e-4
    save_model_iter = 400  # checkpoint frequency (iterations)
#!/usr/bin/python
# GridDB multi_get demo: builds a row-key predicate per container and
# fetches the selected rows from all containers in a single call.
# Usage: script.py <host> <port> <cluster_name> <username> <password>
import griddb_python as griddb
import sys
factory = griddb.StoreFactory.get_instance()
argv = sys.argv
NumContainer = 10  # containers "container0" .. "container9"
NumRow = 2         # row keys "name0", "name1" requested per container
try:
    gridstore = factory.get_store(host=argv[1], port=int(argv[2]), cluster_name=argv[3], username=argv[4], password=argv[5])
    print("[MultiGet S]")
    predEntry = {}
    for i in range(NumContainer):
        keys = []
        pred = gridstore.create_row_key_predicate(griddb.Type.STRING)
        for j in range(NumRow):
            keys.append("name" + str(j))
        pred.set_distinct_keys(keys)
        predEntry.update({"container" + str(i): pred})
    # One round trip fetches matching rows from every container.
    resultDict = gridstore.multi_get(predEntry)
    for containerName, rows in resultDict.items():
        print(containerName, rows)
    print("[MultiGet E]")
except griddb.GSException as e:
    # Print the full GridDB error stack for diagnosis.
    for i in range(e.get_error_stack_size()):
        print("[", i, "]")
        print(e.get_error_code(i))
        print(e.get_message(i))
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import random
class ImagePool():
    """History buffer of previously generated images (CycleGAN trick).

    While the pool is filling, every query image is stored and passed
    through.  Once full, each new image has a 50% chance of being swapped
    with a random buffered one, which stabilises discriminator training.
    A pool_size of 0 disables buffering entirely.
    """
    def __init__(self, pool_size):
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch, possibly mixing in buffered historical images."""
        if self.pool_size == 0:
            return images  # buffering disabled: pass the batch straight through
        selected = []
        for img in images.data:
            img = torch.unsqueeze(img, 0)
            if self.num_imgs < self.pool_size:
                # Pool not full yet: store the image and pass it through.
                self.num_imgs = self.num_imgs + 1
                self.images.append(img)
                selected.append(img)
            elif random.uniform(0, 1) > 0.5:
                # Swap: emit a random buffered image, keep the new one.
                idx = random.randint(0, self.pool_size - 1)
                old = self.images[idx].clone()
                self.images[idx] = img
                selected.append(old)
            else:
                selected.append(img)
        return Variable(torch.cat(selected, 0))
class ConvBlock(torch.nn.Module):
    """Conv2d -> (optional InstanceNorm2d) -> activation.

    Note: despite the `batch_norm` flag name, the normalisation layer is an
    InstanceNorm2d, as is common in image-to-image translation networks.
    Supported activations: 'relu', 'lrelu', 'tanh', 'no_act'.
    """
    def __init__(self, input_size, output_size, kernel_size=3, stride=2, padding=1, activation='relu', batch_norm=True):
        super(ConvBlock, self).__init__()
        self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding)
        self.batch_norm = batch_norm
        self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        self.relu = torch.nn.ReLU(True)
        self.lrelu = torch.nn.LeakyReLU(0.2, True)
        self.tanh = torch.nn.Tanh()

    def forward(self, x):
        out = self.bn(self.conv(x)) if self.batch_norm else self.conv(x)
        # Dispatch on the configured activation; 'no_act' passes through.
        act_layers = {'relu': self.relu, 'lrelu': self.lrelu, 'tanh': self.tanh}
        if self.activation in act_layers:
            return act_layers[self.activation](out)
        elif self.activation == 'no_act':
            return out
class DeconvBlock(torch.nn.Module):
    """ConvTranspose2d -> (optional InstanceNorm2d) -> activation.

    Mirrors ConvBlock.  Bug fix: the original defined only `self.relu`, so
    constructing the block with activation='lrelu' or 'tanh' crashed in
    forward() with an AttributeError — the missing layers are now created,
    consistent with ConvBlock.
    """
    def __init__(self, input_size, output_size, kernel_size=3, stride=2, padding=1, output_padding=1, activation='relu', batch_norm=True):
        super(DeconvBlock, self).__init__()
        self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, output_padding)
        self.batch_norm = batch_norm
        self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        self.relu = torch.nn.ReLU(True)
        # Previously missing; forward() references both.
        self.lrelu = torch.nn.LeakyReLU(0.2, True)
        self.tanh = torch.nn.Tanh()

    def forward(self, x):
        if self.batch_norm:
            out = self.bn(self.deconv(x))
        else:
            out = self.deconv(x)
        if self.activation == 'relu':
            return self.relu(out)
        elif self.activation == 'lrelu':
            return self.lrelu(out)
        elif self.activation == 'tanh':
            return self.tanh(out)
        elif self.activation == 'no_act':
            return out
class ResnetBlock(torch.nn.Module):
    """Residual block: x + [pad -> conv -> IN -> ReLU -> pad -> conv -> IN](x).

    Bug fix: the original forward() returned the convolution output
    *without* adding the input, i.e. the block had no skip connection even
    though it is used as a ResNet block; forward() now returns the residual
    sum.  The two InstanceNorm2d applications share one (parameter-free)
    module, as in the original.
    """
    def __init__(self, num_filter, kernel_size=3, stride=1, padding=0):
        super(ResnetBlock, self).__init__()
        conv1 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding)
        conv2 = torch.nn.Conv2d(num_filter, num_filter, kernel_size, stride, padding)
        bn = torch.nn.InstanceNorm2d(num_filter)
        relu = torch.nn.ReLU(True)
        pad = torch.nn.ReflectionPad2d(1)
        self.resnet_block = torch.nn.Sequential(
            pad,
            conv1,
            bn,
            relu,
            pad,
            conv2,
            bn
        )

    def forward(self, x):
        # ReflectionPad2d(1) + 3x3 conv preserves H and W, so the residual
        # addition is shape-compatible.
        return x + self.resnet_block(x)
class Generator(torch.nn.Module):
    """CycleGAN-style generator: c7s1 encoder, two stride-2 downsamples, a
    stack of residual blocks, two upsamples, and a c7s1-tanh output conv.

    Args:
        input_dim: channels of the input image.
        num_filter: base channel count of the first conv.
        output_dim: channels of the output image.
        num_resnet: number of residual blocks in the bottleneck.
    """
    def __init__(self, input_dim, num_filter, output_dim, num_resnet):
        super(Generator, self).__init__()
        # Reflection padding for the 7x7 boundary convolutions.
        self.pad = torch.nn.ReflectionPad2d(3)
        # Encoder
        self.conv1 = ConvBlock(input_dim, num_filter, kernel_size=7, stride=1, padding=0)
        self.conv2 = ConvBlock(num_filter, num_filter * 2)
        self.conv3 = ConvBlock(num_filter * 2, num_filter * 4)
        # Resnet blocks
        self.resnet_blocks = []
        for i in range(num_resnet):
            self.resnet_blocks.append(ResnetBlock(num_filter * 4))
        self.resnet_blocks = torch.nn.Sequential(*self.resnet_blocks)
        # Decoder
        self.deconv1 = DeconvBlock(num_filter * 4, num_filter * 2)
        self.deconv2 = DeconvBlock(num_filter * 2, num_filter)
        self.deconv3 = ConvBlock(num_filter, output_dim, kernel_size=7, stride=1, padding=0, activation='tanh', batch_norm=False)

    def forward(self, x):
        # Encoder
        enc1 = self.conv1(self.pad(x))
        enc2 = self.conv2(enc1)
        enc3 = self.conv3(enc2)
        # Resnet bottleneck
        res = self.resnet_blocks(enc3)
        # Decoder
        dec1 = self.deconv1(res)
        dec2 = self.deconv2(dec1)
        out = self.deconv3(self.pad(dec2))
        return out

    def normal_weight_init(self, mean=0.0, std=0.02):
        """Initialise all conv/deconv weights from N(mean, std).

        Bug fix: the original iterated self.children(), which yields the
        resnet Sequential container rather than the ResnetBlock instances
        (and ResnetBlock exposes no `.conv` attribute anyway), so the
        residual blocks were silently left with default initialisation.
        Walking self.modules() reaches every submodule.
        """
        for m in self.modules():
            if isinstance(m, ConvBlock):
                torch.nn.init.normal_(m.conv.weight, mean, std)
            elif isinstance(m, DeconvBlock):
                torch.nn.init.normal_(m.deconv.weight, mean, std)
            elif isinstance(m, ResnetBlock):
                for layer in m.resnet_block:
                    if isinstance(layer, torch.nn.Conv2d):
                        torch.nn.init.normal_(layer.weight, mean, std)
                        torch.nn.init.constant_(layer.bias, 0)
class Discriminator(torch.nn.Module):
    """PatchGAN-style discriminator built from five ConvBlocks.

    Outputs a spatial map of realness scores; the last block applies no
    activation, so the caller supplies the loss-side nonlinearity.
    """
    def __init__(self, input_dim, num_filter, output_dim):
        super(Discriminator, self).__init__()
        conv1 = ConvBlock(input_dim, num_filter, kernel_size=4, stride=2, padding=1, activation='lrelu', batch_norm=False)
        conv2 = ConvBlock(num_filter, num_filter * 2, kernel_size=4, stride=2, padding=1, activation='lrelu')
        conv3 = ConvBlock(num_filter * 2, num_filter * 4, kernel_size=4, stride=2, padding=1, activation='lrelu')
        conv4 = ConvBlock(num_filter * 4, num_filter * 8, kernel_size=4, stride=1, padding=1, activation='lrelu')
        conv5 = ConvBlock(num_filter * 8, output_dim, kernel_size=4, stride=1, padding=1, activation='no_act', batch_norm=False)
        self.conv_blocks = torch.nn.Sequential(
            conv1,
            conv2,
            conv3,
            conv4,
            conv5
        )

    def forward(self, x):
        return self.conv_blocks(x)

    def normal_weight_init(self, mean=0.0, std=0.02):
        """Initialise every ConvBlock's conv weights from N(mean, std).

        Bug fix: the original iterated self.children(), which only yields
        the Sequential container (never a ConvBlock), so no weight was ever
        initialised; self.modules() reaches the nested blocks.
        """
        for m in self.modules():
            if isinstance(m, ConvBlock):
                torch.nn.init.normal_(m.conv.weight.data, mean, std)
|
# Program to gather your followers and people who you follow, difference them and find out the people who you follow but don't follow you back.
# Youtube Tutorial link: https://www.youtube.com/watch?v=BGU2X5lrz9M
# Requirements:
# - Gecko
# - Selenium
# - Python 3.6 or above
""" Importing the required headers """
# Webdriver is for navigation through the webpages. Library with the common keys (used in keyboard shortcuts) such as enter etc
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
# System libraries that will come in handy
import time
import random
import sys
from pynput.keyboard import Key, Controller
""" The Instagram class with all the functionality """
class Instagram:
    """Drives a private-mode Firefox session to log into Instagram and
    scrape the logged-in user's follower and following handle lists.

    NOTE(review): all hard-coded XPaths target Instagram's 2019-era DOM and
    break whenever the site layout changes.
    """
    # Initialization of the object at the time of creation
    def __init__(self, username, password):
        # Initialize the username and password for the user
        self.username = username
        self.password = password
        # Private-browsing profile so no cookies/session state persists.
        profile = webdriver.FirefoxProfile()
        profile.set_preference("browser.privatebrowsing.autostart", True)
        # profile.set_preference("browser.fullscreen.autohide", True)
        self.driver = webdriver.Firefox(firefox_profile = profile)
    # To close the webdriver in case of any issues
    def closeBrowser(self):
        self.driver.close()
    # To login to the Instagram account
    def login(self):
        """Log in with the stored credentials.

        Returns True on success, False when any step raises or the browser
        is still on a login URL afterwards.
        """
        # Set the web driver
        driver = self.driver
        driver.maximize_window()
        driver.implicitly_wait(5)
        try:
            # Get function to make a https request. Go to the homepage of Instagram (wait for it to load)
            driver.get("https://www.instagram.com/")
            time.sleep(3)
            # Login using the username and password (time delays added to let the page load properly)
            login_button = driver.find_element_by_xpath("//a[@href='/accounts/login/?source=auth_switcher']")
            login_button.click()
            time.sleep(3)
            user_name_elem = driver.find_element_by_xpath("//input[@name='username']")
            user_name_elem.clear()
            user_name_elem.send_keys(self.username)
            passworword_elem = driver.find_element_by_xpath("//input[@name='password']")
            passworword_elem.clear()
            passworword_elem.send_keys(self.password)
            passworword_elem.send_keys(Keys.RETURN)
            time.sleep(3)
        except:
            # Any failure while driving the page counts as a failed login.
            return False
        # Still sitting on a login URL means the credentials were rejected.
        url = driver.current_url
        if("login" in url):
            return False
        return True
    def goToHomePage(self):
        """Open the logged-in user's own profile page."""
        driver = self.driver
        profile_button = driver.find_element_by_xpath("//a[@href='/"+self.username+"/']")
        profile_button.click()
        time.sleep(2)
        return
    # Returns a list of followers
    def getFollowers(self):
        """Scroll through the followers dialog and return the handle list."""
        driver = self.driver
        print("\n--------------------------- Getting Followers ---------------------------")
        # Followers button Xpath: /html/body/div[1]/section/main/div/header/section/ul/li[2]/a/span
        number_of_followers = (driver.find_element_by_xpath("/html/body/div[1]/section/main/div/header/section/ul/li[2]/a/span")).text
        print("You have", number_of_followers, "followers!\n")
        followers_button = driver.find_element_by_xpath("//a[@href='/"+self.username+"/followers/']")
        followers_button.click()
        time.sleep(2)
        # Scroll the lazily-loaded list with OS-level key presses (pynput);
        # 1.25x presses per follower to make sure everything is fetched.
        keyboard = Controller()
        keyboard.press(Key.tab)
        keyboard.press(Key.tab)
        for i in range(int(1.25 * int(number_of_followers.strip()))):
            keyboard.press(Key.down)
            time.sleep(0.1)
        # List Xpath: /html/body/div[4]/div/div[2]/ul/div
        # Name1 Xpath: /html/body/div[4]/div/div[2]/ul/div/li[1]/div/div[2]/div[2]/div
        # Name2 Xpath: /html/body/div[4]/div/div[2]/ul/div/li[2]/div/div[2]/div[2]/div
        # Name 'n' Xpath: /html/body/div[4]/div/div[2]/ul/div/li['n']/div/div[1]/div[2]/div[2]
        followers = []
        x = 0  # index of the XPath layout variant that matched last time
        for i in range(1, int(number_of_followers.strip()) + 1):
            """ Paths that point to the user name """
            # path1 = "/html/body/div[4]/div/div[2]/ul/div/li[" + str(i) + "]/div/div[2]/div[2]/div"
            # path2 = "/html/body/div[4]/div/div[2]/ul/div/li[" + str(i) + "]/div/div[1]/div[2]/div[2]"
            """ Paths that point to the user handle """
            path1 = "/html/body/div[4]/div/div[2]/ul/div/li[" + str(i) + "]/div/div[1]/div[2]/div[1]/a"
            path2 = "/html/body/div[4]/div/div[2]/ul/div/li[" + str(i) + "]/div/div[2]/div[1]/div/div/a"
            paths = [path1, path2]
            #print(path)
            name = ""
            try:
                element = WebDriverWait(driver,5).until(
                    EC.presence_of_element_located((By.XPATH, paths[x]))
                )
                name = element.text
            except:
                # Current variant timed out: toggle to the other layout.
                if(x == 0):
                    x = 1
                else:
                    x = 0
                element = WebDriverWait(driver,5).until(
                    EC.presence_of_element_located((By.XPATH, paths[x]))
                )
                name = element.text
            print(name)
            followers.append(name)
        # Close the followers dialog.
        keyboard.press(Key.backspace)
        return followers
    # Returns a list of accounts that you are following
    def getFollowingAccounts(self):
        """Scroll through the following dialog and return the handle list."""
        driver = self.driver
        print("\n--------------------------- Getting Following Accounts ---------------------------")
        # Number of people you are following button Xpath: /html/body/div[1]/section/main/div/header/section/ul/li[3]/a/span
        number_of_people_following = (driver.find_element_by_xpath("/html/body/div[1]/section/main/div/header/section/ul/li[3]/a/span")).text
        print("You are following", number_of_people_following, "accounts!\n")
        followers_button = driver.find_element_by_xpath("//a[@href='/"+self.username+"/following/']")
        followers_button.click()
        time.sleep(2)
        # This dialog needs four tab presses to reach the scrollable list.
        keyboard = Controller()
        keyboard.press(Key.tab)
        keyboard.press(Key.tab)
        time.sleep(0.1)
        keyboard.press(Key.tab)
        time.sleep(0.1)
        keyboard.press(Key.tab)
        for i in range(int(1.25 * int(number_of_people_following.strip()))):
            keyboard.press(Key.down)
            time.sleep(0.1)
        # List Xpath: /html/body/div[4]/div/div[2]/ul/div
        # Name1 Xpath: /html/body/div[4]/div/div[2]/ul/div/li[1]/div/div[1]/div[2]/div[2]
        # Name2 Xpath: /html/body/div[4]/div/div[2]/ul/div/li[2]/div/div[2]/div[2]/div
        # Name 'n' Xpath: /html/body/div[4]/div/div[2]/ul/div/li['n']/div/div[1]/div[2]/div[2]
        following = []
        x = 0  # index of the XPath layout variant that matched last time
        for i in range(1, int(number_of_people_following.strip()) + 1):
            """ Paths that point to the user name """
            # path1 = "/html/body/div[4]/div/div[2]/ul/div/li[" + str(i) + "]/div/div[2]/div[2]/div"
            # path2 = "/html/body/div[4]/div/div[2]/ul/div/li[" + str(i) + "]/div/div[1]/div[2]/div[2]"
            """ Paths that point to the user handle """
            path1 = "/html/body/div[4]/div/div[2]/ul/div/li[" + str(i) + "]/div/div[1]/div[2]/div[1]/a"
            path2 = "/html/body/div[4]/div/div[2]/ul/div/li[" + str(i) + "]/div/div[2]/div[1]/div/div/a"
            paths = [path1, path2]
            #print(path)
            name = ""
            try:
                element = WebDriverWait(driver,5).until(
                    EC.presence_of_element_located((By.XPATH, paths[x]))
                )
                name = element.text
            except:
                # Current variant timed out: toggle to the other layout.
                if(x == 0):
                    x = 1
                else:
                    x = 0
                element = WebDriverWait(driver,5).until(
                    EC.presence_of_element_located((By.XPATH, paths[x]))
                )
                name = element.text
            print(name)
            following.append(name)
        # Close the following dialog.
        keyboard.press(Key.backspace)
        return following
if __name__ == "__main__":
    print("\n--------------------------- Welcome ---------------------------")
    # Add your username and password
    username = input("Enter your name: ")
    password = input("Enter your password: ")
    # Login to Instagram
    ig = Instagram(username, password)
    success = ig.login()
    print("\nAttempting to Login...")
    print("-------------------------------------------")
    if(success):
        print("Logged in successfully")
        print("-------------------------------------------\n")
    else:
        # Failed login: clean up the browser and bail out.
        print("There was a Error during Login")
        print("-------------------------------------------\n")
        print("Closing Browser Session")
        ig.closeBrowser()
        exit(1)
    # Getting the required data
    ig.goToHomePage()
    followers = ig.getFollowers()
    followingAccounts = set(ig.getFollowingAccounts())
    ig.closeBrowser()
    # The important part is the difference (followingAccounts - Followers), i.e who you are following but are not following you
    for follower in followers:
        if(follower in followingAccounts):
            followingAccounts.remove(follower)
    print("\n--------------------------- Result ---------------------------")
    print("Number of accounts not following you back are: ", len(followingAccounts))
    print("The people you need to stop following are:")
    for i in followingAccounts:
        print(i)
    exit(0)
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class velo_pop_pipeline(models.Model):
    """Simple name/description lookup model backing the POP pipeline menu."""
    _name = 'pop.pipeline.test'
    _description = 'Model Menu POP Pipeline'
    _rec_name = 'name'
    name = fields.Char(string="Name")
    description = fields.Char(string="Description")
class velo_pop_pipeline_master(models.Model):
    """Master record for a POP (point of presence) acquisition pipeline.

    State flow (driven by the action_* button handlers):
    'a' Market Development -> 'b' Site Fulfillment -> 'c' Site Acquisition.
    """
    _name = 'pop.pipeline.master'
    _description = 'POP Pipeline Master'
    _rec_name = 'pp_name'

    def action_status(self):
        # Button handler: advance to Site Fulfillment.
        self.state = 'b'

    def action_fulfillment(self):
        # Button handler: advance to Site Acquisition.
        self.state = 'c'

    @api.model
    def create(self, values):
        """Assign the next pipeline sequence number as pp_id on creation.

        Fix: the sequence value is injected into `values` *before* calling
        super(), so the record is created with its pp_id in a single INSERT
        (the original assigned it after create, triggering an extra write).
        """
        values['pp_id'] = self.env['ir.sequence'].next_by_code('velo.pop.pipeline.seq') or '/'
        return super(velo_pop_pipeline_master, self).create(values)

    pp_name = fields.Char(string="POP Name")
    pp_id = fields.Char(string="POP ID")  # sequence-generated, see create()
    pp_regional = fields.Many2one('pop.regional', string="POP Regional")
    pp_area = fields.Many2one('pop.area', string="POP Area")
    pp_type = fields.Many2one('pop.type', string="POP Type")
    pp_capacity = fields.Char(string="POP Capacity")
    pp_metroe = fields.Boolean(string="MetroE")
    pp_probability_floor = fields.Char(string="Probability Floor")
    pp_probability_tenant = fields.Char(string="Probability Tenant")
    pp_property_management = fields.Char(string="Property Management")
    pp_pic = fields.Char(string="PIC")
    pp_job_title = fields.Char(string="Job Title")
    pp_phone = fields.Char(string="Phone Number")
    pp_email = fields.Char(string="Email")
    pp_full_address = fields.Text(string="Full Address")
    pp_potential_customer = fields.Char(string="Potential Customer")
    pp_coordinat = fields.Float(string="Coordinat")
    pp_longitude = fields.Float(string="Longitude")
    pp_latitude = fields.Float(string="Latitude")
    pp_active_date = fields.Date(string="Active Date")
    state = fields.Selection([('a','Market Development'),('b','Site Fulfillment'),('c','Site Acquisition')], string="State", default='a')
    pp_activities_ids = fields.One2many('acquisition.activities', 'activities_id', string="Activites ID")
    # NOTE(review): a related field through a One2many resolves to the first
    # linked record's deadline — confirm that is the intended behaviour.
    deadline = fields.Date(related='pp_activities_ids.deadline', string="Deadline")
class velo_acquisition_activities(models.Model):
    """One acquisition activity line (proposal/visit/negotiation) attached
    to a POP pipeline master record."""
    _name = 'acquisition.activities'
    _description = 'Acquisition Activities'
    _rec_name = 'activities'
    # Inverse of pop.pipeline.master.pp_activities_ids.
    activities_id = fields.Many2one('pop.pipeline.master', string="POP Master ID", invisible="1")
    activities = fields.Selection([('proposal', 'Proposal'), ('visit', 'Visit'), ('negotiation', 'Negotiation')], string="Activities")
    date_start = fields.Date(string="Date Start")
    deadline = fields.Date(string="Deadline")
    finish_time = fields.Date(string="Finish Time")
    percentage = fields.Float(string="Percentage")
    attachment = fields.Binary(string="Attachment")
"""
Даны два целочисленных списка A и B, упорядоченных по неубыванию. Объедините их в один упорядоченный список С (то есть он должен содержать len(A)+len(B) элементов). Решение оформите в виде функции merge(A, B), возвращающей новый список. Алгоритм должен иметь сложность O(len(A)+len(B)). Модифицировать исходные списки запрещается. Использовать функцию sorted и метод sort запрещается.
Формат ввода
Программа получает на вход два неубывающих списка, каждый в отдельной строке.
Формат вывода
Программа должна вывести последовательность неубывающих чисел, полученных объединением двух данных списков.
Тест 1
Входные данные:
1 5 7
2 4 4 5
Вывод программы:
1 2 4 4 5 5 7
Тест 2
Входные данные:
1 4 7
1 5 6
Вывод программы:
1 1 4 5 6 7
Тест 3
Входные данные:
1
1
Вывод программы:
1 1
"""
# 17.09.19
def merge(a, b):
    """Merge two non-decreasing integer lists into one sorted list.

    Classic two-pointer merge in O(len(a) + len(b)); neither input list is
    modified.  (The original called sorted(), which both violates the task
    statement above and is O(n log n).)

    :param a: non-decreasing list of ints
    :param b: non-decreasing list of ints
    :return: new sorted list containing all elements of a and b
    """
    i = j = 0
    result = []
    while i < len(a) and j < len(b):
        # <= keeps the merge stable: equal elements come from `a` first.
        if a[i] <= b[j]:
            result.append(a[i])
            i += 1
        else:
            result.append(b[j])
            j += 1
    # At most one of these extends with a non-empty remainder.
    result.extend(a[i:])
    result.extend(b[j:])
    return result


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    a = [int(i) for i in input().split()]
    b = [int(i) for i in input().split()]
    print(' '.join(map(str, merge(a, b))))
|
import pygame
import math
class Ship:
    '''
    Wireframe ship model for a simple software-3D pygame renderer.

    Coords:
    4: back points
    4: front points
    3: top wing
    1: front tip
    2: tip wings
    '''
    # Model-space vertex tuples (x: back/front axis, y: lateral, z: vertical).
    verticies = \
        (-3, -1, 1), ( -3, 1, 1), (-3, -1, -1), (-3, 1, -1), \
        (3, -1, 1), (3, 1, 1), (3, -1, -1), (3, 1, -1), \
        (-3, 0, 1), (3, 0, 1), (-4, 0, 2), \
        (7, 0, 0), \
        (-2, -3, 0), (-2, 3, 0)
    # Vertex-index pairs forming the wireframe edges.
    edges = (0, 1), (0, 2), (1, 3), (2, 3), (4, 5), (4, 6), (5, 7), (6, 7), (0, 4), (1, 5), (2, 6), (3, 7), (8, 9), (8, 10), (9, 10), (4, 11), (5, 11), (6, 11), (7, 11), (0, 12), (2, 12), (4, 12), (6, 12), (1, 13), (3, 13), (5, 13), (7, 13)
    # Vertex-index tuples forming filled faces; paired index-wise with `colors`.
    faces = (6, 4, 5, 7), (8, 9, 10),(12, 0, 4), (12, 2, 6), (13, 1, 5), (13, 3, 7), (2, 0, 1, 3)
    colors = (255, 128, 0),(255, 128, 0), (153, 204, 255), (153, 204, 255), (153, 204, 255), (153, 204, 255), (255, 0, 0)
    def __init__(self, position = (0,0,0), rotation = (0,0)):
        """Place the ship at *position* with the given 2-component rotation."""
        self.x, self.y, self.z = position
        # World-space vertices.  NOTE(review): __init__ uses raw offsets
        # while update() halves them (X / 2, ...) — confirm which scale is
        # intended; the first rendered frame differs from later ones.
        self.verts = [(self.x+X, self.y+Y, self.z+Z) for X, Y, Z in self.verticies]
        self.rotation = list(rotation)
        self.rotationCorrection = 0  # accumulated steering yaw, eased back to 0
        self.rollRight= False
        self.rollLeft = False
        self.radsLeft = 0  # radians remaining in the current barrel roll
    def update(self, dt, key):
        """Advance the ship by *dt* seconds using the pressed-key map *key*."""
        s = dt*7  # per-frame speed scale
        self.x += s * 5  # constant forward motion
        # a/d: strafe left/right with a temporary yaw lean (to be eased out).
        if (key[pygame.K_a]):
            self.y += s * 2
            self.rotation[1] -= s / 5
            self.rotationCorrection -= s / 5
        if (key[pygame.K_d]):
            self.y -= s * 2
            self.rotation[1] += s / 5
            self.rotationCorrection += s / 5
        # z-direct
        if key[pygame.K_s]:
            self.z -= s * 2
        if key[pygame.K_w]:
            self.z += s * 2
        # h/l: free yaw, clamped to roughly (0.7, 2.2) rad modulo 2*pi.
        if key[pygame.K_h] and (self.rotation[1]% (2*math.pi))<2.2:
            self.rotation[1] += s / 3
        if key[pygame.K_l] and (self.rotation[1]% (2*math.pi))> 0.7:
            self.rotation[1] -= s / 3
        # j/k: start a full 2*pi barrel roll (left/right); ignored while one
        # is already in progress.
        if key[pygame.K_j] and not self.rollLeft:
            self.rollLeft = True
            self.radsLeft = math.pi * 2
        if key[pygame.K_k] and not self.rollRight:
            self.rollRight = True
            self.radsLeft = math.pi * 2
        # Progress an active left roll in 0.6 rad steps with a lateral arc.
        if self.rollLeft:
            if self.radsLeft > .6 :
                self.y += s * 7
                self.z += s*3
                self.rotation[1] -= .6
                self.radsLeft -= .6
            else:
                # Final partial step: finish the rotation and settle back.
                self.rotation[1] -= self.radsLeft
                self.radsLeft = 0
                self.y -= s * 7
                self.z -= s * 5
                self.rollLeft = False
        # Mirror image of the left roll.
        if self.rollRight:
            if self.radsLeft > .6 :
                self.y -= s * 7
                self.z += s*3
                self.rotation[1] += .6
                self.radsLeft -= .6
            else:
                self.rotation[1] += self.radsLeft
                self.radsLeft = 0
                self.y += s * 7
                self.z -= s * 5
                self.rollRight = False
        # Ease the steering lean back toward neutral (halved each frame).
        if(self.rotationCorrection != 0):
            fix = self.rotationCorrection / 2
            self.rotation[1] -= fix
            self.rotationCorrection -= fix
        # Recompute world-space vertices (model scaled by 1/2 here).
        self.verts = [(self.x + X / 2, self.y + Y / 2, self.z + Z / 2) for X, Y, Z in self.verticies]
|
# -*- coding: utf-8 -*-
# 隐语义模型
import pickle
import pandas as pd
import numpy as np
from math import exp
def loadDataFrom2PKL(pklName1 ,pklName2):
    """Load two pickled objects (user->movies dict and movie->users dict).

    Fix: uses `with` so both file handles are closed even when unpickling
    raises (the original left them open on failure).

    :param pklName1: path of the user->movie pickle
    :param pklName2: path of the movie->user pickle
    :return: (UserMovieDict, MovieUserDict)
    """
    with open(pklName1, 'rb') as f1:
        UserMovieDict = pickle.load(f1)
    with open(pklName2, 'rb') as f2:
        MovieUserDict = pickle.load(f2)
    return UserMovieDict, MovieUserDict
def loadDataFrom1PKL(pklName1):
    """Load a single pickled object from *pklName1*.

    Fix: uses `with` so the handle is closed even when unpickling raises
    (the original left it open on failure).
    """
    with open(pklName1, 'rb') as f1:
        Dict = pickle.load(f1)
    return Dict
def allMovie(MovieUserDict):
    """Return the list of all movie ids (the dict's keys, in insertion order)."""
    return list(MovieUserDict)
def getMovieHeat(MovieUserDict):
    """Movie 'heat' = number of users who rated the movie.

    :param MovieUserDict: movie id -> collection of rating users
    :return: movie id -> rating count
    """
    return {movie: len(users) for movie, users in MovieUserDict.items()}
def getUserPositiveItem(UserMovieDict, UserID):
    """Return the list of movies the given user has rated (positive feedback)."""
    return UserMovieDict[UserID]
def getUserNegativeItem(MovieHeatDict, positiveItemList, UserID, positiveItemListLength):
    """Negative samples for a user: the hottest movies the user has NOT
    rated, taken in equal number to the positive samples.

    The input heat dict is left unmodified.  (UserID is unused; kept for
    interface compatibility.)
    """
    candidates = MovieHeatDict.copy()
    for item in positiveItemList:
        # pop with default == the original's `if in: del` guard.
        candidates.pop(item, None)
    ranked = sorted(candidates.items(), key=lambda kv: kv[1], reverse=True)
    return [movie for movie, _heat in ranked[:positiveItemListLength]]
def initTrainingData(positiveItemList ,negativeItemList):
    """Build per-user training labels: 1.0 for positive items, 0.0 for
    negatives (an item appearing in both lists is labelled positive)."""
    return {item: 1. if item in positiveItemList else 0.
            for item in positiveItemList + negativeItemList}
def initPara(userID, itemID, classCount):
    """Randomly initialise the latent-factor parameter matrices.

    :param userID: iterable of user ids (row index of p)
    :param itemID: iterable of item ids (column index of q)
    :param classCount: number of latent classes
    :return: (p, q) — p is users x classes, q is classes x items
    """
    p = pd.DataFrame(np.random.rand(len(userID), classCount),
                     columns=range(0, classCount), index=userID)
    q = pd.DataFrame(np.random.rand(classCount, len(itemID)),
                     columns=itemID, index=range(0, classCount))
    return p, q
def lfmPredict(p, q, userID, itemID):
    """Predict the target user's interest in the target item.

    Fix: `DataFrame.ix` was deprecated and then removed from pandas;
    `.loc` performs the same label-based row lookup here.

    :param p: user -> latent-class affinity matrix
    :param q: latent-class -> item matrix
    :param userID: target user label
    :param itemID: target item label (a column of q)
    :return: predicted interest, squashed into [0, 1] via sigmod()
    """
    user_vec = np.mat(p.loc[userID].values)
    item_vec = np.mat(q[itemID].values).T
    return sigmod((user_vec * item_vec).sum())
def sigmod(x):
    """Logistic (sigmoid) function: squash an interest score into [0, 1]."""
    return 1.0 / (1 + exp(-x))
def latenFactorModel(UserMovieDict, MovieUserDict, classCount, iterCount, alpha, lamda):
    """Train latent-factor matrices p, q by SGD over positive + negative samples.

    Fixes vs. the original:
    - initPara now receives `classCount` instead of a hard-coded 5, so the
      parameter matrices match the number of latent classes the SGD loop
      iterates over (classCount > 5 raised KeyError, classCount < 5 left
      extra columns untrained);
    - the movie-heat dict is computed once up front instead of once per
      user per iteration — it never changes during training.

    :param classCount: number of latent classes
    :param iterCount: number of SGD sweeps over all users
    :param alpha: learning rate, decayed by 0.9 after each sweep
    :param lamda: L2 regularisation strength
    :return: trained (p, q)
    """
    p, q = initPara(UserMovieDict.keys(), MovieUserDict.keys(), classCount)
    MovieHeatDict = getMovieHeat(MovieUserDict)  # invariant across iterations
    for step in range(0, iterCount):
        for userID in UserMovieDict.keys():
            positiveItemList = getUserPositiveItem(UserMovieDict, userID)
            negativeItemList = getUserNegativeItem(MovieHeatDict, positiveItemList, userID, len(positiveItemList))
            OnepersonData = initTrainingData(positiveItemList, negativeItemList)
            for itemID in OnepersonData.keys():
                # Prediction error for this (user, item) label.
                eui = OnepersonData[itemID] - lfmPredict(p, q, userID, itemID)
                for f in range(0, classCount):
                    p[f][userID] += alpha * (eui * q[itemID][f] - lamda * p[f][userID])
                    q[itemID][f] += alpha * (eui * p[f][userID] - lamda * q[itemID][f])
        alpha *= 0.9
    return p, q
def recommand(UserMovieDict, MovieUserDict, userID, p, q, TopN=10):
    """Recommend up to TopN unseen movies for userID, ranked by the
    predicted interest from the trained latent factors."""
    seen = set(getUserPositiveItem(UserMovieDict, userID))
    candidates = set(allMovie(MovieUserDict)) - seen
    scored = {item: lfmPredict(p, q, userID, item) for item in candidates}
    ranked = sorted(scored.items(), key=lambda kv: kv[1], reverse=True)
    return [item for item, _score in ranked[:TopN]]
def getMovieList(item_list):
    """Map MovieLens movie ids to movie names using movies.dat.

    movies.dat lines look like ``<id>::<name>::<genres>``.

    :param item_list: iterable of movie ids (strings)
    :return: list of movie names, in item_list order
    """
    # Bug fix: the file handle was opened and never closed; 'with'
    # guarantees it is released.
    with open('movies.dat', 'r') as f:
        data = f.readlines()
    movieNameList = []
    for item in item_list:
        for each_line in data:
            info = each_line.split('::')
            movieNum = info[0]
            movieName = info[1]
            if item == movieNum:
                movieNameList.append(movieName)
                print(info[2])  # genres column, kept for parity with the original
    return movieNameList
# Script entry: train the latent factor model on the pre-pickled
# user->movies / movie->users dictionaries, then look up the movie
# names for user '1''s top-10 recommendation.
UserMovieDict, MovieUserDict = loadDataFrom2PKL('UserMovieDict.pkl','MovieUserDict.pkl')
#positiveItemList = getUserPositiveItem(UserMovieDict, '2')
#MovieHeatDict = getMovieHeat(MovieUserDict)
#negativeItemList = getUserNegativeItem(MovieHeatDict, positiveItemList, '2', len(positiveItemList))
#OnepersonData = initTrainingData(positiveItemList, negativeItemList)
#p, q = initPara(UserMovieDict.keys(), MovieUserDict.keys(), 5)
#r = lfmPredict(p, q, '1', '1193')
# classCount=5 latent classes, 3 SGD passes, alpha=0.01, lamda=0.01
p, q = latenFactorModel(UserMovieDict, MovieUserDict, 5, 3, 0.01, 0.01)
ItemList = recommand(UserMovieDict, MovieUserDict, '1', p, q)
movieNameList = getMovieList(ItemList)
|
from sqlalchemy import Boolean, Column, Date, Integer, String
from app.api.db.baseClass import Base
class User(Base):
    """Employee/user account record (SQLAlchemy declarative model).

    NOTE(review): no ``__tablename__`` here -- presumably the shared
    Base derives one; confirm in app.api.db.baseClass.
    """
    id = Column(Integer, primary_key=True, index=True)
    # Login name: unique and indexed for fast lookups.
    username = Column(String(32), unique=True, index=True, nullable=False)
    nickname = Column(String(32))
    sex = Column(String(8), doc="性别")  # gender
    identity_card = Column(String(32), doc="身份证")  # national ID number
    phone = Column(String(32), doc="手机号")  # mobile phone number
    address = Column(String(32), doc="地址")  # address
    work_start = Column(Date, doc="入职日期")  # hire date
    # Only the password hash is stored, never the plaintext.
    hashed_password = Column(String(128), nullable=False)
    avatar = Column(String(128), doc="头像")  # avatar image path/URL
    introduction = Column(String(256), )
    status = Column(String(32), nullable=False)
    is_active = Column(Boolean(), default=True)
    is_superuser = Column(Boolean(), default=False)
|
'''
Copyright 2011, 2012 Timothy Hunter <tjhunter@eecs.berkeley.edu>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) version 3.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library. If not, see <http://www.gnu.org/licenses/>.
'''
# pylint: disable=W0105
'''
Created on Sep 20, 2011
@author: tjhunter
Encoding/decoding conversions.
'''
import datetime
from structures import LatLng, State, Path, StateCollection
def encode_LatLng(gps):
  """Serialize a LatLng-like object into a plain {'lat', 'lng'} dict."""
  encoded = {}
  encoded['lat'] = gps.lat
  encoded['lng'] = gps.lng
  return encoded
def decode_LatLng(dct):
  """Inverse of encode_LatLng: rebuild a LatLng from its dict form."""
  lat = dct['lat']
  lng = dct['lng']
  return LatLng(lat, lng)
def encode_link_id(link_id):
  """Serialize a (node_id, direction) link id pair into a dict."""
  nid, direction = link_id
  return dict(id=nid, direction=direction)
def decode_link_id(dct):
  """Inverse of encode_link_id: rebuild the (node_id, direction) tuple."""
  nid = dct['id']
  direction = dct['direction']
  return (nid, direction)
def encode_State(state):
  """Serialize a State: its link id, offset along the link and GPS fix."""
  encoded = {}
  encoded['link'] = encode_link_id(state.link_id)
  encoded['offset'] = state.offset
  encoded['gps_pos'] = encode_LatLng(state.gps_pos)
  return encoded
def decode_State(dct):
  """Inverse of encode_State; 'gps_pos' is optional and defaults to None."""
  gps_pos = None
  if 'gps_pos' in dct:
    gps_pos = decode_LatLng(dct['gps_pos'])
  return State(decode_link_id(dct['link']), dct['offset'], gps_pos)
def encode_Path(path):
  """Serialize a Path: its endpoints, traversed link ids and geometry."""
  encoded = {}
  encoded['start'] = encode_State(path.start)
  encoded['links'] = [encode_link_id(lid) for lid in path.links]
  encoded['end'] = encode_State(path.end)
  encoded['latlngs'] = [encode_LatLng(ll) for ll in path.latlngs]
  return encoded
def decode_Path(dct):
  """Inverse of encode_Path; 'latlngs' is optional and defaults to None."""
  latlngs = None
  if 'latlngs' in dct:
    latlngs = [decode_LatLng(d) for d in dct['latlngs']]
  links = [decode_link_id(d) for d in dct['links']]
  return Path(decode_State(dct['start']), links,
              decode_State(dct['end']), latlngs)
def encode_time(time):
  """Serialize a datetime-like value to second precision.

  Sub-second precision and tz info, if any, are dropped.
  """
  fields = ('year', 'month', 'day', 'hour', 'minute', 'second')
  return dict((name, getattr(time, name)) for name in fields)
def decode_time(dct):
  """Inverse of encode_time: rebuild a naive datetime from its dict form."""
  parts = [dct[k] for k in ('year', 'month', 'day', 'hour', 'minute', 'second')]
  return datetime.datetime(*parts)
def encode_StateCollection(sc):
  """Serialize a StateCollection: id, GPS fix, timestamp and states."""
  encoded = {}
  encoded['id'] = sc.id
  encoded['latlng'] = encode_LatLng(sc.gps_pos)
  encoded['time'] = encode_time(sc.time)
  encoded['states'] = [encode_State(s) for s in sc.states]
  return encoded
def decode_StateCollection(dct):
  """Inverse of encode_StateCollection."""
  states = [decode_State(d) for d in dct['states']]
  return StateCollection(dct['id'], states,
                         decode_LatLng(dct['latlng']),
                         decode_time(dct['time']))
|
import csv, smtplib, ssl, xlrd, openpyxl, datetime, subprocess, argparse, n2w, warnings
import pandas as pd
to_address = "tripdirector@kandalore.com"
# Gmail credentials live in a local, uncommitted module.
import gmailapppassword
from_address = gmailapppassword.username
password = gmailapppassword.password
my_parser = argparse.ArgumentParser()
my_parser.add_argument('-t', '--test', action='store_true', help='Testing mode. All emails are sent to tripdirector@kandalore.com')
args = my_parser.parse_args()
print(vars(args))
if args.test is True:
    print('Wow it worked')
# NOTE(review): 'path' is defined but the literal is repeated verbatim
# in the read_excel call below -- keep the two in sync.
path = "/Users/stu/Documents/Tripping/Schedule.xlsm"
#input number you want to search
#number = input('Enter date to find\n')
now = datetime.datetime.now()
exceltup = (now.year, now.month, now.day)
# Today's date as an Excel serial day number (to match the spreadsheet).
today = int(xlrd.xldate.xldate_from_date_tuple((exceltup),0))
tomorrow = today + 1
dayleavingtext = "in two days"
intwodays = today + 2
inthreedays = today + 3
tripUID = input("Enter a trip UID:\n")
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
    server.login(from_address, password)
    with warnings.catch_warnings():
        # openpyxl warns about the sheet's Data Validation extension; ignore it.
        warnings.filterwarnings("ignore", message="Data Validation extension is not supported")
        df = pd.read_excel('/Users/stu/Documents/Tripping/Schedule.xlsm', sheet_name='Trips')
    for row in df['TripID']:
        if int(row) == int(tripUID):
            # NOTE(review): str + .values concatenates onto a numpy array,
            # so 'subject' is an ndarray of strings, not a str -- confirm
            # intended before using it as an email subject.
            subject = "Tripper is " + df.loc[df['TripID'] == int(tripUID)]['Tripper1HR'].values
            print(subject)
            # subject = df[df['TripID'] == int(tripUID)]
            # print(subject)
|
import os
products = []
if os.path.isfile("products2.csv"): # check whether the data file exists
    print("找到档案了!")
    with open("products2.csv", "r", encoding = "gb2312") as f:
        for line in f:
            # Skip the header row ("商品,价格" = "product,price").
            if "商品,价格" in line:
                continue
            # NOTE(review): a product name containing a comma would make
            # this 2-way unpack raise ValueError.
            name, price = line.strip().split(",")
            products.append([name, price])
    print(products)
else:
    print("找不到档案.")
# Interactive entry loop: read name/price pairs until the user types "q".
while True:
    name = input("请输入商品名称:")
    if name == "q":
        break
    price = input("请输入商品价钱: ")
    products.append([name, price])
    print(products)
# Print every purchase record.
for product in products:
    print(product[0], "的价钱是:", product[1])
# Rewrite the whole file (old + new records) with a fresh header row.
with open("products2.csv","w", encoding = "gb2312") as f:
    f.write("商品,价格\n")
    for p in products:
        f.write(p[0] + "," + p[1] + "\n")
#1059-pares-e-impares
# Read N integers; print the even ones in ascending order, then the odd
# ones in descending order.
N = int(input())
impar = []
par = []
for _ in range(N):
    numero = int(input())
    # Fixed: 'False == (numero % 2 == 0)' inverted-comparison and the
    # always-true 'elif (True)' are replaced by a plain if/else.
    if numero % 2 == 0:
        par.append(numero)
    else:
        impar.append(numero)
par.sort()
impar.sort(reverse=True)
for valor in par:
    print(valor)
for valor in impar:
    print(valor)
import sys
# Redirect stdin so input() reads the test cases from the local file.
sys.stdin=open('다솔이.txt','r')
n=int(input())
for tc in range(1,n+1):
    string=input()
    if len(string)==1:
        # Single character: one 5x5 diamond with the char at the centre.
        dia=[[0]*5 for i in range(5)]
        for y in range(5):
            if y==0 or y==4:
                dia[y]=['.','.','#','.','.']
            elif y==1 or y==3:
                dia[y]=['.','#','.','#','.']
            else:
                dia[y]=['#','.',string,'.','#']
        for j in dia:
            for k in j:
                print(k,end='')
            print()
    else:
        # Several characters: the diamonds overlap, each new one shifted
        # 4 columns right, so the total width is 5 + 4*(len-1).
        dia=[[0]*(5+4*(len(string)-1)) for i in range(5)]
        i = 0
        for y in range(5):
            for x in range(0,len(dia[0])-1,4):
                if y==0 or y==4:
                    dia[y][x:x+5]=['.','.','#','.','.']
                elif y==1 or y==3:
                    dia[y][x:x+5]=['.','#','.','#','.']
                else:
                    # Middle row carries the characters, one per diamond.
                    dia[y][x:x + 5]=['#', '.', string[i], '.', '#']
                    i+=1
        for j in dia:
            for k in j:
                print(k,end='')
            print()
|
import unittest
from player import Player
from throw import Throw
class TestPlayer(unittest.TestCase):
    """Unit tests for Player: construction defaults, throw scoring,
    bust detection and checkout-finish detection."""
    # --- construction defaults ---
    def test_player_name_is_set(self):
        player = Player('Phil', 100)
        self.assertEqual('Phil', player.name)
    def test_player_score_is_set(self):
        player = Player('Phil', 100)
        self.assertEqual(100, player.current_score)
    def test_player_sets_won_is_zero(self):
        player = Player('Phil', 100)
        self.assertEqual(0, player.sets_won)
    def test_player_legs_won_is_zero(self):
        player = Player('Phil', 100)
        self.assertEqual(0, player.legs_won)
    # --- throw scoring and busts ---
    def test_player_throw_updates_score(self):
        player = Player('Phil', 501)
        player_throw = Throw(180)
        player.throw_darts(player_throw)
        self.assertEqual(321, player.current_score)
    def test_player_is_bust_when_throw_greater_than_current_score(self):
        player = Player('Phil', 100)
        player_throw = Throw(120)
        self.assertTrue(player.is_bust(player_throw))
    def test_player_is_bust_when_throw_is_one_less_than_current_score(self):
        # Leaving a score of 1 is a bust (no checkout possible from 1).
        player = Player('Phil', 100)
        player_throw = Throw(99)
        self.assertTrue(player.is_bust(player_throw))
    def test_player_score_does_not_change_when_busted(self):
        player = Player('Phil', 100)
        player_throw = Throw(120)
        player.throw_darts(player_throw)
        self.assertEqual(100, player.current_score)
    # --- winning throws ---
    def test_player_throws_winning_score(self):
        player = Player('Phil', 100)
        player_throw = Throw(100)
        self.assertTrue(player.is_winning_throw(player_throw))
    def test_player_throws_winning_score_results_in_zero_score(self):
        player = Player('Phil', 100)
        player_throw = Throw(100)
        self.assertTrue(player.is_winning_throw(player_throw))
        player.throw_darts(player_throw)
        self.assertEqual(0,player.current_score)
    def test_player_does_not_throw_winning_score(self):
        player = Player('Phil', 100)
        player_throw = Throw(80)
        self.assertFalse(player.is_winning_throw(player_throw))
        player.throw_darts(player_throw)
        self.assertEqual(20,player.current_score)
    # --- finishes: scores checkoutable with three darts ---
    def test_100_is_a_finish(self):
        player = Player('Phil', 100)
        self.assertTrue(player.is_on_a_finish())
    def test_180_is_not_a_finish(self):
        player = Player('Phil', 180)
        self.assertFalse(player.is_on_a_finish())
    def test_170_is_a_finish(self):
        player = Player('Phil', 170)
        self.assertTrue(player.is_on_a_finish())
    def test_167_is_a_finish(self):
        player = Player('Phil', 167)
        self.assertTrue(player.is_on_a_finish())
    def test_164_is_a_finish(self):
        player = Player('Phil', 164)
        self.assertTrue(player.is_on_a_finish())
    def test_161_is_a_finish(self):
        player = Player('Phil', 161)
        self.assertTrue(player.is_on_a_finish())
    def test_160_is_a_finish(self):
        player = Player('Phil', 160)
        self.assertTrue(player.is_on_a_finish())
    def test_159_is_not_a_finish(self):
        player = Player('Phil', 159)
        self.assertFalse(player.is_on_a_finish())
    def test_162_is_not_a_finish(self):
        # 162, 171 (and 180) have no three-dart checkout.
        player = Player('Phil', 162)
        self.assertFalse(player.is_on_a_finish())
    def test_171_is_not_a_finish(self):
        player = Player('Phil', 171)
        self.assertFalse(player.is_on_a_finish())
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
import pandas as pd
import numpy as np
import os
import sys
from collections import defaultdict
DEBUG =1
def process_input_file(filename, sep='|'):
fin=open(filename,'r')
degree_dict=defaultdict(lambda: defaultdict(int))
first=0
for each in fin:
lst=each.split(sep)
hour=lst[0].split(' ')[-1].split(':')[0]
A_party=lst[1]
B_party=lst[2]
A_tower=lst[4]
B_tower=lst[5]
A_string=A_party
if first==0:
print 'DEBUG '+A_string
first=first+1
degree_dict[A_string][B_party]+=1
degree_dict[B_party][A_party]+=1
fout=open('degree_'+filename,'w')
fout.write('A,Degree\n')
for each in degree_dict.keys():
fout.write(each+','+str(len(degree_dict[each].keys()))+'\n')
fout.close()
if __name__=='__main__':
    # CLI entry point: exactly one argument, the CDR input file name.
    if len(sys.argv)!=2:
        print 'Wrong number of arguments'
        print 'Input file name required'
        sys.exit(-1)
    process_input_file(sys.argv[1])
|
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 - 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import log
AUTH_CONTEXT_ENV = 'KEYSTONE_AUTH_CONTEXT'
"""Environment variable used to convey the Keystone auth context.
Auth context is essentially the user credential used for policy enforcement.
It is a dictionary with the following attributes:
* ``user_id``: user ID of the principal
* ``project_id`` (optional): project ID of the scoped project if auth is
project-scoped
* ``domain_id`` (optional): domain ID of the scoped domain if auth is
domain-scoped
* ``roles`` (optional): list of role names for the given scope
* ``group_ids``: list of group IDs for which the API user has membership
"""
LOG = log.getLogger(__name__)
def is_v3_token(token):
    """Return True for a v3 token payload, False for v2.

    V3 responses nest their data under the 'token' key, whereas v2
    responses nest theirs under 'access' -- the key is the marker.
    """
    return 'token' in token
def v3_token_to_auth_context(token):
    """Convert a v3 token dict into an auth-context credentials dict.

    :param token: token response with its payload under the 'token' key
    :returns: creds dict (user/project/domain/roles/groups/trust/oauth)
    :raises keystone.exception.Unauthorized: if the user data is missing
        or malformed
    """
    creds = {'is_delegated_auth': False}
    token_data = token['token']
    try:
        creds['user_id'] = token_data['user']['id']
    except (AttributeError, KeyError, TypeError):
        # Was 'except AttributeError' only, but dict lookups raise
        # KeyError (missing key) or TypeError (non-dict value), so a
        # malformed token crashed instead of being rejected.
        LOG.warning(_('RBAC: Invalid user data in v3 token'))
        raise exception.Unauthorized()
    if 'project' in token_data:
        creds['project_id'] = token_data['project']['id']
    else:
        LOG.debug('RBAC: Proceeding without project')
    if 'domain' in token_data:
        creds['domain_id'] = token_data['domain']['id']
    if 'roles' in token_data:
        # Flatten the role dicts into a plain list of role names
        # (mirrors the v2 converter).
        creds['roles'] = [role['name'] for role in token_data['roles']]
    # Federated group memberships, empty for non-federated users.
    creds['group_ids'] = [
        g['id'] for g in token_data['user'].get(federation.FEDERATION, {}).get(
            'groups', [])]
    trust = token_data.get('OS-TRUST:trust')
    if trust is None:
        creds['trust_id'] = None
        creds['trustor_id'] = None
        creds['trustee_id'] = None
    else:
        creds['trust_id'] = trust['id']
        creds['trustor_id'] = trust['trustor_user']['id']
        creds['trustee_id'] = trust['trustee_user']['id']
        creds['is_delegated_auth'] = True
    oauth1 = token_data.get('OS-OAUTH1')
    if oauth1 is None:
        creds['consumer_id'] = None
        creds['access_token_id'] = None
    else:
        creds['consumer_id'] = oauth1['consumer_id']
        creds['access_token_id'] = oauth1['access_token_id']
        creds['is_delegated_auth'] = True
    return creds
def v2_token_to_auth_context(token):
    """Convert a v2 token dict into an auth-context credentials dict.

    :param token: token response with its payload under the 'access' key
    :returns: creds dict (user/tenant/roles/trust)
    :raises keystone.exception.Unauthorized: if the user data is missing
        or malformed
    """
    creds = {'is_delegated_auth': False}
    token_data = token['access']
    try:
        creds['user_id'] = token_data['user']['id']
    except (AttributeError, KeyError, TypeError):
        # Was 'except AttributeError' only, but dict lookups raise
        # KeyError (missing key) or TypeError (non-dict value), so a
        # malformed token crashed instead of being rejected.
        LOG.warning(_('RBAC: Invalid user data in v2 token'))
        raise exception.Unauthorized()
    if 'tenant' in token_data['token']:
        creds['project_id'] = token_data['token']['tenant']['id']
    else:
        LOG.debug('RBAC: Proceeding without tenant')
    if 'roles' in token_data['user']:
        creds['roles'] = [role['name'] for
                          role in token_data['user']['roles']]
    trust = token_data.get('trust')
    if trust is None:
        creds['trust_id'] = None
        creds['trustor_id'] = None
        creds['trustee_id'] = None
    else:
        creds['trust_id'] = trust.get('id')
        creds['trustor_id'] = trust.get('trustor_id')
        creds['trustee_id'] = trust.get('trustee_id')
        creds['is_delegated_auth'] = True
    return creds
def token_to_auth_context(token):
    """Dispatch to the v2 or v3 converter based on the payload shape."""
    converter = (v3_token_to_auth_context if is_v3_token(token)
                 else v2_token_to_auth_context)
    return converter(token)
|
import math
def iss(n):
    """Number of digits needed to write n as its prime factorisation.

    Exponents equal to 1 are left implicit: e.g. 12 = 2^2 * 3 costs
    nl(2) + nl(2) + nl(3) = 3 digits. 0 and 1 cost a single digit.
    """
    if n == 0 or n == 1:
        return 1
    # Trial division: collect the exact prime factorisation of n.
    factors = {}
    m = n
    divisor = 2
    while divisor * divisor <= m:
        while m % divisor == 0:
            factors[divisor] = factors.get(divisor, 0) + 1
            m //= divisor
        divisor = 3 if divisor == 2 else divisor + 2
    if m > 1:
        # Leftover prime larger than sqrt of the reduced value.
        factors[m] = factors.get(m, 0) + 1
    digits = 0
    for prime, power in factors.items():
        digits += nl(prime)
        if power > 1:
            digits += nl(power)
    return digits
def nl(n):
    """Count of decimal digits in n.

    Non-positive n yields 0, matching the original loop, which never
    iterates for n <= 0.
    """
    return len(str(n)) if n > 0 else 0
def minsub(l, s):
    """Minimum number of elements from l (each usable any number of
    times) whose sum is exactly s.

    Preserves the original DP's quirk of returning 0 when s is
    unreachable (it stored (False, 0) and returned the second field).
    """
    # best[j] = fewest terms summing to j, or None when unreachable.
    best = [None] * (s + 1)
    best[0] = 0
    for total in range(1, s + 1):
        counts = [best[total - v] for v in l
                  if v <= total and best[total - v] is not None]
        if counts:
            best[total] = min(counts) + 1
    return best[s] if best[s] is not None else 0
# Precompute every i up to 2*10^5 whose factorised digit length (iss)
# equals its plain digit length (nl); these are the usable summands.
# NOTE(review): this runs iss() 200k times up front and is slow by design.
l=[]
for i in range(1,2*(10**5) + 1):
    if iss(i)==nl(i):
        l.append(i)
# For each query (a, b): keep only candidates <= b-a (l is ascending),
# then report the minimum number of candidates summing exactly to b-a
# (0 when impossible).
for _ in range(int(input())):
    a,b=map(int,input().split())
    i=0
    while i<len(l):
        if l[i]>(b-a):
            break
        i+=1
    print(minsub(l[:i],(b-a)))
|
from . import factory_rst_test
from . import scc
from pylat import get_config
class Test_BLE_Device(factory_rst_test.Coap_Request_Tests):
    """CoAP request tests run against a BLE device through the scc tool."""
    available_endpoints = []
    @classmethod
    def setUpClass(cls):
        """Set up the class variables.
        Call well-known/core endpoint, create a list of available endpoints
        """
        cls.sccPath = get_config("scc_path")
        cls.mac = get_config("device_MAC") # check here to see if list; run against list of devices or single
        cls.outputFile = get_config("output_file")
        wellKnown = scc.run_scc(cls.sccPath, "--mac {0} --type GET --url coap://[::1]/.well-known/core".format(cls.mac), cls.outputFile)
        data = wellKnown['Data'].split(',')
        # well-known/core entries look like '</lock>'; strip the '</' and '>'.
        cls.available_endpoints = [d.split('>')[0][2:] for d in data]
        # cls.available_endpoints = ["lock", "login"] #fill this list to test specific endpoints.
    def GET_coap(self, endpoint, proxy="::1"):
        """Issue a CoAP GET for *endpoint* via *proxy*."""
        return scc.run_scc(self.sccPath, "--mac {0} --type GET --url coap://[{1}]/{2}".format(self.mac, proxy, endpoint), self.outputFile)
    def DELETE_coap(self, endpoint, proxy="::1"):
        """Issue a CoAP DELETE for *endpoint* via *proxy*."""
        return scc.run_scc(self.sccPath, "--mac {0} --type DELETE --url coap://[{1}]/{2}".format(self.mac, proxy, endpoint), self.outputFile)
    def POST_coap(self, endpoint, payload=None, proxy="::1"):
        """Issue a CoAP POST for *endpoint*, optionally with *payload*."""
        requestString = "--mac {0} --type POST --url coap://[{1}]/{2}".format(self.mac, proxy, endpoint)
        # Idiom fix: identity test ('is not None') is the correct way to
        # detect the missing-payload sentinel, not '!= None'.
        if payload is not None:
            requestString = requestString + " --payload {}".format(payload)
        return scc.run_scc(self.sccPath, requestString, self.outputFile)
# Print the 1..10 multiplication tables to the screen, one table row per
# line, every product followed by a tab (same output as a nested loop
# with end="\t" plus a trailing newline per row).
for fila in range(1, 11):
    renglon = "".join(str(fila * columna) + "\t" for columna in range(1, 11))
    print(renglon)
#!/usr/bin/python
import subprocess
import os
import numpy as np
import math
import matplotlib.pyplot as plt
import analyzer as stats
import operator
import matplotlib.ticker as mticker
from pylab import *
import time
from matplotlib.backends.backend_pdf import PdfPages
import pickle
class BWACaller():
    """Wrap the bwa command-line pipeline (index, aln, sampe/samse).

    readType == 1 selects paired-end mode (both read files aligned);
    any other value aligns only sequenceToMap1. (Python 2 module.)
    """
    def __init__(self, readType, referenceSequence, sequenceToMap1, sequenceToMap2, regularPath):
        self.readType = readType
        self.referenceSequence = referenceSequence
        self.sequenceToMap1 = sequenceToMap1
        self.sequenceToMap2 = sequenceToMap2
        # Shell command strings, built once and executed via os.system.
        self.cmd1 = "bwa index "+str(self.referenceSequence)
        self.cmd2 = "bwa aln "+str(self.referenceSequence)+" "+str(self.sequenceToMap1)+" > "+regularPath+"sequence1.sai"
        self.cmd3 = "bwa aln "+str(self.referenceSequence)+" "+str(self.sequenceToMap2)+" > "+regularPath+"sequence2.sai"
        self.cmd4 = "bwa sampe "+str(self.referenceSequence)+" "+regularPath+"sequence1.sai "+regularPath+"sequence2.sai "+str(self.sequenceToMap1)+" "+str(self.sequenceToMap2)+" > "+regularPath+"alignment.sam"
        # NOTE(review): samse writes to ./alignment.sam, not regularPath --
        # confirm whether that is intentional.
        self.cmd5 = "bwa samse "+str(self.referenceSequence)+" " +regularPath+"sequence1.sai "+str(self.sequenceToMap1)+" > alignment.sam"
    def calculateIndex(self):
        # Build the BWA index for the reference FASTA.
        os.system(self.cmd1)
    def align(self):
        # Paired-end (readType == 1) aligns both read files.
        if (self.readType == 1):
            os.system(self.cmd2)
            os.system(self.cmd3)
        else:
            os.system(self.cmd2)
    def doSampe(self):
        # Combine the paired-end .sai files into a SAM alignment.
        os.system(self.cmd4)
    def doSamsa(self):
        # Single-end .sai -> SAM.
        os.system(self.cmd5)
class SAMTools():
    """Run the samtools post-processing chain on alignment.sam:
    sort -> index -> idxstats -> flagstat. (Python 2 module.)"""
    def __init__(self, regularPath):
        self.cmd1 = "samtools view -bS "+regularPath+"alignment.sam | samtools sort - test_sorted"
        self.cmd2 = "samtools index "+regularPath+"test_sorted.bam "+regularPath+"test_sorted.bai"
        self.cmd3 = "samtools idxstats "+regularPath+"test_sorted.bam > "+regularPath+"analyze.txt"
        self.cmd4 = "samtools flagstat "+regularPath+"test_sorted.bam"
    def execute(self):
        # Run the four commands in order; each depends on the previous one.
        os.system(self.cmd1)
        os.system(self.cmd2)
        os.system(self.cmd3)
        os.system(self.cmd4)
class BEDTools():
    """Compute per-base genome coverage with bedtools genomecov."""
    def __init__(self, regularPath, pathToBedTools): #dodaj da ima path!
        #self.pathToBedTools = "~/bedtools/bin/bedtools"
        # -dz reports zero-based per-position depth.
        self.cmd1 = pathToBedTools+" genomecov -ibam "+regularPath+"test_sorted.bam -dz > "+regularPath+"test.bam.cov"
    def compute(self):
        os.system(self.cmd1)
class Coverage:
    """Coverage statistics over contigs, built from a bedtools genomecov
    -dz dump ('contig<TAB>pos<TAB>depth' lines). (Python 2 module.)"""
    def getContigData(self, inputData): #get basic contig info
        self.contiger = stats.ContigAnalyzer()
        self.contigs = []
        self.contigHeaders = []
        self.contigLengths = []
        self.contigs, self.contigHeaders, self.contigLengths = self.contiger.parseFasta(inputData)
        self.totalLenCont = self.contiger.getTotalLength()
    def getAlignmentData(self, inputData): #get alignment data
        # NOTE(review): 'stream' is never closed.
        stream = open(inputData, 'r')
        data = stream.readlines()
        alignData = {}
        self.uncoveredRegions = {}
        for header in self.contigHeaders:
            alignData[header] = []
            self.uncoveredRegions[header] = []
        #print alignData
        for line in data:
            line = line.strip()
            temp = []
            temp = line.split("\t")
            covValue = int(temp[-1])
            header = temp[0]
            alignData[header].append(covValue)
        self.alignData = alignData
        return alignData
    def getCoveragePerContig(self): #get all covered bases in contig, every base must be covered at least once
        # Fraction of reported positions with depth > 0, per contig.
        coverageData = {}
        self.notCoveredContigs = []
        self.totalCoverage = 0
        print "[AN:] Getting coverage per contig..."
        for header in self.alignData:
            temp = self.alignData[header]
            covered = 0
            for value in temp:
                if value > 0:
                    covered += 1
            if len(temp) > 0:
                coverage = float(covered) / len(temp)
                coverageData[header] = coverage
                print "[AN:] Contig Id: ", header, " coverage: ", coverage
            else:
                self.notCoveredContigs.append(header)
                print "[AN:] Contig Id: ", header, " has 0 coverage"
        self.coverageData = coverageData
        print "[AN:] Contigs not covered by any read:"
        for contig in self.notCoveredContigs:
            print "\t",contig
        return coverageData
    def getUncovered(self):
        # Total bases in contigs that no read mapped to at all.
        baseNum = 0
        allData = self.contiger.getContigDict()
        for header in self.notCoveredContigs:
            baseNum = baseNum + len(allData[header])
        return baseNum
    def getMaxCoveredContigs(self, n): #get contigs with highest number of bases covered
        sortedContigs = self.sortContigs()
        return sortedContigs[0:n]
    def sortContigs(self): #sort contigs by coverage
        sortedContigs = sorted(self.coverageData.iteritems(), key=operator.itemgetter(1), reverse = True)
        return sortedContigs
    def plotCoverage(self, contigId, path, showMe): #plot coverage of contig
        # Scatter plot of depth per relative position, saved as PNG.
        currentCovs = self.alignData[contigId]
        xAxis = np.arange(0, len(currentCovs))
        yAxis = np.array(currentCovs)
        plot(xAxis, yAxis, 'ro', markersize=3)
        xlabel('Relative position inside contig')
        ylabel('Number of reads')
        title('Contig coverage['+contigId+']')
        grid(True)
        savefig(path+"/"+contigId+".png")
        if showMe == 1:
            show()
        else:
            time.sleep(1)
    def plotAllContigCov(self, path, show): #plot coverage of all contigs in .fasta file
        if not os.path.exists(path):
            os.makedirs(path)
        # Persist the raw coverage data alongside the plots.
        pickle.dump(self.alignData, open(path+"/data.p","wb"))
        for header in self.alignData:
            if self.alignData[header] != []:
                print "Plotting:", header
                self.plotCoverage(header, path, show)
            else:
                print "Skipping contig:"+header+" none of the reads were mapped to it."
    def getUncoveredRegions(self): #skip uncovered regions
        # Collect zero-depth positions per contig; return the absolute
        # count and its percentage of the total contig length.
        self.totalUncoveredBases = 0
        for header in self.alignData:
            temp = self.alignData[header]
            for i in range(0, len(temp)):
                if temp[i] == 0:
                    self.totalUncoveredBases += 1
                    self.uncoveredRegions[header].append(i)
        print "Percentage of uncovered bases:",(float(self.totalUncoveredBases) + self.getUncovered()) / self.totalLenCont
        return self.totalUncoveredBases, float(self.totalUncoveredBases)/self.totalLenCont * 100
    def getRegionMaxCov(self, contigId): #get regions with highest coverage
        currentContigCov = self.alignData[contigId]
        if currentContigCov == []:
            return 0, None
        maxCoverage = max(currentContigCov)
        maxCovPos = [i for i, x in enumerate(currentContigCov) if x == maxCoverage]
        return maxCoverage, maxCovPos
    def getPotColapseRegions(self, n): #get potential colapse regions (high coverage)
        # Very high coverage hints at collapsed repeats in the assembly.
        maxCovs = {}
        print "[AN:] Getting maximum coverage per contig..."
        for header in self.alignData:
            maxcov, pos = self.getRegionMaxCov(header)
            print "[AN:] Contig ID:",header, "max cov:", maxcov
            maxCovs[header] = maxcov
        sortedCovs = sorted(maxCovs.iteritems(), key=operator.itemgetter(1), reverse = True)
        return sortedCovs[0:n]
class Initializer:
    """Bundle the assembly inputs: a contig FASTA and two read files.

    Attribute names (contigFile, read1, read2) are read directly by the
    rest of the pipeline, so they are kept unchanged.
    """
    def __init__(self, contigFile, readFile_1, readFile_2):
        self.contigFile, self.read1, self.read2 = (
            contigFile, readFile_1, readFile_2)
|
"""CoinGecko model"""
__docformat__ = "numpy"
import json
import logging
import os
from typing import List
import pandas as pd
from pycoingecko import CoinGeckoAPI
from openbb_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
COINS_COLUMNS_MAP = {
"Symbol": "symbol",
"Name": "name",
"Volume [$]": "total_volume",
"Market Cap": "market_cap",
"Market Cap Rank": "market_cap_rank",
"7D Change [%]": "price_change_percentage_7d_in_currency",
"24H Change [%]": "price_change_percentage_24h_in_currency",
}
PERIODS = {
"1h": "?time=h1",
"24h": "?time=h24",
"7d": "?time=d7",
"14d": "?time=d14",
"30d": "?time=d30",
"60d": "?time=d60",
"1y": "?time=y1",
}
API_PERIODS = ["14d", "1h", "1y", "200d", "24h", "30d", "7d"]
CATEGORIES = {
"trending": 0,
"most_voted": 1,
"positive_sentiment": 2,
"recently_added": 3,
"most_visited": 4,
}
GAINERS_FILTERS = ["Rank", "Symbol", "Name", "Volume", "Price", "Change"]
TRENDING_FILTERS = [
"Rank",
"Name",
"Price_BTC",
"Price_USD",
]
RECENTLY_FILTERS = [
"Rank",
"Name",
"Symbol",
"Price",
"Change_1h",
"Change_24h",
"Added",
"Url",
]
YFARMS_FILTERS = [
"Rank",
"Name",
"Value_Locked",
"Return_Year",
]
CAP_FILTERS = [
"Rank",
"Name",
"Symbol",
"Price",
"Change_1h",
"Change_24h",
"Change_7d",
"Volume_24h",
"Market_Cap",
]
DEX_FILTERS = [
"Name",
"Rank",
"Volume_24h",
"Coins",
"Pairs",
"Visits",
"Most_Traded",
"Market_Share",
]
GAINERS_LOSERS_COLUMNS = [
"Symbol",
"Name",
"Price [$]",
"Market Cap",
"Market Cap Rank",
"Volume [$]",
]
@log_start_end(log=logger)
def read_file_data(file_name: str) -> dict:
    """Load a JSON data file shipped in the package ``data`` directory.

    Parameters
    ----------
    file_name: str
        Name of the json file to load (must end with ``.json``)

    Returns
    -------
    dict
        Parsed file contents

    Raises
    ------
    TypeError
        If the file name does not have a ``.json`` extension
    """
    # Bug fix: split(".")[1] raised IndexError on extension-less names
    # and wrongly rejected dotted names like "a.b.json"; endswith()
    # handles both.
    if not file_name.endswith(".json"):
        raise TypeError("Please load json file")
    par_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    path = os.path.join(par_dir, "data", file_name)
    with open(path, encoding="utf8") as f:
        data = json.load(f)
    return data
@log_start_end(log=logger)
def get_categories_keys() -> List[str]:
    """List the category keys defined in coingecko_categories.json.

    Returns
    -------
    List[str]
        Category keys
    """
    categories = read_file_data("coingecko_categories.json")
    return [key for key in categories]
@log_start_end(log=logger)
def get_coins(
    limit: int = 250,
    category: str = "",
    sortby: str = "Symbol",
    ascend: bool = False,
) -> pd.DataFrame:
    """Get N coins from CoinGecko [Source: CoinGecko]
    Parameters
    ----------
    limit: int
        Number of top coins to grab from CoinGecko
    category: str
        Category of the coins we want to retrieve
    sortby: str
        Key to sort data
    ascend: bool
        Sort data in ascending order
    Returns
    -------
    pd.DataFrame
        N coins
    """
    # Normalise e.g. "market_cap_rank" -> "Market Cap Rank" so it can be
    # matched against COINS_COLUMNS_MAP display names.
    sortby = sortby.replace("_", " ").title()
    client = CoinGeckoAPI()
    df = pd.DataFrame()
    # Remember the requested size: the paging loop below mutates `limit`.
    table_size = limit
    # CoinGecko's API returns all coins without limit when the category is set.
    if category:
        kwargs = {
            "vs_currency": "usd",
            "order": "market_cap_desc",
            "per_page": limit,
            "sparkline": False,
            "price_change_percentage": "1h,24h,7d,14d,30d,200d,1y",
            "category": category,
        }
        data = client.get_coins_markets(**kwargs)
        df = pd.concat([df, pd.DataFrame(data)], ignore_index=True)
    else:
        # Without a category the API caps a page at 250 rows, so fetch
        # page by page until `limit` rows have been requested.
        page_size = min(limit, 250)
        page = 1
        while limit > 0:
            kwargs = {
                "vs_currency": "usd",
                "order": "market_cap_desc",
                "per_page": page_size,
                "sparkline": False,
                "price_change_percentage": "1h,24h,7d,14d,30d,200d,1y",
                "page": page,
            }
            data = client.get_coins_markets(**kwargs)
            df = pd.concat([df, pd.DataFrame(data)], ignore_index=True)
            limit -= page_size
            page += 1
    if sortby in COINS_COLUMNS_MAP:
        # Drop rows with missing volume/market cap before sorting on them.
        df = df[(df["total_volume"].notna()) & (df["market_cap"].notna())]
        df = df.sort_values(by=COINS_COLUMNS_MAP[sortby], ascending=ascend)
    df = df.astype({"market_cap_rank": "Int64"})
    return df.head(table_size)
@log_start_end(log=logger)
def get_gainers_or_losers(
    limit: int = 20,
    interval: str = "1h",
    typ: str = "gainers",
    sortby: str = "market_cap",
    ascend: bool = True,
) -> pd.DataFrame:
    """Returns data about top gainers - coins which gain the most in given period and
    top losers - coins that lost the most in given period of time. [Source: CoinGecko]
    Parameters
    ----------
    limit: int
        Num of coins to get
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see /coins/markets in https://www.coingecko.com/en/api/documentation)
    ascend: bool
        Sort data in ascending order
    interval: str
        One from {14d,1h,1y,200d,24h,30d,7d}
    typ: str
        Either "gainers" or "losers"
    Returns
    -------
    pd.DataFrame
        Top Gainers / Top Losers - coins which gain/lost most in price in given period of time.
        Columns: Symbol, Name, Volume, Price, %Change_{interval}, Url
    """
    # Normalise to the display-name form used by GAINERS_LOSERS_COLUMNS.
    sortby = sortby.replace("_", " ").title()
    if interval not in API_PERIODS:
        raise ValueError(
            f"Wrong time period\nPlease chose one from list: {API_PERIODS}"
        )
    df = get_coins(limit)
    # Gainers: biggest positive change first (descending);
    # losers: most negative change first (ascending).
    sorted_df = df.sort_values(
        by=[f"price_change_percentage_{interval}_in_currency"],
        ascending=typ != "gainers",
    )
    sorted_df = sorted_df[
        [
            "symbol",
            "name",
            "current_price",
            "market_cap",
            "market_cap_rank",
            "total_volume",
            f"price_change_percentage_{interval}_in_currency",
        ]
    ]
    # Rename the API columns to their display names.
    sorted_df = sorted_df.set_axis(
        GAINERS_LOSERS_COLUMNS + [f"Change {interval} [%]"],
        axis=1,
        copy=True,
    )
    if sortby in GAINERS_LOSERS_COLUMNS:
        sorted_df = sorted_df[
            (sorted_df["Volume [$]"].notna()) & (sorted_df["Market Cap"].notna())
        ]
        sorted_df = sorted_df.sort_values(by=sortby, ascending=ascend)
    return sorted_df
def get_gainers(
    interval: str = "1h",
    limit: int = 50,
    sortby: str = "market_cap_rank",
    ascend: bool = True,
) -> pd.DataFrame:
    """Largest Gainers - coins which gained the most over the given
    period. Thin wrapper around ``get_gainers_or_losers`` with
    ``typ="gainers"``. [Source: CoinGecko]

    Parameters
    ----------
    interval: str
        Time interval by which data is displayed. One from [1h, 24h, 7d, 14d, 30d, 60d, 1y]
    limit: int
        Number of records to display
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see /coins/markets in https://www.coingecko.com/en/api/documentation)
    ascend: bool
        Sort data in ascending order

    Returns
    -------
    pd.DataFrame
        Coins which gained the most in price over the given period.
    """
    return get_gainers_or_losers(
        limit=limit, interval=interval, typ="gainers", sortby=sortby, ascend=ascend
    )
def get_losers(
    interval: str = "1h",
    limit: int = 50,
    sortby: str = "market_cap_rank",
    ascend: bool = True,
) -> pd.DataFrame:
    """Largest Losers - coins which lost the most over the given period.
    Thin wrapper around ``get_gainers_or_losers`` with ``typ="losers"``.
    [Source: CoinGecko]

    Parameters
    ----------
    interval: str
        Time interval by which data is displayed. One from [1h, 24h, 7d, 14d, 30d, 60d, 1y]
    limit: int
        Number of records to display
    sortby: str
        Key to sort data. The table can be sorted by every of its columns. Refer to
        API documentation (see /coins/markets in https://www.coingecko.com/en/api/documentation)
    ascend: bool
        Sort data in ascending order

    Returns
    -------
    pd.DataFrame
        Coins which lost the most in price over the given period.
    """
    return get_gainers_or_losers(
        limit=limit, interval=interval, typ="losers", sortby=sortby, ascend=ascend
    )
@log_start_end(log=logger)
def get_trending_coins() -> pd.DataFrame:
    """Returns trending coins [Source: CoinGecko]

    Returns
    -------
    pd.DataFrame
        Trending coins; columns: Symbol, Name, Market Cap Rank
    """
    client = CoinGeckoAPI()
    data = client.get_search_trending()
    # Each search entry wraps the coin fields under "item".
    rows = [
        [coin["item"]["id"], coin["item"]["name"], coin["item"]["market_cap_rank"]]
        for coin in data["coins"]
    ]
    # Column header fixed: it previously read "market_cap Cap Rank",
    # an apparent find-and-replace slip.
    return pd.DataFrame(rows, columns=["Symbol", "Name", "Market Cap Rank"])
@log_start_end(log=logger)
def get_coin_list() -> pd.DataFrame:
    """List every coin known to CoinGecko. [Source: CoinGecko]

    Returns
    -------
    pd.DataFrame
        Coins available on CoinGecko.
        Columns: id, symbol, name (plus the reset integer index)
    """
    raw_coins = CoinGeckoAPI().get_coins_list()
    df = pd.DataFrame(raw_coins, columns=["id", "symbol", "name"])
    return df.reset_index()
@log_start_end(log=logger)
def get_coins_for_given_exchange(exchange_id: str = "binance", page: int = 1) -> dict:
    """Fetch one page of trading pairs for an exchange. [Source: CoinGecko]

    Parameters
    ----------
    exchange_id: str
        id of exchange
    page: int
        number of page. One page contains 100 records

    Returns
    -------
    dict
        dictionary with all trading pairs on binance
    """
    response = CoinGeckoAPI().get_exchanges_tickers_by_id(id=exchange_id, page=page)
    return response["tickers"]
@log_start_end(log=logger)
def get_mapping_matrix_for_exchange(exchange_id: str, pages: int = 12) -> dict:
    """Map exchange ticker symbols to CoinGecko coin ids. [Source: CoinGecko]

    Parameters
    ----------
    exchange_id: str
        id of exchange: binance
    pages: int
        number of pages to fetch. One page contains 100 records

    Returns
    -------
    dict
        dictionary with all coins: {"ETH" : "ethereum"}
    """
    coins_dct: dict = {}
    # BUG FIX: CoinGecko's `page` parameter is 1-indexed. The old
    # range(pages) requested page 0 (served as page 1, i.e. a duplicate)
    # and never fetched the final page.
    for page in range(1, pages + 1):
        coins = get_coins_for_given_exchange(exchange_id=exchange_id, page=page)
        for coin in coins:
            bin_symbol, gecko_id = coin["base"], coin["coin_id"]
            # Keep the first mapping seen for each symbol.
            if bin_symbol not in coins_dct:
                coins_dct[bin_symbol] = gecko_id
    return coins_dct
|
from django.contrib.auth.models import Group, User
from django.test import TestCase
from tally_ho.libs.permissions.groups import create_permission_groups, \
create_demo_users_with_groups
class TestGroups(TestCase):
    """Tests for the tally_ho permission-group creation helpers."""

    # Number of permission groups the helpers are expected to create.
    number_of_groups = 14

    def setUp(self):
        pass

    def test_create_permission_groups(self):
        # Creating the permission groups adds exactly `number_of_groups`
        # new Group rows.
        count = Group.objects.count()
        create_permission_groups()
        diff_count = Group.objects.count() - count
        self.assertEqual(diff_count, self.number_of_groups)

    def test_create_demo_users_with_groups(self):
        # One demo user is created per group, so both the Group and User
        # tables grow by `number_of_groups`.
        count = Group.objects.count()
        user_count = User.objects.count()
        password = '1234'
        create_demo_users_with_groups(password)
        diff_count = Group.objects.count() - count
        self.assertEqual(diff_count, self.number_of_groups)
        user_diff_count = User.objects.count() - user_count
        self.assertEqual(user_diff_count, self.number_of_groups)
        # The demo 'administrator' account receives the supplied password.
        user = User.objects.get(username='administrator')
        self.assertTrue(user.check_password(password))
|
import fileinput
## Write all results into a txt file
# Split the large input file into numbered chunks of 60000 lines each
# (output0.txt, output1.txt, ...).
f = open("E:/ArcGIS/ACS/national/A_python_scripts - Copy/final_US.txt", "r")
i = 1
# BUG FIX: the input is read in text mode, so the chunks must be written in
# text mode as well -- "wb" raised TypeError (str written to a binary file).
fout = open("output0.txt", "w")
for line in f:
    fout.write(line)
    i += 1
    if i % 60000 == 0:
        fout.close()
        # Integer division keeps the "%d" filenames exact in Python 3.
        fout = open("output%d.txt" % (i // 60000), "w")
fout.close()
# Close the input handle too (previously leaked).
f.close()
|
# -*- coding: utf-8 -*-
"""
Program: lab1_1.py
Author:  Alejandro Romero

Compute the tax charged on a food purchase.

Inputs:  unit price of a food item and the number of items bought.
Output:  the combined tax (base rate TAX plus surcharge TAX_ONE)
         applied to the purchase total.
"""
# Tax-rate constants
TAX = 0.16       # base tax rate
TAX_ONE = 0.03   # additional tax rate

# Keyboard input (prompts kept in Spanish for the end user)
food = int(input('Ingrese el valor del Alimento'))
amount_food = int(input('Ingrese la cantidad de Alimentos'))

# Computation: purchase total times the combined tax rate
total = food * amount_food * (TAX + TAX_ONE)

# Output
print("El total de los alimentos es: ", total)
|
"""
Creating credentials for Mitto instance.
"""
import os
import sys
import uuid
from dotenv import load_dotenv
from mitto_sdk import Mitto
load_dotenv()
# A random suffix keeps each generated credential name unique.
UUID = str(uuid.uuid4())
NAME = f"creds_{UUID}".replace("-", "_")
TYPE = "sql"
BASE_URL = os.getenv("MITTO_BASE_URL")
API_KEY = os.getenv("MITTO_API_KEY")
NEW_CREDS = {
    "name": NAME,
    "type": TYPE,
    "data": {}
}


def main(new_creds=NEW_CREDS):
    """Create the credentials on the Mitto instance and return the result."""
    client = Mitto(base_url=BASE_URL, api_key=API_KEY)
    return client.create_credentials(creds=new_creds)


if __name__ == "__main__":
    sys.exit(main(new_creds=NEW_CREDS))
|
from django.conf.urls import url
from ColorBlind_App import views
# URL routes for the ColorBlind_App views.
urlpatterns = [
    url(r'^$',views.colors,name='colors'),
    # NOTE(review): no trailing '$', so any path *starting* with "result"
    # matches -- confirm this is intentional.
    url(r'^result', views.results, name='results'),
]
|
from collections import defaultdict, Counter
from itertools import permutations as p
def factorize(n):
    """Return the prime factorization of n as a defaultdict {prime: exponent}.

    Trial division by every i up to sqrt(n); whatever remains above 1 after
    the loop is itself prime.
    """
    d = defaultdict(int)
    for i in range(2, int(n**0.5)+1):
        while n % i == 0:
            d[i] += 1
            n //= i
        # BUG FIX: the old `if not n: break` was dead code -- n can never
        # reach 0 by exact division.  Stop once n is fully factored instead.
        if n == 1:
            break
    if n > 1:
        d[n] += 1
    return d
# Accumulate the prime factorization of N! by summing factorizations of 1..N.
N = int(input())
d = Counter()
for i in range(1, N+1):
    d += factorize(i)
vl = d.values()
# Count ways to pick prime exponents giving a fixed divisor count:
# patterns (2,4,4), (2,24), (4,14) and (74) over the available exponents.
# The //2 corrects permutations double-counting the two symmetric ">=4" slots.
# NOTE(review): thresholds suggest the "exactly 75 divisors" problem
# (75 = 3*5*5 = 3*25 = 5*15) -- confirm against the original statement.
ans = sum(1 for i, j, k in p(vl, 3) if i>=2 and j>=4 and k>=4)//2
ans += sum(1 for i, j in p(vl, 2) if i>=2 and j>=24)
ans += sum(1 for i, j in p(vl, 2) if i>=4 and j>=14)
ans += sum(1 for i in vl if i>=74)
print(ans)
"""
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
# Load the text and call records once at module import; each record is a
# list of CSV fields (caller, callee, ...).
with open('texts.csv', 'r') as f:
    reader = csv.reader(f)
    texts = list(reader)
with open('calls.csv', 'r') as f:
    reader = csv.reader(f)
    calls = list(reader)
"""
TASK 3:
(080) is the area code for fixed line telephones in Bangalore.
Fixed line numbers include parentheses, so Bangalore numbers
have the form (080)xxxxxxx.)
Part A: Find all of the area codes and mobile prefixes called by people
in Bangalore.
- Fixed lines start with an area code enclosed in brackets. The area
codes vary in length but always begin with 0.
- Mobile numbers have no parentheses, but have a space in the middle
of the number to help readability. The prefix of a mobile number
is its first four digits, and they always start with 7, 8 or 9.
- Telemarketers' numbers have no parentheses or space, but they start
with the area code 140.
Print the answer as part of a message:
"The numbers called by people in Bangalore have codes:"
<list of codes>
The list of codes should be printed out one per line in lexicographic order with no duplicates.
"""
# Part A: collect the distinct area codes / mobile prefixes dialled by
# Bangalore fixed lines.
area_code = set()
for record in calls:
    if record[0].startswith('(080)'):  # caller from bangalore
        callee = record[1]
        if callee.find(' ') == -1:  # not a mobile
            if not callee.startswith('('):
                # Telemarketer: no brackets, starts with area code 140.
                area_code.add(callee[:3])
            else:
                # BUG FIX: this add previously ran unconditionally, so
                # telemarketer numbers also contributed the garbage slice
                # callee[1:-1] (find(')') returns -1 for them). Only fixed
                # lines carry a bracketed area code.
                area_code.add(callee[1:callee.find(')')])
        else:
            area_code.add(callee[:4])  # mobile prefix: first four digits
print('The numbers called by people in Bangalore have codes:', end='\n' * 2)
# sorting in lexicographic order
for item in sorted(area_code):
    print(item)
"""
Part B: What percentage of calls from fixed lines in Bangalore are made
to fixed lines also in Bangalore? In other words, of all the calls made
from a number starting with "(080)", what percentage of these calls
were made to a number also starting with "(080)"?
Print the answer as a part of a message::
"<percentage> percent of calls from fixed lines in Bangalore are calls
to other fixed lines in Bangalore."
The percentage should have 2 decimal digits
"""
print()
# Part B: of all calls placed from Bangalore fixed lines, what percentage
# went to other Bangalore fixed lines?
total_calls = list()
for record in calls:
    if record[0].startswith('(080)'):  # caller from bangalore
        if record[1].startswith('(080)'):  # calls made to bangalore
            total_calls.append('080')  # quick way to identify 080 in the total calls list
        else:
            total_calls.append(record[1])
# BUG FIX: the message printed "percent of call"; the task spec (docstring
# above) requires "percent of calls from fixed lines in Bangalore ...".
print("{:.2f} percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore."
      .format(total_calls.count('080') / len(total_calls) * 100))
|
# uncompyle6 version 3.3.5
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.3 (default, Jun 24 2019, 04:54:02)
# [GCC 9.1.0]
# Embedded file name: .\Disturbance.py
# Size of source mod 2**32: 56 bytes
class Disturbance:
    """Empty placeholder class (decompiled from Disturbance.py); carries no
    state or behavior yet."""

    def __init__(self):
        # Nothing to initialize.
        pass
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-09 16:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the DetalleOrden (order line) and
    # OrdenPedido (purchase order) tables for the laboratorio app.

    dependencies = [
        ('laboratorio', '0004_bodega_unidad_medida'),
    ]
    operations = [
        migrations.CreateModel(
            name='DetalleOrden',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cantidad', models.DecimalField(decimal_places=8, max_digits=11, null=True)),
                ('fecha_movimiento', models.DateTimeField(null=True)),
                ('nivel_bodega_destino', models.IntegerField(null=True)),
                ('seccion_bodega_destino', models.IntegerField(null=True)),
                ('bodega', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='laboratorio.Bodega')),
                ('estado', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='do_estado', to='laboratorio.Tipo')),
                ('producto', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='laboratorio.Producto')),
                ('transaccion_inventario', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='do_transaccion', to='laboratorio.TransaccionInventario')),
            ],
        ),
        migrations.CreateModel(
            name='OrdenPedido',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha_peticion', models.DateTimeField(null=True)),
                ('fecha_recepcion', models.DateTimeField(null=True)),
                ('observaciones', models.CharField(max_length=500)),
                ('notas_aprobacion', models.CharField(max_length=500)),
                ('estado', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='op_estado', to='laboratorio.Tipo')),
                ('proveedor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='op_proveedor', to=settings.AUTH_USER_MODEL)),
                ('usuario_aprobacion', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='op_usuario_aprobacion', to=settings.AUTH_USER_MODEL)),
                ('usuario_creacion', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='op_usuario_creacion', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import argparse
import os
import fnmatch
__description__ = 'add template'
def main():
    """Create a C/C++ source or header at <path>/<name>, pre-filled with a
    doxygen header, an include guard, and (for .c/.cpp) init/cyclic stubs.

    Exits the process on any validation failure: missing path, file already
    present anywhere under the cwd, default name, or unsupported extension.
    """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument('-p', '--path', type=str, default="src",
                        help=('path to where will add file'
                             '(default: %(default)s)'))
    parser.add_argument('-n', '--name', type=str, default="template",
                        help=('name file with extens=cion .c .h .cpp .hpp'
                             '(default: %(default)s)'))
    args = parser.parse_args()
    # ---- validation ----
    if not os.path.exists(args.path):
        print("path not exist")
        exit()
    if os.path.exists(args.path + '/' +args.name):
        print("file exist")
        exit()
    # Refuse creation if a same-named file exists anywhere below the cwd.
    for root, dirnames, filenames in os.walk('./'):
        for filename in fnmatch.filter(filenames, '*.*'):
            if filename == args.name:
                print("file exist", os.path.join(root, filename))
                exit()
    if args.name == "template":
        print("name not set")
        exit()
    # ---- derive the file kind from the extension ----
    # type_source: guard suffix ("C"/"H"/"CPP"/"HPP"); len_t: extension length.
    if args.name[-2] == '.':
        if args.name[-1] == 'c':
            type_source = "C"
            len_t = 1
        elif args.name[-1] == 'h':
            type_source = "H"
            len_t = 1
        else:
            print("type not support")
            exit()
    else:
        if args.name[-3:] == 'cpp':
            type_source = "CPP"
            len_t = 3
        elif args.name[-3:] == 'hpp':
            type_source = "HPP"
            len_t = 3
        else:
            print("type not support")
            exit()
    # ---- doxygen header and include-guard open ----
    file_template = open(args.path+'/'+args.name,'w', encoding="utf-8")
    file_template.write("/**\n")
    file_template.write(" * @file "+args.name+"\n")
    file_template.write(" * @author Shoma Gane <shomagan@gmail.com>\n")
    file_template.write(" * \n")
    file_template.write(" * @defgroup "+args.path+"\n")
    file_template.write(" * @ingroup "+args.path+"\n")
    file_template.write(" * @version 0.1 \n")
    file_template.write(" * @brief TODO!!! write brief in \n")
    file_template.write(" */\n")
    file_template.write("#ifndef "+ args.name[:-(len_t+1)].upper() +"_"+ type_source+"\n")
    file_template.write("#define "+ args.name[:-(len_t+1)].upper() +"_"+ type_source + " 1"+"\n")
    file_template.write(" \n")
    # fb_name: the stem (name without extension), used in all identifiers.
    fb_name = args.name[:-(len_t+1)]
    if type_source == "H":
        # Header body: extern "C" wrapper plus input/local/output register
        # structs and the init/cyclic function declarations.
        # (Doubled braces in the templates below escape to single braces.)
        file_template.write("/*add includes below */\n")
        file_template.write("#include \"type_def.h\"\n\n")
        file_template.write("/*add includes before */\n")
        file_template.write("#ifdef __cplusplus \n")
        file_template.write("    extern \"C\" {\n")
        file_template.write("#endif\n")
        file_template.write("/*add functions and variable declarations below */\n")
        file_template.write("typedef struct{\n\n")
        file_template.write("}}{}_input_t;\n".format(fb_name))
        file_template.write("typedef union{\n\n")
        file_template.write("}}{}_local_t;\n\n".format(fb_name))
        file_template.write("typedef struct{\n\n")
        file_template.write("}}{}_output_t;\n\n".format(fb_name))
        file_template.write("typedef struct{\n")
        file_template.write("    {}_input_t input;\n".format(fb_name))
        file_template.write("    {}_local_t local;\n".format(fb_name))
        file_template.write("    {}_output_t output;\n".format(fb_name))
        file_template.write("}}{}_regs_t;\n\n".format(fb_name))
        file_template.write("/**\n")
        file_template.write(" * @brief {} - set default value if neccessery\n".format(fb_name))
        file_template.write(" * @param regs\n")
        file_template.write(" * @return\n")
        file_template.write(" */\n")
        file_template.write("int {}_init({}_regs_t * regs);\n".format(fb_name,fb_name))
        file_template.write("/**\n")
        file_template.write(" * @brief {} cyclic calling function\n".format(fb_name))
        file_template.write(" * @param regs\n")
        file_template.write(" * @return\n")
        file_template.write(" */\n")
        file_template.write("int {}({}_regs_t * regs);\n".format(fb_name,fb_name))
        file_template.write("\n")
        file_template.write("/*add functions and variable declarations before */\n")
        file_template.write("#ifdef __cplusplus\n")
        file_template.write("}\n")
        file_template.write("#endif\n")
    else:
        # Source body: include the matching header and emit stub definitions
        # for the init and cyclic functions.
        file_template.write("#include \"{}.h\"\n".format(fb_name))
        file_template.write("int {}_init({}_regs_t * regs){{\n".format(fb_name,fb_name))
        file_template.write("    int res = 0;\n")
        file_template.write("\n")
        file_template.write("    return res;\n")
        file_template.write("}\n")
        file_template.write("int {}({}_regs_t * regs){{\n".format(fb_name,fb_name))
        file_template.write("    int res = 0;\n")
        file_template.write("\n")
        file_template.write("    return res;\n")
        file_template.write("}\n")
    # Close the include guard for every file kind.
    file_template.write("#endif "+"//"+ fb_name.upper() +"_"+ type_source+"\n")
    file_template.close()
if __name__ == "__main__":
    # add template in project
    # (FIX: this note used to be a bare string literal -- a no-op statement
    # rather than a comment.)
    main()
|
__author__ = "Costas Bakas"
__copyright__ = "Copyright 2018, http request"
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
import urllib.request
import codecs
import os
import operator
import requests
import json
import createRequest_ui
class GetTrans(QDialog, createRequest_ui.Ui_window01):
    """PyQt4 dialog that POSTs a JSON-RPC request to the configured URL and
    shows the HTTP status code and response body in the result pane."""
    def __init__(self):
        QDialog.__init__(self)
        # initial value
        self.setupUi(self)
        self.setUrl.setText( "https://gurujsonrpc.appspot.com" )
        self.processBtn.clicked.connect( self.process )
        self.closeBtn.clicked.connect(self.close )
    def process (self) :
        ## main program
        # If the message box is non-empty, a canned guru.test request is sent
        # (the typed text is replaced); otherwise the (empty) text is POSTed
        # as-is.
        resp = ""
        data = ""
        x = 1
        print( "value of " + str(self.setMsg.toPlainText()) )
        if self.setMsg.toPlainText() :
            print ("not empty message")
            # Single-iteration loop; `x` doubles as the JSON-RPC request id.
            for x in range( x, x + 1 ):
                data = { 'method': 'guru.test', 'params': ['Guru'], 'id': 123 }
                data['id'] = x
                print( data )
                self.setMsg.setText(str(data))
                r = requests.post( self.setUrl.text(), json=data )
                print( "response " )
                print( "-----" )
                print( x, r.status_code, r.text )
                print( "-----" )
                try:
                    json_data = json.loads( r.text )
                    print ( json_data )
                    #print( json_data['transactionRequestError']['errors']['code'] )
                    #print( json_data.get( 'transactionRequestError' ) )
                except ValueError as e:
                    # Non-JSON response: report and abort this request.
                    print( 'invalid json: %s' % e )
                    return None # or: raise
                x += 1
                # Accumulate status + body for the result pane.
                resp = resp + str(r.status_code) + os.linesep
                resp = resp + str(r.text) + os.linesep
                self.setResult.setText(str(resp))
        else :
            print ("empty message")
            data = self.setMsg.toPlainText()
            print ( str(data))
            r = requests.post( self.setUrl.text(), json=data )
            print("res="+str(r.text))
            try:
                json_data = json.loads( r.text )
                #print( json_data['transactionRequestError']['errors']['code'] )
                #print( json_data.get( 'transactionRequestError' ) )
            except ValueError as e:
                print( 'invalid json: %s' % e )
                return None # or: raise
            resp = resp + str(r.status_code) + os.linesep
            resp = resp + str(r.text) + os.linesep
            self.setResult.setText(str(resp))
        QMessageBox.information(self, "information", "process completed successfully")
# Start the Qt event loop with a single request dialog.
app = QApplication(sys.argv)
dialog = GetTrans()
dialog.show()
app.exec_()
## end ##
import asyncio
async def tick():
    """Print 'Tick', sleep one second, print 'Tock'; return 'tick-tock'."""
    print('Tick')
    await asyncio.sleep(1)
    print('Tock')
    return 'tick-tock'
async def main():
    """Run two tick() tasks concurrently and report their results."""
    t1 = asyncio.create_task(tick(), name='tick1')
    t2 = asyncio.ensure_future(tick())  # Equivalent to t1.
    # await t1
    # await t2
    # When working with a set of tasks, there is an API for that.
    results = await asyncio.gather(t1, t2)
    print(f'{t1.get_name()}. Done= {t1.done()}')
    print(f'{t2.get_name()}. Done= {t2.done()}')
    for result in results:
        print(result)
if __name__ == '__main__':
    asyncio.run(main())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceOperationDcsMerchantqrcodeQueryResponse(AlipayResponse):
    """Response model for alipay.commerce.operation.dcs.merchantqrcode.query.

    Auto-generated SDK style: private fields exposed via properties and
    populated from the parsed JSON payload in parse_response_content.
    """
    def __init__(self):
        super(AlipayCommerceOperationDcsMerchantqrcodeQueryResponse, self).__init__()
        self._apply_merchant_pid = None
        self._role_id = None
        self._role_ids = None
    @property
    def apply_merchant_pid(self):
        return self._apply_merchant_pid
    @apply_merchant_pid.setter
    def apply_merchant_pid(self, value):
        self._apply_merchant_pid = value
    @property
    def role_id(self):
        return self._role_id
    @role_id.setter
    def role_id(self, value):
        self._role_id = value
    @property
    def role_ids(self):
        return self._role_ids
    @role_ids.setter
    def role_ids(self, value):
        # Only list values are accepted; any other type leaves the field
        # unchanged (silently), matching the generated-SDK convention.
        if isinstance(value, list):
            self._role_ids = list()
            for i in value:
                self._role_ids.append(i)
    def parse_response_content(self, response_content):
        # Populate fields from the keys present in the parsed payload.
        response = super(AlipayCommerceOperationDcsMerchantqrcodeQueryResponse, self).parse_response_content(response_content)
        if 'apply_merchant_pid' in response:
            self.apply_merchant_pid = response['apply_merchant_pid']
        if 'role_id' in response:
            self.role_id = response['role_id']
        if 'role_ids' in response:
            self.role_ids = response['role_ids']
|
import os
import pandas as pd
def find_outdir():
    """Return the first existing candidate output directory.

    Raises
    ------
    FileNotFoundError
        If no candidate directory exists.  (The old code fell off the loop
        and hit NameError on the unbound local `outdir`; callers already
        wrap this in try/except with a default, so raising a real exception
        is backward-compatible.)
    """
    # get the correct directories
    dirs = ["./output/"]
    for d in dirs:
        if os.path.exists(d):
            return d
    raise FileNotFoundError(f"none of the candidate output dirs exist: {dirs}")
def summarize_train_test_split():
    """Reconcile labeled/embedded note files against conc_notes_df, split
    them into train (months < 13) and test (months > 12) sets, merge in
    "rough" (uncurated) test notes from notes_output, de-duplicate, and
    write the official train/test CSV lists into the output directory.
    """
    # from utils.organization import find_outdir
    # def table_1_demos():
    try:
        outdir = find_outdir()
    except:
        # Best-effort fallback to the default output location.
        outdir = "./output/"
    # load all notes_labeled_embedded (patients who NOT culled)
    nle = [i for i in os.listdir(f"{outdir}notes_labeled_embedded_SENTENCES/")
           if '.csv' in i]
    cndf = pd.read_pickle(f"{outdir}conc_notes_df.pkl")
    # Month index counted from the earliest year present in the data.
    cndf['month'] = cndf.LATEST_TIME.dt.month + (
        cndf.LATEST_TIME.dt.year - min(cndf.LATEST_TIME.dt.year)) * 12
    # generate 'note' label (used in webanno and notes_labeled_embedded)
    cndf.month = cndf.month.astype(str)
    uidstr = ("m" + cndf.month.astype(
        str) + "_" + cndf.PAT_ID + ".csv").tolist()
    # conc_notes_df contains official list of eligible patients
    notes_in_cndf = [i for i in nle if
                     "_".join(i.split("_")[-2:]) in uidstr]
    # make df with labels from each note
    # Filenames end in ..._<batch>_m<month>_<PAT_ID>.csv
    nle_pat_id = [str(i).split("_")[-1].split(".")[0] for i in notes_in_cndf]
    nle_month = [int(i.split("_")[-2][1:]) for i in notes_in_cndf]
    nle_batch = [str(i).split("_")[-4] + "_" + str(i).split("_")[-3] for i in notes_in_cndf]
    nle_batch = [str(i).split("_")[1] if 'enote' in i else i for i in nle_batch]
    nle_filename = [i for i in notes_in_cndf]
    nle = pd.DataFrame(dict(PAT_ID=nle_pat_id,
                            month=nle_month,
                            batch=nle_batch,
                            filename=nle_filename))
    # load dropped patients
    dropped = [i for i in
               os.listdir(f"{outdir}notes_labeled_embedded_SENTENCES/dropped_notes/")
               if '.csv' in i]
    dropped_pat_id = [str(i).split("_")[-1].split(".")[0] for i in dropped]
    dropped_month = [int(i.split("_")[-2][1:]) for i in dropped]
    dropped_batch = [str(i).split("_")[-4] + "_" + str(i).split("_")[-3] for i in dropped]
    dropped_batch = [str(i).split("_")[1] if 'enote' in i else i for i in dropped_batch]
    dropped_filename = [i for i in dropped]
    dropped = pd.DataFrame(dict(PAT_ID=dropped_pat_id,
                                month=dropped_month,
                                batch=dropped_batch,
                                filename=dropped_filename))
    # double check that the dropped patients are dropped
    # note, good to check with this merge -- lots of other flawed strategies out there
    nle_keepers = nle.merge(dropped.drop_duplicates(),
                            on=['PAT_ID', 'month', 'batch', 'filename'],
                            how='left',
                            indicator=True)
    nle_keepers = nle_keepers.loc[nle_keepers._merge == 'left_only']\
        .drop(columns=['_merge'])
    # All ingested (curated, labeled, & embedded) training patients and months
    nle_train = nle_keepers.loc[nle_keepers.month < 13]
    # All ingested (curated, labeled, & embedded) test patients and months
    nle_test = nle_keepers.loc[nle_keepers.month > 12]
    # load test notes from notes_output. These notes are "rough" (have not been
    # curated, labeled, or embedded)
    # batch_1 is in notes_output/batch_01 and notes are labeled batch_01
    # batch_2 is in notes_output/batch_02 and notes are labeled batch_02
    # test_batch_1 is in notes_output/batch_03 and notes are labeled batch_03_m[13-24]...
    # batch_3 is in notes_output/batch_04 and notes are labeled batch_03_m[1-11]...
    # test_batch_2 is in notes_output/batch_05 and notes are labeled batch_05
    # batch_6 is in notes_output/batch_06 and notes are labeled batch_6
    # test_batch_3 is in notes_output/batch_07 and notes are labeled batch_07
    # test_batch_4 is in notes_output/batch_08 and notes are labeled batch_08
    # test_batch_5 is in notes_output/batch_09 and notes are labeled batch_09
    # test_batch_6 is in notes_output/batch_10 and notes are labeled batch_10
    # not sure if batch 7 will get curated:
    # test_batch_7 is in notes_output/batch_11 and notes are labeled batch_11
    batches_with_annotated_test_notes = ['batch_01', 'batch_02', 'batch_03',
                                         'batch_05', 'batch_06', 'batch_07',
                                         'batch_08', 'batch_09', 'batch_10']
    notes_output_test = []
    for batch in batches_with_annotated_test_notes:
        notes = os.listdir(f"{outdir}notes_output/{batch}")
        notes = [i for i in notes if 'batch' in i and 'alternate' not in i]
        PAT_IDs = [str(i).split("_")[-1].split(".")[0] for i in notes]
        months = [int(i.split("_")[-2][1:]) for i in notes]
        label_batch = [str(i).split("_")[-4] + "_" + str(i).split("_")[-3] for i in notes]
        fname = [i for i in notes]
        pat_batch = pd.DataFrame(dict(PAT_ID=PAT_IDs,
                                      month=months,
                                      #notes_output_batch=batch,
                                      batch=label_batch,
                                      filename=fname))
        # Only months > 12 belong to the test period.
        pat_batch = pat_batch[pat_batch.month > 12]
        notes_output_test.append(pat_batch)
    notes_output_test = pd.concat(notes_output_test).reset_index(drop=True)
    # drop batches that are in nle_test (these have been labeled & embedded so we
    # want to keep track of that version, which is the gold standard)
    rough_test_notes = pd.concat(
        [nle_test,
         notes_output_test[~notes_output_test.batch.isin(nle_test.batch)]]
    ).reset_index(drop=True)
    # find duplicates
    complete_dups = rough_test_notes.loc[rough_test_notes.duplicated(
        subset=['PAT_ID', 'month'],
        keep=False)].sort_values(['PAT_ID', 'month'])
    # remove the duplicate in batch_06
    remove_dup = complete_dups[complete_dups.batch == 'batch_06']
    rough_test_notes = rough_test_notes.merge(remove_dup.drop_duplicates(),
                                              on=['PAT_ID', 'month', 'batch', 'filename'],
                                              how='left',
                                              indicator=True)
    rough_test_notes = rough_test_notes.loc[rough_test_notes._merge
                                            == 'left_only'].drop(columns=['_merge'])
    # report number in each batch
    print(rough_test_notes.groupby('batch').agg(test_batch_count = ('PAT_ID', 'count')))
    print(nle_train.groupby('batch').agg(train_batch_count = ('PAT_ID', 'count')))
    rough_test_notes.to_csv(f"{outdir}notes_labeled_embedded_SENTENCES/notes_test_rough.csv")
    nle_train.to_csv(f"{outdir}notes_labeled_embedded_SENTENCES/notes_train_official.csv")
if __name__ == "__main__":
    pass
|
from __future__ import print_function
def num_bit_swap(a, b):
    """Return how many bit positions differ between a and b.

    XOR marks differing positions with 1s; counting the '1' characters of
    the binary representation counts them (assumes non-negative inputs).
    """
    return bin(a ^ b).count('1')
def num_bit_swap2(a, b):
    """Return how many bit positions differ between a and b.

    Kernighan's trick: c & (c - 1) clears the lowest set bit, so the loop
    body runs exactly once per set bit of a ^ b.
    """
    c = a ^ b
    total = 0
    while c:
        c &= c - 1
        total += 1
    return total
def _test():
    """Placeholder for automated tests."""
    pass


def _print():
    """Demo the two bit-distance implementations on one sample pair."""
    a = 0b1001101
    b = 0b1110110
    print(num_bit_swap(a, b))
    print(num_bit_swap2(a, b))


if __name__ == '__main__':
    _test()
    _print()
|
# -*- coding:utf-8 -*-
class Solution:
    def FirstNotRepeatingChar(self, s):
        """Return the first character of s that occurs exactly once,
        or '' if there is none.

        BUG FIX: the old reversed-scan kept overwriting `res` with every
        newly-seen character and never cleared it when that character later
        repeated, so e.g. 'google' returned 'o' instead of 'l'.
        Two passes: count occurrences, then return the first unique char.
        """
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        for ch in s:
            if counts[ch] == 1:
                return ch
        return ''
if __name__ == '__main__':
    # Demo run: print the first non-repeating character of the sample word.
    s = 'google'
    sol = Solution()
    print(sol.FirstNotRepeatingChar(s))
from buildrzapi.viewsets import ParcelleViewset
from rest_framework import routers
# DRF router exposing the Parcelle CRUD endpoints.
router = routers.DefaultRouter()
router.register('parcelle',ParcelleViewset)
# e.g. localhost:<port>/api/parcelle/5
# Supports GET, POST, PUT, DELETE (list / retrieve / ...)
# For each test case read x and y, then classify the gap z = y - x.
# NOTE(review): the step rules below are inferred from the branches only --
# confirm against the original problem statement.
for i in range(int(input())):
    ar = input().split()
    x = int(ar[0])
    y = int(ar[1])
    z = y - x
    if z > 0:
        # Forward gap: multiple of 4 -> 3, otherwise even -> 2, odd -> 1.
        if z % 4 == 0:
            print(3)
        elif z % 2 == 0:
            print(2)
        else:
            print(1)
    elif z < 0:
        # Backward gap: even -> 1, odd -> 2.
        print(1 if abs(z) % 2 == 0 else 2)
    else:
        print(0)
|
from math import sqrt
import pandas as pd
# User-profile matrix loaded once at import; columns 4:23 hold the 19
# per-user feature scores consumed below.
new_user_profiles = pd.read_csv('new_user_profiles.csv')
def pearson_correlation(person1_id, person2_id):
    """Pearson correlation between two users' 19 feature scores
    (columns 4:23 of new_user_profiles). Returns 0 when undefined.

    BUG FIXES vs the original:
    * iteritems() returned one-shot iterators that the first loops
      exhausted, so the later square-sum loops never ran and both square
      sums were always 0 -- the features are now materialized into lists
      once and reused.
    * the second square-sum indexed pow(p2_feature_score[2], 2) on a
      (label, value) 2-tuple; the value is at index 1.
    (Also uses Series.items(), since iteritems() was removed from pandas.)
    """
    person1_profile = None
    person2_profile = None
    # get user profiles
    for index, profile in new_user_profiles.iterrows():
        if profile['user id'] == person1_id:
            person1_profile = profile
        if profile['user id'] == person2_id:
            person2_profile = profile
    # Materialize the 19 feature scores as plain lists so they can be
    # iterated more than once.
    person1_features = [score for _, score in person1_profile[4:23].items()]
    person2_features = [score for _, score in person2_profile[4:23].items()]
    n = 19
    # Sums, square sums and cross-product sum for the Pearson formula.
    person1_features_sum = sum(person1_features)
    person2_features_sum = sum(person2_features)
    person1_square_features_sum = sum(pow(v, 2) for v in person1_features)
    person2_square_features_sum = sum(pow(v, 2) for v in person2_features)
    product_sum_of_both_users = sum(
        a * b for a, b in zip(person1_features, person2_features))
    # Calculate the pearson score
    numerator_value = product_sum_of_both_users - (
        person1_features_sum * person2_features_sum / n)
    denominator_value = sqrt(
        (person1_square_features_sum - pow(person1_features_sum, 2) / n) * (
            person2_square_features_sum - pow(person2_features_sum, 2) / n))
    if denominator_value == 0:
        return 0
    return numerator_value / denominator_value
# print(pearson_correlation(113,134))
def user_similarity_by_features(person_id):
    """Rank every other user by Pearson similarity to `person_id`.

    Returns a list of [score, other_user_id] pairs, highest score first.
    """
    # similar_users = []
    scores = []
    for i, other_person in new_user_profiles.iterrows():
        if other_person['user id'] != person_id:
            sim_score = pearson_correlation(person_id, other_person['user id'])
            scores.append([sim_score, other_person['user id']])
            print(other_person['user id'])
    # Sort the similar persons so that highest scores person will appear at the first
    scores.sort()
    scores.reverse()
    # for user in user_data:
    #
    #     if user['user']['name'] == scores[0][1]:
    #         print(user)
    #     if user['user']['name'] == person:
    #         print(user)
    # print(scores[0][1])
    # highest_value = float(scores[0][0])
    # print(len(scores))
    # for j in range(0, len(scores)):
    #     similar_user_model = {'user': {'name': None, 'score': None}}
    #     similar_user_model['user']['name'] = scores[j][1]
    #     similar_user_model['user']['score'] = scores[j][0]
    #
    #     similar_users.append(similar_user_model)
    return scores
# Demo: rank users similar to user 113.
print(user_similarity_by_features(113))
|
import folium as fo #pip install folium
# Build a world map with one marker layer.
map=fo.Map()  # NOTE(review): shadows the builtin `map`
x=fo.FeatureGroup(name="My Map")
# NOTE(review): folium color names are usually lowercase ('black');
# confirm 'Black' renders as intended.
x.add_child(fo.Marker(location=[8.407449,77.709568],popup='My point',icon=fo.Icon(color='Black')))
map.add_child(x)
# NOTE(review): the map is never saved or rendered -- presumably a
# map.save("map.html") is missing; confirm.
# Test script (checks TensorFlow device placement and OpenCV image I/O)
# https://zhuanlan.zhihu.com/p/37086409
import tensorflow as tf
import cv2
# Creates a graph.
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
# (TF1-style API: logs which device each op runs on.)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
# Load and display a sample image; the window closes on any key press.
img = cv2.imread("img/demo2.jpg")
# img = cv2.imread("D:/ComputerVision/FaceMaskDetection/img/demo2.jpg")
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# BGR -> RGB conversion; the result is unused here -- presumably intended
# for later processing.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-27 07:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens the choice sets of the Application
    # model's customer_status and processing_status fields.

    dependencies = [
        ('wildlifecompliance', '0122_merge_20190227_1507'),
    ]
    operations = [
        migrations.AlterField(
            model_name='application',
            name='customer_status',
            field=models.CharField(choices=[('draft', 'Draft'), ('under_review', 'Under Review'), ('amendment_required', 'Amendment Required'), ('accepted', 'Accepted'), ('partially_approved', 'Partially Approved'), ('declined', 'Declined')], default='draft', max_length=40, verbose_name='Customer Status'),
        ),
        migrations.AlterField(
            model_name='application',
            name='processing_status',
            field=models.CharField(choices=[('draft', 'Draft'), ('with_officer', 'With Officer'), ('with_assessor', 'With Assessor'), ('with_assessor_conditions', 'With Assessor (Conditions)'), ('with_approver', 'With Approver'), ('renewal', 'Renewal'), ('licence_amendment', 'Licence Amendment'), ('awaiting_applicant_response', 'Awaiting Applicant Response'), ('awaiting_assessor_response', 'Awaiting Assessor Response'), ('awaiting_responses', 'Awaiting Responses'), ('ready_for_conditions', 'Ready for Conditions'), ('ready_to_issue', 'Ready to Issue'), ('approved', 'Approved'), ('partially_approved', 'Partially Approved'), ('declined', 'Declined'), ('discarded', 'Discarded'), ('under_review', 'Under Review')], default='draft', max_length=30, verbose_name='Processing Status'),
        ),
    ]
|
from .models import Post
from django.contrib import admin
@admin.register(Post)
class postAdmin(admin.ModelAdmin):
    """Admin registration for Post.

    `icon` is read by material-design admin themes -- NOTE(review): plain
    Django admin ignores this attribute; confirm the theme in use.
    """
    icon = '<i class="material-icons">chat_bubble_outline</i>'
|
from flask_restful import Resource
from flask_restful import reqparse, fields, marshal_with
from datetime import datetime
from model import challenge_datasource as datasource
import date_util as dateutil
# marshaller
# Output field layouts for flask_restful's marshal_with decorator.
day_field = {
    'day': fields.DateTime(dt_format='rfc822'),
    'done': fields.Boolean
}
challenge_fields = {
    'id': fields.Integer,
    'title': fields.String,
    'start_date': fields.DateTime(dt_format='rfc822'),
    'end_date': fields.DateTime(dt_format='rfc822'),
    'days': fields.List(fields.Nested(day_field))
}
# request parser
# Dates arrive as ISO 'YYYY-MM-DD' strings in the JSON request body.
parser = reqparse.RequestParser()
parser.add_argument('title', location = 'json', help = 'Challenge title')
parser.add_argument('start_date', type = lambda x: datetime.strptime(x, '%Y-%m-%d'), location = 'json', help = 'Challenge start date')
parser.add_argument('end_date', type = lambda x: datetime.strptime(x, '%Y-%m-%d'), location = 'json', help = 'Challenge end date')
class Challenges(Resource):
    """Collection resource: list all challenges (GET) / create one (POST)."""
    @marshal_with(challenge_fields, envelope='challenges')
    def get(self):
        # return all challenges
        return datasource.challenges
    @marshal_with(challenge_fields, envelope='challenge')
    def post(self):
        """Create a challenge; pre-computes its per-day checklist when both
        dates are supplied. Returns the new challenge with HTTP 201."""
        args = parser.parse_args()
        id = datasource.get_next_id()
        # create challenge object
        if args['start_date'] is not None and args['end_date'] is not None:
            days = dateutil.init_days(args['start_date'], args['end_date'])
        else:
            print("days empty")
            days = []
        challenge = {
            "id": id,
            "title": args['title'],
            "start_date": args['start_date'],
            "end_date": args['end_date'],
            "days": days
        }
        # add to challenges
        datasource.challenges.append(challenge)
        return challenge, 201
|
# Report which of the three fixed values is the largest.
x = 20
y = 45
z = 23
if x > y and x > z:
    print("x is greater")
elif z > y:
    # x is not the maximum, so it is whichever of y/z is larger.
    print("z is greater")
else:
    print("y is greater")
# Nested lists: [snacks], [meals], [drinks]; indexed as [group][item].
food_and_drinks = [['jafa', 'bananica', 'plazma'], ['sarma', 'pasulj', 'riba'], ['vino', 'sok', 'pivo', 'rakija']]
print(food_and_drinks[0][2])
print(food_and_drinks[0][0])
print(food_and_drinks[2][1])
print(food_and_drinks[1][0])
|
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
import json
from .models import MessageBody, ChatRoom
from django.contrib.auth.models import User
from datetime import datetime
class ChatCustomer(WebsocketConsumer):
    """Synchronous WebSocket consumer for a single chat room.

    On connect it joins the channel-layer group for the room and replays the
    room's persisted message history to the new client.  Incoming messages
    are persisted (MessageBody) and broadcast to the whole group.
    NOTE(review): the name is presumably a typo for "ChatConsumer" — confirm.
    """
    def connect(self):
        # Room name comes from the URL route kwargs; group name is derived from it.
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        chat_room = self.scope["url_route"]["kwargs"]["room_name"]
        current_chat_room = ChatRoom.objects.get(room_name = chat_room)
        # Join room group
        async_to_sync(self.channel_layer.group_add)(
            self.room_group_name,
            self.channel_name
        )
        self.accept()
        # Replay stored history to the newly connected client.
        for messages in current_chat_room.message_list.all():
            self.send(text_data=json.dumps({
                'message': messages.content,
                'sender' : messages.sender.username,
                'date' : messages.time_for_message.strftime("%d-%m-%Y %H:%M"),
            }))
    def disconnect(self, close_code):
        # Leave room group
        async_to_sync(self.channel_layer.group_discard)(
            self.room_group_name,
            self.channel_name
        )
    # Receive message from WebSocket
    def receive(self, text_data):
        # Persist the message, attach it to the room, then fan it out to the
        # group; 'type': 'chat_message' routes to chat_message() below.
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        sender = text_data_json['sender']
        date = datetime.now().strftime("%d-%m-%Y %H:%M")
        sender_user = User.objects.get(username = sender)
        new_message = MessageBody(sender=sender_user,content = message, time_for_message = datetime.now())
        new_message.save()
        chat_room = self.scope["url_route"]["kwargs"]["room_name"]
        current_chat_room = ChatRoom.objects.get(room_name = chat_room)
        current_chat_room.message_list.add(new_message)
        current_chat_room.save()
        # Send message to room group
        async_to_sync(self.channel_layer.group_send)(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': message,
                'sender' : sender,
                'date' : date,
            }
        )
    # Receive message from room group
    def chat_message(self, event):
        message = event['message']
        sender = event['sender']
        date = event['date']
        # NOTE(review): re-deriving room_name/room_group_name here appears
        # redundant — connect() already set both.
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        # Send message to WebSocket
        self.send(text_data=json.dumps({
            'message': message,
            'sender' : sender,
            'date' : date,
        }))
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import random
import time
class WindTurbine(object):
    """ Represents virtually and graphically a wind turbine
    It uses the raw data collected from a Wind Turbine in a circular buffer
    to simulate the real turbine sensors.
    """
    def __init__(self, turbine_id=0, raw_data=None):
        # raw_data is the circular buffer of recorded sensor rows; the read
        # cursor starts at a random row so turbines don't move in lockstep.
        if raw_data is None or len(raw_data) == 0:
            raise Exception("You need to pass an array with at least one row for raw data")
        self.turbine_id = turbine_id # id of the turbine
        self.raw_data = raw_data # buffer with the raw sensors data
        self.raw_data_idx = random.randint(0, len(raw_data)-1)
        self.running = False # running status
        self.halted = False # if True you can't use this turbine anymore. create a new one.
    def is_running(self):
        """Return True while the turbine simulation is running."""
        return self.running
    def detected_anomalies(self, values, anomalies):
        """ Updates the status of the 'inject noise' buttons (pressed or not)"""
        # NOTE(review): vibration_status/voltage_status/rotation_status are not
        # created in __init__ — presumably UI widget attributes attached
        # elsewhere; confirm before calling.
        # Slice layout: anomalies[0:3]=vibration, [3:5]=voltage, [5]=rotation.
        self.vibration_status.value = not anomalies[0:3].any()
        self.voltage_status.value = not anomalies[3:5].any()
        self.rotation_status.value = not anomalies[5]
    def is_noise_enabled(self, typ):
        """ Returns the status of the 'inject noise' buttons (pressed or not)"""
        assert(typ == 'Vol' or typ == 'Rot' or typ == 'Vib')
        # NOTE(review): idx is computed but never used and the method always
        # returns False — looks like a stub of a fuller implementation.
        idx = 0
        if typ == 'Vol': idx = 0
        elif typ == 'Rot': idx = 1
        elif typ == 'Vib': idx = 2
        return False
    def halt(self):
        """ Halts the turbine and disables it. After calling this method you can't use it anymore."""
        # NOTE(review): button/img/stopped_img/anomaly_status are UI widgets
        # not defined in this class — confirm they exist on instances.
        self.running = False
        self.button.description = 'Halted'
        self.img.value = self.stopped_img
        self.anomaly_status.layout.visibility='hidden'
        self.halted = True
    def read_next_sample(self):
        """ next step in this simulation """
        # Wrap the cursor around the circular buffer when it runs off the end.
        if self.raw_data_idx >= len(self.raw_data): self.raw_data_idx = 0
        sample = self.raw_data[self.raw_data_idx]
        self.raw_data_idx += 1
        return sample
|
#!/bin/env python
"""
More or Less generic python code for image analysis.
functions:
property: is_new_image returns True\False if there is new image
method: get_image return 4,X,Y image where 0 - R, 1 - G, 2 - B, 3 - K - colors
Valentyn Stadnytskyi
created: Feb 29 2018
last updated: July 2, 2018
Microscope Camera chip orientations:
NIH: vertical; APS: horizontal;
Vertical:
DxWxH = 3,1024,1360
*----
| |
| |
| |
| |
|---|
* is (0,0) pixel
Horizontal:
DxWxH = 3,1360,1024
|---------------|
| |
| |
*---------------|
* is (0,0) pixel
"""
__version__ = '0.1'
import matplotlib.pyplot as plt
from logging import info,warn,debug, error
from numpy import mean, transpose, std,array,hypot , abs, zeros, savetxt,loadtxt,save,load ,uint8, uint16, reshape, asarray
from numpy.ma import masked_array
from time import sleep, time
from PIL import Image
from threading import Thread, Condition
from persistent_property import persistent_property
from datetime import datetime
from scipy import ndimage, misc
import os
from thread import start_new_thread
from CAServer import casput,casdel
from CA import caget
import traceback
import os
class Image_analyzer(object):
    """Freeze-detection image analyzer built around the module-level GigE
    `camera` object.

    Pulls RGB frames from the camera, appends a summed-intensity plane (K)
    to produce 4xWxH arrays, and offers masking / plotting / saving helpers.
    Settings are persisted via persistent_property descriptors.
    """
    # Persisted configuration values (saved across runs by persistent_property).
    cameraName = persistent_property('camera name', '')
    fieldOfAnalysis = persistent_property('field of analysis', '')
    cameraSettingGain = persistent_property('camera Setting Gain', 6)
    cameraSettingExposureTime = persistent_property('camera Setting exposure time', 0.072)
    background_image_filename = persistent_property('background image filename', 'background_default')
    mask_image_filename = persistent_property('mask image filename', 'mask_default')
    frozen_threshold = persistent_property('freezing threshhold', 0.08)
    def __init__(self, name = 'freeze_detector'):
        # Only the instance name is set here; the historical initialisation
        # below is intentionally disabled (kept for reference).
        self.name = name
        #camera.exposure_time = self.cameraSettingExposureTime
        #camera.gain = self.cameraSettingGain
##        self.frozen_threshold = 0.1
##        self.frozen_threshold_temperature = -15.0
##
##        #orientation of the camera
##        #self.orientation = 'vertical' #
##        self.orientation = 'horizontal' #
##
##
##        self.difference_array = zeros((1,1))
##        self.background_array = zeros((1,1))
##        self.mask_array = zeros((1,1))
##        self.background_image_flag = False
        #self.analyse_dict = {}
    def init(self, camera_name = 'MicroscopeCamera'):
        """Bind to a camera by name and sync the frame counter.

        NOTE(review): only camera_name/imageCounter are live; the commented
        block below (log folders, background image, log file) is disabled.
        """
        self.camera_name = camera_name #Microfluidics camera #MicroscopeCamera
        self.imageCounter = camera.frame_count
        #camera.exposure_time = self.cameraSettingExposureTime
        #camera.gain = self.cameraSettingGain
        # self.logFolder = os.getcwd() + '/optical_image_analyzer/' + self.name + '/'
        # if os.path.exists(os.path.dirname(self.logFolder)):
        #     pass
        # else:
        #     os.makedirs(os.path.dirname(self.logFolder))
        # if os.path.exists(os.path.dirname(self.logFolder+ 'Archive/') ):
        #     pass
        # else:
        #     os.makedirs(os.path.dirname(self.logFolder+ 'Archive/'))
        # self.background_image_filename = 'background_default_rgb.tiff'
        # try:
        #     #self.background_image = Image.open(self.logFolder + self.background_image_filename)
        #     self.background_array = load(self.logFolder + 'background_default_rgb.npy')
        #     self.background_image_flag = True
        #     info('got bckg image from the drive')
        # except:
        #     warn('couldn"t load bckg image')
        #     self.background_image_flag = False
        #
        # self.logfile = self.logFolder +'sample_frozen_image_rgb.log'
        # my_file = os.path.isfile(self.logfile )
        # if my_file:
        #     pass
        # else:
        #     f = open(self.logfile,'w')
        #     timeRecord = time()
        #     f.write('####This experiment started at: %r and other information %r \r\n' %(timeRecord,'Other Garbage'))
        #     f.write('time,imageCounter, temperature, mean, mean_R,mean_G,mean_B,stdev,stdev_R,stdev_B,stdev_G\r\n')
        #     f.close()
    def get_is_new_image(self):
        """Return True when the camera holds a frame newer than imageCounter.

        Also restarts acquisition when the camera reports an invalid state,
        and resets the counter if it has drifted far ahead of the camera.
        """
        try:
            temp = camera.acquiring
            # Anything other than a clean boolean means the camera driver is
            # in a bad state; force it off and let the line below restart it.
            if temp != True and temp != False:
                print("Camera status: %r" %(temp))
                camera.acquiring = False
                sleep(0.1)
        except:
            # NOTE(review): bare except silently swallows all errors here.
            print('error at this line: if camera.acquiring != True and camera.acquiring != False: camera.acquiring = Flase')
        if not camera.acquiring: camera.acquiring = True
        idx = 0
        frame_count = camera.frame_count
        # Counter drifted ahead (e.g. camera restarted): resynchronize.
        if self.imageCounter - frame_count > 100:
            self.imageCounter = 0
        if self.imageCounter < frame_count:
            flag = True
        else:
            flag = False
        info('Image counter: %r' % self.imageCounter)
        return flag
    is_new_image = property(get_is_new_image)
    def get_image(self, timeout = 5, image = None):
        """Return a 4xWxH int16 array: planes 0-2 are R,G,B; plane 3 is K=R+G+B.

        Polls the camera for up to `timeout` seconds; on timeout restarts
        acquisition and returns None.  If `image` is given it is converted
        in place instead of grabbing a new frame.
        NOTE(review): the else-branch uses `img`, not `image` — on that path
        `img` is unbound, so passing `image` raises; presumably a bug.
        NOTE(review): `image == None` should be `image is None`.
        """
        from time import time
        from numpy import insert
        flag_fail = False
        if image == None:
            t = time()
            while t + timeout > time():
                if self.is_new_image:
                    tmp = camera.RGB_array.astype('int16')
                    # int16 (not uint8) so the K = R+G+B plane cannot overflow.
                    img = zeros(shape = (tmp.shape[0]+1,tmp.shape[1],tmp.shape[2]), dtype = 'int16')
                    img[0,:,:] = tmp[0,:,:]
                    img[1,:,:] = tmp[1,:,:]
                    img[2,:,:] = tmp[2,:,:]
                    img[3,:,:] = tmp[0,:,:]+tmp[1,:,:]+tmp[2,:,:]
                    self.imageCounter = camera.frame_count
                    flag_fail = False
                    break
                else:
                    img = None
                    flag_fail = True
                sleep(0.250)
            if flag_fail:
                info('get_image has timed-out: restarting the camera.acquiring')
                camera.acquiring = False
                sleep(2)
                camera.acquiring = True
                sleep(0.25)
        else:
            img = img.astype('int16')
            img[3,:,:] = img[0,:,:] + img[1,:,:] + img[2,:,:]
        return img
    def frame_count(self):
        """Return the camera's frame counter, or -1 if the camera errors."""
        try:
            count = camera.frame_count
        except:
            error(traceback.format_exc())
            count = -1
        return count
    def create_mask(self,arr, anchors = [(0,0),(1,1)]):
        """
        defines region of interest between anchor points defined by anchors. Yields rectangular shape
        """
        # Mask is 1 everywhere and 0 inside the anchor rectangle; returns
        # None if the anchors are out of range.
        from numpy import ma, zeros, ones
        shape = arr.shape
        mask = ones(shape, dtype = 'int16')
        try:
            for i in range(anchors[0][0],anchors[1][0]):
                for j in range(anchors[0][1],anchors[1][1]):
                    mask[:,i,j] = 0
        except:
            error(traceback.format_exc())
            mask = None
        return mask
    def mask_array(self,array,mask):
        """Return `array` wrapped as a numpy masked array using `mask`."""
        from numpy import ma
        arr_res = ma.masked_array(array, mask)
        return arr_res
    def masked_section(self,array, anchors = [(0,0),(1,1)]):
        """Return the rectangular sub-array (all planes) between the anchors."""
        x1 = anchors[0][0]
        y1 = anchors[0][1]
        x2 = anchors[1][0]
        y2 = anchors[1][1]
        return array[:,x1:x2,y1:y2]
    def save_array_as_image(self,arr, filename):
        """Save a 3-plane array as an RGB image (hard-coded 1360x1024 size)."""
        image = Image.new('RGB',(1360,1024))
        image.frombytes(arr.T.tostring())
        image.save(filename)
    def rgb2gray(self,rgb):
        """Convert an HxWx3 RGB array to grayscale (ITU-R 601 luma weights)."""
        r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
        gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
        return gray
    def get_background_array(self):
        """Grab a fresh frame and store it as the background reference."""
        arr = self.get_image()
        self.background_array = arr
        return True
    def set_background_array(self, filename = 'blank'):
        """Asynchronously refresh the background (filename arg is unused)."""
        self.background_image_flag = False
        start_new_thread(self.get_background_array,())
    def plot_slices_difference(self):
        """Plot the first 7 column slices of the difference array.

        NOTE(review): references global `image_analyser` — the instance below
        is named `image_analyzer`, so this raises NameError; presumably a typo.
        """
        for i in range(7):
            plt.plot(image_analyser.difference_array[0,:,i])
        plt.show()
    def plot_difference(self):
        """Show the difference image next to its absolute value."""
        plt.subplot(121)
        plt.imshow(self.difference_image)
        plt.colorbar()
        plt.subplot(122)
        plt.imshow(abs(self.difference_image))
        plt.colorbar()
        plt.show()
    def plot_background(self):
        """Show the background image next to the mask image."""
        plt.subplot(121)
        plt.imshow(self.background_image)
        plt.colorbar()
        plt.subplot(122)
        plt.imshow(self.mask_image)
        plt.colorbar()
        plt.show()
    def plot(self,image):
        """Show a single image with a colorbar."""
        plt.imshow(image)
        plt.colorbar()
        plt.show()
    def save_images(self):
        """Periodically save camera frames (tagged with temperature) for ~1h.

        NOTE(review): `self.is_new_image()` calls the *value* of the
        is_new_image property (a bool), which raises TypeError — presumably
        meant `self.is_new_image`; confirm.
        """
        from PIL import Image
        import logging; from tempfile import gettempdir
        #/var/folders/y4/cw92kt415kz7wtk13fkjhh2r0000gn/T/samplr_frozen_opt.log'
        import os
        file_path = gettempdir() + "/Images/Optical_images_march4/log.log" # gettempdir + "/Optical_images/log.log"
        directory = os.path.dirname(file_path)
        try:
            os.stat(directory)
        except:
            os.mkdir(directory)
        for i in range(360):
            sleep(10)
            while self.is_new_image() != True:
                sleep(0.05)
            if self.is_new_image():
                img = Image.fromarray(camera.RGB_array.transpose((-1,0,1)).transpose((-1,0,1)))
                temp = str(caget("NIH:TEMP.RBV"))
                img.save(directory +'/_T_'+temp + '_t_' +str(time())+'.tiff')
                print('saving',directory +'_T_'+temp + '_t_' +str(time())+'.tiff')
    def scan_saved_images(self):
        # Placeholder — not implemented.
        pass
    def load_image_from_file(self, filename = ""):
        """Load an image file and return it as an int16 array in DxWxH order,
        or None when no filename is given."""
        if len(filename)>0:
            img = Image.open(filename)
            arr = asarray(img, dtype="int16" ).transpose((-1,0,1))
            return arr
        else:
            return None
    def test_load_current_1_image(self):
        # Debug helper: load the current RGB snapshot from the log folder.
        self.test_current_1 = Image.open(self.logFolder + 'current_rgb.tiff')
    def test_save_current_s_image(self):
        # Debug helper: round-trip save of the held test image.
        self.test_current_s.save(self.logFolder + 'current_test_saved.tiff')
    def test_load_current_s_image(self):
        # Debug helper: reload the round-tripped test image.
        self.test_current_s = Image.open(self.logFolder + 'current_test_saved.tiff')
    def test_load_current_2_image(self):
        # Debug helper: load the secondary test image.
        self.test_current_2 = Image.open(self.logFolder + 'current_test_2.tiff')
from GigE_camera_client import Camera
# Module-level singletons: the camera connection and the analyzer instance
# used both by the class above and by interactive sessions.
#camera = Camera("LabMicroscope")
camera = Camera("MicroscopeCamera")
image_analyzer = Image_analyzer()
if __name__ == "__main__":
    import logging; from tempfile import gettempdir
    #/var/folders/y4/cw92kt415kz7wtk13fkjhh2r0000gn/T/samplr_frozen_opt.log'
    logfile = gettempdir()+"/optical_image_analyser.log"
    ##print(logfile)
    logging.basicConfig( level=logging.DEBUG,
                        format="%(asctime)s %(levelname)s: %(message)s",
                        filename=logfile,
                        )
    # `self` alias: convenience for pasting method bodies into an interactive
    # session while debugging.
    self = image_analyzer
    print('Time Start: %r' % str(datetime.now()))
    # Usage hints printed for the interactive operator:
    print('arr = image_analyzer.get_image()')
    print("image_analyzer.plot()")
    print("image_analyzer.plot_difference()")
    print('file_path = gettempdir() + "/Images/Optical_images/')
    debug('?')
|
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Extracts image features if default_image_features = ['squeezenet_features']
The Squeezenet model reduces featurization time for large featurizations,
as it is a compressed model that uses less memory.
Read more about the SqueezeNet model @ https://github.com/rcmalli/keras-squeezenet
'''
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
def squeezenet_featurize(imagename, imagedir):
    '''
    Featurize one image with a freshly instantiated SqueezeNet model.

    This network model has AlexNet accuracy with small footprint (5.1 MB);
    pretrained weights are converted from the original Caffe network.
    See https://github.com/rcmalli/keras-squeezenet

    Returns (features, labels): the model's output vector for the image and
    a matching list of 'squeezenet_feature_<i>' label strings.
    '''
    model = SqueezeNet()
    # Load, resize to the network's expected 227x227 input, and batch.
    img = image.load_img(imagedir+'/'+imagename, target_size=(227, 227))
    batch = np.expand_dims(image.img_to_array(img), axis=0)
    batch = preprocess_input(batch)
    preds = model.predict(batch)
    print('Predicted:', decode_predictions(preds))
    features = preds[0]
    labels = ['squeezenet_feature_%s'%(str(i)) for i in range(len(features))]
    return features, labels
|
import unittest
import lazy_dataset
from lazy_dataset.database import DictDatabase
class IteratorTest(unittest.TestCase):
    """Exercises lazy_dataset iteration, indexing, map/filter/concatenate,
    key_zip and slicing against a small in-memory DictDatabase fixture."""
    def setUp(self):
        # Two datasets: 'train' with examples a,b and 'test' with example c.
        self.json = dict(
            datasets=dict(
                train=dict(
                    a=dict(example_id='a'),
                    b=dict(example_id='b')
                ),
                test=dict(
                    c=dict(example_id='c')
                )
            ),
            meta=dict()
        )
        # self.temp_directory = Path(tempfile.mkdtemp())
        # self.json_path = self.temp_directory / 'db.json'
        # dump_json(self.json, self.json_path)
        self.db = DictDatabase(self.json)
    def test_dataset_names(self):
        """Database exposes exactly the dataset names from the fixture."""
        self.assertListEqual(
            list(self.db.dataset_names),
            list(self.json['datasets'].keys())
        )
    def test_iterator(self):
        """Iteration order matches fixture order; key and index access work."""
        iterator = self.db.get_dataset('train')
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            list(self.json['datasets']['train'].keys())
        )
        # Both key-based and positional access (and slicing) must succeed.
        _ = iterator['a']
        _ = iterator['b']
        _ = iterator[0]
        _ = iterator[1]
        _ = iterator[:1][0]
    def test_iterator_contains(self):
        iterator = self.db.get_dataset('train')
        with self.assertRaises(Exception):
            # contains should be unsupported
            'a' in iterator
    def test_map_iterator(self):
        """map() applies the function lazily but preserves keys/indexing."""
        iterator = self.db.get_dataset('train')
        def map_fn(d):
            d['example_id'] = d['example_id'].upper()
            return d
        iterator = iterator.map(map_fn)
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            'A B'.split()
        )
        # Access is still by the *original* key 'a'.
        _ = iterator['a']
        _ = iterator[0]
        _ = iterator[:1][0]
    def test_filter_iterator(self):
        """filter() keeps lazy key access but disallows positional access."""
        iterator = self.db.get_dataset('train')
        def filter_fn(d):
            return not d['example_id'] == 'b'
        iterator = iterator.filter(filter_fn)
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            'a'.split()
        )
        _ = iterator['a']
        # Filtered-out key raises; index/slice access is undefined after filter.
        with self.assertRaises(IndexError):
            _ = iterator['b']
        with self.assertRaises(AssertionError):
            _ = iterator[0]
        with self.assertRaises(AssertionError):
            _ = iterator[:1]
    def test_concatenate_iterator(self):
        """Concatenation of disjoint datasets supports keys and indices."""
        train_iterator = self.db.get_dataset('train')
        test_iterator = self.db.get_dataset('test')
        iterator = train_iterator.concatenate(test_iterator)
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            'a b c'.split()
        )
        self.assertEqual(
            iterator['a']['example_id'],
            'a'
        )
        self.assertEqual(
            iterator[0]['example_id'],
            'a'
        )
        _ = iterator[:1][0]
    def test_concatenate_iterator_double_keys(self):
        """Self-concatenation duplicates keys: key access must fail,
        positional access still works."""
        train_iterator = self.db.get_dataset('train')
        iterator = train_iterator.concatenate(train_iterator)
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            'a b a b'.split()
        )
        with self.assertRaises(AssertionError):
            _ = iterator['a']
        self.assertEqual(
            iterator[0]['example_id'],
            'a'
        )
        _ = iterator[:1][0]
    def test_multiple_concatenate_iterator(self):
        train_iterator = self.db.get_dataset('train')
        iterator = train_iterator.concatenate(train_iterator)
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            'a b a b'.split()
        )
        _ = iterator[:1][0]
    def test_zip_iterator(self):
        """key_zip pairs examples by key; ordering follows the first iterator."""
        import numpy as np
        train_iterator = self.db.get_dataset('train')
        # Change the key order
        np.random.seed(2)
        train_iterator_2 = train_iterator.shuffle(False)
        iterator = lazy_dataset.key_zip(train_iterator, train_iterator_2)
        iterator_2 = lazy_dataset.key_zip(train_iterator_2, train_iterator)
        example_ids = [e['example_id'] for e in train_iterator]
        self.assertListEqual(
            example_ids,
            'a b'.split()
        )
        example_ids = [e['example_id'] for e in train_iterator_2]
        self.assertListEqual(
            example_ids,
            'b a'.split()  # train_iterator_2 has swapped keys
        )
        self.assertEqual(  # iterator defined order
            list(iterator),
            [({'dataset': 'train', 'example_id': 'a'},
              {'dataset': 'train', 'example_id': 'a'}),
             ({'dataset': 'train', 'example_id': 'b'},
              {'dataset': 'train', 'example_id': 'b'})]
        )
        self.assertEqual(  # train_iterator_2 defined order
            list(iterator_2),
            [({'dataset': 'train', 'example_id': 'b'},
              {'dataset': 'train', 'example_id': 'b'}),
             ({'dataset': 'train', 'example_id': 'a'},
              {'dataset': 'train', 'example_id': 'a'})]
        )
    def test_slice_iterator(self):
        """Slices clamp to the available length instead of raising."""
        base_iterator = self.db.get_dataset('train')
        base_iterator = base_iterator.concatenate(base_iterator)
        iterator = base_iterator[:4]
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            'a b a b'.split()
        )
        iterator = base_iterator[:3]
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            'a b a'.split()
        )
        iterator = base_iterator[:5]  # Should this work?
        example_ids = [e['example_id'] for e in iterator]
        self.assertListEqual(
            example_ids,
            'a b a b'.split()
        )
        _ = base_iterator[:2]
        _ = base_iterator[:1]
        _ = base_iterator[:0]  # Should this work?
    # def tearDown(self):
    #     shutil.rmtree(str(self.temp_directory))
class UniqueIDIteratorTest(unittest.TestCase):
    """Checks behavior when example ids collide across datasets ('a' exists
    in both 'train' and 'test')."""
    def setUp(self):
        self.d = dict(
            datasets=dict(
                train=dict(
                    a=dict(example_id='a'),
                    b=dict(example_id='b')
                ),
                test=dict(
                    a=dict(example_id='a')
                )
            ),
            meta=dict()
        )
        self.db = DictDatabase(self.d)
    def test_duplicate_id(self):
        """Requesting keys of a combined dataset with duplicate ids fails."""
        with self.assertRaises(AssertionError):
            iterator = self.db.get_dataset('train test'.split())
            _ = iterator.keys()
    def test_duplicate_id_with_prepend_dataset_name(self):
        # Merely building the combined dataset must not raise.
        _ = self.db.get_dataset('train test'.split())
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'welcome.ui'
#
# Created: Thu Nov 1 16:25:46 2012
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Fallback for PyQt4 builds without QString (API v2): pass strings through.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
class Ui_Welcome(object):
    """Generated welcome-page UI (see header: edits here will be lost —
    regenerate from welcome.ui instead)."""
    def setupUi(self, Welcome):
        # Build the widget tree: a label centered inside a vertical layout.
        Welcome.setObjectName(_fromUtf8("Welcome"))
        Welcome.resize(400, 300)
        self.verticalLayout = QtGui.QVBoxLayout(Welcome)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.widget = QtGui.QWidget(Welcome)
        self.widget.setObjectName(_fromUtf8("widget"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.widget)
        self.verticalLayout_2.setMargin(0)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.label = QtGui.QLabel(self.widget)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout_2.addWidget(self.label)
        self.verticalLayout.addWidget(self.widget)
        self.retranslateUi(Welcome)
        QtCore.QMetaObject.connectSlotsByName(Welcome)
    def retranslateUi(self, Welcome):
        # Install translatable user-visible strings.
        Welcome.setWindowTitle(QtGui.QApplication.translate("Welcome", "Welcome", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("Welcome", "<html><head/><body><p align=\"center\">Welcome to relinux 0.4!</p><p align=\"center\">Click "Next" to continue.</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
|
import scipy.stats as st
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
# class Distribution:
# Check which distribution this attribute follows so that we can do transformation before performing regression.
def check_dist(x):
    """Plot *x* against three candidate distributions (Johnson SU, normal,
    log-normal) to eyeball which fits best before transforming for regression.
    """
    fig = plt.figure(figsize=(20, 40))
    candidates = (
        ("Johnson SU", st.johnsonsu),
        ("Normal", st.norm),
        ("Log Normal", st.lognorm),
    )
    for position, (title, dist) in enumerate(candidates, start=1):
        axis = fig.add_subplot(3, 1, position)
        axis.set_title(title)
        sns.distplot(x, kde=False, fit=dist)
# apply johnson transformation
def johnson_transform(x):
    """Fit a Johnson SU distribution to *x* and transform the data.

    Returns (yt, gamma, eta, epsilon, lbda): the transformed values plus the
    fitted parameters needed later by johnson_inverse.
    """
    params = st.johnsonsu.fit(x)
    gamma, eta, epsilon, lbda = params
    transformed = gamma + eta * np.arcsinh((x - epsilon) / lbda)
    return transformed, gamma, eta, epsilon, lbda
# apply inverse of johnson transformation
def johnson_inverse(y, gamma, eta, epsilon, lbda):
    """Invert johnson_transform: map transformed value(s) *y* back to the
    original scale using the fitted Johnson SU parameters."""
    standardized = (y - gamma) / eta
    return lbda * np.sinh(standardized) + epsilon
# # plot distribution of data in a dataframe
# def dist_graph(df):
# # quantitative = [f for f in df if df[f] != 'object']
# # qualitative = [f for f in df if df[f] == 'object']
# # f
# g = sns.FacetGrid(df, col_wrap=2, sharex=False, sharey=False)
# g = g.map(sns.distplot)
|
import re
from pprint import pprint
import itertools
def Main():
    """Advent of Code 2015 day 15: find the 100-teaspoon ingredient mix with
    the highest capacity*durability*flavor*texture score.

    Reads 'input.txt' and assumes exactly the four ingredients named below
    (Sprinkles, PeanutButter, Frosting, Sugar).
    NOTE(review): itertools.combinations draws *distinct* amounts, so mixes
    with repeated amounts (e.g. 25,25,25,25) are never generated — confirm
    this is acceptable for the given input.
    """
    ingredients={}
    combinations = []
    result={}
    # Parse "Name: capacity c, durability d, ..." lines; the regex pulls the
    # five signed integers per line in property order.
    with open('input.txt',mode='r') as data:
        for line in data:
            line=line.rstrip()
            nums = re.findall(r'([+-]?[0-9]{1,})',line)
            nums = [int(n) for n in nums]
            ingredients[line.split(":")[0]]={
                "capacity":nums[0],
                "durability":nums[1],
                "flavor":nums[2],
                "texture":nums[3],
                "calories":nums[4]
            }
    # All orderings of distinct positive amounts summing to 100.
    for comb in itertools.combinations(range(1,101),len(ingredients)):
        if sum(comb) == 100:
            for perm in itertools.permutations(comb):
                combinations.append(perm)
    for comb in combinations:
        (a,b,c,d) = (ingredients["Sprinkles"],ingredients["PeanutButter"],ingredients["Frosting"],ingredients["Sugar"])
        cap= comb[0]*a["capacity"]  +comb[1]*b["capacity"]  +comb[2]*c["capacity"]  +comb[3]*d["capacity"]
        dur= comb[0]*a["durability"]+comb[1]*b["durability"]+comb[2]*c["durability"]+comb[3]*d["durability"]
        fla= comb[0]*a["flavor"]    +comb[1]*b["flavor"]    +comb[2]*c["flavor"]    +comb[3]*d["flavor"]
        tex= comb[0]*a["texture"]   +comb[1]*b["texture"]   +comb[2]*c["texture"]   +comb[3]*d["texture"]
        # Any negative property zeroes the score, so skip those mixes.
        if not (cap < 0 or dur < 0 or fla<0 or tex<0):
            result[comb] = cap*dur*fla*tex
            # NOTE(review): 11171160 is presumably a known target score used
            # for debugging (prints the mix that produces it) — confirm.
            if result[comb] == 11171160:
                print (comb)
    sort = sorted(result,key=result.get,reverse=True)
    print("{} gives best score of {}".format(sort[0],result[sort[0]]))
if __name__ == "__main__":
    print("Calculating...")
    Main()
|
import pandas
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
def ourDNN(inputdims):
    """Build and compile a small fully-connected binary classifier.

    Architecture: inputdims -> inputdims(relu) -> 8(relu) -> 1(sigmoid),
    compiled with binary cross-entropy and the Adam optimizer.
    Prints the model summary and returns the compiled model.
    """
    net = Sequential()
    net.add(Dense(inputdims, input_dim=inputdims, activation='relu'))
    net.add(Dense(8, activation='relu'))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer="adam", metrics=['accuracy'])
    print(net.summary())
    return net
def draw(history):
    """Plot training/validation accuracy (top) and loss (bottom) from a
    Keras History object and show the figure."""
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    plt.figure(figsize=(8, 8))
    plt.subplot(2, 1, 1)
    plt.plot(acc, label='Training Accuracy')
    plt.plot(val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.ylabel('Accuracy')
    plt.ylim([min(plt.ylim()),1])
    plt.title('Training and Validation Accuracy')
    plt.subplot(2, 1, 2)
    plt.plot(loss, label='Training Loss')
    plt.plot(val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.ylabel('Cross Entropy')
    plt.ylim([0,1.0])
    plt.title('Training and Validation Loss')
    plt.xlabel('epoch')
    plt.show()
|
import pandas as pd
import pickle
# d = pd.read_csv("data/spoilers/train.csv")
#
# list_set = set(d['page'])
# print(len(list_set))
#
#
# with open('movie_rating_test_dump.pickle','rb') as rating_file:
# dump = pickle.load(rating_file)
# train_rating = dump[0]
# print(len(train_rating))
# Names of the hand-crafted text features, in production order.
# (Previously built as an empty list plus one .extend() call — a single
# literal says the same thing directly.)
feature_list = [
    'n_words', 'n_chars', 'toolong', 'allcaps',
    'max_len', 'mean_len', 'bad_ratio',
    'n_bad', 'capsratio', "n_nicks", "n_urls", "n_sentences",
    "n_non_words", "idiot_regexp", "moron_regexp", "n_html", "strong_pos", "strong_neg", "weak_pos",
    "weak_neg",
]
print(feature_list)
|
# Generated by Django 3.0.6 on 2020-06-07 14:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the stories app: creates Genre, Story and the
    UserStory link table (auto-generated; edit with care)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=300)),
            ],
        ),
        migrations.CreateModel(
            name='Story',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=280)),
                ('once_upon_a_time', models.CharField(max_length=280)),
                ('every_day', models.CharField(max_length=280)),
                ('one_day', models.CharField(max_length=280)),
                ('result', models.CharField(max_length=280)),
                ('result2', models.CharField(max_length=280)),
                ('until_finally', models.CharField(max_length=280)),
                # PROTECT: a Genre cannot be deleted while stories reference it.
                ('genre', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='stories.Genre')),
            ],
        ),
        migrations.CreateModel(
            name='UserStory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                # SET_NULL keeps the link row if the story is deleted.
                ('story', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='stories.Story')),
            ],
        ),
    ]
|
import humanfriendly
import logging
import psutil
# https://code.google.com/p/modwsgi/wiki/RegisteringCleanupCode
class Generator2:
    """Wrap a WSGI response iterable so that a callback fires exactly once
    when the response is closed, after the wrapped iterable's own close()."""

    def __init__(self, iterable, callback, environ):
        self.__iterable = iterable
        self.__callback = callback
        self.__environ = environ

    def __iter__(self):
        yield from self.__iterable

    def close(self):
        # Run the callback even if the underlying close() raises.
        try:
            close = getattr(self.__iterable, 'close', None)
            if close is not None:
                close()
        finally:
            self.__callback(self.__environ)
class ExecuteOnCompletion2:
    """WSGI middleware: run callback(environ) once the response finishes,
    whether the wrapped application raises or completes normally."""

    def __init__(self, application, callback):
        self.__application = application
        self.__callback = callback

    def __call__(self, environ, start_response):
        try:
            result = self.__application(environ, start_response)
        except BaseException:
            # The app failed before producing a body: notify, then re-raise.
            self.__callback(environ)
            raise
        # Defer the callback to response close via the wrapping iterable.
        return Generator2(result, self.__callback, environ)
def rss_checker(rss_limit=None, rss_percent_limit=None):
    """
    Build a per-request callback that kills the current process when its
    resident memory exceeds the configured limits.

    Uses a configured rss_limit (absolute amount in bytes) and percentage
    rss_limit to determine whether to kill the running process.
    If the current rss usage is above rss_limit AND the percentage rss usage
    of physical memory is above rss_percent_limit, kill the process.
    When no percentage limit is set, only the absolute limit is considered.
    """
    log = logging.getLogger(__name__)
    process = psutil.Process()
    def callback(environ):
        rss = process.memory_info().rss  # bytes
        over_rss = rss_limit and rss > rss_limit
        rss_perc = process.memory_percent(memtype="rss")  # XXX: this does not work on Fargate (reports host stats)
        if rss_percent_limit:
            over_perc = rss_perc > rss_percent_limit
        else:
            over_perc = True  # only consider rss if we have no percent set
        if over_rss and over_perc:
            # Fixed: rss/rss_limit are byte counts, not megabytes — the old
            # message labelled them "Mb".
            log.error(f"Killing process. Memory usage: {rss} bytes (limit {rss_limit}); Percentage "
                      f"{rss_perc} (limit {rss_percent_limit})")
            process.kill()
    return callback
def filter_app(app, global_conf, rss_limit=None, rss_percent_limit=None):
    """PasteDeploy-style filter factory: parse the configured limits and wrap
    *app* so the rss checker runs after every response."""
    parsed_limit = None if rss_limit is None else humanfriendly.parse_size(rss_limit)
    parsed_percent = None if rss_percent_limit is None else float(rss_percent_limit)
    return ExecuteOnCompletion2(app, rss_checker(parsed_limit, parsed_percent))
|
import platform
import sys
import os
import re
import subprocess
def get_lsb_information(path='/etc/lsb-release'):
    """Parse an lsb-release style file into a dict.

    Reads *path* (default '/etc/lsb-release') and returns the DISTRIB_*
    variables found there, with the 'DISTRIB_' prefix and any surrounding
    double quotes stripped.  A missing file yields an empty dict.

    The *path* parameter is new (defaulted, backward-compatible) so the
    parser can be exercised against arbitrary files.
    """
    distinfo = {}
    if os.path.exists(path):
        try:
            with open(path) as lsb_release_file:
                for line in lsb_release_file:
                    line = line.strip()
                    if not line:
                        continue
                    # Skip invalid lines
                    if '=' not in line:
                        continue
                    var, arg = line.split('=', 1)
                    if var.startswith('DISTRIB_'):
                        var = var[8:]
                        # Strip surrounding double quotes, if present.
                        if arg.startswith('"') and arg.endswith('"'):
                            arg = arg[1:-1]
                        if arg:  # Ignore empty arguments
                            distinfo[var] = arg
        except IOError as msg:
            print('Unable to open %s:' % path, str(msg))
    return distinfo
def get_distro_information():
    """Return distro info from lsb-release, with a Debian-guess fallback.

    If every expected LSB key is present, the lsb dict is returned as-is;
    otherwise the lsb values are merged over guess_debian_release()'s result.
    NOTE(review): guess_debian_release is not defined in this module —
    presumably provided elsewhere; confirm.
    """
    lsbinfo = get_lsb_information()
    # OS is only used inside guess_debian_release anyway
    required = ('ID', 'RELEASE', 'CODENAME', 'DESCRIPTION')
    if all(key in lsbinfo for key in required):
        return lsbinfo
    distinfo = guess_debian_release()
    distinfo.update(lsbinfo)
    return distinfo
def linux_distribution():
    """Return platform.linux_distribution(), or "N/A" if unavailable.

    platform.linux_distribution() was deprecated in Python 3.5 and removed
    in 3.8, so the resulting AttributeError lands here on modern
    interpreters.  The bare `except:` was narrowed to `except Exception:`
    so KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        return platform.linux_distribution()
    except Exception:
        return "N/A"
if __name__ == '__main__':
    # Diagnostic dump of platform information.
    # NOTE: platform.dist() and platform.linux_distribution() were removed in
    # Python 3.8 — this script targets older interpreters; the print calls
    # below were converted from Python 2 print *statements* for consistency
    # with the print() calls used elsewhere in this file.
    # Renamed from `os`: the old code shadowed the imported os module.
    uname_info = platform.uname()
    dist = platform.dist()
    print(get_distro_information())
    print(dist[0] + ' ' + dist[1])
    print(uname_info[2])
    print("""Python version: %s
dist: %s
linux_distribution: %s
system: %s
machine: %s
platform: %s
uname: %s
version: %s
mac_ver: %s
""" % (
        sys.version.split('\n'),
        str(platform.dist()),
        linux_distribution(),
        platform.system(),
        platform.machine(),
        platform.platform(),
        platform.uname(),
        platform.version(),
        platform.mac_ver(),
    ))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.