index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,900 | 5474e7af7febf01d0d04168e25ecab1342619a83 | #!/bin/python3
# Link to the problem: https://www.hackerrank.com/challenges/apple-and-orange
import sys
def appleAndOrange(s, t, a, b, apple, orange):
    """Count fruit landing on the house segment [s, t].

    `a` and `b` are the apple/orange tree positions; `apple` and `orange`
    hold each fruit's fall distance relative to its own tree (may be
    negative).  Returns [apples_on_house, oranges_on_house].
    """
    apples_on_house = sum(1 for d in apple if s <= a + d <= t)
    oranges_on_house = sum(1 for d in orange if s <= b + d <= t)
    return [apples_on_house, oranges_on_house]
if __name__ == "__main__":
    # Input format (HackerRank): line 1 = house segment [s, t],
    # line 2 = tree positions a (apple) and b (orange),
    # line 3 = fruit counts m and n (read but unused; the lists carry
    # their own lengths), lines 4-5 = fall distances for each fruit.
    s, t = input().strip().split(' ')
    s, t = [int(s), int(t)]
    a, b = input().strip().split(' ')
    a, b = [int(a), int(b)]
    m, n = input().strip().split(' ')
    m, n = [int(m), int(n)]
    apple = list(map(int, input().strip().split(' ')))
    orange = list(map(int, input().strip().split(' ')))
    result = appleAndOrange(s, t, a, b, apple, orange)
    # One count per line: apples first, then oranges.
    print ("\n".join(map(str, result)))
|
995,901 | e30bdffeacd5ea6bcb1b475d35b3c6f9efd2a3ba | import sys
import random
import pygame
def create_player_cards():
    """Build a double deck, shuffle it, and deal round-robin to 4 players.

    Each card is a (rank, suit) tuple with rank 0-12 and suit 0-3; the
    deck is two full 52-card packs (no jokers).  Returns a list of four
    26-card hands.
    """
    # Two copies of every (rank, suit) combination.
    deck = [(rank, suit) for suit in range(4) for rank in range(13)] * 2
    random.shuffle(deck)
    # Deal one card at a time to players 0..3 in rotation.
    hands = [[], [], [], []]
    for position, card in enumerate(deck):
        hands[position % 4].append(card)
    return hands
def sort_by_card(_card):
    """Sort key for a (rank, suit) card.

    Ranks 0 and 1 (ace/deuce) are shifted up by 13 so they order above
    the king; all other ranks sort by their face value.  Suit is ignored.
    """
    rank = _card[0]
    return rank + 13 if rank <= 1 else rank
'''--------------main-----------------'''
# Initialise the display.
pygame.init()
size = width, height = 1280, 720
black = 0, 0, 0
screen = pygame.display.set_mode(size)
# Load the card face images.
card_colors = ('k', 'l', 'p', 's')  # suit letters used in the image filenames
card_images = [[], [], [], []]
for c in range(4):
    for i in range(1, 14):
        # NOTE(review): assumes files img/<suit>1.png .. <suit>13.png exist
        # and that file number i corresponds to rank index i-1 — confirm assets.
        img = pygame.image.load(f"img/{card_colors[c]}{i}.png")
        card_images[c].append(img)  # load every card face
players_cards = create_player_cards()
# Draw each player's sorted hand as a row; rows are 100 px apart and
# cards within a row overlap at 30 px spacing.
l_count = 0
for li in range(4):
    r_count = 0
    players_cards[li].sort(key=sort_by_card)
    for c in players_cards[li]:
        card, c_colors = c  # (rank, suit)
        screen.blit(card_images[c_colors][card], (150 + r_count, 50 + l_count))
        pygame.time.wait(10)  # small delay so the deal is animated
        pygame.display.flip()
        r_count += 30
    l_count += 100
# Main loop: keep the window alive and handle quitting.
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
|
995,902 | cf65af326097c48720cd89c6423b46b4881d72fb | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs
from evaluate import exact_match_score, f1_score
from util import Progbar, minibatches, split_train_dev
logging.basicConfig(level=logging.INFO)
def get_optimizer(opt):
    """Map an optimizer name to its TensorFlow optimizer class.

    :param opt: "adam" or "sgd"
    :return: the tf.train optimizer class (not an instance)
    :raises ValueError: for any unrecognised name.  (Previously this was
        a bare ``assert (False)``, which is silently stripped under
        ``python -O`` and would have returned an unbound name.)
    """
    if opt == "adam":
        optfn = tf.train.AdamOptimizer
    elif opt == "sgd":
        optfn = tf.train.GradientDescentOptimizer
    else:
        raise ValueError("unsupported optimizer: %r" % (opt,))
    return optfn
class QASystem(object):
    def __init__(self, encoder, decoder, flags, embeddings, rev_vocab):
        """
        Initializes your System

        Builds the whole TF1 graph: placeholders, embeddings, encoder/
        decoder wiring, masked span loss, exponentially decayed Adam with
        gradient clipping, optional summaries, and a Saver.

        :param encoder: an encoder that you constructed in train.py
        :param decoder: a decoder that you constructed in train.py
        :param flags: parsed command-line flags (hyperparameters, paths)
        :param embeddings: pretrained word-embedding matrix (kept frozen)
        :param rev_vocab: reverse vocabulary mapping id -> token string
        """
        self.encoder = encoder
        self.decoder = decoder
        self.max_context_len = flags.max_context_len
        self.max_question_len = flags.max_question_len
        self.pretrained_embeddings = embeddings
        self.vocab_dim = encoder.vocab_dim
        self.n_epoch = flags.epochs
        self.batch_size = flags.batch_size
        self.rev_vocab = rev_vocab
        self.dropout = flags.dropout
        self.summaries_dir = flags.summaries_dir
        self.summary_flag = flags.summary_flag
        self.max_grad_norm = flags.max_grad_norm
        self.base_lr = flags.learning_rate
        self.decay_number = flags.decay_number
        self.model_name = flags.model_name
        # CSV logs of per-batch train loss and per-epoch dev loss.
        self.train_loss_log = flags.train_dir + "/" + "train_loss.csv"
        self.dev_loss_log = flags.train_dir + "/" + "dev_loss.csv"
        self.filter_flag = flags.filter_flag
        # ==== set up placeholder tokens ========
        # Span labels are one-hot over context positions, hence the
        # (None, max_context_len) shape.
        self.context_placeholder = tf.placeholder(tf.int32, shape=(None, self.max_context_len))
        self.question_placeholder = tf.placeholder(tf.int32, shape = (None, self.max_question_len))
        self.context_mask_placeholder = tf.placeholder(tf.bool, shape = (None, self.max_context_len))
        self.question_mask_placeholder = tf.placeholder(tf.bool, shape = (None, self.max_question_len))
        self.start_span_placeholder = tf.placeholder(tf.int32, shape = (None, self.max_context_len))
        self.end_span_placeholder = tf.placeholder(tf.int32, shape = (None, self.max_context_len))
        self.dropout_placeholder = tf.placeholder(tf.float32, shape=(None))
        # Scalar batch counter driving learning-rate decay.
        self.global_batch_num_placeholder = tf.placeholder(tf.int32, shape=(None))
        # ==== assemble pieces ====
        with tf.variable_scope("qa", initializer=tf.uniform_unit_scaling_initializer(1.0)):
            context_embeddings, question_embeddings = self.setup_embeddings()
            self.h_s, self.h_e, self.relevence = self.setup_system(context_embeddings, question_embeddings)
            self.loss, self.masked_h_s, self.masked_h_e = self.setup_loss(self.h_s, self.h_e)
        # computing learning rates
        self.learning_rate = tf.train.exponential_decay(
            self.base_lr,                        # Base learning rate.
            self.global_batch_num_placeholder,   # Current total batch number
            self.decay_number,                   # decay every `decay_number` batches
            0.99,                                # Decay rate
            staircase = True)
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        # ==== set up training/updating procedure ====
        # Clip by global norm before applying, to stabilise training.
        grads_and_vars = self.optimizer.compute_gradients(self.loss)
        grads = [gv[0] for gv in grads_and_vars]
        variables = [gv[1] for gv in grads_and_vars]
        grads, _ = tf.clip_by_global_norm(grads, self.max_grad_norm)
        self.train_op = self.optimizer.apply_gradients(zip(grads, variables))
        if self.summary_flag:
            tf.summary.scalar('cross_entropy', self.loss)
            self.merged = tf.summary.merge_all()
        self.saver = tf.train.Saver()
    def setup_embeddings(self):
        """
        Loads distributed word representations based on placeholder tokens

        The pretrained vectors live in a non-trainable variable (frozen),
        and the id lookups are reshaped to
        (batch, max_context_len, vocab_dim) and
        (batch, max_question_len, vocab_dim).

        :return: (context_embeddings, question_embeddings) tensors
        """
        with vs.variable_scope("embeddings"):
            # trainable=False keeps the pretrained embeddings fixed.
            vec_embeddings = tf.get_variable("embeddings", initializer=self.pretrained_embeddings, trainable=False)
            context_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.context_placeholder)
            question_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.question_placeholder)
            context_embeddings = tf.reshape(context_batch_embeddings,
                                            (-1, self.max_context_len, self.vocab_dim))
            question_embeddings = tf.reshape(question_batch_embeddings,
                                             (-1, self.max_question_len, self.vocab_dim))
        return context_embeddings, question_embeddings
    def setup_system(self, context_embeddings, question_embeddings):
        """
        After your modularized implementation of encoder and decoder
        you should call various functions inside encoder, decoder here
        to assemble your reading comprehension system!

        The encoder produces an attention representation (plus a relevence
        tensor); the decoder turns that into start/end span logits.
        :return: (start logits h_s, end logits h_e, relevence)
        """
        # yq/yc (question/context encodings) are computed but only the
        # attention output is consumed by the decoder here.
        yq, yc, attention, relevence = self.encoder.encode(context_embeddings, question_embeddings,
                                                           self.context_mask_placeholder, self.question_mask_placeholder,
                                                           self.dropout_placeholder)
        h_s, h_e = self.decoder.decode(self.context_mask_placeholder, self.dropout_placeholder, attention)
        return h_s, h_e, relevence
    def setup_loss(self, h_s, h_e):
        """
        Set up your loss computation here

        Padding positions get -1e30 added to their logits so the softmax
        assigns them effectively zero probability; the start and end
        cross-entropies are summed and averaged over the batch.
        :return: (total_loss, masked start logits, masked end logits)
        """
        with vs.variable_scope("loss"):
            # Earlier boolean_mask formulation, kept for reference:
            # masked_h_s = tf.boolean_mask(h_s, self.context_mask_placeholder)
            # masked_h_e = tf.boolean_mask(h_e, self.context_mask_placeholder)
            # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(h_s, self.start_span_placeholder) +
            #                       tf.nn.softmax_cross_entropy_with_logits(h_e, self.end_span_placeholder))
            masked_h_s = tf.add(h_s, (1 - tf.cast(self.context_mask_placeholder, 'float')) * (-1e30))
            masked_h_e = tf.add(h_e, (1 - tf.cast(self.context_mask_placeholder, 'float')) * (-1e30))
            # NOTE: positional (logits, labels) call — the pre-TF-1.5 API.
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(masked_h_s, self.start_span_placeholder) +
                                  tf.nn.softmax_cross_entropy_with_logits(masked_h_e, self.end_span_placeholder))
            total_loss = loss
        return total_loss, masked_h_s, masked_h_e
    def create_feed_dict(self, train_batch, dropout, global_batch_num = 0):
        """Build the feed dict for one batch.

        Batch column layout: 0 context ids, 1 context mask, 2 question
        ids, 3 question mask; full training batches have 7 columns, of
        which 4/5 are the one-hot start/end span labels (column 6, the raw
        answer span, is never fed to the graph).

        :param dropout: value for the dropout placeholder (keep prob).
        :param global_batch_num: running batch counter for LR decay.
        """
        feed_dict = {
            self.context_placeholder: train_batch[0],
            self.question_placeholder: train_batch[2],
            self.context_mask_placeholder: train_batch[1],
            self.question_mask_placeholder: train_batch[3],
            self.dropout_placeholder: dropout,
            self.global_batch_num_placeholder: global_batch_num
        }
        # Only training batches carry span labels; eval/decode batches
        # have just the 4 input columns.
        if len(train_batch) == 7:
            feed_dict[self.start_span_placeholder] = train_batch[4]
            feed_dict[self.end_span_placeholder] = train_batch[5]
        return feed_dict
    def optimize(self, session, train_batch, global_batch_num):
        """
        Takes in actual data to optimize your model

        This method is equivalent to a step() function: it runs one
        train_op on `train_batch`.

        :return: (loss, summary-or-None, current learning rate)
        """
        # 1 - self.dropout is fed, so flags.dropout is the drop
        # probability and the placeholder receives the keep probability
        # (presumably — confirm against the encoder's dropout usage).
        input_feed = self.create_feed_dict(train_batch, 1 - self.dropout, global_batch_num)
        if self.summary_flag:
            output_feed = [self.train_op, self.loss, self.merged, self.learning_rate]
            _, loss, summary, current_lr = session.run(output_feed, input_feed)
        else:
            output_feed = [self.train_op, self.loss, self.learning_rate]
            _, loss, current_lr = session.run(output_feed, input_feed)
            summary = None
        return loss, summary, current_lr
    def run_epoch(self, session, train_examples, dev_examples, epoch_num, train_log):
        """Train for one epoch, then evaluate on the dev set.

        Writes one "(epoch,loss)" CSV row per batch to `train_log` and
        returns the average dev cost from self.test().
        """
        num_batches = int(len(train_examples) / self.batch_size)
        prog = Progbar(target=num_batches)
        for i, batch in enumerate(minibatches(train_examples, self.batch_size)):
            # Global batch number drives the exponential LR decay.
            loss, summary, current_lr = self.optimize(session, batch, global_batch_num = epoch_num * num_batches + i)
            # logging format (epoch,loss)
            train_log.write("{},{}\n".format(epoch_num + 1, loss))
            prog.update(i + 1, exact = [("train loss", loss), ("current LR", current_lr)])
            if self.summary_flag:
                self.train_writer.add_summary(summary, i)
        print("")
        logging.info("Evaluating on development data")
        validate_cost = self.test(session, dev_examples)
        return validate_cost
    def test(self, session, dev_examples):
        """
        in here you should compute a cost for your validation set
        and tune your hyperparameters according to the validation set performance

        :return: average per-batch loss over `dev_examples`
        """
        num_batches = int(len(dev_examples) / self.batch_size)
        prog = Progbar(target=num_batches)
        total_cost = 0
        for i, batch in enumerate(minibatches(dev_examples, self.batch_size)):
            # dropout = 1 => keep everything at evaluation time.
            input_feed = self.create_feed_dict(batch, dropout = 1)
            output_feed = [self.loss]
            outputs = session.run(output_feed, input_feed)
            prog.update(i + 1, exact = [("dev loss", outputs[0])])
            total_cost += outputs[0]
        print("")
        # NOTE(review): relies on loop variable `i` surviving the loop —
        # raises NameError if dev_examples yields zero batches.
        return total_cost/(i + 1)
    def decode(self, session, dev_example):
        """
        Returns the probability distribution over different positions in the paragraph
        so that other methods like self.answer() will be able to work properly

        :param dev_example: iterable of per-example tuples; zip(*) turns
            it into parallel columns, of which the first four (context,
            context mask, question, question mask) feed the placeholders.
        :return: (start logits, end logits, relevence) as evaluated arrays
        """
        unzipped_dev_example = list(zip(*dev_example))
        input_feed = self.create_feed_dict(unzipped_dev_example[0:4], dropout = 1)
        output_feed = [self.h_s, self.h_e, self.relevence]
        outputs = session.run(output_feed, input_feed)
        h_s = outputs[0]
        h_e = outputs[1]
        rel = outputs[2]
        return h_s, h_e, rel
    def validate(self, sess, valid_dataset):
        """
        Iterate through the validation dataset and determine what
        the validation cost is.

        This method calls self.test() which explicitly calculates validation cost.
        It exists as a thin, named alias for readability at call sites.

        :return: average validation cost (see self.test)
        """
        return self.test(sess, valid_dataset)
def formulate_answer(self, context, rev_vocab, start, end, mask = None):
answer = ''
for i in range(start, end + 1):
if i < len(context):
if mask is None:
answer += rev_vocab[context[i]]
answer += ' '
else:
if mask[i]:
answer += rev_vocab[context[i]]
answer += ' '
return answer
def evaluate_answer(self, session, dataset, rev_vocab, sample=20, log=False):
"""
Evaluate the model's performance using the harmonic mean of F1 and Exact Match (EM)
with the set of true answer labels
This step actually takes quite some time. So we can only sample 100 examples
from either training or testing set.
:param session: session should always be centrally managed in train.py
:param dataset: a representation of our data, in some implementations, you can
pass in multiple components (arguments) of one dataset to this function
:param sample: how many examples in dataset we look at
:param log: whether we print to std out stream
:return:
"""
sample = min(sample, len(dataset))
overall_f1 = 0.
overall_em = 0.
minibatch_size = 100
num_batches = int(sample / minibatch_size)
for batch in range(0, num_batches):
start = batch * minibatch_size
end = min(len(dataset), start + minibatch_size)
h_s, h_e, _ = self.decode(session, dataset[start:end])
for i in range(minibatch_size):
a_s = np.argmax(h_s[i])
a_e = np.argmax(h_e[i])
if a_s > a_e:
k = a_e
a_e = a_s
a_s = k
sample_dataset = dataset[start + i]
context = sample_dataset[0]
(a_s_true, a_e_true) = sample_dataset[6]
predicted_answer = self.formulate_answer(context, rev_vocab, a_s, a_e)
true_answer = self.formulate_answer(context, rev_vocab, a_s_true, a_e_true)
f1 = f1_score(predicted_answer, true_answer)
overall_f1 += f1
if exact_match_score(predicted_answer, true_answer):
overall_em += 1
average_f1 = overall_f1/sample
overall_em = overall_em/sample
logging.info("F1: {}, EM: {}, for {} samples\n".format(average_f1, overall_em, sample))
return overall_f1, overall_em
def train(self, session, train_examples, dev_examples, train_dir):
"""
Implement main training loop
TIPS:
You should also implement learning rate annealing (look into tf.train.exponential_decay)
Considering the long time to train, you should save your model per epoch.
More ambitious appoarch can include implement early stopping, or reload
previous models if they have higher performance than the current one
As suggested in the document, you should evaluate your training progress by
printing out information every fixed number of iterations.
We recommend you evaluate your model performance on F1 and EM instead of just
looking at the cost.
:param session: it should be passed in from train.py
:param dataset: a representation of our data, in some implementations, you can
pass in multiple components (arguments) of one dataset to this function
:param train_dir: path to the directory where you should save the model checkpoint
:return:
"""
# some free code to print out number of parameters in your model
# it's always good to check!
# you will also want to save your model parameters in train_dir
# so that you can use your trained model to make predictions, or
# even continue training
tic = time.time()
params = tf.trainable_variables()
num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))
toc = time.time()
logging.info("Number of params: %d (retreival took %f secs)" % (num_params, toc - tic))
if self.summary_flag:
self.train_writer = tf.summary.FileWriter(self.summaries_dir + '/train', session.graph)
logging.info("Train Loss File: {}".format(self.train_loss_log))
logging.info("Dev Loss File: {}".format(self.dev_loss_log))
best_score = 100000
train_log = open(self.train_loss_log, "w")
dev_log = open(self.dev_loss_log, "w")
for epoch in range(self.n_epoch):
print("Epoch {:} out of {:}".format(epoch + 1, self.n_epoch))
dev_score = self.run_epoch(session, train_examples, dev_examples, epoch, train_log)
dev_log.write("{},{}\n".format(epoch + 1, dev_score))
logging.info("Average Dev Cost: {}".format(dev_score))
logging.info("train F1 & EM")
f1, em = self.evaluate_answer(session, train_examples, self.rev_vocab, log = True)
logging.info("Dev F1 & EM")
f1, em = self.evaluate_answer(session, dev_examples, self.rev_vocab, log = True)
if dev_score < best_score:
best_score = dev_score
print("New best dev score! Saving model in {}".format(train_dir + "/" + self.model_name))
self.saver.save(session, train_dir + "/" + self.model_name)
return best_score
|
995,903 | f95f95686cff196c12bbb62ea2379a4b4d20f565 | # we will keep all of the constants here
inputDir = "../wc_input/"    # directory scanned for input text files
outputDir = "../wc_output/"  # directory where result files are written
wcOutputFile = outputDir + "wc_result.txt"    # word-count output path
medOutputFile = outputDir + "med_result.txt"  # running-median output path
|
995,904 | 50581849d498934f78a1278caae314b3259f444d | import os
import pygame
class Car:
    """A car sprite on a 50-px grid board.

    Cars are horizontal or vertical (inferred from their sprite size) and
    only slide along their own axis.  Every instance registers itself in
    the class-level `cars` list so collision checks can see all cars.
    """

    # Image file per (width, height) footprint for non-player cars.
    __cars_img = {
        (100, 50): '1.png',
        (150, 50): '2.png',
        (50, 100): '3.png',
        (50, 150): '4.png'
    }
    # Maximum pixels a car may travel in one move() call.
    __SPEED_LIMIT = 50
    # All Car instances ever created (shared by the whole class).
    cars = []

    def __init__(self, x, y, width, height):
        # The 50-px-tall car placed at y == 100 is the player's car.
        if y == 100 and height == 50:
            self.is_main = True
            img = os.path.join('images', 'main_car.png')
        else:
            self.is_main = False
            img = os.path.join('images', self.__cars_img[(width, height)])
        self.car = pygame.transform.scale(pygame.image.load(img), (width, height))
        self.rect = self.car.get_rect()
        self.rect.x = x
        self.rect.y = y
        # Orientation follows the sprite's aspect ratio.
        if width > height:
            self.horizontal = True
        else:
            self.horizontal = False
        self.cars.append(self)

    def round(self):
        # Snap the current position to the nearest 50-px grid corner
        # (does not move the car; returns the snapped coordinates).
        x = self.rect.x
        y = self.rect.y
        # x
        u = (x // 50) * 50
        if x >= u + 25:
            new_x = u + 50
        else:
            new_x = u
        # y
        u = (y // 50) * 50
        if y >= u + 25:
            new_y = u + 50
        else:
            new_y = u
        return (new_x, new_y)

    def show(self, screen):
        # Draw the car at its current rect.
        screen.blit(self.car, self.rect)

    def __block_factor(self, x, y):
        # Return the rect of the first OTHER car covering point (x, y),
        # or False when the point is free.
        for car in self.cars:
            if car == self:
                continue
            if car.rect.collidepoint((x, y)):
                return car.rect
        return False

    def __checkSpeed(self, speed):
        # Truthy (True) when the requested move exceeds the speed limit;
        # otherwise falls through to an implicit None (falsy), which the
        # callers rely on.
        if speed > self.__SPEED_LIMIT:
            return True

    ########## movements ##########
    # main move function
    def move(self, new_x, new_y):
        # Cache current geometry for the private movement helpers.
        self.x = self.rect.x
        self.y = self.rect.y
        self.width = self.rect.width
        self.height = self.rect.height
        if self.horizontal:
            self.__mv_horizontal(new_x)
        else:
            self.__mv_vertical(new_y)

    # move horizontal
    def __mv_horizontal(self, new_x):
        if new_x > self.x:
            self.__mv_right(new_x)
        else:
            self.__mv_left(new_x)

    def __mv_right(self, new_x):
        # Probe the pixel just past our right bumper for another car.
        block_factor = self.__block_factor(self.x + self.width, self.y)
        if block_factor:
            # Blocked: butt up against the obstacle's left edge.
            self.rect.x -= (self.x + self.width) - block_factor.x
        else:
            # Reject moves faster than the speed limit.
            if self.__checkSpeed(new_x - self.x): return
            self.rect.x = new_x

    def __mv_left(self, new_x):
        block_factor = self.__block_factor(self.x - 1, self.y)
        if block_factor:
            # Blocked: rest against the obstacle's right edge.
            self.rect.x = block_factor.x + block_factor.width
        else:
            if self.__checkSpeed(self.x - new_x): return
            self.rect.x = new_x

    # move vertical
    def __mv_vertical(self, new_y):
        if new_y > self.y:
            self.__mv_bottom(new_y)
        else:
            self.__mv_top(new_y)

    def __mv_bottom(self, new_y):
        block_factor = self.__block_factor(self.x, self.y + self.height)
        if block_factor:
            # Blocked: rest against the obstacle's top edge.
            self.rect.y -= (self.y + self.height) - block_factor.y
        else:
            if self.__checkSpeed(new_y - self.y): return
            self.rect.y = new_y

    def __mv_top(self, new_y):
        block_factor = self.__block_factor(self.x, self.y - 1)
        if block_factor:
            # Blocked: rest against the obstacle's bottom edge.
            self.rect.y = block_factor.y + block_factor.height
        else:
            if self.__checkSpeed(self.y - new_y): return
            self.rect.y = new_y
|
995,905 | 1e7bac4371653e35242c53f2c558298184934801 | # -*- coding:utf-8 -*-
# Author:lpw
# Time: 2019/2/28 22:31
# -*- coding:utf-8 -*-
# Author:lpw
# Time: 2019/2/28 22:04
import pymysql
from pymysql.cursors import DictCursor
conn = None
try:
    # NOTE(review): credentials are hard-coded in source — move them to
    # configuration/environment before this leaves a test setting.
    conn = pymysql.connect('120.25.160.52','root','111111','test')
    # `with conn` calls Connection.__enter__, which returns a cursor;
    # Connection.__exit__ commits or rolls back on exit but does NOT
    # close the cursor or the connection, so those are closed manually.
    with conn as cursor:
        # `with cursor` (Cursor.__enter__ returns the cursor itself)
        # makes Cursor.__exit__ close the cursor on exit.
        with cursor:
            # Parameterised query against the database.
            sql = "select * from student where id = %s"
            id = 5
            line = cursor.execute(sql,(id,))  # number of affected/selected rows
            print(line)
            print(cursor.fetchall())
finally:
    # Always release the connection, even if connect/query failed.
    if conn:
        conn.close()
995,906 | af62c96d014240d1aef23698d11438105613e06f |
from numpy import matrix
from numpy import linalg
# NOTE: this is Python 2 code (print statements, xrange) — scratch/demo only.
A = matrix( [[1,2,3],[11,12,13],[21,22,23]])
steplengthR = 0.5
steplengthL = 0.4
# Despite the name, this is a plain two-element list, not a matrix.
steplength_matrix = ([steplengthR, steplengthL])
print A
print steplength_matrix
a=[[1,1],[2,1],[3,1]]
b=[[1,2],[2,2],[3,2]]
print a[2][0]
print a
c =a[2][0] + a[1][0]
print c
# NOTE(review): `global` at module level is a no-op — presumably left
# over from a function body.
global R
R = 0
print steplengthR
# Build a 5 x 6 x 7 array of zeros as nested lists.
multi_array = []
for i in xrange(5):
    list2 = []
    for j in xrange(6):
        list3 = []
        for k in xrange(7):
            list3.append(0)
        list2.append(list3)
    multi_array.append(list2)
995,907 | 73a78fc94175cd5c6941672b86f68f2cfe36efad | # coding: utf-8
from rq import Queue
from redis import Redis
from workers import (
fast_worker,
lazy_worker
)
rdb = Redis()  # default connection: localhost:6379, db 0
# Two queues split by expected task duration.
fast_queue = Queue('fast', connection=rdb)
lazy_queue = Queue('lazy', connection=rdb)

if __name__ == "__main__":
    # Task payloads are integers interpreted by the worker functions.
    short_tasks = [1, 3, 2, 5, 6, 7, 8]
    long_tasks = [20, 12, 34, 56, 70, 87]
    for task in short_tasks:
        fast_queue.enqueue(fast_worker, task)
    for task in long_tasks:
        lazy_queue.enqueue(lazy_worker, task)
|
995,908 | 838c7fb1896ef8b559f96f12c717729463700c4a | from base_handler import BaseHandler
class handler(BaseHandler):
    """HTTP handler whose POSTs are served by the ite-personal-scores lib."""

    def do_POST(self):
        # Delegate to BaseHandler.handle_POST with the backing script path.
        self.handle_POST("./lib/ite-personal-scores")
995,909 | da7b0e3fc0d79142383e734aeacd62d44d81ae76 | from threading import Thread
def add(a, b):
print(a + b)
# START
thr = Thread(target=add, args=(1, 2), daemon=True)
thr.start()
# END
thr.join()
|
995,910 | f208c1ef300ace68aaa6778d9b36c60a9f04f36a | #!/usr/bin/env python3
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import time
import random
# def run(i):
# time.sleep(1)
# print("%s is running" % i)
#
#
# if __name__ == '__main__':
# p = ThreadPoolExecutor(5)
# # for i in range(20):
# # p.submit(run, i)
# p.map(run, range(20))
# p.shutdown()
# print("主线程ending")
def get(num):
    """Worker: sleep a random 1-3 s, then return `num` as the result."""
    time.sleep(random.randint(1,3))
    return num

def handel(data):
    """Done-callback: receives the finished Future and prints its result
    (after its own random delay)."""
    time.sleep(random.randint(1,3))
    print(data.result())

if __name__ == '__main__':
    t = ThreadPoolExecutor(5)  # at most 5 worker threads
    for i in range(20):
        # Attach the callback to each submitted task's Future.
        t.submit(get, i).add_done_callback(handel)
    t.shutdown()  # block until all tasks (and callbacks) complete
    print("主线程ending")
995,911 | 06480bff93c8dbe61b614f1c73061fa25ffc0bbe | """this module servers the purpose of declaring app as a package
and also as an application factory"""
import os
from flask import Flask, Response, json
# from flask_jwt_extended import JWTManager
from flask_swagger_ui import get_swaggerui_blueprint
from flask_cors import CORS
from app.views.redflag_view import redflag_blueprint
from app.views.users_view import user_blueprint
from app.views.auth_view import auth_blueprint
from app.views.intervention_view import intervention_blueprint
from app.views.index_view import index_blueprint, base_url_blueprint
from app.views.media_view import media_blueprint, media_edit_blueprint
from app.views.error_handlers_view import (
page_not_found,
method_not_allowed,
bad_request_error,
internal_server_error
)
from app.utilities.static_strings import (
SWAGGER_UI_URL,
API_URL,
URL_LOGIN,
URL_REGISTER,
URL_INTERVENTIONS,
URL_REDFLAGS,
URL_USERS,
URL_BASE
)
def create_app():
    """this is the application factory function, configuration, registering, etc happen here

    Builds the Flask app: enables CORS, mounts every resource blueprint
    under /api/v1, installs JSON error handlers, and mounts Swagger UI.
    """
    app = Flask(__name__)
    # app.secret_key = os.urandom(12)
    # jwt_manager = JWTManager()
    # jwt_manager.init_app(app)
    CORS(app)  # allow cross-origin requests for all routes
    # Resource blueprints, all versioned under /api/v1.
    app.register_blueprint(redflag_blueprint, url_prefix="/api/v1/red-flags")
    app.register_blueprint(user_blueprint, url_prefix="/api/v1/users")
    app.register_blueprint(intervention_blueprint, url_prefix="/api/v1/interventions")
    app.register_blueprint(auth_blueprint, url_prefix="/api/v1/auth")
    app.register_blueprint(index_blueprint, url_prefix="/api/v1")
    app.register_blueprint(base_url_blueprint, url_prefix="/")
    app.register_blueprint(media_blueprint, url_prefix="/api/v1/files/uploads")
    # app.register_blueprint(media_edit_blueprint, url_prefix="/api/v1/")
    # Custom JSON error handlers for the common HTTP errors.
    app.register_error_handler(400, bad_request_error)
    app.register_error_handler(404, page_not_found)
    app.register_error_handler(405, method_not_allowed)
    app.register_error_handler(500, internal_server_error)
    # Swagger UI served at SWAGGER_UI_URL, documenting API_URL.
    swagger_ui_blueprint = get_swaggerui_blueprint(SWAGGER_UI_URL, API_URL)
    app.register_blueprint(swagger_ui_blueprint, url_prefix=SWAGGER_UI_URL)
    return app
|
995,912 | 8bbeaefffd7853473102e9eddd3364d5cd233375 | #################################################################################
# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,
# National Renewable Energy Laboratory, and National Energy Technology
# Laboratory (subject to receipt of any required approvals from the U.S. Dept.
# of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#################################################################################
"""
This file demonstrates the basics of working with and using the electrolyte
database (EDB).
(1) Before we can start, you must install MongoDB (which is installed separately)
[See more information on the ReadTheDocs under 'Getting Started --> Installing WaterTAP']
(2) After installing MongoDB, you will need to 'load' the database using the
command line function 'edb load -b'. This will load the default database
that WaterTAP is bundled with.
[NOTE: If you need to 'reload' the database, simply use the command 'edb drop -d electrolytedb'
in the command line. The database on MongoDB is named "electrolytedb"]
[NOTE 2: You can invoke the command line utility with the "help" keyword to
get more information on funtionality. Command: 'edb --help' or 'edb [arg] --help']
(3) To use EDB in python, start by importing the interface class object 'ElectrolyteDB'
(4) Invoke the 'ElectrolyteDB' object to connect to the database
(5) Grab a 'base' for a configuration dictionary, and place it into a class object
(6) Get the chemcial species/components for a simulation case. There are a number of ways
to do this. In this example, we will grab them by finding all components that contain
only specific elements. Then, we add those components and their associated parameters
to the configuration dictionary being built from the 'base'.
[NOTE: An alternative method is to provide a list of the names of components you want]
(7) Get the set of reactions you want in your system and put into a 'base' object.
That 'base' can be either a 'thermo' base or a 'reaction' (as in this case)
base. IF you are adding reactions to a 'thermo' base, they should be added
as 'inherent' reactions. IF you are adding reactions to a 'reaction' base,
they should be added as 'equilibrium' (or other) reactions.
(8) When using an reactor object in IDAES, you must always provide a 'reaction_config'
to match with the 'thermo_config'. We can create a base 'reaction' config from
the database and add reactions to that config in the same way we do for the
'thermo_config' when adding reactions as inherent.
[NOTE: If a reaction is added to a 'thermo_config' as 'inherent', this it should
NOT be added to a 'reaction_config' as 'equilibrium']
"""
# ========= These imports (below) are for testing the configs from EDB ===============
# Import specific pyomo objects
from pyomo.environ import (
ConcreteModel,
)
# Import the idaes objects for Generic Properties and Reactions
from idaes.models.properties.modular_properties.base.generic_property import (
GenericParameterBlock,
)
from idaes.models.properties.modular_properties.base.generic_reaction import (
GenericReactionParameterBlock,
)
# Import the idaes object for the EquilibriumReactor unit model
from idaes.models.unit_models.equilibrium_reactor import EquilibriumReactor
# Import the core idaes objects for Flowsheets and types of balances
from idaes.core import FlowsheetBlock
# ========= These imports (above) are for testing the configs from EDB ===============
# ========================== (3) ================================
# Import ElectrolyteDB object
from watertap.edb import ElectrolyteDB
__author__ = "Austin Ladshaw"
# ========================== (4) ================================
# By default, invoking the 'ElectrolyteDB' object (with no args)
# will attempt to connect to the local host database. You can
# check the connection by calling the 'can_connect' function
# and passing the 'host' and 'port' as args. If no 'host' or
# 'port' are given, then it uses the defaults.
def connect_to_edb(test_invalid_host=False):
    """Connect to the default (localhost) EDB instance.

    :return: (db, connected) where `connected` is db.can_connect().
    NOTE(review): `test_invalid_host` is currently unused — confirm
    whether an invalid-host test path was intended here.
    """
    print("connecting to " + str(ElectrolyteDB.DEFAULT_URL))
    db = ElectrolyteDB()
    connected = db.can_connect()
    return (db, connected)
# ========================== (5) ================================
# All configuration files used in WaterTAP for electrolyte chemistry
# require a 'base' dictionary to start. For example, we need to
# create a 'thermo_config' dictionary to pass to the GenericProperty
# package in IDAES. That 'thermo_config' file will always have a
# few specific items in common with most other configuration files.
# Thus, this operation will populate a class object that holds the
# data assocated with that 'base' dictionary.
#
# In the EDB, there are several different 'base' structures to start
# from. In this example, we will build from the 'default_thermo'
# configuration base.
def grab_base_thermo_config(db):
    """Fetch the 'default_thermo' base configuration object from the EDB."""
    return db.get_base("default_thermo")
# ========================== (6) ================================
# Get chemical components/species for a simulation case
# NOTE: This function here also returns a 'list' of the
# components that it finds. This is not a built in
# feature of the EDB, but is very useful because
# getting reactions is dependent on the component list.
def get_components_and_add_to_idaes_config(db, base_obj, by_elements=False):
    """Fetch components from the EDB and add them to `base_obj`.

    :param by_elements: when True, grab every component containing ONLY
        the elements H and O (expected: H2O, H_+, OH_-); otherwise grab
        the explicit component name list.
    :return: (base_obj, list of component names that were added)
    """
    # Going to grab all components that contain ONLY "H" and "O"
    # Expected behavior = Will get "H2O", "H_+", and "OH_-"
    element_list = ["H", "O"]
    # Alternatively, you can pass a list of individual componets
    # you want to grab and the EDB functions should grab explicitly
    # those components/species you want.
    comp_list = ["H2O", "H_+", "OH_-"]
    # Just like before, this function returns a results object
    # that contains other objects that must be iterated through
    # in order to access the information. Then, call the 'add'
    # function to add those components to the 'base' object
    if by_elements == True:
        res_obj_comps = db.get_components(element_names=element_list)
    else:
        res_obj_comps = db.get_components(component_names=comp_list)
    # Iterate through the results object and add the components
    # to the base_obj
    db_comp_list = []
    for comp_obj in res_obj_comps:
        print("Adding " + str(comp_obj.name) + "")
        base_obj.add(comp_obj)
        db_comp_list.append(comp_obj.name)
    print()
    return (base_obj, db_comp_list)
# ========================== (7) ================================
# Grab the reactions associated with the list of components and add
# them to a base object (which could be a 'thermo' base or 'reaction' base)
#
def get_reactions_return_object(db, base_obj, comp_list, is_inherent=True):
    """Look up all reactions for `comp_list`, optionally tag each one as
    'inherent', add them to `base_obj`, and return it.

    Tag as inherent when adding to a 'thermo' base; leave untagged
    ('equilibrium') when adding to a 'reaction' base.
    """
    for reaction in db.get_reactions(component_names=comp_list):
        print("Found reaction: " + str(reaction.name))
        if is_inherent:
            reaction._data["type"] = "inherent"
        base_obj.add(reaction)
    return base_obj
# ========================== (8) ================================
# Create a base config for reactions.
def grab_base_reaction_config(db):
    """Fetch the 'reaction' base configuration object from the EDB."""
    return db.get_base("reaction")
# This function will produce an error if the thermo config is not correct
# This function will produce an error if the thermo config is not correct
def is_thermo_config_valid(thermo_config):
    """Build a flowsheet from `thermo_config`; IDAES raises on an invalid
    config, so reaching the return means the config is usable.

    :return: True when construction succeeds.
    """
    model = ConcreteModel()
    model.fs = FlowsheetBlock(dynamic=False)
    model.fs.thermo_params = GenericParameterBlock(**thermo_config)
    return True
# This function will produce an error if the thermo config is not correct
# or if the pairing of the thermo and reaction config are invalid
def is_thermo_reaction_pair_valid(thermo_config, reaction_config):
    """Build a flowsheet with both configs; IDAES raises if the thermo
    config is invalid or the thermo/reaction pairing is inconsistent.

    :return: True when construction succeeds.
    """
    model = ConcreteModel()
    model.fs = FlowsheetBlock(dynamic=False)
    model.fs.thermo_params = GenericParameterBlock(**thermo_config)
    model.fs.rxn_params = GenericReactionParameterBlock(
        property_package=model.fs.thermo_params, **reaction_config
    )
    return True
# Run script for testing
def run_the_basics_with_mockdb(db):
    """Demo path 1: explicit component list; reactions go into a separate
    'reaction' base as equilibrium reactions, then both configs are
    validated together."""
    base_obj = grab_base_thermo_config(db)
    (base_obj, comp_list) = get_components_and_add_to_idaes_config(db, base_obj)
    # Create a reaction config
    react_base = grab_base_reaction_config(db)
    # Add reactions to the reaction base as 'equilibrium'
    react_base = get_reactions_return_object(
        db, react_base, comp_list, is_inherent=False
    )
    # If all goes well, this function returns true
    return is_thermo_reaction_pair_valid(base_obj.idaes_config, react_base.idaes_config)
# Run script for testing
def run_the_basics_alt_with_mockdb(db):
    """Build a thermo base (components resolved by elements) with its
    reactions attached as 'inherent', then validate the thermo config."""
    base_obj = grab_base_thermo_config(db)
    (base_obj, comp_list) = get_components_and_add_to_idaes_config(
        db, base_obj, by_elements=True
    )
    # Add reactions to the thermo base as 'inherent'
    base_obj = get_reactions_return_object(db, base_obj, comp_list, is_inherent=True)
    # If all goes well, this function returns true
    return is_thermo_config_valid(base_obj.idaes_config)
# Run script for testing
def run_the_basics_dummy_rxn_with_mockdb(db):
    """Build a thermo base with inherent reactions plus a reaction base holding
    only a 'dummy' reaction, then validate the pair by constructing an
    EquilibriumReactor flowsheet."""
    base_obj = grab_base_thermo_config(db)
    (base_obj, comp_list) = get_components_and_add_to_idaes_config(
        db, base_obj, by_elements=True
    )
    # Add reactions to the thermo base as 'inherent'
    base_obj = get_reactions_return_object(db, base_obj, comp_list, is_inherent=True)
    # Create a reaction config
    react_base = grab_base_reaction_config(db)
    # If no reactions are in the reaction base, this will cause an error in IDAES.
    # However, we can add a 'dummy' reaction just to satisfy the IDAES code base.
    react_obj = db.get_reactions(reaction_names=["dummy"])
    for r in react_obj:
        print("Found reaction: " + str(r.name))
        react_base.add(r)
    # IDAES will throw an exception when we try to do this if something is wrong
    thermo_config = base_obj.idaes_config
    reaction_config = react_base.idaes_config
    model = ConcreteModel()
    model.fs = FlowsheetBlock(dynamic=False)
    model.fs.thermo_params = GenericParameterBlock(**thermo_config)
    model.fs.rxn_params = GenericReactionParameterBlock(
        property_package=model.fs.thermo_params, **reaction_config
    )
    # Building the reactor exercises the full thermo/reaction pairing.
    model.fs.unit = EquilibriumReactor(
        property_package=model.fs.thermo_params,
        reaction_package=model.fs.rxn_params,
        has_rate_reactions=False,
        has_equilibrium_reactions=False,
        has_heat_transfer=False,
        has_heat_of_reaction=False,
        has_pressure_change=False,
    )
    # If all goes well, this function returns true
    return is_thermo_reaction_pair_valid(base_obj.idaes_config, react_base.idaes_config)
|
995,913 | 5aa4ed665753c7bafda767d18225aa4c686be8ce | import os
import sys
from extractor import files, registry
def usage():
    """Print the invocation hint and abort with a non-zero exit status."""
    print "Usage: " + sys.argv[0] + " <directory>"
    sys.exit(1)
def main():
    """Validate CLI arguments and environment, then process the directory."""
    if len(sys.argv) != 2:
        usage()
    # EXTRACTOR_DEBUG / EXTRACTOR_DRYRUN toggle the registry's global flags;
    # a dry run implies debug output.
    if os.environ.get("EXTRACTOR_DEBUG") == "1":
        print "DEBUG"
        registry.DEBUG = 1
    if os.environ.get("EXTRACTOR_DRYRUN") == "1":
        print "DRYRUN"
        registry.DRYRUN = 1
        registry.DEBUG = 1
    directory = sys.argv[1]
    if not os.path.isdir(directory):
        print "Error: Directory " + directory + " does not exist"
        sys.exit(1)
    # unrar must exist and be executable before any archive work starts.
    if not os.path.isfile(registry.UNRAR_BINARY) or not os.access(registry.UNRAR_BINARY, os.X_OK):
        print "Error: Unrar binary " + registry.UNRAR_BINARY + " does not exist or is not executable"
        sys.exit(1)
    files.process(directory)
|
995,914 | e3a967e5434546872dcdd6de19cdeba41e06af8b | from collections import defaultdict
import math
import torch
import torch.nn as nn
from torchvision import models
from mlx.od.fcos.decoder import decode_batch_output
from mlx.od.fcos.loss import fcos_batch_loss
class FPN(nn.Module):
    """Feature Pyramid Network backbone.
    See https://arxiv.org/abs/1612.03144
    """
    def __init__(self, backbone_arch, out_channels=256, pretrained=True,
                 levels=None):
        # Assumes backbone_arch is in the Resnet family.
        super().__init__()
        # Strides of cells in each level of the pyramid. Should be in
        # descending order.
        self.strides = [64, 32, 16, 8, 4]
        self.levels = levels
        if levels is not None:
            self.strides = [self.strides[l] for l in levels]
        # Setup bottom-up backbone and hooks to capture output of stages.
        # Assumes there is layer1, 2, 3, 4, which is true for Resnets.
        backbone = getattr(models, backbone_arch)(pretrained=pretrained)
        self.backbone_out = {}
        def make_save_output(layer_name):
            # Closure captures layer_name so each hook stores its own stage.
            def save_output(layer, input, output):
                self.backbone_out[layer_name] = output
            return save_output
        backbone.layer1.register_forward_hook(make_save_output('layer1'))
        backbone.layer2.register_forward_hook(make_save_output('layer2'))
        backbone.layer3.register_forward_hook(make_save_output('layer3'))
        backbone.layer4.register_forward_hook(make_save_output('layer4'))
        # Remove head of backbone.
        self.backbone = nn.Sequential(*list(backbone.children())[0:-2])
        # Setup layers for top-down pathway.
        # Use test input to determine the number of channels in each layer
        # (the dummy forward pass fills self.backbone_out via the hooks).
        self.backbone(torch.rand((1, 3, 256, 256)))
        self.cross_conv1 = nn.Conv2d(
            self.backbone_out['layer1'].shape[1], out_channels, 1)
        self.out_conv1 = nn.Conv2d(
            out_channels, out_channels, 3, 1, 1)
        self.cross_conv2 = nn.Conv2d(
            self.backbone_out['layer2'].shape[1], out_channels, 1)
        self.out_conv2 = nn.Conv2d(
            out_channels, out_channels, 3, 1, 1)
        self.cross_conv3 = nn.Conv2d(
            self.backbone_out['layer3'].shape[1], out_channels, 1)
        self.out_conv3 = nn.Conv2d(
            out_channels, out_channels, 3, 1, 1)
        self.cross_conv4 = nn.Conv2d(
            self.backbone_out['layer4'].shape[1], out_channels, 1)
        # Extra stride-2 conv produces the coarsest (stride 64) level.
        self.up_conv5 = nn.Conv2d(
            out_channels, out_channels, 3, 2, 1)
    def forward(self, input):
        """Computes output of FPN.
        Args:
            input: (tensor) batch of images with shape (batch_sz, 3, h, w)
        Returns:
            (list) output of each level in the pyramid ordered same as
            self.strides. Each output is tensor with shape
            (batch_sz, 256, h*, w*) where h* and w* are height and width
            for that level of the pyramid.
        """
        self.backbone_out = {}
        self.backbone(input)
        # c* is cross output, d* is downsampling output
        c4 = self.cross_conv4(self.backbone_out['layer4'])
        d4 = c4
        u5 = self.up_conv5(c4)
        c3 = self.cross_conv3(self.backbone_out['layer3'])
        # Top-down pathway: resize the coarser map and add the lateral branch.
        d3 = c3 + nn.functional.interpolate(d4, c3.shape[2:])
        c2 = self.cross_conv2(self.backbone_out['layer2'])
        d2 = c2 + nn.functional.interpolate(d3, c2.shape[2:])
        c1 = self.cross_conv1(self.backbone_out['layer1'])
        d1 = c1 + nn.functional.interpolate(d2, c1.shape[2:])
        out = [u5, d4, self.out_conv3(d3), self.out_conv2(d2), self.out_conv1(d1)]
        if self.levels is not None:
            out = [out[l] for l in self.levels]
        return out
class ConvBlock(nn.Module):
    """Conv2d followed by 32-group GroupNorm and a ReLU activation."""
    def __init__(self, in_channels, out_channels, kernel_size, padding=0):
        super().__init__()
        # out_channels must be divisible by the 32 norm groups.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              padding=padding)
        self.gn = nn.GroupNorm(32, out_channels)
    def forward(self, x):
        # conv -> group norm -> relu, composed as a single expression.
        return nn.functional.relu(self.gn(self.conv(x)))
class FCOSHead(nn.Module):
    """Head for FCOS model.
    Outputs reg_arr, label_arr, and center_arr for one level of the pyramid,
    which can be decoded into a BoxList.
    """
    def __init__(self, num_labels, in_channels=256):
        super().__init__()
        c = in_channels
        # Separate 4-conv towers for regression and classification.
        self.reg_branch = nn.Sequential(
            *[ConvBlock(c, c, 3, padding=1) for i in range(4)])
        self.reg_conv = nn.Conv2d(c, 4, 3, padding=1)
        self.label_branch = nn.Sequential(
            *[ConvBlock(c, c, 3, padding=1) for i in range(4)])
        self.label_conv = nn.Conv2d(c, num_labels, 3, padding=1)
        # Centerness shares the classification tower (see forward()).
        self.center_conv = nn.Conv2d(c, 1, 3, padding=1)
        # initialization adapted from retinanet
        # https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/rpn/retinanet/retinanet.py
        for modules in [self.reg_branch, self.reg_conv, self.label_branch, self.label_conv, self.center_conv]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    torch.nn.init.constant_(l.bias, 0)
        # Bias classification logits so the initial foreground probability is
        # ~0.01, as in the RetinaNet focal-loss initialization.
        prob = 0.01
        logit = math.log(prob / (1 - prob))
        torch.nn.init.constant_(self.label_conv.bias, logit)
    def forward(self, x, scale_param):
        """Computes output of head.
        Args:
            x: (tensor) with shape (batch_sz, 256, h*, w*) which is assumed to
                be output of one level of the FPN.
            scale_param: (tensor) with single element used to scale the values
                in the reg_arr and varies across levels of the pyramid.
        Returns:
            tuple of form (reg_arr, label_arr, center_arr) where
            - reg_arr is tensor<n, 4, h, w>,
            - label_arr is tensor<n, num_labels, h, w>
            - center_arr is tensor<n, 1, h, w>
            where label_arr and center_arr contain logits
        """
        # exp() keeps regression outputs positive; scale_param adapts the
        # magnitude per pyramid level.
        reg_arr = torch.exp(scale_param * self.reg_conv(self.reg_branch(x)))
        label_branch_arr = self.label_branch(x)
        label_arr = self.label_conv(label_branch_arr)
        center_arr = self.center_conv(label_branch_arr)
        return (reg_arr, label_arr, center_arr)
class FCOS(nn.Module):
    """Fully convolutional one stage object detector
    See https://arxiv.org/abs/1904.01355
    """
    def __init__(self, backbone_arch, num_labels, pretrained=True,
                 levels=None):
        super().__init__()
        out_channels = 256
        self.num_labels = num_labels
        self.levels = levels
        self.fpn = FPN(backbone_arch, out_channels=out_channels,
                       pretrained=pretrained, levels=levels)
        # One learnable scale per pyramid level, consumed by the shared head.
        num_scales = len(self.fpn.strides)
        self.scale_params = nn.Parameter(torch.ones((num_scales,)))
        self.head = FCOSHead(num_labels, in_channels=out_channels)
        self.subloss_names = ['total_loss', 'reg_loss', 'label_loss', 'center_loss']
    def forward(self, input, targets=None, get_head_out=False):
        """Compute output of FCOS.
        Args:
            input: tensor<n, 3, h, w> with batch of images
            targets: list<BoxList> of length n with boxes and labels set
        Returns:
            if targets is None, returns list<BoxList> of length n, containing
            boxes, labels, and scores for boxes with score > 0.05. Further
            filtering based on score should be done before considering the
            prediction "final".
            if target is a list, returns the losses as dict of form {
                'reg_loss': <tensor[1]>,
                'label_loss': <tensor[1]>,
                'center_loss': <tensor[1]>
            }
            if get_head_out is True, also return a list with the raw output
            from the heads
        """
        fpn_out = self.fpn(input)
        img_height, img_width = input.shape[2:]
        strides = self.fpn.strides
        hws = [level_out.shape[2:] for level_out in fpn_out]
        # Maximum box side each pyramid level is responsible for (matches the
        # descending stride order; filtered the same way as self.strides).
        max_box_sides = [256, 128, 64, 32, 16]
        if self.levels is not None:
            max_box_sides = [max_box_sides[l] for l in self.levels]
        self.pyramid_shape = [
            (s, m, h, w) for s, m, (h, w) in zip(strides, max_box_sides, hws)]
        head_out = []
        for i, level_out in enumerate(fpn_out):
            head_out.append(self.head(level_out, self.scale_params[i]))
        if targets is None:
            # Inference path: decode raw head outputs into BoxLists.
            out = decode_batch_output(head_out, self.pyramid_shape, img_height, img_width)
            if get_head_out:
                return out, head_out
            return out
        loss_dict = fcos_batch_loss(
            head_out, targets, self.pyramid_shape, self.num_labels)
        if get_head_out:
            return loss_dict, head_out
        return loss_dict
995,915 | c81229498f6d97591968f92267dea2674a5d9d91 | # Generated by Django 3.2.8 on 2021-10-15 23:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the 'conta' table that backs
    # the Conta model (account name, account-type choices, decimal balance).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Conta',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=50, verbose_name='Nome da conta')),
                ('tipo', models.IntegerField(choices=[(1, 'Conta Salário'), (2, 'Conta Corrente'), (3, 'Conta Poupança')], default=2, verbose_name='Tipo de conta')),
                ('saldo', models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name='Saldo da conta')),
            ],
            options={
                'verbose_name': 'Conta',
                'verbose_name_plural': 'Contas',
                'db_table': 'conta',
            },
        ),
    ]
|
995,916 | 929697543aab869b1e2c577e3126b81e4f0a9411 | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadDimensionOperator(BaseOperator):
    """Airflow operator that (re)creates and loads the star-schema dimension
    tables (users, artists, songs, time) in Redshift from staging tables.

    Fixes over the previous revision:
    - several ``CREATE TABLE IF NOT EXIST`` typos (missing S) were invalid SQL;
    - ``insert_user_sql`` targeted a nonexistent ``user_table`` and selected
      song columns; it now loads ``users`` from ``staging_events``;
    - create/insert statements for artists and songs used mismatched table
      and column names;
    - the ``__init__`` assignments ended with trailing commas, silently turning
      every attribute into a one-element tuple;
    - ``execute`` referenced the SQL strings as bare names (NameError); they
      are class attributes and must be accessed through ``self``.
    """

    create_user_sql = """CREATE TABLE IF NOT EXISTS public.users (
        userid int4 NOT NULL,
        first_name varchar(256),
        last_name varchar(256),
        gender varchar(256),
        "level" varchar(256),
        CONSTRAINT users_pkey PRIMARY KEY (userid)
    )"""

    # NOTE(review): assumes staging_events has userid/firstname/lastname/
    # gender/level columns — confirm against the staging DDL.
    insert_user_sql = """INSERT INTO users (
        userid,
        first_name,
        last_name,
        gender,
        "level"
    )
    SELECT DISTINCT userid, firstname, lastname, gender, level
    FROM staging_events
    WHERE userid IS NOT NULL"""

    create_artist_sql = """CREATE TABLE IF NOT EXISTS public.artists (
        artistid varchar(256) NOT NULL,
        name varchar(256),
        location varchar(256),
        lattitude numeric(18,0),
        longitude numeric(18,0)
    )"""

    insert_artist_sql = """INSERT INTO artists (
        artistid,
        name,
        location,
        lattitude,
        longitude
    )
    SELECT DISTINCT artist_id, artist_name, artist_location, artist_latitude, artist_longitude
    FROM staging_songs"""

    create_song_sql = """CREATE TABLE IF NOT EXISTS public.songs (
        songid varchar(256) NOT NULL,
        title varchar(256),
        artistid varchar(256),
        "year" int4,
        duration numeric(18,0),
        CONSTRAINT songs_pkey PRIMARY KEY (songid)
    )"""

    insert_song_sql = """INSERT INTO songs (
        songid,
        title,
        artistid,
        "year",
        duration
    )
    SELECT DISTINCT song_id, title, artist_id, year, duration
    FROM staging_songs"""

    create_time_sql = """CREATE TABLE IF NOT EXISTS public."time" (
        start_time timestamp NOT NULL,
        "hour" int4,
        "day" int4,
        week int4,
        "month" varchar(256),
        "year" int4,
        weekday varchar(256),
        CONSTRAINT time_pkey PRIMARY KEY (start_time)
    )"""

    insert_time_sql = """INSERT INTO time (
        start_time, "hour", "day", week, "month", "year", weekday
    )
    SELECT start_time, extract(hour from start_time), extract(day from start_time),
           extract(week from start_time), extract(month from start_time),
           extract(year from start_time), extract(dayofweek from start_time)
    FROM songplays"""

    ui_color = '#80BD9E'

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 aws_credentials_id="",
                 region="",
                 table="",
                 *args, **kwargs):
        super(LoadDimensionOperator, self).__init__(*args, **kwargs)
        # No trailing commas: each attribute is the plain value, not a tuple.
        self.redshift_conn_id = redshift_conn_id
        self.aws_credentials_id = aws_credentials_id
        self.region = region
        self.table = table
        self.execution_date = kwargs.get('execution_date')

    def execute(self, context):
        """Create (if missing) and load every dimension table from staging."""
        redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        for create_sql, insert_sql in (
            (self.create_user_sql, self.insert_user_sql),
            (self.create_artist_sql, self.insert_artist_sql),
            (self.create_song_sql, self.insert_song_sql),
            (self.create_time_sql, self.insert_time_sql),
        ):
            redshift.run(create_sql)
            redshift.run(insert_sql)
|
995,917 | aa757926c7509dfd8bc81a7f1503777e09fa944b | from bs4 import BeautifulSoup
import urllib2
import re
from collections import Counter
# Fetch the local page, strip the markup, and report the most frequent word.
page = urllib2.urlopen("http://localhost")
soup = BeautifulSoup(page, features="html.parser")
text = soup.get_text()
# \w+ tokenizes on word characters; Counter tallies each token.
lst = re.findall(r'\w+',text)
count = Counter(str(x) for x in lst)
# most_common(1) returns [(word, count)] for the single top word.
result = count.most_common(1)[0]
word = result[0]
times = result[1]
print "The most frequent word is " + word +", it has appeared " + str(times) +" times."
|
995,918 | 56587f1432bcb31e1cac8f9fd46fbd64d096470c | import sys
import re
from itertools import chain
def countUniqueWords(filename):
    """Return the number of distinct whitespace-separated words in *filename*.

    The previous version opened the file without closing it: the ``f.close()``
    placed after ``return`` was unreachable, leaking the handle. A ``with``
    block guarantees the file is closed.
    """
    with open(filename, 'r') as f:
        return len(set(chain(*(line.split() for line in f if line))))
def bag(filename, N):
    """Print a bag-of-words count matrix for the lines of *filename*.

    Args:
        filename: path of the preprocessed requests file (one request per line).
        N: number of unique words; each row is sized N+1.
    """
    f = open(filename, 'r');
    content = f.readlines()
    #Create output matrix: one row per line, one column per unique word
    output = [[0]*(N+1) for _ in range(len(content))]
    #Create list to hold discovered words (index = column in output)
    bag_dict = []
    #create var to keep count of line
    count = 0
    #iterate through all lines in processed file
    for line in content:
        #replace newlines
        line = line.replace("\n",' ')
        words = line.split(' ')
        #iterate through every words in line
        for word in words:
            #if not previously discovered word, add to bag and set its count
            if word not in bag_dict:
                bag_dict.append(word)
                output[count][len(bag_dict)-1] = 1
            #if previously discovered word, find index and increment count
            else:
                spot = bag_dict.index(word)
                output[count][spot] = output[count][spot] + 1
        count = count+1
    f.close()
    print output
def main():
    """Clean the raw dump named on the command line into requests.txt, then
    build a bag-of-words matrix from it."""
    f = open(sys.argv[1],'r')
    r = open("requests.txt",'w+')
    #iterate through file to clean it up
    for line in f:
        #find proper lines
        if '"request_text":' in line:
            processed = line.lower()
            #remove excess symbols
            processed = re.sub('[\[\]!~*\-,><}{;)(:#$"&%.?]',' ',processed)
            #remove "\n"s
            processed = processed.replace("\\n",' ')
            #remove slashes, now that "\n" is gone
            processed = re.sub('[\\/]',' ',processed)
            #split on whitespace, dropping any token that contains a digit
            array = [w for w in processed.split() if not re.search(r'\d', w)]
            #array = processed.split()
            #remove first item in line, "request_text"
            body = array[1:]
            #remove digits, emails, money, mmmm, and URLs, and write to file
            r.write(' '.join([i for i in body if not (i.isdigit() or "@" in i or '$' in i or "http" in i or "mmm" in i)]) + '\n')
    f.close()
    r.close()
    N = countUniqueWords("requests.txt")
    #Call the function
    bag("requests.txt", N);
if __name__ == "__main__":
main()
|
995,919 | 1ae8df8b4fefc632992b27f806fc594289e727e6 | import glob
import os
def listDir():
    """Print the names of all CSV files in the current working directory."""
    print('listDir function: list all the files in the current directory')
    print('list files use glob: it is a list type')
    print(glob.glob('*.csv'))
def readFile(fileStr):
    """Demonstrate several ways of reading a text file."""
    print('readFile function')
    print('read all the content')
    inFile = open(fileStr,'r')
    print(inFile.read())
    inFile.close()
    print('use \'readlines\' to read list of lines')
    inFile = open(fileStr,'r')
    for line in inFile.readlines():
        print(line)
    inFile.close()
    print('use \'split\' to split lines')
    inFile = open(fileStr,'r')
    for line in inFile.read().split('\n'):
        print(line)
    inFile.close()
    # 'with' closes the handle automatically; preferred for large files.
    print('with open for large data')
    with open(fileStr) as inFile:
        for line in inFile:
            print(line)
def writeFile(fileStr, writeStr):
    """Write *writeStr* to *fileStr*, overwriting any existing content."""
    print('writeFile function')
    # Renamed local handle: the old code shadowed the function's own name.
    out = open(fileStr, 'w')
    print('write ' + writeStr + ' to ' + fileStr)
    out.write(writeStr)
    out.close()
def subString(myStr):
    """Demonstrate slicing, split and replace on a file-name-like string."""
    print('subString function')
    print('use row')
    # Characters 7-10 hold the year in names like 'SQRC1532017_...'
    if myStr[7:11] in ('2016', '2017'):
        print('test num: ' + myStr[4:7])
    print('use split')
    # Everything before the first underscore.
    head = myStr.split('_')[0]
    print('splitStr: ' + head)
    print('use replace')
    # Drop the literal markers from the head segment.
    cleaned = head.replace('SQRC', '').replace('2017', '')
    print('replacedStr: ' + cleaned)
if __name__ == '__main__':
    # Demo driver: exercises each helper with the sample CSV files.
    print('listDir')
    listDir()
    print('readFile')
    readFile('csvIn.csv')
    print('writeFile')
    writeFile('csvOut.csv','write this line')
    print('subString')
    myStr = 'SQRC1532017_Feb_20_1739.csv'
    subString(myStr)
|
995,920 | 80acdbec4302ce1a16b6ab75e33cb78532a0df89 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
import tests.integ
import tests.integ.timeout
from sagemaker import image_uris
from sagemaker.model import Model
from sagemaker.predictor import Predictor
from sagemaker.serializers import CSVSerializer
from sagemaker.utils import unique_name_from_base
from tests.integ import DATA_DIR
ROLE = "SageMakerRole"
INSTANCE_COUNT = 1
INSTANCE_TYPE = "ml.c5.xlarge"
TEST_CSV_DATA = "42,42,42,42,42,42,42"
XGBOOST_DATA_PATH = os.path.join(DATA_DIR, "xgboost_model")
@pytest.yield_fixture(scope="module")
def endpoint_name(sagemaker_session):
    """Deploy a prebuilt XGBoost model and yield its endpoint name.

    The endpoint is cleaned up by timeout_and_delete_endpoint_by_name when the
    module's tests finish (or after the 2-hour timeout).
    """
    endpoint_name = unique_name_from_base("model-inference-id-integ")
    xgb_model_data = sagemaker_session.upload_data(
        path=os.path.join(XGBOOST_DATA_PATH, "xgb_model.tar.gz"),
        key_prefix="integ-test-data/xgboost/model",
    )
    xgb_image = image_uris.retrieve(
        "xgboost",
        sagemaker_session.boto_region_name,
        version="1",
        image_scope="inference",
    )
    with tests.integ.timeout.timeout_and_delete_endpoint_by_name(
        endpoint_name=endpoint_name, sagemaker_session=sagemaker_session, hours=2
    ):
        xgb_model = Model(
            model_data=xgb_model_data,
            image_uri=xgb_image,
            name=endpoint_name,  # model name
            role=ROLE,
            sagemaker_session=sagemaker_session,
        )
        xgb_model.deploy(INSTANCE_COUNT, INSTANCE_TYPE, endpoint_name=endpoint_name)
        yield endpoint_name
def test_predict_with_inference_id(sagemaker_session, endpoint_name):
    """Predictor.predict should accept an inference_id without raising."""
    predictor = Predictor(
        endpoint_name=endpoint_name,
        sagemaker_session=sagemaker_session,
        serializer=CSVSerializer(),
    )
    # Validate that no exception is raised when inference_id is specified.
    response = predictor.predict(TEST_CSV_DATA, inference_id="foo")
    assert response
def test_invoke_endpoint_with_inference_id(sagemaker_session, endpoint_name):
    """The raw runtime client should accept an InferenceId without raising."""
    response = sagemaker_session.sagemaker_runtime_client.invoke_endpoint(
        EndpointName=endpoint_name,
        Body=TEST_CSV_DATA,
        ContentType="text/csv",
        Accept="text/csv",
        InferenceId="foo",
    )
    assert response
|
995,921 | 0a2cb6f14727dc85a909f5d97a65f433452215ba | import os
import shutil
import requests
import time
import re
from pathlib import Path
from github import Github
from git import Repo
from pydriller import RepositoryMining
from comment_parser import comment_parser as c
from comment_parser.parsers import common
ENV = ""
MIME_MAP = {
".js" : 'application/javascript', # Javascript
".ts" : 'application/javascript', # Javascript
".tsx" : 'application/javascript', # Javascript
".mjs" : 'application/javascript', # Javascript
".html" : 'text/html', # HTML
".c" : 'text/x-c', # C
".cpp" : 'text/x-c++', # C++
".cs" : 'text/x-c++', # C#
".go" : 'text/x-go', # Go
".java" : 'text/x-java', # Java
".py" : 'text/x-python', # Python
".rb" : 'text/x-ruby', # Ruby
".sh" : 'text/x-shellscript', # Unix shell
".xml" : 'text/xml', # XML
}
def find_comments(file_path, str_code):
    """Extract comments from *str_code*, inferring the MIME type from the
    extension of *file_path*. Returns [] when extraction fails."""
    mime = None
    for ext, mime_type in MIME_MAP.items():
        if file_path.endswith(ext):
            mime = mime_type
    try:
        return c.extract_comments_from_str(str_code, mime)
    except Exception:
        # Unknown MIME types and parser failures are treated as "no comments".
        return []
def group_comments(list_comments):
    """Merge runs of comments on consecutive lines into single multiline
    Comment objects anchored at the first line of each run.

    Assumes *list_comments* is ordered by line number.
    """
    comments_grouped = []
    passed_comments = []  # comments already absorbed into an earlier block
    for i in range(len(list_comments)):
        if(list_comments[i] in passed_comments):
            continue
        blocked_comment = list_comments[i]
        first_line = list_comments[i].line_number()
        actual_line = list_comments[i].line_number()
        for j in range(i+1, len(list_comments)):
            if(list_comments[j].line_number() == actual_line + 1): #found block to group
                text = blocked_comment.text() + "\n" + list_comments[j].text()
                blocked_comment = common.Comment(text, first_line, True)
                passed_comments.append(list_comments[j])
                actual_line = list_comments[j].line_number()
            else:
                break
        comments_grouped.append(blocked_comment)
    return comments_grouped
def is_supported(file_path):
    """Return True when *file_path* ends in one of the supported extensions."""
    # str.endswith accepts a tuple of suffixes, replacing the manual loop.
    return file_path.endswith(tuple(MIME_MAP))
def get_added_lines(list_tuple_additions):
    """Return the line numbers (first tuple element) of each addition."""
    return [addition[0] for addition in list_tuple_additions]
def is_satdc(str_comment):
    """Return True when the comment text contains a known SATD marker.

    The comparison is case-insensitive; lowercasing is done once instead of
    per pattern, and any() short-circuits like the original early return.
    """
    patterns = ("todo", "workaround", "fixme", "hack", "technical debt", "tech debt")
    text = str(str_comment).lower()
    return any(p in text for p in patterns)
def get_satdc(repo_path, branch):
    """Mine *branch* of the repo at *repo_path* for newly added SATD comments.

    Returns:
        list of (comment, commit, modification) triples where the comment
        text matches a SATD pattern and its line(s) were added in that commit.
    """
    satdc = []
    commits_cnt = 0
    for commit in RepositoryMining(repo_path, only_in_branch = branch, only_no_merge = True).traverse_commits():
        commits_cnt += 1
        for mod in commit.modifications:
            if(mod.source_code == None):
                continue
            lines_added_in_commit = get_added_lines(mod.diff_parsed['added'])
            if(is_supported(mod.filename)):
                comments_in_file = group_comments(find_comments(mod.filename, mod.source_code))
                for comment in comments_in_file:
                    if(comment.is_multiline()):
                        # A multiline comment counts if ANY of its lines was
                        # added; stop at the first added line found.
                        init = comment.line_number()
                        end = comment.line_number() + len(comment.text().split("\n")) - 1
                        for i in range(init, end + 1):
                            if i in lines_added_in_commit:
                                if is_satdc(comment.text()):
                                    satdc.append((comment, commit, mod))
                                break
                    else:
                        if(comment.line_number() in lines_added_in_commit):
                            if is_satdc(comment.text()):
                                satdc.append((comment, commit, mod))
    print(">> Total de commits analisados {}".format(commits_cnt))
    return satdc
def load_issues():
    """Load SATD issue numbers per repository from datasets/satdi<ENV>.csv.

    Returns:
        dict mapping "owner/repo" -> list of issue-number strings.
    """
    map_issues = {}
    file_name = "datasets/satdi" + ENV + ".csv"
    file_in_path = Path(__file__).parent / file_name
    with open(file_in_path, "r") as read_file:
        for line in read_file.readlines():
            repo = line.split(",")[0].replace('"', '')
            # NOTE(review): field 3 may carry a trailing newline when it is
            # the last column — confirm against the dataset format.
            number = line.split(",")[3]
            # setdefault replaces the explicit key-membership check.
            map_issues.setdefault(repo, []).append(number)
    return map_issues
def find_references(comment_commits, issues):
    """Return (comment, commit, mod, issue_id) tuples for comments mentioning
    one of *issues* as a bare number, '#<n>', or 'issues/<n>' token."""
    references = []
    for comment, commit, mod in comment_commits:
        tokens = re.split('[\n\s\/$n$]', str(comment))
        for issue_id in issues:
            mention_forms = (issue_id, "#" + issue_id, "issues/" + issue_id)
            # One reference per (comment, issue) pair, matching any form.
            if any(form in tokens for form in mention_forms):
                references.append((comment, commit, mod, issue_id))
    return references
def save_commits(repo_owner, repo_name, comment_commits):
    """Append each (comment, commit, modification) triple to
    datasets/satdc<ENV>.csv as a semicolon-separated row."""
    file_name = "datasets/satdc" + ENV + ".csv"
    file_out_path = Path(__file__).parent / file_name
    with open(file_out_path, "a") as the_file:
        for save in comment_commits:
            comment = save[0]
            commit = save[1]
            mod = save[2]
            # Newlines inside the comment are encoded as the literal token $n$.
            the_file.write(repo_owner + "/" + repo_name + ";" + mod.new_path + ";" + \
                comment.text().replace("\n", "$n$") + ";" +\
                str(comment.line_number()) + ";" + commit.hash + ";" +\
                str(commit.committer_date) + "\n")
def save_refs(repo_owner, repo_name, refs):
    """Append each (comment, commit, modification, issue) reference to
    datasets/refs<ENV>.csv as a semicolon-separated row."""
    file_name = "datasets/refs" + ENV + ".csv"
    file_out_path = Path(__file__).parent / file_name
    with open(file_out_path, "a") as the_file:
        for save in refs:
            comment = save[0]
            commit = save[1]
            mod = save[2]
            issue = save[3]
            # Newlines inside the comment are encoded as the literal token $n$.
            the_file.write(repo_owner + "/" + repo_name + ";" + mod.new_path + ";" + \
                issue + ";"+ comment.text().replace("\n", "$n$") + ";" +\
                str(comment.line_number()) + ";" + commit.hash + ";" +\
                str(commit.committer_date) + "\n")
def run(repo_owner, repo_name, label):
    """Clone a repository, mine its SATD comments and issue references,
    persist both, and return (n_comments, n_refs)."""
    # NOTE(review): credentials are blank here — unauthenticated GitHub access
    # is heavily rate limited; confirm tokens are supplied for real runs.
    g = Github("", "")
    github_user = g.get_user()
    original_repo = g.get_repo(repo_owner + "/" + repo_name)
    #clone repository
    repo_path = Path(__file__).parent / "./temp/" / original_repo.name
    cloned_repo = Repo.clone_from(original_repo.clone_url, repo_path)
    #print(cloned_repo.active_branch)
    print('Finding comment commits')
    comment_commits = get_satdc(str(repo_path), cloned_repo.active_branch)
    print(">> Total satdc commits " + str(len(comment_commits)))
    print('Saving SATDC')
    save_commits(repo_owner, repo_name, comment_commits)
    issue_number_list = load_issues()
    print('Finding refs')
    repo = repo_owner + "/" + repo_name
    refs = []
    if(repo in issue_number_list.keys()):
        refs = find_references(comment_commits, issue_number_list[repo])
    print(">> Total refs: " + str(len(refs)))
    print('Saving refs')
    save_refs(repo_owner, repo_name, refs)
    # Remove the temporary clone once mining is done.
    shutil.rmtree(repo_path)
    return (len(comment_commits), len(refs))
if __name__ == "__main__":
#ENV = "_teste"
file_name = "datasets/satd_repos" + ENV + ".csv"
file_in_path = Path(__file__).parent / file_name
with open(file_in_path, "r") as read_file:
satdc = 0
refs = 0
analyzed = []
for line in read_file.readlines():
print(line.replace("\n", ""))
data = line.split(",")
if(data[0] in analyzed):
continue
repo_owner = data[0].split("/")[0]
repo_name = data[0].split("/")[1]
label = data[2]
total = run(repo_owner, repo_name, label)
satdc += total[0]
refs += total[1]
analyzed.append(data[0])
file_name = "datasets/final_stats" + ENV + ".csv"
file_out_path = Path(__file__).parent / file_name
with open(file_out_path, "a") as the_file:
the_file.write(data[0] + ";" + str(total[0]) + ";" +\
str(total[1]) + "\n")
print("\n======== Final Report ========")
print(">> Total satdc comments: " + str(satdc))
print(">> Total refs: " + str(refs)) |
995,922 | bf84b1b66c2dd63ba049f028d39cf1299d7ea3db | # flake8: noqa
from .create_pets import CreatePetsResolver
from .update_pets import UpdatePetsResolver
from .delete_pets import DeletePetsResolver
from .list_pets import ListPetResolver
from .fetch_pets import FetchPetResolver
|
995,923 | 52fc6a9b8daf04de993447278e2d2eac234449cb | """We use this file as an example for some module."""
from __future__ import annotations
from typing import Callable, cast
import numpy as np
from baynet import DAG
__all__ = ["generate_dag"]
def _shuffle(positive: int, total: int, padding: int) -> np.ndarray:
positive = np.min([positive, total])
result = np.concatenate([np.ones(positive), np.zeros(total - positive)])
np.random.shuffle(result) # Uses standard uniform shuffling
return cast(np.ndarray, np.concatenate([np.zeros(padding), result]))
def generate_dag(
    nodes: int, distribution: Callable[[int], list[int]], seed: int = 1  # type: ignore
) -> DAG:
    """Generate a DAG
    Args:
        nodes (int): The number of nodes in the DAG
        distribution(Callable): Any discrete sampling method
        seed (int): The random seed for the stochastic processes in the function
    Returns:
        DAG: A generated DAG
    Examples:
        .. code:: python
            >>> from functools import partial
            >>> from numpy.random import binomial
            >>> dist = partial(binomial, n=5, p=0.5)
            >>> bn = generate_dag(20, dist)
            >>> len(bn.vs)
            20
    """
    np.random.seed(seed)
    # Sort sampled in-degrees descending so earlier columns, which have more
    # candidate parents available, receive the larger degrees.
    in_degree_samples = sorted(distribution(size=nodes), reverse=True)  # type: ignore
    adj_mat = np.zeros((nodes, nodes))
    for i in range(nodes):
        n_parents = in_degree_samples[i]
        # Column i: the i+1 leading zeros restrict parents to later nodes,
        # which keeps the matrix acyclic.
        adj_mat[:, i] = _shuffle(n_parents, nodes - (i + 1), i + 1)
    return DAG.from_amat(adj_mat.T, [str(i) for i in range(nodes)])
|
995,924 | 06818e8dd86f25e2dbabf5e38a8f3717c80831ed | import numpy as np
import tensorflow as tf
from scipy import special as sp
#cubic_model = tf.pow((1 - t), [3]) * cp0 + tres * tf.pow(1 - t, [2]) * t * cp1 + tres * (1 - t) * tf.pow(t, [2]) * cp2 + tf.pow(t, [3]) * cp3
def izOne(x, y):
    """Return 1 if either argument equals 1; otherwise return x * y."""
    if x == 1 or y == 1:
        return 1
    return x * y
def bez(solve, shape=1, size=16, scale=4, dims=2):
    """Fit Bezier control points to a sampled curve with TensorFlow (TF1, py2).

    NOTE(review): this function appears unfinished — it reads `solve_X`,
    `solve_Y` and `loops` from module scope (the first two are defined after
    this function, `loops` is never defined), so it raises as written.
    Confirm intent before reuse.
    """
    Time = np.arange(0, size, dtype='float32') / size
    t = tf.constant(np.asarray(Time), 'float32', shape=[shape, size, 1, dims])
    for i in range(shape - 1):
        t = t.concat([t, t], 0)
    nt = 1 - t
    # time stack == first order of time stack, below
    # you did this to make the for loop + concat smooth
    time_stack = tf.pow(nt, scale - 1)
    for i in range(scale):
        if i == 0 or i == scale - 1:
            continue
        else:
            # Bernstein basis coefficient for the i-th middle term.
            bi = sp.binom(scale - 1, i)
            computation = tf.pow(nt, scale - i) * bi * tf.pow(t, i)
            time_stack = tf.concat([time_stack, computation], 1)
    # concat last order of time stack, below
    # should be correct
    time_stack = tf.concat([time_stack, tf.pow(t, scale - 1)], 1)
    sess = tf.Session()
    sess.run([time_stack, solve_X, solve_Y])
    # Control points (x and y separately) are the trainable variables.
    X = tf.get_variable('cpx', [4,], 'float32', tf.random_normal_initializer())
    Y = tf.get_variable('cpy', [4,], 'float32', tf.random_normal_initializer())
    cubic_X = tf.reduce_sum(tf.matmul(time_stack, tf.diag(X)) - 1, 1)
    cubic_Y = tf.reduce_sum(tf.matmul(time_stack, tf.diag(Y)) - 1, 1)
    loss_X = solve_X - cubic_X
    loss_Y = solve_Y - cubic_Y
    # RMS of the x/y residuals.
    loss = tf.sqrt(tf.reduce_mean(tf.pow(tf.concat([loss_X, loss_Y], 1), 2), 1))
    train = tf.train.AdamOptimizer(1e-4).minimize(loss)
    def cond(i, x, y):
        return i < loops
    def algo(i, x, y):
        # do the loss loop in here
        train.minimize(loss)
        return (i + 1, x, y)
    #loop = tf.while_loop(cond, algo, (0, X, Y))
    sess.run(tf.global_variables_initializer())
    print sess.run([X, Y, loss])
    #i, x, y = sess.run(loop)
    for _ in range(loops):
        sess.run(train)
    #print x, y
    a,b,c = sess.run([X, Y, loss])
    print a
    print b
    print c
    #print sess.run(control_points_X)
# Sampled (x, y) points of the target curve, in normalized 0-1 coordinates.
x = [ 0.8791666666666667,
    0.6663523356119792,
    0.5186442057291666,
    0.42683207194010414,
    0.3817057291666666,
    0.37405497233072915,
    0.39466959635416665,
    0.4343393961588541,
    0.4838541666666666,
    0.5340037027994791,
    0.5755777994791667,
    0.5993662516276042,
    0.5961588541666666,
    0.5567454020182291,
    0.4719156901041667,
    0.3324595133463542]
y = [ 0.15416666666666667,
    0.35943705240885415,
    0.5295328776041667,
    0.6661936442057294,
    0.7711588541666669,
    0.8461680094401042,
    0.8929606119791668,
    0.9132761637369793,
    0.9088541666666667,
    0.8814341227213541,
    0.8327555338541667,
    0.7645579020182292,
    0.6785807291666666,
    0.5765635172526042,
    0.4602457682291667,
    0.3313669840494792]
# NOTE(review): `shape` and `size` are read from module scope here but are
# only defined as defaults of bez() — this script raises NameError as written.
solve_X = tf.constant(x, 'float32', shape=[shape, 1,size])
solve_Y = tf.constant(y, 'float32', shape=[shape, 1,size])
bez([x,y])
|
995,925 | d4be745ba2fab13eb1397ae9e09593a90dd7b8a0 | import pandas as pd
#Ques_1: load the first CSV and display it as a DataFrame.
info=pd.read_csv("Assignment_20(Ques_1).csv")
data=pd.DataFrame(info)
print(data,"\n")
#Ques_2: basic descriptive statistics on the weather dataset.
Weather=pd.read_csv("Assignment_20(Ques_2).csv")
data_1=pd.DataFrame(Weather)
print("\nFirst 5 rows:\n",data_1.head(5)) #Prints first 5 rows of data_1
print("\nFirst 10 rows: \n",data_1.head(10)) #Prints first 10 rows of data_1
print("\nMean:\n:",data_1.mean()) #Basic Stats
print("\nMedian: \n",data_1.median())
print("\nMode: \n",data_1.mode())
print("\nLast 5 rows: \n",data_1.tail(5)) #Prints last 5 rows
Column_3=data_1.loc[:,'MinTemp'] #Column 3 of MinTemp was extracted instead
print(Column_3)
print("\nMean:\n",Column_3.mean())
print("\nMedian:\n",Column_3.median())
print("\nMode:\n",Column_3.mode())
995,926 | 10d3a7c7f76d1b888eee2d58313d7f0c4c0636f1 | print("caseStudy: 학점 계산 - 반복 중첩 조건문 8단계 - 여러 점수 계산")
print("m3_1_forIfElifElseMultiTest_001_02")
print("1. score, score1 변수 선언")
print(' score = [100,95,90,85,80,75,70,65,60,55]')
print(' score1 = []')
score = [100,95,90,85,80,75,70,65,60,55]
score1 = []
print()
print("2. 여러 조건에 따라 실행하는 반복문: ")
print(' for i in score: #range(len(score)-1) ')
print(' ')
print(' print(" 점수 {0:3d}점은 학점이 ".format(i), end="") ')
print(' ')
print(' score1 = i ')
print(' ')
print(' if score1 >100 or score1 < 0 : ')
print(' print("범위에 해당하지 않는 값입니다.") ')
print(' continue ')
print(' ')
print(' if score1 >= 90: ')
print(' grade = "A" ')
print(' elif score1 >= 80: ')
print(' grade = "B" ')
print(' elif score1 >= 70: ')
print(' grade = "C" ')
print(' elif score1 >= 60: ')
print(' grade = "D" ')
print(' else: ')
print(' grade = "F" ')
print(' ')
print(' result = score1 % 10 ')
print(' if score1 == 100: ')
print(' grade += "+" ')
print(' elif score1 < 60: ')
print(' grade += "" ')
print(' elif result >= 5: ')
print(' grade += "+" ')
print(' else: ')
print(' grade += "0" ')
print(' ')
print(' print("{0:2s}학점 입니다.".format(grade)) ')
print()
print("3. 결과값->")
# Print a letter grade for every score (UI strings are Korean).
# Letter comes from the 10-point band; the suffix comes from the ones
# digit: "+" when >= 5 (and always for 100), "0" otherwise, with failing
# grades (< 60) left bare.
for i in score: #range(len(score)-1)
    print(" 점수 {0:3d}점은 학점이 ".format(i), end="")
    score1 = i
    # Reject values outside the valid 0..100 range.
    if score1 >100 or score1 < 0 :
        print("범위에 해당하지 않는 값입니다.")
        continue
    # Base letter by decade.
    if score1 >= 90:
        grade = "A"
    elif score1 >= 80:
        grade = "B"
    elif score1 >= 70:
        grade = "C"
    elif score1 >= 60:
        grade = "D"
    else:
        grade = "F"
    # Suffix from the ones digit.
    result = score1 % 10
    if score1 == 100:
        grade += "+"
    elif score1 < 60:
        grade += ""
    elif result >= 5:
        grade += "+"
    else:
        grade += "0"
    print("{0:2s}학점 입니다.".format(grade))
print()
print('4. 프로그램 종료')
print(' print("Program End")')
print(" Program End") |
995,927 | b5bcca355dc595b99d5c206f48bec17224cdbf1f | import socket
import logging
from twisted.internet.protocol import DatagramProtocol
from . import stun
from .agent import Message
from .authentication import CredentialMechanism
from . import attributes
logger = logging.getLogger(__name__)
class StunUdpProtocol(DatagramProtocol):
    """Twisted UDP protocol that decodes incoming STUN datagrams and
    dispatches them to per-(method, class) handler methods.

    The base ``_stun_binding_*`` handlers only log the message as
    unhandled; subclasses override the ones they care about.
    """

    def __init__(self, reactor, interface, port, software, RTO=3., Rc=7, Rm=16):
        """
        :param port: UDP port to bind to
        :param RTO: Retransmission TimeOut (initial value)
        :param Rc: Retransmission Count (maximum number of request to send)
        :param Rm: Retransmission Multiplier (timeout = Rm * RTO)
        """
        self.reactor = reactor
        self.interface = interface
        self.port = port
        self.software = software
        # Bug fix: RTO and Rc were previously ignored and overwritten with
        # the hard-coded values .5 and 7, contradicting the documented
        # parameters above.
        self.RTO = RTO
        self.Rc = Rc
        self.timeout = Rm * RTO
        # Dispatch table keyed on (method, class) of a decoded message.
        self._handlers = {
            # Binding handlers
            (stun.METHOD_BINDING, stun.CLASS_REQUEST):
                self._stun_binding_request,
            (stun.METHOD_BINDING, stun.CLASS_INDICATION):
                self._stun_binding_indication,
            (stun.METHOD_BINDING, stun.CLASS_RESPONSE_SUCCESS):
                self._stun_binding_success,
            (stun.METHOD_BINDING, stun.CLASS_RESPONSE_ERROR):
                self._stun_binding_error,
            }

    def start(self):
        """Bind the UDP socket and return the port number actually bound."""
        port = self.reactor.listenUDP(self.port, self, self.interface)
        return port.port

    def datagramReceived(self, datagram, addr):
        # The two most significant bits of the first byte select the
        # message family; only STUN datagrams are decoded.
        msg_type = datagram[0] >> 6
        if msg_type == stun.MSG_STUN:
            try:
                msg = Message.decode(datagram)
            except Exception:
                logger.exception("Failed to decode STUN from %s:%d:", *addr)
                logger.debug(datagram.hex())
            else:
                if isinstance(msg, Message):
                    self._stun_received(msg, addr)
                else:
                    logger.warning("Unknown message in datagram from %s:%d:", *addr)
                    logger.debug(datagram.hex())

    def _stun_received(self, msg, addr):
        # Route on (method, class); unknown combinations are only logged.
        handler = self._handlers.get((msg.msg_method, msg.msg_class))
        if handler:
            logger.info("%s Received STUN", self)
            #logger.debug(msg.format())
            handler(msg, addr)
        else:
            logger.info("%s Received unrecognized STUN", self)
            logger.debug(msg.format())

    def _stun_unhandeled(self, msg, addr):
        """Default handler: log the message and drop it."""
        logger.warning("%s Unhandeled message from %s:%d", self, *addr)
        logger.debug(msg.format())

    def _stun_binding_request(self, msg, addr):
        self._stun_unhandeled(msg, addr)

    def _stun_binding_indication(self, msg, addr):
        self._stun_unhandeled(msg, addr)

    def _stun_binding_success(self, msg, addr):
        self._stun_unhandeled(msg, addr)

    def _stun_binding_error(self, msg, addr):
        self._stun_unhandeled(msg, addr)
|
995,928 | 6a433c79e27af9e6e686f6419bc9bfe73c4b82e1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-03 21:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the posts app: Post and Topic models plus the
    Post -> Topic foreign key.

    Auto-generated by Django 1.11; the recorded operations must not be
    edited once the migration has been applied anywhere.
    """

    # First migration of this app, so there is nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                # The title doubles as the primary key (no auto id column).
                ('title', models.CharField(max_length=100, primary_key=True, serialize=False)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                # The topic name is also its primary key.
                ('name', models.CharField(max_length=45, primary_key=True, serialize=False)),
                ('description', models.CharField(max_length=255)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('updated_date', models.DateTimeField(blank=True, null=True)),
            ],
        ),
        # Posts are deleted together with their topic (CASCADE).
        migrations.AddField(
            model_name='post',
            name='topic_name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Topic'),
        ),
    ]
|
995,929 | 570f3c9c047f8aac88e9287d1f4754497bfadbf6 | import cv2
import imutils as imutils
import numpy as np
import random
import sys
from os import path
import matplotlib.pyplot as plt
import matplotlib.colors
import scipy
def readFileAndResize():
    """Load "imagehh.jpg", show a brightness-doubled preview, and return
    the resized (but otherwise unmodified) source image.
    """
    srcImage = cv2.imread("imagehh.jpg")
    srcImage = cv2.resize(srcImage, (1080, 1080))# default image
    # Widen to uint16 so doubling cannot wrap around before clipping.
    img = np.uint16(srcImage)
    img = np.clip(2.0 * img, 0, 255)
    img = np.uint8(img)
    cv2.imshow(" imagemain", img)
    cv2.waitKey(0)
    # NOTE(review): returns the resized original, not the brightened copy
    # displayed above — confirm this is intended.
    return srcImage
readFileAndResize()
def halfTone():
    """Display a grayscale rendition of "imagehh.jpg" and return it."""
    source = cv2.imread("imagehh.jpg")
    gray = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    # Window title keeps the original (misspelled) name on purpose.
    cv2.imshow("hanlfTone", gray)
    cv2.waitKey(0)
    return gray
halfTone()
def build_is_hist(img):
    """Build intensity (Hist_I) and saturation (Hist_S) histograms of an
    RGB image.

    NOTE(review): the accumulation loop at the end is commented out, so
    both histograms are currently returned as all zeros, and the local
    correlation map ``rho`` is computed but never used — confirm whether
    this function is work in progress.
    """
    pass
    hei = img.shape[0]
    wid = img.shape[1]
    ch = img.shape[2]
    # Replicate-pad every channel by 2 pixels on each side so the 5x5
    # windows used below never leave the image.
    Img = np.zeros((hei+4, wid+4, ch))
    for i in range(ch):
        Img[:,:,i] = np.pad(img[:,:,i], (2,2), 'edge')
    # Convert to HSV and rescale H and S to the 0..255 range.
    hsv = (matplotlib.colors.rgb_to_hsv(Img))
    hsv[:,:,0] = hsv[:,:,0] * 255
    hsv[:,:,1] = hsv[:,:,1] * 255
    hsv[hsv>255] = 255
    hsv[hsv<0] = 0
    hsv = hsv.astype(np.uint8).astype(np.float64)
    # Sobel-style horizontal/vertical kernels (currently unused).
    fh = np.array([[-1.0,0.0,1.0],[-2.0,0.0,2.0],[-1.0,0.0,1.0]])
    fv = fh.conj().T
    H = hsv[:,:,0]
    S = hsv[:,:,1]
    I = hsv[:,:,2]
    # Crop the padded border back off to recover the original extent.
    h = H[2:hei+2,2:wid+2]
    s = S[2:hei+2,2:wid+2]
    i = I[2:hei+2,2:wid+2].astype(np.uint8)
    # Per-pixel correlation between intensity and saturation over each
    # 5x5 neighbourhood.
    Rho = np.zeros((hei+4,wid+4))
    for p in range(2,hei+2):
        for q in range(2,wid+2):
            tmpi = I[p-2:p+3,q-2:q+3]
            tmps = S[p-2:p+3,q-2:q+3]
            corre = np.corrcoef(tmpi.flatten('F'),tmps.flatten('F'))
            Rho[p,q] = corre[0,1]
    rho = np.abs(Rho[2:hei+2,2:wid+2])
    # Flat windows yield NaN correlations; treat them as zero.
    rho[np.isnan(rho)] = 0
    # rd = (rho*ds).astype(np.uint32)
    Hist_I = np.zeros((256,1))
    Hist_S = np.zeros((256,1))
    # for n in range(0,255):
    #     temp = np.zeros(di.shape)
    #     temp[i==n] = di[i==n]
    #     Hist_I[n+1] = np.sum(temp.flatten('F'))
    #     temp = np.zeros(di.shape)
    #     temp[i==n] = rd[i==n]
    #     Hist_S[n+1] = np.sum(temp.flatten('F'))
    return Hist_I, Hist_S
def increasingContrast():
    """Show "imagehh.jpg" and then a contrast-boosted (2x, clipped)
    version of it.

    The commented-out block below is an abandoned histogram-equalization
    path built on build_is_hist(); the active code is a simple gain of 2.
    """
    img = cv2.imread('imagehh.jpg')
    cv2.imshow('test', img)
    cv2.waitKey(0)
    # alpha = 0.5
    # hist_i, hist_s = build_is_hist(img)
    # hist_c = alpha * hist_s + (1 - alpha) * hist_i
    # hist_sum = np.sum(hist_c)
    # hist_cum = hist_c.cumsum(axis=0)
    #
    # hsv = matplotlib.colors.rgb_to_hsv(img)
    # h = hsv[:, :, 0]
    # s = hsv[:, :, 1]
    # i = hsv[:, :, 2].astype(np.uint8)
    #
    # c = hist_cum / hist_sum
    # s_r = (c * 255)
    # i_s = np.zeros(i.shape)
    # for n in range(0, 255):
    #     i_s[i == n] = s_r[n + 1] / 255.0
    # i_s[i == 255] = 1
    # hsi_o = np.stack((h, s, i_s), axis=2)
    # result = matplotlib.colors.hsv_to_rgb(hsi_o)
    #
    # result = result * 255
    # result[result > 255] = 255
    # result[result < 0] = 0
    # return result.astype(np.uint8)
    # Widen to uint16, double, clip to 0..255, then narrow back to uint8.
    img = np.uint16(img)
    img = np.clip(2.0 * img, 0, 255)
    img = np.uint8(img)
    cv2.imshow("High contrast", img)
    cv2.waitKey(0)
increasingContrast()
def cannyImg():
    """Run Canny edge detection (thresholds 60/120) on "imagehh.jpg",
    display the edge map, and return it.
    """
    gray = cv2.cvtColor(cv2.imread("imagehh.jpg"), cv2.COLOR_BGR2GRAY)
    edgeMap = cv2.Canny(gray, 60, 120)
    cv2.imshow("Canny", edgeMap)
    cv2.waitKey(0)
    return edgeMap
cannyImg()
def aroundCorners():
    """Mark Harris corners of "imagehh.jpg" with blue circles, display the
    result, and return it converted back to grayscale.
    """
    srcImage = cv2.imread("imagehh.jpg")
    grayImg = cv2.cvtColor(srcImage, cv2.COLOR_BGR2GRAY)
    # Harris response (block size 5, Sobel aperture 3, k = 0.04),
    # normalized to [0, 1] so a fixed 0.3 threshold can be applied.
    dst = cv2.cornerHarris(grayImg, 5, 3, 0.04)
    tmp = np.empty(dst.shape, dtype=np.float32)
    cv2.normalize(dst, tmp, 0.0, 1.0, norm_type=cv2.NORM_MINMAX)
    # Fix: the image was previously re-read and re-converted here with the
    # exact same calls as above; reuse the grayscale already computed.
    imgCanny = cv2.cvtColor(grayImg, cv2.COLOR_GRAY2BGR)
    for i in range(dst.shape[0]):
        for j in range(dst.shape[1]):
            if tmp[i, j] > 0.3:
                # Note OpenCV point order: (x=j, y=i).
                cv2.circle(imgCanny, (j, i), 2, (255, 0, 0), 2)
    cv2.imshow("Corners", imgCanny)
    cv2.waitKey(0)
    imgCanny = cv2.cvtColor(imgCanny, cv2.COLOR_BGR2GRAY)
    return imgCanny
aroundCorners()
def distanceTransform():
    """Compute and display a normalized L2 distance transform of
    "imagehh.jpg", returning the float map scaled to [0, 1].
    """
    srcImage = cv2.imread("imagehh.jpg")
    gray = cv2.cvtColor(srcImage, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
    # grab_contours hides the return-shape difference between OpenCV
    # versions of findContours.
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # Keep only the largest external contour by area.
    c = max(cnts, key=cv2.contourArea)
    # drawContours paints onto (and returns) the gray image itself.
    mask = cv2.drawContours(gray, [c], -1, 255, 2) # Edit: Changed from img to gray
    dist = cv2.distanceTransform(mask, distanceType=cv2.DIST_L2, maskSize=5)
    # Normalize the distance image for range = {0.0, 1.0}
    # so we can visualize and threshold it
    dist = cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)
    cv2.imshow("distance-transform", dist)
    cv2.waitKey(0)
    return dist
distanceTransform()
def Bluring(dist):
    """Variable-radius box blur of "imagehh.jpg" driven by a per-pixel
    distance map.

    For each pixel, averages a square window whose half-size is
    ``3 * dist[x, y]``, using the integral image for O(1) window sums.

    :param dist: per-pixel radius map with the image's height/width
                 (as produced by distanceTransform())
    :return: blurred uint8 BGR image
    """
    img = cv2.imread("imagehh.jpg")
    # Summed-area table; cv2.integral returns shape (h+1, w+1, c).
    integral = cv2.integral(img)
    h = img.shape[0]
    w = img.shape[1]
    c = img.shape[2]
    blur = np.zeros(img.shape, dtype=np.uint8)
    for x in range(h):
        for y in range(w):
            for ch in range(c):
                d = dist[x, y]
                # Clamp the averaging window to the image borders.
                kernelX2 = min(int(x + 3 * d), h - 1)
                kernelX1 = max(int(x - 3 * d), 0)
                kernelY1 = max(int(y - 3 * d), 0)
                kernelY2 = min(int(y + 3 * d), w - 1)
                # Window sum via inclusion-exclusion on the integral
                # image, divided by the window pixel count.
                blur[x, y, ch] = (integral[kernelX2 + 1, kernelY2 + 1, ch] + integral[kernelX1, kernelY1, ch] -
                                  integral[kernelX1, kernelY2 + 1, ch] - integral[kernelX2 + 1, kernelY1, ch]) // ((kernelX2 - kernelX1 + 1) * (kernelY2 - kernelY1 + 1))
    cv2.imshow("Average blur" , blur)
    cv2.waitKey(0)
    return blur
Bluring(distanceTransform())
cv2.destroyAllWindows()
|
995,930 | d640a5dc752ff04b92058f610e5a61d8e953710d | def say_hello():
print ('this is a function')
#end of function
#calling a function
say_hello()
|
995,931 | f2be85880479a80740a995ed0736016b2ccb3e44 | #!/usr/bin/env python
from tornado.web import authenticated
from knimin.handlers.base import BaseHandler
from knimin import db
class AGUpdateGeocodeHandler(BaseHandler):
    """Admin page that shows geocoding statistics and triggers geocoding
    runs against the database."""

    @authenticated
    def get(self):
        # Show the current geocode statistics only.
        stats = db.getGeocodeStats()
        self.render("ag_update_geocode.html", stats=stats,
                    currentuser=self.current_user)

    @authenticated
    def post(self):
        # retry: non-zero re-attempts previously failed lookups.
        retry = int(self.get_argument("retry", 0))
        # limit: max records to geocode in this run; -1 means unlimited.
        limit = int(self.get_argument('limit', -1))
        limit = None if limit == -1 else limit
        db.addGeocodingInfo(limit, retry)
        # Re-render the page with the refreshed statistics.
        stats = db.getGeocodeStats()
        self.render("ag_update_geocode.html", stats=stats,
                    currentuser=self.current_user)
|
995,932 | a6022192d4c78b8c89ede55bba0f0fa109d6a977 | #-------------------------------------
# function to write condor scripts
import os
import os.path
import sys
def write_scripts(dataset, fileno, jobid, gensubmitdir, submitdir, submithost, uid, copyfiles, outdir, command, projectbase_RHEL7, project_RHEL7, fdag, username) :
    """Generate one HTCondor job: a wrapper shell script and a .submit file
    in `gensubmitdir`, plus a JOB line appended to the DAG file `fdag`.

    :param dataset: dataset name, used to build generated file names
    :param fileno: file number within the dataset
    :param jobid: DAG job id written to the JOB line
    :param gensubmitdir: directory the files are generated into
    :param submitdir: path used *inside* the submit/DAG files
    :param submithost: 'npx' disables condor output file transfer
    :param uid: numeric uid used for the X509 proxy file name
    :param copyfiles: tarball transferred to and unpacked on the worker
    :param outdir: destination for transferred output files
    :param command: command executed inside the unpacked build directory
    :param projectbase_RHEL7: directory holding the RHEL7 project tarball
    :param project_RHEL7: RHEL7 project tarball name (without .tar.gz)
    :param fdag: open, writable file object of the DAG being assembled
    :param username: unused; kept for interface compatibility
    """
    outbase = "%s_%s" % (dataset, fileno)
    shellname = "%s.sh" % (outbase)
    submitname = "%s.submit" % (outbase)
    print( "shellname", shellname)

    # On npx the submit host shares the filesystem, so condor must not
    # transfer output files back.
    transfer_files = True
    if submithost == 'npx' :
        transfer_files = False

    workpath = "./"

    # ---- wrapper shell script -------------------------------------------
    f1 = open("%s/%s" % (gensubmitdir, shellname), "w")
    text = "#!/bin/sh \n"
    text += "cat /etc/redhat-release\n"
    text += "gcc --version\n"
    # If no build environment is preset, bootstrap the RHEL7 py2-v3 one.
    text += "if [ -z $G4BUILD ] ; then \n"
    text += " if expr `gcc -dumpversion` : '4.8.5' >/dev/null; then \n"
    text += " echo 'RHEL7 with py2-v3'\n"
    text += " eval `/cvmfs/icecube.opensciencegrid.org/py2-v3/setup.sh`\n"
    text += " if [ ! -e %s%s ] ; then \n" % (workpath,project_RHEL7)
    text += ' echo "copying project %s"\n' % (project_RHEL7)
    text += " cp %s/%s.tar.gz %s\n" % (projectbase_RHEL7, project_RHEL7, workpath)
    text += " tar -zxf %s%s.tar.gz -C %s\n" % (workpath, project_RHEL7, workpath)
    text += " fi \n"
    text += " BUILDDIR=%s%s\n" % (workpath, project_RHEL7)
    text += " else \n"
    text += " echo 'system not supported'\n"
    text += " fi \n"
    text += ' echo "BUILDDIR is $BUILDDIR"\n'
    text += " tar -zxf %s -C $BUILDDIR\n" % (copyfiles)
    text += " cd $BUILDDIR \n"
    text += ' echo "PWD is $PWD"\n'
    text += " source ./env.sh\n"
    text += " %s\n" % (command)
    text += "else \n"
    text += " tar -zxf %s -C $BUILDDIR\n" % (copyfiles)
    text += " cd $BUILDDIR \n"
    text += " %s\n" % (command)
    text += "fi \n\n"
    f1.write(text)
    f1.close()
    os.system("chmod a+x %s/%s" % (gensubmitdir,shellname))

    # ---- condor submit file ---------------------------------------------
    f2 = open("%s/%s" % (gensubmitdir, submitname), "w")
    text = "executable = %s/%s \n" % (submitdir, shellname)
    text += "error = %s/%s.error \n" % (submitdir, outbase)
    text += "output = %s/%s.out\n" % (submitdir, outbase)
    text += "log = %s/%s.condorlog\n" % (submitdir, outbase)
    text += 'environment = "X509_USER_PROXY=x509up_u%d"\n' % (uid)
    text += 'transfer_input_files = %s \n' % (copyfiles)
    if transfer_files :
        # Bug fix: a stray double quote used to be appended after the
        # destination ('output_destination = %s"'), producing an invalid
        # submit-file line.
        text += 'output_destination = %s\n' % (outdir)
        text += 'should_transfer_files = YES\n'
    text += """
request_cpus = 1
request_memory = 2GB
request_disk = 1GB
notification = Error
priority = 10
use_x509userproxy = true
notify_user = hoshina@icecube.wisc.edu
transfer_executable = true
when_to_transfer_output = ON_EXIT_OR_EVICT
periodic_remove = CumulativeSuspensionTime > ((RemoteWallClockTime - CumulativeSuspensionTime) / 2.0)
+SingularityImage="/cvmfs/singularity.opensciencegrid.org/opensciencegrid/osgvo-el7:latest"
+WantGlidein = True
+TransferOutput=""
queue
"""[1:]
    f2.write(text)
    f2.close()

    # Register the job in the DAG.
    fdag.write("JOB %d %s/%s \n" % (jobid, submitdir, submitname))
|
995,933 | e1a6cfb12c5848b0f63f3a083239f9de46517f22 | #!/usr/bin/env python
from google.cloud import bigquery
# Scan the 50 most recent finished BigQuery jobs and report failed queries.
client = bigquery.Client()
jobs = client.list_jobs(max_results=50, state_filter='DONE')
# A job counts as failed when it is a query job carrying at least one error.
failed_jobs = (job for job in jobs if job.job_type == 'query' and job.errors is not None)
for job in failed_jobs:
    print("Query: " + job.query)
    print("Error: " + job.errors[0]['message'])
|
995,934 | c53276405fe6d82e27d6091ea0adde533f12a372 | def twoStrings(s1, s2):
flag = False
a = {}
b = {}
for element in s1:
if element not in a.keys():
a[element] = 0
a[element] += 1
for element in s2:
if element not in b.keys():
b[element] = 0
b[element] += 1
for key in a.keys():
if key in b.keys():
flag = True
break
else:
pass
if flag == True:
return "YES"
else:
return "NO"
#SCORE = 100 |
995,935 | 2ed44b1610b6c923800c6cc590185d8f5d5c1305 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Application-catalog v1 stack action implementation"""
import json
import sys
import urllib
import jsonpatch
from osc_lib.command import command
from osc_lib import utils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from muranoclient.apiclient import exceptions
from muranoclient.common import utils as murano_utils
LOG = logging.getLogger(__name__)
class ListEnvironments(command.Lister):
    """Lists environments"""

    def get_parser(self, prog_name):
        parser = super(ListEnvironments, self).get_parser(prog_name)
        parser.add_argument(
            '--all-tenants',
            action='store_true',
            default=False,
            help='List environments from all tenants (admin only).',
        )
        parser.add_argument(
            '--tenant',
            metavar='<TENANT_ID>',
            default=None,
            help='Allows to list environments for a given tenant (admin only).'
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action({0})".format(parsed_args))
        client = self.app.client_manager.application_catalog
        data = client.environments.list(
            parsed_args.all_tenants, parsed_args.tenant)
        # Render selected fields as a table with capitalized headers.
        columns = ('id', 'name', 'status', 'created', 'updated')
        column_headers = [c.capitalize() for c in columns]
        return (
            column_headers,
            list(utils.get_item_properties(
                s,
                columns,
            ) for s in data)
        )
class ShowEnvironment(command.ShowOne):
    """Display environment details"""

    def get_parser(self, prog_name):
        parser = super(ShowEnvironment, self).get_parser(prog_name)
        parser.add_argument(
            "id",
            metavar="<NAME or ID>",
            help=("Name or ID of the environment to display"),
        )
        parser.add_argument(
            "--only-apps",
            action='store_true',
            default=False,
            help="Only print apps of the environment (useful for automation).",
        )
        parser.add_argument(
            "--session-id",
            metavar="<SESSION_ID>",
            default='',
            help="Id of a config session.",
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action({0})".format(parsed_args))
        client = self.app.client_manager.application_catalog
        # Resolve name-or-id first, then fetch within the (optional)
        # configuration session so pending changes are visible.
        environment = utils.find_resource(client.environments,
                                          parsed_args.id)
        data = client.environments.get(environment.id,
                                       parsed_args.session_id).to_dict()
        # Pretty-print the services object model as indented JSON text.
        data['services'] = jsonutils.dumps(data['services'], indent=2)
        if getattr(parsed_args, 'only_apps', False):
            return(['services'], [data['services']])
        else:
            return self.dict2columns(data)
class RenameEnvironment(command.Lister):
    """Rename an environment."""

    def get_parser(self, prog_name):
        parser = super(RenameEnvironment, self).get_parser(prog_name)
        parser.add_argument(
            'id',
            metavar="<NAME or ID>",
            help="Environment ID or name.",
        )
        parser.add_argument(
            'name',
            metavar="<ENVIRONMENT_NAME>",
            help="A name to which the environment will be renamed.",
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action({0})".format(parsed_args))
        client = self.app.client_manager.application_catalog
        # Resolve name-or-id, then update the environment's name.
        environment = utils.find_resource(client.environments,
                                          parsed_args.id)
        data = client.environments.update(environment.id,
                                          parsed_args.name)
        # Present the renamed environment as a one-row table.
        columns = ('id', 'name', 'status', 'created', 'updated')
        column_headers = [c.capitalize() for c in columns]
        return (
            column_headers,
            [utils.get_item_properties(
                data,
                columns,
            )]
        )
class EnvironmentSessionCreate(command.ShowOne):
    """Creates a new configuration session for environment ID."""

    def get_parser(self, prog_name):
        parser = super(EnvironmentSessionCreate, self).get_parser(prog_name)
        parser.add_argument(
            'id',
            metavar="<ID>",
            help="ID of Environment to add session to.",
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action({0})".format(parsed_args))
        client = self.app.client_manager.application_catalog
        # Open a configuration session on the environment and surface
        # nothing but its id.
        session = client.sessions.configure(parsed_args.id)
        formatted_id = murano_utils.text_wrap_formatter(session.id)
        return (['id'], [formatted_id])
class EnvironmentCreate(command.Lister):
    """Create an environment."""

    def get_parser(self, prog_name):
        parser = super(EnvironmentCreate, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar="<ENVIRONMENT_ID>",
            help="Environment name.",
        )
        parser.add_argument(
            '--region',
            metavar="<REGION_NAME>",
            help="Name of the target OpenStack region.",
        )
        parser.add_argument(
            '--join-subnet-id',
            metavar="<SUBNET_ID>",
            help="Subnetwork id to join.",
        )
        parser.add_argument(
            '--join-net-id',
            metavar="<NET_ID>",
            help="Network id to join.",
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action({0})".format(parsed_args))
        client = self.app.client_manager.application_catalog
        body = {"name": parsed_args.name, "region": parsed_args.region}
        # When an existing network/subnet is to be joined, override the
        # default environment network with an ExistingNeutronNetwork
        # object-model snippet.
        if parsed_args.join_net_id or parsed_args.join_subnet_id:
            res = {
                'defaultNetworks': {
                    'environment': {
                        '?': {
                            'id': uuidutils.generate_uuid(dashed=False),
                            'type':
                                'io.murano.resources.ExistingNeutronNetwork'
                        },
                    },
                    'flat': None
                }
            }
            if parsed_args.join_net_id:
                res['defaultNetworks']['environment']['internalNetworkName'] \
                    = parsed_args.join_net_id
            if parsed_args.join_subnet_id:
                res['defaultNetworks']['environment']['internalSubnetworkName'] = \
                    parsed_args.join_subnet_id
            body.update(res)
        data = client.environments.create(body)
        # Present the new environment as a one-row table.
        columns = ('id', 'name', 'status', 'created', 'updated')
        column_headers = [c.capitalize() for c in columns]
        return (
            column_headers,
            [utils.get_item_properties(
                data,
                columns,
            )]
        )
class EnvironmentDelete(command.Lister):
    """Delete an environment."""

    def get_parser(self, prog_name):
        parser = super(EnvironmentDelete, self).get_parser(prog_name)
        parser.add_argument(
            'id',
            metavar="<NAME or ID>",
            nargs="+",
            help="Id or name of environment(s) to delete.",
        )
        parser.add_argument(
            '--abandon',
            action='store_true',
            default=False,
            help="If set will abandon environment without deleting any of its"
                 " resources.",
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action({0})".format(parsed_args))
        client = self.app.client_manager.application_catalog
        abandon = getattr(parsed_args, 'abandon', False)
        failure_count = 0
        # Delete each requested environment, tolerating individual misses;
        # the command only fails when *none* of them could be found.
        for environment_id in parsed_args.id:
            try:
                environment = murano_utils.find_resource(client.environments,
                                                         environment_id)
                client.environments.delete(environment.id, abandon)
            except exceptions.NotFound:
                failure_count += 1
                print("Failed to delete '{0}'; environment not found".
                      format(environment_id))
        if failure_count == len(parsed_args.id):
            raise exceptions.CommandError("Unable to find and delete any of "
                                          "the specified environments.")
        # List the remaining environments as the command output.
        data = client.environments.list()
        columns = ('id', 'name', 'status', 'created', 'updated')
        column_headers = [c.capitalize() for c in columns]
        return (
            column_headers,
            list(utils.get_item_properties(
                s,
                columns,
            ) for s in data)
        )
class EnvironmentDeploy(command.ShowOne):
    """Start deployment of a murano environment session."""

    def get_parser(self, prog_name):
        parser = super(EnvironmentDeploy, self).get_parser(prog_name)
        parser.add_argument(
            'id',
            metavar="<ENVIRONMENT_ID>",
            help="ID of Environment to deploy.",
        )
        parser.add_argument(
            '--session-id',
            metavar="<SESSION>",
            help="ID of configuration session to deploy.",
        )
        return parser

    def take_action(self, parsed_args):
        """Trigger deployment of the session, then show the environment."""
        LOG.debug("take_action({0})".format(parsed_args))
        client = self.app.client_manager.application_catalog
        client.sessions.deploy(parsed_args.id, parsed_args.session_id)
        environment = utils.find_resource(client.environments,
                                          parsed_args.id)
        data = client.environments.get(environment.id,
                                       parsed_args.session_id).to_dict()
        # Pretty-print the services object model as indented JSON text.
        data['services'] = jsonutils.dumps(data['services'], indent=2)
        # Dead-code removal: a 'getattr(parsed_args, "only_apps", False)'
        # branch copied from ShowEnvironment could never fire because this
        # command defines no --only-apps option, so the getattr default
        # (False) was always taken.
        return self.dict2columns(data)
class EnvironmentAppsEdit(command.Command):
    """Edit environment's services list.

    `FILE` is path to a file, that contains jsonpatch, that describes changes
    to be made to environment's object-model.

    [
        { "op": "add", "path": "/-",
          "value": { ... your-app object model here ... }
        },
        { "op": "replace", "path": "/0/?/name",
          "value": "new_name"
        },
    ]

    NOTE: Values '===id1===', '===id2===', etc. in the resulting object-model
    will be substituted with uuids.

    For more info on jsonpatch see RFC 6902
    """

    def get_parser(self, prog_name):
        parser = super(EnvironmentAppsEdit, self).get_parser(prog_name)
        parser.add_argument(
            'id',
            metavar="<ENVIRONMENT_ID>",
            help="ID of Environment to edit.",
        )
        parser.add_argument(
            'filename',
            metavar="<FILE>",
            # Fix: the help text promises a stdin default and the sibling
            # EnvironmentModelEdit declares the same argument optional, but
            # without nargs='?' argparse made <FILE> mandatory and the
            # stdin branch in take_action() was unreachable.
            nargs="?",
            help="File to read jsonpatch from (defaults to stdin).",
        )
        parser.add_argument(
            '--session-id',
            metavar="<SESSION>",
            help="ID of configuration session to edit.",
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.application_catalog

        # Read the JSON-patch either from the given file or from stdin.
        jp_obj = None
        if not parsed_args.filename:
            jp_obj = json.load(sys.stdin)
        else:
            with open(parsed_args.filename) as fpatch:
                jp_obj = json.load(fpatch)

        jpatch = jsonpatch.JsonPatch(jp_obj)
        environment_id = parsed_args.id
        session_id = parsed_args.session_id
        environment = client.environments.get(environment_id, session_id)

        # Fix: apply the patch once, substitute the '===idN==='
        # placeholders with generated uuids, and upload *that* model. The
        # original re-applied the patch for the upload, discarding the
        # substitutions traverse_and_replace() had made.
        object_model = jpatch.apply(environment.services)
        murano_utils.traverse_and_replace(object_model)

        client.services.put(
            environment_id,
            path='/',
            data=object_model,
            session_id=session_id)
class EnvironmentModelShow(command.ShowOne):
    """Show environment's object model."""

    def get_parser(self, prog_name):
        parser = super(EnvironmentModelShow, self).get_parser(prog_name)
        parser.add_argument(
            'id',
            metavar="<ENVIRONMENT_ID>",
            help="ID of Environment to show.",
        )
        parser.add_argument(
            "--path",
            metavar="<PATH>",
            default='/',
            help="Path to Environment model section. Defaults to '/'."
        )
        parser.add_argument(
            '--session-id',
            metavar="<SESSION_ID>",
            help="Id of a config session.",
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.application_catalog
        # Normalize empty string to None so no session is requested.
        session_id = parsed_args.session_id or None
        # URL-encode the model path so nested segments survive the request.
        path = urllib.parse.quote(parsed_args.path)
        env_model = client.environments.get_model(parsed_args.id, path,
                                                  session_id)
        return self.dict2columns(env_model)
class EnvironmentModelEdit(command.ShowOne):
    """Edit environment's object model."""

    def get_parser(self, prog_name):
        parser = super(EnvironmentModelEdit, self).get_parser(prog_name)
        parser.add_argument(
            'id',
            metavar="<ENVIRONMENT_ID>",
            help="ID of Environment to edit.",
        )
        parser.add_argument(
            "filename",
            metavar="<FILE>",
            nargs="?",
            help="File to read JSON-patch from (defaults to stdin)."
        )
        parser.add_argument(
            '--session-id',
            metavar="<SESSION_ID>",
            help="Id of a config session.",
        )
        return parser

    def take_action(self, parsed_args):
        LOG.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.application_catalog
        # Read the JSON-patch either from the given file or from stdin.
        jp_obj = None
        if not parsed_args.filename:
            jp_obj = json.load(sys.stdin)
        else:
            with open(parsed_args.filename) as fpatch:
                jp_obj = json.load(fpatch)
        # Validate the patch shape client-side before sending: it must be
        # a list of {"op", "path"[, "value"]} changes with an allowed op.
        if not isinstance(jp_obj, list):
            raise exceptions.CommandError(
                'JSON-patch must be a list of changes')
        for change in jp_obj:
            if 'op' not in change or 'path' not in change:
                raise exceptions.CommandError(
                    'Every change in JSON-patch must contain "op" and "path" '
                    'keys')
            op = change['op']
            if op not in ['add', 'replace', 'remove']:
                raise exceptions.CommandError('The value of "op" item must be '
                                              '"add", "replace" or "remove", '
                                              'got {0}'.format(op))
            if op != 'remove' and 'value' not in change:
                raise exceptions.CommandError('"add" or "replace" change in '
                                              'JSON-patch must contain "value"'
                                              ' key')
        session_id = parsed_args.session_id
        new_model = client.environments.update_model(parsed_args.id, jp_obj,
                                                     session_id)
        return self.dict2columns(new_model)
|
995,936 | 55aac2dc3336f9072f9c86e03e27cc265951b17e | """Test suite.
Copyright 2010-2015 Brandon Rhodes. Licensed as free software under the
Apache License, Version 2.0 as detailed in the accompanying README.txt.
"""
from unittest import TestCase
from adventure import load_advent_dat
from adventure.game import Game
class CommandTest(TestCase):
    """Smoke-test every vocabulary word as a game command.

    Each word is issued both alone and with an object ('lamp') in a fresh
    game; the only assertion is that no exception escapes do_command().
    """

    def setUp(self):
        game = Game()
        load_advent_dat(game)
        # One canonical spelling per word: the first synonym of each
        # vocabulary entry.
        self.words = set(w.synonyms[0].text for w in game.vocabulary.values())
        # 'suspend' is excluded — NOTE(review): presumably it would end or
        # save the scripted session; confirm before re-adding.
        self.words.remove('suspend')

    def test_intransitive_commands_should_not_throw_exceptions(self):
        # Issue each word on its own in a brand-new game.
        for word in self.words:
            game = Game()
            load_advent_dat(game)
            game.start()
            game.do_command(['no'])  # WOULD YOU LIKE INSTRUCTIONS?
            game.do_command([word])

    def test_transitive_commands_should_not_throw_exceptions(self):
        # Issue each word against the lamp after entering the building.
        for word in self.words:
            game = Game()
            load_advent_dat(game)
            game.start()
            game.do_command(['no'])  # WOULD YOU LIKE INSTRUCTIONS?
            game.do_command(['enter'])  # so we are next to lamp
            game.do_command([word, 'lamp'])
|
995,937 | 34ba9624e3d34c6620a711928d79db5359d6d871 | from device import *
from google.appengine.ext import webapp
from google.appengine.ext.db import stats
class AddDevice(webapp.RequestHandler):
    """POST handler that creates a Device from the submitted form fields."""

    def post(self):
        device = Device()
        device.name = self.request.get('deviceName')
        device.deviceID = self.request.get('deviceID')
        # Resolve group and manufacturer by name; fetch(1)[0] raises
        # IndexError when no entity with that name exists.
        query = DeviceGroup.all().filter('name = ', self.request.get('deviceGroup'))
        device.group = query.fetch(1)[0]
        query = Manufactorer.all().filter('name = ', self.request.get('manufactorer'))
        device.manufactorer = query.fetch(1)[0]
        device.put()
class AddDeviceGroup(webapp.RequestHandler):
    """POST handler that creates a DeviceGroup from the 'groupName' field."""

    def post(self):
        group = DeviceGroup()
        group.name = self.request.get('groupName')
        group.put()
class AddManufactorer(webapp.RequestHandler):
    """POST handler that creates a manufacturer record (name + website)."""

    def post(self):
        man = Manufactorer()
        man.name = self.request.get('manName')
        man.website = self.request.get('manWebsite')
        man.put()
class RemoveManufactoer(webapp.RequestHandler):
    """POST handler deleting the manufacturer with the given name.

    The misspelled class name is preserved: it is referenced by the URL
    routing elsewhere in the application.
    """

    def post(self):
        # fetch(1)[0] raises IndexError when the name does not exist.
        query = Manufactorer.all().filter('name = ', self.request.get('manufactorer'))
        manufactorer = query.fetch(1)[0]
        manufactorer.delete()
class Devices(webapp.RequestHandler):
    """Server-side DataTables endpoint returning one JSON page of devices.

    Reads the standard DataTables request parameters (sEcho,
    iDisplayLength, iDisplayStart, iSortCol_0, sSortDir_0) and answers
    with {sEcho, iTotalRecords, iTotalDisplayRecords, aaData}.
    """

    def get(self):
        nrDevices = int(Device.all().count())
        sEcho = self.request.get('sEcho')
        numberRecords = int(self.request.get('iDisplayLength'))
        startRecords = int(self.request.get('iDisplayStart'))
        # Sort column index; fall back to 0 when absent or non-numeric.
        # (Fix: was a bare 'except:' that swallowed every exception.)
        try:
            indexSort = int(self.request.get('iSortCol_0'))
        except (TypeError, ValueError):
            indexSort = 0
        directionSort = self.request.get('sSortDir_0')
        # Datastore convention: a '-' prefix on the property name sorts
        # descending.
        if directionSort.strip().lower() == "desc":
            directionSort = "-"
        else:
            directionSort = ""
        sortList = ['deviceID','name']
        device_query = Device.all().order(directionSort + sortList[indexSort])
        devices = device_query.fetch(numberRecords, offset = startRecords)
        # NOTE(review): field values are embedded without JSON escaping —
        # a quote in a device/group/manufacturer name breaks the payload.
        rows = []
        for device in devices:
            rows.append('["' + device.deviceID + '","' + device.name + '","' + device.group.name + '","' + device.manufactorer.name + '"]')
        txt = ('{"sEcho": ' + sEcho + ', "iTotalRecords": ' + str(nrDevices) +
               ', "iTotalDisplayRecords": ' + str(nrDevices) + ', "aaData": [' +
               ",".join(rows) + ']}')
        self.response.out.write(txt)
class DevicesForGroup(webapp.RequestHandler):
    """GET handler returning <option> elements for devices in a group."""

    def get(self):
        # fetch(1)[0] raises IndexError when the group name is unknown.
        query = DeviceGroup.all().filter('name = ', self.request.get('deviceGroup'))
        devgroup = query.fetch(1)[0]
        device_query = Device.all().filter('group = ', devgroup)
        devices = device_query.fetch(100)  # at most 100 devices per group
        txt = ''
        for device in devices:
            # NOTE(review): device.name is not HTML-escaped here.
            txt += '<option>' + device.name + '</option>'
        self.response.out.write(txt)
class DetailsForDevice(webapp.RequestHandler):
    """GET handler writing "<group>\\n<manufacturer>" for a device name."""

    def get(self):
        name = self.request.get('name')
        # fetch(1)[0] raises IndexError when no device has this name.
        query = Device.all().filter('name = ', name)
        dev = query.fetch(1)[0]
        txt = "%s\n%s" %(dev.group.name, dev.manufactorer.name)
        self.response.out.write(txt)
|
995,938 | 49f43d43df6b084e20aa7b440f54541cbcc7a051 | from skateboardapp.models import Skateboard
from skateboardapp.serializers import SkateboardSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
class SkateboardList(APIView):
    """List all skateboards (GET) or create a new one (POST)."""

    def get(self, request, format=None):
        skateboard = Skateboard.objects.all()
        serializer = SkateboardSerializer(skateboard, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request):
        serializer = SkateboardSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # Validation failed: echo the serializer errors back.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SkateboardAvailable(APIView):
    """List skateboards filtered by the URL-captured availability flag."""

    def get(self, request, available, *args, **kwargs):
        skateboard = Skateboard.objects.all().filter(available=available)
        serializer = SkateboardSerializer(skateboard, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class SkateboardOwner(APIView):
    """Read-only listing of skateboards filtered by owner."""

    def get(self, request, owner, *args, **kwargs):
        matching = Skateboard.objects.all().filter(owner=owner)
        data = SkateboardSerializer(matching, many=True).data
        return Response(data, status=status.HTTP_200_OK)
class SkateboardOwnerEntry(APIView):
    """Retrieve, update or delete one skateboard identified by owner and id."""

    def get_object(self, owner, entry_id):
        """Return the matching queryset (empty when no such entry exists).

        NOTE: ``filter()`` never raises ``DoesNotExist``; the previous
        try/except was dead code and has been removed.
        """
        return Skateboard.objects.all().filter(owner=owner, id=entry_id)

    def get(self, request, owner, entry_id, *args, **kwargs):
        skateboard = self.get_object(owner, entry_id)
        if not skateboard:
            # BUG FIX: the status code must be passed as a keyword; the
            # original passed it positionally as the response *body*,
            # producing HTTP 200 with "400" as payload.
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = SkateboardSerializer(skateboard, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, owner, entry_id):
        skateboard = self.get_object(owner, entry_id).first()
        if not skateboard:
            # BUG FIX: same positional-status bug as in get().
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = SkateboardSerializer(skateboard, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, owner, entry_id):
        skateboard = self.get_object(owner, entry_id)
        if not skateboard:
            # BUG FIX: previously reported "Object deleted!" even when
            # nothing matched.
            return Response(status=status.HTTP_400_BAD_REQUEST)
        skateboard.delete()
        return Response(
            {"res": "Object deleted!"},
            status=status.HTTP_200_OK)
|
995,939 | f41f9852dc75963e03a0e16753fe45b0dc48c925 | #!/usr/bin/env python
import unittest
import mortimer.web
class TestRouter(unittest.TestCase):
    """Unit tests for mortimer.web.Router registration and lookup."""

    def setUp(self):
        self.router = mortimer.web.Router()

    def test_invalid_route(self):
        """An empty router resolves nothing."""
        self.assertEqual(self.router.find_route('/'), None)

    def test_add_route(self):
        """A registered pattern becomes resolvable."""
        self.router.add_route((r'/$', None))
        self.assertNotEqual(self.router.find_route('/'), None)

    def test_add_route_list(self):
        """add_route_list registers every (pattern, handler) pair."""
        self.router.add_route_list([
            (r'/$', None),
            (r'/login/?$', None),
        ])
        self.assertNotEqual(self.router.find_route('/'), None)
        self.assertNotEqual(self.router.find_route('/login/'), None)

    def test_trailing_slash(self):
        """An optional trailing slash matches both spellings identically."""
        self.router.add_route((r'/test/?$', None))
        self.assertEqual(self.router.find_route('/test/'),
                         self.router.find_route('/test'))

    def test_match(self):
        """Capture groups from the pattern are returned alongside the handler."""
        self.router.add_route((r'/test/(\d)/?', None))
        match = self.router.find_route('/test/7')
        self.assertNotEqual(match, None)
        (handler, groups) = match
        self.assertEqual(groups[0], '7')
|
995,940 | e8493ebcc4ffdf3dbe6ef3b0af9ee47a9e6f6ce5 | from odoo import api, fields, models, _
class GoldFixingPositionReport(models.Model):
    """One row of the gold fixing position report: movement quantities,
    rates, running balances and capital figures."""
    _name = 'gold.fixing.position.report'
    name = fields.Char(string='Doc. No.')
    date = fields.Date(string='Doc. Date')
    quantity_in = fields.Float(string='In')    # quantity received
    quantity_out = fields.Float(string='Out')  # quantity issued
    rate_kilo = fields.Float(string='Rate/Kg', digits=(12, 3))
    value = fields.Float(string='Value')
    rate_gram = fields.Float(string='Rate/G')
    # NOTE(review): the next two fields share the label 'Balance' — confirm
    # the view distinguishes quantity vs amount, or give them distinct labels.
    quantity_balance = fields.Float(string='Balance')
    amount_balance = fields.Float(string='Balance')
    amount_average = fields.Float(string='Average', digits=(12, 3))
    # NOTE(review): empty labels below — presumably labelled in the view; verify.
    gold_capital = fields.Float(string='')
    current_position = fields.Float(string='')
    capital_difference = fields.Float(string='')
|
995,941 | 50d09eeef2c6bfc6eb0a7c77f3f37c5dc992226f | from django.contrib import admin
from ultimate.index.models import *
class StaticContentAdmin(admin.ModelAdmin):
    """Admin for static content pages; rows are listed and linked by URL."""
    list_display = ('url', 'title',)
    list_display_links = ('url',)
    save_as = True      # allow "save as new" to clone a page
    save_on_top = True  # duplicate the save buttons above the form
admin.site.register(StaticContent, StaticContentAdmin)
class StaticMenuItemsAdmin(admin.ModelAdmin):
    """Admin for static menu items, listed by location with their hierarchy fields."""
    list_display = ('location', 'position', 'parent', 'content', 'href', 'type',)
    list_display_links = ('location',)
    save_as = True      # allow "save as new" to clone an item
    save_on_top = True  # duplicate the save buttons above the form
admin.site.register(StaticMenuItems, StaticMenuItemsAdmin)
|
995,942 | e6ea22580369c23dafdcbb10e5c5e23a0105cd73 | import random
import unittest
import src.uriHandler as uhandler
class TestUriHandler(unittest.TestCase):
    """Unit tests for src.uriHandler.UriHandler: extraction (scheme, authority,
    user info, host, port, path, query, fragment) and URI editing (append /
    replace of each component)."""

    def setUp(self):
        self.uriHandler = uhandler.UriHandler()

    def test_getSchemeSeparator(self):
        scheme = self.uriHandler.getSchemeSeparator('http://')
        self.assertEqual(scheme, "://")
        scheme = self.uriHandler.getSchemeSeparator('http:/')
        self.assertEqual(scheme, ":")
        scheme = self.uriHandler.getSchemeSeparator('http:')
        self.assertEqual(scheme, ":")
        scheme = self.uriHandler.getSchemeSeparator('http')
        self.assertEqual(scheme, "")

    def test_getHttpScheme(self):
        scheme = self.uriHandler.getScheme("http://testdomain.com")
        self.assertEqual(scheme, "http")
        scheme = self.uriHandler.getScheme(" http://netrnternetn ")
        self.assertEqual(scheme, "http")
        scheme = self.uriHandler.getScheme("http://")
        self.assertEqual(scheme, "http")
        scheme = self.uriHandler.getScheme("http:")
        self.assertEqual(scheme, "http")

    def test_getComplexScheme(self):
        scheme = self.uriHandler.getScheme("test.-+-TEST://testdomain.com")
        self.assertEqual(scheme, "test.-+-TEST")
        scheme = self.uriHandler.getScheme("test.-+=;i,'ca-TEST://testdomain.com")
        self.assertEqual(scheme, "")

    def test_getIncorrectScheme(self):
        scheme = self.uriHandler.getScheme(" http ://netrnternetn ")
        self.assertTrue(not scheme)
        scheme = self.uriHandler.getScheme("http")
        self.assertTrue(not scheme)
        scheme = self.uriHandler.getScheme("htt p")
        self.assertTrue(not scheme)

    def test_getFtpScheme(self):
        scheme = self.uriHandler.getScheme("ftp://domain.com")
        self.assertEqual(scheme, "ftp")

    def test_getMailScheme(self):
        scheme = self.uriHandler.getScheme("mailto:username@example.com?subject=Topic")
        self.assertEqual(scheme, "mailto")

    def test_getEmptyScheme(self):
        scheme = self.uriHandler.getScheme("domain.com/something")
        self.assertTrue(not scheme)
        scheme = self.uriHandler.getScheme("../something")
        self.assertTrue(not scheme)
        scheme = self.uriHandler.getScheme("127.0.0.1/something")
        self.assertTrue(not scheme)

    def test_getSchemeInAuthorityWithUserInf(self):
        scheme = self.uriHandler.getScheme("scheme:myusername:mypassword@domain.com:9000/path")
        self.assertEqual(scheme, "scheme")
        scheme = self.uriHandler.getScheme("scheme:mypassword@domain.com:9000/path")
        self.assertEqual(scheme, "scheme")

    def test_getEmptyAuthorityEmptyRequest(self):
        auth = self.uriHandler.getAuthority("")
        self.assertEqual(auth, "")
        auth = self.uriHandler.getAuthority("http:///path")
        self.assertEqual(auth, "")

    def test_getHttpAuthority(self):
        auth = self.uriHandler.getAuthority("http://testdomain.com/path")
        self.assertEqual(auth, "testdomain.com")
        auth = self.uriHandler.getAuthority("http://testdomain.com:9000/path")
        self.assertEqual(auth, "testdomain.com:9000")
        auth = self.uriHandler.getAuthority("http://127.0.0.1/path")
        self.assertEqual(auth, "127.0.0.1")
        auth = self.uriHandler.getAuthority("http://127.0.0.1:9000/path")
        self.assertEqual(auth, "127.0.0.1:9000")
        auth = self.uriHandler.getAuthority("http://[xh:ns:s2:32]:9000/path")
        self.assertEqual(auth, "[xh:ns:s2:32]:9000")
        auth = self.uriHandler.getAuthority(
            "http://errors.angularjs.org/1.2.28/$compile/tpload?p0=%2Fpartials%2Ftiles%2Ftile-m-02.html")
        self.assertEqual(auth, "errors.angularjs.org")

    # BUG FIX: this method was named 'getIpV6Authority' and therefore never
    # discovered by the unittest runner; renamed with the 'test_' prefix so
    # it actually executes.
    def test_getIpV6Authority(self):
        auth = self.uriHandler.getAuthority('http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html')
        self.assertEqual(auth, '[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80')

    def test_getAuthorityWithUserInf(self):
        auth = self.uriHandler.getAuthority("scheme:myusername:mypassword@domain.com:9000/path")
        self.assertEqual(auth, "myusername:mypassword@domain.com:9000")
        auth = self.uriHandler.getAuthority("scheme:myusername@domain.com:9000/path")
        self.assertEqual(auth, "myusername@domain.com:9000")
        auth = self.uriHandler.getAuthority("scheme:MYTESTNAME@domain.com:9000/path")
        self.assertEqual(auth, "MYTESTNAME@domain.com:9000")

    def test_getEmptyUserInformation(self):
        inf = self.uriHandler.getUserInformation('http://domain.com/path')
        self.assertTrue(not inf)
        inf = self.uriHandler.getUserInformation('http://domain.com/path@path')
        self.assertTrue(not inf)

    def test_getUserInformation(self):
        inf = self.uriHandler.getUserInformation('http://test@domain.com/path')
        self.assertEqual(inf, 'test')
        inf = self.uriHandler.getUserInformation('http://test@domain.com/path@path')
        self.assertEqual(inf, 'test')

    def test_getMailUserInformation(self):
        inf = self.uriHandler.getUserInformation('mailto:testemail@testdomain.com')
        self.assertEqual(inf, 'testemail')

    def test_getUserInformationWithoutScheme(self):
        inf = self.uriHandler.getUserInformation('testemail@testdomain.com')
        self.assertEqual(inf, 'testemail')

    def test_getIpV6UserInformation(self):
        usefInf = self.uriHandler.getUserInformation('http://test@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html')
        self.assertEqual(usefInf, 'test')
        usefInf = self.uriHandler.getUserInformation('http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html')
        self.assertEqual(usefInf, '')
        usefInf = self.uriHandler.getUserInformation('http://@[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html')
        self.assertEqual(usefInf, '')

    def test_getEmptyHostEmptyRequest(self):
        host = self.uriHandler.getHost('')
        self.assertTrue(not host)

    def test_getEmptyHost(self):
        host = self.uriHandler.getHost('scheme:///path')
        self.assertTrue(not host)

    def test_getHostInHttp(self):
        host = self.uriHandler.getHost('http://domain.com/path')
        self.assertEqual(host, 'domain.com')
        host = self.uriHandler.getHost('http://domain.com?query=12')
        self.assertEqual(host, 'domain.com')
        host = self.uriHandler.getHost('http://domain.com#fragment')
        self.assertEqual(host, 'domain.com')
        host = self.uriHandler.getHost('http://127.0.0.1/path')
        self.assertEqual(host, '127.0.0.1')
        host = self.uriHandler.getHost('http://127.0.0.1:9000?query=12')
        self.assertEqual(host, '127.0.0.1')

    def test_getIpV6Host(self):
        host = self.uriHandler.getHost('http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html')
        self.assertEqual(host, 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210')

    def test_getHostInAuthority(self):
        host = self.uriHandler.getHost("scheme:MYTESTNAME@domain.com/path")
        self.assertEqual(host, "domain.com")
        host = self.uriHandler.getHost("scheme:myusername:mypassword@domain.com:9000/path")
        self.assertEqual(host, "domain.com")
        host = self.uriHandler.getHost("scheme:myusername@DOMAINdomain.com:9000/path")
        self.assertEqual(host, "DOMAINdomain.com")

    def test_getPort(self):
        host = self.uriHandler.getPort("scheme:MYTESTNAME@domain.com/path")
        self.assertEqual(host, "")
        host = self.uriHandler.getPort("scheme:myusername:mypassword@domain.com:9000/path")
        self.assertEqual(host, "9000")
        host = self.uriHandler.getPort("scheme:myusername@DOMAINdomain.com:9000/path")
        self.assertEqual(host, "9000")

    def test_getIpV6Port(self):
        port = self.uriHandler.getPort('http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html')
        self.assertEqual(port, '80')

    def test_getEmptyPath(self):
        path = self.uriHandler.getPath('')
        self.assertTrue(not path)
        path = self.uriHandler.getPath('domain.com')
        self.assertTrue(not path)
        path = self.uriHandler.getPath('scheme://domain.com')
        self.assertTrue(not path)
        path = self.uriHandler.getPath('scheme://domain.com?query')
        self.assertTrue(not path)
        path = self.uriHandler.getPath('scheme://domain.com#fragment')
        self.assertTrue(not path)
        path = self.uriHandler.getPath('scheme:data@domain.com')
        self.assertTrue(not path)

    def test_getPath(self):
        path = self.uriHandler.getPath('domain.com/path')
        self.assertEqual(path, '/path')
        path = self.uriHandler.getPath('domain.com/root/child/file.ext')
        self.assertEqual(path, '/root/child/file.ext')
        path = self.uriHandler.getPath('scheme://domain.com/root/child/file.ext')
        self.assertEqual(path, '/root/child/file.ext')
        path = self.uriHandler.getPath('scheme:data@domain.com/root/child')
        self.assertEqual(path, '/root/child')
        path = self.uriHandler.getPath('/root/child')
        self.assertEqual(path, '/root/child')
        path = self.uriHandler.getPath('scheme:///root/child')
        self.assertEqual(path, '/root/child')
        path = self.uriHandler.getPath('scheme:/root/child')
        self.assertEqual(path, '/root/child')
        path = self.uriHandler.getPath(
            "http://errors.angularjs.org/1.2.28/$compile/tpload?p0=%2Fpartials%2Ftiles%2Ftile-m-02.html")
        self.assertEqual(path, "/1.2.28/$compile/tpload")

    def test_getPathQuery(self):
        path = self.uriHandler.getPath('scheme://domain.com/root/child?query')
        self.assertEqual(path, '/root/child')
        path = self.uriHandler.getPath('scheme://domain.com/root/child/file.ext?query')
        self.assertEqual(path, '/root/child/file.ext')

    def test_getPathFragment(self):
        path = self.uriHandler.getPath('scheme://domain.com/root/child#fragment')
        self.assertEqual(path, '/root/child')
        path = self.uriHandler.getPath('scheme://domain.com/root/child/file.ext#fragment')
        self.assertEqual(path, '/root/child/file.ext')

    def test_getEmptyStringQuery(self):
        query = self.uriHandler.getStringQuery('scheme://domain.com/root/child#fragment')
        self.assertTrue(not query)
        query = self.uriHandler.getStringQuery('scheme://domain.com/root/child/file.ext#fragment')
        self.assertTrue(not query)

    def test_getStringQuery(self):
        query = self.uriHandler.getStringQuery('scheme:///root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, 'p1=v1&p2=v2&p3=v3')
        query = self.uriHandler.getStringQuery('/root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, 'p1=v1&p2=v2&p3=v3')
        query = self.uriHandler.getStringQuery('scheme://domain.com/root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, 'p1=v1&p2=v2&p3=v3')
        query = self.uriHandler.getStringQuery('scheme://domain.com/root/child/file.ext?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, 'p1=v1&p2=v2&p3=v3')
        query = self.uriHandler.getStringQuery('root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, 'p1=v1&p2=v2&p3=v3')
        query = self.uriHandler.getStringQuery('scheme:usdata@domain:port/root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, 'p1=v1&p2=v2&p3=v3')
        query = self.uriHandler.getStringQuery(
            "http://errors.angularjs.org/1.2.28/$compile/tpload?p0=%2Fpartials%2Ftiles%2Ftile-m-02.html")
        self.assertEqual(query, "p0=%2Fpartials%2Ftiles%2Ftile-m-02.html")

    def test_getEmptyQuery(self):
        query = self.uriHandler.getQuery('scheme://domain.com/root/child#fragment')
        self.assertTrue(not query)
        query = self.uriHandler.getQuery('scheme://domain.com/root/child/file.ext#fragment')
        self.assertTrue(not query)

    def test_getgQuery(self):
        query = self.uriHandler.getQuery('scheme:///root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, {'p1': 'v1', 'p2': 'v2', 'p3': 'v3'})
        query = self.uriHandler.getQuery('/root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, {'p1': 'v1', 'p2': 'v2', 'p3': 'v3'})
        query = self.uriHandler.getQuery('scheme://domain.com/root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, {'p1': 'v1', 'p2': 'v2', 'p3': 'v3'})
        query = self.uriHandler.getQuery('scheme://domain.com/root/child/file.ext?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, {'p1': 'v1', 'p2': 'v2', 'p3': 'v3'})
        query = self.uriHandler.getQuery('root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, {'p1': 'v1', 'p2': 'v2', 'p3': 'v3'})
        query = self.uriHandler.getQuery('scheme:usdata@domain:port/root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(query, {'p1': 'v1', 'p2': 'v2', 'p3': 'v3'})

    def test_getFragment(self):
        frag = self.uriHandler.getFragment('scheme:///')
        self.assertEqual(frag, '')
        frag = self.uriHandler.getFragment('scheme:///#')
        self.assertEqual(frag, '')
        frag = self.uriHandler.getFragment('scheme:///root?p1=v1&p2=v2&p3=v3#fragment')
        self.assertEqual(frag, 'fragment')
        frag = self.uriHandler.getFragment('scheme:///#fragment')
        self.assertEqual(frag, 'fragment')

    def test_appendScheme(self):
        uri = self.uriHandler.appendScheme('', "http")
        self.assertEqual(uri, 'http://')
        uri = self.uriHandler.appendScheme('http://', "http")
        self.assertEqual(uri, 'http://')
        uri = self.uriHandler.appendScheme('domain.com', "http", ':')
        self.assertEqual(uri, 'http:domain.com')

    def test_appendAuthority(self):
        uri = self.uriHandler.appendAuthority("", "domain.com")
        self.assertEqual(uri, 'domain.com')
        uri = self.uriHandler.appendAuthority("http://", "domain.com")
        self.assertEqual(uri, 'http://domain.com')
        uri = self.uriHandler.appendAuthority("http:///path?query", "domain.com")
        self.assertEqual(uri, 'http://domain.com/path?query')
        uri = self.uriHandler.appendAuthority("http://mydomain/path?query", "domain.com")
        self.assertEqual(uri, 'http://mydomain/path?query')

    def test_appendPath(self):
        uri = self.uriHandler.appendPath('', '/path')
        self.assertEqual(uri, '/path')
        uri = self.uriHandler.appendPath('domain.com', '/path')
        self.assertEqual(uri, 'domain.com/path')
        uri = self.uriHandler.appendPath('scheme:', '/path')
        self.assertEqual(uri, 'scheme:/path')
        uri = self.uriHandler.appendPath('scheme://domian/root?query#fragment', '/path')
        self.assertEqual(uri, 'scheme://domian/root/path?query#fragment')
        uri = self.uriHandler.appendPath('?query', '/path')
        self.assertEqual(uri, '/path?query')

    def test_appendQuery(self):
        uri = self.uriHandler.appendQuery('', {'k1': 'v1', 'k2': 'v2'})
        queryParams = self.uriHandler.getQuery(uri)
        self.assertEqual(queryParams, {'k1': 'v1', 'k2': 'v2'})
        self.assertEqual(len(uri), len('?k1=v1&k2=v2'))
        uri = self.uriHandler.appendQuery('domain.com', {'k1': 'v1', 'k2': 42})
        queryParams = self.uriHandler.getQuery(uri)
        self.assertEqual(queryParams, {'k1': 'v1', 'k2': '42'})
        self.assertEqual(len(uri), len('domain.com?k1=v1&k2=42'))
        uri = self.uriHandler.appendQuery('domain.com?t1=42&t2=test', {'k1': 'v1', 'k2': 42})
        queryParams = self.uriHandler.getQuery(uri)
        self.assertEqual(queryParams, {'k1': 'v1', 'k2': '42', 't1': '42', 't2': 'test'})
        self.assertEqual(len(uri), len('domain.com?t1=42&t2=test&k1=v1&k2=42'))

    def test_appendFragment(self):
        uri = self.uriHandler.appendFragment('', 'fragment')
        self.assertEqual(uri, '#fragment')
        uri = self.uriHandler.appendFragment('domain', 'fragment')
        self.assertEqual(uri, 'domain#fragment')
        uri = self.uriHandler.appendFragment('domain#test', 'fragment')
        self.assertEqual(uri, 'domain#testfragment')

    def test_replaceScheme(self):
        uri = self.uriHandler.replaceScheme('', 'http', '://')
        self.assertEqual(uri, 'http://')
        uri = self.uriHandler.replaceScheme('scheme://domain.com', 'http', '://')
        self.assertEqual(uri, 'http://domain.com')
        uri = self.uriHandler.replaceScheme('scheme:domain.com', 'http', '://')
        self.assertEqual(uri, 'http://domain.com')
        uri = self.uriHandler.replaceScheme('scheme:domain.com', '')
        self.assertEqual(uri, 'domain.com')

    def test_replaceAuthority(self):
        uri = self.uriHandler.replaceAuthority('domain.com/path?query', 'username@newdomain.net')
        self.assertEqual(uri, 'username@newdomain.net/path?query')
        uri = self.uriHandler.replaceAuthority('scheme://domain.com/path?query', 'username@newdomain.net')
        self.assertEqual(uri, 'scheme://username@newdomain.net/path?query')
        uri = self.uriHandler.replaceAuthority('/path?query', 'username@newdomain.net')
        self.assertEqual(uri, 'username@newdomain.net/path?query')
        uri = self.uriHandler.replaceAuthority('', 'username@newdomain.net')
        self.assertEqual(uri, 'username@newdomain.net')
        uri = self.uriHandler.replaceAuthority('scheme:///path', 'username@newdomain.net')
        self.assertEqual(uri, 'scheme://username@newdomain.net/path')
        uri = self.uriHandler.replaceAuthority('scheme:///path', '')
        self.assertEqual(uri, 'scheme:///path')

    def test_replacePath(self):
        uri = self.uriHandler.replacePath('', '/newpath')
        self.assertEqual(uri, '/newpath')
        uri = self.uriHandler.replacePath('scheme://domain/path?query#frag', '/root/newpath')
        self.assertEqual(uri, 'scheme://domain/root/newpath?query#frag')
        uri = self.uriHandler.replacePath('scheme://domain/path?query#frag', '')
        self.assertEqual(uri, 'scheme://domain?query#frag')
        uri = self.uriHandler.replacePath('scheme://domain/path?query', '')
        self.assertEqual(uri, 'scheme://domain?query')

    def test_replaceQuery(self):
        uri = self.uriHandler.replaceQuery('', {'k1': 'v1', 'k2': 42})
        self.assertEqual(len(uri), len('?k1=v2&k2=42'))
        uri = self.uriHandler.replaceQuery('scheme://domain/path/path?a=1#fr', {'k1': 'v1', 'k2': 42})
        self.assertEqual(len(uri), len('scheme://domain/path/path?k1=v2&k2=42#fr'))
        uri = self.uriHandler.replaceQuery('scheme://domain/path/path?a=1&nn=11#fr', {})
        self.assertEqual(len(uri), len('scheme://domain/path/path#fr'))
# if __name__ == '__main__':
# suite = unittest.TestLoader().loadTestsFromTestCase(TestUriHandler)
# unittest.TextTestRunner(verbosity=2).run(suite)
|
995,943 | 1111397b523e22fb6087f7dac5829717db2f1acb | l = 10
def test1():
l = 5
print(l) # 5
def test2():
print(l) # 10
def test3():
try:
l = l + 10
print(l) # local variable 'l' referenced before assignment
except Exception as e:
print(e)
def test4():
global l
l = l + 10
print(l) # 20
test1()
test2()
test3()
test4()
|
995,944 | 83f8b4b039f2d75966fc6a00c23227be87fde215 | import sys
import random
def Minesweeper(columns, rows, difficulty):
    """Play a console Minesweeper game on a columns x rows board.

    The grids carry a one-cell sentinel border, hence the +2 sizes.
    `field` holds mine/neighbour counts (9 = mine), `facade` is what gets
    printed, and `revealed` mirrors `field` for cells the player uncovered.
    `difficulty` scales mine density as rows*columns*difficulty/9.
    outcome: 0 = playing, 1 = win, 2 = loss.
    """
    outcome = 0
    field = [[] for x in range(0,rows+2)]
    facade = [[] for x in range(0,rows+2)]
    revealed = [[] for x in range(0,rows+2)]
    for x in range(0,rows+2):
        field[x] = [0]*(columns+2)
    for x in range(0,rows+2):
        facade[x] = ["⏹️"]*(columns+2)
    for x in range(0,rows+2):
        revealed[x] = [-1]*(columns+2)
    # Place mines; randrange may hit the same cell twice, so the actual
    # mine count can be lower than requested.
    for x in range(0,int(rows*columns*difficulty/9)):
        field[random.randrange(1,rows+1)][random.randrange(1,columns+1)] = 9
    # Compute neighbour counts around every mine (8-neighbourhood).
    for x in range(1,rows+1):
        for y in range(1,columns+1):
            if field[x][y] == 9:
                if not field[x-1][y] == 9:
                    field[x-1][y] +=1
                if not field[x][y-1] == 9:
                    field[x][y-1] +=1
                if not field[x+1][y] == 9:
                    field[x+1][y] +=1
                if not field[x][y+1] == 9:
                    field[x][y+1] +=1
                if not field[x-1][y-1] == 9:
                    field[x-1][y-1] +=1
                if not field[x+1][y+1] == 9:
                    field[x+1][y+1] +=1
                if not field[x-1][y+1] == 9:
                    field[x-1][y+1] +=1
                if not field[x+1][y-1] == 9:
                    field[x+1][y-1] +=1
    # Draw the border frame.
    for x in range(0,columns+1):
        facade[0][x] = "#️⃣"
    for x in range(0,columns+1):
        facade[rows+1][x] = "#️⃣"
    for x in range(0,rows+1):
        facade[x][0] = "#️⃣"
    for x in range(0,rows+1):
        facade[x][columns+1] = "#️⃣"
    # NOTE(review): indices look swapped — facade is indexed [row][column]
    # everywhere else, so this raises IndexError when rows != columns; verify.
    facade[columns+1][rows+1] = "#️⃣"
    while outcome == 0:
        # Flood-fill: repeatedly expose neighbours of revealed zero cells.
        for x in range(0,rows*columns - int(rows*columns*difficulty/9)):
            for x in range(1,rows+1):
                for y in range(1,columns+1):
                    if revealed[x][y] == field[x][y] == 0:
                        revealed[x-1][y] = field[x-1][y]
                        revealed[x][y-1] = field[x][y-1]
                        revealed[x+1][y] = field[x+1][y]
                        revealed[x][y+1] = field[x][y+1]
                        revealed[x-1][y-1] = field[x-1][y-1]
                        revealed[x+1][y+1] = field[x+1][y+1]
                        revealed[x-1][y+1] = field[x-1][y+1]
                        revealed[x+1][y-1] = field[x+1][y-1]
                        facade[x][y] = "0️⃣"
        # Render every revealed cell; uncovering a mine ends the game.
        for x in range(1,rows+1):
            for y in range(1,columns+1):
                if revealed[x][y] == field[x][y]:
                    if field[x][y] == 0:
                        facade[x][y] = "0️⃣"
                    elif field[x][y] == 1:
                        facade[x][y] = "1️⃣"
                    elif field[x][y] == 2:
                        facade[x][y] = "2️⃣"
                    elif field[x][y] == 3:
                        facade[x][y] = "3️⃣"
                    elif field[x][y] == 4:
                        facade[x][y] = "4️⃣"
                    elif field[x][y] == 5:
                        facade[x][y] = "5️⃣"
                    elif field[x][y] == 6:
                        facade[x][y] = "6️⃣"
                    elif field[x][y] == 7:
                        facade[x][y] = "7️⃣"
                    elif field[x][y] == 8:
                        facade[x][y] = "8️⃣"
                    elif field[x][y] == 9:
                        facade[x][y] = "💣"
                        outcome = 2
        # NOTE(review): len(revealed) is always rows+2 (the list of row lists),
        # not the number of revealed cells, so this win condition looks like it
        # can never hold for realistic board sizes — confirm and fix.
        if len(revealed) == rows * columns - rows*columns*difficulty/9 and 9 not in revealed:
            outcome = 1
        print("\n"*24)
        for x in range(0,rows+2):
            print(*facade[x])
        if outcome == 0:
            # Coordinates are 1-based because of the sentinel border.
            xcoord = int(input("Enter the x coordinate of the tile you would like to sweep: "))
            ycoord = int(input("Enter the y coordinate of the tile you would like to sweep: "))
            revealed[ycoord][xcoord] = field[ycoord][xcoord]
        if outcome == 1:
            # Win: flag every mine and show the final board.
            for x in range(1,rows+1):
                for y in range(1,columns+1):
                    if field[x][y] == 9:
                        facade[x][y] = "🚩"
            print("\n"*24)
            for x in range(0,rows+2):
                print(*facade[x])
            print("You Win!")
            exit()
        if outcome == 2:
            print("You Lose. You uncovered a mine at",xcoord,"\b,",ycoord,"\b.")
            exit()

Minesweeper(16,16,2)
# ⏹️
# 1️⃣
# 2️⃣
# 3️⃣
# 4️⃣
# 5️⃣
# 6️⃣
# 7️⃣
# 8️⃣
# 💣
# 🚩
|
995,945 | fca1c02fc48c366b2d27ed105fdb379cf19e004b | #=========================================================================
#
# Python Source File -- Created with SAPIEN Technologies PrimalScript 4.1
#
# NAME: problem 84
#
# AUTHOR:hbf
# DATE :14 sep 2007
#
# COMMENT:
#
#=========================================================================
# NOTE(review): 'or' returns its first truthy operand, so n is always 4
# (the number of die sides); edit manually to 6 for the six-sided variant.
n=4 or 6
def add_row(rd, rs, m):
    """In place: rd[j] += rs[j] * m for every column j (dest_row += src_row * m)."""
    for j in range(len(rs)):
        rd[j] += rs[j] * m
def mul_row(r, m):
    """In place: scale every element of row r by m."""
    r[:] = [v * m for v in r]
# Python 2 script (xrange, print statement, leading-zero octal literals).
# Models Project Euler 84 (Monopoly with two n-sided dice) as a linear system
# over 40 squares x 3 consecutive-double counters = 120 states.
n2inv = 1.0/(n**2)
# Build equations eqns: prob(i) = sum(eqns[i][j]*prob(j): j=0..119) where
# prob(i) = probability to go to square i/3 after (i mod 3) double rolls.
# Also include a final zero value which will be used later
eqns = [[0.0] * 121 for i in xrange(120)]
for i in xrange(3, 2*n): # Handle non-doble rolls
    # number of non-double (a,b) pairs summing to i, over n^2 outcomes
    p = n2inv * min((i-1) & -2, (2*n-i+1) & -2)
    i *= 3
    eqns[0][120 - i] = eqns[0][121 - i] = eqns[0][122 - i] = p
for i in xrange(3*2, 3*2*n+1, 3*2): # Handle 1st and 2nd double roll
    eqns[1][120 - i] = eqns[2][121 - i] = n2inv
# Replicate the square-0 equations to every other square by rotation.
for i in xrange(3, 120):
    eqns[i][:3], eqns[i][3:-1] = eqns[i-3][-4:-1], eqns[i-3][:-4]
for i in xrange(2, 120, 3): # 3rd double roll -> jail with roll counter=0
    eqns[3*10+0][i] += 1.0/n
for srclist, div, destlist in ( # Handle jumps between squares
        ((30,), 1, (10,)),                                                  # G2J -> JAIL
        ((07,), 16, (00, 10, 11, 24, 39, 05, (15), (15), (12), (04))),      # CH1
        ((22,), 16, (00, 10, 11, 24, 39, 05, (25), (25), (28), (19))),      # CH2
        ((36,), 16, (00, 10, 11, 24, 39, 05, (05), (05), (12), (33))),      # CH3
        ((02,17,33), 16, (00, 10)) # Must come after the "go to 33" above   # CC cards
    ):
    mul = 1.0/div
    for src in srclist:
        src *= 3
        for i in xrange(3):
            esrc = eqns[src + i]
            for dest in destlist:
                add_row(eqns[dest*3 + i], esrc, mul)
            mul_row(esrc, 1.0 - mul*len(destlist))
# Change each equation e to 'sum(prob(i)* e[i]) + e[120] = 0'
for i, e in enumerate(eqns):
    e[i] -= 1.0
assert abs(sum(map(sum, eqns))) < 1E-10, (src, sum(map(sum, eqns)))
# Add equation 'sum(probabilities) - 100% = 0' and solve
eqns.append([1.0]*120 + [-100.0])
# Gauss-Jordan elimination with partial pivoting.
for i in xrange(120):
    v, k = max([(abs(eqns[j][i]), j) for j in xrange(i, len(eqns))])
    assert v > 1E-10
    eqns[i], eqns[k] = eqns[k], eqns[i]
    ei = eqns[i]
    if ei[i] != -1.0:
        mul_row(ei, -1.0/ei[i])
    for j, ej in enumerate(eqns):
        if j != i and ej[i]:
            add_row(ej, ei, ej[i])
assert sum(map(abs, eqns[-1])) < 1E-10 # Check that last equation is '0=0'
p = [e[-1] for e in eqns[:-1]]
# Combine percentage per square (ignoring roll counts) and find result
p = [p[i+0] + p[i+1] + p[i+2] for i in xrange(0, len(p), 3)]
assert abs(sum(p) - 100.0) < 1E-10 and not [v for v in p if v < -1E-10]
s = sorted([(v, i) for i, v in enumerate(p)], reverse=True)
res = "%02d%02d%02d" % tuple([i for v, i in s[:3]])
print res, s[:4] # result and probabilities for the four most popular squares
|
995,946 | ddee2062cd83ff742e64f69400b9c664230a7514 | #!/usr/bin/python
val1=40
val2=val1
val3=[val2]
del val1
print val3
|
995,947 | b7e0ea5cb2fa04a97d033c41725ed792536a781a | from abc import ABC, abstractmethod
class Pessoa(ABC):
    """Abstract base class representing a person.

    Exposes ``nome`` (name) and ``cpf`` (Brazilian taxpayer id) through
    read/write properties; concrete subclasses must implement ``__init__``.
    """

    @abstractmethod
    def __init__(self, nome: str, cpf: int):
        # Name-mangled backing fields, accessed only via the properties below.
        self.__nome = nome
        self.__cpf = cpf

    @property
    def nome(self):
        """The person's name."""
        return self.__nome

    @nome.setter
    def nome(self, value):
        self.__nome = value

    @property
    def cpf(self):
        """The person's CPF number."""
        return self.__cpf

    @cpf.setter
    def cpf(self, value):
        self.__cpf = value
995,948 | 7fcddf1a7de4ff2e9878b300c5047818e2253be0 | from django.conf import settings
from django.contrib.auth import get_user_model, authenticate, login, logout
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic.base import TemplateView
from gamesight.apps.accounts.models import SubscriptionPlan
class LoginView(TemplateView):
    """Email/password login page."""
    template_name = 'accounts/login.html'

    def post(self, request, *args, **kwargs):
        """Authenticate the posted credentials.

        Re-renders the form with an error message on bad or missing
        credentials; redirects to 'home' on success.
        """
        email = request.POST.get('email')
        password = request.POST.get('password')
        if email and password:
            user_cache = authenticate(email=email, password=password)
            if user_cache is None:
                return self.render_to_response({'error_message': 'Please use correct email and password'})
            login(request, user_cache)
            return HttpResponseRedirect(reverse_lazy('home'))
        # BUG FIX: the original fell off the end (returned None -> HTTP 500)
        # when either field was missing; re-render with the error instead.
        return self.render_to_response({'error_message': 'Please use correct email and password'})

    def get(self, request, *args, **kwargs):
        """Show the form; already-authenticated users are redirected."""
        next_url = request.GET.get('next', None)  # renamed: 'next' shadowed the builtin
        if request.user.is_authenticated:
            if next_url:
                return HttpResponseRedirect(next_url)
            return HttpResponseRedirect(reverse_lazy('home'))
        return self.render_to_response({'next': next_url})
class RegisterView(TemplateView):
    """Sign-up form: creates the account, logs the user in and attaches a plan."""
    template_name = 'accounts/register.html'

    def get_context_data(self, **kwargs):
        context = {
            'free_subscription_plan': SubscriptionPlan.objects.get(id=1),
            'paid_subscription_plans': SubscriptionPlan.objects.filter(id__gt=1),
        }
        context.update(kwargs)
        return context

    def post(self, request, *args, **kwargs):
        """Validate the form, create and authenticate the user, redirect home."""
        email = request.POST.get('email')
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        organization_name = request.POST.get('organization_name')
        # Guard clauses replace the original nested if/else; behavior is unchanged.
        if not (email and password and password2 and organization_name):
            return self.render_to_response(self.get_context_data(error_message='Please fill all required fields'))
        if password != password2:
            return self.render_to_response(self.get_context_data(error_message='Password does not match'))
        user = get_user_model().objects.create_user(email=email, password=password, name=organization_name)
        user.save()
        user = authenticate(email=email, password=password)
        login(request, user)
        user.profile.create_subscription(SubscriptionPlan.objects.first())
        return HttpResponseRedirect(reverse_lazy('home'))
class SubscriptionListView(TemplateView):
    """Lists the purchasable (non-free) subscription plans."""
    template_name = 'accounts/subscriptions.html'

    def get_context_data(self, **kwargs):
        context = dict(paid_subscription_plans=SubscriptionPlan.objects.filter(id__gt=1))
        context.update(kwargs)
        return context
def logout_user(request, *args, **kwargs):
    """Terminate the session and send the user back to the login page."""
    logout(request)
    return HttpResponseRedirect(reverse_lazy('login'))
|
995,949 | ee92cb6c23cef031aab88512e8a53c9550276e52 | """
Vasya implemented nonoptimal Enum classes.
Remove duplications in variables declarations using metaclasses.
from enum import Enum
class ColorsEnum(Enum):
RED = "RED"
BLUE = "BLUE"
ORANGE = "ORANGE"
BLACK = "BLACK"
class SizesEnum(Enum):
XL = "XL"
L = "L"
M = "M"
S = "S"
XS = "XS"
Should become:
class ColorsEnum(metaclass=SimplifiedEnum):
__keys = ("RED", "BLUE", "ORANGE", "BLACK")
class SizesEnum(metaclass=SimplifiedEnum):
__keys = ("XL", "L", "M", "S", "XS")
assert ColorsEnum.RED == "RED"
assert SizesEnum.XL == "XL"
"""
from collections.abc import Generator
from typing import Any, Dict, Optional
class SimplifiedEnum(type):
    """Metaclass that expands a ``__keys`` tuple into name == value attributes.

    A class declared as ``class C(metaclass=SimplifiedEnum)`` with a private
    ``__keys`` tuple gets one attribute per key whose value equals the key
    itself, so ``C.RED == "RED"`` without repeating every name twice.
    """

    def __new__(cls, name: str, bases: tuple, dct: Dict) -> Any:
        """Create the class, materialising ``__keys`` into a ``members`` dict.

        Args:
            name: name of the class being created;
            bases: ancestor classes (may be empty);
            dct: the namespace of the new class.

        Returns:
            the newly created class.
        """
        declared = dct.get(f"_{name}__keys") or ()  # name-mangled lookup
        dct["members"] = {key: key for key in declared}
        return type.__new__(cls, name, bases, dct)

    def __getattribute__(self, name: str) -> Optional[Any]:
        """Normal attribute lookup with a fallback to the ``members`` mapping.

        Raises:
            AttributeError: when the name is neither a real attribute
                nor a declared member.
        """
        try:
            return type.__getattribute__(self, name)
        except AttributeError as error:
            members = type.__getattribute__(self, "__dict__")["members"]
            if name in members:
                return members[name]
            raise error

    def __setattr__(self, name: str, value: str) -> None:
        """Route attribute assignment into the ``members`` mapping."""
        type.__getattribute__(self, "__dict__")["members"][name] = value

    def __len__(self) -> int:
        """Number of declared members."""
        return len(type.__getattribute__(self, "__dict__")["members"])

    def __iter__(self) -> Generator[str, None, None]:
        """Iterate over the member names."""
        yield from type.__getattribute__(self, "__dict__")["members"]
|
995,950 | 75c89122df91b70afa5ea237489ec736a3ffdd07 | /Users/gesteves/anaconda/lib/python2.7/sre_parse.py |
995,951 | 1f2db516b3868334639f29ccfa3dc2221de735e7 | class MemoryAccessError(IndexError):
pass
class MemoryReadError(MemoryAccessError):
pass
class MemoryWriteError(MemoryAccessError):
    """Raised when a memory write fails."""
    pass
|
995,952 | 1f0cff42650faa60fe4464baf61660dc05f52f2d | from unittest import TestCase
class AuthHelperTestCase(TestCase):
    """TestCase subclass providing extra custom assertion helpers."""

    def assertAnyIn(self, needles, haystack):
        """Assert that at least one element of *needles* occurs in *haystack*.

        :param list needles: Collection of candidates we are searching for
        :param list haystack: Source of truth
        :raises AssertionError: when no needle is found in the haystack
        :return: None
        """
        for needle in needles:
            if needle in haystack:
                return
        raise AssertionError('None of \'{needles}\' in \'{haystack}\''.format(
            needles=",".join(needles),
            haystack=haystack
        ))
|
def findTheAccessCodes(l):
    """Count "lucky triples" in *l*: index triples (i, j, k), i < j < k,
    with l[j] % l[i] == 0 and l[k] % l[j] == 0.

    Single O(n^2) pass over index pairs: divisors[j] accumulates how many
    earlier entries divide l[j]; every divisible pair (j, k) then closes
    divisors[j] complete triples, so those are added to the running total.

    Example: for [2, 4, 8, 10, 16] the triples are (2,4,8), (2,4,16),
    (2,8,16) and (4,8,16), giving 4.
    """
    divisors = [0] * len(l)
    total = 0
    for k in range(len(l)):
        for j in range(k):
            if l[k] % l[j] == 0:
                divisors[k] += 1
                total += divisors[j]
    return total
|
995,954 | 2905874604a73587b16ddbea10741c4a42c91b87 | import web3
import base64
from web3 import Web3, HTTPProvider
from web3.middleware import geth_poa_middleware

# Connects to a Kaleido-hosted JSON-RPC endpoint over HTTP Basic auth and
# prints the latest block. TESTED WITH python 3.6
# Fill these in to test, ex. remove @RPC_ENDPOINT@
USER = "@USER@"
PASS = "@PASS@"
RPC_ENDPOINT = "@RPC_ENDPOINT@"
# Encode the username and password from the app creds into USER:PASS base64 encoded string
auth = USER + ":" + PASS
encodedAuth = base64.b64encode(auth.encode('ascii')).decode('ascii')
# Build the header object with the Basic auth and the standard headers
headers = {'headers': {'Authorization': 'Basic %s' % encodedAuth,
'Content-Type': 'application/json',
'User-Agent': 'kaleido-web3py'}}
# Construct a Web3 object by constructing and passing the HTTP Provider
provider = HTTPProvider(endpoint_uri=RPC_ENDPOINT, request_kwargs=headers)
w3 = Web3(provider)
# Add the Geth POA middleware needed for ExtraData Header size discrepancies between consensus algorithms
# See: http://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority
# ONLY for GETH/POA; If you are using quorum, comment out the line below
# NOTE(review): 'middleware_stack' was renamed 'middleware_onion' in web3.py v5 —
# confirm this script is pinned to web3 v4 before upgrading.
w3.middleware_stack.inject(geth_poa_middleware, layer=0)
# Get the latest block in the chain
block = w3.eth.getBlock("latest")
# Print the block out to the console
print(block)
|
995,955 | d2843e247e4b0a38e15a36ffdb8dd85f1f234244 | from shapes import *
# Draw each demo shape in sequence, then hand control to the GUI event loop
# (presumably turtle's mainloop re-exported by shapes — TODO confirm).
square()
pentagon()
hexagon()
octogon()
star()
circ()
mainloop()
|
995,956 | 0bef7cbd37c7bcdca64f39493088deabfbfdf8b4 | import requests
class Network:
    """Minimal HTTP helper bound to a fixed base URL."""

    def __init__(self, baseUrl):
        # Endpoint that every request() call will hit.
        self._baseUrl = baseUrl

    def request(self):
        """GET the base URL and return (status_code, body bytes)."""
        response = requests.get(self._baseUrl)
        return response.status_code, response.content
class C:
    """Minimal fixture class exposing a two-argument method."""

    def method(self, a, b):
        # Intentionally empty — only the signature matters here.
        pass


# Module-level alias: a bound method of a shared instance, callable as func(a, b).
_inst: C = C()
func = _inst.method
995,958 | c69b2527759f2e4830344d2f146c0e60f006eacc | """Public URL configuration."""
from django.conf.urls import patterns, include, url
from .views import HomeTemplateView, AboutTemplateView
# Public site URL map; string targets use the legacy 'public.views.<name>'
# dotted-path form.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10 —
# this module presumably targets an older Django; confirm before upgrading.
urlpatterns = patterns('',
    url(r'^$', HomeTemplateView.as_view(), name='home'),
    url(r'^about$', AboutTemplateView.as_view(), name='about'),
    # authentication
    url(r'^login$', 'public.views.login', name='login'),
    url(r'^logout$', 'public.views.logout', name='logout'),
    url(r'^signup$', 'public.views.signup', name='signup'),
    # dashboard
    url(r'^dashboard$', 'public.views.dashboard', name='dashboard'),
    # profile
    url(r'^users/(?P<username>[-a-zA-Z0-9]+)/$', 'public.views.profile',
        name='profile'),
    url(r'^user/(?P<username>[-a-zA-Z0-9]+)/(?P<date>[-0-9]+)$',
        'public.views.user_day', name='user_day'),
    url(r'^users/$', 'public.views.users', name='users'),
    url(r'^settings$', 'public.views.settings', name='settings'),
    # workouts
    url(r'^workouts$', 'public.views.workouts', name='workouts'),
    url(r'^workouts/(?P<workout_id>[0-9]+)$', 'public.views.workout',
        name='workout'),
    url(r'^record-workout$', 'public.views.record_workout',
        name='record_workout'),
    # internal workout ajax requests
    url(r'^ajax/workout/(?P<workout_id>[0-9]+)$', 'public.views.ajax_workout',
        name='ajax_workout'),
    url(r'^ajax/workouts/$', 'public.views.ajax_workouts',
        name='ajax_workouts'),
    # meals
    url(r'^nutrition$', 'public.views.nutrition', name='nutrition'),
    url(r'^nutrition/(?P<meal_id>[0-9]+)$', 'public.views.meal', name='meal'),
    # internal meal ajax requests
    url(r'^ajax/nutrition-summary/$', 'public.views.ajax_nutrition_summary',
        name='ajax_nutrition_summary'),
    # exercises
    url(r'^exercises$', 'public.views.exercises', name='exercises'),
    url(r'^exercises/(?P<exercise_name>[-a-zA-Z]+)$', 'public.views.exercise',
        name='exercise'),
    url(r'^ajax/exercises/(?P<exercise_name>[-a-zA-Z]+)$',
        'public.views.ajax_exercise_history', name='ajax_exercise_history'),
    url(r'^ajax/big-three-progress/(?P<username>[-a-zA-Z0-9]+)/$',
        'public.views.ajax_big_three_progress', name='ajax_big_three_progress'),
    url(r'^ajax/popular-exercises$', 'public.views.ajax_popular_exercises',
        name='ajax_popular_exercises'),
    # groups and users
    url(r'^groups/(?P<group_name>[-a-zA-Z0-9]+)$', 'public.views.group',
        name='group'),
    url(r'^groups$', 'public.views.groups', name='groups'),
    # users
    # NOTE(review): duplicate of the '^users/$' route (same name='users')
    # registered under "profile" above — one of the two is dead.
    url(r'^users/$', 'public.views.users', name='users'),
)
|
995,959 | b587da1b8d79b124b660cabcf6bc9eded0bbe946 | import pytest
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
TEST_DB_NAME = 'PALJE_TEST'
def pytest_addoption(parser):
    """Register the SQL Server connection options used by the test suite.

    Args:
        parser: pytest's command-line argument parser.
    """
    # (flag, keyword arguments) pairs, registered in order below.
    option_specs = [
        ('--mssql_host',
         dict(action='store', dest='mssql_host',
              help='SQL Server hostname used for tests.')),
        ('--mssql_port',
         dict(action='store', default=1433, dest='mssql_port',
              help='SQL Server port number used for tests.')),
        ('--mssql_driver',
         dict(action='store', default='ODBC Driver 17 for SQL Server',
              dest='mssql_driver',
              help='SQL Server ODBC driver name.')),
        ('--mssql_username',
         dict(action='store', dest='mssql_username', default='',
              help='Username for SQL Server. Do not use for Win authentication.')),
        ('--mssql_password',
         dict(action='store', dest='mssql_password', default='',
              help='Password for SQL Server. Do not use for Win authentication.')),
    ]
    for flag, kwargs in option_specs:
        parser.addoption(flag, **kwargs)
# Extra plugin modules pytest should load for this suite (shared fixtures).
pytest_plugins = [
    "test.fixtures"
]
def pytest_configure(config):
    """Declare the custom markers this suite uses so pytest won't warn."""
    for marker in (
        "mssql: mark tests that require SQL server to run",
        "http_server: mark tests that require HTTP server to run",
    ):
        config.addinivalue_line("markers", marker)
def pytest_collection_modifyitems(config, items):
    """Attach fixtures or skips to marked tests before the run starts.

    When the MSSQL prerequisites hold (hostname given, a connection can be
    established with the configured port/driver/credentials, and no database
    named TEST_DB_NAME exists), every test marked 'mssql' gets the
    'mssql_setup_and_teardown' fixture prepended; otherwise those tests are
    skipped. Tests marked 'http_server' get 'http_server_setup_and_teardown'
    appended.
    """
    mssql_ok = ensure_mssql_ready_for_tests(config)
    skip_mssql = pytest.mark.skip(reason="requires SQL Server")
    for item in items:
        if "mssql" in item.keywords:
            if mssql_ok:
                # Prepend so the database fixture runs before all others.
                item.fixturenames = ['mssql_setup_and_teardown'] + item.fixturenames
            else:
                item.add_marker(skip_mssql)
        if "http_server" in item.keywords:
            item.fixturenames.append('http_server_setup_and_teardown')
def ensure_mssql_ready_for_tests(config):
    """Return True when the MSSQL test prerequisites are satisfied.

    Checks that an MSSQL hostname was given, that a connection can be made
    with the configured port/driver/credentials, and that no database named
    TEST_DB_NAME already exists on the server.

    Any failure simply disables the mssql-marked tests, so connection
    problems are reported as False rather than raised.
    """
    try:
        if not config.getoption('mssql_host'):
            raise Exception('MSSQL Server not given')
        engine = engine_from(config)
        with engine.connect() as connection:
            # Parameterized query: TEST_DB_NAME is bound, not interpolated.
            query = "SELECT name FROM sys.databases WHERE UPPER(name) = ?"
            result = connection.execute(query, (TEST_DB_NAME, ))
            if result.fetchall():
                raise Exception(
                    f"There already exists a database with name '{TEST_DB_NAME}'")
        return True
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; Exception lets those abort the run as expected.
        return False
def engine_from(config, database=None):
    """Build a SQLAlchemy engine for the configured MSSQL server.

    Args:
        config: pytest config carrying the mssql_* command-line options.
        database: target database name; defaults to 'master' when falsy.

    Returns:
        A SQLAlchemy engine bound to the mssql+pyodbc URL.
    """
    target_db = database or 'master'
    url = URL(
        drivername="mssql+pyodbc",
        username=config.getoption('mssql_username'),
        password=config.getoption('mssql_password'),
        host=config.getoption('mssql_host'),
        port=config.getoption('mssql_port'),
        database=target_db,
        query={'driver': config.getoption('mssql_driver')},
    )
    return create_engine(url)
995,960 | 8a1b026260fe6ecf36e55fff9d36eeae09db0cca | # Dataset Story
# Online Retail II
# The data set named is the data set of a UK-based online store.
# Includes sales between 01/12/2009 and 09/12/2011.
# The product catalog of this company includes souvenirs. They can also be considered as promotional items.
# There is also information that most of its customers are wholesalers.
import pandas as pd
# Console display tweaks: show every column, keep wide frames on one line.
pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
pd.set_option('display.width', 500)
pd.set_option('display.expand_frame_repr', False)
from mlxtend.frequent_patterns import apriori, association_rules
from helpers.helpers import retail_data_prep, check_df

df_ = pd.read_excel("dersler/hafta_3/online_retail_II.xlsx", sheet_name="Year 2010-2011")  # read data
df = df_.copy()  # work on a copy so the raw load is preserved
# Task 1: Perform Data Preprocessing with our previously defined helper.
df = retail_data_prep(df)
# Task 2: Generate association rules through Germany customers.
df_ger = df[df["Country"] == "Germany"]
check_df(df_ger)
# apriori requires the basket encoded as a 1/0 invoice-by-product matrix;
# the next function builds that encoding.
def create_invoice_product_df(dataframe, id=False):
    """Pivot transactions into an invoice-by-product 0/1 matrix.

    Rows are invoices; columns are products — StockCode when ``id`` is True,
    otherwise Description. A cell is 1 when the invoice contains the product
    with a positive total quantity, else 0.
    """
    product_col = "StockCode" if id else "Description"
    totals = dataframe.groupby(['Invoice', product_col])['Quantity'].sum().unstack().fillna(0)
    # Vectorized equivalent of applymap(lambda x: 1 if x > 0 else 0).
    return totals.gt(0).astype(int)
ger_inv_pro_df = create_invoice_product_df(df_ger, id=True)
# Mine frequent itemsets from the encoded basket, then derive the
# association rules from them.
frequent_itemsets = apriori(ger_inv_pro_df, min_support=0.01, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="support", min_threshold=0.01)
################
# Task 3: Look up the names of the three products given in the assignment
# using the id-to-Description helper below.
def check_id(dataframe, stock_code):
    """Print and return the Description for *stock_code*.

    Bug fix: the original only printed the name and implicitly returned
    None, so every caller that stored the result (the urun*_name variables
    and get_itemname) received None. The printed output is kept for
    interactive use, and the name is now also returned.

    Args:
        dataframe: transactions with StockCode and Description columns.
        stock_code: product code to look up.

    Returns:
        Single-element list holding the product description.
    """
    product_name = dataframe[dataframe["StockCode"] == stock_code][["Description"]].values[0].tolist()
    print(product_name)
    return product_name
# NOTE(review): check_id prints the name but, as written, returns None —
# these variables therefore capture None; have check_id return product_name.
urun1_name = check_id(df_ger, 21987) # ['PACK OF 6 SKULL PAPER CUPS']
urun2_name = check_id(df_ger, 23235) # ['STORAGE TIN VINTAGE LEAF']
urun3_name = check_id(df_ger, 22747) # ["POPPY'S PLAYHOUSE BATHROOM"]
# Task 4: Make a product recommendation for the users in the basket,
# via the rule-ranking helper defined next.
def arl_recommender(rules_df, product_id, rec_count=1):
    """Recommend up to *rec_count* products for *product_id*.

    Rules are ranked by lift (descending); whenever the product appears in a
    rule's antecedents, the rule's first consequent is collected. Duplicate
    recommendations are dropped while preserving rank order.
    """
    ranked = rules_df.sort_values("lift", ascending=False)
    picks = []
    for pos, antecedent in enumerate(ranked["antecedents"]):
        if any(item == product_id for item in antecedent):
            picks.append(list(ranked.iloc[pos]["consequents"])[0])
    # dict.fromkeys drops duplicate product ids while keeping first-seen order
    unique_picks = list(dict.fromkeys(picks))
    return unique_picks[0:rec_count]
# Top-1/2/3 recommendations for the three assignment products.
recommend_product1 = arl_recommender(rules, 21987, 1)
recommend_product2 = arl_recommender(rules, 23235, 2)
recommend_product3 = arl_recommender(rules, 22747, 3)
# Task 5: What are the names of the proposed products?
# get_itemname (below) maps each recommended id back to its Description.
def get_itemname(rec_list, dataframe):
    """Resolve each recommended product id in *rec_list* through check_id,
    returning the results in the same order."""
    return [check_id(dataframe, item_id) for item_id in rec_list]
# NOTE(review): check_id prints and returns None as written, so these are
# lists of None — have check_id return product_name to make them useful.
suggest1 = get_itemname(recommend_product1, df)
suggest2 = get_itemname(recommend_product2, df)
suggest3 = get_itemname(recommend_product3, df)
|
995,961 | 960f8095714ec651cb51c641bee107c58017d142 | #!/usr/bin/env python
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from math import sqrt
import rospy
from sensor_msgs.msg import TimeReference
import socket
CLOCK_REALTIME = 0
class Node:
    """ROS node that publishes or checks a shared wall-time reference.

    In 'server' mode the node stamps its own wall clock onto a TimeReference
    topic; in 'client' mode it subscribes to that topic and compares the
    received reference against the local clock. Both modes report health on
    /diagnostics, going STALE after 35 s without an update.
    """

    def __init__(self, **kwargs):
        self.system_name = socket.gethostname()
        rospy.init_node('timesync_node', anonymous=True)
        # Parameters
        self.timeref_topic = rospy.get_param("~topic", "walltime/time_reference")
        self.mode = rospy.get_param("~mode", "server")
        self.tolerance = rospy.get_param("~tolerance", 2)  # Tolerance in seconds for considering as "synchronized"
        # Publishers
        self.timeref_pub = rospy.Publisher(self.timeref_topic, TimeReference, queue_size=10)
        self.status_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=10)
        # Subscribers
        rospy.Subscriber(self.timeref_topic, TimeReference, callback=self.timeref_callback)
        # Messages
        self.timeref_msg = TimeReference()
        self.diag_status = DiagnosticStatus()
        self.last_update = 0
        # Rate
        self.rate = rospy.Rate(0.5)  # Once every 2 secs

    def timeref_callback(self, msg):
        """Cache the incoming time reference; only clients consume it."""
        if self.mode == 'client':
            self.timeref_msg = msg

    def server(self):
        """Publish own walltime on the time-reference topic and report OK."""
        # Update time reference message
        time = rospy.Time.now()
        self.timeref_msg.header.stamp = time
        self.timeref_msg.time_ref = time
        self.timeref_msg.source = self.system_name
        self.timeref_pub.publish(self.timeref_msg)
        # Update diagnostic status message
        self.diag_status.level = DiagnosticStatus.OK
        self.diag_status.message = '%s mode' % self.mode
        self.diag_status.values = [KeyValue(key='Update Status', value='OK')]
        self.last_update = rospy.Time.now().to_sec()

    def client(self):
        """Compare the received walltime reference against the local clock."""
        # Absolute time difference from the reference.
        # Fix: was sqrt((a - b)**2), which is just abs(a - b).
        abs_time_diff = abs(self.timeref_msg.time_ref.to_sec() - rospy.Time.now().to_sec())
        # Update diagnostic status message:
        # OK if the absolute difference is within the tolerance param.
        if abs_time_diff <= self.tolerance:
            self.diag_status.level = DiagnosticStatus.OK
            self.diag_status.message = '%s mode' % self.mode
            self.diag_status.values = [KeyValue(key='Update Status', value='OK')]
        else:
            self.diag_status.level = DiagnosticStatus.ERROR
            self.diag_status.message = '%s mode' % self.mode
            self.diag_status.values.insert(0, KeyValue(key='Update Status', value='ERROR: Could not update walltime'))
        self.last_update = rospy.Time.now().to_sec()

    def run(self):
        """Main loop: refresh diagnostics and run the selected mode at 0.5 Hz."""
        while not rospy.is_shutdown():
            # Fresh status each cycle; level defaults to ERROR until a mode
            # handler overwrites it.
            self.diag_status = DiagnosticStatus()
            self.diag_status.name = '%s timesync' % self.system_name
            self.diag_status.hardware_id = self.system_name
            self.diag_status.level = DiagnosticStatus.ERROR
            if self.mode == 'server':
                self.server()
            elif self.mode == 'client':
                self.client()
            else:
                rospy.logerr('[timesync_node] Invalid mode selection, must be server or client')
                rospy.signal_shutdown(reason='invalid mode selection')
            # Check for stale diagnostics
            elapsed = rospy.Time().now().to_sec() - self.last_update
            if elapsed > 35:
                self.diag_status.level = DiagnosticStatus.STALE
                self.diag_status.message = 'Stale'
                self.diag_status.values = [KeyValue(key='Update Status', value='Stale'),
                                           KeyValue(key='Time Since Update', value=str(elapsed))]
            diag_msg = DiagnosticArray()
            diag_msg.status.append(self.diag_status)
            self.status_pub.publish(diag_msg)
            self.rate.sleep()
# Entry point: construct the node and spin until ROS shutdown.
if __name__ == "__main__":
    node = Node()
    node.run()
|
class APIMixin(object):
    """Marker mixin for API-related classes; intentionally empty."""
    pass
|
995,963 | 0b17ceecb9517c0a37a6916864fd2763a8829a41 | #!/usr/bin/python
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''channel_test.py - unit tests for the protobuf.channel module
Authors: Eric Saunders (esaunders@lcogt.net)
Martin Norbury (mnorbury@lcogt.net)
May 2009
'''
import unittest
# Add protobuf module to the classpath
import sys
sys.path.append('../../main')
# Import the class to test
import protobuf.channel as ch
import protobuf.error as error
# Import the fake stub classes used in testing to simulate sockets etc.
from fake import FakeSocketFactory, FakeSocket, FakeCallback, TestServiceImpl
# Import the RPC definition class and the test service class
import protobuf.rpc_pb2 as rpc_pb2
import test_pb2
class TestSocketRpcChannel(unittest.TestCase):
    '''Unit tests for the protobuf.channel.SocketRpcChannel class.

    Fix: the deprecated unittest aliases assertEquals/assert_ (removed in
    Python 3.12) are replaced by assertEqual/assertTrue throughout.
    '''

    def setUp(self):
        '''Build a channel on a fake socket plus canned request/response protos.'''
        # Create a channel connected to a fake socket
        self.factory = FakeSocketFactory()
        self.channel = ch.SocketRpcChannel(socketFactory=self.factory)
        # Define a simple service request
        self.service_request = test_pb2.Request()
        self.service_request.str_data = 'The lord giveth'
        self.serialized_request = self.service_request.SerializeToString()
        # Define a service response
        self.service_response = test_pb2.Response()
        self.service_response.str_data = 'And the lord taketh away'
        self.serialized_response = self.service_response.SerializePartialToString()
        # Define an RPC request with the service request as payload
        self.rpc_request = rpc_pb2.Request()
        self.rpc_request.request_proto = self.serialized_request

    def tearDown(self):
        '''Nothing to clean up — fakes hold no external resources.'''
        pass

    def test___init__1(self):
        '''Channel keeps a user-supplied socket factory.'''
        self.assertEqual(self.channel.sockFactory, self.factory,
                         'Initialising channel with user-supplied factory')

    def test___init__defaults(self):
        '''Channel falls back to default host and port.'''
        self.assertTrue(self.channel.host, True)
        self.assertTrue(self.channel.port, True)

    def test_validateRequest(self):
        '''A request with all mandatory fields validates silently.'''
        self.rpc_request.service_name = "Dummy Service"
        self.rpc_request.method_name = "Dummy Method"
        self.assertEqual(self.channel.validateRequest(self.rpc_request), None,
                         'validateRequest - valid request provided')

    def test_validateRequest_BAD_REQUEST_PROTO(self):
        '''Missing mandatory fields raise BadRequestProtoError.'''
        # A request with mandatory fields missing
        self.rpc_request = rpc_pb2.Request()
        self.assertRaises(error.BadRequestProtoError,
                          self.channel.validateRequest,
                          self.rpc_request)

    def test_openSocket(self):
        '''Test normal return from openSocket.'''
        self.assertTrue(self.channel.openSocket, "openSocket returns something")

    def test_openSocket_IO_ERROR(self):
        '''Test exceptional return from openSocket (IO_ERROR).'''
        # Fake socket primed to throw an unknown host exception
        socket = FakeSocket()
        socket.throwIOErrorException()
        self.factory.setSocket(socket)
        self.assertRaises(error.IOError, self.channel.openSocket,
                          'host', -1)

    def test_openSocket_UNKNOWN_HOST(self):
        '''Test exceptional return from openSocket (UNKNOWN_HOST).'''
        self.assertTrue(self.channel.openSocket, "openSocket returns something")
        # Fake socket primed to throw an unknown host exception
        socket = FakeSocket()
        socket.throwUnknownHostException()
        self.factory.setSocket(socket)
        self.assertRaises(error.UnknownHostError, self.channel.openSocket,
                          'host', -1)

    def test_createRpcRequest(self):
        '''Test createRpcRequest - normal usage.'''
        # Instantiate the test service, and get a reference to the method
        method_name = 'TestMethod'
        service = TestServiceImpl()
        method = service.DESCRIPTOR.FindMethodByName(method_name)
        # Define a simple service request
        service_request = test_pb2.Request()
        service_request.str_data = 'The lord giveth'
        serialized_request = service_request.SerializeToString()
        # Define an RPC request with the service request as payload
        expected_rpc = rpc_pb2.Request()
        expected_rpc.request_proto = serialized_request
        expected_rpc.service_name = service.DESCRIPTOR.full_name
        expected_rpc.method_name = method_name
        self.assertEqual(self.channel.createRpcRequest(method, service_request),
                         expected_rpc, 'createRpcRequest - normal usage')

    def test_sendRpcMessage(self):
        '''Test sendRpcMessage - normal usage.'''
        # Create a socket and service request
        sock = self.factory.createSocket()
        sent_request = self.rpc_request
        sent_request.service_name = "Dummy service"
        sent_request.method_name = "Dummy method"
        # Call the method
        self.channel.sendRpcMessage(sock, sent_request)
        # Extract the output that was written to the socket
        received_request = rpc_pb2.Request()
        received_request.MergeFromString(sock.output_stream.stream_data)
        self.assertEqual(received_request, sent_request,
                         'Request written to socket')

    def test_sendRpcMessage_IOError(self):
        '''Test sendRpcMessage - IOError.'''
        # Create a socket with an IOError condition set
        sock = self.factory.createSocket()
        sock.throwIOErrorException()
        # Create a service request
        sent_request = self.rpc_request
        sent_request.service_name = "Dummy service"
        sent_request.method_name = "Dummy method"
        self.assertRaises(error.IOError, self.channel.sendRpcMessage, sock,
                          sent_request)

    def test_recvRpcMessage(self):
        '''Test recvRpcMessage - normal usage.'''
        # Create a socket and service request
        msg = 'Message from server'
        sock = self.factory.createSocket()
        sock.withInputBytes(msg)
        # Call the method
        self.assertEqual(self.channel.recvRpcMessage(sock), msg,
                         'recvRpcMessage - normal usage')

    def test_recvRpcMessage_ioerror(self):
        '''Test recvRpcMessage - IOError.'''
        # Create a socket and service request
        msg = 'Message from server'
        sock = self.factory.createSocket()
        sock.withInputBytes(msg)
        sock.throwIOErrorException()
        # Call the method
        self.assertRaises(error.IOError, self.channel.recvRpcMessage, sock)

    def test_parseResponse(self):
        '''Test parseResponse - normal usage.'''
        resp_class = rpc_pb2.Response
        expected_response = resp_class()
        bytestream = expected_response.SerializeToString()
        self.assertEqual(self.channel.parseResponse(bytestream, resp_class),
                         expected_response, 'parseResponse - normal usage')

    def test_parseResponse_junk_input(self):
        '''Test the correct error is raised after sending complete crap.'''
        # Setup an arbitrary and broken bytestream
        bytestream = 'ABCD'
        resp_class = rpc_pb2.Response
        self.assertRaises(error.BadResponseProtoError,
                          self.channel.parseResponse, bytestream, resp_class)

    def testGoodRpc(self):
        '''Test a good RPC call.'''
        # Fake socket with prepared response
        socket = FakeSocket()
        socket.withResponseProto(self.service_response)
        socketFactory = FakeSocketFactory()
        socketFactory.setSocket(socket)
        # Create channel
        channel = ch.SocketRpcChannel("host", -1, socketFactory)
        controller = channel.newController()
        # Create the service
        service = test_pb2.TestService_Stub(channel)
        # Call RPC method
        callback = FakeCallback()
        service.TestMethod(controller, self.service_request, callback)
        self.assertTrue(callback.invoked, 'Callback invoked')
        self.assertEqual(self.service_response.str_data,
                         callback.response.str_data, 'Response message')
        self.assertEqual(self.serialized_request,
                         socket.getRequest().request_proto,
                         'Request protocol serialisation')
        self.assertEqual(service.DESCRIPTOR.full_name,
                         socket.getRequest().service_name, 'Service name')
        self.assertEqual(service.DESCRIPTOR.methods[0].name,
                         socket.getRequest().method_name, 'Method name')

    def testUnknownHostException(self):
        '''Test unknown host.'''
        # Fake socket primed to throw an unknown host exception
        socket = FakeSocket()
        socket.throwUnknownHostException()
        socketFactory = FakeSocketFactory()
        socketFactory.setSocket(socket)
        # Create channel
        channel = ch.SocketRpcChannel("host", -1, socketFactory)
        controller = channel.newController()
        # Create the service
        service = test_pb2.TestService_Stub(channel)
        # Call RPC method
        callback = FakeCallback()
        service.TestMethod(controller, self.service_request, callback)
        self.assertFalse(callback.invoked, 'Callback invoked')
        self.assertTrue(controller.failed())
        self.assertEqual(rpc_pb2.UNKNOWN_HOST, controller.reason,
                         'Error reason')

    def testIOErrorWhileCreatingSocket(self):
        '''Test Error while creating socket.'''
        # Fake socket primed to throw an unknown host exception
        socket = FakeSocket()
        socket.throwIOErrorException()
        socketFactory = FakeSocketFactory()
        socketFactory.setSocket(socket)
        # Create channel
        channel = ch.SocketRpcChannel("host", -1, socketFactory)
        controller = channel.newController()
        # Create the service
        service = test_pb2.TestService_Stub(channel)
        # Call RPC method
        callback = FakeCallback()
        service.TestMethod(controller, self.service_request, callback)
        self.assertFalse(callback.invoked, 'Callback invoked')
        self.assertTrue(controller.failed())
        self.assertEqual(rpc_pb2.IO_ERROR, controller.reason, 'Error reason')

    def testIncompleteRequest(self):
        '''Test calling RPC with incomplete request.'''
        # Create data
        service_request = test_pb2.Request()
        # Fake socket with prepared response
        socket = FakeSocket()
        socket.withResponseProto(self.service_response)
        socketFactory = FakeSocketFactory()
        socketFactory.setSocket(socket)
        # Create channel
        channel = ch.SocketRpcChannel("host", -1, socketFactory)
        controller = channel.newController()
        # Create the service
        service = test_pb2.TestService_Stub(channel)
        # Call RPC method
        callback = FakeCallback()
        service.TestMethod(controller, service_request, callback)
        self.assertFalse(callback.invoked, 'Callback invoked')
        self.assertEqual(rpc_pb2.BAD_REQUEST_PROTO, controller.reason)
        self.assertTrue(controller.failed())

    def testNoCallBack(self):
        '''Test RPC failing to invoke callback.'''
        # Fake socket with callback set to false
        socket = FakeSocket()
        socket.withNoResponse(False)
        socketFactory = FakeSocketFactory()
        socketFactory.setSocket(socket)
        # Create channel
        channel = ch.SocketRpcChannel("host", -1, socketFactory)
        controller = channel.newController()
        # Create the service
        service = test_pb2.TestService_Stub(channel)
        # Call RPC method
        callback = FakeCallback()
        service.TestMethod(controller, self.service_request, callback)
        self.assertFalse(callback.invoked, 'Callback invoked')
        self.assertEqual(self.serialized_request,
                         socket.getRequest().request_proto,
                         'Request protocol serialisation')
        self.assertEqual(service.DESCRIPTOR.full_name,
                         socket.getRequest().service_name, 'Service name')
        self.assertEqual(service.DESCRIPTOR.methods[0].name,
                         socket.getRequest().method_name, 'Method name')

    def testBadResponse(self):
        '''Test bad response from server.'''
        # Fake socket with prepared response
        socket = FakeSocket()
        socket.withInputBytes("bad response")
        socketFactory = FakeSocketFactory()
        socketFactory.setSocket(socket)
        # Create channel
        channel = ch.SocketRpcChannel("host", -1, socketFactory)
        controller = channel.newController()
        # Create the service
        service = test_pb2.TestService_Stub(channel)
        # Call RPC method
        callback = FakeCallback()
        service.TestMethod(controller, self.service_request, callback)
        # Verify request was sent and bad response received
        self.assertFalse(callback.invoked, 'Callback invoked')
        self.assertEqual(self.serialized_request,
                         socket.getRequest().request_proto,
                         'Request protocol serialisation')
        self.assertTrue(controller.failed(), 'Controller failed')
        self.assertEqual(rpc_pb2.BAD_RESPONSE_PROTO, controller.reason,
                         'Controller reason')
class Test__LifeCycle(unittest.TestCase):
'''Unit tests for the protobuf.channel._Lifecycle class.'''
def setUp(self):
# Create a channel connected to a fake socket
self.factory = FakeSocketFactory()
self.socket = FakeSocket()
self.channel = ch.SocketRpcChannel(socketFactory = self.factory)
self.controller = self.channel.newController()
self.lc = ch._LifeCycle(self.controller, self.channel)
self.factory.setSocket(self.socket)
# Define a simple service request
self.service_request = test_pb2.Request()
self.service_request.str_data = 'The lord giveth'
self.serialized_request = self.service_request.SerializeToString()
# Define an RPC request with the service request as payload
self.rpc_request = rpc_pb2.Request()
self.rpc_request.request_proto = self.serialized_request
def tearDown(self):
pass
def test___init__(self):
'''Test _LifeCycle constructor.'''
self.assertEqual(self.lc.controller, self.controller,
"Attribute 'controller' incorrectly initialized")
self.assertEqual(self.lc.channel, self.channel,
"Attribute 'channel' incorrectly initialized")
self.assertEqual(self.lc.sock, None,
"Attribute 'sock' incorrectly initialized")
self.assertEqual(self.lc.byte_stream, None,
"Attribute 'byte_stream' incorrectly initialized")
self.assertEqual(self.lc.rpcResponse, None,
"Attribute 'rpcResponse' incorrectly initialized")
self.assertEqual(self.lc.serviceResponse, None,
"Attribute 'serviceResponse' incorrectly initialized")
def test_tryToValidateRequest(self):
'''Test tryToValidateRequest - normal usage.'''
self.assertEquals(self.lc.tryToValidateRequest(self.rpc_request),
None, "tryToValidateRequest - valid request")
def test_tryToValidateRequest_con_error(self):
'''Test tryToValidateRequest - controller in error state.'''
self.controller.success = False
self.assertEquals(self.lc.tryToValidateRequest(self.rpc_request),
None, "tryToValidateRequest - controller in error state")
def test_tryToValidateRequest_BAD_REQUEST_PROTO(self):
'''Test tryToValidateRequest - BadRequestProto error thrown.'''
# A request with mandatory fields missing
self.rpc_request = rpc_pb2.Request()
self.lc.tryToValidateRequest(self.rpc_request)
self.assertEquals(self.controller.reason, rpc_pb2.BAD_REQUEST_PROTO,
"tryToValidateRequest - invalid request")
self.assertEquals(self.controller.failed(), True,
"tryToValidateRequest - invalid request")
def test_tryToOpenSocket(self):
'''Test tryToOpenSocket - normal usage.'''
self.lc.tryToOpenSocket()
self.assert_(self.lc.sock)
def test_tryToOpenSocket_con_error(self):
'''Test tryToOpenSocket - controller in error state.'''
self.controller.error = True
self.lc.tryToOpenSocket()
self.assertEquals(self.lc.sock, None,
"tryToOpenSocket - controller in error state")
def test_tryToOpenSocket_UNKNOWN_HOST(self):
'''Test tryToOpenSocket - UnknownHost error thrown.'''
self.socket.throwUnknownHostException()
self.lc.tryToOpenSocket()
self.assertEquals(self.lc.sock, None,
"tryToOpenSocket - UNKNOWN_HOST error")
self.assertEquals(self.controller.reason, rpc_pb2.UNKNOWN_HOST,
"tryToOpenSocket - UNKNOWN_HOST error")
self.assertEquals(self.controller.failed(), True,
"tryToOpenSocket - UNKNOWN_HOST error")
def test_tryToOpenSocket_IO_ERROR(self):
'''Test tryToOpenSocket - IOError error thrown.'''
self.socket.throwIOErrorException()
self.lc.tryToOpenSocket()
self.assertEquals(self.lc.sock, None,
"tryToOpenSocket - IO_ERROR error")
self.assertEquals(self.controller.reason, rpc_pb2.IO_ERROR,
"tryToOpenSocket - IO_ERROR error")
self.assertEquals(self.controller.failed(), True,
"tryToOpenSocket - IO_ERROR error")
def test_tryToSendRpcRequest(self):
    '''Test tryToSendRpcRequest - normal usage.'''
    # Instantiate the test service, and get a reference to the method
    method_name = 'TestMethod'
    service = TestServiceImpl()
    method = service.DESCRIPTOR.FindMethodByName(method_name)
    # Set the service and method names of the RPC request
    self.rpc_request.service_name = service.DESCRIPTOR.full_name
    self.rpc_request.method_name = method_name
    # Add the socket instance to the lifecycle object
    self.lc.sock = self.socket
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(self.lc.tryToSendRpcRequest(method, self.rpc_request),
                     None, "tryToSendRpcRequest - normal return")
def test_tryToSendRpcRequest_IO_ERROR(self):
    '''Test tryToSendRpcRequest - IOError error thrown.'''
    # Instantiate the test service, and get a reference to the method
    method_name = 'TestMethod'
    service = TestServiceImpl()
    method = service.DESCRIPTOR.FindMethodByName(method_name)
    # Set the service and method names of the RPC request
    self.rpc_request.service_name = service.DESCRIPTOR.full_name
    self.rpc_request.method_name = method_name
    # Set the exception, and add the socket instance to the lifecycle object
    self.socket.throwIOErrorException()
    self.lc.sock = self.socket
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(self.lc.tryToSendRpcRequest(method, self.rpc_request),
                     None, "tryToSendRpcRequest - IO_ERROR")
    self.assertEqual(self.controller.reason, rpc_pb2.IO_ERROR,
                     "tryToSendRpcRequest - IO_ERROR error")
    self.assertEqual(self.controller.failed(), True,
                     "tryToSendRpcRequest - IO_ERROR error")
def test_tryToReceiveReply(self):
    '''Test tryToReceiveReply - normal usage.'''
    # Add some data to the socket
    msg = 'Message from server'
    self.socket.withInputBytes(msg)
    self.lc.sock = self.socket
    # assertEqual/assertTrue: assertEquals and assert_ are deprecated
    # aliases, removed in Python 3.12.
    self.assertEqual(self.lc.tryToReceiveReply(), None,
                     "tryToReceiveReply - normal usage")
    # Verify the socket has been closed
    self.assertTrue(self.socket.input_stream.closed,
                    "tryToReceiveReply - normal usage")
def test_tryToReceiveReply_IOError(self):
    '''Test tryToReceiveReply - IOError thrown.'''
    # Add some data to the socket
    msg = 'Message from server'
    self.socket.withInputBytes(msg)
    self.socket.throwIOErrorException()
    self.lc.sock = self.socket
    # assertEqual/assertTrue: assertEquals and assert_ are deprecated
    # aliases, removed in Python 3.12.
    self.assertEqual(self.lc.tryToReceiveReply(), None,
                     "tryToReceiveReply - IO_ERROR error")
    self.assertEqual(self.controller.reason, rpc_pb2.IO_ERROR,
                     "tryToReceiveReply - IO_ERROR error")
    self.assertEqual(self.controller.failed(), True,
                     "tryToReceiveReply - IO_ERROR error")
    # Verify the socket has been closed
    self.assertTrue(self.socket.input_stream.closed,
                    "tryToReceiveReply - IO_ERROR error")
def test_tryToParseReply(self):
    '''Test tryToParseReply - normal usage.'''
    resp_class = rpc_pb2.Response
    expected_response = resp_class()
    self.lc.byte_stream = expected_response.SerializeToString()
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(self.lc.tryToParseReply(), None,
                     "tryToParseReply - normal usage")
def test_tryToParseReply_BAD_RESPONSE_PROTO(self):
    '''Test tryToParseReply - BadResponseProto error thrown.'''
    # Setup an arbitrary and broken bytestream
    self.lc.byte_stream = 'ABCD'
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(self.lc.tryToParseReply(), None,
                     "tryToParseReply - BAD_RESPONSE_PROTO error")
    self.assertEqual(self.controller.reason, rpc_pb2.BAD_RESPONSE_PROTO,
                     "tryToParseReply - BAD_RESPONSE_PROTO error")
    self.assertEqual(self.controller.failed(), True,
                     "tryToParseReply - BAD_RESPONSE_PROTO error")
def test_tryToRetrieveServiceResponse(self):
    '''Test tryToRetrieveServiceResponse - normal usage.'''
    resp_class = rpc_pb2.Response
    expected_response = resp_class()
    self.lc.byte_stream = expected_response.SerializeToString()
    self.lc.rpcResponse = expected_response
    # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
    self.assertEqual(self.lc.tryToRetrieveServiceResponse(resp_class),
                     None, "tryToRetrieveServiceResponse - normal usage")
def test_tryToRetrieveServiceResponse_BAD_RESPONSE_PROTO(self):
    '''tryToRetrieveServiceResponse - BadResponseProto
    This error can never trigger, since all fields of an RPC
    Response() object are optional!'''
    # Intentionally empty: the docstring records why no assertion exists.
    pass
def test_tryToRunCallback(self):
    '''Test tryToRunCallback - normal usage.'''
    callback = FakeCallback()
    self.lc.rpcResponse = rpc_pb2.Response()
    # assertEqual/assertTrue: assertEquals and assert_ are deprecated
    # aliases, removed in Python 3.12. Also fixed the "contoller" typo in
    # the failure message.
    self.assertEqual(self.lc.tryToRunCallback(callback), None,
                     "tryToRunCallback - normal usage")
    self.assertTrue(self.controller.success,
                    "tryToRunCallback - controller success flag set")
def suite():
    '''Return the test suite containing all tests from this module.'''
    # unittest.makeSuite was deprecated and removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase is the supported equivalent.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(TestSocketRpcChannel))
    suite.addTest(loader.loadTestsFromTestCase(Test__LifeCycle))
    return suite
if __name__ == '__main__':
    # Discover and run every test in this module when executed as a script.
    unittest.main()
|
995,964 | 6d1e9cc085db3a4eba0976b4671c25a3230d45f7 | import cv2
import sys
# src = cv2.imread(sys.argv[1])
# dst_gray, dst_color = cv2.pencilSketch(src, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
# cv2.imwrite('./testout/pencil_1.jpg',dst_gray)
# Convert every extracted video frame to a pencil-sketch rendering.
#   argv[1] = input directory, argv[2] = output directory,
#   argv[3] = number of frames (files named frame<N>.jpg).
for frame_idx in range(int(sys.argv[3])):
    frame_file = '/frame' + str(frame_idx) + '.jpg'
    image = cv2.imread(sys.argv[1] + frame_file)
    # pencilSketch returns (grayscale sketch, color sketch); only the
    # grayscale variant is written out.
    sketch_gray, _sketch_color = cv2.pencilSketch(
        image, sigma_s=60, sigma_r=0.07, shade_factor=0.05)
    cv2.imwrite(sys.argv[2] + frame_file, sketch_gray)
995,965 | 4979724f8d01107fa49f14ebf1d38b72be77f902 | __author__ = 'sam.royston'
from mta.turnstile_synch import UpdateManager
from transport import transit
# Synchronise MTA datasets; presumably start_yr=15 means "from 2015" -
# confirm against mta.turnstile_synch.UpdateManager.
update_manager = UpdateManager(start_yr=15)
# Remove empty records before syncing (exact semantics live in UpdateManager).
update_manager.clean_empties()
update_manager.synch_turnstiles()  # turnstile data
update_manager.synch_locations()   # station locations
update_manager.synch_gtfs()        # GTFS schedule data
# Hand control to the transit module's option/entry routine.
transit.run_opts()
|
995,966 | ed1a700cad9f033bfeb2676ccaef25f793f4d9c5 | """
给定两个单词 word1 和 word2,计算出将 word1 转换成 word2 所使用的最少操作数 。
你可以对一个单词进行如下三种操作:
插入一个字符
删除一个字符
替换一个字符
示例 1:
输入: word1 = "horse", word2 = "ros"
输出: 3
解释:
horse -> rorse (将 'h' 替换为 'r')
rorse -> rose (删除 'r')
rose -> ros (删除 'e')
示例 2:
输入: word1 = "intention", word2 = "execution"
输出: 5
解释:
intention -> inention (删除 't')
inention -> enention (将 'i' 替换为 'e')
enention -> exention (将 'n' 替换为 'x')
exention -> exection (将 'n' 替换为 'c')
exection -> execution (插入 'u')
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/edit-distance
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution(object):
    def minDistance(self, word1, word2):
        """Return the Levenshtein edit distance between word1 and word2.

        :type word1: str
        :type word2: str
        :rtype: int

        Classic DP: dp[i][j] is the minimum number of insert / delete /
        replace operations turning word1[:i] into word2[:j].
          dp[i-1][j-1] + 1 -> replace last char
          dp[i][j-1]   + 1 -> insert a char
          dp[i-1][j]   + 1 -> delete a char
          dp[i][j] = dp[i-1][j-1] when the last characters already match.

        Bug fix: the original returned 0 when either word was empty, but
        the distance from "" to a word of length k is k.
        """
        m = len(word1)
        n = len(word2)
        # One empty word: answer is the length of the other (all inserts
        # or all deletes).
        if not word1 or not word2:
            return max(m, n)
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        # First row: "" -> word2[:j] costs j insertions.
        for j in range(1, n + 1):
            dp[0][j] = dp[0][j - 1] + 1
        # First column: word1[:i] -> "" costs i deletions.
        for i in range(1, m + 1):
            dp[i][0] = dp[i - 1][0] + 1
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if word1[i - 1] == word2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1]
                else:
                    dp[i][j] = min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1]) + 1
        return dp[-1][-1]
995,967 | 24e7fc74cea021217a593cf9cbac34a33d7dc68e | #!/usr/bin python3
# -*- coding: utf-8 -*-
import json
def get_fans_url(user_id, page_num):
    """Build the Tianya API URL listing one page of a user's followers.

    :param user_id: Tianya user id (string).
    :param page_num: 1-based page number; 28 results per page.
    :return: the request URL.

    Bug fix: the original string contained the mojibake '¶ms.' (U+00B6
    PILCROW) where the query separator '&params.' belongs - '&para'
    had been collapsed into the '&para;' entity glyph. Restored the
    literal '&params.' separators.
    """
    return ('http://www.tianya.cn/api/tw?method=follower.ice.select'
            '&params.userId=' + user_id +
            '&params.pageNo=' + str(page_num) +
            '&params.pageSize=28')
def get_friends_url(user_id, page_num):
    """Build the Tianya API URL listing one page of the users someone follows.

    :param user_id: Tianya user id (string).
    :param page_num: 1-based page number; 28 results per page.
    :return: the request URL.

    Bug fix: same mojibake as get_fans_url - '¶ms.' restored to the
    '&params.' query separators.
    """
    return ('http://www.tianya.cn/api/tw?method=following.ice.select'
            '&params.userId=' + user_id +
            '&params.pageNo=' + str(page_num) +
            '&params.pageSize=28')
def get_fans_user_id_list(response):
    """Extract follower user ids from a Tianya API JSON response.

    :param response: an HTTP response object exposing a .text attribute
        with the JSON body.
    :return: list of user ids; empty (or partial) on any malformed payload.

    Bug fix: the original used ``return id_list`` inside a ``finally``
    block, which silently swallows *every* exception - including
    KeyboardInterrupt and SystemExit. Replaced with a targeted except
    clause that keeps the best-effort "return what we parsed" behaviour.
    """
    id_list = []
    try:
        json_obj = json.loads(response.text)
        if json_obj and json_obj['code'] == '1':
            for user_info in json_obj['data']['user']:
                id_list.append(user_info['id'])
    except (ValueError, KeyError, TypeError):
        # Malformed JSON or unexpected schema: fall through with whatever
        # ids were collected so far (matches the original's semantics).
        pass
    return id_list
|
995,968 | b1f487fd8a1b904e1c5d8fc054b3d74bd093127c | from manimlib.imports import *
#### SUGERENCIA: SIEMPRE QUE CAMBIESLOS VECTORES A VISUALIZAR ###
### CONDISERA QUE EL PLNO ES DE [-7,7]x[-4,4] ####
#### Propiedades de la definición de un espacio vectorial ####
class Grid(VGroup):
    """A rows x columns lattice of Line mobjects spanning width x height,
    centered on the origin."""
    CONFIG = {
        "height": 6.0,
        "width": 6.0,
    }

    def __init__(self, rows, columns, **kwargs):
        digest_config(self, kwargs, locals())
        super().__init__(**kwargs)
        step_x = self.width / self.columns
        step_y = self.height / self.rows
        half_w = self.width / 2.
        half_h = self.height / 2.
        # Vertical lines, from the left edge to the right edge inclusive.
        for x in np.arange(0, self.width + step_x, step_x):
            self.add(Line(
                [x - half_w, -half_h, 0],
                [x - half_w, half_h, 0],
            ))
        # Horizontal lines, from the bottom edge to the top edge inclusive.
        for y in np.arange(0, self.height + step_y, step_y):
            self.add(Line(
                [-half_w, y - half_h, 0],
                [half_w, y - half_h, 0],
            ))
class ScreenGrid(VGroup):
    """Full-screen coordinate grid with central axes and numeric edge labels,
    used as a backdrop so vector positions can be read off the scene."""
    CONFIG = {
        "rows": 8,
        "columns": 14,
        "height": FRAME_Y_RADIUS * 2,
        "width": 14,
        "grid_stroke": 0.5,
        "grid_color": WHITE,
        "axis_color": RED,
        "axis_stroke": 2,
        "labels_scale": 0.25,
        "labels_buff": 0,
        "number_decimals": 2
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        rows = self.rows
        columns = self.columns
        # Background lattice.
        grid = Grid(width=self.width, height=self.height, rows=rows, columns=columns)
        grid.set_stroke(self.grid_color, self.grid_stroke)
        # Corner anchors: ii = lower-left, si = upper-left, sd = upper-right
        # (Spanish initials: inferior/superior, izquierda/derecha).
        vector_ii = ORIGIN + np.array((- self.width / 2, - self.height / 2, 0))
        vector_si = ORIGIN + np.array((- self.width / 2, self.height / 2, 0))
        vector_sd = ORIGIN + np.array((self.width / 2, self.height / 2, 0))
        # Central x and y axes.
        axes_x = Line(LEFT * self.width / 2, RIGHT * self.width / 2)
        axes_y = Line(DOWN * self.height / 2, UP * self.height / 2)
        axes = VGroup(axes_x, axes_y).set_stroke(self.axis_color, self.axis_stroke)
        divisions_x = self.width / columns
        divisions_y = self.height / rows
        # Label offsets: x-coordinate labels go above/below, y labels right/left.
        directions_buff_x = [UP, DOWN]
        directions_buff_y = [RIGHT, LEFT]
        dd_buff = [directions_buff_x, directions_buff_y]
        vectors_init_x = [vector_ii, vector_si]
        vectors_init_y = [vector_si, vector_sd]
        vectors_init = [vectors_init_x, vectors_init_y]
        divisions = [divisions_x, divisions_y]
        orientations = [RIGHT, DOWN]
        labels = VGroup()
        # Two passes (x direction, then y direction): walk along each edge in
        # `division`-sized steps and drop a rounded coordinate label at every
        # interior grid line.
        set_changes = zip([columns, rows], divisions, orientations, [0, 1], vectors_init, dd_buff)
        for c_and_r, division, orientation, coord, vi_c, d_buff in set_changes:
            for i in range(1, c_and_r):
                for v_i, directions_buff in zip(vi_c, d_buff):
                    ubication = v_i + orientation * division * i
                    coord_point = round(ubication[coord], self.number_decimals)
                    label = Text(f"{coord_point}",font="Arial",stroke_width=0).scale(self.labels_scale)
                    label.next_to(ubication, directions_buff, buff=self.labels_buff)
                    labels.add(label)
        self.add(grid, axes, labels)
class Conmutatividad(Scene):
    """Animates commutativity of vector addition: x + y and y + x reach the
    same point (on-screen text intentionally kept in Spanish)."""

    def construct(self):
        grid = ScreenGrid()
        v_x = Arrow((0,0,0), 2*RIGHT+UP, buff = 0)
        v_y = Arrow((0,0,0), RIGHT-2*UP, buff = 0)
        v_z = Arrow((0,0,0), LEFT+0.5*UP, buff = 0)
        suma_t_1 = TextMobject('Sumemos el vector:'+' $\\vec{x}+\\vec{y}$')
        suma_t_2 = TextMobject('Sumemos el vector:'+' $\\vec{y}+\\vec{x}$')
        prop_1 = TextMobject('''Observemos la primera propiedad: \n
la conmutatividad.''')
        # Bug fix: the original had a trailing line-continuation backslash
        # after .move_to((-3,3,0)) which glued this statement onto the next
        # one, producing a syntax error. The stray continuation is removed.
        prop_1.set_color(GREEN).move_to((-3,3,0))
        suma_t_1.move_to((-3,2,0))
        # Color the sub-strings of the label: prefix blue, x red, y orange.
        suma_t_1[0][0:16].set_color(BLUE)
        suma_t_1[0][16:18].set_color(RED)
        suma_t_1[0][19:].set_color(ORANGE)
        suma_t_2.move_to((-3,1.5,0))
        suma_t_2[0][0:16].set_color(BLUE)
        suma_t_2[0][19:].set_color(RED)
        suma_t_2[0][16:18].set_color(ORANGE)
        v_x_t = TextMobject('$\\vec{x}$')
        v_y_t = TextMobject('$\\vec{y}$')
        v_z_t = TextMobject('$\\vec{z}$')
        v_x_t.move_to(v_x.get_end()+RIGHT*0.3)
        v_y_t.next_to(v_y.get_end()+RIGHT*0.1)
        v_z_t.next_to(v_y.get_end()+RIGHT*0.1)
        gpo_x = VGroup(v_x, v_x_t)
        gpo_y = VGroup(v_y, v_y_t)
        gpo_z = VGroup(v_z, v_z_t)
        gpo_x.set_color(RED)
        gpo_y.set_color(ORANGE)
        # y translated to the tip of x (tail-to-tip addition).
        v_y_mov = Arrow(v_x.get_end(), v_x.get_end()+v_y.get_end(), buff = 0)
        v_y_mov_t = TextMobject('$\\vec{y}$')
        v_y_mov_t.move_to(v_y_mov.get_end()+RIGHT*0.3)
        gpo_y_mov = VGroup(v_y_mov, v_y_mov_t)
        # x translated to the tip of y (the commuted order).
        v_x_mov = Arrow(v_y.get_end(), v_y.get_end()+v_x.get_end(), buff = 0)
        v_x_mov_t = TextMobject('$\\vec{x}$')
        v_x_mov_t.move_to(v_x_mov.get_end()+RIGHT*0.3)
        gpo_x_mov = VGroup(v_x_mov, v_x_mov_t)
        gpo_y_mov.set_color(ORANGE)
        gpo_x_mov.set_color(RED)
        # NOTE(review): Arrow(v_x) passes a mobject as the start point and
        # rebinds v_x_mov after gpo_x_mov captured the previous arrow; this
        # looks vestigial - confirm intent before removing.
        v_x_mov = Arrow(v_x)
        v_x_mov_t = TextMobject('$\\vec{x}$')
        v_x_mov_t.move_to(v_x.get_end()+RIGHT*0.3)
        suma_x_y = Arrow((0,0,0),v_y_mov.get_end(), buff=0)
        gpo_y_1 = gpo_y.copy()
        Text_f = TextMobject(''' Nota que en realidad lo que nos dice es que \n
no importa el camino que elijas, siempre llegas a ROMA \n
o en este caso, \n
el punto en el espacio Vectorial. ''')
        Text_f.move_to((-2.6,-1.5,0))\
            .scale(0.5)
        ####################################################################
        self.play(Write(grid))
        self.wait()
        self.play(Write(prop_1))
        self.wait()
        self.play(Write(v_x),Write(v_y),Write(v_x_t), Write(v_y_t))
        self.wait()
        self.play(Write(suma_t_1))
        self.wait()
        self.play(ReplacementTransform(gpo_y, gpo_y_mov))
        self.wait()
        self.play(Write(suma_x_y))
        self.wait()
        self.play(Write(suma_t_2))
        self.play(ReplacementTransform(gpo_y_mov, gpo_y_1),
                  ReplacementTransform(gpo_x, gpo_x_mov))
        self.wait()
        self.play(Write(Text_f))
        self.wait()
class Asociatividad(Scene):
    """Animates associativity of vector addition: (x+y)+z and x+(y+z) reach
    the same point (on-screen text intentionally kept in Spanish)."""

    def construct(self):
        grid = ScreenGrid()
        ### vectors:
        x = Arrow((0,0,0), (3,2,0), buff = 0)
        y = Arrow((0,0,0), (1,2,0), buff = 0)
        z = Arrow((0,0,0), (1,-2,0), buff = 0)
        ### sum (x+y)+z, built tail-to-tip:
        y_mov_a_x = Arrow(x.get_end(), x.get_end()+y.get_end(), buff = 0)
        x_mas_y = Arrow((0,0,0), y_mov_a_x.get_end(), buff = 0)
        z_mov_a_x_mas_y = Arrow(x_mas_y.get_end(), x_mas_y.get_end()+z.get_end(), buff = 0)
        x_mas_y_mas_z = Arrow((0,0,0), z_mov_a_x_mas_y.get_end(), buff = 0)
        ### sum x+(y+z), same endpoint via the other grouping:
        z_mov_a_y = Arrow(y.get_end(), y.get_end()+z.get_end(), buff = 0)
        y_mas_z = Arrow((0,0,0), z_mov_a_y.get_end(), buff = 0)
        y_mas_z_mov_a_x = Arrow(x.get_end(), x.get_end()+y_mas_z.get_end(), buff = 0)
        x_mas_y_mas_z_1 = Arrow((0,0,0), y_mas_z_mov_a_x.get_end(), buff = 0)
        ### text:
        t_1 = TextMobject(''' Segunda propiedad: \n
Asociatividad ''')
        t_2 = TextMobject(''' Hagamos: $(\\vec{x}+\\vec{y})+\\vec{z}$ ''')
        t_3 = TextMobject(''' Hagamos: $\\vec{x}+(\\vec{y}+\\vec{z})$ ''')
        t_4 = TextMobject('$(\\vec{x}+\\vec{y})$')
        t_5 = TextMobject('$(\\vec{x}+\\vec{y})+\\vec{z}$')
        t_6 = TextMobject('$(\\vec{y}+\\vec{z})$')
        t_7 = TextMobject('$\\vec{x}+(\\vec{y}+\\vec{z})$')
        t_8 = TextMobject('$\\vec{x}$')
        t_9 = TextMobject('$\\vec{y}$')
        t_10 = TextMobject('$\\vec{z}$')
        t_11 = TextMobject('''Nota como es casi lo mimsa idea geométrica \n
que el hecho de hacer la conmutación, es tomar \n
distintos caminos para llegar al mismo vector \n
en el espacio vectorial dados tres vectores distintos.''')
        ### groups (arrow + its label move together):
        gpo_1 = VGroup(x, t_8)
        gpo_2 = VGroup(y, t_9)
        gpo_3 = VGroup(z, t_10)
        gpo_4 = VGroup(x_mas_y, t_4)
        gpo_5 = VGroup(x_mas_y_mas_z, t_5)
        gpo_6 = VGroup(y_mas_z, t_6)
        gpo_7 = VGroup(x_mas_y_mas_z_1, t_7)
        ### text positions and colors:
        t_11.move_to((0,-1,0))\
            .scale(0.85)
        t_1.move_to((-3,3,0))\
            .set_color(GREEN)
        t_2.next_to(t_1, DOWN)
        t_3.next_to(t_2, DOWN)
        t_4.next_to(x_mas_y.get_center(), UP)
        t_5.next_to(x_mas_y_mas_z.get_center(), DOWN)
        t_6.next_to(y_mas_z.get_center(), UP)
        t_7.next_to(x_mas_y_mas_z_1.get_center(), DOWN)
        t_8.next_to(x.get_end(), UP)
        t_9.next_to(y.get_end(), DOWN)
        t_10.next_to(z.get_end(), DOWN)
        gpo_4.set_color(BLUE)
        gpo_5.set_color(RED)
        gpo_2_1 = gpo_2.copy()
        gpo_3_1 = gpo_3.copy()
        ### animation (first grouping, then the second over the same scene):
        self.play(Write(grid))
        self.wait()
        self.play(Write(t_1))
        self.wait()
        self.play(Write(gpo_1), Write(gpo_2), Write(gpo_3))
        self.wait()
        self.play(Write(t_2))
        self.wait()
        self.play(ReplacementTransform(gpo_2, y_mov_a_x))
        self.wait()
        self.play(Write(gpo_4))
        self.wait()
        self.play(ReplacementTransform(gpo_3, z_mov_a_x_mas_y), FadeOut(gpo_1), FadeOut(y_mov_a_x))
        self.wait()
        self.play(Write(gpo_5))
        self.wait()
        self.play(FadeOut(gpo_4), FadeOut(z_mov_a_x_mas_y), FadeOut(y_mov_a_x),
                  FadeOut(gpo_5),Write(gpo_2_1),Write(gpo_1), Write(gpo_3_1))
        self.wait()
        self.play(Write(t_3))
        self.wait()
        self.play(ReplacementTransform(gpo_3_1, z_mov_a_y))
        self.wait()
        self.play(Write(gpo_6))
        self.wait()
        self.play(ReplacementTransform(gpo_6, y_mas_z_mov_a_x), FadeOut(gpo_2_1), FadeOut(z_mov_a_y))
        self.wait()
        self.play(Write(gpo_7))
        self.wait()
        self.play(Write(t_11))
class Inverso_Aditivo(Scene):
    """Animates the additive inverse: for x there is -x with x + (-x) = 0,
    shown as a reflection through the origin (on-screen text in Spanish)."""

    def construct(self):
        ### vectors
        grid = ScreenGrid()
        x = Arrow((0,0,0), (1,2,0), buff = 0)
        menos_x = Arrow((0,0,0), -1*x.get_end(), buff = 0)
        # Axis "projections" used to motivate negating each component.
        x_proy_y = Arrow((0,0,0), (0, -x.get_end()[1], 0), buff = 0)
        x_proy_x = Arrow((0,0,0), (-x.get_end()[0], 0, 0), buff = 0)
        ### text
        x_t = TextMobject('$\\vec{x} = (x_1,x_2)$')
        xpy_t = TextMobject('$\\vec{x} = (-x_1,0)$')
        xpx_t = TextMobject('$\\vec{x} = (0,-x_2)$')
        menosx_t = TextMobject('$\\vec{x} = (-x_1,-x_2)$')
        prop = TextMobject('Inverso Aditivo.')
        t_1 = TextMobject('''Para este vector $\\vec{x}$ \n
existe el vector $-\\vec{x}$ \n
tal que hace: \n
$\\vec{x}+(-\\vec{x}) = \\vec{0}$.''')
        t_2 = TextMobject(''' Que notemos \n
que geométricamente:\n
hay que "voltear" \n
respecto al origen \n
al vector $\\vec{x}$.''')
        ### groups
        gpo_x = VGroup(x, x_t)
        gpo_menos_x = VGroup(menos_x, menosx_t)
        gpo_x_proy_y = VGroup(x_proy_y, xpy_t)
        gpo_x_proy_x = VGroup(x_proy_x, xpx_t)
        ### placement:
        prop.move_to((-3,2.5,0))\
            .set_color(GREEN)
        x_t.next_to(x.get_end(), RIGHT, buff = 0.4)
        menosx_t.next_to(menos_x.get_end(), DOWN)
        xpy_t.next_to(x_proy_y.get_end(), RIGHT)
        xpx_t.next_to(x_proy_x.get_end(), UP)
        t_1.move_to((4,-0.5,0))\
            .scale(0.8)
        t_2.move_to((-4,-0.5,0))\
            .scale(0.8)
        ### animation:
        self.play(Write(prop), Write(grid))
        self.wait()
        self.play(Write(gpo_x))
        self.wait()
        self.play(Write(t_1), Write(gpo_x_proy_y), Write(gpo_x_proy_x))
        self.wait()
        self.play(Write(t_2), Write(gpo_menos_x), FadeOut(t_1))
        self.wait()
class Distribucion_Suma_Escalares(Scene):
    """Animates distributivity of scalar sums over a vector:
    (k+j)x = kx + jx (on-screen text intentionally kept in Spanish)."""

    def construct(self):
        ### vectors:
        grid = ScreenGrid()
        # NOTE(review): input() blocks the render waiting for the scalars
        # k and j on stdin - confirm this interactive prompt is intended.
        k=float(input('dame a k \n'))
        j=float(input('dame a j \n'))
        x = Arrow((0,0,0), (2,-1,0), buff = 0)
        k_m_j_x = Arrow((0,0,0), (k+j)*x.get_end(), buff = 0)
        kx = Arrow((0,0,0), k*x.get_end(), buff = 0)
        jx = Arrow((0,0,0), j*x.get_end(), buff = 0)
        kx_m_jx = Arrow((0,0,0), kx.get_end()+jx.get_end(), buff = 0)
        ### text
        prop = TextMobject('Distribuir el producto de suma de escalares con un vector.')
        k_m_j_x_t = TexMobject('(k+j)\\vec{x}')
        kx_m_jx_t = TexMobject('k\\vec{x}+j\\vec{x}')
        kx_t = TexMobject('k\\vec{x}')
        jx_t = TexMobject('j\\vec{x}')
        t_1 = TextMobject('''Nota que esta propiedad es encontrar \n
de vuelta otros caminitos para \n
llegar a un mismo vector desntro del \n
espacio vectorial, sólo que a diferencia \n
de las propiedades dados 3 vectores \n
ahora usamos un solo vector y dos escalares, \n
estamos haciendo como un "escalamiento" de caminos. ''')
        t_2 = TextMobject('''Se te ocurre cómo realizar la animaci\\'{o}n para la propiedad:
$$k(\\vec{x}+\\vec{y}) = k\\vec{x}+k\\vec{y}, \ k\in \mathbb{R}$$''')
        t_3 = TextMobject(''' Checa la parte inmediata despu\\'{e}s \n
de la clase de \n
\\textit{distribucion$\\_$suma$\\_$escalares} \n
en el archivo de animaciones para\n
espacios vectoriales \n
para una idea de como realizar dicha animaci\\'{o}n.''')
        ### groups (arrow + its label):
        gpo_1 = VGroup(k_m_j_x, k_m_j_x_t)
        gpo_2 = VGroup(kx_m_jx, kx_m_jx_t)
        gpo_3 = VGroup(kx, kx_t)
        gpo_4 = VGroup(jx, jx_t)
        ### placement:
        prop.move_to((0,2.5,0))\
            .set_color(GREEN)
        k_m_j_x_t.next_to(k_m_j_x.get_end(), RIGHT)
        kx_m_jx_t.next_to(kx_m_jx.get_end(), RIGHT)
        kx_t.next_to(kx.get_end(), RIGHT)
        jx_t.next_to(jx.get_end(), RIGHT)
        ### animation:
        self.play(Write(grid), Write(prop))
        self.wait()
        self.play(Write(gpo_1))
        self.wait()
        self.play(FadeOut(gpo_1), Write(gpo_3), Write(gpo_4))
        self.wait()
        self.play(FadeOut(gpo_3), FadeOut(gpo_4), Write(gpo_2))
        self.wait()
        self.play(FadeOut(gpo_2), FadeOut(grid), FadeOut(prop))
        self.wait()
        self.play(Write(t_1))
        self.wait()
        self.play(FadeOut(t_1))
        self.wait()
        self.play(Write(t_2))
        self.wait()
        self.play(FadeOut(t_2))
        self.wait()
        self.play(Write(t_3))
        self.wait()
######Hint para crear la animación
###Primero crea una clase con el nombre de animación.
###Luego necesitas crear una "construcción" (función) para que manim ejecute el objeto animación
###Luego tendrás que definir vectores...
###No solo ellos, tambien sus propiedades como en las animaciones anteriores
###Finalmente dile a la computadora cómo deben ser realizadas las animaciones con "self.play[...etc.]"
######¿Qué conclusiones geométricas puedes obtener de ésta animación?
#### Operaciones con vectores representados por flechas ####
# Puedes modificar estos parámetros para probar diferentes vectores y escalares #
# Editable demo parameters for the Opera scene below.
a=np.array([1,1,0])    # first vector
b=np.array([-2,1,0])   # second vector
c=-1.5                 # scalar used in the scaling demo
#### keep in mind the plane is [-7,7]x[-4,4] ####
text_pos=np.array([-4,2.6,0])  # anchor position for explanatory text
class Opera(Scene):
    """Demonstrates vector addition and scalar multiplication with arrows,
    using the module-level parameters a, b, c and text_pos
    (on-screen text intentionally kept in Spanish)."""

    def construct(self):
        plano = NumberPlane()
        vec1 = Vector(direction=a,color=RED)
        vec1_name = TexMobject("a")
        vec1_name.next_to(vec1.get_corner(RIGHT+UP),RIGHT)
        vec2 = Vector(direction=b,color=GOLD_E)
        vec2_name = TexMobject("b")
        vec2_name.next_to(vec2.get_corner(LEFT+UP),LEFT)
        #### Addition ####
        vecsum = Vector(direction=a+b,color=GREEN_SCREEN)
        vecsum_name = TexMobject("a+b")
        vecsum_name.next_to(vecsum,LEFT)
        suma1 = TextMobject("Podemos ver la suma de dos")
        suma2 = TextMobject("vectores con flechas")
        suma2.next_to(suma1,DOWN)
        suma = VGroup(suma1,suma2)
        suma.scale(0.75)
        self.play(FadeIn(suma))
        self.play(ApplyMethod(suma.move_to,text_pos))
        self.play(ShowCreation(plano))
        self.play(ShowCreation(vec1),ShowCreation(vec2),Write(vec1_name),Write(vec2_name))
        self.wait()
        # Slide b so its tail sits at the tip of a (tail-to-tip addition).
        self.play(ApplyMethod(vec2.move_to,a+b/2),ApplyMethod(vec2_name.move_to,a+b/2))
        self.wait()
        self.play(ShowCreation(vecsum),Write(vecsum_name))
        self.wait()
        self.play(FadeOut(vec1),FadeOut(vec2),FadeOut(vecsum),FadeOut(plano),FadeOut(suma),FadeOut(vec1_name),FadeOut(vec2_name),FadeOut(vecsum_name))
        #### Scalar multiplication ####
        vecesc = Vector(direction=c*a,color=RED)
        vecesc_name = TexMobject(str(c)+"a")
        vecesc_name.next_to(vecesc,DOWN)
        esc1 = TextMobject("Tambi\\'{e}n podemos multiplicar")
        esc2 = TextMobject("vectores por un escalar del campo")
        esc2.next_to(esc1,DOWN)
        esc = VGroup(esc1,esc2)
        esc.scale(0.75)
        valor = TexMobject(r"\text{En este caso el escalar es}" + str(c))
        valor.scale(0.75)
        valor.move_to(text_pos)
        self.play(Write(esc))
        self.play(ApplyMethod(esc.move_to,text_pos))
        self.play(ShowCreation(plano))
        self.play(ShowCreation(vec1),Write(vec1_name))
        self.wait()
        self.play(ReplacementTransform(esc,valor))
        self.play(ReplacementTransform(vec1,vecesc),ReplacementTransform(vec1_name,vecesc_name))
        self.wait(2)
        self.play(FadeOut(plano),FadeOut(vecesc_name),FadeOut(valor),FadeOut(vecesc))
        #### Challenges ####
        ejem = TextMobject("Puedes modificar los vectores y el escalar para probar tus propios ejemplos,").scale(0.75)
        ejem2 = TextMobject("encontrar\\'{a}s m\\'{a}s informaci\\'{o}n en el c\\'{o}digo de este video.").scale(0.75)
        ejem2.next_to(ejem,DOWN)
        self.play(Write(ejem),Write(ejem2))
        self.wait(3)
        self.play(FadeOut(ejem),FadeOut(ejem2))
        #### Authors and credits ####
        autor1 = TextMobject("Bruno Ram\\'{i}rez")
        autor1.scale(0.8)
        contact1 = TextMobject("GitHub: @brunormzg")
        contact1.scale(0.6)
        contact1.next_to(autor1,DOWN)
        aut1 = VGroup(autor1,contact1)
        #aut1.to_edge(UP)
        autor2 = TextMobject("Donaldo Mora")
        autor2.scale(0.8)
        autor2.next_to(contact1,DOWN)
        contact2 = TextMobject("Instagram: donal\\_mora")
        contact2.scale(0.6)
        contact2.next_to(autor2,DOWN)
        aut2 = VGroup(autor2,contact2)
        autor3 = TextMobject("Rodrigo Moreno")
        autor3.scale(0.8)
        autor3.next_to(contact2,DOWN)
        contact3 = TextMobject("Instagram: \\_nosoyro")
        contact3.scale(0.6)
        contact3.next_to(autor3,DOWN)
        aut3 = VGroup(autor3,contact3)
        #aut3.to_edge(DOWN)
        self.play(Write(aut1),Write(aut2),Write(aut3))
#### Bases de espacios vectoriales y combinaciones lineales ####
class Bases(Scene):
    """Intuitive walkthrough of what a basis is: expresses x=(1,2) first in
    the canonical basis, then in the basis {(2,1),(1,-1)}
    (on-screen text intentionally kept in Spanish)."""

    def construct(self):
        grid = ScreenGrid()
        ### text:
        t_1 = TextMobject('Idea intuitiva de lo que significa una base.')
        t_2 = TextMobject('Pensemos en el plano cartesiano...')
        t_3 = TextMobject('Pensemos en la base usual...')
        t_4 = TextMobject('La canónica')
        t_5 = TexMobject('\\xi = \{ \\vec{e}_1, \\vec{e}_2 \} = \{ (1,0), (0,1) \}')
        t_6 = TextMobject('''Supongamos que queremos "caracterizar" \n
el vector $\\vec{x} = (1,2)$, con la base $\\xi$.''')
        t_7 = TextMobject('''Es claro que: \n
$\\vec{x} = 1\\cdot(1,0)+2\\cdot(0,1) = (1,2)$''')
        t_8 = TextMobject('''Que geométricamente es...''')
        t_9 = TextMobject('''Solamente caminar dos veces el camino fijado \n
por el vector $(0,1)$ y luego "caminar" una \n
sola vez por el "camino" fijado por (1,0)''')
        t_9.move_to((0,-1,0))
        t_10 = TextMobject('''Ahora supongamos que tenemos la base: \n
$\\gamma = \{ (2,1),(1,-1) \}$''')
        t_11 = TextMobject('''Sabemos que... \n
$\\vec{x} = 1\cdot(2,1)+(-1)\cdot(1,-1)$''')
        t_12 = TextMobject('''Pero geométricamente qué es esta "combinación lineal"?''')
        t_13 = TextMobject('''CLARO! Solamente es tomar un distinto camino \n
dado por esta "base" \n
para llegar al mimso vector $\\vec{x}$ \n
en el espacio vectorial $\\mathbb{R}^2$''')
        ### vectors:
        # Canonical basis arrows and e_2 translated to the tip of e_1.
        e_1 = Arrow((0,0,0), (1,0,0), buff = 0)
        e_2 = Arrow((0,0,0), (0,1,0), buff = 0)
        e_2_mov_e_1 = Arrow(e_1.get_end(), (1,2,0), buff = 0)
        # Alternative basis arrows and the second one translated likewise.
        v_1 = Arrow((0,0,0), (2,1,0), buff = 0)
        v_2 = Arrow((0,0,0), (1,-2,0), buff = 0)
        v_1_mov_v_1 = Arrow(v_1.get_end(), (1,2,0), buff = 0)
        ### animation (text sequence, then each basis construction):
        self.play(Write(t_1))
        self.wait()
        self.play(ReplacementTransform(t_1, t_2))
        self.wait()
        self.play(ReplacementTransform(t_2, t_3))
        self.wait()
        self.play(ReplacementTransform(t_3, t_4))
        self.wait()
        self.play(ReplacementTransform(t_4, t_5))
        self.wait()
        self.play(ReplacementTransform(t_5, t_6))
        self.wait()
        self.play(ReplacementTransform(t_6, t_7))
        self.wait()
        self.play(ReplacementTransform(t_7, t_8))
        self.wait()
        self.play(FadeOut(t_8), Write(grid))
        self.wait()
        self.play(Write(e_1), Write(e_2))
        self.wait()
        self.play(ReplacementTransform(e_2, e_2_mov_e_1), Write(t_9))
        self.wait()
        self.play(FadeOut(e_2_mov_e_1), FadeOut(grid), FadeOut(t_9), FadeOut(e_1), Write(t_10))
        self.wait()
        self.play(ReplacementTransform(t_10, t_11))
        self.wait()
        self.play(ReplacementTransform(t_11, t_12))
        self.wait()
        self.play(FadeOut(t_12), Write(grid))
        self.wait()
        self.play(Write(v_1), Write(v_2))
        self.wait()
        self.play(ReplacementTransform(v_2, v_1_mov_v_1))
        self.wait()
        self.play(ReplacementTransform(VGroup(v_1_mov_v_1, grid, v_1), t_13))
#### Norma y propiedades ####
### Modify these vectors to visualize the first property; they are NOT drawn from the origin
v1 = np.array([2,1,0])
v2 = np.array([4,2,0])
v3 = np.array([-1,2,0])
v4 = np.array([3,0,0])
####
# You can swap these vectors for others to visualize the
# triangle inequality and the scalar-multiplication property
avec = np.array([1,1,0])
bvec = np.array([4,-2,0])
lamb = 1.5
## (DO NOT MODIFY) derived norms used by the Propiedades_Norma scene
anorm = np.linalg.norm(avec)
bnorm = np.linalg.norm(bvec)
sumnorm = np.linalg.norm(avec+bvec)
nv1 = round(np.linalg.norm(v1),3)  # rounded to 3 decimals for on-screen labels
nv2 = round(np.linalg.norm(v2),3)
nv3 = round(np.linalg.norm(v3),3)
nv4 = round(np.linalg.norm(v4),3)
class Propiedades_Norma(Scene):
def construct(self):
### Titulo y definici'on ###
titulo = TextMobject("La norma y sus propiedades").scale(1.2)
deftex = TexMobject(r"\text{Se defini\'{o} la norma euclidiana de un vector}\ \vec{x} \in \mathbb{R}^n\ \text{como}:").shift(2*UP)
defn = TexMobject(r"\Vert \vec{x} \Vert:= \sqrt{x_1^2+x_2^2+x_3^2+...+x_n^2}")
longi = TextMobject("Esta cantidad representa la \"longitud\" de un vector").shift(2*DOWN)
def2d = TexMobject(r"\text{Consideremos el caso}\ \vec{x} \in \mathbb{R}^2").shift(2*UP)
d2d = TexMobject(r"\Vert \vec{x} \Vert := \sqrt{x_1^2+x_2^2}")
vamo = TextMobject("Adelante veremos tres propiedades fundamentales").shift(2*DOWN)
segslide = VGroup(def2d,d2d,vamo)
self.play(Write(titulo))
self.wait()
self.play(FadeOut(titulo))
self.play(Write(deftex),Write(defn),Write(longi))
self.wait(2)
self.play(ReplacementTransform(deftex,def2d), ReplacementTransform(defn,d2d), ReplacementTransform(longi,vamo))
self.wait(2)
self.play(FadeOut(segslide))
self.wait()
### PRIMERA PROPIEDAD ###
prop1 = TexMobject(r"\Vert \vec{x} \Vert \geq 0\ \forall \vec{x} \in \mathbb{R}^2\ \text{y}\ \Vert \vec{x} \Vert = 0 \Leftrightarrow \vec{x}=\vec{0}").to_edge(DOWN)
plano = NumberPlane()
vc1 = Vector(direction = v1 ,color = RED).move_to(np.array([-3,-2,0]))
vc2 = Vector(direction = v2 ,color = BLUE).move_to(np.array([-4,1,0]))
vc3 = Vector(direction = v3 ,color = YELLOW).move_to(np.array([4,--2,0]))
vc4 = Vector(direction = v4,color = ORANGE).move_to(np.array([2,0,0]))
dot = Dot((0,0,0), color = WHITE, radius = 0.15)
cdot = SmallDot((0,0,0), color = BLACK)
ddot = VGroup(dot,cdot)
vc1_l = TexMobject(r"\vec{x}").next_to(vc1.get_center(),UP)
vc2_l = TexMobject(r"\vec{y}").next_to(vc2.get_center(),UP)
vc3_l = TexMobject(r"\vec{z}").next_to(vc3.get_center(),UP)
vc4_l = TexMobject(r"\vec{w}").next_to(vc4.get_center(),UP)
zero_dot = TexMobject(r"\vec{0}").next_to(dot.get_center(),UP+LEFT)
vcts = VGroup(vc1,vc2,vc3,vc4,ddot)
labels = VGroup(vc1_l,vc2_l,vc3_l,vc4_l,zero_dot)
normies = TextMobject("Veamos las normas de los vectores:").to_edge(UP)
vc1_n = Vector(direction = np.array([np.linalg.norm(v1),0,0]), color = RED).shift(4*LEFT+0.5*UP)
vc2_n = Vector(direction = np.array([np.linalg.norm(v2),0,0]), color = BLUE).shift(4*LEFT)
vc3_n = Vector(direction = np.array([np.linalg.norm(v3),0,0]), color = YELLOW).shift(4*LEFT+0.5*DOWN)
vc4_n = Vector(direction = np.array([np.linalg.norm(v4),0,0]), color = ORANGE).shift(4*LEFT+DOWN)
bracv1 = Brace(vc1_n,UP)
normv1 = TexMobject(r"\Vert \vec{x} \Vert =" + str(nv1)).next_to(bracv1,UP)
brac1 = VGroup(bracv1,normv1)
bracv2 = Brace(vc2_n,UP)
normv2 = TexMobject(r"\Vert \vec{y} \Vert =" + str(nv2)).next_to(bracv2,UP)
brac2 = VGroup(bracv2,normv2)
bracv3 = Brace(vc3_n,UP)
normv3 = TexMobject(r"\Vert \vec{z} \Vert =" + str(nv3)).next_to(bracv3,UP)
brac3 = VGroup(bracv3,normv3)
bracv4 = Brace(vc4_n,UP)
normv4 = TexMobject(r"\Vert \vec{w} \Vert =" + str(nv4)).next_to(bracv4,UP)
brac4 = VGroup(bracv4,normv4)
normdot = TexMobject(r"\Vert \vec{0} \Vert = 0").next_to(ddot,DOWN)
obse1 = TextMobject("Todos estos vectores tuvieron").shift(3*LEFT+UP)
obse2 = TextMobject("una norma mayor a cero.").next_to(obse1,DOWN)
obse = VGroup(obse1,obse2)
obscero = TexMobject(r"\text{Ahora para el vector}\ \vec{0}").to_edge(UP)
unico = TextMobject("¡Es el \\'{u}nico cuya norma se anula!").shift(0.5*UP)
self.add(plano)
self.play(ShowCreation(plano, runtime = 2))
self.play(Write(prop1))
self.play(ShowCreation(vcts, runtime = 2),Write(labels))
self.wait(2)
self.play(FadeOut(plano))
self.play(Write(normies),FadeOut(labels))
self.play(ddot.shift, 0.5*UP+4*RIGHT,
run_time=1,
path_arc=1)
self.play(ReplacementTransform(vc1,vc1_n), ReplacementTransform(vc2,vc2_n), ReplacementTransform(vc3,vc3_n),
ReplacementTransform(vc4,vc4_n))
self.play(ShowCreation(brac1))
self.wait()
self.play(FadeOut(vc1_n),ReplacementTransform(brac1,brac2))
self.wait(1.5)
self.play(FadeOut(vc2_n),ReplacementTransform(brac2,brac3))
self.wait(1.5)
self.play(FadeOut(vc3_n),ReplacementTransform(brac3,brac4))
self.wait(1.5)
self.play(FadeOut(vc4_n),FadeOut(brac4))
self.wait(1.5)
self.play(Write(obse))
self.wait(1.5)
self.play(ReplacementTransform(normies,obscero),FadeOut(obse))
self.play( ddot.shift , 4*LEFT)
self.wait()
self.play(Transform(dot,cdot),ShowCreation(normdot))
self.wait(2)
self.play(Transform(obscero,unico))
self.wait(2)
self.play(FadeOut(cdot),FadeOut(normdot),FadeOut(obscero))
self.play(prop1.shift, 3.5*UP, runtime = 1.5)
self.wait(2.2)
self.play(FadeOut(prop1))
### SEGUNDA PROPIEDAD ###
prop2 = TexMobject(r" \Vert \lambda \vec{x} \Vert = \vert \lambda \vert \Vert \vec{x} \Vert").to_edge(DOWN).scale(1.2)
plano = NumberPlane()
vec1 = Vector(direction=avec,color=RED)
vec1_name = TexMobject(r"\vec{x}").next_to(vec1.get_center(),DOWN)
vecesc = Vector(direction=lamb*avec,color=RED)
vecesc_name = TexMobject(str(lamb)+r"\vec{x}").next_to(vecesc.get_center(),DOWN+RIGHT)
avs = Vector(direction = -lamb*avec,color = BLUE)
avs_name = TexMobject(str(-1*lamb)+r"\vec{x}").next_to(avs.get_center(),DOWN+RIGHT)
lanorma = TextMobject("Consideremos solamente la \"longitud\" de ambos vectores").shift(3*UP).scale(0.9)
acostado1 = Vector(direction = np.array([np.linalg.norm(lamb*avec),0,0]), color = RED).shift(4*LEFT+UP)
acostado2 = Vector(direction = np.array([np.linalg.norm(-lamb*avec),0,0]), color = BLUE).shift(4*LEFT)
abrace1 = Brace(acostado1, UP)
abrace2 = Brace(acostado2, DOWN)
abraces = VGroup(abrace1,abrace2)
abracetex1 = TexMobject(r"\Vert" + str(lamb)+ r"\vec{x} \Vert =" +str(lamb) + r"\Vert \vec{x} \Vert").next_to(abrace1, UP)
abracetex2 = TexMobject(r"\Vert" + str(-lamb)+ r" \vec{x}\Vert = " +str(lamb) + r"\Vert \vec{x} \Vert").next_to(abrace2, DOWN)
abracetexs = VGroup(abracetex1,abracetex2)
obs = TextMobject("Observemos que si el escalar es ").shift(3*RIGHT+UP).scale(0.75)
obs1 = TextMobject("negativo, se vuelve positivo").next_to(obs,DOWN).scale(0.75)
obs2 = TextMobject("Si es positivo, mantiene su signo.").next_to(obs1,DOWN).scale(0.75)
observs = VGroup(obs,obs1, obs2)
asimero = TextMobject("¡As\\'{i} act\\'{u}a la funci\\'{o}n valor absoluto!").move_to(obs).scale(0.9).shift(DOWN)
self.play(ShowCreation(plano, runtime = 2))
self.play(Write(prop2))
self.wait(2)
self.play(ShowCreation(vec1),Write(vec1_name))
self.wait()
self.play(ReplacementTransform(vec1,vecesc),ReplacementTransform(vec1_name,vecesc_name),ShowCreation(avs),Write(avs_name))
self.wait()
self.play(FadeOut(plano))
self.wait()
self.play(Write(lanorma))
self.wait()
self.play(FadeOut(vecesc_name),FadeOut(avs_name), FadeOut(lanorma))
self.play(Transform(vecesc,acostado1),Transform(avs,acostado2),ShowCreation(abraces, runtime = 1),Write(abracetexs))
self.wait(2)
self.play(Write(observs))
self.wait(2)
self.play(FadeOut(observs))
self.play(Write(asimero))
self.wait()
self.play(FadeOut(asimero))
self.play(
prop2.shift, UP*3.5+RIGHT*3,
run_time=1,
path_arc=2
)
self.wait(2)
self.play(FadeOut(abracetexs),FadeOut(abraces),FadeOut(acostado1),FadeOut(vecesc),FadeOut(avs),FadeOut(prop2))
self.wait()
#### TERCERA PROPIEDAD: DESIGUALDAD DEL TRIANGULO ####
prop3 = TexMobject(r" \Vert \vec{x} + \vec{y}\Vert \leq \Vert \vec{x} \Vert+\Vert \vec{y} \Vert").shift(3*DOWN).scale(1.2)
plano = NumberPlane()
vector1 = Vector(direction = avec, color = RED).shift(2.5*LEFT)
vector2 = Vector(direction = bvec, color = BLUE).move_to(avec+bvec/2).shift(2.5*LEFT)
vecsum = Vector(direction = avec+bvec, color = YELLOW).shift(2.5*LEFT)
vector1_label = TexMobject(r"\vec{x}").next_to(vector1.get_center(),UP)
vector2_label = TexMobject(r"\vec{y}").next_to(vector2.get_center(), UP)
vecsum_label = TexMobject(r"\vec{x}+\vec{y}").next_to(vecsum.get_center(),DOWN)
labels = VGroup(vector1_label,vector2_label,vecsum_label)
tvector1 = Vector(direction = np.array([anorm,0,0]), color = RED).shift(3.5*LEFT+UP)
tvector2 = Vector(direction = np.array([bnorm,0,0]), color = BLUE).shift(3.5*LEFT+UP+np.array([anorm,0,0]))
tvecsum = Vector(direction = np.array([sumnorm,0,0]), color = YELLOW).shift(3.5*LEFT+0.5*UP)
tvectors = VGroup(tvector1,tvector2,tvecsum)
brace1 = Brace(tvector1,UP)
brace2 = Brace(tvector2,UP)
bracesum = Brace(tvecsum, DOWN)
lnorm1 = TexMobject(r"\Vert \vec{x} \Vert").next_to(brace1.get_center(),UP)
lnorm2 = TexMobject(r"\Vert \vec{y} \Vert").next_to(brace2.get_center(),UP)
lnormsum = TexMobject(r"\Vert \vec{x} + \vec{y} \Vert").next_to(bracesum.get_center(),DOWN)
braces = VGroup(brace1,brace2,bracesum)
lnorms = VGroup(lnorm1,lnorm2,lnormsum)
laigualdad = TextMobject("(La igualdad se da cuando los tres vectores son colineales)").shift(3*DOWN).scale(0.8)
preguntaa = TextMobject("¿Puedes pensar en la relaci\\'{o}n entre norma y producto interno?").scale(0.9).shift(UP)
preguntab = TextMobject("¿Se te ocurre otra forma distinguir que vector es m\\'{a}s \"largo\"?").scale(0.9).shift(DOWN)
edita = TextMobject("Edita el c\\'{o}digo para visualizar con m\\'{a}s vectores")
self.play(Write(prop3))
self.play(ShowCreation(vector1), ShowCreation(vector2))
self.play(GrowArrow(vecsum), Write(labels))
self.wait(2)
self.play(Transform(vector1,tvector1),Transform(vector2,tvector2),Transform(vecsum,tvecsum),FadeOut(labels))
self.play(ShowCreation(braces))
self.play(Write(lnorms))
self.play(prop3.shift , UP, runtime = 1)
self.play(Write(laigualdad))
self.wait(3)
self.play(FadeOut(lnorms),FadeOut(braces),FadeOut(prop3),FadeOut(vector1),FadeOut(vector2),FadeOut(vecsum), FadeOut(laigualdad))
self.play(Write(preguntaa),Write(preguntab))
self.wait(2.3)
self.play(FadeOut(preguntaa),FadeOut(preguntab))
self.wait()
self.play(Write(edita))
#### Producto interior y su relación con la norma ####
class Producto_Interior(Scene):
    """Geometric aspects of the inner product (manim scene).

    Via an orthogonal-projection argument on the triangle formed by x,
    lambda*y and x - lambda*y, derives cos(theta) = (x.y)/(|x||y|) and the
    equivalence  x.y = 0  <=>  x perpendicular to y.
    On-screen text is intentionally in Spanish.
    """

    def construct(self):
        ### Title ###
        #signif = TextMobject("El significado geom\\'{e}trico del producto interno")
        titulo = TextMobject("Aspectos geom\\'{e}tricos del producto interno").scale(1.2)
        self.play(Write(titulo))
        self.wait()
        self.play(FadeOut(titulo))
        ### Two vectors in the plane and the projection question ###
        tomemos = TextMobject("Consideremos dos vectores en el plano").shift(2*DOWN)
        vecx_label = TexMobject(r"\vec{x}")
        vecy_label = TexMobject(r"\vec{y}")
        vecx = Vector(direction = np.array([3,3,0]), color = BLUE).shift(2*LEFT)
        vecy = Vector(direction = np.array([5,0,0]), color = RED).shift(2*LEFT)
        vecx_label.next_to(vecx,LEFT).shift(RIGHT)
        vecy_label.next_to(vecy, DOWN)
        self.play(Write(tomemos))
        self.play(GrowArrow(vecx),GrowArrow(vecy))
        self.play(Write(vecx_label),Write(vecy_label))
        self.wait()
        lambdukis = TexMobject(r"\text{¿Que valor de}\ \lambda\ \text{hace a}\ \vec{y}\ \text{y}\ \vec{x}-\lambda\vec{y}\ \text{perpendiculares?}")
        lambdukis.move_to(tomemos)
        vecesp = Vector(direction = 3*UP).shift(1*RIGHT)
        vecesp_label = TexMobject(r'\vec{x}-\lambda\vec{y}').next_to(vecesp,RIGHT)
        ylambd = Vector(direction = 3*RIGHT).shift(2*LEFT).set_color(YELLOW)
        ylambd_label = TexMobject(r"\lambda\vec{y}").move_to(vecy_label)
        self.play(FadeOut(tomemos))
        self.play(Write(lambdukis))
        self.wait()
        self.play(Transform(vecy_label,ylambd_label))
        self.play(Write(vecesp_label),GrowArrow(vecesp), GrowArrow(ylambd))
        self.wait(2)
        self.play(FadeOut(vecx),FadeOut(vecx_label),FadeOut(vecy),
                  FadeOut(vecy_label),FadeOut(ylambd),FadeOut(ylambd_label),FadeOut(lambdukis),
                  FadeOut(vecesp),FadeOut(vecesp_label))
        self.wait()
        ### Recall the norm, then apply Pythagoras to the triangle ###
        recordemos = TextMobject('Para responder a esta pregunta, basta recordar que').shift(2*UP)
        norm = TexMobject(r' \Vert \vec{x} \Vert = \sqrt{x_1^2+x_2^2}')
        denota = TexMobject(r'\text{representa la longitud del vector}\ \vec{x}').shift(2*DOWN)
        grupo1 = VGroup(recordemos,norm,denota)
        pitagoras = TextMobject("Utilicemos el Teorema de Pit\\'{a}goras en nuestro tri\\'{a}ngulo").shift(2*DOWN)
        normx = TexMobject(r'\Vert \vec{x} \Vert').move_to(vecx_label)
        normlam = TexMobject(r'\Vert \lambda\vec{y} \Vert').move_to(ylambd_label).shift(0.5*LEFT)
        normesp = TexMobject(r'\Vert \vec{x}-\lambda\vec{y} \Vert').move_to(vecesp_label)
        pitriangulo = TexMobject(r"\Vert \lambda\vec{y} \Vert^2+\Vert \vec{x}-\lambda\vec{y} \Vert^2 = \Vert \vec{x} \Vert^2 ")
        pitriangulo.move_to(pitagoras)
        desa = TextMobject("Desarrollando esto, llegamos a la siguiente ecuaci\\'{o}n").move_to(pitriangulo).shift(DOWN)
        self.play(Write(recordemos),Write(norm), Write(denota))
        self.wait(2)
        self.play(FadeOut(recordemos),FadeOut(norm),FadeOut(denota))
        self.play(Write(pitagoras), GrowArrow(ylambd),GrowArrow(vecesp),GrowArrow(vecx))
        self.play(Write(normx),Write(normesp),Write(normlam))
        self.wait()
        self.play(Transform(pitagoras,pitriangulo))
        self.wait(2)
        self.play(Write(desa))
        self.wait(2)
        self.play(FadeOut(pitagoras),FadeOut(desa),
                  FadeOut(vecesp),FadeOut(normesp),FadeOut(vecx),FadeOut(normx),FadeOut(ylambd),FadeOut(normlam))
        self.wait()
        ### Solve the resulting quadratic for lambda ###
        ec1 = TexMobject(r"2\lambda^2(y_1^2+y_2^2)-2\lambda(x_1y_1+x_2y_2)=0").shift(2*UP)
        cuyo = TextMobject("cuya soluci\\'{o}n distinta de cero es:")
        solu = TexMobject(r"\lambda = \frac{x_1y_1+x_2y_2}{\Vert \vec{y} \Vert^2}").shift(2*DOWN)
        self.play(Write(ec1))
        self.wait()
        self.play(Write(cuyo))
        self.wait()
        self.play(Write(solu))
        self.wait(2)
        self.play(FadeOut(ec1),FadeOut(cuyo))
        self.play(
            solu.shift, UP*5,
            run_time=1,
            path_arc=2
        )
        self.wait()
        ### Special case: the vectors are already perpendicular ###
        xperp = Vector(direction = np.array([0,3,0]), color = BLUE).shift(LEFT+DOWN)
        yperp = Vector(direction = np.array([3,0,0]), color = RED).shift(LEFT+DOWN)
        kepasa = TextMobject("¿Qu\\'{e} pasa si los vectores \\textbf{ya} son perpendiculares?").shift(2.5*DOWN)
        xperp_label = TexMobject(r"\vec{x}").next_to(xperp.get_center(),LEFT)
        yperp_label = TexMobject(r"\vec{y}").next_to(yperp.get_center(),DOWN)
        dadocaso = TexMobject(r"\text{En dado caso},\ \lambda = 0").move_to(kepasa)
        self.play(GrowArrow(xperp),GrowArrow(yperp), Write(xperp_label), Write(yperp_label), Write(kepasa))
        self.wait()
        self.play(Transform(kepasa,dadocaso))
        self.wait()
        self.play(FadeOut(xperp),FadeOut(xperp_label),FadeOut(yperp_label),FadeOut(yperp),FadeOut(kepasa))
        self.wait()
        newsolu = TexMobject(r"\lambda = \frac{x_1y_1+x_2y_2}{\Vert \vec{y} \Vert^2}= 0 \Rightarrow x_1y_1+x_2y_2 = 0")
        cond = TexMobject(r"(\text{Si}\ \vec{y} \neq 0)").shift(2*DOWN)
        self.play(Transform(solu,newsolu))
        self.play(Write(cond))
        self.wait(2)
        self.play(FadeOut(solu), FadeOut(cond))
        ### Connect to the inner product ###
        textprodint = TextMobject("Si recordamos:").shift(2*UP)
        prodint = TexMobject(r"\vec{x} \cdot \vec{y} = x_1y_1+x_2y_2")
        tons = TextMobject("Entonces").move_to(textprodint)
        ida = TexMobject(r"\vec{x} \perp \vec{y} \Rightarrow \vec{x} \cdot \vec{y} = 0").scale(1.2)
        self.play(Write(textprodint), Write(prodint))
        self.wait(2)
        self.play(Transform(textprodint,tons), Transform(prodint,ida))
        self.wait(2)
        masymas = TextMobject("Veamos otro aspecto geom\\'{e}trico del producto interno").shift(UP)
        regre = TextMobject("regresemos al tri\\'{a}ngulo antes visto.")
        self.play(Transform(textprodint,masymas),Transform(prodint,regre))
        self.wait()
        self.play(FadeOut(textprodint),FadeOut(prodint))
        ### Rebuild the triangle and introduce the angle theta ###
        vecesp = Vector(direction = 3*UP).shift(1*RIGHT)
        vecesp_label = TexMobject(r'\vec{x}-\lambda\vec{y}').next_to(vecesp,RIGHT)
        ylambd = Vector(direction = 3*RIGHT).shift(2*LEFT).set_color(YELLOW)
        ylambd_label = TexMobject(r"\lambda\vec{y}").next_to(ylambd).shift(2*LEFT+0.5*DOWN)
        vecx = Vector(direction = np.array([3,3,0]), color = BLUE).shift(2*LEFT)
        vecx_label = TexMobject(r"\vec{x}").next_to(vecx,LEFT).shift(RIGHT)
        vecy = Vector(direction = np.array([5,0,0]), color = RED).shift(2*LEFT)
        vecy_label = TexMobject(r"\vec{y}").next_to(vecy).shift(0.5*DOWN)
        self.play(GrowArrow(vecx), GrowArrow(vecesp), GrowArrow(vecy))
        self.play(Write(vecx_label), Write(vecesp_label), Write(vecy_label))
        arco = ArcBetweenPoints(np.array([1,0,0]),np.array([0.7,0.7,0])).shift(2*LEFT)
        arco_label = TexMobject(r"\theta").next_to(arco,RIGHT)
        angulis = TexMobject(r"\text{Consideremos el \'{a}ngulo entre}\ \vec{x}\ \text{y}\ \vec{y}").shift(2*DOWN)
        utilizando = TextMobject("Utilizando identidades trigonom\\'{e}tricas, sabemos que").move_to(angulis)
        coseno = TexMobject(r"\cos\theta = \frac{\Vert \lambda\vec{y}\Vert}{\Vert \vec{x}\Vert}").move_to(angulis)
        demo = TexMobject(r"\text{...se puede demostrar que independientemente del signo de}\ \lambda...").scale(0.8).move_to(angulis).shift(DOWN)
        self.play(GrowArrow(arco),GrowArrow(ylambd))
        self.play(Write(arco_label), Write(ylambd_label), Write(angulis))
        self.wait()
        self.play(Transform(angulis, utilizando))
        self.wait(1.8)
        self.play(Transform(angulis,coseno),FadeOut(vecy), FadeOut(vecy_label))
        self.wait(2)
        self.play(Write(demo))
        self.wait(2)
        cos2 = TexMobject(r"\cos\theta = \lambda \frac{\Vert \vec{y}\Vert}{\Vert \vec{x}\Vert}").move_to(angulis)
        self.play(FadeOut(angulis))
        self.play(Write(cos2))
        self.wait()
        self.play(FadeOut(vecx),FadeOut(vecx_label),FadeOut(vecesp),FadeOut(vecesp_label),FadeOut(ylambd_label),
                  FadeOut(ylambd), FadeOut(demo), FadeOut(arco), FadeOut(arco_label))
        self.play(
            cos2.shift, UP*5,
            run_time=1,
            path_arc=0
        )
        ### Substitute lambda to reach the cosine formula ###
        lambda_d = TexMobject(r"\text{Se dedujo anteriormente que}\ \lambda =\frac{\vec{x} \cdot \vec{y}}{\Vert \vec{y} \Vert^2}")
        lambda_dd = TexMobject(r"\text{sustituyendo lo anterior...}")
        cos3 = TexMobject(r"\cos\theta = \frac{\vec{x}\cdot\vec{y}}{\Vert \vec{x} \Vert\Vert \vec{y} \Vert}").shift(2*DOWN)
        self.play(Write(lambda_d))
        self.wait(1.5)
        self.play(Transform(lambda_d,lambda_dd),Write(cos3))
        self.wait(2)
        yasi = TextMobject("y asi..").shift(2*UP)
        thetatex = TexMobject(r"\theta = \arccos(\frac{\vec{x}\cdot\vec{y}}{\Vert \vec{x} \Vert\Vert \vec{y} \Vert}),\ \theta \in [0,\pi]").scale(1.2)
        self.play(FadeOut(cos2), Transform(lambda_d,yasi),FadeOut(cos3))
        self.play(Write(thetatex))
        self.wait(2.5)
        #thetatex2 = TexMobject(r"\theta = \arccos(\frac{\vec{x}\cdot\vec{y}}{\Vert \vec{x} \Vert\Vert \vec{y} \Vert})")
        siahora = TexMobject(r"\text{Si ahora}\ \vec{x} \cdot \vec{y}=0 ").shift(3*UP)
        # Bug fix: arccos(0) = pi/2, not pi (the previous formula was wrong).
        regreso = TexMobject(r" \theta = \arccos(0) = \frac{\pi}{2}").shift(1.5*UP)
        esdecir = TexMobject(r"\vec{x} \cdot \vec{y} = 0 \Rightarrow \vec{x} \perp \vec{y}").scale(1.5)
        self.play(Transform(lambda_d,siahora),Transform(thetatex,regreso), Write(esdecir))
        self.wait(2)
        ### Summary ###
        enresumen = TextMobject("En resumen, vimos dos caracter\\'{i}sticas del producto interno").shift(2.5*UP)
        sii = TexMobject(r"1)\ \vec{x} \cdot \vec{y} = 0 \Leftrightarrow \vec{x} \perp \vec{y}").shift(UP).scale(1.2)
        thetaa = TexMobject(r"2)\ \theta = \arccos(\frac{\vec{x}\cdot\vec{y}}{\Vert \vec{x} \Vert\Vert \vec{y} \Vert}),\ \theta \in [0,\pi]").scale(1).shift(DOWN)
        esun = TextMobject("¡Este producto es m\\'{a}s que s\\'{o}lo una f\\'{o}rmula!").shift(2.5*DOWN)
        self.play(Transform(lambda_d,enresumen),FadeOut(thetatex), FadeOut(esdecir))
        self.play(Write(sii))
        self.wait(0.5)
        self.play(Write(thetaa))
        self.play(Write(esun))
        self.wait(2)
        self.play(FadeOut(lambda_d),FadeOut(sii), FadeOut(thetaa), FadeOut(esun))
        self.wait()
#### Diferentes normas en R^n ####
class Normas(Scene):
    """Unit 'circles' of different norms on R^2 (manim scene).

    Draws the unit balls of the 1-norm, 2-norm and infinity-norm, then
    animates the p-norm unit sphere as p grows, showing convergence to
    the infinity-norm square. On-screen text is intentionally in Spanish.
    """

    def construct(self):
        plano = NumberPlane()
        intro1 = TextMobject("Veremos como se ve un c\\'{i}rculo unitario")
        intro2 = TexMobject(r"\text{utilizando diferentes normas en }\mathbb{R}^2")
        intro2.next_to(intro1,DOWN)
        intro = VGroup(intro1,intro2)
        circ1 = TextMobject("Recordemos que la definici\\'{o}n del c\\'{i}rculo es")
        circ2 = TexMobject(r"\mathbb{S}^1=\{x\in\mathbb{R}^2 : \Vert x \Vert =1\}")
        circ2.next_to(circ1,DOWN)
        circ = VGroup(circ1,circ2)
        self.play(Write(intro))
        self.wait(2)
        self.play(ReplacementTransform(intro,circ))
        self.wait(2)
        self.play(FadeOut(circ))
        #### 1-norm: unit ball is a rotated square (diamond) ####
        title1 = TextMobject("Norma 1")
        norm1 = TexMobject(r"\Vert x \Vert_1=\vert x_1 \vert + \vert x_2 \vert")
        norm1.next_to(title1,DOWN)
        Group1 = VGroup(title1,norm1)
        Group1.scale(0.75)
        Group1.set_color(RED)
        fig1 = Square(side_length=np.sqrt(2),color=RED)
        fig1.rotate(PI/4)
        self.play(Write(Group1))
        self.wait()
        self.play(ApplyMethod(Group1.to_edge,UP))
        self.play(ShowCreation(plano))
        self.play(ShowCreation(fig1))
        self.wait(2)
        self.play(ApplyMethod(Group1.move_to,np.array([-5,3,0])))
        #### 2-norm: the usual Euclidean circle ####
        title2 =TextMobject("Norma 2")
        norm2 = TexMobject(r"\Vert x \Vert_2=\left(x_1^2 + x_2^2 \right)^{1/2}")
        norm2.next_to(title2,DOWN)
        Group2 = VGroup(title2,norm2)
        Group2.scale(0.75)
        Group2.set_color(YELLOW)
        fig2 = Circle(radius=1,color=YELLOW)
        self.play(Write(Group2))
        self.wait()
        self.play(ApplyMethod(Group2.to_edge,UP))
        self.play(ShowCreation(fig2))
        self.wait(2)
        self.play(ApplyMethod(Group2.move_to,np.array([5,3,0])))
        #### Infinity norm: unit ball is an axis-aligned square ####
        title3 = TextMobject("Norma infinito")
        norminfty = TexMobject(r"\Vert x \Vert_{\infty} = \max\{\vert x_i \vert : i \in \{1,2\}\}")
        norminfty.next_to(title3,DOWN)
        Group3 = VGroup(title3,norminfty)
        Group3.scale(0.75)
        Group3.set_color(GREEN_SCREEN)
        fig3 = Square(side_length=2,color=GREEN_SCREEN)
        self.play(Write(Group3))
        self.wait()
        self.play(ApplyMethod(Group3.to_edge,UP))
        self.play(ShowCreation(fig3))
        self.wait(2)
        self.remove(Group1,Group2,Group3,plano,fig1,fig2,fig3)
        #### p-norm: animate the unit sphere as p grows ####
        intro1 = TextMobject("Podemos definir una norma similar a las anteriores")
        intro2 = TexMobject(r"\text{para cada } p\in\mathbb{R},\ p\geq 1")
        intro2.next_to(intro1,DOWN)
        intro = VGroup(intro1,intro2)
        titlep = TexMobject(r"\text{Norma } p")
        normp = TexMobject(r"\Vert x \Vert_p = \left(\sum_{i=1}^n \vert x_i \vert ^p \right)^{1/p}")
        normp.next_to(titlep,DOWN)
        Groupp = VGroup(titlep,normp)
        text = TextMobject("Veamos que pasa cuando $p$ crece en $\\mathbb{R}$")
        self.play(Write(intro))
        self.wait(2)
        self.play(ReplacementTransform(intro,Groupp))
        self.wait(2)
        self.play(FadeOut(Groupp))
        self.play(Write(text))
        self.play(FadeOut(text))
        self.play(ShowCreation(plano))
        self.play(FadeIn(Group3),ShowCreation(fig3))
        self.play(ApplyMethod(Group3.to_edge,DOWN))
        n = 1
        while n<10:
            # Bug fix: n accumulates floating-point error (n += 0.20), so the
            # raw str(n) rendered labels like "p=1.4000000000000001".
            valor_sig = TexMobject(r"p="+str(round(n, 2)))
            valor_sig.to_edge(UP)
            self.add(valor_sig)
            D = []
            j=0
            dj=1/16
            # Trace the p-norm unit sphere one quadrant pair at a time:
            # each loop plots (j, (1-|j|^n)^(1/n)) and its mirror point.
            while j<1:
                dot1 = Dot(radius=0.05,color=PINK)
                dot1_2 = Dot(radius=0.05,color=PINK)
                dot1.move_to(np.array([j,(1-j**n)**(1/n),0]))
                dot1_2.move_to(np.array([(1-j**n)**(1/n),j,0]))
                self.add(dot1,dot1_2)
                self.wait(0.05)
                D.append(dot1)
                D.append(dot1_2)
                j=j+dj
            j=1
            while j>0:
                dot2 = Dot(radius=0.05,color=PINK)
                dot2_2 = Dot(radius=0.05,color=PINK)
                dot2.move_to(np.array([j,-(1-j**n)**(1/n),0]))
                dot2_2.move_to(np.array([-(1-j**n)**(1/n),j,0]))
                self.add(dot2,dot2_2)
                self.wait(0.05)
                D.append(dot2)
                D.append(dot2_2)
                j=j-dj
            j=0
            while j>-1:
                dot3 = Dot(radius=0.05,color=PINK)
                dot3_2 = Dot(radius=0.05,color=PINK)
                dot3.move_to(np.array([j,-(1-(-j)**n)**(1/n),0]))
                dot3_2.move_to(np.array([-(1-(-j)**n)**(1/n),j,0]))
                self.add(dot3,dot3_2)
                self.wait(0.05)
                D.append(dot3)
                D.append(dot3_2)
                j=j-dj
            j=-1
            while j<0:
                dot4 = Dot(radius=0.05,color=PINK)
                dot4_2 = Dot(radius=0.05,color=PINK)
                dot4.move_to(np.array([j,(1-(-j)**n)**(1/n),0]))
                dot4_2.move_to(np.array([(1-(-j)**n)**(1/n),j,0]))
                self.add(dot4,dot4_2)
                self.wait(0.05)
                D.append(dot4)
                D.append(dot4_2)
                j=j+dj
            self.wait(0.5)
            for i in D:
                self.remove(i)
            self.remove(valor_sig)
            n=n+0.20
        self.remove(plano,Group3,fig3)
        #### Conclusion and credits ####
        conclus1 = TextMobject("Vemos que tiende al ``c\\'{i}rculo'' que resulta de usar")
        conclus2 = TextMobject("la norma infinito, de ah\\'{i} su nombre.").next_to(conclus1,DOWN)
        conclus = VGroup(conclus1,conclus2)
        ejer = TextMobject("Puedes cambiar el código para verlo con más valores de $p$")
        self.play(Write(ejer))
        self.wait(2)
        self.play(FadeOut(ejer))
        self.play(Write(conclus))
        self.wait(2)
        self.play(FadeOut(conclus))
        #### Authors and credits ####
        autor1 = TextMobject("Bruno Ram\\'{i}rez")
        autor1.scale(0.8)
        contact1 = TextMobject("GitHub: @brunormzg")
        contact1.scale(0.6)
        contact1.next_to(autor1,DOWN)
        aut1 = VGroup(autor1,contact1)
        #aut1.to_edge(UP)
        autor2 = TextMobject("Donaldo Mora")
        autor2.scale(0.8)
        autor2.next_to(contact1,DOWN)
        contact2 = TextMobject("Instagram: donal\\_mora")
        contact2.scale(0.6)
        contact2.next_to(autor2,DOWN)
        aut2 = VGroup(autor2,contact2)
        autor3 = TextMobject("Rodrigo Moreno")
        autor3.scale(0.8)
        autor3.next_to(contact2,DOWN)
        contact3 = TextMobject("Instagram: \\_nosoyro")
        contact3.scale(0.6)
        contact3.next_to(autor3,DOWN)
        aut3 = VGroup(autor3,contact3)
        #aut3.to_edge(DOWN)
        self.play(Write(aut1),Write(aut2),Write(aut3))
### Métrica y sus propiedades ###
### Puntos para propiedad de simetría, PUEDES CAMBIARLOS ###
posa = np.array([4,0,0])
posb = np.array([-4,0,0])
distab = round(np.linalg.norm(posa-posb),3)
distba = round(np.linalg.norm(posb-posa),3)
### Definición de los puntos para la desigualdad del tríangulo, PUEDES CAMBIARLOS ###
posx = np.array([3,2,0])
posy = np.array([-2,-1,0])
posz = np.array([-2,1,0])
### Distancias Desig. del triángulo - NO CAMBIAR ###
dxy = round(np.linalg.norm(posx-posy),3)
dxz = round(np.linalg.norm(posx-posz),3)
dzy = round(np.linalg.norm(posz-posy),3)
class Metrica(Scene):
    """The metric (distance) induced by a norm and its three axioms (manim).

    Illustrates positivity/definiteness, symmetry, and the triangle
    inequality for d(x, y) = ||x - y||, using the module-level points
    posa/posb and posx/posy/posz. On-screen text is intentionally in Spanish.
    """

    def construct(self):
        ### Definition of the metric ###
        dist = TextMobject("La m\\'{e}trica: un concepto de distancia").scale(1.2)
        aux1 = TexMobject(r"\text{Auxiliados del concepto de norma, definimos la distancia}").to_edge(UP).shift(DOWN)
        aux2 = TexMobject(r"\text{entre dos vectores}\ \vec{x},\ \vec{y}\ \text{como}:").next_to(aux1,DOWN)
        aux = VGroup(aux1,aux2)
        defdist = TexMobject(r"d(\vec{x},\vec{y}):=\Vert \vec{x} - \vec{y}\Vert").scale(1.2)
        tprops1 = TextMobject("Una distancia se conoce como \"m\\'{e}trica\"")
        tprops2 = TextMobject(" si satisface las siguientes propiedades:").next_to(tprops1,DOWN)
        tprops = VGroup(tprops1,tprops2)
        self.play(Write(dist))
        self.wait(2)
        self.play(FadeOut(dist))
        self.play(Write(aux))
        self.wait(2)
        self.play(ReplacementTransform(aux,defdist))
        self.wait(1.5)
        self.play(FadeOut(defdist))
        self.play(Write(tprops))
        self.wait(1.5)
        self.play(FadeOut(tprops))
        ## PROPERTY 1: positive semi-definite ##
        distprop1 = TexMobject(r"d(\vec{x},\vec{y}) \geq 0\ \text{y}\ d(\vec{x},\vec{y}) = 0 \Leftrightarrow \vec{x} = \vec{y}").shift(3*DOWN)
        xdot = Dot((2,1,0), color = RED, radius = 0.12)
        xdot_label = TexMobject(r"\vec{x}").next_to(xdot,RIGHT)
        gxdot = VGroup(xdot, xdot_label)
        ydot = Dot((-2,-1,0), color = BLUE, radius = 0.12)
        ydot_label = TexMobject(r"\vec{y}").next_to(ydot,LEFT)
        gydot = VGroup(ydot, ydot_label)
        arrow1 = DoubleArrow(xdot.get_center(),ydot.get_center())
        arrow1_label = TexMobject(r"d(\vec{x},\vec{y})").next_to(arrow1.get_center(),DOWN+RIGHT)
        dmayor = TexMobject(r"d(\vec{x},\vec{y})> 0").shift(3*UP)
        garrow1 = VGroup(arrow1, arrow1_label)
        cerodot = Dot((0,0,0), color = PURPLE, radius = 0.12)
        cerodot_label = TexMobject(r"\vec{x}=\vec{y}").next_to(cerodot,DOWN)
        gcerodot = VGroup(cerodot,cerodot_label)
        # Zero-length arrow: the target when the two points collapse together.
        ceroarrow = DoubleArrow((0,0,0),(0,0,0))
        dcero = TexMobject(r"d(\vec{x},\vec{y}) = 0").shift(3*UP)
        self.play(Write(distprop1))
        self.wait()
        self.play(ShowCreation(gxdot), ShowCreation(gydot))
        self.wait(1)
        self.play(GrowArrow(arrow1),Write(dmayor))
        self.wait(2)
        self.play(ReplacementTransform(gxdot,gcerodot),ReplacementTransform(ydot,cerodot),Transform(arrow1,ceroarrow),
                  FadeOut(ydot_label))
        self.wait()
        self.play(ReplacementTransform(dmayor,dcero))
        self.wait()
        self.play(FadeOut(gcerodot),FadeOut(dcero))
        # Bug fix: manim's keyword is run_time; "runtime" was silently ignored.
        self.play(distprop1.shift, 3*UP, run_time = 1)
        self.wait()
        self.play(FadeOut(distprop1))
        ### PROPERTY 2: symmetry ###
        distprop2 = TexMobject(r"d(\vec{x},\vec{y}) = d(\vec{y},\vec{x})").shift(3*DOWN)
        adot = Dot(posa, color = RED, radius = 0.12)
        adot_label = TexMobject(r"\vec{x}").next_to(adot,DOWN)
        gadot = VGroup(adot,adot_label)
        bdot = Dot(posb, color = BLUE, radius = 0.12)
        bdot_label = TexMobject(r"\vec{y}").next_to(bdot,DOWN)
        gbdot = VGroup(bdot,bdot_label)
        ab_arrow = Arrow(adot.get_center(),bdot.get_center())
        ab_arrow_lb= TexMobject(r"d(\vec{x},\vec{y}) = "+str(distab)).next_to(ab_arrow,3*UP)
        ba_arrow = Arrow(bdot.get_center(),adot.get_center())
        ba_arrow_lb= TexMobject(r"d(\vec{y},\vec{x})= "+ str(distba)).move_to(ab_arrow_lb)
        self.play(Write(distprop2))
        self.wait()
        self.play(ShowCreation(gadot),ShowCreation(gbdot))
        self.play(GrowArrow(ab_arrow),Write(ab_arrow_lb))
        self.wait(2)
        self.play(ReplacementTransform(ab_arrow,ba_arrow), Transform(ab_arrow_lb,ba_arrow_lb))
        self.wait()
        self.play(FadeOut(gadot),FadeOut(gbdot),FadeOut(ba_arrow),FadeOut(ab_arrow_lb))
        # Bug fix: run_time, not runtime (see above).
        self.play(distprop2.shift, 3*UP, run_time = 1)
        self.wait(2)
        self.play(FadeOut(distprop2))
        ### PROPERTY 3: triangle inequality ###
        distprop3 = TexMobject(r"d(\vec{x},\vec{y}) \leq d(\vec{x},\vec{z})+d(\vec{z},\vec{y})").shift(3*DOWN)
        newxdot = Dot(posx,color = RED, radius = 0.12)
        newxdot_label = TexMobject(r"\vec{x}").next_to(newxdot,DOWN)
        gnewxdot = VGroup(newxdot,newxdot_label)
        newydot = Dot(posy,color = BLUE, radius = 0.12)
        newydot_label = TexMobject(r"\vec{y}").next_to(newydot,DOWN)
        gnewydot = VGroup(newydot,newydot_label)
        zdot = Dot(posz, color = YELLOW, radius = 0.12)
        zdot_label = TexMobject(r"\vec{z}").next_to(zdot,UP)
        gzdot = VGroup(zdot,zdot_label)
        narrow1 = DoubleArrow(newxdot.get_center(),newydot.get_center(),stroke_width = 4)
        narrow1_label = TexMobject(r"d(\vec{x},\vec{y})").next_to(narrow1.get_center(),DOWN+RIGHT)
        arrow2 = DoubleArrow(newxdot.get_center(),zdot.get_center(),stroke_width = 4)
        arrow2_label = TexMobject(r"d(\vec{x},\vec{z})").next_to(arrow2.get_center(),UP)
        garrow2 = VGroup(arrow2,arrow2_label)
        arrow3 = DoubleArrow(newydot.get_center(),zdot.get_center(),stroke_width = 4)
        arrow3_label = TexMobject(r"d(\vec{z},\vec{y})").next_to(arrow3.get_center(),2*LEFT)
        garrow3 = VGroup(arrow3,arrow3_label)
        # Lay the three distances flat to compare their lengths side by side.
        tdxz = DoubleArrow((0,0,0),(dxz,0,0), color = RED).shift(2.5*LEFT+UP)
        tdzy = DoubleArrow((0,0,0),(dzy,0,0), color = BLUE).shift(2.5*LEFT+UP+(dxz*0.9,0,0))
        tdxy = DoubleArrow((0,0,0),(dxy,0,0), color = YELLOW).shift(2.5*LEFT)
        brace1 = Brace(tdxz,UP)
        brace2 = Brace(tdzy,UP)
        bracesum = Brace(tdxy, DOWN)
        ldxz = TexMobject(r"d(\vec{x},\vec{z})").next_to(brace1.get_center(),UP)
        ldzy = TexMobject(r"d(\vec{z},\vec{y})").next_to(brace2.get_center(),UP)
        ldxy = TexMobject(r"d(\vec{x},\vec{y})").next_to(bracesum.get_center(),DOWN)
        braces = VGroup(brace1,brace2,bracesum)
        dists = VGroup(ldxz,ldzy,ldxy)
        laigualdad = TextMobject("(La igualdad se da si los tres puntos son colineales)").shift(DOWN).scale(0.8)
        revisa = TextMobject("Edita el c\\'{o}digo para visualizar con m\\'{a}s vectores")
        self.play(Write(distprop3))
        self.wait()
        self.play(ShowCreation(gnewxdot),ShowCreation(gnewydot),ShowCreation(gzdot))
        self.play(GrowArrow(arrow2),GrowArrow(arrow3))
        self.play(Write(arrow2_label),Write(arrow3_label))
        self.wait()
        self.play(GrowArrow(narrow1))
        self.play(Write(narrow1_label))
        self.wait()
        self.play(FadeOut(gnewxdot),FadeOut(gnewydot),FadeOut(gzdot))
        self.play(FadeOut(narrow1_label),FadeOut(arrow2_label),FadeOut(arrow3_label))
        self.play(ReplacementTransform(narrow1,tdxy),ReplacementTransform(arrow2,tdxz),ReplacementTransform(arrow3,tdzy))
        self.play(ShowCreation(braces),ShowCreation(dists))
        self.wait(3)
        self.play(FadeOut(dists),FadeOut(braces),FadeOut(tdxy),FadeOut(tdxz),FadeOut(tdzy))
        # Bug fix: run_time, not runtime (see above).
        self.play(distprop3.shift, 3*UP, run_time = 1)
        self.play(Write(laigualdad))
        self.wait(2)
        self.play(FadeOut(laigualdad),FadeOut(distprop3))
        self.wait()
        self.play(Write(revisa))
        self.wait()
995,969 | ffc5e1b4f15f3e6ce62701ec7558e80d6087b73c | # Generated by Django 2.1.7 on 2019-03-26 13:01
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``brief_description`` field from the ``new`` model."""

    # Must run after the previous auto-generated migration of the core app.
    dependencies = [
        ('core', '0003_auto_20190326_1223'),
    ]

    operations = [
        # Destructive: removes the column and any data it holds on migrate.
        migrations.RemoveField(
            model_name='new',
            name='brief_description',
        ),
    ]
|
995,970 | 0906f4aa56a75a4aa1f603f03bc45c66314846bf | from dsutil import NbExecuter
from dsutil.common import * |
995,971 | b0bf92fabd6bbc56fbf7fc66114558aac1b9e3bf | from app import *
# --- Seed data: venues -------------------------------------------------------
ven1 = Venue(name = 'The Musical Hop',
    genres = ["Jazz", "Reggae", "Swing", "Classical", "Folk"],
    address = '1015 Folsom Street',
    city = 'San Francisco',
    state = 'CA',
    phone = '123-123-1234',
    website = 'https://www.themusicalhop.com',
    facebook_link = 'https://www.facebook.com/TheMusicalHop',
    seeking_talent = True,
    seeking_description = 'We are on the lookout for a local artist to play every two weeks. Please call us.',
    image_link = 'https://images.unsplash.com/photo-1543900694-133f37abaaa5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=400&q=60'
    )
ven2 = Venue(name = 'The Dueling Pianos Bar',
    genres = ["Classical", "R&B", "Hip-Hop"],
    address = '335 Delancey Street',
    city = 'New York',
    state = 'NY',
    phone = '914-003-1132',
    website = 'https://www.theduelingpianos.com',
    facebook_link = 'https://www.facebook.com/theduelingpianos',
    seeking_talent = False,
    image_link = 'https://images.unsplash.com/photo-1497032205916-ac775f0649ae?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=750&q=80'
    )
ven3 = Venue(name = 'Park Square Live Music & Coffee',
    genres = ["Rock n Roll", "Jazz", "Classical", "Folk"],
    address = '34 Whiskey Moore Ave',
    city = 'San Francisco',
    state = 'CA',
    phone = '415-000-1234',
    website = 'https://www.parksquarelivemusicandcoffee.com',
    facebook_link = 'https://www.facebook.com/ParkSquareLiveMusicAndCoffee',
    seeking_talent = False,
    image_link = 'https://images.unsplash.com/photo-1485686531765-ba63b07845a7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=747&q=80'
    )
db.session.add(ven1)
db.session.add(ven2)
db.session.add(ven3)

# --- Seed data: artists ------------------------------------------------------
art1 = Artist(name = 'Guns N Petals',
    genres = ["Rock n Roll"],
    city = 'San Francisco',
    state = 'CA',
    phone = '326-123-5000',
    website = 'https://www.gunsnpetalsband.com',
    facebook_link = 'https://www.facebook.com/GunsNPetals',
    seeking_venue = True,
    seeking_description = 'Looking for shows to perform at in the San Francisco Bay Area!',
    image_link = 'https://images.unsplash.com/photo-1549213783-8284d0336c4f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=300&q=80'
    )
art2 = Artist(name = 'Matt Quevedo',
    genres = ["Jazz"],
    city = 'New York',
    state = 'NY',
    phone = '300-400-5000',
    facebook_link = 'https://www.facebook.com/mattquevedo923251523',
    seeking_venue = False,
    image_link = 'https://images.unsplash.com/photo-1495223153807-b916f75de8c5?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=334&q=80'
    )
art3 = Artist(name = 'The Wild Sax Band',
    genres = ["Jazz", "Classical"],
    city = 'San Francisco',
    state = 'CA',
    phone = '432-325-5432',
    seeking_venue = False,
    image_link = 'https://images.unsplash.com/photo-1558369981-f9ca78462e61?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=794&q=80'
    )
db.session.add(art1)
db.session.add(art2)
db.session.add(art3)
# Commit now so the venue/artist rows get their ids before we link shows.
db.session.commit()


def _add_show(start_time, venue_id, artist_id):
    """Create one Shows row linking an existing venue and artist, then commit.

    Replaces five copy-pasted show-creation blocks. Assumes the venue and
    artist rows above have already been committed so their ids exist.
    """
    show = Shows(start_time=start_time)
    db.session.add(show)
    show.artists = [Artist.query.filter_by(id=artist_id).first()]
    show.venues = [Venue.query.filter_by(id=venue_id).first()]
    db.session.commit()


# --- Seed data: shows --------------------------------------------------------
_add_show("2019-05-21T21:30:00.000Z", venue_id=1, artist_id=1)
_add_show("2035-04-01T20:00:00.000Z", venue_id=3, artist_id=3)
_add_show("2035-04-08T20:00:00.000Z", venue_id=3, artist_id=3)
_add_show("2035-04-15T20:00:00.000Z", venue_id=3, artist_id=3)
_add_show("2019-06-15T23:00:00.000Z", venue_id=3, artist_id=2)
db.session.close()
|
995,972 | 830407e09552cfb2cb0473e85960160bfe3aa607 | import os
import json
import datetime as dt
from django import forms
from django.db import models
from django.core import exceptions
from django.utils.translation import ugettext_lazy as _
from .mediaPIL import MediaPIL
from .widgets import ImagePILWidget
class ImagePILField(models.TextField):
    """TextField storing a JSON blob that describes a PIL-processed image.

    The stored JSON carries the keys ``pathway`` (image path), ``point``
    (focus point), ``quality`` and ``upload_to``. Legacy rows that stored a
    bare path string (no JSON object) are still accepted on read.
    """

    description = "Image PIL Field"

    def __init__(self, pathway="", point=(50, 50), quality=90,
                 upload_to=".", *args, **kwargs):
        self.blank = kwargs.get('blank', False)
        if pathway is None:
            pathway = ""
        # Field-level defaults, also used as the fallback when a DB value
        # is missing or unparseable.
        self.default_kwargs = {
            'pathway': pathway,
            'point': point,
            'quality': quality,
            'upload_to': upload_to,
        }
        kwargs['default'] = json.dumps(
            self.default_kwargs, ensure_ascii=False)
        super().__init__(*args, **kwargs)

    def from_db_value(self, value, expression=None, connection=None):
        """Deserialize a DB value into a kwargs dict.

        ``expression``/``connection`` default to None so this can also be
        reused by :meth:`to_python` (previously that call raised TypeError
        because both arguments were required). Copies of ``default_kwargs``
        are returned so callers can never mutate the shared field defaults.
        """
        try:
            if value is None:
                return dict(self.default_kwargs)
            if isinstance(value, str) and '{' not in value:
                # Legacy row: only the path was stored, not a JSON object.
                kw = dict(self.default_kwargs)
                kw['pathway'] = value
                return kw
            return json.loads(value)
        except (TypeError, ValueError):
            # Corrupt or unreadable value: fall back to the field defaults.
            return dict(self.default_kwargs)

    def clean(self, value, model_instance):
        """Reject an empty ``pathway`` unless the field allows blank values."""
        val = json.loads(value)
        if not val.get('pathway') and not self.blank:
            raise forms.ValidationError(
                _('This field is required'), code='invalid')
        return value

    def get_prep_value(self, value):
        """Serialize the kwargs dict to JSON text for storage."""
        if isinstance(value, str):
            return value
        return json.dumps(value, ensure_ascii=False)

    def value_to_string(self, obj):
        # Fixed: read the value through the field's own attribute name
        # instead of the hard-coded ``obj.image``, which broke serialization
        # for any model that named this field differently.
        return self.get_prep_value(self.value_from_object(obj))

    def to_python(self, value):
        return self.from_db_value(value)

    def formfield(self, **kwargs):
        # Swap the admin's plain textarea for the image-aware widget.
        widget = kwargs.get('widget')
        if 'AdminTextareaWidget' in str(widget):
            kwargs['widget'] = ImagePILWidget
        return super().formfield(**kwargs)
|
995,973 | 5cc4f6d32dcc17a5198c712bd446651e8b4b547f | # coding=utf-8
import os, sys
import time
import qrcode
# Generate a QR code from command-line data and save it as a PNG file.
usage = """
Usage:
python qrCodeGen.py <output file name> <data to put in the qrcode>
"""
def gen_qrcode(dataList, path):
    """Generate a QR code containing the given data tokens and save it as PNG.

    Args:
        dataList: iterable of strings. Tokens are joined with single spaces;
            the historical trailing space is kept so the encoded payload is
            unchanged.
        path: desired output file name. If it already exists, a numeric
            suffix "(1)", "(2)", ... is inserted before the extension.
    """
    qr = qrcode.QRCode(5, error_correction=qrcode.constants.ERROR_CORRECT_L)
    # Byte-identical to the old concatenation loop (each token + one space).
    data = "".join(d + " " for d in dataList)
    qr.add_data(data)
    qr.make()
    im = qr.make_image()
    # Avoid clobbering an existing file by inserting "(n)" before the
    # extension. os.path.splitext replaces the old path.split(".") logic,
    # which mangled names with multiple dots or dotted directories.
    root, ext = os.path.splitext(path)
    count = 1
    while os.path.isfile(path):
        path = "{}({}){}".format(root, count, ext)
        count += 1
    # save the image out
    im.save(path, format='png')
    # print that its been successful
    print("QRCode has been generated under {0}".format(path))
if __name__ == '__main__':
    # Require both an output file name (argv[1]) and at least one data
    # token (argv[2:]); the old "< 2" check let the script run with an
    # empty payload.
    if len(sys.argv) < 3:
        print(usage)
    else:
        gen_qrcode(sys.argv[2:], sys.argv[1])
995,974 | f831c7dae183042d0619a63210f7a1916dd06a50 | from typing import Any, Awaitable, Callable, Iterable, List, NoReturn, Optional, TypeVar
from expression.collections import seq
from expression.core import (
MailboxProcessor,
Option,
TailCall,
TailCallResult,
aiotools,
compose,
match,
pipe,
tailrec_async,
)
from expression.system.disposable import AsyncDisposable
from .combine import zip_seq
from .notification import Notification, OnCompleted, OnError, OnNext
from .observables import AsyncAnonymousObservable
from .observers import (
AsyncAnonymousObserver,
AsyncNotificationObserver,
auto_detach_observer,
)
from .transform import map, transform
from .types import AsyncObservable, AsyncObserver
_TSource = TypeVar("_TSource")
_TResult = TypeVar("_TResult")
def choose_async(
    chooser: Callable[[_TSource], Awaitable[Option[_TResult]]]
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TResult]]:
    """Map each element through an async option-returning chooser,
    forwarding only the values of non-empty options."""

    async def _on_next(
        send: Callable[[_TResult], Awaitable[None]], value: _TSource
    ) -> None:
        chosen = await chooser(value)
        # Option.to_list() is empty for Nothing, single-valued for Some.
        for item in chosen.to_list():
            await send(item)

    return transform(_on_next)
def choose(
    chooser: Callable[[_TSource], Option[_TResult]]
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TResult]]:
    """Synchronous variant of choose_async: forward the value inside a
    Some, drop Nothing results entirely."""

    def _on_next(
        send: Callable[[_TResult], Awaitable[None]], value: _TSource
    ) -> Awaitable[None]:
        # At most one element: return the send awaitable for Some,
        # otherwise fall through to an empty (no-op) awaitable.
        for item in chooser(value).to_list():
            return send(item)
        return aiotools.empty()

    return transform(_on_next)
def filter_async(
    predicate: Callable[[_TSource], Awaitable[bool]]
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    """Filter with an asynchronous predicate.

    Returns a stream transformer that forwards only those elements of the
    source for which the awaited predicate is true.

    Args:
        predicate: async function deciding whether to keep an element.

    Returns:
        A transformer from AsyncObservable[_TSource] to itself.
    """

    async def _on_next(
        send: Callable[[_TSource], Awaitable[None]], value: _TSource
    ) -> None:
        keep = await predicate(value)
        if keep:
            await send(value)

    return transform(_on_next)
def filter(
    predicate: Callable[[_TSource], bool]
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    """Forward only the elements for which *predicate* returns true."""

    def _on_next(
        send: Callable[[_TSource], Awaitable[None]], value: _TSource
    ) -> Awaitable[None]:
        return send(value) if predicate(value) else aiotools.empty()

    return transform(_on_next)
def starfilter(
    predicate: Callable[..., bool]
) -> Callable[[AsyncObservable[Any]], AsyncObservable[Any]]:
    """Like filter, but unpacks each (tuple-like) element into the predicate.

    Returns:
        An observable sequence containing the elements of the source for
        which the spread predicate holds.
    """

    def _on_next(
        send: Callable[[Iterable[Any]], Awaitable[None]], packed: Iterable[Any]
    ) -> Awaitable[None]:
        # The element itself is forwarded unchanged; only the test is
        # performed on the unpacked values.
        return send(packed) if predicate(*packed) else aiotools.empty()

    return transform(_on_next)
def filteri(
    predicate: Callable[[_TSource, int], bool]
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    # Indexed filter: pair every element with its position by zipping the
    # stream with an infinite counter, filter on (value, index) via
    # starfilter, then project the value back out with seq.head.
    ret = compose(
        zip_seq(seq.infinite),
        starfilter(predicate),
        map(seq.head),
    )
    return ret
def distinct_until_changed(
    source: AsyncObservable[_TSource],
) -> AsyncObservable[_TSource]:
    """Distinct until changed.

    Return an observable sequence only containing the distinct
    contiguous elements from the source sequence. Equality of
    consecutive elements is decided by comparing the wrapping
    Notification objects (``n == latest``).

    Args:
        source (AsyncObservable[TSource]): the source sequence.

    Returns:
        Async observable with only contiguous distinct elements.
    """

    async def subscribe_async(aobv: AsyncObserver[_TSource]) -> AsyncDisposable:
        safe_obv, auto_detach = auto_detach_observer(aobv)

        # A mailbox agent serializes notifications so the "latest seen"
        # state is updated without races.
        async def worker(inbox: MailboxProcessor[Notification[_TSource]]) -> None:
            @tailrec_async
            async def message_loop(
                latest: Notification[_TSource],
            ) -> "TailCallResult[NoReturn, [Notification[_TSource]]]":
                n = await inbox.receive()

                async def get_latest() -> Notification[_TSource]:
                    with match(n) as case:
                        for x in case(OnNext[_TSource]):
                            # Suppress the element if it equals the
                            # previously forwarded notification.
                            if n == latest:
                                break
                            try:
                                await safe_obv.asend(x)
                            except Exception as ex:
                                # A failing downstream send is converted
                                # into an error on the output stream.
                                await safe_obv.athrow(ex)
                            break
                        for err in case(OnError[_TSource]):
                            await safe_obv.athrow(err)
                            break
                        while case(OnCompleted):
                            await safe_obv.aclose()
                            break
                    # The received notification becomes the new "latest".
                    return n

                latest = await get_latest()
                # Tail call keeps the loop running without stack growth.
                return TailCall[Notification[_TSource]](latest)

            await message_loop(
                OnCompleted
            )  # Use as sentinel value as it will not match any OnNext value

        agent = MailboxProcessor.start(worker)

        # Every incoming notification is just posted to the agent.
        async def notification(n: Notification[_TSource]) -> None:
            agent.post(n)

        obv: AsyncObserver[_TSource] = AsyncNotificationObserver(notification)
        return await pipe(obv, source.subscribe_async, auto_detach)

    return AsyncAnonymousObservable(subscribe_async)
def skip(
    count: int,
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    """Bypass the first *count* elements of the source sequence, then
    forward all remaining elements unchanged.

    Args:
        count: number of leading elements to drop.

    Returns:
        A transformer from AsyncObservable[_TSource] to itself.
    """

    def _skip(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:
        async def subscribe_async(
            downstream: AsyncObserver[_TSource],
        ) -> AsyncDisposable:
            safe_obv, auto_detach = auto_detach_observer(downstream)
            pending = count

            async def asend(value: _TSource) -> None:
                nonlocal pending
                # Swallow elements while the skip budget lasts.
                if pending > 0:
                    pending -= 1
                else:
                    await safe_obv.asend(value)

            obv = AsyncAnonymousObserver(asend, safe_obv.athrow, safe_obv.aclose)
            return await pipe(obv, source.subscribe_async, auto_detach)

        return AsyncAnonymousObservable(subscribe_async)

    return _skip
def skip_last(
    count: int,
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    """Bypass the last *count* elements of the source sequence.

    Elements are buffered; once the buffer holds more than *count*
    elements the oldest one is emitted. When the source completes, the
    buffered tail (the final *count* elements) is simply discarded.
    """

    def _skip_last(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:
        async def subscribe_async(observer: AsyncObserver[_TSource]) -> AsyncDisposable:
            safe_obv, auto_detach = auto_detach_observer(observer)
            q: List[_TSource] = []

            async def asend(value: _TSource) -> None:
                q.append(value)
                # Bug fix: emit based on buffer length instead of testing
                # the popped element against None — the old check silently
                # dropped legitimate None values carried by the stream.
                if len(q) > count:
                    await safe_obv.asend(q.pop(0))

            obv = AsyncAnonymousObserver(asend, safe_obv.athrow, safe_obv.aclose)
            return await pipe(obv, source.subscribe_async, auto_detach)

        return AsyncAnonymousObservable(subscribe_async)

    return _skip_last
def take(
    count: int,
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    """Forward at most the first *count* elements, then close the stream.

    Raises:
        ValueError: if *count* is negative.
    """
    if count < 0:
        raise ValueError("Count cannot be negative.")

    def _take(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:
        async def subscribe_async(
            downstream: AsyncObserver[_TSource],
        ) -> AsyncDisposable:
            safe_obv, auto_detach = auto_detach_observer(downstream)
            left = count

            async def asend(value: _TSource) -> None:
                nonlocal left
                # Ignore anything past the quota (the close below may not
                # stop a fast producer immediately).
                if left <= 0:
                    return
                left -= 1
                await safe_obv.asend(value)
                if left == 0:
                    await safe_obv.aclose()

            obv = AsyncAnonymousObserver(asend, safe_obv.athrow, safe_obv.aclose)
            return await pipe(obv, source.subscribe_async, auto_detach)

        return AsyncAnonymousObservable(subscribe_async)

    return _take
def take_last(
    count: int,
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    """Take the last elements from the stream.

    Keeps a rolling buffer of the final *count* elements and emits them,
    in order, only when the source completes.

    Args:
        count: Number of trailing elements to take.
    """

    def _take_last(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:
        async def subscribe_async(aobv: AsyncObserver[_TSource]) -> AsyncDisposable:
            safe_obv, auto_detach = auto_detach_observer(aobv)
            tail: List[_TSource] = []

            async def asend(value: _TSource) -> None:
                # Rolling window: drop the oldest once over capacity.
                tail.append(value)
                if len(tail) > count:
                    tail.pop(0)

            async def aclose() -> None:
                # Flush the retained tail, then propagate completion.
                for value in tail:
                    await safe_obv.asend(value)
                await safe_obv.aclose()

            obv = AsyncAnonymousObserver(asend, safe_obv.athrow, aclose)
            return await pipe(obv, source.subscribe_async, auto_detach)

        return AsyncAnonymousObservable(subscribe_async)

    return _take_last
def take_until(
    other: AsyncObservable[Any],
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    """Take elements until other.

    Returns the values from the source observable sequence until the
    other observable sequence produces a value.

    Args:
        other: The other async observable whose first emission stops
            the output stream.

    Returns:
        A transformer from AsyncObservable[_TSource] to itself.
    """

    def _take_until(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:
        async def subscribe_async(aobv: AsyncObserver[_TSource]) -> AsyncDisposable:
            safe_obv, auto_detach = auto_detach_observer(aobv)

            # Any emission from `other` closes the output; the emitted
            # value itself is deliberately ignored.
            async def asend(value: _TSource) -> None:
                await safe_obv.aclose()

            obv = AsyncAnonymousObserver(asend, safe_obv.athrow)
            # `other` is subscribed before `source` — presumably so an
            # immediately-firing `other` can terminate the stream before
            # any source elements flow; confirm against library tests.
            sub2 = await pipe(obv, other.subscribe_async)
            sub1 = await pipe(safe_obv, source.subscribe_async, auto_detach)
            return AsyncDisposable.composite(sub1, sub2)

        return AsyncAnonymousObservable(subscribe_async)

    return _take_until
def slice(
    start: Optional[int] = None, stop: Optional[int] = None, step: int = 1
) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:
    """Slices the given source stream.

    It is basically a wrapper around skip(), skip_last(), take(),
    take_last() and filter().

    This marble diagram helps you remember how slices works with
    streams. Positive numbers is relative to the start of the events,
    while negative numbers are relative to the end (on_completed) of the
    stream.

    ```
    r---e---a---c---t---i---v---e---|
    0   1   2   3   4   5   6   7   8
   -8  -7  -6  -5  -4  -3  -2  -1
    ```

    Example:
        >>> result = slice(1, 10, source)
        >>> result = slice(1, -2, source)
        >>> result = slice(1, -1, 2, source)

    Args:
        start: Number of elements to skip of take last
        stop: Last element to take of skip last
        step: Takes every step element. Must be larger than zero

    Returns:
        A sliced source stream.
    """

    def _slice(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:
        # `start` is rebound below (normalized to 0 when computing a
        # positive-stop window), hence the nonlocal declaration.
        nonlocal start
        if start is not None:
            if start < 0:
                # Negative start: keep only the last |start| elements.
                source = pipe(source, take_last(abs(start)))
            else:
                source = pipe(source, skip(start))
        if stop is not None:
            if stop > 0:
                # Positive stop: window size is stop - start elements.
                start = start or 0
                source = pipe(source, take(stop - start))
            else:
                # Negative stop: drop the final |stop| elements.
                source = pipe(source, skip_last(abs(stop)))
        if step is not None:
            if step > 1:
                # Keep every step-th element by index.
                mapper: Callable[[Any, int], bool] = lambda _, i: i % step == 0
                xs = pipe(source, filteri(mapper))
                source = xs
            elif step < 0:
                # Reversing streams is not supported
                raise TypeError("Negative step not supported.")
        return source

    return _slice
|
995,975 | 9d4da43063aa287ff2f935ea4d6083419c57bac4 | from sklearn.neighbors import LocalOutlierFactor
from pyod.models.iforest import IForest
from pyod.models.hbos import HBOS
from pyod.models.loda import LODA
from pyod.models.copod import COPOD
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
import ast
import eval.evaluation_utils as utils
from sklearn import metrics
from config import eva_root
def evaluation_od_train(x, y, data_name, model_name="iforest", chosen_subspace=None):
    """
    Using an anomaly detector to yield an anomaly score for each subspace.
    Generates two files: the subspaces with the highest anomaly score and
    the per-subspace scores for each anomaly.

    :param x: data matrix
    :param y: class information (1 marks anomalies)
    :param data_name: the data set name, used for naming the output files
    :param model_name: anomaly detector name, one of "iforest", "copod",
        "hbos" (default: iforest)
    :param chosen_subspace: use this to only evaluate a subset of the
        power set of the full feature space
    :return: (anomaly_score_df, g_truth_df) — the per-subspace score
        frame and a ground-truth frame mapping each anomaly index to the
        feature subspace where it scores highest.
    """
    dim = x.shape[1]
    ano_idx = np.where(y == 1)[0]
    n_ano = len(ano_idx)

    # All candidate feature subsets (or the caller-supplied list).
    f_subsets = utils.get_subset_candidate(dim, chosen_subspace)
    n_subsets = len(f_subsets)

    # Dispatch table replaces the old if/elif chain; unknown names fail
    # fast before any scoring work is done.
    detectors = {"iforest": IForest, "copod": COPOD, "hbos": HBOS}
    if model_name not in detectors:
        raise ValueError("unsupported od model")

    # Score the anomalies in every candidate subspace.
    score_matrix = np.zeros([n_ano, n_subsets])
    for i in tqdm(range(n_subsets)):
        x_subset = x[:, f_subsets[i]]
        clf = detectors[model_name]()
        clf.fit(x_subset)
        od_score = utils.min_max_norm(clf.decision_scores_)
        score_matrix[:, i] = od_score[ano_idx]

    out_dir = eva_root + "data_od_evaluation/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Persist the score matrix with ano_idx as the first column.
    anomaly_score_df = pd.DataFrame(
        data=score_matrix, columns=[str(s) for s in f_subsets])
    col_name = anomaly_score_df.columns.tolist()
    col_name.insert(0, 'ano_idx')
    anomaly_score_df["ano_idx"] = ano_idx
    anomaly_score_df = anomaly_score_df.reindex(columns=col_name)
    path1 = out_dir + data_name + "_score_" + model_name + ".csv"
    anomaly_score_df.to_csv(path1, index=False)

    # Ground truth: for each anomaly, the subspace with the highest score.
    g_truth_df = pd.DataFrame(columns=["ano_idx", "exp_subspace"])
    exp_subspaces = [
        str(f_subsets[int(np.argmax(ano_score))]) for ano_score in score_matrix
    ]
    g_truth_df["ano_idx"] = ano_idx
    g_truth_df["exp_subspace"] = exp_subspaces
    # Bug fix: astype() returns a new frame; the old code discarded the
    # result, leaving the column dtype unchanged.
    g_truth_df = g_truth_df.astype({"exp_subspace": "object"})
    path2 = out_dir + data_name + "_gt_" + model_name + ".csv"
    g_truth_df.to_csv(path2, index=False)
    return anomaly_score_df, g_truth_df
def evaluation_od(exp_subspace_list, x, y, data_name, model_name):
    """Evaluate explanation subspaces against od-derived ground truth.

    For each anomaly, compares the proposed explanation subspace with the
    ground-truth subspace (the one where the detector scores the anomaly
    highest), generating the annotation file first if it does not exist.

    :param exp_subspace_list: explanation feature subspace for each
        anomaly, ordered like the anomaly indices in ``y``
    :param x: data set
    :param y: label (1 marks anomalies)
    :param data_name: name of the dataset
    :param model_name: anomaly detector used to generate ground truth
    :return: mean precision, mean recall, mean jaccard
    """
    gt_path = eva_root + "data_od_evaluation/" + data_name + "_gt_" + model_name + ".csv"
    if os.path.exists(gt_path):
        g_truth_df = pd.read_csv(gt_path)
    else:
        print("annotation file not found, labeling now...")
        _, g_truth_df = evaluation_od_train(x, y, data_name, model_name)

    ano_idx = np.where(y == 1)[0]
    n = len(ano_idx)
    precision_list = np.zeros(n)
    jaccard_list = np.zeros(n)
    recall_list = np.zeros(n)

    for ii, ano in enumerate(ano_idx):
        explained = list(exp_subspace_list[ii])
        gt_str = g_truth_df.loc[g_truth_df["ano_idx"] == ano]["exp_subspace"].values[0]
        truth = set(ast.literal_eval(gt_str))
        overlap = truth & set(explained)
        union = truth | set(explained)
        precision_list[ii] = len(overlap) / len(explained)
        jaccard_list[ii] = len(overlap) / len(union)
        recall_list[ii] = len(overlap) / len(truth)

    return precision_list.mean(), recall_list.mean(), jaccard_list.mean()
def evaluation_od_auc(feature_weight, x, y, data_name, model_name="iforest"):
    """Score per-feature explanation weights against od ground truth.

    Treats each anomaly's ground-truth subspace as a binary label vector
    over the features and the corresponding row of *feature_weight* as
    the score vector, generating the annotation file first if needed.

    :param feature_weight: per-anomaly feature weight vectors, ordered
        like the anomaly indices in ``y``
    :param x: data set
    :param y: label (1 marks anomalies)
    :param data_name: name of the dataset
    :param model_name: anomaly detector used to generate ground truth
    :return: mean AUPR, mean AUROC over all anomalies
    """
    gt_path = eva_root + "data_od_evaluation/" + data_name + "_gt_" + model_name + ".csv"
    if os.path.exists(gt_path):
        g_truth_df = pd.read_csv(gt_path)
    else:
        print("annotation file not found, labeling now...")
        _, g_truth_df = evaluation_od_train(x, y, data_name, model_name)

    ano_idx = np.where(y == 1)[0]
    dim = x.shape[1]
    auroc_list = np.zeros(len(ano_idx))
    aupr_list = np.zeros(len(ano_idx))

    for ii, ano in enumerate(ano_idx):
        score = feature_weight[ii]
        gt_str = g_truth_df.loc[g_truth_df["ano_idx"] == ano]["exp_subspace"].values[0]
        gt_subspace = ast.literal_eval(gt_str)
        gt = np.zeros(dim, dtype=int)
        gt[gt_subspace] = 1
        if len(gt_subspace) == dim:
            # Degenerate case: every feature is ground truth, so ROC/PR
            # are undefined with a single class — count it as perfect.
            aupr_list[ii] = 1
            auroc_list[ii] = 1
        else:
            precision, recall, _ = metrics.precision_recall_curve(gt, score)
            aupr_list[ii] = metrics.auc(recall, precision)
            auroc_list[ii] = metrics.roc_auc_score(gt, score)

    return aupr_list.mean(), auroc_list.mean()
|
995,976 | 6adb5729a13d8d47b3bc043fc17cd7f6c446034b | #!/usr/bin/env python
###Startup and commandline file
from services import service, simple_network, Topology_Service, map_reduce, cfs
##import all current services
import node
import hash_util
#import utility functions
#python2.7 builtins
import random
import time
import Queue
import os
import threading
import sys
import json
from urllib2 import urlopen
local_mode = False  # for local testing only: skip the external-IP lookup
startdelay = 0.0  # seconds to block and do nothing on start, useful to avoid race conditions
if startdelay:
    print "starting and waiting"
    time.sleep(startdelay)
    print "done waiting"
# get my IP and port combo
def myIP():
    # Resolve this host's public IP by asking an external echo service;
    # in local_mode skip the network round-trip and use loopback.
    if not local_mode:
        myip = json.load(urlopen('http://httpbin.org/ip'))['origin']##REPLACE WITH SOMETHING BETTER IF IT EXISTS
        print "just got my ip:", myip
        return myip
    else:
        return "127.0.0.1"
# backwards-compatibility use of global vars...encapsulation is easily
# possible by ensuring all functionality lives in a service with a reference
# to router which would then be instantiated in main()
# Global registries shared by the console and service plumbing:
# service_id -> service, command name -> owning service, command -> help.
services = {}
commands = {}
help_texts = {}
# adds services to the services list
def add_service(service_object):
    """Register *service_object* in the global services map, keyed by
    its service_id."""
    services[service_object.service_id] = service_object
# attaches the services in the services list to the node
# attaches associated to the console
def attach_services():
    # Attach every registered service to the node and wire up its console
    # commands. attach_to_console() may return either a list of command
    # names or a dict mapping command -> help text; the try/except below
    # tolerates both shapes (indexing a list with a string command name
    # raises TypeError, which is deliberately swallowed).
    for s_name in services.keys():
        node.add_service(services[s_name])
        commands_list = services[s_name].attach_to_console()
        if not commands_list is None:
            for c in commands_list:
                commands[c] = services[s_name]
                try:
                    if commands_list[c]:
                        help_texts[c] = commands_list[c]
                except TypeError:
                    pass
# Creates the services
# Add new services here in this method
def setup_Node(addr="localhost", port=None):
    # Configure this node's identity, start the network service and
    # register all application services.
    # Add new services here in this method.
    node.IPAddr = addr
    node.ctrlPort = port
    node.thisNode = node.Node_Info(node.IPAddr, node.ctrlPort)
    # Setup and attach the network service.
    # Unlike the others, this one is not added to `services`.
    node.net_server = simple_network.NETWORK_SERVICE("", node.ctrlPort)
    #node.net_server = dummy_network.start(node.thisNode, node.handle_message)
    #### setup services here
    add_service(service.Internal_Service())
    add_service(service.ECHO_service())
    add_service(Topology_Service.Topology())
    add_service(map_reduce.Map_Reduce_Service())
    add_service(cfs.getCFSsingleton())
    #add_service(httpservice.WEBSERVICE(database))
    attach_services()
def join_ring(node_name, node_port):
    # Join an existing ring via the given bootstrap node.
    othernode = node.Node_Info(node_name, node_port)
    node.join(othernode)
def no_join():
    # No bootstrap node supplied: start a fresh ring with this node alone.
    node.create()
# This function runs a loop
# Interprets user input
def console():
cmd = "-"
loaded_script = Queue.Queue()
try:
if loaded_script.empty():
cmd = raw_input()
else:
cmd = loaded_script.get()
loaded_script.task_done()
except EOFError: #the user does not have a terminal
pass
while not ( cmd == "q" or cmd == "Q"):
command, args = None, None
splitted = cmd.split(' ',1)
if len(splitted) >= 1:
command = splitted[0]
if len(splitted) == 2:
args = splitted[1]
if command == "test":
CFS = cfs.getCFSsingleton()
a = cfs.Data_Atom("HELLOO WORLD")
CFS.putChunk(a)
time.sleep(1)
print CFS.getChunk(hash_util.Key(str(a.hashkeyID)))
if command.lower() == "help" or command == "?":#USER NEEDS HELP
if not args or args == "list":
print "Help is availible on the following topics:"
for h in help_texts:
print "\t"+h
print "use: help <topic> \n to get more help"
else:
if args in help_texts:
print help_texts[args]
else:
print "I have no help on "+args
elif command in commands.keys():
mytarget = lambda: commands[command].handle_command(command, args)
t = threading.Thread(target=mytarget)
t.daemon = True
t.start()
elif command == "run":
file2open = file(args,"r")
for l in file2open:
loaded_script.put(l)
file2open.close()
elif command == "stat":
input_size = node.todo.qsize();
print "backlog: "+str(input_size)
if input_size > 0:
print threading.activeCount(), "Active threads. Cheating, spawning new worker."
t = threading.Thread(target=node.message_handler_worker)
t.setDaemon(True)
t.start()
elif command == "threads":
for t in threading.enumerate():
print t
elif command == "num_threads":
print threading.activeCount()
else:
print "successor ", node.successor
print "predecessor", node.predecessor
try:
if loaded_script.empty():
cmd = raw_input()
else:
cmd = loaded_script.get()
loaded_script.task_done()
except EOFError: # the user does not have a terminal
#print "I do not see a terminal!"
time.sleep(1)
pass
node.net_server.stop()
os.exit()
def main():
    # Entry point: resolve our address, parse the optional bootstrap node
    # from argv, build the node and hand control to the console.
    myip = myIP()
    node.IPAddr = myip
    # argv[1]: local control port; "?" or absent picks a random port in
    # 9000-9999 so multiple instances can share a host.
    args = sys.argv
    if len(args) > 1 and args[1] != "?":
        local_port = int(args[1])
    else:
        local_port = random.randint(9000, 9999)
    # argv[2]/argv[3]: IP and port of an existing ring member, if any.
    other_IP = args[2] if len(args) > 2 else None
    other_port = int(args[3]) if len(args) > 3 else None
    # Setup my node
    setup_Node(addr=myip, port=local_port)
    # If we were provided the info of another node, join its ring;
    # otherwise start a new ring.
    if not other_IP is None and not other_port is None:
        join_ring(other_IP, other_port)
    else:
        no_join()
    # Start the node services and the console
    node.startup()
    console()
# Script entry point.
if __name__ == "__main__":
    main()
|
995,977 | 23219ca71e6e79a65c478fc63117c93f8c765c4a | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .graph_implementation import (
simpleGraphImpl,
simpleDistributedGraphImpl,
npartiteGraphImpl,
)
import cudf
import dask_cudf
from cugraph.utilities.utils import import_optional
pd = import_optional("pandas")
# TODO: Move to utilities
def null_check(input_data):
    """Raise ValueError if *input_data* contains any NULL (NA) values.

    Accepts cudf/dask_cudf Series and DataFrames; dask results are lazy,
    so the any() reduction must be computed before testing it.
    """
    contains_null = input_data.isna().values.any()
    if isinstance(input_data, (dask_cudf.Series, dask_cudf.DataFrame)):
        contains_null = contains_null.compute()
    if contains_null:
        raise ValueError("Series/DataFrame contains NULL values")
class Graph:
"""
A GPU Graph Object (Base class of other graph types)
Parameters
----------
m_graph : cuGraph.MultiGraph object or None (default=None)
Initialize the graph from a cugraph.MultiGraph object
directed : boolean, optional (default=False)
Indicated is the graph is directed.
Examples
--------
>>> # undirected graph
>>> G = cugraph.Graph()
>>> # directed graph
>>> G = cugraph.Graph(directed=True)
"""
class Properties:
def __init__(self, directed):
self.directed = directed
self.weights = False
def __init__(self, m_graph=None, directed=False):
self._Impl = None
self.graph_properties = Graph.Properties(directed)
if m_graph is not None:
if isinstance(m_graph, MultiGraph):
elist = m_graph.view_edge_list()
if m_graph.is_weighted():
weights = m_graph.weight_column
else:
weights = None
self.from_cudf_edgelist(
elist,
source=m_graph.source_columns,
destination=m_graph.destination_columns,
edge_attr=weights,
)
else:
raise TypeError(
"m_graph can only be an instance of a "
f"cugraph.MultiGraph, got {type(m_graph)}"
)
def __getattr__(self, name):
"""
__getattr__() is called automatically by python when an attribute does not
exist. Since this class is attempting to hide the internal `_Impl` object,
which is intended to contain many of the attributes needed by this class,
__getattr__ is used to "pass through" attribute access to _Impl and make it
appear as if the _Impl attributes are contained in this class.
"""
if name == "_Impl":
raise AttributeError(name)
if hasattr(self._Impl, name):
return getattr(self._Impl, name)
# FIXME: Remove access to Impl properties
elif hasattr(self._Impl.properties, name):
return getattr(self._Impl.properties, name)
else:
raise AttributeError(name)
def __dir__(self):
return dir(self._Impl)
def from_cudf_edgelist(
self,
input_df,
source="source",
destination="destination",
edge_attr=None,
weight=None,
edge_id=None,
edge_type=None,
renumber=True,
store_transposed=False,
legacy_renum_only=False,
):
"""
Initialize a graph from the edge list. It is an error to call this
method on an initialized Graph object. The passed input_df argument
wraps gdf_column objects that represent a graph using the edge list
format. source argument is source column name and destination argument
is destination column name.
By default, renumbering is enabled to map the source and destination
vertices into an index in the range [0, V) where V is the number
of vertices. If the input vertices are a single column of integers
in the range [0, V), renumbering can be disabled and the original
external vertex ids will be used.
If weights are present, edge_attr argument is the weights column name.
Parameters
----------
input_df : cudf.DataFrame or dask_cudf.DataFrame
A DataFrame that contains edge information If a dask_cudf.DataFrame
is passed it will be reinterpreted as a cudf.DataFrame. For the
distributed path please use from_dask_cudf_edgelist.
source : str or array-like, optional (default='source')
source column name or array of column names
destination : str or array-like, optional (default='destination')
destination column name or array of column names
edge_attr : str or List[str], optional (default=None)
Names of the edge attributes. Can either be a single string
representing the weight column name, or a list of length 3
holding [weight, edge_id, edge_type]. If this argument is
provided, then the weight/edge_id/edge_type arguments must
be left empty.
weight : str, optional (default=None)
Name of the weight column in the input dataframe.
edge_id : str, optional (default=None)
Name of the edge id column in the input dataframe.
edge_type : str, optional (default=None)
Name of the edge type column in the input dataframe.
renumber : bool, optional (default=True)
Indicate whether or not to renumber the source and destination
vertex IDs.
store_transposed : bool, optional (default=False)
If True, stores the transpose of the adjacency matrix. Required
for certain algorithms.
legacy_renum_only : bool, optional (default=False)
If True, skips the C++ renumbering step. Must be true for
pylibcugraph algorithms. Must be false for algorithms
not yet converted to the pylibcugraph C API.
This parameter is deprecated and will be removed.
Examples
--------
>>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'],
... header=None)
>>> G = cugraph.Graph()
>>> G.from_cudf_edgelist(df, source='0', destination='1',
... edge_attr='2', renumber=False)
"""
if self._Impl is None:
self._Impl = simpleGraphImpl(self.graph_properties)
elif type(self._Impl) is not simpleGraphImpl:
raise RuntimeError("Graph is already initialized")
elif self._Impl.edgelist is not None or self._Impl.adjlist is not None:
raise RuntimeError("Graph already has values")
self._Impl._simpleGraphImpl__from_edgelist(
input_df,
source=source,
destination=destination,
edge_attr=edge_attr,
weight=weight,
edge_id=edge_id,
edge_type=edge_type,
renumber=renumber,
store_transposed=store_transposed,
legacy_renum_only=legacy_renum_only,
)
def from_cudf_adjlist(
self,
offset_col,
index_col,
value_col=None,
renumber=True,
store_transposed=False,
):
"""
Initialize a graph from the adjacency list. It is an error to call this
method on an initialized Graph object. The passed offset_col and
index_col arguments wrap gdf_column objects that represent a graph
using the adjacency list format.
If value_col is None, an unweighted graph is created. If value_col is
not None, a weighted graph is created.
Undirected edges must be stored as directed edges in both directions.
Parameters
----------
offset_col : cudf.Series
This cudf.Series wraps a gdf_column of size V + 1 (V: number of
vertices). The gdf column contains the offsets for the vertices in
this graph.
Offsets must be in the range [0, E] (E: number of edges)
index_col : cudf.Series
This cudf.Series wraps a gdf_column of size E (E: number of edges).
The gdf column contains the destination index for each edge.
Destination indices must be in the range [0, V)
(V: number of vertices).
value_col : cudf.Series, optional (default=None)
This pointer can be ``None``. If not, this cudf.Series wraps a
gdf_column of size E (E: number of edges). The gdf column contains
the weight value for each edge. The expected type of
the gdf_column element is floating point number.
renumber : bool, optional (default=True)
Indicate whether or not to renumber the source and destination
vertex IDs.
store_transposed : bool, optional (default=False)
If True, stores the transpose of the adjacency matrix. Required
for certain algorithms.
Examples
--------
>>> gdf = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'],
... header=None)
>>> M = gdf.to_pandas()
>>> M = scipy.sparse.coo_matrix((M['2'],(M['0'],M['1'])))
>>> M = M.tocsr()
>>> offsets = cudf.Series(M.indptr)
>>> indices = cudf.Series(M.indices)
>>> G = cugraph.Graph()
>>> G.from_cudf_adjlist(offsets, indices, None)
"""
if self._Impl is None:
self._Impl = simpleGraphImpl(self.graph_properties)
elif type(self._Impl) is not simpleGraphImpl:
raise RuntimeError("Graph is already initialized")
elif self._Impl.edgelist is not None or self._Impl.adjlist is not None:
raise RuntimeError("Graph already has values")
self._Impl._simpleGraphImpl__from_adjlist(offset_col, index_col, value_col)
def from_dask_cudf_edgelist(
self,
input_ddf,
source="source",
destination="destination",
edge_attr=None,
weight=None,
edge_id=None,
edge_type=None,
renumber=True,
store_transposed=False,
legacy_renum_only=False,
):
"""
Initializes the distributed graph from the dask_cudf.DataFrame
edgelist. Undirected Graphs are not currently supported.
By default, renumbering is enabled to map the source and destination
vertices into an index in the range [0, V) where V is the number
of vertices. If the input vertices are a single column of integers
in the range [0, V), renumbering can be disabled and the original
external vertex ids will be used.
Note that the graph object will store a reference to the
dask_cudf.DataFrame provided.
Parameters
----------
input_ddf : dask_cudf.DataFrame
The edgelist as a dask_cudf.DataFrame
source : str or array-like, optional (default='source')
Source column name or array of column names
destination : str, optional (default='destination')
Destination column name or array of column names
edge_attr : str or List[str], optional (default=None)
Names of the edge attributes. Can either be a single string
representing the weight column name, or a list of length 3
holding [weight, edge_id, edge_type]. If this argument is
provided, then the weight/edge_id/edge_type arguments must
be left empty.
weight : str, optional (default=None)
Name of the weight column in the input dataframe.
edge_id : str, optional (default=None)
Name of the edge id column in the input dataframe.
edge_type : str, optional (default=None)
Name of the edge type column in the input dataframe.
renumber : bool, optional (default=True)
If source and destination indices are not in range 0 to V where V
is number of vertices, renumber argument should be True.
store_transposed : bool, optional (default=False)
If True, stores the transpose of the adjacency matrix. Required
for certain algorithms.
legacy_renum_only : bool, optional (default=False)
If True, skips the C++ renumbering step. Must be true for
pylibcugraph algorithms. Must be false for algorithms
not yet converted to the pylibcugraph C API.
This parameter is deprecated and will be removed.
"""
if self._Impl is None:
self._Impl = simpleDistributedGraphImpl(self.graph_properties)
elif type(self._Impl) is not simpleDistributedGraphImpl:
raise RuntimeError("Graph is already initialized")
elif self._Impl.edgelist is not None:
raise RuntimeError("Graph already has values")
self._Impl._simpleDistributedGraphImpl__from_edgelist(
input_ddf,
source=source,
destination=destination,
edge_attr=edge_attr,
weight=weight,
edge_id=edge_id,
edge_type=edge_type,
renumber=renumber,
store_transposed=store_transposed,
legacy_renum_only=legacy_renum_only,
)
# Move to Compat Module
def from_pandas_edgelist(
    self,
    pdf,
    source="source",
    destination="destination",
    edge_attr=None,
    weight=None,
    edge_id=None,
    edge_type=None,
    renumber=True,
):
    """Initialize the graph from a pandas edge list.

    Thin wrapper that converts ``pdf`` to a ``cudf.DataFrame`` and
    delegates to :meth:`from_cudf_edgelist`; see that method for the
    full parameter semantics. Calling this on an already-initialized
    graph is an error.

    Parameters
    ----------
    pdf : pandas.DataFrame
        A DataFrame that contains edge information.
    source : str or array-like, optional (default='source')
        Source column name or array of column names.
    destination : str or array-like, optional (default='destination')
        Destination column name or array of column names.
    edge_attr : str or List[str], optional (default=None)
        Either the weight column name, or a list of length 3 holding
        [weight, edge_id, edge_type]. Mutually exclusive with the
        weight/edge_id/edge_type arguments.
    weight : str, optional (default=None)
        Name of the weight column in the input dataframe.
    edge_id : str, optional (default=None)
        Name of the edge id column in the input dataframe.
    edge_type : str, optional (default=None)
        Name of the edge type column in the input dataframe.
    renumber : bool, optional (default=True)
        Whether to renumber the source and destination vertex IDs into
        the range [0, V).

    Examples
    --------
    >>> # Download dataset from
    >>> # https://github.com/rapidsai/cugraph/datasets/...
    >>> df = pd.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                  header=None, names=["0", "1", "2"],
    ...                  dtype={"0": "int32", "1": "int32",
    ...                         "2": "float32"})
    >>> G = cugraph.Graph()
    >>> G.from_pandas_edgelist(df, source='0', destination='1',
    ...                        edge_attr='2', renumber=False)
    """
    if not isinstance(pdf, pd.core.frame.DataFrame):
        raise TypeError("pdf input is not a Pandas DataFrame")

    # Hand off to the cudf path; the conversion copies host -> device.
    gpu_frame = cudf.DataFrame.from_pandas(pdf)
    self.from_cudf_edgelist(
        gpu_frame,
        source=source,
        destination=destination,
        edge_attr=edge_attr,
        weight=weight,
        edge_id=edge_id,
        edge_type=edge_type,
        renumber=renumber,
    )
def from_pandas_adjacency(self, pdf):
    """Initialize the graph from a pandas adjacency matrix.

    The DataFrame's values become edge weights and its column labels
    become node labels; delegates to :meth:`from_numpy_array`.

    Parameters
    ----------
    pdf : pandas.DataFrame
        A DataFrame that contains adjacency information.
    """
    if not isinstance(pdf, pd.core.frame.DataFrame):
        raise TypeError("pdf input is not a Pandas DataFrame")

    self.from_numpy_array(pdf.to_numpy(), pdf.columns)
def from_numpy_array(self, np_array, nodes=None):
    """
    Initializes the graph from numpy array containing adjacency matrix.

    Nonzero entries of ``np_array`` become weighted edges; row/column
    indices (or the corresponding ``nodes`` labels) become the src/dst
    vertex ids.

    Parameters
    ----------
    np_array : numpy.array
        A Numpy array that contains adjacency information
    nodes: array-like or None, optional (default=None)
        A list of column names, acting as labels for nodes
    """
    if not isinstance(np_array, np.ndarray):
        raise TypeError("np_array input is not a Numpy array")
    if len(np_array.shape) != 2:
        raise ValueError("np_array is not a 2D matrix")
    # NOTE(review): a non-square matrix is not rejected here — confirm
    # whether that should also raise ValueError.

    src, dst = np_array.nonzero()
    weight = np_array[src, dst]
    df = cudf.DataFrame()
    if nodes is not None:
        # Fix: accept any array-like (e.g. a plain Python list) for
        # `nodes`. Fancy indexing like nodes[src] requires an ndarray;
        # a plain list would raise TypeError.
        nodes = np.asarray(nodes)
        df["src"] = nodes[src]
        df["dst"] = nodes[dst]
    else:
        df["src"] = src
        df["dst"] = dst
    df["weight"] = weight
    self.from_cudf_edgelist(df, "src", "dst", edge_attr="weight")
def from_numpy_matrix(self, np_matrix):
    """Initialize the graph from a numpy adjacency matrix.

    Converts the ``numpy.matrix`` to a plain ndarray and delegates to
    :meth:`from_numpy_array`.

    Parameters
    ----------
    np_matrix : numpy.matrix
        A Numpy matrix that contains adjacency information.
    """
    if not isinstance(np_matrix, np.matrix):
        raise TypeError("np_matrix input is not a Numpy matrix")

    self.from_numpy_array(np.asarray(np_matrix))
def unrenumber(self, df, column_name, preserve_order=False, get_column_names=False):
    """Replace internal vertex ids in ``df`` with external vertex ids.

    If the renumbering came from a single column, the output reuses the
    same name for the external ids; for multi-column renumbering the
    outputs are labeled 0..n-1 with a ``_column_name`` suffix.

    Ordering is not guaranteed (nor partitioning in multi-GPU mode)
    unless ``preserve_order`` is set; to keep a custom order, add an
    index column and sort the result by it.

    Parameters
    ----------
    df: cudf.DataFrame or dask_cudf.DataFrame
        DataFrame holding internal vertex identifiers to convert.
    column_name: string
        Name of the column containing the internal vertex id.
    preserve_order: bool, optional (default=False)
        If True, keep the input row order in the output.
    get_column_names: bool, optional (default=False)
        If True, also return the unrenumbered column names.

    Returns
    -------
    df : cudf.DataFrame or dask_cudf.DataFrame
        The original columns, unmodified, with external vertex ids
        added and the internal id column removed.
    """
    result = self.renumber_map.unrenumber(
        df, column_name, preserve_order, get_column_names
    )
    return result
def lookup_internal_vertex_id(self, df, column_name=None):
    """Map external vertex ids to internal vertex ids.

    Accepts either a DataFrame (with ``column_name`` identifying the
    external-id column) or a Series of external ids. Ordering is not
    guaranteed (nor partitioning in multi-GPU mode).

    Parameters
    ----------
    df: cudf.DataFrame, cudf.Series, dask_cudf.DataFrame, dask_cudf.Series
        External vertex identifiers to convert.
    column_name: string, optional (default=None)
        Name of the column containing the external vertex ids.

    Returns
    -------
    series : cudf.Series or dask_cudf.Series
        The internal vertex identifiers.
    """
    internal_ids = self.renumber_map.to_internal_vertex_id(df, column_name)
    return internal_ids
def add_internal_vertex_id(
    self,
    df,
    internal_column_name,
    external_column_name,
    drop=True,
    preserve_order=False,
):
    """Append an internal-vertex-id column derived from external ids.

    Parameters
    ----------
    df: cudf.DataFrame or dask_cudf.DataFrame
        DataFrame holding external vertex identifiers to convert.
    internal_column_name: string
        Name of the column that will hold the internal vertex id.
    external_column_name: string or list of strings
        Name of the column(s) containing the external vertex ids.
    drop: bool, optional (default=True)
        Drop the external columns from the returned DataFrame.
    preserve_order: bool, optional (default=False)
        Preserve the input row order (costs an extra sort).

    Returns
    -------
    df : cudf.DataFrame or dask_cudf.DataFrame
        Original DataFrame with the new internal vertex id column.
    """
    converted = self.renumber_map.add_internal_vertex_id(
        df, internal_column_name, external_column_name, drop, preserve_order
    )
    return converted
def clear(self):
    """
    Empty the graph.
    """
    # Dropping the implementation object discards the edgelist, adjacency
    # data, and renumbering state in one step.
    self._Impl = None

def is_bipartite(self):
    """
    Checks if Graph is bipartite. This solely relies on the user call of
    add_nodes_from with the bipartite parameter. This does not parse the
    graph to check if it is bipartite.

    NOTE: Currently not implemented and always returns False
    """
    # TO DO: Call coloring algorithm
    return False

def is_multipartite(self):
    """
    Checks if Graph is multipartite. This solely relies on the user call
    of add_nodes_from with the partition parameter. This does not parse
    the graph to check if it is multipartite.

    NOTE: Currently not implemented and always returns False
    """
    # TO DO: Call coloring algorithm
    return False

def is_multigraph(self):
    """
    Returns True if the graph is a multigraph. Else returns False.

    NOTE: Currently not implemented and always returns False
    """
    # TO DO: Call coloring algorithm
    return False

def is_directed(self):
    """
    Returns True if the graph is a directed graph.
    Returns False if the graph is an undirected graph.
    """
    return self.graph_properties.directed

def is_renumbered(self):
    """
    Returns True if the graph is renumbered.
    """
    # NOTE(review): reads ``self.properties`` while is_directed reads
    # ``self.graph_properties`` — confirm the attribute name is correct.
    return self.properties.renumbered

def is_weighted(self):
    """
    Returns True if the graph has edge weights.
    """
    # NOTE(review): also ``self.properties`` rather than
    # ``self.graph_properties`` — verify intentional.
    return self.properties.weighted

def has_isolated_vertices(self):
    """
    Returns True if the graph has isolated vertices.
    """
    return self.properties.isolated_vertices

def is_remote(self):
    """
    Returns True if the graph is remote; otherwise returns False.
    """
    # Local graphs only; remote graph support lives elsewhere.
    return False

def is_multi_gpu(self):
    """
    Returns True if the graph is a multi-gpu graph; otherwise
    returns False.
    """
    # Multi-GPU graphs are backed by the distributed implementation.
    return isinstance(self._Impl, simpleDistributedGraphImpl)
def to_directed(self):
    """Return a directed representation of the graph.

    Sets the directed attribute on a fresh graph of the same type and
    returns that directed view.

    Returns
    -------
    G : Graph
        A directed graph with the same nodes, and each edge (u,v,weights)
        replaced by two directed edges (u,v,weights) and (v,u,weights).

    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(M, '0', '1')
    >>> DiG = G.to_directed()
    """
    digraph = type(self)()
    digraph.graph_properties.directed = True
    # Mirror the implementation class of this graph, then let it
    # populate the directed copy.
    digraph._Impl = type(self._Impl)(digraph.graph_properties)
    self._Impl.to_directed(digraph._Impl)
    return digraph
def to_undirected(self):
    """Return an undirected copy of the graph.

    Returns
    -------
    G : Graph
        An undirected graph with the same nodes, and each directed edge
        (u,v,weights) replaced by an undirected edge (u,v,weights).

    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> DiG = cugraph.Graph(directed=True)
    >>> DiG.from_cudf_edgelist(M, '0', '1')
    >>> G = DiG.to_undirected()
    """
    base_cls = self.__class__.__bases__[0]
    # An already-undirected graph (or a direct subclass of object) is
    # copied as its own type; otherwise fall back to the parent class,
    # e.g. a directed Graph subclass copies into its undirected base.
    if self.graph_properties.directed is False or base_cls is object:
        undirected = type(self)()
    else:
        undirected = base_cls()
    undirected._Impl = type(self._Impl)(undirected.graph_properties)
    self._Impl.to_undirected(undirected._Impl)
    return undirected
def add_nodes_from(self, nodes):
    """Store the full node list on the graph.

    Parameters
    ----------
    nodes : list or cudf.Series
        The nodes of the graph to be stored.
    """
    node_series = cudf.Series(nodes)
    self._Impl._nodes["all_nodes"] = node_series
def density(self) -> float:
    """
    Compute the density of the graph.

    Density is the measure of how many edges are in the graph versus
    the max number of edges that could be present.

    Returns
    -------
    density : float
        Ratio of present edges to the maximum possible number of edges.
        Returns 0.0 for graphs with fewer than two vertices, where
        density is undefined.

    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> DiG = cugraph.Graph(directed=True)
    >>> DiG.from_cudf_edgelist(M, '0', '1')
    >>> density = DiG.density()
    """
    # Undirected graphs have half as many possible edges, hence factor 2.
    factor = 1 if self.is_directed() else 2
    num_e = self._Impl.number_of_edges(directed_edges=True)
    num_v = self._Impl.number_of_vertices()
    # Fix: guard against ZeroDivisionError for empty/single-vertex graphs.
    if num_v < 2:
        return 0.0
    return (factor * num_e) / (num_v * (num_v - 1))

# TODO: Add function
# def properties():
class MultiGraph(Graph):
    """
    A Multigraph; a Graph containing more than one edge between vertex pairs.
    """

    def __init__(self, directed=False):
        super(MultiGraph, self).__init__(directed=directed)
        self.graph_properties.multi_edge = True

    def is_multigraph(self):
        """
        Returns True if the graph is a multigraph. Else returns False.
        """
        # TO DO: Call coloring algorithm
        return True

    def density(self):
        """
        Density is the measure of how many edges are in the graph versus
        the max number of edges that could be present.

        This function is not supported on a Multigraph.

        Since the maximal number of possible edges between any vertex pair
        can be greater than 1 (undirected), a realistic max number of
        possible edges cannot be determined. Running density on a
        MultiGraph could produce a density score greater than 1 - meaning
        more than 100% of possible edges are present in the Graph.
        """
        # Fix: grammar in the raised message ("not support" -> "not supported").
        raise TypeError("The density function is not supported on a Multigraph.")
class Tree(Graph):
    """A Graph flagged as a tree via its graph properties."""

    def __init__(self, directed=False):
        super().__init__(directed=directed)
        self.graph_properties.tree = True
class NPartiteGraph(Graph):
    """A graph whose vertices are split into N disjoint partitions.

    Partition membership is declared by the caller through
    ``add_nodes_from``; the graph is never parsed to verify it.
    """

    def __init__(self, bipartite=False, directed=False):
        super(NPartiteGraph, self).__init__(directed=directed)
        self.graph_properties.bipartite = bipartite
        self.graph_properties.multipartite = True

    def from_cudf_edgelist(
        self,
        input_df,
        source="source",
        destination="destination",
        edge_attr=None,
        renumber=True,
        store_transposed=False,
        legacy_renum_only=False,
    ):
        """
        Initialize a graph from the edge list. It is an error to call this
        method on an initialized Graph object. The passed input_df argument
        wraps gdf_column objects that represent a graph using the edge list
        format. source argument is source column name and destination argument
        is destination column name.

        By default, renumbering is enabled to map the source and destination
        vertices into an index in the range [0, V) where V is the number
        of vertices. If the input vertices are a single column of integers
        in the range [0, V), renumbering can be disabled and the original
        external vertex ids will be used.

        If weights are present, edge_attr argument is the weights column name.

        Parameters
        ----------
        input_df : cudf.DataFrame or dask_cudf.DataFrame
            A DataFrame that contains edge information. If a
            dask_cudf.DataFrame is passed it will be reinterpreted as a
            cudf.DataFrame. For the distributed path please use
            from_dask_cudf_edgelist.
        source : str or array-like, optional (default='source')
            Source column name or array of column names
        destination : str or array-like, optional (default='destination')
            Destination column name or array of column names
        edge_attr : str or None, optional (default=None)
            The weights column name
        renumber : bool, optional (default=True)
            Indicate whether or not to renumber the source and destination
            vertex IDs
        store_transposed : bool, optional (default=False)
            If True, stores the transpose of the adjacency matrix. Required
            for certain algorithms.
        legacy_renum_only : bool, optional (default=False)
            If True, skips the C++ renumbering step. Must be true for
            pylibcugraph algorithms. Must be false for algorithms
            not yet converted to the pylibcugraph C API.
            This parameter is deprecated and will be removed.

        Examples
        --------
        >>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                    dtype=['int32', 'int32', 'float32'],
        ...                    header=None)
        >>> G = cugraph.BiPartiteGraph()
        >>> G.from_cudf_edgelist(df, source='0', destination='1',
        ...                      edge_attr='2', renumber=False)
        """
        if self._Impl is None:
            self._Impl = npartiteGraphImpl(self.graph_properties)
        # API may change in future
        # NOTE(review): store_transposed and legacy_renum_only are accepted
        # but not forwarded to the implementation — confirm intentional.
        self._Impl._npartiteGraphImpl__from_edgelist(
            input_df,
            source=source,
            destination=destination,
            edge_attr=edge_attr,
            renumber=renumber,
        )

    def from_dask_cudf_edgelist(
        self,
        input_ddf,
        source="source",
        destination="destination",
        edge_attr=None,
        renumber=True,
        store_transposed=False,
        legacy_renum_only=False,
    ):
        """
        Initializes the distributed graph from the dask_cudf.DataFrame
        edgelist. Undirected Graphs are not currently supported.

        By default, renumbering is enabled to map the source and destination
        vertices into an index in the range [0, V) where V is the number
        of vertices. If the input vertices are a single column of integers
        in the range [0, V), renumbering can be disabled and the original
        external vertex ids will be used.

        Note that the graph object will store a reference to the
        dask_cudf.DataFrame provided.

        Parameters
        ----------
        input_ddf : dask_cudf.DataFrame
            The edgelist as a dask_cudf.DataFrame
        source : str or array-like, optional (default='source')
            Source column name or array of column names
        destination : str, optional (default='destination')
            Destination column name or array of column names
        edge_attr : str, optional (default=None)
            Weights column name.
        renumber : bool, optional (default=True)
            If source and destination indices are not in range 0 to V where V
            is number of vertices, renumber argument should be True.
        store_transposed : bool, optional (default=False)
            If True, stores the transpose of the adjacency matrix. Required
            for certain algorithms.
        legacy_renum_only : bool, optional (default=False)
            If True, skips the C++ renumbering step. Must be true for
            pylibcugraph algorithms. Must be false for algorithms
            not yet converted to the pylibcugraph C API.
            This parameter is deprecated and will be removed.
        """
        # Distributed n-partite graphs are unimplemented; fail loudly.
        raise TypeError("Distributed N-partite graph not supported")

    def add_nodes_from(self, nodes, bipartite=None, multipartite=None):
        """
        Add nodes information to the Graph.

        Parameters
        ----------
        nodes : list or cudf.Series
            The nodes of the graph to be stored. If bipartite and multipartite
            arguments are not passed, the nodes are considered to be a list of
            all the nodes present in the Graph.
        bipartite : str, optional (default=None)
            Sets the Graph as bipartite. The nodes are stored as a set of nodes
            of the partition named as bipartite argument.
        multipartite : str, optional (default=None)
            Sets the Graph as multipartite. The nodes are stored as a set of
            nodes of the partition named as multipartite argument.
        """
        # Lazily create the implementation so nodes can be added before edges.
        if self._Impl is None:
            self._Impl = npartiteGraphImpl(self.graph_properties)
        if bipartite is None and multipartite is None:
            self._Impl._nodes["all_nodes"] = cudf.Series(nodes)
        else:
            self._Impl.add_nodes_from(
                nodes, bipartite=bipartite, multipartite=multipartite
            )

    def is_multipartite(self):
        """
        Checks if Graph is multipartite. This solely relies on the user call
        of add_nodes_from with the partition parameter and the Graph created.
        This does not parse the graph to check if it is multipartite.
        """
        return True
class BiPartiteGraph(NPartiteGraph):
    """An NPartiteGraph restricted to exactly two partitions."""

    def __init__(self, directed=False):
        super().__init__(directed=directed, bipartite=True)

    def is_bipartite(self):
        """Always True: this class represents a bipartite graph by
        construction (relies on the caller's add_nodes_from usage; the
        graph itself is not parsed to verify bipartiteness)."""
        return True
def is_directed(G):
    """Return True if ``G`` is a directed graph, False if undirected."""
    return G.is_directed()
def is_multigraph(G):
    """Return True if ``G`` is a multigraph, else False."""
    return G.is_multigraph()
def is_multipartite(G):
    """
    Checks if Graph is multipartite. This solely relies on the Graph
    type. This does not parse the graph to check if it is multipartite.
    """
    # Fix: the method name was misspelled as ``is_multipatite``, which
    # raised AttributeError on every call.
    return G.is_multipartite()
def is_bipartite(G):
    """Return True if ``G`` declares itself bipartite (by Graph type
    only; the structure is not analyzed)."""
    return G.is_bipartite()
def is_weighted(G):
    """Return True if ``G`` has edge weights."""
    return G.is_weighted()
|
995,978 | a09598fb7c6b14cdde79c00c5234c90387efd98e | # /usr/bin/env python3.8
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import torchvision
from packaging.version import Version
import pytest
import torch
from torchvision import models
from aimet_torch.model_preparer import prepare_model, _prepare_traced_model
from aimet_torch.model_validator.model_validator import ModelValidator
from aimet_torch.quantsim import QuantizationSimModel
def evaluate(model: torch.nn.Module, dummy_input: torch.Tensor):
    """
    Helper function to evaluate model given dummy input

    Runs one gradient-free forward pass with the model in eval mode;
    the output is discarded.

    :param model: torch model
    :param dummy_input: dummy input to model (a single tensor, or a
        sequence of tensors to be unpacked as positional arguments)
    """
    inputs = [dummy_input] if isinstance(dummy_input, torch.Tensor) else dummy_input
    model.eval()
    with torch.no_grad():
        model(*inputs)
class TestModelPreparer:
    """CUDA tests verifying that prepare_model preserves model outputs
    and plays well with the validator and quantization workflow."""

    @pytest.mark.cuda
    def test_inception_v3(self):
        """ Verify inception_v3 """
        model = models.inception_v3().eval().cuda()
        prepared_model = prepare_model(model)
        print(prepared_model)
        input_shape = (1, 3, 299, 299)
        dummy_input = torch.randn(*input_shape).cuda()

        # Verify bit-exact outputs.
        assert torch.equal(prepared_model(dummy_input), model(dummy_input))

        # Verify that validator checks pass.
        assert ModelValidator.validate_model(prepared_model, dummy_input)

        # Verify with Quantization workflow.
        quant_sim = QuantizationSimModel(prepared_model, dummy_input=dummy_input)
        quant_sim.compute_encodings(evaluate, dummy_input)
        quant_sim.model(dummy_input)

    @pytest.mark.cuda
    def test_deeplab_v3(self):
        """ Verify deeplab_v3 """
        # Set the strict flag to False so that torch.jit.trace can be successful.
        from aimet_torch.meta import connectedgraph
        connectedgraph.jit_trace_args.update({"strict": False})

        # torchvision renamed the pretrained-weights kwarg across versions.
        if Version(torchvision.__version__) < Version('0.10.2'):
            model = models.segmentation.deeplabv3_resnet50(pretrained_backbone=False).eval().cuda()
        else:
            model = models.segmentation.deeplabv3_resnet50(weights_backbone=None).eval().cuda()
        prepared_model = prepare_model(model)
        print(prepared_model)
        input_shape = (1, 3, 224, 224)
        dummy_input = torch.randn(*input_shape).cuda()

        # Verify bit-exact outputs (segmentation models return a dict).
        assert torch.equal(prepared_model(dummy_input)['out'], model(dummy_input)['out'])

        # Verify that validator checks pass.
        assert ModelValidator.validate_model(prepared_model, dummy_input)

        # Verify with Quantization workflow.
        quant_sim = QuantizationSimModel(prepared_model, dummy_input=dummy_input)
        quant_sim.compute_encodings(evaluate, dummy_input)
        quant_sim.model(dummy_input)

    @pytest.mark.cuda
    @pytest.mark.skipif(Version(torch.__version__) < Version('1.10.0'), reason="torch1.13.1 is required.")
    def test_fx_with_vit(self):
        """ Verify VIT """
        from transformers import ViTModel, ViTConfig
        from transformers.utils.fx import symbolic_trace

        # Set the strict flag to False so that torch.jit.trace can be successful.
        from aimet_torch.meta import connectedgraph
        connectedgraph.jit_trace_args.update({"strict": False})

        model = ViTModel(ViTConfig()).cuda()
        dummy_input = torch.randn(1, 3, 224, 224).cuda()
        # HF's tracer needs the input names; the traced copy is prepared in place.
        traced_model = symbolic_trace(model, ["pixel_values"])
        _prepare_traced_model(traced_model)

        with torch.no_grad():
            outputs = model(dummy_input)
            outputs2 = traced_model(dummy_input)

        # Verify bit-exact outputs.
        assert torch.equal(dict(outputs)["last_hidden_state"], outputs2["last_hidden_state"])
        assert torch.equal(dict(outputs)["pooler_output"], outputs2["pooler_output"])

        # Verify that validator checks pass.
        assert ModelValidator.validate_model(traced_model, dummy_input)

    def test_dummy(self):
        # pytest has a 'feature' that returns an error code when all tests for a given suite are not selected
        # to be executed
        # So adding a dummy test to satisfy pytest
        pass
|
995,979 | d24df7d81dd72023f4a20a3d99788dd3af698021 | from sys import stdin, argv
import matplotlib.pyplot as plt
import json
"""Show matplotlib plot for single metric (e.g. avg_return).
Used by visualize_metric script.
"""
x = []
y = []
for line in stdin:
datapoint = json.loads(line)
x.append(datapoint["step"])
y.append(datapoint["value"])
plt.plot(x, y, "r-")
plt.xlabel("Timestep")
plt.ylabel(argv[1])
plt.show() |
995,980 | 88f3d280fae93bde375ed3dfa3cfe9abb36ddd92 | /Users/Roge/anaconda/lib/python3.5/random.py |
995,981 | bc98941850572696e593a78c15b9a3cc6b27e4ef | print(f"I am apple module with __name__ of {__name__}. I run once when loaded")
print(f"I am apple module and am about to import banana")
import banana
print(f"I am apple module and I imported {banana.module_name}")
if __name__ == "__main__":
print(
f"I am apple module with __name__ of {__name__} and only run when executed as a 'script'"
)
else:
print(
f"I am apple module with __name__ of {__name__} and only run when imported"
)
|
995,982 | d52b57cb3046466d4772d358427bdb2fe452cbc4 | #encoding=utf-8
__author__ = 'reason'
from lib.bottle import template,Bottle,request,view
from lib.cachewrap import cache
from dbsetting import dbsetting as db
import sys,hashlib
from setting import SITE_DOMAIN
reload(sys)
sys.setdefaultencoding('utf-8')
app = Bottle()
@app.route('/getnotice')
@view('options/add_note')
def getnotice():
d={}
sql = 'select id,column_value from HNSJ_SYS_CONFIG WHERE column_name=%s'
params=('MARQUEE_NOTE')
res = db.query_one(sql,params)
print res
d['note_id'] = res[0]
d['note_value'] =res[1]
return dict(note=d,domain=SITE_DOMAIN)
@app.post('/updatenote')
def updatenote():
    """Persist the edited marquee notice text, then re-render the form."""
    note_id = request.params.get('note_id')
    note_value = request.params.get('note_value')
    print note_value
    sql = 'update HNSJ_SYS_CONFIG set column_value=%s where id=%s'
    params = (note_value, note_id)
    res = db.update(sql, params)
    print res
    # Show the form again with the freshly saved value.
    return getnotice()
995,983 | e2b1e1233bf94cbc34e233f4df781c86d3e0de16 | #TODO: utilize multiple stream processors for high throughput.
# Stream Management
# Streams allow concurrency of execution on a single device within a given context. Queued work items in the same stream execute sequentially, but work items in different streams may execute concurrently. Most operations involving a CUDA device can be performed asynchronously using streams, including data transfers and kernel execution. For further details on streams, see the CUDA C Programming Guide Streams section.
# Streams are instances of numba.cuda.cudadrv.driver.Stream:
# note from numba at https://numba.pydata.org/numba-doc/dev/cuda-reference/host.html
numba.cuda.cudadrv.driver.Device.reset()
"""
deletes the context for the device. This will destroy all memory allocations, events, and streams created within the context.
"""
class numba.cuda.cudadrv.driver.Stream(context, handle, finalizer, external=False)
https://numba.pydata.org/numba-doc/dev/cuda-reference/host.html#numba.cuda.cudadrv.driver.Stream
work items in different streams may execute concurrently
conda install numba cudatoolkit pyculib
|
995,984 | 93c5fe74c6eb555a21c04c280e943cf7f6f9fe49 | '''
---------------------------------------------------------------------------
Tool: RepairDetailedBrokenLinks (4)
Script: 4_DataSourceRepairX.py
Toolbox: CheckAndFixLinks.tbx
Purpose:
Fix "broken source links" from input .csv
NOTE: run from the machine and user that will use the mxd
---------------------------------------------------------------------------
Author: Rebecca Strauch - ADFG-DWC-GIS
Created on: May 11, 2015
last modification: August 24, 2015
NOTE: run from the machine and user that will use the mxd
---------------------------------------------------------------------------
Description:
This script will loop thru all the MXDs within a folder, and the broken
links and attempt to repair them based on the input comma delimited file
with the following fields (header columns are suggested...):
columns:
[0] UniqID: a record number just to help keep track of records
[1] dataType: contains layer, shape, or sde from ListUniqueBrokenLink
Possible Type values:
Raster, Raster.tif, Raster.sid, Raster.TOPO, Raster.bmp,
Raster.gif, Raster.jpg,
Sde (sde connection file, test may think it's a raster type)
Shape, Fgdb, Pgdb, Dbf, Txt
Cover_poly, Cover_arc, Cover_region, Cover_point, Cover_tic:
Esri.sdc
other
Unknown:
Events: event table (temporary) should skip these broken links
Table_excel, Table_dbf, Table_dat, Table_other
[2] newType: _review if new path not set yet
if same as dataType, then will be same type
if different than dataType, format is changing
[3] brokenPath: contains the full path of the broken link
[4] newPath: contains the new path: = brokenPath if no change
uses the inFile to replace the full path, then saves it to a new mxd
in 10.1/10.2 format with _fix appended to the name
Arguments:
[0] theWorkspace: Folder/directory to search (walk thru, includes subfolders)
[1] inFile: input .csv file for updating paths
[2] outWorkspace: directory where _repair folder will be create and new mxds written
Updates:
---------------------------------------------------------------------------
'''
# Import modules
import arcpy
import os
import csv
from _miscUtils import * # handles the myMsgs and time functions
from _gpdecorators import * # handles main for catching errors
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def findUpdatePath(inFile, searchOldPath, serviceLayer):
    """Look up the replacement path for a broken data source.

    Scans the repair .csv (columns: UniqID, dataType, newType,
    brokenPath, newPath) for a row whose brokenPath appears inside
    searchOldPath.  serviceLayer is "" unless the layer is a service,
    in which case it must also match the row's dataType.

    Returns a tuple:
        (newPath, newType, sameTypeTF, oldType, oldPath)
    where newPath is "no match" when no usable row was found.
    """
    # Fix: initialize the return values up front.  Previously, if no csv
    # row's brokenPath matched searchOldPath (or the file was empty), the
    # rtn* names were never assigned and the final return raised
    # UnboundLocalError.
    rtnNewPath = "no match"
    rtnNewType = False
    rtnSameTypeTF = False
    rtnOldType = ""
    rtnOldPath = ""
    with open(inFile) as inFix:
        reader = csv.reader(inFix)
        for row in reader:
            if row[3] in searchOldPath:
                oldType = row[1].strip().lower()
                if (serviceLayer == "") and row[3]:
                    # Non-service layer: first path match wins.
                    myMsgs(' >>>> Match found: {}'.format(row))
                    rtnNewPath = row[4]
                    rtnNewType = row[2].strip().lower()
                    rtnSameTypeTF = (row[1].strip().lower() == row[2].strip().lower())
                    rtnOldType = row[1].strip().lower()
                    rtnOldPath = row[3]
                    break
                elif (serviceLayer != "") and (oldType == serviceLayer) and row[3]:
                    # Service layer: the row's type must also match.
                    myMsgs(' >>>> Match found: {}'.format(row))
                    rtnNewPath = row[4]
                    rtnNewType = row[2].strip().lower()
                    rtnSameTypeTF = (row[1].strip().lower() == row[2].strip().lower())
                    rtnOldType = row[1].strip().lower()
                    rtnOldPath = row[3]
                    break
                else:
                    # Path matched but conditions failed; report the row
                    # but keep scanning for a better match.
                    rtnNewPath = "no match"
                    rtnNewType = False
                    rtnSameTypeTF = False
                    rtnOldType = row[1]
                    rtnOldPath = row[3]
    return (rtnNewPath, rtnNewType, rtnSameTypeTF, rtnOldType, rtnOldPath)
# catch_errors decorator must preceed a function using the @ notation.
@catch_errors
def main():
    """
    Main function to add updated source paths to fix broken paths

    Walks theWorkspace for .mxd map documents; for every broken layer it
    looks up a replacement data source in the inFile CSV (findUpdatePath)
    and repairs tables, rasters, coverages, shapes and FGDB layers in
    place (services are repaired by insert-new/remove-old).  A
    FixedReport<timestamp>.txt summary is written to theWorkspace and each
    repaired mxd is saved over itself.
    """
    # Script arguments...
    """ If running as standalone, hardcode theWorkspace and inFile """
    theWorkspace = arcpy.GetParameterAsText(0)
    if not theWorkspace:
        theWorkspace = r"d:\_dataTest"
    arcpy.env.workspace = theWorkspace
    arcpy.env.overwriteOutput = True
    inFile = arcpy.GetParameterAsText(1)
    if not inFile:
        inFile = "updateMultipleSourcePaths.csv"
        # NOTE(review): this hard-coded UNC path always overrides the
        # default file name assigned just above -- confirm intentional.
        inFile = r"\\dfg.alaska.local\gis\Anchorage\GISStaff\___gisStaffConnections\RepairBrokenSrcAug242015.csv"
    outWorkspace = arcpy.GetParameterAsText(2)
    if not outWorkspace:
        outWorkspace = os.path.join(theWorkspace, "_repaired")
    '''if not os.path.isdir(outWorkspace):
        os.makedirs(outWorkspace)
        myMsgs("created new directory {0} \n".format(outWorkspace))'''
    # Create .txt Report of what it thinks was fixed, tagged with YYYYMMDD_HHMM
    outFile = "FixedReport"
    fileDateTime = curFileDateTime()
    currentDate = curDate()
    outfileTXT = os.path.join(theWorkspace, outFile) + fileDateTime + ".txt"
    myMsgs (outFile)
    reportFile = open(outfileTXT, 'w')
    myMsgs( "File {0} is open? {1}".format(outfileTXT, str(not reportFile.closed)))
    outText = "Report for what it THINKS it repaired in {0}, on {1} \n ".format(theWorkspace, currentDate)
    outText += " Includes coverages (pts, poly, arc, anno), shapes, and FGDB data." + '\n'
    outText += "-----------------------------------------------------" + '\n'
    reportFile.write(outText)
    mxd = None
    outMXDName = "none"
    updatePath = []
    # Coverage feature classes live in these subfolders; their workspace is
    # the parent coverage directory, so they are handled specially below.
    cvrList = [r"\arc", r"\polygon", r"\region", r"\point", r"\tic" ]
    # Maps a path fragment to the workspace-type keyword replaceDataSource expects.
    lstExtDatatype = [[".shp", "SHAPEFILE_WORKSPACE" ], [".sde","SDE_WORKSPACE"],
                      [".mdb", "ACCESS_WORKSPACE" ], [".gdb", "FILEGDB_WORKSPACE"],
                      ["cover", "ARCINFO_WORKSPACE"]]
    cntMXD = 0          # mxds reviewed
    cntFixed = 0        # links fixed in the current mxd
    cntTotalFixed = 0   # links fixed across all mxds
    # makes sure the .csv file exists
    if arcpy.Exists(inFile):
        myMsgs ("->Using {0} to repair paths.\n==============================".format(inFile))
        # walks thru the workspace to create list of files
        for root, dirs, files in os.walk(theWorkspace):
            for fileName in files:
                if root == outWorkspace: # don't process mxd's in the target directory
                    pass
                else:
                    fullPath = os.path.join(root, fileName)
                    basename, extension = os.path.splitext(fileName)
                    # Only process .mxd files
                    if extension == ".mxd":
                        myMsgs("\nReviewing MXD: {0}".format(fullPath))
                        reportFile.write("\nReviewing MXD: {0}".format(fullPath))
                        mxd = arcpy.mapping.MapDocument(fullPath)
                        dfs = arcpy.mapping.ListDataFrames(mxd)
                        cntMXD += 1
                        cntFixed = 0
                        basename, extension = os.path.splitext(fileName)
                        # New output mxd name....
                        outMXDName = os.path.join(outWorkspace, (str(basename) + ".mxd")) #"_fix.mxd"))
                        # create list of the tables since they are handle differently
                        theTables = arcpy.mapping.ListTableViews(mxd)
                        # Loops thru dataframes so adding and deleting Services will work.
                        for df in dfs:
                            # Loops thru layers, checks for broken links and tries to repair
                            lyrList = arcpy.mapping.ListLayers(mxd, "", df)
                            for lyr in lyrList:
                                if lyr.isBroken:
                                    # First, just report what kind of broken layer this is.
                                    if not lyr.supports("DATASOURCE") and not lyr.isServiceLayer:
                                        myMsgs(" ->Skipping {0} not a Service layer, and does not support DATASOURCE".format(lyr.name))
                                        pass #continue
                                    elif not lyr.supports("DATASOURCE") and lyr.isServiceLayer:
                                        myMsgs(" -Broken Service: {0}".format(lyr.name))
                                    else:
                                        myMsgs(" -Broken: {0}".format(lyr.dataSource))
                                    #myMsgs("layer is Group {0} or ServiceLayer {1}".format(lyr.isGroupLayer, lyr.isServiceLayer))
                                    # Now dispatch on layer kind for the actual repair.
                                    if (lyr.isGroupLayer or ("Events" in lyr.name)) and (not lyr.isServiceLayer): # Groups and Event FC skipped
                                        myMsgs(" ...skipping group or event: {0}".format(lyr.name))
                                        reportFile.write("\n *skipping group or event: {0} \n".format(lyr.name))
                                        pass #break
                                    elif lyr.isServiceLayer: # services might have to be handle differently
                                        if lyr.supports("SERVICEPROPERTIES"):
                                            # Pull the URL out of the service properties.
                                            for spType, spName in lyr.serviceProperties.iteritems():
                                                myMsgs(" Service Properties: {0}: {1}".format(spType, spName ))
                                                if spType == "URL":
                                                    dataSource = str(spName)
                                                    lyrType = ("service_{}".format(lyr.name))
                                                    break
                                            myMsgs(" ->this ia a service....using add and remove layer")
                                            updatePath = findUpdatePath(inFile, dataSource, lyrType.strip().lower())
                                            newDSPath, newDSName = os.path.split(updatePath[0])
                                            # Services can't be repointed in place: insert a fresh
                                            # layer after the broken one, then remove the old one.
                                            if ("service" in updatePath[3]) and ("service" in updatePath[1]):
                                                insertLayer = arcpy.mapping.Layer(updatePath[0])
                                                print("dataframe: {0}".format(df))
                                                arcpy.mapping.InsertLayer(df, lyr, insertLayer, "AFTER")
                                                arcpy.mapping.RemoveLayer(df, lyr)
                                                reportFile.write("\n ->sees this as service....{0} \n".format(dataSource))
                                                # will still look at deleted version after insert, not the new version..
                                                # isBroken will give false info even if fixed, so
                                                # don't use myMsgs("Still broken? {0}".format(lyr.isBroken))
                                        else:
                                            myMsgs(" --> a service layer but no SERVICE PROPERTIES")
                                    elif lyr.supports("DATASOURCE") and lyr.supports("DATASETNAME"):
                                        # not a group, event or what it thinks is a service
                                        updatePath = findUpdatePath(inFile, lyr.dataSource, "")
                                        newDSPath, newDSName = os.path.split(updatePath[0])
                                        sameType = updatePath[2]
                                        for cvr in cvrList: #checks to see if the source layer is a coverage...must handle different
                                            if cvr in lyr.dataSource:
                                                sourceIsCoverage = True
                                                break
                                            else:
                                                sourceIsCoverage = False
                                        # updatePath[1] is False if there wasn't a match
                                        # so "not update[1]" means no match was found, and moves to next layer
                                        if not updatePath[1]: # if no match was found
                                            myMsgs(" !! no match to: {0} ".format(lyr.dataSource))
                                            updateStatus = "no match, not changed" # used for message only
                                            pass
                                        elif updatePath[1].strip().lower() == "drive":
                                            myMsgs(" skipping drive-letter matches for now: {0}".format(lyr.dataSource))
                                            updateStatus = "can only find drive match...look into it)"
                                            pass
                                        elif updatePath[1].strip().lower() == "_review":
                                            myMsgs(" no new source assigned yet for: {0}".format(lyr.dataSource))
                                            updateStatus = ("review and update {0}".format(inFile))
                                            pass
                                        else: #if lyr.supports("DATASOURCE") and lyr.supports("DATASETNAME"):
                                            updateStatus = str(updatePath[0]) # used for message only
                                            if lyr in theTables:
                                                #myMsgs(" thinks its a table....using findAndReplsWorkspacePath")
                                                myMsgs(" *Moving {0}: {1} to new: {2}".format(updatePath[3], lyr.dataSource, updatePath[0]))
                                                reportFile.write("\n Moving {0}: {1} to new: {2} \n".format(updatePath[3], lyr.dataSource, updatePath[0]))
                                                # NOTE(review): updatePath here is the whole 5-tuple
                                                # returned by findUpdatePath, not updatePath[0]; confirm
                                                # findAndReplaceWorkspacePath really accepts that.
                                                lyr.findAndReplaceWorkspacePath(lyr.dataSource, updatePath, False)
                                            elif lyr.isRasterLayer:
                                                #myMsgs(" thinks its a raster....using findAndReplsWorkspacePath")
                                                myMsgs(" *Moving {0}: {1} to new: {2}".format(updatePath[3], lyr.dataSource, updatePath[0]))
                                                reportFile.write("\n Moving {0}: {1} to new: {2} \n".format(updatePath[3], lyr.dataSource, updatePath[0]))
                                                newType = "RASTER_WORKSPACE"
                                                # For FGDB/SDE sources the workspace is the .gdb/.sde
                                                # container itself, so trim the path back to it.
                                                for extType in lstExtDatatype:
                                                    if extType[0] in updatePath[0]:
                                                        newType = extType[1]
                                                        if extType[0] == '.gdb':
                                                            newDSPath = newDSPath.split('.gdb', 1)[0] + '.gdb'
                                                            #newType = extType[1]
                                                        elif extType[0] == '.sde':
                                                            newDSPath = newDSPath.split('.sde', 1)[0] + '.sde'
                                                        break
                                                lyr.replaceDataSource(newDSPath, newType, newDSName, False)
                                                if not sameType:
                                                    # Dataset type changed: refresh the TOC name if it
                                                    # still shows the old dataset name.
                                                    testOldTOC = updatePath[4].strip('\\')
                                                    if lyr.name == testOldTOC:
                                                        lyr.name = lyr.datasetName
                                            else:
                                                newType = updatePath[1]
                                                if sourceIsCoverage and sameType:
                                                    # Coverage workspace is the coverage's parent folder.
                                                    newDSPath = os.path.split(newDSPath)[0]
                                                    newType = "ARCINFO_WORKSPACE"
                                                for extType in lstExtDatatype:
                                                    if extType[0] in updatePath[0]:
                                                        newType = extType[1]
                                                        if extType[0] == '.gdb':
                                                            newDSPath = newDSPath.split('.gdb', 1)[0] + '.gdb'
                                                            #newType = extType[1]
                                                        elif extType[0] == '.sde':
                                                            newDSPath = newDSPath.split('.sde', 1)[0] + '.sde'
                                                        break
                                                print("line ~281 newType is: {0}".format(newType))
                                                myMsgs(" *Moving {0}: {1} to new: {2}".format(updatePath[3], lyr.dataSource, updatePath[0]))
                                                reportFile.write("\n Moving {0}: {1} to new: {2}".format(updatePath[3], lyr.dataSource, updatePath[0]))
                                                lyr.replaceDataSource(newDSPath, newType, newDSName, False)
                                            #myMsgs(" new datasource: {0}".format(lyr.dataSource))
                                            myMsgs(" **the new data source: {0}".format(updateStatus))
                                            cntFixed += 1
                                            myMsgs(" Still broken? {0}".format(lyr.isBroken))
                                    else:
                                        myMsgs("not sure what it is, but can't process {0}".format(lyr.name))
                                else:
                                    myMsgs(" -Not Broken: {0}".format(str(lyr)))
                        myMsgs(" Number of links fixed processed: {0}".format(cntFixed))
                        myMsgs(" -{0} Review complete.".format(fullPath))
                        reportFile.write(" -Number of links fixed processed: {0} \n".format(cntFixed))
                        reportFile.write(" -{0} Review complete. \n\n".format(fullPath))
                        # Only save when something was actually repaired.
                        if cntFixed > 0:
                            mxd.save()
                            myMsgs("saved to {0}".format(fullPath))
                            reportFile.write("saved to {0}".format(fullPath))
                            cntTotalFixed += cntFixed
                            cntFixed = 0
                        """if cntFixed > 0:
                            mxd.saveACopy(outMXDName, '10.1')
                            myMsgs("saved to {0}".format(outMXDName))
                            cntFixed = 0"""
                        '''if arcpy.Exists(outMXDName):
                            outMXDName.()
                            myMsgs("saved 1")
                        else:
                            mxd.saveACopy(outMXDName, '10.1')
                            myMsgs("saved 2")'''
                        del mxd
                        cntFixed = 0
    else:
        myMsgs ("ERROR: Required repair source list: [0] does not exit. \n".format(inFile))
    # Final summary, echoed to the console and the report file.
    outText = ("\n\n ==========================================")
    outText += ("\n Number of MXD's processed: {0} \n".format(cntMXD))
    outText += (" Total Number of links it fixed, all mxds: {0} \n".format(cntTotalFixed) )
    myMsgs(" {0}".format(outText))
    reportFile.write(outText)
    # close the .txt file,
    reportFile.close()
    myMsgs( "File {0} is closed? {1}".format(outfileTXT, str(reportFile.closed)))
    myMsgs('!!! Success !!! ')
|
995,985 | 4e2e9539db03c7ea073f7aee416dddae77644b00 | import os
from typing import List
from setuptools import setup
with open("README.md") as f:
readme = f.read()
def find_stub_files(name: str) -> List[str]:
    """
    It seems setuptools does not support recursive patterns.
    This function is stolen from django-stubs project :)

    Walks *name* and returns every .pyi path relative to the top-level
    stub directory itself.
    """
    stubs: List[str] = []
    for root, _dirs, filenames in os.walk(name):
        for filename in filenames:
            if not filename.endswith(".pyi"):
                continue
            if os.path.sep in root:
                # Drop the leading package dir so paths are package-relative.
                sub_root = root.split(os.path.sep, 1)[-1]
                stubs.append(os.path.join(sub_root, filename))
            else:
                stubs.append(filename)
    return stubs
# Distribution metadata for the drf-yasg PEP 484 stub package.
# NOTE(review): license is declared "MIT" but the classifier below says
# "BSD License" -- one of the two is wrong; confirm which before release.
setup(
    name="drf-yasg-stubs",
    version="0.1.4",
    description="Typing stubs for drf-yasg library (PEP 484 stubs for Mypy and PyCharm)",
    license="MIT",
    url="https://github.com/intgr/drf-yasg-stubs",
    author="Marti Raudsepp",
    author_email="marti@juffo.org",
    long_description=readme,
    long_description_content_type="text/markdown",
    python_requires=">=3.6",
    install_requires=[],
    # PEP 561 stub-only package: the package dir is "<name>-stubs".
    packages=["drf_yasg-stubs"],
    package_data={"drf_yasg-stubs": find_stub_files("drf_yasg-stubs")},
    classifiers=[
        "Framework :: Django",
        "Intended Audience :: Developers",
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: BSD License",
    ],
)
|
995,986 | 1136b5b5a560926b3014d730f17e0b4280edf58d | #!/usr/bin/env python3
import asyncio
from aioconsole import ainput
from mavsdk import System
from mavsdk.camera import (CameraError, Mode, Option, Setting)
usage_str = """
Usage:
p print current (changeable) camera settings
m change camera mode
s change a setting
"""
camera_mode = Mode.UNKNOWN
current_settings = []
possible_setting_options = []
async def run():
    """Interactive camera console for a MAVSDK-connected vehicle.

    Connects on udp://:14540, starts background tasks that mirror camera
    telemetry into the module-level caches, then loops on user commands:
    'p' prints cached settings, 'm' changes camera mode, 's' changes one
    setting (discrete or range).
    """
    drone = System()
    await drone.connect(system_address="udp://:14540")

    # Background observers keep camera_mode / current_settings /
    # possible_setting_options up to date while the user types.
    asyncio.ensure_future(observe_current_settings(drone))
    asyncio.ensure_future(observe_camera_mode(drone))
    asyncio.ensure_future(observe_possible_setting_options(drone))

    while True:
        entered_input = await ainput(usage_str)
        if (entered_input == "p"):
            print(f"\n=== Current settings ===\n")
            print_current_settings()
        elif (entered_input == "m"):
            print(f"\n=== Possible modes ===\n")
            print(f"1. PHOTO")
            print(f"2. VIDEO")
            try:
                index_mode = await make_user_choose_camera_mode()
            except ValueError:
                print("Invalid index")
                continue
            if (index_mode == 1):
                chosen_mode = Mode.PHOTO
            else:
                chosen_mode = Mode.VIDEO
            print(f"Setting camera mode to {chosen_mode}!")
            try:
                await drone.camera.set_mode(chosen_mode)
                print(f" --> Succeeded")
            except CameraError as error:
                print(f" --> Failed with code: {error._result.result_str}")
        elif (entered_input == "s"):
            print(f"\n=== Possible settings ===\n")
            print_possible_settings(possible_setting_options)
            try:
                index_setting = await \
                    make_user_choose_setting(possible_setting_options)
            except ValueError:
                print("Invalid index")
                continue
            selected_setting = possible_setting_options[index_setting - 1]
            possible_options = selected_setting.options
            print(f"\n=== Available options ===")
            print(f"Setting: {selected_setting.setting_id}")
            if (not selected_setting.is_range):
                # Discrete setting: pick one option from the list.
                print(f"Options:")
                try:
                    print_possible_options(possible_options)
                    index_option = await \
                        make_user_choose_option(possible_options)
                    selected_option = possible_options[index_option - 1]
                    print(f"Setting {selected_setting.setting_id} "
                          f"to {selected_option.option_description}!")
                    setting = Setting(
                        selected_setting.setting_id,
                        "",
                        selected_option,
                        selected_setting.is_range)
                except ValueError:
                    print("Invalid index")
                    continue
            else:
                # Range setting: read a numeric value within [min, max].
                try:
                    selected_value = await \
                        make_user_choose_option_range(possible_options)
                    print(f"Setting {selected_setting.setting_id}"
                          f" to {selected_value}!")
                    setting = Setting(
                        selected_setting.setting_id,
                        "",
                        Option(selected_value, ""),
                        selected_setting.is_range)
                except ValueError:
                    print("Invalid value")
                    continue
            try:
                await drone.camera.set_setting(setting)
                print(f" --> Succeeded")
            except CameraError as error:
                print(f" --> Failed with code: {error._result.result_str}")
        else:
            print("Invalid input!")
            continue
async def observe_camera_mode(drone):
    """Mirror camera-mode telemetry into the module-level camera_mode cache."""
    global camera_mode
    async for new_mode in drone.camera.mode():
        camera_mode = new_mode
async def observe_current_settings(drone):
    """Mirror current-settings telemetry into the module-level cache."""
    global current_settings
    async for updated in drone.camera.current_settings():
        current_settings = updated
async def observe_possible_setting_options(drone):
    """Mirror possible-setting-options telemetry into the module-level cache."""
    global possible_setting_options
    async for updated in drone.camera.possible_setting_options():
        possible_setting_options = updated
def print_current_settings():
    """Print the cached camera mode and every cached camera setting."""
    print(f"* CAM_MODE: {camera_mode}")
    for cached in current_settings:
        print(f"* {cached.setting_id}: {cached.setting_description}")
        # Range settings expose a raw value; discrete ones a description.
        if cached.is_range:
            print(f" -> {cached.option.option_id}")
        else:
            print(f" -> {cached.option.option_description}")
async def make_user_choose_camera_mode():
    """Prompt for a camera-mode index; raise ValueError unless it is 1 or 2."""
    raw = await ainput(f"\nWhich mode do you want? [1..2] >>> ")
    chosen = int(raw)
    if not 1 <= chosen <= 2:
        raise ValueError()
    return chosen
def print_possible_settings(possible_settings):
    """Print a 1-based numbered menu of the changeable settings."""
    for number, setting in enumerate(possible_settings, start=1):
        print(f"{number}. {setting.setting_id}: {setting.setting_description}")
async def make_user_choose_setting(possible_settings):
    """Prompt for a setting index in [1..len]; raise ValueError if invalid."""
    n_settings = len(possible_settings)
    raw = await \
        ainput(f"\nWhich setting do you want to change?"
               f" [1..{n_settings}] >>> ")
    chosen = int(raw)
    if not 1 <= chosen <= n_settings:
        raise ValueError()
    return chosen
def print_possible_options(possible_options):
    """Print a 1-based numbered menu of option descriptions."""
    for number, option in enumerate(possible_options, start=1):
        print(f"{number}. {option.option_description}")
async def make_user_choose_option(possible_options):
    """Prompt for an option index in [1..len]; raise ValueError if invalid."""
    n_options = len(possible_options)
    raw = await \
        ainput(f"\nWhich option do you want? [1..{n_options}] >>> ")
    chosen = int(raw)
    if not 1 <= chosen <= n_options:
        raise ValueError()
    return chosen
async def make_user_choose_option_range(possible_options):
    """Prompt for a float within [min, max]; returns the value as a string.

    possible_options[0]/[1] carry min/max in option_id; an optional third
    entry carries the step size (display only). Raises ValueError when the
    entered value is out of range (or not a number).
    """
    min_value = float(possible_options[0].option_id)
    max_value = float(possible_options[1].option_id)
    interval_text = ""
    if len(possible_options) == 3:
        interval_value = float(possible_options[2].option_id)
        interval_text = f"interval: {interval_value}"
    raw = await \
        ainput(f"\nWhat value do you want?"
               f" [{min_value}, {max_value}] {interval_text} >>> ")
    value = float(raw)
    if value < min_value or value > max_value:
        raise ValueError()
    return str(value)
if __name__ == "__main__":
# Run the asyncio loop
asyncio.run(run())
|
995,987 | 3ffea30ef642df77cbbf8be4e2f234919f1152e7 | import cv2
import numpy as np
# Image-pyramid demo: build three levels of a downsampling (Gaussian)
# pyramid and three levels of upsampling from 'lena.jpg', then show all.
img = cv2.imread('lena.jpg')

# pyrDown halves width and height per level (blur + decimate).
lr1 = cv2.pyrDown(img)
lr2 = cv2.pyrDown(lr1)
lr3 = cv2.pyrDown(lr2)

# pyrUp doubles width and height per level; note it upsamples the
# original image, not the downsampled levels, so this is not a
# reconstruction of lr1-lr3.
hr1 = cv2.pyrUp(img)
hr2 = cv2.pyrUp(hr1)
hr3 = cv2.pyrUp(hr2)

cv2.imshow('image', img)
cv2.imshow('LR1', lr1)
cv2.imshow('LR2', lr2)
cv2.imshow('LR3', lr3)
cv2.imshow('UR1', hr1)
cv2.imshow('UR2', hr2)
cv2.imshow('UR3', hr3)

# Block until any key is pressed, then close every window.
cv2.waitKey(0)
cv2.destroyAllWindows()
995,988 | f8638e5166843999b95f4beca7988ae3b8e4f180 | import datetime
# Brazilian military-enlistment age check: reads the birth year and whether
# the user has already enlisted, then reports if enlistment is due this
# year, upcoming, or overdue.  (User-facing strings are Portuguese with
# ANSI color codes.)
ano = int(input('\033[1;34mDigite O \033[m\033[1mAno\033[m\033[1;34m em Que Você Nasceu:\033[m '))
opc = str(input('\033[1;34mVocê Já Se Alistou:\033[m[S/N]')).lower()
# Age from calendar years only; birthdays within the year are ignored.
idade = int(datetime.date.today().year) - ano
if idade == 18 and opc == 'n':
    # Exactly 18 and not yet enlisted: must enlist this year.
    print('\n\033[1;34mVocê Deve Se Alistar Este Ano')
elif idade < 18:
    # Under 18: report how many years remain.
    if idade < 17:
        print('\n\033[1;32mFaltam {} Anos Para Você Se Alistar!\033[m'.format(18 - idade))
    elif idade == 17:
        print('\n\033[1;32mFalta 1 Ano Para Você Se Alistar\033[m')
elif idade > 18 and opc == 's':
    # Over 18 and already enlisted: how long ago.
    if idade == 19:
        print('\n\033[1;31mVocê Se Alistou Há 1 Ano\033[m')
    elif idade > 19:
        print('\n\033[1;31mVocê Se Alistou Há {} Anos\033[m'.format(idade - 18))
elif idade > 18 and opc == 'n':
    # Over 18 and never enlisted: overdue by idade - 18 years.
    if idade == 19:
        print('\n\033[1;31mVocê Não Se Alistou Há 1 Ano\033[m')
    elif idade > 19:
        print('\n\033[1;31mVocê Não Se Alistou Há {} Anos\033[m'.format(idade - 18))
elif idade == 18 and opc == 's':
    # Exactly 18 and already enlisted: wait for the call-up.
    print('\033[1;34m-\033[m' * 36)
    print('\033[1;34mAGUARDE A CONVOCAÇÃO DA\033[m AERONAUTICA!')
    print('\033[1;34m-\033[m' * 36)
995,989 | 19e56b5c7ff17111e34e3a807f5d566af0b2078a | from turtle import *
def iso_trape(pen, f_color, dofill):
    """Draw an isosceles trapezoid (sides 159/115/159, base 255), optionally filled."""
    pencolor(pen)
    fillcolor(f_color)
    if dofill:
        begin_fill()
    left(60)
    for dist, turn_angle in ((159, 60), (115, 60), (159, 120)):
        forward(dist)
        right(turn_angle)
    forward(255)
    if dofill:
        end_fill()
def inverse_iso_trape(pen, f_color, dofill):
    """Draw an upside-down isosceles trapezoid (sides 155/115/155, base 275), optionally filled."""
    pencolor(pen)
    fillcolor(f_color)
    if dofill:
        begin_fill()
    right(60)
    for dist, turn_angle in ((155, 60), (115, 60), (155, 120)):
        forward(dist)
        left(turn_angle)
    forward(275)
    if dofill:
        end_fill()
def rhomboid_2(pen, f_color, dofill):
    """Draw a rhomboid, optionally filled.

    NOTE(review): side lengths are 153/160/155/155 — slightly irregular;
    possibly intended to be 155 everywhere, confirm with the author.
    """
    pencolor(pen)
    fillcolor(f_color)
    if dofill:
        begin_fill()
    for dist, turn_angle in ((153, 60), (160, 120), (155, 60)):
        forward(dist)
        right(turn_angle)
    forward(155)
    if dofill:
        end_fill()
def rhomboid_3(pen, f_color, dofill):
    """Draw a 155-unit rhombus (turns 60/120/60), optionally filled."""
    pencolor(pen)
    fillcolor(f_color)
    if dofill:
        begin_fill()
    for turn_angle in (60, 120, 60):
        forward(155)
        right(turn_angle)
    forward(155)
    if dofill:
        end_fill()
def rhomboid(pen, f_color, dofill):
    """Draw a 155-unit rhombus (turns 120/60/120), optionally filled."""
    pencolor(pen)
    fillcolor(f_color)
    if dofill:
        begin_fill()
    for turn_angle in (120, 60, 120):
        forward(155)
        right(turn_angle)
    forward(155)
    if dofill:
        end_fill()
def triangle(pen, f_color, size, dofill):
    """Draw an equilateral triangle with the given side length.

    Args:
        pen: pen (outline) color.
        f_color: fill color.
        size: side length in pixels.
        dofill: when True, fill the shape.
    """
    pencolor(pen)
    fillcolor(f_color)
    if dofill:
        begin_fill()
    # right(240) is equivalent to left(120); three of these close the shape.
    # (Removed the redundant manual `i += 1`: reassigning the for-loop
    # variable inside the body was a no-op.)
    for _ in range(3):
        forward(size)
        right(240)
    if dofill:
        end_fill()
def inverse_triangle(pen, f_color, size, dofill):
    """Draw an equilateral triangle turning the opposite way from triangle().

    Args:
        pen: pen (outline) color.
        f_color: fill color.
        size: side length in pixels.
        dofill: when True, fill the shape.
    """
    pencolor(pen)
    fillcolor(f_color)
    if dofill:
        begin_fill()
    # left(240) is equivalent to right(120); three of these close the shape.
    # (Removed the redundant manual `i += 1`: reassigning the for-loop
    # variable inside the body was a no-op.)
    for _ in range(3):
        forward(size)
        left(240)
    if dofill:
        end_fill()
|
995,990 | af0c6d1d6e8164d2c2cc531d42e3fe597dd33b83 | from pangtreebuild.mafgraph.sorter import sort_mafblocks
from pangtreebuild.pangenome import DAGMaf
from pangtreebuild.pangenome.parameters import msa
from pangtreebuild.tools import logprocess
global_logger = logprocess.get_global_logger()
def get_dagmaf(maf: msa.Maf) -> DAGMaf.DAGMaf:
    """Converts MAF to DagMaf.

    Args:
        maf: MAF to be converted.

    Returns:
        DagMaf built from the MAF.
    """
    nodes = []
    for block in sort_mafblocks(maf.filecontent):
        nodes.append(DAGMaf.DAGMafNode(block_id=block.id,
                                       alignment=block.alignment,
                                       orient=block.orient,
                                       order=block.order(),
                                       out_edges=block.out_edges))
    return DAGMaf.DAGMaf(nodes)
|
995,991 | dc79705a3fd7ce0e4c808bb03d68674d2e4e09c3 | from django.urls import path
from register import views
# URL namespace, used when reversing, e.g. reverse("register:form").
app_name="register"

# Routes for the register app.
urlpatterns = [
    # Registration form page.
    path("", views.register_form, name="form"),
    # Submission endpoint that creates the user.
    path("user", views.register_user, name="user")
]
995,992 | 9f5235b7d50f535becd029b98be832b5b04fa404 | '''
比较两个文件夹里的文件,如果有不同的文件,拷贝出来到第三个文件夹
'''
import sys, os,shutil
from os.path import walk,join,normpath
PathA = 'D:\\BaiduYunDownload\\中谷教育Python\\'
PathB = 'D:\\BaiduYunDownload\\Python\\中谷\\中谷教育Python\\'
PathC= 'D:\\中谷Goal\\'
def visit(arg, dirname, names):
    """Directory-walk callback: copy anything present under PathA but
    missing under PathB into the mirrored location under PathC.

    Args:
        arg: unused extra argument passed through by the walker.
        dirname: directory currently being visited (inside PathA).
        names: entry names of dirname.
    """
    print(dirname)
    rel_dir = dirname.replace(PathA, "")
    dirnameB = os.path.join(PathB, rel_dir)
    dirnameC = os.path.join(PathC, rel_dir)
    if os.path.isdir(dirnameB):
        for name in names:
            src = os.path.join(dirname, name)
            if os.path.isfile(src) and not os.path.isfile(os.path.join(dirnameB, name)):
                if not os.path.isdir(dirnameC):
                    # Was os.system("mkdir %s"): broke on paths with spaces
                    # and silently ignored failures.
                    os.makedirs(dirnameC)
                shutil.copy2(src, os.path.join(dirnameC, name))
            # Fixed NameError: this branch previously referenced the
            # misspelled name `dirnamB`.
            elif os.path.isdir(src) and not os.path.isdir(os.path.join(dirnameB, name)):
                if not os.path.isdir(os.path.join(dirnameC, name)):
                    os.makedirs(os.path.join(dirnameC, name))
    else:
        # Whole directory is missing on the B side: mirror its files to C.
        if not os.path.isdir(dirnameC):
            os.makedirs(dirnameC)
        for name in names:
            src = os.path.join(dirname, name)
            # Guard added: copy2 raises on directories, which `names`
            # also contains; subdirectories are visited on their own.
            if os.path.isfile(src):
                shutil.copy2(src, os.path.join(dirnameC, name))
995,993 | 1076587e0d584238a1840884259cc335b7815a45 | #27. 利用递归函数调用方式,将所输入的5个字符,以相反顺序打印出来。
# 27. Print the first l characters of the input, one per line, in reverse order.
def output(s, l):
    """Print s[l-1], s[l-2], ..., s[0], each on its own line."""
    for index in range(l, 0, -1):
        print(s[index - 1])
s = input('Input a string:')
l = len(s)
output(s,l) |
995,994 | b8170c6b3f7c815288716ff1773c5051ca7e93e8 | import pandas as pd
import pydot
from IPython.display import Image
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
from copy import deepcopy
## classification importance with and ExtraTreesClassifier
## classification importance with and ExtraTreesClassifier
def classification_feature_importance(X,
                                      y,
                                      n_estimators=100,
                                      criterion='gini',
                                      min_samples_split=2,
                                      min_samples_leaf=1,
                                      min_weight_fraction_leaf=0.,
                                      max_features='auto',
                                      max_leaf_nodes=None,
                                      min_impurity_decrease=0.,
                                      min_impurity_split=1e-7,
                                      bootstrap=True,
                                      oob_score=True,
                                      n_jobs=1,
                                      random_state=0,
                                      verbose=0,
                                      class_weight=None):
    """Rank the columns of X by importance from an ExtraTreesClassifier fit.

    All keyword arguments are forwarded verbatim to sklearn's
    ExtraTreesClassifier. Returns a DataFrame with columns
    'feature'/'importance' sorted descending by importance.
    """
    forest_params = dict(n_estimators=n_estimators,
                         criterion=criterion,
                         min_samples_split=min_samples_split,
                         min_samples_leaf=min_samples_leaf,
                         min_weight_fraction_leaf=min_weight_fraction_leaf,
                         max_features=max_features,
                         max_leaf_nodes=max_leaf_nodes,
                         min_impurity_decrease=min_impurity_decrease,
                         min_impurity_split=min_impurity_split,
                         bootstrap=bootstrap,
                         oob_score=oob_score,
                         n_jobs=n_jobs,
                         random_state=random_state,
                         verbose=verbose,
                         class_weight=class_weight)
    forest = ExtraTreesClassifier(**forest_params)
    forest.fit(X, y)
    ranking = pd.DataFrame({'feature': list(X.columns),
                            'importance': list(forest.feature_importances_)})
    ranking.sort_values('importance', inplace=True, ascending=False)
    return ranking
## plot a tree
## plot a tree
def plot_tree(estimator, features_names):
    """Render a fitted decision tree as an inline PNG via graphviz/pydot."""
    dot_buffer = StringIO()
    export_graphviz(estimator,
                    out_file=dot_buffer,
                    feature_names=features_names,
                    filled=True, rounded=True)
    graphs = pydot.graph_from_dot_data(dot_buffer.getvalue())
    return Image(graphs[0].create_png())
## find correlations
## find correlations
def take_most_correlated(df, correlation_threshold=0.9):
    """Return the normalized frequency of columns appearing in highly
    correlated pairs (corr >= correlation_threshold).

    Args:
        df: numeric DataFrame.
        correlation_threshold: minimum pairwise correlation to count.

    Returns:
        pd.Series mapping column name -> share of high-correlation pair
        memberships (value_counts(normalize=True)); empty when no pair
        crosses the threshold.
    """
    corr = df.corr()
    aux = corr[corr >= correlation_threshold].reset_index().melt('index').dropna()
    # Drop the diagonal (a column is always perfectly correlated with itself).
    aux = aux[aux['index'] != aux['variable']]
    if aux.empty:
        # Previously the sorted-pair unpacking crashed on an empty frame.
        return pd.Series(dtype=float)
    # Canonicalize each pair so (a, b) and (b, a) collapse to one row.
    # The original `... .apply(...).str` tuple-unpacking trick was removed
    # from modern pandas; build the two columns explicitly instead.
    pairs = aux.apply(lambda r: sorted([r['index'], r['variable']]), axis=1)
    aux['var_left'] = [p[0] for p in pairs]
    aux['var_right'] = [p[1] for p in pairs]
    aux = aux.loc[:, ['var_left', 'var_right', 'value']].drop_duplicates()
    var_list = list(aux['var_left'].values) + list(aux['var_right'].values)
    return pd.Series(var_list).value_counts(normalize=True)
## map encoder
## map encoder
def map_encoder_categorical(x):
    """Map each distinct value of x to a 1-based ordinal, in sorted order."""
    return {value: rank for rank, value in enumerate(sorted(set(x)), start=1)}
def map_encoder_fit(df, categorical_features):
    """Build a per-column ordinal-encoding map for the given categorical columns."""
    encoders = {}
    for column in categorical_features:
        encoders[column] = map_encoder_categorical(df[column])
    return encoders
def map_encoder_transform(df, map_encoder):
    """Apply fitted ordinal maps to a copy of df.

    Categories unseen at fit time are coded as max(known codes) + 1.
    The input frame is left untouched.
    """
    encoded = deepcopy(df)
    for column, mapping in map_encoder.items():
        unseen_code = int(max(mapping.values()) + 1)
        encoded[column] = encoded[column].map(mapping).fillna(unseen_code).astype(int)
    return encoded
## onehot encoder
## onehot encoder
def onehot_encoder_fit(df, columns):
    """Record the categorical columns and the full dummy-expanded column set
    seen at fit time, for later alignment in onehot_encoder_transform."""
    expanded_columns = list(pd.get_dummies(data=df, columns=columns).columns)
    return {'categorical_columns': columns,
            'output_columns': expanded_columns}
def onehot_encoder_transform(df, onehot_encoder):
    """One-hot encode a copy of df and align it to the fit-time columns.

    Dummy columns for categories absent from this frame are added as 0;
    the column order matches onehot_encoder['output_columns'].
    """
    df_ = deepcopy(df)
    df_ = pd.get_dummies(data=df_, columns=onehot_encoder['categorical_columns'])
    # reindex (not .loc) so fit-time columns missing here become 0-filled
    # instead of raising KeyError -- .loc with missing labels raises on
    # pandas >= 1.0, which broke the original fillna(0) intent.
    df_ = df_.reindex(columns=onehot_encoder['output_columns'], fill_value=0)
    return df_
|
995,995 | f097eaf4b49262650c0dce9de5640602413bb00f | import os
import pyximport
import numpy as np
import pandas as pd
from array import array
klib_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'klib'))
paq9a_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'paq9a'))
pyximport.install(setup_args={'include_dirs': [np.get_include(), klib_dir, paq9a_dir]})
from .algo import diff, diff_depth, undiff, undiff_depth
from .converter import (
parse_csv, parse_df, to_internal,
parse_internal, to_csv, to_np)
from zlib import compress as pack1, decompress as unpack1
from brotli import compress as pack2, decompress as unpack2
from .paq import compress as pack3, decompress as unpack3
__version__ = '0.3.3'
packs = [None, pack1, pack2, pack3]
modes = [None, 'zlib', 'brotli', 'paq']
def get_depth_params(headers):
    """Derive the (excludes, start, end) parameters for diff_depth from the
    csv header row.

    excludes lists column indices that are neither known time-series
    columns nor numbered depth columns (trailing digit).  start/end bound
    the order-book region; both are 0 for plain time-series data.  Returns
    None (implicitly) when headers match neither layout -- callers treat
    that as "use plain diff".
    """
    # Known non-depth time-series columns.
    ts_cols = ['timestamp', 'pre_close',
               'open', 'high', 'low', 'close', 'price']
    if ('ap1' in headers and 'av1' in headers) or \
            ('bp1' in headers and 'bv1' in headers):
        # Order-book layout: price columns (ap*/bp*) are assumed to sit a
        # fixed distance before their paired volume columns (av*/bv*).
        if 'av1' in headers:
            offset = headers.index('av1') - headers.index('ap1')
        else:
            offset = headers.index('bv1') - headers.index('bp1')
        # First numbered price column (ask or bid, whichever comes first).
        for i, h in enumerate(headers):
            if h.startswith('ap') and h[-1].isdigit():
                start = i
                break
            elif h.startswith('bp') and h[-1].isdigit():
                start = i
                break
        # start becomes first_price + offset and end first_price + 2*offset.
        # NOTE(review): presumably these delimit the volume block relative
        # to the price block for diff_depth -- confirm against algo.pyx.
        start = start + offset
        end = start + offset
        excludes = array('l')
        for i, h in enumerate(headers):
            if h not in ts_cols \
                    and not h[-1].isdigit():
                excludes.append(i)
        return excludes, start, end
    elif set(ts_cols) & set(headers):
        # Plain time-series layout: no depth region (start == end == 0).
        start = end = 0
        excludes = array('l')
        for i, h in enumerate(headers):
            if h not in ts_cols \
                    and not h[-1].isdigit():
                excludes.append(i)
        return excludes, start, end
def compress_bytes(v, level=2, precision=3):
    """Parse csv bytes, delta-encode, serialize and pack at the given level."""
    ncols, nrows, headers, divides, arr = parse_csv(v)
    depth_params = get_depth_params(headers)
    if depth_params:
        arr = diff_depth(ncols, nrows, arr, *depth_params)
    else:
        arr = diff(ncols, nrows, arr)
    internal = to_internal(ncols, nrows, arr, headers, divides, modes[level])
    return packs[level](internal)
def compress_dataframe(v, level=2, precision=3):
    """Parse a DataFrame, delta-encode, serialize and pack at the given level."""
    ncols, nrows, headers, divides, arr = parse_df(v)
    depth_params = get_depth_params(headers)
    if depth_params:
        arr = diff_depth(ncols, nrows, arr, *depth_params)
    else:
        arr = diff(ncols, nrows, arr)
    internal = to_internal(ncols, nrows, arr, headers, divides, modes[level])
    return packs[level](internal)
def compress(v, level=2, precision=3):
    """Compress csv text/bytes or a DataFrame; level 1=zlib, 2=brotli, 3=paq.

    Returns None (implicitly) for unsupported input types, matching the
    original dispatch behavior.
    """
    assert level in [1, 2, 3]
    payload = v.encode('utf-8') if isinstance(v, str) else v
    if isinstance(payload, bytes):
        return compress_bytes(payload, level, precision)
    if isinstance(payload, pd.DataFrame):
        return compress_dataframe(payload, level, precision)
def decompress(v, format='df'):
    """Decompress a payload produced by compress() back to csv or a DataFrame.

    Args:
        v: compressed bytes (paq, zlib or brotli).
        format: 'csv' for csv bytes, 'df' for a pandas DataFrame.

    Raises:
        ValueError: when the payload matches no known compression format.
        NotImplementedError: for an unknown *format* argument.
    """
    if v[:2] == b'\x00c':
        internal = unpack3(v)  # paq magic
    elif v[:2] in [b'\x78\x01', b'\x78\x5e', b'\x78\x9c', b'\x78\xda']:
        internal = unpack1(v)  # zlib stream headers
    else:
        try:
            # brotli has no magic bytes; just try it.
            internal = unpack2(v)
        except Exception:  # was a bare except: keep broad but explicit
            raise ValueError('Unrecognized Format')
    if b'+' not in internal:
        # format error, return raw internal
        return internal
    else:
        ncols, nrows, headers, divides, arr = parse_internal(internal)
        params = get_depth_params(headers)
        if params:
            arr = undiff_depth(ncols, nrows, arr, *params)
        else:
            arr = undiff(ncols, nrows, arr)
        if format == 'csv':
            return to_csv(ncols, nrows, headers, divides, arr)
        elif format == 'df':
            arr = to_np(ncols, nrows, headers, divides, arr)
            return pd.DataFrame(arr)
        else:
            # Fixed NameError: the error path referenced misspelled 'fomart'.
            raise NotImplementedError('format {} not supported'.format(
                format))
|
995,996 | c92c375c597a33a85ca96d103ab3c88b84692909 | """
TODO
Validation scripts that check that the genotype of each mouse meshes with
the parents
Auto-determine needed action for each litter (and breeding cage?)
Auto-id litters
Slug the mouse name from the litter name and toe num?
"""
from __future__ import unicode_literals
from django.db import models
import datetime
# Create your models here.
class ChrisCage(models.Model):
    # Unique cage label (short identifier).
    name = models.CharField(max_length=10, unique=True)

    def __str__(self):
        return self.name
class ChrisGenotype(models.Model):
    # Unique genotype name.
    name = models.CharField(max_length=50, unique=True)

    def __str__(self):
        return self.name
class ChrisMouse(models.Model):
    # Required fields
    name = models.CharField(max_length=15, unique=True)
    # Sex stored as an int, displayed as M/F/? via choices.
    sex = models.IntegerField(
        choices=(
            (0, 'M'),
            (1, 'F'),
            (2, '?'),
        )
    )
    # NOTE(review): Django >= 2.0 requires an on_delete argument on
    # ForeignKey; this file appears to target Django 1.x -- confirm
    # before any framework upgrade.
    genotype = models.ForeignKey(ChrisGenotype)

    # Optional fields that can be set by the user
    cage = models.ForeignKey(ChrisCage, null=True, blank=True)
    dob = models.DateField(blank=True, null=True)

    # Chris-specific optional fields
    training_name = models.CharField(max_length=20, null=True, blank=True)
    headplate_color = models.CharField(max_length=10, null=True, blank=True)

    def __str__(self):
        return self.name
995,997 | 636615b33b4c6d1d7538377771ad69811644a21e | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom Training Loop correctness test."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras import optimizers
from keras.applications import resnet_v2
from keras.datasets import fashion_mnist
from keras.distribute import optimizer_combinations
from keras.distribute import strategy_combinations
from keras.testing_infra import test_utils
# isort: off
from tensorflow.python.ops.losses import losses_impl
_NUM_SAMPLES = 66
_BATCH_SIZE = 32
_RANDOM_SEED = 1337
_NUM_EPOCHS = 2
_STEPS_PER_EPOCH = 2
class MaybeStrategyScope:
    """Provides a context allowing no distribution strategy.

    When constructed with a strategy, entering this context enters
    strategy.scope(); with None it is a no-op.
    """

    def __init__(self, strategy):
        self._strategy = strategy
        self._scope = None

    def __enter__(self):
        if not self._strategy:
            return
        self._scope = self._strategy.scope()
        self._scope.__enter__()

    def __exit__(self, exc_type, value, traceback):
        if not self._strategy:
            return
        self._scope.__exit__(exc_type, value, traceback)
        self._scope = None
def get_model(sync_batchnorm=False):
    """Build the small 1-input regression MLP used by these tests.

    Args:
        sync_batchnorm: when True use a cross-replica synchronized
            BatchNormalization layer.
    """
    layers = [
        keras.layers.Dense(10, activation="relu", input_shape=(1,)),
        keras.layers.Dense(
            10,
            activation="relu",
            kernel_regularizer=keras.regularizers.l2(1e-4),
        ),
        keras.layers.BatchNormalization(synchronized=True)
        if sync_batchnorm
        else keras.layers.BatchNormalization(),
        keras.layers.Dense(10, activation="relu"),
        keras.layers.Dense(1),
    ]
    model = keras.Sequential()
    for layer in layers:
        model.add(layer)
    return model
def get_data():
    """Synthetic y = 3x regression data as a batched tf.data.Dataset."""
    x_train = np.random.rand(_NUM_SAMPLES, 1)
    y_train = 3 * x_train
    # Cast after computing targets so values match the original pipeline.
    x_train, y_train = x_train.astype("float32"), y_train.astype("float32")
    dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    return dataset.batch(_BATCH_SIZE)
def compute_loss(labels, logits, reg_losses):
    """Global-batch-averaged MSE plus scaled L2 regularization."""
    per_example_mse = keras.losses.mean_squared_error(labels, logits)
    data_loss = tf.nn.compute_average_loss(
        per_example_mse, global_batch_size=_BATCH_SIZE
    )
    reg_loss = tf.nn.scale_regularization_loss(reg_losses)
    return data_loss + reg_loss
def iteration_inside_func(
    initial_weights,
    dataset,
    optimizer_fn,
    iteration_type,
    strategy=None,
    sync_batchnorm=None,
    jit_compile=False,
):
    """Helper function to test iterating over data inside a tf.function.

    Trains the reference model for _NUM_EPOCHS epochs with the dataset
    loop captured inside a single tf.function; iteration_type selects
    between iterating the dataset directly ("dataset") or via an explicit
    iterator for _STEPS_PER_EPOCH steps.  Returns
    (final weights, last epoch mean loss, training accuracy) so callers
    can compare against other iteration styles.
    """
    with MaybeStrategyScope(strategy):
        if strategy and sync_batchnorm:
            model = get_model(sync_batchnorm)
        else:
            model = get_model()
        model.set_weights(initial_weights)
        optimizer = optimizer_fn()
        training_accuracy = keras.metrics.CategoricalAccuracy(
            "training_accuracy", dtype=tf.float32
        )

        @tf.function
        def train_epoch(dist_input):
            """Training StepFn."""

            @tf.function(jit_compile=jit_compile)
            def step_fn(inputs):
                # One optimizer step on a single (per-replica) batch.
                samples, labels = inputs
                with tf.GradientTape() as tape:
                    logits = model(samples)
                    loss = compute_loss(labels, logits, model.losses)
                grads = tape.gradient(loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables))
                training_accuracy.update_state(labels, logits)
                return loss

            total_loss = 0.0
            num_batches = 0
            if iteration_type == "dataset":
                # Iterate the (possibly distributed) dataset directly.
                for x in dist_input:
                    if strategy:
                        per_replica_losses = strategy.run(step_fn, args=(x,))
                        total_loss += strategy.reduce(
                            tf.distribute.ReduceOp.SUM,
                            per_replica_losses,
                            axis=None,
                        )
                    else:
                        total_loss += step_fn(x)
                    num_batches += 1
            else:
                # Explicit-iterator variant: a fixed number of steps.
                iterator = iter(dist_input)
                for _ in range(_STEPS_PER_EPOCH):
                    if strategy:
                        per_replica_losses = strategy.run(
                            step_fn, args=(next(iterator),)
                        )
                        total_loss += strategy.reduce(
                            tf.distribute.ReduceOp.SUM,
                            per_replica_losses,
                            axis=None,
                        )
                    else:
                        total_loss += step_fn(next(iterator))
                    num_batches += 1
            return total_loss / tf.cast(num_batches, dtype=tf.float32)

        if strategy:
            dataset = strategy.experimental_distribute_dataset(dataset)
        for _ in range(_NUM_EPOCHS):
            loss = train_epoch(dataset)
        return (model.get_weights(), loss, training_accuracy.result())
def iteration_outside_func(
    initial_weights,
    dataset,
    optimizer_fn,
    iteration_type,
    strategy=None,
    sync_batchnorm=None,
    jit_compile=False,
):
    """Helper function to test iterating over data outside a tf.function.

    Same contract as `iteration_inside_func`, except that only the per-batch
    train step is traced; the epoch/batch loop runs in eager Python.

    Returns:
        Tuple of (final weights, mean loss over all batches, accuracy).
    """
    with MaybeStrategyScope(strategy):
        model = get_model(sync_batchnorm=sync_batchnorm)
        model.set_weights(initial_weights)
        optimizer = optimizer_fn()

        training_accuracy = keras.metrics.CategoricalAccuracy(
            "training_accuracy", dtype=tf.float32
        )

        @tf.function
        def train_step(dist_inputs):
            """Training StepFn."""

            @tf.function(jit_compile=jit_compile)
            def step_fn(inputs):
                # One gradient update on one (per-replica) batch.
                samples, labels = inputs
                with tf.GradientTape() as tape:
                    logits = model(samples)
                    loss = compute_loss(labels, logits, model.losses)
                grads = tape.gradient(loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables))
                training_accuracy.update_state(labels, logits)
                return loss

            if strategy:
                per_replica_losses = strategy.run(step_fn, args=(dist_inputs,))
                return strategy.reduce(
                    tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
                )
            else:
                return step_fn(dist_inputs)

        if strategy:
            dataset = strategy.experimental_distribute_dataset(dataset)

        total_loss = 0.0
        num_batches = 0
        if iteration_type == "dataset":
            # Eager Python loop over epochs and batches.
            for _ in range(_NUM_EPOCHS):
                for x in dataset:
                    total_loss += train_step(x)
                    num_batches += 1
        else:
            # Explicit iterator, fixed number of steps per epoch.
            for _ in range(_NUM_EPOCHS):
                iterator = iter(dataset)
                for _ in range(_STEPS_PER_EPOCH):
                    total_loss += train_step(next(iterator))
                    num_batches += 1
        return (
            model.get_weights(),
            total_loss / tf.cast(num_batches, dtype=tf.float32),
            training_accuracy.result(),
        )
@test_utils.run_v2_only
class TestDistributionStrategyDnnCorrectness(
    tf.test.TestCase, parameterized.TestCase
):
    """Test custom training loop correctness with a simple DNN model."""

    def setUp(self):
        super().setUp()
        # Fixed seeds so the distributed and the baseline runs are comparable.
        np.random.seed(_RANDOM_SEED)
        tf.compat.v1.set_random_seed(_RANDOM_SEED)

    @tf.__internal__.distribute.combinations.generate(
        # Three parameter grids: (1) all single-worker strategies with every
        # v2 optimizer, (2) multi-worker strategies with a reduced optimizer
        # set, (3) XLA-compiled runs restricted to GPU strategies.
        tf.__internal__.test.combinations.combine(
            distribution=strategy_combinations.all_strategies,
            optimizer_fn=optimizer_combinations.optimizers_v2,
            mode=["eager"],
            iteration_type=["iterator", "dataset"],
            inside_func=[False, True],
            sync_batchnorm=[True, False],
            jit_compile=[False],
        )
        + tf.__internal__.test.combinations.combine(
            distribution=strategy_combinations.multiworker_strategies,
            optimizer_fn=[
                optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
                optimizer_combinations.adagrad_optimizer_keras_v2_fn,
                optimizer_combinations.adam_experimental_fn,
            ],
            mode=["eager"],
            iteration_type=["iterator", "dataset"],
            inside_func=[False, True],
            sync_batchnorm=[True, False],
            jit_compile=[False],
        )
        + tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.one_device_strategy_gpu,
                tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,  # noqa: E501
            ],
            optimizer_fn=[
                optimizer_combinations.gradient_descent_optimizer_keras_v2_fn,
                optimizer_combinations.adagrad_optimizer_keras_v2_fn,
            ],
            mode=["eager"],
            iteration_type=["iterator", "dataset"],
            inside_func=[False, True],
            sync_batchnorm=[True, False],
            jit_compile=[True],
        )
    )
    def test_dnn_correctness_minus_tpus(
        self,
        distribution,
        optimizer_fn,
        iteration_type,
        inside_func,
        sync_batchnorm,
        jit_compile,
    ):
        # TODO(anjs): Identify why this particular V1 optimizer needs a higher
        # tol.
        if (
            "FtrlV1" in optimizer_fn._name
            and "TPU" in type(distribution).__name__
        ):
            self.skipTest("Reduced tolerance of the order of 1e-1 required.")
        self.dnn_correctness(
            distribution,
            optimizer_fn,
            iteration_type,
            inside_func,
            sync_batchnorm,
            jit_compile,
        )

    def dnn_correctness(
        self,
        distribution,
        optimizer_fn,
        iteration_type,
        inside_func,
        sync_batchnorm=None,
        jit_compile=False,
    ):
        """Train the same model with and without the strategy and check that
        weights, loss and accuracy agree within tolerance."""
        model = get_model(sync_batchnorm)
        initial_weights = model.get_weights()
        dataset = get_data()
        if inside_func:
            iteration_func = iteration_inside_func
        else:
            iteration_func = iteration_outside_func
        # Run under the distribution strategy (possibly XLA-compiled).
        wts_with_ds, loss_with_ds, acc_with_ds = iteration_func(
            initial_weights,
            dataset,
            optimizer_fn,
            iteration_type,
            strategy=distribution,
            sync_batchnorm=sync_batchnorm,
            jit_compile=jit_compile,
        )
        # Baseline: identical initial weights, no strategy, no XLA.
        wts, loss, acc = iteration_func(
            initial_weights,
            dataset,
            optimizer_fn,
            iteration_type,
            sync_batchnorm=sync_batchnorm,
            jit_compile=False,
        )
        self.assertAllClose(wts, wts_with_ds, atol=1e-3, rtol=1e-3)
        self.assertAllClose(loss, loss_with_ds, atol=1e-3, rtol=1e-3)
        self.assertAllClose(acc, acc_with_ds, atol=1e-3, rtol=1e-3)

    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            distribution=[
                tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus,  # noqa: E501
            ],
            mode=["eager"],
        )
    )
    def test_fused_batch_norm_uneven_batch(self, distribution):
        """Test that fused BN works when the last device gets empty data.

        Adapted from
        https://www.tensorflow.org/tutorials/distribute/custom_training
        but using ResNet, which uses fused batchnorm, as the model.

        Arguments:
            distribution: distribute test configuration
        """
        self.skipTest("TODO(b/234354008): Requires fetching data from network.")
        (train_images, train_labels), _ = fashion_mnist.load_data()
        # add channel dimension to make 2D data into 3D, since some ops of the
        # model require it.
        train_images = train_images[..., None]
        train_images = train_images / np.float32(255)
        # Padding images because ResNet requires a minimal shape of (32, 32)
        padded_train_images = np.concatenate(
            [
                np.zeros((len(train_images), 2, 28, 1)),
                train_images,
                np.zeros((len(train_images), 2, 28, 1)),
            ],
            axis=1,
        )
        padded_train_images = np.concatenate(
            [
                np.zeros((len(train_images), 32, 2, 1)),
                padded_train_images,
                np.zeros((len(train_images), 32, 2, 1)),
            ],
            axis=2,
        )

        buffer_size = len(train_images)
        global_batch_size = distribution.num_replicas_in_sync
        num_samples = global_batch_size - 1
        epochs = 2

        # Keep only the first images, so that the last GPU receives an empty
        # batch
        padded_train_images = padded_train_images[:num_samples]
        train_labels = train_labels[:num_samples]

        train_dataset = (
            tf.data.Dataset.from_tensor_slices(
                (padded_train_images, train_labels)
            )
            .shuffle(buffer_size)
            .batch(global_batch_size)
        )
        train_dist_dataset = distribution.experimental_distribute_dataset(
            train_dataset
        )

        def create_model():
            # Small wrapper that adapts grayscale input to ResNet.
            inputs = keras.Input((32, 32, 1))
            preprocessed = keras.layers.Conv2D(3, (1, 1))(
                inputs
            )  # ResNet requires 3 channels
            features = resnet_v2.ResNet50V2(
                include_top=False,
                input_tensor=preprocessed,
                pooling="avg",
                weights=None,
            ).output
            return keras.Model(inputs, features)

        with distribution.scope():
            # Set reduction to `none` so we can do the reduction afterwards and
            # divide by global batch size.
            loss_object = keras.losses.SparseCategoricalCrossentropy(
                from_logits=True, reduction=losses_impl.Reduction.NONE
            )

            def compute_resnet_loss(labels, predictions):
                per_example_loss = loss_object(labels, predictions)
                return tf.nn.compute_average_loss(
                    per_example_loss, global_batch_size=global_batch_size
                )

            model = create_model()

            optimizer = optimizers.adam_legacy.Adam()

        def train_step(inputs):
            images, labels = inputs

            with tf.GradientTape() as tape:
                predictions = model(images, training=True)
                loss = compute_resnet_loss(labels, predictions)

            gradients = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            return loss

        @tf.function
        def distributed_train_step(dataset_inputs):
            per_replica_losses = distribution.run(
                train_step, args=(dataset_inputs,)
            )
            return distribution.reduce(
                tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None
            )

        for epoch in range(epochs):
            # Train loop
            total_loss = 0.0
            num_batches = 0
            for x in train_dist_dataset:
                total_loss += distributed_train_step(x)
                num_batches += 1
            train_loss = total_loss / num_batches
            print(f"Epoch {epoch+1}, Loss: {train_loss}")
if __name__ == "__main__":
    # Multi-worker combinations need the multi-process test runner.
    tf.__internal__.distribute.multi_process_runner.test_main()
import os
import sqlite3
from sqlite3.dbapi2 import Connection
from telegram_manager import TelegramManager
from config import (name_db,
value_limit,
folder_config,
entrance_bot_usage,
name_loc_default,
name_join_default,
value_old_default,
value_limit_search,
value_message_default,
value_message_selection_default,
table_poll,
table_users,
table_groups,
table_locations,
table_poll_groups,
table_users_groups,
table_users_settings,
table_groups_selected,
table_users_locations,
table_user_group_connect)
class DataUsage:
    """
    Wrapper around the bot's SQLite database: holds the connection and
    cursor, runs all queries, and forwards failures to Telegram.
    """
    def __init__(self) -> None:
        self.folder_current = os.getcwd()
        self.telegram_manager = TelegramManager()  # error-reporting channel
        self.folder_config = os.path.join(self.folder_current, folder_config)
        # Creates the folder only if it does not already exist.
        self.create_folder = lambda x: os.path.exists(x) or os.mkdir(x)
        # produce_values is defined outside this chunk; presumably it sets
        # self.name_db and opens the connection -- TODO confirm.
        self.produce_values()
    def proceed_error(self, msg:str) -> None:
        """
        Report an error: print it locally and forward it to Telegram.
        Input: msg = message of the error
        Output: None
        """
        print(msg)
        self.telegram_manager.proceed_message_values(msg)
    def check_db(self) -> None:
        """
        Debug helper: dump the main tables and the basic search result
        to stdout.
        """
        a = self.cursor.execute(f'SELECT * from {table_users};').fetchall()
        print(a)
        print('#################################################')
        b = self.cursor.execute(f'SELECT * from {table_groups};').fetchall()
        print(b)
        print('#################################################')
        c = self.cursor.execute(f'SELECT * from {table_users_groups};').fetchall()
        print(c)
        print('#################################################')
        d = self.cursor.execute(f"SELECT * FROM {table_users_settings};").fetchall()
        print(d)
        print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        e = self.get_search_button_basic()
        print(e)
        print('________________________________________________________')
    def create_connection(self) -> None:
        """
        Open (and lazily create) the SQLite database connection.
        Input: None
        Output: self.connection and self.cursor are initialized
        """
        try:
            self.create_folder(self.folder_config)
            # check_same_thread=False: the cursor is shared across bot
            # handler threads. self.name_db is set outside this chunk --
            # presumably in produce_values; TODO confirm.
            self.connection = sqlite3.connect(self.name_db, check_same_thread=False)
            self.cursor = self.connection.cursor()
        except Exception as e:
            msg = f'We faced problems with the connection. Reason: {e}'
            self.proceed_error(msg)
    def close_connection(self) -> None:
        """
        Close the SQLite connection.
        Input: None
        Output: None; the connection is closed
        """
        self.connection.close()
def return_user_values(self, id_user:int) -> set:
"""
Method which is dedicated to return
Input: id_user = id of the selected user
Output: list with values
"""
try:
value_return = self.cursor.execute(f"SELECT name_first, name_last, nickname FROM {table_users} WHERE id={id_user};").fetchone()
return value_return
except Exception as e:
msg = f"We faced problems with the getting of the user; Mistake: {e}"
self.proceed_error(msg)
return []
def update_user_information(self, value_list:list) -> None:
"""
Method which is dedicated to produce the changes of the user values
Input: value_list = list value of the format [name_first, name_last, username, user_id]
Output: we successfully updated all these values
"""
try:
self.cursor.execute(f"UPDATE {table_users} SET name_first=?, name_last=?, nickname=? WHERE id=?;", value_list)
self.connection.commit()
except Exception as e:
msg = f"We faced problems with updating all name values; Mistake: {e}"
self.proceed_error(msg)
def update_text_message(self, id_user:int, text_new:str) -> bool:
"""
Method which is dedicated to update sent text
Input: id_user = id of the selected user
text_new = text which is dedicated to get updated
Output: boolean value which signifies that you updated value succesfully
"""
try:
self.cursor.execute(f"UPDATE {table_users_settings} SET text_sending = ? WHERE id_user = ?;", (text_new, id_user))
self.connection.commit()
return True
except Exception as e:
msg = f"We face problems with updating the message text; Mistake: {e}"
self.proceed_error(msg)
return False
def update_name_default(self, id_user:int, text_new:str) -> bool:
"""
Method which is dedicated to update sent text
Input: id_user = id of the selected user
text_new = text which is dedicated to get updated
Output: boolean value which signifies that you updated value succesfully
"""
try:
self.cursor.execute(f"UPDATE {table_users_settings} SET name_default = ? WHERE id_user = ?;", (text_new, id_user))
self.connection.commit()
return True
except Exception as e:
msg = f"We face problems with updating the default name; Mistake: {e}"
self.proceed_error(msg)
return False
def update_time_default(self, id_user:int, time_new:int) -> bool:
"""
Method which is dedicated to update sent text
Input: id_user = id of the selected user
text_new = text which is dedicated to get updated
Output: boolean value which signifies that you updated value succesfully
"""
try:
self.cursor.execute(f"UPDATE {table_users_settings} SET text_minutes = ? WHERE id_user = ?;", (time_new, id_user))
self.connection.commit()
return True
except Exception as e:
msg = f"We face problems with updating the default name; Mistake: {e}"
self.proceed_error(msg)
return False
def check_presence_locations(self, id_user:int) -> bool:
"""
Method which is dedicated to check presence of the locations in the
Input: id_user = id to check values
Output: booelan which signifies presence location for user
"""
try:
value_list = self.cursor.execute(f"SELECT * FROM {table_users_locations} where id_user={id_user};").fetchone()
if value_list:
return True
return False
except Exception as e:
msg = f"We faced problems with checking the locations. Error: {e}"
self.proceed_error(msg)
return False
def check_presence_groups(self, id_user:int) -> bool:
"""
Method which is dedicated to check presence of selected group by user
Input: id_user = id to check values
Output: boolean value which signifies presence groups for user
"""
try:
value_list = self.cursor.execute(f"SELECT * FROM {table_users_groups} where id_user={id_user};").fetchone()
if value_list:
return True
return False
except Exception as e:
msg = f"We faced problems with checking the groups for users. Error: {e}"
self.proceed_error(msg)
return False
def get_user_values(self, id_user:int) -> bool:
"""
Method which is dedicated to check the presence of the
Input: id_user = user id which is checked to this mission
Output: we successfully checked presence the use within the bot
"""
try:
value_user = self.cursor.execute(f'SELECT id from {table_users} where id={id_user}').fetchone()
if value_user:
return True
return False
except Exception as e:
msg = f'We found problems with checking values of the previous insertion, mistake: {e}'
self.proceed_error(msg)
def get_user_groups(self, id_chat:int, id_limit:int=0) -> set:
"""
Method which is dedicated to return values of the groups and group names
Input: id_chat = id of the chat which was using this feature
Output: set with lists of the values with the groups and names
"""
try:
if id_limit:
value_groups = self.cursor.execute(f"SELECT id_group FROM {table_users_groups} WHERE id_user={id_chat} LIMIT {id_limit};").fetchall()
else:
value_groups = self.cursor.execute(f"SELECT id_group FROM {table_users_groups} WHERE id_user={id_chat};").fetchall()
value_groups = []
return value_groups
except Exception as e:
msg = f"We found problems with returning groups to values, mistake: {e}"
self.proceed_error(msg)
return []
    def remove_location_manually(self, value_list:list) -> bool:
        """
        Remove the user's stored locations matching the given coordinates.
        Input: value_list = [id of the user, latitude, longitude]
        Output: (success flag, names of the locations that matched)
        """
        value_return = []
        try:
            value_id, value_latitude, value_longitude = value_list
            # All location ids ever linked to this user.
            values_id = self.cursor.execute(f"SELECT id_location FROM {table_users_locations} WHERE id_user={value_id};").fetchall()
            values_id = [str(v[0]) for v in values_id]
            values_id_str = ','.join(values_id)
            if values_id_str:
                value_names = self.cursor.execute(
                    f"SELECT name_location FROM {table_locations} WHERE id IN ({values_id_str}) AND latitude={value_latitude} AND longitude={value_longitude};"
                ).fetchall()
                if value_names:
                    value_return = [f[0] for f in value_names]
                # NOTE(review): this DELETE drops ALL of the user's location
                # links, not only the coordinate-matching ones -- confirm
                # this is intended.
                self.cursor.execute(f"DELETE FROM {table_users_locations} WHERE id_user={value_id} AND id_location IN ({values_id_str});")
                # NOTE(review): .fetchall() on a DELETE is a no-op.
                self.cursor.execute(
                    f"DELETE FROM {table_locations} WHERE id IN ({values_id_str}) AND latitude={value_latitude} AND longitude={value_longitude};").fetchall()
                self.connection.commit()
            return True, value_return
        except Exception as e:
            msg = f"We faced problems with the deletion of the location in that cases; Mistake: {e}"
            self.proceed_error(msg)
            return False, value_return
def get_user_coordinate(self, id_chat:int, id_location:int) -> list:
"""
Method which is dedicated to get location for the user
Input: id_chat = chat which required coordinates
id_location = id of the location of selected user
Output: we returned list values from the
"""
try:
#TODO add check on the checking the coordinates
value_list = self.cursor.execute(f"SELECT * FROM {table_locations} WHERE id={id_location};").fetchone()
if value_list:
return value_list
return []
except Exception as e:
msg = f"We found problems with returning elected coordinate to the selected user, mistake: {e}"
self.proceed_error(msg)
return []
def get_update_coordinate_name(self, id_location:int, name_new:str) -> None:
"""
Method which is dedicated to update values
Input: id_location = id of the location
name_new = new location name for it
Output: we updated coordinate name value
"""
try:
self.cursor.execute(f"UPDATE {table_locations} SET name_location = ? WHERE id=?;", (name_new, id_location))
self.connection.commit()
except Exception as e:
msg = f"We found problems with updating name of the ; Mistake: {e}"
self.proceed_error(msg)
def delete_location_user(self, id_chat:int, id_location:int) -> None:
"""
Method which is dedicated to delete location from the
Input: id_chat = chat id value
id_location = id of the location for the user
Output: We successfully deleted values of the location
"""
try:
self.cursor.execute(f"DELETE FROM {table_users_locations} WHERE id_user={id_chat} AND id_location={id_location};")
self.cursor.execute(f"DELETE FROM {table_locations} WHERE id={id_location};")
self.connection.commit()
except Exception as e:
msg = f'We faced problems with the deleting locations from the database. Mistake: {e}'
self.proceed_error(msg)
def get_group_values(self, group_id:int, group_name:str) -> bool:
"""
Method which is dedicated to check the presence of selected group or update name in other cases
Input: group_id = id of selected group
group_name = name of the selected group
Output: boolean value which shows presence of the
"""
try:
value_list = self.cursor.execute(f"SELECT id, name FROM {table_groups} WHERE id={group_id};").fetchone()
if not value_list:
return False
group_used_id, group_used_name = value_list
if group_used_name != group_name:
self.cursor.execute(f"UPDATE {table_groups} SET name={group_name} WHERE id={group_used_id};")
self.connection.commit()
return True
except Exception as e:
msg = f"We faced problems with checking of the group prensence. Mistake: {e}"
self.proceed_error(msg)
return False
def get_search_button_basic(self, groups_limit:int=value_limit_search) -> list:
"""
Method which is dedicated to make basic search
Input: groups_limit = limitations to the returnal of groups
Output: we get search values of the lists
"""
try:
value_list = self.cursor.execute(f"SELECT id, name FROM {table_groups} ORDER BY date_value DESC LIMIT({groups_limit});").fetchall()
return value_list
except Exception as e:
msg = f"We faced problems with get basic search groups. Mistake: {e}"
self.proceed_error(msg)
return []
def get_search_button_manually(self, input_string:str, groups_limit:int=value_limit_search) -> list:
"""
Method which is dedicated to make manual search of the groups for the more clear usage of it
Input: input_string = input string from the user
groups_limit = limitations for the returnal of groups
Output: we get groups for user to manually search
"""
try:
value_list = self.cursor.execute(
f"SELECT id, name FROM {table_groups} WHERE name LIKE ? ORDER BY date_value DESC LIMIT({groups_limit});", (f"%{input_string}%",)).fetchall()
return value_list
except Exception as e:
msg = f"We faced problems with getting manual search groups. Mistake: {e}"
self.proceed_error(msg)
return []
def produce_insert_group_user_connect(self, id_user:int, id_group:int, connect_name:str=name_join_default) -> bool:
"""
Method which is dedicated to make
Input: id_user = id of the user which must b used
id_group = id of the group which is required to make
connect_name = connected names with the values
Output: inserted values to the table which i required to make the connection
"""
try:
self.cursor.execute(f"INSERT INTO {table_user_group_connect}(id_user, id_group, text_message) VALUES (?,?,?);",
(id_user, id_group, connect_name))
self.connection.commit()
return True
except Exception as e:
msg = f"We faced problems with the setting the connection table of user and group. Mistake: {e}"
self.proceed_error(msg)
return False
def check_insert_group_user(self, id_user:int, id_group:int) -> bool:
"""
Method which is dedicated to check inserted this values previously
Input: id_user = id of the user
id_group = id of the group
Output: boolean value which signifies that we already made this
"""
try:
value_list = self.cursor.execute(f"SELECT * FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};").fetchone()
if value_list:
return True
return False
except Exception as e:
msg = f"We faced problems with the check previous insertion on th. Mistake: {e} "
self.proceed_error(msg)
return False
def return_inserted_message(self, id_user:int, id_group:int) -> str:
"""
Method which is dedicated to return previously
Input: id_user = id of the user
id_group = id of the group
Output: string value which user is required to send
"""
try:
value_string = self.cursor.execute(f"SELECT text_message FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};").fetchone()
if value_string:
return value_string[0]
return ''
except Exception as e:
msg = f"We found problems with getting message of groups to resend; Mistake: {e}"
self.proceed_error(msg)
return ''
def delete_user_group_values(self, id_user:int, id_group:int) -> None:
"""
Method which is dedicated to remove this value in cases of we connected groups
Input: id_user = id of the user
id_group = id of this group
Output: We removed all possibl values
"""
try:
self.cursor.execute(f"DELETE FROM {table_user_group_connect} WHERE id_user={id_user} AND id_group={id_group};")
self.connection.commit()
except Exception as e:
msg = f"We faced problems ith deletion from {table_user_group_connect} table, Mistake: {e}"
self.proceed_error(msg)
def return_group_values(self, id_user:int) -> set:
"""
Method which is dedicated to return for the user group values
Input: id_user = id of the selected user
Output: list of lists with the
"""
try:
value_list_id = self.cursor.execute(f"SELECT id_group FROM {table_users_groups} WHERE id_user={id_user};").fetchall()
if not value_list_id:
return [], []
value_list_id = ','.join([str(v[0]) for v in value_list_id])
value_list_group = self.cursor.execute(f"SELECT id, name FROM {table_groups} WHERE id IN ({value_list_id});").fetchall()
return [v[0] for v in value_list_group], [v[1] for v in value_list_group]
except Exception as e:
msg = f"We faced problems with getting values of the groups to the user; Mistake: {e}"
self.proceed_error(msg)
return [], []
    def get_current_id(self) -> int:
        """
        Return the highest location id currently stored.
        Input: None
        Output: the MAX(id) result row, or -1 on error
        """
        try:
            # NOTE(review): fetchone() yields a one-element tuple like (42,),
            # not a bare int as the annotation suggests -- confirm callers.
            return self.cursor.execute(f"SELECT MAX(id) FROM {table_locations};").fetchone()
        except Exception as e:
            msg = f'We faced some problems with the getting last id value. Mistake: {e}'
            self.proceed_error(msg)
            return -1
    def insert_location(self, id_list:list, name_location:str, latitude:float, longitude:float) -> bool:
        """
        Store a new named location and attach it to the user.
        Input: id_list = [id_user, username, name_first, name_last]
               name_location = display name of the location
               latitude = latitude of the coordinates
               longitude = longitude of the coordinates
        Output: True on success, False otherwise
        """
        try:
            id_user, username, name_first, name_last = id_list
            # insert_settings is defined outside this chunk; presumably it
            # seeds the per-user settings row -- TODO confirm.
            self.insert_settings(id_user)
            # Register the user first if this is their first interaction.
            if not self.get_user_values(id_user):
                self.insert_username(id_user, username, name_first, name_last)
            self.cursor.execute(f"INSERT INTO {table_locations} (name_location, latitude, longitude) VALUES (?, ?, ?);",
                (name_location, latitude, longitude))
            # lastrowid of the insert above is the new location's id.
            self.cursor.execute(f"INSERT INTO {table_users_locations} (id_user, id_location) VALUES (?, ?);", (id_user, self.cursor.lastrowid))
            self.connection.commit()
            return True
        except Exception as e:
            msg = f'We faced problems with the performing of the operating of the location inserting. Mistake: {e}'
            self.proceed_error(msg)
            return False
def make_group_insertion(self, group_id:int, group_name:str) -> bool:
"""
Method which is dedicated to make the group insertion
Input: group_id = id of the selected values
group_name = name of the group
Output: we successfully created
"""
try:
self.cursor.execute(f"INSERT INTO {table_groups} (id, name) VALUES (?, ?);", (group_id, group_name))
self.connection.commit()
return True
except Exception as e:
msg = f"We faced problems with isertion of the groups. Mistake: {e}"
self.proceed_error(msg)
return False
def check_chat_id(self, id_chat:int) -> set:
"""
Method which is dedicated to check that
Input: id_chat = value chat which was previously used
Output: boolean values for the check
"""
try:
value_user = bool(self.cursor.execute(f"SELECT id FROM {table_users} WHERE id={id_chat};").fetchone())
value_group = bool(self.cursor.execute(f"SELECT id FROM {table_groups} WHERE id={id_chat};").fetchone())
return value_user, value_group
except Exception as e:
msg = f"We faced problems with check on which chat it can be used. Mistake: {e}"
self.proceed_error(msg)
return False, False
def connect_user_group(self, id_group:int, id_user:int) -> bool:
"""
Method which is dedicated to connect uer to the group
Input: id_group = id of selected user
id_user = id of the telegram user
Output: we inserted to the foreign keys values
"""
try:
self.cursor.execute(f"INSERT INTO {table_users_groups} (id_user, id_group) VALUES (?, ?);", (id_user, id_group))
self.connection.commit()
return True
except Exception as e:
msg = f'We have problems with the connection between user and group. Mistake: {e}'
self.proceed_error(msg)
return False
def disconnect_whole_group(self, id_group:int) -> bool:
"""
Method which is dedicated to remove whole group from the
Input: id_group = id in the groups table
Output: bool which signifies that we successfully removed all values
"""
try:
self.cursor.execute(f"DELETE FROM {table_groups} WHERE id={id_group};")
self.connection.commit()
return True
except Exception as e:
msg = f"We found problems with deletion of the whole group from the {table_groups} in database. Mistake: {e}"
self.proceed_error(msg)
return False
    def disconnect_user_group(self, id_user:int, id_group:int) -> set:
        """
        Remove the user's membership in a group.
        Input: id_user = value of the user id
               id_group = value of the group id
        Output: triple of booleans:
                (no error occurred,
                 this was the group's last member (caller may drop the group),
                 a membership existed at all)
        """
        try:
            # Number of users still linked to this group (including ours).
            check_value = self.cursor.execute(f"SELECT COUNT(id_user) FROM {table_users_groups} WHERE id_group={id_group};").fetchone()
            check_value = check_value[0] if check_value else 0
            if check_value:
                self.cursor.execute(f"DELETE FROM {table_users_groups} WHERE id_user={id_user} AND id_group={id_group};")
                self.connection.commit()
                if check_value == 1:
                    # Ours was the only link left.
                    return True, True, True
                return True, False, True
            else:
                # Nothing to delete: no membership rows for this group.
                return True, False, False
        except Exception as e:
            msg = f'We have problems with the connection deletion between user and group. Mistake: {e}'
            self.proceed_error(msg)
            return False, False, False
def check_user_group_connection(self, id_group:int, id_user:int) -> bool:
"""
Method which is dedicated to check that user has added group to the connection
Input: id_group = id of the selected group
id_user = id of the selected user
Output: boolean value that signifies that we have successfully
"""
try:
value_list = self.cursor.execute(f"SELECT * FROM {table_users_groups} WHERE id_group={id_group} AND id_user={id_user};").fetchone()
if value_list:
return True
return False
except Exception as e:
msg = f"We have problem with getting values from the {table_users_groups}. Mistake: {e}"
self.proceed_error(msg)
return False
def insert_group_additional(self, group_id:int, group_name:str) -> None:
"""
Method which is dedicated to add groups
Input: group_id = id of the group
group_name = name of the group
Output: We added new group in that cases
"""
try:
if not self.get_group_values(group_id, group_name):
self.make_group_insertion(group_id, group_name)
except Exception as e:
msg = f"We faced the problem with additional insertion of the values; Mistake: {e}"
self.proceed_error(msg)
def insert_user_group_additional(self, id_group:int, id_user:int) -> None:
"""
Method which is dedicated to directly insert user_group
Input: id_group = id of the selected group
id_user = id of the selected user
Output: we created connection between user and group; None
"""
try:
if not self.check_user_group_connection(id_group, id_user):
self.connect_user_group(id_group, id_user)
except Exception as e:
msg = f"We faced problems with additional insertion values; Mistake: {e}"
self.proceed_error(msg)
    def insert_group(self, group_id:int, group_name:str, id_user:int, username:str, name_first:str, name_last:str) -> bool:
        """
        Register a group and attach it to the (possibly new) user.
        Input: group_id = id of the group which was inserted
               group_name = name of the group
               id_user = user id value
               username = telegram username
               name_first = first name of the telegram user
               name_last = last name of the telegram user
        Output: True on success, False otherwise
        """
        try:
            # insert_settings is defined outside this chunk; presumably it
            # seeds the per-user settings row -- TODO confirm.
            self.insert_settings(id_user)
            # Register the user first if this is their first interaction.
            if not self.get_user_values(id_user):
                self.insert_username(id_user, username, name_first, name_last)
            self.insert_group_additional(group_id, group_name)
            self.insert_user_group_additional(group_id, id_user)
            self.connection.commit()
            return True
        except Exception as e:
            msg = f"We faced problem with inserting the group. Mistake: {e}"
            self.proceed_error(msg)
            return False
def insert_username(self, id_user:int, username:str, name_first:str, name_last:str) -> bool:
"""
Method which is dedicated to insert username to the
Input: id_username = id of the selected user
name_first = first name of the user
name_last = last name of the user
username = username of the
Output: we inserted username values
"""
try:
self.cursor.execute(f"INSERT INTO {table_users}(id, name_first, name_last, nickname) VALUES (?, ?, ?, ?);",
(id_user, name_first, name_last, username))
self.connection.commit()
return True
except Exception as e:
msg = f'We faced problem with inserting values within the database. Mistake: {e}'
self.proceed_error(msg)
return False
def return_user_name_default_bool(self, id_user:int) -> bool:
"""
Method to return values for the
Input: id_user = user id from the telegram
Output: boolean value for this values
"""
try:
name_default_boolean = self.cursor.execute(f"SELECT name_default_boolean FROM {table_users_settings} WHERE id_user={id_user};").fetchone()
if not name_default_boolean:
self.insert_settings(id_user)
return self.return_user_name_default_bool(id_user)
return bool(name_default_boolean[0])
except Exception as e:
msg = f"We faced problems with the work of the setting to the users. Mistake: {e}"
self.proceed_error(msg)
return False
def update_user_settings_default_name(self, id_user:int) -> None:
"""
Method which is dedicated to change the possibility of the default name
Input: id_user = id of all possible users between there
Output: Non, but the boolean value was successfully inserted
"""
try:
value_selected = self.return_user_name_default_bool(id_user)
value_bool_new = True if not value_selected else False
self.cursor.execute(f"UPDATE {table_users_settings} SET name_default_boolean={value_bool_new} WHERE id_user={id_user};")
self.connection.commit()
except Exception as e:
msg = f"We faced problems with the changing of the default name usage. Mistake: {e}"
self.proceed_error(msg)
def return_user_name_settings(self, id_user:int) -> str:
"""
Method which is dedicated to return default location name
Input: id_user = user id
Output: string of the default name
"""
try:
name_default = self.cursor.execute(f"SELECT name_default FROM {table_users_settings} WHERE id_user={id_user};").fetchone()
if not name_default:
self.insert_settings(id_user)
return self.return_user_name_settings(id_user)
return name_default[0]
except Exception as e:
msg = f"We faced problems with return default name. Mistake: {e}"
self.proceed_error(msg)
return name_loc_default
def return_user_text(self, id_user:int) -> str:
"""
Method which is dedicated to return default location name
Input: id_user = user id
Output: string of the default name
"""
try:
name_default = self.cursor.execute(f"SELECT text_sending FROM {table_users_settings} WHERE id_user={id_user};").fetchone()
if not name_default:
self.insert_settings(id_user)
return self.return_user_text(id_user)
return name_default[0]
except Exception as e:
msg = f"We faced problems with return default text. Mistake: {e}"
self.proceed_error(msg)
return entrance_bot_usage
def return_user_minutes(self, id_user:int) -> str:
"""
Method which is dedicated to return default location name
Input: id_user = user id
Output: string of the default name
"""
try:
name_default = self.cursor.execute(f"SELECT text_minutes FROM {table_users_settings} WHERE id_user={id_user};").fetchone()
if not name_default:
self.insert_settings(id_user)
return self.return_user_minutes(id_user)
return name_default[0]
except Exception as e:
msg = f"We faced problems with return default text. Mistake: {e}"
self.proceed_error(msg)
return value_message_default
def return_user_settings(self, id_user:int) -> list:
"""
Method which is dedicated to
Input: id_user = id from the telebot
Output: list with all values of the user's settings
"""
try:
value_settings = self.cursor.execute(f"SELECT * FROM {table_users_settings} WHERE id_user={id_user};").fetchone()
if not value_settings:
self.insert_settings(id_user)
return self.return_user_settings(id_user)
return value_settings
except Exception as e:
msg = f"We faced problems with the work of the setting to the users. Mistake: {e}"
self.proceed_error(msg)
return []
def insert_settings(self, id_user:int) -> bool:
"""
Method which is dedicated to insert the values to the
Input: id_user = user id value which requires for that
Output: boolean value which signifies that everything
"""
try:
value_check = self.cursor.execute(f"SELECT id_user FROM {table_users_settings} WHERE id_user={id_user};").fetchone()
if not value_check:
self.cursor.execute(f"INSERT INTO {table_users_settings}(id_user) VALUES ({id_user});")
self.connection.commit()
return True
except Exception as e:
msg = f'We faced problem with inserted settings to the user. Mistake: {e}'
self.proceed_error(msg)
return False
def get_user_coordinates(self, id:int) -> set:
"""
Method which is dedicated to produce the user coordinates of the
Input: id = id of the user which is required to find them
Output: list with the strings of coordinate names of the user, boolean with signifies maximum capacity
"""
try:
list_id = self.cursor.execute(f"SELECT id_location FROM {table_users_locations} WHERE id_user={id};").fetchall()
if not list_id:
return [], [], True
list_id = [str(l[0]) for l in list_id]
value_str = ','.join(list_id)
value_list = self.cursor.execute(f"SELECT name_location from {table_locations} WHERE id IN ({value_str});").fetchall()
return [f[0] for f in value_list], list_id, len(value_list) < value_limit
except Exception as e:
msg = f"We have problems with getting coordinates for the users. Mistake: {e}"
self.proceed_error(msg)
return [], [], False
def get_length_settings(self, id_user:int) -> set:
"""
Method which is dedicated to return values
Input: id_user = id of the user
Output: we returned values of length of the values
"""
try:
value_id_loc = self.cursor.execute(f"SELECT COUNT(*) FROM {table_users_locations} WHERE id_user={id_user};").fetchone()
value_id_group = self.cursor.execute(f"SELECT COUNT(*) FROM {table_users_groups} WHERE id_user={id_user};").fetchone()
if not value_id_loc and not value_id_group:
return 0, 0
return value_id_loc[0], value_id_group[0]
except Exception as e:
msg = f"We found problem with the getting lengthes of the locations and groups of the users. Mistake: {e}"
self.proceed_error(msg)
return 0, 0
def delete_poll(self, value_id:int) -> bool:
"""
Method which is dedicated to removing from the poll coolumns
Input: value_id = id of the poll
Output: remove values from poll column
"""
try:
self.cursor.execute(f"DELETE FROM {table_poll} WHERE id=?;", (value_id,))
self.connection.commit()
return True
except Exception as e:
msg = f'We faced problems with the deleting from the poll table; Mistake: {e}'
self.proceed_error(msg)
return False
def delete_poll_group(self, value_id:int) -> bool:
"""
Method which is dedicated to remove from the poll groups
Input: value_id = id of the poll
Output: removed from the poll_groups table
"""
try:
self.cursor.execute(f"DELETE FROM {table_poll_groups} WHERE id_poll=?;", (value_id,))
self.connection.commit()
return True
except Exception as e:
msg = f"We faced problems with deleting from the poll group table; Mistake: {e}"
self.proceed_error(msg)
return False
def produce_deletion_current_poll(self, value_id:int) -> bool:
"""
Methodw which is dedicated to produce deletion from the current id
Input: value_id = id of the poll
Output: we removed all values with the
"""
try:
self.delete_poll_group(value_id)
self.delete_poll(value_id)
return True
except Exception as e:
msg = f"We faced problems with the producing of the deletion; Mistake: {e}"
self.proceed_error(msg)
return False
def produce_deletion_previous_values_poll(self) -> bool:
"""
Method which is dedicated to remove every olda value from the database
Input: None
Output: we removed values which are too old for it
"""
try:
value_groups = self.cursor.execute(f"SELECT id from {table_poll} WHERE (julianday('now') - julianday(datetime)) * 24 * 60 > {value_old_default};").fetchall()
value_groups = [str(f[0]) for f in value_groups]
value_present = ','.join(value_groups)
self.cursor.execute(f"DELETE FROM {table_poll_groups} WHERE id_poll IN ({value_present});")
self.cursor.execute(f"DELETE FROM {table_poll} WHERE id IN ({value_present});")
self.connection.commit()
return True
except Exception as e:
msg = f"We faced problems with the getting old poll values and deleting them; Mistake: {e}"
self.proceed_error(msg)
return False
def produce_insertion_poll(self, value_list:list, id_user, latitude, longitude) -> bool:
"""
Method which is dedicated to make basic insertion to the
Input: value_list = list with conditions [index, id_group, id_poll]
Output: boolean value which was previously signed
"""
try:
value_id = value_list[0][-1]
self.cursor.execute(f"INSERT INTO {table_poll}(id, id_user, latitude, longitude) VALUES(?,?,?,?);", (value_id, id_user, latitude, longitude))
self.connection.commit()
return True
except Exception as e:
msg = f"We faced errors with the execution of insertion to the poll table; Mistake: {e}"
self.proceed_error(msg)
return False
def produce_insertion_poll_group(self, value_list:list) -> bool:
"""
Method which is dedicated to produce insertion to the poll group
Input: value_list = list for the insertion with values [index, id_group, id_poll]
Output: boolean value which signify that everything is okay
"""
try:
self.cursor.executemany(f"INSERT INTO {table_poll_groups}(id_int, id_group, id_poll) VALUES (?, ?, ?);", value_list)
self.connection.commit()
return True
except Exception as e:
msg = f'We faced problems with multiple group insertion to the database; Mistake: {e}'
self.proceed_error(msg)
return False
def return_poll_id(self, value_id:int) -> set:
"""
Method which is dedicated to return all necessary values for the poll id
Input: value_id = id of the poll which we would further use
Output: we get all required values for the sending to the user
"""
try:
value_coordinates = self.cursor.execute(f"SELECT id_user, latitude, longitude FROM {table_poll} WHERE id=?;", (value_id,)).fetchall()
value_lists = self.cursor.execute(f"SELECT id_group FROM {table_poll_groups} WHERE id_poll=?;", (value_id,)).fetchall()
value_coordinates = value_coordinates[-1] if value_coordinates else []
value_lists = [f[0] for f in value_lists] if value_lists else []
return value_coordinates, value_lists
except Exception as e:
msg = f"We faced problems with getting values from the database via poll id; Mistake: {e}"
self.proceed_error(msg)
return [], []
def produce_multiple_insertion_poll(self, value_list:list, chat_id:int, latitude, longitude) -> bool:
"""
Method which is dedicated to insert values for the quizez
Input: value_list = list with values of the [index, id_group, id_poll]
latitude = coordinate value of latitude
longitude = coordinate value of longitude
Output: we inserted all values and previously checked
"""
try:
self.produce_deletion_previous_values_poll()
self.produce_insertion_poll(value_list, chat_id, latitude, longitude)
self.produce_insertion_poll_group(value_list)
return True
except Exception as e:
msg = f"We faced problems with insertion values to the {table_poll} and {table_poll_groups}; Mistake: {e}"
self.proceed_error(msg)
return False
    def produce_values(self) -> None:
        """
        Create (or connect to) the SQLite database used by the bot.

        Ensures the config folder exists; when the database file is absent,
        opens a new connection and creates the whole schema: users, locations,
        groups, the link tables between them, per-user settings, and the poll
        tables. When the file already exists, simply reuses it via
        create_connection().
        Input: Nothing
        Output: None; self.connection and self.cursor are ready for use
        """
        self.create_folder(self.folder_config)
        self.name_db = os.path.join(self.folder_config, name_db)
        # Build the schema only for a brand-new database file.
        if not os.path.exists(self.name_db) or not os.path.isfile(self.name_db):
            # check_same_thread=False: the connection is shared across bot
            # handler threads.
            self.connection = sqlite3.connect(self.name_db, check_same_thread=False)
            self.cursor = self.connection.cursor()
            # Telegram users known to the bot; id is the telegram user id.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_users}(
                id INTEGER PRIMARY KEY,
                name_first TEXT,
                name_last TEXT,
                nickname TEXT
            );""")
            # Saved locations; coordinates are stored as text.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_locations}(
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name_location TEXT,
                latitude TEXT,
                longitude TEXT
            );""")
            # Telegram groups; id is the telegram chat id.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_groups}(
                id INTEGER PRIMARY KEY,
                name TEXT,
                date_value DATETIME DEFAULT CURRENT_TIMESTAMP
            );""")
            # Many-to-many link: which users belong to which groups.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_users_groups}(
                id_user INTEGER,
                id_group INTEGER,
                PRIMARY KEY (id_user, id_group),
                FOREIGN KEY (id_user) REFERENCES {table_users} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION,
                FOREIGN KEY (id_group) REFERENCES {table_groups} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION
            );""")
            # Groups a user currently has selected for sending.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_groups_selected}(
                id_user INTEGER,
                id_group INTEGER,
                PRIMARY KEY (id_user, id_group),
                FOREIGN KEY (id_user) REFERENCES {table_users} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION,
                FOREIGN KEY (id_group) REFERENCES {table_groups} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION
            );""")
            # Many-to-many link: which locations belong to which users.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_users_locations}(
                id_user INTEGER,
                id_location INTEGER,
                PRIMARY KEY (id_user, id_location),
                FOREIGN KEY (id_user) REFERENCES {table_users} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION,
                FOREIGN KEY (id_location) REFERENCES {table_locations} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION
            );""")
            # Per-user settings; defaults come from module-level constants.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_users_settings}(
                id_user INTEGER PRIMARY KEY,
                text_sending TEXT DEFAULT "{entrance_bot_usage}",
                text_minutes INTEGER DEFAULT {value_message_default},
                name_default TEXT DEFAULT '{name_loc_default}',
                name_default_boolean BOOLEAN DEFAULT TRUE,
                name_default_audio TEXT,
                audio_boolean BOOLEAN DEFAULT FALSE,
                name_default_video TEXT,
                video_boolean BOOLEAN DEFAULT FALSE,
                message_priority INTEGER DEFAULT {value_message_selection_default}
            );""")
            # Per user-and-group message text used when joining.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_user_group_connect}(
                id_user INTEGER,
                id_group INTEGER,
                text_message TEXT DEFAULT "{name_join_default}",
                PRIMARY KEY(id_user, id_group),
                FOREIGN KEY (id_user) REFERENCES {table_users} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION,
                FOREIGN KEY (id_group) REFERENCES {table_groups} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION
            );""")
            # Poll header: who started it, where, and when (for expiry).
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_poll}(
                id INTEGER,
                id_user INTEGER,
                latitude TEXT,
                longitude TEXT,
                datetime DATETIME DEFAULT CURRENT_TIMESTAMP,
                PRIMARY KEY (id)
            );""")
            # Groups attached to each poll.
            self.cursor.execute(f"""
            CREATE TABLE IF NOT EXISTS {table_poll_groups}(
                id_int INTEGER,
                id_poll INTEGER,
                id_group INTEGER,
                PRIMARY KEY (id_poll, id_group),
                FOREIGN KEY (id_poll) REFERENCES {table_poll} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION,
                FOREIGN KEY (id_group) REFERENCES {table_groups} (id)
                    ON DELETE CASCADE
                    ON UPDATE NO ACTION
            );""")
            self.connection.commit()
        else:
            self.create_connection()
995,999 | b7d2203979cdac83b003060f38e9b6b6755a9ad1 | import pandas as pd
import quandl
import numpy as np
import math
import datetime
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib import style
import pickle
style.use('ggplot')
# --- Fetch and feature-engineer the price history --------------------------
# NOTE(review): requires a quandl API key / network access, and the WIKI
# dataset stopped updating in 2018 -- confirm the data source before reuse.
df = quandl.get('WIKI/GOOGL') # download the full daily price history of the stock
df["HL_PCT"] = ((df["Adj. High"] - df["Adj. Close"]) / df["Adj. Close"]) * 100 # intraday High-vs-Close spread, in percent
df["PCT_change"] = ((df["Adj. Close"] - df["Adj. Open"]) / df["Adj. Open"]) * 100 # daily Close-vs-Open move, in percent
df=df[["Adj. Close","HL_PCT","PCT_change","Adj. Volume"]]
forecast_col = "Adj. Close"; # the column we want to predict
df.fillna(-99999, inplace = True) # sentinel for missing data so sklearn treats it as an outlier
# Horizon: predict 1% of the dataset's length into the future (~34 trading days).
forecast_out = int(math.ceil(0.01*len(df)))
df0 = df[["Adj. Close","HL_PCT","PCT_change","Adj. Volume"]] # keep a copy of original data
df = df[:-forecast_out] # remove the last days
df["label"] = df[forecast_col].shift(-forecast_out) # label = the close price forecast_out days later
# so we are training the machine to predict 34 days in the future
print (forecast_out, df.tail(50))
x = np.array(df.drop(['label'], 1)) # feature matrix: all columns except the label
y = np.array(df['label']) # target vector
x = preprocessing.scale(x) # standardize features to zero mean / unit variance
x_lately = x[-forecast_out:] # the most recent rows, which have no label yet
x = x[:-forecast_out]
df.dropna(inplace=True) # drop rows whose shifted label is NaN
y = np.array(df['label'])
# --- Train and predict -----------------------------------------------------
clf = LinearRegression()
clf.fit(x, y) # fit the linear model on the labelled history
forecast_set = clf.predict(x_lately) # predict for the unlabelled most-recent rows
df['Forecast'] = np.nan
d = df0.index.values
dates = d[-forecast_out:] # dates the forecast is assumed to cover
# NOTE(review): because x_lately was taken from the already-truncated df,
# the predictions actually correspond to dates shifted one horizon earlier;
# uncomment the next line to visualize the真实 alignment.
# dates = d[-forecast_out*2:] # these are the days that is actually predicting (uncomment to see)
for i in range(len(forecast_set)):
    df.loc[dates[i]] = [np.nan for _ in range(len(df.columns)-1)] + [forecast_set[i]]
print(df.tail(50), forecast_set)
# --- Plot actuals vs forecast ----------------------------------------------
df0["Adj. Close"].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel("Date")
plt.ylabel("Price")
plt.show()
y = 0 #breakpoint here
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.