index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
22,600 | b5ee2a41e60d5abd787c224f476fa6ea17547f2b | import networkx as nx
import matplotlib.pyplot as plt
import wikipedia as wk
import sys
from random import randint
def main():
    """Entry point placeholder — no behavior implemented yet.

    The original `def main():` had no body at all, which is a SyntaxError;
    `pass` makes the stub importable and runnable.
    """
    pass


if __name__ == "__main__":
    main()
|
22,601 | feec6e4fbaaf466e5e0a28a74769f8f60a035cfc | import numpy as np
import cv2
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
X_train = np.load('X.npy')
y_train = np.load('y.npy')
X_test = np.load('X_test.npy')
y_test = np.load('y_test.npy')
print(y_train.shape)
print(y_test.shape)
X_train = np.vstack((X_train, X_test))
y_train = np.concatenate((y_train, y_test.reshape(-1)))
#X_train, X_test, y_train, y_test = train_test_split(X,y,
# test_size = 0.2, random_state = 42)
clf = LinearSVC(C = 0.01)
#clf = SVC(C = 0.1, kernel = 'rbf')
clf.fit(X_train,y_train)
y_predict = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_predict)
print(accuracy)
joblib.dump(clf,'clf.pkl')
'''
N = y_test.shape[0]
for i in range(N):
if (y_predict[i] != y_test[i]):
img = X_test[i,:3072].reshape((32,32,3))
img = cv2.resize(img,(100,100))
s = 'actual'+str(y_test[i])+'pred'+str(y_predict[i])
cv2.imshow(s,img)
cv2.waitKey()
cv2.destroyAllWindows()
'''
|
22,602 | dcc0423f26e4d062cc0b301325c9a0ae9ad5b9a0 | # --- Day 1: No Time for a Taxicab ---
#
# Santa's sleigh uses a very high-precision clock to guide its movements,
# and the clock's oscillator is regulated by stars.
# Unfortunately, the stars have been stolen... by the Easter Bunny.
# To save Christmas, Santa needs you to retrieve all fifty stars by
# December 25th.
#
# Collect stars by solving puzzles.
# Two puzzles will be made available on each day in the advent calendar;
# the second puzzle is unlocked when you complete the first.
# Each puzzle grants one star. Good luck!
#
# You're airdropped near Easter Bunny Headquarters in a city somewhere.
# "Near", unfortunately, is as close as you can get - the instructions
# on the Easter Bunny Recruiting Document the Elves intercepted start here,
# and nobody had time to work them out further.
#
# The Document indicates that you should start at the given coordinates
# (where you just landed) and face North. Then, follow the provided sequence:
# either turn left (L) or right (R) 90 degrees,
# then walk forward the given number of blocks, ending at a new intersection.
#
# There's no time to follow such ridiculous instructions on foot,
# though, so you take a moment and work out the destination.
# Given that you can only walk on the street grid of the city,
# how far is the shortest path to the destination?
#
# For example:
#
# Following R2, L3 leaves you 2 blocks East and 3 blocks North,
# or 5 blocks away.
# R2, R2, R2 leaves you 2 blocks due South of your starting position,
# which is 2 blocks away.
# R5, L5, R5, R3 leaves you 12 blocks away.
# How many blocks away is Easter Bunny HQ?
#
# Your puzzle answer was 246.
#
# --- Part Two ---
#
# Then, you notice the instructions continue on the back of the
# Recruiting Document.
# Easter Bunny HQ is actually at the first location you visit twice.
#
# For example, if your instructions are R8, R4, R4, R8,
# the first location you visit twice is 4 blocks away, due East.
#
# How many blocks away is the first location you visit twice?
#
# Your puzzle answer was 124.
# ----------------------------------------------------------------------------
import re
f = open('inputs/day01.txt', 'r')
data = f.read()
class Location:
    """Tracks a walker on a city grid: position, facing, and visited points."""

    # Successor facing for each (current facing, turn letter) pair.
    _TURNS = {
        'North': {'R': 'East', 'L': 'West'},
        'South': {'R': 'West', 'L': 'East'},
        'East': {'R': 'South', 'L': 'North'},
        'West': {'R': 'North', 'L': 'South'},
    }
    # Unit step (dx, dy) for each facing.
    _STEPS = {'North': (0, 1), 'South': (0, -1), 'East': (1, 0), 'West': (-1, 0)}

    def __init__(self):
        self.x = 0
        self.y = 0
        self.cardinal = 'North'
        self.next_move = None
        self.previous_locations = []

    def current_location(self):
        """Return the current (x, y) grid coordinates."""
        return (self.x, self.y)

    def parse_move(self, string):
        """Parse a token like 'R2' into a (direction, distance) tuple and stash it."""
        turn_letter = re.search('R|L', string).group(0)
        step_count = int(re.search('[0-9]+', string).group(0))
        self.next_move = (turn_letter, step_count)
        return self.next_move

    def move(self, direction=None, distance=None):
        """Turn then walk; omitted arguments fall back to the last parsed move."""
        direction = self.next_move[0] if direction is None else direction
        distance = self.next_move[1] if distance is None else distance
        self.turn(direction)
        for _ in range(distance):
            self.move1(self.cardinal)
        self.next_move = None
        return (self.current_location())

    def move1(self, cardinal):
        """Advance one block toward *cardinal*, recording the point we leave."""
        self.previous_locations.append(self.current_location())
        dx, dy = self._STEPS.get(cardinal, (0, 0))
        self.x += dx
        self.y += dy

    def turn(self, direction):
        """Rotate 90 degrees ('R' or 'L'); any other letter leaves the facing unchanged."""
        self.cardinal = self._TURNS[self.cardinal].get(direction, self.cardinal)
        return self.cardinal
if __name__ == '__main__':
    # Read the comma-separated move list, e.g. "R2, L3, ...".
    with open('inputs/day01.txt') as f:
        data = f.read()
    moves = re.split(',', data)
    bunny = Location()
    # Part 1: walk every move; answer is the Manhattan distance from origin.
    for move in moves:
        bunny.parse_move(move)
        bunny.move()
    print(abs(bunny.x) + abs(bunny.y))
    # Correct answer is 246
    # Part 2: the FIRST location visited twice.
    # The original scanned backwards with an off-by-one slice [0:(i-1)]
    # (which silently skipped the element at index i-1) and printed every
    # revisited point, forcing a manual scroll. A forward scan with a seen-set
    # finds exactly the first repeat.
    seen = set()
    for location in bunny.previous_locations:
        if location in seen:
            print(location)
            print(abs(location[0]) + abs(location[1]))
            break
        seen.add(location)
    # Correct answer is (-109, -15), or 124
|
22,603 | 9bd9f324b85c2146e48a1c4f48bbba729c24f14e | import json
from discord import Color
# Quotes are loaded once at import time from the bundled JSON file.
with open('data/udyr_quotes.json') as f:
    QUOTES = json.load(f)
# Cached so random selection does not recompute the length per request.
QUOTES_LEN = len(QUOTES)
BOT_NAME = 'Udyr Bot'
COMMAND_PREFIX = '!'
# Help text shown to users: one line per supported command.
HELP_TEXT = '\n '.join([
    '!quote - Random Udyr quote',
    '!help - This help message',
    '!github - Get the GitHub address of the project',
    '!summ - Get Ranked Information for a summoner: `!summ Donger Dingo`, `!summ yassuo --region NA`',
    '!game - Get Information for a currently playing game: `!game Donger Dingo`, `!game Imaqtpie --region NA`'
])
GITHUB_PROJECT_URL = 'https://github.com/jakubclark/udyr_bot'
# Skeleton for discord embeds sent by the bot; callers extend this dict.
BASE_EMBED = {
    'color': Color.dark_green().value,
    'thumbnail': {
        'url': 'https://raw.githubusercontent.com/jakubclark/udyr_bot/master/data/udyr1.png',
        'height': 45,
        'width': 45
    }
}
|
22,604 | bd9cdce28e29196845deb4ba2f39650d507afd2c | import uuid
from copy import copy
from dataclasses import dataclass, field, replace
from datetime import datetime
from typing import Mapping, Union, Optional, Tuple
import requests
from preacher import __version__ as _version
from preacher.core.context import Context, closed_context
from preacher.core.datetime import now
from preacher.core.status import Statused, Status
from preacher.core.util.error import to_message
from .request import Request
from .response import Response, ResponseBody
from .url_param import resolve_url_params
_DEFAULT_HEADERS = {"User-Agent": f"Preacher {_version}"}
class ResponseBodyWrapper(ResponseBody):
    """Adapts a ``requests.Response`` body to Preacher's ``ResponseBody`` interface."""

    def __init__(self, res: requests.Response):
        self._response = res

    @property
    def text(self) -> str:
        """The body decoded as text."""
        return self._response.text

    @property
    def content(self) -> bytes:
        """The raw body bytes."""
        return self._response.content
class ResponseWrapper(Response):
    """Adapts a ``requests.Response`` to Preacher's ``Response`` interface."""

    def __init__(self, id: str, res: requests.Response):
        self._id = id
        self._response = res
        self._wrapped_body = ResponseBodyWrapper(res)

    @property
    def id(self) -> str:
        return self._id

    @property
    def elapsed(self) -> float:
        """Elapsed request time in seconds."""
        return self._response.elapsed.total_seconds()

    @property
    def status_code(self) -> int:
        return self._response.status_code

    @property
    def headers(self) -> Mapping[str, str]:
        # A plain dict (not requests' case-insensitive mapping) so jq can
        # consume it; names are lower-cased to normalize lookups.
        return {key.lower(): val for (key, val) in self._response.headers.items()}

    @property
    def body(self) -> ResponseBody:
        return self._wrapped_body
@dataclass
class PreparedRequest:
    """Snapshot of the HTTP request actually sent, kept for reporting."""

    # HTTP method, e.g. "GET".
    method: str
    # Fully-resolved URL (base URL + path + query parameters).
    url: str
    # Request headers as sent.
    headers: Mapping[str, str]
    # Request body; None when the request carried no body.
    body: Union[None, str, bytes]
@dataclass(frozen=True)
class ExecutionReport(Statused):
    """Immutable outcome of one request-execution attempt."""

    # Overall outcome; SKIPPED by default until execution happens.
    status: Status = Status.SKIPPED
    # Wall-clock time when execution started.
    starts: datetime = field(default_factory=now)
    # The request as actually prepared; None when preparation failed.
    request: Optional[PreparedRequest] = None
    # Error message when status is FAILURE or UNSTABLE; None on success.
    message: Optional[str] = None
class Requester:
    """Executes Preacher requests over HTTP via ``requests``."""

    def __init__(
        self,
        base_url: str = "",
        timeout: Optional[float] = None,
    ):
        """
        Args:
            base_url: A base URL.
            timeout: The timeout in seconds. ``None`` means no timeout.
        """
        self._base_url = base_url
        self._timeout = timeout

    @property
    def base_url(self) -> str:
        return self._base_url

    def execute(
        self,
        request: Request,
        session: Optional[requests.Session] = None,
        context: Optional[Context] = None,
    ) -> Tuple[ExecutionReport, Optional[Response]]:
        """
        Executes a request.

        Args:
            request: A request.
            session: A session object to execute. A throwaway session is
                created (and closed) when omitted.
            context: Execution context.
        Returns:
            A tuple of execution report and response.
            When there is no response, the response will be ``None``.
        """
        if session is None:
            with requests.Session() as new_session:
                # BUG FIX: the caller-supplied context used to be dropped on
                # this recursion; forward it so explicit contexts survive
                # implicit session creation.
                return self.execute(request, session=new_session, context=context)
        context = context if context is not None else Context()
        starts = now()
        report = ExecutionReport(starts=starts)
        try:
            with closed_context(context, starts=starts) as context:
                prepped = self._prepare_request(request, context)
                # Honor environment proxy configuration for this request.
                proxies = session.rebuild_proxies(prepped, proxies=None)
        except Exception as error:
            # Preparation failure: report FAILURE with no response.
            message = to_message(error)
            report = replace(report, status=Status.FAILURE, message=message)
            return report, None
        # Record what will actually go over the wire.
        report = replace(
            report,
            request=PreparedRequest(
                method=prepped.method or "",
                url=prepped.url or "",
                headers=prepped.headers,
                body=prepped.body,
            ),
        )
        try:
            res = session.send(prepped, proxies=proxies, timeout=self._timeout)
        except Exception as error:
            # Transport failure (e.g. timeout): UNSTABLE rather than FAILURE.
            message = to_message(error)
            report = replace(report, status=Status.UNSTABLE, message=message)
            return report, None
        report = replace(report, status=Status.SUCCESS)
        response = ResponseWrapper(id=_generate_id(), res=res)
        return report, response

    def _prepare_request(self, request: Request, context: Context) -> requests.PreparedRequest:
        """Build the ``requests.PreparedRequest``: URL, default headers, body, params."""
        url = self._base_url + request.path
        # Copy so per-request headers never mutate the shared defaults.
        headers = copy(_DEFAULT_HEADERS)
        data = None
        if request.body:
            content_type = request.body.content_type
            headers["Content-Type"] = content_type
            data = request.body.resolve(context)
        # Explicit request headers win over defaults and Content-Type.
        headers.update(request.headers)
        params = resolve_url_params(request.params, context)
        req = requests.Request(
            method=request.method.value,
            url=url,
            headers=headers,
            params=params,
            data=data,
        )
        return req.prepare()
def _generate_id() -> str:
return str(uuid.uuid4())
|
22,605 | 87ed2593abadb600f1fcc513df8c7ab23d51c20a | import json
import warnings
from typing import Union, Dict
from dataclasses import dataclass
def _check_fields(item: Dict, cls):
out = {}
for k, v in item.items():
normalized_key = _normalize_key(k)
if normalized_key not in cls.__dataclass_fields__:
warnings.warn(f"Found field \"{k}\" in input, which is not part of dataclass \"{cls.__name__}\"",
RuntimeWarning)
else:
out[normalized_key] = v
return out
def _normalize_key(identifier: str):
res = identifier.replace("-", "_")
return res
@dataclass
class GenericDataClass:
    """Base dataclass that can build itself from JSON text or a parsed dict."""

    @classmethod
    def from_json(cls, input: Union[str, Dict]):
        """Construct an instance from a JSON string or an already-parsed dict.

        Keys are normalized and unknown ones dropped before the dataclass
        constructor is invoked.
        """
        parsed = json.loads(input) if isinstance(input, str) else input
        return cls(**_check_fields(parsed, cls))
@dataclass
class EventClass(GenericDataClass):
    """Dataclass base for payloads delivered as events.

    NOTE(review): ``from_event`` is a thin alias for ``from_json``; the event
    is presumably a JSON string or an already-parsed mapping — confirm with
    callers.
    """
    @classmethod
    def from_event(cls, event):
        # Delegate straight to the JSON/dict constructor on the base class.
        return cls.from_json(event)
|
22,606 | 7fa90a7710236b8492c7d4eb3e04bce83147c957 | import os
import sys
from argparse import ArgumentParser
import random
# # python.dataScience.notebookFileRoot=${fileDirname}
# wdir = os.path.abspath(os.getcwd() + "/../../")
# sys.path.append(wdir)
# print(sys.path)
# print(wdir)
import text_loaders as tl
import rnn_encoder_decoder as encdec
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
import pytorch_lightning.metrics.functional as plfunc
from pytorch_lightning.loggers import TensorBoardLogger
#%%
class Seq2SeqCorrector(pl.LightningModule):
    """Encoder decoder pytorch module for trainning seq2seq model with teacher forcing

    Module try to learn mapping from one sequence to antother. This implementation try to learn to reverse string of chars
    """

    @staticmethod
    def add_model_specific_args(parent_parser):
        # Hyperparameter flags owned by the model; trainer/datamodule flags
        # are added elsewhere.
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument("--emb_dim", type=int, default=32)
        parser.add_argument("--hidden_dim", type=int, default=64)
        parser.add_argument("--dropout", type=float, default=0.1)
        return parser

    def __init__(
        self,
        vocab_size,
        padding_index=0,
        emb_dim=8,
        hidden_dim=32,
        dropout=0.1,
        **kwargs,
    ) -> None:
        super().__init__()
        self.vocab_size = vocab_size
        # dynamic, based on tokenizer vocab size defined in datamodule
        self.input_dim = vocab_size
        self.output_dim = vocab_size
        # Encoder/decoder share the same embedding and hidden sizes.
        self.enc_emb_dim = emb_dim  # ENC_EMB_DIM
        self.dec_emb_dim = emb_dim  # DEC_EMB_DIM
        self.enc_hid_dim = hidden_dim  # ENC_HID_DIM
        self.dec_hid_dim = hidden_dim  # DEC_HID_DIM
        self.enc_dropout = dropout  # ENC_DROPOUT
        self.dec_dropout = dropout  # DEC_DROPOUT
        self.pad_idx = padding_index
        self.save_hyperparameters()
        # NOTE(review): assumes --max_epochs is always supplied via kwargs;
        # a missing key raises KeyError here.
        self.max_epochs = kwargs["max_epochs"]
        self.learning_rate = 0.0005
        # self.input_src = torch.LongTensor(1).to(self.device)
        # self.input_src_len = torch.LongTensor(1).to(self.device)
        # self.input_trg = torch.LongTensor(1).to(self.device)
        # todo: remove it this blocks loading state_dict from checkpoints
        # Error(s) in loading state_dict for Seq2SeqCorrector:
        # size mismatch for input_src: copying a param with shape
        # torch.Size([201, 18]) from checkpoint,
        # the shape in current model is torch.Size([1]).
        # self.register_buffer("input_src", torch.LongTensor(1))
        # self.register_buffer("input_src_len", torch.LongTensor(1))
        # self.register_buffer("input_trg", torch.LongTensor(1))
        # Padding positions are excluded from the loss.
        self._loss = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
        self.attention = encdec.Attention(self.enc_hid_dim, self.dec_hid_dim)
        # INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT
        self.encoder = encdec.Encoder(
            self.input_dim,
            self.enc_emb_dim,
            self.enc_hid_dim,
            self.dec_hid_dim,
            self.enc_dropout,
        )
        self.decoder = encdec.Decoder(
            self.output_dim,  # OUTPUT_DIM,
            self.dec_emb_dim,  # DEC_EMB_DIM,
            self.enc_hid_dim,  # ENC_HID_DIM,
            self.dec_hid_dim,  # DEC_HID_DIM,
            self.dec_dropout,  # DEC_DROPOUT,
            self.attention,
        )
        self._init_weights()

    def _init_weights(self):
        # Small normal init for weights, zeros for everything else (biases).
        for name, param in self.named_parameters():
            if "weight" in name:
                nn.init.normal_(param.data, mean=0, std=0.01)
            else:
                nn.init.constant_(param.data, 0)

    def create_mask(self, src):
        # True where src holds a real token (not padding);
        # layout changes from [src len, batch] to [batch, src len].
        mask = (src != self.pad_idx).permute(1, 0)
        return mask

    def forward(self, src, src_len, trg, teacher_forcing_ratio=0.5):
        # src = [src len, batch size]
        # src_len = [batch size]
        # trg = [trg len, batch size]
        # teacher_forcing_ratio is probability to use teacher forcing
        # e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
        batch_size = src.shape[1]
        trg_len = trg.shape[0]
        trg_vocab_size = self.decoder.output_dim
        # tensor to store decoder outputs TODO: change to registered buffer in pyLightning
        decoder_outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(
            self.device
        )
        # encoder_outputs is all hidden states of the input sequence, back and forwards
        # hidden is the final forward and backward hidden states, passed through a linear layer
        encoder_outputs, hidden = self.encoder(src, src_len)
        mask = self.create_mask(src)
        # mask = [batch size, src len]
        # without sos token at the beginning and eos token at the end
        # first input to the decoder is the <sos> tokens
        input = trg[0, :]
        # starting with input=<sos> (trg[0]) token and try to predict next token trg[1] so loop starts from 1 range(1, trg_len)
        for t in range(1, trg_len):
            # insert input token embedding, previous hidden state, all encoder hidden states
            # and mask
            # receive output tensor (predictions) and new hidden state
            output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)
            # place predictions in a tensor holding predictions for each token
            decoder_outputs[t] = output
            # decide if we are going to use teacher forcing or not
            teacher_force = random.random() < teacher_forcing_ratio
            # get the highest predicted token from our predictions
            top1 = output.argmax(1)
            # if teacher forcing, use actual next token as next input
            # if not, use predicted token
            input = trg[t] if teacher_force else top1
        return decoder_outputs

    def loss(self, logits, target):
        # Cross-entropy with padding ignored (see __init__).
        return self._loss(logits, target)

    def configure_optimizers(self):
        # return optim.Adam(self.parameters(), lr=5e-4)
        # optimizer = optim.Adam(self.parameters(), lr=1e-3)
        # scheduler = optim.LambdaLR(optimizer, ...)
        # return [optimizer], [scheduler]
        # optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
        # scheduler = optim.lr_scheduler.InverseSquareRootLR(optimizer, self.lr_warmup_steps)
        # return (
        #     [optimizer],
        #     [
        #         {
        #             "scheduler": scheduler,
        #             "interval": "step",
        #             "frequency": 1,
        #             "reduce_on_plateau": False,
        #             "monitor": "val_loss",
        #         }
        #     ],
        # )
        # AdamW with a per-step OneCycle schedule over the full training run.
        optimizer = optim.AdamW(self.parameters(), lr=self.learning_rate)
        lr_scheduler = {
            "scheduler": optim.lr_scheduler.OneCycleLR(
                optimizer,
                max_lr=self.learning_rate,
                steps_per_epoch=int(len(self.train_dataloader())),
                epochs=self.max_epochs,
                anneal_strategy="linear",
                final_div_factor=1000,
                pct_start=0.01,
            ),
            "name": "learning_rate",
            "interval": "step",
            "frequency": 1,
        }
        return [optimizer], [lr_scheduler]

    def training_step(self, batch, batch_idx):
        """One optimization step: forward with teacher forcing, CE loss on shifted targets."""
        src_batch, trg_batch = batch
        src_seq = src_batch["src_ids"]
        # change from [batch, seq_len] -> to [seq_len, batch]
        src_seq = src_seq.transpose(0, 1)
        src_lengths = src_batch["src_lengths"]
        trg_seq = trg_batch["trg_ids"]
        # change from [batch, seq_len] -> to [seq_len, batch]
        trg_seq = trg_seq.transpose(0, 1)
        # trg_lengths = trg_batch["trg_lengths"]
        # resize input buffers, should speed up training and help
        # with memory leaks https://discuss.pytorch.org/t/how-to-debug-causes-of-gpu-memory-leaks/6741
        # self.input_src.resize_(src_seq.shape).copy_(src_seq)
        # self.input_src_len.resize_(src_lengths.shape).copy_(src_lengths)
        # self.input_trg.resize_(trg_seq.shape).copy_(trg_seq)
        # just for testing lr scheduler
        # output = torch.randn((*trg_seq.size(), self.output_dim), requires_grad=True, device=trg_seq.device)
        # output = self.forward(self.input_src, self.input_src_len, self.input_trg)
        # old version of forward, with tensors from dataloader
        output = self.forward(src_seq, src_lengths, trg_seq)
        # do not know if this is a problem, loss will be computed with sos token
        # without sos token at the beginning and eos token at the end
        output = output[1:].view(-1, self.output_dim)
        # trg = trg_seq[1:].view(-1)
        trg = trg_seq[1:].reshape(-1)
        # trg = [(trg len - 1) * batch size]
        # output = [(trg len - 1) * batch size, output dim]
        loss = self.loss(output, trg)
        self.log(
            "train_loss",
            loss.item(),
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return loss

    def validation_step(self, batch, batch_idx):
        """validation is in eval mode so we do not have to use
        placeholder input tensors
        """
        src_batch, trg_batch = batch
        src_seq = src_batch["src_ids"]
        # change from [batch, seq_len] -> to [seq_len, batch]
        src_seq = src_seq.transpose(0, 1)
        src_lengths = src_batch["src_lengths"]
        trg_seq = trg_batch["trg_ids"]
        # change from [batch, seq_len] -> to [seq_len, batch]
        trg_seq = trg_seq.transpose(0, 1)
        trg_lengths = trg_batch["trg_lengths"]
        # Teacher forcing disabled (ratio 0): pure free-running decoding.
        outputs = self.forward(src_seq, src_lengths, trg_seq, 0)
        # # without sos token at the beginning and eos token at the end
        logits = outputs[1:].view(-1, self.output_dim)
        # trg = trg_seq[1:].view(-1)
        trg = trg_seq[1:].reshape(-1)
        # trg = [(trg len - 1) * batch size]
        # output = [(trg len - 1) * batch size, output dim]
        loss = self.loss(logits, trg)
        # take without first sos token, and reduce by 2 dimension, take index of max logits (make prediction)
        # seq_len * batch size * vocab_size -> seq_len * batch_size
        pred_seq = outputs[1:].argmax(2)
        # change layout: seq_len * batch_size -> batch_size * seq_len
        pred_seq = pred_seq.T
        # change layout: seq_len * batch_size -> batch_size * seq_len
        trg_batch = trg_seq[1:].T
        # compere list of predicted ids for all sequences in a batch to targets
        acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))
        # need to cast to list of predicted sequences (as list of token ids) [ [seq1_tok1, seq1_tok2, ...seq1_tokN],..., [seqK_tok1, seqK_tok2, ...seqK_tokZ]]
        predicted_ids = pred_seq.tolist()
        # need to add additional dim to each target reference sequence in order to
        # convert to format needed by bleu_score function [ seq1=[ [reference1], [reference2] ], seq2=[ [reference1] ] ]
        target_ids = torch.unsqueeze(trg_batch, 1).tolist()
        # bleu score needs two arguments
        # first: predicted_ids - list of predicted sequences as a list of predicted ids
        # second: target_ids - list of references (can be many, list)
        bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(
            self.device
        )  # torch.unsqueeze(trg_batchT,1).tolist())
        self.log(
            "val_loss",
            loss,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )
        self.log(
            "val_acc",
            acc,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )
        self.log(
            "val_bleu_idx",
            bleu_score,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            sync_dist=True,
        )
        return loss, acc, bleu_score
if __name__ == "__main__":
    # look to .vscode/launch.json file - there are set some args
    parser = ArgumentParser()
    # add PROGRAM level args
    parser.add_argument("--N_samples", type=int, default=256 * 10)
    parser.add_argument("--N_valid_size", type=int, default=32 * 10)
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--num_workers", type=int, default=0)
    # NOTE(review): --dataset_path is parsed but the active datamodule below
    # (ABCSec2SeqDataModule) never receives it; only the commented-out
    # SeqPairJsonDataModule would use it.
    parser.add_argument(
        "--dataset_path",
        type=str,
        default="./data/10k_sent_typos_wikipedia.jsonl",
    )
    # add model specific args
    parser = Seq2SeqCorrector.add_model_specific_args(parser)
    # add all the available trainer options to argparse
    # ie: now --gpus --num_nodes ... --fast_dev_run all work in the cli
    parser = pl.Trainer.add_argparse_args(parser)
    args = parser.parse_args()
    dm = tl.ABCSec2SeqDataModule(
        batch_size=args.batch_size,
        N_random_samples=args.N_samples,
        N_valid_size=args.N_valid_size,
        num_workers=args.num_workers,
    )
    # dm = tl.SeqPairJsonDataModule(
    #     path=args.dataset_path,
    #     batch_size=args.batch_size,
    #     n_samples=args.N_samples,
    #     n_valid_size=args.N_valid_size,
    #     num_workers=args.num_workers,
    # )
    dm.prepare_data()
    dm.setup("fit")
    # to see results run in console
    # tensorboard --logdir tb_logs/
    # then open browser http://localhost:6006/
    log_desc = f"RNN with attention model vocab_size={dm.vocab_size} data_size={dm.dims}, emb_dim={args.emb_dim} hidden_dim={args.hidden_dim}"
    print(log_desc)
    logger = TensorBoardLogger(
        "model_corrector", name="pl_tensorboard_logs", comment=log_desc
    )
    # Local import keeps the callback dependency next to its single use.
    from pytorch_lightning.callbacks import LearningRateMonitor

    # Log the OneCycle learning rate at every step.
    lr_monitor = LearningRateMonitor(logging_interval="step")
    trainer = pl.Trainer.from_argparse_args(
        args, logger=logger, replace_sampler_ddp=False, callbacks=[lr_monitor]
    )  # , distributed_backend='ddp_cpu')
    model_args = vars(args)
    model = Seq2SeqCorrector(
        vocab_size=dm.vocab_size, padding_index=dm.padding_index, **model_args
    )
    # most basic trainer, uses good defaults (1 gpu)
    trainer.fit(model, dm)
    # sample cmd
    # python seq2seq_trainer.py --dataset_path /data/10k_sent_typos_wikipedia.jsonl \
    # --gpus=2 --max_epoch=5 --batch_size=16 --num_workers=4 \
    # --emb_dim=128 --hidden_dim=512 \
    # --log_gpu_memory=True --weights_summary=full \
    # --N_samples=1000000 --N_valid_size=10000 --distributed_backend=ddp --precision=16 --accumulate_grad_batches=4 --val_check_interval=640 --gradient_clip_val=2.0 --track_grad_norm=2
    # tensorboard dev --logdir model_corrector/pl_tensorboard_logs/version??
|
22,607 | 6c3960fca3f5a8e654726e59e8821690782fdbad | import bz2
import os
import pickle
import scrappybara.config as cfg
def path_exists(path):
    """Whether a file/directory exists at *path* (thin os.path.exists wrapper)."""
    return os.path.exists(path)
def files_in_dir(path):
    """Returns a list of entry names found in a directory (files and subdirs alike)."""
    return os.listdir(path)
def txt_file_reader(path):
    """Opens a text file for reading with the project encoding.

    Returns the open handle; the caller is responsible for closing it
    (use as a context manager).
    """
    return open(path, encoding=cfg.ENCODING)
def txt_file_writer(path):
    """Opens a text file for writing (truncating) with the project encoding.

    Returns the open handle; the caller is responsible for closing it.
    """
    return open(path, 'w', encoding=cfg.ENCODING)
def bz2_file_reader(path):
    """Opens a .bz2 compressed file in text mode; caller closes the handle."""
    return bz2.open(path, 'rt')
def bz2_file_bytes_reader(path):
    """Opens a .bz2 compressed file in binary mode; caller closes the handle."""
    return bz2.open(path, 'rb')
def load_pkl_file(path):
    """Deserialize and return the Python object pickled at *path*."""
    with open(path, 'rb') as stream:
        return pickle.load(stream)
def save_pkl_file(python_object, path):
    """Pickle *python_object* to *path*.

    Uses a context manager so the handle is flushed and closed even on error;
    the original opened the file inline and leaked the handle.
    """
    with open(path, 'wb') as pkl_file:
        pickle.dump(python_object, pkl_file)
def load_set_from_txt_file(path, value_type=str):
    """Read a text file into a set: one stripped, converted value per line."""
    with txt_file_reader(path) as txt_file:
        values = set()
        for line in txt_file:
            values.add(value_type(line.strip()))
        return values
def load_dict_from_txt_file(path, key_type=str, value_type=str):
    """Read a two-column tab-separated text file into a dict.

    Each line is stripped, split on a single tab, and the two parts are
    converted with *key_type* / *value_type*.
    """
    with txt_file_reader(path) as txt_file:
        mapping = {}
        for line in txt_file:
            key, value = line.strip().split('\t')
            mapping[key_type(key)] = value_type(value)
        return mapping
|
22,608 | abbd09b34d81762e1348358b7cf80c4f4fe86c10 | # coding=utf-8
class CostCenter:
    """Cost aggregate for the whole company plus each regional office.

    The public interface is unchanged: every ``set_<attr>``/``get_<attr>``
    accessor of the original hand-written version still exists and behaves
    identically — each ``*_revenue_cost`` setter stores ``abs(value)``, every
    other setter stores the value unchanged. The ~150 lines of copy-pasted
    accessors are generated below from two small tables instead.
    """

    # '' is the company-wide bucket; the rest are office prefixes
    # (cd=Chengdu, hz=Hangzhou, sz=Shenzhen, xa=Xi'an, bj=Beijing,
    #  szhou=Suzhou, nj=Nanjing).
    _PREFIXES = ('', 'cd_', 'hz_', 'sz_', 'xa_', 'bj_', 'szhou_', 'nj_')
    # (field name, whether its setter normalizes with abs()).
    _FIELDS = (
        ('revenue_cost', True),
        ('labor_cost', False),
        ('reimbursement_cost', False),
        ('workstation_cost', False),
        ('tax_rate', False),
    )

    def __init__(self):
        # Every prefixed attribute starts at 0.0, exactly as before.
        for prefix in self._PREFIXES:
            for field, _ in self._FIELDS:
                setattr(self, prefix + field, 0.0)


def _install_cost_center_accessors():
    """Attach set_<attr>/get_<attr> methods for every prefix/field pair."""
    for prefix in CostCenter._PREFIXES:
        for field, use_abs in CostCenter._FIELDS:
            attr = prefix + field

            # Default arguments freeze the per-attribute bindings (avoids the
            # late-binding-closure pitfall inside the loop).
            def setter(self, value, _attr=attr, _abs=use_abs):
                setattr(self, _attr, abs(value) if _abs else value)

            def getter(self, _attr=attr):
                return getattr(self, _attr)

            setattr(CostCenter, 'set_' + attr, setter)
            setattr(CostCenter, 'get_' + attr, getter)


_install_cost_center_accessors()
|
22,609 | 6122d4402bbb02ae06c176892319c164a33a4b2e | n = int(input())
# Process n operations on a list of (key, value) records; `n` is read on the
# preceding line. Keys are kept as strings, so all range checks below are
# lexicographic — presumably intended by the exercise; confirm.
pairs = []
for i in range(n):
    l = input().strip().split()
    if l[0] == "1":
        # op 1: append a new record [key, int value].
        pairs.append([l[1], int(l[2])])
    elif l[0] == "2":
        # op 2: negate the value of every record whose key lies in the
        # inclusive (lexicographic) range [min(l[1], l[2]), max(l[1], l[2])].
        a = min(l[1], l[2])
        b = max(l[1], l[2])
        for i in range(len(pairs)):  # NOTE(review): shadows the outer index `i`
            if pairs[i][0] >= a and pairs[i][0] <= b:
                pairs[i][1] *= -1
    else:
        # any other op: query — print the value of the LAST record whose key
        # equals l[1], or -1 when no such record exists.
        answer = -1
        for i, x in enumerate(pairs):
            if x[0] == l[1]:
                answer = x[1]
        print(answer)
22,610 | 17bca249d23d323010a13bbe2e7dca427bb1357e | """
Strings
A string consists of a sequence of characters. You use
single or double quotation marks to represent them
"""
single_quote = 'This is a single message'
double_quote = "Hey it is my books"
single_char_string = "A"
empty_string = ""
single_within_double_quote = "Opportunities don't happen. You create them"
double_within_single_quote = "Why did she call the man 'smart'?"
same_quotes = 'I\'ve an idea'
triple_quote_string = '''This
is
triple
quote
'''
triple_quote_string
type(single_quote)
"""
str() function
returns a string which is considered an informal or nicely
printable representation of the given object
str(object)
"""
str(10)
create_string = str()
type(create_string)
"""
Basic String Operations
strings can be concatenated with +
strings can be repeated with *
"""
string_1 = "face"
string_2 = "book"
concatenated_string = string_1 + string_2
concatenated_string
concatenated_string_with_space = "Hi " + "There"
concatenated_string_with_space
singer = 50 + "cent" #Type error
singer = str(50) + "cent"
singer
repitition_of_string = "wow" * 5
repitition_of_string
"""
Membership operators
in
not in
"""
fruit_string = "apple is a fruit"
fruit_sub_string = "apple"
fruit_sub_string in fruit_string
another_fruit_string = "orange"
another_fruit_string not in fruit_string
"""
String Comparison
You can compare strings based on ASCII value using
<, >, <=, >=, ==, !=
"""
"january" == "jane"
"january" != "jane"
"january" < "jane"
"january" > "jane"
"january" <= "jane"
"january" >= "jane"
"filled" > ""
"""
Built-In Functions Used On strings
len() number of characters in string
max() returns character having highest ASCII value
min() returns character having lowest ASCII value
"""
count_characters = len("eskimos")
count_characters
max("axel")
min("brad")
"""
Accessing Characters in String by Index Number
string_name[index]
"""
word_phrase = "be yourself"
word_phrase[0]
word_phrase[1]
word_phrase[2]
word_phrase[3]
word_phrase[10]
word_phrase[11]
word_phrase[-1]
word_phrase[-2]
"""
String Slicing and Joining
string_name[start:end[:step]]
you can access a sequence o characters by specifying a
range of index numbers separated by a colon
String slicing returns a sequence of characters beginning
at start and extending up to but not including end.
Can use position or negative integers
"""
healthy_drink = "green tea"
healthy_drink[0:3]
healthy_drink[0:5]
healthy_drink[6:]
healthy_drink[:]
healthy_drink[4:4]
healthy_drink[6:20]
healthy_drink[-3:-1]
healthy_drink[6:-1]
newspaper = "new york times"
newspaper[0:12:4]
newspaper[::4]
newspaper[::-1] |
22,611 | c4ecaf5e404e8843cfa180d5c50483072290b116 | from setuptools import setup
from os import path
curr_dir = path.abspath(path.dirname(__file__))
with open(path.join(curr_dir, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='wbsync',
version='0.1.3',
packages=['wbsync', 'wbsync.external', 'wbsync.synchronization',
'wbsync.triplestore', 'wbsync.util'],
url='https://github.com/weso/rdf-wb-sync',
license='MIT',
author='Alejandro González Hevia, Othmane Bakhtaoui',
author_email='alejandrgh11@gmail.com, b.othmane98@live.fr',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=[
'requests==2.23.0', 'rdflib==5.0.0', 'ontospy==1.9.8.3'
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
]
)
|
22,612 | 2ade6e7cc3f8b48fc17e710f52437bd5a6d2cf63 | ################################################
# VIEWS #
# Used for displaying things to users #
# Uses models to get stuff to show #
################################################
from flask import Flask, render_template, request, make_response, redirect, session, jsonify
from src import app
#######################
# Static pages #
# and errors #
#######################
@app.route('/')
@app.route('/index')
def index():
    """
    Returns the home page
    """
    home_page = render_template("index.html", title='Home')
    return make_response(home_page)
@app.route('/donate')
def donate():
    """Render the donation page."""
    page = render_template("donate.html", title='Donate')
    return page
@app.errorhandler(500)
def internal_error(error):
    """Render the custom internal-server-error page.

    Bug fix: this handler is registered for HTTP 500 but previously returned
    status 505 ("HTTP Version Not Supported"); return 500 instead.
    NOTE(review): the template is also named '505.html' — likely a typo for
    '500.html'; confirm which template file actually exists before renaming.
    """
    return render_template('505.html'), 500
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 (not found) page."""
    return render_template('404.html'), 404
@app.errorhandler(403)
def forbidden_error(error):
    """Render the custom 403 (forbidden) page."""
    return render_template('403.html'), 403
#######################
# Course info #
#######################
from models import dbtools
@app.route('/courses')
def courses():
    """Render the course catalogue from the SQLite course database."""
    db = dbtools.CourseDB('src/models/course_info.db', 'courses')
    # get_HTML/get_depts: presumably pre-rendered rows and the department
    # list for the letter links — confirm in models.dbtools.
    courses = db.get_HTML()
    depts = db.get_depts()
    db.close_db()
    resp = render_template('courses.html',
                           title='Courses',
                           courses=courses,
                           letter_links=depts)
    return resp
#######################
# Scheduling #
#######################
from models import scheduler
import json
PER_PAGE = 10
AMOUNT_OF_COURSES = 10
@app.route('/sched_entry')
def how_many_post():
    """
    Goes to form with AMOUNT_OF_COURSES text boxes to input
    courses to schedule, form action=/schedules, method=POST
    """
    default_courses = ['CS 442', 'CS 392', 'CS 519', 'MA 331']
    resp = make_response(render_template(
        "sched_entry.html",
        quantity=AMOUNT_OF_COURSES,
        title='Scheduler',
        default_vals=default_courses))
    # Clear any stale schedule results from a previous run.
    resp.set_cookie('course_combos', '', expires=0)
    return resp
@app.route('/schedules', methods=['GET','POST'])
def my_form_post():
    """
    Gets input from form, puts it in a list, gets the schedules,
    send JSON of course combinations and send then to /sched as
    a cookie
    """
    # Collect the AMOUNT_OF_COURSES text boxes (named text1..textN).
    text_list = []
    for i in range(1, AMOUNT_OF_COURSES + 1):
        text_list.append(request.form['text' + str(i)])
    # Drop empty boxes.
    final_list = [text for text in text_list if text != ""]
    # Bug fix: the original indexed final_list[-1] unconditionally, so an
    # empty submission raised IndexError (HTTP 500). Send the user back to
    # the entry form instead.
    if not final_list:
        return redirect('/sched_entry')
    # Canonical comma-separated, upper-cased course list.
    c_list = ",".join(final_list).upper().split(',')
    my_combos = scheduler.schedule(c_list)
    resp = make_response(redirect('/sched'))
    resp.set_cookie('course_combos', '', expires=0)
    resp.set_cookie('course_combos', json.dumps(my_combos))
    return resp
@app.route('/get_combos', methods=['GET'])
def getCombosAPI():
    """
    Upon a GET request containing csv course names in a query string...
    Find the combos and send them as JSON
    """
    all_args = request.args.lists()
    # First value of the first query-string parameter, split on commas.
    course_list = all_args[0][1][0].split(",")
    u_COURSE_LIST = map((lambda x: x.upper()), course_list)#make all caps just in case
    COURSE_LIST = map( str, u_COURSE_LIST)#unicode list -> list of python strs
    # NOTE(review): map() returns a list on Python 2 (this file's dialect,
    # judging by the commented `print "..."` statements elsewhere); under
    # Python 3 these would be lazy iterators — confirm the target interpreter.
    combos = scheduler.schedule(COURSE_LIST)
    return jsonify(combos)
def getCombosForPage(page_num, per_page, count_of_combos, combos):
    """Return the slice of *combos* that belongs to 1-based page *page_num*.

    *combos* maps string keys ("1", "2", ...) to schedules; the returned
    dict uses int keys so the page renders schedules in sorted order.
    """
    first_key = per_page * (page_num - 1) + 1
    page_combos = {}
    for key in range(first_key, first_key + per_page):
        if str(key) in combos:
            page_combos[key] = combos[str(key)]
    return page_combos
def isLastPage(page_num, count_of_combos, per_page):
    """Return True if *page_num* is the final page of the pagination."""
    # A page is last when every combo fits on or before it; return the
    # comparison directly instead of the original if/return-True/False chain.
    return count_of_combos <= (page_num * per_page)
@app.route('/sched/', defaults={'page': 1})
@app.route('/sched/page/<int:page>')
def scheduleMe(page):
    """
    Display schedules as links and iframes
    """
    # Schedules are carried between requests in the 'course_combos' cookie
    # (set by my_form_post as JSON).
    querystring_combos = request.cookies.get('course_combos')
    if not querystring_combos:
        # No schedule data -> nothing to show.
        return render_template('404.html'), 404
    combos = json.loads(querystring_combos)
    count = len(combos)
    pagination_needed = count > PER_PAGE
    this_page_combos = combos
    if pagination_needed:
        this_page_combos = getCombosForPage(page, PER_PAGE, count, combos)
    last_page = isLastPage(page, count, PER_PAGE)
    if not this_page_combos and page != 1:
        # Out-of-range page number.
        return render_template('404.html'), 404
    return render_template("sched.html",
                           title="Scheduler",
                           combos=this_page_combos,
                           combo_amount=str(count),
                           page=page,
                           last_page=last_page,
                           pagination=pagination_needed)
########################
# Random stuff #
########################
|
22,613 | 1ea15849b2caa7a002efa83bcc33526be5e324a6 | # Import packages
import speech_recognition as sr
import os
from gtts import gTTS
import datetime
import warnings
import calendar
import random
import wikipedia
#warnings.filterwarnings('ignore')
r = sr.Recognizer()
flag = 0
language = 'en'
# lang_dict ={1:'en',2:'hi'}
# n = int(input("Enter the language you want to talk:\n1: English\n2: Hindi\n"))
# lang = lang_dict[n-1]
#A functions to get persons name from text
def getPerson(text):
    """Return the two words following 'who is' in *text*, or None.

    The match is case-insensitive on 'who is'; exactly two following
    words are required (a one-word name yields None).
    """
    words = text.split()
    last_index = len(words) - 1
    for idx, word in enumerate(words):
        if idx + 3 > last_index:
            continue
        if word.lower() == 'who' and words[idx + 1].lower() == 'is':
            return words[idx + 2] + ' ' + words[idx + 3]
# Date function
def getDate():
    """Return today's date as a spoken-style sentence.

    Example: 'Today is Monday, March the 3rd .'
    Bug fixes versus the original lookup tables: ' April', ' October' and
    ' 4th' had stray leading spaces, and '24rd' should be '24th'.
    NOTE(review): the trailing ' .' (space before the period) is kept from
    the original output format — confirm whether it is intended.
    """
    now = datetime.datetime.now()
    my_date = datetime.datetime.today()
    weekday = calendar.day_name[my_date.weekday()]  # e.g. 'Sunday'
    monthNum = now.month
    dayNum = now.day
    # Month names, indexed by month number - 1.
    month_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
    # Ordinal day names, indexed by day number - 1.
    ordinalNumbers = ['1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th', '10th', '11th', '12th', '13th', '14th', '15th', '16th',
                      '17th', '18th', '19th', '20th', '21st', '22nd', '23rd', '24th', '25th', '26th', '27th', '28th', '29th', '30th', '31st']
    return 'Today is '+weekday+', '+month_names[monthNum - 1]+' the '+ ordinalNumbers[dayNum -1]+' .'
# Main listen/reply loop. Exits on a spoken exit keyword, on a Google API
# request error, or after two recognition failures.
while True:
    with sr.Microphone() as source:
        print("Speak Anything :")
        audio = r.listen(source)
        try:
            text = r.recognize_google(audio)
            mytext = text.lower()
            print("You said : {}".format(text))
            # Bug fix: reset the consecutive-failure counter on success.
            # The original never reset it, so after a single failed attempt
            # every later reply was skipped by the `flag != 0` check below.
            flag = 0
            # Exit keywords.
            if mytext == 'exit' or mytext == 'chup' or mytext == 'shutdown' or mytext == 'nikal':
                print("[Ending..]")
                break
            # Today's date.
            if "today's date" in mytext or "aaj ki tarikh" in mytext:
                get_date = getDate()
                mytext = get_date
            # Check to see if the user said 'who is' -> Wikipedia summary.
            elif 'who is' in mytext:
                person = getPerson(text)
                wiki = wikipedia.summary(person, sentences=2)
                mytext = wiki
        except sr.UnknownValueError:
            flag += 1
            print("Sorry could not recognize what you said")
        except sr.RequestError as e:
            # Bug fix: the original concatenated the exception object directly
            # ('...' + e), which raises TypeError instead of reporting the error.
            print('Request results from google speechrecognition service error' + str(e))
            break
    if flag == 2:
        # Two consecutive recognition failures: give up.
        print("[Ending..]")
        break
    elif flag != 0:
        # One failure so far: listen again without speaking a reply.
        continue
    else:
        print("Reply:", mytext)
        myobj = gTTS(text=mytext, lang=language, slow=False)
        myobj.save("voice.mp3")
        os.system("mpg321 voice.mp3")
|
22,614 | 871269c61e6d047d0ac607e780410629adc1b6b0 | from itertools import product
from hashlib import sha256
from z3 import *
def merge_sort(tbl, start, size):
    """Symbolically replay a merge sort over z3 Int variables.

    Instead of comparing concrete values, each merge step consumes one entry
    of the global decision array `d` (0 -> the high element came first) and
    asserts the matching inequality on the global Solver `s`. `p` indexes the
    next unused decision. Python 2 code: `size / 2` is integer division.
    NOTE(review): mirrors a specific binary's sort trace — the recursion size
    arithmetic (`size - half_rem_size`) is kept exactly as-is; confirm against
    the target before changing.
    """
    res = [0 for i in range(size)]
    global p
    if size >= 2:
        half_size = size / 2
        merge_sort(tbl, start, half_size)
        half_rem_size = size - half_size
        merge_sort(tbl, start + half_size, size - half_rem_size)
        i_hi = 0
        i_lo = 0
        i_cur = 0
        if half_rem_size >= 1:
            while True:
                c_hi = tbl[start + half_size + i_hi]
                c_lo = tbl[start + i_lo]
                if d[p] == 0:
                    # Decision 0: the high-half element was taken first.
                    s.add(c_lo > c_hi)
                    cur = c_hi
                    i_hi += 1
                else:
                    # Decision 1: the low-half element was taken first.
                    s.add(c_hi > c_lo)
                    cur = c_lo
                    i_lo += 1
                p += 1
                res[i_cur] = cur
                i_cur += 1
                if i_lo >= half_size or i_hi >= half_rem_size:
                    break
        # Copy whichever half was not exhausted.
        if half_size > i_lo:
            for i in range(half_size - i_lo):
                res[i_cur + i] = tbl[start + i_lo + i]
            i_cur = i_cur + half_size - i_lo
        if i_hi < half_rem_size:
            for i in range(size - i_hi - half_size):
                res[i_cur + i] = tbl[start + i_hi + half_size + i]
        # Write the merged (symbolic) order back in place.
        for i in range(size):
            tbl[start + i] = res[i]
# CTF-style solver (Python 2: `print l`). Recover a 16-value permutation whose
# merge-sort comparison trace matches the decision array `d`, then brute-force
# the byte layout whose SHA-256 matches the target digest.
inp = [Int('inp_%d' % i) for i in range(16)]
o_inp = inp[::]  # keep the original variable order; merge_sort permutes `inp`
d = [0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0]
c = 1  # NOTE(review): unused
p = 0  # decision index consumed by merge_sort
s = Solver()
for i in range(16):
    # Each input value is a nibble (0..15).
    s.add(inp[i] >= 0, inp[i] < 16)
merge_sort(inp, 0, 16)
assert(s.check() == sat)
m = s.model()
target = []
for i in range(16):
    target.append(m[o_inp[i]].as_long())
# Each value may be encoded as (0, v) or (v, 0); try every combination.
arr = []
for i in target:
    arr.append([(0, i), (i, 0)])
for l in product(*arr):
    # NOTE: rebinding `s` here shadows the Solver above (no longer needed).
    s = ''.join(chr(i[0]) + chr(i[1]) for i in l)
    if sha256(s).hexdigest() == "2e325c91c91478b35c2e0cb65b7851c6fa98855a77c3bfd3f00840bc99ace26b":
        print l
        break
|
22,615 | 902fa078b70067e63bc81166aafda02e2292cad2 | import sys
import threading
import time
import os
from serial import Serial
import seeed_python_ircamera
# NOTE: `global` at module level is a no-op; kept as in the original.
global minHue
global maxHue
hetaData = []              # raw heat frames (shared with the reader thread?)
lock = threading.Lock()    # presumably guards hetaData — confirm
minHue = 180
maxHue = 360
#app = QApplication(sys.argv)
#window = painter()
# NOTE(review): DataReader is not defined or imported in this file — this line
# raises NameError as written. It likely comes from seeed_python_ircamera or a
# removed Qt module (see the commented-out QApplication/painter lines).
dataThread = DataReader(None)
#dataThread.drawRequire.connect(window.draw)
dataThread.start()
#window.show()
#app.exec_()
|
22,616 | 3e9ecdba365403c99bf2b37d501701bc3dad2ef4 | import time
import os, sys
import tarfile
import json
from shutil import copyfile
from train import training_loop
def strip_quotes(args):
    """Strip surrounding double quotes from every value in *args*.

    Mutates the mapping in place and returns it for convenience.
    """
    for key in args:
        args[key] = args[key].strip('"')
    return args
if __name__ == "__main__":
HYPERPARAMETERS_PATH = sys.argv[1]
with open(HYPERPARAMETERS_PATH) as json_file:
args = json.load(json_file)
args = strip_quotes(args)
os.environ["CUDA_VISIBLE_DEVICES"]=args["gpu_id"]
output_dir = args["output_dir"]
input_dir = args["input_dir"]
training_loop(input_dir = input_dir,
output_dir = output_dir,
img_size_x = int(args["img_size_x"]),
img_size_y = int(args["img_size_y"]),
batch_size = int(args["batch_size"]),
num_epochs_1 = int(args["num_epochs_1"]),
num_epochs_2 = int(args["num_epochs_2"]),
lr_1 = float(args["lr_1"]),
lr_2 = float(args["lr_2"]),
gradient_accumulation = int(args["gradient_accumulation"]),
cv_fold = int(args["cv_fold"]),
num_workers = 8,
model_type = args["model_type"],
model_fname = args["model_fname"])
|
22,617 | fdd3d2190b769c00411c90e87782af0b4735885d | import numpy as np
from itertools import count
def day06(inp):
    """Solve Advent of Code 2017 day 6 (memory reallocation).

    *inp* is a whitespace-separated list of bank sizes. Repeatedly take the
    largest bank and redistribute its blocks round-robin. Returns a tuple
    (steps until a configuration repeats, length of the repeating cycle).
    """
    # np.fromstring(..., sep=' ') is deprecated; parse the text explicitly.
    blocks = np.array(inp.split(), dtype=np.int64)
    seens = {tuple(blocks): 0}
    for step in count(1):
        ind = blocks.argmax()
        val = blocks[ind]
        blocks[ind] = 0
        # Distribute `val` blocks one at a time, wrapping around the banks.
        np.add.at(blocks, np.arange(ind + 1, ind + 1 + val) % blocks.size, 1)
        key = tuple(blocks)
        if key in seens:
            return step, step - seens[key]
        seens[key] = step
|
22,618 | d04d1f394c8cabd07984bebf1d5145fae513cb0e | from sqlalchemy import and_, func, desc
from app.lib.dns.instances.search_params import SearchParams
from app import db
from app.lib.models.dns import DNSQueryLogModel, DNSZoneModel, DNSZoneTagModel
from flask_login import current_user
import datetime
class SearchManager:
    """Builds and runs filtered queries over the DNS query log."""

    def __init__(self, tag_manager, alias_manager):
        # Managers used to resolve tag names -> ids and alias names -> IPs.
        self.__tag_manager = tag_manager
        self.__alias_manager = alias_manager

    def search_from_request(self, request, paginate=True, method='get'):
        """Parse search parameters from an HTTP request and run the search.

        Returns a dict with the result rows, the parsed params and the
        available filter values (for rendering the search form).
        """
        params = SearchParams(request=request, method=method)
        return {
            'results': self.search(params, paginate=paginate),
            'params': params,
            'filters': self.get_filters()
        }

    def search(self, search_params, paginate=False):
        """Return DNS query log rows matching *search_params*.

        Non-admin users are restricted to their own zones. String filters
        support SQL wildcards when the value contains '%'. Returns a
        Pagination object when *paginate* is True, otherwise a list of rows.
        """
        query = DNSQueryLogModel.query
        # Default is that users can only search for themselves.
        user_ids = [current_user.id]
        if current_user.admin:
            # Plot-twist! Unless they are an admin.
            if search_params.user_id <= 0:
                # Search for everyone.
                user_ids = []
            elif search_params.user_id > 0:
                user_ids = [search_params.user_id]
        if len(user_ids) > 0:
            query = query.outerjoin(DNSZoneModel, DNSZoneModel.id == DNSQueryLogModel.dns_zone_id)
            query = query.filter(DNSZoneModel.user_id.in_(user_ids))
        if len(search_params.domain) > 0:
            if '%' in search_params.domain:
                query = query.filter(DNSQueryLogModel.domain.ilike(search_params.domain))
            else:
                # Exact match, case-insensitive.
                query = query.filter(func.lower(DNSQueryLogModel.domain) == search_params.domain.lower())
        if len(search_params.source_ip) > 0:
            if '%' in search_params.source_ip:
                query = query.filter(DNSQueryLogModel.source_ip.ilike(search_params.source_ip))
            else:
                query = query.filter(DNSQueryLogModel.source_ip == search_params.source_ip)
        if len(search_params.cls) > 0:
            query = query.filter(DNSQueryLogModel.cls == search_params.cls)
        if len(search_params.type) > 0:
            query = query.filter(DNSQueryLogModel.type == search_params.type)
        # Tri-state flags: 0/1 filter, anything else means "don't care".
        if search_params.matched in [0, 1]:
            query = query.filter(DNSQueryLogModel.found == bool(search_params.matched))
        if search_params.forwarded in [0, 1]:
            query = query.filter(DNSQueryLogModel.forwarded == bool(search_params.forwarded))
        if search_params.blocked in [0, 1]:
            query = query.filter(DNSQueryLogModel.blocked == bool(search_params.blocked))
        if len(search_params.tags) > 0:
            # Admins may match tags from any user.
            user_id = None if current_user.admin else current_user.id
            tag_ids = self.__tag_manager.get_tag_ids(search_params.tags, user_id=user_id)
            query = query.outerjoin(DNSZoneTagModel, DNSZoneTagModel.dns_zone_id == DNSQueryLogModel.dns_zone_id)
            query = query.filter(DNSZoneTagModel.tag_id.in_(tag_ids))
        date_from = search_params.full_date_from
        date_to = search_params.full_date_to
        if isinstance(date_from, datetime.datetime):
            query = query.filter(DNSQueryLogModel.created_at >= date_from)
        if isinstance(date_to, datetime.datetime):
            query = query.filter(DNSQueryLogModel.created_at <= date_to)
        if len(search_params.alias) > 0:
            # An alias is a named source IP; resolve it and filter on the IP.
            alias = self.__alias_manager.get(None, name=search_params.alias)
            if alias:
                query = query.filter(DNSQueryLogModel.source_ip == alias.ip)
        query = query.order_by(desc(DNSQueryLogModel.id))
        if paginate:
            rows = query.paginate(search_params.page, search_params.per_page, False)
        else:
            rows = query.all()
        return rows

    def get_filters(self):
        """Return the distinct classes/types/users/tags available as filters."""
        filters = {
            'classes': [],
            'types': [],
            'users': {},
            'tags': []
        }
        # Static SQL strings (no user input interpolated).
        sql = "SELECT cls FROM dns_query_log GROUP BY cls ORDER BY cls"
        results = db.session.execute(sql)
        for result in results:
            filters['classes'].append(result.cls)
        sql = "SELECT type FROM dns_query_log GROUP BY type ORDER BY type"
        results = db.session.execute(sql)
        for result in results:
            filters['types'].append(result.type)
        sql = "SELECT id, username FROM users GROUP BY id, username ORDER BY username"
        results = db.session.execute(sql)
        for result in results:
            filters['users'][result.id] = result.username
        user_id = None if current_user.admin else current_user.id
        tags = self.__tag_manager.all(user_id=user_id, order_column='name', order_by='asc')
        for tag in tags:
            if tag.name not in filters['tags']:
                filters['tags'].append(tag.name)
        return filters
|
22,619 | f2c299211e1e7f89d64230989bfb4b93535ceec0 | from output.models.nist_data.list_pkg.g_month_day.schema_instance.nistschema_sv_iv_list_g_month_day_pattern_2_xsd.nistschema_sv_iv_list_g_month_day_pattern_2 import NistschemaSvIvListGMonthDayPattern2
obj = NistschemaSvIvListGMonthDayPattern2(
value=[
"--05-22",
"--04-14",
"--09-03",
"--07-16",
"--05-07",
"--06-06",
"--09-13",
"--08-06",
"--04-19",
]
)
|
22,620 | a3bee74c6f33176a6a23438922e6d4d8b374cbf4 | CodeKata7 = int(input())
# Print 'Hello' once per requested repetition (CodeKata7 is read above).
for _ in range(CodeKata7):
    print('Hello')
22,621 | 7eaf92149fe7b65c7e514badc555d504c505166a | #Based on:
#https://wiki.python.org/moin/PyQt/Compass%20widget
#https://stackoverflow.com/questions/12011147/how-to-create-a-3-color-gradient-dial-indicator-the-one-that-shows-green-yellow
import sys
from PyQt4.QtGui import QVBoxLayout, QSlider, QSizePolicy, QWidget, QPainter, \
QConicalGradient, QPen, QPalette, QPolygon, QFont, QFontMetricsF, QApplication
from PyQt4.QtCore import QPoint, pyqtSignal, pyqtProperty, Qt, QRect, QPointF, pyqtSlot
class Example(QWidget):
    """Demo window: a horizontal slider (0-360) driving the gauge needle."""
    def __init__(self):
        super(Example, self).__init__()
        self.setGeometry(0, 0,700,700)
        self.move(300, 200)
        self.setWindowTitle('Dial Guage')
        # White window background.
        p = self.palette()
        p.setColor(self.backgroundRole(), Qt.white)
        self.setPalette(p)
        layout = QVBoxLayout(self)
        sld = QSlider(Qt.Horizontal);
        sld.setMinimum(0)
        sld.setMaximum(360)
        sld.setValue(0)
        layout.addWidget(sld)
        # Gauge starts fully filled (value 1.0).
        self.gauge = GaugeWidget(1.0)
        self.gauge.setSizePolicy(QSizePolicy(
            QSizePolicy.Expanding,
            QSizePolicy.Expanding))
        layout.addWidget(self.gauge)
        # Slider value (degrees) rotates the needle via the angle property.
        sld.valueChanged.connect(self.gauge.setAngle)
class GaugeWidget(QWidget):
angleChanged = pyqtSignal(float)
def __init__(self, initialValue=0, *args, **kwargs):
super(GaugeWidget, self).__init__(*args, **kwargs)
self.setValue(initialValue)
self._angle = 0.0
self._margins = 10
self._pointText = {0: "0", 45: "45", 90: "90", 135: "135", 180: "180",
225: "225", 270: "270", 315: "315"}
def setValue(self, val):
val = float(min(max(val, 0), 1))
self._value = -270 * val
self.update()
def paintEvent(self, e):
painter = QPainter(self)
painter.setRenderHint(painter.Antialiasing)
rect = e.rect()
gauge_rect = QRect(rect)
size = gauge_rect.size()
pos = gauge_rect.center()
gauge_rect.moveCenter( QPoint(pos.x()-size.width(), pos.y()-size.height()) )
gauge_rect.setSize(size*.9)
gauge_rect.moveCenter(pos)
painter.save()
painter.setBrush(Qt.gray)
gauge_rect2 = QRect(rect)
size2 = gauge_rect2.size()
pos2 = gauge_rect2.center()
gauge_rect2.moveCenter( QPoint(pos2.x()-size2.width(), pos2.y()-size2.height()) )
gauge_rect2.setSize(size2*.8)
gauge_rect2.moveCenter(pos2)
painter.setPen(Qt.NoPen)
grad = QConicalGradient(QPointF(gauge_rect.center()), 270.0)
grad.setColorAt(.75, Qt.green)
grad.setColorAt(.5, Qt.yellow)
grad.setColorAt(.1, Qt.red)
painter.setBrush(grad)
painter.drawPie(gauge_rect, 225.0*16, self._value*16)
painter.setBrush(Qt.gray)
painter.drawPie(gauge_rect,225.0*16, (90)*16)
painter.drawPie(gauge_rect2, 226.0*16, (self._value-2)*16)
painter.restore()
self.drawMarkings(painter)
self.drawNeedle(painter)
painter.end()
super(GaugeWidget,self).paintEvent(e)
def drawNeedle(self, painter):
painter.save()
painter.translate(self.width()/2, self.height()/2)
painter.rotate(self._angle)
scale = min((self.width() - self._margins)/120.0,
(self.height() - self._margins)/120.0)
painter.scale(scale, scale)
painter.setPen(QPen(Qt.NoPen))
painter.setBrush(Qt.black)
painter.drawPolygon(
QPolygon([QPoint(-6, 0), QPoint(0, -45), QPoint(6, 0),
QPoint(0, 45), QPoint(-6, 0)])
)
painter.setBrush(Qt.blue)
painter.drawPolygon(
QPolygon([QPoint(-3, -25), QPoint(0, -45), QPoint(3, -25),
QPoint(0, -30), QPoint(-3, -25)])
)
painter.restore()
def drawMarkings(self, painter):
painter.save()
painter.translate(self.width()/2, self.height()/2)
scale = min((self.width() - self._margins)/120.0,
(self.height() - self._margins)/120.0)
painter.scale(scale, scale)
font = QFont(self.font())
font.setPixelSize(5)
metrics = QFontMetricsF(font)
painter.setFont(font)
painter.setPen(Qt.black)
i = 0
while i < 360:
if i % 45 == 0:
painter.drawLine(0, -40, 0, -50)
painter.drawText(-(metrics.width(self._pointText[i])+1)/2.0, -52,
self._pointText[i])
else:
painter.drawLine(0, -45, 0, -50)
painter.rotate(15)
i += 15
painter.restore()
def angle(self):
return self._angle
@pyqtSlot(float)
def setAngle(self, angle):
if angle != self._angle:
self._angle = angle
self.angleChanged.emit(angle)
self.update()
angle = pyqtProperty(float, angle, setAngle)
def main():
app = QApplication(sys.argv)
ex = Example()
ex.show()
ex.raise_()
sys.exit(app.exec_())
if __name__ == '__main__':
main() |
22,622 | a941581cfd42fca826b3aa234d206e2155836e09 | score = {
'萧峰': 95,
'段誉': 97,
'虚竹': 89
}
print (score['萧峰'])
print(score.get('慕容复'))
score['慕容复'] = 88
print(score.get('慕容复'))
del score['萧峰']
print(score.get('萧峰')) |
22,623 | 0de23faa510a0ececfedffc7fc31c3acf41b9202 | from company.models import Company, DepartmentRelations
from employee.models import Employee
def get_correct_format_department_list():
    '''
    Returns a list in the format expected by the org-chart widget:
    list = [
        ['Department name + manager + staff (HTML)', 'Parent department', 'Description'],
        ...
    ], together with the Company instance.
    '''
    company = Company.objects.first()
    # Only the first company is used — there was no requirement to filter.
    correct_format_list = []
    relations = DepartmentRelations.objects.all()
    for relation in relations:
        department = relation.department
        # NOTE(review): one Employee query per relation (N+1) — consider
        # select_related/prefetch if this ever becomes a bottleneck.
        staff = Employee.objects.filter(department=department)
        base_string = f'<b>{department.name}</b><br>'
        for employee in staff:
            # 'MN' = manager (rendered first in the loop order), 'SB' = subordinate.
            if employee.post_type == 'MN':
                manager_name = f'{employee.last_name} {employee.first_name} {employee.patronymic}'
                manager_post = employee.post
                manager = f'{manager_name}<div style="color:red; font-style:italic">{manager_post}</div><br>'
                base_string = (base_string + manager)
            if employee.post_type == 'SB':
                subordinate_name = f'{employee.last_name} {employee.first_name} {employee.patronymic}'
                subordinate_post = employee.post
                subordinate = f'{subordinate_name}<div style="color:red; font-style:italic">{subordinate_post}</div>'
                base_string = (base_string + subordinate)
        # Node value + its HTML label.
        first = {
            'v': f'{department.name}',
            'f': base_string
        }
        # Parent node ('' for the root department).
        if relation.high_level_department:
            last = f'{relation.high_level_department}'
        else:
            last = ''
        about = 'Описание отсутствует'
        relation_pack = list()
        relation_pack.append(first)
        relation_pack.append(last)
        relation_pack.append(about)
        correct_format_list.append(relation_pack)
    return company, correct_format_list
22,624 | fc37847054fb7158c35442853066204f6e052d76 | numb = int(input("Digite o número de notas: "))
# Read `numb` grades (numb is read on the previous line) and print the average.
soma = 0
for i in range(0, numb):
    nota = float(input("Nota: "))
    soma += nota
print(f"A média das notas é: {soma/numb}")
|
22,625 | ee448c3f8208173604554170f0e1bacee979b12f | class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: None Do not return anything, modify board in-place instead.
"""
m = len(board)
n = len(board[0])
fangxiang_list = [(0, 1), (-1, 0), (0, -1), (1, 0)]
already_search_point = set()
for i in range(m):
for j in range(n):
if (i, j) in already_search_point:
continue
if board[i][j] == "X":
continue
search_points = [(i, j)]
already_search_point.add((i, j))
z = 0
is_can_tu_se = True
while z < len(search_points):
search_point = search_points[z]
search_point_x = search_point[0]
search_point_y = search_point[1]
z += 1
for fangxiang_x, fangxiang_y in fangxiang_list:
new_x = search_point_x + fangxiang_x
new_y = search_point_y + fangxiang_y
if new_x < 0 or new_x >= m or new_y < 0 or new_y >= n:
is_can_tu_se = False
continue
if (new_x, new_y) in already_search_point:
continue
if board[new_x][new_y] == "O":
already_search_point.add((new_x, new_y))
search_points.append((new_x, new_y))
if is_can_tu_se:
for points_x, points_y in search_points:
board[points_x][points_y] = "X"
solution = Solution()
solution.solve([["O","X","O"],["X","O","X"],["O","X","O"]])
|
22,626 | a271e9800d2b6eaad1648fd234a26d453c0336e1 | """Functional tests using the API with a fake Apple TV."""
from ipaddress import IPv4Address
import logging
import math
from typing import Optional
import pyatv
from pyatv.conf import AppleTV, ManualService
from pyatv.const import (
DeviceModel,
DeviceState,
FeatureName,
FeatureState,
InputAction,
MediaType,
OperatingSystem,
PowerState,
Protocol,
ShuffleState,
)
from pyatv.interface import OutputDevice
from pyatv.protocols.mrp.protobuf import CommandInfo_pb2
from pyatv.support.http import (
BasicHttpServer,
HttpRequest,
HttpResponse,
HttpSimpleRouter,
http_server,
)
from tests import common_functional_tests
from tests.fake_device import FakeAppleTV
from tests.fake_device.airplay import DEVICE_CREDENTIALS
from tests.fake_device.mrp import (
APP_NAME,
BUILD_NUMBER,
DEVICE_MODEL,
DEVICE_UID,
OS_VERSION,
PLAYER_IDENTIFIER,
VOLUME_STEP,
)
from tests.utils import faketime, stub_sleep, until
_LOGGER = logging.getLogger(__name__)
ARTWORK_BYTES = b"1234"
ARTWORK_MIMETYPE = "image/png"
ARTWORK_ID = "artwork_id1"
DEMO_APP_NAME = "Demo App"
TEST_PLAYER = "com.github.postlund.test"
class MRPFunctionalTest(common_functional_tests.CommonFunctionalTests):
    async def setUpAsync(self):
        """Describe the fake MRP/AirPlay services and connect a client to them."""
        await super().setUpAsync()
        self.conf = AppleTV(IPv4Address("127.0.0.1"), "Test device")
        self.conf.add_service(
            ManualService(
                "mrp_id", Protocol.MRP, self.fake_atv.get_port(Protocol.MRP), {}
            )
        )
        airplay_service = ManualService(
            "airplay_id",
            Protocol.AirPlay,
            self.fake_atv.get_port(Protocol.AirPlay),
            properties={"features": "0x1"},  # AirPlayVideoV1 supported
        )
        airplay_service.credentials = DEVICE_CREDENTIALS
        self.conf.add_service(airplay_service)
        self.atv = await self.get_connected_device()
        # Lazily created by serve_artwork(); closed in tearDown.
        self.artwork_server = None
    def tearDown(self):
        """Disconnect the client and stop the artwork HTTP server, if any."""
        self.atv.close()
        if self.artwork_server is not None:
            self.artwork_server.close()
        super().tearDown()
    async def get_application(self, loop=None):
        """Create the fake Apple TV with MRP and AirPlay services attached."""
        self.fake_atv = FakeAppleTV(self.loop)
        self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP)
        self.airplay_state, self.airplay_usecase = self.fake_atv.add_service(
            Protocol.AirPlay
        )
        return self.fake_atv.app
    async def get_connected_device(self):
        """Connect pyatv to the fake device described by self.conf."""
        return await pyatv.connect(self.conf, loop=self.loop)
    def supported_volume_controls(self):
        """Volume features the MRP protocol is expected to support."""
        return [
            FeatureName.VolumeUp,
            FeatureName.VolumeDown,
            FeatureName.Volume,
            FeatureName.SetVolume,
        ]
    async def serve_artwork(self, path: str) -> int:
        """Serve ARTWORK_BYTES at *path* on a local HTTP server; return its port."""
        class ArtworkHandler(HttpSimpleRouter):
            # Minimal router answering GET <path> with the static artwork blob.
            def __init__(self, path: str):
                super().__init__()
                self.add_route("GET", path, self.handle_artwork)
            def handle_artwork(self, request: HttpRequest) -> Optional[HttpResponse]:
                return HttpResponse(
                    "HTTP",
                    "1.0",
                    200,
                    "OK",
                    {"Content-Type": ARTWORK_MIMETYPE},
                    ARTWORK_BYTES,
                )
        self.artwork_server, port = await http_server(
            lambda: BasicHttpServer(ArtworkHandler(path))
        )
        return port
async def test_button_up_actions(self):
await self.atv.remote_control.up(action=InputAction.DoubleTap)
await self.wait_for_button_press("up", InputAction.DoubleTap)
await self.atv.remote_control.up(action=InputAction.Hold)
await self.wait_for_button_press("up", InputAction.Hold)
# --- Remote-control button tests -----------------------------------------
# Each test sends a button with a specific InputAction and waits until the
# fake device reports that exact (button, action) press.
async def test_button_down_actions(self):
await self.atv.remote_control.down(action=InputAction.DoubleTap)
await self.wait_for_button_press("down", InputAction.DoubleTap)
await self.atv.remote_control.down(action=InputAction.Hold)
await self.wait_for_button_press("down", InputAction.Hold)
async def test_button_left_actions(self):
await self.atv.remote_control.left(action=InputAction.DoubleTap)
await self.wait_for_button_press("left", InputAction.DoubleTap)
await self.atv.remote_control.left(action=InputAction.Hold)
await self.wait_for_button_press("left", InputAction.Hold)
async def test_button_right_actions(self):
await self.atv.remote_control.right(action=InputAction.DoubleTap)
await self.wait_for_button_press("right", InputAction.DoubleTap)
await self.atv.remote_control.right(action=InputAction.Hold)
await self.wait_for_button_press("right", InputAction.Hold)
async def test_button_top_menu(self):
await self.atv.remote_control.top_menu()
await self.wait_for_button_press("top_menu", InputAction.SingleTap)
async def test_button_home(self):
await self.atv.remote_control.home()
await self.wait_for_button_press("home", InputAction.SingleTap)
await self.atv.remote_control.home(action=InputAction.DoubleTap)
await self.wait_for_button_press("home", InputAction.DoubleTap)
await self.atv.remote_control.home(action=InputAction.Hold)
await self.wait_for_button_press("home", InputAction.Hold)
async def test_button_home_hold(self):
# home_hold() is a convenience wrapper equivalent to home(Hold).
await self.atv.remote_control.home_hold()
await self.wait_for_button_press("home", InputAction.Hold)
async def test_button_select_actions(self):
await self.atv.remote_control.select(action=InputAction.DoubleTap)
await self.wait_for_button_press("select", InputAction.DoubleTap)
await self.atv.remote_control.select(action=InputAction.Hold)
await self.wait_for_button_press("select", InputAction.Hold)
async def test_button_menu_actions(self):
await self.atv.remote_control.menu(action=InputAction.DoubleTap)
await self.wait_for_button_press("menu", InputAction.DoubleTap)
await self.atv.remote_control.menu(action=InputAction.Hold)
await self.wait_for_button_press("menu", InputAction.Hold)
# suspend/wakeup have no InputAction variants; only the last pressed
# button name recorded in the fake-device state is checked.
async def test_button_suspend(self):
await self.atv.remote_control.suspend()
await until(lambda: self.state.last_button_pressed == "suspend")
async def test_button_wakeup(self):
await self.atv.remote_control.wakeup()
await until(lambda: self.state.last_button_pressed == "wakeup")
# --- Shuffle state and artwork metadata tests ----------------------------
async def test_shuffle_state_albums(self):
self.usecase.example_video(shuffle=ShuffleState.Albums)
playing = await self.playing(shuffle=ShuffleState.Albums)
self.assertEqual(playing.shuffle, ShuffleState.Albums)
async def test_set_shuffle_albums(self):
self.usecase.example_video()
await self.atv.remote_control.set_shuffle(ShuffleState.Albums)
playing = await self.playing(shuffle=ShuffleState.Albums)
self.assertEqual(playing.shuffle, ShuffleState.Albums)
async def test_metadata_artwork_id(self):
self.usecase.example_video()
self.usecase.change_artwork(ARTWORK_BYTES, ARTWORK_MIMETYPE, ARTWORK_ID)
await self.playing(title="dummy")
self.assertEqual(self.atv.metadata.artwork_id, ARTWORK_ID)
async def test_metadata_artwork_id_no_identifier(self):
# Without an artwork identifier, the content identifier is used instead.
self.usecase.example_video(identifier="some_id")
self.usecase.change_artwork(ARTWORK_BYTES, ARTWORK_MIMETYPE, None)
await self.playing(title="dummy")
self.assertEqual(self.atv.metadata.artwork_id, "some_id")
async def test_metadata_artwork_erroneously_available(self):
self.usecase.example_video()
# Metadata suggests that artwork is available but no artwork is available
# when requested by client
self.usecase.change_artwork(None, ARTWORK_MIMETYPE, ARTWORK_ID)
await self.playing(title="dummy")
artwork = await self.atv.metadata.artwork(width=123, height=456)
self.assertIsNone(artwork)
async def test_metadata_artwork_width_and_height(self):
self.usecase.example_video()
self.usecase.change_artwork(
ARTWORK_BYTES, ARTWORK_MIMETYPE, width=111, height=222
)
await self.playing(title="dummy")
# Request one size but simulate that a smaller artwork was returned
artwork = await self.atv.metadata.artwork(width=123, height=456)
self.assertEqual(artwork.width, 111)
self.assertEqual(artwork.height, 222)
async def test_metadata_artwork_url(self):
# Artwork delivered via a plain URL served by the local test server.
port = await self.serve_artwork("/test")
self.usecase.example_video()
self.usecase.change_artwork(b"", "", url=f"http://localhost:{port}/test")
await self.playing(title="dummy")
artwork = await self.atv.metadata.artwork(width=123, height=456)
self.assertEqual(artwork.bytes, ARTWORK_BYTES)
self.assertEqual(artwork.mimetype, ARTWORK_MIMETYPE)
async def test_metadata_artwork_url_in_identifier(self):
# Identifier is a URL template ({w}x{h}{c}.{f}) that gets expanded with
# the requested dimensions before fetching.
port = await self.serve_artwork("/test/123x456bb.png")
self.usecase.example_video()
self.usecase.change_artwork(
b"",
"",
identifier=f"http://localhost:{port}/test/{{w}}x{{h}}{{c}}.{{f}}",
url=f"http://localhost:{port}/test/1200x1200bb.heic",
)
await self.playing(title="dummy")
artwork = await self.atv.metadata.artwork(width=123, height=456)
self.assertEqual(artwork.bytes, ARTWORK_BYTES)
self.assertEqual(artwork.mimetype, ARTWORK_MIMETYPE)
# --- Playing-item update, hash and power state tests ---------------------
async def test_item_updates(self):
self.usecase.video_playing(
False, "dummy", 100, 1, identifier="id", artist="some artist"
)
with faketime("pyatv", 0):
await self.playing(title="dummy")
# Trigger update of single item by changing title
self.usecase.change_metadata(title="foobar", identifier="id")
playing = await self.playing(title="foobar")
# Make sure other metadata is untouched
self.assertEqual(playing.title, "foobar")
self.assertEqual(playing.artist, "some artist")
self.assertEqual(playing.total_time, 100)
self.assertEqual(playing.position, 1)
async def test_item_id_hash(self):
initial_hash = (await self.atv.metadata.playing()).hash
# Verify that content identifier is used as hash
self.usecase.example_video(identifier="some_id")
playing = await self.playing(title="dummy")
self.assertEqual(playing.hash, "some_id")
# Ensure that we fall back to initial hash if nothing is playing
self.usecase.nothing_playing()
nothing_playing = await self.playing(device_state=DeviceState.Idle)
self.assertEqual(nothing_playing.hash, initial_hash)
async def test_metadata_playback_rate_device_state(self):
# Device state is derived from paused-flag and playback rate combined.
self.usecase.example_video(paused=False, playback_rate=0.0)
playing = await self.playing(title="dummy")
self.assertEqual(playing.device_state, DeviceState.Playing)
self.usecase.change_metadata(title="dummy2", playback_rate=1.0)
playing = await self.playing(title="dummy2")
self.assertEqual(playing.device_state, DeviceState.Playing)
self.usecase.example_video(paused=True, title="dummy3", playback_rate=0.0)
playing = await self.playing(title="dummy3")
self.assertEqual(playing.device_state, DeviceState.Paused)
async def test_power_state(self):
# Local listener records the (old, new) pair of every power transition.
class PowerListener:
def __init__(self):
self.old_state = None
self.new_state = None
def powerstate_update(self, old_state, new_state):
self.old_state = old_state
self.new_state = new_state
listener = PowerListener()
self.atv.power.listener = listener
# Check initial power state during connect
self.assertEqual(self.atv.power.power_state, PowerState.On)
# Check if power state changes after turn_off command
await self.atv.power.turn_off()
assert math.isclose(stub_sleep(), 0.1)
await until(lambda: self.atv.power.power_state == PowerState.Off)
await until(lambda: listener.old_state == PowerState.On)
await until(lambda: listener.new_state == PowerState.Off)
# Check if power state changes after turn_on command
await self.atv.power.turn_on()
await until(lambda: self.atv.power.power_state == PowerState.On)
await until(lambda: listener.old_state == PowerState.Off)
await until(lambda: listener.new_state == PowerState.On)
async def test_power_state_acknowledgement(self):
# await_new_state=True blocks until the device confirms the transition,
# so no polling with until() is needed here.
self.assertEqual(self.atv.power.power_state, PowerState.On)
await self.atv.power.turn_off(await_new_state=True)
self.assertEqual(self.atv.power.power_state, PowerState.Off)
await self.atv.power.turn_on(await_new_state=True)
self.assertEqual(self.atv.power.power_state, PowerState.On)
# --- Device info and feature-availability tests --------------------------
async def test_basic_device_info(self):
self.assertEqual(self.atv.device_info.operating_system, OperatingSystem.TvOS)
self.assertEqual(self.atv.device_info.build_number, BUILD_NUMBER)
self.assertEqual(self.atv.device_info.version, OS_VERSION)
self.assertEqual(self.atv.device_info.raw_model, DEVICE_MODEL)
self.assertEqual(self.atv.device_info.model, DeviceModel.Gen4K)
async def test_always_available_features(self):
# These features do not depend on what is currently playing.
self.assertFeatures(
FeatureState.Available,
FeatureName.Down,
FeatureName.Home,
FeatureName.HomeHold,
FeatureName.Left,
FeatureName.Menu,
FeatureName.Right,
FeatureName.Select,
FeatureName.TopMenu,
FeatureName.Up,
FeatureName.TurnOn,
FeatureName.TurnOff,
FeatureName.PowerState,
FeatureName.OutputDevices,
FeatureName.AddOutputDevices,
FeatureName.RemoveOutputDevices,
FeatureName.SetOutputDevices,
)
async def test_features_artwork(self):
# Artwork only becomes available once something with artwork is playing.
self.assertFeatures(FeatureState.Unavailable, FeatureName.Artwork)
self.usecase.example_video()
self.usecase.change_artwork(ARTWORK_BYTES, ARTWORK_MIMETYPE, ARTWORK_ID)
await self.playing(title="dummy")
self.assertFeatures(FeatureState.Available, FeatureName.Artwork)
async def test_features_with_supported_commands(self):
# Maps each pyatv feature to the protobuf command that enables it.
feature_map = {
FeatureName.Next: CommandInfo_pb2.NextTrack,
FeatureName.Pause: CommandInfo_pb2.Pause,
FeatureName.Play: CommandInfo_pb2.Play,
FeatureName.PlayPause: CommandInfo_pb2.TogglePlayPause,
FeatureName.Previous: CommandInfo_pb2.PreviousTrack,
FeatureName.Stop: CommandInfo_pb2.Stop,
FeatureName.SetPosition: CommandInfo_pb2.SeekToPlaybackPosition,
FeatureName.SetRepeat: CommandInfo_pb2.ChangeRepeatMode,
FeatureName.SetShuffle: CommandInfo_pb2.ChangeShuffleMode,
FeatureName.Shuffle: CommandInfo_pb2.ChangeShuffleMode,
FeatureName.Repeat: CommandInfo_pb2.ChangeRepeatMode,
FeatureName.SkipForward: CommandInfo_pb2.SkipForward,
FeatureName.SkipBackward: CommandInfo_pb2.SkipBackward,
}
# No supported commands by default
self.usecase.example_video()
await self.playing(title="dummy")
self.assertFeatures(FeatureState.Unavailable, *feature_map.keys())
# Inject all expected commands to be enabled
self.usecase.example_video(
title="dummy2", supported_commands=list(feature_map.values())
)
await self.playing(title="dummy2")
self.assertFeatures(FeatureState.Available, *feature_map.keys())
# --- App metadata, skip and play/pause tests -----------------------------
async def test_playing_app(self):
self.usecase.nothing_playing()
# Nothing playing => no app running
self.assertIsNone(self.atv.metadata.app)
self.assertEqual(
self.atv.features.get_feature(FeatureName.App).state,
FeatureState.Unavailable,
)
self.usecase.example_video()
await self.playing(title="dummy")
# Video playing with default app
self.assertEqual(self.atv.metadata.app.name, APP_NAME)
self.assertEqual(self.atv.metadata.app.identifier, PLAYER_IDENTIFIER)
self.assertEqual(
self.atv.features.get_feature(FeatureName.App).state, FeatureState.Available
)
# Change app display_name name
self.usecase.update_client(display_name=DEMO_APP_NAME)
self.usecase.change_metadata(title="dummy2")
await self.playing(title="dummy2")
self.assertEqual(self.atv.metadata.app.name, DEMO_APP_NAME)
# Do not include display name and re-use previous one
self.usecase.update_client(display_name=None)
self.usecase.change_metadata(title="dummy3")
await self.playing(title="dummy3")
self.assertEqual(self.atv.metadata.app.name, DEMO_APP_NAME)
async def test_skip_forward_backward(self):
self.usecase.example_video(
supported_commands=[
CommandInfo_pb2.SkipForward,
CommandInfo_pb2.SkipBackward,
],
skip_time=12,
)
# Get initial position and use as base
prev_position = (await self.playing(title="dummy")).position
await self.atv.remote_control.skip_forward()
self.usecase.change_metadata(title="dummy2")
metadata = await self.playing(title="dummy2")
self.assertEqual(metadata.position, prev_position + 12)
prev_position = metadata.position
# Change skip time 8 to verify that we respect provided values
self.usecase.change_state(title="dummy3", skip_time=8)
metadata = await self.playing(title="dummy3")
await self.atv.remote_control.skip_backward()
self.usecase.change_metadata(title="dummy4")
metadata = await self.playing(title="dummy4")
self.assertEqual(metadata.position, prev_position - 8)
async def test_button_play_pause(self):
self.usecase.example_video(supported_commands=[CommandInfo_pb2.TogglePlayPause])
await self.playing(title="dummy")
await self.atv.remote_control.play_pause()
await until(lambda: self.state.last_button_pressed == "playpause")
async def test_play_pause_emulation(self):
# Without TogglePlayPause support, play_pause() must be emulated with
# discrete play/pause depending on the current device state.
self.usecase.example_video(paused=False)
await self.playing(device_state=DeviceState.Playing)
self.assertFeatures(FeatureState.Unavailable, FeatureName.PlayPause)
await self.atv.remote_control.play_pause()
await until(lambda: self.state.last_button_pressed == "pause")
self.usecase.example_video(
paused=True,
supported_commands=[CommandInfo_pb2.Play, CommandInfo_pb2.Pause],
)
await self.playing(device_state=DeviceState.Paused)
self.assertFeatures(FeatureState.Available, FeatureName.PlayPause)
await self.atv.remote_control.play_pause()
await until(lambda: self.state.last_button_pressed == "play")
async def test_update_client_before_setstate(self):
# Client info received before SET_STATE must still be associated.
self.usecase.update_client(APP_NAME, TEST_PLAYER)
self.usecase.example_video(title="test", player=TEST_PLAYER, app_name=None)
await self.playing(title="test")
self.assertEqual(self.atv.metadata.app.name, APP_NAME)
self.assertEqual(self.atv.metadata.app.identifier, TEST_PLAYER)
# --- Default commands, immutability and TV metadata tests ----------------
async def test_set_default_commands(self):
self.usecase.default_supported_commands(
[CommandInfo_pb2.Play, CommandInfo_pb2.Pause]
)
self.usecase.example_video()
await self.playing(title="dummy")
self.assertFeatures(FeatureState.Available, FeatureName.Play, FeatureName.Pause)
async def test_playing_immutable_update_content_item(self):
# A Playing snapshot must not change when the device state changes later.
self.usecase.example_video(position=1)
playing = await self.playing(title="dummy")
self.usecase.change_metadata(position=100)
await self.playing(position=100)
self.assertEqual(playing.position, 1)
async def test_metadata_tv_playing(self):
self.usecase.tv_playing(
paused=False,
series_name="tv",
total_time=40,
position=10,
season_number=12,
episode_number=4,
content_identifier="identifier",
)
with faketime("pyatv", 0):
playing = await self.playing(series_name="tv")
self.assertEqual(playing.media_type, MediaType.Video)
self.assertEqual(playing.device_state, DeviceState.Playing)
self.assertEqual(playing.series_name, "tv")
self.assertEqual(playing.total_time, 40)
self.assertEqual(playing.position, 10)
self.assertEqual(playing.season_number, 12)
self.assertEqual(playing.episode_number, 4)
self.assertEqual(playing.content_identifier, "identifier")
self.assertFeatures(
FeatureState.Available,
FeatureName.SeriesName,
FeatureName.TotalTime,
FeatureName.Position,
FeatureName.SeasonNumber,
FeatureName.EpisodeNumber,
FeatureName.ContentIdentifier,
)
# --- Volume feature and volume step tests --------------------------------
async def test_absolute_volume_features(self):
features = [
FeatureName.Volume,
FeatureName.SetVolume,
]
# Volume features require absolute volume support on the device.
self.assertFeatures(FeatureState.Unavailable, *features)
self.usecase.change_volume_control(
available=True, support_absolute=False, support_relative=True
)
self.usecase.example_video(title="dummy2")
await self.playing(title="dummy2")
self.assertFeatures(FeatureState.Unavailable, *features)
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=False
)
self.usecase.example_video(title="dummy3")
await self.playing(title="dummy3")
self.assertFeatures(FeatureState.Available, *features)
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=True
)
self.usecase.example_video(title="dummy4")
await self.playing(title="dummy4")
self.assertFeatures(FeatureState.Available, *features)
async def test_volume_change(self):
self.usecase.change_volume_control(available=True)
assert math.isclose(self.atv.audio.volume, 0.0)
await until(
lambda: self.atv.features.in_state(
FeatureState.Available, FeatureName.SetVolume
)
)
# Manually set a new volume level
await self.atv.audio.set_volume(20.0)
await until(lambda: math.isclose(self.atv.audio.volume, 20.0))
# Trigger volume change from device
self.usecase.set_volume(0.3, DEVICE_UID)
await until(lambda: math.isclose(self.atv.audio.volume, 30.0))
# Helper shared by the relative/absolute volume_up variants below
# (leading underscore keeps the runner from collecting it directly).
async def _test_audio_volume_up_increases_volume(self):
await until(
lambda: self.atv.features.in_state(
FeatureState.Available, FeatureName.SetVolume
)
)
await self.atv.audio.set_volume(20.0)
assert math.isclose(self.atv.audio.volume, 20.0)
await self.atv.audio.volume_up()
assert self.atv.audio.volume == round(20.0 + VOLUME_STEP * 100.0)
await self.atv.audio.volume_up()
assert self.atv.audio.volume == round(20.0 + 2 * VOLUME_STEP * 100.0)
async def test_audio_volume_up_increases_volume_relative(self):
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=True
)
await self._test_audio_volume_up_increases_volume()
async def test_audio_volume_up_increases_volume_absolute(self):
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=False
)
await self._test_audio_volume_up_increases_volume()
async def _test_audio_volume_down_decreases_volume(self):
self.usecase.change_volume_control(available=True)
await until(
lambda: self.atv.features.in_state(
FeatureState.Available, FeatureName.SetVolume
)
)
await self.atv.audio.set_volume(20.0)
assert math.isclose(self.atv.audio.volume, 20.0)
await self.atv.audio.volume_down()
assert self.atv.audio.volume == round(20 - VOLUME_STEP * 100.0)
await self.atv.audio.volume_down()
assert self.atv.audio.volume == round(20 - 2 * VOLUME_STEP * 100.0)
async def test_audio_volume_down_decreases_volume_relative(self):
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=True
)
await self._test_audio_volume_down_decreases_volume()
async def test_audio_volume_down_decreases_volume_absolute(self):
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=False
)
await self._test_audio_volume_down_decreases_volume()
# --- Volume boundary, cluster and output-device tests --------------------
async def _test_audio_volume_up_above_max(self):
await until(
lambda: self.atv.features.in_state(
FeatureState.Available, FeatureName.SetVolume
)
)
await self.atv.audio.set_volume(100.0)
assert math.isclose(self.atv.audio.volume, 100.0)
# Should not yield a timeout
await self.atv.audio.volume_up()
async def test_audio_volume_up_above_max_relative(self):
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=True
)
await self._test_audio_volume_up_above_max()
async def test_audio_volume_up_above_max_absolute(self):
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=False
)
await self._test_audio_volume_up_above_max()
async def _test_audio_volume_down_below_zero(self):
await until(
lambda: self.atv.features.in_state(
FeatureState.Available, FeatureName.SetVolume
)
)
await self.atv.audio.set_volume(0.0)
assert math.isclose(self.atv.audio.volume, 0.0)
# Should not yield a timeout
await self.atv.audio.volume_down()
async def test_audio_volume_down_below_zero_relative(self):
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=True
)
await self._test_audio_volume_down_below_zero()
async def test_audio_volume_down_below_zero_absolute(self):
self.usecase.change_volume_control(
available=True, support_absolute=True, support_relative=False
)
await self._test_audio_volume_down_below_zero()
async def test_volume_clustered_devices(self):
# Volume events must be matched against the cluster id, not the
# individual device uid, when devices are clustered.
cluster_id = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE"
self.usecase.set_cluster_id(cluster_id)
self.usecase.change_volume_control(available=True)
await until(
lambda: self.atv.features.in_state(
FeatureState.Available, FeatureName.SetVolume
)
)
# Manually set a new volume level
await self.atv.audio.set_volume(20.0)
await until(lambda: math.isclose(self.atv.audio.volume, 20.0))
# Trigger volume change from device with wrong id
self.usecase.set_volume(0.3, DEVICE_UID)
await until(lambda: math.isclose(self.atv.audio.volume, 20.0))
# Trigger volume change from device
self.usecase.set_volume(0.3, cluster_id)
await until(lambda: math.isclose(self.atv.audio.volume, 30.0))
async def test_output_devices(self):
assert self.atv.audio.output_devices == [
OutputDevice("Fake MRP ATV", "E510C430-B01D-45DF-B558-6EA6F8251069")
]
async def test_output_devices_change(self):
await self.atv.audio.add_output_devices("AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE")
await until(
lambda: self.atv.audio.output_devices
== [
OutputDevice("Fake MRP ATV", "E510C430-B01D-45DF-B558-6EA6F8251069"),
OutputDevice("Device AA", "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE"),
]
)
await self.atv.audio.remove_output_devices(
"E510C430-B01D-45DF-B558-6EA6F8251069"
)
await until(
lambda: self.atv.audio.output_devices
== [OutputDevice("Device AA", "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE")]
)
await self.atv.audio.set_output_devices("E510C430-B01D-45DF-B558-6EA6F8251069")
await until(
lambda: self.atv.audio.output_devices
== [OutputDevice("Fake MRP ATV", "E510C430-B01D-45DF-B558-6EA6F8251069")]
)
|
22,627 | 334282370921390bdbf3e2516239923c71f4bbfb | #
# @lc app=leetcode.cn id=1217 lang=python3
#
# [1217] 玩筹码
#
# @lc code=start
class Solution:
    def minCostToMoveChips(self, position: List[int]) -> int:
        """Return the minimum cost to move all chips onto one position.

        Moving a chip by 2 is free, moving by 1 costs 1, so every chip
        effectively collapses to its parity class for free. The answer is
        the size of the smaller parity group (each of those chips then
        pays exactly 1 to join the other group).
        """
        odd = sum(p % 2 for p in position)
        return min(odd, len(position) - odd)
# @lc code=end
|
22,628 | 74cf266c934dcebd2036fb03e321ea0647b49223 | # -*- coding: utf-8 -*-
import tornado.web
from tornado.log import access_log
from views import *
def Custom(handler):
    """Tornado access-log formatter.

    Chooses the log level from the response status (info < 400,
    warning < 500, error otherwise) and logs the client's real IP from
    the X-Real-Ip header when the app runs behind a reverse proxy.
    """
    status = handler.get_status()
    if status < 400:
        log_method = access_log.info
    elif status < 500:
        log_method = access_log.warning
    else:
        log_method = access_log.error
    request_time = handler.request.request_time() * 1000.0
    headers = handler.request.headers
    if "X-Real-Ip" in headers.keys():
        real_ip = headers["X-Real-Ip"]
    else:
        real_ip = "127.0.0.1"
    log_method("%s %d %s %s %s %.2fms", real_ip, status,
               handler.request.version, handler.request.method,
               handler.request.uri, request_time)
class Application(tornado.web.Application):
    """Tornado application wiring every URL pattern to its handler."""

    def __init__(self):
        handlers = [
            (r'/newblog', NewBlogHandler),                    # latest blog posts
            (r'/blog', BlogHandler),                          # blog post CRUD
            (r'/bloglist', ListHandler),                      # blog post listing
            (r'/taglist', TagListHandler),                    # tag / category listing
            (r'/login', LoginHandler),                        # sign in
            (r'/logout', LogoutHandler),                      # sign out
            (r'/system', SystemHandler),                      # system monitoring
            (r'/message', MessageHandler),                    # system notifications
            (r'/imgupload', ImgUploadHandler),                # image upload
            (r'/download', DownloadHandler),                  # blog post download
            (r'/countview', CountViewHandler),                # page view counter
            (r'/search', SearchHandler),                      # search
            (r'/comment', CommentHandler),                    # comments
            (r'/todolist', TodoHandler),                      # to-do items
            (r'/visitor', VisitorHandler),                    # seven-day visit stats
            (r'/blogdetail/(?P<ids>\d*)', TemplateHandler),   # template page
            (r'.*', ErrorHandler),                            # catch-all error page
        ]
        settings = dict(
            log_function=Custom,
            debug=True,  # debug mode
            cookie_secret="SECRET_DONT_LEAK",
            login_url="/login",
            # xsrf_cookies=True,
        )
        super(Application, self).__init__(handlers, **settings)
22,629 | efb6d7c54da4137f6291df8b322b2f605b02d92c | # for32 Прохоренков Егор ИСП-211.
# Print the running series a_i = (a_{i-1} + 1) / i with a_0 = 1,
# one value per line, for i = 1..n (n read from stdin).
n = int(input())
a = 1
for i in range(1, n + 1):
    # Compute the term once; the original evaluated (a + 1) / i twice
    # (once for print, once for the assignment) with the same result.
    a = (a + 1) / i
    print(a)
22,630 | 6ca5cbd288622d5cc4659ed0910094146f36eb5e | #Importing the modules
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from accounts import views
from django.contrib.auth import views as auth
"""
desc: Calling the list of views of each url from urlpatterns by urls.py everytime
webbrowser request
"""
urlpatterns = [
path('', include('frontend.urls')),
path('admin/', admin.site.urls),
path('accounts/',include('accounts.urls')),
path('chatroom/', include('chatroom.urls')),
]
|
22,631 | 2862bf1d276395724b80a3f4b706245e0f8abad4 | from os import system
import os
import json
from subprocess import Popen
import subprocess
from deepstream import get, post
import sys
from time import sleep
import curses
from threading import Thread
# Shared state exchanged between the UI thread, the deepstream poller and
# the keyboard thread.
# NOTE(review): a ``global`` statement at module level is a no-op; it only
# has effect inside a function body.
global keyIn, screen, iftop, reach, imu
iftop = {}          # latest iftop/bandwidth record ("speed")
iftopTail = ""
reach = {}          # latest reach GPS record
reachTail = ""
imu = {}            # latest IMU record (heading/pitch/roll/mag)
imuTail = ""
mode = {}           # current rover mode record
timeDiff = {}       # timestamp-difference record
keyIn = 0           # last key read by getCharFromUser()
# NOTE(review): both files are loaded again further down; this first load
# is redundant but harmless.
path = json.load(open('pathToTitanRover.json'))
processes = json.load(open('processes.json'))
class c:
    """ANSI escape codes used for colored console output."""
    BLUE = '\033[34m'
    GREEN = '\033[32m'
    YELLOW = '\033[33m'
    CYAN = '\033[36m'
    # BUG FIX: was '\033[35' — the terminating 'm' was missing, so the
    # escape sequence was never applied and leaked into the output.
    MAGENTA = '\033[35m'
    RED = '\033[31m'
    DEFAULT = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# Refuse to run without root privileges (screen/iftop need them).
# BUG FIX: was `os.getuid() is not 0` — identity comparison with an int
# literal relies on CPython small-int caching; use a value comparison.
if os.getuid() != 0:
    print(c.RED+"Please run script as sudo:\n\t"+c.YELLOW+"sudo python main.py\n"+c.DEFAULT)
    sys.exit()
# Linux only: screen/iftop tooling is not portable. Flattens the original
# nested `!= "linux"` / `!= "linux2"` checks into one membership test.
if sys.platform not in ("linux", "linux2"):
    print("Your system: " + sys.platform)
    print(c.RED+"\nThis script was written ONLY for Linux OS."+c.DEFAULT)
    sys.exit()
path = json.load(open('pathToTitanRover.json'))
# The configured path must exist and must not end with a trailing slash.
if path["path"] is None or path["path"][-1:] == "/":
    print(c.RED+"You need to set the path in the pathToTitanRover.json file"+c.DEFAULT)
    print(c.RED+" To the path leading up to the /TitanRover2018 file"+c.DEFAULT)
    print(c.RED+" An example of pathToTitanRover.json might be:"+c.DEFAULT)
    print(c.YELLOW+" { \"path\": \"/home/pi\" }\n"+c.DEFAULT)
    sys.exit()
def cleanUpScreenLogs():
    """Truncate the screenlog.0 file of every configured process."""
    for proc in processes:
        log_path = path["path"] + proc["path"] + "screenlog.0"
        # Opening in "w" mode truncates; the with-block guarantees close.
        with open(log_path, "w") as log_file:
            log_file.write("")
cleanUpScreenLogs()
# Initialise curses: take over the terminal, disable key echo and define
# the three colour pairs used throughout runWindow()
# (1 = green/ok, 2 = red/error, 3 = yellow/info).
screen = curses.initscr()
curses.noecho()
curses.start_color()
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
def restartProcess(screenName):
# Kill the named GNU screen session and start a fresh detached, logging
# session that runs the process' configured python interpreter + script.
o = [x for x in processes if x['screenName'] == screenName][0]
sn = str(o["screenName"])
fullPath = str(path["path"]) + str(o["path"])
py = str(o["python"])
# "\015" is a carriage return: it makes `screen -X stuff` press Enter.
scrpt = str(o["script"]) + "\015\""
cmd = py + " " + scrpt
# kill old session, start a new detached+logging one, then type the command into it
p = Popen([ "screen", "-S", screenName, "-X", "kill"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
p = Popen(["screen", "-dmLS", sn], cwd=fullPath, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
p = Popen(["screen", "-S", sn, "-X", "stuff", cmd], cwd=fullPath, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
def runWindow():
# UI thread: redraws the curses dashboard every ~100ms until ESC (27) is
# pressed, rendering the records polled by getDataFromDeepstream(). Each
# record is a dict when data is available, or an error string.
# NOTE(review): `timeDiff` is read below but is not in this global list —
# confirm whether it ever updates at runtime.
global keyIn, screen, iftop, imu, reach, mode
while keyIn != ord(chr(27)):
screen = curses.initscr()
screen.clear()
screen.border(0)
screen.addstr(1, 2, "Titan Rover CLI Process Manager", curses.color_pair(1))
screen.addstr(2, 4, "To restart a process, type the number of the listed process", curses.color_pair(3))
screen.addstr(3, 4, "To exit, hold the ESC key", curses.color_pair(3))
screen.addstr(4, 4, "Process List:")
screen.addstr(4, 60, "Mode:")
screen.addstr(4, 78, "TimeDiff:")
screen.addstr(6, 6, " imu:")
screen.addstr(12, 6, " reach:")
screen.addstr(18, 6, "1 - iftop: ")
# information from imu record
if type(imu) == dict:
if imu == {}:
screen.addstr(6, 25, "WAITING ON IMU...", curses.color_pair(2))
else:
screen.addstr(6, 25, "Heading:")
screen.addstr(7, 25, str(imu["heading"]).rjust(8), curses.color_pair(1))
screen.addstr(6, 41, "Pitch:")
screen.addstr(7, 41, str(imu["pitch"]).rjust(8), curses.color_pair(1))
screen.addstr(6, 53, "Roll:")
screen.addstr(7, 51, str(imu["roll"]).rjust(8), curses.color_pair(1))
screen.addstr(6, 61, "MagCal:")
# mag == 3 means the magnetometer is fully calibrated
if imu["mag"] == 3:
screen.addstr(7, 65, "yes", curses.color_pair(1))
else:
screen.addstr(7, 65, "no", curses.color_pair(2))
elif type(imu) == str:
screen.addstr(6, 25, str(imu), curses.color_pair(2))
# information from reach record
if type(reach) == dict:
if reach == {}:
screen.addstr(12, 25, "WAITING ON REACH...", curses.color_pair(2))
else:
pass
elif type(reach) == str:
screen.addstr(12, 25, reach, curses.color_pair(2))
# information from iftop record
if type(iftop) == dict:
if iftop == {}:
screen.addstr(18, 25, "NO_RECORD", curses.color_pair(2))
else:
log = getTail("speed")
screen.addstr(18, 25, "IP Address:")
screen.addstr(19, 25, iftop["ip"], curses.color_pair(1))
screen.addstr(18, 41, "Download:")
screen.addstr(19, 41, str(iftop["download"]), curses.color_pair(1))
screen.addstr(18, 53, "Upload:")
screen.addstr(19, 53, str(iftop["upload"]), curses.color_pair(1))
screen.addstr(21, 25, "Output:")
screen.addstr(22, 25, log, curses.color_pair(3))
elif type(iftop) == str:
screen.addstr(18, 25, iftop, curses.color_pair(2))
# information from the mode record
if type(mode) == dict:
if mode == {}:
screen.addstr(4, 66, "WAITING ON MODE...", curses.color_pair(2))
else:
screen.addstr(4, 66, mode["mode"], curses.color_pair(3))
elif type(mode) == str:
screen.addstr(4, 66, mode, curses.color_pair(2))
# information from the timeDiff record which tracks the difference in the timestamps
if type(timeDiff) == dict:
if timeDiff == {}:
screen.addstr(4, 88, "WAITING ON TIME...", curses.color_pair(2))
else:
screen.addstr(4, 88, timeDiff["timeDiff"], curses.color_pair(3))
# NOTE(review): this branch tests/prints `mode`, not `timeDiff` —
# looks like a copy-paste slip from the block above; confirm intent.
elif type(mode) == str:
screen.addstr(4, 88, mode, curses.color_pair(2))
screen.refresh()
sleep(.1)
# Key "1" restarts the iftop/speed process and clears its record.
if keyIn == ord("1"):
keyIn = 0
restartProcess("speed")
iftop = {}
sleep(.1)
# Keys "2"/"3" are reserved for future processes.
if keyIn == ord("2"):
pass
if keyIn == ord("3"):
pass
curses.endwin()
quit()
# This function is where you add the deepstream record for your screenName
def getDataFromDeepstream():
    """Poll deepstream records forever and publish them into the module
    globals rendered by runWindow().

    A failed read is surfaced as "NO_RECORD" (iftop/reach) or an empty
    dict (imu/mode/timeDiff) instead of crashing the poller thread.
    """
    # BUG FIX: `timeDiff` was missing from the global declaration, so the
    # assignment below created a dead local and the UI never saw updates.
    global keyIn, screen, iftop, reach, imu, mode, timeDiff
    while True:
        try:
            iftop = get("speed")
        except Exception:
            iftop = "NO_RECORD"
        sleep(.08)
        try:
            reach = get("reach")
        except Exception:
            reach = "NO_RECORD"
        sleep(.08)
        try:
            imu = get("imu")
        except Exception:
            imu = {}
        sleep(.08)
        try:
            mode = get("mode")
        except Exception:
            mode = {}
        sleep(.08)
        try:
            timeDiff = get("timeDiff")
        except Exception:
            timeDiff = {}
        sleep(.08)
        # Stop polling once the keyboard thread saw ESC (27).
        if keyIn == ord(chr(27)):
            quit()
def getTail(screenName):
    """Return the last complete line of screenlog.0 for the given screen
    session, or an empty string when the log has too few lines."""
    cfg = json.load(open('pathToTitanRover.json'))
    procs = json.load(open('processes.json'))
    entry = [item for item in procs if item['screenName'] == screenName][0]
    workdir = str(cfg["path"]) + str(entry["path"])
    output = Popen(["cat", "screenlog.0"], cwd=workdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
    lines = output.split("\r\n")
    if len(lines) > 2:
        # The very last element is an unfinished line; take the one before it.
        return lines[len(lines) - 2]
    return ""
def getCharFromUser():
# Keyboard thread: blocks on getch() and publishes the last key into the
# shared `keyIn` global; exits (and tears down curses) when ESC (27) is seen.
global keyIn, screen
while True:
keyIn = screen.getch()
if keyIn == ord(chr(27)):
break
sleep(.05)
curses.endwin()
quit()
# Start the UI, deepstream-poller and keyboard threads.
# NOTE(review): these are non-daemon threads, so the process only exits
# via the quit() calls inside each loop when ESC is pressed.
t1 = Thread(target=runWindow)
t2 = Thread(target=getDataFromDeepstream)
t3 = Thread(target=getCharFromUser)
t1.start()
t2.start()
t3.start()
|
22,632 | 9baf19c8127a88e56a179883aadf2f865af398f5 | # AC
import math
def is_prime(n):
    """Return True if n is prime (trial division by odd numbers up to sqrt(n))."""
    if n < 2:
        return False
    if n == 2:
        return True
    if n%2 == 0:
        return False
    # `range` instead of the Python-2-only `xrange`: identical iteration
    # semantics, works under both Python 2 and 3.
    for i in range(3, int(math.sqrt(n))+1, 2):
        if n%i == 0:
            return False
    return True
# Python 2 script (print statement / raw_input / integer division).
# Precompute prefix counts: m[i] = number of x <= i such that both x and
# (x+1)/2 are prime; each range query [a, b] is then m[b] - m[a-1].
m = list()
s = 0
for i in xrange(10**5):
if is_prime(i) and is_prime((i+1)/2):
s += 1
m.append(s)
n = input()
for _ in xrange(n):
# NOTE(review): this `s = 0` is never used inside the query loop.
s = 0
a, b = map(int, raw_input().split())
print m[b] - m[a-1]
|
22,633 | 00675d1672c1cc127317b19b2d50058a9b2259f1 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, RadioButtons
def plot_colormap_interactive(fig, ax, x, y, z, xlabel=None, ylabel=None, zlabel=None):
    '''
    Plot z over the (x, y) grid on the provided fig/ax and attach
    interactive widgets: min/max/gamma sliders driving a PowerNorm and
    radio buttons selecting the colormap.

    Parameters
    ----------
    fig, ax : Figure/Axes pair as returned by plt.subplots()
    x, y, z : grid coordinates and values forwarded to pcolormesh
    xlabel, ylabel, zlabel : optional axis / colorbar labels
    '''
    # Leave room at the bottom/left for the widget axes.
    plt.subplots_adjust(bottom=0.3, left=0.35)
    heat_map = plt.pcolormesh(x, y, z)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Widget axes: [left, bottom, width, height] in figure coordinates.
    ax_min = plt.axes([0.3, 0.1, 0.5, 0.03])
    ax_max = plt.axes([0.3, 0.15, 0.5, 0.03])
    ax_gamma = plt.axes([0.3, 0.05, 0.5, 0.03])
    ax_colorscheme = plt.axes([0.05, 0.05, 0.15, 0.8])
    cb = plt.colorbar(mappable=heat_map, ax=ax, label=zlabel)
    slider_min = Slider(ax_min, 'Min', np.min(z), np.max(z), valinit=np.min(z))
    slider_max = Slider(ax_max, 'Max', np.min(z), np.max(z), valinit=np.max(z))
    # FIX: raw string — '\g' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on recent Pythons); bytes are unchanged.
    slider_gamma = Slider(ax_gamma, r'$\gamma$', 0, 3, 1)

    def _update(val):
        # Rebuild the norm from the current slider values and redraw.
        norm = mpl.colors.PowerNorm(slider_gamma.val,
                                    vmin=slider_min.val, vmax=slider_max.val)
        heat_map.set_norm(norm)
        cb.update_normal(heat_map)
        fig.canvas.draw_idle()

    slider_min.on_changed(_update)
    slider_max.on_changed(_update)
    slider_gamma.on_changed(_update)
    buttons_colorscheme = RadioButtons(ax_colorscheme,
                                       ['viridis', 'plasma', 'magma',
                                        'cividis', 'seismic', 'gist_stern'],
                                       activecolor='#206ba9')

    def _colorfunc(label):
        # RadioButtons passes the clicked label, but value_selected is
        # equivalent and matches the original behavior.
        heat_map.set_cmap(buttons_colorscheme.value_selected)
        fig.canvas.draw_idle()

    buttons_colorscheme.on_clicked(_colorfunc)
22,634 | f8ea60de3d3c14601abb8188495ed1a83d0c39b9 | #!usr/bin/env python3
'''
File name: evaluate_testing_policies_infection_days
Author: PFA
Date: Fri May 14, 2021
Evaluate testing policies with respect to expected number of days infecting
'''
import numpy as np
import pandas as pd
from itertools import combinations, product
import time
# Global Vars
T = 7
# Methods
def count_infecting(df, t):
    '''
    Number of viral-load curves in df whose load on day t is at the
    infecting threshold (>= 6).
    '''
    day = str(t)
    return len(df[df[day] >= 6])
def count_infecting_corr(df, t, s):
    '''
    Number of curves that are infecting (load >= 6) on BOTH day t and
    day s. Needed for the covariance terms when computing the variance
    of the estimator across time.
    '''
    both_days = (df[str(t)] >= 6) & (df[str(s)] >= 6)
    return len(df[both_days])
def filter_antigen_tested(df, t):
    '''
    Drop curves that are antigen-positive on day t, i.e. keep only rows
    whose load that day is below the antigen detection limit (< 5).
    '''
    return df[df[str(t)] < 5]
def filter_pcr_tested(df, t):
    '''
    Drop curves with a positive PCR result on day t. The sample was
    drawn the day before (column t - 1) with detection threshold 3.
    '''
    sample_day = str(t - 1)
    return df[df[sample_day] < 3]
def filter_symptomatic(df, t):
    '''
    Filter out individuals who have become symptomatic by day t: keep
    curves that never develop symptoms (column '14' < 1) or whose
    symptom-onset day (column '14') lies after day t.
    '''
    never_symptomatic = df['14'] < 1
    onset_after_t = df['14'] > t
    return df[onset_after_t | never_symptomatic]
def generate_budgeted_policies(num_antigen, num_pcr):
    '''
    Iterate over every testing policy within the given budgets: each
    policy pairs a choice of antigen-result days (0..T-1) with a choice
    of PCR-result days (1..T).
    '''
    antigen_options = combinations(range(T), num_antigen)
    pcr_options = combinations(range(1, T + 1), num_pcr)
    return product(antigen_options, pcr_options)
def evaluate_policy(df, policy):
    '''
    Evaluate the expected number of infecting days under a testing policy.

    A policy is a tuple: the first entry holds the antigen-result test
    days, the second the PCR-result days. Curves are progressively
    filtered out of `aux` as they are caught by symptoms or tests, so a
    curve contributes to the infecting count only until detection.
    '''
    aux = df.copy()
    # Monte-Carlo sample size; used as the fixed denominator below.
    num_mc = df.shape[0]
    D_a = policy[0]
    D_p = policy[1]
    prob_inf = 0
    for t in range(0, T + 1):
        # Remove anyone symptomatic by day t, then anyone caught by a
        # test whose result arrived the previous day.
        aux = filter_symptomatic(aux, t)
        if t - 1 in D_a:
            aux = filter_antigen_tested(aux, t - 1)
        if t - 1 in D_p:
            aux = filter_pcr_tested(aux, t - 1)
        num = count_infecting(aux, t)
        den = aux.shape[0]
        if den != 0:
            # NOTE(review): divides by num_mc, not den — the trailing
            # "#den" suggests the denominator was changed deliberately;
            # confirm the intended estimator.
            prob_inf += num / num_mc #den
    return prob_inf
def evaluate_policy_lag(df, policy, t_l=0):
    '''
    Evaluate expected cost of a policy when there is a lag of t_l days in
    identifying exposure (all test days in the policy are shifted by t_l).

    A policy is a tuple: first entry the antigen-result test days, second
    the PCR-result days.

    Returns a 6-tuple:
        (expected infecting days, expected non-infecting days,
         variance of each, and dicts of exceedance counts per quantile
         level for each).
    '''
    aux = df.copy()
    num_mc = df.shape[0]
    # Shift every scheduled test day by the identification lag.
    D_a = [d + t_l for d in policy[0]]
    D_p = [d + t_l for d in policy[1]]
    prob_inf = [] # (1): Expected number of infecting days
    work_days = [] # (2): Expected number of non-infecting days
    variance_pi = 0 # (3): Variance of (1)
    variance_wd = 0 # (4): Variance of (2)
    # (5): x% quantile of (1)
    quantile_pi = {.95: 0,
                   .90: 0,
                   .80: 0,
                   .50: 0,
                   .20: 0,
                   .10: 0,
                   .05: 0,}
    # (6): x% quantile of (2)
    quantile_wd = {.95: 0,
                   .90: 0,
                   .80: 0,
                   .50: 0,
                   .20: 0,
                   .10: 0,
                   .05: 0}
    for t in range(0, 14):
        aux = filter_symptomatic(aux, t)
        if t in D_a:
            aux = filter_antigen_tested(aux, t)
        if t in D_p:
            aux = filter_pcr_tested(aux, t)
        den = aux.shape[0]
        # Filtering only removes rows, so once den hits 0 it stays 0;
        # hence prob_inf[t]/work_days[t] below are always populated when read.
        if den != 0:
            num = count_infecting(aux, t)
            prob_inf.append(num / num_mc)
            work_days.append((den - num) / num_mc)
            # Bernoulli variance terms for day t ...
            variance_pi += prob_inf[t] * (1 - prob_inf[t])
            variance_wd += work_days[t] * (1 - work_days[t])
            # ... plus covariance terms across earlier days.
            for s in range(t):
                num_tau = count_infecting_corr(aux, t, s)
                variance_pi += 2 * (num_tau / num_mc - prob_inf[t] * prob_inf[s])
                variance_wd += 2 * ((den - num) / num_mc - work_days[t] * work_days[s])
            # Exceedance counts: how many days the daily probability
            # reaches each 1-q threshold.
            for q in quantile_pi:
                quantile_pi[q] = quantile_pi[q] + (prob_inf[t] >= 1 - q)
                quantile_wd[q] = quantile_wd[q] + (work_days[t] >= 1 - q)
    return sum(prob_inf), sum(work_days), variance_pi, variance_wd, quantile_pi, quantile_wd
def weight_policy_symptom_day(df, policy, index='symptoms'):
    '''
    Weight the cost of a policy by the distribution of the day on which
    the individual was infected by the index case.

    The hard-coded pdfs are pre-computed conditional distributions of the
    infection day, given that the index case was identified by symptoms
    on day 0 ('symptoms') or by a positive antigen test on day 0
    ('antigen').

    Returns the same six statistics as evaluate_policy_lag, each averaged
    over the infection-day distribution.

    Raises:
        ValueError: if `index` is neither 'symptoms' nor 'antigen'.
    '''
    if index == 'symptoms':
        inf_day_pdf = {0: 0.1669353401405535,
                       1: 0.22514759942847082,
                       2: 0.26480058806134404,
                       3: 0.21587741781643335,
                       4: 0.10776722889318635,
                       5: 0.01918233381609943}
    elif index == 'antigen':
        inf_day_pdf = {0: 0.23550498586741955,
                       1: 0.18526076665827995,
                       2: 0.1491931526909608,
                       3: 0.12145630421492379,
                       4: 0.09797953217203573,
                       5: 0.07684305227089296,
                       6: 0.057105543597348894,
                       7: 0.03934966233862648,
                       8: 0.024580317017036127,
                       9: 0.012726683172475764}
    else:
        # Previously an unknown index fell through and crashed later with
        # a NameError on inf_day_pdf; fail fast with a clear message.
        raise ValueError("unknown index type: " + str(index))
    # Normalize once (the stored pdf may not sum exactly to 1).
    total_mass = sum(inf_day_pdf.values())
    inf_day_pdf = {d: p / total_mass for d, p in inf_day_pdf.items()}
    prob_inf = []
    work_days = []
    variance_pi = []
    variance_wd = []
    quantile_pi = []
    quantile_wd = []
    for d in inf_day_pdf:
        pi, wd, v_pi, v_wd, q_pi, q_wd = evaluate_policy_lag(df, policy, t_l=d)
        prob_inf.append(pi)
        work_days.append(wd)
        variance_pi.append(v_pi)
        variance_wd.append(v_wd)
        quantile_pi.append(q_pi)
        quantile_wd.append(q_wd)
    # Weighted averages over the infection-day distribution. The pdf keys
    # are consecutive ints starting at 0, so they double as list indices.
    prob_inf_w = sum(prob_inf[d] * inf_day_pdf[d] for d in inf_day_pdf)
    work_days_w = sum(work_days[d] * inf_day_pdf[d] for d in inf_day_pdf)
    variance_pi_w = sum(variance_pi[d] * inf_day_pdf[d] for d in inf_day_pdf)
    variance_wd_w = sum(variance_wd[d] * inf_day_pdf[d] for d in inf_day_pdf)
    # BUG FIX: quantile keys used to be read from the leftover loop
    # variables q_pi / q_wd of the final iteration above; take them from
    # the first collected dict instead (all dicts share the same keys).
    quantile_pi_w = {q: sum(quantile_pi[d][q] * inf_day_pdf[d] for d in inf_day_pdf)
                     for q in quantile_pi[0]}
    quantile_wd_w = {q: sum(quantile_wd[d][q] * inf_day_pdf[d] for d in inf_day_pdf)
                     for q in quantile_wd[0]}
    return prob_inf_w, work_days_w, variance_pi_w, variance_wd_w, quantile_pi_w, quantile_wd_w
def evaluate_budgeted_policy(df, num_antigen, num_pcr):
    '''
    Evaluate all possible policies that use exactly num_antigen antigen
    tests and num_pcr PCR tests.

    Returns a list of single-row DataFrames, one per policy, holding the
    policy description plus its expectation/variance/quantile statistics.
    '''
    policies = generate_budgeted_policies(num_antigen, num_pcr)
    policy_cost = []
    for p in policies:
        prob_inf, work_days, variance_pi, variance_wd, quantile_pi, quantile_wd = weight_policy_symptom_day(df, p)
        # One-row frame: list-wrapping lets a Python list sit in a cell.
        aux_df = pd.DataFrame({'num_antigen': num_antigen,
                               'num_pcr': num_pcr,
                               'antigen_days': [[d for d in p[0]]],
                               'pcr_days': [[d - 1 for d in p[1]]],
                               'expected_infecting_days': prob_inf,
                               'variance_infecting_days': variance_pi,
                               'expected_non_infecting_days': work_days,
                               'variance_non_infecting_days': variance_wd})
        # Spread the per-quantile dicts into one column per quantile level.
        aux_q_pi = {'infecting_days_' + str(d): [quantile_pi[d]] for d in quantile_pi}
        aux_q_wd = {'non_infecting_days_' + str(d): [quantile_wd[d]] for d in quantile_wd}
        aux_df = aux_df.join(pd.DataFrame(aux_q_pi)).join(pd.DataFrame(aux_q_wd))
        policy_cost.append(aux_df)
    return policy_cost
# Main
def main2():
    """Sweep the (antigen, PCR) budget grid, score every policy for the
    first 1000 Monte-Carlo viral-load curves, and save the results CSV."""
    df = pd.read_csv('data/viral_load_mc.csv').iloc[:int(1e3)]
    # BUG FIX: DataFrame.rename returns a new frame; the result was
    # discarded, so relabelling day columns '0'..'13' -> '1'..'14' was a
    # silent no-op. NOTE(review): this changes downstream numbers —
    # confirm the relabelling was actually intended.
    df = df.rename(columns={str(i): str(i + 1) for i in range(14)})
    policy_space_limits = {'antigen': [0, 5],
                           'pcr': [0, 2]}
    policy_space = {'antigen': range(policy_space_limits['antigen'][0],
                                     policy_space_limits['antigen'][1] + 1),
                    'pcr': range(policy_space_limits['pcr'][0],
                                 policy_space_limits['pcr'][1] + 1)}
    policy_cost = []
    for num_pcr in policy_space['pcr']:
        for num_antigen in policy_space['antigen']:
            start_time = time.time()
            results = evaluate_budgeted_policy(df, num_antigen, num_pcr)
            policy_cost = policy_cost + results
            # Progress report: elapsed minutes per budget combination.
            print(str((num_antigen, num_pcr)) + ' finished in ' + str((time.time() - start_time) / 60))
    pd.concat(policy_cost).to_csv('data/results_28_05_2.csv')
# Execute
start = time.time()
main2()
print((time.time() - start) / 60) |
22,635 | 10c687d2b5c7a003767414dac442b572717226fc | # Import modules
import random
from day10_art import logo2, vs
from day10_gamedata import data
from os import system, name
def clear():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if name == 'nt' else 'clear'
    _ = system(command)
def pull_data():
    # Pick a random account entry from the imported game data list.
    return random.choice(data)
# Build a comparison function
# Build a comparison function
def compare(account_a, account_b):
    """Compares to see if the answer is correct.

    Prompts the player for 'higher' or 'lower' about account_b relative
    to account_a and returns True when the guess matches the follower
    counts, False otherwise.
    NOTE(review): equal follower counts match none of the branches and
    fall into the "correct input required" case — confirm the data has
    no duplicate counts.
    """
    choice = input(f"Does {account_b['name']} have a higher or lower follower count? :")
    if choice == 'higher' and account_a['follower_count'] < account_b['follower_count']:
        return True
    elif choice == 'higher' and account_a['follower_count'] > account_b['follower_count']:
        return False
    elif choice == 'lower' and account_a['follower_count'] > account_b['follower_count']:
        return True
    elif choice == 'lower' and account_a['follower_count'] < account_b['follower_count']:
        return False
    else:
        # Unrecognized input counts as a wrong answer (ends the game).
        print("correct input required")
        return False
def game():
    """Run one session of the higher-or-lower follower game.

    Loops until the player answers incorrectly, then prints the final score.
    """
    score = 0
    result = True
    account_a = pull_data()
    account_b = pull_data()
    while result:
        print(logo2)
        print(f"Your current score is {score}")
        print(f"Choice A is {account_a['name']} who is a {account_a['description']} from {account_a['country']}")
        print(vs)
        print(f"Choice B is {account_b['name']} who is a {account_b['description']} from {account_b['country']}")
        result = compare(account_a, account_b)
        if result:
            score += 1
            # The winning account becomes the baseline for the next round.
            account_a = account_b
            account_b = pull_data()
            # Re-draw if we happened to pick the same account twice.
            if account_a == account_b:
                account_b = pull_data()
            print(f"That's correct! Your score is {score}")
        clear()
    print(f"Sorry that is incorrect, better luck next time. Your final score is {score}")
game()
while input("Do you want to play again? 'yes' or 'no': ") == 'yes':
game()
print("Thanks for playing!")
|
22,636 | b0a8268b553319a7df6889279e8d03cca920aeea | # _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++, Cuda
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>
from ..io import imread
from ..path import abspath, join
from .._base import __curr__
from ..annotations import Optional, Tuple
import numpy as np
from ..coreten import Tensor
HERE = join(__curr__, 'data').replace('\\', "/") + "/"
def _get_path_to_data(name) -> str:
    # Resolve a bundled sample-image filename to its path under the
    # package's data directory.
    return join(HERE, name)
def audio_mixer(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
    r"""Load the bundled 640x427 sample image of an audio mixer.

    Args:
        target_size (Optional[Tuple[int, int]]): Desired ``(width, height)``;
            ``None`` keeps the image at its native size.
        rgb (bool): Return an RGB Tensor when ``True`` (the default).

    Returns:
        Tensor of shape ``(height, width, channels)``.

    Examples::
        >> tens = caer.data.audio_mixer()
        >> tens.shape
        (427, 640, 3)
    """
    image_path = HERE + 'audio_mixer.jpg'
    return imread(image_path, target_size=target_size, rgb=rgb)
def bear(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
    r"""Load the bundled 640x427 sample image of a bear.

    Args:
        target_size (Optional[Tuple[int, int]]): Desired ``(width, height)``;
            ``None`` keeps the image at its native size.
        rgb (bool): Return an RGB Tensor when ``True`` (the default).

    Returns:
        Tensor of shape ``(height, width, channels)``.

    Examples::
        >> tens = caer.data.bear()
        >> tens.shape
        (427, 640, 3)
    """
    image_path = HERE + 'bear.jpg'
    return imread(image_path, target_size=target_size, rgb=rgb)
def beverages(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of beverages.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.beverages()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'beverages.jpg', target_size=target_size, rgb=rgb)
def black_cat(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a black cat.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.black_cat()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'black_cat.jpg', target_size=target_size, rgb=rgb)
def blue_tang(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x414 image Tensor (RGB, by default) of a blue tang (a type of fish).
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.audio_mixer()
>> tens.shape
(414, 640, 3)
"""
return imread(HERE+'blue_tang.jpg', target_size=target_size, rgb=rgb)
def camera(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a camera.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.camera()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'camera.jpg', target_size=target_size, rgb=rgb)
def controller(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a game controller.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.controller()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'controller.jpg', target_size=target_size, rgb=rgb)
def drone(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x358 image Tensor (RGB, by default) of a robotic drone.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.drone()
>> tens.shape
(358, 640, 3)
"""
return imread(HERE+'drone.jpg', target_size=target_size, rgb=rgb)
def dusk(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a dusk landscape.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.dusk()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'dusk.jpg', target_size=target_size, rgb=rgb)
def fighter_fish(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x640 image Tensor (RGB, by default) of a fighter fish.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.fighter_fish()
>> tens.shape
(640, 640, 3)
"""
return imread(HERE+'fighter_fish.jpg', target_size=target_size, rgb=rgb)
def gold_fish(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x901 image Tensor (RGB, by default) of a gold fish.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.gold_fish()
>> tens.shape
(901, 640, 3)
"""
return imread(HERE+'gold_fish.jpg', target_size=target_size, rgb=rgb)
def green_controller(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x512 image Tensor (RGB, by default) of a green game controller.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.green_controller()
>> tens.shape
(512, 640, 3)
"""
return imread(HERE+'green_controller.jpg', target_size=target_size, rgb=rgb)
def green_fish(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x430 image Tensor (RGB, by default) of a green fish.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.green_fish()
>> tens.shape
(430, 640, 3)
"""
return imread(HERE+'green_fish.jpg', target_size=target_size, rgb=rgb)
def guitar(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a guitar.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.guitar()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'guitar.jpg', target_size=target_size, rgb=rgb)
def island(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x426 image Tensor (RGB, by default) of an island.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.island()
>> tens.shape
(426, 640, 3)
"""
return imread(HERE+'island.jpg', target_size=target_size, rgb=rgb)
def jellyfish(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a jellyfish.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.jellyfish()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'jellyfish.jpg', target_size=target_size, rgb=rgb)
def laptop(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a laptop.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.laptop()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'laptop.jpg', target_size=target_size, rgb=rgb)
def mountain(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a mountain.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.mountain()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'mountain.jpg', target_size=target_size, rgb=rgb)
def night(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a night landscape.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.night()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'night.jpg', target_size=target_size, rgb=rgb)
def puppies(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a litter of puppies.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.puppies()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'puppies.jpg', target_size=target_size, rgb=rgb)
def puppy(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x512 image Tensor (RGB, by default) of a puppy.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.puppy()
>> tens.shape
(512, 640, 3)
"""
return imread(HERE+'puppy.jpg', target_size=target_size, rgb=rgb)
def red_fish(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a red fish.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.red_fish()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'red_fish.jpg', target_size=target_size, rgb=rgb)
def phone(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a rotary phone.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.phone()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'rotary_phone.jpg', target_size=target_size, rgb=rgb)
def sea_turtle(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x400 image Tensor (RGB, by default) of a sea turtle.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.sea_turtle()
>> tens.shape
(400, 640, 3)
"""
return imread(HERE+'sea_turtle.jpg', target_size=target_size, rgb=rgb)
def snow(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x360 image Tensor (RGB, by default) of snow.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.snow()
>> tens.shape
(360, 640, 3)
"""
return imread(HERE+'snow.jpg', target_size=target_size, rgb=rgb)
def snowflake(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x480 image Tensor (RGB, by default) of a snowflake.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.snowflake()
>> tens.shape
(480, 640, 3)
"""
return imread(HERE+'snowflake.jpg', target_size=target_size, rgb=rgb)
def sunrise(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a sunrise landscape.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.sunrise()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'sunrise.jpg', target_size=target_size, rgb=rgb)
def tent(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:
r"""
Returns a standard 640x427 image Tensor (RGB, by default) of a tent.
Args:
target_size (Optional[Tuple[int, int]]): Intended target size (follows the ``(width, height)`` format).
If None, the unaltered tensor will be returned.
rgb (bool): Boolean whether to return an RGB Tensor (default is ``True``).
Returns:
Tensor of shape ``(height, width, channels)``.
Examples::
>> tens = caer.data.tent()
>> tens.shape
(427, 640, 3)
"""
return imread(HERE+'tent.jpg', target_size=target_size, rgb=rgb)
__all__ = [d for d in dir() if not d.startswith('_')]
# __all__ = [
# 'audio_mixer',
# 'bear',
# 'beverages',
# 'black_cat',
# 'blue_tang',
# 'camera',
# 'controller',
# 'drone',
# 'dusk',
# 'fighter_fish',
# 'gold_fish',
# 'green_controller',
# 'green_fish',
# 'guitar',
# 'island',
# 'jellyfish',
# 'laptop',
# 'mountain',
# 'night',
# 'puppies',
# 'puppy',
# 'red_fish',
# 'phone',
# 'sea_turtle',
# 'snow',
# 'sunrise',
# 'tent'
# ] |
22,637 | 3f56dbc4888478c3d4eda49046d8553256e1c0f8 | ## https://leetcode.com/problems/dota2-senate/
## go through the rounds of voting, where a vote is to ban another
## senator or, if all senators of the other party are banned, declare
## victory.
## the optimal play for each senator is to ban the first member of
## the opposition party after them. fastest way to handle that is to
## basically keep track of the number of bans that we have remaining
## to give out, noting that we'll always have bans from just one party.
## after all, the Ds will ban any Rs before they can vote if they have
## the chance to (and vice versa). that means we can keep track of the
## bans to give out using a single counter that can go positive for one
## party and negative for the other.
## this solution is quite good, coming in at almost the 78th percentile
## for runtime and about the 50th percentile for memory.
class Solution:
    def predictPartyVictory(self, senate: str) -> str:
        """Simulate the round-based voting and return 'Dire' or 'Radiant'.

        Each senator's optimal move is to ban the next opposing senator.
        Since at any moment only one party can have bans outstanding, a
        single signed counter suffices: positive values are bans queued
        by 'D' senators against 'R's, negative values the reverse.
        """
        sign = {'D': 1, 'R': -1}
        pending = 0  # persists across rounds, like the original counter
        while len(set(senate)) > 1:
            survivors = []
            for member in senate:
                s = sign[member]
                if pending * s >= 0:
                    # No opposing ban queued for this senator: they
                    # survive and queue a ban against the next opponent.
                    survivors.append(member)
                # Whether the senator queued a ban (same sign) or was
                # banned (consumed an opposing ban), the counter moves
                # by exactly this senator's sign.
                pending += s
            senate = ''.join(survivors)
        # Only one party remains; its first member tells us which.
        return 'Dire' if senate[0] == 'D' else 'Radiant'
22,638 | 0eaa035ece7cb9c0c7da1ca436aad4e79efdcbf9 | n = int(input('Digite um número: '))
div = 0
# Print 1..n, coloring divisors of n yellow and non-divisors red,
# while counting how many divisors n has.
for i in range(1, n + 1):
    if n % i == 0:
        print('\033[33m', end='') # ANSI yellow: i divides n.
        div += 1
    else:
        print('\033[31m', end='') # ANSI red: i does not divide n.
    print(i, end=' ')
print('\n\033[mO número {} foi divisível {} vezes.'.format(n, div))
if div == 2: # Prime iff divisible by exactly two numbers (1 and itself).
    print('E por isso ele é PRIMO!')
else:
    print('E por isso ele NÃO É PRIMO!')
22,639 | 3ea65ac39ec73864ff2c293111a66aacce3d7f9a | with open('tables.txt', 'r') as f: # context Handler
    # Echo every line of the file without adding extra newlines.
    for s in f:
        print(s, end = '')
    # Still inside the with-block: the file is open, so this prints False.
    print(f.closed)
# The with-block has exited and closed the file: this prints True.
print(f.closed)
22,640 | 9abdb2a3771e2b9d4a6a92070b29038961419f14 | from setuptools import find_packages, setup
setup(
    name='app',
    version='1.0.0',
    # typo fixed: "speecht" -> "speech"
    description='a simple speech to text converter',
    author='Tim Gaspard',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'flask',
        # 'sqlite3' removed: it ships with the Python standard library
        # and is not installable from PyPI, so pip install would fail.
        # A missing comma here used to merge the next two names into the
        # bogus requirement 'google-cloud-coregoogle-cloud-speech'.
        'google-cloud-core',
        'google-cloud-speech',
    ],
)
22,641 | 8a97f2597ff045a0ed0b2e9208c8f8c6d5fa1cf2 | def canCompleteCircuit(gas, cost):
    # Greedy single pass: `total` tracks net gas over the whole loop
    # (feasibility test); `tank` tracks fuel since the current candidate
    # start and resets whenever it goes negative.
    total = 0
    tank = 0
    # The string 'ass' is used as a sentinel meaning "no candidate start
    # index yet" (any non-int would do).
    currentIteration = 'ass'
    for i in range(len(gas)):
        diff = gas[i] - cost[i]
        total +=diff
        tank +=diff
        if(currentIteration == 'ass'):
            currentIteration = i
        if(tank<0):
            # Cannot reach station i+1 from the candidate start; the
            # next station becomes the new candidate.
            tank=0
            currentIteration = 'ass'
    if(total<0):
        # Less gas than cost overall: no start index can work.
        return -1
    if(currentIteration == 'ass'):
        return -1
    return currentIteration
print(canCompleteCircuit([1,2,3,4,5], [3,4,5,1,2])) |
22,642 | 63b6e88e8266c67da8cc27d654a028460b7a9836 | """
MENU ANALYSIS PROGRAM
get_score(cuisine_file, menu): returns the score for a given
cuisine and menu
to_JSON(meal, list_of_cuisines, list_of_menus): writes all of the cuisine
and menu score dictionaries to a JSON file, entitled meal+"data.json"
"""
import os
import numpy as np
import json
# global info
# change according to cuisine files
cuisines = {"italian": "italian.txt", "mexican": "mexican.txt", "greek": "greek.txt", "thai":"thai.txt", "korean":"korean.txt"}
# used to name things in JSON file
menus = ["frank", "frary", "collins", "pitzer", "mudd", "scripps", "oldenborg"]
def separate_words(file_name):
    '''
    Return a list of all words in the file, lower-cased.

    A "word" is a maximal run of alphabetic characters; digits,
    punctuation and whitespace act as separators.
    '''
    all_words = []
    current = []
    with open(file_name, 'r') as file:  # was never closed before
        for line in file:
            for char in line:
                if char.isalpha():
                    current.append(char.lower())
                elif current:
                    # Hit a separator: close off the word in progress.
                    all_words.append("".join(current))
                    current = []
    # Flush a word that runs to end-of-file. Previously the join was
    # appended unconditionally, so files ending in a separator gained a
    # spurious empty-string "word" that inflated counts and even matched
    # the empty string appended for the cuisine file.
    if current:
        all_words.append("".join(current))
    return all_words
def count_all_words(file_name):
    '''
    This function counts how many words are in the given file_name,
    using the same word-splitting rules as separate_words.
    '''
    return len(separate_words(file_name))
def count_same_words(cuisine_file, menu):
    '''
    Count matching (cuisine word, menu word) pairs between the cuisine
    file and the menu word list.

    For each cuisine word this adds its number of occurrences in the
    menu, which equals the number of equal pairs the previous O(n*m)
    double loop counted — but in O(n + m) via a Counter.
    '''
    from collections import Counter
    menu_counts = Counter(menu)
    return sum(menu_counts[word] for word in separate_words(cuisine_file))
def get_score(cuisine_file, menu):
    # Score = matching word pairs normalized by menu length.
    # NOTE: raises ZeroDivisionError for an empty menu.
    return float(count_same_words(cuisine_file, menu))/len(menu)
def to_JSON(meal, list_of_cuisines, list_of_menus):
    """
    Write a dictionary of {cuisine: {dining hall: score}} to a JSON file
    named meal+"data.json" and return it.

    meal: string describing name of meal - "breakfast", "lunch", or "dinner"
    list_of_cuisines: list of cuisine names - ["italian", "mexican"]
    list_of_menus: list of menu word lists - [["egg", "bacon"], ...]
        order matters; should match the global `menus` order:
        "frank", "frary", "collins", "pitzer", "mudd", "scripps", "oldenborg"
    """
    data = {}
    for cuisine in list_of_cuisines:
        cuisine_file = cuisines[cuisine]
        # The old code also called separate_words(cuisine_file) here and
        # discarded the result — a wasted file parse per cuisine.
        scores = {menus[i]: get_score(cuisine_file, menu)
                  for i, menu in enumerate(list_of_menus)}
        data[cuisine] = scores
    with open(meal + 'data.json', 'w') as f:
        json.dump(data, f)
    return data
22,643 | 8fc4cd835baa6cd95c2c0dc4c22e169aa754eb82 | '''class Computer:
def _init_(self,cpu,ram):
self.cpu=cpu
self.ram=ram
def config(self,cpu,ram):
print("config is", cpu, ram)
com1=Computer()
com2=Computer()
com1.config("i5",8)
com2.config("i3",16)'''
#*******************************
class Bankaccount:
    """Minimal interactive bank account driven by console input."""
    print("welcome sir/madam:")  # executes once, when the class body runs
    amount = 0  # class-level default balance; shadowed per-instance on first deposit

    def __init__(self, name):
        self.name = name

    def deposit(self):
        """Prompt for an amount and add it to the balance."""
        value = int(input("enter amount:"))
        self.amount += value
        print("amount deposited:", value)

    def withdraw(self):
        """Prompt for an amount and subtract it if funds suffice."""
        value = int(input("enter amount:"))
        if self.amount >= value:
            self.amount -= value
            print("amount withdrawn:", value)
        else:
            print("insufficient funds")

    def balance(self):
        """Print the current balance."""
        print("net balance is:", self.amount)
# Demo driver: exercises the account interactively from stdin.
b = Bankaccount("subbu")
print(b.name)
b.deposit()
b.withdraw()
b.balance()
print("Thankyou sir/madam,Have a nice day:")
#********************************************************
#class vendingmachine:
|
22,644 | 05a7b3a2c4c84d4c785c624aae738eeda6a184b6 | '''
Created on Mar 24, 2015
@author: lly
'''
import logging
from bleu import BLEU
def GetScorer(sctype, scconfig):
    """Factory: return a scorer instance for *sctype*.

    Only "BLEU" is currently supported; any other type is logged and
    rejected.

    :param sctype: scorer type name, e.g. "BLEU"
    :param scconfig: configuration object forwarded to the scorer
    :raises Exception: if *sctype* is not recognised
    """
    if sctype == "BLEU":
        return BLEU(scconfig)
    # Unknown type: log it and fail loudly.  (The original had an
    # unreachable `return None` after this raise — removed.)
    logging.info("unknown score type: " + sctype)
    raise Exception("unknown")
if __name__ == '__main__':
pass |
22,645 | f169025d2bd018ee577a8d1c5541e67d6f57974f | import sys
import re
import math
def largestPowOftwo(dec, power):
    """Return the exponent of the largest power of two not exceeding *dec*,
    searching upward from *power* (callers pass 0)."""
    # Walk up until 2**power reaches or passes dec.
    while dec - 2 ** power > 0:
        power += 1
    if dec - 2 ** power == 0:
        # dec is exactly a power of two.
        return power
    return power - 1
def decToBinary(dec):
    """Return the binary representation of a non-negative integer as a string.

    Fixes the original's handling of 0, which produced "" instead of "0".

    :param dec: non-negative integer
    :raises ValueError: if *dec* is negative (the original silently
        returned "" in that case)
    """
    print("decimal: ", dec)  # diagnostic output preserved from the original
    if dec < 0:
        raise ValueError("dec must be non-negative")
    if dec == 0:
        return "0"
    bits = []
    # Index of the most significant set bit (replaces the recursive helper).
    highest = dec.bit_length() - 1
    for power in range(highest, -1, -1):
        if dec >= 2 ** power:
            bits.append("1")
            dec -= 2 ** power
        else:
            bits.append("0")
    return "".join(bits)
# --- command-line driver: binary calculator ---------------------------------
# usage: prog <binary> <binary> <op>, where <op> is one of: x / + -
decimals = []
if len(sys.argv) != 4 :
    sys.exit("please provide two binary arguments followed by either x, /, +, or -")
for arg in range(1, len(sys.argv) - 1):
    arg = sys.argv[arg]
    # Validate that the argument consists of 0/1 characters only.
    matches = re.findall(r'^[0-1]+$', arg)
    if len(matches) == 0:
        sys.exit("please use binary numbers as input")
    if arg != matches[0]:
        sys.exit("please use binary numbers as input")
    # Manual binary -> decimal conversion, scanning least-significant bit first.
    decimal = 0
    power = 0
    for index in range(len(arg) -1 , -1, -1):
        value = arg[index]
        if value == "1" :
            decimal += 2**power
        power += 1
    decimals.append(decimal)
operator = sys.argv[3]
output = 0
if(operator == "+"):
    output = decimals[0] + decimals[1]
elif operator == "x":
    output = decimals[0] * decimals[1]
elif operator == "-":
    output = decimals[0] - decimals[1]
elif operator == "/":
    # Floor division; raises ZeroDivisionError for a zero divisor.
    output = math.floor(decimals[0] / decimals[1])
else:
    sys.exit("operator not supported")
print("Binary: ", decToBinary(output))
|
class Man(object):
    """A person who can introduce himself."""

    def __init__(self, name):
        self._name = name

    def say(self):
        """Print a short self-introduction."""
        message = 'Hi! My name is %s!' % self._name
        print(message)
class Jetpack(object):
    """Decorator-style wrapper: adds fly() while delegating every other
    attribute to the wrapped man."""

    def __init__(self, man):
        self._man = man

    def __getattr__(self, item):
        # Fall through to the wrapped object for any attribute this
        # instance does not define itself.
        return getattr(self._man, item)

    def fly(self):
        """Print a flying announcement for the wrapped man."""
        print('%s fly with jetpack!' % self._man._name)
if __name__ == '__main__':
    # Demo: wrap a Man in a Jetpack; say() is delegated, fly() is added.
    man = Man('Mark')
    man_jetpack = Jetpack(man)
    man_jetpack.say()
    man_jetpack.fly()
22,647 | e7e4b19f21b5354a7537c083876a9ccea0959976 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# dicomInfo.py
usage="""
usage:
python dicomInfo.py folderName/
note:
print Dicom Info to screen
"""
import os
import sys
import dicom
def printDsBasic(ds):
    """Print the key DICOM header fields of one series (Python 2 syntax).

    *ds* is a pydicom dataset; vendor-private Siemens tags (groups 0x0019,
    0x0029, 0x0051) are read directly by (group, element) number.
    NOTE(review): raises KeyError if any of these tags is absent — confirm
    all inputs carry the full Siemens private block.
    """
    print '\n--- --- New Series Info: --- ---'
    print " Series Number ", ds[0x0020, 0x0011].value
    print " Series Description ", ds[0x0008, 0x103e].value # LO: 't2_tse_SA_512'
    print " [GradientMode] ", ds[0x0019, 0x100f].value #SH: 'Normal'
    print "\n Pixel Spacing ", ds[0x0028, 0x0030].value
    print " Slice Thickness ", ds[0x0018, 0x0050].value
    print " Spacing Between Slices ", ds[0x0018, 0x0088].value
    print "\n Acquisition Matrix ", ds[0x0018, 0x1310].value
    print " Rows ", ds[0x0028, 0x0010].value
    print " Columns ", ds[0x0028, 0x0011].value
    print " Fild of View ", ds[0x0051, 0x100c].value
    print "\n [PositivePCSDirections] ", ds[0x0051, 0x1013].value #SH: '+LPH'
    #print " Patient Position ", ds[0x0018, 0x5100].value CS: 'HFS'
    print "\n Image Type ", ds[0x0008, 0x0008].value # CS: ['ORIGINAL', 'PRIMARY', 'M', 'ND', 'NORM']
    print "\n Manufacturer ", ds[0x0008, 0x0070].value # LO: 'SIEMENS'
    print " Institution Name ", ds[0x0008, 0x0080].value # LO: 'HOPITAL NEURO-CARDIOLOGIQUE LYON'
    print " Institution Address ", ds[0x0008, 0x0081].value # ST: 'Avenue Doyen Lepine 14,BRON,LYON,FR,69500'
    print "\n Patient's Name ", ds[0x0010, 0x0010].value # PN: '61-10'
    print " Patient's Sex ", ds[0x0010, 0x0040].value # CS: 'F'
    print " Patient's Age ", ds[0x0010, 0x1010].value # AS: '026Y'
    print "\n Scanning Sequence ", ds[0x0018, 0x0020].value # CS: 'SE'
    print " Sequence Variant ", ds[0x0018, 0x0021].value # CS: ['SK', 'SP', 'OSP']
    print " MR Acquisition Type ", ds[0x0018, 0x0023].value # CS: '2D'
    print " Sequence Name ", ds[0x0018, 0x0024].value # SH: '*tse2d1_13'
    print " Repetition Time ", ds[0x0018, 0x0080].value # DS: '5270'
    print " Echo Time ", ds[0x0018, 0x0081].value # DS: '102'
    print " Magnetic Field Strength ", ds[0x0018, 0x0087].value # DS: '1.5'
    print "\n Image Position (Patient) ", ds[0x0020, 0x0032].value # DS: ['-140.17887394514', '-71.444345677834', '103.32542656514']
    print " Image Orientation (Patient) ", ds[0x0020, 0x0037].value # DS: ['0.96819733646427', '-0.2501877648158', '-4.622649e-009', '0.06885999451951', '0.26648012853844', '-0.9613774712614']
    print " Slice Location ", ds[0x0020, 0x1041].value # DS: '-71.778530079241'
    # print "\n [CSA Image Header Type] ", ds[0x0029, 0x1008].value # CS: 'IMAGE NUM 4'
    # print " [CSA Image Header Version] ", ds[0x0029, 0x1009].value # LO: '20100202'
    # print " [CSA Image Header Info] ", ds[0x0029, 0x1010].value # OB: Array of 9388 bytes
    # print " [CSA Series Header Type] ", ds[0x0029, 0x1018].value # CS: 'MR'
    # print " [CSA Series Header Version] ", ds[0x0029, 0x1019].value # LO: '20100202'
    # print " [CSA Series Header Info] ", ds[0x0029, 0x1020].value # OB: Array of 70156 bytes
    print "\n Requested Procedure Description ", ds[0x0032, 0x1060].value # LO: 'Cardio_coeur DIFFUSION'
    # print " Study Comments ", ds[0x0032, 0x4000].value # LT: 'NOYADE'
    print " [CSA Image Header Type] ", ds[0x0051, 0x1008].value # CS: 'IMAGE NUM 4'
    print " [CSA Image Header Version ??] ", ds[0x0051, 0x1009].value # LO: '1.0'
    print " [Unknown] ", ds[0x0051, 0x100a].value # LO: 'TA 02:12'
    # print " Pixel Data ", ds[0x7fe0, 0x0010].value # OW: Array of 458752 bytes
def printDs(pathName):
    """Walk *pathName* (a folder of DICOM slices, sorted by filename): print
    a full header block whenever a new series number appears, then per-file
    diffusion info (b-value and gradient direction) when the series'
    gradient mode is not 'Normal'.  Python 2 syntax."""
    fileList = os.listdir(pathName)
    fileList.sort()
    gradientMode = [] # used to note the changes
    seriesNumber = [] # used to note the changes
    for fileName in fileList:
        fullName = os.path.join(pathName, fileName)
        ds = dicom.read_file(fullName)
        # another series ?!
        if seriesNumber != ds[0x0020, 0x0011].value:
            seriesNumber = ds[0x0020, 0x0011].value
            printDsBasic(ds)
            gradientMode = ds[0x0019, 0x100f].value
            if gradientMode != 'Normal':
                print "\n [NumberOfImagesInMosaic] ", ds[0x0019, 0x100a].value
        # print b value and gradient
        if gradientMode != 'Normal':
            bValue = ds[0x0019, 0x100c].value
            if bValue == 0:
                # b=0 image: no gradient direction to report.
                print '\n', fileName, '>>',
                print "[B_value]", bValue
            else:
                print fileName, '>>',
                print "[B_value]", bValue,
                print " [DiffusionGradientDirection]", ds[0x0019, 0x100e].value
if __name__ == "__main__":
    # Expect exactly one argument: the folder of DICOM files.
    if len(sys.argv) != 2:
        print usage
        sys.exit()
    pathName = sys.argv[1]
    printDs(pathName)
    print "---done---"
#print " Acquisition Number ", ds[0x0020, 0x0012].value
#print " Instance Number ", ds[0x0020, 0x0013].value
#print " Smallest Image Pixel Value ", ds[0x0028, 0x0106].value
#print " Largest Image Pixel Value ", ds[0x0028, 0x0107].value
#print " Bits Allocated ", ds[0x0028, 0x0100].value
#print " Bits Stored ", ds[0x0028, 0x0101].value
# ,
#print " Number of Averages ", ds[0x0018, 0x0083].value DS: '1'
#print " Echo Number(s) ", ds[0x0018, 0x0086].value IS: '1'
|
22,648 | 66c9eb27c10037570ca60366c7f186cc4a8c3c2a | from copy import deepcopy
def is_win(board, player):
    """True if *player* occupies a full row, column, or diagonal of the
    3x3 board."""
    lines = []
    lines.extend(board)                                                  # rows
    lines.extend([[board[r][c] for r in range(3)] for c in range(3)])    # columns
    lines.append([board[i][i] for i in range(3)])                        # main diagonal
    lines.append([board[2 - i][i] for i in range(3)])                    # anti-diagonal
    return any(all(cell == player for cell in line) for line in lines)
def is_draw(board):
    """True when no empty cell remains on the board."""
    return all(' ' not in row for row in board)
def is_terminal(board):
    """True once the game is over: either side has won or the board is full."""
    return (is_win(board, CPU_PLAYER)
            or is_win(board, HUMAN_PLAYER)
            or is_draw(board))
def print_board(board):
    """Pretty-print the board: cells joined by ' | ', rows separated by a rule."""
    print('\n')
    last_row = len(board) - 1
    for i, row in enumerate(board):
        print(*row, sep=' | ')
        # Original compared i against len(line) - 1, i.e. the row *width*;
        # the correct bound is the number of rows.  Identical for 3x3
        # boards, but wrong for non-square input.
        if i != last_row:
            print('--+---+--')
    print('\n')
def candidates(board, player):
    """Return every successor board obtained by placing *player* on one
    blank cell, in row-major order.  The input board is never mutated."""
    moves = []
    size = len(board)
    for row in range(size):
        for col in range(size):
            if board[row][col] == ' ':
                successor = deepcopy(board)
                successor[row][col] = player
                moves.append(successor)
    return moves
def evaluate_heuristic_sequence(sequence):
    """Score one row/column/diagonal from the CPU's point of view:
    0 for empty or contested lines, +-1 for a single mark, +-10 for two."""
    blanks = sequence.count(' ')
    contested = CPU_PLAYER in sequence and HUMAN_PLAYER in sequence
    if blanks == 3 or contested:
        # Empty line, or blocked by both players: worthless to either side.
        return 0
    if blanks == 2:
        # Exactly one mark on the line.
        return 1 if CPU_PLAYER in sequence else -1
    # Two (or three) marks of a single player.
    return 10 if CPU_PLAYER in sequence else -10
def heuristic(board):
    """Static evaluation of *board*: +-inf for decided games, otherwise the
    sum of per-line scores over all rows, columns, and both diagonals."""
    if is_win(board, CPU_PLAYER):
        return float('+inf')
    if is_win(board, HUMAN_PLAYER):
        return float('-inf')
    size = len(board)
    lines = list(board)                                                   # rows
    lines += [[board[r][c] for r in range(size)] for c in range(size)]    # columns
    lines.append([board[i][i] for i in range(size)])                      # main diagonal
    lines.append([board[r][c] for r, c in zip(range(3), range(2, -1, -1))])  # anti-diagonal
    return sum(evaluate_heuristic_sequence(line) for line in lines)
def minimax_heuristic(board, depth=2, alpha=float('-inf'), beta=float('+inf'), maximizing=False):
    """Depth-limited minimax with alpha-beta pruning.

    Returns the heuristic value of *board*; the CPU maximizes and the
    human minimizes.  The cutoff tests and the update order of alpha/beta
    are deliberate — do not reorder.
    """
    if depth==0 or is_terminal(board):
        return heuristic(board)
    if maximizing:
        value = float('-inf')
        for child in candidates(board, CPU_PLAYER):
            value = max(value, minimax_heuristic(child, depth - 1, alpha, beta, False))
            if value >= beta:
                # Beta cutoff: the minimizer already has a better option.
                break
            alpha = max(alpha, value)
    else:
        value = float('+inf')
        for child in candidates(board, HUMAN_PLAYER):
            value = min(value, minimax_heuristic(child, depth - 1, alpha, beta, True))
            if value <= alpha:
                # Alpha cutoff: the maximizer already has a better option.
                break
            beta = min(beta, value)
    return value
if __name__ == '__main__':
    CPU_PLAYER = 'X'
    HUMAN_PLAYER = 'O'
    # If the human goes first they take 'X', otherwise the CPU does.
    human_play_first = input('Você deseja começar o jogo [s|n]: ') == 's'
    if human_play_first:
        HUMAN_PLAYER = 'X'
        CPU_PLAYER = 'O'
    board = [[' ', ' ', ' '],
             [' ', ' ', ' '],
             [' ', ' ', ' ']]
    while not is_terminal(board):
        if human_play_first:
            print_board(board)
            # Positions are numbered 1..9, row-major.
            position = int(input('Insira a posição da jogada [1-9]: '))
            i = (position - 1) // 3
            j = (position - 1) % 3
            board[i][j] = HUMAN_PLAYER
            if is_terminal(board):
                break
        # CPU plays the successor board with the best minimax value.
        candidate_moves = candidates(board, CPU_PLAYER)
        board = max(candidate_moves, key=minimax_heuristic)
        # From the second round onward the human always moves before the CPU.
        human_play_first = True
    print_board(board)
    if is_win(board, HUMAN_PLAYER):
        print('Você venceu!!')
    elif is_win(board, CPU_PLAYER):
        print('Você perdeu!!')
    else:
        print('Deu Empate!!')
|
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
    """Converts an integer to a base36 string.

    :param number: integer to encode (may be negative)
    :param alphabet: digit set; its length defines the base
    :raises TypeError: if *number* is not an integer
    """
    if not isinstance(number, int):
        # Two fixes: the original checked against the Python-2-only name
        # `long` (NameError on Python 3), and its `raise` sat outside the
        # `if` (after a bare `pass`), so every call raised unconditionally.
        raise TypeError('number must be an integer')
    base36 = ''
    sign = ''
    if number < 0:
        sign = '-'
        number = -number
    if 0 <= number < len(alphabet):
        # Single-digit fast path.
        return sign + alphabet[number]
    while number != 0:
        number, i = divmod(number, len(alphabet))
        base36 = alphabet[i] + base36
    return sign + base36
def hash2(data):
    """Hash the string *data* with a multiplicative congruential mixer and
    return the result encoded in base 36.

    NOTE(review): this reads like a port of a JavaScript hash (the inline
    comment below discusses JS bitwise semantics) — confirm constants
    against the original before changing anything.  Assumes *data* is
    non-empty; otherwise r stays None and the final format raises.
    """
    norm = 2 ** (-32)
    a = 2095533
    s = 0
    c = 1
    t = 0
    t0 = 0
    r = None
    for char in data:
        # Fold each character code into the fractional state s, kept in [0, 1).
        s = s - ord(char) * 65537 * norm
        if s < 0:
            s = s + 1
        t = a * s + c * norm
        '''
        Since bitwise ops are only defined on integers, floor floats.
        In JS, (0.74 | 93.21) === (0 | 93).
        s = t - (c = int(t) | 0) is syntactically invalid
        '''
        c = int(t) | 0
        t0 = s = t - c
        # Second mixing round.
        t = a * s + c * norm
        c = int(t) | 0
        s = t - c
        r = s + t0 * norm
    # Careful : std rep for floats is 6 dec.
    # Take the leading fractional digits of r (up to 16 significant) as an int.
    r = int("{0:.16}".format(r)[2:])
    return "{0}".format(base36encode(r))
|
22,650 | 16b7fdfffdce75be73e8214ca57da0c82df54a29 | from jarvis import *
if __name__ == "__main__":
    # i looks like a state counter threaded through main() — confirm its
    # meaning in the jarvis module.
    i=0
    # Busy loop: block on the wake-word listener, hand control to main()
    # whenever the heard phrase contains "jarvis".
    while True:
        query = wakecommand().lower()
        if 'jarvis' in query:
            i=main(i)
22,651 | fb376fd20d52121d9bd6be8c9bf5bdd4c35c0308 | from Sender import Sender
from Receiver import Receiver
|
22,652 | ebec776c107c8d4f8511a9ae66276346c5d1197f | import logging
def setup_logger(logger_name, level=logging.INFO):
    """Create (or fetch) a logger that echoes records to stderr.

    :param logger_name: name passed to ``logging.getLogger``
    :param level: threshold applied to both the logger and its handler
    :return: the configured ``logging.Logger``
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # Guard against re-configuration: the original attached a fresh
    # StreamHandler on every call, so repeated calls for the same name
    # duplicated every log line.
    if not logger.handlers:
        ch = logging.StreamHandler()
        ch.setLevel(level)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
22,653 | 368c64a011efcc5a3b234b58cbd05ae61c670fde | """tasker project management package.
the :mod:`tasker` module contains a model view control stucture to view and manipulate task data.
- :mod:`tasker.model`
- :mod:`tasker.control`
- :mod:`tasker.ui`
- :mod:`tasker.db_cofig`
- :mod:`tasker.templates`
One can use the :func:`tasker.control.new_project` to create a new project in the database to hold tasks relationships
and states.
"""
import logging
# Log format: originating file and function ahead of the message.
FORMAT = "%(filename)s:%(funcName)s - %(message)s"
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
log = logging.getLogger(__name__)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# NOTE(review): the module docstring describes the `tasker` package but the
# model and DB settings come from `tagging` — confirm this is intentional.
from tagging.model import Base
from tagging.db_config import database
__author__ = 'Dominik'
# Bind the declarative metadata to the configured engine and expose a
# session factory for the rest of the package.
engine = create_engine(database)
Base.metadata.bind = engine
Session = sessionmaker(bind=engine)
|
22,654 | aaf1b57467464375e66d47dda2f1cef607869a56 | import numexpr
electronvolt = 1.0e-9;
kiloelectronvolt = 1.e+3*electronvolt;
megaelectronvolt = 1.e+6*electronvolt;
gigaelectronvolt = 1.e+9*electronvolt;
teraelectronvolt = 1.e+12*electronvolt;
petaelectronvolt = 1.e+15*electronvolt;
MeV = megaelectronvolt;
eV = electronvolt;
keV = kiloelectronvolt;
GeV = gigaelectronvolt;
TeV = teraelectronvolt;
PeV = petaelectronvolt;
class baseEnum(int):
    """An int subclass carrying a symbolic name; repr() shows the name."""
    # Both attributes are filled in per-member by metaEnum below.
    name = None
    values = {}
    def __repr__(self):
        return self.name
class metaEnum(type):
    """Metaclass that turns plain ``NAME = int`` class attributes into
    ``baseEnum`` members and collects them in a ``values`` dict keyed by
    integer value."""
    def __new__(cls, classname, bases, classdict):
        newdict = {"values":{}}
        for k in classdict.keys():
            # Skip dunder/private names and the bookkeeping attributes.
            if not (k.startswith('_') or k == 'name' or k == 'values'):
                val = classdict[k]
                member = baseEnum(val)
                member.name = k
                newdict['values'][val] = member
                newdict[k] = member
        # Tell each member about the values in the enum
        for k in newdict['values'].keys():
            newdict['values'][k].values = newdict['values']
        # Return a new class with the "values" attribute filled
        return type.__new__(cls, classname, bases, newdict)
# Subclass attributes are converted to baseEnum members by metaEnum.
class enum(baseEnum, metaclass=metaEnum):
    """This class mimicks the interface of boost-python-wrapped enums.
    Inherit from this class to construct enumerated types that can
    be passed to the I3Datatype, e.g.:
    class DummyEnummy(tableio.enum):
        Foo = 0
        Bar = 1
        Baz = 2
    desc = tableio.I3TableRowDescription()
    desc.add_field('dummy', tableio.I3Datatype(DummyEnummy), '', '')
    """
class ParticleType(enum):
    # Primary particle codes; paired by member name with PDGCode via the
    # from_corsika/from_pdg helpers attached below.  NOTE(review): these
    # look like CORSIKA particle IDs — confirm against the CORSIKA manual.
    PPlus = 14
    He4Nucleus = 402
    N14Nucleus = 1407
    O16Nucleus = 1608
    Al27Nucleus = 2713
    Fe56Nucleus = 5626
    NuE = 66
    NuEBar = 67
    NuMu = 68
    NuMuBar = 69
    NuTau = 133
    NuTauBar = 134
class PDGCode(enum):
    # Monte Carlo particle-numbering codes for the same species as
    # ParticleType (nuclei use the 10-digit 10LZZZAAAI form — confirm
    # against the PDG numbering scheme).
    PPlus = 2212
    He4Nucleus = 1000020040
    N14Nucleus = 1000070140
    O16Nucleus = 1000080160
    Al27Nucleus = 1000130270
    Fe56Nucleus = 1000260560
    NuE = 12
    NuEBar = -12
    NuMu = 14
    NuMuBar = -14
    NuTau = 16
    NuTauBar = -16
# Attach cross-converters: PDGCode.from_corsika(corsika_id) and
# ParticleType.from_pdg(pdg_id) look up the member with the same name in
# the sibling enum.
PDGCode.from_corsika = classmethod(lambda cls, i: getattr(cls, ParticleType.values[i].name))
ParticleType.from_pdg = classmethod(lambda cls, i: getattr(cls, PDGCode.values[i].name))
def build_lookup(mapping, var='ptype', default='ptype'):
    """
    Build an expression equivalent to a lookup table
    """
    # Recursively nest where() clauses: the first pair becomes the
    # outermost test, the default the innermost fallback.
    if not mapping:
        return str(default)
    (key, value), rest = mapping[0], mapping[1:]
    inner = build_lookup(rest, var, default)
    return 'where(%s==%s, %s, %s)' % (var, key, value, inner)
class CompiledFlux(object):
    """
    An efficient pre-compiled form of a multi-component flux. For single-element evalutions
    this is ~2 times faster than switching on the primary type with an if statement; for 1e5
    samples it is 2000 times faster than operating on masked slices for each primary type.
    """
    # Compiled numexpr program mapping PDG codes to CORSIKA codes.
    pdg_to_corsika = numexpr.NumExpr(build_lookup([(int(PDGCode.from_corsika(v)), v) for v in ParticleType.values.keys()]))

    def __init__(self, expr):
        """Compile *expr*, a numexpr string over the variables E and ptype."""
        self.expr = numexpr.NumExpr(expr)
        # by default, assume PDG codes
        self._translator = CompiledFlux.pdg_to_corsika

    def to_PDG(self):
        """
        Convert to a form that takes PDG codes rather than CORSIKA codes.
        """
        # Local import: `copy` was never imported at module level, so the
        # original raised NameError whenever this method was called.
        import copy
        new = copy.copy(self)
        new._translator = CompiledFlux.pdg_to_corsika
        return new

    def __call__(self, E, ptype):
        """
        Evaluate the flux.

        :param E: particle energy in GeV
        :param ptype: particle type code
        :type ptype: int
        """
        if self._translator:
            ptype = self._translator(ptype)
        return self.expr(E, ptype)

    @staticmethod
    def build_lookup(mapping, var='ptype', default=0.):
        """
        Build an expression equivalent to a lookup table
        """
        # force mapping to be a list if it wasn't already
        mapping = list(mapping)
        if len(mapping) > 0:
            # NOTE: the recursive call resolves to the *module-level*
            # build_lookup; passing default explicitly keeps output consistent.
            return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1], build_lookup(mapping[1:], var, default))
        else:
            return str(default)
class GaisserHillas(CompiledFlux):
    """Single-population cosmic-ray flux: per-species power law with a
    rigidity-dependent exponential cutoff (Gaisser-Hillas style — confirm
    parameter provenance against the source paper)."""
    ptypes = [getattr(ParticleType, p) for p in ('PPlus', 'He4Nucleus', 'N14Nucleus', 'Al27Nucleus', 'Fe56Nucleus')]
    def get_expression(self, flux, gamma, rigidity):
        # Nuclear charge Z: last two digits of the code for nuclei
        # (ptype > 100), 1 otherwise.
        z = "where(ptype > 100, ptype%100, 1)"
        return "%(flux)s*E**(-%(gamma)s)*exp(-E/(%(rigidity)s*%(z)s))" % locals()
    def get_flux(self):
        # One normalisation per species, one inner list per population.
        return [[7860., 3550., 2200., 1430., 2120.]]
    def get_gamma(self):
        return [[2.66, 2.58, 2.63, 2.67, 2.63]]
    def get_rigidity(self):
        return [4*PeV]
    def __init__(self):
        # Build one lookup expression per population and sum them into a
        # single compiled numexpr program.
        flux = [self.build_lookup(zip(self.ptypes, f)) for f in self.get_flux()]
        gamma = [self.build_lookup(zip(self.ptypes, g)) for g in self.get_gamma()]
        rigidity = self.get_rigidity()
        CompiledFlux.__init__(self, "+".join([self.get_expression(f, g, r) for f, g, r in zip(flux, gamma, rigidity)]))
class GaisserH3a(GaisserHillas):
    """Extends GaisserHillas with two additional populations at higher
    cutoff rigidities (appears to be the "H3a" parameterisation — confirm
    against the source paper)."""
    def get_flux(self):
        return super(GaisserH3a, self).get_flux() + [[20]*2 + [13.4]*3, [1.7]*2 + [1.14]*3]
    def get_gamma(self):
        return super(GaisserH3a, self).get_gamma() + [[2.4]*5, [2.4]*5]
    def get_rigidity(self):
        return super(GaisserH3a, self).get_rigidity() + [30*PeV, 2e3*PeV]
class GaisserH4a(GaisserH3a):
    """Variant of H3a whose third population is proton-only (the single
    normalisation zips only with PPlus) with a harder cutoff (appears to
    be the "H4a" parameterisation — confirm against the source paper)."""
    def get_flux(self):
        return super(GaisserH4a, self).get_flux()[:-1] + [[200]]
    def get_gamma(self):
        return super(GaisserH4a, self).get_gamma()[:-1] + [[2.6]]
    def get_rigidity(self):
        return super(GaisserH4a, self).get_rigidity()[:-1] + [60e3*PeV]
|
# Greedy string transformation driven by stdin: s is the string, K a budget
# of forward letter-increments (wrapping z -> a).
# alp[c] = increments needed to turn c into 'a' (z -> 1, ..., b -> 25).
# NOTE(review): num has 25 elements vs 26 letters, so zip drops the final
# pairing; 'a' itself is special-cased in the loop below — verify this is
# the intended compensation.
alp1 = list('abcdefghijklmnopqrstuvwxyz'[::-1])
num = [i for i in range(1,26)]
alp = {}
for a,n in zip(alp1,num):
    alp[a] = n
s = input()
K = int(input())
ans =''
now = 0
# First pass, left to right: turn each letter into 'a' whenever the
# remaining budget covers it.
while True:
    if s[now] == 'a':
        ans += 'a'
    elif K >= alp[s[now]]:
        K -= alp[s[now]]
        ans += 'a'
    else:
        ans += s[now]
    now += 1
    if now == len(s):
        break
# Second pass: spend any leftover budget on the last character, mod 26.
if K:
    alp1 = alp1[::-1]
    last = ans[-1]
    ans = ans[:-1]
    ind = alp1.index(last)
    ind = (ind+K)%26
    ans += alp1[ind]
    print(ans)
else:
    print(ans)
22,656 | aa8f648b2d117bcb1025a7fafca6497da79d0a3f | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pandas as pd
import pymysql
class CatPipeline:
    """Scrapy pipeline: persists Maoyan movie items to MySQL, falling back
    to local txt/csv dumps when the insert fails."""

    def __init__(self):
        # Connection settings.  Consider moving these into Scrapy's
        # settings.py instead of hard-coding credentials here.
        dbInfo = {
            'host' : 'localhost',
            'port' : 3306,
            'user' : 'root',
            'password' : 'root',
            'db' : 'test',
            'table':'猫眼'
        }
        self.host = dbInfo['host']
        self.port = dbInfo['port']
        self.user = dbInfo['user']
        self.password = dbInfo['password']
        self.db = dbInfo['db']
        self.table = dbInfo['table']

    def open_spider(self, spider):
        """Open the MySQL connection and create a cursor (called once by
        Scrapy when the spider starts)."""
        self.conn = pymysql.connect(
            host = self.host,
            port = self.port,
            user = self.user,
            password = self.password,
            db = self.db,
            charset='utf8mb4',
        )
        self.cur = self.conn.cursor()

    def process_item(self, item, spider):
        """Insert *item* into MySQL; on any failure, roll back and append
        the item to maoyan.txt and maoyan.csv instead."""
        try:
            # Parameterized query: the original interpolated scraped values
            # directly into the SQL string, which breaks on quotes and is an
            # injection risk.  Only the table name (trusted, fixed in
            # __init__) is formatted in, since identifiers cannot be bound.
            sql = ("INSERT INTO `{}`(`电影`, `类型`, `上映日期`) "
                   "VALUES (%s, %s, %s);").format(self.table)
            self.cur.execute(sql, (item['movie_name'], item['movie_type'],
                                   item['movie_time']))
            self.conn.commit()
            return item
        except Exception:
            # Best-effort fallback: keep the data locally.
            self.conn.rollback()
            movie_name = item['movie_name']
            movie_type = item['movie_type']
            movie_time = item['movie_time']
            movie_number = item['movie_number']
            output = f'|{movie_number}|\t|{movie_name}|\t|{movie_type}|\t|{movie_time}|\n\n'
            with open('./maoyan.txt', 'a+', encoding='utf-8') as article:
                article.write(output)
            movie_list = [movie_number, movie_name, movie_type, movie_time]
            movie1 = pd.DataFrame(data=movie_list)
            movie1.to_csv('./maoyan.csv', mode='a', encoding='utf8', index=False, header=False)
            return item

    def close_spider(self, spider):
        """Close the DB connection when the spider finishes."""
        self.conn.close()
|
class Employee:
    """Simple employee record with id, name, designation, and salary."""

    def __init__(self, eid, ename, desgn, salary):
        self.eid = eid
        self.ename = ename
        self.desgn = desgn
        self.salary = salary

    def printValues(self):
        """Print each field on its own line, in declaration order."""
        for value in (self.eid, self.ename, self.desgn, self.salary):
            print(value)

    def __str__(self):
        return self.ename
# Demo: print the employee(s) with the highest salary.
emp = Employee(1, "ammu", "studnt", 23000)
emp1 = Employee(2, "achu", "studnt", 25000)
emp2 = Employee(1, "addu", "studnt", 33000)
lst = [emp, emp2, emp1]
# The original called max(em.salary) on a plain int inside the loop, which
# raises TypeError.  Compute the maximum salary once, over all employees,
# then print everyone who earns it.
max_sal = max(e.salary for e in lst)
for em in lst:
    if em.salary == max_sal:
        print(em)
|
22,658 | 8a35a52ab3a5fd8a1fb31950f70ae0225c8b86a7 | from PyQt5 import uic
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class TimerMessageBox(QMessageBox):
    """Warning dialog that shows a mask-compliance message (Spanish) and
    closes itself automatically after *timeout* seconds; it has no buttons,
    so the timer is the only way it closes."""
    def __init__(self, timeout=3, parent=None):
        super(TimerMessageBox, self).__init__(parent)
        self.setWindowTitle("Atención..!!")
        self.setIcon(QMessageBox.Warning)
        # Remaining seconds before auto-close.
        self.time_to_wait = timeout
        self.setText("Número máximo de personas sin mascarillas superado..!")
        # No buttons: the dialog cannot be dismissed by the user.
        self.setStandardButtons(QMessageBox.NoButton)
        # Tick once per second; each tick runs changeContent.
        self.timer = QTimer(self)
        self.timer.setInterval(1000)
        self.timer.timeout.connect(self.changeContent)
        self.timer.start()
    def changeContent(self):
        """Per-second callback: refresh the text and count down to close."""
        self.setText("Número máximo de personas sin mascarillas superado..!\nPor favor use mascarilla")
        self.time_to_wait -= 1
        if self.time_to_wait <= 0:
            self.close()
22,659 | d2728d7575ef0dc2b92751167e2f2bbfbbd131fc | This is python file
first line.
second line.
|
22,660 | 5ed0a099cf79f3ab444cf56ad0922590341cafca | # region ornek_1
"""
kullanıcı sayı girecek
int dönüşümü yapılacak
girilen sayının tam bölenleri ekrana yazılacak
s = int(input("l. s. giriniz \t: "))
for i in range(1, s+1):
if s % i == 0:
print(i, end = " ")
"""
# endregion
# region ornek_2
"""
girilen sayıya kadar olan asal sayıları
ekrana yazdıran programı yazalımkullanıcı sayı girecek
say = 0
son = int(input("l. son değeri. giriniz \t: "))
for i in range(2, son+1):
for j in range(1, i):
if i % j == 0:
say +=1
if say<2:
print(i, end= " ")
say = 0
"""
# endregion
# region ornek_3
"""
obeb = 0
s1 = int(input("lütfen 1. s . gi "))
s2 = int(input("lütfen 2. s . gi "))
for i in range(1, min(s1, s2)+1):
if s1 % i == 0:
if s2 % i == 0:
obeb = i
print(obeb)
"""
# endregion
# region ornek_4
"""
kullanıcının girdiği sayı TAU sayısı mıdır?
TAU sayısı :
24 → 1, 2, 3, 4, 6, 8, 12, 24 → 24 % 8 == 0 TAU dur.
15 → 1, 3, 5, 15 → 15 % 4 != 0 TAU değildir.
Anlamı: pozitif bölenleri sayısına bakıldığında mod 0 ise TAU dur.
sayac = 0
sayi = int(input("lütfen sy giriniz: "))
for i in range(1, sayi + 1):
if sayi % i == 0:
sayac += 1
if sayi % sayac == 0:
print("TAUDUR")
else:
print("TAU DEĞİLDİR")
"""
# endregion
# region ornek_5
"""
mükemmel sayı kendisi haricindeki tüm çarpanlarının
toplamı kendisini veren sayıdır
6 → 1, 2, 3 → toplamı 6 mükemmel sayı
24 → 1, 2, 3, 4, 6, 8, 12 → toplamı 36 mükemmel sayı değildir
toplam = 0
sayi = int(input("lütfen sy giriniz: "))
for i in range(1, int(sayi/2)+1):
if sayi%i == 0:
toplam += i
if toplam == sayi:
print("mükemmeldir")
else:
print("mükemmel değildir")
"""
# endregion
# ödev →
"""
- Kullanıcının girdiği 2sayı arasındaki değerleri,
her zaman küçükten büyüğe sıralayan for döngüsü yazın.
1,10 -> 1,2,3,4,5,6,7,8,9
10-1 -> 1,2,3,4,5,6,7,8,9
"""
|
22,661 | 6568459a88d13262d5edf032b53b2dae045a5a2e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
# Entry page of the employee directory.
URL = "https://portal.compassplus.com/Employees/Pages/OfficeReferenceBook.aspx"
# Employee search endpoint; `{}` receives the search string.
# NOTE(review): p/c presumably mean page-size/page-number and `fl` the
# field list to return — confirm against the portal API.
URL_GET_EMPLOYEES_LIST = (
    "https://portal.compassplus.com/_layouts/15/tbi/employees.ashx?"
    "p=50&c=1&s={}&fl=MPhotoUrl;NameLink;JobTitle;Department;WorkPhone"
)
# Per-employee detail flyout; `{}` receives the employee identifier.
URL_GET_EMPLOYEE_INFO = (
    "https://portal.compassplus.com/_layouts/15/tbi/ui.ashx?u={}"
    "&ctrl=TBI.SharePoint.Employees.WebParts/EmployeeFlyout"
)
# Base name of the local settings file.
SETTINGS_FILE_NAME = "settings"
PERSON_PLACEHOLDER_PHOTO = "iVBORw0KGgoAAAANSUhEUgAAAMgAAACWCAYAAACb3McZAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAkbSURBVHhe7Z0LUxNJFIX3//8bRPDBW1FUFA0g+AAEBN+KBQHxgeOe1LBbu7nTmSQzSd/Od6q+Klezydx0n8x09+3bf42NjWUAYINBAAJgEIAAGAQgAAYBCIBBAAJgEIAAGAQgAAYBCIBBAAJgEIAAGAQgAAYBCIBBAAJgEIAAGAQgAAYBCIBBAAJgEIAAGAQgAAYBCIBBAAJgEIAAGAQgAAYBCIBBBsjU1FS2srKSbW9vZ+/fv8+Ojo5afP/+/Z8/v3v3rvXvep1eb70PDA4MUjPXr1/PXrx4kZ2enma9SP/fy5cvs9nZWfP9oV4wSE3Mz89n+/v72cXFRd7V+9eXL1+ypaUl8/OgHjBIxczNzWUfP37Mu3Q9klFkQOvzoVowSEWMj49nz58/r/SOEdLv37+zra2t7OrVq+b1QDVgkAq4ceNG61d9GNLnapxjXRf0Dwbpk4WFhez8/DzvrsPR2dlZ69HOuj7oDwzSB7dv385+/vyZd9PhStfBAL56MEiP3Lp1K/v161fePeOQxiWPHj0yrxd6A4P0wPT0dGtxL0ZpkuDu3bvmdUP3YJAumZiYyE5OTvLuGKd0Z2NhsRowSJfs7u7m3TBuHR8fMwVcARikC5aXl/Pu50N7e3tmHFAeDFISPVp9+/Yt73o+pEE7K+79gUFKooRDj/r8+bMZD5QDg5Tg2rVr0ax39KIHDx6YcUFnMEgJvN49LtVsNlu5YlZsEAaDdEBjD893j0utrq6a8UEYDNKBx48f513MtzTta8UHYTBIB4aVpVuHFhcXzRihGAwS4ObNm3nXSkPa4WjFCcVgkACNRiPvWmlI+WNXrlwxYwUbDBLg7du3eddKR0rRt2IFGwwSINaM3X707NkzM1awwSAFpDb+uJQKSljxgg0GKeD+/ft5l0pLP378MOMFGwxSwPr6et6l0pOKTFgxQzsYpACV/0xVd+7cMWOGdjBIAYeHh3l3Sk+q+2vFDO1gkAJURDpVra2tmTFDOxikAO2jSFWbm5tmzNAOBikgZYMofd+KGdrBIAXo/I5UpeMUrJihHQxSQMqDdBXZtmKGdjBIAcp8TVUbGxtmzNAOBilAv7Kp6smTJ2bM0A4GKUA1blMVpUnLg0EKUHHqVDUzM2PGDO1gkAJUtlOF11KT6vayaao8GCSAjmVOTdpjb8UKNhgkQIoJi4rJihVsMEiAe/fu5d0qHSkmK1awwSABNA5Jadutxh+Tk5NmrGCDQTqQ0oKhilBYMUIxGKQDKU33UsS6ezBICVKYzaImVm9gkBLol9e7KPfTGxikJJ73h6g6vc44seKCMBikJDrKzOvKOuntvYNBuuD169d5l/MjnauoM06seKAzGKQLVE/K22E6ykq2YoFyYJAuUckcLzo4ODBjgPJgkB7w8Kh1dnbGwLwCMEgPKAUl5pOnlFKiBU7r2qE7MEiP6Nf55OQk75LxSDNtKrxtXTN0DwbpAw3aYzKJzEFZ0WrBIH2iO0kMqSgXFxfZ8vKyeY3QOxikAjQmGebA/fz8nKPVagKDVIjK6eiAmkFKJ0Zx3kd9YJCK0SPXq1evak9LkRGpb1U/GKQmZmdna6nvq9QRnX7FzsDBgEFqRusRe3t7faeofP36tZU2wp6OwYJBCtBzvY4q02OMsmHVOZXRa722DBrILy0ttSqra5ExZBg9nuk1qkCiz52enjbfs1sWFhZa8ezs7GS7u7utuDQtrLud9XrAIP9BFT+0b1sr0UXSukdVCYAyoTrt/7Fe2w8aF3WaZdOOQ+2/13fAXepfRt4gU1NTrQNlNFXajT59+tQ6S916z5jQ3afZbOZXXU76Lp4+fUou19+MpEH0C6m7gDp5P9JMUsyF
EBRjP2Mf3Uk1I6cfEev9R4GRM4g69PHxcd4FqpGe5zXGsD5vGGiGq+qFS71fVWMhT4yMQdS4/d4xQtLYZG5uzvzsQaKJAE0F1yGls+iOMkqPXiNhEM3UDGInoB5JVldXzWuoG22r1Z1sENKAflTOGEnaIBprDKMyombCBvkru7i4mJ2enuafPjg1Gg3zelIiWYOMj49nb968yZty8NKvbFXTwUXIhIO6axRJs13WtaVCkgbRnUO/4jFIC35Vz3RdTk0POjHSkhY1U64Yn6RBtFIcm7RHfGtrq6dndxleq/qqjhhjATsZNdUZruQMok1DsUuPX0pT1+OROr3SP7Sf43IlXf+tv9e/6w6k2aPYpanzFBMokzKInsm7XRFH1SnF4xWSMojm6NFwVffExKBJxiAauNa9SQl1lqabNYNotZFHkjGI9lygOLS2tma2kUeSMIgGh6EUdTRYKe3GaiePJGEQreiiuJRKlZUkDBJzGdBRlR55rbbyhnuDKIMWxadUzkR0bxDtq0ZxKoUC2u4NUuceD9SfNjY2zDbzhGuD6Bbu7cSnUZLqglnt5gnXBtHiIIpXGodY7eYJ1wbRORgobnnP8nVtEG3WQXHr4cOHZtt5wbVBhr2bDnWW9zPaXRuEBcL4pW3PVtt5wbVB2PsRv7SRymo7L7g1iAq1ofil3ZBW+3nBrUFUkRz5kOcTsNwaRJU0kA95TjlxaxBVMEQ+5Pn0XbcG0UE0yIc87zB0a5DDw8P860exy/NaiFuDsAbiR6o2Y7WhB9wahDUQPzo4ODDb0AMuDaKyMpT48aMPHz6Y7egBlwYhzd2Xjo6OzHb0gEuDqGIG8iMVk7Pa0QMuDaLylsiPtOvTakcPuDTI+vp6/tUjL4rpkNNucGmQ7e3t/GtHXuQ1H8ulQVgk9Kf5+XmzLWPHpUFiPGUJhaUTsqy2jB2XBmk2m/nXjrzIa8KiS4NQyd2fdKyc1Zax484gOmYN+dPm5qbZnrHjziAUq/YpnfBrtWfsuDOIjlFG/rS/v2+2Z+y4M8jKykr+lSNP8noCrjuDqGI48idV4bfaM3bcGWRnZyf/ypEnea2P5c4gqtSH/Ekb3Kz2jB13BmGrrU95LSDnziCsovvVxMSE2aYx484grKL7lceMXlcGmZyczL9q5FEzMzNmu8aMK4PoC0Z+5bEEqSuDsBfdtzymvLsyiFKmkV/pTEmrXWPGlUFU4xX5lYptWO0aM64MohqvyK8ajYbZrvEylv0BRxk2BoRZQcsAAAAASUVORK5CYII="
|
22,662 | 0e25a96313347223691db8d0c0f3c3407f399fc8 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'tblMenu.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated UI for a QTableWidget demo with a custom context
    menu.  Regenerate from tblMenu.ui instead of editing by hand (see the
    file header warning)."""
    def setupUi(self, Form):
        """Build the widget tree and wire the context-menu signal."""
        Form.setObjectName("Form")
        Form.resize(824, 548)
        self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.tableWidget = QtWidgets.QTableWidget(Form)
        # Custom policy: right-clicks emit customContextMenuRequested.
        self.tableWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.tableWidget.setRowCount(5)
        self.tableWidget.setColumnCount(4)
        self.tableWidget.setObjectName("tableWidget")
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setItem(0, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setItem(0, 1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setItem(0, 2, item)
        item = QtWidgets.QTableWidgetItem()
        icon = QtGui.QIcon.fromTheme("ok")
        item.setIcon(icon)
        self.tableWidget.setItem(0, 3, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setItem(1, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setItem(1, 1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setItem(1, 2, item)
        self.horizontalLayout.addWidget(self.tableWidget)
        self.retranslateUi(Form)
        # Right-click anywhere in the table calls Form.generateMenu(point).
        self.tableWidget.customContextMenuRequested['QPoint'].connect(Form.generateMenu)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Install all user-visible (Chinese) strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "QTableWidget例子"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("Form", "姓名"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("Form", "性别"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("Form", "体重"))
        item = self.tableWidget.horizontalHeaderItem(3)
        item.setText(_translate("Form", "图形"))
        # Sorting is disabled while filling cells so rows keep their order.
        __sortingEnabled = self.tableWidget.isSortingEnabled()
        self.tableWidget.setSortingEnabled(False)
        item = self.tableWidget.item(0, 0)
        item.setText(_translate("Form", "张三"))
        item = self.tableWidget.item(0, 1)
        item.setText(_translate("Form", "男"))
        item = self.tableWidget.item(0, 2)
        item.setText(_translate("Form", "160"))
        item = self.tableWidget.item(1, 0)
        item.setText(_translate("Form", "李四"))
        item = self.tableWidget.item(1, 1)
        item.setText(_translate("Form", "女"))
        item = self.tableWidget.item(1, 2)
        item.setText(_translate("Form", "170"))
        self.tableWidget.setSortingEnabled(__sortingEnabled)
22,663 | 8fe658ea4ae9093f04f01d9338648296aa8bdcda | from abc import ABC, abstractmethod
from enum import Enum
import numpy as np
import scipy.optimize as opt
def _distance_point_point(x1, y1, x2, y2):
return np.linalg.norm([x1 - x2, y1 - y2])
def _as_arrays(*x):
assert len(x) % 2 == 0
return list(map(np.asarray, zip(x[::2], x[1::2])))
def _distance_point_line(x, y, x1, y1, x2, y2):
    """Perpendicular distance from point (x, y) to the infinite line
    through (x1, y1) and (x2, y2)."""
    # https://stackoverflow.com/a/39840218
    # it's at this point that I regret using coordinates
    p, p1, p2 = _as_arrays(x, y, x1, y1, x2, y2)
    # |(p2-p1) x (p1-p)| / |p2-p1|: parallelogram area over base length.
    return np.abs(np.cross(p2-p1, p1-p))/np.linalg.norm(p2-p1)
def _distance_point_segment(x, y, x1, y1, x2, y2):
    """Distance from the point (x, y) to the segment (x1, y1)-(x2, y2).

    When the point's projection falls outside the segment, the nearest
    feature is an endpoint; otherwise it is the infinite-line distance.
    """
    p, a, b = _as_arrays(x, y, x1, y1, x2, y2)
    # Obtuse angle at endpoint a: projection falls before the segment.
    if np.dot(p - a, b - a) < 0:
        return _distance_point_point(x, y, x1, y1)
    # Obtuse angle at endpoint b: projection falls past the segment.
    if np.dot(p - b, a - b) < 0:
        return _distance_point_point(x, y, x2, y2)
    return _distance_point_line(x, y, x1, y1, x2, y2)
def _from_angle(theta):
return np.asarray([np.cos(theta), np.sin(theta)])
class Renderer(ABC):
    """Abstract 2-D scene renderer.

    Objects are queued in world coordinates (draw_*), then finish_drawing
    computes a world->screen transform that fits everything into the
    width x height canvas (minus a margin), draws via the subclass's
    _draw_* primitives, and finally places text labels so they do not
    overlap drawn geometry (constrained COBYLA minimization).
    """

    class Objects(Enum):
        # Tags for tuples stored in self.scene / self.transformed_objects.
        SEGMENT = 1
        POINT = 2
        CIRCLE = 3

    # Fraction of each screen dimension left empty around the scene.
    MARGIN_RATIO = 0.1
    # Nominal glyph box (pixels) used when reserving space for labels.
    LETTER_HEIGHT = 24
    LETTER_WIDTH = LETTER_HEIGHT
    # Radius of the circle over-approximating one glyph box.
    LETTER_RADIUS = np.linalg.norm([LETTER_WIDTH, LETTER_HEIGHT])/2
    EDGE_WEIGHT = 1
    POINT_RADIUS = 3

    def __init__(self, name, width, height):
        self.name = name
        self.width = width            # canvas width in pixels
        self.height = height          # canvas height in pixels
        self.x_bound = None           # (min, max) world x seen so far
        self.y_bound = None           # (min, max) world y seen so far
        self.scene = []               # queued objects, world coordinates
        self.transformed_objects = []  # drawn objects, screen coordinates
        self.labels = []              # (text, screen_x, screen_y) pending labels

    @staticmethod
    def _expanded_bound(bound, a):
        """Return *bound* grown, if necessary, to contain the value *a*."""
        if bound is None:
            return (a, a)
        else:
            if a < bound[0]:
                return (a, bound[1])
            elif a > bound[1]:
                return (bound[0], a)
            else:
                return bound

    def expand_x(self, x):
        self.x_bound = self._expanded_bound(self.x_bound, x)

    def expand_y(self, y):
        self.y_bound = self._expanded_bound(self.y_bound, y)

    def draw_segment(self, x1, y1, x2, y2):
        """Queue a line segment (world coordinates)."""
        self.expand_x(x1)
        self.expand_x(x2)
        self.expand_y(y1)
        self.expand_y(y2)
        self.scene.append((Renderer.Objects.SEGMENT, x1, y1, x2, y2))

    def draw_point(self, label, x, y):
        """Queue a labelled point (world coordinates)."""
        # assume negligable radius for scene sizing purposes
        self.expand_x(x)
        self.expand_y(y)
        self.scene.append((Renderer.Objects.POINT, label, x, y))

    def draw_circle(self, cx, cy, r):
        """Queue a circle (world coordinates); bounds include the full disc."""
        self.expand_x(cx - r)
        self.expand_x(cx + r)
        self.expand_y(cy - r)
        self.expand_y(cy + r)
        self.scene.append((Renderer.Objects.CIRCLE, cx, cy, r))

    def transform_length(self, length):
        """Scale a world-space length to integer pixels."""
        return int(length * self.transform_scale)

    def transform_point(self, x, y):
        """Map a world point to integer screen coordinates."""
        x, y = self._transform_point(x, y)
        xp = self.transform_length(x - self.scene_center_x) + self.screen_center_x
        yp = self.transform_length(y - self.scene_center_y) + self.screen_center_y
        # should transform to large values where rounding doesn't matter
        return (int(xp), int(yp))

    def _transform_point(self, x, y):
        """ Implementing classes can override to perform a transformation """
        return (x, y)

    def calculate_transforms(self):
        """Fit the scene's bounding box into the canvas, preserving aspect.

        Must run after all draw_* calls; requires at least one queued object
        (x_bound/y_bound must be set).
        """
        scene_width = self.x_bound[1] - self.x_bound[0]
        scene_height = self.y_bound[1] - self.y_bound[0]
        margin_scale = 1 - 2*self.MARGIN_RATIO
        scale_w = margin_scale * self.width / scene_width
        scale_h = margin_scale * self.height / scene_height
        # Uniform scale: the tighter of the two axes wins.
        self.transform_scale = min(scale_w, scale_h)
        self.scene_center_x = scene_width/2 + self.x_bound[0]
        self.scene_center_y = scene_height/2 + self.y_bound[0]
        self.screen_center_x = self.width/2
        self.screen_center_y = self.height/2

    def place_label(self, label, preferred_x, preferred_y):
        """Find screen coordinates for *label* near the preferred position
        such that none of its per-letter circles overlap drawn objects.

        Solved as COBYLA minimization of distance-to-preferred under one
        clearance inequality per (letter, object) pair, restarting from
        random offsets of growing size. Returns an (x, y) array, or None
        when no feasible placement was found.
        """
        length = len(label)
        # NOTE(review): width/height are computed but never used.
        width = length * self.LETTER_WIDTH
        height = self.LETTER_HEIGHT
        def error(xy):
            # Objective: stay close to the preferred position.
            x, y = xy
            return np.linalg.norm([x - preferred_x, y - preferred_y])
        def constraints():
            out = []
            # 1 for each circle representing a letter
            for i in range(length):
                for constraint in letter_constraints(i):
                    out.append(constraint)
            return out
        def letter_constraints(i):
            out = []
            # 1 constraint for each other object
            for object in self.transformed_objects:
                type = object[0]
                attrs = object[1:]
                out.append(letter_object_constraint(i, type, attrs))
            return out
        # look at one letter as a circle
        def letter_object_constraint(i, type, attrs):
            def error(xy):
                # (x, y) is the label center; (cx, cy) the i-th letter's center.
                x, y = xy
                r = self.LETTER_RADIUS
                cy = y
                cx = x - ((length-1)/2 - i) * r * 2
                if type == Renderer.Objects.SEGMENT:
                    x1, y1, x2, y2 = attrs
                    return _distance_point_segment(cx, cy, x1, y1, x2, y2) - r
                elif type == Renderer.Objects.POINT:
                    x1, y1 = attrs
                    # NOTE(review): uses the label center (x, y), not the
                    # per-letter center (cx, cy) like the other branches --
                    # looks like a bug; confirm intended behavior.
                    return _distance_point_point(x, y, x1, y1) - r
                elif type == Renderer.Objects.CIRCLE:
                    cx1, cy1, r1 = attrs
                    # absolute value allow letters inside circles
                    return np.abs(_distance_point_point(cx, cy, cx1, cy1) - r1) - r
                # NOTE(review): 'err' is undefined -- this fallthrough raises
                # NameError for any object type not handled above (currently
                # unreachable for the three Objects variants).
                return err
            return {
                "type": "ineq",
                "fun": error
            }
        all_constraints = constraints()
        center_x0 = np.asarray([preferred_x, preferred_y], dtype=float)
        # COBYLA is a local method: retry from random starting offsets of
        # increasing magnitude until one attempt converges.
        for displacement_size in range(1, 10, 2):
            for attempts in range(10):
                displacement = _from_angle(np.random.random() * np.pi * 2)
                # more fudge for longer words
                displacement *= displacement_size * length
                x0 = center_x0 + displacement
                solution = opt.minimize(fun=error, x0=x0, \
                    method="COBYLA", constraints=all_constraints
                )
                if solution.success: break
            if solution.success: break
        if solution.success:
            return solution.x
        else:
            return None

    def finish_drawing(self):
        """Fit the transform, draw all queued objects via the subclass
        primitives, then place and draw labels.

        Returns whatever the subclass's _finish_drawing returns.
        """
        self.calculate_transforms()
        for object in self.scene:
            type = object[0]
            attrs = object[1:]
            if type == Renderer.Objects.SEGMENT:
                x1, y1, x2, y2 = attrs
                x1, y1 = self.transform_point(x1, y1)
                x2, y2 = self.transform_point(x2, y2)
                self._draw_segment(x1, y1, x2, y2)
                self.transformed_objects.append((type, x1, y1, x2, y2))
            elif type == Renderer.Objects.POINT:
                label, x, y = attrs
                x, y = self.transform_point(x, y)
                self._draw_point(x, y)
                # do labels at end afer everything drawn in
                self.labels.append((label, x, y))
                self.transformed_objects.append((type, x, y))
            elif type == Renderer.Objects.CIRCLE:
                cx, cy, r = attrs
                cx, cy = self.transform_point(cx, cy)
                r = self.transform_length(r)
                self._draw_circle(cx, cy, r)
                self.transformed_objects.append((type, cx, cy, r))
            else:
                assert False, "Unknown Object"
        for label, preferred_x, preferred_y in self.labels:
            placement = self.place_label(label, preferred_x, preferred_y)
            if placement is not None:
                label_x, label_y = placement
                self._draw_label(label, int(label_x), int(label_y))
            else:
                print(f"Warning, could not place the label '{label}'")
        return self._finish_drawing()

    @abstractmethod
    def _draw_segment(self, x1, y1, x2, y2):
        pass

    @abstractmethod
    def _draw_point(self, x, y):
        pass

    @abstractmethod
    def _draw_circle(self, cx, cy, r):
        pass

    # NOTE(review): signature is missing 'self'; overriding implementations
    # presumably declare it -- confirm against the concrete subclasses.
    @abstractmethod
    def _draw_label(label, label_x, label_y):
        """ Implemented label methods should place labels within a LETTER_HEIGHT x
        len(label) * LETTER_WIDTH region centered at label_x, label_y"""
        pass

    @abstractmethod
    def _finish_drawing(self):
        pass
|
22,664 | f9523a4d426397fa2613343ed31416c61daff2ea | # a115_buggy_image.py
# Draw a spider: a filled body circle with evenly spaced legs.
import turtle as trtl

trtl_spider = trtl.Turtle()

# Body: a very thick pen makes the small circle read as a filled blob.
trtl_spider.pensize(40)
trtl_spider.circle(20)

spider_legs = 6
leg_length = 38
# BUG FIX: a full turn is 360 degrees, not 380 -- the old value spaced the
# legs unevenly around the body.
leg_angle = 360 / spider_legs
trtl_spider.pensize(5)
leg_increment = 0

# Draw each leg from the body center outward at its own heading.
while (leg_increment < spider_legs):
    trtl_spider.goto(0, 0)
    trtl_spider.setheading(leg_angle * leg_increment)
    # BUG FIX: use the configured leg_length instead of a hard-coded 70.
    trtl_spider.forward(leg_length)
    leg_increment = leg_increment + 1

trtl_spider.hideturtle()

# Keep the window open until it is closed by the user.
wn = trtl.Screen()
wn.mainloop()
22,665 | 544e110c48ba35e35ee2c214a047d47e72de5719 |
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
host_from_gpu,
gpu_contiguous, HostFromGpu,
gpu_alloc_empty)
from theano.sandbox.cuda.dnn import GpuDnnConvDesc, GpuDnnConv, GpuDnnConvGradI, dnn_conv, dnn_pool
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import numpy as np
def deconv(X, w, subsample=(1, 1), border_mode=(0, 0), conv_mode='conv'):
    """Transposed convolution ("deconvolution") via cuDNN's input-gradient op.

    Computes the gradient of a forward convolution with respect to its
    input, which upsamples X: output spatial size is input size times the
    subsample factor on each axis (see the gpu_alloc_empty shape below).

    X: 4D image tensor (batch, in_channels, rows, cols) on the GPU.
    w: 4D filter tensor; its second axis becomes the output channel count.
    Returns the upsampled 4D GPU tensor.
    """
    img = gpu_contiguous(X)
    kerns = gpu_contiguous(w)
    # Describe the *forward* convolution whose input-gradient we want; the
    # empty allocation only supplies the target (upsampled) output shape.
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample, conv_mode=conv_mode)(gpu_alloc_empty(img.shape[0], kerns.shape[1], img.shape[2]*subsample[0], img.shape[3]*subsample[1]).shape, kerns.shape)
    out = gpu_alloc_empty(img.shape[0], kerns.shape[1], img.shape[2]*subsample[0], img.shape[3]*subsample[1])
    d_img = GpuDnnConvGradI()(kerns, img, out, desc)
    return d_img
class DeConvLayer(object):
    """Deconvolution (transposed convolution) layer with fixed 2x upsampling.

    NOTE: this file is Python 2 (print statement below). The layer wraps
    deconv() with subsample=(2,2), border_mode=(2,2), optional batch
    normalization, a bias, and a relu/tanh/identity activation.
    """
    def __init__(self, in_channels, out_channels, activation, W, b,batch_norm = False):
        #self.filter_shape = np.asarray((in_channels, out_channels, kernel_len, kernel_len))
        # W is a shared variable; axis 2 of its value is the kernel size.
        kernel_len = W.get_value().shape[2]
        print "kernel len", kernel_len
        # activation: "relu", "tanh", or None (identity); see output().
        self.activation = activation
        self.batch_norm = batch_norm
        self.W = W
        self.b = b

    def output(self, input):
        """Build the symbolic output: deconv -> (batch norm) -> bias -> activation."""
        conv_out = deconv(input, self.W, subsample=(2,2), border_mode=(2,2))

        if self.batch_norm:
            # Normalize over batch and spatial axes; the +1 in the
            # denominator guards against a zero std.
            conv_out = (conv_out - conv_out.mean(axis = (0,2,3), keepdims = True)) / (1.0 + conv_out.std(axis = (0,2,3), keepdims = True))

        # Broadcast the per-channel bias over batch and spatial dims.
        conv_out = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')

        if self.activation == "relu":
            out = T.maximum(0.0, conv_out)
        elif self.activation == "tanh":
            out = T.tanh(conv_out)
        elif self.activation == None:
            out = conv_out
        else:
            raise Exception()

        return out
|
22,666 | 216e50912eeb6e1c931a1ed7e503a8b661705f9d | from pycsp3.problems.data.parsing import *
from pycsp3.compiler import Compilation
import random
def random_edge():
    """Return a uniformly random pair of distinct node indices.

    Re-rolls both endpoints until they differ (no self-loops).
    """
    while True:
        a = random.randint(0, data.nNodes - 1)
        b = random.randint(0, data.nNodes - 1)
        if a != b:
            return (a, b)
# Interactively collect the instance parameters into the pycsp3 data object.
data.nNodes = ask_number("Number of nodes ?")
data.nColors = ask_number("Number of colors ?")
nEdges = ask_number("Number of edges ?")
# Random edges without self-loops (duplicate edges are possible).
data.edges = [random_edge() for _ in range(nEdges)]
# Suffix used by the compiler to name the generated instance.
Compilation.string_data = "-" + "-".join(str(v) for v in (data.nNodes, data.nColors, nEdges))
22,667 | e90d82e616b9debb29e321237cf0adab0fee8ecd | from utils import create_connection
import pymysql
import numpy as np
from utils import CONFIG
import os
import ast
import astunparse
def get_one_model_result(notebook_id):
    """Aggregate per-model metric scores for one notebook.

    Queries the ``result`` table twice -- once for numeric ``content`` rows
    and once for textual ``str_content`` rows (stringified cross_val_score
    arrays) -- and averages, per model type and metric, every score that
    falls in the open interval (0, 1).

    Args:
        notebook_id: primary key of the notebook in the ``result`` table.

    Returns:
        dict mapping model_type -> {metric_type: mean score}.
    """
    def deal_content(content, metric_type):
        # Numeric scores are trusted only when they look like a ratio in (0, 1).
        if isinstance(content, (int, float)) and 0 < content < 1:
            return (content, metric_type)
        return (-1, metric_type)

    def deal_str_content(str_content, metric_type):
        # cross_val_score is stored as a stringified array "[0.9 0.8 ...]";
        # average the parsable entries that fall in (0, 1).
        if metric_type != 'cross_val_score':
            return (-1, metric_type)
        str_content = str_content[1:-1]
        total = 0
        count = 0
        for token in str_content.split(' '):
            try:
                value = float(token)
            except ValueError:
                continue
            if 0 < value < 1:
                total += value
                count += 1
        if count != 0:
            return (total / count, metric_type)
        return (-1, metric_type)

    cursor, db = create_connection()
    # model_dic: model_type -> {metric_type: [score_sum, score_count]}
    model_dic = {}

    # NOTE: notebook_id is interpolated into the SQL; fine for internal ints,
    # but a parameterized query would be safer.
    sql = "SELECT distinct content,metric_type,model_type from result where notebook_id=" + str(notebook_id)
    cursor.execute(sql)
    for row in cursor.fetchall():
        temp_score, metric = deal_content(row[0], row[1])
        model = row[2]
        if model not in model_dic:
            model_dic[model] = {}
        if temp_score != -1:
            if metric not in model_dic[model]:
                model_dic[model][metric] = [0, 0]
            model_dic[model][metric][0] += temp_score
            model_dic[model][metric][1] += 1

    sql = "SELECT distinct str_content,metric_type,model_type from result where id not in (select id from result where isnull(str_content)) and notebook_id=" + str(notebook_id)
    cursor.execute(sql)
    for row in cursor.fetchall():
        temp_score, metric = deal_str_content(row[0], row[1])
        model = row[2]
        if model not in model_dic:
            model_dic[model] = {}
        if temp_score != -1:
            # BUG FIX: this membership test previously checked an unrelated,
            # always-empty 'score' dict, which reset the accumulator to
            # [0, 0] on every row and discarded prior sums.
            if metric not in model_dic[model]:
                model_dic[model][metric] = [0, 0]
            model_dic[model][metric][0] += temp_score
            model_dic[model][metric][1] += 1

    # Collapse each [sum, count] accumulator into its mean.
    for model in model_dic:
        for metric in model_dic[model]:
            total, count = model_dic[model][metric]
            if count != 0 and total != -1:
                model_dic[model][metric] = total / count
    return model_dic
def get_one_seq_len(notebook_id):
    """Return the number of operator rows recorded for *notebook_id*.

    Returns None if the COUNT query unexpectedly yields no rows.
    """
    cursor, db = create_connection()
    sql = "SELECT count(id) from operator where notebook_id=" + str(notebook_id)
    cursor.execute(sql)
    rows = cursor.fetchall()
    if rows:
        return rows[0][0]
def get_one_result(notebook_id):
    """Average the recorded metric scores of one notebook, per metric type.

    Two passes over the ``result`` table: numeric ``content`` values, then
    textual ``str_content`` values (stringified cross_val_score arrays).
    Only scores strictly inside (0, 1) are counted.

    Returns:
        dict mapping metric_type -> mean score (metrics with no valid
        scores are absent).
    """
    def deal_content(content,metric_type):
        # Accept only int/float values that look like a ratio in (0, 1).
        if type(content).__name__ == 'float' or type(content).__name__ == 'int':
            if content > 0 and content < 1:
                return (content,metric_type)
            else:
                return (-1,metric_type)
        else:
            return (-1,metric_type)
    def deal_str_content(str_content,metric_type):
        # Parse "[0.9 0.8 ...]" cross_val_score strings; average the tokens
        # that parse as floats in (0, 1).
        if metric_type == 'cross_val_score':
            str_content = str_content[1:-1]
            score_list = str_content.split(' ')
            score = 0
            count = 0
            for i in score_list:
                try:
                    if float(i) > 0 and float(i) < 1:
                        score += float(i)
                        count += 1
                except:
                    continue
            if count != 0:
                return (score/count,metric_type)
            else:
                return (-1,metric_type)
        else:
            return (-1,metric_type)
    cursor, db = create_connection()
    # Pass 1: numeric scores.
    sql = "SELECT distinct content,metric_type from result where notebook_id=" + str(notebook_id);
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    # score/count: per-metric running sum and number of valid scores.
    count = {}
    score = {}
    for row in sql_res:
        dc = deal_content(row[0], row[1])
        temp_score = dc[0]
        if temp_score != -1:
            if dc[1] not in score.keys():
                score[dc[1]] = 0
            if dc[1] not in count.keys():
                count[dc[1]] = 0
            score[dc[1]] += temp_score
            count[dc[1]] += 1
    # Pass 2: textual scores (rows whose str_content is not NULL).
    sql = "SELECT distinct str_content,metric_type from result where id not in (select id from result where isnull(str_content)) and notebook_id=" + str(notebook_id);
    cursor.execute(sql)
    sql_res_1 = cursor.fetchall()
    for row in sql_res_1:
        dsc = deal_str_content(row[0],row[1])
        temp_score = dsc[0]
        if temp_score != -1:
            if dsc[1] not in score.keys():
                score[dsc[1]] = 0
            if dsc[1] not in count.keys():
                count[dsc[1]] = 0
            score[dsc[1]] += temp_score
            count[dsc[1]] += 1
    # Collapse sums into means.
    result = {}
    for i in score:
        if count[i] != 0 and score[i] != -1:
            result[i] = score[i]/count[i]
    return result
def get_exist_dic():
    """Per-metric score statistics, split by operator presence.

    Partitions notebooks that have rows in ``result`` into those that also
    have rows in ``operator`` ("has") and those that do not ("nhas"), then
    computes (mean, variance, count) of each metric's per-notebook average
    scores. Raw per-metric score lists for a fixed set of tracked metrics
    are persisted as .npy files, as is the final summary (0d0mgap1_.npy).

    Returns:
        (has_result, nhas_result) -- each a dict mapping
        metric_type -> (mean, variance, count).
    """
    # Tracked metric -> filename stem; save order matches the original code.
    tracked = {
        'f1_score': 'f1_score_list',
        'score': 'score_list_',
        'cross_val_score': 'cross_val_score_list',
        'accuracy_score': 'accuracy_score_list',
        'roc_auc_score': 'roc_auc_score_list',
        'precision_score': 'precision_score_list',
        'recall_score': 'recall_score_list',
        'best_score_': 'best_score_list',
        'auc': 'auc_list',
    }

    def _stats(scores):
        # (mean, variance, count); variance is the population variance.
        n = len(scores)
        if n == 0:
            return (-1, -1, 0)
        mean = sum(scores) / n
        var = sum((sc - mean) ** 2 for sc in scores) / n
        return (mean, var, n)

    cursor, db = create_connection()
    sql = "select distinct notebook_id from result"
    cursor.execute(sql)
    in_result = [row[0] for row in cursor.fetchall()]

    sql = "select distinct notebook_id from operator"
    cursor.execute(sql)
    has_operator = {row[0] for row in cursor.fetchall()}

    # group -> metric -> list of per-notebook mean scores.
    all_scores = {'has': {}, 'nhas': {}}
    tracked_lists = {'has': {m: [] for m in tracked},
                     'nhas': {m: [] for m in tracked}}
    for notebook_id in in_result:
        group = 'has' if notebook_id in has_operator else 'nhas'
        temp_score_result = get_one_result(notebook_id)
        for metric, value in temp_score_result.items():
            if value == -1:
                continue
            if metric in tracked:
                tracked_lists[group][metric].append(value)
            all_scores[group].setdefault(metric, []).append(value)

    # BUG FIX: the original nhas branch tested
    # 'nhas_count[i] != [] and nhas_score[i] != -1' -- the operands were
    # swapped (count vs. list); both branches now use the same valid check.
    has_result = {m: _stats(v) for m, v in all_scores['has'].items()}
    nhas_result = {m: _stats(v) for m, v in all_scores['nhas'].items()}

    for metric, stem in tracked.items():
        np.save('has_' + stem, tracked_lists['has'][metric])
        np.save('nhas_' + stem, tracked_lists['nhas'][metric])
    np.save('./0d0mgap1_.npy', {'has_ope': has_result, 'nhas_ope': nhas_result})
    return (has_result, nhas_result)
# def get_exit_dic_len():
# in_result = []
# cursor, db = create_connection()
# sql = "select distinct notebook_id from result"
# cursor.execute(sql)
# sql_res = cursor.fetchall()
# for row in sql_res:
# in_result.append(int(row[0]))
#
# sql = "select distinct notebook_id from operator"
# cursor.execute(sql)
# sql_res = cursor.fetchall()
# has_count = 0
# has_ope_notebooks = []
# has_ope_notebooks = []
# nhas_count = 0
# has_score_list = []
# for row in sql_res:
# if int(row[0]) in in_result:
# has_ope_notebooks.append(int(row[0]))
#
# for notebook in has_ope_notebooks:
# sql = "select rank from operator where notebook_id="+str(notebook)
# cursor.execute(sql)
# sql_res = cursor.fetchall()
# max_rank = 0
# for row in sql_res:
# max_rank += 1
def get_exist_dic_len():
    """Pipeline-length statistics, split by operator presence.

    For every notebook with rows in ``result``, takes its operator count
    (get_one_seq_len) and buckets it by whether the notebook has rows in
    ``operator``. The summary is also saved to ``0d0mgap2_.npy``.

    Returns:
        (has_result, nhas_result) -- each (mean, variance, count),
        or (-1, -1, 0) when the bucket is empty.
    """
    def _stats(scores):
        # (mean, variance, count); variance is the population variance.
        n = len(scores)
        if n == 0:
            return (-1, -1, 0)
        mean = sum(scores) / n
        var = sum((sc - mean) ** 2 for sc in scores) / n
        return (mean, var, n)

    cursor, db = create_connection()
    sql = "select distinct notebook_id from result"
    cursor.execute(sql)
    in_result = [row[0] for row in cursor.fetchall()]

    sql = "select distinct notebook_id from operator"
    cursor.execute(sql)
    has_operator = {row[0] for row in cursor.fetchall()}

    has_score = []
    nhas_score = []
    for notebook_id in in_result:
        length = get_one_seq_len(notebook_id)
        if notebook_id in has_operator:
            has_score.append(length)
        else:
            nhas_score.append(length)

    # BUG FIX: the original nhas branch tested
    # 'nhas_count != [] and nhas_score != -1' -- operands swapped
    # (count compared to a list, list compared to -1).
    has_result = _stats(has_score)
    nhas_result = _stats(nhas_score)

    np.save('./0d0mgap2_.npy', {'has_operator': has_result, 'nhas_operator': nhas_result})
    return (has_result, nhas_result)
def get_dataset_exist_dic():
    """Per-dataset, per-metric score statistics split by operator presence.

    Walks every (notebook, dataset) pair with add_run=1, averages each
    notebook's metric scores (get_one_result) and accumulates them per
    dataset into dataset_temp_score (notebooks WITH operator rows) or
    dataset_temp_score_1 (notebooks WITHOUT). Final result maps
    dataset_id -> metric -> ((mean, var, count) with-operators,
    (mean, var, count) without), saved to exist_operator_groupby_dataset.npy.

    NOTE(review): the tracked has_*/nhas_* per-metric lists append the
    ([], count) accumulator tuple instead of the score value, and the
    has/nhas naming is inverted relative to the branch semantics (the
    "not has operator" branch fills the has_* lists). Both look like
    copy/paste defects inherited from get_exist_dic -- confirm before
    trusting the 1d0mgap1data/*.npy outputs.
    """
    dataset_temp_score = {}     # dataset -> metric -> (score_list, count), WITH operators
    dataset_temp_score_1 = {}   # same shape, WITHOUT operators
    cursor, db = create_connection()
    dataset_dic = np.load('./dataset_score_dic.npy',allow_pickle=True).item()
    print('get operator list and result list')
    sql = "SELECT distinct notebook_id from operator"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    has_operator_list = []
    count= 0
    for row in sql_res:
        count+=1
        has_operator_list.append(row[0])
    np.save('has_operator_list.npy',has_operator_list)
    sql = "SELECT distinct notebook_id from result"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    has_result_list = []
    count = 0
    for row in sql_res:
        count += 1
        has_result_list.append(row[0])
    print(has_operator_list)
    print(has_result_list)
    print('get operator list and result list end')
    print('get pairs')
    sql = "SELECT pair.nid,pair.did from pair inner join notebook on pair.nid=notebook.id where notebook.add_run=1"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    print('get pairs end')
    notebook_id_list = []
    # Per-dataset tracked lists for a fixed subset of metrics.
    has_f1_score_list = {}
    has_score_list_ = {}
    has_cross_val_score_list = {}
    has_accuracy_score_list = {}
    has_roc_auc_score_list = {}
    has_precision_score_list = {}
    has_recall_score_list = {}
    has_best_score_list = {}
    has_auc_list = {}
    nhas_f1_score_list = {}
    nhas_score_list_ = {}
    nhas_cross_val_score_list = {}
    nhas_accuracy_score_list = {}
    nhas_roc_auc_score_list = {}
    nhas_precision_score_list = {}
    nhas_recall_score_list = {}
    nhas_best_score_list = {}
    nhas_auc_list = {}
    for row in sql_res:
        notebook_id = int(row[0])
        dataset_id = int(row[1])
        # Skip duplicate pairs and notebooks without any recorded results.
        if notebook_id in notebook_id_list:
            # print('already in')
            continue
        notebook_id_list.append(notebook_id)
        if notebook_id not in has_result_list:
            # print('not in result')
            continue
        if notebook_id not in has_operator_list:
            print('not has operator')
            result = get_one_result(notebook_id)
            if dataset_id not in dataset_temp_score_1.keys():
                dataset_temp_score_1[dataset_id] = {}
            # all_score = dataset_dic[dataset_id][0]
            # all_count = dataset_dic[dataset_id][1]
            for i in result:
                if result[i] != -1:
                    if i not in dataset_temp_score_1[dataset_id]:
                        dataset_temp_score_1[dataset_id][i] = ([], 0)
                    # NOTE(review): these appends store the accumulator
                    # tuple, not result[i] -- likely meant to track scores.
                    if i == 'f1_score':
                        if dataset_id not in has_f1_score_list:
                            has_f1_score_list[dataset_id] = []
                        has_f1_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    if i == 'score':
                        if dataset_id not in has_score_list_:
                            has_score_list_[dataset_id] = []
                        has_score_list_[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    if i == 'cross_val_score':
                        if dataset_id not in has_cross_val_score_list:
                            has_cross_val_score_list[dataset_id] = []
                        has_cross_val_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    if i == 'accuracy_score':
                        if dataset_id not in has_accuracy_score_list:
                            has_accuracy_score_list[dataset_id] = []
                        has_accuracy_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    if i == 'roc_auc_score':
                        if dataset_id not in has_roc_auc_score_list:
                            has_roc_auc_score_list[dataset_id] = []
                        has_roc_auc_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    # if i == 'precision_score':
                    #     if dataset_id not in has_precision_score_list:
                    #         has_precision_score_list[dataset_id] = []
                    #     has_precision_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    # if i == 'recall_score':
                    #     if dataset_id not in has_recall_score_list:
                    #         has_recall_score_list[dataset_id] = []
                    #     has_recall_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    if i == 'best_score_':
                        if dataset_id not in has_best_score_list:
                            has_best_score_list[dataset_id] = []
                        has_best_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    # if i == 'auc':
                    #     if dataset_id not in has_auc_list:
                    #         has_auc_list[dataset_id] = []
                    #     has_auc_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                    all_score = dataset_temp_score_1[dataset_id][i][0]
                    all_count = dataset_temp_score_1[dataset_id][i][1]
                    all_score.append(result[i])
                    all_count += 1
                    dataset_temp_score_1[dataset_id][i] = (all_score, all_count)
                    print(dataset_id, dataset_temp_score_1[dataset_id][i])
            continue
        print('has operator')
        result = get_one_result(notebook_id)
        if dataset_id not in dataset_temp_score.keys():
            dataset_temp_score[dataset_id] = {}
        # all_score = dataset_dic[dataset_id][0]
        # all_count = dataset_dic[dataset_id][1]
        for i in result:
            if result[i] != -1:
                if i not in dataset_temp_score[dataset_id]:
                    dataset_temp_score[dataset_id][i] = ([],0)
                # NOTE(review): same accumulator-tuple appends as above.
                if i == 'f1_score':
                    if dataset_id not in nhas_f1_score_list:
                        nhas_f1_score_list[dataset_id] = []
                    nhas_f1_score_list[dataset_id].append(dataset_temp_score[dataset_id][i])
                if i == 'score':
                    if dataset_id not in nhas_score_list_:
                        nhas_score_list_[dataset_id] = []
                    nhas_score_list_[dataset_id].append(dataset_temp_score[dataset_id][i])
                if i == 'cross_val_score':
                    if dataset_id not in nhas_cross_val_score_list:
                        nhas_cross_val_score_list[dataset_id] = []
                    nhas_cross_val_score_list[dataset_id].append(dataset_temp_score[dataset_id][i])
                if i == 'accuracy_score':
                    if dataset_id not in nhas_accuracy_score_list:
                        nhas_accuracy_score_list[dataset_id] = []
                    nhas_accuracy_score_list[dataset_id].append(dataset_temp_score[dataset_id][i])
                if i == 'roc_auc_score':
                    if dataset_id not in nhas_roc_auc_score_list:
                        nhas_roc_auc_score_list[dataset_id] = []
                    nhas_roc_auc_score_list[dataset_id].append(dataset_temp_score[dataset_id][i])
                # if i == 'precision_score':
                #     if dataset_id not in has_precision_score_list:
                #         has_precision_score_list[dataset_id] = []
                #     has_precision_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                # if i == 'recall_score':
                #     if dataset_id not in has_recall_score_list:
                #         has_recall_score_list[dataset_id] = []
                #     has_recall_score_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                if i == 'best_score_':
                    if dataset_id not in nhas_best_score_list:
                        nhas_best_score_list[dataset_id] = []
                    nhas_best_score_list[dataset_id].append(dataset_temp_score[dataset_id][i])
                # if i == 'auc':
                #     if dataset_id not in has_auc_list:
                #         has_auc_list[dataset_id] = []
                #     has_auc_list[dataset_id].append(dataset_temp_score_1[dataset_id][i])
                all_score = dataset_temp_score[dataset_id][i][0]
                all_count = dataset_temp_score[dataset_id][i][1]
                all_score.append(result[i])
                all_count += 1
                dataset_temp_score[dataset_id][i] = (all_score, all_count)
                print(dataset_id, dataset_temp_score[dataset_id][i])
    # Build (mean, variance, count) per dataset/metric for both partitions:
    # b = WITH-operator stats, c = WITHOUT-operator stats.
    result = {}
    for i in dataset_dic:
        result[i] = {}
        for j in dataset_dic[i]:
            if i not in dataset_temp_score_1:
                c=(-1,-1,0)
            elif j not in dataset_temp_score_1[i]:
                c=(-1,-1,0)
            else:
                if dataset_temp_score_1[i][j][1] == 0:
                    c = (-1, -1, 0)
                else:
                    score_list = dataset_temp_score_1[i][j][0]
                    sum = 0
                    for sc in score_list:
                        sum += sc
                    mean = sum/dataset_temp_score_1[i][j][1]
                    sq = 0
                    for sc in score_list:
                        sq += (sc-mean)**2
                    sq = sq / dataset_temp_score_1[i][j][1]
                    c = (mean, sq, dataset_temp_score_1[i][j][1])
            if i not in dataset_temp_score:
                b=(-1,-1,0)
            elif j not in dataset_temp_score[i]:
                b=(-1,-1,0)
            else:
                if dataset_temp_score[i][j][1] == 0:
                    b = (-1, -1, 0)
                else:
                    score_list = dataset_temp_score[i][j][0]
                    sum = 0
                    for sc in score_list:
                        sum += sc
                    mean = sum / dataset_temp_score[i][j][1]
                    sq = 0
                    for sc in score_list:
                        sq += (sc - mean)**2
                    sq = sq / dataset_temp_score[i][j][1]
                    b = (mean, sq, dataset_temp_score[i][j][1])
            result[i][j] = (b,c)
    np.save('1d0mgap1data/dataset_has_f1_score_list', has_f1_score_list)
    np.save('1d0mgap1data/dataset_nhas_f1_score_list', nhas_f1_score_list)
    np.save('1d0mgap1data/dataset_has_score_list_', has_score_list_)
    np.save('1d0mgap1data/dataset_nhas_score_list_', nhas_score_list_)
    np.save('1d0mgap1data/dataset_has_cross_val_score_list', has_cross_val_score_list)
    np.save('1d0mgap1data/dataset_nhas_cross_val_score_list', nhas_cross_val_score_list)
    np.save('1d0mgap1data/dataset_has_accuracy_score_list', has_accuracy_score_list)
    np.save('1d0mgap1data/dataset_nhas_accuracy_score_list', nhas_accuracy_score_list)
    np.save('1d0mgap1data/dataset_has_roc_auc_score_list', has_roc_auc_score_list)
    np.save('1d0mgap1data/dataset_nhas_roc_auc_score_list', nhas_roc_auc_score_list)
    np.save('1d0mgap1data/dataset_has_best_score_list', has_best_score_list)
    np.save('1d0mgap1data/dataset_nhas_best_score_list', nhas_best_score_list)
    np.save('./exist_operator_groupby_dataset.npy',result)
    return result
def get_dataset_exist_dic_len():
    """Per-dataset pipeline-length statistics, split by operator presence.

    For every (notebook, dataset) pair with add_run=1 and results recorded,
    takes the notebook's operator count (get_one_seq_len) and buckets it
    per dataset into a WITH-operators and a WITHOUT-operators accumulator.

    Returns:
        dict mapping dataset_id -> (has_stats, nhas_stats), where each
        stats tuple is (mean, variance, count) or (-1, -1, 0) when empty.
        Also saved to ``exist_operator_groupby_dataset_len.npy``.
    """
    def _stats(accumulator):
        # accumulator is (length_list, count) -> (mean, variance, count).
        score_list, count = accumulator
        if count == 0:
            return (-1, -1, 0)
        mean = sum(score_list) / count
        var = sum((sc - mean) ** 2 for sc in score_list) / count
        return (mean, var, count)

    dataset_temp_score = {}     # dataset -> (lengths, count), notebooks WITH operators
    dataset_temp_score_1 = {}   # same, notebooks WITHOUT operators
    cursor, db = create_connection()
    # Only the key set of this dict is used: it fixes which datasets appear
    # in the returned mapping.
    dataset_dic = np.load('./dataset_score_dic.npy', allow_pickle=True).item()

    print('get operator list and result list')
    sql = "SELECT distinct notebook_id from operator"
    cursor.execute(sql)
    has_operator_list = [row[0] for row in cursor.fetchall()]
    np.save('has_operator_list.npy', has_operator_list)

    sql = "SELECT distinct notebook_id from result"
    cursor.execute(sql)
    has_result_list = [row[0] for row in cursor.fetchall()]
    print(has_operator_list)
    print(has_result_list)
    print('get operator list and result list end')

    print('get pairs')
    sql = "SELECT pair.nid,pair.did from pair inner join notebook on pair.nid=notebook.id where notebook.add_run=1"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    print('get pairs end')

    notebook_id_list = []
    for row in sql_res:
        notebook_id = int(row[0])
        dataset_id = int(row[1])
        # Skip duplicate pairs and notebooks without recorded results.
        if notebook_id in notebook_id_list:
            continue
        notebook_id_list.append(notebook_id)
        if notebook_id not in has_result_list:
            continue
        if notebook_id not in has_operator_list:
            print('not has operator')
            bucket = dataset_temp_score_1
        else:
            print('has operator')
            bucket = dataset_temp_score
        result = get_one_seq_len(notebook_id)
        if dataset_id not in bucket:
            bucket[dataset_id] = ([], 0)
        all_score, all_count = bucket[dataset_id]
        all_score.append(result)
        bucket[dataset_id] = (all_score, all_count + 1)
        print(dataset_id, bucket[dataset_id])

    result = {}
    for i in dataset_dic:
        # c: WITHOUT-operator stats; b: WITH-operator stats.
        c = _stats(dataset_temp_score_1[i]) if i in dataset_temp_score_1 else (-1, -1, 0)
        # BUG FIX: the original assigned the WITH-operator stats to 'c'
        # (overwriting it) and left 'b' undefined, raising NameError for
        # every dataset that actually had operator data.
        b = _stats(dataset_temp_score[i]) if i in dataset_temp_score else (-1, -1, 0)
        result[i] = (b, c)
    np.save('./exist_operator_groupby_dataset_len.npy', result)
    return result
def get_mean_group_by_dataset():
    """Group per-notebook metric scores by the dataset each notebook uses.

    Only notebooks that have at least one row in `result` and whose
    `notebook.add_run` flag is 1 are considered.  Scores of -1 are treated
    as "metric not reported" and skipped.  The mapping is saved to
    ./dataset_score_dic.npy and returned.

    :return:
    {
        12:{
            'auc': ([0.97,0.22,...], 21),
            ...
        }
    }
    """
    cursor, db = create_connection()
    # Notebooks that actually produced results.  A set makes the
    # membership test in the loop below O(1) instead of O(n) per row
    # (the original used a list, giving O(n^2) overall).
    in_result = set()
    sql = "select distinct notebook_id from result"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    for row in sql_res:
        in_result.add(row[0])
    sql = "SELECT pair.nid,pair.did from pair inner join notebook on pair.nid=notebook.id where notebook.add_run=1"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    dataset_dic = {}
    for row in sql_res:
        notebook_id = int(row[0])
        dataset_id = int(row[1])
        if notebook_id not in in_result:
            continue
        result = get_one_result(notebook_id)
        print("notebookid:" + str(notebook_id)+ ',result:',result)
        if dataset_id not in dataset_dic.keys():
            dataset_dic[dataset_id] = {}
        for i in result:
            if result[i] != -1:
                # accumulate (score list, count) per metric for this dataset
                if i not in dataset_dic[dataset_id]:
                    dataset_dic[dataset_id][i] = ([], 0)
                all_score = dataset_dic[dataset_id][i][0]
                all_count = dataset_dic[dataset_id][i][1]
                all_score.append(result[i])
                all_count += 1
                dataset_dic[dataset_id][i] = (all_score, all_count)
    np.save('./dataset_score_dic.npy', dataset_dic)
    return dataset_dic
# def get_all_score():
# cursor, db = create_connection()
# sql = "SELECT distinct notebook_id FROM result"
# cursor.execute(sql)
# sql_res = cursor.fetchall()
# notebook_score = {}
# for row in sql_res:
# notebook_id = row[0]
# result = get_one_result(notebook_id)
# if notebook_id not in notebook_score.keys():
# notebook_score[notebook_id] = {}
# for i in result:
# if result[i] != -1:
# if i not in notebook_score[notebook_id]:
# notebook_score[notebook_id][i] = (0, 0)
# all_score = notebook_score[notebook_id][i][0]
# all_count = notebook_score[notebook_id][i][1]
# all_score += result[i]
# all_count += 1
# notebook_score[notebook_id][i] = (all_score, all_count)
#
# for i in notebook_score:
# for j in notebook_score[i]:
# all_score = notebook_score[i][j][0]
# all_count = notebook_score[i][j][1]
# if all_count == 0:
# notebook_score[i][j] = -1
# else:
# notebook_score[i][j] = all_score/all_count
# np.save('./notebook_score.npy', notebook_score)
# return notebook_score
def param_walking(node):
    """Classify an AST node as a (kind, content) tuple.

    Recurses through Module/Expr/Assign wrappers and returns pairs such as
    ('str', value), ('Num', value), ('variable', name) or ('func', name).
    Anything unrecognised falls back to (lowercased node type name,
    unparsed source text).
    """
    if type(node).__name__ == 'Str':
        return ('str',node.s)
    elif type(node).__name__ == 'Module':
        if len(node.body) != 0:
            return param_walking(node.body[0])
        else:
            # empty snippet
            return ('str','')
    elif type(node).__name__ == 'Num':
        return ('Num',node.n)
    elif type(node).__name__ == 'Name':
        return ('variable',node.id)
    elif type(node).__name__ == 'Call':
        if (type(node.func).__name__ == 'Name'):
            return ('func',node.func.id)
        elif (type(node.func).__name__ == 'Attribute'):
            return ('func',node.func.attr)
        else:
            # BUG FIX: a call whose func is itself a Call/Subscript/Lambda
            # previously fell through this branch and returned None,
            # crashing callers that unpack the (type, content) tuple.
            return ('func', astunparse.unparse(node.func))
    elif type(node).__name__ == 'Attribute':
        return ('attr', astunparse.unparse(node))
    elif type(node).__name__ == 'Assign':
        return param_walking(node.value)
    elif type(node).__name__ == 'BinOp':
        return ('binop', astunparse.unparse(node))
    elif type(node).__name__ == 'BoolOp':
        return ('boolcomp', astunparse.unparse(node))
    elif type(node).__name__ == 'List':
        return ('list', astunparse.unparse(node))
    elif type(node).__name__ == 'NameConstant':
        return ('nameconst', node.value)
    elif type(node).__name__ == 'Subscript':
        return ('subdata', astunparse.unparse(node))
    elif type(node).__name__ == 'Dict':
        return ('dict', astunparse.unparse(node))
    elif type(node).__name__ == 'Tuple':
        return ('tuple', astunparse.unparse(node))
    elif type(node).__name__ == 'Set':
        return ('set', astunparse.unparse(node))
    elif type(node).__name__ == 'ListComp':
        return ('listcomp', astunparse.unparse(node))
    elif type(node).__name__ == 'Expr':
        return param_walking(node.value)
    elif type(node).__name__ == 'UnaryOp':
        return ('UnaryOp', astunparse.unparse(node))
    elif type(node).__name__ == 'AnnAssign':
        return ('annassign', astunparse.unparse(node))
    else:
        # NOTE(review): on Python >= 3.8 ast.parse emits Constant instead
        # of Str/Num/NameConstant, so literals land here — confirm the
        # intended Python version for this pipeline.
        return (str(type(node).__name__).lower(), astunparse.unparse(node))
def parse_param(param_code):
    """Parse a parameter's source text and classify it.

    Returns the (kind, content) tuple produced by ``param_walking``, or
    ``('compile fail', param_code)`` when the snippet is not valid Python.
    """
    try:
        r_node = ast.parse(param_code)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; ast.parse failures (SyntaxError,
        # ValueError, ...) are all Exception subclasses
        return ('compile fail', param_code)
    return param_walking(r_node)
def get_operator_param_rate(regenerate=True):
    """Build frequency statistics for operator parameters.

    When *regenerate* is True the `operator` table is re-scanned and every
    parameter occurrence is classified with parse_param(); otherwise the
    cached ./param_code_dic.npy is reused.  Writes three files:
    param_code_dic.npy (raw (type, content) tuples per operator/position),
    param_type_rate_dic.npy and param_code_rate_dic.npy (per-position
    (fraction, count) statistics).
    """
    param_code_dic = {}
    param_type_rate_dic = {}
    param_code_rate_dic = {}
    if regenerate == True:
        # CONFIG.read('config.ini')
        operator_dic = eval(CONFIG.get('operators', 'operations'))
        print('select all operator')
        cursor, db = create_connection()
        sql = "SELECT operator,parameter_1_code,parameter_2_code,parameter_3_code,parameter_4_code,parameter_5_code,parameter_6_code,parameter_7_code FROM operator"
        cursor.execute(sql)
        sql_res = cursor.fetchall()
        print('select all operator end')
        print('generate param_code_dic')
        for row in sql_res:
            operator = row[0]
            param_list = [row[1],row[2],row[3],row[4],row[5],row[6],row[7]]
            rm_1 = False
            if operator not in param_code_dic:
                param_code_dic[operator] = {}
            # call_type 2/4: the first recorded "parameter" is skipped below
            # (presumably the call receiver, not a real argument — TODO confirm
            # against the config's call_type semantics)
            if operator_dic[operator]['call_type']== 2 or operator_dic[operator]['call_type']== 4 :
                rm_1 = True
            for index,param_code in enumerate(param_list):
                if rm_1 == True and index == 0:
                    continue
                if param_code == None:
                    # NULL column: parameter was left at its default
                    param_type, param_content = ('default','Null')
                else:
                    param_type,param_content = parse_param(param_code)
                # print(param_type,param_content)
                if index not in param_code_dic[operator]:
                    param_code_dic[operator][index] = []
                param_code_dic[operator][index].append((param_type, param_content))
        np.save('./param_code_dic.npy', param_code_dic)
        print('save param_code_dic end')
    else:
        param_code_dic = np.load('./param_code_dic.npy', allow_pickle=True).item()
    print('count param_code')
    for operator in param_code_dic:
        if operator not in param_code_rate_dic:
            param_code_rate_dic[operator] = {}
        if operator not in param_type_rate_dic:
            param_type_rate_dic[operator] = {}
        for index in param_code_dic[operator]:
            if index not in param_code_rate_dic[operator]:
                param_code_rate_dic[operator][index] = {}
            if index not in param_type_rate_dic[operator]:
                param_type_rate_dic[operator][index] = {}
            # total occurrences at this parameter position
            # (NOTE: `all` shadows the builtin; left unchanged here)
            all = 0
            for tup in param_code_dic[operator][index]:
                # print(tup)
                if tup[0] not in param_type_rate_dic[operator][index]:
                    param_type_rate_dic[operator][index][tup[0]] = 0
                param_type_rate_dic[operator][index][tup[0]] += 1
                if tup[1] not in param_code_rate_dic[operator][index]:
                    param_code_rate_dic[operator][index][tup[1]] = 0
                param_code_rate_dic[operator][index][tup[1]] += 1
                all += 1
            # convert raw counts into (fraction-of-occurrences, count) pairs
            for ptype in param_type_rate_dic[operator][index]:
                if all != 0:
                    param_type_rate_dic[operator][index][ptype] = (param_type_rate_dic[operator][index][ptype]/all, param_type_rate_dic[operator][index][ptype])
                else:
                    param_type_rate_dic[operator][index][ptype] =(0,0)
            for pcode in param_code_rate_dic[operator][index]:
                if all != 0:
                    param_code_rate_dic[operator][index][pcode] = (param_code_rate_dic[operator][index][pcode] / all, param_code_rate_dic[operator][index][pcode])
                else:
                    param_code_rate_dic[operator][index][pcode] = (0, 0)
    np.save('./param_type_rate_dic.npy', param_type_rate_dic)
    np.save('./param_code_rate_dic.npy', param_code_rate_dic)
    print('save param_code end')
def get_operator_param_score():
    """Aggregate metric scores per (operator, parameter value, metric).

    Each operator occurrence associates its seven parameter values with the
    owning notebook's metric scores (scores of -1 mean "not reported" and
    are skipped).  Per (operator, value, metric) the mean, variance and
    sample count are computed and the result is saved to
    ./param_score_dic.npy.
    """
    cursor, db = create_connection()
    CONFIG.read('config.ini')
    operator_dic = eval(CONFIG.get('operators', 'operations'))
    sql = "SELECT notebook_id,operator,parameter_1_value,parameter_2_value,parameter_3_value,parameter_4_value,parameter_5_value,parameter_6_value,parameter_7_value FROM operator"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    # cache: notebook_id -> metric dict, so get_one_result runs once per notebook
    notebook_score = {}
    parameter_dic = {}
    for row in sql_res:
        if row[0] not in notebook_score:
            result = get_one_result(row[0])
            notebook_score[row[0]] = result
        result = notebook_score[row[0]]
        if row[1] not in parameter_dic:
            parameter_dic[row[1]] = {}
        # columns 2..8 are the seven parameter values
        for num in range(2,9):
            if row[num] not in parameter_dic[row[1]]:
                parameter_dic[row[1]][row[num]] = {}
            for i in result:
                if result[i] != -1:
                    if i not in parameter_dic[row[1]][row[num]]:
                        parameter_dic[row[1]][row[num]][i] = ([], 0)
                    all_score = parameter_dic[row[1]][row[num]][i][0]
                    all_count = parameter_dic[row[1]][row[num]][i][1]
                    all_score.append(result[i])
                    all_count += 1
                    parameter_dic[row[1]][row[num]][i] = (all_score, all_count)
    for i in parameter_dic: # operator
        for j in parameter_dic[i]: # parameter value
            for k in parameter_dic[i][j]: # score type
                all_score = parameter_dic[i][j][k][0]
                all_count = parameter_dic[i][j][k][1]
                if all_count == 0:
                    parameter_dic[i][j][k] = (-1,-1,0)
                else:
                    # mean and (population) variance of the collected scores
                    total = 0
                    for sc in all_score:
                        total += sc
                    mean = total/all_count
                    sq = 0
                    for sc in all_score:
                        sq += (sc-mean)**2
                    sq /= all_count
                    parameter_dic[i][j][k] = (mean, sq, all_count)
    # BUG FIX: the original saved the never-populated `ope_dic` (an empty
    # dict) here, discarding all the statistics computed above.
    np.save('./param_score_dic.npy', parameter_dic)
def get_operator_exist_dic():
    """Compare metric scores of notebooks that use vs. don't use each operator.

    For every operator declared in the config, notebooks with results are
    split into "uses operator" / "does not use operator" groups; per metric
    the (mean, variance, count) of each group is computed.  The mapping
    operator -> metric -> ((mean, var, n_with), (mean, var, n_without)) is
    saved to ./ope_score_dic.npy, with (-1, -1, 0) marking an empty group.
    """
    cursor, db = create_connection()
    CONFIG.read('config.ini')
    operator_dic = eval(CONFIG.get('operators', 'operations'))
    in_result = []
    sql = "SELECT distinct notebook_id FROM result"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    for row in sql_res:
        in_result.append(int(row[0]))
    # per operator, the SET of notebook ids using it — membership is tested
    # once per (notebook, operator) pair below, so O(1) lookups matter
    operator_notebook_dic = {}
    for operator in operator_dic.keys():
        operator_notebook_dic[operator] = set()
        sql = "SELECT distinct notebook_id FROM operator where operator = '" + operator + "'"
        cursor.execute(sql)
        sql_res = cursor.fetchall()
        for row in sql_res:
            operator_notebook_dic[operator].add(int(row[0]))
    ope_dic = {}
    nope_dic = {}
    for notebook_id in in_result:
        print('notebook_idL',notebook_id)
        # hoisted: the original called get_one_result once per operator per
        # notebook — dozens of identical DB round-trips per notebook
        result = get_one_result(notebook_id)
        for operator in operator_dic.keys():
            # accumulate into the with-operator or without-operator bucket
            if notebook_id in operator_notebook_dic[operator]:
                target = ope_dic
            else:
                target = nope_dic
            if operator not in target.keys():
                target[operator] = {}
            for i in result:
                if result[i] != -1:
                    if i not in target[operator]:
                        target[operator][i] = ([], 0)
                    all_score = target[operator][i][0]
                    all_count = target[operator][i][1]
                    all_score.append(result[i])
                    all_count += 1
                    target[operator][i] = (all_score, all_count)
    for i in nope_dic:
        for j in nope_dic[i]:
            all_score = 0
            all_count = 0
            if i in ope_dic:
                if j in ope_dic[i]:
                    all_score = ope_dic[i][j][0]
                    all_count = ope_dic[i][j][1]
            n_all_score = nope_dic[i][j][0]
            n_all_count = nope_dic[i][j][1]
            if all_count == 0:
                ope_temp = (-1, -1, 0)
            else:
                total = 0
                for sc in all_score:
                    total += sc
                mean = total / all_count
                sq = 0
                for sc in all_score:
                    sq += (sc - mean) ** 2
                sq /= all_count
                ope_temp = (mean, sq, all_count)
            if n_all_count == 0:
                n_ope_temp = (-1, -1, 0)
            else:
                total = 0
                for sc in n_all_score:
                    total += sc
                mean = total / n_all_count
                sq = 0
                for sc in n_all_score:
                    sq += (sc - mean) ** 2
                sq /= n_all_count
                n_ope_temp = (mean, sq, n_all_count)
            if i not in ope_dic:
                ope_dic[i] = {}
            ope_dic[i][j] =(ope_temp,n_ope_temp)
    np.save('./ope_score_dic.npy', ope_dic)
# def get_operator_exist_dic_by_one_dataset(dataset_id, notebook_list):
# cursor, db = create_connection()
# CONFIG.read('config.ini')
# operator_dic = eval(CONFIG.get('operators', 'operations'))
# ope_dic = {}
# notebook_list_of_dataset = []
# operator_notebook_dic = {}
# noperator_notebook_dic = {}
#
# if notebook_list != []:
# notebook_list_of_dataset = notebook_list
# else:
# sql = "SELECT pair.nid FROM pair where pair.did=" + str(dataset_id)
# cursor.execute(sql)
# sql_res = cursor.fetchall()
# for row in sql_res:
# notebook_list_of_dataset.append(int(row[0]))
#
# for operator in operator_dic.keys():
# sql = "SELECT distinct notebook_id FROM operator where operator = '" + operator + "'"
# cursor.execute(sql)
# sql_res = cursor.fetchall()
# operator_notebook_dic[operator] = []
# if operator not in ope_dic.keys():
# ope_dic[operator] = {}
# for row in sql_res:
# notebook_id = int(row[0])
# if notebook_id not in notebook_list_of_dataset:
# continue
# operator_notebook_dic[operator].append(notebook_id)
# result = get_one_result(notebook_id)
# for i in result:
# if result[i] != -1:
# if i not in ope_dic[operator]:
# ope_dic[operator][i] = (0, 0)
# all_score = ope_dic[operator][i][0]
# all_count = ope_dic[operator][i][1]
# all_score += result[i]
# all_count += 1
# ope_dic[operator][i] = (all_score, all_count)
# print('add_one_result:',ope_dic[operator][i])
#
# for operator in operator_notebook_dic:
# noperator_notebook_dic[operator] = []
# for notebook in notebook_list_of_dataset:
# if notebook not in operator_notebook_dic[operator]:
# noperator_notebook_dic[operator].append(notebook)
#
# nope_dic = {}
# for operator in operator_dic.keys():
# for notebook_id in noperator_notebook_dic[operator]:
# result = get_one_result(notebook_id)
# if operator not in nope_dic.keys():
# nope_dic[operator] = {}
# for i in result:
# if result[i] != -1:
# if i not in nope_dic[operator]:
# nope_dic[operator][i] = (0, 0)
# all_score = nope_dic[operator][i][0]
# all_count = nope_dic[operator][i][1]
# all_score += result[i]
# all_count += 1
# nope_dic[operator][i] = (all_score, all_count)
#
# for i in nope_dic:
# for j in nope_dic[i]:
# all_score = 0
# all_count = 0
# if i in ope_dic:
# if j in ope_dic[i]:
# all_score = ope_dic[i][j][0]
# all_count = ope_dic[i][j][1]
# n_all_score = nope_dic[i][j][0]
# n_all_count = nope_dic[i][j][1]
#
# if all_count == 0:
# ope_temp = (-1, 0)
# else:
# ope_temp = (all_score / all_count, all_count)
# if n_all_count == 0:
# n_ope_temp = (-1, 0)
# else:
# n_ope_temp = (n_all_score/n_all_count, n_all_count)
# if i not in ope_dic:
# ope_dic[i] = {}
# ope_dic[i][j] =(ope_temp,n_ope_temp)
#
# return ope_dic
def get_dataset_operator_exist_dic():
    """Per dataset, compare scores of notebooks with/without each operator.

    Builds a dataset-id -> notebook-id-list mapping restricted to notebooks
    that have rows in `result` (saved to ./pair_dic.npy), then delegates
    per-dataset aggregation and saves the combined result to ./1d0mgap3_.npy.

    NOTE(review): this calls get_operator_exist_dic_by_one_dataset(), which
    is commented out in this file — calling this function as-is raises
    NameError unless that helper is restored.
    """
    print("get_pair_dic")
    cursor, db = create_connection()
    in_result = []
    sql = 'select distinct(notebook_id) from result'
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    for row in sql_res:
        in_result.append(row[0])
    pair_dic = {}
    sql = 'select * from pair'
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    for row in sql_res:
        notebook_id = int(row[0])
        dataset_id = int(row[1])
        # skip notebooks that never produced a result row
        if notebook_id not in in_result:
            continue
        if dataset_id not in pair_dic.keys():
            pair_dic[dataset_id] = []
        pair_dic[int(dataset_id)].append(int(notebook_id))
    np.save('./pair_dic.npy',pair_dic)
    print("get_pair_dic end")
    # if number != '-1':
    #     sql = "SELECT count(distinct result.notebook_id),pair.did FROM result inner join pair on result.notebook_id = pair.nid group by pair.did order by count(distinct result.notebook_id) limit " + str(number)
    # else:
    #     sql = "SELECT count(distinct result.notebook_id),pair.did FROM result inner join pair on result.notebook_id = pair.nid group by pair.did order by count(distinct result.notebook_id)"
    # cursor.execute(sql)
    # sql_res = cursor.fetchall()
    result = {}
    count = 0
    for dataset_id in pair_dic:
        # progress counter only
        print(count)
        count += 1
        result[dataset_id] = get_operator_exist_dic_by_one_dataset(dataset_id, pair_dic[dataset_id])
    np.save('./1d0mgap3_.npy',result)
    return result
def get_notebook_operator_dic():
    """Map every notebook id to the ordered list of its operator entries.

    Saves the mapping to ./notebook_operator_dic.npy and returns it.

    NOTE(review): the inner query selects both `rank` and `logic_operation`
    but only row1[0] (the rank) is appended; the downstream sequence code
    appears to expect the logic operation (row1[1]) — confirm against the
    consumers.  Also no ORDER BY is used, so row order depends on the
    database engine — TODO confirm.
    """
    all_seq_notebook_list = []
    cursor, db = create_connection()
    sql = "select distinct notebook_id from operator"
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    notebook_operator_dic = {}
    for row in sql_res:
        all_seq_notebook_list.append(row[0])
        notebook_operator_dic[row[0]] = []
        sql = "select rank,logic_operation from operator where notebook_id="+str(row[0])
        cursor.execute(sql)
        sql_res1 = cursor.fetchall()
        for row1 in sql_res1:
            notebook_operator_dic[row[0]].append(row1[0])
    np.save('./notebook_operator_dic.npy',notebook_operator_dic)
    return notebook_operator_dic
def get_sequence2id_dic(run_award = False):
    """Deduplicate notebook operator sequences and index them.

    Assigns each distinct operator sequence an integer id (starting at 1)
    and groups notebooks by that id.  Saves sequence2id.npy (id -> sequence)
    and sequence2notebook.npy (id -> [notebook ids]) and returns the latter.

    BUG FIX: the original incremented seq_id *before* recording the
    notebook, so notebooks introducing a new sequence were filed under
    seq_id + 1 while repeats of that sequence were filed under the matching
    id — the two saved dictionaries disagreed about ids.
    """
    if run_award == True or not os.path.exists('./notebook_operator_dic.npy'):
        notebook_operator_dic = get_notebook_operator_dic()
    else:
        notebook_operator_dic = np.load('./notebook_operator_dic.npy', allow_pickle=True).item()
    sequence2id = {}
    sequence2notebook = {}
    seq_id = 1
    for notebook in notebook_operator_dic:
        sequence = notebook_operator_dic[notebook]
        # look for an already-registered identical sequence
        matched_id = None
        for j in sequence2id:
            if sequence2id[j] == sequence:
                matched_id = j
                break
        if matched_id is None:
            # first time this sequence is seen: register under a fresh id
            matched_id = seq_id
            sequence2id[matched_id] = sequence
            seq_id += 1
        if matched_id not in sequence2notebook:
            sequence2notebook[matched_id] = []
        sequence2notebook[matched_id].append(notebook)
    np.save('./sequence2id.npy', sequence2id)
    np.save('./sequence2notebook.npy', sequence2notebook)
    return sequence2notebook
def get_result_of_seq(run_award = False):
    """Average every reported metric over the notebooks of each sequence.

    Loads the sequence-id -> notebook-list mapping (regenerating it when
    *run_award* is True or the cached file is missing), accumulates
    (sum, count) per metric for each sequence while skipping -1 scores,
    converts sums to means, and saves the result to ./seq_score_dic.npy.
    """
    if run_award == True or not os.path.exists('./sequence2notebook.npy'):
        seq2nb = get_sequence2id_dic()
    else:
        seq2nb = np.load('./sequence2notebook.npy', allow_pickle=True).item()
    result = {}
    for seq_id, notebooks in seq2nb.items():
        totals = {}
        for nb in notebooks:
            score = get_one_result(nb)
            for metric in score:
                value = score[metric]
                if value == -1:
                    continue
                prev_sum, prev_cnt = totals.get(metric, (0, 0))
                totals[metric] = (prev_sum + value, prev_cnt + 1)
        result[seq_id] = totals
    # second pass: replace accumulated sums with means
    for seq_id in result:
        for metric in result[seq_id]:
            total, cnt = result[seq_id][metric]
            if cnt != 0:
                result[seq_id][metric] = (total / cnt, cnt)
    np.save('./seq_score_dic.npy', result)
def show_dic(showtype):
    """Interactive report printer dispatching on an integer *showtype*.

    1: overall with/without-operator score tuple; 2: raw per-dataset score
    dict; 3: per-dataset with/without comparison with win/loss counts;
    4: per-operator comparison (accuracy_score and auc merged into one
    weighted entry); 6: per-dataset per-operator comparison (mostly
    disabled); 8: paged dump of show-operator sequences; 9: parameter
    type/code frequency tables.

    ANSI colours: green marks the "better" side, red the "worse" side;
    the comparison is inverted for error metrics (mean_squared_error /
    mean_absolute_error) where smaller is better.
    """
    showtype = int(showtype)
    if showtype==4:
        # operator -> metric -> ((mean,var,n_with),(mean,var,n_without))
        ope_score_dic = np.load('./ope_score_dic.npy',allow_pickle=True).item()
        new_dic = {}
        has_add=set()
        for i in ope_score_dic:
            if i in has_add:
                continue
            has_add.add(i)
            # merge accuracy_score and auc into a single count-weighted mean
            if (ope_score_dic[i]['accuracy_score'][0][1] + ope_score_dic[i]['auc'][0][1]) == 0:
                temp1 = (-1,0)
            else:
                temp1 = ((ope_score_dic[i]['accuracy_score'][0][0]*ope_score_dic[i]['accuracy_score'][0][1] + ope_score_dic[i]['auc'][0][0]*ope_score_dic[i]['auc'][0][1])/(ope_score_dic[i]['accuracy_score'][0][1] + ope_score_dic[i]['auc'][0][1]),ope_score_dic[i]['accuracy_score'][0][1] + ope_score_dic[i]['auc'][0][1])
            if (ope_score_dic[i]['accuracy_score'][1][1] + ope_score_dic[i]['auc'][1][1]) == 0:
                temp2 = (-1, 0)
            else:
                temp2 = ((ope_score_dic[i]['accuracy_score'][1][0]*ope_score_dic[i]['accuracy_score'][1][1] + ope_score_dic[i]['auc'][1][0]*ope_score_dic[i]['auc'][1][1])/(ope_score_dic[i]['accuracy_score'][1][1] + ope_score_dic[i]['auc'][1][1]),ope_score_dic[i]['accuracy_score'][1][1] + ope_score_dic[i]['auc'][1][1])
            new_dic[i] = {}
            new_dic[i]['accuracy_score'] = (temp1,temp2)
            new_dic[i]['f1_score'] = ope_score_dic[i]['f1_score']
            # keep other metrics only with >10 samples and a valid mean
            for ope in ope_score_dic[i]:
                try:
                    if ope != 'accuracy_score' and ope != 'auc' and ope_score_dic[i][ope][0][1] + ope_score_dic[i][ope][1][1] > 10 and ope_score_dic[i][ope][0][0] != -1:
                        new_dic[i][ope] = ope_score_dic[i][ope]
                except:
                    continue
            print(i)
            for score in new_dic[i]:
                if new_dic[i][score][0][0] == -1 or new_dic[i][score][1][0] == -1:
                    continue
                if new_dic[i][score][0][1] < 5 :
                    continue
                print('\t' + score)
                if new_dic[i][score][0][0] > new_dic[i][score][1][0]:
                    if score == 'mean_squared_error' or score == 'mean_absolute_error':
                        print('\t' + "\033[0;31;40m" + str(new_dic[i][score]) + "\033[0m")
                    else:
                        print('\t' + "\033[0;32;40m" + str(new_dic[i][score]) + "\033[0m")
                else:
                    if score == 'mean_squared_error' or score == 'mean_absolute_error':
                        print('\t' + "\033[0;32;40m" + str(new_dic[i][score]) + "\033[0m")
                    else:
                        print('\t' + "\033[0;31;40m" + str(new_dic[i][score]) + "\033[0m")
            print('********************')
    elif showtype==2:
        # raw dump of the per-dataset score dictionary
        dic = np.load('./dataset_score_dic.npy',allow_pickle=True).item()
        for i in dic:
            if len(dic[i]) == 0:
                continue
            print(i)
            print(dic[i])
            print('********************')
    elif showtype == 1:
        # tup[0]: with-operator scores, tup[1]: without-operator scores
        tup = np.load('./all_exit_tuple.npy',allow_pickle=True)
        for score in tup[0].keys():
            if score in tup[1].keys():
                print(score)
                if tup[0][score][0] > tup[1][score][0]:
                    if score == 'mean_squared_error' or score == 'mean_absolute_error':
                        print('\t' + "\033[0;31;40m" + str((tup[0][score],tup[1][score])) + "\033[0m")
                    else:
                        print('\t' + "\033[0;32;40m" + str((tup[0][score],tup[1][score])) + "\033[0m")
                else:
                    if score == 'mean_squared_error' or score == 'mean_absolute_error':
                        print('\t' + "\033[0;32;40m" + str((tup[0][score],tup[1][score]))+ "\033[0m")
                    else:
                        print('\t' + "\033[0;31;40m" + str((tup[0][score],tup[1][score])) + "\033[0m")
        print(tup)
    elif showtype==3:
        dic = np.load('./exist_operator_groupby_dataset.npy',allow_pickle=True).item()
        need_print =[]
        false_count = 0
        true_count = 0
        false_count_1 = 0
        true_count_1 = 0
        dataset_num = {}
        print('dataset_num:', len(dic.keys()))
        # first pass: total sample counts and datasets worth printing
        for i in dic:
            dataset_num[i] = [0, 0]
            for score in dic[i]:
                dataset_num[i][0] += dic[i][score][0][1]
                dataset_num[i][1] += dic[i][score][1][1]
                if dic[i][score][0][1] == 0 or dic[i][score][1][1] == 0:
                    continue
                need_print.append(i)
        printed = []
        for i in dic:
            if i not in need_print:
                continue
            if i in printed:
                continue
            printed.append(i)
            print(i)
            for score in dic[i]:
                if dic[i][score][0][1] == 0 or dic[i][score][1][1] == 0:
                    continue
                print('\t' + score)
                if dic[i][score][0][0] > dic[i][score][1][0]:
                    if score == 'mean_squared_error' or score == 'mean_absolute_error':
                        print('\t' + "\033[0;31;40m" + str(dic[i][score]) + "\033[0m")
                        false_count += 1
                        false_count_1 += dic[i][score][0][1]
                        false_count_1 += dic[i][score][1][1]
                    else:
                        print('\t' + "\033[0;32;40m" + str(dic[i][score]) + "\033[0m")
                        true_count += 1
                        true_count_1 += dic[i][score][0][1]
                        true_count_1 += dic[i][score][1][1]
                else:
                    if score == 'mean_squared_error' or score == 'mean_absolute_error':
                        print('\t' + "\033[0;32;40m" + str(dic[i][score]) + "\033[0m")
                        true_count += 1
                        true_count_1 += dic[i][score][0][1]
                        true_count_1 += dic[i][score][1][1]
                    else:
                        print('\t' + "\033[0;31;40m" + str(dic[i][score]) + "\033[0m")
                        false_count += 1
                        false_count_1 += dic[i][score][0][1]
                        false_count_1 += dic[i][score][1][1]
            print('********************')
            # if dic[i][1] == -1:
            #     continue
        print("false_count:",false_count)
        print("true_count:", true_count)
        print("false_count_1:", false_count_1)
        print("true_count_1:", true_count_1)
        false_num = 0
        true_num = 0
        for i in dataset_num:
            if dataset_num[i][0] > dataset_num[i][1]:
                false_num += 1
            else:
                true_num += 1
        print('')
        print("false_num:",false_num)
        print("true_num:", true_num)
    elif showtype==6:
        # the comparison logic below is currently disabled; this branch
        # only dumps the raw dictionary
        dic = np.load('./dataset_operation_dic.npy', allow_pickle=True).item()
        # print(dic)
        need_print_dataset = {}
        false_count = 0
        false_count_1 = 0
        true_count = 0
        true_count_1 = 0
        false_num = 0
        true_num = 0
        true_all = 0
        false_all = 0
        print(dic)
        # for i in dic:
        #     if 'accuracy_score' in dic[i] and 'auc' in dic[i]:
        #         if (dic[i]['accuracy_score'][0][1] + dic[i]['auc'][0][1]) == 0:
        #             temp1 = (-1,0)
        #         else:
        #             temp1 = ((dic[i]['accuracy_score'][0][0]*dic[i]['accuracy_score'][0][1] + dic[i]['auc'][0][0]*dic[i]['auc'][0][1])/(dic[i]['accuracy_score'][0][1] + dic[i]['auc'][0][1]),dic[i]['accuracy_score'][0][1] + dic[i]['auc'][0][1])
        #         if (dic[i]['accuracy_score'][1][1] + dic[i]['auc'][1][1]) == 0:
        #             temp2 = (-1, 0)
        #         else:
        #             temp2 = ((dic[i]['accuracy_score'][1][0]*dic[i]['accuracy_score'][1][1] + dic[i]['auc'][1][0]*dic[i]['auc'][1][1])/(dic[i]['accuracy_score'][1][1] + dic[i]['auc'][1][1]),dic[i]['accuracy_score'][1][1] + dic[i]['auc'][1][1])
        #         dic[i]['accuracy_score'] = (temp1,temp2)
        #     elif 'auc' in dic[i]:
        #         dic[i]['accuracy_score'] = dic[i]['auc']
        #
        #     # print("\033[0;34;40m" + str(i) + "\033[0m")
        #
        #     for operator in dic[i]:
        #         # print("\033[0;35;40m" + operator + "\033[0m")
        #         for score in dic[i][operator]:
        #             # print("\033[0;36;40m" + score + "\033[0m")
        #             try:
        #                 if dic[i][operator][score][0][1] < dic[i][operator][score][1][1]:
        #                     true_num += 1
        #                 else:
        #                     false_num += 1
        #
        #                 if dic[i][operator][score][0][0] == -1:
        #                     continue
        #                 if dic[i][operator][score][0][1] + dic[i][operator][score][1][1] < 10:
        #                     continue
        #                 if i not in need_print_dataset:
        #                     need_print_dataset[i] = {}
        #                 if operator not in need_print_dataset[i]:
        #                     need_print_dataset[i][operator] = []
        #                 need_print_dataset[i][operator].append(score)
        #             except:
        #                 continue
        #
        # for i in dic:
        #     if i not in need_print_dataset:
        #         continue
        #     print(str(i))
        #     for operator in dic[i]:
        #         if operator not in need_print_dataset[i]:
        #             continue
        #         print('\t'+operator)
        #         for score in dic[i][operator]:
        #             if score not in need_print_dataset[i][operator]:
        #                 continue
        #             print("\t\t" + score)
        #             try:
        #                 if dic[i][operator][score][0][0] == -1:
        #                     continue
        #                 if dic[i][operator][score][0][1] + dic[i][operator][score][1][1] < 10:
        #                     continue
        #                 elif dic[i][operator][score][0][0] > dic[i][operator][score][1][0]:
        #                     print("\033[0;32;40m\t\t" + str(dic[i][operator][score]) + "\033[0m")
        #                     true_count += 1
        #                     true_count_1 += dic[i][operator][score][0][1]
        #                     true_count_1 += dic[i][operator][score][1][1]
        #                     if dic[i][operator][score][0][1] < dic[i][operator][score][1][1]:
        #                         true_all += 1
        #                     else:
        #                         false_all += 1
        #                 elif dic[i][operator][score][0][0] < dic[i][operator][score][1][0]:
        #                     print("\033[0;31;40m\t\t" + str(dic[i][operator][score]) + "\033[0m")
        #                     false_count += 1
        #                     false_count_1 += dic[i][operator][score][0][1]
        #                     false_count_1 += dic[i][operator][score][1][1]
        #                     false_all += 1
        #             except:
        #                 continue
        #     print('********************')
        # print('true_count:', true_count)
        # print('true_count_1:', true_count_1)
        # print('true_num:', true_num)
        # print('true_all:', true_all)
        # print('false_count:',false_count)
        # print('false_count_1:', false_count_1)
        # print('false_num:', false_num)
        # print('false_all:', false_all)
    elif showtype == 8:
        # paged, colour-coded dump of show-operator sequences per notebook
        print('input page num')
        page_num = int(input())
        print('input page size')
        page_size = int(input())
        show_notebook_seq = np.load('./show_notebook_seq.npy', allow_pickle=True).item()
        start = (page_num-1)*page_size
        for index,i in enumerate(show_notebook_seq):
            if index < start:
                continue
            if index > start + page_size:
                break
            print(str(i))
            # item = [operator, data_object_value, logic_operation,
            #         parameter_1_code, rank]
            for item in show_notebook_seq[i]:
                # print(item)
                if item[2] == 'Show' and item[0] != 'show':
                    if item[3] == None:
                        item[3] = ''
                    print('\t\033[0;35;40m' + str(item[4]) + ':' + item[0] + ' ' + item[1] + ' '+ item[3] + "\033[0m")
                elif item[2] == 'Show_config'or item[0] == 'show':
                    if item[3] == None:
                        item[3] = ''
                    print('\t\033[0;37;40m' + str(item[4]) + ':'+ item[0] + ' ' + item[1] + ' '+ item[3] + ' '"\033[0m")
                else:
                    if item[3] == None:
                        item[3] = ''
                    print('\t\033[0;36;40m' + str(item[4]) + ':'+ item[0] + ' ' + item[1] + ' '+ item[3] + ' '"\033[0m")
            # break
    elif showtype == 9:
        # parameter frequency tables built by get_operator_param_rate()
        print('code or type or all')
        ct = input()
        if ct == 'code':
            rate_dic = np.load('./param_code_rate_dic.npy',allow_pickle=True).item()
        elif ct == 'type':
            rate_dic = np.load('./param_type_rate_dic.npy', allow_pickle=True).item()
        elif ct == 'all':
            rate_dic = np.load('./param_code_dic.npy', allow_pickle=True).item()
        else:
            rate_dic = {}
            return
        operator_dic = eval(CONFIG.get('operators', 'operations'))
        if ct == 'all':
            for operator in rate_dic:
                print(operator)
                for index in rate_dic[operator]:
                    if index >= len(operator_dic[operator]['params']):
                        continue
                    print('\t'+ str(operator_dic[operator]['params'][index]))
                    for content in rate_dic[operator][index]:
                        print('\t\t' + str(content))
        else:
            # suppress parameters that are left at their default >95% of
            # the time — they carry no information
            need_pass_print = {}
            for operator in rate_dic:
                for index in rate_dic[operator]:
                    if index >= len(operator_dic[operator]['params']):
                        continue
                    for content in rate_dic[operator][index]:
                        if content == 'default' and rate_dic[operator][index][content][0] > 0.95:
                            if operator not in need_pass_print:
                                need_pass_print[operator] = []
                            need_pass_print[operator].append(index)
            for operator in rate_dic:
                print(operator)
                for index in rate_dic[operator]:
                    if index >= len(operator_dic[operator]['params']):
                        continue
                    if operator in need_pass_print:
                        if index in need_pass_print[operator]:
                            continue
                    print('\t'+ str(operator_dic[operator]['params'][index]))
                    for content in rate_dic[operator][index]:
                        print('\t\t' + str(content) + ':' + str(rate_dic[operator][index][content]))
def get_show_sequence():
    """Collect, per notebook, the ordered show-operator rows.

    Only notebooks containing at least one 'Show' logic operation are
    kept.  Each entry is [operator, data_object_value, logic_operation,
    parameter_1_code, rank]; the mapping is saved to
    ./show_notebook_seq.npy.
    """
    cursor, db = create_connection()
    sql = "select distinct notebook_id from show_operator where logic_operation='Show'"
    cursor.execute(sql)
    notebook_list = [int(r[0]) for r in cursor.fetchall()]
    sql = "select notebook_id, operator,data_object_value,logic_operation,parameter_1_code,rank from show_operator"
    cursor.execute(sql)
    show_notebook_seq = {}
    for row in cursor.fetchall():
        nb_id = int(row[0])
        if nb_id not in notebook_list:
            continue
        entry = [row[1], row[2], row[3], row[4], row[5]]
        show_notebook_seq.setdefault(nb_id, []).append(entry)
    np.save('./show_notebook_seq.npy', show_notebook_seq)
# for i in show_notebook_seq:
# print("\033[0;32;40m" + str(i) + "\033[0m")
# for item in show_notebook_seq[i]:
# print('\t' + str(item))
#
# Score metrics whose per-model value lists are dumped to .npy files, mapped
# to the file-name stem used by the dumps (note the irregular stems: the
# trailing underscore on 'score_list_' and 'best_score_' -> 'best_score_list').
_MODEL_SCORE_STEMS = {
    'f1_score': 'f1_score_list',
    'score': 'score_list_',
    'cross_val_score': 'cross_val_score_list',
    'accuracy_score': 'accuracy_score_list',
    'roc_auc_score': 'roc_auc_score_list',
    'precision_score': 'precision_score_list',
    'recall_score': 'recall_score_list',
    'best_score_': 'best_score_list',
    'auc': 'auc_list',
}


def _mean_var_count(entry):
    """Reduce an accumulator ``[values, count]`` to ``(mean, variance, count)``.

    Returns the sentinel ``(-1, -1, 0)`` when there is no data (``entry`` is
    ``None`` or its count is zero), matching the convention used for all the
    statistics dictionaries in this module.
    """
    if entry is None or entry[1] == 0:
        return (-1, -1, 0)
    values, count = entry
    mean = sum(values) / count
    variance = sum((v - mean) ** 2 for v in values) / count
    return (mean, variance, count)


def get_model_exist_dic():
    """Per-model score statistics, split by whether the notebook has rows in
    the ``operator`` table.

    For every notebook that appears in ``result``, each (model, score-metric)
    value from ``get_one_model_result`` is accumulated into ``model_dic``
    (notebook also in ``operator``) or ``nmodel_dic`` (not in ``operator``);
    models not listed in the configured model dictionary are skipped.
    The final ``model_dic`` maps ``model -> score -> (with_op, without_op)``
    where each side is a ``(mean, variance, count)`` triple, ``(-1, -1, 0)``
    when that side has no data, and is saved to ``model_score_dic.npy``;
    the raw per-metric value lists are saved to the ``model_has_*`` /
    ``model_nhas_*`` files.
    """
    cursor, db = create_connection()
    in_result = []
    sql = "SELECT distinct notebook_id FROM result"
    cursor.execute(sql)
    for row in cursor.fetchall():
        in_result.append(row[0])
    in_ope = []
    sql = "SELECT distinct notebook_id FROM operator"
    cursor.execute(sql)
    for row in cursor.fetchall():
        in_ope.append(row[0])
    model_dic = {}
    nmodel_dic = {}
    has_lists = {key: {} for key in _MODEL_SCORE_STEMS}
    nhas_lists = {key: {} for key in _MODEL_SCORE_STEMS}
    # NOTE(review): eval() on config text executes arbitrary code; config.ini
    # must stay trusted (same pattern is used elsewhere in this module).
    model_key_dic = eval(CONFIG.get('models', 'model_dic'))
    for notebook_id in in_result:
        has_ope = notebook_id in in_ope
        target = model_dic if has_ope else nmodel_dic
        lists = has_lists if has_ope else nhas_lists
        one_res = get_one_model_result(notebook_id)
        for model in one_res:
            if model not in model_key_dic:
                continue
            if model not in target:
                target[model] = {}
            for score, value in one_res[model].items():
                # Only the nine known metrics are collected into the raw
                # dump lists; every metric still enters the accumulator.
                if score in lists:
                    lists[score].setdefault(model, []).append(value)
                if score not in target[model]:
                    target[model][score] = [[value], 1]
                else:
                    values, count = target[model][score]
                    values.append(value)
                    target[model][score] = [values, count + 1]
                print('model,score:', (model, score))
                print(target[model][score])
    # Merge: every (model, score) seen without operators becomes a pair of
    # (mean, variance, count) triples — with-operator stats first.
    for model in nmodel_dic:
        for score in nmodel_dic[model]:
            ope_temp = _mean_var_count(model_dic.get(model, {}).get(score))
            n_ope_temp = _mean_var_count(nmodel_dic[model][score])
            model_dic.setdefault(model, {})[score] = (ope_temp, n_ope_temp)
    # Entries only ever seen with operators are still raw [values, count]
    # accumulators — finalize them with an empty without-operator side.
    for model in model_dic:
        for score in model_dic[model]:
            if not isinstance(model_dic[model][score], tuple):
                model_dic[model][score] = (_mean_var_count(model_dic[model][score]), (-1, -1, 0))
    for score_key, stem in _MODEL_SCORE_STEMS.items():
        np.save('model_has_' + stem, has_lists[score_key])
        np.save('model_nhas_' + stem, nhas_lists[score_key])
    np.save('./model_score_dic.npy', model_dic)
def get_model_exist_dic_len():
    """Per-model statistics over pipeline sequence lengths, split by whether
    the notebook has rows in the ``operator`` table.

    Fixes over the original implementation:
      * notebooks without operator rows were accumulated into ``model_dic``
        instead of ``nmodel_dic``, so the "no operator" side was always empty
        and the with-operator accumulators were silently overwritten;
      * the aggregation loop (copy-pasted from the per-score variant) indexed
        a ``[lengths, count]`` accumulator as if it were a score dictionary
        (including ``model_dic[score]`` instead of ``model_dic[model]``),
        leaving raw lists in the saved file.

    Saves ``{model: ((mean, var, count), (n_mean, n_var, n_count))}`` — the
    with-operator triple first, ``(-1, -1, 0)`` when a side has no data — to
    ``model_score_dic_len.npy``.
    """
    def _stats(entry):
        # entry is [values, count] or None -> (mean, variance, count) triple.
        if entry is None or entry[1] == 0:
            return (-1, -1, 0)
        values, count = entry
        mean = sum(values) / count
        variance = sum((v - mean) ** 2 for v in values) / count
        return (mean, variance, count)

    cursor, db = create_connection()
    # notebook_id -> list of model types used in that notebook.
    # NOTE(review): one query per notebook (N+1 pattern), kept as-is.
    in_result = {}
    cursor.execute("SELECT distinct notebook_id FROM result")
    for row in cursor.fetchall():
        in_result[row[0]] = []
        cursor.execute("SELECT distinct model_type from result where notebook_id = " + str(row[0]))
        for row1 in cursor.fetchall():
            in_result[row[0]].append(row1[0])
    in_ope = []
    cursor.execute("SELECT distinct notebook_id FROM operator")
    for row in cursor.fetchall():
        in_ope.append(row[0])
    model_dic = {}
    nmodel_dic = {}
    for notebook_id in in_result:
        seq_len = get_one_seq_len(notebook_id)
        # Bug fix: route "no operator" notebooks into nmodel_dic.
        target = model_dic if notebook_id in in_ope else nmodel_dic
        for model in in_result[notebook_id]:
            if model not in target:
                target[model] = [[seq_len], 1]
            else:
                lengths, count = target[model]
                lengths.append(seq_len)
                target[model] = [lengths, count + 1]
    result = {}
    for model in list(model_dic) + [m for m in nmodel_dic if m not in model_dic]:
        result[model] = (_stats(model_dic.get(model)), _stats(nmodel_dic.get(model)))
    np.save('./model_score_dic_len.npy', result)
def get_model_exist_dic_by_one_dataset(dataset_id, notebook_list):
    """Per-model score statistics restricted to one dataset's notebooks.

    Args:
        dataset_id: dataset key; used to resolve notebooks via ``pair`` when
            ``notebook_list`` is empty, and as the dump-file prefix.
        notebook_list: explicit notebook ids to use; falls back to a DB
            lookup when empty.

    Returns ``model -> score -> (with_op, without_op)`` where each side is a
    ``(mean, variance, count)`` triple or the ``(-1, -1, 0)`` sentinel.
    Raw per-metric value lists are dumped under ``1d1mgap1data/``.
    """
    # Metric -> file-name stem of the raw-list dumps (stems are irregular).
    score_stems = {
        'f1_score': 'f1_score_list',
        'score': 'score_list_',
        'cross_val_score': 'cross_val_score_list',
        'accuracy_score': 'accuracy_score_list',
        'roc_auc_score': 'roc_auc_score_list',
        'best_score_': 'best_score_list',
    }

    def _stats(entry):
        # entry is [values, count] or None -> (mean, variance, count) triple.
        if entry is None or entry[1] == 0:
            return (-1, -1, 0)
        values, count = entry
        mean = sum(values) / count
        variance = sum((v - mean) ** 2 for v in values) / count
        return (mean, variance, count)

    cursor, db = create_connection()
    in_result = []
    cursor.execute("SELECT distinct notebook_id FROM result")
    for row in cursor.fetchall():
        in_result.append(row[0])
    in_ope = []
    cursor.execute("SELECT distinct notebook_id FROM operator")
    for row in cursor.fetchall():
        in_ope.append(row[0])
    if notebook_list != []:
        notebook_list_of_dataset = notebook_list
    else:
        notebook_list_of_dataset = []
        cursor.execute("SELECT pair.nid FROM pair where pair.did=" + str(dataset_id))
        for row in cursor.fetchall():
            notebook_list_of_dataset.append(int(row[0]))
    model_dic = {}
    nmodel_dic = {}
    has_lists = {key: {} for key in score_stems}
    nhas_lists = {key: {} for key in score_stems}
    for notebook_id in in_result:
        if notebook_id not in notebook_list_of_dataset:
            continue
        one_res = get_one_model_result(notebook_id)
        if notebook_id in in_ope:
            for model in one_res:
                if model not in model_dic:
                    model_dic[model] = {}
                for score, value in one_res[model].items():
                    if score in has_lists:
                        has_lists[score].setdefault(model, []).append(value)
                    if score not in model_dic[model]:
                        model_dic[model][score] = [[value], 1]
                    else:
                        values, count = model_dic[model][score]
                        values.append(value)
                        model_dic[model][score] = [values, count + 1]
        else:
            for model in one_res:
                if model not in nmodel_dic:
                    nmodel_dic[model] = {}
                    for score, value in one_res[model].items():
                        print('model', model)
                        if score in nhas_lists:
                            nhas_lists[score].setdefault(model, []).append(value)
                        nmodel_dic[model][score] = [[value], 1]
                else:
                    # NOTE(review): mirrors the original — repeat sightings of
                    # a model without operators are NOT added to the nhas_*
                    # raw lists, only to the accumulator. Confirm intended.
                    for score, value in one_res[model].items():
                        if score not in nmodel_dic[model]:
                            nmodel_dic[model][score] = [[value], 1]
                        else:
                            values, count = nmodel_dic[model][score]
                            values.append(value)
                            nmodel_dic[model][score] = [values, count + 1]
    # Merge: every (model, score) seen without operators becomes a pair of
    # (mean, variance, count) triples — with-operator stats first.
    for model in nmodel_dic:
        for score in nmodel_dic[model]:
            ope_temp = _stats(model_dic.get(model, {}).get(score))
            n_ope_temp = _stats(nmodel_dic[model][score])
            model_dic.setdefault(model, {})[score] = (ope_temp, n_ope_temp)
    # Finalize entries only ever seen with operators.
    for model in model_dic:
        for score in model_dic[model]:
            if not isinstance(model_dic[model][score], tuple):
                model_dic[model][score] = (_stats(model_dic[model][score]), (-1, -1, 0))
    prefix = '1d1mgap1data/' + str(dataset_id)
    for score_key, stem in score_stems.items():
        np.save(prefix + '_model_has_' + stem, has_lists[score_key])
        np.save(prefix + '_model_nhas_' + stem, nhas_lists[score_key])
    return model_dic
def get_model_exist_dic_by_one_dataset_len(dataset_id, notebook_list):
    """Sequence-length statistics per model for one dataset's notebooks,
    split by whether the notebook has rows in the ``operator`` table.

    Fixes over the original implementation (same defects as
    ``get_model_exist_dic_len``):
      * "no operator" notebooks were accumulated into ``model_dic`` instead
        of ``nmodel_dic``;
      * the aggregation indexed a ``[lengths, count]`` accumulator as a
        score dictionary (and used ``model_dic[score]`` for ``model_dic[model]``),
        so raw lists were returned.

    Args:
        dataset_id: dataset key; used to resolve notebooks via ``pair`` when
            ``notebook_list`` is empty.
        notebook_list: explicit notebook ids; falls back to a DB lookup
            when empty.

    Returns ``{model: ((mean, var, count), (n_mean, n_var, n_count))}``,
    ``(-1, -1, 0)`` marking a side with no data.
    """
    def _stats(entry):
        # entry is [values, count] or None -> (mean, variance, count) triple.
        if entry is None or entry[1] == 0:
            return (-1, -1, 0)
        values, count = entry
        mean = sum(values) / count
        variance = sum((v - mean) ** 2 for v in values) / count
        return (mean, variance, count)

    cursor, db = create_connection()
    # notebook_id -> list of model types used in that notebook.
    in_result = {}
    cursor.execute("SELECT distinct notebook_id FROM result")
    for row in cursor.fetchall():
        in_result[row[0]] = []
        cursor.execute("SELECT distinct model_type from result where notebook_id = " + str(row[0]))
        for row1 in cursor.fetchall():
            in_result[row[0]].append(row1[0])
    in_ope = []
    cursor.execute("SELECT distinct notebook_id FROM operator")
    for row in cursor.fetchall():
        in_ope.append(row[0])
    if notebook_list != []:
        notebook_list_of_dataset = notebook_list
    else:
        notebook_list_of_dataset = []
        cursor.execute("SELECT pair.nid FROM pair where pair.did=" + str(dataset_id))
        for row in cursor.fetchall():
            notebook_list_of_dataset.append(int(row[0]))
    model_dic = {}
    nmodel_dic = {}
    for notebook_id in in_result:
        if notebook_id not in notebook_list_of_dataset:
            continue
        seq_len = get_one_seq_len(notebook_id)
        # Bug fix: route "no operator" notebooks into nmodel_dic.
        target = model_dic if notebook_id in in_ope else nmodel_dic
        for model in in_result[notebook_id]:
            if model not in target:
                target[model] = [[seq_len], 1]
            else:
                lengths, count = target[model]
                lengths.append(seq_len)
                target[model] = [lengths, count + 1]
    result = {}
    for model in list(model_dic) + [m for m in nmodel_dic if m not in model_dic]:
        result[model] = (_stats(model_dic.get(model)), _stats(nmodel_dic.get(model)))
    return result
def get_operator_exist_dic_by_one_dataset(dataset_id, notebook_list):
    """Per-operator score statistics for one dataset's notebooks.

    For every configured operator, notebooks of the dataset are split into
    those that use the operator (``ope_dic``) and those that don't
    (``nope_dic``); each side accumulates the non-(-1) scores reported by
    ``get_one_result`` and is reduced to ``(mean, variance, count)``.

    Args:
        dataset_id: dataset key; used to resolve notebooks via ``pair`` when
            ``notebook_list`` is empty.
        notebook_list: explicit notebook ids; falls back to a DB lookup
            when empty.

    Returns ``ope_dic`` mapping ``operator -> score -> (with, without)``
    pairs of triples, ``(-1, -1, 0)`` marking an empty side.
    """
    cursor, db = create_connection()
    # NOTE(review): eval() on config text executes arbitrary code; config.ini
    # must stay trusted.
    CONFIG.read('config.ini')
    operator_dic = eval(CONFIG.get('operators', 'operations'))
    ope_dic = {}
    notebook_list_of_dataset = []
    operator_notebook_dic = {}
    noperator_notebook_dic = {}
    if notebook_list != []:
        notebook_list_of_dataset = notebook_list
    else:
        sql = "SELECT pair.nid FROM pair where pair.did=" + str(dataset_id)
        cursor.execute(sql)
        sql_res = cursor.fetchall()
        for row in sql_res:
            notebook_list_of_dataset.append(int(row[0]))
    # Phase 1: accumulate scores of notebooks that USE each operator.
    for operator in operator_dic.keys():
        sql = "SELECT distinct notebook_id FROM operator where operator = '" + operator + "'"
        cursor.execute(sql)
        sql_res = cursor.fetchall()
        operator_notebook_dic[operator] = []
        if operator not in ope_dic.keys():
            ope_dic[operator] = {}
        for row in sql_res:
            notebook_id = int(row[0])
            if notebook_id not in notebook_list_of_dataset:
                continue
            operator_notebook_dic[operator].append(notebook_id)
            result = get_one_result(notebook_id)
            for i in result:
                # -1 is the "metric absent" sentinel from get_one_result.
                if result[i] != -1:
                    if i not in ope_dic[operator]:
                        ope_dic[operator][i] = ([], 0)
                    all_score = ope_dic[operator][i][0]
                    all_count = ope_dic[operator][i][1]
                    all_score.append(result[i])
                    all_count += 1
                    ope_dic[operator][i] = (all_score, all_count)
                    print('add_one_result:',ope_dic[operator][i])
    # Complement: dataset notebooks that do NOT use the operator.
    for operator in operator_notebook_dic:
        noperator_notebook_dic[operator] = []
        for notebook in notebook_list_of_dataset:
            if notebook not in operator_notebook_dic[operator]:
                noperator_notebook_dic[operator].append(notebook)
    # Phase 2: accumulate scores of the complement notebooks.
    nope_dic = {}
    for operator in operator_dic.keys():
        for notebook_id in noperator_notebook_dic[operator]:
            result = get_one_result(notebook_id)
            if operator not in nope_dic.keys():
                nope_dic[operator] = {}
            for i in result:
                if result[i] != -1:
                    if i not in nope_dic[operator]:
                        nope_dic[operator][i] = ([], 0)
                    all_score = nope_dic[operator][i][0]
                    all_count = nope_dic[operator][i][1]
                    all_score.append(result[i])
                    all_count += 1
                    nope_dic[operator][i] = (all_score, all_count)
    # Phase 3: reduce each (score_list, count) accumulator to
    # (mean, variance, count); note `sum` shadows the builtin here.
    for operator in ope_dic.keys():
        for score in ope_dic[operator].keys():
            score_list = ope_dic[operator][score][0]
            all_count = ope_dic[operator][score][1]
            if all_count != 0:
                sum = 0
                for sc in score_list:
                    sum += sc
                mean = sum/all_count
                sq=0
                for sc in score_list:
                    sq += (sc-mean)**2
                sq /= all_count
                ope_dic[operator][score]=(mean, sq, all_count)
            else:
                ope_dic[operator][score] = (-1, -1, all_count)
    for operator in nope_dic.keys():
        for score in nope_dic[operator].keys():
            score_list = nope_dic[operator][score][0]
            all_count = nope_dic[operator][score][1]
            if all_count != 0:
                sum = 0
                for sc in score_list:
                    sum += sc
                mean = sum/all_count
                sq=0
                for sc in score_list:
                    sq += (sc-mean)**2
                sq /= all_count
                nope_dic[operator][score]=(mean, sq, all_count)
            else:
                nope_dic[operator][score] = (-1, -1, all_count)
    # result = (ope_dic, nope_dic)
    # Phase 4: pair the two sides for every (operator, score) seen in
    # nope_dic, writing (with, without) into ope_dic.
    for i in nope_dic:
        for j in nope_dic[i]:
            all_score = 0
            all_count = 0
            if i in ope_dic:
                if j in ope_dic[i]:
                    all_count = ope_dic[i][j][2]
            n_all_count = nope_dic[i][j][2]
            if all_count == 0:
                ope_temp = (-1, -1, 0)
            else:
                ope_temp = ope_dic[i][j]
            if n_all_count == 0:
                n_ope_temp = (-1,-1, 0)
            else:
                n_ope_temp = nope_dic[i][j]
            if i not in ope_dic:
                ope_dic[i] = {}
            ope_dic[i][j] =(ope_temp,n_ope_temp)
    # NOTE(review): at this point every ope_dic entry is already a tuple
    # (a 3-tuple from phase 3 or a pair from phase 4), so this guard never
    # fires — entries seen only WITH the operator stay as bare 3-tuples
    # instead of (with, without) pairs. Confirm callers handle both shapes.
    for i in ope_dic:
        for j in ope_dic[i]:
            if type(ope_dic[i][j]).__name__ != 'tuple':
                all_score = ope_dic[i][j][0]
                all_count = ope_dic[i][j][1]
                n_ope_temp = (-1, -1, 0)
                if all_count == 0:
                    ope_temp = (-1, -1, 0)
                else:
                    sum = 0
                    for sc in all_score:
                        sum += sc
                    mean = sum / all_count
                    sq = 0
                    for sc in all_score:
                        sq += (sc - mean) ** 2
                    sq /= all_count
                    ope_temp = (mean, sq, all_count)
                ope_dic[i][j] = (ope_temp, n_ope_temp)
    return ope_dic
def get_dataset_model_exist_dic():
    """Build per-dataset model score statistics.

    Maps each dataset to the notebooks paired with it (only notebooks that
    have rows in ``result``), saves that mapping to ``pair_dic.npy``, then
    runs ``get_model_exist_dic_by_one_dataset`` per dataset and saves the
    combined result to ``dataset_model_dic.npy``.

    Returns the ``{dataset_id: model statistics}`` mapping.
    """
    print("get_pair_dic")
    cursor, db = create_connection()
    cursor.execute('select distinct(notebook_id) from result')
    scored_notebooks = [row[0] for row in cursor.fetchall()]
    cursor.execute('select * from pair')
    pair_dic = {}
    for row in cursor.fetchall():
        nid = int(row[0])
        did = int(row[1])
        # NOTE(review): nid is cast to int while scored_notebooks keeps the
        # driver's raw values — if the driver returns strings this membership
        # test never matches; confirm driver row types.
        if nid not in scored_notebooks:
            continue
        pair_dic.setdefault(did, []).append(nid)
    np.save('./pair_dic.npy', pair_dic)
    print("get_pair_dic end")
    result = {}
    for count, dataset_id in enumerate(pair_dic):
        print(count)
        result[dataset_id] = get_model_exist_dic_by_one_dataset(dataset_id, pair_dic[dataset_id])
    np.save('./dataset_model_dic.npy', result)
    return result
def get_dataset_model_exist_dic_len():
    """Build per-dataset model sequence-length statistics.

    Maps each dataset to the notebooks paired with it (only notebooks that
    have rows in ``result``), runs
    ``get_model_exist_dic_by_one_dataset_len`` per dataset, and saves the
    combined mapping to ``0d1mgap1list.npy``.

    Returns the ``{dataset_id: length statistics}`` mapping.
    """
    print("get_pair_dic")
    cursor, db = create_connection()
    cursor.execute('select distinct(notebook_id) from result')
    scored_notebooks = [row[0] for row in cursor.fetchall()]
    cursor.execute('select * from pair')
    pair_dic = {}
    for row in cursor.fetchall():
        nid = int(row[0])
        did = int(row[1])
        # NOTE(review): nid is cast to int while scored_notebooks keeps the
        # driver's raw values — confirm driver row types match.
        if nid not in scored_notebooks:
            continue
        pair_dic.setdefault(did, []).append(nid)
    result = {}
    for count, dataset_id in enumerate(pair_dic):
        print(count)
        result[dataset_id] = get_model_exist_dic_by_one_dataset_len(dataset_id, pair_dic[dataset_id])
    np.save('./0d1mgap1list.npy', result)
    return result
def get_model_operator_exist_dic():
    """Per-(model, operator) score statistics over all scored notebooks.

    For each configured operator, notebooks are split into those that use it
    and those that don't; each side accumulates, per model and score metric,
    the non-(-1) values from ``get_one_model_result``, then both sides are
    reduced to ``(mean, variance, count)`` triples and paired as
    ``model -> operator -> score -> (with, without)``.
    The result is saved to ``model_operator_exist_dic.npy``.
    """
    # NOTE(review): eval() on config text executes arbitrary code; config.ini
    # must stay trusted.
    CONFIG.read('config.ini')
    operator_dic = eval(CONFIG.get('operators', 'operations'))
    model_operator_exist_dic = {}
    nmodel_operator_exist_dic = {}
    operator_notebook_dic = {}
    noperator_notebook_dic = {}
    cursor, db = create_connection()
    in_result = []
    sql = 'select distinct(notebook_id) from result'
    cursor.execute(sql)
    sql_res = cursor.fetchall()
    for row in sql_res:
        in_result.append(row[0])
    # Phase 1: accumulate scores of notebooks that USE each operator.
    for operator in operator_dic.keys():
        sql = "SELECT distinct notebook_id FROM operator where operator = '" + operator + "'"
        cursor.execute(sql)
        sql_res = cursor.fetchall()
        operator_notebook_dic[operator] = []
        for row in sql_res:
            notebook_id = int(row[0])
            if notebook_id not in in_result:
                continue
            operator_notebook_dic[operator].append(notebook_id)
            one_model_result = get_one_model_result(notebook_id)
            for model in one_model_result:
                if model not in model_operator_exist_dic:
                    model_operator_exist_dic[model] = {}
                if operator not in model_operator_exist_dic[model]:
                    model_operator_exist_dic[model][operator] = {}
                for i in one_model_result[model]:
                    # -1 is the "metric absent" sentinel.
                    if one_model_result[model][i] != -1:
                        if i not in model_operator_exist_dic[model][operator]:
                            model_operator_exist_dic[model][operator][i] = ([], 0)
                        all_score = model_operator_exist_dic[model][operator][i][0]
                        all_count = model_operator_exist_dic[model][operator][i][1]
                        all_score.append(one_model_result[model][i])
                        all_count += 1
                        model_operator_exist_dic[model][operator][i] = (all_score, all_count)
                        print('add_one_result:',model_operator_exist_dic[model][operator][i])
    # Complement: scored notebooks that do NOT use the operator.
    for operator in operator_notebook_dic:
        noperator_notebook_dic[operator] = []
        for notebook in in_result:
            if notebook not in operator_notebook_dic[operator]:
                noperator_notebook_dic[operator].append(notebook)
    # Phase 2: accumulate scores of the complement notebooks.
    for operator in operator_dic.keys():
        for notebook_id in noperator_notebook_dic[operator]:
            if notebook_id not in in_result:
                continue
            one_model_result = get_one_model_result(notebook_id)
            for model in one_model_result:
                if model not in nmodel_operator_exist_dic:
                    nmodel_operator_exist_dic[model] = {}
                if operator not in nmodel_operator_exist_dic[model]:
                    nmodel_operator_exist_dic[model][operator] = {}
                for i in one_model_result[model]:
                    if one_model_result[model][i] != -1:
                        if i not in nmodel_operator_exist_dic[model][operator]:
                            nmodel_operator_exist_dic[model][operator][i] = ([], 0)
                        all_score = nmodel_operator_exist_dic[model][operator][i][0]
                        all_count = nmodel_operator_exist_dic[model][operator][i][1]
                        all_score.append(one_model_result[model][i])
                        all_count += 1
                        nmodel_operator_exist_dic[model][operator][i] = (all_score, all_count)
                        print('add_one_result:', nmodel_operator_exist_dic[model][operator][i])
    print('end_add_one_result')
    print(nmodel_operator_exist_dic)
    # Phase 3: for every (model, operator, score) seen WITHOUT the operator,
    # reduce both sides to (mean, variance, count) and pair them; note `sum`
    # shadows the builtin in this block.
    for model in nmodel_operator_exist_dic:
        print('model:',model)
        for operator in nmodel_operator_exist_dic[model]:
            print('operator:', operator)
            for score in nmodel_operator_exist_dic[model][operator]:
                print('score:', score)
                all_score = []
                all_count = 0
                if model in model_operator_exist_dic:
                    if operator in model_operator_exist_dic[model]:
                        if score in model_operator_exist_dic[model][operator]:
                            all_score = model_operator_exist_dic[model][operator][score][0]
                            all_count = model_operator_exist_dic[model][operator][score][1]
                n_all_score = nmodel_operator_exist_dic[model][operator][score][0]
                n_all_count = nmodel_operator_exist_dic[model][operator][score][1]
                if all_count == 0:
                    ope_temp = (-1, -1, 0)
                else:
                    sum = 0
                    for sc in all_score:
                        sum += sc
                    mean = sum/all_count
                    sq = 0
                    for sc in all_score:
                        sq += (sc-mean)**2
                    sq /= all_count
                    ope_temp = (mean, sq, all_count)
                if n_all_count == 0:
                    n_ope_temp = (-1, -1, 0)
                else:
                    sum = 0
                    for sc in n_all_score:
                        sum += sc
                    mean = sum / n_all_count
                    sq = 0
                    for sc in n_all_score:
                        sq += (sc - mean)**2
                    sq /= n_all_count
                    n_ope_temp = (mean, sq, n_all_count)
                print('ope_temp', ope_temp)
                print('n_ope_temp', n_ope_temp)
                if model not in model_operator_exist_dic:
                    model_operator_exist_dic[model] = {}
                if operator not in model_operator_exist_dic[model]:
                    model_operator_exist_dic[model][operator] = {}
                model_operator_exist_dic[model][operator][score] =(ope_temp,n_ope_temp)
                print('model_operator_exist_dic',model_operator_exist_dic[model][operator][score])
    # NOTE(review): the phase-1 accumulators are (list, count) TUPLES, so this
    # tuple-type guard never fires — entries seen only WITH the operator are
    # saved as raw (score_list, count) accumulators rather than being reduced
    # here. Confirm consumers of model_operator_exist_dic.npy expect that.
    for model in model_operator_exist_dic:
        for operator in model_operator_exist_dic[model]:
            for score in model_operator_exist_dic[model][operator]:
                if type(model_operator_exist_dic[model][operator][score]).__name__ != 'tuple':
                    all_score = model_operator_exist_dic[model][operator][score][0]
                    all_count = model_operator_exist_dic[model][operator][score][1]
                    n_ope_temp = (-1, -1, 0)
                    sum = 0
                    for sc in all_score:
                        sum += sc
                    mean = sum/all_count
                    sq = 0
                    for sc in all_score:
                        sq += (sc-mean)**2
                    sq /= all_count
                    ope_temp = (mean, sq, all_count)
                    model_operator_exist_dic[model][operator][score] =(ope_temp,n_ope_temp)
                    print('model_operator_exist_dic', model_operator_exist_dic[model][operator][score])
    np.save('./model_operator_exist_dic.npy', model_operator_exist_dic)
def _score_stats(scores, count):
    """Return (mean, variance, count) for ``scores``; (-1, -1, 0) when count is 0."""
    if count == 0:
        return (-1, -1, 0)
    total = 0
    for sc in scores:
        total += sc
    mean = total / count
    variance = 0
    for sc in scores:
        variance += (sc - mean) ** 2
    variance /= count
    return (mean, variance, count)


def _add_notebook_scores(target, operator, one_model_result):
    """Accumulate every valid (!= -1) score of one notebook into
    ``target[model][operator][score_name] = (score_list, count)``."""
    for model in one_model_result:
        target.setdefault(model, {}).setdefault(operator, {})
        for score_name in one_model_result[model]:
            if one_model_result[model][score_name] != -1:
                if score_name not in target[model][operator]:
                    target[model][operator][score_name] = ([], 0)
                all_score, all_count = target[model][operator][score_name]
                all_score.append(one_model_result[model][score_name])
                target[model][operator][score_name] = (all_score, all_count + 1)
                print('add_one_result:', target[model][operator][score_name])


def get_model_operator_by_one_dataset(dataset_id, notebook_list):
    """Compute per-(model, operator, score-name) statistics for one dataset.

    For every operator, notebooks of the dataset are split into "used the
    operator" and "did not use it"; each final entry is a pair of
    (mean, variance, count) tuples for the two groups, with (-1, -1, 0)
    standing in for an empty group.

    :param dataset_id: dataset primary key; used to look up notebooks when
        ``notebook_list`` is empty.
    :param notebook_list: pre-computed notebook ids, or [] to query the DB.
    :return: nested dict model -> operator -> score-name -> (stats_with, stats_without)
    """
    cursor, db = create_connection()
    CONFIG.read('config.ini')
    # NOTE(review): eval() on config text — acceptable only for trusted config.
    operator_dic = eval(CONFIG.get('operators', 'operations'))
    notebook_list_of_dataset = []
    operator_notebook_dic = {}
    noperator_notebook_dic = {}
    nmodel_operator_exist_dic = {}
    model_operator_exist_dic = {}
    if notebook_list != []:
        notebook_list_of_dataset = notebook_list
    else:
        # NOTE(review): SQL built by concatenation; dataset_id is an int here,
        # but parameterized queries would be safer.
        sql = "SELECT pair.nid FROM pair where pair.did=" + str(dataset_id)
        cursor.execute(sql)
        for row in cursor.fetchall():
            notebook_list_of_dataset.append(int(row[0]))
    # Pass 1: notebooks of this dataset that USED each operator.
    for operator in operator_dic.keys():
        sql = "SELECT distinct notebook_id FROM operator where operator = '" + operator + "'"
        cursor.execute(sql)
        operator_notebook_dic[operator] = []
        for row in cursor.fetchall():
            notebook_id = int(row[0])
            if notebook_id not in notebook_list_of_dataset:
                continue
            operator_notebook_dic[operator].append(notebook_id)
            _add_notebook_scores(model_operator_exist_dic, operator,
                                 get_one_model_result(notebook_id))
    # Complement: notebooks of this dataset that did NOT use the operator.
    for operator in operator_notebook_dic:
        noperator_notebook_dic[operator] = []
        for notebook in notebook_list_of_dataset:
            if notebook not in operator_notebook_dic[operator]:
                noperator_notebook_dic[operator].append(notebook)
    # Pass 2: accumulate scores for the complement ("not used") side.
    for operator in operator_dic.keys():
        for notebook_id in noperator_notebook_dic[operator]:
            _add_notebook_scores(nmodel_operator_exist_dic, operator,
                                 get_one_model_result(notebook_id))
    # Merge: wherever the "not used" side has data, replace both raw entries
    # by (stats_with_operator, stats_without_operator).
    for model in nmodel_operator_exist_dic:
        for operator in nmodel_operator_exist_dic[model]:
            for score in nmodel_operator_exist_dic[model][operator]:
                all_score = []
                all_count = 0
                if model in model_operator_exist_dic \
                        and operator in model_operator_exist_dic[model] \
                        and score in model_operator_exist_dic[model][operator]:
                    all_score, all_count = model_operator_exist_dic[model][operator][score]
                n_all_score, n_all_count = nmodel_operator_exist_dic[model][operator][score]
                ope_temp = _score_stats(all_score, all_count)
                n_ope_temp = _score_stats(n_all_score, n_all_count)
                print('ope_temp', ope_temp)
                print('n_ope_temp', n_ope_temp)
                model_operator_exist_dic.setdefault(model, {}).setdefault(operator, {})
                model_operator_exist_dic[model][operator][score] = (ope_temp, n_ope_temp)
                print('model_operator_exist_dic', model_operator_exist_dic[model][operator][score])
    # Convert remaining raw (score_list, count) entries that had no "not used"
    # counterpart.  BUG FIX: the old check ``type(entry).__name__ != 'tuple'``
    # was always False (raw entries are tuples too), so these entries were
    # never converted; a raw entry is now recognized by its list first element.
    for model in model_operator_exist_dic:
        for operator in model_operator_exist_dic[model]:
            for score in model_operator_exist_dic[model][operator]:
                entry = model_operator_exist_dic[model][operator][score]
                if isinstance(entry[0], list):
                    ope_temp = _score_stats(entry[0], entry[1])
                    model_operator_exist_dic[model][operator][score] = (ope_temp, (-1, -1, 0))
    return model_operator_exist_dic
def get_dataset_model_operator_exist_dic():
    """Build {dataset_id: model/operator statistics} for every dataset with results.

    Persists the intermediate dataset->notebooks mapping to pair_dic.npy and
    the final per-dataset statistics to dataset_model_operation_dic.npy.
    """
    print("get_pair_dic")
    cursor, db = create_connection()
    # ids of notebooks that actually produced a result row
    cursor.execute('select distinct(notebook_id) from result')
    in_result = [row[0] for row in cursor.fetchall()]
    # dataset_id -> [notebook ids], restricted to notebooks that have results
    cursor.execute('select * from pair')
    pair_dic = {}
    for row in cursor.fetchall():
        notebook_id = int(row[0])
        dataset_id = int(row[1])
        if notebook_id not in in_result:
            continue
        pair_dic.setdefault(dataset_id, []).append(notebook_id)
    np.save('./pair_dic.npy', pair_dic)
    print("get_pair_dic end")
    result = {}
    for count, dataset_id in enumerate(pair_dic):
        print(count)
        result[dataset_id] = get_model_operator_by_one_dataset(dataset_id, pair_dic[dataset_id])
    np.save('./dataset_model_operation_dic.npy', result)
if __name__ == '__main__':
    # Interactive entry point: pick which statistics dictionary to (re)build.
    print('input stat type:')
    print('1: get_exist_dic') # 0d0m gap1 y
    print('1.1: get_exist_dic_len') # 0d0m gap2 y
    print('2: get_mean_group_by_dataset') # no use
    print('3: get_dataset_exist_dic') # 1d0m gap1 y
    print('3.1: get_dataset_exist_dic') # 1d0m gap2 y
    print('4: get_operator_exist_dic') # 0d0m gap3 y
    print('6: get_dataset_operator_exist_dic') # 1d0m gap3 y
    print('7: get_model_exist_dic') # 0d1m gap1 n
    print('7.1: get_model_exist_dic') # 0d1m gap2 n
    print('8: get_dataset_model_exist_dic') # 1d1m gap1 y
    print('8.1: get_dataset_model_exist_dic_len') # 1d1m gap2 y
    print('9: get_model_operator_exist_dic') # 0d1m gap3 y
    print('10: get_dataset_model_operator_exist_dic') # 1d1m gap3 y
    print('-1: show_dic')
    # Dispatch table replaces the long if/elif chain; each entry maps a menu
    # choice to the builder it runs.
    builders = {
        '1': get_exist_dic,
        '1.1': get_exist_dic_len,
        '2': get_mean_group_by_dataset,
        '3': get_dataset_exist_dic,
        '3.1': get_dataset_exist_dic_len,
        '4': get_operator_exist_dic,
        '5': get_operator_param_score,
        '6': get_dataset_operator_exist_dic,
        '7': get_model_exist_dic,
        '7.1': get_model_exist_dic_len,
        '8': get_dataset_model_exist_dic,
        '8.1': get_dataset_model_exist_dic_len,
        '9': get_model_operator_exist_dic,
        '10': get_dataset_model_operator_exist_dic,
    }
    rtype = input()
    if rtype in builders:
        res = builders[rtype]()
    elif rtype == '-1':
        # dataset,operator,model,exist -> result
        print('input show type:')
        print('1: get_exist_dic') # 0d0mgap1
        print('3: get_dataset_exist_dic') # 1d0mgap1
        print('4: get_operator_exist_dic') # 0d0mgap3
        print('6: get_dataset_operator_exist_dic')
        print('7: get_model_exist_dic')
        print('8: get_dataset_model_exist_dic')
        print('9: get_model_operator_exist_dic')
        print('10: get_dataset_model_operator_exist_dic')
        dic_path = input()
        res = show_dic(dic_path)
22,668 | ea57db2a12ee3dc509144e8c105829d10bc24a20 | import logging
from fastapi import APIRouter
from app.db.models import (
AccountModelIn,
AccountModelOut,
BookingModelIn,
BookingModelOut,
)
from app.service import (
create_account,
create_booking,
)
router = APIRouter()
logger = logging.getLogger(__name__)
@router.get("/status")
async def status():
    """Liveness probe: always reports the service as up."""
    return dict(status="ok")
@router.post("/account", status_code=201, response_model=AccountModelOut)
async def create_user_account(account: AccountModelIn):
    """Create a new user account and return its stored representation."""
    created = await create_account(account)
    return created
@router.post("/booking", status_code=201, response_model=BookingModelOut)
async def create_user_booking(booking_model: BookingModelIn):
    """Log the incoming booking request, persist it, and return the result."""
    logger.info(f'Booking IN: {booking_model}')
    created = await create_booking(booking_model)
    return created
|
22,669 | b48f26ba4677f284f47b19dab6200b8426c2131f | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 20 18:25:49 2016
@author: Stephan
"""
import requests
from bs4 import BeautifulSoup
import re
def get_medicam(medicam):
    """Search the public French medicines database and return matching names.

    Posts the search term to the site's form endpoint and scrapes the
    'ResultRowDeno' table cells out of the response HTML.
    """
    response = requests.post(
        "http://base-donnees-publique.medicaments.gouv.fr/index.php",
        data={'choixRecherche': 'medicament', 'txtCaracteres': medicam, 'action': 'show'},
    )
    soup = BeautifulSoup(response.text, "lxml")
    return [cell.text for cell in soup.findAll('td', {'class': 'ResultRowDeno'})]
# Fetch all IBUPROFENE entries and parse lines shaped like
# "<name> <dose> mg <form>, <route> <extra>"; print the decomposed fields.
ibupro = get_medicam('IBUPROFENE')
ibuprofene = []
for ibup in ibupro:
    ibuprofene.append(re.findall(r'(.*)\s(\d{1,4})\s(mg?)(?:\/\d+ mg?)?\s?(.*),\s(\S*)\s(\S*)', ibup))
for parsed in ibuprofene:
    if parsed != []:
        groups = parsed[0]
        print(groups[0] + groups[1] + ' ' + groups[2] + ' ' + groups[3])
22,670 | 2f298817fabe723439c0e343bcde76aa9daee1da | import os
from PIL import Image
from icoextract import IconExtractor
ICONS_FOLDER = "icons"
os.makedirs(ICONS_FOLDER, exist_ok=True)
ICON_SIZE = 60
MAX_COLOURS = 30
def get_icon(app_path: str, name: str) -> str:
    """Extract an executable's icon, downscale and palette-reduce it, caching the result.

    Returns the path of the simplified PNG; extraction is skipped when the
    simplified file already exists.
    """
    download_path = os.path.join(ICONS_FOLDER, name + ".png")
    simplified_path = os.path.join(ICONS_FOLDER, name + "-simple.png")
    if os.path.isfile(simplified_path):
        return simplified_path
    IconExtractor(app_path).export_icon(download_path)
    resized = Image.open(download_path).resize((ICON_SIZE, ICON_SIZE), Image.Resampling.LANCZOS)
    resized.quantize(MAX_COLOURS).save(simplified_path)
    return simplified_path
|
22,671 | df96c0f8a7e0ac7058eb98ffefb2f7a5ca90e0c6 | #!/usr/bin/python
'''
Created on May 16, 2012
@author: Ehsan Khoddammohammadi
'''
import sys
joiner_string = "#"
def left_binarize(rule, joiner="#"):
    """Left-binarize a CFG rule.

    rule: (LHS, [RHS symbols]). Returns the list of binary rules produced by
    repeatedly folding the RHS prefix into an intermediate nonterminal whose
    name is the prefix symbols joined by ``joiner``.

    ``joiner`` generalizes the former hard-coded module global joiner_string;
    the default "#" matches that global's value, so existing callers are
    unaffected (note: the global is no longer consulted at call time).
    """
    LHS, RHS_list = rule
    if len(RHS_list) <= 2:
        # already binary (or unary): nothing to do
        return [rule]
    prefix_name = joiner.join(RHS_list[0:-1])
    # binarize the prefix first so rules come out bottom-up, then add the
    # top rule LHS -> prefix RHS_last
    new_rules = left_binarize((prefix_name, RHS_list[0:-1]), joiner)
    new_rules.append((LHS, [prefix_name, RHS_list[-1]]))
    return new_rules
def right_binarize(rule, joiner="#"):
    """Right-binarize a CFG rule.

    rule: (LHS, [RHS symbols]). Returns the list of binary rules produced by
    repeatedly folding the RHS suffix into an intermediate nonterminal whose
    name is the suffix symbols joined by ``joiner``.

    ``joiner`` generalizes the former hard-coded module global joiner_string;
    the default "#" matches that global's value, so existing callers are
    unaffected (note: the global is no longer consulted at call time).
    """
    LHS, RHS_list = rule
    if len(RHS_list) <= 2:
        # already binary (or unary): nothing to do
        return [rule]
    suffix_name = joiner.join(RHS_list[1:])
    # binarize the suffix first so rules come out bottom-up, then add the
    # top rule LHS -> RHS_first suffix
    new_rules = right_binarize((suffix_name, RHS_list[1:]), joiner)
    new_rules.append((LHS, [RHS_list[0], suffix_name]))
    return new_rules
def rule_reader(file_path):
    """Read grammar rules from a file, one rule per line: "LHS RHS1 RHS2 ...".

    Returns a list of (LHS, [RHS symbols]) tuples. Blank lines are skipped
    (the old version raised IndexError on them), and ``with`` guarantees the
    file is closed even if parsing raises.
    """
    rules = []
    with open(file_path, 'r') as fin:
        for line in fin:
            tokens = line.split()
            if not tokens:
                continue
            rules.append((tokens[0], tokens[1:]))
    return rules
def binarize(rules, method='left'):
    """Binarize every rule; any method other than 'right' means left-binarization."""
    binarizer = right_binarize if method == 'right' else left_binarize
    binarized_rules = []
    for rule in rules:
        binarized_rules.extend(binarizer(rule))
    return binarized_rules
def show(binarized_rules):
    """Format binary rules as tab-separated "LHS<TAB>RHS1<TAB>RHS2" strings."""
    return ['\t'.join((lhs, rhs[0], rhs[1])) for lhs, rhs in binarized_rules]
if __name__ == '__main__':
    # CLI: binarize a grammar file.
    #   argv[1] = path to grammar file, argv[2] = binarization method (left|right)
    # Single-argument print() calls are valid in both Python 2 and Python 3,
    # replacing the former Python-2-only print statements.
    if (len(sys.argv) > 2):
        file_name = sys.argv[1]
        method = sys.argv[2]
    else:
        print("first_argument=pathToGrammar second_argument=left[,right]")
        sys.exit(1)
    rules = rule_reader(file_name)
    binarized_rules = binarize(rules, method)
    for rule in show(binarized_rules):
        print(rule)
22,672 | cb7ee5b1738d3c7d2e8e0f7b83fa9538fe84bb4c | # -*- coding: utf-8 -*-
"""
Este módulo contem funções para extrair dados das empresas a partir de planilhas em excell.
"""
#imports diversos
import openpyxl
from datetime import date
#Função principal, chamando funções especificas de linha e coluna
def get_raw_data_from_excel(folder):
    """Load the 'BS P&L' sheet of the given workbook and return its figures.

    Returns {year_header: {year_header: {line_label: value}}} as produced by
    get_columns_data, and also prints it.
    """
    workbook = openpyxl.load_workbook(folder)
    sheet = workbook['BS P&L']
    # data of interest starts at row 25, columns 4..7 (the four year columns)
    dic_year = get_columns_data(sheet, 25, 4)
    print(dic_year)
    return dic_year
#Função para varrer colunas e chamar função que varre as linhas
def get_columns_data(sheet, lin, col):
    """Collect one year's worth of row data for each year column (col .. 7).

    Row 15 holds the year headers. Returns {year: {year: row-data dict}}.
    NOTE(review): ``lin`` is accepted but unused — kept for call compatibility.
    """
    dic_excel = {}
    for column in range(col, 8):
        header = sheet.cell(15, column).value
        dic_excel[header] = {header: get_rows_data(sheet, column)}
    return dic_excel
#Função que varre as linhas para obter dados
# Balance-sheet / P&L labels mapped to their fixed row numbers in the
# 'BS P&L' sheet.  Labels must be unique dict keys, so sheet labels that
# repeat are disambiguated following the original convention (trailing
# space, extra punctuation, plural forms).
_ROW_BY_LABEL = (
    ('CAIXA', 25),
    ('APLICAÇÕES FINANCEIRAS', 26),
    ('CONTAS A RECEBER', 27),
    ('PDD', 28),
    ('ESTOQUE', 29),
    ('AC PARTES RELACIONADAS', 30),
    ('IR E CS DIFERIDOS', 31),
    ('CREDITOS FISCAIS A RECUPERAR', 32),
    ('ATIVOS DERIVATIVOS', 33),
    ('ADIANTAMENTOS', 34),
    ('OUTROS ATIVOS CIRCULANTES', 35),
    ('ANC PARTES RELACIONADAS', 37),
    # reversed word order ("CS E IR") disambiguates from row 31
    ('CS E IR DIFERIDOS', 38),
    ('CLIENTES', 39),
    ('REPACTO RISCO HIDROLÓGICO', 40),
    ('OUTROS ATIVOS NÃO CIRCULANTES', 41),
    ('IMOBILIZADO', 43),
    ('INVESTIMENTOS', 44),
    ('INTANGIVEIS', 45),
    ('EMPRÉSTIMOS E FINANCIAMENTO', 48),
    ('DEBENTURES', 49),
    ('PARTES RELACIONADAS', 51),
    ('EMPRÉSTIMO (CIRCULANTE DO LONGO PRAZO)', 52),
    ('FORNECEDORES', 53),
    ('SALARIOS E ENCARGOS SOCIAIS', 54),
    ('OBRIGAÇÕES TRIBUTÁRIAS', 55),
    # "IR." disambiguates from row 31
    ('IR. E CS DIFERIDOS', 56),
    ('DIVIDENDOS', 57),
    ('DERIVATIVOS', 58),
    ('ADIANTAMENTO DE CLIENTES', 59),
    ('PROVISÃO', 60),
    ('OUTROS', 61),
    # plural disambiguates from row 48
    ('EMPRÉSTIMOS E FINANCIAMENTOS', 63),
    ('DEBENTURES ', 64),
    ('ADIANTAMENTO', 65),
    ('PROVISÃO ', 66),
    ('CONCESSÕES A PAGAR', 67),
    ('OUTROS ', 68),
    ('PARTICIPAÇÃO MINORITÁRIA', 72),
    ('CAPITAL SOCIAL', 73),
    ('RESERVAS', 74),
    ('PREJUÍZO/LUCROS ACUMULADOS', 75),
    ('RECEITA BRUTA', 82),
    ('RECEITA LÍQUIDA', 83),
    ('CPV', 84),
    ('DEPRECIAÇÃO', 85),
    ('DESPESAS OPERACIONAIS', 88),
    ('DESPESAS ADMINISTRATIVAS', 89),
    ('DESPESAS PESSOAIS', 90),
    ('DESPESAS COMERCIAIS', 91),
    ('OUTRAS RECEITAS / DESPESAS', 92),
    ('DESPESA FINANCEIRA', 95),
    ('RECEITA FINANCEIRA', 96),
    # BUG FIX: this key previously repeated row 92's label, so row 97
    # silently overwrote row 92; trailing space keeps both values.
    ('OUTRAS RECEITAS / DESPESAS ', 97),
    ('IMPOSTOS DIFERIDOS', 98),
    ('RESULTADO NÃO OPERACIONAL', 101),
    ('EQUIVALENCIA PATRIMONIAL', 102),
    ('IRPJ', 104),
    ('CSLL', 105),
    ('PARTICIPAÇÃO MINORITÁRIA ', 108),
)


def get_rows_data(sheet, col):
    """Collect the fixed set of financial-statement cells for one year column.

    :param sheet: openpyxl worksheet (or any object with ``cell(row, col).value``)
    :param col: 1-based column index of the year being extracted
    :return: {label: cell value} for every row in _ROW_BY_LABEL
    """
    return {label: sheet.cell(row, col).value for label, row in _ROW_BY_LABEL}
# Run the extraction against the locked scorecard template.
# Raw string avoids the invalid "\P"/"\T" escape sequences of the old literal
# (a DeprecationWarning today, a SyntaxError in future Python); the resulting
# path value is byte-identical.
get_raw_data_from_excel(r"C:\Projeto\Template_Scorecard_Locked_2018.xlsm")
|
22,673 | 32b80789fd6899a4c00db33025431679c55c0261 | # Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__ == "__main__":
num_a = int(input())
a = set(map(int, input().split(" ")))
num_b = int(input())
b = set(map(int, input().split(" ")))
output = [l for l in a.difference(b)]
output += [k for k in b.difference(a)]
output = set(output)
output = list(output)
output.sort()
for item in output:
print(item)
|
22,674 | ac4956fcfa7b1c0becfd1fde2c57319a10b69f9c | from django.contrib import admin
from django.contrib.admin.widgets import ForeignKeyRawIdWidget, NoReverseMatch, reverse, Truncator
class FastForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
    """
    ForeignKeyRawIdWidget, which doesn't make extra hit to DB, when rendered in inline form
    But take prepopulated value from instance
    """

    def __init__(self, *args, **kwargs):
        # `instance` is the model instance being edited; `field` is the name of
        # the FK attribute on that instance whose related object is shown.
        self.instance = kwargs.pop('instance')
        self.field = kwargs.pop('field')
        # Parent signature requires an admin site; the default site is used.
        super().__init__(admin_site=admin.site, *args, **kwargs)

    def label_and_url_for_value(self, value):
        # Override: ignore `value` and read the related object straight off the
        # instance (already loaded), avoiding the parent's extra DB query.
        try:
            obj = getattr(self.instance, self.field)
        except AttributeError:
            # Related object missing/unresolvable: render an empty label/link.
            return '', ''
        try:
            # Build the admin change-page URL for the related object.
            url = reverse(
                '%s:%s_%s_change' % (
                    self.admin_site.name,
                    obj._meta.app_label,
                    obj._meta.object_name.lower(),
                ),
                args=(obj.pk,)
            )
        except NoReverseMatch:
            url = ''  # Admin not registered for target model.
        # Label is the object's str(), truncated to 14 words.
        return Truncator(obj).words(14, truncate='...'), url
|
22,675 | e0bd91375e08e8faed9f986beee0b4b4c01c460a | #
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Broadcom OpenOMCI OLT/ONU adapter handler.
"""
from __future__ import absolute_import
import json
import random
from collections import OrderedDict
import arrow
import pyvoltha.common.openflow.utils as fd
import six
import structlog
from heartbeat import HeartBeat
from omci.brcm_mcast_task import BrcmMcastTask
from omci.brcm_mib_download_task import BrcmMibDownloadTask
from omci.brcm_tp_delete_task import BrcmTpDeleteTask
from omci.brcm_tp_setup_task import BrcmTpSetupTask
from omci.brcm_uni_lock_task import BrcmUniLockTask
from omci.brcm_uni_status import BrcmUniStatusTask
from omci.brcm_vlan_filter_task import BrcmVlanFilterTask
from onu_gem_port import OnuGemPort
from onu_tcont import OnuTCont
from pon_port import PonPort
from tp_state import TpState
from pyvoltha.adapters.common.frameio.frameio import hexify
from pyvoltha.adapters.common.kvstore.twisted_etcd_store import TwistedEtcdStore
from pyvoltha.adapters.extensions.events.adapter_events import AdapterEvents
from pyvoltha.adapters.extensions.events.device_events.onu.onu_active_event import OnuActiveEvent
from pyvoltha.adapters.extensions.events.device_events.onu.onu_deleted_event import OnuDeletedEvent
from pyvoltha.adapters.extensions.events.device_events.onu.onu_disabled_event import OnuDisabledEvent
from pyvoltha.adapters.extensions.events.kpi.onu.onu_omci_pm import OnuOmciPmMetrics
from pyvoltha.adapters.extensions.events.kpi.onu.onu_pm_metrics import OnuPmMetrics
from pyvoltha.adapters.extensions.omci.omci_defs import EntityOperations, ReasonCodes
from pyvoltha.adapters.extensions.omci.omci_entities import AniG, Tcont, MacBridgeServiceProfile
from pyvoltha.adapters.extensions.omci.onu_device_entry import OnuDeviceEvents, \
OnuDeviceEntry, IN_SYNC_KEY
from pyvoltha.adapters.extensions.omci.tasks.omci_test_request import OmciTestRequest
from pyvoltha.common.tech_profile.tech_profile import TechProfile
from pyvoltha.common.utils.registry import registry
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, DeferredQueue
from uni_port import RESERVED_TRANSPARENT_VLAN
from uni_port import UniPort, UniType
from voltha_protos.common_pb2 import OperStatus, ConnectStatus, AdminState
from voltha_protos.device_pb2 import Port
from voltha_protos.inter_container_pb2 import InterAdapterMessageType, \
InterAdapterOmciMessage, InterAdapterTechProfileDownloadMessage, InterAdapterDeleteGemPortMessage, \
InterAdapterDeleteTcontMessage
from voltha_protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC
from voltha_protos.openolt_pb2 import OnuIndication
from voltha_protos.voltha_pb2 import TestResponse
from voltha_protos.extensions_pb2 import SingleGetValueResponse, GetValueResponse
OP = EntityOperations
RC = ReasonCodes
IS_MULTICAST = 'is_multicast'
GEM_PORT_ID = 'gemport_id'
_STARTUP_RETRY_WAIT = 10
_PATH_SEPERATOR = "/"
class BrcmOpenomciOnuHandler(object):
    def __init__(self, adapter, device_id):
        """Bind a handler to one ONU device; heavy setup is deferred to
        activate()/reconcile()."""
        self.log = structlog.get_logger(device_id=device_id)
        self.log.debug('starting-handler')
        self.adapter = adapter
        self.core_proxy = adapter.core_proxy
        self.adapter_proxy = adapter.adapter_proxy
        self.parent_id = None
        self.device_id = device_id
        self.proxy_address = None
        self._enabled = False
        self._is_device_active_and_reachable = False
        self.events = None
        self._pm_metrics = None
        self._pm_metrics_started = False
        self._test_request = None
        self._test_request_started = False
        self._tp = dict()  # tp_id -> technology profile definition in KV Store.
        self._reconciling = False
        self.olt_serial_number = ""
        self.uni_status_response_queue = DeferredQueue()
        self._results = SingleGetValueResponse()
        # Persisted onu configuration needed in case of reconciliation.
        self._onu_persisted_state = {
            'onu_id': None,
            'intf_id': None,
            'serial_number': None,
            'admin_state': None,
            'oper_state': None,
            'uni_config': list()
        }
        self._unis = dict()  # Port # -> UniPort
        self._pon = None
        # TODO: probably shouldnt be hardcoded, determine from olt maybe?
        self._pon_port_number = 100
        self.logical_device_id = None
        self._heartbeat = HeartBeat.create(self, device_id)
        # Set up OpenOMCI environment
        self._onu_omci_device = None
        self._dev_info_loaded = False
        self._deferred = None
        # OpenOMCI event-bus subscriptions, populated by _subscribe_to_events().
        self._in_sync_subscription = None
        self._port_state_subscription = None
        self._connectivity_subscription = None
        self._capabilities_subscription = None
        # Fixed OMCI managed-entity ids used when building the MIB.
        self.mac_bridge_service_profile_entity_id = 0x201
        self.gal_enet_profile_entity_id = 0x1
        # Stores information related to queued vlan filter tasks
        # Dictionary with key being uni_id and value being device,uni port ,uni id and vlan id
        self._queued_vlan_filter_task = dict()
        self._multicast_task = None
        self._set_vlan = dict()  # uni_id, tp_id -> set_vlan_id
        self._tp_state_map_per_uni = dict()  # uni_id -> {dictionary tp_id->TpState}
        # Paths from kv store
        ONU_PATH = 'service/voltha/openonu'
        # Initialize KV store client
        self.args = registry('main').get_args()
        host, port = self.args.etcd.split(':', 1)
        self.tp_kv_client = TwistedEtcdStore(host, port, TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX)
        self.onu_kv_client = TwistedEtcdStore(host, port, ONU_PATH)
    @property
    def enabled(self):
        """Whether this handler has completed activation/reconciliation."""
        return self._enabled
@enabled.setter
def enabled(self, value):
if self._enabled != value:
self._enabled = value
    @property
    def omci_agent(self):
        """The adapter-wide OpenOMCI agent shared by all handlers."""
        return self.adapter.omci_agent
@property
def omci_cc(self):
return self._onu_omci_device.omci_cc if self._onu_omci_device is not None else None
    @property
    def heartbeat(self):
        """HeartBeat helper monitoring this ONU's liveness."""
        return self._heartbeat
    @property
    def uni_ports(self):
        """All known UniPort objects (snapshot list of the internal dict values)."""
        return list(self._unis.values())
    @property
    def is_device_active_and_reachable(self):
        """True when the ONU is both operationally active and reachable."""
        return self._is_device_active_and_reachable
    @is_device_active_and_reachable.setter
    def is_device_active_and_reachable(self, value):
        # Plain flag update; no side effects are triggered here.
        self._is_device_active_and_reachable = value
def uni_port(self, port_no_or_name):
if isinstance(port_no_or_name, six.string_types):
return next((uni for uni in self.uni_ports
if uni.name == port_no_or_name), None)
assert isinstance(port_no_or_name, int), 'Invalid parameter type'
return next((uni for uni in self.uni_ports
if uni.port_number == port_no_or_name), None)
    @property
    def pon_port(self):
        """The single PonPort of this ONU (None until _init_pon_state runs)."""
        return self._pon
    @property
    def onu_omci_device(self):
        """The OpenOMCI OnuDeviceEntry for this ONU (None until _init_pon_state runs)."""
        return self._onu_omci_device
def receive_message(self, msg):
if self.omci_cc is not None:
self.omci_cc.receive_message(msg)
    # Called once when the adapter creates the device/onu instance
    @inlineCallbacks
    def activate(self, device):
        """First-time activation: persist initial ONU state, register the PON
        port and metrics, and mark the handler enabled."""
        self.log.debug('activate-device', device_id=device.id, serial_number=device.serial_number)
        # the core must have supplied parent/proxy identity for this ONU
        assert device.parent_id
        assert device.parent_port_no
        assert device.proxy_address.device_id
        self.proxy_address = device.proxy_address
        self.parent_id = device.parent_id
        self._pon_port_number = device.parent_port_no
        if self.enabled is not True:
            self.log.info('activating-new-onu', device_id=device.id, serial_number=device.serial_number)
            # populate what we know.  rest comes later after mib sync
            device.root = False
            device.vendor = 'OpenONU'
            device.reason = 'activating-onu'
            # TODO NEW CORE:  Need to either get logical device id from core or use regular device id
            # pm_metrics requires a logical device id.  For now set to just device_id
            self.logical_device_id = self.device_id
            self._onu_persisted_state['serial_number'] = device.serial_number
            try:
                self.log.debug('updating-onu-state', device_id=self.device_id,
                               onu_persisted_state=self._onu_persisted_state)
                # store state in etcd so a later reconcile can restore it
                yield self.onu_kv_client.set(self.device_id, json.dumps(self._onu_persisted_state))
            except Exception as e:
                self.log.error('could-not-store-onu-state', device_id=self.device_id,
                               onu_persisted_state=self._onu_persisted_state, e=e)
                # if we cannot write to storage we can proceed, for now.
                # later onu indications from the olt will have another chance
            yield self.core_proxy.device_update(device)
            self.log.debug('device-updated', device_id=device.id, serial_number=device.serial_number)
            yield self._init_pon_state()
            self.log.debug('pon state initialized', device_id=device.id, serial_number=device.serial_number)
            yield self._init_metrics()
            self.log.debug('metrics initialized', device_id=device.id, serial_number=device.serial_number)
            self.enabled = True
        else:
            self.log.info('onu-already-activated')
    # Called once when the adapter needs to re-create device.  usually on vcore restart
    @inlineCallbacks
    def reconcile(self, device):
        """Rebuild handler state from the persisted etcd record after a core
        restart, then restart the OMCI state machines."""
        self.log.debug('reconcile-device', device_id=device.id, serial_number=device.serial_number)
        # guard: a reconcile may already be in flight for this handler
        if self._reconciling:
            self.log.debug('already-running-reconcile-device', device_id=device.id, serial_number=device.serial_number)
            return
        # first we verify that we got parent reference and proxy info
        assert device.parent_id
        assert device.proxy_address.device_id
        self.proxy_address = device.proxy_address
        self.parent_id = device.parent_id
        self._pon_port_number = device.parent_port_no
        if self.enabled is not True:
            self._reconciling = True
            self.log.info('reconciling-openonu-device')
            self.logical_device_id = self.device_id
            try:
                # restore the state persisted by activate()
                query_data = yield self.onu_kv_client.get(device.id)
                self._onu_persisted_state = json.loads(query_data)
                self.log.debug('restored-onu-state', device_id=self.device_id,
                               onu_persisted_state=self._onu_persisted_state)
            except Exception as e:
                self.log.error('no-stored-onu-state', device_id=device.id, e=e)
                # there is nothing we can do without data.  flag the device as UNKNOWN and cannot reconcile
                # likely it will take manual steps to delete/re-add this onu
                yield self.core_proxy.device_reason_update(self.device_id, "cannot-reconcile")
                yield self.core_proxy.device_state_update(self.device_id, oper_status=OperStatus.UNKNOWN)
                return
            # NOTE(review): _init_pon_state/_init_metrics are inlineCallbacks
            # coroutines but are invoked here without yield (activate() yields
            # them) — their Deferreds are dropped; confirm this is intended.
            self._init_pon_state()
            self.log.debug('pon state initialized', device_id=device.id, serial_number=device.serial_number)
            self._init_metrics()
            self.log.debug('metrics initialized', device_id=device.id, serial_number=device.serial_number)
            self._subscribe_to_events()
            # need to restart omci start machines and reload mib database.  once db is loaded we can finish reconcile
            self._onu_omci_device.start(device)
            self._heartbeat.enabled = True
            self.enabled = True
        else:
            self.log.info('onu-already-activated')
    @inlineCallbacks
    def _init_pon_state(self):
        """Create the PON port, announce it to the core (skipped while
        reconciling), and register this ONU with the OpenOMCI agent."""
        self.log.debug('init-pon-state', device_id=self.device_id, device_logical_id=self.logical_device_id)
        self._pon = PonPort.create(self, self._pon_port_number)
        self._pon.add_peer(self.parent_id, self._pon_port_number)
        self.log.debug('adding-pon-port-to-agent',
                       type=self._pon.get_port().type,
                       admin_state=self._pon.get_port().admin_state,
                       oper_status=self._pon.get_port().oper_status,
                       )
        # during reconcile the core already knows this port; don't re-create it
        if not self._reconciling:
            yield self.core_proxy.port_created(self.device_id, self._pon.get_port())
        self.log.debug('added-pon-port-to-agent',
                       type=self._pon.get_port().type,
                       admin_state=self._pon.get_port().admin_state,
                       oper_status=self._pon.get_port().oper_status,
                       )
        # Create and start the OpenOMCI ONU Device Entry for this ONU
        self._onu_omci_device = self.omci_agent.add_device(self.device_id,
                                                           self.core_proxy,
                                                           self.adapter_proxy,
                                                           support_classes=self.adapter.broadcom_omci,
                                                           custom_me_map=self.adapter.custom_me_entities())
        # Port startup
        if self._pon is not None:
            self._pon.enabled = True
    @inlineCallbacks
    def _init_metrics(self):
        """Set up the event/alarm manager, PM metrics collection, and the
        OMCI test-request machinery for this ONU."""
        self.log.debug('init-metrics', device_id=self.device_id, device_logical_id=self.logical_device_id)
        serial_number = self._onu_persisted_state.get('serial_number')
        ############################################################################
        # Setup Alarm handler
        self.events = AdapterEvents(self.core_proxy, self.device_id, self.logical_device_id,
                                    serial_number)
        ############################################################################
        # Setup PM configuration for this device
        # Pass in ONU specific options
        kwargs = {
            OnuPmMetrics.DEFAULT_FREQUENCY_KEY: OnuPmMetrics.DEFAULT_ONU_COLLECTION_FREQUENCY,
            'heartbeat': self.heartbeat,
            OnuOmciPmMetrics.OMCI_DEV_KEY: self._onu_omci_device
        }
        self.log.debug('create-pm-metrics', device_id=self.device_id, serial_number=serial_number)
        self._pm_metrics = OnuPmMetrics(self.events, self.core_proxy, self.device_id,
                                        self.logical_device_id, serial_number,
                                        grouped=True, freq_override=False, **kwargs)
        pm_config = self._pm_metrics.make_proto()
        self._onu_omci_device.set_pm_config(self._pm_metrics.omci_pm.openomci_interval_pm)
        self.log.debug("initial-pm-config", device_id=self.device_id, serial_number=serial_number)
        # during reconcile the core already has the PM config; don't re-push it
        if not self._reconciling:
            yield self.core_proxy.device_pm_config_update(pm_config, init=True)
        # Note, ONU ID and UNI intf set in add_uni_port method
        self._onu_omci_device.alarm_synchronizer.set_alarm_params(mgr=self.events,
                                                                  ani_ports=[self._pon])
        # Code to Run OMCI Test Action
        kwargs_omci_test_action = {
            OmciTestRequest.DEFAULT_FREQUENCY_KEY:
                OmciTestRequest.DEFAULT_COLLECTION_FREQUENCY
        }
        self._test_request = OmciTestRequest(self.core_proxy,
                                             self.omci_agent, self.device_id,
                                             AniG, serial_number,
                                             self.logical_device_id,
                                             exclusive=False,
                                             **kwargs_omci_test_action)
@inlineCallbacks
def delete(self, device):
    """Tear this ONU down: drop its persisted KV state, stop the metric
    and test collectors, remove the OpenOMCI state machine and emit the
    onu-deleted event.  Failures in either phase are logged, not raised.
    """
    dev_id = device.id
    self.log.info('delete-onu', device_id=dev_id, serial_number=device.serial_number)

    # Phase 1: best-effort removal of the persisted ONU state.
    try:
        yield self.onu_kv_client.delete(dev_id)
    except Exception as err:
        self.log.error('could-not-delete-onu-state', device_id=dev_id, e=err)

    # Phase 2: best-effort teardown of runtime machinery.
    try:
        self._deferred.cancel()
        self._test_request.stop_collector()
        self._pm_metrics.stop_collector()
        self.log.debug('removing-openomci-statemachine')
        self.omci_agent.remove_device(dev_id, cleanup=True)
        yield self.onu_deleted_event()
    except Exception as err:
        self.log.error('could-not-delete-onu', device_id=dev_id, e=err)
def _create_tconts(self, uni_id, us_scheduler):
    """Create a TCONT from the upstream-scheduler config and register it
    on the PON port.

    :param uni_id: UNI instance id the TCONT belongs to
    :param us_scheduler: 'us_scheduler' dict from the tech-profile instance
    :return: list of newly added TCONTs (at most one today; multi-TCONT
        support is still a TODO)
    """
    alloc_id = us_scheduler['alloc_id']
    sched_policy = us_scheduler['q_sched_policy']
    self.log.debug('create-tcont', us_scheduler=us_scheduler)

    # TODO: revisit for multi tconts support
    tcont_cfg = {
        'alloc-id': alloc_id,
        'q_sched_policy': sched_policy,
        'uni_id': uni_id,
    }
    tcont = OnuTCont.create(self, tcont=tcont_cfg)

    created = []
    if self._pon.add_tcont(tcont, True):
        created.append(tcont)
        self.log.debug('pon-add-tcont', tcont=tcont)
    return created
# Called when there is an olt up indication, providing the gem port id chosen by the olt handler
def _create_gemports(self, uni_id, gem_ports, alloc_id_ref, direction):
    """Build OnuGemPort objects from tech-profile gem attribute dicts and
    register them on the PON port.

    :param uni_id: UNI instance id the GEM ports belong to
    :param gem_ports: list of gem attribute dicts from the tech profile
    :param alloc_id_ref: alloc id of the TCONT these GEM ports ride on
    :param direction: "UPSTREAM" or "DOWNSTREAM"
    :return: list of GEM ports actually added to the PON port
    """
    self.log.debug('create-gemport',
                   gem_ports=gem_ports, direction=direction)
    added = []
    for attrs in gem_ports:
        # Tech-profile encodes multicast as the string 'True'.
        is_mcast = attrs[IS_MULTICAST] == 'True'
        discard = attrs['discard_config']
        cfg = {
            GEM_PORT_ID: attrs['multicast_gem_id'] if is_mcast else attrs[GEM_PORT_ID],
            IS_MULTICAST: is_mcast,
            'direction': direction,
            'alloc_id_ref': alloc_id_ref,
            'encryption': attrs['aes_encryption'],
            'discard_config': {
                'max_probability': discard['max_probability'],
                'max_threshold': discard['max_threshold'],
                'min_threshold': discard['min_threshold'],
            },
            'discard_policy': attrs['discard_policy'],
            'max_q_size': attrs['max_q_size'],
            'pbit_map': attrs['pbit_map'],
            'priority_q': attrs['priority_q'],
            'scheduling_policy': attrs['scheduling_policy'],
            'weight': attrs['weight'],
            'uni_id': uni_id,
        }
        port = OnuGemPort.create(self, gem_port=cfg)
        if self._pon.add_gem_port(port, True):
            added.append(port)
            self.log.debug('pon-add-gemport', gem_port=port)
    return added
def _execute_queued_vlan_filter_tasks(self, uni_id, tp_id):
    """Replay vlan-filter tasks that were queued while the tech profile
    for (uni_id, tp_id) was still downloading.

    Each queued entry is rescheduled via ``reactor.callLater(0, ...)`` and
    the queue entry for the tp_id (and, when empty, the uni_id) is removed.
    Any error is logged and swallowed.
    """
    # During OLT Reboots, ONU Reboots, ONU Disable/Enable, it is seen that vlan_filter
    # task is scheduled even before tp task. So we queue vlan-filter task if tp_task
    # or initial-mib-download is not done. Once the tp_task is completed, we execute
    # such queued vlan-filter tasks
    try:
        if uni_id in self._queued_vlan_filter_task and tp_id in self._queued_vlan_filter_task[uni_id]:
            self.log.info("executing-queued-vlan-filter-task",
                          uni_id=uni_id, tp_id=tp_id)
            for filter_info in self._queued_vlan_filter_task[uni_id][tp_id]:
                reactor.callLater(0, self._add_vlan_filter_task, filter_info.get("device"),
                                  uni_id=uni_id, uni_port=filter_info.get("uni_port"),
                                  match_vlan=filter_info.get("match_vlan"),
                                  _set_vlan_vid=filter_info.get("set_vlan_vid"),
                                  _set_vlan_pcp=filter_info.get("set_vlan_pcp"),
                                  tp_id=filter_info.get("tp_id"))
                # Now remove the entry from the dictionary
                self.log.debug("executed-queued-vlan-filter-task",
                               uni_id=uni_id, tp_id=tp_id)
            # Now delete the key entry for the tp_id once we have handled the
            # queued vlan filter tasks for that tp_id
            del self._queued_vlan_filter_task[uni_id][tp_id]
            # If the queued vlan filter tasks for all the tp_ids on a given
            # uni_id is handled, then delete the uni_id key
            if len(self._queued_vlan_filter_task[uni_id]) == 0:
                del self._queued_vlan_filter_task[uni_id]
    except Exception as e:
        self.log.error("vlan-filter-configuration-failed", uni_id=uni_id, error=e)
def _do_tech_profile_configuration(self, uni_id, tp):
    """Instantiate the TCONT and all up/downstream GEM ports described by
    a tech-profile instance.

    :param uni_id: UNI instance id to configure
    :param tp: decoded tech-profile instance (dict)
    :return: (new_tconts, new_gems) — lists of the objects actually added
    """
    us_scheduler = tp['us_scheduler']
    alloc_id = us_scheduler['alloc_id']
    new_tconts = self._create_tconts(uni_id, us_scheduler)

    # Upstream first, then downstream — both share the same alloc id.
    new_gems = list(self._create_gemports(uni_id, tp['upstream_gem_port_attribute_list'],
                                          alloc_id, "UPSTREAM"))
    new_gems.extend(self._create_gemports(uni_id, tp['downstream_gem_port_attribute_list'],
                                          alloc_id, "DOWNSTREAM"))
    return new_tconts, new_gems
@inlineCallbacks
def _get_tp_instance_from_kv_store(self, tp_path):
    """Fetch a tech-profile instance from the KV store, retrying on failure.

    Makes up to five attempts; an attempt fails when the KV client raises
    or returns None.

    :param tp_path: KV-store path of the tech-profile instance
    :return: (via Deferred) the raw stored value, or None if every
        attempt failed
    """
    _max_tp_load_retry_count = 5
    _curr_retry_cnt = 0
    _tp_instance = None
    while _curr_retry_cnt < _max_tp_load_retry_count:
        _curr_retry_cnt += 1
        try:
            _tp_instance = yield self.tp_kv_client.get(tp_path)
        except Exception as e:
            # Previously the exception was silently discarded; record the
            # cause so a persistent KV problem is diagnosable from logs.
            self.log.warn('tp-load-attempt-failed', tp_path=tp_path,
                          retry_cnt=_curr_retry_cnt, e=e)
        if _tp_instance is None:
            self.log.error("failed-to-load-tp--retrying", retry_cnt=_curr_retry_cnt)
            continue
        # if we have got a valid tp instance, break from loop
        break

    returnValue(_tp_instance)
@inlineCallbacks
def load_and_configure_tech_profile(self, uni_id, tp_path):
    """Load the tech-profile instance stored at ``tp_path`` and apply it
    to this ONU over OMCI.

    Two paths:
      * first download for (uni_id, tp_id): build TCONT/GEM objects and
        queue a BrcmTpSetupTask; on failure the whole load is retried
        after a random 1-5 s delay.
      * setup already done: only install GEM ports that are new in the
        stored instance (dynamic gem addition), reusing the existing TCONT.

    :param uni_id: UNI instance id on this ONU
    :param tp_path: KV-store path of the tech-profile instance
    """
    self.log.debug("loading-tech-profile-configuration", uni_id=uni_id, tp_path=tp_path)
    tp_id = self.extract_tp_id_from_path(tp_path)
    if tp_id not in self._tp_state_map_per_uni[uni_id]:
        self._tp_state_map_per_uni[uni_id][tp_id] = TpState(self, uni_id, tp_path)

    if not self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done:
        try:
            if self._tp_state_map_per_uni[uni_id][tp_id].tp_task_ref is not None:
                self.log.info("tech-profile-config-already-in-progress",
                              tp_path=tp_path)
                returnValue(None)

            # The TP cache is keyed by tp_id (see the store below and the
            # read in start_multicast_service).  The lookup previously used
            # tp_path, so the cache never produced a hit.
            if tp_id in self._tp:
                tp = self._tp[tp_id]
            else:
                tpstored = yield self._get_tp_instance_from_kv_store(tp_path)
                if tpstored is None:
                    self.log.error("failed-to-load-tp-instance", tp_path=tp_path)
                    returnValue(None)
                tpstring = tpstored.decode('ascii')
                tp = json.loads(tpstring)
                self._tp[tp_id] = tp

            self.log.debug("tp-instance", tp=tp)
            tconts, gem_ports = self._do_tech_profile_configuration(uni_id, tp)

            @inlineCallbacks
            def success(_results):
                self.log.info("tech-profile-config-done-successfully", uni_id=uni_id, tp_id=tp_id)
                if tp_id in self._tp_state_map_per_uni[uni_id]:
                    self._tp_state_map_per_uni[uni_id][tp_id].tp_task_ref = None
                    self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done = True
                # Now execute any vlan filter tasks that were queued for later
                reactor.callInThread(self._execute_queued_vlan_filter_tasks, uni_id, tp_id)
                yield self.core_proxy.device_reason_update(self.device_id, 'tech-profile-config-download-success')
                # Execute mcast task
                for gem in gem_ports:
                    self.log.debug("checking-multicast-service-for-gem ", gem=gem)
                    if gem.mcast is True:
                        self.log.info("found-multicast-service-for-gem ", gem=gem, uni_id=uni_id, tp_id=tp_id)
                        reactor.callInThread(self.start_multicast_service, uni_id, tp_path)
                        self.log.debug("started_multicast_service-successfully", tconts=tconts, gems=gem_ports)
                        break

            @inlineCallbacks
            def failure(_reason):
                self.log.warn('tech-profile-config-failure-retrying', uni_id=uni_id, tp_id=tp_id,
                              _reason=_reason)
                if tp_id in self._tp_state_map_per_uni[uni_id]:
                    self._tp_state_map_per_uni[uni_id][tp_id].tp_task_ref = None
                # Random backoff to avoid synchronized retries across ONUs.
                retry = random.randint(1, 5)
                reactor.callLater(retry, self.load_and_configure_tech_profile,
                                  uni_id, tp_path)
                yield self.core_proxy.device_reason_update(self.device_id,
                                                           'tech-profile-config-download-failure-retrying')

            self.log.info('downloading-tech-profile-configuration', uni_id=uni_id, tp_id=tp_id)
            self.log.debug("tconts-gems-to-install", tconts=tconts, gem_ports=gem_ports)

            self.log.debug("current-cached-tconts", tconts=list(self.pon_port.tconts.values()))
            self.log.debug("current-cached-gem-ports", gem_ports=list(self.pon_port.gem_ports.values()))

            self._tp_state_map_per_uni[uni_id][tp_id].tp_task_ref = \
                BrcmTpSetupTask(self.omci_agent, self, uni_id, tconts, gem_ports, tp_id)
            self._deferred = \
                self._onu_omci_device.task_runner.queue_task(self._tp_state_map_per_uni[uni_id][tp_id].
                                                             tp_task_ref)
            self._deferred.addCallbacks(success, failure)
        except Exception as e:
            self.log.exception("error-loading-tech-profile", e=e)
    else:
        # There is an active tech-profile task ongoing on this UNI port. So, reschedule this task
        # after a short interval
        for tpid in self._tp_state_map_per_uni[uni_id]:
            if self._tp_state_map_per_uni[uni_id][tpid].tp_task_ref is not None:
                self.log.debug("active-tp-tasks-in-progress-for-uni--scheduling-this-task-for-later",
                               uni_id=uni_id, tp_id=tpid)
                retry = random.randint(1, 5)
                reactor.callLater(retry, self.load_and_configure_tech_profile,
                                  uni_id, tp_path)
                return

        self.log.info("tech-profile-config-already-done")

        # Could be a case where TP exists but new gem-ports are getting added dynamically
        tpstored = yield self.tp_kv_client.get(tp_path)
        if tpstored is None:
            # Previously an unguarded decode() crashed here on a failed read.
            self.log.error("failed-to-load-tp-instance", tp_path=tp_path)
            return
        tpstring = tpstored.decode('ascii')
        tp = json.loads(tpstring)
        upstream_gems = []
        downstream_gems = []
        # Find out the new Gem ports that are getting added afresh.
        for gp in tp['upstream_gem_port_attribute_list']:
            if self.pon_port.gem_port(gp['gemport_id'], "upstream"):
                # gem port already exists
                continue
            upstream_gems.append(gp)
        for gp in tp['downstream_gem_port_attribute_list']:
            if self.pon_port.gem_port(gp['gemport_id'], "downstream"):
                # gem port already exists
                continue
            downstream_gems.append(gp)

        us_scheduler = tp['us_scheduler']
        alloc_id = us_scheduler['alloc_id']

        if len(upstream_gems) > 0 or len(downstream_gems) > 0:
            self.log.info("installing-new-gem-ports", upstream_gems=upstream_gems, downstream_gems=downstream_gems)
            new_upstream_gems = self._create_gemports(uni_id, upstream_gems, alloc_id, "UPSTREAM")
            new_downstream_gems = self._create_gemports(uni_id, downstream_gems, alloc_id, "DOWNSTREAM")

            new_gems = []
            new_gems.extend(new_upstream_gems)
            new_gems.extend(new_downstream_gems)

            def success(_results):
                self.log.info("new-gem-ports-successfully-installed", result=_results)
                if tp_id in self._tp_state_map_per_uni[uni_id]:
                    self._tp_state_map_per_uni[uni_id][tp_id].tp_task_ref = None
                # Execute mcast task.  Iterate the created OnuGemPort objects
                # (which expose .mcast); the raw attribute dicts collected in
                # downstream_gems have no such attribute and raised here.
                for gem in new_downstream_gems:
                    self.log.debug("checking-multicast-service-for-gem ", gem=gem)
                    if gem.mcast:
                        self.log.info("found-multicast-service-for-gem ", gem=gem, uni_id=uni_id, tp_id=tp_id)
                        reactor.callInThread(self.start_multicast_service, uni_id, tp_path)
                        self.log.debug("started_multicast_service-successfully", gem=gem)
                        break

            def failure(_reason):
                self.log.warn('new-gem-port-install-failed--retrying',
                              _reason=_reason)
                if tp_id in self._tp_state_map_per_uni[uni_id]:
                    self._tp_state_map_per_uni[uni_id][tp_id].tp_task_ref = None
                # Remove gem ports from cache. We will re-add them during the retry
                for gp in new_gems:
                    self.pon_port.remove_gem_id(gp.gem_id, gp.direction, False)
                retry = random.randint(1, 5)
                reactor.callLater(retry, self.load_and_configure_tech_profile,
                                  uni_id, tp_path)

            if self._pon.get_tcont(alloc_id) is None:
                self.log.error("no-valid-tcont-reference-for-tp-id--not-installing-gem", alloc_id=alloc_id, tp_id=tp_id)
                return

            self._tp_state_map_per_uni[uni_id][tp_id].tp_task_ref = \
                BrcmTpSetupTask(self.omci_agent, self, uni_id, [self._pon.get_tcont(alloc_id)], new_gems, tp_id)
            self._deferred = \
                self._onu_omci_device.task_runner.queue_task(self._tp_state_map_per_uni[uni_id][tp_id].
                                                             tp_task_ref)
            self._deferred.addCallbacks(success, failure)
@inlineCallbacks
def start_multicast_service(self, uni_id, tp_path, retry_count=0):
    """Configure the multicast service (mcast GEM + ACL tables) for a UNI.

    Requires the mcast vlan to have been learned from a flow first
    (``self._set_vlan[uni_id][tp_id]``); if it has not been, this method
    polls itself every 0.5 s, giving up after 30 attempts.

    :param uni_id: UNI instance id
    :param tp_path: KV-store path of the tech-profile instance
    :param retry_count: internal poll counter — callers should not set it
    """
    self.log.debug("starting-multicast-service", uni_id=uni_id, tp_path=tp_path)
    tp_id = self.extract_tp_id_from_path(tp_path)
    if uni_id in self._set_vlan and tp_id in self._set_vlan[uni_id]:
        try:
            # .get() so a missing cache entry falls through to the KV load
            # instead of raising KeyError into the broad except below.
            tp = self._tp.get(tp_id)
            if tp is None:
                tpstored = yield self.tp_kv_client.get(tp_path)
                tpstring = tpstored.decode('ascii')
                tp = json.loads(tpstring)
                if tp is None:
                    self.log.error("cannot-find-tp-to-start-multicast-service", uni_id=uni_id, tp_path=tp_path)
                    return
                else:
                    self._tp[tp_id] = tp

            # Pass the vlan as a keyword: a bare positional arg is treated
            # as a %-format argument by the structured logger.
            self.log.debug("mcast-vlan-learned-before", mcast_vlan=self._set_vlan[uni_id][tp_id],
                           uni_id=uni_id, tp_id=tp_id)

            def success(_results):
                self.log.debug('multicast-success', uni_id=uni_id)
                self._multicast_task = None

            def failure(_reason):
                self.log.warn('multicast-failure', _reason=_reason)
                retry = random.randint(1, 5)
                reactor.callLater(retry, self.start_multicast_service,
                                  uni_id, tp_path)

            self.log.debug('starting-multicast-task', mcast_vlan_id=self._set_vlan[uni_id][tp_id])
            downstream_gem_port_attribute_list = tp['downstream_gem_port_attribute_list']
            for i in range(len(downstream_gem_port_attribute_list)):
                if IS_MULTICAST in downstream_gem_port_attribute_list[i] and \
                        downstream_gem_port_attribute_list[i][IS_MULTICAST] == 'True':
                    dynamic_access_control_list_table = downstream_gem_port_attribute_list[i][
                        'dynamic_access_control_list'].split("-")
                    static_access_control_list_table = downstream_gem_port_attribute_list[i][
                        'static_access_control_list'].split("-")
                    multicast_gem_id = downstream_gem_port_attribute_list[i]['multicast_gem_id']

                    self._multicast_task = BrcmMcastTask(self.omci_agent, self, self.device_id, uni_id, tp_id,
                                                         self._set_vlan[uni_id][tp_id],
                                                         dynamic_access_control_list_table,
                                                         static_access_control_list_table, multicast_gem_id)
                    self._deferred = self._onu_omci_device.task_runner.queue_task(self._multicast_task)
                    self._deferred.addCallbacks(success, failure)
                    break
        except Exception as e:
            self.log.exception("error-loading-multicast", e=e)
    else:
        if retry_count < 30:
            # Was "retry_count = +1", which pinned the counter at 1 and made
            # this poll loop retry forever instead of capping at 30.
            retry_count += 1
            self.log.debug("going-to-wait-for-flow-to-learn-mcast-vlan", uni_id=uni_id, tp_id=tp_id,
                           retry=retry_count)
            reactor.callLater(0.5, self.start_multicast_service, uni_id, tp_path, retry_count)
        else:
            self.log.error("mcast-vlan-not-configured-yet-failing-mcast-service-conf", uni_id=uni_id, tp_id=tp_id,
                           retry=retry_count)
def _clear_alloc_id_gem_port_from_internal_cache(self, alloc_id=None, gem_port_id=None):
    """Remove the matching TCONT and/or GEM port entries from the
    pon_port's internal cache (without touching the device).

    :param alloc_id: alloc id of the TCONT to drop, or None to skip
    :param gem_port_id: gem id of the GEM port to drop, or None to skip
    :return: (tcont, gem_port) — the removed objects, either may be None
        when no match was found (or the id was not supplied)
    """
    removed_tcont = None
    removed_gem = None

    if alloc_id is not None:
        self.log.debug("current-cached-tconts", tconts=list(self.pon_port.tconts.values()))
        for candidate in list(self.pon_port.tconts.values()):
            if candidate.alloc_id != alloc_id:
                continue
            self.log.info("removing-tcont-from-internal-cache",
                          alloc_id=alloc_id)
            removed_tcont = candidate
            self.pon_port.remove_tcont(candidate.alloc_id, False)

    if gem_port_id is not None:
        self.log.debug("current-cached-gem-ports", gem_ports=list(self.pon_port.gem_ports.values()))
        for candidate in list(self.pon_port.gem_ports.values()):
            if candidate.gem_id != gem_port_id:
                continue
            self.log.info("removing-gem-from-internal-cache",
                          gem_port_id=gem_port_id, direction=candidate.direction)
            removed_gem = candidate
            self.pon_port.remove_gem_id(candidate.gem_id, candidate.direction, False)

    return removed_tcont, removed_gem
def _tcont_delete_complete(self, uni_id, tp_id):
    """Finalize tech-profile teardown for (uni_id, tp_id) once every
    queued PON resource delete (gem ports and tcont) has completed;
    otherwise re-poll after a random 1-5 s delay."""
    tp_state = self._tp_state_map_per_uni[uni_id][tp_id]
    if not tp_state.is_all_pon_resource_delete_complete():
        self.log.info("waiting-for-gem-port-delete-to-complete-before-clearing-tp-states")
        reactor.callLater(random.randint(1, 5), self._tcont_delete_complete, uni_id, tp_id)
        return

    self.log.info("tp-delete-complete")
    # Clear TP states, then drop the per-uni map entry entirely.
    tp_state.reset_tp_state()
    del self._tp_state_map_per_uni[uni_id][tp_id]
def delete_tech_profile(self, uni_id, tp_path, tcont=None, gem_port=None):
    """Queue an OMCI task that removes a TCONT and/or GEM port belonging
    to the tech profile at ``tp_path``.

    Called once per resource (a tcont OR a gem_port).  On success the
    resource is marked delete-complete in the per-uni TP state; deleting
    the TCONT marks the whole tech-profile teardown and triggers
    ``_tcont_delete_complete``.  On failure the delete is retried after a
    random 1-5 s delay.  All errors are logged, never raised.

    :param uni_id: UNI instance id
    :param tp_path: KV-store path of the tech-profile instance
    :param tcont: OnuTCont to delete, or None
    :param gem_port: OnuGemPort to delete, or None
    """
    alloc_id = None
    gem_port_id = None
    try:
        tp_table_id = self.extract_tp_id_from_path(tp_path)
        # Extract the current set of TCONT and GEM Ports from the Handler's pon_port that are
        # relevant to this task's UNI. It won't change. But, the underlying pon_port may change
        # due to additional tasks on different UNIs. So, we cannot use the pon_port after
        # this initializer
        alloc_id = tcont.alloc_id if tcont is not None else None
        gem_port_id = gem_port.gem_id if gem_port is not None else None
        self._clear_alloc_id_gem_port_from_internal_cache(alloc_id, gem_port_id)

        if tp_table_id not in self._tp_state_map_per_uni[uni_id]:
            self.log.warn("tp-id-is-not-present", uni_id=uni_id, tp_id=tp_table_id)
            return

        if self._tp_state_map_per_uni[uni_id][tp_table_id].tp_setup_done is not True:
            self.log.error("tp-download-is-not-done-in-order-to-process-tp-delete", uni_id=uni_id,
                           tp_id=tp_table_id)
            return

        if alloc_id is None and gem_port_id is None:
            self.log.error("alloc-id-and-gem-port-id-are-none", uni_id=uni_id, tp_id=tp_table_id)
            return

        @inlineCallbacks
        def success(_results):
            if gem_port_id:
                self.log.info("gem-port-delete-done-successfully")
                self._tp_state_map_per_uni[uni_id][tp_table_id].pon_resource_delete_complete(TpState.GEM_ID,
                                                                                             gem_port_id)
            if alloc_id:
                self.log.info("tcont-delete-done-successfully")
                # The deletion of TCONT marks the complete deletion of tech-profile
                self._tp_state_map_per_uni[uni_id][tp_table_id].pon_resource_delete_complete(TpState.ALLOC_ID,
                                                                                             alloc_id)
                self._tcont_delete_complete(uni_id, tp_table_id)

            # TODO: There could be multiple TP on the UNI, and also the ONU.
            # TODO: But the below reason updates for the whole device.
            yield self.core_proxy.device_reason_update(self.device_id, 'tech-profile-config-delete-success')

        @inlineCallbacks
        def failure(_reason):
            self.log.warn('tech-profile-delete-failure-retrying',
                          _reason=_reason)
            retry = random.randint(1, 5)
            # Re-fetch the queued resources so the retry carries the same objects.
            _tcont = self._tp_state_map_per_uni[uni_id][tp_table_id].get_queued_resource_for_delete(TpState.ALLOC_ID, alloc_id)
            _gem_port = self._tp_state_map_per_uni[uni_id][tp_table_id].get_queued_resource_for_delete(TpState.GEM_ID, gem_port_id)
            reactor.callLater(retry, self.delete_tech_profile, uni_id, tp_path, _tcont, _gem_port)
            yield self.core_proxy.device_reason_update(self.device_id,
                                                       'tech-profile-config-delete-failure-retrying')

        self.log.info('deleting-tech-profile-configuration')

        if tcont is None and gem_port is None:
            if alloc_id is not None:
                self.log.error("tcont-info-corresponding-to-alloc-id-not-found", alloc_id=alloc_id)
            if gem_port_id is not None:
                self.log.error("gem-port-info-corresponding-to-gem-port-id-not-found", gem_port_id=gem_port_id)
            return

        self._tp_state_map_per_uni[uni_id][tp_table_id].tp_task_ref = \
            BrcmTpDeleteTask(self.omci_agent, self, uni_id, tp_table_id,
                             tcont=tcont, gem_port=gem_port)
        self._deferred = \
            self._onu_omci_device.task_runner.queue_task(self._tp_state_map_per_uni[uni_id][tp_table_id].
                                                         tp_task_ref)
        self._deferred.addCallbacks(success, failure)
    except Exception as e:
        self.log.exception("failed-to-delete-tp",
                           e=e, uni_id=uni_id, tp_path=tp_path,
                           alloc_id=alloc_id, gem_port_id=gem_port_id)
def update_pm_config(self, device, pm_configs):
    """Forward updated performance-monitoring configuration to this ONU's
    metrics collector.

    :param device: voltha Device this config belongs to (unused here)
    :param pm_configs: new PM configuration, handed to ``self._pm_metrics.update``
    """
    # TODO: This has not been tested
    self.log.info('update_pm_config', pm_configs=pm_configs)
    self._pm_metrics.update(pm_configs)
def remove_onu_flows(self, device, flows):
    """Remove previously installed upstream flows from the ONU via OMCI.

    Downstream flows are skipped (the extended-vlan-tagging rule covers
    them) and DHCP trap-to-host flows are discarded.  For each remaining
    upstream flow the matching vlan-filter (EVTO) rule is removed and,
    when a tech profile was set up for the flow's TP id, a pending TP
    delete is flagged.  Errors per flow are logged, not raised.

    :param device: voltha Device the flows belong to
    :param flows: list of OpenFlow flow entries to remove
    """
    self.log.debug('remove-onu-flows')

    # no point in removing omci flows if the device isnt reachable
    if device.connect_status != ConnectStatus.REACHABLE or \
            device.admin_state != AdminState.ENABLED:
        self.log.warn("device-disabled-or-offline-skipping-remove-flow",
                      admin=device.admin_state, connect=device.connect_status)
        return

    # Hoisted out of the per-flow loop (they only depend on the handler,
    # not the flow) — also matches how add_onu_flows defines them.
    def is_downstream(port):
        return port == self._pon_port_number

    def is_upstream(port):
        return not is_downstream(port)

    for flow in flows:
        # if incoming flow contains cookie, then remove from ONU
        if flow.cookie:
            self.log.debug("remove-flow", device_id=device.id, flow=flow)

            try:
                _in_port = fd.get_in_port(flow)
                assert _in_port is not None
                _out_port = fd.get_out_port(flow)  # may be None

                if is_downstream(_in_port):
                    self.log.debug('downstream-flow-no-need-to-remove', in_port=_in_port, out_port=_out_port,
                                   device_id=device.id)
                    # extended vlan tagging operation will handle it
                    continue
                elif is_upstream(_in_port):
                    self.log.debug('upstream-flow', in_port=_in_port, out_port=_out_port)
                    if fd.is_dhcp_flow(flow):
                        self.log.debug('The dhcp trap-to-host flow will be discarded', device_id=device.id)
                        # NOTE(review): this returns from the whole handler,
                        # skipping any remaining flows in this batch —
                        # confirm that is intended (vs 'continue').
                        return

                    _match_vlan_vid = None
                    for field in fd.get_ofb_fields(flow):
                        if field.type == fd.VLAN_VID:
                            if field.vlan_vid == RESERVED_TRANSPARENT_VLAN and field.vlan_vid_mask == RESERVED_TRANSPARENT_VLAN:
                                _match_vlan_vid = RESERVED_TRANSPARENT_VLAN
                            else:
                                _match_vlan_vid = field.vlan_vid & 0xfff
                            self.log.debug('field-type-vlan-vid',
                                           vlan=_match_vlan_vid)

                    _set_vlan_vid = None
                    _set_vlan_pcp = None
                    # Retrieve the VLAN_VID that needs to be removed from the EVTO rule on the ONU.
                    for action in fd.get_actions(flow):
                        if action.type == fd.SET_FIELD:
                            _field = action.set_field.field.ofb_field
                            assert (action.set_field.field.oxm_class ==
                                    OFPXMC_OPENFLOW_BASIC)
                            if _field.type == fd.VLAN_VID:
                                _set_vlan_vid = _field.vlan_vid & 0xfff
                                self.log.debug('vlan-vid-to-remove',
                                               _vlan_vid=_set_vlan_vid, in_port=_in_port)
                            elif _field.type == fd.VLAN_PCP:
                                _set_vlan_pcp = _field.vlan_pcp
                                self.log.debug('set-field-type-vlan-pcp',
                                               vlan_pcp=_set_vlan_pcp)

                    uni_port = self.uni_port(_in_port)
                    uni_id = _in_port & 0xF
                else:
                    raise Exception('port should be 1 or 2 by our convention')

                self.log.debug('flow-ports', in_port=_in_port, out_port=_out_port, uni_port=str(uni_port))

                tp_id = self.get_tp_id_in_flow(flow)
                # The vlan filter remove should be followed by a TP deleted for that TP ID.
                # Use this information to re-schedule any vlan filter add tasks for the same TP ID again.
                # First check if the TP download was done, before we access that TP delete is necessary
                if tp_id in self._tp_state_map_per_uni[uni_id] and \
                        self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done is True:
                    self._tp_state_map_per_uni[uni_id][tp_id].is_tp_delete_pending = True

                # Deleting flow from ONU.
                self._remove_vlan_filter_task(device, uni_id, uni_port=uni_port,
                                              _set_vlan_pcp=_set_vlan_pcp,
                                              _set_vlan_vid=_set_vlan_vid,
                                              match_vlan=_match_vlan_vid,
                                              tp_id=tp_id)
                # TODO:Delete TD task.
            except Exception as e:
                self.log.exception('failed-to-remove-flow', e=e)
def add_onu_flows(self, device, flows):
    """Install upstream flows on the ONU via OMCI vlan-filter tasks.

    For each cookie-bearing upstream flow: parse its OpenFlow match
    fields and actions, learn the set-vlan for the flow's TP id into
    ``self._set_vlan`` (used later by the multicast service), and queue
    an add-vlan-filter task.  Downstream flows are skipped (their vlan is
    copied from the upstream flow), as are IGMP (ip_proto == 2) trap
    flows.  Per-flow errors are logged, never raised.

    :param device: voltha Device the flows belong to
    :param flows: list of OpenFlow flow entries to install
    """
    self.log.debug('add-onu-flows')

    # no point in pushing omci flows if the device isnt reachable
    if device.connect_status != ConnectStatus.REACHABLE or \
            device.admin_state != AdminState.ENABLED:
        self.log.warn("device-disabled-or-offline-skipping-flow-update",
                      admin=device.admin_state, connect=device.connect_status)
        return

    def is_downstream(port):
        return port == self._pon_port_number

    def is_upstream(port):
        return not is_downstream(port)

    for flow in flows:
        # if incoming flow contains cookie, then add to ONU
        if flow.cookie:
            # Per-flow parse state; reset for every flow.
            _type = None
            _port = None
            _vlan_vid = None
            _udp_dst = None
            _udp_src = None
            _ipv4_dst = None
            _ipv4_src = None
            _metadata = None
            _output = None
            _push_tpid = None
            _field = None
            _set_vlan_vid = None
            _set_vlan_pcp = None
            _tunnel_id = None
            _proto = -1
            self.log.debug("add-flow", device_id=device.id, flow=flow)

            try:
                _in_port = fd.get_in_port(flow)
                assert _in_port is not None
                _out_port = fd.get_out_port(flow)  # may be None
                tp_id = self.get_tp_id_in_flow(flow)
                if is_downstream(_in_port):
                    self.log.debug('downstream-flow', in_port=_in_port, out_port=_out_port)
                    # NOTE: We don't care downstream flow because we will copy vlan_id to upstream flow
                    # uni_port = self.uni_port(_out_port)
                    # uni_id = _out_port & 0xF
                    continue
                elif is_upstream(_in_port):
                    self.log.debug('upstream-flow', in_port=_in_port, out_port=_out_port)
                    uni_port = self.uni_port(_in_port)
                    uni_id = _in_port & 0xF
                else:
                    raise Exception('port should be 1 or 2 by our convention')

                self.log.debug('flow-ports', in_port=_in_port, out_port=_out_port, uni_port=str(uni_port))

                # Pass 1: match fields.
                for field in fd.get_ofb_fields(flow):
                    if field.type == fd.ETH_TYPE:
                        _type = field.eth_type
                        self.log.debug('field-type-eth-type',
                                       eth_type=_type)
                    elif field.type == fd.IP_PROTO:
                        _proto = field.ip_proto
                        if _proto == 2:
                            # Workaround for TT workflow - avoids installing invalid EVTO rule
                            self.log.debug("igmp-trap-flow")
                            break
                        self.log.debug('field-type-ip-proto',
                                       ip_proto=_proto)
                    elif field.type == fd.IN_PORT:
                        _port = field.port
                        self.log.debug('field-type-in-port',
                                       in_port=_port)
                    elif field.type == fd.TUNNEL_ID:
                        self.log.debug('field-type-tunnel-id')
                    elif field.type == fd.VLAN_VID:
                        if field.vlan_vid == RESERVED_TRANSPARENT_VLAN and field.vlan_vid_mask == RESERVED_TRANSPARENT_VLAN:
                            _vlan_vid = RESERVED_TRANSPARENT_VLAN
                        else:
                            _vlan_vid = field.vlan_vid & 0xfff
                        self.log.debug('field-type-vlan-vid',
                                       vlan=_vlan_vid)
                    elif field.type == fd.VLAN_PCP:
                        _vlan_pcp = field.vlan_pcp
                        self.log.debug('field-type-vlan-pcp',
                                       pcp=_vlan_pcp)
                    elif field.type == fd.UDP_DST:
                        _udp_dst = field.udp_dst
                        self.log.debug('field-type-udp-dst',
                                       udp_dst=_udp_dst)
                    elif field.type == fd.UDP_SRC:
                        _udp_src = field.udp_src
                        self.log.debug('field-type-udp-src',
                                       udp_src=_udp_src)
                    elif field.type == fd.IPV4_DST:
                        _ipv4_dst = field.ipv4_dst
                        self.log.debug('field-type-ipv4-dst',
                                       ipv4_dst=_ipv4_dst)
                    elif field.type == fd.IPV4_SRC:
                        _ipv4_src = field.ipv4_src
                        self.log.debug('field-type-ipv4-src',
                                       ipv4_dst=_ipv4_src)
                    elif field.type == fd.METADATA:
                        _metadata = field.table_metadata
                        self.log.debug('field-type-metadata',
                                       metadata=_metadata)
                    else:
                        raise NotImplementedError('field.type={}'.format(
                            field.type))

                if _proto == 2:
                    # Workaround for TT workflow - avoids installing invalid EVTO rule
                    self.log.warn("skipping-igmp-trap-flow")
                    continue

                # Pass 2: actions.
                for action in fd.get_actions(flow):
                    if action.type == fd.OUTPUT:
                        _output = action.output.port
                        self.log.debug('action-type-output',
                                       output=_output, in_port=_in_port)
                    elif action.type == fd.POP_VLAN:
                        self.log.debug('action-type-pop-vlan',
                                       in_port=_in_port)
                    elif action.type == fd.PUSH_VLAN:
                        _push_tpid = action.push.ethertype
                        self.log.debug('action-type-push-vlan',
                                       push_tpid=_push_tpid, in_port=_in_port)
                        if action.push.ethertype != 0x8100:
                            self.log.error('unhandled-tpid',
                                           ethertype=action.push.ethertype)
                    elif action.type == fd.SET_FIELD:
                        _field = action.set_field.field.ofb_field
                        assert (action.set_field.field.oxm_class ==
                                OFPXMC_OPENFLOW_BASIC)
                        self.log.debug('action-type-set-field',
                                       field=_field, in_port=_in_port)
                        if _field.type == fd.VLAN_VID:
                            _set_vlan_vid = _field.vlan_vid & 0xfff
                            self.log.debug('set-field-type-vlan-vid',
                                           vlan_vid=_set_vlan_vid)
                        elif _field.type == fd.VLAN_PCP:
                            _set_vlan_pcp = _field.vlan_pcp
                            self.log.debug('set-field-type-vlan-pcp',
                                           vlan_pcp=_set_vlan_pcp)
                        else:
                            self.log.error('unsupported-action-set-field-type',
                                           field_type=_field.type)
                    else:
                        self.log.error('unsupported-action-type',
                                       action_type=action.type, in_port=_in_port)

                # Remember the learned set-vlan per (uni, tp) for mcast setup.
                if self._set_vlan is not None:
                    if uni_id not in self._set_vlan:
                        self._set_vlan[uni_id] = dict()
                    self._set_vlan[uni_id][tp_id] = _set_vlan_vid
                    self.log.debug("set_vlan_id-for-tp", _set_vlan_vid=_set_vlan_vid, tp_id=tp_id)

                # OMCI set vlan task can only filter and set on vlan header attributes. Any other openflow
                # supported match and action criteria cannot be handled by omci and must be ignored.
                if (_set_vlan_vid is None or _set_vlan_vid == 0) and _vlan_vid != RESERVED_TRANSPARENT_VLAN:
                    self.log.warn('ignoring-flow-that-does-not-set-vlanid', set_vlan_vid=_set_vlan_vid)
                elif (_set_vlan_vid is None or _set_vlan_vid == 0) and _vlan_vid == RESERVED_TRANSPARENT_VLAN:
                    self.log.info('set-vlanid-any', uni_id=uni_id, uni_port=uni_port,
                                  _set_vlan_vid=_vlan_vid,
                                  _set_vlan_pcp=_set_vlan_pcp, match_vlan=_vlan_vid,
                                  tp_id=tp_id)
                    self._add_vlan_filter_task(device, uni_id=uni_id, uni_port=uni_port,
                                               _set_vlan_vid=_vlan_vid,
                                               _set_vlan_pcp=_set_vlan_pcp, match_vlan=_vlan_vid,
                                               tp_id=tp_id)
                else:
                    self.log.info('set-vlanid', uni_id=uni_id, uni_port=uni_port, match_vlan=_vlan_vid,
                                  set_vlan_vid=_set_vlan_vid, _set_vlan_pcp=_set_vlan_pcp, ethType=_type)
                    self._add_vlan_filter_task(device, uni_id=uni_id, uni_port=uni_port,
                                               _set_vlan_vid=_set_vlan_vid,
                                               _set_vlan_pcp=_set_vlan_pcp, match_vlan=_vlan_vid,
                                               tp_id=tp_id)
            except Exception as e:
                self.log.exception('failed-to-install-flow', e=e, flow=flow)
# Calling this assumes the onu is active/ready and had at least an initial mib downloaded. This gets called from
# flow decomposition that ultimately comes from onos
    def update_flow_table(self, device, flows):
        """Translate openflow entries pushed by the core into OMCI vlan filter tasks.

        Called from flow decomposition that ultimately comes from onos. Calling
        this assumes the onu is active/ready and had at least an initial mib
        downloaded. Only vlan match/set criteria are actionable over OMCI; other
        openflow match/action types are logged and ignored.

        :param device: voltha Device proto for this onu (used for reachability checks)
        :param flows: iterable of openflow flow entries to apply
        """
        self.log.debug('update-flow-table', device_id=device.id, serial_number=device.serial_number)
        #
        # We need to proxy through the OLT to get to the ONU
        # Configuration from here should be using OMCI
        #
        # self.log.info('bulk-flow-update', device_id=device.id, flows=flows)
        # no point in pushing omci flows if the device isnt reachable
        if device.connect_status != ConnectStatus.REACHABLE or \
                device.admin_state != AdminState.ENABLED:
            self.log.warn("device-disabled-or-offline-skipping-flow-update",
                          admin=device.admin_state, connect=device.connect_status)
            return

        def is_downstream(port):
            # the pon-facing port number marks the downstream direction by convention
            return port == self._pon_port_number

        def is_upstream(port):
            return not is_downstream(port)

        for flow in flows:
            # reset per-flow extraction state
            _type = None
            _port = None
            _vlan_vid = None
            _udp_dst = None
            _udp_src = None
            _ipv4_dst = None
            _ipv4_src = None
            _metadata = None
            _output = None
            _push_tpid = None
            _field = None
            _set_vlan_vid = None
            _set_vlan_pcp = None
            _tunnel_id = None
            try:
                write_metadata = fd.get_write_metadata(flow)
                if write_metadata is None:
                    self.log.error("do-not-process-flow-without-write-metadata")
                    return
                # extract tp id from flow
                tp_id = self.get_tp_id_in_flow(flow)
                self.log.debug("tp-id-in-flow", tp_id=tp_id)
                _in_port = fd.get_in_port(flow)
                assert _in_port is not None
                _out_port = fd.get_out_port(flow)  # may be None
                # the uni id is encoded in the low nibble of the logical port number
                if is_downstream(_in_port):
                    self.log.debug('downstream-flow', in_port=_in_port, out_port=_out_port)
                    uni_port = self.uni_port(_out_port)
                    uni_id = _out_port & 0xF
                elif is_upstream(_in_port):
                    self.log.debug('upstream-flow', in_port=_in_port, out_port=_out_port)
                    uni_port = self.uni_port(_in_port)
                    uni_id = _in_port & 0xF
                else:
                    raise Exception('port should be 1 or 2 by our convention')
                self.log.debug('flow-ports', in_port=_in_port, out_port=_out_port, uni_port=str(uni_port))
                # walk the match fields, remembering the ones we may act on
                for field in fd.get_ofb_fields(flow):
                    if field.type == fd.ETH_TYPE:
                        _type = field.eth_type
                        self.log.debug('field-type-eth-type',
                                       eth_type=_type)
                    elif field.type == fd.IP_PROTO:
                        _proto = field.ip_proto
                        self.log.debug('field-type-ip-proto',
                                       ip_proto=_proto)
                    elif field.type == fd.IN_PORT:
                        _port = field.port
                        self.log.debug('field-type-in-port',
                                       in_port=_port)
                    elif field.type == fd.VLAN_VID:
                        # vid+mask both 0x1000 means "match any vlan" (transparent)
                        if field.vlan_vid == RESERVED_TRANSPARENT_VLAN and field.vlan_vid_mask == RESERVED_TRANSPARENT_VLAN:
                            _vlan_vid = RESERVED_TRANSPARENT_VLAN
                        else:
                            _vlan_vid = field.vlan_vid & 0xfff
                        self.log.debug('field-type-vlan-vid',
                                       vlan=_vlan_vid)
                    elif field.type == fd.VLAN_PCP:
                        _vlan_pcp = field.vlan_pcp
                        self.log.debug('field-type-vlan-pcp',
                                       pcp=_vlan_pcp)
                    elif field.type == fd.UDP_DST:
                        _udp_dst = field.udp_dst
                        self.log.debug('field-type-udp-dst',
                                       udp_dst=_udp_dst)
                    elif field.type == fd.UDP_SRC:
                        _udp_src = field.udp_src
                        self.log.debug('field-type-udp-src',
                                       udp_src=_udp_src)
                    elif field.type == fd.IPV4_DST:
                        _ipv4_dst = field.ipv4_dst
                        self.log.debug('field-type-ipv4-dst',
                                       ipv4_dst=_ipv4_dst)
                    elif field.type == fd.IPV4_SRC:
                        _ipv4_src = field.ipv4_src
                        self.log.debug('field-type-ipv4-src',
                                       ipv4_dst=_ipv4_src)
                    elif field.type == fd.METADATA:
                        _metadata = field.table_metadata
                        self.log.debug('field-type-metadata',
                                       metadata=_metadata)
                    elif field.type == fd.TUNNEL_ID:
                        _tunnel_id = field.tunnel_id
                        self.log.debug('field-type-tunnel-id',
                                       tunnel_id=_tunnel_id)
                    else:
                        raise NotImplementedError('field.type={}'.format(
                            field.type))
                # walk the actions; only vlan set-field actions are actionable over OMCI
                for action in fd.get_actions(flow):
                    if action.type == fd.OUTPUT:
                        _output = action.output.port
                        self.log.debug('action-type-output',
                                       output=_output, in_port=_in_port)
                    elif action.type == fd.POP_VLAN:
                        self.log.debug('action-type-pop-vlan',
                                       in_port=_in_port)
                    elif action.type == fd.PUSH_VLAN:
                        _push_tpid = action.push.ethertype
                        self.log.debug('action-type-push-vlan',
                                       push_tpid=_push_tpid, in_port=_in_port)
                        if action.push.ethertype != 0x8100:
                            self.log.error('unhandled-tpid',
                                           ethertype=action.push.ethertype)
                    elif action.type == fd.SET_FIELD:
                        _field = action.set_field.field.ofb_field
                        assert (action.set_field.field.oxm_class ==
                                OFPXMC_OPENFLOW_BASIC)
                        self.log.debug('action-type-set-field',
                                       field=_field, in_port=_in_port)
                        if _field.type == fd.VLAN_VID:
                            _set_vlan_vid = _field.vlan_vid & 0xfff
                            self.log.debug('set-field-type-vlan-vid',
                                           vlan_vid=_set_vlan_vid)
                        elif _field.type == fd.VLAN_PCP:
                            _set_vlan_pcp = _field.vlan_pcp
                            self.log.debug('set-field-type-vlan-pcp',
                                           vlan_pcp=_set_vlan_pcp)
                        else:
                            self.log.error('unsupported-action-set-field-type',
                                           field_type=_field.type)
                    else:
                        self.log.error('unsupported-action-type',
                                       action_type=action.type, in_port=_in_port)
                # cache the set-vlan per uni/tp so it can be replayed later (e.g. on tp re-setup)
                if self._set_vlan is not None:
                    if uni_id not in self._set_vlan:
                        self._set_vlan[uni_id] = dict()
                    self._set_vlan[uni_id][tp_id] = _set_vlan_vid
                    self.log.debug("set_vlan_id-for-tp", _set_vlan_vid=_set_vlan_vid, tp_id=tp_id)
                # OMCI set vlan task can only filter and set on vlan header attributes. Any other openflow
                # supported match and action criteria cannot be handled by omci and must be ignored.
                if (_set_vlan_vid is None or _set_vlan_vid == 0) and _vlan_vid != RESERVED_TRANSPARENT_VLAN:
                    self.log.warn('ignoring-flow-that-does-not-set-vlanid', set_vlan_vid=_set_vlan_vid)
                elif (_set_vlan_vid is None or _set_vlan_vid == 0) and _vlan_vid == RESERVED_TRANSPARENT_VLAN:
                    # transparent vlan: filter and set both use the matched (any) vlan
                    self.log.info('set-vlanid-any', uni_id=uni_id, uni_port=uni_port,
                                  _set_vlan_vid=_vlan_vid,
                                  _set_vlan_pcp=_set_vlan_pcp, match_vlan=_vlan_vid,
                                  tp_id=tp_id)
                    self._add_vlan_filter_task(device, uni_id=uni_id, uni_port=uni_port,
                                               _set_vlan_vid=_vlan_vid,
                                               _set_vlan_pcp=_set_vlan_pcp, match_vlan=_vlan_vid,
                                               tp_id=tp_id)
                else:
                    self.log.info('set-vlanid', uni_id=uni_id, uni_port=uni_port, match_vlan=_vlan_vid,
                                  set_vlan_vid=_set_vlan_vid, _set_vlan_pcp=_set_vlan_pcp, ethType=_type)
                    self._add_vlan_filter_task(device, uni_id=uni_id, uni_port=uni_port,
                                               _set_vlan_vid=_set_vlan_vid,
                                               _set_vlan_pcp=_set_vlan_pcp, match_vlan=_vlan_vid,
                                               tp_id=tp_id)
            except Exception as e:
                # best effort per flow: log and continue with the next one
                self.log.exception('failed-to-install-flow', e=e, flow=flow)
    def _add_vlan_filter_task(self, device, uni_id, uni_port=None, match_vlan=0,
                              _set_vlan_vid=None, _set_vlan_pcp=8, tp_id=0):
        """Queue a BrcmVlanFilterTask that installs vlan tagging for a uni/tp pair.

        If a tech profile delete is still pending for this tp the whole call is
        rescheduled; if the tp setup has not completed yet the request is cached
        in _queued_vlan_filter_task to be replayed once setup finishes. On task
        failure the call retries itself after a random 1-5s backoff.

        :param device: voltha Device proto for this onu
        :param uni_id: uni port index the filter applies to
        :param uni_port: UniPort object (must not be None)
        :param match_vlan: vlan id to match on
        :param _set_vlan_vid: vlan id to set; None/0 means transparent handling upstream
        :param _set_vlan_pcp: vlan priority to set
        :param tp_id: tech profile id this filter belongs to
        """
        if tp_id in self._tp_state_map_per_uni[uni_id] and \
                self._tp_state_map_per_uni[uni_id][tp_id].is_tp_delete_pending is True:
            self.log.debug("pending-del-tp--scheduling-add-vlan-filter-task-for-later")
            # random backoff avoids thundering-herd retries across many onus
            retry = random.randint(1, 5)
            reactor.callLater(retry, self._add_vlan_filter_task, device, uni_id, uni_port, match_vlan,
                              _set_vlan_vid, _set_vlan_pcp, tp_id)
            return
        self.log.info('_adding_vlan_filter_task', uni_port=uni_port, uni_id=uni_id, tp_id=tp_id, match_vlan=match_vlan,
                      vlan=_set_vlan_vid, vlan_pcp=_set_vlan_pcp)
        assert uni_port is not None
        if tp_id in self._tp_state_map_per_uni[uni_id] and \
                self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done is True:
            @inlineCallbacks
            def success(_results):
                self.log.info('vlan-tagging-success', uni_port=uni_port, vlan=_set_vlan_vid, tp_id=tp_id,
                              set_vlan_pcp=_set_vlan_pcp)
                yield self.core_proxy.device_reason_update(self.device_id, 'omci-flows-pushed')

            @inlineCallbacks
            def failure(_reason):
                self.log.warn('vlan-tagging-failure', uni_port=uni_port, vlan=_set_vlan_vid, tp_id=tp_id)
                retry = random.randint(1, 5)
                reactor.callLater(retry,
                                  self._add_vlan_filter_task, device, uni_id, uni_port=uni_port,
                                  match_vlan=match_vlan, _set_vlan_vid=_set_vlan_vid,
                                  _set_vlan_pcp=_set_vlan_pcp, tp_id=tp_id)
                yield self.core_proxy.device_reason_update(self.device_id, 'omci-flows-failed-retrying')

            self.log.info('setting-vlan-tag', uni_port=uni_port, uni_id=uni_id, tp_id=tp_id, match_vlan=match_vlan,
                          vlan=_set_vlan_vid, vlan_pcp=_set_vlan_pcp)
            vlan_filter_add_task = BrcmVlanFilterTask(self.omci_agent, self, uni_port, _set_vlan_vid,
                                                      match_vlan, _set_vlan_pcp, add_tag=True,
                                                      tp_id=tp_id)
            self._deferred = self._onu_omci_device.task_runner.queue_task(vlan_filter_add_task)
            self._deferred.addCallbacks(success, failure)
        else:
            # tp not ready: cache the request so load_and_configure_tech_profile can replay it
            self.log.info('tp-service-specific-task-not-done-adding-request-to-local-cache',
                          uni_id=uni_id, tp_id=tp_id)
            if uni_id not in self._queued_vlan_filter_task:
                self._queued_vlan_filter_task[uni_id] = dict()
            if tp_id not in self._queued_vlan_filter_task[uni_id]:
                self._queued_vlan_filter_task[uni_id][tp_id] = []
            self._queued_vlan_filter_task[uni_id][tp_id].append({"device": device,
                                                                 "uni_id": uni_id,
                                                                 "uni_port": uni_port,
                                                                 "match_vlan": match_vlan,
                                                                 "set_vlan_vid": _set_vlan_vid,
                                                                 "set_vlan_pcp": _set_vlan_pcp,
                                                                 "tp_id": tp_id})
def get_tp_id_in_flow(self, flow):
flow_metadata = fd.get_metadata_from_write_metadata(flow)
tp_id = fd.get_tp_id_from_metadata(flow_metadata)
return tp_id
    def _remove_vlan_filter_task(self, device, uni_id, uni_port=None, match_vlan=0,
                                 _set_vlan_vid=None, _set_vlan_pcp=8, tp_id=0):
        """Queue a BrcmVlanFilterTask that removes vlan tagging for a uni/tp pair.

        On failure the removal is retried after a random 1-5s backoff and the
        device reason is updated so operators can see the retry state.

        :param device: voltha Device proto for this onu
        :param uni_id: uni port index the filter applies to
        :param uni_port: UniPort object (must not be None)
        :param match_vlan: vlan id the filter matched on
        :param _set_vlan_vid: vlan id that was being set
        :param _set_vlan_pcp: vlan priority that was being set
        :param tp_id: tech profile id this filter belongs to
        """
        assert uni_port is not None

        @inlineCallbacks
        def success(_results):
            self.log.info('vlan-untagging-success', _results=_results)
            yield self.core_proxy.device_reason_update(self.device_id, 'omci-flows-deleted')

        @inlineCallbacks
        def failure(_reason):
            self.log.warn('vlan-untagging-failure', _reason=_reason)
            yield self.core_proxy.device_reason_update(self.device_id, 'omci-flows-deletion-failed-retrying')
            # random backoff avoids synchronized retries across many onus
            retry = random.randint(1, 5)
            reactor.callLater(retry,
                              self._remove_vlan_filter_task, device, uni_id,
                              uni_port=uni_port, match_vlan=match_vlan, _set_vlan_vid=_set_vlan_vid,
                              _set_vlan_pcp=_set_vlan_pcp, tp_id=tp_id)

        self.log.info("remove_vlan_filter_task", tp_id=tp_id)
        vlan_remove_task = BrcmVlanFilterTask(self.omci_agent, self, uni_port, _set_vlan_vid,
                                              match_vlan, _set_vlan_pcp, add_tag=False,
                                              tp_id=tp_id)
        self._deferred = self._onu_omci_device.task_runner.queue_task(vlan_remove_task)
        self._deferred.addCallbacks(success, failure)
    @inlineCallbacks
    def process_inter_adapter_message(self, request):
        """Dispatch a message proxied from the olt adapter to the proper handler.

        Handles OMCI responses/requests, onu up/down indications, tech profile
        downloads, and gem/tcont delete requests. Persisted onu state touched by
        a handler is written back to the kv store at the end.

        :param request: InterAdapterMessage proto with a typed, packed body
        """
        self.log.debug('process-inter-adapter-message', type=request.header.type, from_topic=request.header.from_topic,
                       to_topic=request.header.to_topic, to_device_id=request.header.to_device_id)
        if not self.enabled:
            # handler not ready yet: re-deliver the same message shortly
            self.log.warn('device-not-activated')
            reactor.callLater(0.5, self.process_inter_adapter_message, request)
            return
        try:
            update_onu_state = False
            # Note: VOLTHA v2.6 and ealier OLTs would send an OMCI_REQUEST instead of an
            #       OMCI_RESPONSE. Both have identical formats outside of type. So accept
            #       both.
            if request.header.type in (InterAdapterMessageType.OMCI_RESPONSE,
                                       InterAdapterMessageType.OMCI_REQUEST):
                omci_msg = InterAdapterOmciMessage()
                request.body.Unpack(omci_msg)
                self.log.debug('inter-adapter-recv-omci', omci_msg=hexify(omci_msg.message))
                self.receive_message(omci_msg.message)
            elif request.header.type == InterAdapterMessageType.ONU_IND_REQUEST:
                onu_indication = OnuIndication()
                request.body.Unpack(onu_indication)
                self.log.debug('inter-adapter-recv-onu-ind', onu_id=onu_indication.onu_id,
                               oper_state=onu_indication.oper_state, admin_state=onu_indication.admin_state,
                               serial_number=onu_indication.serial_number)
                # always persist the latest indication context for reconciliation
                update_onu_state = True
                self._onu_persisted_state['onu_id'] = onu_indication.onu_id
                self._onu_persisted_state['intf_id'] = onu_indication.intf_id
                self._onu_persisted_state['admin_state'] = onu_indication.admin_state
                self._onu_persisted_state['oper_state'] = onu_indication.oper_state
                if onu_indication.oper_state == "up":
                    yield self.create_interface(onu_indication)
                elif onu_indication.oper_state == "down" or onu_indication.oper_state == "unreachable":
                    yield self.update_interface(onu_indication)
                else:
                    self.log.error("unknown-onu-indication", onu_id=onu_indication.onu_id,
                                   serial_number=onu_indication.serial_number)
            elif request.header.type == InterAdapterMessageType.TECH_PROFILE_DOWNLOAD_REQUEST:
                tech_msg = InterAdapterTechProfileDownloadMessage()
                request.body.Unpack(tech_msg)
                self.log.debug('inter-adapter-recv-tech-profile', tech_msg=tech_msg)
                update_onu_state = self._update_onu_persisted_state(tech_msg.uni_id, tp_path=tech_msg.path)
                yield self.load_and_configure_tech_profile(tech_msg.uni_id, tech_msg.path)
            elif request.header.type == InterAdapterMessageType.DELETE_GEM_PORT_REQUEST:
                del_gem_msg = InterAdapterDeleteGemPortMessage()
                request.body.Unpack(del_gem_msg)
                self.log.debug('inter-adapter-recv-del-gem', gem_del_msg=del_gem_msg)
                tp_id = self.extract_tp_id_from_path(del_gem_msg.tp_path)
                uni_id = del_gem_msg.uni_id
                gem_port = self._pon.get_gem_port(del_gem_msg.gem_port_id)
                self._tp_state_map_per_uni[uni_id][tp_id].queue_pending_delete_pon_resource(TpState.GEM_ID,
                                                                                            gem_port)
                if self.is_device_active_and_reachable:
                    self.delete_tech_profile(uni_id=del_gem_msg.uni_id,
                                             gem_port=gem_port,
                                             tp_path=del_gem_msg.tp_path)
                else:
                    # cannot reach the onu: just drop the gem from local bookkeeping
                    self.log.debug("device-unreachable--clearing-gem-id-from-local-cache")
                    if tp_id in self._tp_state_map_per_uni[uni_id]:
                        self._tp_state_map_per_uni[uni_id][tp_id].pon_resource_delete_complete(TpState.GEM_ID,
                                                                                               gem_port.gem_id)
                    self._clear_alloc_id_gem_port_from_internal_cache(None, gem_port.gem_id)
            elif request.header.type == InterAdapterMessageType.DELETE_TCONT_REQUEST:
                del_tcont_msg = InterAdapterDeleteTcontMessage()
                request.body.Unpack(del_tcont_msg)
                self.log.debug('inter-adapter-recv-del-tcont', del_tcont_msg=del_tcont_msg)
                # Removal of the tcont/alloc id mapping represents the removal of the tech profile
                update_onu_state = self._update_onu_persisted_state(del_tcont_msg.uni_id, tp_path=None)
                tp_id = self.extract_tp_id_from_path(del_tcont_msg.tp_path)
                uni_id = del_tcont_msg.uni_id
                tcont = self._pon.get_tcont(del_tcont_msg.alloc_id)
                self._tp_state_map_per_uni[uni_id][tp_id].queue_pending_delete_pon_resource(TpState.ALLOC_ID,
                                                                                            tcont)
                if self.is_device_active_and_reachable:
                    self.delete_tech_profile(uni_id=del_tcont_msg.uni_id,
                                             tcont=tcont,
                                             tp_path=del_tcont_msg.tp_path)
                else:
                    # cannot reach the onu: just drop the tcont from local bookkeeping
                    self.log.debug("device-unreachable--clearing-tcont-from-local-cache")
                    if tp_id in self._tp_state_map_per_uni[uni_id]:
                        self._tp_state_map_per_uni[uni_id][tp_id].pon_resource_delete_complete(TpState.ALLOC_ID,
                                                                                               tcont.alloc_id)
                        self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done = False
                    self._clear_alloc_id_gem_port_from_internal_cache(tcont.alloc_id, None)
            else:
                self.log.error("inter-adapter-unhandled-type", request=request)
            if update_onu_state:
                try:
                    self.log.debug('updating-onu-state', device_id=self.device_id,
                                   onu_persisted_state=self._onu_persisted_state)
                    yield self.onu_kv_client.set(self.device_id, json.dumps(self._onu_persisted_state))
                except Exception as e:
                    self.log.error('could-not-store-onu-state', device_id=self.device_id,
                                   onu_persisted_state=self._onu_persisted_state, e=e)
                    # at this point omci is started and/or indications being processed
                    # later indications may have a chance to write this state out again
        except Exception as e:
            self.log.exception("error-processing-inter-adapter-message", e=e)
def _update_onu_persisted_state(self, uni_id, tp_path):
# persist the uni and tech profile path for later reconciliation. update only if changed
update_onu_state = False
found = False
for entry in self._onu_persisted_state.get('uni_config', list()):
if entry.get('uni_id') == uni_id:
found = True
if entry.get('tp_path') != tp_path:
update_onu_state = True
entry['tp_path'] = tp_path
if not found:
update_onu_state = True
uni_tp = {
'uni_id': uni_id,
'tp_path': tp_path
}
self._onu_persisted_state['uni_config'].append(uni_tp)
return update_onu_state
# Called each time there is an onu "up" indication from the olt handler
    @inlineCallbacks
    def create_interface(self, onu_indication):
        """Start the openomci state machine for an onu that just came up.

        Called each time there is an onu "up" indication from the olt handler.
        A duplicate indication for an already running ONU is ignored.

        :param onu_indication: OnuIndication proto with onu id/serial context
        """
        self.log.info('create-interface', onu_id=onu_indication.onu_id,
                      serial_number=onu_indication.serial_number)
        # Ignore if onu_indication is received for an already running ONU
        if self._onu_omci_device is not None and self._onu_omci_device.active:
            self.log.warn('received-onu-indication-for-active-onu', onu_indication=onu_indication)
            return
        yield self.core_proxy.device_state_update(self.device_id, oper_status=OperStatus.ACTIVATING,
                                                  connect_status=ConnectStatus.REACHABLE)
        onu_device = yield self.core_proxy.get_device(self.device_id)
        self.log.debug('starting-openomci-statemachine')
        self._subscribe_to_events()
        onu_device.reason = "starting-openomci"
        # start omci asynchronously so this handler returns promptly
        reactor.callLater(1, self._onu_omci_device.start, onu_device)
        yield self.core_proxy.device_reason_update(self.device_id, onu_device.reason)
        self._heartbeat.enabled = True
# Called each time there is an onu "down" indication from the olt handler
    @inlineCallbacks
    def update_interface(self, onu_indication):
        """Stop omci and mark the device unreachable on a "down" indication.

        Called each time there is an onu "down"/"unreachable" indication from
        the olt handler; any other oper_state leaves the state machine alone.

        :param onu_indication: OnuIndication proto with the new oper_state
        """
        self.log.info('update-interface', onu_id=onu_indication.onu_id,
                      serial_number=onu_indication.serial_number)
        if onu_indication.oper_state == 'down' or onu_indication.oper_state == "unreachable":
            self.log.debug('stopping-openomci-statemachine', device_id=self.device_id)
            reactor.callLater(0, self._onu_omci_device.stop)
            self._tp = dict()
            # Let TP download happen again
            for uni_id in self._tp_state_map_per_uni:
                for tp_id in self._tp_state_map_per_uni[uni_id]:
                    self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done = False
            yield self.disable_ports(lock_ports=False)
            yield self.core_proxy.device_reason_update(self.device_id, "stopping-openomci")
            yield self.core_proxy.device_state_update(self.device_id, oper_status=OperStatus.DISCOVERED,
                                                      connect_status=ConnectStatus.UNREACHABLE)
            self.is_device_active_and_reachable = False
        else:
            self.log.debug('not-changing-openomci-statemachine')
    @inlineCallbacks
    def disable(self, device):
        """Administratively disable the onu: lock its uni ports via OMCI.

        :param device: voltha Device proto for this onu
        """
        self.log.info('disable', device_id=device.id, serial_number=device.serial_number)
        try:
            yield self.disable_ports(lock_ports=True, device_disabled=True)
            yield self.core_proxy.device_reason_update(self.device_id, "omci-admin-lock")
            yield self.core_proxy.device_state_update(self.device_id, oper_status=OperStatus.UNKNOWN)
            self.is_device_active_and_reachable = False
        except Exception as e:
            # best effort: a failed disable is logged, not propagated
            self.log.exception('exception-in-onu-disable', exception=e)
    @inlineCallbacks
    def reenable(self, device):
        """Re-enable a previously disabled onu and unlock its uni ports.

        :param device: voltha Device proto for this onu
        """
        self.log.info('reenable', device_id=device.id, serial_number=device.serial_number)
        try:
            yield self.core_proxy.device_state_update(device.id,
                                                      oper_status=OperStatus.ACTIVE,
                                                      connect_status=ConnectStatus.REACHABLE)
            self.is_device_active_and_reachable = True
            yield self.core_proxy.device_reason_update(self.device_id, 'onu-reenabled')
            yield self.enable_ports()
        except Exception as e:
            # best effort: a failed reenable is logged, not propagated
            self.log.exception('exception-in-onu-reenable', exception=e)
    @inlineCallbacks
    def reboot(self):
        """Issue an OMCI reboot to the onu; no-op if the device is unreachable."""
        self.log.info('reboot-device')
        device = yield self.core_proxy.get_device(self.device_id)
        if device.connect_status != ConnectStatus.REACHABLE:
            self.log.error("device-unreachable")
            return

        @inlineCallbacks
        def success(_results):
            self.log.info('reboot-success', _results=_results)
            yield self.core_proxy.device_reason_update(self.device_id, 'rebooting')

        def failure(_reason):
            self.log.info('reboot-failure', _reason=_reason)

        self._deferred = self._onu_omci_device.reboot()
        self._deferred.addCallbacks(success, failure)
    @inlineCallbacks
    def disable_ports(self, lock_ports=True, device_disabled=False):
        """Mark uni port oper state unknown in the core and optionally OMCI-lock them.

        :param lock_ports: when True also send the OMCI lock to the onu
        :param device_disabled: passed through to lock_ports to signal an admin disable
        """
        self.log.info('disable-ports', device_id=self.device_id)
        # TODO: for now only support the first UNI given no requirement for multiple uni yet. Also needed to reduce flow
        #  load on the core
        for port in self.uni_ports:
            if port.mac_bridge_port_num == 1:
                port.operstatus = OperStatus.UNKNOWN
                self.log.info('disable-port', device_id=self.device_id, port=port)
                yield self.core_proxy.port_state_update(self.device_id, Port.ETHERNET_UNI, port.port_number,
                                                        port.operstatus)
        if lock_ports is True:
            self.lock_ports(lock=True, device_disabled=device_disabled)
    @inlineCallbacks
    def enable_ports(self):
        """OMCI-unlock the uni ports and mark them active in the core."""
        self.log.info('enable-ports', device_id=self.device_id)
        self.lock_ports(lock=False)
        # TODO: for now only support the first UNI given no requirement for multiple uni yet. Also needed to reduce flow
        #  load on the core
        # Given by default all unis are initially active according to omci alarming, we must mimic this.
        for port in self.uni_ports:
            if port.mac_bridge_port_num == 1:
                port.operstatus = OperStatus.ACTIVE
                self.log.info('enable-port', device_id=self.device_id, port=port)
                yield self.core_proxy.port_state_update(self.device_id, Port.ETHERNET_UNI, port.port_number,
                                                        port.operstatus)
# TODO: Normally we would want any uni ethernet link down or uni ethernet link up alarms to register in the core,
# but practically olt provisioning cannot handle the churn of links up, down, then up again typical on startup.
#
# Basically the link state sequence:
# 1) per omci default alarm state, all unis are initially up (no link down alarms received yet)
# 2) a link state down alarm is received for all uni, given the lock command, and also because most unis have nothing plugged in
# 3) a link state up alarm is received for the uni plugged in.
#
# Given the olt (BAL) has to provision all uni, de-provision all uni, and re-provision one uni in quick succession
# and cannot (bug?), we have to skip this and leave uni ports as assumed active. Also all the link state activity
# would have a ripple effect through the core to the controller as well. And is it really worth it?
'''
@inlineCallbacks
def port_state_handler(self, _topic, msg):
self.log.info("port-state-change", _topic=_topic, msg=msg)
onu_id = msg['onu_id']
port_no = msg['port_number']
serial_number = msg['serial_number']
port_status = msg['port_status']
uni_port = self.uni_port(int(port_no))
self.log.debug("port-state-parsed-message", onu_id=onu_id, port_no=port_no, serial_number=serial_number,
port_status=port_status)
if port_status is True:
uni_port.operstatus = OperStatus.ACTIVE
self.log.info('link-up', device_id=self.device_id, port=uni_port)
else:
uni_port.operstatus = OperStatus.UNKNOWN
self.log.info('link-down', device_id=self.device_id, port=uni_port)
yield self.core_proxy.port_state_update(self.device_id, Port.ETHERNET_UNI, uni_port.port_number, uni_port.operstatus)
'''
# Called just before openomci state machine is started. These listen for events from selected state machines,
# most importantly, mib in sync. Which ultimately leads to downloading the mib
    def _subscribe_to_events(self):
        """Subscribe to openomci event-bus topics before its state machine starts.

        Listens for mib-database-sync (which ultimately triggers the mib
        download) and OMCI capabilities events for this device.
        """
        self.log.debug('subscribe-to-events')
        bus = self._onu_omci_device.event_bus
        # OMCI MIB Database sync status
        topic = OnuDeviceEntry.event_bus_topic(self.device_id,
                                               OnuDeviceEvents.MibDatabaseSyncEvent)
        self._in_sync_subscription = bus.subscribe(topic, self.in_sync_handler)
        # OMCI Capabilities
        topic = OnuDeviceEntry.event_bus_topic(self.device_id,
                                               OnuDeviceEvents.OmciCapabilitiesEvent)
        self._capabilities_subscription = bus.subscribe(topic, self.capabilties_handler)
        # TODO: these alarms seem to be unreliable depending on the environment
        # Listen for UNI link state alarms and set the oper_state based on that rather than assuming all UNI are up
        # topic = OnuDeviceEntry.event_bus_topic(self.device_id,
        #                                       OnuDeviceEvents.PortEvent)
        # self._port_state_subscription = bus.subscribe(topic, self.port_state_handler)
# Called when the mib is in sync
    def in_sync_handler(self, _topic, msg):
        """Event-bus callback fired when the OMCI mib database reaches sync.

        Unsubscribes itself on the first in-sync event (one-shot) and schedules
        _mib_in_sync to load device data and download the mib.

        :param _topic: event bus topic the message arrived on
        :param msg: event payload; IN_SYNC_KEY holds the sync boolean
        """
        self.log.debug('in-sync-handler', _topic=_topic, msg=msg)
        if self._in_sync_subscription is not None:
            try:
                in_sync = msg[IN_SYNC_KEY]
                if in_sync:
                    # Only call this once
                    bus = self._onu_omci_device.event_bus
                    bus.unsubscribe(self._in_sync_subscription)
                    self._in_sync_subscription = None
                    # Start up device_info load
                    self.log.debug('running-mib-sync')
                    reactor.callLater(0, self._mib_in_sync)
            except Exception as e:
                self.log.exception('in-sync', e=e)
def capabilties_handler(self, _topic, _msg):
self.log.debug('capabilities-handler', _topic=_topic, msg=_msg)
if self._capabilities_subscription is not None:
self.log.debug('capabilities-handler-done')
# Mib is in sync, we can now query what we learned and actually start pushing ME (download) to the ONU.
    @inlineCallbacks
    def _mib_in_sync(self):
        """Load learned device data and push the initial mib once in sync.

        Mib is in sync: query what openomci learned, load local uni/ani data if
        not already loaded, then either skip (mib already downloaded) or run the
        initial mib download. During reconcile, core notifications are
        suppressed and tech profile state is restored from persisted data.
        """
        self.log.debug('mib-in-sync')
        device = yield self.core_proxy.get_device(self.device_id)
        # only notify core if this is a new device.  otherwise do not have reconcile generating
        # a lot of needless message churn
        if not self._reconciling:
            yield self.core_proxy.device_reason_update(self.device_id, 'discovery-mibsync-complete')
        if self._dev_info_loaded:
            self.log.debug('device-info-already-loaded')
        else:
            # new onu or adapter was restarted.  fill up our local data
            yield self._load_device_data(device)
        if self._check_mib_downloaded():
            self.log.debug('mib-already-downloaded')
            if not self._reconciling:
                yield self.core_proxy.device_state_update(device.id,
                                                          oper_status=OperStatus.ACTIVE,
                                                          connect_status=ConnectStatus.REACHABLE)
                self.is_device_active_and_reachable = True
                yield self.enable_ports()
        else:
            self._download_mib(device)
        if self._reconciling:
            yield self._restore_tech_profile()
            self._start_monitoring()
            self._reconciling = False
            self.log.debug('reconcile-finished')
    def _download_mib(self, device):
        """Queue the initial mib download task and wire up its callbacks.

        Locks all unis first so port up/down events are only seen once ready.
        On success the device becomes active/reachable, ports are enabled and
        monitoring starts; on failure the whole _mib_in_sync cycle is retried
        after a randomized backoff.

        :param device: voltha Device proto for this onu
        """
        self.log.debug('downloading-initial-mib-configuration')

        @inlineCallbacks
        def success(_results):
            self.log.debug('mib-download-success', _results=_results)
            yield self.core_proxy.device_state_update(device.id,
                                                      oper_status=OperStatus.ACTIVE,
                                                      connect_status=ConnectStatus.REACHABLE)
            self.is_device_active_and_reachable = True
            yield self.core_proxy.device_reason_update(self.device_id, 'initial-mib-downloaded')
            self._mib_download_task = None
            yield self.enable_ports()
            yield self.onu_active_event()
            self._start_monitoring()

        @inlineCallbacks
        def failure(_reason):
            self.log.warn('mib-download-failure-retrying', _reason=_reason)
            retry = _STARTUP_RETRY_WAIT * (random.randint(1, 5))
            reactor.callLater(retry, self._mib_in_sync)
            yield self.core_proxy.device_reason_update(self.device_id, 'initial-mib-download-failure-retrying')

        # start by locking all the unis till mib sync and initial mib is downloaded
        # this way we can capture the port down/up events when we are ready
        self.lock_ports(lock=True)
        # Download an initial mib that creates simple bridge that can pass EAP.  On success (above) finally set
        # the device to active/reachable.   This then opens up the handler to openflow pushes from outside
        self._mib_download_task = BrcmMibDownloadTask(self.omci_agent, self)
        self._deferred = self._onu_omci_device.task_runner.queue_task(self._mib_download_task)
        self._deferred.addCallbacks(success, failure)
    def _start_monitoring(self):
        """Kick off pm metric collection and omci test requests, once each.

        Both starts are randomly staggered so a fleet of onus coming up
        together does not hammer the system at the same instant.
        """
        self.log.debug('starting-monitoring')
        # Start collecting stats from the device after a brief pause
        if not self._pm_metrics_started:
            self._pm_metrics_started = True
            pmstart = _STARTUP_RETRY_WAIT * (random.randint(1, self._pm_metrics.max_skew))
            reactor.callLater(pmstart, self._pm_metrics.start_collector)
        # Start test requests after a brief pause
        if not self._test_request_started:
            self._test_request_started = True
            tststart = _STARTUP_RETRY_WAIT * (random.randint(1, 5))
            reactor.callLater(tststart, self._test_request.start_collector)
def _check_mib_downloaded(self):
self.log.debug('checking-mib-downloaded')
results = False
mac_bridges = self.onu_omci_device.query_mib(MacBridgeServiceProfile.class_id)
self.log.debug('mac-bridges', mac_bridges=mac_bridges)
for k, v in mac_bridges.items():
if not isinstance(v, dict):
continue
# found at least one mac bridge, good enough to say its done, break out
self.log.debug('found-mac-bridge-mib-download-has-been-done', omci_key=k, omci_value=v)
results = True
break
return results
    @inlineCallbacks
    def _load_device_data(self, device):
        """Populate local uni port data from what openomci learned in the mib.

        Reads ani/uni/pptp/veip entities from the mib database, creates a
        UniPort for each pptp/veip entity and initializes per-uni tech profile
        state. On any failure the whole _mib_in_sync cycle is retried later.

        :param device: voltha Device proto for this onu
        """
        self.log.debug('loading-device-data-from-mib', device_id=device.id)
        omci_dev = self._onu_omci_device
        config = omci_dev.configuration
        try:
            # sort the lists so we get consistent port ordering.
            ani_list = sorted(config.ani_g_entities) if config.ani_g_entities else []
            uni_list = sorted(config.uni_g_entities) if config.uni_g_entities else []
            pptp_list = sorted(config.pptp_entities) if config.pptp_entities else []
            veip_list = sorted(config.veip_entities) if config.veip_entities else []
            if ani_list is None or (pptp_list is None and veip_list is None):
                yield self.core_proxy.device_reason_update(self.device_id, 'onu-missing-required-elements')
                raise Exception("onu-missing-required-elements")
            # Currently logging the ani, pptp, veip, and uni for information purposes.
            # Actually act on the veip/pptp as its ME is the most correct one to use in later tasks.
            # And in some ONU the UNI-G list is incomplete or incorrect...
            for entity_id in ani_list:
                ani_value = config.ani_g_entities[entity_id]
                self.log.debug("discovered-ani", entity_id=entity_id, value=ani_value)
            for entity_id in uni_list:
                uni_value = config.uni_g_entities[entity_id]
                self.log.debug("discovered-uni", entity_id=entity_id, value=uni_value)
            # OrderedDict keeps pptp entities ahead of veip in uni numbering
            uni_entities = OrderedDict()
            for entity_id in pptp_list:
                pptp_value = config.pptp_entities[entity_id]
                self.log.debug("discovered-pptp", entity_id=entity_id, value=pptp_value)
                uni_entities[entity_id] = UniType.PPTP
            for entity_id in veip_list:
                veip_value = config.veip_entities[entity_id]
                self.log.debug("discovered-veip", entity_id=entity_id, value=veip_value)
                uni_entities[entity_id] = UniType.VEIP
            uni_id = 0
            for entity_id, uni_type in uni_entities.items():
                yield self._add_uni_port(device, entity_id, uni_id, uni_type)
                self._tp_state_map_per_uni[uni_id] = dict()
                uni_id += 1
            if self._unis:
                self._dev_info_loaded = True
            else:
                yield self.core_proxy.device_reason_update(self.device_id, 'no-usable-unis')
                raise Exception("no-usable-unis")
        except Exception as e:
            self.log.exception('device-info-load', e=e)
            # retry the full sync cycle after a pause rather than failing hard
            self._deferred = reactor.callLater(_STARTUP_RETRY_WAIT, self._mib_in_sync)
    @inlineCallbacks
    def _add_uni_port(self, device, entity_id, uni_id, uni_type=UniType.PPTP):
        """Create and register a UniPort for a discovered pptp/veip entity.

        Announces the port to the core (unless reconciling), caches it in
        self._unis and feeds the alarm synchronizer the updated uni list.

        :param device: voltha Device proto for this onu
        :param entity_id: OMCI entity id of the pptp/veip ME backing this uni
        :param uni_id: zero-based uni index on this onu
        :param uni_type: UniType.PPTP or UniType.VEIP
        """
        self.log.debug('add-uni-port', entity_id=entity_id, uni_id=uni_id)
        intf_id = self._onu_persisted_state.get('intf_id')
        onu_id = self._onu_persisted_state.get('onu_id')
        uni_no = self.mk_uni_port_num(intf_id, onu_id, uni_id)
        # TODO: Some or parts of this likely need to move to UniPort. especially the format stuff
        uni_name = "uni-{}".format(uni_no)
        mac_bridge_port_num = uni_id + 1  # TODO +1 is only to test non-zero index
        self.log.debug('uni-port-inputs', uni_no=uni_no, uni_id=uni_id, uni_name=uni_name, uni_type=uni_type,
                       entity_id=entity_id, mac_bridge_port_num=mac_bridge_port_num, serial_number=device.serial_number)
        uni_port = UniPort.create(self, uni_name, uni_id, uni_no, uni_name,
                                  device.parent_port_no, device.serial_number,
                                  uni_type,)
        uni_port.entity_id = entity_id
        uni_port.enabled = True
        uni_port.mac_bridge_port_num = mac_bridge_port_num
        self.log.debug("created-uni-port", uni=uni_port)
        if not self._reconciling:
            yield self.core_proxy.port_created(device.id, uni_port.get_port())
        self._unis[uni_port.port_number] = uni_port
        self._onu_omci_device.alarm_synchronizer.set_alarm_params(onu_id=onu_id,
                                                                  uni_ports=self.uni_ports,
                                                                  serial_number=device.serial_number)
    @inlineCallbacks
    def _restore_tech_profile(self):
        """Rebuild local tcont/gem state from persisted tech profile paths.

        Used during reconcile: for every uni with a saved tp path, reload the
        tech profile instance from the kv store and re-run the local (non-omci)
        configuration, then re-associate tcont entity ids from the mib database
        so later tp deletes/adds have the ids they need.
        """
        self.log.debug("reconcile-restoring-tech-profile-tcont-gem-config")
        # for every uni that has tech profile config reload all its tcont/alloc_id and gem from the tp path
        for entry in self._onu_persisted_state.get('uni_config', list()):
            uni_id = entry.get('uni_id')
            tp_path = entry.get('tp_path')
            if tp_path:
                tpstored = yield self.tp_kv_client.get(tp_path)
                tpstring = tpstored.decode('ascii')
                tp = json.loads(tpstring)
                self.log.debug("restoring-tp-instance", tp=tp)
                # re-run tech profile config that stores gem and tconts in the self._pon object
                # this does not actually re-run the omci, just rebuilds our local data store
                self._do_tech_profile_configuration(uni_id, tp)
                tp_id = self.extract_tp_id_from_path(tp_path)
                # rebuild cache dicts so tp updates and deletes dont get KeyErrors
                if uni_id not in self._tp_state_map_per_uni:
                    self._tp_state_map_per_uni[uni_id] = dict()
                if tp_id not in self._tp_state_map_per_uni[uni_id]:
                    self._tp_state_map_per_uni[uni_id][tp_id] = TpState(self, uni_id, tp_path)
                self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done = True
            else:
                self.log.debug("no-assigned-tp-instance", uni_id=uni_id)
        # for every loaded tcont from tp check the mib database for its entity_id
        # needed for later tp deletes/adds
        tcont_idents = self.onu_omci_device.query_mib(Tcont.class_id)
        self.log.debug('tcont-idents', tcont_idents=tcont_idents)
        for k, v in tcont_idents.items():
            if not isinstance(v, dict):
                continue
            alloc_check = v.get('attributes', {}).get('alloc_id', 0)
            tcont = self._pon.tconts.get(alloc_check)
            if tcont:
                tcont.entity_id = k
                self.log.debug('reassigning-tcont-entity-id', entity_id=tcont.entity_id,
                               alloc_id=tcont.alloc_id)
# TODO NEW CORE: Figure out how to gain this knowledge from the olt. for now cheat terribly.
def mk_uni_port_num(self, intf_id, onu_id, uni_id):
MAX_PONS_PER_OLT = 256
MAX_ONUS_PER_PON = 256
MAX_UNIS_PER_ONU = 16
assert intf_id < MAX_PONS_PER_OLT
assert onu_id < MAX_ONUS_PER_PON
assert uni_id < MAX_UNIS_PER_ONU
return intf_id << 12 | onu_id << 4 | uni_id
    @inlineCallbacks
    def onu_active_event(self):
        """Raise an OnuActiveEvent toward the event system.

        Gathers context (pon/onu ids, serials, olt serial) and sends the event;
        any failure is logged and swallowed so activation is never blocked.
        """
        self.log.debug('onu-active-event')
        try:
            # TODO: this is expensive for just getting the olt serial number.  replace with direct api call
            parent_device = yield self.core_proxy.get_device(self.parent_id)
            olt_serial_number = parent_device.serial_number
            self.olt_serial_number = olt_serial_number
            raised_ts = arrow.utcnow().timestamp
            intf_id = self._onu_persisted_state.get('intf_id')
            onu_id = self._onu_persisted_state.get('onu_id')
            onu_serial = self._onu_persisted_state.get('serial_number')
            self.log.debug("onu-indication-context-data",
                           pon_id=intf_id,
                           onu_id=onu_id,
                           registration_id=self.device_id,
                           device_id=self.device_id,
                           onu_serial_number=onu_serial,
                           olt_serial_number=olt_serial_number,
                           raised_ts=raised_ts)
            self.log.debug("Trying-to-raise-onu-active-event")
            OnuActiveEvent(self.events, self.device_id,
                           intf_id,
                           onu_serial,
                           str(self.device_id),
                           olt_serial_number, raised_ts,
                           onu_id=onu_id).send(True)
        except Exception as active_event_error:
            self.log.exception('onu-activated-event-error',
                               errmsg=active_event_error)
@inlineCallbacks
def onu_disabled_event(self):
    """Build and send an OnuDisabledEvent for this ONU.

    Fetches this ONU device and its parent OLT (for serial numbers),
    pulls pon id / onu id from the persisted state, and raises the
    event with the current UTC timestamp.  Failures are logged and
    swallowed.
    """
    self.log.debug('onu-disabled-event')
    try:
        device = yield self.core_proxy.get_device(self.device_id)
        parent_device = yield self.core_proxy.get_device(self.parent_id)
        olt_serial_number = parent_device.serial_number
        raised_ts = arrow.utcnow().timestamp
        intf_id = self._onu_persisted_state.get('intf_id')
        onu_id = self._onu_persisted_state.get('onu_id')
        onu_serial = self._onu_persisted_state.get('serial_number')
        self.log.debug("onu-indication-context-data",
                       pon_id=intf_id,
                       onu_id=onu_id,
                       registration_id=self.device_id,
                       device_id=self.device_id,
                       onu_serial_number=onu_serial,
                       olt_serial_number=olt_serial_number,
                       raised_ts=raised_ts)
        self.log.debug("Trying-to-raise-onu-disabled-event")
        # NOTE(review): the event carries device.serial_number while the
        # log above uses the persisted onu_serial -- presumably the same
        # value, but onu_active_event uses the persisted one; confirm
        # and align.
        OnuDisabledEvent(self.events, self.device_id,
                         intf_id,
                         device.serial_number,
                         str(self.device_id),
                         olt_serial_number, raised_ts,
                         onu_id=onu_id).send(True)
    except Exception as disable_event_error:
        self.log.exception('onu-disabled-event-error',
                           errmsg=disable_event_error)
def onu_deleted_event(self):
    """Build and send an OnuDeletedEvent for this ONU.

    Uses the OLT serial number cached by onu_active_event (the device
    may already be gone from the core, so no proxy lookup is done) and
    the persisted pon id / onu id / serial number.  Failures are logged
    and swallowed.

    Bug fix: the original decorated this with @inlineCallbacks although
    the body contains no ``yield``; Twisted's inlineCallbacks raises
    TypeError when the wrapped function does not return a generator, so
    every invocation crashed before the try block.  The decorator is
    removed; callers that ``yield`` the (now None) result are unaffected,
    as inlineCallbacks passes non-Deferred values straight through.
    """
    self.log.debug('onu-deleted-event')
    try:
        olt_serial_number = self.olt_serial_number
        raised_ts = arrow.utcnow().timestamp
        intf_id = self._onu_persisted_state.get('intf_id')
        onu_id = self._onu_persisted_state.get('onu_id')
        serial_number = self._onu_persisted_state.get('serial_number')
        self.log.debug("onu-deleted-event-context-data",
                       pon_id=intf_id,
                       onu_id=onu_id,
                       registration_id=self.device_id,
                       device_id=self.device_id,
                       onu_serial_number=serial_number,
                       olt_serial_number=olt_serial_number,
                       raised_ts=raised_ts)
        OnuDeletedEvent(self.events, self.device_id,
                        intf_id,
                        serial_number,
                        str(self.device_id),
                        olt_serial_number, raised_ts,
                        onu_id=onu_id).send(True)
    except Exception as deleted_event_error:
        self.log.exception('onu-deleted-event-error',
                           errmsg=deleted_event_error)
def lock_ports(self, lock=True, device_disabled=False):
    """Queue an OMCI task that locks or unlocks every UNI port.

    :param lock: True to administratively lock the ports, False to unlock
    :param device_disabled: when True, raise an onu-disabled event once
                            the lock task completes successfully
    """
    def _on_success(response):
        self.log.debug('set-onu-ports-state', lock=lock, response=response)
        if device_disabled:
            self.onu_disabled_event()

    def _on_failure(response):
        self.log.error('cannot-set-onu-ports-state', lock=lock, response=response)

    lock_task = BrcmUniLockTask(self.omci_agent, self.device_id, lock=lock)
    self._deferred = self._onu_omci_device.task_runner.queue_task(lock_task)
    self._deferred.addCallbacks(_on_success, _on_failure)
def extract_tp_id_from_path(self, tp_path):
    """Return the tech-profile table id embedded in a tp kv-store path.

    The path has the shape <technology>/<table_id>/<uni_port_name>; the
    second component is the numeric table id.
    """
    components = tp_path.split(_PATH_SEPERATOR)
    return int(components[1])
def start_omci_test_action(self, device, uuid):
    """Kick off an OMCI self-test (test action) against this ONU.

    :param device: voltha Device for this ONU; must be REACHABLE and ENABLED
    :param uuid: identifier used to correlate the test results
    :return: TestResponse with SUCCESS when the test was started,
             FAILURE when the device is unreachable or not enabled
    """
    # Code to Run OMCI Test Action
    self.log.info('Omci-test-action-request-On', request=device.id)
    kwargs_omci_test_action = {
        OmciTestRequest.DEFAULT_FREQUENCY_KEY:
            OmciTestRequest.DEFAULT_COLLECTION_FREQUENCY
    }
    serial_number = device.serial_number
    # refuse the request outright if the ONU cannot currently answer OMCI
    if device.connect_status != ConnectStatus.REACHABLE or device.admin_state != AdminState.ENABLED:
        return (TestResponse(result=TestResponse.FAILURE))
    test_request = OmciTestRequest(self.core_proxy,
                                   self.omci_agent, self.device_id, AniG,
                                   serial_number,
                                   self.logical_device_id, exclusive=False,
                                   uuid=uuid,
                                   **kwargs_omci_test_action)
    test_request.perform_test_omci()
    return (TestResponse(result=TestResponse.SUCCESS))
@inlineCallbacks
def get_uni_status(self, request):
    """Handle a get-value request for the status of one UNI port.

    Finds the UNI whose id matches ``request.uniInfo.uniIndex``, queues
    a BrcmUniStatusTask for it and waits on the response queue for the
    result; on any error the response is marked ERROR/UNSUPPORTED.

    :param request: GetValueRequest-style message carrying uniInfo.uniIndex
    :return: (via returnValue) the task's result object
    """
    for uni in self.uni_ports:
        self.log.debug('uni-id-and-uni-index', uni_id=uni.uni_id, uni_idx=request.uniInfo.uniIndex)
        if uni.uni_id == request.uniInfo.uniIndex:
            task = BrcmUniStatusTask(self.omci_agent, self.device_id, request, uni.entity_id, self.uni_status_response_queue)
            self._deferred = self._onu_omci_device.task_runner.queue_task(task)
            try:
                self._results = yield self.uni_status_response_queue.get()
                self.log.debug('uni-status-response', res=self._results)
            except Exception as e:
                self.log.exception("failed-dequeueing-received-message", e=e)
                self._results.response.status = GetValueResponse.ERROR
                self._results.response.errReason = GetValueResponse.UNSUPPORTED
            finally:
                # stop the task whether or not a response arrived
                task.stop()
            returnValue(self._results)
    self.log.error('uni-not-found', uni_idx=request.uniInfo.uniIndex)
    # NOTE(review): if no UNI matched, self._results may never have been
    # assigned on this handler, making the next line raise AttributeError --
    # verify a default result object is initialised elsewhere.
    self._results.response.status = GetValueResponse.ERROR
    self._results.response.errReason = GetValueResponse.UNSUPPORTED
    returnValue(self._results)
|
22,676 | 6e620740e04c35b20e1ba65b4f8cd8e1a3b7d7e7 | from py_CQCGL_threads import pyCQCGL
from personalFunctions import *
case = 65
if case == 1:
"""
test the accuracy of the soliton solution
"""
N = 1024
d = 30
h = 0.0002
di = 0.05
cgl = pyCqcgl1d(N, d, h, True, 0, 4.0, 0.8, 0.01, di, 4)
a0, wth0, wphi0, err = cqcglReadReqdi('../../data/cgl/reqDi.h5',
di, 1)
vReq = cgl.velocityReq(a0, wth0, wphi0)
nstp = abs(int(2 * np.pi / h / wphi0))
print norm(vReq)
aaE = cgl.intg(a0, nstp, 1)
aaEH, th, phi = cgl.orbit2slice(aaE)
if case == 10:
    """
    plot unstable manifold of the exploding soliton solution
    only continuous symmetries are reduced
    """
    N = 1024
    d = 30
    h = 0.000001
    di = 0.05
    cgl = pyCqcgl1d(N, d, h, True, 0, 4.0, 0.8, 0.01, di, 4)
    a0, wth0, wphi0, err = cqcglReadReqdi('../../data/cgl/reqDi.h5',
                                          di, 1)
    eigvalues, eigvectors = eigReq(cgl, a0, wth0, wphi0)
    eigvectors = Tcopy(realve(eigvectors))
    a0Hat = cgl.orbit2slice(a0)[0].squeeze()
    veHat = cgl.ve2slice(eigvectors, a0)
    nstp = 120000
    # perturb along the leading unstable eigenvector and integrate
    a0Erg = a0 + eigvectors[0]*1e-7
    aaErg = cgl.intg(a0Erg, nstp, 10)
    aaErgHat, th, phi = cgl.orbit2slice(aaErg)
    aaErgHat -= a0Hat
    # project the slice-reduced orbit onto an orthonormal frame built
    # from selected eigenvectors
    e1, e2, e3 = orthAxes(veHat[0], veHat[1], veHat[10])
    aaErgHatProj = np.dot(aaErgHat, np.vstack((e1, e2, e3)).T)
    fig = plt.figure(figsize=[8, 6])
    ax = fig.add_subplot(111, projection='3d')
    ix1 = 000
    ix2 = 10000
    ixs = ix1 + 1400
    ax.plot(aaErgHatProj[ix1:ix2, 0], aaErgHatProj[ix1:ix2, 1],
            aaErgHatProj[ix1:ix2, 2], c='r', lw=1)
    # ax.scatter([0], [0], [0], s=120)
    # ax.scatter(aaErgHatProj[ixs, 0], aaErgHatProj[ixs, 1],
    #            aaErgHatProj[ixs, 2], s=120, c='k')
    ax.set_xlabel(r'$e_1$', fontsize=25)
    ax.set_ylabel(r'$e_2$', fontsize=25)
    ax.set_zlabel(r'$e_3$', fontsize=25)
    fig.tight_layout(pad=0)
    plt.show(block=False)
if case == 11:
    """
    view a sing unstable manifold orbit
    but with full symmetry reduction
    """
    # same as case 10, but the discrete reflection symmetry is reduced too
    N = 1024
    d = 30
    h = 0.0002
    di = 0.05
    cgl = pyCqcgl1d(N, d, h, True, 0, 4.0, 0.8, 0.01, di, 4)
    a0, wth0, wphi0, err = cqcglReadReqdi('../../data/cgl/reqDi.h5',
                                          di, 1)
    eigvalues, eigvectors = eigReq(cgl, a0, wth0, wphi0)
    eigvectors = Tcopy(realve(eigvectors))
    a0Hat = cgl.orbit2slice(a0)[0].squeeze()
    a0Tilde = cgl.reduceReflection(a0Hat)
    veHat = cgl.ve2slice(eigvectors, a0)
    veTilde = cgl.reflectVe(veHat, a0Hat)
    a0Reflected = cgl.reflect(a0)
    a0ReflectedHat = cgl.orbit2slice(a0Reflected)[0].squeeze()
    nstp = 5500
    a0Erg = a0 + eigvectors[0]*1e-4
    aaErg = cgl.intg(a0Erg, nstp, 1)
    aaErgHat, th, phi = cgl.orbit2slice(aaErg)
    aaErgTilde = cgl.reduceReflection(aaErgHat)
    aaErgTilde -= a0Tilde
    e1, e2, e3 = orthAxes(veTilde[0], veTilde[1], veTilde[10])
    aaErgTildeProj = np.dot(aaErgTilde, np.vstack((e1, e2, e3)).T)
    # plot3dfig(aaErgHatProj[1000:, 0], aaErgHatProj[1000:, 1], aaErgHatProj[1000:, 2])
    fig = plt.figure(figsize=[8, 6])
    ax = fig.add_subplot(111, projection='3d')
    ix1 = 0
    ix2 = 3300
    ax.plot(aaErgTildeProj[ix1:ix2, 0], aaErgTildeProj[ix1:ix2, 1],
            aaErgTildeProj[ix1:ix2, 2], c='r', lw=1)
    ax.scatter([0], [0], [0], s=100)
    ax.set_xlabel(r'$e_1$', fontsize=25)
    ax.set_ylabel(r'$e_2$', fontsize=25)
    ax.set_zlabel(r'$e_3$', fontsize=25)
    fig.tight_layout(pad=0)
    plt.show(block=False)
if case == 13:
    # near-duplicate of case 11 (full symmetry reduction) with an extra
    # velocity wrapper defined below
    N = 1024
    d = 30
    h = 0.0002
    di = 0.05
    cgl = pyCqcgl1d(N, d, h, True, 0, 4.0, 0.8, 0.01, di, 4)

    def vel(x, t):
        # time-independent velocity field wrapper; defined but not used
        # in this branch -- presumably intended for an ODE integrator,
        # TODO confirm
        return cgl.velocity(x)

    a0, wth0, wphi0, err = cqcglReadReqdi('../../data/cgl/reqDi.h5',
                                          di, 1)
    eigvalues, eigvectors = eigReq(cgl, a0, wth0, wphi0)
    eigvectors = Tcopy(realve(eigvectors))
    a0Hat = cgl.orbit2slice(a0)[0].squeeze()
    a0Tilde = cgl.reduceReflection(a0Hat)
    veHat = cgl.ve2slice(eigvectors, a0)
    veTilde = cgl.reflectVe(veHat, a0Hat)
    a0Reflected = cgl.reflect(a0)
    a0ReflectedHat = cgl.orbit2slice(a0Reflected)[0].squeeze()
    nstp = 5500
    a0Erg = a0 + eigvectors[0]*1e-4
    aaErg = cgl.intg(a0Erg, nstp, 1)
    aaErgHat, th, phi = cgl.orbit2slice(aaErg)
    aaErgTilde = cgl.reduceReflection(aaErgHat)
    aaErgTilde -= a0Tilde
    e1, e2, e3 = orthAxes(veTilde[0], veTilde[1], veTilde[10])
    aaErgTildeProj = np.dot(aaErgTilde, np.vstack((e1, e2, e3)).T)
    # plot3dfig(aaErgHatProj[1000:, 0], aaErgHatProj[1000:, 1], aaErgHatProj[1000:, 2])
    fig = plt.figure(figsize=[8, 6])
    ax = fig.add_subplot(111, projection='3d')
    ix1 = 0
    ix2 = 3300
    ax.plot(aaErgTildeProj[ix1:ix2, 0], aaErgTildeProj[ix1:ix2, 1],
            aaErgTildeProj[ix1:ix2, 2], c='r', lw=1)
    ax.scatter([0], [0], [0], s=100)
    ax.set_xlabel(r'$e_1$', fontsize=25)
    ax.set_ylabel(r'$e_2$', fontsize=25)
    ax.set_zlabel(r'$e_3$', fontsize=25)
    fig.tight_layout(pad=0)
    plt.show(block=False)
if case == 15:
    """
    view a single unstable manifold orbit using Jacobian not the
    system integrator.
    """
    N = 1024
    d = 30
    h = 0.0002
    di = 0.05
    cgl = pyCqcgl1d(N, d, h, True, 0, 4.0, 0.8, 0.01, di, 4)
    a0, wth0, wphi0, err = cqcglReadReqdi('../../data/cgl/reqDi.h5',
                                          di, 1)
    eigvalues, eigvectors = eigReq(cgl, a0, wth0, wphi0)
    eigvectors = Tcopy(realve(eigvectors))
    a0Hat = cgl.orbit2slice(a0)[0].squeeze()
    veHat = cgl.ve2slice(eigvectors, a0)
    # second instance with tangent-space integration enabled (6th ctor arg = 1)
    cgl1 = pyCqcgl1d(N, d, h, True, 1, 4.0, 0.8, 0.01, di, 4)
    nstp = 3000
    v0 = eigvectors[0]*1e-7
    # integrate the orbit together with the tangent vector v0
    aaErg, vErg = cgl1.intgvs(a0, v0, nstp, 1, 1)
    aaErgHat, th, phi = cgl1.orbit2slice(aaErg)
    vErgHat = cgl1.ve2slice(vErg, a0)
    aaErgHat -= a0Hat
    e1, e2, e3 = orthAxes(veHat[0], veHat[1], veHat[10])
    aaErgHatProj = np.dot(aaErgHat, np.vstack((e1, e2, e3)).T)
    vErgHatProj = np.dot(vErgHat, np.vstack((e1, e2, e3)).T)
    fig = plt.figure(figsize=[8, 6])
    ax = fig.add_subplot(111, projection='3d')
    ix1 = 000
    ix2 = nstp
    ixs = ix1 + 1400
    # ax.plot(aaErgHatProj[ix1:ix2, 0], aaErgHatProj[ix1:ix2, 1],
    #         aaErgHatProj[ix1:ix2, 2], c='r', lw=1)
    ax.plot(vErgHatProj[ix1:ix2, 0], vErgHatProj[ix1:ix2, 1],
            vErgHatProj[ix1:ix2, 2], c='r', lw=1)
    # ax.scatter([0], [0], [0], s=120)
    ax.set_xlabel(r'$e_1$', fontsize=25)
    ax.set_ylabel(r'$e_2$', fontsize=25)
    ax.set_zlabel(r'$e_3$', fontsize=25)
    fig.tight_layout(pad=0)
    plt.show(block=False)
if case == 20:
    """
    plot unstable manifold
    of the exploding soliton solution from a
    line of states
    """
    N = 1024
    d = 30
    h = 1e-5
    di = 0.05
    cgl = pyCqcgl1d(N, d, h, True, 0, 4.0, 0.8, 0.01, di, 4)
    a0, wth0, wphi0, err = cqcglReadReqdi('../../data/cgl/reqDi.h5',
                                          di, 1)
    eigvalues, eigvectors = eigReq(cgl, a0, wth0, wphi0)
    eigvectors = Tcopy(realve(eigvectors))
    a0Hat = cgl.orbit2slice(a0)[0]
    veHat = cgl.ve2slice(eigvectors, a0)
    a0R = cgl.reflect(a0)
    a0RH = cgl.orbit2slice(a0R)[0]
    # NOTE(review): np.int is deprecated in modern numpy; fine for the
    # legacy environment this Python 2 script targets.
    nstp = np.int(1e5)
    Er = eigvalues[0].real
    Ei = eigvalues[0].imag
    # orthonormalise the leading complex eigenvector pair
    Vr, Vi = orthAxes2(eigvectors[0], eigvectors[1])
    n = 10
    aaE = []
    aaEHat = []
    # launch trajectories from a geometric line of perturbation amplitudes
    for i in range(n):
        print i
        # ang = 2 * i * np.pi / n
        e = np.exp(2*np.pi * Er / Ei / n * i*10)
        a0Erg = a0 + Vr * e * 1e-6
        aaErg = cgl.intg(a0Erg, nstp, 100)
        aaErgHat, th, phi = cgl.orbit2slice(aaErg)
        # aaE.append(aaErg)
        aaEHat.append(aaErgHat - a0Hat)
    e1, e2, e3 = orthAxes(veHat[0], veHat[1], veHat[10])
    aaEHatP = []
    for i in range(n):
        aaErgHatProj = np.dot(aaEHat[i], np.vstack((e1, e2, e3)).T)
        aaEHatP.append(aaErgHatProj)
    ix1 = 0
    ix2 = nstp
    ixs = ix1 + 1400
    fig = plt.figure(figsize=[8, 6])
    ax = fig.add_subplot(111, projection='3d')
    # only the first trajectory is plotted here
    for i in range(1):
        ax.plot(aaEHatP[i][ix1:ix2, 0], aaEHatP[i][ix1:ix2, 1],
                aaEHatP[i][ix1:ix2, 2], c='r', lw=1)
    # ax.scatter([0], [0], [0], s=50)
    # ax.scatter(aaErgHatProj[ixs, 0], aaErgHatProj[ixs, 1],
    #            aaErgHatProj[ixs, 2], s=120, c='k')
    ax.set_xlabel(r'$e_1$', fontsize=25)
    ax.set_ylabel(r'$e_2$', fontsize=25)
    ax.set_zlabel(r'$e_3$', fontsize=25)
    fig.tight_layout(pad=0)
    plt.show(block=False)
if case == 21:
    """
    plot unstable manifold
    of the exploding soliton solution from a
    circular states
    """
    N = 1024
    d = 30
    h = 0.0002
    di = 0.05
    cgl = pyCqcgl1d(N, d, h, True, 0, 4.0, 0.8, 0.01, di, 4)
    a0, wth0, wphi0, err = cqcglReadReqdi('../../data/cgl/reqDi.h5',
                                          di, 1)
    eigvalues, eigvectors = eigReq(cgl, a0, wth0, wphi0)
    eigvectors = Tcopy(realve(eigvectors))
    a0Hat = cgl.orbit2slice(a0)[0].squeeze()
    veHat = cgl.ve2slice(eigvectors, a0)
    nstp = 3000
    Er = eigvalues[0].real
    Ei = eigvalues[0].imag
    Vr, Vi = orthAxes2(eigvectors[0], eigvectors[1])
    n = 30
    aaE = []
    aaEHat = []
    # launch trajectories from a circle of initial perturbations in the
    # plane of the leading eigenvector pair
    for i in range(n):
        print i
        ang = 2 * i * np.pi / n
        a0Erg = a0 + (Vr * np.cos(ang) + Vi * np.sin(ang)) * 1e-4
        aaErg = cgl.intg(a0Erg, nstp, 1)
        aaErgHat, th, phi = cgl.orbit2slice(aaErg)
        # aaE.append(aaErg)
        aaEHat.append(aaErgHat - a0Hat)
    e1, e2, e3 = orthAxes(veHat[0], veHat[1], veHat[10])
    aaEHatP = []
    for i in range(n):
        aaErgHatProj = np.dot(aaEHat[i], np.vstack((e1, e2, e3)).T)
        aaEHatP.append(aaErgHatProj)
    ix1 = 00000
    # NOTE(review): 03000 is an OCTAL literal in Python 2 (== 1536
    # decimal); if 3000 steps were intended the plot is silently
    # truncated -- confirm.
    ix2 = 03000
    ixs = ix1 + 1400
    fig = plt.figure(figsize=[8, 6])
    ax = fig.add_subplot(111, projection='3d')
    for i in range(n):
        ax.plot(aaEHatP[i][ix1:ix2, 0], aaEHatP[i][ix1:ix2, 1],
                aaEHatP[i][ix1:ix2, 2], c='r', lw=1)
    # ax.scatter([0], [0], [0], s=120)
    # ax.scatter(aaErgHatProj[ixs, 0], aaErgHatProj[ixs, 1],
    #            aaErgHatProj[ixs, 2], s=120, c='k')
    ax.set_xlabel(r'$e_1$', fontsize=25)
    ax.set_ylabel(r'$e_2$', fontsize=25)
    ax.set_zlabel(r'$e_3$', fontsize=25)
    fig.tight_layout(pad=0)
    plt.show(block=False)
if case == 30:
    """
    try to obtain the Poincare intersection points
    """
    N = 1024
    d = 50
    h = 0.0001
    cgl = pyCqcgl1d(N, d, h, True, 0,
                    -0.1, 1.0, 0.8, 0.125, 0.5, -0.1, -0.6,
                    4)
    a0, wth0, wphi0, err = cqcglReadReq('../../data/cgl/reqN1024.h5', '1')
    eigvalues, eigvectors = eigReq(cgl, a0, wth0, wphi0)
    eigvectors = realve(eigvectors)
    eigvectors = Tcopy(eigvectors)
    a0Hat = cgl.orbit2slice(a0)[0].squeeze()
    a0Tilde = cgl.reduceReflection(a0Hat)
    veHat = cgl.ve2slice(eigvectors, a0)
    veTilde = cgl.reflectVe(veHat, a0Hat)
    e1, e2, e3 = orthAxes(veTilde[0], veTilde[1], veTilde[6])
    nstp = 60000
    M = 10
    # M starting points spread along the leading unstable eigenvector
    a0Erg = np.empty((M, cgl.Ndim))
    for i in range(M):
        a0Erg[i] = a0 + (i+1) * eigvectors[0]*1e-4
    PointsProj = np.zeros((0, 2))
    PointsFull = np.zeros((0, cgl.Ndim))
    # integrate each trajectory in 30 chunks, accumulating section crossings
    for i in range(30):
        for j in range(M):
            aaErg = cgl.intg(a0Erg[j], nstp, 1)
            aaErgHat, th, phi = cgl.orbit2slice(aaErg)
            aaErgTilde = cgl.reduceReflection(aaErgHat)
            aaErgTilde -= a0Tilde
            aaErgTildeProj = np.dot(aaErgTilde, np.vstack((e1, e2, e3)).T)
            # plotConfigSpace(cgl.Fourier2Config(aaErg),
            #                 [0, d, nstp*h*i, nstp*h*(i+1)])
            points, index, ratios = PoincareLinearInterp(aaErgTildeProj, getIndex=True)
            PointsProj = np.vstack((PointsProj, points))
            # NOTE(review): this loop reuses the name `i`, clobbering the
            # outer chunk counter; harmless here because `i` is reset by
            # the outer loop before its next read, but worth renaming.
            for i in range(len(index)):
                dif = aaErgTilde[index[i]+1] - aaErgTilde[index[i]]
                p = dif * ratios[i] + aaErgTilde[index[i]]
                PointsFull = np.vstack((PointsFull, p))
            # continue each trajectory from where it stopped
            a0Erg[j] = aaErg[-1]
    upTo = PointsProj.shape[0]
    scatter2dfig(PointsProj[:upTo, 0], PointsProj[:upTo, 1], s=10,
                 labs=[r'$e_2$', r'$e_3$'])
    dis = getCurveCoor(PointsFull)
    # np.savez_compressed('PoincarePoints', totalPoints=totalPoints)
if case == 40:
    """
    plot the Poincare intersection points
    """
    # reload section points saved by the case-30/50 runs and plot them
    totalPoints = np.load('PoincarePoints.npz')['totalPoints']
    multiScatter2dfig([totalPoints[:280, 0], totalPoints[280:350, 0]],
                      [totalPoints[:280, 1], totalPoints[280:350, 1]],
                      s=[15, 15], marker=['o', 'o'], fc=['r', 'b'],
                      labs=[r'$e_2$', r'$e_3$'])
    scatter2dfig(totalPoints[:, 0], totalPoints[:, 1], s=10,
                 labs=[r'$e_2$', r'$e_3$'])
if case == 50:
    """
    use the new constructor of cqcgl
    try to obtain the Poincare intersection points
    """
    # same procedure as case 30, with the newer pyCqcgl1d signature
    N = 1024
    d = 40
    h = 0.0005
    cgl = pyCqcgl1d(N, d, h, True, 0, 4.0, 0.8, -0.01, -0.04, 4)
    a0, wth0, wphi0, err = cqcglReadReq('../../data/cgl/reqN1024.h5', '1')
    eigvalues, eigvectors = eigReq(cgl, a0, wth0, wphi0)
    eigvectors = realve(eigvectors)
    eigvectors = Tcopy(eigvectors)
    a0Hat = cgl.orbit2slice(a0)[0].squeeze()
    a0Tilde = cgl.reduceReflection(a0Hat)
    veHat = cgl.ve2slice(eigvectors, a0)
    veTilde = cgl.reflectVe(veHat, a0Hat)
    e1, e2, e3 = orthAxes(veTilde[0], veTilde[1], veTilde[6])
    nstp = 60000
    M = 10
    a0Erg = np.empty((M, cgl.Ndim))
    for i in range(M):
        a0Erg[i] = a0 + (i+1) * eigvectors[0]*1e-4
    PointsProj = np.zeros((0, 2))
    PointsFull = np.zeros((0, cgl.Ndim))
    for i in range(30):
        for j in range(M):
            aaErg = cgl.intg(a0Erg[j], nstp, 1)
            aaErgHat, th, phi = cgl.orbit2slice(aaErg)
            aaErgTilde = cgl.reduceReflection(aaErgHat)
            aaErgTilde -= a0Tilde
            aaErgTildeProj = np.dot(aaErgTilde, np.vstack((e1, e2, e3)).T)
            # plotConfigSpace(cgl.Fourier2Config(aaErg),
            #                 [0, d, nstp*h*i, nstp*h*(i+1)])
            points, index, ratios = PoincareLinearInterp(aaErgTildeProj, getIndex=True)
            PointsProj = np.vstack((PointsProj, points))
            # NOTE(review): inner loop shadows the outer counter `i`
            # (same smell as case 30) -- harmless but worth renaming.
            for i in range(len(index)):
                dif = aaErgTilde[index[i]+1] - aaErgTilde[index[i]]
                p = dif * ratios[i] + aaErgTilde[index[i]]
                PointsFull = np.vstack((PointsFull, p))
            a0Erg[j] = aaErg[-1]
    upTo = PointsProj.shape[0]
    scatter2dfig(PointsProj[:upTo, 0], PointsProj[:upTo, 1], s=10,
                 labs=[r'$e_2$', r'$e_3$'])
    dis = getCurveCoor(PointsFull)
    # np.savez_compressed('PoincarePoints', totalPoints=totalPoints)
if case == 60:
    """
    change parameter of b, see how the soliton changes
    """
    N = 1024
    d = 30
    di = 0.06
    b = 4
    T = 3
    cgl = pyCQCGL(N, d, b, 0.8, 0.01, di, 0, 4)
    cgl.changeOmega(-176.67504941219335)
    Ndim = cgl.Ndim
    # random initial condition concentrated in the middle of the domain
    A0 = 5*centerRand(N, 0.1, True)
    a0 = cgl.Config2Fourier(A0)
    # adaptive-step integration up to time T
    aa = cgl.aintg(a0, 0.001, T, 1)
    plotConfigSpaceFromFourier(cgl, aa, [0, d, 0, T])
    t1 = cgl.Ts()
    plot2dfig(t1, aa[:, 0], labs=['t', r'$Re(a_0)$'])
|
22,677 | 672154a50bf50cf20d64c73cffaea2bc6cb2fdc6 | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
def monkey_count(n: int) -> list:
    """Return the counting sequence ``[1, 2, ..., n]`` (zero excluded).

    You take your son to the forest to see the monkeys.  You know there
    are a certain number (n), but he has to count them from 1, so
    produce the full counting list for him.

    :param n: total number of monkeys; ``n <= 0`` yields an empty list
    :return: list of integers from 1 up to and including n
    """
    # list(range(...)) replaces the original append loop, whose loop
    # variable also shadowed the parameter n.
    return list(range(1, n + 1))
|
22,678 | 641dd9559594480de0bcd14fde2917b115823234 | import gettext
import locale
from typing import Dict
default_lang = "en"

# Derive the UI language from the system locale; fall back to English
# when the locale cannot be determined (e.g. unset or malformed).
try:
    locale_lang, encoding = locale.getlocale()
    lang = locale_lang.split("_")[0] if locale_lang else default_lang
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # still propagate.  If unable to get the locale language, use English.
    lang = default_lang

try:
    language = gettext.translation('numerology', localedir='locale', languages=[lang])
except OSError:
    # gettext.translation raises FileNotFoundError (an OSError) when no
    # catalog exists for `lang`; fall back to the default language
    # (English).  Narrowed from a bare `except:`.
    language = gettext.translation('numerology', localedir='locale', languages=[default_lang])
language.install()
_ = language.gettext
class LifePathNumber:
    """Static catalog of life-path-number interpretations.

    Maps each supported life path number (1-9 plus the master numbers
    11 and 22) to a localized ``title`` and long-form ``description``.
    The strings pass through gettext's ``_()`` at import time, so the
    translation active when this module is imported is applied.
    """

    # number -> {"title": ..., "description": ...}
    meanings: Dict[int, Dict[str, str]] = {
        1: {
            "title": _("Individual action life"),
            "description": _(
                "This life path promotes personal success and individual achievement. It symbolizes an active life and is often the scene of unexpected changes. It can be difficult at times, but a certain amount of luck prevails and helps to overcome obstacles.\nRequirements: The qualities necessary to take on this life path are: willpower, courage, self-confidence and perseverance.\nChallenges: This path is difficult for those who have 1 as the missing digit, and the expression numbers 2 and 4."
            ),
        },
        2: {
            "title": _("Life of collaboration and harmony with others"),
            "description": _(
                "This life path favors association and marriage. Affection and friendship are sought. It symbolizes a certain passivity and there is sometimes a tendency to live according to events. There are many twists and turns and success comes with time unless it comes unexpectedly with the help of others.\nRequirements: The qualities needed to successfully take on this life path are: diplomacy, patience and balance.\nChallenges: This path is difficult for those who have 2 as a missing digit, and the expression numbers 1, 5, 9, 11 and 22."
            ),
        },
        3: {
            "title": _("Life of creativity and expression"),
            "description": _(
                "This life path favors contact activities and relationships with others. It symbolizes a pleasant and sociable life with few obstacles and the possibility of achieving success fairly quickly (and sometimes brilliantly). Those who show creativity, ingenuity and drive in business are happy and successful on this path.\nRequirements: The qualities necessary to successfully take on this life path are: extroversion, a sense of relationships and contacts and ambition.\nChallenges: This path is difficult for those who have 3 as the missing number, and the number of expression 4."
            ),
        },
        4: {
            "title": _("Life of work and construction"),
            "description": _(
                "This life path promotes success through hard work and steady effort. It symbolizes stable and serious endeavors and generally allows for solid success, even if progress is slow. It involves few risks, but lacks a certain fantasy and nothing is gained easily.\nRequirements: The qualities necessary to assume this path of life are: love of work well done, regularity, rigor and perseverance.\nChallenges: This path is difficult for those who have 4 as the missing number, and the expression numbers 1, 3, 5, 8 and 11."
            ),
        },
        5: {
            "title": _("Life of mobility and change"),
            "description": _(
                "This life path favors changes in all areas of life. Life undergoes frequent transformations. It symbolizes travel, physical activity and personal freedom; sometimes adventure. It promises an exciting life full of unexpected events, but it also involves risks, as well as the threat of accidents.\nRequirements: The qualities needed to successfully take on this path of life are: flexibility, adaptability as well as boldness and health (moral and physical).\nChallenges: This path is difficult for those who have 5 as the missing number, and the expression numbers 2, 4 and 6."
            ),
        },
        6: {
            "title": _("A life of responsibility and emotional harmony"),
            "description": _(
                "This life path involves choices to be made and it is not always easy to move in the right direction. If the chosen path is positive and ambitious, the ascent is rapid and leads far and high. If not, there is hesitation in several directions, the consequences of which are rarely beneficial. It symbolizes responsibilities that can also turn into burdens or trials. It is also the path of love and marriage as the 6 is home and family.\nRequirements: The qualities needed to take on this life path are: willpower (because free will is predominant on this path), a spirit of conciliation and adaptation. There is a natural tendency to perfectionism which can cause problems.\nChallenges: This path is difficult for those who have 6 as the missing number, and the expression number 5."
            ),
        },
        7: {
            "title": _("Inner life and independence"),
            "description": _(
                "This life path favors the work of the mind, the inner or spiritual life and everything that has to do with analysis or research. It symbolizes the taste for independence and sometimes solitude. It often characterizes an original destiny, with a selfless success. Friendships are a source of great satisfaction, but marriage is not always easy.\nRequirements: The qualities necessary to take on this path of life are: interest in others, reflection and disinterestedness in the material aspects of life.\nChallenges: This path is difficult for those who have 7 as the missing number (delays and delays in achievements), and the expression numbers 8, 11 and 22."
            ),
        },
        8: {
            "title": _("Life of ambitious achievements and material acquisitions"),
            "description": _(
                "This life path favors ambitions and large-scale achievements. It symbolizes power, money and materiality. It is difficult because it involves risks and trials but it can lead to extraordinary success. It is not free from accidents and health problems.\nRequirements: The qualities needed to take on this path of life are: courage, endurance and a sense of balance.\nChallenges: This path is difficult for those who have 8 as the missing number, and the expression numbers 4 and 7. The 2s and 9s are not always at ease."
            ),
        },
        9: {
            "title": _("Life of escape or ideal"),
            "description": _(
                "This life path favors journeys: those of the spirit or of the soul, and those that one can undertake on the planet. It promises many encounters and varied experiences. It symbolizes the search for an ideal or the realization of a vocation. It involves a lot of emotions and success often manifests itself in an unexpected way, not without pitfalls and obstacles.\nRequirements: The qualities necessary to successfully take on this life path are: understanding and dedication, sensitivity and an open mind, as well as courage. Sometimes it is necessary to overcome a tendency to dreams and illusions.\nChallenges: This path is difficult for those who have 9 as the missing number, and the expression numbers 2 and 8."
            ),
        },
        11: {
            "title": _("Life of inspiration and mastery"),
            "description": _(
                "This life path favors the achievement of ambitious or original accomplishments. It symbolizes inspiration, intelligence and leadership. It is not an easy path to live because the vibrations are strong and do not tolerate limitations or restrictions. Sometimes it brings great success followed by a collapse that forces you to start all over again.\nRequirements: The qualities necessary to assume this life path are: a strong character and a great will. Tendency to impatience and nervousness that must be curbed.\nChallenges: This path is difficult for those who have 1 and 2 as missing numbers, and the expression numbers 2, 4 and 7."
            ),
        },
        22: {
            "title": _("Life of significant achievement"),
            "description": _(
                "This life path favors high level projects that may be of interest to a community, a country or even the world. It symbolizes superior intelligence and universal interest. There is little room for personal and daily existence because it implies an overflowing activity. There is a great desire to build for others at the base of this master number.\nRequirements: The qualities necessary to take on this life path are: a great humanism, the ability to carry out great projects, the power to concretely realize sometimes utopian ambitions... exceptional qualities.\nChallenges: This path is difficult for those who have 4 as the missing number, and the expression numbers 2, 4 and 7."
            ),
        },
    }
|
22,679 | 1509b6c712004caeb9d062c1c81e032bb037ee32 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#=== Sokrates Auto Test =============================================
# Date : 2022.02.17
# Program : main sok test
# Author : daniel
#====================================================================
import unittest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import visibility_of_element_located
from time import sleep
from login import login
from sort_vedio import sort_vedio
from play_vedio import play_vedio
from add_evaluate import add_evaluate
from upload_vedio import upload_vedio
from edit_tbas import edit_tbas
from video_share import video_share
from my_public_comment import my_public_comment
class TestSok(unittest.TestCase):
    """Selenium UI smoke tests for the Sokrates site.

    unittest only collects methods whose names start with ``test``; the
    helper methods below without that prefix are effectively disabled
    test cases (left in place so they can be re-enabled by renaming).
    """

    def setUp(self):  # runs before every test: start a browser and log in
        # login
        driver = webdriver.Firefox()
        # driver = webdriver.Chrome()
        driver.implicitly_wait(7)
        self.driver = driver
        url = "https://sokrates.teammodel.org/"
        self.url = url
        login(driver, url)

    def tearDown(self):  # runs after every test: close the browser
        self.driver.quit()
        sleep(1)

    def sort_vedio(self):  # feature under test; rename to test_* to enable
        sort_vedio(self.driver)

    def test_play_vedio(self):
        play_vedio(self.driver)

    def test_add_evaluate(self):
        add_evaluate(self.driver)

    def upload_vedio(self):  # disabled: no "test" prefix
        upload_vedio(self.driver)

    def edit_tbas(self):  # disabled: no "test" prefix
        edit_tbas(self.driver)

    def video_share(self):  # disabled: no "test" prefix
        video_share(self.driver, self.url)

    def my_public_comment(self):  # disabled: no "test" prefix
        my_public_comment(self.driver, self.url)
# Run the suite when the file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
22,680 | 8bac9d7bbdf337833545f55074952e1b93fed099 | class DoublyLinkedList:
"""Doubly Linked List implementation
Returns:
DoublyLinkedList
"""
class Node:
"""Inner node classes used to create
individual nodes for the Doubly Linked List
"""
def __init__(self, data, prev, nxt):
"""Constructor of each node
Args:
data : An object to be held
prev : Reference to previous node
nxt : Reference to next node
"""
self.data = data
self.prev = prev
self.nxt = nxt
def __repr__(self):
"""Convert Node to string
Returns:
str: Node data
"""
return str(self.data)
def __init__(self):
"""Constructor
"""
self.size = 0
self.head = None
self.tail = None
self.travIter = None
def clear(self):
"""Empty the linked list O(n)
"""
trav = self.head
while trav is not None:
nxt = trav.nxt
trav.prev = trav.nxt
trav.data = None
trav = nxt
self.head = None
self.tail = None
trav = None
self.size = 0
def __len__(self):
"""
Return number of elements sorted in array
"""
return self.llSize
def size(self):
"""Return the size of linked list
Returns:
int: Linked list size
"""
return self.size
def is_empty(self):
"""Check if the list is empty
Returns:
bool: Comapres size to 0
"""
return self.size == 0
def add(self, elem):
"""Add an element to the linked list using add_last
Args:
elem (object): Any type of object to be added
"""
self.add_last(elem)
def add_first(self, elem):
"""Add element to the front of the linked list
Args:
elem (object): Any type of object to be added
"""
if self.is_empty():
self.head = self.tail = self.Node(elem, None, None)
else:
self.head.prev = self.Node(elem, None, self.head)
self.head = self.head.prev
self.size += 1
def add_last(self, elem):
"""Add to the end of the Linked list by append to
tail.next O(n)
Args:
elem (object): Any type of object to be added
"""
if self.is_empty():
self.head = self.tail = self.Node(elem, None, None)
else:
self.tail.nxt = self.Node(elem, self.tail, None)
self.tail = self.tail.nxt
self.size += 1
def peek_first(self):
"""Obtain data from head of linked list O(1)
Raises:
RuntimeError: Empty list
Returns:
object: Data held in head Node
"""
if self.is_empty(): raise RuntimeError("Empty list")
return self.head.data
def peek_last(self):
"""Obtain data from tail of linked list O(1)
Raises:
RuntimeError: Empty list
Returns:
Object: Data held in tail Node
"""
if self.is_empty(): raise RuntimeError("Empty list")
return self.tail.data
def remove_first(self):
"""Remove Node at the head of the linked list O(1)
Raises:
RuntimeError: Empty list
Returns:
Object: Data of the removed node
"""
if self.is_empty(): raise RuntimeError("Empty list")
data = self.head.data
self.head = self.head.nxt
self.size -= 1
if self.is_empty(): self.tail = None
else: self.head.prev = None
return data
def remove_last(self):
if self.is_empty(): raise RuntimeError("Empty list")
data = self.tail.data
self.tail = self.tail.prev
self.size -= 1
if self.is_empty(): self.head = None
else: self.tail.nxt = None
return data
def __remove__(self, node):
if node.prev == None: return self.remove_first()
if node.nxt == None: return self.remove_last()
node.nxt.prev = node.prev
node.prev.nxt = node.nxt
data = node.data
node.data = None
node.nxt = None
node.prev = None
node = None
self.size -= 1
return data
def remove_at(self, index):
if index < 0 or index >= size: raise ValueError("Wrong index")
i = 0
trav = None
if index < self.size / 2:
trav = self.head
while i != index:
i += 1
trav= trav.nxt
else:
i = self.size
trav = self.tail
while i != index:
i -= 1
trav = trav.prev
return self.__remove__(trav)
def remove(self, obj):
trav = self.head
if obj == None:
while trav is not None:
if trav.data is None:
self.__remove__(trav)
return True
trav = trav.nxt
else:
trav = self.head
while trav is not None:
if obj == trav.data:
self.__remove__(trav)
return True
trav = trav.nxt
return False
def index_of(self, obj):
index = 0
trav = self.head
if obj is None:
while trav is not None:
if trav.data is None:
return index
trav = trav.nxt
index += 1
else:
while trav is not None:
if trav.data == obj:
return index
trav = trav.nxt
index += 1
return -1
def contains(self, obj):
return self.index_of(obj) != -1
    def __iter__(self):
        """Return an iterator over the list's data, starting at the head.

        NOTE(review): the cursor is stored on the list itself, so only one
        iteration can be active at a time — confirm callers never nest loops.
        """
        self.travIter = self.head
        return self
def __next__(self):
if self.travIter is None:
raise StopIteration
data = self.travIter.data
self.travIter = self.travIter.nxt
return data
def __str__(self):
sb = ""
sb = sb + '[ '
trav = self.head
while trav is not None:
sb = sb + str(trav.data)
if trav.nxt is not None:
sb = sb + ' - '
trav = trav.nxt
sb = sb + ' ]'
return sb
|
22,681 | 6795284cd5cc3eb866b7ca395abc9ba6fb81f5e6 | from zuper_commons.logs import ZLogger
logger = ZLogger(__name__) |
22,682 | 69ac7c89e71758ce6ae30dcc1c6033e2a54ad3e0 | '''
Author: @amitrajitbose
Problem : https://www.hackerearth.com/practice/math/number-theory/totient-function/practice-problems/algorithm/euler-totient-function/
'''
def SieveOfEratosthenes(n):
    """Return a boolean list of length n+1 where entry i is True iff i is prime.

    Classic sieve, O(n log log n).

    Args:
        n (int): Upper bound (inclusive) of the sieve; n >= 0.

    Returns:
        list[bool]: prime[i] is True exactly when i is a prime number.
    """
    prime = [True for i in range(n+1)]
    p = 2
    while p * p <= n:
        # If prime[p] is still True it is a prime: mark its multiples.
        if prime[p]:
            for i in range(p * 2, n+1, p):
                prime[i] = False
        p += 1
    # BUG FIX: the original left prime[0] as True (0 is not prime) and
    # crashed with IndexError on prime[1] when n == 0.
    for i in range(min(2, n + 1)):
        prime[i] = False
    return prime
# Read N and compute Euler's totient phi(N) via the product formula:
# phi(N) = N * prod(1 - 1/p) over the distinct prime divisors p of N.
N=int(input())
prime=SieveOfEratosthenes(N+1)
ans=N
for i in range(2,N+1):
    # Every prime divisor p of N contributes a (1 - 1/p) factor.
    if(prime[i] and N%i==0):
        ans=ans*(1-(1/i))
print(int(ans))
|
22,683 | 89bc225e285c7e04e19d1c8e866967d70568f0f6 | #!/usr/bin/env python
import os
import argparse
import glob
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='')
parser.add_argument("-r","--rawdata",help="data archived path")
parser.add_argument("-p","--product",help="data archived path")
args = parser.parse_args()
rawdata_path = args.rawdata
product_path = args.product
def readfile(instru='HE'):
    """Collect the per-instrument input files for light-curve extraction.

    Args:
        instru (str): Instrument name: 'HE', 'ME', or anything else for LE.

    Returns:
        tuple of lists: (screenfile, blindfile, ehkfile, gtifile,
        tempfile, deadfile); each is a glob result and may be empty.
    """
    # Blind-detector events feed the background estimation tools.
    blindfile = glob.glob(os.path.join(product_path,instru,'cleaned','*blind*'))
    ehkfile = glob.glob(os.path.join(rawdata_path,'AUX','*EHK*'))
    gtifile = glob.glob(os.path.join(product_path,instru,'tmp','*gti*'))
    tempfile = glob.glob(os.path.join(rawdata_path,instru,'*TH*'))
    if instru == 'HE':
        deadfile = glob.glob(os.path.join(rawdata_path,instru,'*DTime*'))
        screenfile = glob.glob(os.path.join(product_path,instru,'cleaned','he_screen_NaI.fits'))
    elif instru == 'ME':
        deadfile = glob.glob(os.path.join(product_path,instru,'tmp','*dtime*'))
        screenfile = glob.glob(os.path.join(product_path,instru,'cleaned','me_screen_smfov.fits'))
    else:
        # LE has no dead-time file.
        deadfile = []
        screenfile = glob.glob(os.path.join(product_path,instru,'cleaned','le_screen_smfov.fits'))
    return screenfile, blindfile, ehkfile, gtifile, tempfile, deadfile
def helcgen(evtfile, outpath, deadfile, binsize=1, minPI=20, maxPI=200):
    """Shell out to the HXMT `helcgen` tool to extract the HE light curve
    for the non-blind NaI detectors (userdetid "0-15,17").
    NOTE: this module uses Python 2 print statements throughout."""
    print evtfile, deadfile
    lc_text = 'helcgen evtfile=%s outfile=%s deadfile=%s userdetid=%s eventtype=1 binsize=%s minPI=%s maxPI=%s clobber=yes'\
            %(evtfile[0], outpath+'he_lc', deadfile[0], '"0-15,17"',\
            str(binsize),str(minPI), str(maxPI))
    print(lc_text)
    os.system(lc_text)
    return
def melcgen(evtfile, outpath, deadfile, binsize=1, minPI=0, maxPI=1024):
    """Shell out to `melcgen` to extract the ME small-FoV light curve."""
    lc_text = 'melcgen evtfile=%s outfile=%s deadfile=%s userdetid=%s binsize=%s minPI=%s maxPI=%s clobber=yes'\
            %(evtfile[0], outpath+'me_lc_smfov', deadfile[0], '"0-5, 7, 12-23, 25, 30-41, 43, 48-53"',\
            str(binsize),str(minPI), str(maxPI))
    print(lc_text)
    os.system(lc_text)
    return
def lelcgen(evtfile, outpath, binsize=1, minPI=0, maxPI=1035):
    """Shell out to `lelcgen` to extract the LE small-FoV light curve
    (LE has no dead-time correction, hence no deadfile parameter)."""
    lc_text = 'lelcgen evtfile=%s outfile=%s userdetid=%s eventtype=1 binsize=%s minPI=%s maxPI=%s clobber=yes'\
            %(evtfile[0], outpath+'le_lc_smfov',\
            '"0,2-4,6-10,12,14,20,22-26,28-30,32,34-36,38-42,44,46,52,54-58,60-62,64,66-68,70-74,76,78,84,86-90,92-94"',\
            str(binsize),str(minPI), str(maxPI))
    print lc_text
    os.system(lc_text)
    return
def mk_lcdir(outpath):
    """Create *outpath* (including parents) if it does not already exist.

    FIX: replaces the original `os.system('mkdir -p ' + outpath)`, which
    spawned a shell and broke (or worse) on paths containing spaces or
    shell metacharacters; os.makedirs is portable and injection-safe.
    """
    if not os.path.isdir(outpath):
        os.makedirs(outpath)
def genlist(outpath, instru='HE'):
    """Write lightcurve.lst in *outpath*, naming the per-instrument light
    curve that the *bkgmap tools consume, and return the list file's path.

    NOTE(review): the listed .lc names must match what the lcgen tools
    actually produce (group/detector suffixes) — confirm on real output.
    """
    if instru == 'HE':
        listfile = [os.path.join(product_path,instru,'lightcurve','he_lc_g0_0-17.lc')]
    elif instru == 'ME':
        listfile = [os.path.join(product_path,instru,'lightcurve','me_lc_smfov_g0_0-53.lc')]
    else:
        listfile = [os.path.join(product_path,instru,'lightcurve','le_lc_smfov_g0_0-94.lc')]
    outname = os.path.join(outpath,'lightcurve.lst')
    with open(outname,'w') as fout:
        for item in listfile:
            wrt_str = item + '\n'
            fout.write(wrt_str)
    return outname
def hebkgmap(blindfile, ehkfile, gtifile, deadfile, listfile, outpath, minPI=20, maxPI=200):
    """Shell out to `hebkgmap` to build the HE light-curve background."""
    lcbkgmap_text = 'hebkgmap lc %s %s %s %s %s %s %s %s'\
            %(blindfile[0], ehkfile[0], gtifile[0], deadfile[0], listfile, str(minPI), str(maxPI),\
            os.path.join(outpath,'lc_bkgmap'))
    print lcbkgmap_text
    os.system(lcbkgmap_text)
    return
def mebkgmap(blindfile, ehkfile, gtifile, deadfile, tempfile, listfile, outpath, minPI=0, maxPI=1024):
    """Shell out to `mebkgmap` (ME additionally needs the temperature file)."""
    lcbkgmap_text = 'mebkgmap lc %s %s %s %s %s %s %s %s %s'\
            %(blindfile[0], ehkfile[0], gtifile[0], deadfile[0], tempfile[0], listfile, str(minPI), str(maxPI),\
            os.path.join(outpath,'lc_bkgmap'))
    print lcbkgmap_text
    os.system(lcbkgmap_text)
    return
def lebkgmap(blindfile, gtifile, listfile, outpath, minPI=0, maxPI=1535):
    """Shell out to `lebkgmap` (LE needs neither EHK nor dead-time input)."""
    lcbkgmap_text = 'lebkgmap lc %s %s %s %s %s %s'\
            %(blindfile[0], gtifile[0], listfile, str(minPI), str(maxPI),\
            os.path.join(outpath,'lc_bkgmap'))
    print lcbkgmap_text
    os.system(lcbkgmap_text)
    return
if __name__ == '__main__':
    # HE: extract the 20-200 PI light curve, then estimate its background.
    outpath = os.path.join(product_path,'HE','lightcurve/')
    mk_lcdir(outpath)
    screenfile, blindfile, ehkfile, gtifile, _, deadfile = readfile(instru='HE')
    helcgen(screenfile, outpath, deadfile, minPI=20, maxPI=200)
    listfile = genlist(outpath, instru='HE')
    hebkgmap(blindfile, ehkfile, gtifile, deadfile, listfile, outpath, minPI=20, maxPI=200)
    # ME: same pipeline with the ME tools plus the temperature file.
    outpath = os.path.join(product_path,'ME','lightcurve/')
    mk_lcdir(outpath)
    screenfile, blindfile, ehkfile, gtifile, tempfile, deadfile = readfile(instru='ME')
    listfile = genlist(outpath, instru='ME')
    melcgen(screenfile, outpath, deadfile)
    mebkgmap(blindfile, ehkfile, gtifile, deadfile, tempfile, listfile, outpath, minPI=0, maxPI=1024)
    # LE: no dead-time correction, different PI band.
    outpath = os.path.join(product_path,'LE','lightcurve/')
    mk_lcdir(outpath)
    screenfile, blindfile, ehkfile, gtifile, tempfile, deadfile = readfile(instru='LE')
    listfile = genlist(outpath, instru='LE')
    lelcgen(screenfile, outpath)
    lebkgmap(blindfile, gtifile, listfile, outpath, minPI=0, maxPI=1535)
|
22,684 | de6c6cc7fe72cf50dbe60a81769b70e9fb4d572e | from AESEncryptor import AESModeCTR
import os
from hashlib import sha256
import numpy
class MTProxy:
    """Namespace for MTProto proxy transport helpers."""
    class MTProtoPacket:
        """MTProto 'obfuscated2' transport framing.

        A packet starts with a 64-byte random header; bytes 8-39 (slice
        [7:39]) seed the AES-CTR key and bytes 40-55 (slice [39:55]) the IV.
        When a proxy *secret* is configured, the key is SHA-256(key || secret).
        The server side derives its keys from the byte-reversed header.
        """
        def __init__(self):
            pass
        def obfuscated2(self,raw_data,secret):
            """Client-side encrypt: random header + AES-CTR payload."""
            obf_enc_key_bytes = bytes(os.urandom(64))
            obf_enc_key = obf_enc_key_bytes[7:39] # 8 - 39 bytes [32]
            obf_enc_iv = obf_enc_key_bytes[39:55] # 40 - 55 bytes [16]
            secret = (secret.encode('UTF-8'))
            # Mix the shared secret into the key.
            obf_enc_key = sha256(b'%s%s'%(obf_enc_key,secret)).digest()
            encryptor = AESModeCTR (key=obf_enc_key,
                                    iv=obf_enc_iv)
            enc_data = encryptor.encrypt(raw_data)
            return obf_enc_key_bytes + enc_data
        def deobfuscated2(self,enc_data,secret):
            """Client-side decrypt; key material comes from the reversed header."""
            obf_dec_key_bytes = bytes(enc_data[0:64])[::-1]
            obf_dec_key = obf_dec_key_bytes[7:39] # 8 - 39 bytes [32]
            obf_dec_iv = obf_dec_key_bytes[39:55] # 40 - 55 bytes [16]
            secret = (secret.encode ('UTF-8'))
            obf_dec_key = sha256 (b'%s%s' % (obf_dec_key, secret)).digest ()
            encryptor = AESModeCTR (key=obf_dec_key,
                                    iv=obf_dec_iv)
            raw_data = encryptor.decrypt(enc_data[64:])
            return raw_data
        def serverside_deobfuscated2(self, enc_data,secret=None):
            """Server-side decrypt: header is read as-is; secret optional."""
            obf_dec_key = enc_data[7:39] # 8 - 39 bytes [32]
            obf_dec_iv = enc_data[39:55] # 40 - 55 bytes [16]
            if secret:
                secret = secret.encode ('UTF-8')
                obf_dec_key = sha256 (b'%s%s' % (obf_dec_key, secret)).digest ()
            encryptor = AESModeCTR(key=obf_dec_key,
                                   iv=obf_dec_iv)
            raw_data = encryptor.decrypt(enc_data[64:])
            return raw_data
        def serverside_obfuscated2(self, raw_data, secret=None):
            """Server-side encrypt: derive keys first, then emit the
            byte-reversed header ahead of the ciphertext."""
            obf_enc_key_bytes = os.urandom(64)
            obf_enc_key = (obf_enc_key_bytes[7:39]) # 8 - 39 bytes [32]
            obf_enc_iv = (obf_enc_key_bytes[39:55]) # 40 - 55 bytes [16]
            obf_enc_key_bytes = obf_enc_key_bytes[::-1]
            if secret:
                secret = secret.encode('UTF-8')
                obf_enc_key = sha256(b'%s%s' % (obf_enc_key, secret)).digest()
            encryptor = AESModeCTR (key=obf_enc_key,
                                    iv=obf_enc_iv)
            enc_data = encryptor.encrypt(raw_data)
            return obf_enc_key_bytes + enc_data
|
22,685 | 25f9b93987a28f819c4bf07ff09876792e768eeb | from datetime import datetime
class FormUtils:
    """Helpers for building form choice lists."""

    @staticmethod
    def get_birthday_years():
        """Return candidate birth years, newest first.

        Covers a 130-year maximum lifespan ending at the current year.

        Returns:
            tuple[int, ...]: Years in descending order, length 130.
        """
        # Modernized: the pre-decorator `staticmethod(__get_birthday_years)`
        # idiom and the manual generator are replaced by @staticmethod and
        # tuple(range(...)); the public name and behavior are unchanged.
        current_year = datetime.now().year
        maximum_lifespan = 130  # in years
        return tuple(range(current_year,
                           current_year - maximum_lifespan,
                           -1))
|
22,686 | 7fc2d2ee31f8cb8f73e9c4fc75d3fa16214818d0 | from django.conf.urls import patterns, include, url
from django.contrib import admin

# BUG FIX: the original wrote `admin.autodiscover` without parentheses,
# which merely references the function and never calls it, so each app's
# admin.py registrations were never loaded.
admin.autodiscover()

# Project-level URL routing: delegate to the bundled app's urls module,
# and expose the Django admin site.
urlpatterns = patterns('',
    url(r'^app/', include('app.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
22,687 | d5d73dc23370874d4f0d25a542634cda9ccb6f2f | from django.db import models
class details(models.Model):
    """Job-applicant record captured from the application form."""

    Applicant_name = models.CharField(max_length=20)
    Applicant_mail = models.EmailField(max_length=30)
    # How the applicant found the opening (referral / company site / advert).
    Applicant_channel = models.CharField(max_length=20, default='refer or companysite or advt')
    # NOTE(review): '/ separator' is not a valid DateField default — it looks
    # like form help-text that ended up as a default; confirm intent.
    Date_of_birth = models.DateField(default='/ separator')
    # FIX: IntegerField/FloatField do not accept max_length — Django ignores
    # it and raises system-check warning fields.W122 — so those kwargs are
    # removed. The database schema is unaffected.
    Mobile_number = models.IntegerField()
    year_gaps = models.IntegerField()
    SSC_percentage = models.FloatField()
    Inter_or_equi_percentage = models.FloatField()
    Graduation_percentage = models.FloatField()
    Year_passedout = models.CharField(max_length=10)
|
22,688 | 9e8fcc0a205ff397580e6f8d61e79219f7e1dca4 | import os
import numpy as np
import torch
import tqdm
import config as cfg
import grid
import city
import policy
import relabel
import rl
import torch
from torch.utils import tensorboard
def pad(episodes):
    """Pads episodes to a common length by repeating each one's last exp.

    Args:
        episodes (list[list[Experience]]): episodes to pad.

    Returns:
        padded_episodes (list[list[Experience]]): shape (batch_size, max_len).
        mask (torch.BoolTensor): shape (batch_size, max_len); False marks
            padded positions.
    """
    target_len = max(len(ep) for ep in episodes)
    mask = torch.zeros((len(episodes), target_len), dtype=torch.bool)
    padded_episodes = []
    for row, episode in enumerate(episodes):
        filler = [episode[-1]] * (target_len - len(episode))
        padded_episodes.append(episode + filler)
        mask[row, :len(episode)] = True
    return padded_episodes, mask
class EpisodeAndStepWriter(object):
    """Logs to tensorboard against both episode and number of steps."""
    def __init__(self, log_dir):
        # Two parallel writers so every metric can be viewed on either the
        # episode axis or the environment-step axis.
        self._episode_writer = tensorboard.SummaryWriter(
                os.path.join(log_dir, "episode"))
        self._step_writer = tensorboard.SummaryWriter(
                os.path.join(log_dir, "step"))
    def add_scalar(self, key, value, episode, step):
        """Log *value* under *key* at the given episode and step indices."""
        self._episode_writer.add_scalar(key, value, episode)
        self._step_writer.add_scalar(key, value, step)
def run_episode(env, policy, experience_observers=None, test=False):
    """Runs a single episode on the environment following the policy.

    Args:
        env (gym.Environment): environment to run on.
        policy (Policy): policy to follow.
        experience_observers (list[Callable] | None): each observer is called with
            with each experience at each timestep.
        test (bool): when True, acts in test mode and renders every step.

    Returns:
        episode (list[Experience]): experiences from the episode.
        renders (list[object | None]): renderings of the episode, only rendered if
            test=True. Otherwise, returns list of Nones.
    """
    # Optimization: rendering takes a lot of time.
    def maybe_render(env, action, reward, timestep):
        if test:
            render = env.render()
            render.write_text("Action: {}".format(str(action)))
            render.write_text("Reward: {}".format(reward))
            render.write_text("Timestep: {}".format(timestep))
            return render
        return None

    if experience_observers is None:
        experience_observers = []

    episode = []
    state = env.reset()
    timestep = 0
    # Initial frame has no action yet.
    renders = [maybe_render(env, None, 0, timestep)]
    hidden_state = None
    while True:
        # The recurrent policy threads its hidden state through the loop;
        # both the pre- and post-action hidden states are recorded in the
        # Experience so training can recompute the policy exactly.
        action, next_hidden_state = policy.act(
                state, hidden_state, test=test)
        next_state, reward, done, info = env.step(action)
        timestep += 1
        renders.append(
                maybe_render(env, grid.Action(action), reward, timestep))
        experience = rl.Experience(
                state, action, reward, next_state, done, info, hidden_state,
                next_hidden_state)
        episode.append(experience)
        for observer in experience_observers:
            observer(experience)
        state = next_state
        hidden_state = next_hidden_state
        if done:
            return episode, renders
def get_env_class(environment_type):
    """Returns the environment class specified by the type.

    Args:
        environment_type (str): a valid environment type.

    Returns:
        environment_class (type): type specified.

    Raises:
        ValueError: if environment_type is not recognized.
    """
    if environment_type == "vanilla":
        return city.CityGridEnv
    elif environment_type == "distraction":
        return city.DistractionGridEnv
    elif environment_type == "map":
        return city.MapGridEnv
    elif environment_type == "cooking":
        # BUG FIX: `cooking` was never imported at module level, so this
        # branch raised NameError. Import lazily, mirroring the
        # miniworld_sign branch below. (Assumes the module is named
        # `cooking` like the sibling `city`/`grid` modules — confirm.)
        import cooking
        return cooking.CookingGridEnv
    elif environment_type == "miniworld_sign":
        # Dependencies on OpenGL, so only load if absolutely necessary
        from envs.miniworld import sign
        return sign.MiniWorldSign
    else:
        raise ValueError(
                "Unsupported environment type: {}".format(environment_type))
def get_instruction_agent(instruction_config, instruction_env):
    """Build the instruction-following agent described by the config.

    Raises:
        ValueError: if the configured agent type is not "learned".
    """
    agent_type = instruction_config.get("type")
    if agent_type != "learned":
        raise ValueError(
                "Invalid instruction agent: {}".format(agent_type))
    # NOTE(review): DQNAgent is not imported anywhere in this module's
    # visible imports — confirm where it comes from.
    return DQNAgent.from_config(instruction_config, instruction_env)
def get_exploration_agent(exploration_config, exploration_env):
    """Build the exploration agent described by the config.

    Supported types: "learned" (DQN), "random", "none" (always ends the
    episode immediately).

    Raises:
        ValueError: for any other configured type.
    """
    agent_type = exploration_config.get("type")
    if agent_type == "learned":
        return DQNAgent.from_config(exploration_config, exploration_env)
    if agent_type == "random":
        return policy.RandomPolicy(exploration_env.action_space)
    if agent_type == "none":
        return policy.ConstantActionPolicy(grid.Action.end_episode)
    raise ValueError("Invalid exploration agent: {}".format(agent_type))
def log_episode(exploration_episode, exploration_rewards, distances, path):
    """Write a human-readable trace of an exploration episode to *path*."""
    divider = "=" * 80 + "\n"
    with open(path, "w+") as f:
        f.write("Env ID: {}\n".format(exploration_episode[0].state.env_id))
        for t, (exp, reward, dist) in enumerate(
                zip(exploration_episode, exploration_rewards, distances)):
            f.write(divider)
            f.write("Timestep: {}\n".format(t))
            f.write("State: {}\n".format(exp.state.observation))
            f.write("Action: {}\n".format(grid.Action(exp.action).name))
            f.write("Reward: {}\n".format(reward))
            f.write("Distance: {}\n".format(dist))
            f.write("Next state: {}\n".format(exp.next_state.observation))
            f.write(divider)
            f.write("\n")
22,689 | 10e8c3401277adb957ea01c51e6855ebd05cdb86 | import kwic
# Smoke test: the KWIC index of "a b c" is every circular shift of the
# words, each paired with the index (0) of its source line.
document = "a b c"
assert(kwic.kwic(document) == [(['a', 'b', 'c'],0), (['b', 'c', 'a'],0), (['c', 'a', 'b'],0)])
|
22,690 | 1d44a8c562fd5a5281de09238fb34949ed9bbf47 | try:
from bhoma.apps.case.tests.test_adult_visit import *
from bhoma.apps.case.tests.test_chw_referrals import *
from bhoma.apps.case.tests.test_clinic_cases import *
from bhoma.apps.case.tests.test_death import *
from .test_delivery_cases import *
from bhoma.apps.case.tests.test_from_xform import *
from bhoma.apps.case.tests.test_in_patient import *
from bhoma.apps.case.tests.test_ltfu import *
from bhoma.apps.case.tests.test_phone_followups import *
from bhoma.apps.case.tests.test_pregnancy import *
from bhoma.apps.case.tests.test_random_followup import *
from bhoma.apps.case.tests.test_xml import *
except ImportError, e:
# for some reason the test harness squashes these so log them here for clarity
# otherwise debugging is a pain
import logging
logging.exception("bad test import!")
raise
|
22,691 | 739b038ef1c3fa13779119c0eaf2dd1c6c5112fe | from strategy import istrategy
class car_map(istrategy):
    """Strategy-pattern implementation that builds a car-navigation map
    between two endpoints."""
    def __init__(self, start, end):
        # Route endpoints.
        self.start = start
        self.end = end
    def buildmaps(self):
        """Build (here: just announce) the car map for this route."""
        print("This is car map")
|
22,692 | a6e65de9e92aabbec67abee8a56d90eee6806507 | # game_end_scoring.py
def check_game_end(players):
    '''
    Return the number of completed horizontal wall lines across all
    players; the game should end once this count is non-zero.
    '''
    completed_rows = 0
    for player in players:
        completed_rows += sum(1 for row in player.wall
                              if sum(row.values()) == 5)
    return completed_rows
def final_scoring(players):
    '''
    Apply end-of-game bonus scoring and return the list of winners.

    Bonuses: 2 pts per complete horizontal line, 7 pts per complete
    vertical line, 10 pts per color with all 5 tiles placed. Ties on
    score are broken by the number of completed horizontal lines;
    remaining ties produce multiple winners.
    '''
    for player in players:
        rows = [list(line.values()) for line in player.wall]
        # 1. 2 pts for each complete horizontal line.
        player.score += 2 * sum(1 for line in rows if sum(line) == 5)
        # 2. 7 pts for each complete vertical line (walls are square 5x5).
        for column in zip(*rows):
            if sum(column) == 5:
                player.score += 7
        # 3. 10 pts for each color whose 5 tiles are all placed.
        for color in player.wall[0].keys():
            if sum(line[color] for line in player.wall) == 5:
                player.score += 10
    # Record completed horizontal lines on every player for tie-breaking.
    for player in players:
        player.filled_lines = sum(1 for line in player.wall
                                  if sum(line.values()) == 5)
    best_score = max(player.score for player in players)
    top_scorers = [p for p in players if p.score == best_score]
    if len(top_scorers) == 1:
        return top_scorers
    # Tie-break: most completed horizontal lines among the top scorers.
    most_lines = max(p.filled_lines for p in top_scorers)
    return [p for p in top_scorers if p.filled_lines == most_lines]
22,693 | d72f69a194947a56a14fd6e70f51bf5d3b21d959 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import coverage_error # classification_report
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
def balanced_subsample(x, y, subsample_size=1.0):
    """Return a class-balanced subsample of (x, y).

    Every class is cut down to ``subsample_size * min_class_count`` rows;
    classes larger than that are shuffled first so the kept rows are random.

    Args:
        x (np.ndarray): Feature rows, indexable with a boolean mask on y.
        y (np.ndarray): Class labels aligned with x.
        subsample_size (float): Fraction (<= 1) of the smallest class to
            keep from every class.

    Returns:
        tuple: (xs, ys) with an equal number of rows per class.
    """
    class_xs = []
    min_elems = None
    for yi in np.unique(y):
        elems = x[(y == yi)]
        class_xs.append((yi, elems))
        # FIX: compare with `is None`, not `== None` (PEP 8 E711).
        if min_elems is None or elems.shape[0] < min_elems:
            min_elems = elems.shape[0]
    use_elems = min_elems
    if subsample_size < 1:
        use_elems = int(min_elems * subsample_size)
    xs = []
    ys = []
    for ci, this_xs in class_xs:
        # Shuffle oversized classes so the truncation below is random.
        if len(this_xs) > use_elems:
            np.random.shuffle(this_xs)
        x_ = this_xs[:use_elems]
        y_ = np.empty(use_elems)
        y_.fill(ci)
        xs.append(x_)
        ys.append(y_)
    xs = np.concatenate(xs)
    ys = np.concatenate(ys)
    return xs, ys
# Load the food-label vocabulary: line i of all_foods.txt is label index i.
# NOTE: entries keep their trailing newline; food_to_idx strips them but
# all_foods itself is used raw as DataFrame column names below.
with open('all_foods.txt', 'r') as f:
    all_foods = f.readlines()
food_to_idx = {food.strip(): i for i, food in enumerate(all_foods)}
class Net(nn.Module):
    """Two-layer MLP multi-label classifier: a 768-d BERT sentence vector
    maps to 100 hidden units, then to one sigmoid output per food label."""
    # define nn
    def __init__(self):
        super(Net, self).__init__()
        # 768 matches the BERT-base hidden size of the input embeddings.
        self.fc1 = nn.Linear(768, 100)
        # One output per food in the module-level `all_foods` vocabulary.
        self.fc3 = nn.Linear(100, len(all_foods))
        # self.softmax = nn.Softmax(dim=1)
        self.sig = nn.Sigmoid()
    def forward(self, X):
        """Return per-label probabilities of shape (batch, len(all_foods))."""
        X = F.relu(self.fc1(X))
        # NOTE(review): applying ReLU to the final logits clamps them to
        # >= 0, so every sigmoid output is >= 0.5 — confirm this is intended.
        X = F.relu(self.fc3(X))
        # X = self.softmax(X)
        X = self.sig(X)
        return X
if __name__ == "__main__":
    # Load the BERT-vector dataset: feature columns plus one 0/1 column
    # per food label.
    df = pd.read_csv('bert_food_vectors_data.csv', index_col=0)
    X=df.drop(all_foods,axis=1)
    Y=df[all_foods]
    # X, Y = balanced_subsample(X.values, Y.values)
    print(X.shape, Y.shape)
    X_train, X_test, y_train, y_test = train_test_split(X.values,Y.values,random_state=0,test_size=0.3)
    # wrap up with Variable in pytorch
    X_train = Variable(torch.Tensor(X_train).float())
    X_test = Variable(torch.Tensor(X_test).float())
    y_train = Variable(torch.Tensor(y_train).float())
    y_test = Variable(torch.Tensor(y_test).float())
    net = Net()
    criterion = nn.MSELoss()# NOTE(review): this is MSE; the original comment claimed cross entropy
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
    # Full-batch gradient descent for 101 epochs.
    for epoch in range(101):
        optimizer.zero_grad()
        out = net(X_train)
        loss = criterion(out, y_train)
        loss.backward()
        optimizer.step()
        if epoch % 100 == 0:
            print('number of epoch', epoch, 'loss', loss.data.item())
    predict_out = net(X_test)
    # print(predict_out)
    # Threshold the per-label probabilities at 0.9 for the prediction.
    y_pred = predict_out >= 0.9
    # _, y_pred = torch.max(predict_out, 1)
    print(y_pred)
    print(y_pred.sum())
    # print('prediction accuracy', accuracy_score(y_test.data, y_pred.data))
    # print('macro precision', precision_score(y_test.data, y_pred.data, average='macro'))
    # print('micro precision', precision_score(y_test.data, y_pred.data, average='micro'))
    # print('macro recall', recall_score(y_test.data, y_pred.data, average='macro'))
    # print('micro recall', recall_score(y_test.data, y_pred.data, average='micro'))
    target_names = all_foods
    # print(classification_report(y_test.data, predict_out.data, target_names=target_names))
    # Coverage error: average rank depth needed to cover all true labels
    # (lower is better).
    print(coverage_error(y_test.data, predict_out.data))
    torch.save(net.state_dict(), 'evidencenet.nn')
|
22,694 | 6d9d9446ac375ff2079e97dca719df40d039591a |
""" Ausgabe der Daten in verschiedenen Ansätzen zur besseren Verständlichkeit """
## ##########################
## Teil 0: Einlesen der Daten
## ##########################
import pandas as pd
df = pd.read_csv("./Python_Training/Machine Learning/Klassifikation/CSV/classification.csv")
## ################################################################
## Teil 1: Aufteilung in Trainings- und Testdaten (hier: 75% / 25%)
## ################################################################
from sklearn.model_selection import train_test_split
X = df[["age", "interest"]].values
y = df["success"].values
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0, test_size = 0.25)
## ########################
## Teil 2: Daten skallieren
## ########################
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
## #############################################
## Teil 3: Ergebnisse der Trainingsdaten plotten
## #############################################
import matplotlib.pyplot as plt
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
plt.xlabel("Alter")
plt.ylabel("Interesse")
plt.show()
## ##################################
## Teil 4: Bestimmtheitsmaß berechnen
## ##################################
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
print(model.score(X_test, y_test))
y_predicted = model.predict(X_test)
## ########################################
## Teil 5: Ergebnisse der Testdaten plotten
## ########################################
import matplotlib.pyplot as plt
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test)
plt.xlabel("Alter")
plt.ylabel("Interesse")
plt.show()
## #####################################################
## Teil 6: Ergebnisse der verhergesehenden Daten plotten
## #####################################################
import matplotlib.pyplot as plt
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_predicted)
plt.xlabel("Alter")
plt.ylabel("Interesse")
plt.show()
|
22,695 | edd15515ecc1d8af080bdad6134075e5d337ebd4 | # Defining the simplest possible class
# class User:
# def __init__(self, first, last, age):
# self.first = first
# self.last = last
# self.age = age
# pass
#
# user1 = User("dave", "smith",22)
# user2 = User("jane","doe",44)
# print(user1.first)
# print(user2.first, user2.last, user2.age)
class Person:
    """Tiny demo of Python's attribute-privacy conventions."""

    def __init__(self):
        self.name = "tony"
        # Single leading underscore: private by convention only.
        self._secret = "hi"
        # Double leading underscore: name-mangled to _Person__msg.
        self.__msg = "I like turtles"
# Demonstrate attribute access: the single underscore is advisory only,
# and dir(p) reveals the mangled name _Person__msg.
p = Person()
print(p.name)
print(p._secret)
print(dir(p))
|
22,696 | 77c13c55fd16b439543c1bff4573b2df8a52595a |
from Utility import *
class Triplets:
    """Driver that prompts for an array length and delegates triplet
    finding to Utility.

    NOTE(review): this code sits in the class body (not in a method), so
    it runs once at class-definition time, i.e. on import — confirm that
    is intentional.
    """
    utility=Utility()
    # Keep prompting until a positive integer is entered.
    while True:
        try:
            print("enter the length of array : ")
            given_input = utility.input_int_data()
            if given_input > 0:
                break
            else:
                print("please enter a valid input")
        except NameError:
            print("please enter a number...try again")
    my_array = [None] * given_input
    utility.triplets(my_array)
22,697 | 292d996d4476995ea5edf80883e75aa408b99e6d | import copy
from office365.runtime.client_object import ClientObject
from office365.runtime.client_request import ClientRequest
from office365.runtime.client_result import ClientResult
from office365.runtime.client_value import ClientValue
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.http.request_options import RequestOptions
from office365.runtime.odata.v3.json_light_format import JsonLightFormat
from office365.runtime.queries.create_entity import CreateEntityQuery
from office365.runtime.queries.delete_entity import DeleteEntityQuery
from office365.runtime.queries.function import FunctionQuery
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.runtime.queries.update_entity import UpdateEntityQuery
class ODataRequest(ClientRequest):
    """Client request that serializes payloads to, and maps responses
    from, an OData (JSON) service endpoint."""
    def __init__(self, json_format):
        """
        Creates OData request
        :type json_format: office365.runtime.odata.json_format.ODataJsonFormat
        """
        super(ODataRequest, self).__init__()
        self._default_json_format = json_format
        # Ensure Content-Type/Accept headers are set before every request.
        self.beforeExecute += self._ensure_json_format
    @property
    def json_format(self):
        """The default OData JSON format used for (de)serialization."""
        return self._default_json_format
    def build_request(self, query):
        """
        Builds a request: GET by default, POST for create/update/delete
        and service-operation queries (with a serialized payload if any).
        :type query: office365.runtime.queries.client_query.ClientQuery
        """
        request = RequestOptions(query.url)
        request.method = HttpMethod.Get
        if isinstance(query, DeleteEntityQuery):
            request.method = HttpMethod.Post
        elif isinstance(query, (CreateEntityQuery, UpdateEntityQuery, ServiceOperationQuery)):
            request.method = HttpMethod.Post
            if query.parameters_type is not None:
                request.data = self._build_payload(query)
        return request
    def process_response(self, response, query):
        """
        Maps the HTTP response onto the query's return type (if any);
        non-JSON payloads are stored raw under "__value".
        :type response: requests.Response
        :type query: office365.runtime.queries.client_query.ClientQuery
        """
        # Deep-copy so per-query tweaks (e.g. .function) don't leak into
        # the shared default format.
        json_format = copy.deepcopy(self.json_format)
        return_type = query.return_type
        if return_type is None:
            return
        if isinstance(return_type, ClientObject):
            return_type.clear()
        if response.headers.get('Content-Type', '').lower().split(';')[0] != 'application/json':
            if isinstance(return_type, ClientResult):
                return_type.set_property("__value", response.content)
        else:
            if isinstance(json_format, JsonLightFormat):
                if isinstance(query, ServiceOperationQuery) or isinstance(query, FunctionQuery):
                    json_format.function = query.name
            self.map_json(response.json(), return_type, json_format)
    def map_json(self, json, return_type, json_format=None):
        """
        Copies each (key, value) pair extracted from *json* onto
        *return_type* via set_property.
        :type json: any
        :type return_type: ClientValue or ClientResult or ClientObject
        :type json_format: office365.runtime.odata.json_format.ODataJsonFormat
        """
        if json_format is None:
            json_format = self.json_format
        if json and return_type is not None:
            for k, v in self._next_property(json, json_format):
                return_type.set_property(k, v, False)
    def _next_property(self, json, json_format):
        """
        Generator that walks an OData payload, unwrapping the format's
        envelope keys (security/function/collection), stripping metadata
        entries, and yielding (name, value) pairs; scalars are yielded
        under the "__value" key.
        :type json: Any
        :type json_format: office365.runtime.odata.json_format.ODataJsonFormat
        """
        if isinstance(json_format, JsonLightFormat):
            json = json.get(json_format.security, json)
            json = json.get(json_format.function, json)
            if isinstance(json, dict):
                next_link_url = json.get(json_format.collection_next, None)
                json = json.get(json_format.collection, json)
                if next_link_url:
                    # Paged collection: surface the continuation link.
                    yield "__nextLinkUrl", next_link_url
        if isinstance(json, list):
            for index, item in enumerate(json):
                if isinstance(item, dict):
                    item = {k: v for k, v in self._next_property(item, json_format)}
                yield index, item
        elif isinstance(json, dict):
            for name, value in json.items():
                if isinstance(json_format, JsonLightFormat):
                    # Skip verbose-format metadata and deferred navigation links.
                    is_valid = name != "__metadata" and not (isinstance(value, dict) and "__deferred" in value)
                else:
                    is_valid = "@odata" not in name
                if is_valid:
                    if isinstance(value, dict):
                        value = {k: v for k, v in self._next_property(value, json_format)}
                    yield name, value
            else:
                yield "__value", json
        elif json is not None:
            yield "__value", json
    def _build_payload(self, query):
        """
        Normalizes OData request payload: client objects/values are turned
        into plain JSON, None dict entries dropped, and named service
        operation parameters wrapped under their parameter name.
        :type query: office365.runtime.queries.client_query.ClientQuery
        """
        def _normalize_payload(payload):
            if isinstance(payload, ClientObject) or isinstance(payload, ClientValue):
                return payload.to_json(self._default_json_format)
            elif isinstance(payload, dict):
                return {k: _normalize_payload(v) for k, v in payload.items() if v is not None}
            elif isinstance(payload, list):
                return [_normalize_payload(item) for item in payload]
            return payload
        json = _normalize_payload(query.parameters_type)
        if isinstance(query, ServiceOperationQuery) and query.parameters_name is not None:
            json = {query.parameters_name: json}
        return json
    def _ensure_json_format(self, request):
        """
        beforeExecute hook: default Content-Type/Accept to the format's
        media type without overwriting headers already set.
        :type request: RequestOptions
        """
        media_type = self.json_format.media_type
        request.ensure_header('Content-Type', media_type)
        request.ensure_header('Accept', media_type)
|
22,698 | c9b1f873f67f9ec6bff0cdebf31eb97a7e730db2 | #!/usr/bin/python3
"""Defines a function that returns the number of lines of a text file"""
def number_of_lines(filename=""):
    """Returns the number of lines of a text file"""
    with open(filename, encoding='utf-8', mode='r') as stream:
        return sum(1 for _ in stream)
|
22,699 | cf6ff2591cf87792d208003c9dec804e25cf2bba | # encoding: UTF-8
import unittest
from framework.browser import WebApp
from hiptest.actionwords import Actionwords
class TestFYM(unittest.TestCase):
    def setUp(self):
        # Fresh browser session and action-word helpers for every test.
        self.webApp = WebApp()
        self.actionwords = Actionwords(self.webApp)
    def tearDown(self):
        # Always close the browser, even when the test failed.
        self.webApp.quit()
    def smoke__as_a_teacher_i_can_login_in_the_application(self, username, password, subject):
        """Shared scenario: log in with *username*/*password* and verify
        the expected *subject* is shown on the Homepage. Called by the
        generated per-credential test methods below."""
        # Tags: status priority_P0 test_case_level_Smoke JIRA:FY-11
        # Given a teacher on the login page
        self.actionwords.a_teacher_on_the_login_page()
        # When teacher enters in the login page "<username>" and "<password>"
        self.actionwords.teacher_enters_in_the_login_page_p1_and_p2(p1 = username, p2 = password)
        # And teacher clicks Submit button
        self.actionwords.teacher_clicks_submit_button()
        # Then logs in and lands on the Homepage and sees "<subject>"
        self.actionwords.logs_in_and_lands_on_the_homepage_and_sees(p1 = subject)
def test_Smoke__As_a_teacher_I_can_login_in_the_application__uid2bd7e766ab97489198cfa1cdb2e89eda(self):
self.smoke__as_a_teacher_i_can_login_in_the_application(username = 'teacher.worldhistory@testschool.org', password = 'password', subject = 'AP World History: Modern')
def test_Smoke__As_a_teacher_I_can_login_in_the_application__uid9059fd48d38747f4bc60d075fd948042(self):
self.smoke__as_a_teacher_i_can_login_in_the_application(username = 'teacher.calculus@testschool.org', password = 'password', subject = 'Calculus')
def test_Smoke__As_a_teacher_I_can_login_in_the_application__uid81cd450f4c534fc4981d2534d19c871f(self):
self.smoke__as_a_teacher_i_can_login_in_the_application(username = 'teacher.apbiology@testschool.org', password = 'password', subject = 'AP Insight Biology')
def test_Smoke__As_a_teacher_I_can_login_in_the_application__uid9991f942ced542a6967c4ad668576ef2(self):
self.smoke__as_a_teacher_i_can_login_in_the_application(username = 'teacher.biology@testschool.org', password = 'password', subject = 'Biology')
def test_Smoke__As_a_teacher_I_can_login_in_the_application__uida2b8d1864a1b410a85339042dd170765(self):
self.smoke__as_a_teacher_i_can_login_in_the_application(username = 'teacher.ushistory@testschool.org', password = 'password', subject = 'U.S. History')
def test_Smoke__As_a_teacher_I_can_login_in_the_application__uid917b5c37df7840578fe648abf172f7b2(self):
self.smoke__as_a_teacher_i_can_login_in_the_application(username = 'teacher.calculusphysics@testschool.org', password = 'password', subject = 'Calculus')
def test_Smoke__As_a_teacher_I_can_login_in_the_application__uid68bd2a4686f240c6b84d1751df18bed3(self):
self.smoke__as_a_teacher_i_can_login_in_the_application(username = 'teacher.physicsEMphysicsM@testschool.org', password = 'password', subject = 'Physics C: Electricity & Magnetism')
def test_Smoke__As_a_teacher_I_can_login_in_the_application__uid808eebea55a14e85bc969b72a8f5dfc8(self):
self.smoke__as_a_teacher_i_can_login_in_the_application(username = 'teacher.physicschemistry@testschool.org', password = 'password', subject = 'Physics C: Electricity & Magnetism')
def smoke__ap_only_teacher_multiple_ap_subjects_onscreen(self):
# Tags: priority_P1 test_case_level_Smoke JIRA:PREAP-51 JIRA:PREAP-50 JIRA:PREAP-275 Regression
# Given a teacher with multiple AP subjects on the login page
self.actionwords.a_teacher_with_multiple_ap_subjects_on_the_login_page()
# But no Pre-AP subjects
self.actionwords.no_pre_ap_subjects()
# When the teacher logs in
self.actionwords.the_teacher_logs_in()
# Then he lands on My Subjects page where he can see the AP courses he has access to onscreen
self.actionwords.he_lands_on_my_subjects_page_where_he_can_see_the_ap_courses_he_has_access_to_onscreen()
# And the Pre-AP section is not displayed
self.actionwords.the_pre_ap_section_is_not_displayed()
def test_Smoke__AP_only_teacherMultiple_AP_subjects_onscreen__uid87d9a5fdb4c240d9937cbcb66c6bd880(self):
self.smoke__ap_only_teacher_multiple_ap_subjects_onscreen()
def smoke__homepage_has_the_correct_subject_displayed_in_the_dropdownheader(self, subject):
# Tags: priority_P0 Regression JIRA:FY-5474
# Given teacher is "<subject>" teacher
self.actionwords.teacher_is_p1_teacher(p1 = subject)
# When he clicks on "<subject>"
self.actionwords.he_clicks_on_p1(p1 = subject, free_text = "", datatable = "||")
# Then teacher lands on the Course Homepage and sees "<subject>" in the page header
self.actionwords.teacher_lands_on_the_course_homepage_and_sees_p1_in_the_page_header(p1 = subject)
def test_Smoke__Homepage_has_the_correct_subject_displayed_in_the_dropdownheader__uid77cac202d8a3456e846e00e87da36e2e(self):
self.smoke__homepage_has_the_correct_subject_displayed_in_the_dropdownheader(subject = 'AP Calculus AB')
def test_Smoke__Homepage_has_the_correct_subject_displayed_in_the_dropdownheader__uid53978cea9f2e4ceab15e5ed914ae2c71(self):
self.smoke__homepage_has_the_correct_subject_displayed_in_the_dropdownheader(subject = 'AP Calculus BC')
def test_Smoke__Homepage_has_the_correct_subject_displayed_in_the_dropdownheader__uidd407f5f3e1c046838e1026b9ea46bf8e(self):
self.smoke__homepage_has_the_correct_subject_displayed_in_the_dropdownheader(subject = 'AP Calculus AB and BC')
def smoke__teacher_units_display_on_the_homepage(self, cond, env, browser):
# Tags: JIRA:FY-5152 priority_P0 test_case_level_Smoke JIRA:FY-5154
# Given a teacher on the login page opened on "<env>" using "<browser>"
self.actionwords.a_teacher_on_the_login_page_opened_on_p2_using_p3(p2 = env, p3 = browser)
# When he logs in and navigates to the homepage of a "<cond>" subject
self.actionwords.he_logs_in_and_navigates_to_the_homepage_of_a_p1_subject(p1 = cond)
# Then he sees each unit and its content displayed in a tab
self.actionwords.he_sees_each_unit_and_its_content_displayed_in_a_tab()
def test_Smoke__Teacher_Units_display_on_the_homepage__uidb0ea04a958ed4cd4b237f149cd4736ba(self):
self.smoke__teacher_units_display_on_the_homepage(cond = 'AP', env = 'desktop', browser = 'Chrome')
def test_Smoke__Teacher_Units_display_on_the_homepage__uidf87bc90037e3483d9484c6f82dbfc3f7(self):
self.smoke__teacher_units_display_on_the_homepage(cond = 'Pre-AP', env = 'mobile', browser = 'Safari')
def test_Smoke__Teacher_Units_display_on_the_homepage__uid056590396aca413889abb2c6006b62e9(self):
self.smoke__teacher_units_display_on_the_homepage(cond = 'AP', env = 'tablet', browser = 'Mozilla')
def test_Smoke__Teacher_Units_display_on_the_homepage__uid865c51631fcf4d3295c89e38c12c43c9(self):
self.smoke__teacher_units_display_on_the_homepage(cond = 'AP', env = 'desktop', browser = 'IE')
def test_smoke__teacher_selected_unit_tab_highlight_state_uida93f8928820d4d4fbafffbf5061d277a(self):
# Tags: JIRA:FY-5152 priority_P2 test_case_level_Functional_GUI
# Given a teacher on the homepage of a subject
self.actionwords.a_teacher_on_the_homepage_of_a_subject()
# When he clicks on a unit tab
self.actionwords.he_clicks_on_a_unit_tab()
# Then that tab becomes highlighted on white
self.actionwords.that_tab_becomes_highlighted_on_white()
def smoke__teacher_unit_resources_within_unit_tabs_are_functional(self, subj, resources, result):
# Tags: JIRA:FY-5152 test_case_level_Functional priority_P1
# Given a teacher on the homepage of a "<subj>" subject
self.actionwords.a_teacher_on_the_homepage_of_a_p1_subject(p1 = subj)
# When he clicks on "<resources>" resource under a unit tab within Units section
self.actionwords.he_clicks_on_p2_resource_under_a_unit_tab_within_units_section(p2 = resources)
# Then the following happens: "<result>"
self.actionwords.the_following_happens_p3(p3 = result)
def test_Smoke__Teacher_Unit_resources_within_unit_tabs_are_functional__uidfaa6a230a1ed4b9caa4b23f9ae634356(self):
self.smoke__teacher_unit_resources_within_unit_tabs_are_functional(subj = 'AP', resources = 'Unit Guide', result = 'Unit Guide pdf opens in a new tab')
def test_Smoke__Teacher_Unit_resources_within_unit_tabs_are_functional__uid7393df5f7a1b46829f86e31a1a416c1b(self):
self.smoke__teacher_unit_resources_within_unit_tabs_are_functional(subj = 'Pre-AP', resources = 'Sample Quiz Questions', result = 'Sample Quiz Questions pdf opens in a new tab')
def test_Smoke__Teacher_Unit_resources_within_unit_tabs_are_functional__uid8421688c72eb4bf0b9ff6d9e1afee66a(self):
self.smoke__teacher_unit_resources_within_unit_tabs_are_functional(subj = 'AP', resources = 'Teacher Module', result = 'Teacher Module page opens in a new tab')
def test_Smoke__Teacher_Unit_resources_within_unit_tabs_are_functional__uidc7598e018b4946b5b3279a103e90c334(self):
self.smoke__teacher_unit_resources_within_unit_tabs_are_functional(subj = 'AP', resources = 'Question Bank', result = 'Question Bank page opens in a new tab having the filter set to the unit number')
def smoke__skills_associated_with_subunits_are_visible_for_all_types_of_users(self, col, result):
# Tags: JIRA:FY-5149 priority_P1 test_case_level_Functional story_priority_p0
# Given a "<col>" logs in and navigates to the above subject homepage
self.actionwords.a_p1_logs_in_and_navigates_to_the_above_subject_homepage(p1 = col)
# When clicks on a unit tab from Units section
self.actionwords.clicks_on_a_unit_tab_from_units_section()
# Then he sees a new column containing skills associated to the sub-units on the left side
self.actionwords.he_sees_a_new_column_containing_skills_associated_to_the_subunits_on_the_left_side()
def test_Smoke__Skills_associated_with_subunits_are_visible_for_all_types_of_users__uidc7259663b0074389b48df61421b65d66(self):
self.smoke__skills_associated_with_subunits_are_visible_for_all_types_of_users(col = 'teacher', result = 'AP')
def test_Smoke__Skills_associated_with_subunits_are_visible_for_all_types_of_users__uidf75cd8e4b9b84c4d9796f77efc79f535(self):
self.smoke__skills_associated_with_subunits_are_visible_for_all_types_of_users(col = 'student', result = 'Pre-AP')
def test_Smoke__Skills_associated_with_subunits_are_visible_for_all_types_of_users__uidb7d62e04aa804d64865287253fcdf3e1(self):
self.smoke__skills_associated_with_subunits_are_visible_for_all_types_of_users(col = 'coord/admin', result = 'AP')
def smoke__latest_unit_tab_is_remembered_when_returning_from_another_page_of_the_same_subject(self, user, type):
# Tags: test_case_level_Functional priority_P1 JIRA:FY-5153 story_priority_p0
# Given a "<user>" on the homepage of a "<type>" subject with units
self.actionwords.a_p1_on_the_homepage_of_a_p2_subject_with_units(p1 = user, p2 = type)
# And a unit tab has been accessed
self.actionwords.a_unit_tab_has_been_accessed()
# When he navigate to a different page of the same subject
self.actionwords.he_navigate_to_a_different_page_of_the_same_subject()
# And then returns to homepage
self.actionwords.then_returns_to_homepage()
# Then he sees the latest unit tab accessed opened along with its content
self.actionwords.he_sees_the_latest_unit_tab_accessed_opened_along_with_its_content()
def test_Smoke__Latest_unit_tab_is_remembered_when_returning_from_another_page_of_the_same_subject__uid5132a2d9c34b40f9ac8a9cecd42cded1(self):
self.smoke__latest_unit_tab_is_remembered_when_returning_from_another_page_of_the_same_subject(user = 'teacher', type = 'Pre-AP')
def test_Smoke__Latest_unit_tab_is_remembered_when_returning_from_another_page_of_the_same_subject__uid5591336c2fa841c29d5f94873275cd95(self):
self.smoke__latest_unit_tab_is_remembered_when_returning_from_another_page_of_the_same_subject(user = 'student', type = 'AP')
def test_Smoke__Latest_unit_tab_is_remembered_when_returning_from_another_page_of_the_same_subject__uid9ab434ea677b4dc69fe8e5ff11d2b3e5(self):
self.smoke__latest_unit_tab_is_remembered_when_returning_from_another_page_of_the_same_subject(user = 'coord/admin', type = 'AP')
def test_smoke__clicking_on_class_name_redirect_user_to_my_classes_uidaf276416dea3494995331120f50cdec9(self):
# Tags: JIRA:FY-6440 priority_P1 test_case_level_Functional
# Given a teacher on Homepage
self.actionwords.a_teacher_on_homepage()
# And a class is already created for the teacher
self.actionwords.a_class_is_already_created_for_the_teacher()
# When he clicks in the widget header on class name
self.actionwords.he_clicks_in_the_widget_header_on_class_name()
# Then he is redirected to My Classes with the associated class selected (https://myap.collegeboard.org/course/ {test_cd}/section/{APRO_section_id})
self.actionwords.he_is_redirected_to_my_classes_with_the_associated_class_selected_httpsmyapcollegeboardorgcourse_test_cdsection_apro_section_id()
def test_smoke__navigate_through_classes_using_arrows_uiddec73bc6bfbb469187c55748a9c7c4c3(self):
# Tags: JIRA:FY-6440 priority_P2 test_case_level_Functional
# Given a teacher on Homepage
self.actionwords.a_teacher_on_homepage()
# And multiple classes are created for that teacher
self.actionwords.multiple_classes_are_created_for_that_teacher()
# When he clicks on ">" or "<" arrows in the widget header
self.actionwords.he_clicks_on_p1_or_p2_arrows_in_the_widget_header(p1 = ">", p2 = "<")
# Then he is able to navigate through the classes
self.actionwords.he_is_able_to_navigate_through_the_classes()
def test_smoke__class_widget_default_number_of_quizzes_on_screen_uid4ce0221cae5e43a680df1c5e55817473(self):
# Tags: priority_P1 test_case_level_Functional JIRA:FY-6441
# Given a teacher has assigned more than 6 quizzes to a class
self.actionwords.a_teacher_has_assigned_more_than_6_quizzes_to_a_class()
# When he navigates to that specific class in the Class widget
self.actionwords.he_navigates_to_that_specific_class_in_the_class_widget()
# Then by default the first 6 quizzes(sorting rules applied)are displayed on the screen
self.actionwords.by_default_the_first_6_quizzessorting_rules_appliedare_displayed_on_the_screen()
# And the others are hidden
self.actionwords.the_others_are_hidden()
def smoke__class_widget_results_bar_precedes_progress_bars(self, cond):
# Tags: priority_P1 test_case_level_Functional JIRA:FY-6441
# Given a teacher has assigned multiple "<cond>" to a class
self.actionwords.a_teacher_has_assigned_multiple_p1_to_a_class(p1 = cond)
# And some of the assignments are in progress and some have already results
self.actionwords.some_of_the_assignments_are_in_progress_and_some_have_already_results()
# When the teacher navigates to that specific class in the Class widget
self.actionwords.the_teacher_navigates_to_that_specific_class_in_the_class_widget()
# Then he sees first the bars for results, then progress bars
self.actionwords.he_sees_first_the_bars_for_results_then_progress_bars()
def test_Smoke__Class_widget_results_bar_precedes_progress_bars__uid1f406912bdb247da834696613fe838ff(self):
self.smoke__class_widget_results_bar_precedes_progress_bars(cond = 'PPCs')
def test_Smoke__Class_widget_results_bar_precedes_progress_bars__uid07d0915d74fa477394d9fbff97e6cd5d(self):
self.smoke__class_widget_results_bar_precedes_progress_bars(cond = 'QBs')
def smoke__class_widget_progress_bars_when_less_than_80_of_students_have_submitted(self, cond, state):
# Tags: priority_P1 test_case_level_Functional JIRA:FY-6441
# Given a teacher has assigned an MCQ "<cond>"only to a class
self.actionwords.a_teacher_has_assigned_an_mcq_p1only_to_a_class(p1 = cond)
# And less than 80% of students have submitted the quiz
self.actionwords.less_than_80_of_students_have_submitted_the_quiz()
# And the quiz in "<state>" state
self.actionwords.the_quiz_in_p2_state(p2 = state)
# When the teacher navigates to that specific class in the Class widget
self.actionwords.the_teacher_navigates_to_that_specific_class_in_the_class_widget()
# Then he sees the quiz row having a Progress bar(second column) + View Progress button(third column)
self.actionwords.he_sees_the_quiz_row_having_a_progress_barsecond_column__view_progress_buttonthird_column()
def test_Smoke__Class_widget_Progress_bars_when_less_than_80_of_students_have_submitted__uid9eed01140a914f3f964b06bbd43fd61e(self):
self.smoke__class_widget_progress_bars_when_less_than_80_of_students_have_submitted(cond = 'PPC', state = 'opened')
def test_Smoke__Class_widget_Progress_bars_when_less_than_80_of_students_have_submitted__uid40111f871bf641769967f60c23e11da5(self):
self.smoke__class_widget_progress_bars_when_less_than_80_of_students_have_submitted(cond = 'QB', state = 'locked')
def smoke__class_widget_results_bars_when__80_of_students_have_submitted(self, cond, state, condit):
# Tags: priority_P1 test_case_level_Functional JIRA:FY-6441
# Given a teacher has assigned a "<cond>" to a class
self.actionwords.a_teacher_has_assigned_a_p1_to_a_class(p1 = cond)
# And >= 80% of students have "<condit>"
self.actionwords._80_of_students_have_p3(p3 = condit)
# And the quiz in "<state>" state
self.actionwords.the_quiz_in_p2_state(p2 = state)
# When the teacher navigates to that specific class in the Class widget
self.actionwords.the_teacher_navigates_to_that_specific_class_in_the_class_widget()
# Then he sees the quiz row having a Results bar(second column) + View Results button(third column)
self.actionwords.he_sees_the_quiz_row_having_a_results_barsecond_column__view_results_buttonthird_column()
def test_Smoke__Class_widget_Results_bars_when__80_of_students_have_submitted__uidb08cfcacbb174222b09b517b0303080f(self):
self.smoke__class_widget_results_bars_when__80_of_students_have_submitted(cond = 'an MCQ PPC', state = 'opened', condit = 'submitted the quiz')
def test_Smoke__Class_widget_Results_bars_when__80_of_students_have_submitted__uid210dc0ddf08e4da291fcb15410bda9fe(self):
self.smoke__class_widget_results_bars_when__80_of_students_have_submitted(cond = 'a mixed QB', state = 'locked', condit = 'submitted the quiz and were scored')
def test_Smoke__Class_widget_Results_bars_when__80_of_students_have_submitted__uid768d38a51761466dacd065aa394025c8(self):
self.smoke__class_widget_results_bars_when__80_of_students_have_submitted(cond = 'an FRQ PPC', state = 'completed', condit = 'submitted the quiz and were scored')
def smoke__class_widget_x_submissions_to_score_button_display(self, cond):
# Tags: priority_P1 test_case_level_Functional JIRA:FY-6441
# Given a teacher on the homepage of a subject
self.actionwords.a_teacher_on_the_homepage_of_a_subject()
# And there is a "<cond>" quiz assigned with students in progress > 0
self.actionwords.there_is_a_p1_quiz_assigned_with_students_in_progress__0(p1 = cond)
# And submitted (available to score) = X > 0
self.actionwords.submitted_available_to_score__x__0()
# When the teacher inspect the Class widget for the above quiz
self.actionwords.the_teacher_inspect_the_class_widget_for_the_above_quiz()
# Then he sees the quiz row having a “x submissions to score”button displayed in Actions column
self.actionwords.he_sees_the_quiz_row_having_a_x_submissions_to_scorebutton_displayed_in_actions_column()
def test_Smoke__Class_widget_X_submissions_to_score_button_display__uid67e80c27e3fe4057bf2555ec241e9fa5(self):
self.smoke__class_widget_x_submissions_to_score_button_display(cond = 'locked FRQ')
def test_Smoke__Class_widget_X_submissions_to_score_button_display__uid70fd997b3d7a49e58e50ae3ba29149c8(self):
self.smoke__class_widget_x_submissions_to_score_button_display(cond = 'opened mixed')
def test_smoke__as_a_teacher_i_want_to_be_able_to_assign_a_unit_test_to_my_class_uid24bc21d1a24244178609f7fdf67035bd(self):
# Given the teacher is logged into the application
self.actionwords.the_teacher_is_logged_into_the_application()
# And teacher navigates to Progress Checks page
self.actionwords.teacher_navigates_to_progress_checks_page()
# And teacher navigates to Assign tab
self.actionwords.teacher_navigates_to_assign_tab()
# Then the teacher is able to see the Assign button for a progress check
self.actionwords.the_teacher_is_able_to_see_the_assign_button_for_a_progress_check()
def test_smoke__as_a_teacher_i_want_to_be_able_to_assign_an_assessment_to_a_class_uid0130ea0ec89b42a4a3eacc6a6c854a6f(self):
# Tags: priority_P1 test_case_level_Functional
# Given a teacher on Assign tab from Progress Checks page
self.actionwords.a_teacher_on_assign_tab_from_progress_checks_page()
# When clicks Assign button for an assessment
self.actionwords.clicks_assign_button_for_an_assessment()
# And selects a class
self.actionwords.selects_a_class()
# And clicks on Assign button
self.actionwords.clicks_on_assign_button()
# Then the assessment/quiz is assigned to that class
self.actionwords.the_assessmentquiz_is_assigned_to_that_class()
# And is redirected to Progress tab
self.actionwords.is_redirected_to_progress_tab()
def test_smoke__assign_modal_window_quick_assign_section_unlock_the_assessment_now_uid2b7a81f44bf24462a100f00e6b84dc41(self):
# Tags: story_priority_p0 JIRA:FY-4855
# Given the teacher is already in the UA->Assign->Assign modal window->Quick Assign section
self.actionwords.the_teacher_is_already_in_the_ua_assign_assign_modal_window_quick_assign_section()
# When the teacher looks at the Unlock the Assessment Now? toggle
self.actionwords.the_teacher_looks_at_the_unlock_the_assessment_now_toggle()
# Then he sees on the right side, a button for switching on/off
self.actionwords.he_sees_on_the_right_side_a_button_for_switching_onoff()
# And bellow appears the text: Your students can begin the assessment immediately and will see this assignment when they log into AP Classroom.
self.actionwords.bellow_appears_the_text_your_students_can_begin_the_assessment_immediately_and_will_see_this_assignment_when_they_log_into_ap_classroom()
def test_smoke__ap_subject_student_login_uid4905abcc9c3e439da91a9142a8304f0d(self):
# Tags: priority_P1 test_case_level_Smoke JIRA:PREAP-19
# Given a student with only one AP subject on the login page
self.actionwords.a_student_with_only_one_ap_subject_on_the_login_page(datatable = "||", free_text = "")
# When the student enters the correct credentials and clicks on the Sign In button
self.actionwords.the_student_enters_the_correct_credentials_and_clicks_on_the_sign_in_button()
# Then he logs in and lands on the Homepage and sees his subject
self.actionwords.he_logs_in_and_lands_on_the_homepage_and_sees_his_subject()
def test_smoke__as_a_student_i_want_to_log_out_of_the_application_uid4b7abe30b3634501ab2401f90d15a1fa(self):
# Tags: priority_P0 test_case_level_Smoke JIRA:FY-3659
# Given a user logged in the application (APC)
self.actionwords.a_user_logged_in_the_application_apc()
# When the user logs out of the application
self.actionwords.the_user_logs_out_of_the_application()
# Then the user is logged out
self.actionwords.the_user_is_logged_out()
# And redirected to the ROS login page
self.actionwords.redirected_to_the_ros_login_page()
def test_smoke__student_assessments_assignments_tab__begin_button_for_opened_assignments_uida6950bfa223c4fd594daf862fdf01557(self):
# Tags: priority_P1 test_case_level_Functional_GUI JIRA:CB-1789 JIRA:FY-4279
# Given a teacher opened an assignment to students
self.actionwords.a_teacher_opened_an_assignment_to_students()
# When a student goes to Assignments tab form Assessment page
self.actionwords.a_student_goes_to_assignments_tab_form_assessment_page()
# Then he sees the assignment having "Opened" status
self.actionwords.he_sees_the_assignment_having_opened_status(opened = "Opened")
# And in the Action column a Begin button is available for the opened assignment
self.actionwords.in_the_action_column_a_begin_button_is_available_for_the_opened_assignment()
def smoke__student_assessments_assign_tab__start_an_assignment(self, place):
# Tags: priority_P1 test_case_level_Functional JIRA:FY-4279
# Given a student on Assignments tab from Assessments
self.actionwords.a_student_on_assignments_tab_from_assessments()
# When student clicks on "<place>"
self.actionwords.student_clicks_on_p1(p1 = place)
# Then the quiz player is displayed
self.actionwords.the_quiz_player_is_displayed()
def test_Smoke__Student_Assessments_Assign_tab__start_an_assignment__uida6f71b127f4c4a959e47f16995634d0a(self):
self.smoke__student_assessments_assign_tab__start_an_assignment(place = 'title')
def test_Smoke__Student_Assessments_Assign_tab__start_an_assignment__uidcd3a9927853b4a88a634948382c04a80(self):
self.smoke__student_assessments_assign_tab__start_an_assignment(place = 'Begin button')
def test_Smoke__Student_Assessments_Assign_tab__start_an_assignment__uid3ebeb37f7b544f6a90c3addf48b2fcc1(self):
self.smoke__student_assessments_assign_tab__start_an_assignment(place = 'Continue button')
def test_smoke__student__save_and_exit_popup_yes_button_uid297f85c25e4d416c95971dbcac4907e7(self):
# Tags: JIRA:FY-5620 priority_P1 test_case_level_Functional
# Given a student on the 'Save and Exit' popup for exiting a quiz
self.actionwords.a_student_on_the_save_and_exit_popup_for_exiting_a_quiz()
# When he clicks on the Yes button
self.actionwords.he_clicks_on_the_yes_button()
# Then the popup closes and he is taken back to Assignments tab (URL: .... /assessments/assignments)
self.actionwords.the_popup_closes_and_he_is_taken_back_to_assignments_tab_url__assessmentsassignments()
def test_smoke__student__save_and_exit_popup_reopening_a_quiz_after_leaving_it_uidd8a5ed779c6c4dd593cc42df4e1c1e46(self):
# Tags: JIRA:FY-5620 priority_P1 test_case_level_Functional
# Given a student has started working on a quiz
self.actionwords.a_student_has_started_working_on_a_quiz()
# When he leaves the quiz by clicking on X button and then on Yes
self.actionwords.he_leaves_the_quiz_by_clicking_on_x_button_and_then_on_yes()
# And then re-opens the quiz
self.actionwords.then_reopens_the_quiz()
# Then the quiz player is re-opened
self.actionwords.the_quiz_player_is_reopened()
# And all the work previously done by the student is saved and displayed
self.actionwords.all_the_work_previously_done_by_the_student_is_saved_and_displayed()
def test_smoke__student_assessments__submit_answers_uidcf44e521bbac4b678e1b277da3eb26b4(self):
# Tags: priority_P0 test_case_level_Smoke JIRA:CB-1789
# Given a student on Assignments tab from Assessments
self.actionwords.a_student_on_assignments_tab_from_assessments()
# When student begins a quiz
self.actionwords.student_begins_a_quiz()
# And gives answers for all the questions and clicks Submit button
self.actionwords.gives_answers_for_all_the_questions_and_clicks_submit_button()
# Then the quiz is submitted
self.actionwords.the_quiz_is_submitted()
# And is no longer displayed in Assignments tab
self.actionwords.is_no_longer_displayed_in_assignments_tab()
def test_smoke__score_page_and_response_displayed_uid9ff61d6fd23341f88aaf71726fa1c07a(self):
# Tags: JIRA:FY-6285 test_case_level_Functional test_case_level_Smoke priority_P0
# Given a teacher on Progress Check -> Progress page -> FRQ assessment
self.actionwords.a_teacher_on_progress_check__progress_page__frq_assessment()
# And the assessment was submitted by at least one student
self.actionwords.the_assessment_was_submitted_by_at_least_one_student()
# When the teacher clicks on Score button
self.actionwords.the_teacher_clicks_on_score_button()
# Then the scoring page is displayed
self.actionwords.the_scoring_page_is_displayed()
# And he sees the Response on the left side of the page
self.actionwords.he_sees_the_response_on_the_left_side_of_the_page()
def test_smoke__score_page__score_autosave_in_feedback_popup_uid678fb5e9d88a4c36972804db0b5a5e60(self):
# Tags: test_case_level_Functional JIRA:FY-6370
# Given a teacher on Progress Check -> Progress page -> FRQ assessment
self.actionwords.a_teacher_on_progress_check__progress_page__frq_assessment()
# And he scores an assessment
self.actionwords.he_scores_an_assessment()
# When the teacher is adding some student feedback
self.actionwords.the_teacher_is_adding_some_student_feedback()
# And he saves the feedback popup
self.actionwords.he_saves_the_feedback_popup()
# Then the score is saved automatically displaying a pop-up with timestamp
self.actionwords.the_score_is_saved_automatically_displaying_a_popup_with_timestamp()
# And the popup will disappear after 1 second
self.actionwords.the_popup_will_disappear_after_1_second()
def test_smoke__score_page__submit_scores_visible_for_student_uide1aa54172db74915a8e7caef8c472c37(self):
# Tags: test_case_level_Functional JIRA:FY-6370
# Given a teacher on Progress Check -> Progress page -> FRQ assessment
self.actionwords.a_teacher_on_progress_check__progress_page__frq_assessment()
# And he scores an assessment
self.actionwords.he_scores_an_assessment()
# When he scores all parts and questions
self.actionwords.he_scores_all_parts_and_questions()
# Then he is able to click Submit button
self.actionwords.he_is_able_to_click_submit_button()
def smoke__results_tab_student_performance_sub_tab_submitted_frq_assessments(self, place):
# Tags: JIRA:FY-3209 priority_P1 test_case_level_Functional Regression
# Given a teacher on "<place>" page -> Results tab
self.actionwords.a_teacher_on_p1_page__results_tab(p1 = place)
# When he clicks on a scored FRQ assessment title -> Student Performance sub-tab ->click on a student
self.actionwords.he_clicks_on_a_scored_frq_assessment_title__student_performance_subtab_click_on_a_student()
# Then he sees red X for No Credit, yellow circle for Partial Credit, green check for Full Credit, Awaiting Scoring for not scored; for sub-scores of an FRQ, the same but faded slightly
self.actionwords.he_sees_red_x_for_no_credit_yellow_circle_for_partial_credit_green_check_for_full_credit_awaiting_scoring_for_not_scored_for_subscores_of_an_frq_the_same_but_faded_slightly()
def test_Smoke__Results_tab_Student_performance_sub_tab_submitted_FRQ_assessments__uidf446223bef184007b059dbda803aad30(self):
self.smoke__results_tab_student_performance_sub_tab_submitted_frq_assessments(place = 'Unit Assessments')
def test_Smoke__Results_tab_Student_performance_sub_tab_submitted_FRQ_assessments__uide8a1a03a3aa1425f8c58544f502a714b(self):
self.smoke__results_tab_student_performance_sub_tab_submitted_frq_assessments(place = 'Question Bank')
def test_smoke__results_tab_feedback_box_in_student_performance_when_feedback_was_given_from_progress_tab_uid78f0e62103624630bc99db076d706050(self):
# Tags: JIRA:FY-4159 story_priority_p0 test_case_level_Functional JIRA:FY-4144
# Given a teacher on the Results page for a specific quiz
self.actionwords.a_teacher_on_the_results_page_for_a_specific_quiz()
# When the teacher opens Student Performance for a specific student
self.actionwords.the_teacher_opens_student_performance_for_a_specific_student()
# And there is feedback already given for that student from Progress tab
self.actionwords.there_is_feedback_already_given_for_that_student_from_progress_tab()
# Then he should see the feedback box with the actual feedback
self.actionwords.he_should_see_the_feedback_box_with_the_actual_feedback()
def test_smoke__student_performance_pdf_printing_uid8b59e169c85549248b8d4c96b441ea43(self):
# Tags: JIRA:FY-924 Regression
# Given a teacher has class assignments for students
self.actionwords.a_teacher_has_class_assignments_for_students()
# And student has completed assignments
self.actionwords.student_has_completed_assignments()
# When the teacher is on the Results page for a specific assignment
self.actionwords.the_teacher_is_on_the_results_page_for_a_specific_assignment()
# And selects the individual student
self.actionwords.selects_the_individual_student()
# Then teacher will be able to generate a PDF for the individual student.
self.actionwords.teacher_will_be_able_to_generate_a_pdf_for_the_individual_student()
def smoke__student_assessments__results_page(self, info):
# Tags: priority_P2 test_case_level_GUI JIRA:CB-1789
# Given a student on subject Homepage
self.actionwords.a_student_on_subject_homepage()
# When clicks on Assessment tab in the navigation menu
self.actionwords.clicks_on_assessment_tab_in_the_navigation_menu()
# And clicks on Results tab
self.actionwords.clicks_on_results_tab()
# Then the Results page is displayed and the following "<info>" is available
self.actionwords.the_results_page_is_displayed_and_the_following_p1_is_available(p1 = info)
def test_Smoke__Student_Assessments__Results_page__uide82e4333f51b4a1db69eaa7adddffd1a(self):
self.smoke__student_assessments__results_page(info = 'Title')
def test_Smoke__Student_Assessments__Results_page__uid261bf4d7ac6949efab703ede3f43ed5b(self):
self.smoke__student_assessments__results_page(info = 'Assigned')
def test_Smoke__Student_Assessments__Results_page__uid6930fbd864ee46c998ef4eb8e77c31c3(self):
self.smoke__student_assessments__results_page(info = 'Performance')
def test_smoke__student_assessments__performance_column_in_results_tab_uidfb7a754b5a8144478c3cd5c72f506a4c(self):
# Tags: priority_P2 test_case_level_GUI JIRA:CB-1789
# Given a student on subject homepage
self.actionwords.a_student_on_subject_homepage()
# When clicks on Assessment tab in the navigation menu
self.actionwords.clicks_on_assessment_tab_in_the_navigation_menu()
# And clicks on Results tab
self.actionwords.clicks_on_results_tab()
# Then sees in Performance column the results for each quiz
self.actionwords.sees_in_performance_column_the_results_for_each_quiz()
# And is displayed the number of points overall
self.actionwords.is_displayed_the_number_of_points_overall()
def test_smoke__student_assessments__specific_quiz_results_page_uid14a832050da944e1b4c7ea144bc31133(self):
# Tags: priority_P1 test_case_level_GUI JIRA:CB-1789
# Given a student on Results tab from assessments
self.actionwords.a_student_on_results_tab_from_assessments()
# When clicks on a quiz
self.actionwords.clicks_on_a_quiz()
# Then The quiz's Results page is displayed
self.actionwords.the_quizs_results_page_is_displayed()
# And contains the student performance, the overall points and the list of questions
self.actionwords.contains_the_student_performance_the_overall_points_and_the_list_of_questions()
# And next to each question is shown the scored points from the total number of points
self.actionwords.next_to_each_question_is_shown_the_scored_points_from_the_total_number_of_points()
def test_student_assessments__specific_question_results_tab_1_copy_uid0699c0587ba949a5a2853f734106cd52(self):
# Tags: priority_P2 test_case_level_GUI JIRA:CB-1789
# Given a student on the Results tab from Assessments
self.actionwords.a_student_on_the_results_tab_from_assessments()
# When clicks on a quizz
self.actionwords.clicks_on_a_quizz()
# And clicks on a question
self.actionwords.clicks_on_a_question()
# Then sees the question having the correct answer letter colored in green
self.actionwords.sees_the_question_having_the_correct_answer_letter_colored_in_green()
# And a Next button is available to navigate to the next question
self.actionwords.a_next_button_is_available_to_navigate_to_the_next_question()
def test_smoke__as_a_teacher_i_want_to_be_able_to_set_an_assignment_to_complete_uida22c15e6ad0e4363b2c01fba0bf560a4(self):
# Tags: status priority_P1 test_case_level_Functional JIRA:CB-1789
# Given a teacher on Homepage
self.actionwords.a_teacher_on_homepage()
# When teacher navigates to Progress Checks->Progress tab
self.actionwords.teacher_navigates_to_progress_checks_progress_tab()
# And clicks on an assignment
self.actionwords.clicks_on_an_assignment()
# And clicks on Complete button
self.actionwords.clicks_on_complete_button()
# Then the assignment is set to complete
self.actionwords.the_assignment_is_set_to_complete()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.