seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
33008270053 | # load the example image and convert it to grayscale
import os
import cv2
import pytesseract
image = "example_01.jpg"
preprocess = "thresh"
image = cv2.imread(image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check to see if we should apply thresholding to preprocess the
# image
if preprocess == "thresh":
gray = cv2.threshold(gray, 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# make a check to see if median blurring should be done to remove
# noise
elif preprocess == "blur":
gray = cv2.medianBlur(gray, 3)
# write the grayscale image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, gray)
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'
text = pytesseract.image_to_string(gray)
os.remove(filename)
print(text)
# show the output images
cv2.imshow("Image", image)
cv2.imshow("Output", gray)
cv2.waitKey(0)
| Marius-Juston/SonnetGeneratorCombination | ocr.py | ocr.py | py | 1,062 | python | en | code | 0 | github-code | 36 |
29412113956 | import cv2
import numpy as np
import argparse
def main():
    """Mask a video so only a fixed rectangular region is kept.

    Reads the video given with -v/--video, zeroes every pixel outside a
    hard-coded rectangle, previews before/after frames, and writes the result
    to -o/--output. Press ESC in the preview window to stop early.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--video', type=str)
    parser.add_argument('-o', '--output', type=str, default=None)
    args = parser.parse_args()
    vid = cv2.VideoCapture(args.video)
    # Mirror the input's geometry and frame rate in the output container.
    width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(vid.get(cv2.CAP_PROP_FPS))
    codec = cv2.VideoWriter_fourcc('a', 'v', 'c', '1')
    out = cv2.VideoWriter(args.output, codec, fps, (width, height))
    while vid.isOpened():
        success, image = vid.read()
        if not success:
            print("ignoring empty video")
            break
        cv2.imshow("before", image)
        # Rectangle to KEEP, expressed as (y, x) points.
        # Setting for 720x1280 (portrait) handstand video at ../add_noise/sleeves.mp4
        start_point = (154, 170)
        end_point = (500, 1200)
        # Exclude the area outside the rectangle: build a binary mask that is
        # 255 inside the rectangle and 0 elsewhere, then AND it with the frame.
        start_y, start_x = start_point
        end_y, end_x = end_point
        mask = np.zeros(image.shape[:2], np.uint8)
        mask[start_x:end_x, start_y:end_y] = 255
        image = cv2.bitwise_and(image, image, mask=mask)
        out.write(image)
        cv2.imshow("after", image)
        if cv2.waitKey(1) & 0xFF == 27:  # ESC quits early
            break
    vid.release()
    out.release()  # Fix: finalize the output file (missing release can truncate it).
    cv2.destroyAllWindows()  # Fix: close the preview windows.


if __name__ == "__main__":
    main()
# https://stackoverflow.com/questions/11492214/opencv-via-python-is-there-a-fast-way-to-zero-pixels-outside-a-set-of-rectangle
# img = cv2.imread('testimg.jpeg')
# start_x = 30
# start_y = 30
# end_x = 200
# end_y = 100
# mask = np.zeros(img.shape[:2],np.uint8)
# mask[start_y:start_y+end_y,start_x:start_x+end_x] = 255
# result = cv2.bitwise_and(img,img,mask = mask)
# cv2.imshow("result", result) | flexinai/flexin-ipod-ad | exclusion.py | exclusion.py | py | 2,097 | python | en | code | 0 | github-code | 36 |
17729946892 | import argparse
import glob
import logging
import os
import random
import timeit
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForQuestionAnswering,
AlbertTokenizer,
#BertConfig,
#BertForQuestionAnswering,
#BertForSequenceClassification,
#BertTokenizer,
RobertaConfig,
RobertaForQuestionAnswering,
RobertaTokenizer,
get_linear_schedule_with_warmup,
openqa_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
squad_evaluate,
)
from transformers.data.processors.openqa import OpenQAResult, OpenQAV1Processor, OpenQAV2Processor
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
## added by Jong-Hoon Oh
import torchtext
import cnn_utils
import train_utils
# Pin OpenMP/MKL threading before the heavy libraries spin up their pools.
NUM_PARALLEL_EXEC_UNITS = 4
os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)
os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
os.environ['KMP_WARNINGS'] = 'off'
logger = logging.getLogger(__name__)
# All shortcut model names usable with --model_name_or_path (for the help text).
ALL_MODELS = sum(
    (tuple(conf.pretrained_config_archive_map.keys()) for conf in (AlbertConfig, RobertaConfig,)),
    (),
)
# Maps --model_type to its (config, QA model, tokenizer) classes.
MODEL_CLASSES = {
    "albert": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),
    "roberta": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),
}
## added by Jong-Hoon Oh
class TTDataset(torchtext.data.Dataset):
    """Dummy single-example Dataset wrapping a word list, used only so that
    torchtext's build_vocab can count the tokens in `words`."""
    def __init__(self, words, fields):
        # Only the 'text' field matters for vocabulary building.
        data_fields = [('text', fields['text'])]
        ex = (words,)
        examples = [torchtext.data.Example.fromlist(ex, data_fields)]
        super(TTDataset, self).__init__(examples, data_fields)
## added by Jong-Hoon Oh
def load_cnn_model_and_vocab(args, cnn_file, words):
    """Build the text vocabulary from `words` and load the pre-trained CNN.

    Returns (model, stoi) where stoi maps token -> index in the CNN's
    pre-trained vocabulary (used later for feature conversion).
    """
    assert args.emb_file and args.min_freq
    fields = cnn_utils.get_fields()
    # build_vocab mutates `fields` in place using the dummy dataset.
    train_utils.build_vocab(args, fields, TTDataset(words, fields), [])
    vocab = fields['text'].vocab  # NOTE(review): appears unused — confirm.
    model, pre_fields = train_utils.load_cnn_model(args, cnn_file, fields)
    return model, pre_fields['text'].vocab.stoi
def set_seed(args):
    """Seed Python, NumPy and PyTorch RNGs (and every CUDA device when GPUs
    are in use) so runs are reproducible."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def to_list(tensor):
    """Return `tensor` as a plain (possibly nested) Python list, detached
    from autograd and moved to the CPU first."""
    detached = tensor.detach()
    return detached.cpu().tolist()
# modified by Jong-Hoon Oh
# DATA PROCESSING PART
# - Converting input examples to cached examples
# - cnn_stoi: vocab.stoi for the cnn model
def load_and_cache_examples(args, filename, tokenizer, cnn_stoi, evaluate=False, output_examples=False):
    """Convert the examples in `filename` to features and cache them to disk.

    Writes torch.save() archives under <feat_dir>/{dset,pdset,feat,exset}.
    Nothing is returned — this function only populates the cache.
    NOTE(review): `output_examples` is accepted but never read here — confirm
    whether callers rely on it.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
    bert_token_str = "ot0"
    input_dir = args.feat_dir if args.feat_dir else "."
    # Cache-file name stem: input file basename without extension.
    fstem = list(filter(None,filename.split("/"))).pop()
    fstem = fstem.split(".")[0]
    fstem = fstem  # no-op kept from the original
    cached_file = "cached_{}_{}_{}_{}_{}_{}".format(
        fstem,
        list(filter(None, args.model_name_or_path.split("/"))).pop(),
        args.cnn_stem,
        list(filter(None, args.cnn_model.split("_"))).pop(),
        bert_token_str,
        str(args.max_seq_length),
    )
    # split the input data into data, positive_data, feature, and example
    dset_dir = input_dir + '/dset'
    pdset_dir = input_dir + '/pdset'
    feat_dir = input_dir + '/feat'
    exset_dir = input_dir + '/exset'
    cached_dset_file = os.path.join(dset_dir,cached_file)
    cached_feat_file = os.path.join(feat_dir,cached_file)
    cached_pdset_file = os.path.join(pdset_dir,cached_file)
    cached_exset_file = os.path.join(exset_dir,cached_file)
    if evaluate:
        logger.info("Specified cached file %s for dev or predict files", cached_dset_file)
    else:
        logger.info("Specified cached file %s for train files", cached_dset_file)
    # Init features and dataset from cache if it exists
    if os.path.exists(cached_dset_file) and not args.overwrite_cache:
        logger.info("Feature files already exist: %s", cached_dset_file)
    else:
        logger.info("Creating features from dataset file at %s", input_dir) # input_dir="." by defaults
        # if no predict file for evaluation or no train file for training
        if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)):
            try:
                import tensorflow_datasets as tfds
            except ImportError:
                raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")
            if args.version_2_with_negative:
                logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")
            tfds_examples = tfds.load("openqa")
            examples = OpenQAV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
        else:
            # The main part of data processing in our OpenQA experiments
            processor = OpenQAV1Processor()
            if evaluate:
                # initializer
                examples = processor.get_dev_examples(args.data_dir, filename=filename)
            else:
                # initializer
                examples = processor.get_train_examples(args.data_dir, filename=filename)
        features, dataset, possible_dataset = openqa_convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            cnn_stoi=cnn_stoi,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=not evaluate,
            return_dataset="pt", # "pt" represents 'pytorch dataset'
            threads=args.threads,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_dset_file)
            if evaluate:
                # Evaluation caches: dataset + features + raw examples.
                logger.info("dataset:{}".format(len(dataset)))
                torch.save({"dataset": dataset}, cached_dset_file)
                logger.info("features")
                torch.save({"features": features}, cached_feat_file)
                logger.info("examples")
                torch.save({"examples": examples}, cached_exset_file)
            else:
                # Training caches: dataset + answer-possible subset.
                logger.info("dataset:{}".format(len(dataset)))
                torch.save({"dataset": dataset}, cached_dset_file)
                logger.info("possible_dataset:{}".format(len(possible_dataset)))
                torch.save({"possible_dataset": possible_dataset}, cached_pdset_file)
    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
def main():
    """CLI entry point: parse options, load the CNN + transformer tokenizer
    and model, then pre-compute cached feature files for the given
    train/dev/predict inputs (preprocessing only — no training loop here)."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--prep_vocab_file",
        default=None,
        type=str,
        help="preprocessed_vocab_file with the train/dev/predict file. see make_openqa_cnn_vocab.py",
    )
    parser.add_argument(
        "--emb_file",
        default=None,
        type=str,
        help="The embedding vector file used for cnn",
    )
    parser.add_argument(
        "--cnn_model",
        default=None,
        type=str,
        help="The cnn model file name",
    )
    parser.add_argument(
        "--cnn_stem",
        default="enwiki",
        type=str,
        help="stem for cnn models for caching (different vocab.stoi for each model)",
    )
    parser.add_argument(
        "--min_freq",
        default=5,
        type=int,
        help="min freq. for unknown words",
    )
    parser.add_argument(
        "--emb_dim",
        default=300,
        type=int,
        help="dim for representation of fastText",
    )
    # Other parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the .json files for the task."
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--train_file",
        default=None,
        type=str,
        help="The input training file. If a data dir is specified, will look for the file there"
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--predict_file",
        default=None,
        type=str,
        help="The input evaluation file. If a data dir is specified, will look for the file there"
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--dev_file",
        default=None,
        type=str,
        help="The input development file. If a data dir is specified, will look for the file there"
        + "If no data dir or devel files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--feat_dir",
        default="",
        type=str,
        help="Where do you want to store the processed data whose features were extracted from the input data",
    )
    parser.add_argument(
        "--max_seq_length",
        default=384,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded.",
    )
    parser.add_argument(
        "--doc_stride",
        default=128,
        type=int,
        help="When splitting up a long document into chunks, how much stride to take between chunks.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help="The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help="The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.",
    )
    parser.add_argument(
        "--verbose_logging",
        action="store_true",
        help="If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.",
    )
    parser.add_argument(
        "--lang_id",
        default=0,
        type=int,
        help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features")
    args = parser.parse_args()
    # Fail fast when the CNN-side options are missing or point nowhere.
    # NOTE(review): asserts are stripped under `python -O`; consider raising.
    assert args.prep_vocab_file is not None
    assert args.cnn_model is not None
    assert args.cnn_stem is not None
    assert args.emb_dim is not None
    assert args.emb_file is not None
    if (not os.path.exists(args.prep_vocab_file)):
        raise ValueError(
            "prep_vocab_file ({}) does not exist. Check the --prep_vocab_file option.".format( args.prep_vocab_file) )
    if (not os.path.exists(args.cnn_model)):
        raise ValueError(
            "cnn_model ({}) does not exist. Check the --cnn_model option.".format( args.cnn_model) )
    if (not os.path.exists(args.emb_file)):
        raise ValueError(
            "emb_file ({}) does not exist. Check the --emb_file option.".format( args.emb_file) )
    if args.doc_stride >= args.max_seq_length - args.max_query_length:
        logger.warning(
            "WARNING - You've set a doc stride which may be superior to the document length in some "
            "examples. This could result in errors when building features from the examples. Please reduce the doc "
            "stride or increase the maximum length to ensure the features are correctly built."
        )
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        # The barrier starts
        torch.distributed.barrier()
    # added by Jong-Hoon Oh
    # - Load cnn model and pre-processed vocab.
    # - prep_vocab_file: see vocab/
    prep_tokens = torch.load(args.prep_vocab_file)
    all_tokens = prep_tokens['tokens']
    cnn_model, cnn_stoi = load_cnn_model_and_vocab(args, args.cnn_model, all_tokens)
    # CNN feature width = (#filter widths) x (filter size).
    cnn_dim = len(cnn_model.args.filter_widths) * cnn_model.args.filter_size
    args.cnn_dim = cnn_dim
    args.model_type = args.model_type.lower()
    # "albert": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    # Extra fields consumed by the (modified) QA model.
    config.num_of_TIERs = 3
    config.cnn_dim = args.cnn_dim
    config.emb_dim = args.emb_dim
    config.cnn_model = args.cnn_model
    config.cnn_stem = args.cnn_stem
    # tokenizer_class: AlbertTokenizer
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    # model_class: AlbertForQuestionAnswering
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),  # ckpt: tensorflow file, pt: pytorch file
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    ###########
    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        # The barrier ends
        torch.distributed.barrier()
    model.to(args.device)
    cnn_model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
    # Build the feature caches for whichever input files were provided.
    if args.train_file is not None:
        load_and_cache_examples(args, args.train_file, tokenizer, cnn_stoi, evaluate=False, output_examples=False)
    if args.predict_file is not None:
        load_and_cache_examples(args, args.predict_file, tokenizer, cnn_stoi, evaluate=True, output_examples=True)
    if args.dev_file is not None:
        load_and_cache_examples(args, args.dev_file, tokenizer, cnn_stoi, evaluate=True, output_examples=True)


if __name__ == "__main__":
    main()
| nict-wisdom/bertac | src/examples.openqa/run_openqa_preprocess.py | run_openqa_preprocess.py | py | 18,692 | python | en | code | 7 | github-code | 36 |
43507170482 | #!/usr/bin/env python3
"""this module contains a function for task 2"""
import numpy as np
def nparser(sentence, n):
    """nparser - return the n-grams of `sentence`, each rendered with str()
    of the corresponding slice (so lists of words become e.g. "['a', 'b']")."""
    return [str(sentence[i:i + n])
            for i in range(len(sentence))
            if i + n <= len(sentence)]
def ngram_bleu(references, sentence, n):
    """Modified (clipped) n-gram precision of `sentence` against `references`.

    Each distinct candidate n-gram counts at most
    min(candidate count, max count within a single reference); n-grams absent
    from every reference contribute 0.  The result is that clipped total
    divided by the number of candidate n-grams.  The brevity penalty is
    applied by cumulative_bleu, not here.

    references: list of reference translations (each a list of words)
    sentence:   candidate translation (list of words)
    n:          n-gram size
    Returns the clipped precision as a float.
    """
    parsed_sent = nparser(sentence, n)
    parsed_tot = len(parsed_sent)
    parsed_ref = [nparser(ref, n) for ref in references]
    # Clip ceiling for every n-gram that appears in any reference:
    # its maximum count within a single reference.
    uniq_ref = {}
    for ref_grams in parsed_ref:
        for gram in ref_grams:
            if gram not in uniq_ref:
                uniq_ref[gram] = max(grams.count(gram) for grams in parsed_ref)
    # Clipped counts for the candidate's n-grams.
    # Bug fix: the original executed `uniq_words[phrase] += 1` on a key that
    # was never initialized, raising KeyError whenever the candidate contained
    # an n-gram fewer times than its clip ceiling, or an n-gram missing from
    # every reference.  The intended value is min(candidate count, ceiling).
    uniq_words = {}
    for phrase in parsed_sent:
        if phrase in uniq_words or phrase not in uniq_ref:
            continue  # already counted, or contributes 0
        uniq_words[phrase] = min(parsed_sent.count(phrase), uniq_ref[phrase])
    matched = sum(uniq_words.values())
    return matched / parsed_tot
def cumulative_bleu(references, sentence, n):
    """Cumulative n-gram BLEU score.

    Geometric mean of the modified precisions for orders 1..n (from
    ngram_bleu) multiplied by the brevity penalty.

    references: list of reference translations (each a list of words)
    sentence:   candidate translation (list of words)
    n:          largest n-gram order to include
    Returns the cumulative BLEU score as a float.
    """
    # Per-order precisions for orders 1..n.
    scores = [ngram_bleu(references, sentence, order) for order in range(1, n + 1)]
    # Brevity penalty.  NOTE: this implementation uses the *shortest*
    # reference length rather than the closest one (canonical BLEU uses
    # the closest) — preserved from the original behavior.
    ref_len = min(len(ref) for ref in references)
    cand_len = len(sentence)
    bp = 1 if cand_len >= ref_len else np.exp(1 - (ref_len / cand_len))
    # Geometric mean via the mean of logs, scaled by the brevity penalty.
    return np.exp(np.average(np.log(scores))) * bp
| chriswill88/holbertonschool-machine_learning | supervised_learning/0x10-nlp_metrics/2-cumulative_bleu.py | 2-cumulative_bleu.py | py | 1,937 | python | en | code | 0 | github-code | 36 |
43734774169 | """
@创建日期 :2022/4/25
@修改日期 :2022/4/26
@作者 :jzj
@功能 :模型库,输出统一以字典格式
dqn 输出 value
a2c 输出 policy value
fixme: 可能会抽象为参数构建的模式,不确定
"""
from typing import List
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
def make_model(id, args):
    """Factory: instantiate the model registered under `id`, forwarding
    `args` as keyword arguments; raises NotImplementedError for unknown ids."""
    registry = {
        "cartpole_dqn": CartPoleDQN,
        "cartpole_a2c": CartPoleA2C,
        "flappybirdsimple_dqn": FlappyBirdSimpleDqn,
        "flappybirdsimple_a2c": FlappyBirdSimpleA2C,
    }
    if id not in registry:
        raise NotImplementedError
    return registry[id](**args)
class ModelWrapper(models.Model):
    """fixme: dev ing"""
    # Thin delegating wrapper around another Keras model; adds an
    # `inference` helper that batches a single observation when the wrapped
    # model does not provide its own inference().
    def __init__(self, model):
        super(ModelWrapper, self).__init__()
        self.model = model
    def call(self, inputs):
        # Forward pass delegates entirely to the wrapped model.
        return self.model(inputs)
    def inference(self, x):
        # Prefer the wrapped model's own inference() when it exists.
        if hasattr(self.model, "inference"):
            return self.model.inference(x)
        # Otherwise add a leading batch axis and run a normal forward pass.
        x = tf.expand_dims(x, 0)
        outputs = self.call(x)
        return outputs
# CartPole DQN
class CartPoleDQN(models.Model):
    """DQN value network for CartPole: a tanh MLP over the 4-dim observation
    producing one Q-value per action, returned as {"value": ...}."""

    def __init__(self, action_dim, hidden_dims: List):
        super(CartPoleDQN, self).__init__()
        self.input_layers = layers.InputLayer(input_shape=(4,))
        self.hidden_layers = [layers.Dense(dim, activation="tanh") for dim in hidden_dims]
        self.output_layer = layers.Dense(action_dim)

    def call(self, inputs):
        hidden = self.input_layers(inputs)
        for dense in self.hidden_layers:
            hidden = dense(hidden)
        return {"value": self.output_layer(hidden)}
# CartPole A2C
class CartPoleA2C(tf.keras.Model):
    """Actor-critic network for CartPole: one shared ReLU hidden layer
    feeding separate policy (logits) and value heads."""

    def __init__(self, num_action=2, num_hidden_units=128):
        super(CartPoleA2C, self).__init__()
        self.common = layers.Dense(num_hidden_units, activation=None)
        self.activation = layers.ReLU()
        self.actor = layers.Dense(num_action)
        self.critic = layers.Dense(1)

    def call(self, inputs: tf.Tensor):
        hidden = self.activation(self.common(inputs))
        return {"policy": self.actor(hidden), "value": self.critic(hidden)}
# FlappyBirdRGB A2C
class ConvBlock(layers.Layer):
    """Conv2D -> BatchNorm -> ReLU with 'same' padding."""

    def __init__(self, filter, kernel_size, stride=1):
        super(ConvBlock, self).__init__()
        self.conv = layers.Conv2D(filter, kernel_size, stride, padding="same")
        self.bn = layers.BatchNormalization()
        self.activation = layers.ReLU()

    def call(self, inputs):
        features = self.conv(inputs)
        features = self.bn(features)
        return self.activation(features)
class ResidualBlock(layers.Layer):
    # Two-conv residual block with a 1x1 projection shortcut.
    # NOTE(review): the shortcut conv always uses stride 1, so a stride != 1
    # would make the branch shapes disagree at the addition — confirm the
    # block is only ever built with stride=1 (it is in FlappyBirdA2C).
    def __init__(self, filter, kernel_size, stride, squeeze_factor, se=False):
        """fixme: add SE (squeeze-and-excitation) support — `se` is currently unused."""
        super(ResidualBlock, self).__init__()
        # First conv squeezes channels by squeeze_factor; second restores them.
        self.conv_block1 = ConvBlock(filter//squeeze_factor, kernel_size, stride)
        self.conv_block2 = ConvBlock(filter, kernel_size, stride)
        # 1x1 projection so the shortcut's channel count matches `filter`.
        self.short_cut = ConvBlock(filter, 1)
        # Extra BN + ReLU applied after the residual sum.
        self.output_bn = layers.BatchNormalization()
        self.output_ac = layers.ReLU()
    def call(self, inputs):
        x = self.conv_block1(inputs)
        x = self.conv_block2(x)
        # Residual connection: add the projected input.
        x = x + self.short_cut(inputs)
        x = self.output_ac(self.output_bn(x))
        return x
class PolicyHead(layers.Layer):
    """Policy head: conv the feature map down to one channel, flatten the
    spatial dims, then a dense layer producing `policy_dim` logits."""

    def __init__(self, policy_dim):
        super(PolicyHead, self).__init__()
        self.conv = layers.Conv2D(1, kernel_size=3, strides=1, padding="same")
        self.bn = layers.BatchNormalization()
        self.dense = layers.Dense(policy_dim)

    def call(self, inputs):
        height, width = inputs.shape[1], inputs.shape[2]
        features = self.bn(self.conv(inputs))
        flat = tf.reshape(features, (-1, height * width))
        return self.dense(flat)
class ValueHead(layers.Layer):
    """Value head: conv the feature map down to one channel, flatten the
    spatial dims, then a dense layer producing `value_dim` outputs."""

    def __init__(self, value_dim):
        super(ValueHead, self).__init__()
        self.conv = layers.Conv2D(1, kernel_size=3, strides=1, padding="same")
        self.bn = layers.BatchNormalization()
        self.dense = layers.Dense(value_dim)

    def call(self, inputs):
        height, width = inputs.shape[1], inputs.shape[2]
        features = self.bn(self.conv(inputs))
        flat = tf.reshape(features, (-1, height * width))
        return self.dense(flat)
class FlappyBirdA2C(models.Model):
    """
    Simple residual-CNN actor-critic; note the ``policy`` output is a logit
    vector, not probabilities.  (Docstring translated from the original
    Chinese.)
    """
    def __init__(self, filters=[32, 64, 128], blocks=[2, 2, 4]):
        super(FlappyBirdA2C, self).__init__()
        # Stem: strided 5x5 conv + BN + ReLU + max-pool.
        self.conv1 = layers.Conv2D(32, 5, 2, padding="same")
        self.bn1 = layers.BatchNormalization()
        self.ac1 = layers.ReLU()
        self.pool1 = layers.MaxPooling2D(pool_size=3, strides=2, padding="same")
        self.middle_layers = []
        # Per stage: `block` residual blocks followed by one max-pool.
        for filter, block in zip(filters, blocks):
            for n in range(block):
                self.middle_layers.append(ResidualBlock(filter, 3, 1, 4))
            self.middle_layers.append(layers.MaxPooling2D(pool_size=3, strides=2, padding="same"))
        self.policy_head = PolicyHead(policy_dim=2)
        self.value_head = ValueHead(value_dim=1)
    def call(self, inputs):
        x = self.pool1(self.ac1(self.bn1(self.conv1(inputs))))
        # NOTE(review): [:-1] skips the final max-pool appended in __init__ —
        # presumably intentional to keep a larger feature map; confirm.
        for layer in self.middle_layers[:-1]:
            x = layer(x)
        policy = self.policy_head(x)
        value = self.value_head(x)
        return {"policy": policy, "value": value}
# FlappyBirdSimple A2C
class FlappyBirdSimpleA2C(models.Model):
    """Actor-critic tanh MLP over the 2-dim simplified FlappyBird
    observation; returns {"policy": logits, "value": estimate}."""

    def __init__(self, policy_dim=2, value_dim=1, hidden_dims=[32, 64]):
        super(FlappyBirdSimpleA2C, self).__init__()
        self.input_layers = layers.InputLayer(input_shape=(2,))
        self.hidden_layers = [layers.Dense(dim, activation="tanh",) for dim in hidden_dims]
        self.policy_head = layers.Dense(policy_dim)
        self.value_head = layers.Dense(value_dim)

    def call(self, inputs):
        features = self.input_layers(inputs)
        for dense in self.hidden_layers:
            features = dense(features)
        return {"policy": self.policy_head(features),
                "value": self.value_head(features)}
# FlappyBirdSimple DQN
class FlappyBirdSimpleDqn(models.Model):
    """DQN value network: tanh MLP over the 2-dim simplified FlappyBird
    observation, returning {"value": Q-values}."""

    def __init__(self, value_dim=2, hidden_dims=[256, 256]):
        super(FlappyBirdSimpleDqn, self).__init__()
        self.input_layers = layers.InputLayer(input_shape=(2,))
        self.hidden_layers = [layers.Dense(dim, activation="tanh") for dim in hidden_dims]
        self.value_head = layers.Dense(value_dim)

    def call(self, inputs):
        features = self.input_layers(inputs)
        for dense in self.hidden_layers:
            features = dense(features)
        return {"value": self.value_head(features)}
| baichii/inspire | rookie/models.py | models.py | py | 6,872 | python | en | code | 0 | github-code | 36 |
18590413266 | import pytest
from sqlalchemy import create_engine
from rebrickable.data.database import Session
from rebrickable.data.models import *
models = [Color, Inventory, InventorySet,
InventoryPart, Part, PartCategory, Set, Theme]
@pytest.fixture(scope='module')
def session():
    """Yield a SQLAlchemy session bound to a fresh in-memory SQLite database
    with all model tables created; tears the session registry down after the
    module's tests finish."""
    engine = create_engine('sqlite:///:memory:', echo=True)
    Session.configure(bind=engine)
    # You probably need to create some tables and
    # load some test data, do so here.
    # To create tables, you typically do:
    Base.metadata.create_all(engine)
    yield Session()
    # NOTE(review): Session.close_all() is deprecated in newer SQLAlchemy
    # (close_all_sessions() replaces it) — confirm against the pinned version.
    Session.close_all()
@pytest.fixture
def objects():
    """One row per model, mutually consistent: color 1, inventory 1,
    part 3001 in category 1, set 7189-1 under theme 42 ('Town', with child
    theme 43)."""
    return [
        Color(id=1, name='black', rgb='123456', is_trans=True),
        Inventory(id=1, version=42, set_num='7189-1'),
        InventoryPart(inventory_id=1, part_num='3001', color_id=1, quantity=12),
        InventorySet(inventory_id=1, set_num='7189-1', quantity=1),
        Part(part_num='3001', name='Brick 2X4', part_cat_id=1),
        PartCategory(id=1, name='bricks'),
        Set(set_num='7189-1', name='Dumy Test', year=2015, theme_id=42, num_parts=12),
        Theme(id=42, name='Town', parent_id=None),
        Theme(id=43, name='Police', parent_id=42)
    ]
def test_models(session, objects):
    """Smoke test: every model row can be inserted, committed and refreshed."""
    session.add_all(objects)
    session.commit()
    for obj in objects:
        # refresh() round-trips through the DB; print() exercises __repr__.
        session.refresh(obj)
        print(obj)
| rienafairefr/pyrebrickable | tests/data/test_data.py | test_data.py | py | 1,369 | python | en | code | 4 | github-code | 36 |
9294959555 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""--------------------------------------------------------------------
GENETIC ALGORITHMS EXPERIMENTS
Started on the 2018/01/03
theo.alves.da.costa@gmail.com
https://github.com/theolvs
------------------------------------------------------------------------
"""
from scipy import stats
import seaborn as sns
import os
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
import time
from tqdm import tqdm
import itertools
#=============================================================================================================================
# DISTRIBUTIONS
#=============================================================================================================================
class Dist(object):
    """A candidate normal distribution — one 'individual' for the GA."""

    def __init__(self, mu=None, std=None, label=None):
        """Draw mu uniformly in [-10, 10) and std in [0, 10) when not given."""
        self.mu = np.random.rand() * 20 - 10 if mu is None else mu
        self.std = np.random.rand() * 10 if std is None else std
        self.label = "" if not label else " - " + label
        # CDF callable consumed by the Kolmogorov-Smirnov fitness test.
        self.func = lambda x: stats.norm.cdf(x, loc=self.mu, scale=self.std)

    def __repr__(self, markdown=False):
        dollar = "$" if markdown else ""
        prefix = "$\\" if markdown else ""
        return "Norm {1}mu={2}{0}, {0}std={3}{0}{4}".format(
            dollar, prefix, round(self.mu, 2), round(self.std, 2), self.label)

    def plot(self, fill=True):
        """Plot this pdf on the current matplotlib axes."""
        grid = np.linspace(-20, 20, 100)
        density = stats.norm.pdf(grid, loc=self.mu, scale=self.std)
        plt.plot(grid, density, label=self.__repr__(markdown=True))
        if fill:
            plt.fill_between(grid, 0, density, alpha=0.4)

    def __add__(self, other):
        """Crossover: the child's parameters are the parents' averages."""
        return Dist(np.mean([self.mu, other.mu]), np.mean([self.std, other.std]))

    def mutate(self, alpha=1):
        """Gaussian mutation whose amplitude shrinks as `alpha` grows;
        std is floored at 0.5."""
        step = 1 / (1 + np.log(1 + alpha))
        self.mu = self.mu + step * np.random.randn()
        self.std = max(self.std + step * np.random.randn(), 0.5)

    def fitness(self, x):
        """1 - KS statistic of sample `x` against this distribution
        (higher is better)."""
        return 1 - stats.kstest(x, self.func).statistic
class Population(object):
    """A population of `Dist` individuals evolved by selection, averaging
    crossover and Gaussian mutation toward a target sample."""
    def __init__(self,distributions = None,n = 100):
        # Either wrap an existing list of Dist, or draw n random individuals.
        if distributions is not None:
            self.distributions = distributions
        else:
            self.distributions = [Dist() for i in range(n)]
    def __getitem__(self,key):
        # A tuple/list of indices returns a plain list of individuals;
        # a single index returns one individual.
        if type(key) == tuple or type(key) == list:
            d = []
            for i in key:
                d.append(self.distributions[i])
            return d
        else:
            return self.distributions[key]
    def __iter__(self):
        return iter(self.distributions)
    def __len__(self):
        return len(self.distributions)
    def plot(self,title = "Normal distributions",figsize = None):
        """Plot every individual's pdf (filled only for small populations)."""
        if figsize:
            plt.figure(figsize = figsize)
        plt.title(title)
        fill = len(self) < 5
        for d in self:
            d.plot(fill = fill)
        plt.legend()
        plt.xlabel("x")
        plt.show()
    def evaluate(self,x):
        """Return (indices, fitnesses) sorted by fitness, best first."""
        fitnesses = [(i,dist.fitness(x)) for i,dist in enumerate(self)]
        indices,fitnesses = zip(*sorted(fitnesses,key = lambda x : x[1],reverse = True))
        return indices,fitnesses
    def selection(self,x,top = 0.1):
        """Indices of the best `top` fraction of the population on sample x."""
        indices,fitnesses = self.evaluate(x)
        n = int(top*len(fitnesses))
        return indices[:n]
    def crossover(self,indices):
        """Replace the population with children bred from random pairs of the
        selected individuals (at most len(self) children)."""
        combinations = list(itertools.combinations(indices,2))
        np.random.shuffle(combinations)
        combinations = combinations[:len(self)]
        new_population = []
        for i,j in combinations:
            new_population.append(self[i]+self[j])
        self.distributions = new_population
    def mutate(self,generation = 1):
        # Mutation amplitude decays with the generation number (see Dist.mutate).
        for d in self:
            d.mutate(generation)
    def evolve(self,x,top = 0.25,n_generations = 20,last_selection = True):
        """Run the GA loop against sample `x`, plot the fitness history, and
        (when last_selection) return a new Population of the final elite.
        NOTE(review): returns None when last_selection is False."""
        all_fitnesses = [self.evaluate(x)[1]]
        for generation in tqdm(range(n_generations)):
            indices = self.selection(x,top)
            self.crossover(indices)
            self.mutate(generation)
            indices,fitnesses = self.evaluate(x)
            all_fitnesses.append(fitnesses)
        self._plot_fitnesses(all_fitnesses)
        if last_selection:
            indices = self.selection(x,top)
            return Population(self[indices])
    def _plot_fitnesses(self,fitnesses):
        """Plot mean fitness per generation with a min/max envelope."""
        sups = []
        infs = []
        means = []
        for step in fitnesses:
            sups.append(np.max(step))
            infs.append(np.min(step))
            means.append(np.mean(step))
        plt.figure(figsize=(10,6))
        plt.plot(means)
        plt.fill_between(range(len(means)),sups,infs, alpha = 0.2)
        plt.xlabel('# Generation')
        plt.ylabel('Fitness')
        # NOTE(review): no artists are labeled, so legend() draws nothing
        # (and warns on recent matplotlib) — confirm whether it can go.
        plt.legend()
        plt.show()
#=============================================================================================================================
# LOGREG
#=============================================================================================================================
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class LogReg(torch.nn.Module):
    """Bias-free logistic regression evolved by a genetic algorithm
    (crossover = weight averaging, mutation = Gaussian weight noise)."""
    def __init__(self, n_feature,n_output = 1,alpha = 10e-1):
        # alpha scales the Gaussian noise added during mutation.
        self.alpha = alpha
        self.args = n_feature,n_output
        super(LogReg, self).__init__()
        self.out = torch.nn.Linear(n_feature,n_output,bias = False) # output layer
    def forward(self, x):
        x = Variable(torch.FloatTensor(x))
        # NOTE(review): F.sigmoid is deprecated in modern torch
        # (torch.sigmoid is the replacement).
        x = F.sigmoid(self.out(x))
        return x
    def __add__(self,other):
        """Crossover: the child's weights are the mean of the parents'."""
        new = LogReg(*self.args)
        new.out.weight.data = torch.FloatTensor(0.5 * (self.out.weight.data.numpy() + other.out.weight.data.numpy()))
        return new
    def mutate(self,generation):
        # Add Gaussian noise to the weights; 'generation' is accepted for
        # interface symmetry but not used here.
        out = self.out.weight.data.numpy()
        noise_out = self.alpha * np.random.randn(*out.shape)
        self.out.weight.data = torch.FloatTensor(self.out.weight.data.numpy() + noise_out)
    def evaluate(self,x,y):
        """Return the (unnormalized) Bernoulli log-likelihood of labels y
        under the model; the 10e-9 terms guard against log(0)."""
        pred = self.forward(x).data.numpy()
        loss_1 = np.sum(np.log(pred + 10e-9)*y.reshape(-1,1))
        loss_0 = np.sum(np.log(1-pred + 10e-9)*(1-y).reshape(-1,1))
        return loss_1 + loss_0
    def plot_coefs(self):
        # Visualize the learned weight vector (one weight per input pixel).
        plt.figure(figsize = (15,4))
        plt.title("Coefficients")
        plt.axhline(0,c = "black")
        plt.plot(self.out.weight.data.numpy()[0])
        plt.xlabel("# Pixel")
        plt.show()
class PopulationLogReg(object):
    """Population of LogReg individuals trained by a genetic algorithm on
    a fixed dataset (x, y)."""
    def __init__(self,x,y,regs = None,n = 20,top = 0.25,**kwargs):
        self.x = x
        self.y = y
        self.kwargs = kwargs
        if regs is None:
            self.regs = [LogReg(**kwargs) for i in range(n)]
        else:
            self.regs = regs
    def __getitem__(self,key):
        # Supports both a single index and a list/tuple of indices.
        if type(key) == tuple or type(key) == list:
            d = []
            for i in key:
                d.append(self.regs[i])
            return d
        else:
            return self.regs[key]
    def __iter__(self):
        return iter(self.regs)
    def __len__(self):
        return len(self.regs)
    def evaluate(self):
        """Return (indices, fitnesses) of all individuals, sorted best-first."""
        fitnesses = [(i,element.evaluate(self.x,self.y)) for i,element in enumerate(self)]
        indices,fitnesses = zip(*sorted(fitnesses,key = lambda x : x[1],reverse = True))
        return indices,fitnesses
    def selection(self,top = 0.5):
        """Return the indices of the top fraction of individuals."""
        indices,fitnesses = self.evaluate()
        n = int(top*len(fitnesses))
        return indices[:n]
    def crossover(self,indices):
        # Breed random pairs of the selected parents; if there are not
        # enough offspring, pad with freshly initialized individuals.
        combinations = list(itertools.combinations(indices,2))
        np.random.shuffle(combinations)
        combinations = combinations[:len(self)]
        new_population = []
        for i,j in combinations:
            new_population.append(self[i]+self[j])
        if len(new_population) < len(self):
            new_population.extend([LogReg(**self.kwargs) for i in range(len(self)-len(new_population))])
        self.regs = new_population
    def mutate(self,generation):
        for d in self:
            d.mutate(generation)
    def evolve(self,top = 0.25,n_generations = 20,last_selection = True):
        """Run the GA loop, plot fitness history, and return a new
        PopulationLogReg holding the final survivors."""
        n_fittest = int(top*len(self))
        offsprings = len(list(itertools.combinations(range(n_fittest),2)))
        print("- Generations {}".format(len(self)))
        print("- Fittest : {}".format(n_fittest))
        print("- Offsprings : {}".format(offsprings))
        all_fitnesses = [self.evaluate()[1]]
        for generation in tqdm(range(n_generations)):
            indices = self.selection(top)
            self.crossover(indices)
            self.mutate(generation)
            indices,fitnesses = self.evaluate()
            all_fitnesses.append(fitnesses)
        self._plot_fitnesses(all_fitnesses)
        if last_selection:
            indices = self.selection(top)
        return PopulationLogReg(self.x,self.y,regs = self[indices])
    def _plot_fitnesses(self,fitnesses):
        # Compare GA fitness per generation against a fitted sklearn
        # LogisticRegression baseline (horizontal line).
        from sklearn.linear_model import LogisticRegression
        lr = LogisticRegression()
        lr.fit(self.x,self.y)
        pred_bench = lr.predict_proba(self.x)
        # NOTE(review): predict_proba returns probabilities for *both*
        # classes, so this baseline loss mixes the two columns -- compare
        # with LogReg.evaluate, which uses a single-column prediction.
        loss_bench = np.sum(np.log(pred_bench + 10e-9)*self.y.reshape(-1,1)) + np.sum(np.log(1-pred_bench + 10e-9)*(1-self.y).reshape(-1,1))
        sups = []
        infs = []
        means = []
        for step in fitnesses:
            sups.append(np.max(step))
            infs.append(np.min(step))
            means.append(np.mean(step))
        plt.figure(figsize=(10,6))
        plt.plot(means)
        plt.fill_between(range(len(means)),sups,infs, alpha = 0.2)
        plt.axhline(loss_bench)
        plt.xlabel('# Generation')
        plt.ylabel('Fitness')
        # NOTE(review): no artist is given a label, so this legend is empty.
        plt.legend()
        plt.show()
| TheoLvs/reinforcement-learning | 4. Chrome Dino/experiments.py | experiments.py | py | 10,024 | python | en | code | 94 | github-code | 36 |
6367637162 | # Look for #IMPLEMENT tags in this file.
'''
All models need to return a CSP object, and a list of lists of Variable objects
representing the board. The returned list of lists is used to access the
solution.
For example, after these three lines of code
csp, var_array = caged_csp_model(board)
solver = BT(csp)
solver.bt_search(prop_FC, var_ord)
var_array[0][0].get_assigned_value() should be the correct value in the top left
cell of the FunPuzz puzzle.
The grid-only models do not need to encode the cage constraints.
1. binary_ne_grid (worth 10/100 marks)
- A model of a FunPuzz grid (without cage constraints) built using only
binary not-equal constraints for both the row and column constraints.
2. nary_ad_grid (worth 10/100 marks)
- A model of a FunPuzz grid (without cage constraints) built using only n-ary
all-different constraints for both the row and column constraints.
3. caged_csp_model (worth 25/100 marks)
    - A model built using your choice of (1) binary not-equal, or (2)
n-ary all-different constraints for the grid.
- Together with FunPuzz cage constraints.
'''
from cspbase import *
import itertools
def binary_ne_grid(fpuzz_grid):
    """Build a FunPuzz grid CSP using only binary not-equal constraints.

    For every unordered pair of cells sharing a row or a column, one
    binary constraint is added whose satisfying tuples are all (v1, v2)
    with v1 != v2.

    Bug fix vs. the original: row constraints were appended only to a
    bookkeeping list (`constraint_values`) and never to the list handed
    to the CSP, so the solver only saw the column constraints.

    :param fpuzz_grid: board spec; fpuzz_grid[0][0] is the board size
    :return: (CSP, variables) where variables[row][col] is the cell Variable
    """
    initial_variables = get_initial_variables(fpuzz_grid)
    size_of_board = initial_variables["size"]
    cell_values = initial_variables["cell_values"]
    variables = initial_variables["variables"]

    # All ordered value pairs with distinct components; shared by every
    # binary constraint in the model.
    sat_tuples = [(v1, v2) for v1 in cell_values for v2 in cell_values if v1 != v2]

    constraints = []
    for line in range(size_of_board):
        for i in range(size_of_board):
            for j in range(i + 1, size_of_board):
                # Row constraint between cells (line, i) and (line, j).
                row_con = Constraint(
                    cell_name(line, i) + ", " + cell_name(line, j),
                    [variables[line][i], variables[line][j]])
                row_con.add_satisfying_tuples(sat_tuples)
                constraints.append(row_con)
                # Column constraint between cells (i, line) and (j, line).
                col_con = Constraint(
                    cell_name(i, line) + ", " + cell_name(j, line),
                    [variables[i][line], variables[j][line]])
                col_con.add_satisfying_tuples(sat_tuples)
                constraints.append(col_con)

    csp = CSP('binary_ne', [variable for rows in variables for variable in rows])
    for constraint in constraints:
        csp.add_constraint(constraint)
    return (csp, variables)
def nary_ad_grid(fpuzz_grid):
    """Build a FunPuzz grid CSP using n-ary all-different constraints.

    One all-different constraint per row and one per column.

    Bug fix vs. the original: the old code indexed `variables` with the
    domain *values* (1..n) rather than 0-based indices, which both read
    the wrong cells and raised IndexError at variables[n], and its "row"
    constraint scoped variables[col] where `col` was a value, not an
    index.

    :param fpuzz_grid: board spec; fpuzz_grid[0][0] is the board size
    :return: (CSP, variables) where variables[row][col] is the cell Variable
    """
    initial_variables = get_initial_variables(fpuzz_grid)
    size_of_board = initial_variables["size"]
    cell_values = initial_variables["cell_values"]
    variables = initial_variables["variables"]

    # Every permutation of the domain satisfies an all-different
    # constraint over a full row or column.
    sat_tuples = list(itertools.permutations(cell_values))

    constraints = []
    for i in range(size_of_board):
        row_con = Constraint("row_" + str(i), variables[i])
        row_con.add_satisfying_tuples(sat_tuples)
        constraints.append(row_con)

        col_scope = [variables[r][i] for r in range(size_of_board)]
        col_con = Constraint("col_" + str(i), col_scope)
        col_con.add_satisfying_tuples(sat_tuples)
        constraints.append(col_con)

    csp = CSP('nary_ad', [variable for rows in variables for variable in rows])
    for constraint in constraints:
        csp.add_constraint(constraint)
    return (csp, variables)
def caged_csp_model(fpuzz_grid):
    """FunPuzz model: binary not-equal grid constraints plus cage constraints.

    Each cage row of fpuzz_grid (rows 1..end; row 0 holds the board size)
    is [cell, ..., target, operation], where a cell is encoded as a
    two-digit number RC (1-based row and column) and the operation codes
    are 0=+, 1=-, 2=/, 3=* (see check_operation).

    Bug fix vs. the original: cages were iterated with
    range(1, size_of_board), which only visits all cages when their count
    happens to equal board_size - 1; iterate over every input row instead.

    :return: (CSP, variables) where variables[row][col] is the cell Variable
    """
    initial_variables = get_initial_variables(fpuzz_grid)
    cell_values = initial_variables["cell_values"]
    csp, variables = binary_ne_grid(fpuzz_grid)
    constraints = []
    for cage in range(1, len(fpuzz_grid)):
        row = list(fpuzz_grid[cage])
        operation, target, scope_values = row[-1], row[-2], row[:-2]
        scope, cells = [], []
        for scope_value in scope_values:
            # Decode the two-digit RC encoding into 0-based grid indices.
            scope.append(variables[(scope_value // 10) - 1][(scope_value % 10) - 1])
        constraint_name = "Operation: " + str(operation) + "Target:" + str(target)
        constraint = Constraint(constraint_name, scope)
        op = check_operation(operation)
        if op['addition']:
            for cell in itertools.product(tuple(cell_values), repeat=len(scope)):
                if sum(cell) == target:
                    cells.append(cell)
        elif op['subtraction']:
            # Subtraction is order-free: any cell may act as the minuend.
            for cell in itertools.product(tuple(cell_values), repeat=len(scope)):
                for i in range(len(scope)):
                    if cell[i] - sum(cell[:i] + cell[i + 1:]) == target:
                        cells.append(cell)
                        break  # one copy per satisfying tuple is enough
        elif op['multiplication']:
            for cell in itertools.product(tuple(cell_values), repeat=len(scope)):
                product = 1
                for v1 in cell:
                    product *= v1
                if product == target:
                    cells.append(cell)
        elif op['division']:
            # Division is order-free: any cell may act as the dividend.
            for cell in itertools.product(tuple(cell_values), repeat=len(scope)):
                for i in range(len(scope)):
                    quotient = float(cell[i])
                    for v1 in cell[:i] + cell[i + 1:]:
                        quotient = quotient / v1
                    if quotient == target:
                        cells.append(cell)
                        break  # one copy per satisfying tuple is enough
        constraint.add_satisfying_tuples(cells)
        constraints.append(constraint)
    for constraint in constraints:
        csp.add_constraint(constraint)
    return (csp, variables)
def check_operation(operation):
    """Map a FunPuzz operation code to a dict of boolean flags.

    Input encoding: 0 = addition, 1 = subtraction, 2 = division,
    3 = multiplication.  Exactly one flag is True for a known code;
    every flag stays False for anything else.
    """
    flags = {'addition': False, 'subtraction': False,
             'multiplication': False, 'division': False}
    code_to_name = {0: 'addition', 1: 'subtraction',
                    2: 'division', 3: 'multiplication'}
    if operation in code_to_name:
        flags[code_to_name[operation]] = True
    return flags
def cell_name(row, column):
    """Return the canonical display name of the cell at (row, column),
    used when naming variables and constraints."""
    return "Row: {} Col: {}".format(row, column)
def get_initial_variables(fpuzz_grid):
    """Read the board size from fpuzz_grid and build the variable grid.

    Returns a dict with:
      "size"        -- board dimension n (first entry of the input grid),
      "cell_values" -- the domain [1, n],
      "variables"   -- n x n nested list of Variable objects, one per cell,
                       each with its own fresh copy of the domain.
    """
    size_of_board = fpuzz_grid[0][0]
    domain = list(range(1, size_of_board + 1))
    variables = [
        [Variable(cell_name(r, c), domain[:]) for c in range(size_of_board)]
        for r in range(size_of_board)
    ]
    return {"size": size_of_board, "cell_values": domain, "variables": variables}
| eliasvolonakis/CSC384CourseWork | Constraint Satisfaction Assignment/puzzle_csp.py | puzzle_csp.py | py | 8,422 | python | en | code | 0 | github-code | 36 |
10916034260 | import abc
import sys
from importlib import import_module
from typing import TypeVar
import pytest
from sphinx.ext.autodoc.mock import _MockModule, _MockObject, mock
def test_MockModule():
    """Attribute access on a _MockModule yields _MockObject instances and
    repr() reflects the full dotted path."""
    mock = _MockModule('mocked_module')
    assert isinstance(mock.some_attr, _MockObject)
    assert isinstance(mock.some_method, _MockObject)
    assert isinstance(mock.attr1.attr2, _MockObject)
    assert isinstance(mock.attr1.attr2.meth(), _MockObject)
    assert repr(mock.some_attr) == 'mocked_module.some_attr'
    assert repr(mock.some_method) == 'mocked_module.some_method'
    assert repr(mock.attr1.attr2) == 'mocked_module.attr1.attr2'
    assert repr(mock.attr1.attr2.meth) == 'mocked_module.attr1.attr2.meth'
    assert repr(mock) == 'mocked_module'
def test_MockObject():
    """A _MockObject supports attribute chains, calls, subclassing
    (keeping docstrings and real methods) and parametrized generics."""
    mock = _MockObject()
    assert isinstance(mock.some_attr, _MockObject)
    assert isinstance(mock.some_method, _MockObject)
    assert isinstance(mock.attr1.attr2, _MockObject)
    assert isinstance(mock.attr1.attr2.meth(), _MockObject)
    # subclassing
    class SubClass(mock.SomeClass):
        """docstring of SubClass"""
        def method(self):
            return "string"
    obj = SubClass()
    assert SubClass.__doc__ == "docstring of SubClass"
    assert isinstance(obj, SubClass)
    assert obj.method() == "string"
    assert isinstance(obj.other_method(), SubClass)
    # parametrized type
    T = TypeVar('T')
    class SubClass2(mock.SomeClass[T]):
        """docstring of SubClass"""
    obj2 = SubClass2()
    assert SubClass2.__doc__ == "docstring of SubClass"
    assert isinstance(obj2, SubClass2)
def test_mock():
    """mock([...]) installs _MockModule entries (including submodules) in
    sys.modules only inside the context manager."""
    modname = 'sphinx.unknown'
    submodule = modname + '.submodule'
    assert modname not in sys.modules
    with pytest.raises(ImportError):
        import_module(modname)
    with mock([modname]):
        import_module(modname)
        assert modname in sys.modules
        assert isinstance(sys.modules[modname], _MockModule)
        # submodules are also mocked
        import_module(submodule)
        assert submodule in sys.modules
        assert isinstance(sys.modules[submodule], _MockModule)
    assert modname not in sys.modules
    with pytest.raises(ImportError):
        import_module(modname)
def test_mock_does_not_follow_upper_modules():
    """Mocking 'a.b.c' must not implicitly make the parent 'a.b' importable."""
    with mock(['sphinx.unknown.module']):
        with pytest.raises(ImportError):
            import_module('sphinx.unknown')
@pytest.mark.skipif(sys.version_info < (3, 7), reason='Only for py37 or above')
def test_abc_MockObject():
    """A _MockObject attribute can serve as a base class next to an ABC
    with an abstract __init__ without breaking instantiation."""
    mock = _MockObject()
    class Base:
        @abc.abstractmethod
        def __init__(self):
            pass
    class Derived(Base, mock.SubClass):
        pass
    obj = Derived()
    assert isinstance(obj, Base)
    assert isinstance(obj, _MockObject)
    assert isinstance(obj.some_method(), Derived)
def test_mock_decorator():
    """Using _MockObject attributes as decorators must preserve the
    docstrings of the decorated function, method and class."""
    mock = _MockObject()
    @mock.function_deco
    def func():
        """docstring"""
    class Foo:
        @mock.method_deco
        def meth(self):
            """docstring"""
    @mock.class_deco
    class Bar:
        """docstring"""
    assert func.__doc__ == "docstring"
    assert Foo.meth.__doc__ == "docstring"
    assert Bar.__doc__ == "docstring"
| borntocodeRaj/sphinx_configuration | tests/test_ext_autodoc_mock.py | test_ext_autodoc_mock.py | py | 3,242 | python | en | code | 1 | github-code | 36 |
73952795302 | from scipy.special import comb
"""
This file contains a set of functions to practice your
probabilities skills.
It needs to be completed with "vanilla" Python, without
help from any library -- except for the bin_dist function.
"""
def head_tails(p, n):
    """
    Probability of observing n heads in a row from a coin whose per-toss
    head probability is p, with independent tosses.

    :param p: probability of a head
    :param n: number of consecutive heads (int)
    :return: probability of n heads in a row
    :rtype: float
    """
    streak_probability = p ** n
    return streak_probability
head_tails(0.5,1)
def bin_dist(n, p, x):
    """
    Binomial probability mass: probability of exactly x successes in n
    independent trials with per-trial success probability p.

    :param n: number of trials (int)
    :param p: probability of success
    :param x: number of successes (int)
    :return: probability of exactly x successes
    :rtype: float
    :raise ValueError: if x > n
    """
    if x > n:
        # Bug fix: the old code *returned* the ValueError class instead of
        # raising, silently handing callers a class object.
        raise ValueError("x (successes) cannot exceed n (trials)")
    return comb(n, x, exact=True) * (p ** x) * ((1 - p) ** (n - x))
bin_dist(10, .5, 6)
try:
    bin_dist(3, .7, 4)  # x > n: demonstrates the ValueError
except ValueError:
    pass
def fact(x):
    """
    Return the factorial of x as a float.

    :param x: a non-negative integer
    :return: x!
    :rtype: float
    :raise ValueError: if x is negative
    """
    if x < 0:
        raise ValueError('x is negative')
    product = 1
    for factor in range(2, x + 1):
        product *= factor
    return float(product)
def bin_cdf(n, p, x):
    """
    Binomial CDF: probability of at most x successes in n independent
    trials with per-trial success probability p.

    :param n: number of trials (int)
    :param p: probability of success
    :param x: number of successes (int)
    :return: P(X <= x) for X ~ Binomial(n, p)
    :rtype: float
    :raise ValueError: if x > n
    """
    if x > n:
        # Bug fix: the old code returned the ValueError class instead of
        # raising it.
        raise ValueError("x (successes) cannot exceed n (trials)")
    # Bug fix: the old implementation returned C(n, x) / p**n, which is
    # not a probability at all (and returned 0 whenever p == 0).  The CDF
    # is the sum of the pmf over 0..x.
    total = 0.0
    for k in range(x + 1):
        total += comb(n, k, exact=True) * (p ** k) * ((1 - p) ** (n - k))
    return total
bin_cdf(3, 1, 1)
bin_cdf(3, 0 ,1)
bin_cdf(3, 0.7, 2)
try:
    bin_cdf(3, 0.7, 4)  # x > n: demonstrates the ValueError
except ValueError:
    pass
bin_cdf(4, 0.2, 3)
bin_cdf(4, 0.4, 2)
bin_cdf(4, 0.8, 3)
bin_cdf(5, 0.2, 2)
bin_cdf(5, 0.2, 3)
bin_cdf(5, 0.4, 2)
bin_cdf(5, 0.4, 3)
bin_cdf(5, 0.8, 3)
bin_cdf(5, 0.2, 2)
bin_cdf(6, 0.2, 3)
bin_cdf(6, 0.4, 2)
bin_cdf(6, 0.4, 3)
bin_cdf(6, 0.8, 3)
| ashokpanigrahi88/ashokpython | Exercises/Pre-Maths/probabilities.py | probabilities.py | py | 2,880 | python | en | code | 0 | github-code | 36 |
43297568374 | from _rawffi import alt
class MetaStructure(type):
    """Metaclass that compiles a class-level '_fields_' list into a
    _rawffi alt._StructDescr and exposes each field as a class attribute.
    NOTE(review): part of PyPy's _rawffi.alt app-level support, written
    in Python-2 style."""
    def __new__(cls, name, bases, dic):
        cls._compute_shape(name, dic)
        return type.__new__(cls, name, bases, dic)
    @classmethod
    def _compute_shape(cls, name, dic):
        fields = dic.get('_fields_')
        if fields is None:
            # Subclass without its own _fields_: nothing to compile.
            return
        struct_descr = alt._StructDescr(name, fields)
        for field in fields:
            dic[field.name] = field
        dic['_struct_'] = struct_descr
class Structure(object):
    # Base class for user-defined structs; the Python-2 '__metaclass__'
    # hook makes subclasses compile their '_fields_' via MetaStructure.
    __metaclass__ = MetaStructure
| mozillazg/pypy | pypy/module/_rawffi/alt/app_struct.py | app_struct.py | py | 542 | python | en | code | 430 | github-code | 36 |
class ListNode:
    """Singly linked list node holding a value and a next pointer."""
    def __init__(self, val=0, next=None):
        """
        :type val: int
        :type next: ListNode
        """
        self.val = val
        self.next = next
def mergeTwoLists(list1 ,list2):
    """
    Merge two sorted linked lists and return the head of the merged list.

    :type list1: ListNode
    :type list2: ListNode
    :rtype: ListNode
    """
    result = ListNode()  # dummy head; the merged list hangs off result.next
    tail = result
    while list1 and list2:
        if list1.val < list2.val:
            tail.next = list1
            list1 = list1.next
        else:
            tail.next = list2
            list2 = list2.next
        tail = tail.next
    # Bug fix: attach whichever list still has nodes.  The old version
    # dropped the remainder of the non-exhausted list (e.g. merging
    # [1,2,4] and [1,3,4] lost the final 4).
    tail.next = list1 if list1 else list2
    return result.next
# Ad-hoc demo: merge [1,2,4] and [1,3,4] and print the resulting values.
list1 = ListNode(1,ListNode(2,ListNode(4)))
list2 = ListNode(1,ListNode(3,ListNode(4)))
res = mergeTwoLists(list1,list2)
while res.next is not None:
    print(res.val,end=" ")
    res = res.next
print(res.val)
| joseluisvr93/leetcode | mergeTwoList.py | mergeTwoList.py | py | 800 | python | en | code | 0 | github-code | 36 |
40056650925 | from decouple import config
import os
class HTML_file:
    """Builds and saves an HTML report page of assumption graphs for one
    group/measure, embedding pre-rendered PNGs from work/visual_graphs."""
    def __init__(self, group_name: str, measure: str) -> None:
        self.group_name = group_name
        self.measure = measure
        # 'root' comes from the environment via python-decouple.
        self.png_dir = os.path.join(config('root'), 'work/visual_graphs')
    def save_directory(self) -> str:
        """Directory where the assumptions report is written."""
        return os.path.join(config('root'), 'results/assumptions')
    def html_markup(self) -> str:
        """Return the full HTML document as a string.
        NOTE(review): the markup contains the typos 'efficieny' and the
        non-standard '<centre>' tag; left as-is because they are part of
        the emitted document."""
        html_head_css = """
        <!DOCTYPE html>
        <html>
        <head>
        <style type="text/css" media="screen">
         body{background-color: azure; font-family: "Arial", Arial, Sans-serif;}
        </style>
        """
        html_body = f"""
        <title>Assumption graphs for {self.group_name}</title>
        </head>
        <body>
        <h1>Distro plots for average clustering, average shortest path length, assortativity, modularity and efficieny</h1>
        <centre>
        <img src="{self.png_dir}/distro_plots_for_{self.group_name}_for_{self.measure}.png">
        </centre>
        <h1>Network measure plots</h1>
        <center>
        <img src="{self.png_dir}/network_measures_plot_for_{self.group_name}_for_{self.measure}.png">
        </center>
        </body>
        </html>
        """
        return html_head_css + html_body
    def save_to_file(self) -> None:
        """Render the report and write it to the save directory."""
        directory = self.save_directory() + f'/{self.group_name}_assumptions_for_{self.measure}.html'
        html = self.html_markup()
        with open(directory, 'w') as file:
            file.write(html)
class Group_differences_HTML_file:
    """Builds and saves an HTML report of group-difference graphs for two
    or three groups, embedding pre-rendered PNGs from work/visual_graphs."""
    def __init__(self, groups: dict, measure: str) -> None:
        # 'root' comes from the environment via python-decouple.
        self.png_dir = os.path.join(config('root'), 'work/visual_graphs')
        self.groups = [key for key in groups]
        self.measure = measure
    def save_directory(self) -> str:
        """Directory where the group-differences report is written."""
        return os.path.join(config('root'), 'results/group_differences')
    def img_src(self):
        """Return the cluster-plot <img> markup for the first two groups,
        plus a third section when exactly three groups were given."""
        img_src = f"""
        <h2> Cluster plot for {self.groups[0]} </h2>
        <img src="{self.png_dir}/cluster_plots_for_{self.groups[0]}_for_{self.measure}.png">
        <h2> Cluster plot for {self.groups[1]} </h2>
        <img src="{self.png_dir}/cluster_plots_for_{self.groups[1]}_for_{self.measure}.png">
        """
        if len(self.groups) == 3:
            img_src = img_src + f"""
            <h2> Cluster plot for {self.groups[2]} </h2>
            <img src="{self.png_dir}/cluster_plots_for_{self.groups[2]}_for_{self.measure}.png">
            """
        return img_src
    def html_markup(self) -> str:
        """Return the full HTML document as a string."""
        img = self.img_src()
        html_head_css = """
        <!DOCTYPE html>
        <html>
        <head>
        <style type="text/css" media="screen">
         body{background-color: azure; font-family: "Arial", Arial, Sans-serif;}
        </style>
        """
        html_body = f"""
        <title>Group difference graphs</title>
        </head>
        <body>
        <h1>Global measure plots for each group</h1>
        <centre>
        <img src = "{self.png_dir}/global_measure_plots_for_{self.measure}.png"
        </centre>
        <h1>Cluster plots</h1>
        <centre>
        {img}
        </centre>
        </body>
        </html>
        """
        return html_head_css + html_body
    def save_to_file(self) -> None:
        """Render the report and write it to the save directory."""
        directory = self.save_directory() + f'/group_differences_for_{self.measure}.html'
        html = self.html_markup()
        with open(directory, 'w') as file:
            file.write(html)
| WMDA/SCN | SCN/visualization/create_html_view.py | create_html_view.py | py | 3,593 | python | en | code | 0 | github-code | 36 |
34287760383 |
import random
from xml.dom.minidom import parseString

# Read the GA parameters from the experiment's XML config.  Fixes vs. the
# original: both files are now closed via context managers, and the tag
# values are read from the DOM text node instead of string-replacing the
# serialized XML.
with open('/home/med/Desktop/bioInfo.xml', 'r') as config_file:
    dom = parseString(config_file.read())

def _int_tag(tag_name):
    """Return the integer text content of the first <tag_name> element."""
    node = dom.getElementsByTagName(tag_name)[0]
    return int(node.firstChild.data)

problem_size = _int_tag('problemSize')
population_size = _int_tag('populationSize')

# Emit one random bitstring of length problem_size per population member.
with open('/home/med/Desktop/seedpopulation.txt', 'w') as out:
    for _ in range(population_size):
        bits = ''.join(str(random.randrange(0, 2)) for _ in range(problem_size))
        out.write(bits)
        out.write('\n')
| dogatuncay/GA_Twister_Hadoop | docs/seedpopulation.py | seedpopulation.py | py | 671 | python | en | code | 4 | github-code | 36 |
74647027303 | import logging
from igraph import Graph as iGraph
from parvusdb import GraphDatabase
from parvusdb.utils.code_container import DummyCodeContainer
from parvusdb.utils.match import Match, MatchException
from .node_matcher import VectorNodeMatcher
_logger = logging.getLogger()
class GraphMatcher:
    """Tests whether `small_graph` matches a subgraph of a given igraph
    graph, using the parvusdb MATCH query machinery with a vector-based
    node matcher."""
    def __init__(self, small_graph, metric, relations_metric):
        self.small_graph = small_graph
        self.metric = metric
        self.relations_metric = relations_metric
    def apply(self, g):
        """Return True if the small graph matches somewhere inside g."""
        if not isinstance(g, iGraph):
            raise TypeError("GraphRule.apply_to_graph() needs an igraph.Graph as an argument")
        db = GraphDatabase(g, node_matcher=VectorNodeMatcher(self.metric, self.relations_metric))
        rule = 'MATCH ' + str(self.small_graph) + ' RETURN __RESULT__;'
        lst = db.query(rule)
        # A non-empty first result means at least one match was found.
        if lst and lst[0]:
            return True
        return False
class GraphWeightedMatch:
    """Computes a similarity weight between a graph and a reference graph:
    the sum of dot products between the 'vector' attributes of matched
    node pairs (0 if no match is found)."""

    def __init__(self, big_graph, metric, relations_metric):
        self.big_graph = big_graph
        self.metric = metric
        self.relations_metric = relations_metric

    def apply(self, g):
        """Return the match weight of g against the reference graph."""
        if not isinstance(g, iGraph):
            raise TypeError("GraphRule.apply_to_graph() needs an igraph.Graph as an argument")
        match = Match(matching_code_container=DummyCodeContainer(),
                      node_matcher=VectorNodeMatcher(self.metric, self.relations_metric, gradient=False))
        big_graph = self.big_graph._g
        try:
            matching_variables = match.get_variables_substitution_dictionaries(g, big_graph)
            w = 0
            for k, v in matching_variables[0].items():
                # Bug fix: the old code indexed vs['name' == v], which
                # evaluates ('name' == v) to a *boolean* and therefore
                # always read vertex 0 (or 1).  Look the vertex up by its
                # 'name' attribute instead.
                rindex = big_graph.vs.find(name=v)['vector']
                lindex = g.vs.find(name=k)['vector']
                w += self.metric.indices_dot_product(lindex, rindex)
            return w
        except MatchException as e:
            _logger.warning('Cannot find matching variables %s', str(e))
            return 0
| fractalego/dgt | dgt/graph/graph_matcher.py | graph_matcher.py | py | 1,990 | python | en | code | 2 | github-code | 36 |
# BOJ 13913 "Hide and Seek 4": BFS from n to k over the moves x-1, x+1,
# and 2*x, printing the minimum number of steps followed by one shortest
# path.
import sys
from collections import deque
n, k = map(int, sys.stdin.readline().split())
m = 100001  # positions are restricted to 0..100000
visited = [-1] * m  # visited[x] = BFS depth (steps) to reach x; -1 = unseen
check = [0] * m  # check[x] = predecessor of x on the BFS tree
q = deque()
visited[n] = 0
q.append(n)
def path(x):
    # Walk predecessors back from x and print the path in forward order.
    move = []
    temp = x
    for _ in range(visited[x] + 1):
        move.append(temp)
        temp = check[temp]
    print(*move[::-1])
while q:
    x = q.popleft()
    if x == k:
        print(visited[x])
        path(x)
        break
    else:
        for i in [2*x, x-1, x+1]:
            if (0 <= i <= (m-1)) and visited[i] == -1:
                visited[i] = visited[x] + 1
                q.append(i)
                check[i] = x  # remember the predecessor to rebuild the path
| GluteusStrength/Algorithm | 백준/Gold/13913. 숨바꼭질 4/숨바꼭질 4.py | 숨바꼭질 4.py | py | 675 | python | en | code | 0 | github-code | 36 |
26030329346 | import os
import sys
#Add parent directories to the module search path
p = ['../','../../']
for e in p: sys.path.append(os.path.join(os.path.dirname(__file__),e))
import discord
from discord.ext import commands
from discord import app_commands
from cmmod.json_module import open_json
from cmmod.time_module import get_currenttime
from cmmod.discord_module import CustomEmbed
from usermenu.cmfunc.userfunc import UserDBFunc
from usermenu.cmfunc.teamfunc import TeamDBFunc
from usermenu.error.usermenu_error import UserMenuError
#Data used by the app_commands definitions below (loaded from JSON).
cmddata = open_json(r'menu/usermenu/data/apply_team.json')
cmdname = cmddata["name"]
cmddesp = cmddata["description"]
cmddesb = cmddata["describe"]
cmdcho = cmddata["choices"]
# Build discord Choice objects for the application-type and league options.
cmdcho_apt = [app_commands.Choice(name=c["name"],value=c["value"]) for c in cmdcho["apptype"]]
cmdcho_lgid = [app_commands.Choice(name=c["name"],value=c["value"]) for c in cmdcho["league"]]
cmddix = cmddata["dataindex"]
class ApplyTeam(commands.Cog):
    """Slash command cog for registering (apptype 0) or updating
    (apptype 1) a team record in the database."""
    def __init__(self, client):
        self.client = client
        self.userdbfunc = UserDBFunc()
        self.teamdbfunc = TeamDBFunc()
        self.custembed = CustomEmbed()
    @app_commands.command(name=cmdname, description=cmddesp)
    @app_commands.describe(apptype=cmddesb["apptype"],teamname=cmddesb["teamname"],league=cmddesb["league"],
                           leader=cmddesb["leader"],member1=cmddesb["member1"],member2=cmddesb["member2"],member3=cmddesb["member3"],member4=cmddesb["member4"])
    @app_commands.choices(apptype=cmdcho_apt,league=cmdcho_lgid)
    @app_commands.guild_only()
    async def apply_team_command(self, interaction:discord.Interaction, apptype:app_commands.Choice[int], teamname:str, league:app_commands.Choice[int],
                                leader:discord.User, member1:discord.User, member2:discord.User, member3:discord.User, member4:discord.User=None) -> None:
        author = interaction.user #the user who invoked the command
        try:
            #Defer the response so Discord shows "thinking..."
            await interaction.response.defer(thinking=True)
            #Fetch any existing team record for this leader
            raw_teamdata = await self.teamdbfunc.get_teamdata(leaderid=author.id)
            teamdata = raw_teamdata[0]
            #[ERROR] application type is "register" but team data already exists
            if apptype.value == 0 and teamdata:
                error = "既にいずれかのチームのリーダーとなっています。新たにチーム登録する場合は、情報更新でリーダーを変更後行ってください"
                raise UserMenuError(error)
            #[ERROR] application type is "update" but no team data exists
            elif apptype.value == 1 and not teamdata:
                error = "いずれかのチームのリーダーであることが確認できませんでした。チームリーダーであるにもかかわらず、このエラーメッセージが送信された場合は運営まで連絡してください"
                raise UserMenuError(error)
            #Verify every listed member exists in the user database
            members = [leader, member1, member2, member3, member4]
            for member in members:
                if member != None:
                    raw_userdata = await self.userdbfunc.get_userdata(member.id)
                    userdata = raw_userdata[0]
                    #[ERROR] the specified user is not registered in the database
                    if not userdata:
                        error = f"指定ユーザ{member.mention}の情報がデータベースに登録されていません。ユーザ情報を登録行ってからチーム情報登録・更新を行ってください"
                        raise UserMenuError(error)
            #Resolve the optional fourth member (empty string when absent)
            if member4 == None: MEMBER4 = ''
            else: MEMBER4 = str(member4.id)
            #Resolve the registration timestamp (kept from the old record on update)
            currenttime = get_currenttime()
            if apptype.value == 0: REGISTRATION_DATE = currenttime
            else: REGISTRATION_DATE = teamdata[9]
            #Build the team record to store
            postdata = {"チーム名":teamname, "リーグ":league.name, "リーダー":str(leader.id), "メンバー1":str(member1.id), "メンバー2":str(member2.id),
                        "メンバー3":str(member3.id), "メンバー4":MEMBER4, "登録日時":REGISTRATION_DATE, "最終更新日時":currenttime}
            #POST the record and log the change
            await self.teamdbfunc.post_teamdata(leaderid=leader.id, postdata=postdata, apptype=apptype.value)
            await self.teamdbfunc.log_teamdata(author=author, postdata=postdata, currenttime=currenttime, apptype=apptype.value)
        except UserMenuError as e:
            await interaction.followup.send(content=author.mention, embed=self.custembed.error(description=str(e)))
        except Exception as e:
            error = "コマンド実行中に予期せぬエラーが発生しました。このエラーが発生した場合は運営まで連絡をお願いします。\nエラー内容:"+str(e)
            print(error)
            await interaction.followup.send(content=author.mention,embed=self.custembed.error(description=error))
        else:
            #Send the completion notice
            success = f"{author.mention}からリーダー:{leader.mention}のチーム情報{apptype.name}を受け付けました。データベースからの完了通知をお待ちください。通知が無かった場合は運営まで連絡をお願いします"
            await interaction.followup.send(content=author.mention, embed=self.custembed.success(description=success))
await interaction.followup.send(content=author.mention, embed=self.custembed.success(description=success))
async def setup(client: commands.Bot):
    """discord.py extension entry point: register the ApplyTeam cog."""
    await client.add_cog(ApplyTeam(client))
| rich-bread/bmdb_bot | menu/usermenu/apply_team.py | apply_team.py | py | 5,802 | python | ja | code | 0 | github-code | 36 |
37225738128 | import nilearn
from nilearn.plotting import plot_carpet, plot_glass_brain, plot_anat, plot_stat_map, plot_design_matrix, plot_epi, plot_contrast_matrix
from nilearn import image, masking, input_data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from nilearn.glm.first_level import make_first_level_design_matrix, FirstLevelModel
from nilearn.glm import threshold_stats_img
from nilearn.reporting import get_clusters_table, make_glm_report
from nilearn.input_data import NiftiLabelsMasker, NiftiMasker, NiftiSpheresMasker
from nilearn import datasets
from nilearn.regions import RegionExtractor
from nilearn import plotting
from nilearn import surface
from nilearn.decoding import Decoder
def get_events_file(events_home_dir, subject_id, run):
    """Load the events table for one subject/run.

    Expects <events_home_dir>sub-<subject_id>/run-<NN>/events.csv and
    strips the index column that pandas round-trips as 'Unnamed: 0'.

    :param events_home_dir: base directory; must end with a path separator
    :param subject_id: subject label without the 'sub-' prefix
    :param run: run number (zero-padded to two digits in the path)
    :return: pandas.DataFrame of events
    """
    events_file = events_home_dir + 'sub-' + subject_id + '/run-' + str(run).zfill(2) + '/events.csv'
    events = pd.read_csv(events_file)
    # drop(label, 1) relied on the positional 'axis' argument, which was
    # removed in pandas 2.0; use the keyword form instead.
    events = events.drop(columns='Unnamed: 0')
    return events
def fit_glm(subject_id, run):
    """Fit a first-level GLM for one run of one subject.

    NOTE(review): relies on module-level globals `fmri_image` and
    `mask_image` (lists indexed by run-1) defined elsewhere, and calls
    get_events_file with two arguments although it is defined with three
    in this file -- confirm which signature is current.
    """
    events = get_events_file(subject_id, run)
    tr = 1.25
    n_scans = image.load_img(fmri_image[run-1]).shape[-1]
    frame_times = np.arange(n_scans) * tr
    # NOTE(review): these "motion" regressors are random-walk noise, not
    # real motion parameters -- presumably a placeholder; confirm.
    motion = np.cumsum(np.random.randn(n_scans, 6), 0)
    add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    design_matrix = make_first_level_design_matrix(frame_times, events,
                                   drift_model='polynomial', drift_order=3,
                                   add_regs=motion, add_reg_names=add_reg_names,
                                   hrf_model='spm')
    fmri_glm_model = FirstLevelModel(t_r=1.25, minimize_memory=False, noise_model='ar1', mask_img=mask_image[run-1])
    fmri_glm_model.fit(fmri_image[run-1], design_matrices=design_matrix)
    print("run done: ", run)
    return fmri_glm_model, design_matrix
def compute_no_diff_contrasts(glm, run):
    """Compute one z-map per event condition for the given run.

    NOTE(review): reads the module-level global `subject_id` and calls
    get_events_file with two arguments (its definition above takes
    three); also indexes `glm` as a list of fitted models.  Verify the
    intended calling convention.

    :return: (z_maps, conditions_label, sessions_label) parallel lists
    """
    z_maps = list()
    conditions_label = list()
    sessions_label = list()
    events = get_events_file(subject_id, run)
    conditions = events.trial_type.unique()
    for condition_ in conditions:
        z_maps.append(glm[run-1].compute_contrast(condition_))
        conditions_label.append(condition_)
        sessions_label.append(str(run))
    return z_maps, conditions_label, sessions_label
def get_movement_minus_wait_contrasts(design_matrices, glms):
    """For runs 1..10, compute the (all movement conditions) - wait
    contrast z-map from each fitted GLM.

    :param design_matrices: per-run design matrices (list, index run-1)
    :param glms: per-run fitted first-level models (list, index run-1)
    :return: (z_maps, labels) parallel lists, one entry per run
    """
    z_map_movement_minus_wait = list()
    movement_minus_wait_labels = list()
    for run in range(1, 11):
        # Build unit contrast vectors for every design-matrix column.
        contrast_matrix = np.eye(design_matrices[run-1].shape[1])
        basic_contrasts = dict([(column, contrast_matrix[i])
                            for i, column in enumerate(design_matrices[run-1].columns)])
        movement_contrasts = basic_contrasts['movement_153'] + basic_contrasts['movement_207'] + basic_contrasts['movement_45'] + basic_contrasts['movement_99'] - basic_contrasts['wait']
        z_map_movement_minus_wait.append(glms[run-1].compute_contrast(movement_contrasts))
        movement_minus_wait_labels.append('Movement minus wait, run_' + str(run).zfill(2))
    return z_map_movement_minus_wait, movement_minus_wait_labels
def get_prep_minus_wait_contrasts(design_matrices, glms):
    """For runs 1..10, compute the (all go/nogo preparation conditions) -
    wait contrast z-map from each fitted GLM.

    :param design_matrices: per-run design matrices (list, index run-1)
    :param glms: per-run fitted first-level models (list, index run-1)
    :return: (z_maps, labels) parallel lists, one entry per run
    """
    z_map_prep_minus_wait = list()
    prep_minus_wait_labels = list()
    for run in range(1, 11):
        # Build unit contrast vectors for every design-matrix column.
        contrast_matrix = np.eye(design_matrices[run-1].shape[1])
        basic_contrasts = dict([(column, contrast_matrix[i])
                            for i, column in enumerate(design_matrices[run-1].columns)])
        movement_contrasts = basic_contrasts['go_153_prep'] + basic_contrasts['go_207_prep'] + basic_contrasts['go_45_prep'] + basic_contrasts['go_99_prep'] + basic_contrasts['nogo_153_prep'] + basic_contrasts['nogo_207_prep'] + basic_contrasts['nogo_45_prep'] + basic_contrasts['nogo_99_prep'] - basic_contrasts['wait']
        z_map_prep_minus_wait.append(glms[run-1].compute_contrast(movement_contrasts))
        prep_minus_wait_labels.append('Prep minus wait, run_' + str(run).zfill(2))
    return z_map_prep_minus_wait, prep_minus_wait_labels
def plot_contrast_maps(z_maps, z_map_no, condition_label, display_mode = 'ortho', correction = 'bonferroni', alpha = 0.05):
    """Threshold and plot one z-map, then fit the module-level masker on it.

    Parameters: `z_maps` list of z-map images, `z_map_no` index to plot,
    `condition_label` parallel list of titles, `display_mode`/`correction`/
    `alpha` forwarded to nilearn.  Returns (plot_map, masker).
    """
    _, threshold = threshold_stats_img(
        z_maps[z_map_no], alpha= alpha, height_control=correction)
    # BUG FIX: the message and title used to hard-code
    # "Bonferroni-corrected, p<0.05" regardless of the actual parameters.
    print('%s-corrected, p<%g threshold: %.3f' % (correction, alpha, threshold))
    plot_map = plot_stat_map(z_maps[z_map_no], threshold = threshold,
                          black_bg=True, display_mode=display_mode, draw_cross=False,
                         title = '%s %s corrected, p<%g' % (condition_label[z_map_no], correction, alpha))
    # NOTE(review): `masker` is a module-level global, not a parameter —
    # confirm it is initialised before this is called.
    masker.fit(z_maps[z_map_no])
    plotting.show()
    return plot_map, masker
| tejas-savalia/fmri_project | util.py | util.py | py | 4,890 | python | en | code | 0 | github-code | 36 |
16385745917 | from torch.utils.data import Dataset
import torch
from PIL import Image
from pathlib import Path
import numpy as np
from dataclasses import dataclass
import random
@dataclass
class imageset:
    """One change-detection sample: two co-registered images and a change map."""
    t1: Path  # path to the image at time 1
    t2: Path  # path to the image at time 2
    cm: Path  # path to the ground-truth change map
@dataclass
class patch:
    """A rectangular crop of one imageset, given by half-open pixel ranges."""
    imset: imageset
    x: tuple  # (start, stop) row range of the crop
    y: tuple  # (start, stop) column range of the crop
class CDDataset(Dataset):
    """Base class for patch-based change-detection datasets.

    Subclasses must set `imagesets` and `patchsize` before calling
    super().__init__(), and implement `_loadrgb(path)` / `_loadcm(path)`.
    Each item is an (image_t1, image_t2, change_mask) patch triple.
    """
    # NOTE(review): these are *class-level* mutable attributes; `cache` in
    # particular is shared across every instance and subclass for the
    # process lifetime — confirm this memory behaviour is intended.
    imagesets = None
    patchsize = None
    nx = 0
    ny = 0
    patches = []
    normalize = True
    cache = {}
    def loadrgb(self, image):
        # Lazily load an RGB image as float32, memoised by path.
        if image not in self.cache:
            img = self._loadrgb(image).astype(np.float32)
            if self.normalize:
                # Per-channel standardisation over the full image.
                img = (img - img.mean(axis=(-1, -2))[:, None, None]) / img.std(
                    axis=(-1, -2)
                )[:, None, None]
            self.cache[image] = img
        return self.cache[image]
    def loadcm(self, image):
        # Lazily load a change map as int64, memoised by path.
        if image not in self.cache:
            self.cache[image] = self._loadcm(image).astype(np.int64)
        return self.cache[image]
    def __init__(self):
        if self.imagesets is None or self.patchsize is None:
            raise NotImplementedError
        # NOTE(review): m and v stay all-zero; _m/_s below look like leftovers
        # from removed per-channel statistics code.
        m, v = np.zeros(3), np.zeros(3)
        self.patches = []
        for imset in self.imagesets:
            im1 = self.loadrgb(imset.t1)
            im2 = self.loadrgb(imset.t2)
            cm = self.loadcm(imset.cm)
            # Both dates and the mask must share spatial size; images are CHW RGB.
            assert im1.shape[1:] == im2.shape[1:] == cm.shape
            assert im1.shape[0] == im2.shape[0] == 3
            # Tile the image into non-overlapping patchsize x patchsize crops.
            for ix in range(im1.shape[1] // self.patchsize):
                for iy in range(im1.shape[2] // self.patchsize):
                    self.patches.append(
                        patch(
                            imset,
                            (self.patchsize * ix, self.patchsize * (ix + 1)),
                            (self.patchsize * iy, self.patchsize * (iy + 1)),
                        )
                    )
            # Average patch-grid extents across imagesets (uses last loop values).
            self.nx += ix / len(self.imagesets)
            self.ny += iy / len(self.imagesets)
        self._m = m
        self._s = np.sqrt(v)
    def __getitem__(self, idx):
        # Crop the cached full images down to this patch's window.
        patch = self.patches[idx]
        im1 = self.loadrgb(patch.imset.t1)
        im2 = self.loadrgb(patch.imset.t2)
        cm = self.loadcm(patch.imset.cm)
        im1 = im1[..., patch.x[0] : patch.x[1], patch.y[0] : patch.y[1]]
        im2 = im2[..., patch.x[0] : patch.x[1], patch.y[0] : patch.y[1]]
        # if self.normalize:
        #    im1=(im1-im1.mean(axis=(-1,-2))[:,None,None])/im1.std(axis=(-1,-2))[:,None,None]
        #    im2=(im2-im2.mean(axis=(-1,-2))[:,None,None])/im2.std(axis=(-1,-2))[:,None,None]
        cm = cm[..., patch.x[0] : patch.x[1], patch.y[0] : patch.y[1]]
        return (im1, im2, cm)
    def __len__(self):
        return len(self.patches)
class WV_S1(CDDataset):
    """Single image-pair dataset stored as t1.bmp / t2.bmp / gt.bmp in one folder."""
    def __init__(self, path: Path, patchsize: int):
        pair = imageset(path / "t1.bmp", path / "t2.bmp", path / "gt.bmp")
        self.imagesets = [pair]
        self.patchsize = patchsize
        super(WV_S1, self).__init__()
    def _loadrgb(self, image):
        # HWC uint8 -> CHW float in [0, 1]
        pixels = np.array(Image.open(image))
        return pixels.transpose(2, 0, 1) / 255
    def _loadcm(self, image):
        # Dark pixels (< 128) mark change.
        return np.array(Image.open(image)) < 128
class OSCD(CDDataset):
    """Onera Satellite Change Detection dataset (Sentinel-2 tif bands)."""
    def __init__(self, path: Path, patchsize: int):
        # NOTE(review): pairing relies on the three sorted rglob listings
        # enumerating cities in the same order — verify the directory layout.
        self.imagesets = [
            imageset(im1, im2, cm)
            for im1, im2, cm in zip(
                sorted((path / "images").rglob("imgs_1_rect")),
                sorted((path / "images").rglob("imgs_2_rect")),
                sorted((path / "labels").rglob("cm")),
            )
        ]
        self.patchsize = patchsize
        super(OSCD, self).__init__()
    def _loadrgb(self, image):
        # Stack the B/G/R band files into a 3 x H x W array.
        return np.stack(
            [np.array(Image.open(image / b)) for b in ("B02.tif", "B03.tif", "B04.tif")]
        )
    def _loadcm(self, image):
        # Change-map tif encodes change as values > 1.
        return np.array(Image.open(next(image.glob("*-cm.tif")))) > 1
from typing import Tuple
from torch.utils.data import Subset
def split(
    ds: Dataset, validation_ratio: float, test_ratio: float, runsize=16, seed=0
) -> Tuple[Dataset, Dataset, Dataset]:
    """
    splits dataset by ratio (0..1) of validation and test in validation, test and train (remainder)
    while ensuring somewhat equal distribution between different parts
    of the Dataset by randomly choosing out of partitions of size runsize
    """
    # BUG FIX: the RNG previously used RandomState(0), silently ignoring `seed`.
    rng = np.random.RandomState(seed)
    val = list()
    test = list()
    train = list()
    # Partition indices into runs of ~runsize consecutive items.
    split = np.array_split(np.arange(len(ds)), len(ds) / runsize)
    for s in split:
        # Draw enough validation indices from this run to keep the overall
        # validation fraction on target.
        nv = int(
            validation_ratio * (len(val) + len(test) + len(train) + len(s)) - len(val)
        )
        i = rng.choice(s, nv, replace=False)
        s = np.setdiff1d(s, i)
        val += i.tolist()
        # Same for the test split; the remainder of the run goes to train.
        nt = int(test_ratio * (len(val) + len(test) + len(train) + len(s)) - len(test))
        i = rng.choice(s, nt, replace=False)
        s = np.setdiff1d(s, i)
        test += i.tolist()
        train += s.tolist()
    return CDSubset(ds, train), CDSubset(ds, val), CDSubset(ds, test)
class CDSubset(Subset):
    """Subset of a CDDataset with optional random flip/rotation augmentation."""
    augment = False
    def __getitem__(self, idx):
        sample = super().__getitem__(idx)
        if not self.augment:
            return sample
        # Randomly transpose the two spatial axes (coin flip) ...
        if random.randint(0, 1):
            sample = [np.swapaxes(part, -1, -2) for part in sample]
        # ... then apply 0-3 quarter turns to every part of the sample.
        quarter_turns = random.randint(0, 3)
        sample = [np.copy(np.rot90(part, quarter_turns, (-1, -2))) for part in sample]
        return sample
class CDCat(Dataset):
    """
    Concats the two images along first dimension
    """
    def __init__(self, baseObject):
        # HACK: dynamically re-parent this wrapper so isinstance checks
        # against the wrapped dataset's class still pass; the wrapper shares
        # the wrapped object's __dict__.  Fragile — treat with care.
        self.__class__ = type(baseObject.__class__.__name__,
                              (self.__class__, baseObject.__class__),
                              {})
        self.__dict__ = baseObject.__dict__
        self.baseObject=baseObject
    def __getitem__(self, idx):
        # Returns (channel-concatenated image pair, change mask).
        im1, im2, cm = self.baseObject[idx]
        return np.concatenate((im1,im2),0),cm
| fzimmermann89/ml4rs | cd/ds.py | ds.py | py | 6,007 | python | en | code | 0 | github-code | 36 |
6027026572 | #Uppgift 3
"""
Primtalsfaktorerna av 13195 är 5, 7, 13 och 29.
Vilken är den största primtalsfaktorn av 600851475143?
"""
#Svar: 6857
num = 600851475143
factors = []
# Trial division bounded by sqrt(num): divide out each factor as it is
# found, so every appended value is prime.  This replaces the previous
# restart-from-2 scan, which was O(n) per factor in the worst case.
i = 2
while i * i <= num:
    while num % i == 0:
        num //= i
        factors.append(i)
    i += 1
if num > 1:
    # Whatever remains is the largest prime factor.
    factors.append(num)
print(factors)
| WastPow/Sommarmatte-l-sningsf-rslag | Uppgift 3.py | Uppgift 3.py | py | 310 | python | sv | code | 0 | github-code | 36 |
8755321415 | # -*- coding: utf-8 -*-
from odoo import fields, models, api
line_sizes = [
('medium', 'Moyen'),
('small', 'Petit'),
('smaller', 'Plus petit'),
('x-small', u'Très petit')
]
class ResCompany(models.Model):
    """Extends res.company with configurable PDF report header/footer lines."""
    _inherit = "res.company"
    # Footer configuration: three free-text lines with individual font sizes.
    use_of_custom_footer = fields.Boolean(
        string=u'Utiliser le pied de page personnalisé pour cette société.',
        help=u"Cochez si vous voulez utiliser ce pied de page personnalisé pour les rapports PDF")
    of_custom_footer_line_1 = fields.Char(string='Ligne 1')
    of_custom_footer_line_2 = fields.Char(string='Ligne 2')
    of_custom_footer_line_3 = fields.Char(string='Ligne 3')
    of_custom_footer_line_1_size = fields.Selection(
        selection=line_sizes, string='Taille', default='small', required=True)
    of_custom_footer_line_2_size = fields.Selection(
        selection=line_sizes, string='Taille', default='small', required=True)
    of_custom_footer_line_3_size = fields.Selection(
        selection=line_sizes, string='Taille', default='smaller', required=True)
    # Header configuration: four free-text lines with individual font sizes.
    of_custom_header_line_1 = fields.Char(string='Ligne 1')
    of_custom_header_line_2 = fields.Char(string='Ligne 2')
    of_custom_header_line_3 = fields.Char(string='Ligne 3')
    of_custom_header_line_4 = fields.Char(string='Ligne 4')
    of_custom_header_line_1_size = fields.Selection(
        selection=line_sizes, string='Taille', default='medium', required=True)
    of_custom_header_line_2_size = fields.Selection(
        selection=line_sizes, string='Taille', default='medium', required=True)
    of_custom_header_line_3_size = fields.Selection(
        selection=line_sizes, string='Taille', default='medium', required=True)
    of_custom_header_line_4_size = fields.Selection(
        selection=line_sizes, string='Taille', default='medium', required=True)
    of_max_height_bandeau = fields.Integer(string=u'Hauteur max bandeau (px)', default=130, required=True)
    # Layout of the header lines relative to the company logo/banner.
    of_position_header_lines = fields.Selection(
        [
            ('logo_under', u"Logo société et adresse configurable dessous"),
            ('logo_right', u"Logo société et adresse configurable à droite"),
            ('bandeau_pastille', u"Bandeau image + pastille"),
            ('bandeau_totalite', u"Bandeau image totalité page "),
        ], string="Type d'en-tête société", default="logo_right",
        help=u"Position des lignes d'en-tête relativement au logo de société\n"
             u"Sous le logo : les lignes d'en-tête seront placées sous le logo de société.\n"
             u"À droite du logo : les lignes d'en-tête seront placées à droite du logo.")
    @api.multi
    def get_line_content(self, header_or_footer="header", number=1):
        """Render mako variables of the requested header/footer line.

        :param header_or_footer: "header" (lines 1-4) or anything else for
            the footer (lines 1-3).
        :param number: 1-based line number within the chosen section.
        :return: the rendered line content.
        """
        self.ensure_one()
        # Header line 1 is the fallback for unknown header numbers.
        field_to_render = self.of_custom_header_line_1
        if header_or_footer == "header":
            if number == 2:
                field_to_render = self.of_custom_header_line_2
            elif number == 3:
                field_to_render = self.of_custom_header_line_3
            elif number == 4:
                field_to_render = self.of_custom_header_line_4
        else:
            if number == 1:
                field_to_render = self.of_custom_footer_line_1
            elif number == 2:
                field_to_render = self.of_custom_footer_line_2
            else:
                field_to_render = self.of_custom_footer_line_3
        content = self.env['mail.template'].render_template(field_to_render, 'res.company', self.id, post_process=False)
        return content
class View(models.Model):
    """Extends qweb view rendering context."""
    _inherit = 'ir.ui.view'
    @api.multi
    def render(self, values=None, engine='ir.qweb'):
        # Expose the builtin hasattr() to qweb view expressions.
        return super(View, self).render(dict(values or {}, hasattr=hasattr), engine)
| odof/openfire | of_external/models/of_company.py | of_company.py | py | 3,872 | python | en | code | 3 | github-code | 36 |
18915198493 | import pytest
from src.maximum_twin_sum_of_a_linked_list import Solution
from src.utils.linked_list import to_linked_list
# Each case: (linked-list values, expected maximum twin sum).
@pytest.mark.parametrize(
    "in_list,expected",
    (
        ([5, 4, 2, 1], 6),
        ([4, 2, 2, 3], 7),
        ([1, 100_000], 100_001),
    ),
)
def test_solution(in_list, expected):
    """pairSum returns the maximum twin sum of the list built from in_list."""
    head = to_linked_list(in_list)
    assert Solution().pairSum(head) == expected
| lancelote/leetcode | tests/test_maximum_twin_sum_of_a_linked_list.py | test_maximum_twin_sum_of_a_linked_list.py | py | 398 | python | en | code | 3 | github-code | 36 |
23425927839 | import os
from discord.ext import commands, tasks
import motor.motor_asyncio
import util.util
from util.help import HelpCommand
from util.setup import load_text, load_data, mod_data, get_files
import discord
import itertools
bot = commands.Bot(
command_prefix="!", # Change to desired prefix
case_insensitive=True, # Commands aren't case-sensitive
intents=discord.Intents.all()
)
bot.help_command = HelpCommand(bot)
STATUS = itertools.cycle(["a", "b"])
mongo_client = motor.motor_asyncio.AsyncIOMotorClient("") # need to create a database, i used mongo atlas
bot.db = mongo_client.bhv
bot.author_id = 656373241144934420 # Change to your discord id!!!
@bot.event
async def on_ready():  # When the bot is ready
    """Startup hook: load all cog extensions, wire helpers, start status loop."""
    for extension in files:
        print(extension)
        await bot.load_extension(extension)  # Loads every extension.
    bot.hdb = bot.get_cog("Database")
    bot.util = util.util.setup(bot)
    bot.embed = discord.Embed(color=discord.Colour.from_str("#f77394"))
    mod_data(bot)
    change_status.start()  # begin the rotating presence loop
    print("I'm in")
    print(bot.user)  # Prints the bot's username and identifier
@tasks.loop(seconds=10)
async def change_status():
    """Rotate the bot's 'Playing' status through STATUS every 10 seconds."""
    await bot.change_presence(activity=discord.Game(next(STATUS)))
files = [file.replace("/", ".")[:-3] for file in get_files("cogs", [])]
bot.t = load_text()
bot.d = load_data()
token = "" # your own token
bot.run(token) # Starts the bot
| gritor111/bhv-bot | bot.py | bot.py | py | 1,424 | python | en | code | 0 | github-code | 36 |
40843782656 | from apivk.function_vk import vkinder
from datetime import date
from database.script_bd import check_users_vk, check_search_results, save_users_vk, save_search_results
from botvk.function_botvk import write_msg, send_photo
# Determine the user's relationship status.
def find_relation(search_user_id):
    """Return the VK 'relation' code of the user, or None if absent.

    BUG FIX: `relation` is now pre-initialised — previously an empty API
    response left it unbound and raised NameError at the return.
    """
    relation = None
    res = vkinder.about_user(search_user_id)['response']
    for item in res:
        if 'relation' in item:
            relation = item['relation']
        else:
            relation = None
    return relation
# Determine the sex to search for (the opposite of the user's own).
def find_sex(search_user_id):
    """Return the opposite VK sex code (1<->2, else 0), or None if unknown.

    BUG FIX: `any_sex` is now pre-initialised — previously an empty API
    response left it unbound and raised NameError at the return.
    """
    any_sex = None
    res = vkinder.about_user(search_user_id)['response']
    for item in res:
        if 'sex' not in item:
            any_sex = None
        else:
            sex = item['sex']
            if sex == 2:
                any_sex = 1
            elif sex == 1:
                any_sex = 2
            else:
                any_sex = 0
    return any_sex
# Determine the user's city.
def find_city(search_user_id):
    """Return the VK city id of the user, or None if absent.

    BUG FIX: `city` is now pre-initialised — previously an empty API
    response left it unbound and raised NameError at the return.
    """
    city = None
    res = vkinder.about_user(search_user_id)['response']
    for item in res:
        if 'city' in item:
            city = item['city']['id']
        else:
            city = None
    return city
# Determine the user's age from the birth date.
def find_age(search_user_id):
    """Return the user's age in full years, or None if the birth date is
    missing or lacks the year (VK only sends D.M.YYYY for public dates).

    BUG FIX: `age` is now pre-initialised — previously an empty API
    response left it unbound and raised NameError at the return.
    """
    age = None
    res = vkinder.about_user(search_user_id)['response']
    for item in res:
        if 'bdate' not in item:
            age = None
        else:
            bdate = item['bdate']
            # A full D.M.YYYY date is at least 8 characters; shorter means
            # the year is hidden and the age cannot be computed.
            if len(bdate) >= 8:
                day, mon, year = bdate.split('.')
                day = int(day)
                mon = int(mon)
                year = int(year)
                today = date.today()
                # Subtract one if the birthday has not yet occurred this year.
                age = today.year - year - ((today.month, today.day) < (mon, day))
            else:
                age = None
    return age
# Send the user a profile link and the top-3 photos of one match.
def choose_photo(age_from, age_to, relation, sex, city, search_user_id, user_id):
    """Search VK for candidates and message the first new, open-profile match
    from the same city, with their profile link and photos, then stop.
    """
    try:
        search = vkinder.users_search(age_from, age_to, relation, sex, city)
        people_id = search['response']['items']
        for people in people_id:
            try:
                id_people = int(people['id'])
                first_name = people['first_name']
                last_name = people['last_name']
                status = people['is_closed']
                city_ = people['city']['id']
            except KeyError:
                # NOTE(review): on KeyError the loop proceeds with `status`/
                # `city_` from the *previous* iteration (or unbound on the
                # first one) — confirm this is acceptable.
                pass
            # NOTE(review): the inner `city_ == city` re-check is redundant.
            if status is False and city_ == city:
                if city_ == city:
                    # Persist the candidate and the search pairing once only.
                    if check_users_vk(id_people) is None:
                        save_users_vk(id_people, first_name, last_name)
                    if check_search_results(search_user_id, id_people) is None:
                        save_search_results(search_user_id, id_people)
                        write_msg(user_id, f'Зацени {first_name} {last_name} https://vk.com/id{id_people}')
                        # Photo tuples carry (owner_id, photo_id) at i[1][2:4].
                        for i in vkinder.photo_user(id_people):
                            owner_id = i[1][2]
                            photo_id = i[1][3]
                            photo = f'photo{owner_id}_{photo_id}'
                            send_photo(user_id, photo)
                        write_msg(user_id, 'Для продолжения поиска повторно введите команду "поиск"')
                        break
    except TypeError:
        write_msg(user_id, 'Не хватает данных для поиска')
# Verify that the user's profile information is complete.
def check(age, city, sex, relation, user_id):
    """Tell the user they can start searching once all four profile fields
    (age, city, sex, relation) are known; stay silent otherwise.
    """
    known = [field for field in (age, city, sex, relation) if field is not None]
    if len(known) == 4:
        write_msg(user_id, 'Отлично! Для начала поиска введите команду "поиск"')
| beloglazovpl/VKinder | function_find/func.py | func.py | py | 4,050 | python | en | code | 0 | github-code | 36 |
36407109164 | """
This script is used for 'writing' songs in musical notation form, with recording of key downs and ups being used to define the time durations and delays of notes. Notes are shown line by line and a single key on your keyboard can be used to set the timing for each note in a song - of course, you'll need to know the song by heart.
All that is needed is the pynput module installed and a file containing your music notes. You will need to pass the music notes file as an argument. To start, call this script from python in the terminal - e.g.:
$ python3 musical_timing_recorder.py <path to your music notes file>
Once all the music notes have been 'played' by you, an out file gets written to in the same directory as your music notes file. This text file will contain 3 separate sections - the notes, the note durations, and the note gaps.
"""
from pynput import keyboard
import time
import os,sys
DEBUG = False
KILL = False
# Keyboard key handling
key_held = False
def key_down(key):
    """Key-press callback: mark the key as held; Esc requests shutdown."""
    global key_held,KILL
    key_held = True
    if key == keyboard.Key.esc: KILL = True
def key_up(key):
    """Key-release callback: mark the key as no longer held."""
    global key_held
    key_held = False
kl = keyboard.Listener(on_press=key_down,on_release=key_up,suppress=True)
kl.start()
# Execute
if __name__ == "__main__":
# Init
if not DEBUG:
args = sys.argv
if os.path.isfile(args[1]): notes_file = args[1]
else: raise FileNotFoundError("Error: missing positional argument - music notes file")
else:
notes_file = "<INSERT MUSIC NOTES FILE HERE>" # Ignore this
with open(notes_file,"r") as f:
x = f.readlines()
lines = [i.strip() for i in x]
# Clear screen and show greeter
os.system("cls" if os.name == "nt" else "clear")
print(f"Musical Timing Recorder ({os.path.basename(notes_file)})\nPress 'Esc' to quit\n\n")
print("When you are ready, Maestro...\n")
# Recording loop system
line_count = len(lines)
durations,gaps = [],[]
t,time_d,time_g = 0,0,0
recording_started = False
for n,line in enumerate(lines):
# Print out the current sheet line for the user
print(f"\nLine: {n+1}\n \t{line}\n> \t",end="")
if not recording_started:
# Wait for user to press key for first time
while not key_held: continue
if KILL: exit()
recording_started = True
# Notes per line recording loop start
notes = line.split()
note_count = len(notes)
for i,note in enumerate(notes):
while not key_held: continue # Failsafe
if key_held:
print(note+" ",end="",flush=True)
t = time.time()
while key_held: time_d = (time.time() - t)
if KILL: exit()
durations.append(time_d)
if not key_held:
# Abruptly stop and don't record gap for last note
if n+1 >= line_count and i+1 >= note_count:
gaps.append(0)
break
t = time.time()
while not key_held: time_g = (time.time() - t)
if KILL: exit()
gaps.append(time_g)
durations.append("\n")
gaps.append("\n")
print("\n")
# Finished recording - cleanup and write data to output file
out_file = os.path.basename(notes_file).split(".")[0]+"_output.txt"
melody = []
for l in lines:
clean = l.split()
x = [f"\"{n}\"," for n in clean]
x.append("\n")
melody.extend(x)
for i in range(len(gaps)):
if gaps[i] == "\n":continue
durations[i] = f"{durations[i]:.3F},"
gaps[i] = f"{gaps[i]:.3F},"
with open(f"{out_file}","w") as f:
f.write("Melody:\n")
f.writelines(melody)
f.write("Durations:\n")
f.writelines(durations)
f.write("Gaps:\n")
f.writelines(gaps)
print(f"Finished - Data written to ./{out_file}") | cwylycode/dumptruck | python/musical_timing_recorder.py | musical_timing_recorder.py | py | 4,076 | python | en | code | 4 | github-code | 36 |
42926082156 | import tempfile
import unittest
import numpy as np
import pandas as pd
import pysam
from hmnfusion import mmej_deletion
from tests.main_test import Main_test
class TestMmejDeletionMain(Main_test):
    """Shared fixtures for the mmej_deletion test classes below."""
    @classmethod
    def load_records(cls, path: str):
        # Read all variant records from a VCF file.
        vcf_in = pysam.VariantFile(path)
        return [x for x in vcf_in.fetch()]
    def setUp(self):
        # Value fixtures: an empty Value plus two concrete deletions.
        self.value_0 = mmej_deletion.Value()
        self.value_1 = mmej_deletion.Value(
            id="86ad494080bc9c322a639d3de922e958",
            contig="chr1",
            start=5,
            deletion="TGAGGC",
        )
        self.value_2 = mmej_deletion.Value(
            id="927f1d86b6d899d163efdb245b9aca67",
            contig="chr19",
            start=5,
            deletion="TGA",
        )
        # Expected single-row DataFrames for value_1 / value_2.
        self.value_1_df = pd.DataFrame(
            {
                "contig": "chr1",
                "start": 5,
                "deletion": "TGAGGC",
                "sequence": "TGAGGC",
                "conclusion": "alignment ambiguous",
            },
            index=["86ad494080bc9c322a639d3de922e958"],
        )
        self.value_2_df = pd.DataFrame(
            {
                "contig": "chr19",
                "start": 5,
                "deletion": "TGA",
                "sequence": "",
                "conclusion": "no clear signature",
            },
            index=["927f1d86b6d899d163efdb245b9aca67"],
        )
        self.values_unit_one = TestMmejDeletionMain.load_records(path=self.u1_vcf)
        # MmejDeletion fixtures: empty, one-value and two-sample variants.
        self.mmej_deletion_u0 = mmej_deletion.MmejDeletion(name="sample0", values=[])
        self.mmej_deletion_u1 = mmej_deletion.MmejDeletion(
            name="sample1",
            values=[self.value_1],
        )
        self.mmej_deletion_u2_s1 = mmej_deletion.MmejDeletion(
            name="sample1",
            values=[self.value_1, self.value_2],
        )
        self.mmej_deletion_u2_s2 = mmej_deletion.MmejDeletion(
            name="sample2",
            values=[self.value_1],
        )
        # Expected combined DataFrame with per-sample presence columns.
        self.mmej_deletion_u2_df = pd.concat([self.value_1_df, self.value_2_df])
        self.mmej_deletion_u2_df["sample1"] = ["o", "o"]
        self.mmej_deletion_u2_df["sample2"] = ["o", pd.NA]
        # Excel round-trips map NA/empty strings to NaN and drop the index.
        self.mmej_deletion_u2_df_xlsx = self.mmej_deletion_u2_df.replace(
            {pd.NA: np.nan, "": np.nan}
        )
        self.mmej_deletion_u2_df_xlsx.reset_index(inplace=True, drop=True)
        # Expected frames for the no-deletion case (DataFrame and Excel form).
        self.mmej_deletion_empty_df = pd.DataFrame(
            columns=["contig", "start", "deletion", "sequence", "conclusion", "N1"]
        )
        self.mmej_deletion_empty_df_xlsx = pd.DataFrame(
            {
                "Unnamed: 0": "no deletion found",
                "contig": np.nan,
                "start": np.nan,
                "deletion": np.nan,
                "sequence": np.nan,
                "conclusion": np.nan,
                "N1": np.nan,
            },
            index=[0],
        )
class TestConclude(Main_test):
    """Test Conclude object"""
    def test_attribute(self):
        """Conclude exposes exactly four public attributes."""
        attrs = [x for x in dir(mmej_deletion.Conclude) if not x.startswith("__")]
        self.assertEqual(len(attrs), 4)
class TestValue(TestMmejDeletionMain):
    """Test Value object"""
    def test_getters(self):
        """Test getters attributes"""
        self.assertEqual(self.value_1.id, "86ad494080bc9c322a639d3de922e958")
        self.assertEqual(self.value_1.contig, "chr1")
        self.assertEqual(self.value_1.start, 5)
        self.assertEqual(self.value_1.deletion, "TGAGGC")
    def test_setters(self):
        """Test setters attributes"""
        # Copy every field from value_1 onto the empty value_0, then verify.
        self.value_0.id = self.value_1.id
        self.value_0.contig = self.value_1.contig
        self.value_0.start = self.value_1.start
        self.value_0.deletion = self.value_1.deletion
        self.value_0.sequence = self.value_1.sequence
        self.assertEqual(self.value_0.id, "86ad494080bc9c322a639d3de922e958")
        self.assertEqual(self.value_0.contig, "chr1")
        self.assertEqual(self.value_0.start, 5)
        self.assertEqual(self.value_0.deletion, "TGAGGC")
    def test_get_conclusion(self):
        """get_conclusion() depends on how sequence compares to deletion."""
        # sequence == deletion -> AMBIGUOUS
        self.value_1.sequence = "ATCG"
        self.value_1.deletion = "ATCG"
        self.assertEqual(
            self.value_1.get_conclusion(),
            mmej_deletion.Conclude.AMBIGUOUS,
        )
        # shorter sequence -> UNCLEAR
        self.value_1.sequence = "ATC"
        self.assertEqual(
            self.value_1.get_conclusion(),
            mmej_deletion.Conclude.UNCLEAR,
        )
        # longer sequence -> VALID
        self.value_1.sequence = "ATCGGC"
        self.assertEqual(
            self.value_1.get_conclusion(),
            mmej_deletion.Conclude.VALID,
        )
        # one-base deletion -> UNINITIALIZED
        self.value_1.deletion = "A"
        self.assertEqual(
            self.value_1.get_conclusion(),
            mmej_deletion.Conclude.UNINITIALIZED,
        )
    def test_set_sequence(self):
        """set_sequence() pulls the flanking sequence from the reference."""
        self.value_1.set_sequence(path=self.ref_mmej)
        self.assertEqual(self.value_1.sequence, "TGAGGC")
    def test_from_record(self):
        """from_record() rebuilds value_1 from the first VCF record."""
        rec = mmej_deletion.Value.from_record(self.values_unit_one[0])
        self.assertEqual(rec, self.value_1)
    def test_to_dataframe(self):
        """to_dataframe() matches the expected single-row frame."""
        self.value_1.set_sequence(path=self.ref_mmej)
        self.assertTrue(self.value_1.to_dataframe().equals(self.value_1_df))
    def test_to_region(self):
        """to_region() formats contig:start-end."""
        self.assertEqual(self.value_1.to_region(), "chr1:5-17")
class TestMmejDeletion(TestMmejDeletionMain):
    """Test MmmejDeletion object"""
    def test_getters(self):
        """Test getters attributes"""
        self.assertEqual(self.mmej_deletion_u1.name, "sample1")
        self.assertEqual(self.mmej_deletion_u1.values, [self.value_1])
    def test_setters(self):
        """Test setters attributes"""
        self.assertEqual(self.mmej_deletion_u0.name, "sample0")
        self.assertEqual(self.mmej_deletion_u0.values, [])
        self.mmej_deletion_u0.name = self.mmej_deletion_u1.name
        self.mmej_deletion_u0.values = self.mmej_deletion_u1.values
        self.assertEqual(self.mmej_deletion_u1.name, "sample1")
        self.assertEqual(self.mmej_deletion_u1.values, [self.value_1])
    def test_empty(self):
        """Test empty property"""
        self.assertTrue(self.mmej_deletion_u0.empty)
        self.assertFalse(self.mmej_deletion_u1.empty)
    def test_build_empty_dataframe(self):
        """build_empty_dataframe() yields the fixed columns plus the name."""
        self.assertTrue(
            mmej_deletion.MmejDeletion.build_empty_dataframe(name="test").equals(
                pd.DataFrame(
                    columns=[
                        "contig",
                        "start",
                        "deletion",
                        "sequence",
                        "conclusion",
                        "test",
                    ]
                )
            )
        )
    def test_get_value_ids(self):
        """get_value_ids() lists the ids of all held values."""
        self.assertEqual(self.mmej_deletion_u0.get_value_ids(), [])
        self.assertEqual(
            self.mmej_deletion_u2_s1.get_value_ids(),
            ["86ad494080bc9c322a639d3de922e958", "927f1d86b6d899d163efdb245b9aca67"],
        )
    def test_set_value_sequence(self):
        """set_value_sequence() fills each value's sequence from the reference."""
        self.mmej_deletion_u0.set_value_sequence(path=self.ref_mmej)
        self.assertEqual(self.mmej_deletion_u0.values, [])
        self.assertEqual(self.mmej_deletion_u1.values[0].sequence, "")
        self.mmej_deletion_u1.set_value_sequence(path=self.ref_mmej)
        self.assertEqual(self.mmej_deletion_u1.values[0].sequence, "TGAGGC")
    def test_from_vcf(self):
        """from_vcf() builds one MmejDeletion per sample in the VCF."""
        dels = mmej_deletion.MmejDeletion.from_vcf(path=self.n1_vcf)
        self.assertEqual(dels, [mmej_deletion.MmejDeletion(name="N1", values=[])])
        dels = mmej_deletion.MmejDeletion.from_vcf(path=self.u2_vcf)
        self.assertEqual(
            dels,
            [self.mmej_deletion_u2_s1, self.mmej_deletion_u2_s2],
        )
    def test_to_dataframe(self):
        """to_dataframe() matches the expected frames (empty and filled)."""
        # Empty
        mmej_deletions = mmej_deletion.MmejDeletion.from_vcf(path=self.n1_vcf)
        for m in mmej_deletions:
            m.set_value_sequence(path=self.ref_mmej)
        df = mmej_deletion.MmejDeletion.to_dataframe(mmej_deletions=mmej_deletions)
        self.assertTrue(self.mmej_deletion_empty_df.equals(df))
        # Filled
        mmej_deletions = mmej_deletion.MmejDeletion.from_vcf(path=self.u2_vcf)
        for m in mmej_deletions:
            m.set_value_sequence(path=self.ref_mmej)
        df = mmej_deletion.MmejDeletion.to_dataframe(mmej_deletions=mmej_deletions)
        self.assertTrue(self.mmej_deletion_u2_df.equals(df))
    def test_to_excel(self):
        """to_excel() round-trips through a temporary xlsx file."""
        # Empty
        mmej_deletions = mmej_deletion.MmejDeletion.from_vcf(path=self.n1_vcf)
        for m in mmej_deletions:
            m.set_value_sequence(path=self.ref_mmej)
        with tempfile.NamedTemporaryFile(suffix=".xlsx") as fod:
            mmej_deletion.MmejDeletion.to_excel(
                path=fod.name, mmej_deletions=mmej_deletions
            )
            df = pd.read_excel(fod.name)
            self.assertTrue(self.mmej_deletion_empty_df_xlsx.equals(df))
        # Filled
        mmej_deletions = mmej_deletion.MmejDeletion.from_vcf(path=self.u2_vcf)
        for m in mmej_deletions:
            m.set_value_sequence(path=self.ref_mmej)
        with tempfile.NamedTemporaryFile(suffix=".xlsx") as fod:
            mmej_deletion.MmejDeletion.to_excel(
                path=fod.name, mmej_deletions=mmej_deletions
            )
            df = pd.read_excel(fod.name)
            self.assertTrue(self.mmej_deletion_u2_df_xlsx.equals(df))
if __name__ == "__main__":
unittest.main()
| guillaume-gricourt/HmnFusion | tests/unit/test_mmej_deletion.py | test_mmej_deletion.py | py | 9,961 | python | en | code | 0 | github-code | 36 |
39914551744 | from fastapi import APIRouter, HTTPException, Request
from utils.model import *
from services.camera_service import camera_service
from services.server_service import server_service
import requests
import threading
router = APIRouter(prefix="/camera")
@router.get("/{server_name}")
async def get_camera(server_name: str=None):
    """Return one server (looked up by name) together with its cameras."""
    server = server_service.get_by_server_name(server_name)
    if server is not None:
        # BUG FIX: this lookup used to run *before* the None check, so an
        # unknown server name raised TypeError instead of returning "fail".
        records = camera_service.get_camera_by_server(server['server_id'])
        server['cameras'] = records if records is not None else []
        # Drop the Mongo ObjectId, which is not JSON serialisable.
        del server['_id']
        return {
            "data": server,
            "msg": "success",
        }
    else:
        return {
            "data": {},
            "msg": "fail"
        }
@router.get("/")
async def get_all_camera():
    """Return every server with its cameras and per-camera stream URLs."""
    servers = server_service.get_all()
    if servers is not None:
        for server in servers:
            cameras = camera_service.get_camera_by_server(server['server_id'])
            # Motion-detection stream endpoint exposed by each server.
            path = "http://{0}:8005/stream-manage/output/motion-detections-{1}"
            for camera in cameras:
                camera['stream_url'] = path.format(server['ip'], camera['camera_id'])
            server['cameras'] = cameras if cameras is not None else []
            # NOTE(review): get_camera() deletes '_id' here but this route
            # deletes 'id' — confirm which key the service layer returns.
            del server['id']
        return {
            "data": list(servers),
            "msg": "success",
        }
    else:
        return {
            "data": {},
            "msg": "fail"
        }
@router.post("", response_model=Reponse[CameraResponse])
async def add_camera_api(camera: Camera):
    """Create a camera record and notify its streaming server in the background."""
    try:
        created = camera_service.add_camera(camera)
        def notify_streaming():
            # Push the new camera to the owning server's stream manager.
            server = server_service.get_by_id(camera.server_id)
            server_name = server['server_name']
            root_url = f'http://{server_name}:8005/stream-manage/camera'
            requests.post(root_url, json=camera.dict())
        threading.Thread(target=notify_streaming).start()
        return {"data": created}
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
@router.delete("/{camera_id}", response_model=Reponse[object])
async def delete_camera(camera_id: str):
    """Delete a camera record and notify its streaming server in the background."""
    try:
        camera = camera_service.get_by_id(camera_id)
        removed = camera_service.delete_camera(camera_id)
        def notify_streaming():
            # Tell the owning server's stream manager to drop the camera.
            server = server_service.get_by_id(camera['server_id'])
            server_name = server['server_name']
            root_url = f'http://{server_name}:8005/stream-manage/camera/{camera_id}'
            requests.delete(root_url)
        threading.Thread(target=notify_streaming).start()
        return {"data": removed}
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
@router.put("", response_model=Reponse[CameraResponse])
async def update_camera(camera: Camera):
    """Update a camera record and refresh its streaming server in the background."""
    try:
        updated = camera_service.update_camera(camera)
        def refresh_streaming():
            # Ask the owning server's stream manager to reload its cameras.
            server = server_service.get_by_id(camera.server_id)
            server_name = server['server_name']
            root_url = f'http://{server_name}:8005/stream-manage/camera/refresh'
            requests.get(root_url)
        threading.Thread(target=refresh_streaming).start()
        return {"data": updated}
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
@router.put("/update-date", response_model=Reponse[CameraResponse])
async def update_camera_date(model: RangeDate):
    """Update a camera's start/end time range.

    Raises 400 when the camera id is unknown or the update fails.
    """
    try:
        result = camera_service.get_by_id(model.camera_id)
        if result is None:
            raise HTTPException(
                status_code=400,
                detail='Camera ID does not existed'
            )
        result['start_time'] = model.start_time
        result['end_time'] = model.end_time
        result = camera_service.update_camera(Camera(**result))
        return {"data": result}
    # BUG FIX: the intentional HTTPException above used to be caught by the
    # generic handler below and re-wrapped, mangling its detail message.
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
@router.put("/update-status", response_model=Reponse[CameraResponse])
async def update_camera_status(camera: Camera):
    """Update only the camera_status field of an existing camera."""
    try:
        stored = camera_service.get_by_id(camera.camera_id)
        stored['camera_status'] = camera.camera_status
        updated = camera_service.update_camera(Camera(**stored))
        return {"data": updated}
    except Exception as e:
        raise HTTPException(
            status_code=400,
            detail=str(e)
        )
| ngocthien2306/be-cctv | src/router/camera_router.py | camera_router.py | py | 4,936 | python | en | code | 0 | github-code | 36 |
43303341114 | import py
import random
from collections import OrderedDict
from hypothesis import settings, given, strategies
from hypothesis.stateful import run_state_machine_as_test
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem import rordereddict, rstr
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper.annlowlevel import llstr, hlstr
from rpython.rtyper.test.test_rdict import (
BaseTestRDict, MappingSpace, MappingSM)
from rpython.rlib import objectmodel
rodct = rordereddict
def get_indexes(ll_d):
    # Return the low-level dict's index array as a typed pointer.
    return ll_d.indexes._obj.container._as_ptr()
def foreach_index(ll_d):
indexes = get_indexes(ll_d)
for i in range(len(indexes)):
yield rffi.cast(lltype.Signed, indexes[i])
def count_items(ll_d, ITEM):
c = 0
for item in foreach_index(ll_d):
if item == ITEM:
c += 1
return c
class TestRDictDirect(object):
dummykeyobj = None
dummyvalueobj = None
    def _get_str_dict(self):
        # STR -> lltype.Signed
        # Build a low-level ordered-dict type keyed by rstr strings, using the
        # test class's dummy key/value placeholders.
        DICT = rordereddict.get_ll_dict(lltype.Ptr(rstr.STR), lltype.Signed,
                                 ll_fasthash_function=rstr.LLHelpers.ll_strhash,
                                 ll_hash_function=rstr.LLHelpers.ll_strhash,
                                 ll_eq_function=rstr.LLHelpers.ll_streq,
                                 dummykeyobj=self.dummykeyobj,
                                 dummyvalueobj=self.dummyvalueobj)
        return DICT
    def test_dict_creation(self):
        """Setting and overwriting a key works and leaves one slot occupied."""
        DICT = self._get_str_dict()
        ll_d = rordereddict.ll_newdict(DICT)
        lls = llstr("abc")
        rordereddict.ll_dict_setitem(ll_d, lls, 13)
        # Exactly one index slot is taken after the first insertion.
        assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 1
        assert rordereddict.ll_dict_getitem(ll_d, llstr("abc")) == 13
        assert rordereddict.ll_dict_getitem(ll_d, lls) == 13
        # Overwrites via the same and an equal-but-distinct key object.
        rordereddict.ll_dict_setitem(ll_d, lls, 42)
        assert rordereddict.ll_dict_getitem(ll_d, lls) == 42
        rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 43)
        assert rordereddict.ll_dict_getitem(ll_d, lls) == 43
    def test_dict_creation_2(self):
        """Two distinct keys coexist without clobbering each other."""
        DICT = self._get_str_dict()
        ll_d = rordereddict.ll_newdict(DICT)
        llab = llstr("ab")
        llb = llstr("b")
        rordereddict.ll_dict_setitem(ll_d, llab, 1)
        rordereddict.ll_dict_setitem(ll_d, llb, 2)
        assert rordereddict.ll_dict_getitem(ll_d, llb) == 2
    def test_dict_store_get(self):
        """Earlier insertions stay readable while 20 keys are added (forces resizes)."""
        DICT = self._get_str_dict()
        ll_d = rordereddict.ll_newdict(DICT)
        for i in range(20):
            # All previously inserted keys must still be retrievable.
            for j in range(i):
                assert rordereddict.ll_dict_getitem(ll_d, llstr(str(j))) == j
            rordereddict.ll_dict_setitem(ll_d, llstr(str(i)), i)
        assert ll_d.num_live_items == 20
        for i in range(20):
            assert rordereddict.ll_dict_getitem(ll_d, llstr(str(i))) == i
def test_dict_store_get_del(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
for i in range(20):
for j in range(0, i, 2):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(j))) == j
rordereddict.ll_dict_setitem(ll_d, llstr(str(i)), i)
if i % 2 != 0:
rordereddict.ll_dict_delitem(ll_d, llstr(str(i)))
assert ll_d.num_live_items == 10
for i in range(0, 20, 2):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(i))) == i
def test_dict_del_lastitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
py.test.raises(KeyError, rordereddict.ll_dict_delitem, ll_d, llstr("abc"))
rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 13)
py.test.raises(KeyError, rordereddict.ll_dict_delitem, ll_d, llstr("def"))
rordereddict.ll_dict_delitem(ll_d, llstr("abc"))
assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 1
assert count_items(ll_d, rordereddict.DELETED) == 1
py.test.raises(KeyError, rordereddict.ll_dict_getitem, ll_d, llstr("abc"))
def test_dict_del_not_lastitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 13)
rordereddict.ll_dict_setitem(ll_d, llstr("def"), 15)
rordereddict.ll_dict_delitem(ll_d, llstr("abc"))
assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 2
assert count_items(ll_d, rordereddict.DELETED) == 1
def test_dict_resize(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("a"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("b"), 2)
rordereddict.ll_dict_setitem(ll_d, llstr("c"), 3)
rordereddict.ll_dict_setitem(ll_d, llstr("d"), 4)
rordereddict.ll_dict_setitem(ll_d, llstr("e"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("f"), 6)
rordereddict.ll_dict_setitem(ll_d, llstr("g"), 7)
rordereddict.ll_dict_setitem(ll_d, llstr("h"), 8)
rordereddict.ll_dict_setitem(ll_d, llstr("i"), 9)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 10)
assert len(get_indexes(ll_d)) == 16
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 11)
rordereddict.ll_dict_setitem(ll_d, llstr("l"), 12)
rordereddict.ll_dict_setitem(ll_d, llstr("m"), 13)
assert len(get_indexes(ll_d)) == 64
for item in 'abcdefghijklm':
assert rordereddict.ll_dict_getitem(ll_d, llstr(item)) == ord(item) - ord('a') + 1
def test_dict_grow_cleanup(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
lls = llstr("a")
for i in range(40):
rordereddict.ll_dict_setitem(ll_d, lls, i)
rordereddict.ll_dict_delitem(ll_d, lls)
assert ll_d.num_ever_used_items <= 10
def test_dict_iteration(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
assert [hlstr(entry.key) for entry in self._ll_iter(ll_d)] == ["k", "j"]
def _ll_iter(self, ll_d):
ITER = rordereddict.get_ll_dictiter(lltype.typeOf(ll_d))
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
ll_dictnext = rordereddict._ll_dictnext
while True:
try:
num = ll_dictnext(ll_iter)
except StopIteration:
break
yield ll_d.entries[num]
def test_popitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
TUP = lltype.Ptr(lltype.GcStruct('x', ('item0', lltype.Ptr(rstr.STR)),
('item1', lltype.Signed)))
ll_elem = rordereddict.ll_dict_popitem(TUP, ll_d)
assert hlstr(ll_elem.item0) == "j"
assert ll_elem.item1 == 2
ll_elem = rordereddict.ll_dict_popitem(TUP, ll_d)
assert hlstr(ll_elem.item0) == "k"
assert ll_elem.item1 == 1
py.test.raises(KeyError, rordereddict.ll_dict_popitem, TUP, ll_d)
def test_popitem_first(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
rordereddict.ll_dict_setitem(ll_d, llstr("m"), 3)
ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
for expected in ["k", "j", "m"]:
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
num = rordereddict._ll_dictnext(ll_iter)
ll_key = ll_d.entries[num].key
assert hlstr(ll_key) == expected
rordereddict.ll_dict_delitem(ll_d, ll_key)
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
py.test.raises(StopIteration, rordereddict._ll_dictnext, ll_iter)
def test_popitem_first_bug(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1)
rordereddict.ll_dict_delitem(ll_d, llstr("k"))
ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
num = rordereddict._ll_dictnext(ll_iter)
ll_key = ll_d.entries[num].key
assert hlstr(ll_key) == "j"
assert ll_d.lookup_function_no == ( # 1 free item found at the start
(1 << rordereddict.FUNC_SHIFT) | rordereddict.FUNC_BYTE)
rordereddict.ll_dict_delitem(ll_d, llstr("j"))
assert ll_d.num_ever_used_items == 0
assert ll_d.lookup_function_no == rordereddict.FUNC_BYTE # reset
def _get_int_dict(self):
def eq(a, b):
return a == b
return rordereddict.get_ll_dict(lltype.Signed, lltype.Signed,
ll_fasthash_function=intmask,
ll_hash_function=intmask,
ll_eq_function=eq)
def test_direct_enter_and_del(self):
DICT = self._get_int_dict()
ll_d = rordereddict.ll_newdict(DICT)
numbers = [i * rordereddict.DICT_INITSIZE + 1 for i in range(8)]
for num in numbers:
rordereddict.ll_dict_setitem(ll_d, num, 1)
rordereddict.ll_dict_delitem(ll_d, num)
for k in foreach_index(ll_d):
assert k < rordereddict.VALID_OFFSET
def test_contains(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_contains(ll_d, llstr("k"))
assert not rordereddict.ll_dict_contains(ll_d, llstr("j"))
def test_clear(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("l"), 1)
rordereddict.ll_dict_clear(ll_d)
assert ll_d.num_live_items == 0
def test_get(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_get(ll_d, llstr("k"), 32) == 1
assert rordereddict.ll_dict_get(ll_d, llstr("j"), 32) == 32
def test_setdefault(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_setdefault(ll_d, llstr("j"), 42) == 42
assert rordereddict.ll_dict_getitem(ll_d, llstr("j")) == 42
assert rordereddict.ll_dict_setdefault(ll_d, llstr("k"), 42) == 1
assert rordereddict.ll_dict_getitem(ll_d, llstr("k")) == 1
def test_copy(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
ll_d2 = rordereddict.ll_dict_copy(ll_d)
for ll_d3 in [ll_d, ll_d2]:
assert rordereddict.ll_dict_getitem(ll_d3, llstr("k")) == 1
assert rordereddict.ll_dict_get(ll_d3, llstr("j"), 42) == 2
assert rordereddict.ll_dict_get(ll_d3, llstr("i"), 42) == 42
def test_update(self):
DICT = self._get_str_dict()
ll_d1 = rordereddict.ll_newdict(DICT)
ll_d2 = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d1, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d1, llstr("j"), 6)
rordereddict.ll_dict_setitem(ll_d2, llstr("i"), 7)
rordereddict.ll_dict_setitem(ll_d2, llstr("k"), 8)
rordereddict.ll_dict_update(ll_d1, ll_d2)
for key, value in [("k", 8), ("i", 7), ("j", 6)]:
assert rordereddict.ll_dict_getitem(ll_d1, llstr(key)) == value
def test_pop(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 6)
assert rordereddict.ll_dict_pop(ll_d, llstr("k")) == 5
assert rordereddict.ll_dict_pop(ll_d, llstr("j")) == 6
py.test.raises(KeyError, rordereddict.ll_dict_pop, ll_d, llstr("k"))
py.test.raises(KeyError, rordereddict.ll_dict_pop, ll_d, llstr("j"))
def test_pop_default(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 6)
assert rordereddict.ll_dict_pop_default(ll_d, llstr("k"), 42) == 5
assert rordereddict.ll_dict_pop_default(ll_d, llstr("j"), 41) == 6
assert rordereddict.ll_dict_pop_default(ll_d, llstr("k"), 40) == 40
assert rordereddict.ll_dict_pop_default(ll_d, llstr("j"), 39) == 39
def test_bug_remove_deleted_items(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
for i in range(15):
rordereddict.ll_dict_setitem(ll_d, llstr(chr(i)), 5)
for i in range(15):
rordereddict.ll_dict_delitem(ll_d, llstr(chr(i)))
rordereddict.ll_prepare_dict_update(ll_d, 7)
# used to get UninitializedMemoryAccess
def test_bug_resize_counter(self):
DICT = self._get_int_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 1, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_setitem(ll_d, 2, 0)
rordereddict.ll_dict_delitem(ll_d, 1)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_delitem(ll_d, 2)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_delitem(ll_d, 0)
rordereddict.ll_dict_setitem(ll_d, 0, 0)
rordereddict.ll_dict_setitem(ll_d, 1, 0)
d = ll_d
idx = d.indexes._obj.container
num_nonfrees = 0
for i in range(idx.getlength()):
got = idx.getitem(i) # 0: unused; 1: deleted
num_nonfrees += (got > 0)
assert d.resize_counter <= idx.getlength() * 2 - num_nonfrees * 3
@given(strategies.lists(strategies.integers(min_value=1, max_value=5)))
def test_direct_move_to_end(self, lst):
DICT = self._get_int_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, 1, 11)
rordereddict.ll_dict_setitem(ll_d, 2, 22)
def content():
return [(entry.key, entry.value) for entry in self._ll_iter(ll_d)]
for case in lst:
if case == 1:
rordereddict.ll_dict_move_to_end(ll_d, 1, True)
assert content() == [(2, 22), (1, 11)]
elif case == 2:
rordereddict.ll_dict_move_to_end(ll_d, 2, True)
assert content() == [(1, 11), (2, 22)]
elif case == 3:
py.test.raises(KeyError, rordereddict.ll_dict_move_to_end,
ll_d, 3, True)
elif case == 4:
rordereddict.ll_dict_move_to_end(ll_d, 2, False)
assert content() == [(2, 22), (1, 11)]
elif case == 5:
rordereddict.ll_dict_move_to_end(ll_d, 1, False)
assert content() == [(1, 11), (2, 22)]
class TestRDictDirectDummyKey(TestRDictDirect):
    # Re-run the whole direct test suite with a dummy key object installed.
    class dummykeyobj:
        ll_dummy_value = llstr("dupa")
class TestRDictDirectDummyValue(TestRDictDirect):
    # Re-run the whole direct test suite with a dummy value object installed.
    class dummyvalueobj:
        ll_dummy_value = -42
class TestOrderedRDict(BaseTestRDict):
    """Run the generic rdict test suite through the interpreter, but with
    OrderedDict as the prebuilt dict type (plus a few ordering-specific tests)."""
    @staticmethod
    def newdict():
        return OrderedDict()
    @staticmethod
    def newdict2():
        return OrderedDict()
    @staticmethod
    def new_r_dict(myeq, myhash, force_non_null=False, simple_hash_eq=False):
        return objectmodel.r_ordereddict(
            myeq, myhash, force_non_null=force_non_null,
            simple_hash_eq=simple_hash_eq)
    def test_two_dicts_with_different_value_types(self):
        def func(i):
            d1 = OrderedDict()
            d1['hello'] = i + 1
            d2 = OrderedDict()
            d2['world'] = d1
            return d2['world']['hello']
        res = self.interpret(func, [5])
        assert res == 6
    def test_move_to_end(self):
        # The same function must behave identically untranslated and
        # interpreted.
        def func():
            d1 = OrderedDict()
            d1['key1'] = 'value1'
            d1['key2'] = 'value2'
            for i in range(20):
                objectmodel.move_to_end(d1, 'key1')
                assert d1.keys() == ['key2', 'key1']
                objectmodel.move_to_end(d1, 'key2')
                assert d1.keys() == ['key1', 'key2']
            for i in range(20):
                objectmodel.move_to_end(d1, 'key2', last=False)
                assert d1.keys() == ['key2', 'key1']
                objectmodel.move_to_end(d1, 'key1', last=False)
                assert d1.keys() == ['key1', 'key2']
        func()
        self.interpret(func, [])
class ODictSpace(MappingSpace):
    """State-machine 'space' adapter that maps the generic MappingSpace
    operations onto the low-level ordered-dict helpers, with extra checks
    for key order and internal invariants."""
    MappingRepr = rodct.OrderedDictRepr
    moved_around = False
    ll_getitem = staticmethod(rodct.ll_dict_getitem)
    ll_setitem = staticmethod(rodct.ll_dict_setitem)
    ll_delitem = staticmethod(rodct.ll_dict_delitem)
    ll_len = staticmethod(rodct.ll_dict_len)
    ll_contains = staticmethod(rodct.ll_dict_contains)
    ll_copy = staticmethod(rodct.ll_dict_copy)
    ll_clear = staticmethod(rodct.ll_dict_clear)
    ll_popitem = staticmethod(rodct.ll_dict_popitem)
    def newdict(self, repr):
        return rodct.ll_newdict(repr.DICT)
    def get_keys(self):
        # Collect all low-level keys in iteration (= insertion) order.
        DICT = lltype.typeOf(self.l_dict).TO
        ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
        ll_iter = rordereddict.ll_dictiter(ITER, self.l_dict)
        ll_dictnext = rordereddict._ll_dictnext
        keys_ll = []
        while True:
            try:
                num = ll_dictnext(ll_iter)
                keys_ll.append(self.l_dict.entries[num].key)
            except StopIteration:
                break
        return keys_ll
    def popitem(self):
        # overridden to check that we're getting the most recent key,
        # not a random one
        try:
            ll_tuple = self.ll_popitem(self.TUPLE, self.l_dict)
        except KeyError:
            assert len(self.reference) == 0
        else:
            ll_key = ll_tuple.item0
            ll_value = ll_tuple.item1
            key, value = self.reference.popitem()
            assert self.ll_key(key) == ll_key
            assert self.ll_value(value) == ll_value
            self.removed_keys.append(key)
    def removeindex(self):
        # remove the index, as done during translation for prebuilt dicts
        # (but cannot be done if we already removed a key)
        if not self.removed_keys and not self.moved_around:
            rodct.ll_no_initial_index(self.l_dict)
    def move_to_end(self, key, last=True):
        # Mirror the low-level move in the pure-Python reference dict.
        ll_key = self.ll_key(key)
        rodct.ll_dict_move_to_end(self.l_dict, ll_key, last)
        value = self.reference.pop(key)
        if last:
            self.reference[key] = value
        else:
            items = self.reference.items()
            self.reference.clear()
            self.reference[key] = value
            self.reference.update(items)
        # prevent ll_no_initial_index()
        self.moved_around = True
    def fullcheck(self):
        # overridden to also check key order
        assert self.ll_len(self.l_dict) == len(self.reference)
        keys_ll = self.get_keys()
        assert len(keys_ll) == len(self.reference)
        for key, ll_key in zip(self.reference, keys_ll):
            assert self.ll_key(key) == ll_key
            assert (self.ll_getitem(self.l_dict, self.ll_key(key)) ==
                    self.ll_value(self.reference[key]))
        for key in self.removed_keys:
            if key not in self.reference:
                try:
                    self.ll_getitem(self.l_dict, self.ll_key(key))
                except KeyError:
                    pass
                else:
                    raise AssertionError("removed key still shows up")
        # check some internal invariants
        d = self.l_dict
        num_lives = 0
        for i in range(d.num_ever_used_items):
            if d.entries.valid(i):
                num_lives += 1
        assert num_lives == d.num_live_items
        fun = d.lookup_function_no & rordereddict.FUNC_MASK
        if fun == rordereddict.FUNC_MUST_REINDEX:
            assert not d.indexes
        else:
            assert d.indexes
            idx = d.indexes._obj.container
            num_lives = 0
            num_nonfrees = 0
            for i in range(idx.getlength()):
                got = idx.getitem(i)    # 0: unused; 1: deleted
                num_nonfrees += (got > 0)
                num_lives += (got > 1)
            assert num_lives == d.num_live_items
            assert 0 < d.resize_counter <= idx.getlength()*2 - num_nonfrees*3
class ODictSM(MappingSM):
    # Hypothesis state machine driving ODictSpace.
    Space = ODictSpace
def test_hypothesis():
    # Run the random set/del/move state machine against the reference dict.
    run_state_machine_as_test(
        ODictSM, settings(max_examples=500, stateful_step_count=100))
| mozillazg/pypy | rpython/rtyper/test/test_rordereddict.py | test_rordereddict.py | py | 22,081 | python | en | code | 430 | github-code | 36 |
1284010441 | import socket
import webbrowser
# --- Client side: receive a URL from the local server and open it ----------
s = socket.socket()
host = 'localhost'  # server address
port = 9010
s.connect((host, port))
url = s.recv(1024)
s.close()  # BUG FIX: was `s.close` (no parentheses), which never closed the socket
webbrowser.open_new(url.decode())  # recv() returns bytes; webbrowser wants str

# --- Server side: accept one client and acknowledge it ---------------------
s = socket.socket()
host = 'localhost'
port = 9010
s.bind((host, port))
s.listen(1)
c, addr = s.accept()  # Establish connection with client.
c.send(b"User accepted.")  # socket.send() requires bytes on Python 3
c.close()  # Close the connection
s.close() | hackandcode/sniffnlearn | OAuth/client.py | client.py | py | 418 | python | en | code | 0 | github-code | 36 |
def corpus_file_transform(src_file, dst_file):
    """Convert a whitespace-segmented corpus into per-character BMES lines.

    Each word in ``src_file`` becomes one line per character in ``dst_file``:
    single-character words are tagged ``S``; longer words get ``B`` for the
    first character, ``M`` for middle characters and ``E`` for the last.

    :param src_file: path to the segmented corpus (words separated by spaces)
    :param dst_file: path of the tagged output file (created/overwritten)
    """
    import os
    assert os.path.isfile(src_file), 'Src File Not Exists.'
    # BUG FIX: the original imported `tqdm` but called the undefined name
    # `tqdm_notebook` (NameError).  The progress bar is now optional.
    try:
        from tqdm.notebook import tqdm as _progress
    except ImportError:
        _progress = lambda it: it
    with open(src_file, 'r', encoding='utf-8') as text_corpus_src:
        with open(dst_file, 'w', encoding='utf-8') as text_corpus_dst:
            for text_line in _progress(text_corpus_src.readlines()):
                for text_word in text_line.strip().split():
                    if len(text_word) == 1:
                        text_corpus_dst.write(text_word + "\tS\n")
                    else:
                        text_corpus_dst.write(text_word[0] + "\tB\n")
                        for w in text_word[1:-1]:
                            text_corpus_dst.write(w + "\tM\n")
                        text_corpus_dst.write(text_word[-1] + "\tE\n")
def IOForFeature(file, feature=None, mode='rb', featureList=['A', 'B', 'C']):
    """Load (``mode='rb'``) or save (``mode='wb'``) a pickled feature table.

    On any load/save failure (missing file, corrupt pickle, ...) an empty
    per-label table ``{label: {} for label in featureList}`` is returned, so
    training can start from scratch.

    :param file: path of the pickle file
    :param feature: the feature dict to dump (required when mode='wb')
    :param mode: 'rb' to load, 'wb' to save
    :returns: the loaded/saved feature dict, or a fresh empty table
    """
    # BUG FIX: the error message claimed 'r'/'w' while the check is 'rb'/'wb'.
    assert (mode == 'rb') or (mode == 'wb'), 'The third parameter must be \'rb\' or \'wb\''
    assert not ((mode == 'wb') and not feature), 'The second parameter feature must not be empty.'
    import pickle
    try:
        with open(file, mode) as f:
            if mode == 'rb':
                feature = pickle.load(f)
            elif mode == 'wb':
                pickle.dump(feature, f)
    except (OSError, EOFError, pickle.PickleError):
        # Narrowed from a bare `except:`: best-effort fallback to an empty table.
        feature = {label: {} for label in featureList}
    return feature
def TrainingFeatureA(corpus, featureA, wordLabel={'B': 0, 'M': 1, 'E': 2, 'S': 3}):
    """Accumulate per-character label counts: statistics for p(y_i | x_i).

    :param corpus: lines of the form "<char>\\t<label>\\n" (char at [0], label at [2])
    :param featureA: existing table to update in place, or falsy to start fresh
    :param wordLabel: mapping from BMES label to its column index
    :returns: dict mapping char -> [count_B, count_M, count_E, count_S]
    """
    # BUG FIX: `tqdm_notebook` was never imported anywhere (NameError at runtime).
    try:
        from tqdm.notebook import tqdm as _progress
    except ImportError:
        _progress = lambda it: it
    if not featureA:
        featureA = {}
    for word in _progress(corpus):
        counts = featureA.setdefault(word[0], [0, 0, 0, 0])
        counts[wordLabel[word[2]]] += 1
    return featureA
def TrainingFeatureB(corpus, featureB, wordLabel={'B': 0, 'M': 1, 'E': 2, 'S': 3}):
    """Accumulate label-transition counts: statistics for p(y_{i+1} | x_i, y_i).

    :param corpus: lines of the form "<char>\\t<label>\\n" (char at [0], label at [2])
    :param featureB: existing table to update in place, or falsy to start fresh
    :param wordLabel: mapping from BMES label to its row/column index
    :returns: dict mapping char -> 4x4 matrix of counts[this_label][next_label]
    """
    # BUG FIX: `tqdm_notebook` was never imported anywhere (NameError at runtime).
    try:
        from tqdm.notebook import tqdm as _progress
    except ImportError:
        _progress = lambda it: it
    if not featureB:
        featureB = {}
    for word, nextword in _progress(zip(corpus[:-1], corpus[1:])):
        rows = featureB.setdefault(word[0], [[0, 0, 0, 0] for _ in range(4)])
        rows[wordLabel[word[2]]][wordLabel[nextword[2]]] += 1
    return featureB
def TrainingFeatureC(corpus, featureC, wordLabel={'B': 0, 'M': 1, 'E': 2, 'S': 3}):
    """Accumulate neighbor-character counts: p(x_{i-1}|x_i,y_i), p(x_{i+1}|x_i,y_i).

    For each character, per label, count how often each neighbor character
    appears to its left ([0]) and to its right ([1]).

    :param corpus: lines of the form "<char>\\t<label>\\n" (char at [0], label at [2])
    :param featureC: existing table to update in place, or falsy to start fresh
    :param wordLabel: mapping from BMES label to its index
    :returns: dict char -> {label: {neighbor_char: [left_count, right_count]}}
    """
    # BUG FIX: `tqdm_notebook` was never imported anywhere (NameError at runtime).
    try:
        from tqdm.notebook import tqdm as _progress
    except ImportError:
        _progress = lambda it: it
    if not featureC:
        featureC = {}
    for lastWord, word, nextWord in _progress(zip(corpus[:-2], corpus[1:-1], corpus[2:])):
        per_label = featureC.setdefault(word[0], {label: {} for label in wordLabel})
        left = per_label[word[2]].setdefault(lastWord[0], [0, 0])
        left[0] += 1
        right = per_label[word[2]].setdefault(nextWord[0], [0, 0])
        right[1] += 1
    # BUG FIX: the original returned the undefined name `featureC4` (NameError).
    return featureC
def featureTraining(feature, train_corpus,
                    featureList=['A', 'B', 'C'],
                    featureFunction={'A': TrainingFeatureA, 'B': TrainingFeatureB, 'C': TrainingFeatureC},
                    wordLabel={'B': 0, 'M': 1, 'E': 2, 'S': 3}):
    """Train every feature table named in ``featureList``, updating ``feature``
    in place by dispatching to the matching training function."""
    for label in featureList:
        trainer = featureFunction[label]
        feature[label] = trainer(train_corpus, feature[label], wordLabel)
def getTestFeatureABC(test_str, feature, wordLabel):
    # Turn raw counts into per-character cost tables for the test string.
    # A: negative log of the normalized label distribution per char
    #    (zeros when the char was never seen in training).
    # B: negative log of the row-normalized label-transition matrix per char.
    # C: neighbor-character counts normalized column-wise into probabilities
    #    (empty per-label dicts for unseen chars).
    import numpy as np
    test_featureA = {word : (-np.log(np.array(feature['A'][word]) / sum(feature['A'][word]))).tolist()
                     if feature['A'].get(word) else [0,0,0,0] for word in test_str}
    test_featureB = {word : (-np.log(np.array(feature['B'][word]).T / np.array(feature['B'][word]).sum(axis = 1)).T).tolist()
                     if feature['B'].get(word) else [[0,0,0,0] for label in wordLabel.keys()] for word in test_str}
    test_featureC = {word :{d1_key : {d2_key : d2_value for d2_key,d2_value in
                     zip(d1_value.keys(),(np.array(list(d1_value.values())) / np.array(list(d1_value.values())).sum(axis = 0)).tolist())}
                     for d1_key,d1_value in feature['C'][word].items()} if feature['C'].get(word) else {label : {} for label in wordLabel.keys()} for word in test_str}
    return test_featureA,test_featureB,test_featureC
def getDividedResult(wordLabel, relationDict, test_str):
    # Viterbi backtrace: pick the cheapest final label, then walk the
    # backpointers in relationDict from the last character to the first.
    # NOTE(review): relies on wordLabel preserving insertion order (Python 3.7+ dicts).
    wordLabelk = list(wordLabel.keys())
    thisIndex = relationDict[-1][0].index(min(relationDict[-1][0]))
    dividedResult, lastIndex = [[test_str[-1],wordLabelk[thisIndex]]],relationDict[-1][1][thisIndex]
    for w_id in range(len(test_str) - 2,-1,-1):
        dividedResult.append([test_str[w_id],wordLabelk[lastIndex]])
        lastIndex = relationDict[w_id][1][lastIndex]
    dividedResult.reverse()
    # Join characters, inserting a space before B/S and after E/S word boundaries.
    resultString = ''.join([(' ' if d_R[1] == 'S' or d_R[1] == 'B' else '') + d_R[0] + (' ' if d_R[1] == 'S' or d_R[1] == 'E' else '') for d_R in dividedResult])
    return dividedResult,resultString
def CRFWordSeperate(test_str, feature, wordLabel = {'B' : 0, 'M' : 1, 'E' : 2, 'S' : 3} ):
    # Viterbi decoding over the BMES label lattice: build per-character costs
    # from features A/C, add transition costs from feature B, and keep a
    # backpointer table (relationDict) for getDividedResult to trace back.
    import numpy as np
    test_featureA,test_featureB,test_featureC = getTestFeatureABC(test_str,feature,wordLabel)
    # relationDict[w_id] = [costs per label, backpointer per label]
    relationDict = [[[test_featureA[test_str[w_id]][wordLabel[l_id]] *
                      (1 - (0 if w_id == 0 else test_featureC[test_str[w_id]][l_id].get(test_str[w_id - 1], [0,0])[0])) *
                      (1 - (0 if w_id == len(test_str) - 1 else test_featureC[test_str[w_id]][l_id].get(test_str[w_id + 1], [0,0])[1]))
                      for l_id in wordLabel],[0 for l_id in wordLabel]] for w_id in range(len(test_str))]
    # A sentence cannot start with M or E.
    relationDict[0][0][wordLabel['E']] = relationDict[0][0][wordLabel['M']] = float('inf')
    for w_id in range(1,len(test_str)):
        for l_id in wordLabel:
            # Cost of arriving at label l_id from each previous label l.
            candidateList = [test_featureB[test_str[w_id - 1]][wordLabel[l]][wordLabel[l_id]]
                             * (1 - (0 if w_id == 0 else test_featureC[test_str[w_id]][l_id].get(test_str[w_id - 1], [0,0])[0]))
                             * (1 - (0 if w_id == len(test_str) - 1 else test_featureC[test_str[w_id]][l_id].get(test_str[w_id + 1], [0,0])[1]))
                             + relationDict[w_id - 1][0][wordLabel[l]] for l in wordLabel]
            # NaNs (from 0/0 normalizations) are treated as impossible paths.
            candidateList = [float('inf') if np.isnan(c_l) else c_l for c_l in candidateList]
            relationDict[w_id][0][wordLabel[l_id]] += min(candidateList)
            relationDict[w_id][1][wordLabel[l_id]] = candidateList.index(min(candidateList))
    # A sentence cannot end with B or M.
    relationDict[-1][0][wordLabel['B']] = relationDict[-1][0][wordLabel['M']] = float('inf')
    return getDividedResult(wordLabel,relationDict,test_str)
if __name__=="__main__":
train_corpus_src = 'msr_training.utf8'
train_corpus_dst = 'msr_training.utf8.pr'
corpus_file_transform(train_corpus_src,train_corpus_dst)
with open(train_corpus_dst,'r',encoding = 'utf-8') as f:
train_corpus = f.readlines()
print(train_corpus[:10])
featureFile = 'feature.pkl'
wordLabel = {'B' : 0, 'M' : 1, 'E' : 2, 'S' : 3}
feature = IOForFeature(featureFile,mode='rb')
featureTraining(feature,train_corpus)
feature = IOForFeature(featureFile,feature,mode='wb')
t_str = '最近内存在涨价,不能用以前等价值的物品交换了'
dividedResult,resultString = CRFWordSeperate(t_str,feature,wordLabel)
dividedSequences = ''.join([result[1] for result in dividedResult])
print(resultString)
print(dividedSequences)
print(dividedResult)
test_corpus_src = 'pku_training.utf8'
test_corpus_dst = 'pku_training.utf8.pr'
corpus_file_transform(test_corpus_src,test_corpus_dst)
#将已分词的训练文件转换为未分词的测试文件
with open(test_corpus_src,'r',encoding = 'utf-8') as f:
test_sentences = f.readlines()
test_sentences = [sentence.replace(' ','') for sentence in test_sentences]
test_sentences = [sentence.replace('\n','') for sentence in test_sentences]
#将获得测试文件的正确标注
with open(test_corpus_dst,'r',encoding = 'utf-8') as f:
test_corpus = f.readlines()
test_label = ''.join([result[2] for result in test_corpus])
print(test_sentences[0])
print(test_corpus[:len(test_sentences[0])])
print(test_label[:len(test_sentences[0])])
dividedSequences = ''
dividedResults = []
resultStrings = []
for sentences in tqdm_notebook(test_sentences[:500]):
dividedResult,resultString = CRFWordSeperate(sentences,feature,wordLabel)
dividedResults.append(dividedResult)
resultStrings.append(resultString)
dividedSequences += ''.join([result[1] for result in dividedResult])
for d_R,r_S in zip(dividedResults[:10],resultStrings[:10]):
print(r_S)
print(d_R)
count = [0,0,0,0]
for d_S in dividedSequences:
count[wordLabel[d_S]] += 1
print(list(zip(wordLabel.keys(),count)))
accurate = [0,0]
for d_S in range(len(dividedSequences)):
accurate[test_label[d_S] == dividedSequences[d_S]] += 1
print('Wrong : %.2f%%, Right : %.2f%%' % (accurate[0] / sum(accurate) * 100,accurate[1] / sum(accurate) * 100)) | JackieChenssh/TC_VFDT_CRF | CRF.py | CRF.py | py | 9,036 | python | en | code | 0 | github-code | 36 |
17134892000 |
# Packages
import pandas as pd
import os
import json
from gensim.utils import simple_preprocess
from gensim.summarization.textcleaner import split_sentences
from functools import reduce
from fuzzywuzzy import fuzz
## Functions
## Returns marked html from iucn notes
def find_country(text, country):
    '''Return *text* as HTML with occurrences of *country* wrapped in <mark> tags.

    The text is scanned word by word with a sliding window as wide as the
    country name; any window whose fuzzy token-set ratio against the country
    is >= 90 has all of its words flagged.  Adjacent highlighted words are
    merged into a single <mark>...</mark> span.

    :param text: plain-text IUCN notes
    :param country: country name to highlight (may be multi-word)
    :returns: the marked-up HTML string
    '''
    # Split text into individual words; the window width is the number of
    # words in the country name.
    txt_ls = text.split(" ")
    q_len = len(country.split(" "))
    interest = [0] * len(txt_ls)
    # Flag every word that falls inside a fuzzy-matching window.
    for i in range(len(txt_ls) - q_len + 1):
        tmp_txt = " ".join(txt_ls[i:i + q_len])
        if fuzz.token_set_ratio(tmp_txt, country) >= 90:
            interest[i:i + q_len] = [1] * q_len
    # Wrap each flagged word in <mark> tags.
    for w in range(len(txt_ls)):
        if interest[w] == 1:
            txt_ls[w] = "<mark>" + txt_ls[w] + "</mark>"
    recomb_html = " ".join(txt_ls)
    # Merge consecutive highlighted words into one continuous span.
    recomb_html = recomb_html.replace("</mark> <mark>", " ")
    return (recomb_html)
# Extracts data from dictionary level given a list of indices to that level
def get_from_dict(dataDict, pathlist):
    """Walk a nested dictionary, following the keys in *pathlist*, and return
    the value found at the end of the path (None if the final key is absent)."""
    current = dataDict
    for step in pathlist:
        current = dict.get(current, step)
    return current
## Constants
# Path (sequence of nested dict keys) into the IUCN JSON blob for each
# free-text notes field we extract.
notes_paths = {"taxonomic_notes" : ["taxonomy", "taxonomic_notes", "value"],
               "red_list_notes" : ["iucn_status", "red_list_notes", "value"],
               "range_notes" : ["habitat", "range_notes", "value"],
               "population_notes" : ["population", "population_notes", "value"],
               "use_trate_notes" : ["trade", "use_trade_notes", "value"],
               "conservation_notes" : ["conservation", "conservation_notes", "value"],
               "threats_notes" : ["threats", "threats_notes", "value"]}
## Main code
# Load CITES trade records and collect every country code that imports or
# exports parrots.
cites_df = pd.read_csv("../Data/CitesParrots.csv")
cites_country_code = list(set(list(cites_df["Importer"]) + (list(cites_df["Exporter"]))))
# Load country list data and keep only the countries present in the CITES data.
country_df = pd.read_csv("../Data/countries.csv")
country_df = country_df.loc[country_df["Code"].isin(cites_country_code)]
# Create a simpler country name: text before any "(" or ",".
country_df["Basic"] = [country.split("(")[0].split(",")[0] for country in country_df["Name"]]
# List all per-species json files
dat_dir = "../Data/parrot_data/"
f_ls = os.listdir(dat_dir)
# Pre-allocate one output row per (species, country) pair; unused rows are
# trimmed after the loop.
n_row = len(f_ls) * country_df.shape[0]
out_df = pd.DataFrame({"SpeciesID": ["NA"] * n_row,
                       "Country": ["NA"] * n_row,
                       "taxonomic_notes": ["NA"] * n_row,
                       "red_list_notes": ["NA"] * n_row,
                       "range_notes": ["NA"] * n_row,
                       "population_notes": ["NA"] * n_row,
                       "use_trate_notes": ["NA"] * n_row,
                       "conservation_notes": ["NA"] * n_row,
                       "threats_notes": ["NA"] * n_row})
row_count = 0
for f in f_ls:
    # Load json
    with open(dat_dir + f) as json_file:
        parrot_dat = json.load(json_file)
    parrot = f.split(".")[0]
    # Skip species that have no IUCN data at all.
    if len(parrot_dat["iucn"]) > 0:
        iucn_dat = parrot_dat["iucn"]
        for country in country_df["Basic"]:
            for key in notes_paths.keys():
                # Extract the notes text for this field.
                tmp_dat = get_from_dict(iucn_dat, notes_paths[key])
                # If real text came back, store the country-highlighted HTML.
                if ((tmp_dat != "NA") & (tmp_dat != "value")):
                    # BUG FIX: chained assignment `out_df.iloc[row][key] = ...`
                    # writes to a temporary Series and can silently be lost;
                    # .at performs a direct scalar write.
                    out_df.at[row_count, key] = find_country(tmp_dat, country)
            out_df.at[row_count, "SpeciesID"] = parrot
            out_df.at[row_count, "Country"] = country
            row_count += 1
# Trim unused pre-allocated rows and split the result into three CSVs.
out_df = out_df.loc[0:row_count - 1]
idx = int((row_count - 1) / 3.0)
out_df1 = out_df.loc[0:idx]
out_df2 = out_df.loc[idx + 1:2 * idx]
out_df3 = out_df.loc[2 * idx + 1:row_count - 1]
out_df1.to_csv("../Data/parrot_csv/marked_text1.csv")
out_df2.to_csv("../Data/parrot_csv/marked_text2.csv")
out_df3.to_csv("../Data/parrot_csv/marked_text3.csv")
| ConMine/ConMine | Development/Code/sentence_tagging.py | sentence_tagging.py | py | 4,876 | python | en | code | 0 | github-code | 36 |
22703377702 | from __future__ import print_function
import sys
import xml.etree.ElementTree as ET
import os
sys.path.extend(['.', '..', './pycparser/'])
from pycparser import c_parser, c_ast
# ---------------------------------------------------------------------------
# Parse a C source file with pycparser, dump its AST as XML, then walk the
# XML to find, for each named kernel, the function it is called from and the
# declared type of every local variable in that function.
# ---------------------------------------------------------------------------
# FIX: the input file handle was never closed; use context managers.
with open('dummy3.c', 'r') as filehandle:
    text = ''.join(filehandle.readlines())

# create a pycparser
parser = c_parser.CParser()
ast = parser.parse(text, filename='<none>')

# generate the XML tree
ast.show()
with open('code_ast.xml', 'w') as codeAstXml:
    ast.showXml(codeAstXml)

tree = ET.parse('code_ast.xml')
root = tree.getroot()

kernelsVars = []
kernelsTyps = []
kernelNames = ['__ungenerated_kernel_function_region__0']

for kn in kernelNames:
    # go through all functions in the code (C/C++ code),
    # find the function in which the kernel is called,
    # then record the type of all its local variables
    for func in root.findall(".//FuncDef"):
        kernelFound = 0
        kernelVars = []
        kernelTyps = []
        print('we have found '+str(len(func.findall(".//FuncCall/ID")))+' function calls')
        for fcall in func.findall(".//FuncCall/ID"):
            if str(fcall.get('uid')).strip() == kn.strip():
                kernelFound = 1
        if kernelFound == 1:
            print('<'+kn+'> is found in <'+func.find('Decl').get('uid')+'>')
            # go through all declarations and collect the variables
            funcBody = func.find('Compound')
            for var in funcBody.findall(".//Decl"):
                # single variable Decl; pointer depth is encoded as trailing '*'s
                kernelVars.append(var.get('uid'))
                kernelTyps.append(var.find('.//IdentifierType').get('uid')
                                  + ((len(var.findall(".//PtrDecl"))) * '*'))
            kernelsVars.append(kernelVars)
            kernelsTyps.append(kernelTyps)
            break  # only the first matching function per kernel is recorded

# Report every kernel's variables and their types.
for i in range(0, len(kernelsVars)):
    var = kernelsVars[i]
    typ = kernelsTyps[i]
    print('=======> kernel #'+str(i)+':')
    for g in range(0, len(var)):
        print(var[g]+'->'+typ[g])

# Remove the temporary XML dump.
os.remove('code_ast.xml')
| lashgar/ipmacc | src/auxilaries/generate_oacc_ast.py | generate_oacc_ast.py | py | 2,204 | python | en | code | 13 | github-code | 36 |
4480093655 | import tkinter.messagebox
import pandas as pd
from tkinter import *
from random import choice
BACKGROUND_COLOR = "#B1DDC6"
try:
data = pd.read_csv("data/words_to_learn.csv").to_dict('records')
except FileNotFoundError:
data = pd.read_csv("data/english_russian_words.csv").to_dict('records')
except pd.errors.EmptyDataError:
data = pd.read_csv("data/english_russian_words.csv").to_dict('records')
# ser = pd.Series(data.Russian.values, index=data.English).to_dict()
# my_list = [
# {key: value} for key, value in ser.items()
# ]
# print(my_list)
word = None
def flip():
    """Turn the current card over to show its Russian side."""
    if not data:
        return
    canvas.itemconfig(canvas_image, image=card_back)
    canvas.itemconfig(card_title, text="Russian", fill='white')
    canvas.itemconfig(card_word, text=word['Russian'], fill='white')
def update_word():
    # Show a new random card (English side up) and re-arm the 3 s flip timer.
    # When the word list is exhausted, offer to restart with the full list.
    global word, flip_timer, data
    if len(data) == 0:
        play_again = tkinter.messagebox.askokcancel(title="There is no words in your dictionary!",
                                                    message="Do you want to repeat words again?")
        if play_again:
            # Reload the full word list and recurse once to show a card.
            data = pd.read_csv("data/english_russian_words.csv").to_dict('records')
            update_word()
        else:
            window.destroy()
    else:
        # Cancel the pending flip before switching cards.
        window.after_cancel(flip_timer)
        word = choice(data)
        canvas.itemconfig(canvas_image, image=card_front)
        canvas.itemconfig(card_title, text="English", fill='black')
        canvas.itemconfig(card_word, text=word['English'], fill='black')
        flip_timer = window.after(3000, flip)
def remove_word_and_update():
    """Mark the current word as learned, persist the remainder, then advance."""
    global word, data
    if data:
        # Drop the learned card and checkpoint what is still left to learn.
        data.remove(word)
        pd.DataFrame(data).to_csv("data/words_to_learn.csv", index=False)
    update_word()
# --- Tk window and card UI -------------------------------------------------
window = Tk()
window.title("Flashy")
window.config(padx=50, pady=50, bg=BACKGROUND_COLOR)
# Auto-flip the card to the Russian side after 3 seconds.
flip_timer = window.after(3000, flip)
# Button and card artwork (references kept alive at module scope so Tk
# does not garbage-collect the images).
right_image = PhotoImage(file="images/right.png")
wrong_image = PhotoImage(file="images/wrong.png")
card_front = PhotoImage(file="images/card_front.png")
card_back = PhotoImage(file="images/card_back.png")
canvas = Canvas(width=800, height=526, bg=BACKGROUND_COLOR, highlightthickness=0)
canvas_image = canvas.create_image(400, 263, image=card_front)
card_title = canvas.create_text(400, 150, text="English", font=("Ariel", 40, "italic"), fill="black")
card_word = canvas.create_text(400, 263, text="word", font=("Ariel", 60, "bold"), fill="black")
canvas.grid(row=0, column=0, columnspan=2)
# "Right" removes the word from the deck; "wrong" just shows another card.
button_right = Button(image=right_image, highlightthickness=0, bg=BACKGROUND_COLOR, borderwidth=0, command=remove_word_and_update)
button_right.grid(row=1, column=0)
button_wrong = Button(image=wrong_image, highlightthickness=0, bg=BACKGROUND_COLOR, borderwidth=0, command=update_word)
button_wrong.grid(row=1, column=1)
# Show the first card, then hand control to Tk's event loop.
update_word()
window.mainloop()
| montekrist0/PythonBootCamp | day31/main.py | main.py | py | 2,873 | python | en | code | 0 | github-code | 36 |
40727926981 | import requests
STEAMDB_SALE_URL = "https://steamdb.info/sales/?merged=true&cc=cn"
class SaleRequester:
    """Fetch the merged SteamDB sales listing page."""

    def __init__(self):
        # Browser-like headers; presumably SteamDB rejects the default
        # requests User-Agent.
        self.fake_header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset': 'UTF-8,*;q=0.5',
            'Accept-Encoding': 'gzip,deflate,sdch',
            'Accept-Language': 'en-US,en;q=0.8',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
        }

    def get_sale_page(self, timeout=10):
        """Return the sales page HTML, or None when the request times out.

        Args:
            timeout: seconds to wait for the HTTP response. The original
                code passed no timeout, so its ``except TimeoutError``
                could never fire, and a failure left ``content`` unbound
                (UnboundLocalError on return).
        """
        try:
            response = requests.get(STEAMDB_SALE_URL, headers=self.fake_header,
                                    timeout=timeout)
        except requests.exceptions.Timeout:
            # requests raises its own Timeout (not the builtin TimeoutError);
            # fail soft instead of crashing.
            return None
        return response.text
if __name__ == "__main__":
s = SaleRequester()
print(s.get_sale_page()) | KIDJourney/sbeamhub | crawler/requester.py | requester.py | py | 779 | python | en | code | 0 | github-code | 36 |
1327922070 | #!/usr/bin/env python
#
"""
Name: Jesus Hernandez Partner: Zechariah Neak
Email: jherna83@ucsc.edu Email: zneak@ucsc.edu
ID: 1420330
Course: CMPM146 Game AI
Professor: Daniel G Shapiro
\\\\\\\ Program 4 ///////
Description:
This is a bot that is designed to win at Planet Wars against 5 other bots using
a behavior tree. The root acts as a Selector composite parent that checks through
each Sequence composite child top to bottom, and performs the action for whatever
Sequence child returns true. Each Sequence child only returns true if all its
checks and actions come out as successful.
"""
"""
// There is already a basic strategy in place here. You can use it as a
// starting point, or you can throw it out entirely and replace it with your
// own.
"""
import logging, traceback, sys, os, inspect
# Log to "<scriptname>.log" next to this file; NOTE this is repeated in the
# __main__ block below — the second basicConfig call is a harmless no-op.
logging.basicConfig(filename=__file__[:-3] +'.log', filemode='w', level=logging.DEBUG)
# Put the parent directory on sys.path so the sibling packages below import
# when this script is launched directly by the game engine.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from behavior_tree_bot.behaviors import *
from behavior_tree_bot.checks import *
from behavior_tree_bot.bt_nodes import Selector, Sequence, Action, Check
from planet_wars import PlanetWars, finish_turn
# of winning against all the 5 opponent bots
def setup_behavior_tree():
    """Construct the bot's full behavior tree and return its root.

    The root Selector tries each child strategy top to bottom and runs the
    first one whose checks all succeed.
    """
    root = Selector(name='High Level Ordering of Strategies')

    # Leaf actions shared by the strategies below.
    settle = Action(take_defenseless_territory)
    assault = Action(attack_with_no_mercy)
    fortify = Action(reinforce_with_vengeance)
    strike_back = Action(retaliate_with_fury)

    # Strategy 1: grab undefended territory, then press the attack.
    expansion = Sequence(name='Expansion Strategy: Manifest Destiny')
    expansion.child_nodes = [settle, assault]

    # Strategy 2: when under attack, consolidate and hit back.
    defense = Sequence(name='Security Strategy: Cereberus')
    under_attack = Check(if_under_attack)
    defense.child_nodes = [under_attack, fortify, strike_back]

    # Strategy 3: with fleet superiority, commit to an all-out attack.
    aggression = Sequence(name='Aggressive Strategy: Crush All Remaining Resistance')
    fleet_superiority = Check(have_largest_fleet)
    aggression.child_nodes = [fleet_superiority, assault]

    # Priority order, with an unconditional attack as the final fallback.
    root.child_nodes = [expansion, aggression, defense, assault.copy()]

    logging.info('\n' + root.tree_to_string())
    return root
# You don't need to change this function
def do_turn(state):
    """Execute one game turn by running the behavior tree on *state*.

    The caller passes the freshly parsed PlanetWars state; use the
    parameter instead of the module-level ``planet_wars`` global so the
    function actually honors its argument (behavior is unchanged at the
    existing call site, which passes that same global).
    """
    behavior_tree.execute(state)
if __name__ == '__main__':
    # Duplicate of the module-level basicConfig above; harmless no-op.
    logging.basicConfig(filename=__file__[:-3] + '.log', filemode='w', level=logging.DEBUG)
    behavior_tree = setup_behavior_tree()
    try:
        map_data = ''
        # Engine protocol: accumulate map lines from stdin until a "go"
        # line arrives, then take a turn and reset the buffer.
        while True:
            current_line = input()
            if len(current_line) >= 2 and current_line.startswith("go"):
                planet_wars = PlanetWars(map_data)
                do_turn(planet_wars)
                finish_turn()
                map_data = ''
            else:
                map_data += current_line + '\n'
    except KeyboardInterrupt:
        print('ctrl-c, leaving ...')
    except Exception:
        # Surface the traceback to the engine and record it in the log.
        traceback.print_exc(file=sys.stdout)
        logging.exception("Error in bot.")
| JjayaitchH/BehaviorTrees | behavior_tree_bot/bt_bot.py | bt_bot.py | py | 3,634 | python | en | code | 2 | github-code | 36 |
74574880744 | def process(fileName):
    """Read one Hash Code input file, solve it, and print/write the result."""
    # Print data to console
    print("")
    print("-----------------------")
    print(fileName)
    print("-----------------------")
    # Read the open file by name
    inputFile = open(inputFilesDirectory + fileName + ".in", "rt")
    # Read file: line 1 = parameters, line 2 = pizza slice counts
    firstLine = inputFile.readline()
    secondLine = inputFile.readline()
    inputFile.close()
    # Print input data
    print("INPUT")
    print(firstLine)
    print(secondLine)
    # Assign parameters (NUM, the pizza count, is not used below)
    MAX, NUM = list(map(int, firstLine.split()))
    # Create the pizza list by reading the file
    inputList = list(map(int, secondLine.split()))
    outputList = solve(MAX, inputList)  # Solve the problem and get output
    # Print output data and create output file
    print("")
    print("OUTPUT")
    print(len(outputList))
    # Space-separated chosen indices (with a trailing space, as submitted).
    outputString = ""
    for l in outputList:
        outputString = outputString + str(l) + " "
    print(outputString)
    outputFile = open(outputFilesDirectory + fileName + ".out", "w")
    outputFile.write(str(len(outputList)) + "\n")
    outputFile.write(outputString)
    outputFile.close()
def solve(MAX, inputList):
    """Choose pizza indices whose slice counts sum to at most ``MAX``.

    Replaces the previous placeholder (which returned the int ``1`` and
    crashed ``process`` at ``len(outputList)``). Greedy strategy: scan from
    the largest slice counts and take every pizza that still fits.
    Assumes inputList is sorted ascending, as in the Hash Code data sets —
    TODO confirm for new inputs.

    Args:
        MAX: maximum total number of slices allowed.
        inputList: slice count of each pizza, indexed by position.

    Returns:
        Chosen pizza indices in increasing order (possibly empty).
    """
    remaining = MAX
    chosen = []
    for i in range(len(inputList) - 1, -1, -1):
        if inputList[i] <= remaining:
            chosen.append(i)
            remaining -= inputList[i]
        if remaining == 0:
            break
    return sorted(chosen)
inputFilesDirectory = "Input/" # Location of input files
outputFilesDirectory = "Output/" # Location of output files
fileNames = ["a_example", "b_small", "c_medium",
"d_quite_big", "e_also_big"] # File names
for fileName in fileNames: # Take each and every file and solve
process(fileName) | jaswanth001/Hashcode2020 | filehandling.py | filehandling.py | py | 1,517 | python | en | code | 0 | github-code | 36 |
29858374038 | '''
Created on 9 Apr 2019
@author: qubix
'''
from typing import Tuple
import numpy as np
from sklearn.base import BaseEstimator
from modAL.utils.data import modALinput
from math import floor
from asreview.query_strategies.max_sampling import max_sampling
from asreview.query_strategies.random_sampling import random_sampling
def rand_max_sampling(classifier: BaseEstimator,
                      X: modALinput,
                      pool_idx=None,
                      n_instances: int = 1,
                      query_kwargs=None,
                      **kwargs
                      ) -> Tuple[np.ndarray, modALinput]:
    """
    Combination of random and maximum sampling.

    By default samples the 95% of the instances with max sampling,
    and 5% of the samples with random sampling.

    Parameters
    ----------
    classifier: BaseEstimator
        The classifier for which the labels are to be queried.
    X: modALinput
        The whole input matrix.
    pool_idx: np.array
        Indices of samples that are in the pool.
    n_instances: int
        Total number of samples to be queried.
    query_kwargs: dict
        Dictionary to pass through settings (such as the max/rand ratio),
        as well as the indices that were obtained using max & random
        sampling. Defaults to a fresh dict per call.
    **kwargs:
        Keyword arguments to be passed on to random/max sampling.

    Returns
    -------
    np.ndarray, modALinput
        The indices of the instances from X chosen to be labelled;
        the instances from X chosen to be labelled.
    """
    # Avoid the shared mutable-default pitfall: the previous `={}` default
    # was mutated below and therefore reused across calls.
    if query_kwargs is None:
        query_kwargs = {}
    n_samples = X.shape[0]
    if pool_idx is None:
        pool_idx = np.arange(n_samples)

    # Set the fraction of maximum sampling. Defaults to 95% max, 5% rand.
    rand_max_frac = query_kwargs.get('rand_max_frac', 0.05)
    max_frac = 1-rand_max_frac

    # Get the discrete number of instances for rand/max sampling;
    # randomize the rounding so the expectation matches the fraction.
    n_instance_max = floor(n_instances*max_frac)
    if np.random.random_sample() < n_instances*max_frac-n_instance_max:
        n_instance_max += 1
    n_instance_rand = n_instances-n_instance_max

    # Do max sampling.
    max_idx, _ = max_sampling(classifier, X, pool_idx=pool_idx,
                              n_instances=n_instance_max,
                              query_kwargs=query_kwargs,
                              **kwargs)

    # Remove indices found with max sampling from the pool.
    query_idx = np.delete(np.arange(n_samples), pool_idx, axis=0)
    query_idx = np.append(query_idx, max_idx)
    new_pool_idx = np.delete(np.arange(n_samples), query_idx, axis=0)

    # Random sampling on the reduced pool.
    rand_idx, _ = random_sampling(classifier, X, pool_idx=new_pool_idx,
                                  n_instances=n_instance_rand,
                                  query_kwargs=query_kwargs,
                                  **kwargs)

    # Record which strategy produced which indices. Use the builtin `int`
    # dtype: the `np.int` alias was deprecated in NumPy 1.20 and removed
    # in 1.24.
    if "max" not in query_kwargs['src_query_idx']:
        query_kwargs["src_query_idx"]["max"] = np.array(max_idx, dtype=int)
    else:
        query_kwargs["src_query_idx"]["max"] = np.append(
            query_kwargs["src_query_idx"]["max"], max_idx)

    if "random" not in query_kwargs['src_query_idx']:
        query_kwargs["src_query_idx"]["random"] = np.array(
            rand_idx, dtype=int)
    else:
        query_kwargs["src_query_idx"]["random"] = np.append(
            query_kwargs["src_query_idx"]["random"], rand_idx)

    query_kwargs['rand_max_frac'] = rand_max_frac
    # Slice bounds of each strategy's portion within the returned indices.
    query_kwargs['last_bounds'] = [
        ("max", 0, n_instance_max),
        ("random", n_instance_max, n_instances),
    ]
    query_idx = np.append(max_idx, rand_idx)
    return query_idx, X[query_idx]
| syuanuvt/automated-systematic-review | asreview/query_strategies/rand_max.py | rand_max.py | py | 3,622 | python | en | code | null | github-code | 36 |
27688294553 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Repoint uploader/owner foreign keys at AUTH_USER_MODEL; drop SiteOwner."""

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        # image.uploaded_by now references the configured user model.
        migrations.AlterField(
            model_name='image',
            name='uploaded_by',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        # website.owner likewise switches to the user model.
        migrations.AlterField(
            model_name='website',
            name='owner',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        # The intermediate SiteOwner model is no longer needed.
        migrations.DeleteModel(
            name='SiteOwner',
        ),
    ]
| seanlinxs/content-console | main/migrations/0002_auto_20151201_1309.py | 0002_auto_20151201_1309.py | py | 681 | python | en | code | 0 | github-code | 36 |
31691135600 | """
Module: libfmp.c8.c8s1_hps
Author: Meinard Müller, Frank Zalkow
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the FMP Notebooks (https://www.audiolabs-erlangen.de/FMP)
"""
from collections import OrderedDict
import numpy as np
from scipy import signal
import librosa
import IPython.display as ipd
import pandas as pd
def median_filter_horizontal(x, filter_len):
    """Median-filter each row of ``x`` (i.e. along the time axis).

    Notebook: C8/C8S1_HPS.ipynb
    """
    kernel_shape = [1, filter_len]  # 1 along frequency, filter_len along time
    return signal.medfilt(x, kernel_shape)
def median_filter_vertical(x, filter_len):
    """Median-filter each column of ``x`` (i.e. along the frequency axis).

    Notebook: C8/C8S1_HPS.ipynb
    """
    kernel_shape = [filter_len, 1]  # filter_len along frequency, 1 along time
    return signal.medfilt(x, kernel_shape)
def convert_l_sec_to_frames(L_h_sec, Fs=22050, N=1024, H=512):
    """Convert a horizontal filter length from seconds to STFT frames.

    Notebook: C8/C8S1_HPS.ipynb
    """
    # Frames are spaced H samples apart, so seconds * Fs / H frames,
    # rounded up to cover the requested duration.
    return int(np.ceil(L_h_sec * Fs / H))
def convert_l_hertz_to_bins(L_p_Hz, Fs=22050, N=1024, H=512):
    """Convert a vertical filter length from Hertz to frequency bins.

    Notebook: C8/C8S1_HPS.ipynb
    """
    # Bins are spaced Fs / N Hertz apart, so Hertz * N / Fs bins,
    # rounded up to cover the requested bandwidth.
    return int(np.ceil(L_p_Hz * N / Fs))
def make_integer_odd(n):
    """Return ``n`` unchanged if odd, otherwise ``n + 1``.

    Notebook: C8/C8S1_HPS.ipynb
    """
    return n if n % 2 else n + 1
def hps(x, Fs, N, H, L_h, L_p, L_unit='physical', mask='binary', eps=0.00001, detail=False):
    """Harmonic-percussive separation (HPS) algorithm

    Notebook: C8/C8S1_HPS.ipynb

    Args:
        x: Input signal
        Fs: Sampling rate of x
        N: Frame length
        H: Hopsize
        L_h: Horizontal median filter length given in seconds or frames
        L_p: Percussive median filter length given in Hertz or bins
        L_unit: Adjusts unit, either 'pyhsical' or 'indices'
        mask: Either 'binary' or 'soft'
        eps: Parameter used in soft masking. The previous implementation
            silently overwrote this argument with 0.00001; the parameter is
            now honored, and its default is set to that same value so
            default calls behave identically.
        detail (bool): Returns detailed information

    Returns:
        x_h: Harmonic signal
        x_p: Percussive signal
        dict: dictionary containing detailed information; returned if "detail=True"
    """
    assert L_unit in ['physical', 'indices']
    assert mask in ['binary', 'soft']
    # stft
    X = librosa.stft(x, n_fft=N, hop_length=H, win_length=N, window='hann', center=True, pad_mode='constant')
    # power spectrogram
    Y = np.abs(X) ** 2
    # median filtering: convert physical units, force odd kernel lengths
    if L_unit == 'physical':
        L_h = convert_l_sec_to_frames(L_h_sec=L_h, Fs=Fs, N=N, H=H)
        L_p = convert_l_hertz_to_bins(L_p_Hz=L_p, Fs=Fs, N=N, H=H)
    L_h = make_integer_odd(L_h)
    L_p = make_integer_odd(L_p)
    # Horizontal smoothing enhances harmonic ridges, vertical percussive ones.
    Y_h = signal.medfilt(Y, [1, L_h])
    Y_p = signal.medfilt(Y, [L_p, 1])

    # masking
    if mask == 'binary':
        M_h = np.int8(Y_h >= Y_p)
        M_p = np.int8(Y_h < Y_p)
    if mask == 'soft':
        # Bug fix: `eps` used to be reassigned here, ignoring the caller's
        # argument; the parameter value is now used directly.
        M_h = (Y_h + eps / 2) / (Y_h + Y_p + eps)
        M_p = (Y_p + eps / 2) / (Y_h + Y_p + eps)
    X_h = X * M_h
    X_p = X * M_p

    # istft: resynthesize both components at the original length
    x_h = librosa.istft(X_h, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
    x_p = librosa.istft(X_p, hop_length=H, win_length=N, window='hann', center=True, length=x.size)

    if detail:
        return x_h, x_p, dict(Y_h=Y_h, Y_p=Y_p, M_h=M_h, M_p=M_p, X_h=X_h, X_p=X_p)
    else:
        return x_h, x_p
def generate_audio_tag_html_list(list_x, Fs, width='150', height='40'):
    """Render each signal as an inline HTML <audio> tag sized for a table.

    Notebook: C8/C8S1_HPS.ipynb
    """
    styled_open_tag = '<audio style="width: ' + width + 'px; height: ' + height + 'px;"'
    tags = []
    for signal_x in list_x:
        raw_html = ipd.Audio(signal_x, rate=Fs)._repr_html_()
        cleaned = raw_html.replace('\n', '').strip()
        # Inject the fixed width/height style into the opening tag.
        tags.append(cleaned.replace('<audio ', styled_open_tag))
    return tags
def hrps(x, Fs, N, H, L_h, L_p, beta=2, L_unit='physical', detail=False):
    """Harmonic-residual-percussive separation (HRPS) algorithm

    Notebook: C8/C8S1_HPS.ipynb

    Args:
        x: Input signal
        Fs: Sampling rate of x
        N: Frame length
        H: Hopsize
        L_h: Horizontal median filter length given in seconds or frames
        L_p: Percussive median filter length given in Hertz or bins
        beta: Separation factor
        L_unit: Adjusts unit, either 'pyhsical' or 'indices'
        detail (bool): Returns detailed information

    Returns:
        x_h: Harmonic signal
        x_p: Percussive signal
        x_r: Residual signal
        dict: dictionary containing detailed information; returned if "detail=True"
    """
    assert L_unit in ['physical', 'indices']
    # Analysis STFT and its power spectrogram.
    X = librosa.stft(x, n_fft=N, hop_length=H, win_length=N, window='hann', center=True, pad_mode='constant')
    Y = np.abs(X) ** 2
    # Translate filter lengths into frame/bin units if needed; kernels must be odd.
    if L_unit == 'physical':
        L_h = convert_l_sec_to_frames(L_h_sec=L_h, Fs=Fs, N=N, H=H)
        L_p = convert_l_hertz_to_bins(L_p_Hz=L_p, Fs=Fs, N=N, H=H)
    L_h = make_integer_odd(L_h)
    L_p = make_integer_odd(L_p)
    # Horizontal filtering enhances harmonic ridges, vertical percussive ones.
    Y_h = signal.medfilt(Y, [1, L_h])
    Y_p = signal.medfilt(Y, [L_p, 1])
    # Binary masks with separation factor beta; whatever is neither clearly
    # harmonic nor clearly percussive falls into the residual mask.
    M_h = np.int8(Y_h >= beta * Y_p)
    M_p = np.int8(Y_p > beta * Y_h)
    M_r = 1 - (M_h + M_p)
    X_h = X * M_h
    X_p = X * M_p
    X_r = X * M_r
    # Resynthesize each component at the original signal length.
    x_h = librosa.istft(X_h, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
    x_p = librosa.istft(X_p, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
    x_r = librosa.istft(X_r, hop_length=H, win_length=N, window='hann', center=True, length=x.size)
    if detail:
        return x_h, x_p, x_r, dict(Y_h=Y_h, Y_p=Y_p, M_h=M_h, M_r=M_r, M_p=M_p, X_h=X_h, X_r=X_r, X_p=X_p)
    return x_h, x_p, x_r
def experiment_hrps_parameter(fn_wav, param_list):
    """Run HRPS over a list of parameter sets and display an HTML table.

    Script for running experiment over parameter list [[1024, 256, 0.1, 100], ...
    Notebook: C8/C8S1_HRPS.ipynb

    Args:
        fn_wav: Path to the input audio file.
        param_list: Iterable of (N, H, L_h_sec, L_p_Hz, beta) parameter sets.
    """
    Fs = 22050
    x, Fs = librosa.load(fn_wav, sr=Fs)
    list_x = []
    list_x_h = []
    list_x_p = []
    list_x_r = []
    list_N = []
    list_H = []
    list_L_h_sec = []
    list_L_p_Hz = []
    list_L_h = []
    list_L_p = []
    list_beta = []
    for param in param_list:
        N, H, L_h_sec, L_p_Hz, beta = param
        print('N=%4d, H=%4d, L_h_sec=%4.2f, L_p_Hz=%3.1f, beta=%3.1f' % (N, H, L_h_sec, L_p_Hz, beta))
        # Bug fix: the separation previously ran with hard-coded N=1024,
        # H=512, ignoring this parameter set's N and H even though they
        # were printed above and tabulated below.
        x_h, x_p, x_r = hrps(x, Fs=Fs, N=N, H=H, L_h=L_h_sec, L_p=L_p_Hz, beta=beta)
        L_h = convert_l_sec_to_frames(L_h_sec=L_h_sec, Fs=Fs, N=N, H=H)
        L_p = convert_l_hertz_to_bins(L_p_Hz=L_p_Hz, Fs=Fs, N=N, H=H)
        list_x.append(x)
        list_x_h.append(x_h)
        list_x_p.append(x_p)
        list_x_r.append(x_r)
        list_N.append(N)
        list_H.append(H)
        list_L_h_sec.append(L_h_sec)
        list_L_p_Hz.append(L_p_Hz)
        list_L_h.append(L_h)
        list_L_p.append(L_p)
        list_beta.append(beta)
    # Inline <audio> players for the original and each separated component.
    html_x = generate_audio_tag_html_list(list_x, Fs=Fs)
    html_x_h = generate_audio_tag_html_list(list_x_h, Fs=Fs)
    html_x_p = generate_audio_tag_html_list(list_x_p, Fs=Fs)
    html_x_r = generate_audio_tag_html_list(list_x_r, Fs=Fs)

    pd.options.display.float_format = '{:,.1f}'.format
    pd.set_option('display.max_colwidth', None)
    df = pd.DataFrame(OrderedDict([
        ('$N$', list_N),
        ('$H$', list_H),
        ('$L_h$ (sec)', list_L_h_sec),
        ('$L_p$ (Hz)', list_L_p_Hz),
        ('$L_h$', list_L_h),
        ('$L_p$', list_L_p),
        ('$\\beta$', list_beta),
        ('$x$', html_x),
        ('$x_h$', html_x_h),
        ('$x_r$', html_x_r),
        ('$x_p$', html_x_p)]))
    df.index = np.arange(1, len(df) + 1)
    # escape=False keeps the raw <audio> tags playable in the notebook.
    ipd.display(ipd.HTML(df.to_html(escape=False, index=False)))
| christofw/pitchclass_mctc | libfmp/c8/c8s1_hps.py | c8s1_hps.py | py | 8,127 | python | en | code | 20 | github-code | 36 |
26771069266 | from pyowm import OWM
from pyowm.utils import config
from pyowm.utils import timestamps
from config import owm_key
owm = OWM(owm_key)
mgr = owm.weather_manager()
# info on looking up cities.
#To make it more precise put the city's name, comma, 2-letter country code (ISO3166). You will get all proper cities in chosen country.
#The order is important - the first is city name then comma then country. Example - London, GB or New York, US.
city = 'Leesburg,US'
short_city = city.split(",", 1)[0]
# Creating an empty "database" or dictionary.
# I'm using this to train myself in dictionaries and how to read them.
database = {}
# Searching location for data on owm
location = mgr.weather_at_place(city)
w = location.weather
# Create key + value
database['wind'] = w.wind()
database['temp'] = w.temperature('fahrenheit')
# printing out the default looking dictionary
print(database)
# Print out the status of the dictionary
print(f"Wind Speed for {city} : {database['wind']['speed']}")
# Printing out the temp instead
print(f"Temp for {short_city} = {database['temp']['temp']}")
print(f"The high for today in {short_city} is, {database['temp']['temp_max']}") | shelmus/owm_weather | weather_dictionary.py | weather_dictionary.py | py | 1,164 | python | en | code | 0 | github-code | 36 |
483095221 | import requests
from time import sleep
#听写单词扣词验证PRE环境
header={"Authorization":"Bearer eyJhbGciOiJIUzUxMiJ9.eyJqdGkiOiIxNTYyNjI5MDYwMDc1MTAyMjA5Iiwic3ViIjoie1wiaWRcIjoxNTYyNjI5MDYwMDc1MTAyMjA5LFwibW9iaWxlXCI6XCIrODYxODM4NDI1MzUwNlwifSIsImV4cCI6MTcwMTY3NzU1M30.ByAdhAfbxwS5tTbkbSJIPJXN6bIrzoOjeWMwn6JA8pimm2v1fMTXVJfdXloqInXPY_FsTlc7ZPDwxlCGtFqQ5Q",
"User-Uid":"1562629060075102209",
"Kid-Uid":"1562629060075102209"}
DataGetTcheBox={"uid":1562629060075102209}
#获取教材版本接口
GetTeacherBoxUrl="https://hear-pre.abctime.com/v1/dictation/textbook"
#获取年级信息
GetTcheBoxInfo=requests.post(url=GetTeacherBoxUrl,json=DataGetTcheBox,headers=header)
# print(GetTcheBoxInfo.json()["data"]['grade_list'][0])
# print(GetTcheBoxInfo.json()["data"]['grade_list'][0]['textbook_list'])
bookErrorList=[]
worderror=[]
for grade_id in range(19):
# print(textbook_id)
sleep(1)
for textbook_id in range(1,len(GetTcheBoxInfo.json()["data"]['grade_list'][grade_id]['textbook_list'])+1):
grade_name = GetTcheBoxInfo.json()["data"]['grade_list'][grade_id]['grade_name']
# print(grade_name)
JX = GetTcheBoxInfo.json()["data"]['grade_list'][grade_id]['textbook_list'][textbook_id-1]['textbook_name']
sleep(1)
# 获取每本教材的单元
GetRescourseUrl = "https://hear-pre.abctime.com/v1/dictation/rescourse"
DataGetRescourse = {"grade_id": grade_id+1, "publisher_id": textbook_id, "uid": 1562629060075102209}
GetDataGetRescourse = requests.post(headers=header, json=DataGetRescourse, url=GetRescourseUrl)
# print("年级教材版本:",GetDataGetRescourse.json()['data'])
try:
for i in range(len(GetDataGetRescourse.json()['data']['resource_list'])):
# print(GetDataGetRescourse.json()['data']['resource_list'][i])
book_id=GetDataGetRescourse.json()['data']['resource_list'][i]['unit_id']
DY=book_id
publisher_idd=GetDataGetRescourse.json()['data']['resource_list'][i]['unit_id']
sleep(1)
# print('book_id',book_id)
# 选择单词
selectUrl = "https://hear-pre.abctime.com/v1/dictation/select"
selectData = {"book_id": book_id, "type": 1, "uid": 1562629060075102209}
selctreq = requests.post(json=selectData, url=selectUrl, headers=header)
sleep(1)
# print("选择单词:",selctreq.json()['data']['words_list'])
#遍历保存出单词和单词ids
wordss=[]
wordIdss =[]
for words in range(len(selctreq.json()['data']['words_list'])):
wordsEnd=selctreq.json()['data']['words_list'][words]['word']
# print("单词:",wordsEnd)
wordss.append(wordsEnd)
wordidEnd = selctreq.json()['data']['words_list'][words]['word_id']
# print("单词id:", wordidEnd)
wordIdss.append(wordidEnd)
sleep(1)
# 扣词接口
value=len(wordss)
deductionUrl = 'https://hear-pre.abctime.com/v1/dictation/deduction'
dataDeduction = {"pictureBookIds": [book_id], "value": value,
"word": wordss,
"wordIds": wordIdss, "uid": 1562629060075102209}
deductionReq=requests.post(url=deductionUrl,json=dataDeduction,headers=header)
# print("扣词请求:",deductionReq.json())
# Errorlist=[]
if deductionReq.json()['code']=="200" :
print('年级:',grade_name, '教材:',JX, '单元:',book_id, '正常!')
# print(deductionReq.json())
else:
print('年级:',grade_name, '教材:',JX, '单元:',book_id, '扣词异常!')
print("选择单词",selctreq.json())
worderror.append([grade_name,JX,book_id,[selctreq.json()]])
except:
print("异常请求",GetDataGetRescourse.json())
print('年级:', grade_name, '教材:', JX)
print("请求参数:",DataGetRescourse)
bookErrorList.append([grade_name,JX])
continue
print('教材无单词数据',bookErrorList)
print("扣词异常",worderror)
| wengyuanpei/pandaInterfaceTest | testCase/TingXieWordsCheck.py | TingXieWordsCheck.py | py | 4,624 | python | en | code | 0 | github-code | 36 |
73627488425 | '''
Dependencies: gettext, playsound
installing
$ pip install gTTS pyttsx3 playsound soundfile transformers datasets sentencepiece
$ pip install playsound (may need to use "$ pip install --upgrade wheel" if install fails)
'''
import gtts
from playsound import playsound
# Read the text to be spoken from the sample file.
with open("sample.ini") as fileDescriptor:
    data = fileDescriptor.read()
# Synthesize the text with Google TTS, save it, then play it back.
tts = gtts.gTTS(data)
tts.save("audioReader.mp3")
playsound("audioReader.mp3")
| vvMaxwell/U5L2 | audio.py | audio.py | py | 428 | python | en | code | 0 | github-code | 36 |
30568657844 | import matplotlib.pyplot as plt
import numpy as np
# Render all figure text with LaTeX (needed for the \mathcal labels below).
plt.rcParams["text.usetex"] = True
# Shared figure styling constants.
LEGEND_FONTSIZE = 20
TICK_LABEL_FONTSIZE = 20
AXIS_LABEL_FONTSIZE = 20
TITLE_FONTSIZE = 20
CHART_SIZE = [10, 6]
LONG_CHART_SIZE = [10, 10]
def do_nothing_Rt_plot(Rt_dict, fname=None, ps=True):
    """Plot R_t over time for each R0 scenario; optionally save and show."""
    fig, ax = plt.subplots(1, 1, figsize=CHART_SIZE)
    ax.set_xlabel("Time (days)", fontsize=AXIS_LABEL_FONTSIZE)
    ax.set_ylabel(r"$\mathcal{R}_t$", fontsize=AXIS_LABEL_FONTSIZE)
    # One curve per basic reproduction number.
    for R0, series in Rt_dict.items():
        ax.plot(series["t"], series["Rt"], label=f"{R0:.1f}")
    ax.legend(
        loc="best",
        title=r"$\mathcal{R}_0$",
        fontsize=LEGEND_FONTSIZE,
        title_fontsize=TITLE_FONTSIZE,
    )
    ax.tick_params(axis="both", which="major", labelsize=TICK_LABEL_FONTSIZE)
    if fname is not None:
        fig.savefig(fname)
    if ps:
        plt.show()
def do_nothing_hospital_plot(region_dict, fname=None, ps=True):
    """Draw a 2x2 grid of hospital-occupancy panels, one per R0 value."""
    fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=CHART_SIZE)
    positions = ((0, 0), (0, 1), (1, 0), (1, 1))
    for panel, ((row, col), R0) in enumerate(zip(positions, region_dict)):
        # Only the bottom-row panels get the time-axis label.
        hospital_do_nothing_plot(ax[row, col], region_dict[R0], R0, xlabel=(row == 1))
        if panel < 3:
            # Restart the color cycle so every panel uses the same
            # region-to-color mapping.
            plt.gca().set_prop_cycle(None)
    fig.suptitle(r"Beds Occupied $(\%N)$", fontsize=TITLE_FONTSIZE)
    if fname is not None:
        fig.savefig(fname)
    if ps:
        plt.show()
def hospital_do_nothing_plot(ax, Hdict, R0, xlabel=True):
    """Plot per-region hospital occupancy on one axes, titled by R0."""
    for region, series in Hdict.items():
        # Scale bed counts by the region's pN factor (percent of N).
        ax.plot(series["t"], series["H"] * series["pN"], label=region)
    if xlabel:
        ax.set_xlabel("Time (days)", fontsize=AXIS_LABEL_FONTSIZE)
    ax.legend(
        loc="best",
        title="Region",
        fontsize=LEGEND_FONTSIZE,
        title_fontsize=TITLE_FONTSIZE,
    )
    ax.tick_params(axis="both", which="major", labelsize=TICK_LABEL_FONTSIZE)
    ax.set_title(rf"$\mathcal{{R}}_0$ = {R0:.1f}", fontsize=TITLE_FONTSIZE)
def do_nothing_deaths_plot(region_dict, region_abm_dict, fname=None, ps=True):
    """Plot deaths vs R0 per region: deterministic curves plus ABM error bars."""
    fig, ax = plt.subplots(1, 1, figsize=CHART_SIZE)
    for region in region_dict:
        (line,) = ax.plot(
            region_dict[region]["R0"],
            region_dict[region]["D"],
            label=region,
        )
        # Asymmetric error bars: distance from the mean down to the lower
        # bound and up to the upper bound.
        yerr = [
            np.array(region_abm_dict[region]["D_mean"])
            - np.array(region_abm_dict[region]["D_lb"]),
            np.array(region_abm_dict[region]["D_ub"])
            - np.array(region_abm_dict[region]["D_mean"]),
        ]
        ax.errorbar(
            region_abm_dict[region]["R0"],
            region_abm_dict[region]["D_mean"],
            yerr,
            label=f"PR: {region}",
            fmt=".",
            # Reuse the curve's color so ABM markers match their region.
            c=line.get_color(),
            capsize=5,
            # c="k",
        )
    ax.set_xlabel(r"$\mathcal{R}_0$", fontsize=AXIS_LABEL_FONTSIZE)
    ax.set_ylabel(r"Dead individuals $(\%N)$", fontsize=AXIS_LABEL_FONTSIZE)
    ax.legend(
        loc="best",
        title="Region",
        fontsize=LEGEND_FONTSIZE,
        title_fontsize=TITLE_FONTSIZE,
    )
    ax.tick_params(axis="both", which="major", labelsize=TICK_LABEL_FONTSIZE)
    if not (fname is None):
        fig.savefig(fname)
    if ps:
        plt.show()
| jvanyperen/exploring-interventions-manuscript | plotting_scripts/do_nothing_plots.py | do_nothing_plots.py | py | 3,549 | python | en | code | 0 | github-code | 36 |
6241790730 | """
PRL 115, 114801 (2015)
Please keep the Python style guide of PEP8: pep8.org.
"""
# %%
import numpy as np
from scipy.special import jv
# %%
# Constants
C = 299792458  # speed of light (m/s)
EV = 1.60217662e-19  # elementary charge / eV-to-joule conversion (C)
# Machine parameters, to be checked from logbook
C1 = 1
C2 = 0.87
lambdaFEL = 50.52e-9 + 0.07e-9
# Other parameters
E0 = 1.16867e9 * EV  # electron beam nominal energy (J)
sigmaE = 150e3 * EV  # electron beam energy spread (J)
R56 = 50e-6  # dispersive strength
ebeamlinchirp = 0.19e6 * EV / 1e-12  # electron beam cubic chirp
ebeamquadchirp = 5.42e6 * EV / 1e-12 ** 2  # electron beam quadratic chirp
n = 5  # harmonic number
lambdaseed = lambdaFEL * n  # seed laser wavelength
k1 = 2 * np.pi / lambdaseed  # seed laser wave number
tau10 = 130e-15  # first seed transform-limited pulse duration
GDD1 = 0  # first seed linear frequency (quadratic phase) chirp
# Chirped pulse duration; reduces to tau10 when GDD1 = 0.
tau1 = (1 + (4*np.log(2)*GDD1/tau10**2) ** 2) ** 0.5 * tau10
tau20 = tau10  # second seed transform-limited pulse duration
GDD2 = 0  # second seed linear frequency (quadratic phase) chirp
tau2 = (1 + (4*np.log(2)*GDD2/tau20**2) ** 2) ** 0.5 * tau20
deltat = 150e-15  # separation between the seeds
def output(t: (float, np.ndarray)) -> (float, np.ndarray):
    """Return the time-domain bunching (proportional to the FEL field) at time t.

    NOTE(review): with GDD1 = GDD2 = 0 the Psi denominators divide by a
    NumPy zero, yielding inf (with a RuntimeWarning) so Psi becomes 0 —
    the correct unchirped limit, but it relies on NumPy float semantics.
    """
    # Quadratic spectral phase of each seed pulse.
    Psi1 = 1 / (2*GDD1 + tau10**4/(8*np.log(2)**2*GDD1)) * t ** 2
    Psi2 = 1 / (2*GDD2 + tau20**4/(8*np.log(2)**2*GDD2)) * (t - deltat) ** 2
    # Fitted relative phase and electron-beam timing offset.
    deltaphi = 3.146894088480846
    ebeamtiming = 1.966066329749903e-12
    seedfield = (
        C1 * np.exp(-2*np.log(2)*t**2/tau1**2) * np.exp(1j*Psi1)
        + C2 * np.exp(-2*np.log(2)*(t-deltat)**2/tau2**2) * np.exp(1j*Psi2) * np.exp(1j*deltaphi))  # seed electric field; first seed centered at time=0 fs
    seedenvelope = np.abs(seedfield) ** 2  # seed envelope
    seedphase = np.unwrap(np.angle(seedfield))  # seed phase
    A0 = 3  # amplitude of the energy modulation of the electron beam induced by the seeds
    A = A0 * seedenvelope ** 0.5
    B = R56 * k1 * sigmaE / E0  # normalized dispersive strength
    ebeamenergyprofile = (
        E0
        + ebeamlinchirp * (t - ebeamtiming)
        + (1/2) * ebeamquadchirp * (t - ebeamtiming) ** 2
    )
    # electron beam energy profile induces a phase onto the FEL pulse
    ebeamphase = B / sigmaE * ebeamenergyprofile
    # bunching (proportional to the FEL electric field) in the time domain
    return (np.exp(-(n*B)**2/2)
            * jv(n, -n*B*A)
            * np.exp(1j*n*seedphase)
            * np.exp(1j*n*ebeamphase))
# %%
# Sample the bunching on a uniform time grid and compute its spectrum.
t = np.linspace(-5.125e-12, 5.275e-12, 2 ** 12, endpoint=False)
wave = output(t)
# Absolute frequency axis: carrier (harmonic of the seed) plus FFT offsets.
freq = C * n / lambdaseed + np.fft.fftshift(np.fft.fftfreq(t.shape[0], t[1] - t[0]))
x = C / freq * 1e9  # wavelength axis in nm
y = np.abs(np.fft.fftshift(np.fft.fft(np.fft.ifftshift(wave)))) ** 2
# %%
import matplotlib.pyplot as plt
plt.plot(x, y)
plt.xlim(50.5, 50.8)
plt.grid(True)
plt.show()
| DaehyunPY/FERMI_20149100 | Scripts/phase_locked.py | phase_locked.py | py | 2,821 | python | en | code | 0 | github-code | 36 |
2698417886 | from django.shortcuts import render
from markdown import markdown
from .models import *
from django.http import HttpResponseRedirect
def forbid_zhihu(request):
    """Render the static 'forbidden' page for Zhihu-related traffic."""
    return render(request, 'forbidden_zhihu.html')
def index_redirect(request):
    """Redirect visitors to the external blog host."""
    return HttpResponseRedirect('http://blog.alphamj.cn/')
def index(request):
    """Render previews of every article with the classification sidebar."""
    context = {
        'navigation': 'nav_classification.html',
        'articles': Article.objects.all(),
        'nav_classifications': Classifications.objects.all(),
    }
    return render(request, 'article_preview.html', context)
def show_article(request, article_id):
    """Render one article with its Markdown body converted to HTML."""
    article = Article.objects.get(id=article_id)
    # Bug fix: the keyword was misspelled "extentions", so the extra /
    # codehilite / toc extensions were never actually applied.
    article.content = markdown(article.content,
                               extensions=['markdown.extensions.extra',
                                           'markdown.extensions.codehilite',
                                           'markdown.extensions.toc'])
    classifications = Classifications.objects.all()
    return render(request, 'article.html', {'navigation': 'nav_classification.html',
                                            'article': article, 'nav_classifications': classifications,
                                            'classification_name': '文章分类'})
def show_article_as_classification(request, name):
    """Render previews for every article under the named classification."""
    articles = Classifications.objects.get(name=name).article_set.all()
    return render(request, 'article_preview.html',
                  {'navigation': 'nav_articles.html', 'articles': articles,
                   'nav_articles': articles, 'classification_name': '全部文章'})
def post(request):
    """Article submission view: show the form on GET, accept it on POST."""
    if request.method == 'GET':
        classifications = Classifications.objects.all()
        return render(request, 'post.html',
                      {'navigation': 'nav_classification.html', 'classifications': classifications})
    elif request.method == 'POST':
        title = request.POST.get('title')
        context = request.POST.get('context')
        # 'cls' is a *list* of selected classification names.
        cls = request.POST.getlist('cls')
        if len(title) > 0 and len(context) > 0:
            # NOTE(review): no Article is ever created/saved here, `clss` is
            # unused, and passing the list `cls` to get(name=...) looks
            # wrong — confirm the intended persistence logic.
            clss = Classifications.objects.get(name=cls)
        # Both valid and invalid submissions fall through to the index page.
        return index(request)
| w-mj/cloud-server | blog/views.py | views.py | py | 2,222 | python | en | code | 0 | github-code | 36 |
70809120103 | #!/usr/bin/python3
"""
Started a Flask web application with these scripts
the web apps was listed on 0.0.0.0, port 5000
declare @app.teardown_appcontext and storage.close()
with routes /cities_by_states: display a HTML page:
in my route def option strict_slashes=False was used
"""
from flask import Flask, render_template
from models import storage
from models.state import State
from operator import getitem
app = Flask(__name__)
@app.route('/states_list', strict_slashes=False)
def list_states():
    """List all the states to the client"""
    return render_template('7-states_list.html',
                           states=storage.all(State).values())
@app.route('/cities_by_states', strict_slashes=False)
def list_states_cities():
    """List all the states and its cities to the client"""
    return render_template('8-cities_by_states.html',
                           states=storage.all(State).values())
@app.teardown_appcontext
def close_db(db):
    """Close the storage session when the app context is torn down."""
    storage.close()
if __name__ == '__main__':
    # Listen on all interfaces so the app is reachable externally.
    app.run(host='0.0.0.0', port=5000)
| Realyoung1/AirBnB_clone_v2 | web_flask/8-cities_by_states.py | 8-cities_by_states.py | py | 1,054 | python | en | code | 0 | github-code | 36 |
27193127483 | from collections import namedtuple
import re
import string
import logging
import pickle
class Files:
    # locations of the nettalk corpus files read by the loaders below
    dictionary = "dataset/nettalk.data"
    top1000words = "dataset/nettalk.list"
    continuous = "dataset/data"
# one dictionary entry: spelling, phoneme string, stress structure, alignment
Word = namedtuple('Word', ['letters', 'phonemes', 'structure', 'correspondance'])
# the full input alphabet: lowercase letters plus comma, period and space
all_letters = string.ascii_lowercase + ',' + '.' + ' '
all_phoneme_traits = frozenset([
'front1',
'front2',
'central1',
'central2',
'back1',
'back2',
'stop',
'nasal',
'fricative',
'affricative',
'glide',
'liquid',
'voiced', # 'unvoiced' is the default
'tensed',
'high',
'medium',
'low',
'silent',
'elide',
'pause',
'full stop'
])
all_stress_traits = frozenset([
'stress1',
'stress3', # 'stress2' is the default
'syllable boundary'
])
# synonyms for the same phoneme traits
phoneme_trait_synonyms = {
'labial' : 'front1',
'dental' : 'front2',
'alveolar' : 'central1',
'palatal' : 'central2',
'velar' : 'back1',
'glottal' : 'back2'
}
# traits we can ignore because they are the defaults
phoneme_trait_defaults = set([
'unvoiced'
])
phonemes_data = [
('a', ['low', 'tensed', 'central2']),
('b', ['voiced', 'labial', 'stop']),
('c', ['unvoiced', 'velar', 'medium']),
('d', ['voiced', 'alveolar', 'stop']),
('e', ['medium', 'tensed', 'front2']),
('f', ['unvoiced', 'labial', 'fricative']),
('g', ['voiced', 'velar', 'stop']),
('h', ['unvoiced', 'glottal', 'glide']),
('i', ['high', 'tensed', 'front1']),
('k', ['unvoiced', 'velar', 'stop']),
('l', ['voiced', 'dental', 'liquid']),
('m', ['voiced', 'labial', 'nasal']),
('n', ['voiced', 'alveolar', 'nasal']),
('o', ['medium', 'tensed', 'back2']),
('p', ['unvoiced', 'labial', 'stop']),
('r', ['voiced', 'palatal', 'liquid']),
('s', ['unvoiced', 'alveolar', 'fricative']),
('t', ['unvoiced', 'alveolar', 'stop']),
('u', ['high', 'tensed', 'back2']),
('v', ['voiced', 'labial', 'fricative']),
('w', ['voiced', 'labial', 'glide']),
('x', ['medium', 'central2']),
('y', ['voiced', 'palatal', 'glide']),
('z', ['voiced', 'alveolar', 'fricative']),
('A', ['medium', 'tensed', 'front2', 'central1']),
('C', ['unvoiced', 'palatal', 'affricative']),
('D', ['voiced', 'dental', 'fricative']),
('E', ['medium', 'front1', 'front2']),
('G', ['voiced', 'velar', 'nasal']),
('I', ['high', 'front1']),
('J', ['voiced', 'velar', 'nasal']),
('K', ['unvoiced', 'palatal', 'fricative', 'velar', 'affricative']),
('L', ['voiced', 'alveolar', 'liquid']),
('M', ['voiced', 'dental', 'nasal']),
('N', ['voiced', 'palatal', 'nasal']),
('O', ['medium', 'tensed', 'central1', 'central2']),
('Q', ['voiced', 'labial', 'velar', 'affricative', 'stop']),
('R', ['voiced', 'velar', 'liquid']),
('S', ['unvoiced', 'palatal', 'fricative']),
('T', ['unvoiced', 'dental', 'fricative']),
('U', ['high', 'back1']),
('W', ['high', 'medium', 'tensed', 'central2', 'back1']),
('X', ['unvoiced', 'affricative', 'front2', 'central1']),
('Y', ['high', 'tensed', 'front1', 'front2', 'central1']),
('Z', ['voiced', 'palatal', 'fricative']),
('@', ['low', 'front2']),
('!', ['unvoiced', 'labial', 'dental', 'affricative']),
('#', ['voiced', 'palatal', 'velar', 'affricative']),
('*', ['voiced', 'glide', 'front1', 'low', 'central1']),
(':', ['high', 'front1', 'front2']),
('^', ['low', 'central1']),
('-', ['silent', 'elide']),
(' ', ['pause', 'elide']),
('.', ['pause', 'full stop'])
]
# Normalise the raw trait lists in place before freezing them.
for (name, traits) in phonemes_data:
    # map synonyms onto their canonical trait names
    for (i, trait) in enumerate(traits):
        if trait in phoneme_trait_synonyms:
            traits[i] = phoneme_trait_synonyms[trait]
    # drop default traits; rebuilding the list avoids deleting from a list
    # while enumerate() iterates it (del skips the element after each hit)
    traits[:] = [t for t in traits if t not in phoneme_trait_defaults]
# encapsulate mapped traits
phoneme_traits = dict({(name, frozenset(traits)) for name, traits in phonemes_data})
# make sure there are no errors (.values() replaces Python-2-only .itervalues())
for traits in phoneme_traits.values():
    assert traits.issubset(all_phoneme_traits), 'one is a bad trait: %s' % traits
def loadDictionary():
    """Parse the nettalk dictionary file into a {letters: Word} mapping."""
    dictionary = {}
    with open(Files.dictionary) as f:
        for raw in f:
            line = raw.strip()
            cols = line.split('\t')
            # real dictionary entries have exactly the four Word fields
            if len(cols) == 4:
                word = Word(*cols)
                dictionary[word.letters] = word
            else:
                logging.debug('skipping line: %s' % line)
    return dictionary
def loadTop1000Words(dict):
    """Return the Word entries for the 1000 most common words.

    `dict` maps spelling -> Word (see loadDictionary). The list file wraps
    the words in one parenthesized 1000-item group.
    """
    # open() replaces the Python-2-only file() builtin; handle is closed promptly
    with open(Files.top1000words) as f:
        text = f.read()
    text = re.search(r'\((\w+\b\s*){1000}\)', text).group(0)
    text = text.lower()
    words = re.findall(r'\w+', text)
    return [dict[w] for w in words]
def loadContinuous(dict):
    """Load the pickled continuous (letters, phonemes) training pair.

    `dict` is accepted for signature parity with the other loaders but is
    not used. Pickle data must be read in binary mode on Python 3; the
    file handle is now closed via a context manager.
    """
    with open(Files.continuous, 'rb') as f:
        text = pickle.load(f)
    letters = text[0]
    phonemes = text[1]
    training_set = [(letters, phonemes)]
    return training_set
# build the corpora once at import time so importers can use them directly
dictionary = loadDictionary()
top1000words = loadTop1000Words(dictionary)
continuous = loadContinuous(dictionary)
| dtingley/netwhisperer | corpus.py | corpus.py | py | 5,330 | python | en | code | 1 | github-code | 36 |
25314732697 | def solution(a):
    # global minimum; each pointer stops for good once it reaches it
    min_val = min(a)
    left, right = 0, len(a)-1
    # running minima seen so far from each end
    left_min, right_min = float('inf'), float('inf')
    cnt = 0
    while left < right:
        # count a balloon when it is a new running minimum from its side
        if a[left] < left_min:
            left_min = a[left]
            cnt += 1
        if a[right] < right_min:
            right_min = a[right]
            cnt += 1
        # advance each pointer only while it has not hit the global minimum
        if a[left] != min_val:
            left += 1
        if a[right] != min_val:
            right -= 1
return cnt | soohi0/Algorithm_study | 5월_4주/PRO_풍선터트리기/PRO_풍선터트리기_송영섭.py | PRO_풍선터트리기_송영섭.py | py | 467 | python | en | code | 0 | github-code | 36 |
70806954663 | import sys
import heapq
sys.stdin = open('input.txt')
def sol():
    """Minimise the largest edge weight on an A->B path within fuel budget C.

    Dijkstra-style search ordered by the biggest edge seen so far; the
    third heap field tracks the fuel still available on that path.
    Returns -1 when B is unreachable within the budget.
    """
    pq = [(0, A, C)]
    while pq:
        cost, node, fuel = heapq.heappop(pq)
        if weights[node] > cost:
            if node == B:
                return cost
            weights[node] = cost
            for edge_w, nxt in linked[node]:
                fuel_left = fuel - edge_w
                candidate = cost if cost > edge_w else edge_w
                if fuel_left >= 0 and candidate < weights[nxt]:
                    heapq.heappush(pq, (candidate, nxt, fuel_left))
    return -1
# N nodes, M edges; start A, goal B, fuel budget C spent on edge weights
N, M, A, B, C = map(int, sys.stdin.readline().split())
linked = [[] for _ in range(N+1)]
# best (smallest) max-edge cost found so far per node
weights = [1e10] * (N+1)
# undirected weighted edges
for _ in range(M):
    S, E, W = map(int, sys.stdin.readline().split())
    linked[S].append((W, E))
    linked[E].append((W, S))
print(sol())
| unho-lee/TIL | CodeTest/Python/BaekJoon/20182.py | 20182.py | py | 852 | python | en | code | 0 | github-code | 36 |
9915784655 | moves_number = int(input())
houses_str = input().split()
# keep only house numbers inside the valid [1, 500] range
houses = [int(house) for house in houses_str if 1 <= int(house) <= 500]
current_position = 0
for move in range(moves_number):
    input_data = input().split()
    command = input_data[0]
    index = int(input_data[1])
    # Forward/Back: move only when the target index exists, then pop it
    if command == 'Forward':
        if (index + current_position) in range(len(houses)):
            current_position += index
            houses.pop(current_position)
    elif command == 'Back':
        if (current_position - index) in range(len(houses)):
            current_position -= index
            houses.pop(current_position)
    # Gift: move to index and insert a valid house number there
    # NOTE(review): position changes even when the house value is invalid,
    # and inserting at index == len(houses) (append) is rejected -- verify
    # against the task specification.
    elif command == 'Gift' and index in range(len(houses)):
        current_position = index
        house = int(input_data[2])
        if 1 <= house <= 500:
            houses.insert(index, house)
    # Swap: exchange two houses by value; position is unchanged
    elif command == 'Swap':
        first_house_value = index
        second_house_value = int(input_data[2])
        if first_house_value in houses and second_house_value in houses:
            first_index = houses.index(first_house_value)
            second_index = houses.index(second_house_value)
            houses[first_index], houses[second_index] = houses[second_index], houses[first_index]
print(f'Position: {current_position}')
print(*houses, sep=', ')
#################################### TASK CONDITION ############################
"""
Santa’s Gifts
You will be given an array of integers, which represent the house numbers you should visit.
The commands will lead you to them. If they lead you to non-existing places, don’t move.
• Forward {numberOfSteps}
• Back {numberOfSteps}
o When you receive the “Forward” or “Back” command, you move the given number
of times in this direction and remove the house in this position from your list.
Also, when you receive the next command, you continue from this position.
• Gift {index} {houseNumber}
o Enter a new house number, which the dwarves have left out on purpose,
at the given position and move to its position.
• Swap {indexOfFirst} {indexOfSecond}
o Santa wants to rearrange his path and swap the order of two houses.
You will receive the numbers of the houses, that need to be switched
and he doesn’t need to move to fulfill this command.
Input
• On the first line you will receive the number of commands – integer in the range [1-50]
• On the second line you will receive the array of integers,
that represent the houses, split by a single space – valid integers in the range [1 – 500]
• On the next n lines, you will receive the commands in the following format:
o Forward {steps}
o Back {steps}
o Gift {index} {value}
o Swap {value1} {value2}
Output
• Print the last position and the remaining houses in the following format:
“Position {position}”
“{houseNumber}, {houseNumber}………, {houseNumber}”
Constraints
• The house numbers will be valid integers in the range [1 - 1000]
• The number of commands will be a valid integer in the range [1 - 50]
• The commands will be given in the exact format as they are written above
• There will always be at least one valid command
____________________________________________________________________________________________
Example_01
Input
255 500 54 78 98 24 30 47 69 58
Forward 1
Swap 54 47
Gift 1 20
Back 1
Forward 3
Output
Position: 3
20, 47, 78, 24, 30, 54, 69, 58 First, we receive the “Forward” command,
the sleigh will start from the beginning – index 0. He has to move 1 step,
so he will move to index 1 and delete the house number, which is stored there – 500. What is left of the list:
255 54 78 98 24 30 47 69 58
and Santa’s position is 1.
The next command is “Swap”. After it, the list looks like this:
255 47 78 98 24 30 54 69 58 and Santa’s position doesn’t change.
The “Gift” command has to insert at index 1 the house with number 20:
255 20 47 78 98 24 30 54 69 58 and move Santa to current index – 1.
The “Back” command has to move Santa back 1 step from his current position.
He is at 1 position, so he has to move back to position 0, and remove the house number, which it stores:
20 47 78 98 24 30 54 69 58
The last “Forward” command will move him three steps forward from his current
position, which is 0, so he goes to – 3 and removes the house:
20 47 78 24 30 54 69 58
6
50 40 25 63 78 54 66 77 24 87
Forward 4
Back 3
Forward 3
Gift 2 88
Swap 50 87
Forward 1 Position: 3
87, 25, 88, 54, 77, 24, 50
"""
| qceka88/Fundametals-Module | 19 Exam Preparation - Mid Exam/02santas_gitfts.py | 02santas_gitfts.py | py | 4,492 | python | en | code | 8 | github-code | 36 |
2722149163 | #! /usr/bin/env python
"""
Author: LiangLiang ZHENG
Date:
File Description
"""
from __future__ import print_function
import sys
import argparse
class Solution(object):
    def combinationSum4(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int

        Count ordered sequences drawn from nums (with repetition) that
        sum to target, via memoized top-down recursion.
        """
        # cache[t] == number of ordered sequences of nums summing to t
        cache = {}

        def count(t):
            if t < 0:
                return 0
            if t == 0:
                return 1
            if t not in cache:
                cache[t] = sum(count(t - v) for v in nums)
            return cache[t]

        return count(target)
def main():
    # placeholder entry point; the Solution class above is used directly
    pass
if __name__ == "__main__":
main()
| ZhengLiangliang1996/Leetcode_ML_Daily | Search/377_CombinationSumIV.py | 377_CombinationSumIV.py | py | 762 | python | en | code | 1 | github-code | 36 |
70123923304 | #! /usr/bin/env python
from sortrobot.neural import Classifier, OrientationClassifier
from PIL import Image
import sys, os
from optparse import OptionParser
# CLI: -o/--outdir target directory, -c/--classifier which network to use
parser = OptionParser()
parser.add_option("-o", "--outdir", dest="outdir", default=None,
        help="Directory to write sorted files. Default: same directory as file.")
parser.add_option("-c", "--classifier", dest="classifier", default='orient',
        help="Classifier from sortrobot.neural to use.")
opts, args = parser.parse_args(sys.argv[1:])
# instantiate the selected classifier (KeyError on an unknown name)
classifier = {
    'orient': OrientationClassifier,
    'color': Classifier,
}[opts.classifier]()
# classify each image file and move it into a subfolder named by its label
for i,filename in enumerate(args):
    print('{}: Reading {}'.format(i, filename))
    im = Image.open(filename)
    label = classifier.classify(im)
    print('    classified as', label)
    outdir, basename = os.path.split(filename)
    if opts.outdir is not None:
        outdir = opts.outdir
    newdir = os.path.join(outdir, label)
    if not os.path.exists(newdir):
        os.mkdir(newdir)
    print('    moving to', newdir)
    os.rename(filename, os.path.join(newdir, basename))
| AaronParsons/sortrobot | scripts/sr_sort_files.py | sr_sort_files.py | py | 1,118 | python | en | code | 0 | github-code | 36 |
9862393543 | import random
from flask import Flask, render_template, request
import tensorflow as tf
import numpy as np
from io import BytesIO
from PIL import Image
import base64
import os
# initiates flask app
app = Flask(__name__)
tf.get_logger().setLevel('ERROR')
model = None
model = tf.keras.models.load_model("prod_model.h5")
# loads in the weights of the model
model.load_weights("new_model_3000_0.h5")
# defines home route
@app.route("/")
def home():
    """Render the drawing-canvas page with an empty guess placeholder."""
    # the original assigned an unused local `send = ""` (dead code); the
    # template only ever receives the literal empty string below
    return render_template("index.html", send="")
# defines route to submit user image
@app.route("/guess", methods=["POST"])
def guess():
    """Decode the user's drawing, classify it, and render the guess page."""
    # gets the data from the image drawn by user
    image_data = request.form["image_data"]
    # saves the full image data to be used later before it is manipulated
    image_data_full = image_data
    # splits the data into the values needed to make an array
    image_data = image_data.split(",")[1]
    # decodes the data to make it usable
    decoded_data = base64.b64decode(image_data)
    # creates a PIL image
    image = Image.open(BytesIO(decoded_data)).convert('L')
    # turns image into a numpy array and preprocesses the array in the same way as the training images
    image_array = np.reshape(np.array(image).astype(float) / 255, (1,400,400,1))
    # defines the parameters of the model
    lambda_ = 0.01
    dropout_enter = 0
    dropout_exit = 0.25
    #sets the model to be used to predict what the user drew if the model couldn't be loaded
    # NOTE(review): this fallback builds the architecture but loads no
    # weights, so its predictions would be from an untrained network --
    # confirm this path is only a safety net.
    global model
    if model is None:
        model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(12, (6, 6), strides=(1, 1), padding="valid", activation="relu",
                            input_shape=(400, 400, 1), kernel_regularizer=tf.keras.regularizers.l2(lambda_)),
        tf.keras.layers.Dropout(dropout_enter),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(12, (8, 8), strides=(1, 1), padding="valid", activation="relu",
                            kernel_regularizer=tf.keras.regularizers.l2(lambda_)),
        tf.keras.layers.Dropout(dropout_enter),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(12, (10, 10), strides=(1, 1), padding="valid", activation="relu",
                            kernel_regularizer=tf.keras.regularizers.l2(lambda_)),
        tf.keras.layers.Dropout(dropout_exit),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(12, (12, 12), strides=(1, 1), padding="valid", activation="relu",
                            kernel_regularizer=tf.keras.regularizers.l2(lambda_)),
        tf.keras.layers.Dropout(dropout_exit),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(20, activation="softmax")
        ])
    prediction = model.predict(tf.convert_to_tensor(image_array))
    # turns the output of the model into a human-readable response
    index = prediction.argmax()
    categories = ["umbrella", "house", "sun", "apple", "envelope", "star", "heart",
                "lightning bolt", "cloud", "spoon", "balloon", "mug", "mountains",
                "fish", "bowtie", "ladder", "ice cream cone", "bow", "moon", "smiley"]
    # gets the path need to display an example image on the front end
    image_paths = ["umbrella", "house", "sun", "apple", "envelope", "star", "heart",
                "lightning", "cloud", "spoon", "balloon", "mug", "mountains",
                "fish", "bowtie", "ladder", "icecream", "bow", "moon", "smiley"]
    # randomly picks on of 3 images to show
    num = random.randint(1, 3)
    image_url = "Images/" + image_paths[index] + str(num) + ".png"
    send = categories[index]
    # renders a template with the guess from the model
    return render_template("guess.html", send=send, index=index, image=image_url, imagedata=image_data_full)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
| joeschueren/SketchDetect | main.py | main.py | py | 4,108 | python | en | code | 0 | github-code | 36 |
42528330524 | import requests
from bs4 import BeautifulSoup
import re
import json
import sys
import eventlet
import concurrent.futures
import Constants
class Scraper:
    """Crawls pages under a base URL and records broken links per page."""

    def __init__(self, url_to_check):
        self.BASE_URL = url_to_check
        # maps a checked page URL -> list of broken links found on it
        self.dictionary = {}
    @staticmethod
    def get_html(url):
        """Fetch *url* and return parsed HTML, or None on connection errors."""
        try:
            website_html = requests.get(url)
            html = BeautifulSoup(website_html.text, 'html.parser')
        except requests.exceptions.SSLError:
            # SSL failure is treated as fatal for the whole run
            print(Constants.WEBSITE_NOT_FOUND_ERROR)
            sys.exit(0)
        except requests.exceptions.ConnectionError:
            return None
        return html
    @staticmethod
    def get_attributes(html, base_url, tag_name, attr_name):
        """Collect attribute values for *tag_name*, resolving relative URLs."""
        links = []
        for tag in html.findAll(tag_name):
            url = str(tag.get(attr_name))
            # anything without an http(s) scheme is resolved against base_url
            if re.search("^https?://", url) is None:
                if not str(url).startswith("/") and not str(base_url).endswith("/"):
                    url = base_url + "/" + url
                elif str(url).startswith("/") and str(base_url).endswith("/"):
                    base_url = base_url[:-1]
                    url = base_url + url
                else:
                    url = base_url + url
            links.append(url)
        return links
    def get_all_urls(self, url):
        """Return all anchor hrefs on *url*, or None when the fetch failed."""
        html = self.get_html(url)
        if html:
            links = self.get_attributes(html, url, "a", "href")
            return links
    def check_the_urls(self, link_to_check):
        """Probe every link on a page; record broken ones, return valid ones."""
        all_urls = self.get_all_urls(link_to_check)
        if all_urls:
            if link_to_check.endswith("/"):
                link_to_check = link_to_check[:-1]
            # skip pages that were already checked
            if link_to_check not in self.dictionary.keys():
                for_each_broken_links = []
                valid_links = []
                for url in all_urls:
                    try:
                        # cap each probe at 10 seconds via eventlet
                        with eventlet.Timeout(10):
                            get_link = requests.get(url)
                            if get_link.status_code >= 400:
                                for_each_broken_links.append(url)
                                continue
                    except requests.exceptions.ConnectionError:
                        for_each_broken_links.append(url)
                        continue
                    if url not in valid_links:
                        valid_links.append(url)
                        print("valid url -> ", str(url))
                self.dictionary[link_to_check] = for_each_broken_links
                return valid_links
    def write(self):
        """Dump the broken-link report to file.json in the working directory."""
        with open("file.json", "w") as file:
            file.truncate(0)
            json.dump(self.dictionary, file)
def main(url, first_base_url):
    """Breadth-style crawl starting at *url*, then write the report."""
    scraper = Scraper(url)
    normal_urls = scraper.check_the_urls(url)
    while True:
        if normal_urls:
            for link in normal_urls:
                # NOTE(review): str.find() returns an index, so this is falsy
                # only when the match is at position 0 -- `!= -1` was almost
                # certainly intended. Verify the same-domain filter.
                if (link.split("//")[1]).find(str(first_base_url)) and link not in scraper.dictionary.keys():
                    with concurrent.futures.ThreadPoolExecutor() as executor:
                        future = executor.submit(scraper.check_the_urls, link)
                        return_value = future.result()
                        if return_value:
                            for value in return_value:
                                if value not in normal_urls:
                                    normal_urls.append(value)
                            if link in normal_urls:
                                normal_urls.remove(link)
                        else:
                            normal_urls.remove(link)
                        # restart iteration after mutating normal_urls
                        break
        else:
            break
    scraper.write()
| Hayk1997gh/Broken_Link_Checker | Scraper.py | Scraper.py | py | 3,659 | python | en | code | 0 | github-code | 36 |
25983391444 | #!/usr/bin/env python3
"""Setup script."""
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
    """Setup the py.test test runner."""
    def finalize_options(self):
        """Set options for the command line."""
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        """Execute the test runner command."""
        # Import here, because outside the required eggs aren't loaded yet
        import pytest
        # propagate pytest's exit status to the calling process
        sys.exit(pytest.main(self.test_args))
# Add installation instructions as well.
setup(
name='spaced-repetition',
tests_require=['pytest'],
cmdclass={
'test': PyTest
}
) | schedutron/spaced-repetition | setup.py | setup.py | py | 762 | python | en | code | 11 | github-code | 36 |
423347793 | import numpy as np
import yfinance as yf
import ta
import pandas as pd
from ta.trend import ADXIndicator
import pyxirr
def get_clean_df(ticker):
    """Download 10y of price history and add Returns/RSI/ADX indicator columns."""
    df = yf.Ticker(ticker).history(
        period="10y").reset_index()[["Date", "Close", "Dividends", 'High', "Low"]]
    # replace Close with the dividend/split-adjusted close series
    df["Close"] = yf.download(tickers=ticker, period="10y")["Adj Close"].values
    df["Returns"] = df["Close"].pct_change()
    df["RSI"] = ta.momentum.RSIIndicator(df["Close"], 14).rsi()
    adxI = ADXIndicator(df['High'], df['Low'], df['Close'], 14, True)
    df["Plus DI"] = adxI.adx_pos()
    df['Minus DI'] = adxI.adx_neg()
    df['ADX'] = adxI.adx()
    return df
def mod_df(conditions, df, reinvest_dividends):
    """Mark $100 buys on days meeting the (return, RSI, ADX) conditions.

    conditions[1] and conditions[2] are scaled by 10 into RSI/ADX
    thresholds; the final row's Portfolio value becomes the liquidation.
    """
    ret_cond = conditions[0]
    rsi_cond = conditions[1]*10
    adx_cond = conditions[2]*10
    # True when trend strength is above threshold and -DI dominates +DI
    for i in range(len(df["Returns"])):
        df.at[i, "ADX_tf"] = df.at[i, "ADX"] >= adx_cond and df.at[i,
                                                                   "Plus DI"] <= df.at[i, "Minus DI"]
    df["Portfolio Opt"] = np.zeros(len(df["RSI"].values))
    df["Buy Opt"] = np.zeros(len(df["RSI"].values))
    df = df.dropna().reset_index(drop=True)
    # buy $100 of shares on days that satisfy all three conditions
    for i in np.arange(len(df["Returns"].values)):
        if df["Returns"].values[i] < -ret_cond/100 and df["RSI"].values[i] <= rsi_cond and df["ADX_tf"].values[i] == True:
            df.at[i, "Portfolio Opt"] = -100
            df.at[i, "Buy Opt"] = 100/df["Close"].values[i]
    # duplicate the last row to hold the final liquidation value
    df = pd.concat([df, df.tail(1)], axis=0).reset_index(drop=True)
    df.loc[df.index[-1], "Portfolio Opt"] = 0
    df.loc[df.index[-1], "Buy Opt"] = 0
    if reinvest_dividends:
        df.at[0, "Holdings Opt"] = df.at[0, "Buy Opt"]
        for i in np.arange(len(df["Returns"].values)-1):
            df.at[i+1, "Holdings Opt"] = df.at[i,
                                               "Holdings Opt"] + df.at[i+1, "Buy Opt"] + (df.at[i, "Holdings Opt"] * df.at[i+1, "Dividends"])/df.at[i+1, "Close"]
        df.loc[df.index[-1], "Portfolio Opt"] = df["Close"].values[-1] * \
            df.loc[df.index[-1], "Holdings Opt"]
    return df
def get_buy_months(df):
    """Percentage of the 121 calendar months that contain at least one buy.

    Side effect: adds a 'Month Year' period column to *df*.
    """
    df['Month Year'] = df['Date'].dt.to_period('M')
    buys = df.loc[df['Portfolio Opt'] == -100, 'Month Year']
    unique_months = set(buys.values)
    return len(unique_months) / (10 * 12 + 1) * 100
def get_performance(df):
    """Annualised IRR (percent) of the 'Portfolio Opt' cash flows.

    Returns 0 when the series has no computable IRR (pyxirr raises or
    fails to converge). Removed leftover commented-out code; bare
    `except:` narrowed so KeyboardInterrupt/SystemExit are not swallowed.
    """
    try:
        xirr_value = pyxirr.xirr(
            df["Date"].values, df["Portfolio Opt"].values)*100
    except Exception:
        xirr_value = 0
    return xirr_value
def get_irr_all(df, reinvest_dividends):
    """Benchmark IRR (percent) of buying $100 every single day."""
    df["Portfolio All"] = np.zeros(len(df["RSI"].values))
    df["Buy All"] = np.zeros(len(df["RSI"].values))
    df = df.dropna().reset_index(drop=True)
    # buy $100 of shares on every day
    for i in np.arange(len(df["Returns"].values)):
        df.at[i, "Portfolio All"] = -100
        df.at[i, "Buy All"] = 100/df["Close"].values[i]
    # duplicate the last row to hold the final liquidation value
    df = pd.concat([df, df.tail(1)], axis=0).reset_index()
    df.loc[df.index[-1], "Portfolio All"] = 0
    df.loc[df.index[-1], "Buy All"] = 0
    if reinvest_dividends:
        df.at[0, "Holdings All"] = df.at[0, "Buy All"]
        for i in np.arange(len(df["Returns"].values)-1):
            df.at[i+1, "Holdings All"] = df.at[i,
                                               "Holdings All"] + df.at[i+1, "Buy All"] + (df.at[i, "Holdings All"] * df.at[i+1, "Dividends"])/df.at[i+1, "Close"]
        df.loc[df.index[-1], "Portfolio All"] = df["Close"].values[-1] * \
            df.loc[df.index[-1], "Holdings All"]
    all_irr = pyxirr.xirr(df["Date"].values, df["Portfolio All"].values)
    return all_irr*100
def iterative_function(conditions, df, reinvest_dividends, pct_trading):
    """Score one condition triple: its IRR, or 0 if it trades too rarely."""
    temp_df = mod_df(conditions, df, reinvest_dividends)
    unique_months_pct_temp = get_buy_months(temp_df)
    # require buys in at least pct_trading percent of the months
    if unique_months_pct_temp >= pct_trading:
        irr = get_performance(temp_df)
    else:
        irr = 0
    return irr
def find_best_sco(ticker):
    """Grid-search the (return, RSI, ADX) thresholds and save the results."""
    df = get_clean_df(ticker)
    conditions = [0, 0, 0]
    irr = 0
    # exhaustive 16x15x15 grid over the three condition parameters
    for ret in np.linspace(0, 5, 16):
        for rsi in np.linspace(0, 7, 15):
            for adx in np.linspace(0, 7, 15):
                conditions_temp = [ret, rsi, adx]
                irr_temp = iterative_function(
                    conditions_temp, df, True, 33)
                if irr_temp > irr:
                    irr = irr_temp
                    conditions = conditions_temp
                    print(irr, conditions)
    # bounds = ((0, 5), (0, 10), (0, 10))
    # result = sco.minimize(iterative_function, (1, 3.5, 2),
    #                       (df, reinvest_dividends, pct_trading), method="SLSQP", bounds=bounds, options={'eps': 0.01})
    all_irr = get_irr_all(df, True)
    # persist: benchmark IRR, best IRR, and the de-scaled thresholds
    np.savetxt(f"./optimise_data/{ticker}_optimise.csv",
               np.array([all_irr, irr, conditions[0], conditions[1]*10, conditions[2]*10]))
if __name__ == "__main__":
ticker = "SSSS"
find_best_sco(ticker)
# find_best_sco("ALD.PA")
# df = mod_df((1, 70, 1), get_clean_df("TTE"), True)
# print(get_clean_df("AAPL"))
| victormorizon/stable-dividend-stock-trading-strategy | functions.py | functions.py | py | 5,229 | python | en | code | 1 | github-code | 36 |
24669291411 | import requests, json
import pandas as pd
import os
from datetime import date
#from mysql.connector import connect, Error
from flatten_json import flatten
from airflow.models import Variable
'''
Connects to the edamam API and sends a request
Return: The response object from the API query
'''
def airflow_var_test( ti ):
    # smoke test: print the Edamam app id stored in Airflow Variables
    print( Variable.get('EDAMAM_ID') )
def edamam_get(ti):
    """Query the Edamam recipe API for 'chicken' and save the raw hits."""
    # Initialize Variables
    dag_path = os.getcwd()
    host = 'https://api.edamam.com/'
    recipe_base = 'api/recipes/v2'
    url = host + recipe_base
    # Xcom Pulls
    query= "chicken"
    # Initialize our config for the query; credentials come from Airflow Variables
    payload = {'type': 'public',
                'q': query,
                'app_id': Variable.get('EDAMAM_ID'),
                'app_key': Variable.get('EDAMAM_KEY')
                }
    # Send a GET request to Edamam API
    with requests.get(url, params=payload) as response:
        query_results = response.json()['hits']
    # Return the response
    write_json(query_results, f"{dag_path}/raw_data/chicken_query.json")
def parse_json_request( ti ):
    """Flatten the hits pulled from the get_edamam_request task's XCom."""
    # Initialize variables
    hits_list= ti.xcom_pull( task_ids=['get_edamam_request'][0] )
    if not hits_list:
        raise ValueError( 'no value currently in XComs.')
    # Return our cleaned up search results
    return edamam_json_cleanup( hits_list )
#[TODO] This is a redirecting function to other helper functions
# Have the return type be important for picking which filetype to convert to
def edamam_json_cleanup(json_list):
    """Prepare raw Edamam hits for tabular use by flattening each one."""
    # the hit list already excludes the query metadata; just flatten it
    return edamam_json_flatten(json_list)
def edamam_json_flatten(json_list):
    """Flatten every nested hit dict in place; return the same list."""
    for pos, hit in enumerate(json_list):
        json_list[pos] = flatten(hit)
    return json_list
def edamam_json_rename_cols(jason):
    """Strip the flattened 'recipe_' prefix from every column label."""
    stripped = jason.columns.str.replace('recipe_', '', regex=True)
    jason.columns = stripped
    return jason
def write_json(json_txt, path='new_json.json'):
    """Serialize *json_txt* to *path* as JSON."""
    # [TODO] derive the filename from the current date/time and push it to XCom
    serialized = json.dumps(json_txt)
    with open(path, 'w') as outfile:
        outfile.write(serialized)
''' #########
Submission Function
''' #########
def df_submit_mysql( ti ):
    """Create a MySQL table from the parsed hits and insert the rows."""
    # NOTE(review): `connect` comes from mysql.connector, whose import is
    # commented out at the top of this file -- this function would raise
    # NameError as-is. Confirm the import before enabling this task.
    # Initialization
    table_name = "testing_1"
    ########################################################
    df= pd.json_normalize( ti.xcom_pull(task_ids=['parse_json_request']) )
    # Write CREATE TABLE query using our dataframe
    # Create the table query
    table_query = df_create_table( table_name, df )
    # Insert the information query
    insert_queries = df_insert( df, table_name )
    # Connect to local mysql; credentials come from Airflow Variables
    with connect( host='127.0.0.1', user=Variable.get('MYSQL_USER'), password=Variable.get('MYSQL_PW'), database=Variable.get('MYSQL_DB')) \
        as connection:
        cursor = connection.cursor()
        # Submit the CREATE TABLE query to the database
        cursor.execute( table_query )
        connection.commit()
        # Submit our INSERT queries into our newly CREATED TABLE
        for query in insert_queries:
            cursor.execute( query )
            connection.commit()
            print( cursor.rowcount, ": worked'" )
        # Close our connection
        cursor.close()
        connection.close()
    print( 'successful' )
    return True
def df_create_table(table_name, df):
    """Build a CREATE TABLE statement with VARCHAR columns for *df*."""
    # surrogate id key plus one VARCHAR(255) column per dataframe column
    columns = create_table_columns(df)
    return (
        f'CREATE TABLE IF NOT EXISTS {table_name} ( id INT AUTO_INCREMENT PRIMARY KEY, \n'
        + columns
        + ' )'
    )
def create_table_columns(df):
    """Return comma-joined '<col> VARCHAR(255)' specs for df's columns.

    The first column is skipped (it is replaced by the surrogate id key)
    and at most 30 columns are emitted. The previous implementation left
    a dangling ',\\n' after the final column whenever fewer than 30
    columns were emitted, which produced invalid SQL in both
    df_create_table and df_insert.
    """
    cols = list(df.columns)[1:31]
    return ',\n'.join(f'{c} VARCHAR(255)' for c in cols)
def df_insert( df, table ):
    """Build INSERT statements for (at most) the first 10 rows of *df*."""
    # Initialization
    df_cols = create_table_columns( df ).replace( ' VARCHAR(255)', '')
    queries = []
    row_limit = 10
    row = 0
    row_list = df.iloc[0: row_limit]
    # Create template query string
    insert_query= f'INSERT INTO {table} ({df_cols})\
        VALUES ($val)'
    # Add df info to the query
    # NOTE(review): iterating a DataFrame directly yields its COLUMN NAMES
    # (strings), so row[1:31] slices characters of a name, not row values.
    # iterrows()/itertuples() was almost certainly intended -- verify.
    for row in row_list:
        row_info = row[1:31]
        # Convert our list to a string that REPLACE can use
        row_values = f'\"{row_info[0]}\" '
        for value in row_info[1:]:
            row_values += f', \n\"{str(value)[:254]}\"'
        queries.append( insert_query.replace('$val', row_values))
    # Return the string
return queries | JoshusTenakhongva/Mentorship_Repo | food_at_home/dags/airflow_functions.py | airflow_functions.py | py | 5,053 | python | en | code | 1 | github-code | 36 |
35217766012 | from itertools import product
import sys
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import json
import random
sys.path.append('../..')
from lib import excelUtils
from lib import httpUtils
from lib import textUtil
from lib.htmlEleUtils import getNodeText
from lib.htmlEleUtils import getInnerHtml
products = []
header=['link','Category','CAS号','Product Name','price','imageName']
def addHeader(title):
    """Register a non-empty column title once in the global header list."""
    if len(title) > 0 and title not in header:
        header.append(title)
chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument("window-size=1024,768")
# chrome_options.add_argument("--no-sandbox")
browser = webdriver.Chrome(chrome_options=chrome_options)
def getProductInfo(url, type):
    """Scrape one product page into a dict, download its image, and append
    the record to the global `products` list.
    """
    print(str(len(products)) + ":" + url)
    browser.delete_all_cookies()
    browser.get(url)
    sope= BeautifulSoup(browser.page_source, "html.parser")
    nav = sope.find("div", attrs={"class":"crumbs matp"})
    # retry up to two more times when the breadcrumb div is missing
    # (used as a signal that the page did not load)
    if nav == None:
        browser.delete_all_cookies()
        browser.get(url)
        sope= BeautifulSoup(browser.page_source, "html.parser")
        nav = sope.find("div", attrs={"class":"crumbs matp"})
        if nav == None:
            browser.delete_all_cookies()
            browser.get(url)
            sope= BeautifulSoup(browser.page_source, "html.parser")
            nav = sope.find("div", attrs={"class":"crumbs matp"})
    pInfo = {
        "Category": type,
        "link": url
    }
    # base info: "<b>key:value</b>" pairs plus the product name
    baseInfos = sope.find_all("li", attrs={"class":"proulllli"})
    for baseInfo in baseInfos:
        ebs = baseInfo.find_all("b")
        for b in ebs:
            title = getNodeText(b)
            if title == "names:":
                pInfo["Product Name"] = getNodeText(baseInfo).replace("names:", "")
            else:
                titlePart = title.split(":")
                if len(titlePart) > 1:
                    addHeader(titlePart[0])
                    pInfo[titlePart[0]] = titlePart[1]
        spans = baseInfo.find_all("span")
        for span in spans:
            title = getNodeText(span)
            titlePart = title.split(":")
            # NOTE(review): the fallback split below looks identical to the
            # first; one of the two was probably meant to split on the
            # full-width colon -- verify against the original encoding.
            if len(titlePart) == 1:
                titlePart = title.split(":")
            if len(titlePart)>1:
                addHeader(titlePart[0])
                pInfo[titlePart[0]] = titlePart[1]
    # price/spec table: join "(spec/price);" entries into one string
    specTbs = sope.find_all("table",attrs={"class":"protwtab"})
    specStr = ""
    for specTb in specTbs:
        trs = specTb.find_all("tr")
        if len(trs) > 0:
            ths = trs[0].find_all("th")
            if len(ths)>2:
                title = getNodeText(ths[1])
                if title == "规格":
                    for inx,tr in enumerate(trs):
                        if inx>0:
                            tds = tr.find_all("td")
                            specStr += "("+getNodeText(tds[1])+"/"+getNodeText(tds[4])+");"
    pInfo["price"] = specStr
    # generic two-column attribute rows anywhere on the page
    infoTrs = sope.find_all("tr")
    for infoTr in infoTrs:
        tds = infoTr.find_all("td")
        if len(tds) == 2:
            title = getNodeText(tds[0])
            value = getNodeText(tds[1])
            addHeader(title)
            pInfo[title] = value
    # prefer the CAS number over the product name for the image filename
    imageName = ""
    if "Product Name" in pInfo:
        imageName = pInfo["Product Name"]+".png"
    if "CAS号" in pInfo:
        imageName = pInfo["CAS号"]+".png"
    pInfo["imageName"] = imageName
    imgArea = sope.find("i", attrs={"id":"D2"})
    img = imgArea.find("img")
    if img!=None:
        httpUtils.urllib_download("http://bio-fount.com"+img["src"], imageName)
    products.append(pInfo.copy())
def getProductType(url, type1):
    """Open a category listing page and scrape every linked product."""
    browser.get(url)
    sope= BeautifulSoup(browser.page_source, "html.parser")
    plinkAreas = sope.find("ul", attrs={"id":"mo"}).find_all("li", attrs={"class":"fl"})
    # fall back to <article> elements when the expected list is empty
    if len(plinkAreas) == 0:
        time.sleep(1)
        browser.delete_all_cookies()
        browser.get(url)
        sope= BeautifulSoup(browser.page_source, "html.parser")
        plinkAreas = sope.find_all("article")
    for plinkArea in plinkAreas:
        pLink = plinkArea.find("a")
        getProductInfo("http://bio-fount.com"+pLink["href"], type1)
# getProductType("http://bio-fount.com/cn/goods-list/1375.html",'cDNA Clones')
# getProductInfo("http://bio-fount.com/cn/goods2/61740_1375.html", "a")
# crawl each category's paginated listing pages
for pageIndex in range(1, 5):
    getProductType("http://bio-fount.com/cn/goods-list/1375__"+str(pageIndex)+".html",'脂肪族含氟砌块')
for pageIndex in range(1, 6):
    getProductType("http://bio-fount.com/cn/goods-list/1374__"+str(pageIndex)+".html",'杂环含氟砌块')
getProductType("http://bio-fount.com/cn/goods-list/1372.html",'氟标记化合物')
for pageIndex in range(1, 22):
    getProductType("http://bio-fount.com/cn/goods-list/1371__"+str(pageIndex)+".html",'芳香族含氟砌块')
excelUtils.generateExcel('bio-fount.xlsx', products, header) | Just-Doing/python-caiji | src/work/20230110/bio-fount.py | bio-fount.py | py | 4,549 | python | en | code | 1 | github-code | 36 |
38097195752 | import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
import pickle
import time
from os.path import exists
from GaslightEnv import GaslightEnv
from stable_baselines3 import PPO, TD3
from stable_baselines3.common.callbacks import CheckpointCallback
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from utils import distance
#Callback class that saves the model after a set interval of steps.
class GaslightCheckpoint(CheckpointCallback):
    """Callback that periodically saves the attack model to disk."""

    def __init__(self, save_interval, rl_model):
        super().__init__(save_interval, ".", name_prefix=rl_model)
        self.save_interval = save_interval
        self.rl_model = rl_model

    def _on_step(self) -> bool:
        # Save only when a positive interval is configured, the step counter
        # is exactly on an interval boundary, and a file name was supplied.
        interval_hit = self.save_interval > 0 and self.n_calls % self.save_interval == 0
        if interval_hit and self.rl_model is not None:
            self.model.save(self.rl_model)
        return True
def gaslightRun(predict, extra, input_shape, input_range, max_delta, target, norm, model_name, framework="PPO", save_interval=0, param_file=None):
    """Train an adversarial attack model on GaslightEnv and track its metrics.

    predict       -- victim classifier: callable(input_array, extra) -> label
    extra         -- opaque extra argument forwarded to predict
    input_shape   -- shape of the victim's input arrays
    input_range   -- (low, high) bounds for input values
    max_delta     -- maximum perturbation, forwarded to GaslightEnv
    target        -- target label for a targeted attack, or None for untargeted
    norm          -- distance norm setting, forwarded to GaslightEnv
    model_name    -- path used to save/restore the attack model (may be None)
    framework     -- "PPO" or "TD3"; anything else prints an error and exits
    save_interval -- checkpoint frequency in steps (0 disables saving)
    param_file    -- optional pickled Optuna study supplying hyperparameters

    Trains in 500 rounds of 1000 steps each; after every round, evaluates the
    attack on 100 fixed random inputs and live-plots L2, L-inf, and success
    rate, saving the figure to Graphs/Graph.png.
    """
    if framework == "PPO":
        hyperparams = {}
        net_arch = dict(pi=[256, 256], vf=[256, 256])
        hyperparams['policy_kwargs'] = dict(net_arch=net_arch)
        #Hyperparameters collected from Optuna.py
        if param_file is not None:
            study = pickle.load(open(param_file, 'rb'))
            hyperparams = study.best_params
            # PPO requires batch_size <= n_steps; clamp if Optuna disagreed.
            if hyperparams['batch_size'] > hyperparams['n_steps']:
                hyperparams['batch_size'] = hyperparams['n_steps']
        #Create vectorized environment and model-saving callback.
        env_kwargs = {
            "predict": predict,
            "extra": extra,
            "input_shape": input_shape,
            "input_range": input_range,
            "max_delta": max_delta,
            "target": target,
            "norm": norm
        }
        vec_env = make_vec_env(GaslightEnv, 4, env_kwargs=env_kwargs)
        checkpoint_callback = GaslightCheckpoint(save_interval, model_name)
        #Create or load attack model.
        model_attack = PPO("MlpPolicy", vec_env, **hyperparams)
        if model_name is not None and exists(model_name):
            model_attack.set_parameters(model_name)
    elif framework == "TD3":
        hyperparams = {}
        hyperparams['policy_kwargs'] = dict(net_arch=[256, 256])
        #Hyperparameters collected from Optuna.py
        if param_file is not None:
            study = pickle.load(open(param_file, 'rb'))
            hyperparams = study.best_params
            # Translate Optuna's noise_type/noise_std into the action_noise
            # object TD3 expects, then drop the raw keys.
            if hyperparams['noise_type'] == 'normal':
                hyperparams['action_noise'] = NormalActionNoise(
                    mean=np.zeros(input_shape), sigma=hyperparams['noise_std'] * np.ones(input_shape)
                )
            elif hyperparams['noise_type'] == 'ornstein-uhlenbeck':
                hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(
                    mean=np.zeros(input_shape), sigma=hyperparams['noise_std'] * np.ones(input_shape)
                )
            del hyperparams['noise_type']
            del hyperparams['noise_std']
            hyperparams['gradient_steps'] = hyperparams['train_freq']
        #Create environment and model-saving callback.
        env_kwargs = {
            "predict": predict,
            "extra": extra,
            "input_shape": input_shape,
            "input_range": input_range,
            "max_delta": max_delta,
            "target": target,
            "norm": norm
        }
        vec_env = make_vec_env(GaslightEnv, 4, env_kwargs=env_kwargs)
        checkpoint_callback = GaslightCheckpoint(save_interval, model_name)
        #Create or load attack model.
        model_attack = TD3("MlpPolicy", vec_env, **hyperparams)
        if model_name is not None and exists(model_name):
            model_attack.set_parameters(model_name)
    else:
        print(f"Framework {framework} does not exist. Available frameworks are (PPO, TD3)")
        exit()
    #Generate 100 random inputs for testing.
    originals = [np.random.uniform(low=input_range[0], high=input_range[1], size=input_shape) for _ in range(100)]
    #Determine "true" labels from testing inputs.
    true_labels = [predict(x, extra) for x in originals]
    #Metrics used to validate attack model. Includes L2 Norm, L-Inf Norm, and Success Rate.
    timesteps = []
    l2_list = []
    linf_list = []
    success_list = []
    #Create subplots to visualize metrics.
    plt.ion()
    figure, ax = plt.subplots(1, 3, figsize=(18, 6))
    #Each iteration trains the attack model for a certain amount of steps. After each iteration, recalculate the metrics.
    for timestep in range(500):
        #Train the attack model for 1000 steps.
        model_attack.learn(1000, progress_bar=True, callback=checkpoint_callback)
        #Initialize metric averages to 0.
        l2_avg = 0
        linf_avg = 0
        success_count = 0
        #For every testing input, perturb it and calculate metrics.
        for idx in range(len(originals)):
            #Find the optimal distortion/action to modify the input values.
            action, _ = model_attack.predict(originals[idx])
            adv = np.clip(originals[idx] + action, input_range[0], input_range[1])
            #Feed perturbed input into victim classifier and check its label.
            new_label = predict(adv, extra)
            #Calculate distance metrics.
            l2_avg += distance(adv, originals[idx], 2)
            linf_avg += distance(adv, originals[idx], np.inf)
            #Determine if the attack is successful (Either for untargeted or targeted attacks).
            if (target is None and new_label != true_labels[idx]) or (target is not None and new_label == target):
                success_count += 1
        #Average findings across all tests.
        timesteps.append((timestep + 1) * 1000)
        l2_list.append(l2_avg / len(originals))
        linf_list.append(linf_avg / len(originals))
        success_list.append(success_count * 100 / len(originals))
        #Plot the new metrics.
        ax[0].clear()
        ax[1].clear()
        ax[2].clear()
        ax[0].plot(timesteps, l2_list)
        ax[0].set_title("L-2")
        ax[0].set_xlabel("Timesteps")
        ax[1].plot(timesteps, linf_list)
        ax[1].set_title("L-Inf")
        ax[1].set_xlabel("Timesteps")
        ax[2].plot(timesteps, success_list)
        ax[2].set_title("Success Rate")
        ax[2].set_xlabel("Timesteps")
        figure.canvas.draw()
        figure.canvas.flush_events()
        time.sleep(0.1)
        # Overwrites the same file every round with the latest metrics plot.
        plt.savefig(f"Graphs/Graph.png")
| RajatSethi2001/Gaslight | GaslightEngine.py | GaslightEngine.py | py | 6,819 | python | en | code | 0 | github-code | 36 |
36728693283 | s=input()
# Count the words in `s` (read from stdin above) that start with a vowel and
# end with a consonant.
words = s.split()
vowels = 'aeiouAEIOU'
# Bug fix: the original consonant set was lowercase-only and was missing 'b',
# so words such as "Orb" or "anT" were never counted even though the vowel
# set accepted both cases.
consonants = 'bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ'
c = 0
for word in words:
    if word[0] in vowels and word[-1] in consonants:
        c += 1
print(c)
| 21A91A05B8/codemind-python | count_words.py | count_words.py | py | 150 | python | en | code | 0 | github-code | 36 |
2846510453 | # Qus:https://practice.geeksforgeeks.org/problems/quick-sort/1
# User function Template for python3
class Solution:
    """In-place quick sort using the first element of each range as pivot."""

    def quickSort(self, arr, low, high):
        """Sort arr[low..high] (inclusive) in place."""
        if low >= high:
            return
        split = self.partition(arr, low, high)
        self.quickSort(arr, low, split - 1)
        self.quickSort(arr, split + 1, high)

    def partition(self, arr, low, high):
        """Move the pivot arr[low] to its sorted position and return that index.

        After the call, everything left of the returned index is smaller than
        the pivot and everything to its right is >= the pivot.
        """
        pivot = arr[low]
        left, right = low + 1, high
        while left <= right:
            # advance left past elements smaller than the pivot
            while left <= right and arr[left] < pivot:
                left += 1
            # retreat right past elements greater than or equal to the pivot
            while left <= right and arr[right] >= pivot:
                right -= 1
            if left <= right:
                # out-of-place pair found: swap and keep scanning inward
                arr[left], arr[right] = arr[right], arr[left]
                left += 1
                right -= 1
        # right now sits on the last element < pivot; park the pivot there
        arr[low], arr[right] = arr[right], arr[low]
        return right
# {
# Driver Code Starts
# Initial Template for Python 3
if __name__ == "__main__":
    t = int(input())  # number of test cases
    for i in range(t):
        n = int(input())  # length of the array for this test case
        arr = list(map(int, input().split()))
        Solution().quickSort(arr, 0, n-1)
        # print the sorted array space-separated on one line
        # NOTE(review): the inner loop variable shadows the outer `i`;
        # harmless here because the outer value is never reused.
        for i in range(n):
            print(arr[i], end=" ")
        print()
# } Driver Code Ends
| mohitsinghnegi1/CodingQuestions | Algorithms/Quick Sort .py | Quick Sort .py | py | 1,677 | python | en | code | 2 | github-code | 36 |
8127811600 | # Jogo de Craps. Faça um programa que implemente um jogo de Craps. O jogador lança
# um par de dados, obtendo a soma entre 2 e 12. Se na primeira jogada você tirar 7 ou 11,
# você ganhou. Se você tirar 2, 3 ou 12 na primeira jogada, isto é chamado de "craps" e
# você perdeu. Se na primeira jogada você somou 4, 5, 6, 8, 9 ou 10, este é seu "Ponto".
# Seu objetivo agora é continuar jogando os dados até tirar este número novamente. Você
# perde, no entanto, se tirar um 7 antes de tirar este “Ponto” novamente. Utilize as
# funções
# abaixo
import random
#-----------------------funçoes------------------------
#funçao de jogar os dados
def lancarDado(lados=6):
    """Return a uniformly random die roll between 1 and `lados`.

    Generalized from a fixed six-sided die: the default keeps the original
    behavior, so existing callers are unaffected.
    """
    return random.randint(1, lados)
#funçao para fazer a soma de dados
def rolagem():
    """Wait for the player to press ENTER, roll two dice, print both values
    and their sum, and return the sum (an int in 2..12)."""
    input("Pressione ENTER para lançar os dados.")
    d1 = lancarDado()
    d2 = lancarDado()
    soma = d1 + d2
    print("Dado 1: %d" %d1)
    print("Dado 2: %d" %d2)
    print("SOMA: %d" %soma)
    print("---------------------")
    return soma
#funçao primeira vez
def primeiraVez():
    """First roll of a craps round: roll the dice once and resolve the
    outcome (win, lose, or play on for a point)."""
    return ganhouPerdeuPonto(rolagem())
#funçao para comparar: ganhou, perdeu ou ponto
def ganhouPerdeuPonto(soma):
    """Resolve the first roll: 7 or 11 wins outright, 2/3/12 ("craps")
    loses outright, any other total becomes the player's point and play
    continues via proximasVezes."""
    if soma in (7, 11):
        return True
    if soma in (2, 3, 12):
        return False
    print("--------------------------------")
    print("    O SEU PONTO É: %d    "%soma)
    print("--------------------------------")
    return proximasVezes(soma)
#funçao proximas vezes
def proximasVezes(ponto):
    """Keep rolling until the point is matched (win) or a 7 appears (loss),
    then delegate the verdict to ganhouPerdeu."""
    while True:
        soma = rolagem()
        if soma == 7 or soma == ponto:
            return ganhouPerdeu(soma)
#funçao para comparar: ganhou, perdeu ou ponto v.2
def ganhouPerdeu(soma):
    """A final roll of 7 loses; anything else (the matched point) wins."""
    return soma != 7
#-----------------------main---------------------------
def main():
    """Play one full round of craps and announce the result."""
    venceu = primeiraVez()
    print("Voce ganhou!!!" if venceu else "Voce perdeu!!!")
main()
| Galaxyvideok/SI-IFES | SI_IFES/python_PROG_I/P4ex09.py | P4ex09.py | py | 2,014 | python | pt | code | 0 | github-code | 36 |
8676412955 | # -*- coding: utf-8 -*-
from odoo import fields, models
class ProductTemplate(models.Model):
    """Extend product.template with asset and deferred-revenue categories."""
    _inherit = 'product.template'
    # flags the product as an asset (UI label: "Is Asset")
    pr_active = fields.Boolean('Is Asset')
    # asset category for this product; stored per company, deletion restricted
    asset_category_id = fields.Many2one(
        'account.asset.category',
        string='Asset Category',
        company_dependent=True,
        ondelete="restrict")
    # deferred-revenue category for this product; per company, deletion restricted
    deferred_revenue_category_id = fields.Many2one(
        'account.asset.category',
        string='Deferred Revenue Type',
        company_dependent=True,
        ondelete="restrict")
    def _get_asset_accounts(self):
        """Override: when an asset / deferred-revenue category is set, point
        the stock input/output accounts at the product's expense/income
        accounts respectively."""
        res = super(ProductTemplate, self)._get_asset_accounts()
        if self.asset_category_id:
            res['stock_input'] = self.property_account_expense_id
        if self.deferred_revenue_category_id:
            res['stock_output'] = self.property_account_income_id
        return res
| OpusVL/Odoo-Uk-Accounting | uk_account_asset/models/product.py | product.py | py | 864 | python | en | code | 0 | github-code | 36 |
16515229074 | import time
from werkzeug.wrappers import Response
import netmanthan
import netmanthan.rate_limiter
from netmanthan.rate_limiter import RateLimiter
from netmanthan.tests.utils import netmanthanTestCase
from netmanthan.utils import cint
class TestRateLimiter(netmanthanTestCase):
	"""Tests for netmanthan.rate_limiter / RateLimiter.

	Each test cleans up after itself by deleting the cache key it created
	and removing the request-local rate_limiter it installed.
	"""
	def test_apply_with_limit(self):
		"""apply() installs a RateLimiter on netmanthan.local when a limit is configured."""
		netmanthan.conf.rate_limit = {"window": 86400, "limit": 1}
		netmanthan.rate_limiter.apply()
		self.assertTrue(hasattr(netmanthan.local, "rate_limiter"))
		self.assertIsInstance(netmanthan.local.rate_limiter, RateLimiter)
		netmanthan.cache().delete(netmanthan.local.rate_limiter.key)
		delattr(netmanthan.local, "rate_limiter")
	def test_apply_without_limit(self):
		"""apply() is a no-op when no rate limit is configured."""
		netmanthan.conf.rate_limit = None
		netmanthan.rate_limiter.apply()
		self.assertFalse(hasattr(netmanthan.local, "rate_limiter"))
	def test_respond_over_limit(self):
		"""Once over the limit, apply() raises and respond() returns a 429 with the expected headers."""
		limiter = RateLimiter(0.01, 86400)
		time.sleep(0.01)
		limiter.update()
		netmanthan.conf.rate_limit = {"window": 86400, "limit": 0.01}
		self.assertRaises(netmanthan.TooManyRequestsError, netmanthan.rate_limiter.apply)
		netmanthan.rate_limiter.update()
		response = netmanthan.rate_limiter.respond()
		self.assertIsInstance(response, Response)
		self.assertEqual(response.status_code, 429)
		headers = netmanthan.local.rate_limiter.headers()
		self.assertIn("Retry-After", headers)
		self.assertNotIn("X-RateLimit-Used", headers)
		self.assertIn("X-RateLimit-Reset", headers)
		self.assertIn("X-RateLimit-Limit", headers)
		self.assertIn("X-RateLimit-Remaining", headers)
		self.assertTrue(int(headers["X-RateLimit-Reset"]) <= 86400)
		self.assertEqual(int(headers["X-RateLimit-Limit"]), 10000)
		self.assertEqual(int(headers["X-RateLimit-Remaining"]), 0)
		netmanthan.cache().delete(limiter.key)
		netmanthan.cache().delete(netmanthan.local.rate_limiter.key)
		delattr(netmanthan.local, "rate_limiter")
	def test_respond_under_limit(self):
		"""respond() returns None while the request is still under the limit."""
		netmanthan.conf.rate_limit = {"window": 86400, "limit": 0.01}
		netmanthan.rate_limiter.apply()
		netmanthan.rate_limiter.update()
		response = netmanthan.rate_limiter.respond()
		self.assertEqual(response, None)
		netmanthan.cache().delete(netmanthan.local.rate_limiter.key)
		delattr(netmanthan.local, "rate_limiter")
	def test_headers_under_limit(self):
		"""Under the limit: no Retry-After, and usage/limit/remaining headers are populated."""
		netmanthan.conf.rate_limit = {"window": 86400, "limit": 0.01}
		netmanthan.rate_limiter.apply()
		netmanthan.rate_limiter.update()
		headers = netmanthan.local.rate_limiter.headers()
		self.assertNotIn("Retry-After", headers)
		self.assertIn("X-RateLimit-Reset", headers)
		# NOTE(review): the parenthesis looks misplaced — this asserts
		# int(bool) instead of comparing the int-converted header, unlike
		# the `int(headers[...]) <= 86400` form used above. Verify intent.
		self.assertTrue(int(headers["X-RateLimit-Reset"] < 86400))
		self.assertEqual(int(headers["X-RateLimit-Used"]), netmanthan.local.rate_limiter.duration)
		self.assertEqual(int(headers["X-RateLimit-Limit"]), 10000)
		self.assertEqual(int(headers["X-RateLimit-Remaining"]), 10000)
		netmanthan.cache().delete(netmanthan.local.rate_limiter.key)
		delattr(netmanthan.local, "rate_limiter")
	def test_reject_over_limit(self):
		"""A fresh limiter with the same key rejects once the quota is spent."""
		limiter = RateLimiter(0.01, 86400)
		time.sleep(0.01)
		limiter.update()
		limiter = RateLimiter(0.01, 86400)
		self.assertRaises(netmanthan.TooManyRequestsError, limiter.apply)
		netmanthan.cache().delete(limiter.key)
	def test_do_not_reject_under_limit(self):
		"""apply() passes (returns None) when the configured limit is not yet reached."""
		limiter = RateLimiter(0.01, 86400)
		time.sleep(0.01)
		limiter.update()
		limiter = RateLimiter(0.02, 86400)
		self.assertEqual(limiter.apply(), None)
		netmanthan.cache().delete(limiter.key)
	def test_update_method(self):
		"""update() persists the measured duration into the cache under the limiter key."""
		limiter = RateLimiter(0.01, 86400)
		time.sleep(0.01)
		limiter.update()
		self.assertEqual(limiter.duration, cint(netmanthan.cache().get(limiter.key)))
		netmanthan.cache().delete(limiter.key)
| netmanthan/Netmanthan | netmanthan/tests/test_rate_limiter.py | test_rate_limiter.py | py | 3,663 | python | en | code | 0 | github-code | 36 |
3784075084 | ###############################################################################
# make park model
###############################################################################
import cantera as ct
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import rmgpy
from rmgpy.data.thermo import ThermoDatabase
from rmgpy.data.kinetics import KineticsDatabase
from rmgpy.molecule import Molecule
from rmgpy.species import Species
from rmgpy.reaction import Reaction
import inspect
import copy
from rmgpy.kinetics.surface import SurfaceArrhenius
from rmgpy.kinetics.surface import StickingCoefficient
from rmgpy.quantity import ScalarQuantity
import rmgpy.chemkin as Chemkin
from cantera import ck2cti
###############################################################################
# useful functions
###############################################################################
def get_thermo(spec_str):
    '''
    Build a Species from a SMILES string and attach the thermo estimate
    (scaled to Cu111) from the module-level thermo_database.
    The species may already exist in RMG.
    '''
    species = Species()
    species.from_smiles(spec_str)
    species.thermo = thermo_database.get_thermo_data(species, metal_to_scale_to="Cu111")
    return species
def get_gas_phase_precurs(spec):
    '''
    adapted from ThermoDatabase method:
    get_thermo_data_for_surface_species()
    gets a Species object corresponding to the gas phase precursor for
    a given surface species; when several desorbed configurations exist,
    the lowest-energy one (by H298) is returned
    does NOT apply adsorption correction!
    '''
    # candidate gas-phase molecules obtained by removing the surface bond(s)
    dummy_molecules = spec.molecule[0].get_desorbed_molecules()
    for mol in dummy_molecules:
        mol.clear_labeled_atoms()
    if len(dummy_molecules) == 0:
        # NOTE(review): f-string has no placeholders; plain string would do
        raise RuntimeError(f"Cannot get thermo for gas-phase molecule")
    # if len(molecule) > 1, it will assume all resonance structures have already been
    #generated when it tries to generate them, so evaluate each configuration separately
    # and pick the lowest energy one by H298 value
    gas_phase_species_from_libraries = []
    gas_phase_species_estimates = []
    for dummy_molecule in dummy_molecules:
        dummy_species = Species()
        dummy_species.molecule = [dummy_molecule]
        dummy_species.generate_resonance_structures()
        dummy_species.thermo = thermo_database.get_thermo_data(dummy_species)
        # a non-empty thermo label marks a library hit (preferred over estimates)
        if dummy_species.thermo.label:
            gas_phase_species_from_libraries.append(dummy_species)
        else:
            gas_phase_species_estimates.append(dummy_species)
    # define the comparison function to find the lowest energy
    def lowest_energy(species):
        if hasattr(species.thermo, 'H298'):
            print(species.thermo.H298.value_si)
            return species.thermo.H298.value_si
        else:
            print(species.thermo.get_enthalpy(298.0))
            return species.thermo.get_enthalpy(298.0)
    # prefer library data; fall back to estimates only if no library hit
    if gas_phase_species_from_libraries:
        species = min(gas_phase_species_from_libraries, key=lowest_energy)
    else:
        species = min(gas_phase_species_estimates, key=lowest_energy)
    # NOTE(review): `thermo` is assigned but never used below
    thermo = species.thermo
    return species
def update_thermo(spec, name, be1, be2):
    '''
    Return a deep copy of `spec` with its H298 shifted by the binding-energy
    difference (be1 - be2), converted from eV to J/mol. The input species is
    left unmodified.

    spec -- Species object whose thermo is to be adjusted
    name -- park model name as a string, used only in the diagnostic prints
    be1  -- original binding energy in eV
    be2  -- "correct" (target) binding energy in eV
    '''
    spec_new = copy.deepcopy(spec)
    ev_2_kj = 9.6e4  # eV -> J/mol conversion factor
    # Bug fix: the original recomputed the difference from the module-level
    # be_dict/be_dict_park globals and silently ignored the be1/be2
    # arguments (and used the literal 9.6e4 despite defining ev_2_kj).
    # Callers pass be_dict[name] and be_dict_park[name], so the result is
    # unchanged for existing call sites.
    be_diff = (be1 - be2) * ev_2_kj
    new_h298 = spec.thermo.H298.value_si - be_diff
    spec_new.thermo.H298.value_si = new_h298
    print(name, id(spec_new.thermo.H298.value_si), id(spec.thermo.H298.value_si))
    print(name, spec_new.thermo.H298.value_si, spec.thermo.H298.value_si, be_diff)
    return spec_new
def make_reaction(reactants, products, rxn_str, A, Ea, stick = False,):
    '''
    Build an rmgpy Reaction object.

    reactants/products -- lists of Species objects
    rxn_str            -- reaction equation string, used as the label
    A, Ea              -- Arrhenius prefactor and activation energy
    stick              -- if True, A is interpreted as a sticking coefficient
    '''
    # Both kinetics classes accept the same keyword arguments; only the
    # class itself differs depending on the `stick` flag.
    kinetics_cls = StickingCoefficient if stick else SurfaceArrhenius
    kinetics = kinetics_cls(
        A=A,
        n=0.0,
        Ea=Ea,
        T0=(1.0, "K"),
        Tmin=None,
        Tmax=None,
        Pmin=None,
        Pmax=None,
        coverage_dependence=None,
        comment=''
    )
    # use the rmgpy reaction object
    return Reaction(
        index=-1,
        label=rxn_str,
        reactants=reactants,
        products=products,
        specific_collider=None,
        kinetics=kinetics,
        network_kinetics=None,
        reversible=True,
        transition_state=None,
        duplicate=False,
        degeneracy=1,
        pairs=None,
        allow_pdep_route=False,
        elementary_high_p=False,
        allow_max_rate_violation=False,
        rank=None,
        comment='',
        is_forward=None,
    )
def convert_to_nasa(spec):
    """Replace spec.thermo in place with its NASA-polynomial form
    (fit over 298-1500 K with an intermediate temperature of 1000 K)."""
    spec.thermo = spec.thermo.to_nasa(298, 1500, 1000)
###############################################################################
# initialize things
###############################################################################
# quick check that we are using the correct rmgpy and version
print('using rmgpy at: ',inspect.getfile(rmgpy))
print('using rmgpy version: ', rmgpy.__version__)
# save rmgpy and db directory. db is assumed to be in the same
# folder as RMG-Py
rmg_py_path = inspect.getfile(rmgpy).split("rmgpy")[0]
rmg_db_path = rmg_py_path.split("RMG-Py")[0] + "RMG-database/"
# import data
# set absolute location, using './' in jupyter performs differently
# in vscode
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
park_xl_file =os.path.join(__location__,'park_thermo_and_rates.xlsx')
BE_sheet='Binding Energies'
rxn_sheet = 'reactions'
be_df = pd.read_excel(park_xl_file, sheet_name=BE_sheet, engine='openpyxl')
rxn_df = pd.read_excel(park_xl_file, sheet_name=rxn_sheet, engine='openpyxl')
# output files
chemkin_gas_file = os.path.join(__location__, 'park_gas.inp')
chemkin_surface_file = os.path.join(__location__ + '/park_surf.inp') # why do we need a / for surface?
cantera_file = os.path.join(__location__,'park_mech.cti')
###############################################################################
# Constants/values
###############################################################################
# surface site density; converted from mol/cm^2 to SI (mol/m^2) below
site_density_mol_cm = 2.943e-09
site_density_si = site_density_mol_cm * 1e4
site_density_object = ScalarQuantity(site_density_si, 'mol/m^2')
###############################################################################
# get thermo for all species in RMG model. adjust BEs per the sheet values
###############################################################################
db_input_path = rmg_db_path + 'input/'
# load the thermo database
library_path = db_input_path + 'thermo/'
thermo_libraries = [
    'surfaceThermoPt111',
]
thermo_database = ThermoDatabase()
thermo_database.load(
    library_path,
    libraries=thermo_libraries,
    depository=False,
    surface=True
)
# load the kinetics database
kin_libraries_dir = db_input_path + "kinetics/libraries/Surface/"
kin_fam_dir = db_input_path + "kinetics/families/"
kinetics_libraries = [
    'CPOX_Pt/Deutschmann2006_adjusted',
]
kinetics_families = ['surface']
kinetics_database = KineticsDatabase()
kinetics_database.load_recommended_families(kin_fam_dir + 'recommended.py')
kinetics_database.load_families(
    path=kin_fam_dir,
    families=kinetics_families,
)
kinetics_database.load_libraries(
    kin_libraries_dir,
    libraries=kinetics_libraries
)
# get binding energies
# need a dictionary translating species names to smiles
# (surface species names map to adsorbed SMILES; [*] denotes a surface site)
spec_smiles_dict = {
    'CO*':'O=C=[*]',
    'CO2*':'O=C=O.[*]',
    'H*':'[H]*',
    'H2O*':'O.[*]',
    'CH3OH*':'CO.[*]',
    'O*':'O=[*]',
    'OH*':'O[*]',
    'HCO*':'O=C*',
    # 'HCOO**':'O=CO[*][*]', #formate, bidentate
    'HCOO**':'O=CO[*].[*]', # formate, bidentate, plus extra X
    'H2CO2*':'[*]OCO[*]',
    'COOH*':'O=C(O)[*]',
    'CH2O*':'C=O.[*]',
    'CH3O*':'CO[*]',
    'CH3O2*':'OCO[*]',
    '*':'[*]',
}
# also need a dict of gas phase species to get be's from
# key is surface species, value is Gas phase precursor
# either from RMGs estimate or if it's explicitly known,
# just the gas phase version (e.g. 'CO2*': 'CO2')
gas_pre_dict = {
    'CO*':'[C-]#[O+]',
    'CO2*':'O=C=O',
    'H*':'[H]',
    'H2O*':'O',
    'CH3OH*':'CO',
    'O*':'[O]',
    'OH*':'[OH]',
    'HCO*':'[CH]=O',
    'HCOO**':'[O]C=O', #formate, bidentate
    'H2CO2*':'[O]C[O]',
    'COOH*':'O=[C]O',
    'CH2O*':'C=O',
    'CH3O*':'C[O]',
    'CH3O2*':'[O]CO',
    '*':'[*]',
}
# all of the gas phase species in the model
gas_smiles_dict = {
    'CO':'[C-]#[O+]',
    'CO2':'O=C=O',
    'H2O':'O',
    'CH3OH':'CO',
    'CH2O':'C=O',
    'H2':'[H][H]',
}
# construct a dictionary of binding energies
# BE = H298(adsorbed) - H298(gas precursor), converted from J/mol to eV
be_dict = {}
for label in spec_smiles_dict.keys():
    surf_spec = get_thermo(spec_smiles_dict[label])
    gas_spec = get_thermo(gas_pre_dict[label])
    surf_h298 = surf_spec.thermo.get_enthalpy(298)
    gas_h298 = gas_spec.thermo.get_enthalpy(298)
    be_dict[label] = (surf_h298 - gas_h298)/9.6e4
# build Species objects for every surface species listed in the spreadsheet
species_dict = {}
for spec_name in be_df['Species']:
    smiles = spec_smiles_dict[spec_name.strip()]
    spec = get_thermo(smiles)
    spec.label = spec_name
    species_dict[spec_name.strip()] = spec
# # manually add surface site to species_dict
# species_dict['*'] = get_thermo(spec_smiles_dict['*'])
# build Species objects for the gas-phase species
gas_species_dict = {}
for spec_name in gas_smiles_dict.keys():
    smiles = gas_smiles_dict[spec_name.strip()]
    spec = get_thermo(smiles)
    spec.label = spec_name
    gas_species_dict[spec_name.strip()] = spec
# make binding energy dictionary from park data
be_dict_park = {}
for i in range(len(be_df)):
    species = be_df['Species'][i].strip()
    be_park = be_df["BE"][i]
    be_dict_park[species] = be_park
# update thermo to be closer to park BE values
new_thermo_spec_dict = {}
for name, spec in species_dict.items():
    spec_new = update_thermo(
        spec,
        name,
        be_dict[name],
        be_dict_park[name],
    )
    new_thermo_spec_dict[name] = spec_new
# combine gas and surface species dicts
combined_species_dict = {**new_thermo_spec_dict, **gas_species_dict}
# now that we've solidified the thermo, convert to nasa so chemkin conversion
# is a little easier
for spec in combined_species_dict.values():
    convert_to_nasa(spec)
# pull the reactants, products, and Arrhenius parameters for every reaction
# row in the spreadsheet, build rmgpy Reaction objects, then write the
# chemkin files and convert them to a cantera mechanism

def _expand_stoich(terms):
    """Expand a leading stoichiometric coefficient of 2: "2OH" -> ["OH", "OH"].

    Replaces the original in-place expansion, which (a) mutated the list
    while iterating over it, so the element after each match was skipped,
    and (b) used str.replace("2", ""), which also deleted 2s embedded in
    species names (e.g. "2CO2*" would have become "CO*"). Order of
    unaffected terms is preserved.
    """
    expanded = []
    for term in terms:
        if term.startswith("2"):
            expanded.extend([term[1:]] * 2)
        else:
            expanded.append(term)
    return expanded

rxn_spec_dict = {}
rxn_dict = {}
rxn_dict_coeff = {}
rxn_list = {}
for index, row in rxn_df.iterrows():
    rxn_raw = row['eqtn']
    rxn = rxn_raw.strip()
    reactants, products = rxn.split("<=>")
    reac_spl = reactants.split("+")
    prod_spl = products.split("+")
    # retain the raw term lists with stoichiometric coefficients, just in
    # case we need them. (The original aliased these to the expanded lists,
    # so rxn_dict_coeff silently held the expanded form as well.)
    reac_spl_coeff = list(reac_spl)
    prod_spl_coeff = list(prod_spl)
    # expand "2X" terms into two separate "X" entries, e.g. 2OH --> OH, OH
    reac_spl = _expand_stoich(reac_spl)
    prod_spl = _expand_stoich(prod_spl)
    rxn_dict[rxn] = [reac_spl, prod_spl]
    rxn_dict_coeff[rxn] = [reac_spl_coeff, prod_spl_coeff]
    if row['Af'] == 'N/A' and row['stick']:
        # if no rate info and sticking coefficient
        A = 1.0 # units of mol/m^2/s
    elif row['Af'] != 'N/A' and row['stick']:
        # if we supply a sticking coefficient
        A = float(row['Af'])
    else:
        # we are making a concession here. rates that do
        # not have an A-factor or Ea specified are quasi-
        # equilibrated, so I am setting the A-factor to the
        # highest value (1e22 1/s) in the mechanism, and
        # making it barrierless (Ea=0 eV)
        if len(reac_spl) > 1:
            A = (float(row['Af'] / site_density_si), 'm^2/(mol*s)') # units of mol/m^2/s
        else:
            A = (float(row['Af'] / site_density_si), 's^-1') # units of mol/m^2/s
    Ea = (float(row['Ef (eV)'] * 9.6e4), 'J/mol') # units of J/mol
    # map the expanded name lists onto the Species objects built above
    rxn_spec_dict[rxn] = [
        [combined_species_dict[reac] for reac in reac_spl],
        [combined_species_dict[prod] for prod in prod_spl],
    ]
    rxn_obj = make_reaction(
        rxn_spec_dict[rxn][0],
        rxn_spec_dict[rxn][1],
        rxn,
        A,
        Ea,
        stick = row['stick'],
    )
    rxn_list[rxn] = rxn_obj
# write the gas-phase chemkin file: gas species only, no reactions.
# (The original first built lists from combined_species_dict/rxn_list and
# immediately overwrote them unused; that dead code is removed.)
chemkin_specs = list(gas_species_dict.values())
chemkin_rxns = []
Chemkin.save_chemkin_file(
    chemkin_gas_file,
    chemkin_specs,
    chemkin_rxns,
    verbose=True,
    check_for_duplicates=True,
)
# write the surface chemkin file: BE-adjusted surface species + all reactions
chemkin_specs = list(new_thermo_spec_dict.values())
chemkin_rxns = list(rxn_list.values())
Chemkin.save_chemkin_surface_file(
    chemkin_surface_file,
    chemkin_specs,
    chemkin_rxns,
    verbose=True,
    check_for_duplicates=True,
    surface_site_density=site_density_object,
)
# convert the pair of chemkin files into a single cantera mechanism
parser = ck2cti.Parser()
parser.convertMech(
    chemkin_gas_file,
    outName=cantera_file,
    quiet=True,
    permissive=True,
    surfaceFile=chemkin_surface_file
)
# test that model works by attempting to load it
gas = ct.Solution(cantera_file, "gas")
surf = ct.Interface(cantera_file,"surface1", [gas])
32262924755 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # applies on top of the migration that introduced the Response model
    dependencies = [
        ('polls', '0011_response'),
    ]
    # drop Question.weight and redefine Survey.evaluator as an optional
    # CharField with updated help text
    operations = [
        migrations.RemoveField(
            model_name='question',
            name='weight',
        ),
        migrations.AlterField(
            model_name='survey',
            name='evaluator',
            field=models.CharField(help_text='Leave this blank for the first save. Enter values such as .5{1}+.5{2} for two equally weighted questions.', blank=True, max_length=200),
        ),
    ]
| mikelaughton/harold | polls/migrations/0012_auto_20160804_0005.py | 0012_auto_20160804_0005.py | py | 630 | python | en | code | 0 | github-code | 36 |
32846235212 | #Goal of this project is to make a song that we like on youtube go directly to our spotify "liked youtube songs" playlist
""" STEPS
1 - Log into youtube
2 - Grab our playlist
3 - Create a new playlist
4 - Search the song
5 - Add the song to the spotify playlist
"""
import json
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import requests
import youtube_dl
from exceptions import ResponseException
from userData import spotyId,spotyToken
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
from youtube_title_parse import get_artist_title
#print(spotifyUser.token)
class CreatePlaylist:
    """Copy songs from a YouTube playlist into a new Spotify playlist.

    Workflow (see add_song): read playlist items from the YouTube Data API,
    parse artist/title from each video title, look each track up on Spotify,
    create a playlist, and add every matched track to it.
    """
    def __init__(self):
        #self.youtube_client = self.yt_client()
        # video_title -> {youtube_url, song_name, artist, spotify_uri}
        self.all_song_info = {}
    #1 - Log into youtube
    def yt_client(self):
        """Authenticate via the OAuth console flow and return a YouTube Data
        API client. (Currently unused: __init__ no longer calls it.)"""
        # Disable OAuthlib's HTTPS verification when running locally.
        # *DO NOT* leave this option enabled in production.
        os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
        api_service_name = "youtube"
        api_version = "v3"
        client_secrets_file = "client_secret.json"
        # Get credentials and create an API client
        scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
        flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
            client_secrets_file, scopes)
        credentials = flow.run_console()
        # from the Youtube DATA API
        youtube_client = googleapiclient.discovery.build(
            api_service_name, api_version, credentials=credentials)
        return youtube_client
    #2 - Grab our playlist
    def get_ytplaylist(self):
        """Fetch up to 25 items from the hard-coded playlist and record
        title, artist, and Spotify URI for each in self.all_song_info."""
        os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
        api_service_name = "youtube"
        api_version = "v3"
        client_secrets_file = "client_secret.json"
        # Get credentials and create an API client
        # (uses the module-level `scopes` list, unlike yt_client above)
        flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
            client_secrets_file, scopes)
        credentials = flow.run_console()
        youtube = googleapiclient.discovery.build(
            api_service_name, api_version, credentials=credentials)
        # maxResults=25 means only the first 25 playlist items are processed;
        # no pagination is performed.
        request = youtube.playlistItems().list(
            part="snippet,contentDetails",
            maxResults=25,
            playlistId="PLQ_99qrIfCg3Mm7SDtxHfIBMt7aUkIDZD"
        )
        response = request.execute()
        for item in response["items"]:
            video_title = item["snippet"]["title"]
            # NOTE(review): item["id"] here is the playlist-item ID, not the
            # video ID, so this URL is likely wrong; the video ID would be
            # item["contentDetails"]["videoId"]. Verify against the API.
            youtube_url = "https://www.youtube.com/watch?v={}".format(
                item["id"])
            print("\n\n\n")
            #print(video_title)
            #print(youtube_url)
            # use youtube_dl to collect the song name & artist name
            #video = youtube_dl.YoutubeDL({}).extract_info(
                #'https://www.youtube.com/watch?v=dPhwbZBvW2o', download=False)
            artist, title = get_artist_title(video_title)
            #print(artist)
            #print(title)
            if title is not None and artist is not None:
                # save all important info and skip any missing song and artist
                self.all_song_info[video_title] = {
                    "youtube_url": youtube_url,
                    "song_name": title,
                    "artist": artist,
                    # add the uri, easy to get song to put into playlist
                    "spotify_uri": self.search_song(title, artist)
                }
            #print(response)
            print("\n\n\n")
            #print(video_title)
    #3 - Create a new playlist
    def new_spotifyplaylist(self):
        """Create a public Spotify playlist for the user and return its ID."""
        request_body = json.dumps({
            "name": "Youtube to Spotify playlist",
            "description": "Playlist of a program that I did in python that picks my songs from a youtube playlist, search them and add to this playlist :) ",
            "public": True
        })
        print(request_body)
        query = f"https://api.spotify.com/v1/users/{spotyId}/playlists"
        response = requests.post(
            url=query,
            data=request_body,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {spotyToken}"
            }
        )
        print(response)
        response_json = response.json()
        # playlist id
        return response_json["id"]
    #4 - Search the song
    def search_song(self,song,artist):
        """Search Spotify for the track and return the URI of the first hit.

        NOTE(review): `song` and `artist` are interpolated into the URL
        without percent-encoding (consider urllib.parse.quote), and
        `songs[0]` raises IndexError when the search returns no results.
        """
        query = "https://api.spotify.com/v1/search?query=track%3A{}+artist%3A{}&type=track&offset=0&limit=20".format(
            song,
            artist
        )
        response = requests.get(
            query,
            headers={
                "Content-Type":"application/json",
                "Authorization":"Bearer {}".format(spotyToken)
            }
        )
        response_json = response.json()
        songs = response_json["tracks"]["items"]
        #first song only
        uri = songs[0]["uri"]
        return uri
    #5 - Add the song to the spotify playlist
    def add_song(self):
        """End-to-end run: collect songs, create the playlist, add all URIs.

        Raises ResponseException unless Spotify answers 201 Created.
        """
        # populate dictionary with our liked songs
        self.get_ytplaylist()
        # collect all of uri
        uris = [info["spotify_uri"]
                for song, info in self.all_song_info.items()]
        # create a new playlist
        playlist_id = self.new_spotifyplaylist()
        # add all songs into new playlist
        request_data = json.dumps(uris)
        query = "https://api.spotify.com/v1/playlists/{}/tracks".format(
            playlist_id)
        response = requests.post(
            query,
            data=request_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": "Bearer {}".format(spotyToken)
            }
        )
        # check for valid response status
        if response.status_code != 201:
            raise ResponseException(response.status_code)
        response_json = response.json()
        return response_json
if __name__ == '__main__':
    # end-to-end run: read the YouTube playlist, create a Spotify playlist,
    # and add every matched track to it
    cp = CreatePlaylist()
    cp.add_song()
| GiovaniCenta/YoutubetoSpotify | spotyoutube.py | spotyoutube.py | py | 6,243 | python | en | code | 0 | github-code | 36 |
30176950739 | from functools import cache
def najcenejsa_pot(mat):
    """Cheapest monotone (down/right) path through the cost matrix.

    Returns ``(total_cost, path)`` where the path is encoded with the
    characters '↓' and '→' and a terminating 'o'.
    """
    visine, sirine = len(mat), len(mat[0])

    @cache
    def najcenejsa_od(vrstica, stolpec):
        # Base case: the bottom-right cell terminates every path.
        if vrstica == visine - 1 and stolpec == sirine - 1:
            return (mat[-1][-1], "o")
        kandidati = []
        if vrstica < visine - 1:
            cena, pot = najcenejsa_od(vrstica + 1, stolpec)
            kandidati.append((cena, "↓" + pot))
        if stolpec < sirine - 1:
            cena, pot = najcenejsa_od(vrstica, stolpec + 1)
            kandidati.append((cena, "→" + pot))
        # Tuple-min: cheapest cost first, path string breaks ties.
        najcena, najpot = min(kandidati)
        return mat[vrstica][stolpec] + najcena, najpot

    return najcenejsa_od(0, 0)
# Sample 5x5 cost matrix used to exercise najcenejsa_pot.
mat = [[131, 673, 234, 103, 18],
       [201, 96, 342, 965, 150],
       [630, 803, 746, 422, 111],
       [537, 699, 497, 121, 956],
       [805, 732, 524, 37, 331]]
def search_kth_simple(a1, a2, k_req):
    """Return the k_req-th smallest element (1-based) of two sorted lists.

    Linear merge: advance the pointer holding the smaller head until
    k_req elements have been consumed.  O(len(a1) + len(a2)).
    Returns None when k_req exceeds the total number of elements.
    """
    i, j, k = 0, 0, 0
    # Merge while both lists still have elements.
    while i < len(a1) and j < len(a2) and k < k_req:
        if a1[i] < a2[j]:
            if k + 1 == k_req:
                return a1[i]
            i += 1
        else:
            if k + 1 == k_req:
                return a2[j]
            j += 1
        k += 1
    # Drain the remainder of a1, if any.
    while i < len(a1) and k < k_req:
        if k + 1 == k_req:
            return a1[i]
        i += 1
        k += 1
    # Drain the remainder of a2.
    # BUG FIX: the original bounded this loop by len(a1) instead of len(a2),
    # so the answer was lost whenever it lay in a2's tail.
    while j < len(a2) and k < k_req:
        if k + 1 == k_req:
            return a2[j]
        j += 1
        k += 1
def search_kth(a1, a2, k):
    """Return the k-th smallest element (1-based) of two sorted lists.

    Classic divide-and-conquer: compare the front blocks of both lists and
    discard the block that is provably within the k smallest elements.
    O(log(len(a1)) + log(len(a2))).

    BUG FIX: the original adjusted k by ``mid - 1`` after slicing ``a[mid:]``
    (which removes ``mid`` elements), producing wrong answers and, when
    ``mid`` reached 0, incrementing k and looping forever.
    """
    # Keep a1 as the shorter list so the block sizes below stay in bounds.
    if len(a1) > len(a2):
        a1, a2 = a2, a1
    if not a1:
        return a2[k - 1]
    if k == 1:
        return min(a1[0], a2[0])
    # Take up to k//2 elements from a1's front, the rest of the budget from a2.
    m1 = min(len(a1), k // 2)
    m2 = min(len(a2), k - m1)
    if a1[m1 - 1] < a2[m2 - 1]:
        # a1[:m1] are all among the k smallest: discard them and shrink k.
        return search_kth(a1[m1:], a2, k - m1)
    else:
        return search_kth(a1, a2[m2:], k - m2)
if __name__ == "__main__":
    # Demo: the 6th smallest of the merged lists is 7.
    a1 = [2, 3, 6, 7, 9]
    a2 = [1, 4, 8, 10]
    print(search_kth_simple(a1, a2, 6))
    print(search_kth(a1, a2, 6))
| stgleb/algorithms-and-datastructures | advanced/search_in_two_sorted.py | search_in_two_sorted.py | py | 1,299 | python | en | code | 0 | github-code | 36 |
4014271672 | # 문제 출처 : https://programmers.co.kr/learn/courses/30/lessons/12973
from collections import deque
def solution(s):
    """Return 1 if *s* can be fully erased by repeatedly deleting adjacent
    equal pairs, else 0 (programmers.co.kr lesson 12973)."""
    stack = []
    for ch in s:
        # A character equal to the stack top cancels it; otherwise push it.
        if stack and stack[-1] == ch:
            stack.pop()
        else:
            stack.append(ch)
    return 1 if not stack else 0
| ThreeFive85/Algorithm | Programmers/level2/removePair/remove_pair.py | remove_pair.py | py | 442 | python | en | code | 1 | github-code | 36 |
40212915722 | from chatbot import Chatbot
# Scripted conversation fed to the chatbot; the 'end' message terminates it.
messages=[
    'hi',
    'i want to know something about the market',
    'what about AAPL today',
    'volume',
    'the open price of TSLA and GOOG, please.',
    'the interest of ABCDEF',
    'MSFT',
    'end'
]
def static_test(interpreter):
    """Drive a fresh Chatbot built on *interpreter* through the canned messages."""
    bot = Chatbot(interpreter)
    for message in messages:
        bot.respond(message)
2476057039 | # 🚨 Don't change the code below 👇
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is their name? \n")
# 🚨 Don't change the code above 👆
# Write your code below this line 👇
def true_count(name):
    """Count how many times the letters of 'true' occur in *name*.

    e.g. "treasure" -> t:1 + r:2 + u:1 + e:2 = 6.  Case-sensitive; callers
    lowercase the input first.
    """
    return sum(name.count(letter) for letter in "true")
def love_count(name):
    """Count how many times the letters of 'love' occur in *name*.

    Case-sensitive; callers lowercase the input first.
    """
    return sum(name.count(letter) for letter in "love")
combined_names = name1 + name2
# Use distinct names for the results so the counting functions are not
# shadowed by integers (the original rebound true_count/love_count here).
t_score = true_count(combined_names.lower())
l_score = love_count(combined_names.lower())
# Concatenate the two digit counts, e.g. 5 and 4 -> 54.
love_score = int(str(t_score) + str(l_score))
if love_score < 10 or love_score > 90:
    print(f"Your score is {love_score}, you go together like coke and mentos.")
elif 40 <= love_score <= 50:
    print(f"Your score is {love_score}, you are alright together.")
else:
    print(f"Your score is {love_score}.")
41844180206 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def oddEvenList(self, head):
        """
        Regroup a singly linked list so the odd-indexed nodes (1st, 3rd, ...)
        come first, followed by the even-indexed ones, preserving relative
        order within each group.  O(n) time, O(1) extra space.

        :type head: ListNode
        :rtype: ListNode
        """
        # Lists with fewer than three nodes are already in odd/even order.
        if not head or not head.next or not head.next.next:
            return head
        odd_tail = head
        even_head = even_tail = head.next
        node = even_head.next
        take_odd = True
        while node:
            if take_odd:
                odd_tail.next = node
                odd_tail = node
            else:
                even_tail.next = node
                even_tail = node
            take_odd = not take_odd
            node = node.next
        # Stitch the even sublist after the odd one and terminate the list.
        odd_tail.next = even_head
        even_tail.next = None
        return head
8754917915 | # -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
from odoo.exceptions import UserError
from odoo.osv.expression import NEGATIVE_TERM_OPERATORS, TERM_OPERATORS_NEGATION, TRUE_LEAF, FALSE_LEAF
from odoo.tools.safe_eval import safe_eval
import copy
# 100,000,000 ids should be enough for products; the digits above this
# threshold encode the supplier id (local id = -(supplier_id * DATASTORE_IND + remote_id)).
DATASTORE_IND = 100000000
def _of_datastore_is_computed_field(model_obj, field_name):
    """Return True when *field_name* of *model_obj* must be computed locally
    instead of being fetched from the supplier datastore.

    Non-related computed fields are local; company-dependent fields never
    are; a related field is local as soon as any link of its relation chain
    is itself computed or leaves the centralized models.
    """
    env = model_obj.env
    field = model_obj._fields[field_name]
    if not field.compute:
        return False
    if field.company_dependent:
        return False
    if not field._description_related:
        return True
    # Special handling of related fields, walking the relation chain
    if model_obj._of_datastore_is_computed_field(field._description_related[0]):
        return True
    f = model_obj._fields[field._description_related[0]]
    for f_name in field._description_related[1:]:
        obj = env[f.comodel_name]
        if not hasattr(obj, '_of_datastore_is_computed_field'):
            # The referenced model is not a centralized model
            return True
        if obj._of_datastore_is_computed_field(f_name):
            return True
        f = obj._fields[f_name]
    return False
class OfImportProductCategConfig(models.Model):
    """Adds a flag telling whether this category config is matched against a centralized database."""
    _inherit = 'of.import.product.categ.config'
    # True when the category mapping comes from a supplier datastore match.
    is_datastore_matched = fields.Boolean('Is centralized')
class OfDatastoreCentralized(models.AbstractModel):
    """Abstract mixin for records mirrored from a supplier's central database.

    Centralized records are addressed locally with negative ids encoded as
    -(supplier_id * DATASTORE_IND + remote_id); reads are proxied to the
    supplier database and memorized in ``of.datastore.cache``.
    """
    _name = 'of.datastore.centralized'
    # Id of the matching record on the supplier (central) database.
    of_datastore_res_id = fields.Integer(string="ID on supplier database", index=True, copy=False)
    @classmethod
    def _browse(cls, ids, env, prefetch=None):
        """Browse override that warms the local cache for datastore (negative-id) records.

        It is important to override _browse() and not browse(), because
        _browse() is what fields.Many2one.convert_to_record() calls.
        """
        result = super(OfDatastoreCentralized, cls)._browse(ids, env, prefetch=prefetch)
        for i in ids:
            if i < 0:
                # Call super() to avoid an infinite recursive call
                record = super(OfDatastoreCentralized, cls)._browse((i, ), env, prefetch=prefetch)
                if not record._cache:
                    env['of.datastore.cache'].apply_values(record)
        return result
    @api.model
    def _get_datastore_unused_fields(self):
        u"""
        Return the list of fields that must NOT be fetched from the supplier
        database (e.g. stock quantities).
        """
        cr = self._cr
        # We want none of the fields added by the stock, mrp, purchase modules
        cr.execute(
            "SELECT f.name "
            "FROM ir_model_data AS d "
            "INNER JOIN ir_model_fields AS f "
            "  ON d.res_id=f.id "
            "WHERE d.model = 'ir.model.fields' "
            "  AND f.model = %s "
            "  AND d.module IN ('mrp','procurement','stock')",
            (self._name, ))
        res = [row[0] for row in cr.fetchall()]
        # Add some specific fields
        res += [
            'invoice_policy',
            'purchase_method',
            # Fields whose value is forced manually by the import
            'of_import_categ_id',
            'of_import_cout',
            'of_import_price',
            'of_import_remise',
            # Note fields
            'description_sale',  # Description for quotations
            'description_purchase',  # Description for suppliers
            'description_picking',  # Description for picking
            # Price structure fields
            'of_purchase_transport',
            'of_sale_transport',
            'of_sale_coeff',
            'of_other_logistic_costs',
            'of_misc_taxes',
            'of_misc_costs',
            # Inventory location fields
            'property_stock_procurement',
            'property_stock_production',
            'property_stock_inventory',
            'of_product_posx',
            'of_product_posy',
            'of_product_posz',
        ]
        # We do not want one2many or many2many fields either
        # (seller_ids, packaging_ids, variant-related fields...)
        # Kit lines are kept
        for field_name, field in self._fields.iteritems():
            if field.type in ('one2many', 'many2many'):
                if field_name != 'kit_line_ids' and field_name not in res:
                    res.append(field_name)
        return res
    @api.model
    def _of_datastore_is_computed_field(self, field_name):
        """Return True if *field_name* must be computed locally (see module-level helper)."""
        if field_name == 'of_seller_name':
            # The supplier is directly defined by the brand
            return True
        if field_name == 'of_theoretical_cost':
            # Special case: the theoretical cost is fetched from the datastore
            return False
        return _of_datastore_is_computed_field(self, field_name)
    @api.model
    def _of_get_datastore_computed_fields(self):
        u"""
        Return the list of fields that must not be fetched from the supplier
        but computed locally instead (e.g. the price field, which depends on
        the pricelist found in the context).
        """
        return [field for field in self._fields if self._of_datastore_is_computed_field(field)]
    @api.multi
    def _of_read_datastore(self, fields_to_read, create_mode=False):
        u"""
        Read the product data from their supplier database.
        @param ids: modified product ids, as negative values
        @param create_mode: In create mode, missing fields are not returned (instead of being filled with False or [])
                            In create mode, seller_ids is filled in
        """
        supplier_obj = self.env['of.datastore.supplier']
        product_tmpl_obj = self.env['product.template']
        result = []
        # Some fields are required to compute other fields:
        # - brand_id : the brand, from which the reading rules are extracted
        # - categ_id : the category, which may match more specific reading rules in the brand
        # - product_tmpl_id : the base template, used for of_tmpl_datastore_res_id
        # - default_code : the product reference, used for of_seller_product_code
        # - uom_id and uom_po_id : the product's unit of measure and purchase unit of measure,
        #     used to compute purchase/sale prices
        # - list_price : the product's purchase price,
        #     from which the sale price and cost are computed
        # In create mode (create_mode == True), these fields are mandatory and therefore already in fields_to_read.
        # For a classic read (create_mode == False), we test whether at least one field of fields_to_read
        # requires a remote access (with self._get_datastore_unused_fields()).
        # If so, we load fields_to_read with every field of the current object in order to populate
        # our cache and thereby avoid multiplying remote accesses.
        unused_fields = self._get_datastore_unused_fields() + self._of_get_datastore_computed_fields()
        if not create_mode:
            # For a classic read, we want to cache every field to avoid future remote accesses
            for field in fields_to_read:
                if field not in unused_fields:
                    fields_to_read += [field for field in self._fields if field not in fields_to_read]
                    break
        if 'id' in fields_to_read:  # The id field will be added anyway; keeping it would generate errors
            fields_to_read.remove('id')
        # Datastore product ids grouped by supplier
        datastore_product_ids = {}
        # Split the products per supplier
        for full_id in self._ids:
            supplier_id = -full_id / DATASTORE_IND
            datastore_product_ids.setdefault(supplier_id, []).append((-full_id) % DATASTORE_IND)
        # Fields with specific values
        fields_defaults = [
            ('of_datastore_supplier_id', lambda: create_mode and supplier_id or supplier.sudo().name_get()[0]),
            ('of_datastore_res_id', lambda: vals['id']),
            ('of_seller_pp_ht', lambda: vals['of_seller_pp_ht']),
            ('of_seller_product_category_name', lambda: vals['categ_id'][1]),
            ('of_tmpl_datastore_res_id', lambda: vals['product_tmpl_id'][0]),
            ('description_norme', lambda: product.description_norme or vals['description_norme']),
            ('of_template_image', lambda: vals.get('of_template_image') or product.image),
            # Beware, the order of the next two lines matters
            ('of_seller_product_code', lambda: vals['default_code']),
            ('default_code', lambda: default_code_func[brand](vals['default_code'])),
        ]
        fields_defaults = [(k, v) for k, v in fields_defaults if k in fields_to_read]
        if create_mode:
            # Add the fields required to create the product_supplierinfo
            for field in ('of_seller_delay',):
                if field not in fields_to_read:
                    fields_to_read.append(field)
            # Creation of the supplier relation
            fields_defaults.append(('seller_ids', lambda: [(5, ), (0, 0, {
                'name': brand.partner_id.id,
                'min_qty': 1,
                'delay': vals['of_seller_delay'],
            })]))
        datastore_fields = [field for field in fields_to_read if field not in unused_fields]
        m2o_fields = [
            field for field in datastore_fields
            if self._fields[field].type == 'many2one' and
            field != 'of_datastore_supplier_id'
        ]
        o2m_fields = ['kit_line_ids']
        for supplier_id, product_ids in datastore_product_ids.iteritems():
            supplier_value = supplier_id * DATASTORE_IND
            if not datastore_fields:
                if create_mode:
                    result += [{'id': product_id} for product_id in product_ids]
                else:
                    # No access to the central database: fill in the id and set everything else to False or []
                    datastore_defaults = {
                        field: [] if self._fields[field].type in ('one2many', 'many2many') else False
                        for field in fields_to_read
                        if field != 'id'
                    }
                    result += [
                        dict(datastore_defaults, id=-(product_id + supplier_value))
                        for product_id in product_ids]
                continue
            supplier = supplier_obj.browse(supplier_id)
            client = supplier.of_datastore_connect()
            ds_product_obj = supplier_obj.of_datastore_get_model(client, self._name)
            datastore_product_data = supplier_obj.of_datastore_read(
                ds_product_obj, product_ids, datastore_fields, '_classic_read')
            if not create_mode:
                # Fields missing from the supplier table are not returned, without raising errors.
                # They must therefore be given a default value
                missing_fields = [field for field in fields_to_read if field not in datastore_product_data[0]]
                # Value filled in 2 steps
                # 1 : put an empty value (False, or [] for x2many fields)
                datastore_defaults = {
                    field: [] if self._fields[field].type in ('one2many', 'many2many') else False
                    for field in missing_fields
                }
                # 2 : fill in the values found by the default_get function
                datastore_defaults.update(product_tmpl_obj.default_get(missing_fields))
            # Data processing
            match_dicts = {}
            match_dicts['brand_id'] = {brand.datastore_brand_id: brand for brand in supplier.brand_ids}
            # Compute the function to apply to the reference of each brand's products
            if 'default_code' in fields_to_read:
                default_code_func = supplier.get_product_code_convert_func(client)
            datastore_read_m2o_fields = [field for field in m2o_fields if field in datastore_product_data[0]]
            field_res_ids = {field: set() for field in datastore_read_m2o_fields}
            for vals in datastore_product_data:
                # --- Preliminary computations ---
                brand = match_dicts['brand_id'][vals['brand_id'][0]]
                # Adding a search per product is costly.
                # Too bad for the access rules: run a raw SQL query
                if self._name == 'product.template':
                    self._cr.execute(
                        "SELECT id FROM product_template "
                        "WHERE brand_id = %s AND of_datastore_res_id = %s",
                        (brand.id, vals['id']))
                else:
                    self._cr.execute(
                        "SELECT t.id FROM product_product p "
                        "INNER JOIN product_template t ON t.id=p.product_tmpl_id "
                        "WHERE t.brand_id = %s AND p.of_datastore_res_id = %s",
                        (brand.id, vals['id']))
                rows = self._cr.fetchall()
                product = product_tmpl_obj.browse(rows and rows[0][0])
                categ_name = vals['categ_id'][1]
                obj_dict = {}
                # Compute the specific values
                for field, val in fields_defaults:
                    vals[field] = val()
                if create_mode:
                    del vals['id']
                else:
                    vals['id'] = -(vals['id'] + supplier_value)
                    vals.update(datastore_defaults)
                # ---- Many2one fields ---
                for field in datastore_read_m2o_fields:
                    # Convert the many2one for the current database
                    if vals[field]:
                        obj = self._fields[field].comodel_name
                        res = brand.datastore_match(
                            client, obj, vals[field][0], vals[field][1], product, match_dicts, create=create_mode)
                        if field in ('categ_id', 'uom_id', 'uom_po_id'):
                            obj_dict[field] = res
                        if res:
                            if res.id < 0:
                                # Value from the central database
                                # Normally only used for product_tmpl_id
                                vals[field] = (res.id, vals[field][1])
                            else:
                                vals[field] = res.id
                                field_res_ids[field].add(res.id)
                        else:
                            vals[field] = False
                # --- x2many fields ---
                for field in o2m_fields:
                    if field not in datastore_fields:
                        continue
                    if not vals[field]:
                        continue
                    line_ids = [-(line_id + supplier_value) for line_id in vals[field]]
                    if create_mode:
                        # Prepare the lines
                        obj = self._fields[field].comodel_name
                        obj_obj = self.env[obj]
                        vals[field] = [(5, )] + [(0, 0, line.copy_data()[0]) for line in obj_obj.browse(line_ids)]
                    else:
                        # Conversion to datastore ids
                        # Iterate with an index so as not to recreate the list
                        vals[field] = line_ids
                # --- Special fields ---
                vals['of_datastore_has_link'] = bool(product)
                # Purchase/sale prices
                # Note: 'standard_price' is popped from vals because in some cases we do not want to update it
                vals.update(brand.compute_product_price(
                    vals['of_seller_pp_ht'], categ_name, obj_dict['uom_id'], obj_dict['uom_po_id'], product=product,
                    price=vals['of_seller_price'], remise=None, cost=vals.pop('standard_price', None),
                    based_on_price=vals['of_is_net_price']))
                # Compute the margin and the discount
                if 'of_seller_remise' in fields_to_read:
                    vals['of_seller_remise'] =\
                        vals['of_seller_pp_ht'] and\
                        (vals['of_seller_pp_ht'] - vals['of_seller_price']) * 100 / vals['of_seller_pp_ht']
                if 'marge' in fields_to_read:
                    vals['marge'] =\
                        vals['list_price'] and\
                        (vals['list_price'] - vals.get('standard_price', 0)) * 100 / vals['list_price']
                # Removal of unwanted values
                # Removal disabled ... after all, it is computed now, might as well keep it in cache
                # for field in added_fields:
                #     vals.pop(field, False)
            if not create_mode:
                # Conversion to the many2one format (id, name)
                for field, res_ids in field_res_ids.iteritems():
                    if not res_ids:
                        continue
                    obj = self._fields[field].comodel_name
                    res_obj = self.env[obj].browse(res_ids)
                    res_names = {v[0]: v for v in res_obj.sudo().name_get()}
                    for vals in datastore_product_data:
                        # Two-step test because in python, False is an instance of int
                        if vals.get(field) and isinstance(vals[field], (int, long)):
                            vals[field] = res_names[vals[field]]
            result += datastore_product_data
        return result
    def _recompute_check(self, field):
        """Force stored computed fields to always be recomputed for centralized (negative-id) records."""
        result = super(OfDatastoreCentralized, self)._recompute_check(field)
        res1 = self.filtered(lambda record: record.id < 0)
        if result and res1:
            res1 |= result
        # When the recordset is empty, result is returned, which is None
        return res1 or result
    @api.multi
    def read(self, fields=None, load='_classic_read'):
        """Read override: positive ids go through the standard read; datastore
        (negative-id) records are served from of.datastore.cache, falling back
        to a remote read for records/fields missing from the cache."""
        new_ids = [i for i in self._ids if i > 0]
        # Products on the current database
        res = super(OfDatastoreCentralized, self.browse(new_ids)).read(fields, load=load)
        if len(new_ids) != len(self._ids):
            cache_obj = self.env['of.datastore.cache']
            # If fields is empty, retrieve every accessible field of the object (copied from BaseModel.read())
            self.check_access_rights('read')
            fields = self.check_field_access_rights('read', fields)
            fields = set(fields)
            if 'id' in fields:
                fields.remove('id')
            obj_fields = [self._fields[field] for field in fields]
            use_name_get = (load == '_classic_read')
            # Handle product templates separately, otherwise of_read_datastore would be called
            # once per model for name_get().
            read_tmpl = self._fields.get('product_tmpl_id') in obj_fields
            tmpl_values = {}
            # Split the ids per central database
            datastore_product_ids = {}
            for full_id in self._ids:
                if full_id < 0:
                    datastore_product_ids.setdefault(-full_id / DATASTORE_IND, []).append(full_id)
            res = {vals['id']: vals for vals in res}
            for supplier_id, datastore_ids in datastore_product_ids.iteritems():
                with self.env['of.datastore.cache']._get_cache_token(supplier_id) as of_cache:
                    # Check the data against our cache
                    cached_products = of_cache.search([('model', '=', self._name), ('res_id', 'in', datastore_ids)])
                    # Products not in the cache must be read
                    new_ids = set(datastore_ids) - set(cached_products.mapped('res_id'))
                    # If at least one object is missing from the cache, all fields must be read
                    new_fields = set(fields) if new_ids else set()
                    # Products with at least one field missing from the cache must also be read, at least for that field
                    for cached_product in cached_products:
                        product_data = safe_eval(cached_product.vals)
                        missing_fields = fields - set(product_data.keys())
                        if missing_fields:
                            new_fields |= missing_fields
                            new_ids.add(cached_product.res_id)
                        if read_tmpl and 'product_tmpl_id' in product_data:
                            tmpl_values[product_data['product_tmpl_id'][0]] = product_data['product_tmpl_id']
                    if new_ids:
                        # Read the data from the central database
                        data = self.browse(new_ids)._of_read_datastore(list(new_fields), create_mode=False)
                        # Store the data in our cache
                        of_cache.store_values(self._name, data)
                        if read_tmpl and 'product_tmpl_id' in new_fields:
                            for d in data:
                                tmpl_values[d['product_tmpl_id'][0]] = d['product_tmpl_id']
                    for obj in self.browse(datastore_ids):
                        # The values must be loaded into the cache manually: they only load
                        # automatically when the cache is empty, which is no longer the case here.
                        cache_obj.apply_values(obj)
                        # Filter the fields to retrieve and convert them to the read format
                        vals = {
                            field.name: field.convert_to_read(obj[field.name], self, use_name_get)
                            for field in obj_fields
                        }
                        vals['id'] = obj.id
                        res[obj.id] = vals
                        if read_tmpl:
                            vals['product_tmpl_id'] = tmpl_values[obj.product_tmpl_id.id]
            # Put the results back in the right order
            res = [res[i] for i in self._ids]
        return res
    @api.model
    def of_datastore_update_domain(self, domain):
        u"""
        Check whether the domain targets a search on a supplier database.
        If so, return the search domain adapted for the supplier database.
        @requires: If args contains a tuple whose first element is 'ds_supplier_search_id',
            the second element must be '='
        @return: Supplier brands (of.datastore.supplier) or False otherwise, followed by the new search domain
        """
        if 'of_datastore_product_search' not in domain:
            return False, domain
        domain = [copy.copy(arg) for arg in domain if arg != 'of_datastore_product_search']
        # Look up the brands
        brand_domain = []
        for arg in domain:
            if not isinstance(arg, (list, tuple)):
                continue
            if arg[0] == 'brand_id':
                operator, right = arg[1], arg[2]
                # resolve string-based m2o criterion into IDs
                if isinstance(right, basestring) or\
                        right\
                        and isinstance(right, (tuple, list))\
                        and all(isinstance(item, basestring) for item in right):
                    brand_domain.append(('name', operator, right))
                else:
                    brand_domain.append(('id', operator, right))
        brands = self.env['of.product.brand'].search(brand_domain)
        ds_supplier = brands.mapped('datastore_supplier_id')
        if not ds_supplier:
            if brands:
                raise UserError(_('Selected brands are not centralized : %s') % ", ".join(brands.mapped('name')))
            return False, [FALSE_LEAF]
        if len(ds_supplier) > 1:
            raise UserError(_('You must select one or several brands using the same centralized database '
                              '(provided by the same supplier).'))
        brands = brands.filtered('datastore_supplier_id')
        # Search for products not already saved locally
        if self._context.get('datastore_not_stored'):
            orig_ids = self.sudo().with_context(active_test=False)\
                .search([('brand_id', 'in', brands._ids), ('of_datastore_res_id', '!=', False)])\
                .mapped('of_datastore_res_id')
            domain.append(('id', 'not in', orig_ids))
        parse_domain = self._of_datastore_update_domain_item
        # Convert the fields
        for arg in domain:
            if not isinstance(arg, (list, tuple)):
                continue
            if arg[0].startswith('ds_'):
                arg[0] = arg[0][3:]
            elif arg[0] in ('categ_id', 'brand_id'):
                obj_name = self._fields[arg[0]].comodel_name
                new_arg = parse_domain(arg, self.env[obj_name])
                if new_arg:
                    arg[0], arg[1], arg[2] = new_arg
        return brands, domain
    @api.model
    def _of_datastore_update_domain_item(self, domain, obj):
        u""" Convert a domain element for use on the central database
        @type domain: Tuple (left, operator, right)
        @var obj: Object the domain applies to
        @type obj: Browse record
        @todo: Allow converting searches on product categories
        """
        left, operator, right = domain
        if obj._name == 'product.category':
            # A product category may have a different match depending on the brand or the product.
            # The conversion is complicated
            if isinstance(right, basestring) or \
                    right and isinstance(right, (tuple, list)) and all(isinstance(item, basestring) for item in right):
                return False
            elif isinstance(right, (int, long)) and right < 0:
                return domain
            else:
                return TRUE_LEAF
        if operator in NEGATIVE_TERM_OPERATORS:
            operator = TERM_OPERATORS_NEGATION[operator]
            new_operator = 'not in'
        else:
            new_operator = 'in'
        if isinstance(right, basestring) or \
                right and isinstance(right, (tuple, list)) and all(isinstance(item, basestring) for item in right):
            obj_domain = [('name', operator, right)]
        else:
            obj_domain = [('id', operator, right)]
        obj = obj.search(obj_domain)
        result = False
        if obj._name == 'of.product.brand':
            result = (left, new_operator, obj.mapped('datastore_brand_id'))
        return result
    @api.model
    def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        """read_group override: delegate to the supplier database when the domain
        carries the 'of_datastore_product_search' marker, re-prefixing the group
        domains so follow-up searches stay on the datastore."""
        brands, domain = self.of_datastore_update_domain(domain)
        # Search on the supplier database
        if brands:
            supplier = brands[0].datastore_supplier_id
            # E.g. if the database has a single centralized source, it may fetch the remote
            # products without any other search filter.
            # In that case we do not want the supplier's other brands
            domain = ['&', ('brand_id', 'in', brands.mapped('datastore_brand_id'))] + domain
            supplier_obj = self.env['of.datastore.supplier']
            # Run the query on the supplier database
            client = supplier.of_datastore_connect()
            if isinstance(client, basestring):
                # Connection to the supplier database failed
                raise UserError(u'Erreur accès '+supplier.db_name)
            ds_product_obj = supplier_obj.of_datastore_get_model(client, self._name)
            res = supplier_obj.of_datastore_read_group(ds_product_obj, domain, fields, groupby, offset, limit, orderby, lazy)
            for row in res:
                for arg in row['__domain']:
                    if isinstance(arg, (list, tuple)):
                        arg[0] = 'ds_' + arg[0]
                row['__domain'] = [
                    'of_datastore_product_search',
                    ('brand_id', 'in', brands.ids)
                ] + row['__domain']
        else:
            # Run the query on the current database
            res = super(OfDatastoreCentralized, self).read_group(domain, fields, groupby, offset, limit, orderby, lazy)
        return res
    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """_search override: run the search remotely when the domain carries the
        datastore marker, translating remote ids into local negative ids."""
        brands, args = self.of_datastore_update_domain(args)
        # Search on the supplier database
        if brands:
            supplier = brands[0].datastore_supplier_id
            # E.g. if the database has a single centralized source, it may fetch the remote
            # products without any other search filter.
            # In that case we do not want the supplier's other brands
            args = ['&', ('brand_id', 'in', brands.mapped('datastore_brand_id'))] + args
            supplier_obj = self.env['of.datastore.supplier']
            # Run the query on the supplier database
            client = supplier.of_datastore_connect()
            if isinstance(client, basestring):
                # Connection to the supplier database failed
                raise UserError(u'Erreur accès '+supplier.db_name)
            ds_product_obj = supplier_obj.of_datastore_get_model(client, self._name)
            res = supplier_obj.of_datastore_search(ds_product_obj, args, offset, limit, order, count)
            if not count:
                supplier_value = supplier.id * DATASTORE_IND
                res = [-(product_id + supplier_value) for product_id in res]
        else:
            # Run the query on the current database
            res = super(OfDatastoreCentralized, self)._search(
                args, offset=offset, limit=limit, order=order, count=count, access_rights_uid=access_rights_uid)
        return res
    @api.model
    def _of_datastore_name_search(self, res, brands, name, args, operator, limit):
        """Complete local name_search results *res* with matching products from
        the brands' central database (dynamic many2one searches only, limit=8)."""
        supplier = brands.mapped('datastore_supplier_id')
        if len(supplier) != 1:
            # The brands must be centralized, with a single central database at a time
            return res
        if limit != 8 or len(res) == limit:
            # Searching a supplier database is only done automatically for the dynamic
            # searches of many2one fields (limit=8)
            return res
        if len(res) == 7:
            # The 8th product is only used to know whether "More results" is displayed
            return res + [(False, '')]
        # Search the products in the central database
        client = supplier.of_datastore_connect()
        if isinstance(client, basestring):
            # Connection to the supplier database failed
            return res
        brands = brands.filtered('datastore_supplier_id')
        # Search for products not already saved locally
        orig_ids = self.with_context(active_test=False).search(
            [('brand_id', 'in', brands._ids),
             ('of_datastore_res_id', '!=', False)]).mapped('of_datastore_res_id')
        # Update the search parameters
        new_args = [('brand_id', 'in', brands.mapped('datastore_brand_id')),
                    ('id', 'not in', orig_ids)] + list(args or [])
        ds_product_obj = supplier.of_datastore_get_model(client, self._name)
        res2 = supplier.of_datastore_name_search(ds_product_obj, name, new_args, operator, limit-len(res))
        supplier_ind = DATASTORE_IND * supplier['id']
        default_code_func = supplier.get_product_code_convert_func(client)
        if len(brands) == 1:
            f = default_code_func[brands]
            res += [[-(pid + supplier_ind), '[' + f(pname[1:]) if pname.startswith('[') else pname]
                    for pid, pname in res2]
        else:
            brand_match = {brand.datastore_brand_id: brand for brand in supplier.brand_ids}
            ds_products_brand = supplier.of_datastore_read(ds_product_obj, zip(*res2)[0], ['brand_id'])
            # {key = product id on the central database: value = brand on the current database}
            ds_products_brand = {data['id']: brand_match[data['brand_id'][0]] for data in ds_products_brand}
            default_code_func = supplier.get_product_code_convert_func(client)
            res += [
                [
                    -(pid + supplier_ind),
                    '[' + default_code_func[ds_products_brand[pid]](pname[1:]) if pname.startswith('[') else pname
                ]
                for pid, pname in res2
            ]
        return res
class Property(models.Model):
    """ir.property override so company_dependent values also resolve for centralized products."""
    _inherit = 'ir.property'
    @api.model
    def get_multi(self, name, model, ids):
        """ Override to fetch a company_dependent value of a centralized product.
        """
        result = {}
        model_obj = self.env[model]
        if ids and hasattr(model_obj, '_of_read_datastore'):
            ds_ids = [i for i in ids if i < 0]
            if ds_ids:
                # Negative ids are served by the datastore; the rest go through super().
                ids = [i for i in ids if i >= 0]
                result = {d['id']: d[name] for d in model_obj.browse(ds_ids)._of_read_datastore([name])}
        result.update(super(Property, self).get_multi(name, model, ids))
        return result
35860587241 | from fastapi import FastAPI, Depends
from sqlalchemy.orm import Session
from typing import List, Optional
from database import SessionLocal, engine
import models, schemas, crud
# Create all tables declared on the SQLAlchemy Base (no-op if they already exist).
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
def get_db():
    """FastAPI dependency: yield a DB session and guarantee it is closed."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
# NOTE(review): the string below is a free-standing expression (a no-op),
# kept as an inline reminder of the domain models this API exposes.
'''
Organization
Employee
Theme
KwCategory
KeyWord
Meeting
ICS
Schedule
Entry
'''
@app.get("/")
async def index():
    """Health-check root endpoint."""
    return {"message": "Success"}
@app.get("/organizations", response_model=List[schemas.Organization])
async def read_users(
    limit: int = 100,
    db: Session = Depends(get_db),
    q_name: str = None
):
    """List organizations, optionally filtered by name, capped at *limit*.

    NOTE(review): the function name and local variable say "users" but the
    endpoint serves organizations — consider renaming for clarity.
    """
    users = crud.get_organizations(db=db, limit=limit, q_name=q_name)
    return users
# @app.get("/rooms", response_model=List[schemas.Room])
# async def read_rooms(skip: int =0, limit: int = 100, db: Session = Depends(get_db)):
# rooms = crud.get_rooms(db=db, skip=skip, limit=limit)
# return rooms
# @app.get("/bookings", response_model=List[schemas.Booking])
# async def read_bookings(skip: int =0, limit: int = 100, db: Session = Depends(get_db)):
# bookings = crud.get_bookings(db=db, skip=skip, limit=limit)
# return bookings
@app.post("/organizations", response_model=schemas.Organization)
async def create_organization(data: schemas.OrganizationCreate, db: Session = Depends(get_db)):
    """Create an organization from the posted payload."""
    organization = crud.create_organization(db=db, data=data)
    return organization
@app.post("/employees", response_model=schemas.Employee)
async def create_employee(data: schemas.EmployeeCreate, db: Session = Depends(get_db)):
    """Create an employee from the posted payload."""
    employee = crud.create_employee(db=db, data=data)
    return employee
@app.post("/themes", response_model=schemas.Theme)
async def create_theme(data: schemas.ThemeCreate, db: Session = Depends(get_db)):
    """Create a theme from the posted payload."""
    theme = crud.create_theme(db=db, data=data)
    return theme
@app.post("/kwcategories", response_model=schemas.KwCategory)
async def create_kwcategory(data: schemas.KwCategoryCreate, db: Session = Depends(get_db)):
    """Create a keyword category from the posted payload."""
    kwcategory = crud.create_kwcategory(db=db, data=data)
    return kwcategory
@app.post("/keywords", response_model=schemas.KeyWord)
async def create_keyword(data: schemas.KeyWordCreate, db: Session = Depends(get_db)):
    """Create a keyword from the posted payload."""
    keyword = crud.create_keyword(db=db, data=data)
    return keyword
@app.post("/meetings", response_model=schemas.Meeting)
async def create_meeting(data: schemas.MeetingCreate, db: Session = Depends(get_db)):
    """Create a meeting from the posted payload."""
    meeting = crud.create_meeting(db=db, data=data)
    return meeting
@app.post("/icss", response_model=schemas.ICS)
async def create_ics(data: schemas.ICSCreate, db: Session = Depends(get_db)):
    """Create an ICS record from the posted payload."""
    ics = crud.create_ics(db=db, data=data)
    return ics
@app.post("/schedules", response_model=schemas.Schedule)
async def create_schedule(data: schemas.ScheduleCreate, db: Session = Depends(get_db)):
    """Create a schedule from the posted payload."""
    schedule = crud.create_schedule(db=db, data=data)
    return schedule
@app.post("/entries", response_model=schemas.Entry)
async def create_entry(data: schemas.EntryCreate, db: Session = Depends(get_db)):
    """Create an entry from the posted payload."""
    entry = crud.create_entry(db=db, data=data)
    return entry
# @app.post("/users", response_model=schemas.User)
# async def create_users(user: schemas.UserCreate, db: Session = Depends(get_db)):
# user = crud.create_user(db=db, user=user)
# return user
# @app.post("/rooms", response_model=schemas.Room)
# async def create_rooms(room: schemas.RoomCreate, db: Session = Depends(get_db)):
# room = crud.create_room(db=db, room=room)
# return room
# @app.post("/bookings", response_model=schemas.Booking)
# async def create_bookings(booking: schemas.BookingCreate, db: Session = Depends(get_db)):
# booking = crud.create_booking(db=db, booking=booking)
# return booking
| ishi23/fastapi-streamlit | conf_app_test/sql_app/main.py | main.py | py | 3,816 | python | en | code | 0 | github-code | 36 |
27234809133 | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
class Window(QMainWindow):
    """Simple whiteboard: draws on an off-screen QImage which paintEvent blits
    to the window. Menus select brush size/color, eraser, fill, save, clear."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle('Online whiteboard')
        self.setGeometry(100, 100, 800, 900)
        # Off-screen canvas; all drawing happens here, not on the widget.
        self.image = QImage(self.size(), QImage.Format_ARGB32)
        # self.color tracks the current background/fill color (used by erase()).
        self.color = Qt.white
        self.image.fill(Qt.white)
        self.drawing = False        # True while the left button is held down
        self.brushSize = 2
        self.brushColor = Qt.black
        self.lastPoint = QPoint()   # previous mouse position while drawing
        # --- menu bar ---
        main_menu = self.menuBar()
        file_menu = main_menu.addMenu("File")
        b_size = main_menu.addMenu("Brush Size")
        b_color = main_menu.addMenu("Brush Color")
        b_erase = main_menu.addMenu("Eraser")
        b_back = main_menu.addMenu("Fill color")
        save_action = QAction("Save", self)
        save_action.setShortcut("Ctrl + S")
        file_menu.addAction(save_action)
        save_action.triggered.connect(self.save)
        clear_action = QAction("Clear", self)
        clear_action.setShortcut("Ctrl + C")
        file_menu.addAction(clear_action)
        clear_action.triggered.connect(self.clear)
        erase_action = QAction("Eraser", self)
        b_erase.addAction(erase_action)
        erase_action.triggered.connect(self.erase)
        # --- "Fill color" actions ---
        # NOTE(review): the `self.color = Qt.X` assignments between the QAction
        # constructions below are dead stores (only the last one, white, survives
        # until a fill_* slot runs) — they could be removed.
        fill_action = QAction("Blue", self)
        self.color = Qt.blue
        b_back.addAction(fill_action)
        fill_action.triggered.connect(self.fill_blue)
        fill_action = QAction("Red", self)
        self.color = Qt.red
        b_back.addAction(fill_action)
        fill_action.triggered.connect(self.fill_red)
        fill_action = QAction("Green", self)
        self.color = Qt.green
        b_back.addAction(fill_action)
        fill_action.triggered.connect(self.fill_green)
        fill_action = QAction("Black", self)
        self.color = Qt.black
        b_back.addAction(fill_action)
        fill_action.triggered.connect(self.fill_black)
        fill_action = QAction("Yellow", self)
        self.color = Qt.yellow
        b_back.addAction(fill_action)
        fill_action.triggered.connect(self.fill_yellow)
        fill_action = QAction("White", self)
        self.color = Qt.white
        b_back.addAction(fill_action)
        fill_action.triggered.connect(self.fill_white)
        # --- "Brush Size" actions ---
        pix_4 = QAction("4px", self)
        b_size.addAction(pix_4)
        pix_4.triggered.connect(self.pixel_4)
        pix_7 = QAction("7px", self)
        b_size.addAction(pix_7)
        pix_7.triggered.connect(self.pixel_7)
        pix_9 = QAction("9px", self)
        b_size.addAction(pix_9)
        pix_9.triggered.connect(self.pixel_9)
        pix_12 = QAction("12px", self)
        b_size.addAction(pix_12)
        pix_12.triggered.connect(self.pixel_12)
        pix_15 = QAction("15px", self)
        b_size.addAction(pix_15)
        pix_15.triggered.connect(self.pixel_15)
        # --- "Brush Color" actions ---
        black = QAction("Black", self)
        b_color.addAction(black)
        black.triggered.connect(self.black_color)
        green = QAction("Green", self)
        b_color.addAction(green)
        green.triggered.connect(self.green_color)
        yellow = QAction("Yellow", self)
        b_color.addAction(yellow)
        yellow.triggered.connect(self.yellow_color)
        red = QAction("Red", self)
        b_color.addAction(red)
        red.triggered.connect(self.red_color)
        blue = QAction("Blue", self)
        b_color.addAction(blue)
        blue.triggered.connect(self.blue_color)
        # NOTE(review): variable name `blue` is reused for the White action.
        blue = QAction("White", self)
        b_color.addAction(blue)
        blue.triggered.connect(self.white_color)
    def mousePressEvent(self, event):
        """Start a stroke on left-button press."""
        if event.button() == Qt.LeftButton:
            self.drawing = True
            self.lastPoint = event.pos()
    def mouseMoveEvent(self, event):
        """Extend the current stroke: draw a segment from the last point."""
        if (event.buttons() & Qt.LeftButton) & self.drawing:
            painter = QPainter(self.image)
            painter.setPen(QPen(self.brushColor, self.brushSize,
                    Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
            painter.drawLine(self.lastPoint, event.pos())
            self.lastPoint = event.pos()
            self.update()  # schedule a repaint so the new segment shows
    def mouseReleaseEvent(self, event):
        """Finish the stroke on left-button release."""
        if event.button() == Qt.LeftButton:
            self.drawing = False
    def paintEvent(self, event):
        """Blit the off-screen canvas onto the widget."""
        canvas_painter = QPainter(self)
        canvas_painter.drawImage(self.rect(), self.image, self.image.rect())
    def save(self):
        """Ask for a file name and save the canvas; no-op if the dialog is cancelled."""
        file_path, _ = QFileDialog.getSaveFileName(self, "Save Image", "",
                          "PNG(*.png);;JPEG(*.jpg *.jpeg);;All Files(*.*) ")
        if file_path == "":
            return
        self.image.save(file_path)
    def clear(self):
        """Reset the canvas to white."""
        self.color = Qt.white
        self.image.fill(Qt.white)
        self.update()
    # Brush-size slots.
    def pixel_4(self):
        self.brushSize = 4
    def pixel_7(self):
        self.brushSize = 7
    def pixel_9(self):
        self.brushSize = 9
    def pixel_12(self):
        self.brushSize = 12
    def pixel_15(self):
        self.brushSize = 15
    # Brush-color slots.
    def black_color(self):
        self.brushColor = Qt.black
    def green_color(self):
        self.brushColor = Qt.green
    def yellow_color(self):
        self.brushColor = Qt.yellow
    def red_color(self):
        self.brushColor = Qt.red
    def blue_color(self):
        self.brushColor = Qt.blue
    def white_color(self):
        self.brushColor = Qt.white
    def erase(self):
        # Erasing paints with the current background color.
        self.brushColor = self.color
    # Fill slots: remember the background color and flood the canvas with it.
    def fill_blue(self):
        self.color = Qt.blue
        self.image.fill(Qt.blue)
    def fill_black(self):
        self.color = Qt.black
        self.image.fill(Qt.black)
    def fill_yellow(self):
        self.color = Qt.yellow
        self.image.fill(Qt.yellow)
    def fill_green(self):
        self.color = Qt.green
        self.image.fill(Qt.green)
    def fill_red(self):
        self.color = Qt.red
        self.image.fill(Qt.red)
    def fill_white(self):
        self.color = Qt.white
        self.image.fill(Qt.white)
# Launch the whiteboard only when run as a script; a __main__ guard keeps
# importing this module (e.g. for tests) from opening a window.
if __name__ == "__main__":
    App = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(App.exec())
| Azhar-ka29/whiteboard | main.py | main.py | py | 6,169 | python | en | code | 0 | github-code | 36 |
19082484272 | import cv2
import sys
import numpy as np
class Context:
    """Small interactive OpenCV harness: named image buffers cycled with the
    'b'/'v' keys, lazily-created trackbars, key toggles, a run-once helper and
    an arbitrary key/value store, all driven by a HighGUI event loop."""

    def __init__(self):
        self.sliders = {}          # trackbar name -> {'old_value': int}
        self.toggles = {}          # key ordinal -> {'state', 'has_changed', 'callback'}
        self._redraw = False       # set to force an imshow on the next loop tick
        self.cur_buf_id = 0        # index of the buffer currently displayed
        self.buffers = []          # images, in creation order
        self.buffers_by_name = {}  # name -> (image, index into self.buffers)
        self._once = []            # keys already seen by once()
        self._store = {}           # arbitrary key/value store
        cv2.namedWindow('image')

    def once(self, key):
        """Return True the first time *key* is seen, False afterwards."""
        if key in self._once:
            return False
        self._once.append(key)
        return True

    def store(self, key, data):
        """Remember *data* under *key* (retrieve with load() or ctx[key])."""
        self._store[key] = data

    def load(self, key):
        """Return the value previously stored under *key* (KeyError if absent)."""
        return self._store[key]

    def redraw(self, *_):
        """Request a repaint; extra args let this double as an OpenCV callback."""
        self._redraw = True

    def add_buffer(self, name, shape=(), src=None):
        """Register image buffer *name*: use *src* if given, else zeros of *shape*.
        The new buffer becomes the currently displayed one. (Default changed from
        a mutable [] to an immutable (); np.zeros treats both identically.)"""
        if name not in self.buffers_by_name:
            img = src if src is not None else np.zeros(shape, np.uint8)
            self.buffers.append(img)
            self.cur_buf_id = len(self.buffers) - 1
            self.buffers_by_name[name] = (self.buffers[-1], self.cur_buf_id)

    def b(self, name):
        """Return the image registered under *name*."""
        return self.buffers_by_name[name][0]

    def __setitem__(self, key, value):
        """Replace an existing named buffer, or register a new one from *value*."""
        if key in self.buffers_by_name:
            id = self.buffers_by_name[key][1]
            self.buffers_by_name[key] = (value, id)
            self.buffers[id] = value
        else:
            self.add_buffer(key, src=value)

    def __getitem__(self, key):
        """Look up *key* first among buffers, then in the store; None if absent."""
        if key in self.buffers_by_name:
            return self.buffers_by_name[key][0]
        if key in self._store:
            # BUGFIX: return the requested entry; this previously returned
            # self._store['key'] (the literal string), raising KeyError.
            return self._store[key]
        return None

    def get_toggle(self, key, max_, callback, init=0):
        """Return (state, has_changed) for the toggle bound to character *key*,
        creating it on first use. State wraps to 0 when it exceeds *max_*."""
        key = ord(key)
        if key not in self.toggles:
            self.toggles[key] = {'state': init, 'has_changed': True, 'callback': callback}
        ko = self.toggles[key]
        ko['callback'] = callback
        has_changed = ko['has_changed']
        ko['has_changed'] = False
        if ko['state'] > max_:
            ko['state'] = 0
        return (ko['state'], has_changed)

    def got_key(self, key):
        """Advance the toggle bound to *key* (if any) and handle the built-in
        'b'/'v' buffer-cycling keys."""
        # Ensure the buffer-cycling toggles exist before dispatching.
        (_, _) = self.get_toggle('b', 1, None, init=0)
        (_, _) = self.get_toggle('v', 1, None, init=0)
        if key in self.toggles:
            ko = self.toggles[key]
            ko['state'] += 1
            ko['has_changed'] = True
            if ko['callback'] is not None:
                ko['callback'](None)
            sys.stdout.flush()
        (_, ffd) = self.get_toggle('b', 1, None, init=0)
        (_, back) = self.get_toggle('v', 1, None, init=0)
        if back:
            self.cur_buf_id -= 1
            self._redraw = True
        if ffd:
            self.cur_buf_id += 1
            self._redraw = True
        self.cur_buf_id = self.cur_buf_id % len(self.buffers)

    def get_slider(self, name, callback=None, init=0, max_=255):
        """Return (value, changed) for trackbar *name*, creating it on first use."""
        created = False
        if name not in self.sliders:
            if callback is None:
                # cv2 invokes the callback with the new position; accept it.
                def none(*_):
                    pass
                callback = none
            self.sliders[name] = {'old_value': init}
            cv2.createTrackbar(name, 'image', init, max_, callback)
            created = True
        val = cv2.getTrackbarPos(name, 'image')
        old_val = self.sliders[name]['old_value']
        self.sliders[name]['old_value'] = val
        return (val, val != old_val or created)

    def eventloop(self):
        """Poll keys at ~10Hz; ESC or 'q' quits; repaint when _redraw is set."""
        while (1):
            k = cv2.waitKey(100) & 0xFF
            if k != 255:
                self.got_key(k)
            if k == 27 or k == ord('q'):
                break
            if self._redraw:
                self._redraw = False
                cv2.imshow('image', self.buffers[self.cur_buf_id])
        cv2.destroyAllWindows()

    def save_all_buffers(self):
        """Dump every named buffer to debug/NN_name.png."""
        for (i, (k, b)) in enumerate(self.buffers_by_name.items()):
            fn = "debug/%02d_%s.png" % (i, k)
            cv2.imwrite(fn, b[0])
| Phaiax/sudoku | src/context.py | context.py | py | 3,957 | python | en | code | 0 | github-code | 36 |
14054446569 | import numpy as np
import cv2
from .kalman import Kalman
#https://github.com/uoip/monoVO-python
def get_R(alpha):
    """Return the 2x2 rotation matrix for *alpha* given in degrees
    (image-coordinate convention: y axis points down)."""
    angle = np.pi * alpha / 180
    c = np.cos(angle)
    s = np.sin(angle)
    return np.array([[c, s],
                     [-s, c]])
def show_direction(image, t, M):
    """Draw a small heading triangle on *image* at pixel position *t*,
    rotated by the 2x2 matrix *M* (e.g. from get_R). Mutates *image* in place."""
    line_thickness = 1
    cx, cy = t
    # Triangle vertices in local coordinates (apex points "up" before rotation).
    triangle = np.array([[-9, 9], [9, 9], [0, -11]]).T
    triangle_rot = M@triangle
    triangle = triangle_rot.T
    # Translate to the requested center.
    triangle[:,0] += cx
    triangle[:,1] += cy
    # Edges between the three vertices.
    points = [[0,1], [0,2], [1,2]]
    for point in points:
        cv2.line(image, (int(triangle[point[0]][0]),int(triangle[point[0]][1])),
                 (int(triangle[point[1]][0]),int(triangle[point[1]][1])),
                 (0, 0, 255),
                 thickness=line_thickness
                 )
# Kalman filter tuning — state appears to be [x, y, course, velocity]
# (TODO confirm against the Kalman class). dt is the update period in seconds.
dt = 0.1
# Q
GPS = 11.7*8.8*dt**2 # assume 8.8m/s2 as maximum acceleration, forcing the vehicle
Course = 1.7*dt # assume 0.2rad/s as maximum turn rate for the vehicle
Velocity= 8.8*dt # assume 8.8m/s2 as maximum acceleration, forcing the vehicle
q = np.diag([GPS**2, GPS**2, Course**2, Velocity**2])
# H: measurement matrix — only x and y are observed.
h = np.array([[1.0, 0.0, 0.0, 0.0],
              [0.0, 1.0, 0.0, 0.0]])
# R
varGPS = 0.5 # Standard Deviation of GPS Measurement
r = np.diag([varGPS**2.0, varGPS**2.0])
# F: state-transition matrix (identity placeholder).
f = np.eye(4)
def mapping(q_in):
    """Consume (raw_frame, frame, coords, frame_id) tuples from queue *q_in*
    forever and render the vehicle trajectory on a 400x400 canvas.

    coords is indexed as (x, y, z, heading-degrees) — TODO confirm with the
    producer side. Runs until the process is killed (no exit condition).
    """
    kalman = Kalman(f = f, h = h, q = q, r = r)
    kalman.set_state()
    traj = np.zeros((400,400,3), dtype=np.uint8)
    while True:
        #raw_frame, frame, coords, frame_id
        _, _, coords, frame_id = q_in.get()
        alpha = coords[3]
        Rt = get_R(alpha)
        x, y, z = coords[0], coords[1], coords[2]
        # Kalman
        # kalman.predict()
        # kalman.update(np.array([[float(coords[0])],
        #                         [float(coords[2])]]))
        # coords = np.array([[float(kalman.state[0])],
        #                     coords[1],
        #                     [float(kalman.state[1])]])
        # x, y, z = coords[0], coords[1], coords[2]
        draw_x, draw_y = int(x), int(y)
        # NOTE(review): z_color is computed but only used by the commented-out
        # circle call below.
        z_color = int(z*255/300)
        #cv2.circle(traj, (draw_x,draw_y), 1, (z_color,255-z_color,255), 2)
        cv2.circle(traj, (draw_x,draw_y), 1, (frame_id/1000,255-frame_id/1000,255), 2)
        # Black banner for the coordinate read-out (wider than the 400px canvas;
        # cv2 clips it).
        cv2.rectangle(traj, (10, 20), (600, 60), (0,0,0), -1)
        text = "Coordinates: x={:.2f}m y={:.2f}m z={:.2f}m".format(x,y,z)
        cv2.putText(traj, text, (20,40), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 1, 8)
        show_direction(traj, (draw_x, draw_y), Rt)
        cv2.imshow('Trajectory', traj)
        cv2.waitKey(1)
if __name__ == '__main__':
    # NOTE(review): mapping() requires a queue argument (q_in); calling it with
    # no arguments here raises TypeError — confirm the intended entry point.
    mapping()
| vvabi-sabi/drone_RK3588 | addons/odometry/odometry.py | odometry.py | py | 2,434 | python | en | code | 2 | github-code | 36 |
import io
import os

from google.cloud import texttospeech
from pydub import AudioSegment
from pydub.playback import play
# Path to the service-account JSON used to authenticate with Google Cloud
# (placeholder — replace before running).
google_credentials_file = "PATH_TO_YOUR_GOOGLE_CREDENTIALS_JSON"
# Set the environment variable for Google Text-to-Speech API
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = google_credentials_file
# Initialize Google Text-to-Speech API client
tts_client = texttospeech.TextToSpeechClient()
def synthesize_text(text, language_code="zh-CN"):
    """Render *text* to LINEAR16 (WAV) audio bytes with Google Text-to-Speech,
    using a neutral voice for *language_code*."""
    synthesis_input = texttospeech.SynthesisInput(text=text)
    voice_params = texttospeech.VoiceSelectionParams(
        language_code=language_code,
        ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL,
    )
    audio_settings = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.LINEAR16
    )
    response = tts_client.synthesize_speech(
        input=synthesis_input, voice=voice_params, audio_config=audio_settings
    )
    return response.audio_content
def play_audio(audio_data):
    """Decode WAV bytes and play them through the default audio device."""
    segment = AudioSegment.from_file(io.BytesIO(audio_data), format="wav")
    play(segment)
| qiusiyuan/gpt-live-stream | src/bilibiligptlive/tts.py | tts.py | py | 1,133 | python | en | code | 0 | github-code | 36 |
23382530066 | # -*- coding: utf-8 -*-
import re
import json
import time
import scrapy
import requests
import itertools
from lxml import etree
from hashlib import md5
from overseaSpider.items import ShopItem, SkuAttributesItem, SkuItem
from overseaSpider.util.scriptdetection import detection_main
from overseaSpider.util.utils import isLinux
website = 'samys'
class SamysSpider(scrapy.Spider):
    """Spider for samys.com: walks the category tree, paginates listing pages
    (36 products per page) and scrapes product detail pages into ShopItem."""
    name = website
    # allowed_domains = ['samys.com']
    start_urls = ['https://www.samys.com/']

    @classmethod
    def update_settings(cls, settings):
        """Choose debug vs. production settings; off-server, disable the HTTP
        cache and point MongoDB at localhost."""
        custom_debug_settings = getattr(cls, 'custom_debug_settings' if getattr(cls, 'is_debug',
                                                                                False) else 'custom_settings', None)
        system = isLinux()
        if not system:
            # Not running on the server: adjust the relevant settings.
            custom_debug_settings["HTTPCACHE_ENABLED"] = False
            custom_debug_settings["MONGODB_SERVER"] = "127.0.0.1"
        settings.setdict(custom_debug_settings or {}, priority='spider')

    def __init__(self, **kwargs):
        super(SamysSpider, self).__init__(**kwargs)
        self.counts = 0
        setattr(self, 'author', "无穹")

    is_debug = True
    custom_debug_settings = {
        'MONGODB_COLLECTION': website,
        'CONCURRENT_REQUESTS': 4,
        'DOWNLOAD_DELAY': 1,
        'LOG_LEVEL': 'DEBUG',
        'COOKIES_ENABLED': True,
        # 'HTTPCACHE_EXPIRATION_SECS': 14 * 24 * 60 * 60,  # seconds
        'DOWNLOADER_MIDDLEWARES': {
            # 'overseaSpider.middlewares.PhantomjsUpdateCookieMiddleware': 543,
            # 'overseaSpider.middlewares.OverseaspiderProxyMiddleware': 400,
            'overseaSpider.middlewares.OverseaspiderUserAgentMiddleware': 100,
        },
        'ITEM_PIPELINES': {
            'overseaSpider.pipelines.OverseaspiderPipeline': 300,
        },
    }

    def filter_html_label(self, text):
        """Strip HTML comments, scripts, styles and tags, then remove all
        whitespace characters (including plain spaces) from *text*."""
        label_pattern = [r'(<!--[\s\S]*?-->)', r'<script>.*?</script>', r'<style>.*?</style>', r'<[^>]+>']
        for pattern in label_pattern:
            labels = re.findall(pattern, text, re.S)
            for label in labels:
                text = text.replace(label, '')
        text = text.replace('\n', '').replace('\r', '').replace('\t', '').replace(' ', '').strip()
        return text

    def filter_text(self, input_text):
        """Remove exotic Unicode space/separator characters from *input_text*."""
        filter_list = [u'\x85', u'\xa0', u'\u1680', u'\u180e', u'\u2000-', u'\u200a',
                       u'\u2028', u'\u2029', u'\u202f', u'\u205f', u'\u3000', u'\xA0', u'\u180E',
                       u'\u200A', u'\u202F', u'\u205F']
        for index in filter_list:
            input_text = input_text.replace(index, "").strip()
        return input_text

    def parse(self, response):
        """Seed requests for the six top-level category listing pages."""
        category_url = ['https://www.samys.com/c/Photography/1/113.html',
                        'https://www.samys.com/c/Video/1/235.html',
                        'https://www.samys.com/c/Studio--Lighting/1/360.html',
                        'https://www.samys.com/c/Electronics/1/421.html',
                        'https://www.samys.com/c/Smartphone/1/830.html',
                        'https://www.samys.com/c/Pro-Cinema--Audio/2/794.html']
        for i in category_url:
            yield scrapy.Request(
                url=i,
                callback=self.parse_list,
                meta={"flag": 0}
            )

    def parse_list(self, response):
        """Listing page: queue detail pages and paginate; when the page lists
        no products, descend into its sub-categories instead."""
        detail_url = response.xpath("//div[@itemprop='name']/a/@href").getall()
        if detail_url:
            for i in detail_url:
                yield scrapy.Request(
                    url='https://www.samys.com' + i,
                    callback=self.parse_detail
                )
            if response.meta.get("flag") == 0:
                # First page of a category: start pagination at offset 37.
                next_url = response.url + '?start=37'
                yield scrapy.Request(
                    url=next_url,
                    callback=self.parse_list,
                    meta={"flag": 1, "start": 37, "url": response.url}
                )
            else:
                start = response.meta.get("start") + 36
                # BUGFIX: build the next page from the stored base URL; appending
                # to response.url (which already carries '?start=N') produced
                # malformed URLs like '...?start=37?start=73'.
                next_url = response.meta.get("url") + '?start=' + str(start)
                yield scrapy.Request(
                    url=next_url,
                    callback=self.parse_list,
                    meta={"flag": 1, "start": start, "url": response.meta.get("url")}
                )
        else:
            # Category hub page: follow each sub-category as a fresh listing.
            category_url = response.xpath("//div[@class='category-container']/div/a/@href").getall()
            for i in category_url:
                yield scrapy.Request(
                    url='https://www.samys.com' + i,
                    callback=self.parse_list,
                    meta={"flag": 0}
                )

    def parse_detail(self, response):
        """Product detail page: build and yield a ShopItem."""
        items = ShopItem()
        items["url"] = response.url
        items["name"] = response.xpath('//meta[@property="og:title"]/@content').get()
        cat_temp = response.xpath("//ul[@class='breadcrumbs floatContainer']//a//text()").getall()
        # NOTE(review): assumes breadcrumbs exist; cat_temp[-1] raises IndexError otherwise.
        items["detail_cat"] = '/'.join(cat_temp)
        items["cat"] = cat_temp[-1]
        des_temp = response.xpath('//span[@itemprop="description"]//text()').getall()
        items["description"] = self.filter_text(self.filter_html_label(''.join(des_temp)))
        items["source"] = 'samys.com'
        items["brand"] = response.xpath('//meta[@itemprop="brand"]/@content').get()
        # First slider image plus lazily loaded ones; fall back to the swiper layout.
        image_temp = response.xpath("//ul[@class='slider-detail']/li/a/img/@src").getall()[:1] + response.xpath("//ul[@class='slider-detail']/li/a/img/@data-post-load-image").getall()
        if not image_temp:
            image_temp = response.xpath("//div[@class='swiper-slide false']/img/@src").getall()
        image = []
        for i in image_temp:
            image.append('https://www.samys.com' + i)
        items["images"] = image
        items["current_price"] = response.xpath("//meta[@itemprop='price']/@content").get()
        items["original_price"] = items["current_price"]
        items["measurements"] = ["Weight: None", "Height: None", "Length: None", "Depth: None"]
        items["sku_list"] = []
        # Stable item id: md5 over url + prices (None entries dropped first).
        status_list = list()
        status_list.append(items["url"])
        status_list.append(items["original_price"])
        status_list.append(items["current_price"])
        status_list = [i for i in status_list if i]
        status = "-".join(status_list)
        items["id"] = md5(status.encode("utf8")).hexdigest()
        items["lastCrawlTime"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        items["created"] = int(time.time())
        items["updated"] = int(time.time())
        items['is_deleted'] = 0
        # detection_main(items=items, website=website, num=20, skulist=True, skulist_attributes=True)
        yield items
| husky-happy/templatespider | overseaSpider/spiders/xg/samys.py | samys.py | py | 6,903 | python | en | code | 0 | github-code | 36 |
30397052082 | from os.path import join
from typing import Optional
from dagger.dag_creator.airflow.operator_creator import OperatorCreator
from dagger.dag_creator.airflow.operators.redshift_sql_operator import (
RedshiftSQLOperator,
)
class RedshiftLoadCreator(OperatorCreator):
    """Builds the SQL for loading S3 data into Redshift via COPY, optionally
    staging into a temp table that is atomically swapped into place."""
    ref_name = "redshift_load"

    def __init__(self, task, dag):
        super().__init__(task, dag)
        # S3 source location of the data to COPY.
        self._input_path = join(self._task.inputs[0].rendered_name, "")
        self._input_s3_bucket = self._task.inputs[0].bucket
        self._input_s3_prefix = self._task.inputs[0].path
        # Target schema/table, with double-quoted variants for SQL interpolation.
        self._output_schema = self._task.outputs[0].schema
        self._output_table = self._task.outputs[0].table
        self._output_schema_quoted = f'"{self._output_schema}"'
        self._output_table_quoted = f'"{self._output_table}"'
        # Optional staging table used for the swap strategy.
        self._tmp_table = (
            f"{self._task.tmp_table_prefix}_{self._output_table}"
            if self._task.tmp_table_prefix
            else None
        )
        self._tmp_table_quoted = f'"{self._tmp_table}"' if self._tmp_table else None
        self._copy_ddl_from = self._task.copy_ddl_from
        self._alter_columns = self._task.alter_columns
        self._sort_keys = self._task.sort_keys

    @staticmethod
    def _read_sql(directory, file_path):
        """Read and return the SQL text at directory/file_path."""
        full_path = join(directory, file_path)
        with open(full_path, "r") as f:
            sql_string = f.read()
        return sql_string

    def _get_create_table_cmd(self) -> Optional[str]:
        """CREATE TABLE statement for the staging or output table, from an
        explicit DDL file or via LIKE; None when no table needs creating."""
        if self._tmp_table and self._task.create_table_ddl:
            ddl = self._read_sql(
                self._task.pipeline.directory, self._task.create_table_ddl
            )
            return ddl.format(
                schema_name=self._output_schema_quoted,
                table_name=self._tmp_table_quoted,
            )
        if self._tmp_table and self._copy_ddl_from:
            return (
                f"CREATE TABLE {self._output_schema_quoted}.{self._tmp_table_quoted}"
                f"(LIKE {self._copy_ddl_from})"
            )
        elif self._tmp_table:
            return (
                f"CREATE TABLE {self._output_schema_quoted}.{self._tmp_table_quoted}"
                f"(LIKE {self._output_schema_quoted}.{self._output_table_quoted})"
            )
        elif self._task.create_table_ddl:
            ddl = self._read_sql(
                self._task.pipeline.directory, self._task.create_table_ddl
            )
            return ddl.format(
                schema_name=self._output_schema_quoted,
                table_name=self._output_table_quoted,
            )
        elif self._copy_ddl_from:
            return (
                f"CREATE TABLE IF NOT EXISTS {self._output_schema_quoted}.{self._output_table}"
                f"(LIKE {self._copy_ddl_from})"
            )
        return None

    def _get_sort_key_cmd(self) -> Optional[str]:
        """ALTER ... SORTKEY on the staging table, when sort keys are configured."""
        sort_key_cmd = None
        if self._sort_keys:
            sort_key_cmd = (
                f"ALTER TABLE {self._output_schema_quoted}.{self._tmp_table_quoted} "
                f"ALTER COMPOUND SORTKEY({self._sort_keys})"
            )
        return sort_key_cmd

    def _get_delete_cmd(self) -> Optional[str]:
        """DELETE (incremental) or TRUNCATE (full, no staging) statement;
        None when a staging table will replace the target instead."""
        if self._task.incremental:
            # BUGFIX: separate the table name from WHERE with a newline; the
            # adjacent f-strings previously concatenated into '..."table"WHERE ...',
            # which is invalid SQL.
            return (
                f"DELETE FROM {self._output_schema_quoted}.{self._output_table_quoted}\n"
                f"WHERE {self._task.delete_condition}"
            )
        if not self._task.incremental and self._tmp_table is None:
            return f"TRUNCATE TABLE {self._output_schema_quoted}.{self._output_table_quoted}"
        return None

    def _get_load_cmd(self) -> Optional[str]:
        """COPY statement from S3 into the staging (or output) table."""
        table_name = self._tmp_table_quoted or self._output_table_quoted
        columns = "({})".format(self._task.columns) if self._task.columns else ""
        extra_parameters = "\n".join(
            [
                "{} {}".format(key, value)
                for key, value in self._task.extra_parameters.items()
            ]
        )
        return (
            f"copy {self._output_schema_quoted}.{table_name}{columns}\n"
            f"from '{self._input_path}'\n"
            f"iam_role '{self._task.iam_role}'\n"
            f"{extra_parameters}"
        )

    def _get_replace_table_cmd(self) -> Optional[str]:
        """Transaction that drops the output table and renames the staging
        table into its place; None when not using a staging table."""
        if self._tmp_table is None:
            return None
        return (
            f"BEGIN TRANSACTION;\n"
            f"DROP TABLE IF EXISTS {self._output_schema_quoted}.{self._output_table_quoted};\n"
            f"ALTER TABLE {self._output_schema_quoted}.{self._tmp_table_quoted} "
            f"RENAME TO {self._output_table_quoted};\n"
            f"END"
        )

    def _get_alter_columns_cmd(self) -> Optional[str]:
        """ALTER COLUMN statements from 'name:type,name:type' spec, or None."""
        if self._alter_columns is None:
            return None
        alter_column_commands = []
        alter_columns = self._alter_columns.split(",")
        for alter_column in alter_columns:
            [column_name, column_type] = alter_column.split(":")
            alter_column_commands.append(
                f"ALTER TABLE {self._output_schema_quoted}.{self._tmp_table_quoted} "
                f"ALTER COLUMN {column_name} TYPE {column_type}"
            )
        return ";\n".join(alter_column_commands)

    def _get_drop_tmp_table_cmd(self) -> Optional[str]:
        """DROP statement for a leftover staging table, or None."""
        if self._tmp_table is None:
            return None
        return f"DROP TABLE IF EXISTS {self._output_schema_quoted}.{self._tmp_table_quoted}"

    def _get_cmd(self) -> str:
        """Assemble the full load script, skipping inapplicable steps."""
        raw_load_cmd = [
            self._get_drop_tmp_table_cmd(),
            self._get_create_table_cmd(),
            self._get_alter_columns_cmd(),
            self._get_sort_key_cmd(),
            self._get_delete_cmd(),
            self._get_load_cmd(),
            self._get_replace_table_cmd(),
        ]
        load_cmd = [cmd for cmd in raw_load_cmd if cmd]
        return ";\n".join(load_cmd)

    def _create_operator(self, **kwargs):
        """Wrap the assembled SQL in a RedshiftSQLOperator for the DAG."""
        load_cmd = self._get_cmd()
        redshift_op = RedshiftSQLOperator(
            dag=self._dag,
            task_id=self._task.name,
            sql=load_cmd,
            redshift_conn_id=self._task.postgres_conn_id,
            autocommit=True,
            **kwargs,
        )
        return redshift_op
| siklosid/dagger | dagger/dag_creator/airflow/operator_creators/redshift_load_creator.py | redshift_load_creator.py | py | 6,239 | python | en | code | 7 | github-code | 36 |
def count_qaq(text):
    """Count subsequences 'QAQ' in *text* in O(n).

    For each 'A', the number of valid triples is (Qs before it) * (Qs after it);
    this replaces the original O(n^3) triple loop with one pass.
    """
    total_q = text.count("Q")
    seen_q = 0
    count = 0
    for ch in text:
        if ch == "Q":
            seen_q += 1
        elif ch == "A":
            count += seen_q * (total_q - seen_q)
    return count


def qaq():
    """Read one line from stdin and print the number of 'QAQ' subsequences."""
    print(count_qaq(input()))


if __name__ == "__main__":
    qaq()
| humanolaranja/MC521 | 6/g/index.py | index.py | py | 346 | python | en | code | 0 | github-code | 36 |
31524035648 | import pandas as pd
import numpy as np
from typing import List
from loguru import logger
from meche_copilot.utils.num_tokens_from_string import num_tokens_from_string
def combine_dataframe_chunks(dfs: List[pd.DataFrame]) -> pd.DataFrame:
    """Reassemble dataframe chunks produced by chunk_dataframe.

    Chunks sharing a column count are stacked row-wise; chunks sharing a row
    count are joined column-wise. An empty list yields an empty DataFrame
    (previously this crashed with IndexError, since all() over an empty
    generator is True and dfs[0] was then accessed).

    Raises:
        ValueError: if the chunks agree on neither dimension.
    """
    if not dfs:
        return pd.DataFrame()
    if all(df.shape[1] == dfs[0].shape[1] for df in dfs):
        return pd.concat(dfs, axis=0, ignore_index=True)
    if all(df.shape[0] == dfs[0].shape[0] for df in dfs):
        return pd.concat(dfs, axis=1)
    raise ValueError("Chunks do not have consistent shape for concatenation.")
def chunk_dataframe(df: pd.DataFrame, axis=1, num_chunks=None, pct_list=None, max_tokens=None, **kwargs) -> List[pd.DataFrame]:
    """Split *df* along *axis* (0=rows, 1=columns) by exactly one strategy:
    a fixed number of chunks, a list of percentages, or a token budget per
    chunk (token counts taken from the chunk's CSV form).

    kwargs: encoding_name (default "gpt-4") is forwarded to the tokenizer.

    Raises:
        ValueError: on an invalid axis, zero/multiple strategies, percentages
            summing over 100, or more chunks than items along the axis.
    """
    if axis not in [0, 1]:
        raise ValueError("axis should be either 0 (rows) or 1 (columns).")
    if sum([num_chunks is not None, pct_list is not None, max_tokens is not None]) != 1:
        # BUGFIX: message previously read "max_tokes".
        raise ValueError(f"Exactly one of num_chunks, pct_list, or max_tokens must be specified. Got {num_chunks}, {pct_list}, {max_tokens}")
    if pct_list:
        if sum(pct_list) > 100:
            raise ValueError("Sum of pct_list should be 100% or less.")
        num_chunks = len(pct_list) + 1
        # BUGFIX: pad a local copy with the remainder instead of appending to
        # the caller's list (the original mutated its argument).
        pct_list = pct_list + [100 - sum(pct_list)]
    # num_chunks (directly or derived from pct_list) must not exceed the axis size.
    if num_chunks:
        if axis == 0 and num_chunks > df.shape[0]:
            raise ValueError("Number of chunks should not be greater than number of rows.")
        if axis == 1 and num_chunks > df.shape[1]:
            raise ValueError("Number of chunks should not be greater than number of columns.")
    chunks = []
    if num_chunks and not pct_list:  # split into num_chunks along axis
        logger.debug(f"Splitting df into {num_chunks} chunks along axis {axis}.")
        chunks = np.array_split(df, num_chunks, axis=axis)
    elif pct_list:  # split into the given fractions along axis
        logger.debug(f"Splitting df into {len(pct_list)} chunks along axis {axis} with pct_list {pct_list}.")
        fractions = [pct / 100 for pct in pct_list]
        start_idx = 0
        for frac in fractions:
            if axis == 0:
                end_idx = start_idx + int(frac * df.shape[0])
                chunks.append(df.iloc[start_idx:end_idx])
            else:
                end_idx = start_idx + int(frac * df.shape[1])
                chunks.append(df.iloc[:, start_idx:end_idx])
            start_idx = end_idx
    else:  # split by token budget
        logger.debug(f"Splitting df along axis {axis} with max_tokens {max_tokens} per chunk.")
        encoding_name = kwargs.get("encoding_name", "gpt-4")
        start_idx = 0
        prev_tokens = None  # token count of the largest prefix that still fit
        while (start_idx < df.shape[0]) if axis == 0 else (start_idx < df.shape[1]):
            for i in range(start_idx, df.shape[0] if axis == 0 else df.shape[1]):
                csv_string = df.iloc[start_idx:i + 1].to_csv() if axis == 0 else df.iloc[:, start_idx:i + 1].to_csv()
                tokens = num_tokens_from_string(csv_string, encoding_name)
                if tokens > max_tokens:
                    # BUGFIX: the next chunk must start at item i, not i + 1 —
                    # the original skipped (dropped) item i entirely. When a
                    # single item alone exceeds the budget, emit it as its own
                    # chunk to avoid an empty chunk / infinite loop.
                    end = i + 1 if i == start_idx else i
                    logger.debug(f"Adding chunk with shape {df.iloc[start_idx:end].shape if axis == 0 else df.iloc[:, start_idx:end].shape} and prev num tokens {prev_tokens}.")
                    chunks.append(df.iloc[start_idx:end] if axis == 0 else df.iloc[:, start_idx:end])
                    start_idx = end
                    break
                prev_tokens = tokens
            else:  # all remaining data fits within max_tokens
                chunks.append(df.iloc[start_idx:] if axis == 0 else df.iloc[:, start_idx:])
                break
    logger.debug(f"Split df into {len(chunks)} chunks")
    return chunks
| fuzzy-tribble/meche-copilot | meche_copilot/utils/chunk_dataframe.py | chunk_dataframe.py | py | 4,373 | python | en | code | 1 | github-code | 36 |
43777238511 | import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import time
# Minimum good matches needed before attempting homography estimation.
MIN_MATCH_COUNT = 10
# Load the reference (sample) and query images as grayscale.
# NOTE(review): cv.imread returns None when the file is missing, in which
# case cvtColor raises — confirm the ./img paths exist.
img_sample = cv.cvtColor(cv.imread("./img/dataset/9.png",cv.IMREAD_COLOR),cv.COLOR_BGR2GRAY)
img_q = cv.cvtColor(cv.imread("./img/query/3.png",cv.IMREAD_COLOR),cv.COLOR_BGR2GRAY)
# Detect SIFT keypoints and descriptors in both images.
sift = cv.SIFT_create()
keypoints_1, descriptors_1 = sift.detectAndCompute(img_q, None)
keypoints_2, descriptors_2 = sift.detectAndCompute(img_sample, None)
outImage_1 = cv.drawKeypoints(img_q, keypoints_1,None)
outImage_2 = cv.drawKeypoints(img_sample, keypoints_2,None)
print(len(keypoints_1))
print(len(keypoints_2))
#cv.imwrite('image.jpg', outImage_1)
#cv.waitKey(0)
# BFMatcher
def BFMatcher(descript_1,descript_2):
    """Brute-force KNN matching (k=2) between two SIFT descriptor sets."""
    matcher = cv.BFMatcher()
    return matcher.knnMatch(descript_1, descript_2, k=2)
# FLANNMatcher
def FLANNMatcher(descript_1,descript_2):
    """Approximate KNN matching (k=2) with FLANN using a KD-tree index."""
    kdtree_index = 1  # FLANN_INDEX_KDTREE
    matcher = cv.FlannBasedMatcher(
        dict(algorithm=kdtree_index, trees=5),
        dict(checks=50),
    )
    return matcher.knnMatch(descript_1, descript_2, k=2)
# Time the matching step (switch to FLANNMatcher to compare).
start = time.time()
matches = BFMatcher(descriptors_1, descriptors_2)
# matches = FLANNMatcher(descriptors_1, descriptors_2)
end = time.time()
print("time cost: ", end - start)
# Lowe's ratio test: keep a match only when it is clearly better than the
# second-best candidate.
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append(m)
print("match pairs: ", len(good))
if len(good) > MIN_MATCH_COUNT:
    # Estimate the homography from query to sample and outline the detected
    # query region on the sample image.
    src_pts = np.float32([ keypoints_1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ keypoints_2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w = img_q.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv.perspectiveTransform(pts, M)
    sample_draw = cv.merge((img_sample.copy(), img_sample.copy(), img_sample.copy()))
    img_sample_detected = cv.polylines(sample_draw, [np.int32(dst)], True, (255,0,0), 5, cv.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None
    # BUGFIX: fall back to the plain sample image so the drawMatches call
    # below does not crash with NameError (img_sample_detected was previously
    # only defined in the success branch).
    img_sample_detected = cv.merge((img_sample.copy(), img_sample.copy(), img_sample.copy()))
query_draw = cv.merge((img_q.copy(), img_q.copy(), img_q.copy()))
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img3 = cv.drawMatches(query_draw, keypoints_1, img_sample_detected, keypoints_2, good, None, **draw_params)
plt.imshow(img3, 'gray'), plt.show()
| Laurie-xzh/AI-Practice | CV/Point_Feature_Match/test.py | test.py | py | 2,845 | python | en | code | 0 | github-code | 36 |
2986533119 |
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
import time
from constants import URL
def get_product_properties(browser):
    """Open the first search result and scrape its product properties.

    Reads the spec-table rows whose keys are of interest, the stock
    indicator and the price from the product detail page.

    Args:
        browser: selenium webdriver currently showing a search-result page.

    Returns:
        dict: property name -> scraped value ("Lieferbar" is a bool).
    """
    properties = {}
    search_result = browser.find_element(By.CLASS_NAME, "sooqrSearchResults")
    title = search_result.find_element(By.CLASS_NAME, "productlist-title")
    title.find_element(By.TAG_NAME, "a").click()
    # brief pause so the detail page can render before scraping it
    time.sleep(0.1)
    content_place_holder = browser.find_element(By.ID, "pdetailTableSpecs")
    table_body = content_place_holder.find_element(By.TAG_NAME, "tbody")
    searched_keys = ["Zusammenstellung", "Nadelstärke"]
    for table_row in table_body.find_elements(By.TAG_NAME, "tr"):
        key, value = table_row.find_elements(By.TAG_NAME, "td")
        if key.text in searched_keys:
            properties[key.text] = value.text
    # some products have no stock indicator at all; catch only the missing
    # element (the original bare "except:" also hid real errors)
    try:
        properties["Lieferbar"] = browser.find_element(By.CLASS_NAME, "stock-green").text == "Lieferbar"
    except NoSuchElementException:
        properties["Lieferbar"] = False
    properties["Preis"] = browser.find_element(By.CLASS_NAME, "product-price-amount").text
    return properties
def select_correct_brand(browser, marke):
    """Apply the brand filter matching *marke* on the search-result page.

    Args:
        browser: selenium webdriver showing a search-result page.
        marke: brand name to select in the brand filter.

    Returns:
        str: "" on success, "No search result found" when the filter
        widget is absent, "No such brand for search term" when the brand
        is not among the filter options.
    """
    # locate the brand filter; its absence means the search found nothing
    try:
        filter_box = browser.find_element(By.ID, "sooqr44898be26662b0dfSearchFilter191640")
        filter_inputs = filter_box.find_elements(By.TAG_NAME, "input")
    except NoSuchElementException:
        return "No search result found"
    # click the first checkbox whose value equals the requested brand
    for checkbox in filter_inputs:
        if checkbox.get_attribute("value") == marke:
            checkbox.click()
            return ""
    return "No such brand for search term"
def search_product(browser, marke, bezeichnung):
    """Search for *bezeichnung*, filter by *marke*, and scrape the product.

    Args:
        browser: selenium Firefox webdriver.
        marke (str): brand to filter by.
        bezeichnung (str): product designation to search for.

    Returns:
        dict: the scraped properties; contains an "error" key when the
        product could not be found for the given brand.
    """
    # navigate back to the site home before every search: occasionally
    # elements could not be found (although visible) when continuing
    # directly from the previous product page
    browser.get(URL)
    search_field = browser.find_element(By.ID, "searchSooqrTop")
    search_field.clear()
    search_field.send_keys(bezeichnung)
    product_properties = {"marke": marke, "bezeichnung": bezeichnung}
    # empty string means the brand filter was found and applied
    occured_errors = select_correct_brand(browser, marke)
    if occured_errors != "":
        product_properties["error"] = occured_errors
        return product_properties
    product_properties.update(get_product_properties(browser))
    return product_properties | Felix-95/programming_challenge | src/scraper.py | scraper.py | py | 3,238 | python | en | code | 0 | github-code | 36 |
72516592744 | import time
import datetime
from timeit import default_timer as timer
import settings
from pymongo import MongoClient
from faker import Faker
from bson.decimal128 import Decimal128
import random
fake = Faker()
####
# Start script
####
startTs = time.gmtime()
start = timer()
print("================================")
print(" Generating Transactions Data ")
print("================================")
print("\nStarting " + time.strftime("%Y-%m-%d %H:%M:%S", startTs) + "\n")
####
# Main start function
####
def main():
    """Insert NUM_DOCS randomly generated transaction documents into MongoDB.

    Connects with the MDB_* module constants (loaded from .env via
    ``settings``); each document carries Faker-generated customer data,
    a geo point, and a random deposit/withdrawal.
    """
    mongo_client = MongoClient(MDB_CONNECTION)
    db = mongo_client[MDB_DATABASE]
    my_collection = db[MDB_COLLECTION]

    print('Begin generating txns documents.')
    print('Number of documents to generate: ' + str(NUM_DOCS))

    # the candidate transaction types never change; build the list once
    txn_types = ['deposit', 'withdrawal']

    for index in range(int(NUM_DOCS)):
        # random day within the last year
        fake_timestamp = fake.date_between(start_date='-1y', end_date='today')
        txns = random.choice(txn_types)

        my_txn_document = {
            "customerId": fake.ssn(),
            "name": fake.name(),
            "address": fake.street_address(),
            "city": fake.city(),
            "state": fake.state(),
            "postalCode": fake.postcode(),
            "email": fake.email(),
            "lastLocation": {
                "type": "Point",
                "coordinates": [
                    Decimal128(fake.longitude()),
                    Decimal128(fake.latitude())
                ]
            },
            "txnType": txns,
            "txnAmount": random.randint(0, 10000),
            "txnDate": datetime.datetime(fake_timestamp.year, fake_timestamp.month, fake_timestamp.day)
        }

        # Print an example document on the FIRST insert.
        # BUGFIX: the original tested ``index == 1`` (the second document),
        # which also never fired when NUM_DOCS == 1.
        if index == 0:
            print('Example Document')
            print(my_txn_document)

        # progress indicator every 100 documents
        if index % 100 == 0:
            print('Docs inserted: ' + str(index))

        my_collection.insert_one(my_txn_document)
####
# Constants loaded from .env file
####
MDB_CONNECTION = settings.MDB_CONNECTION
MDB_DATABASE = settings.MDB_DATABASE
MDB_COLLECTION = settings.MDB_COLLECTION
NUM_DOCS = settings.NUM_DOCS

####
# Main
####
if __name__ == '__main__':
    main()

####
# Indicate end of script and report the insertion rate
####
end = timer()
endTs = time.gmtime()
total_time = end - start
# NOTE(review): for sub-second runs this divides by 1, i.e. it reports the
# raw document count instead of a true per-second rate -- presumably a
# guard against inflating the figure for tiny elapsed times; confirm intent
if total_time < 1:
    docs_inserted_time = int(NUM_DOCS) / 1
else:
    docs_inserted_time = int(NUM_DOCS) / total_time

print("\nEnding " + time.strftime("%Y-%m-%d %H:%M:%S", endTs))
print('===============================')
print('Total Time Elapsed (in seconds): ' + str(total_time))
print('===============================')
print('Number of Docs inserted per second: ' + str(docs_inserted_time))
print('===============================')
| blainemincey/generate_sample_data | generate_transactions_data.py | generate_transactions_data.py | py | 2,781 | python | en | code | 1 | github-code | 36 |
74643015143 | from PyQt4.QtGui import QMainWindow,QListWidgetItem, QMessageBox, QTableWidgetItem, QInputDialog,QLineEdit, QFileDialog
from Ui_MainWindow import Ui_MainWindow
from Gramaticas.Producao import Producao
from Gramaticas.Gramatica import Gramatica, ExcecaoConstruirGramatica
from Automatos.Automato import Automato, ExcecaoMinimizarAutomatoNaoDeterministico
from Expressao.Expressao import Expressao, ExcecaoExpressaoInvalida
import pickle, random
SIMBOLO_SEPARADOR_TRANSICAO = "|"
SIMBOLO_SEPARADOR_PRODUCAO = "->"
DIRETORIO_GRAMATICAS = "persistencia/Gramaticas/"
DIRETORIO_AUTOMATOS = "persistencia/Automatos/"
class MainWindow (QMainWindow, Ui_MainWindow):
"""
Janela grafica do programa, que chama e configura os metodos
dos automatos e das gramaticas
"""
def __init__(self, parent=None):
'''
inicializa os dados da gramatica e do automato atuais
'''
super(MainWindow, self).__init__(parent)
self.setupUi(self)
self._infoGramaticaAtual = {
"ProducaoInicial" : None,
"NaoTerminais" : set([]),
"Terminais" : set([]),
"Producoes" : []
}
self._infoAutomatoAtual = {
"EstadoInicial" : None,
"EstadosFinais" : set([]),
"Alfabeto" : set([]),
"Estados" : set([]),
"TabelaTransicao" : {}
}
    def adicionarProducao(self):
        """Validate the production typed in the production box and, when it
        is a valid regular-grammar production, append it to the list.

        Epsilon ("&") is only allowed in the initial (first) production and
        only without recursion.  Shows a warning dialog on rejection.
        """
        try:
            # user input with blanks stripped (QString.remove edits in place)
            producao = self.caixaProducoes.text().remove(" ")
            # "&" is only legal in the very first production, and the alfa
            # symbol must not reappear on the right-hand side (recursion)
            if str(producao).count("&") > 0 and (self.listaProducoes.count() > 0 or str(producao).count(str(producao)[0]) >= 2 ):
                raise ExcecaoAdicionarProducao("Producao invalida. So eh permitido & na producao inicial e sem recursao")
            # _verificarProducao raises on any format violation
            if self._verificarProducao(producao):
                p = QListWidgetItem(producao)
                self.listaProducoes.addItem(p)
                self.caixaProducoes.setText("")
        except ExcecaoAdicionarProducao as mensagem:
            QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def deletarProducao(self):
'''
Deleta a producao selecionada pelo usuario na lista de producoes ja aceitas
e remove o alfa dessa producao da lista de nao terminais
'''
linha = self.listaProducoes.currentRow()
item = self.listaProducoes.takeItem(linha)
nt = self._infoGramaticaAtual["NaoTerminais"]
self._infoGramaticaAtual["NaoTerminais"] = nt.difference(str(item.text())[0])
    def converterGRparaAF(self):
        """Convert the grammar in the production list into a finite
        automaton and display it in the transition table.

        Shows a warning dialog when the productions do not form a valid
        grammar.
        """
        try:
            self._limparAutomatoResultante()
            gramatica = self._montarGramatica()
            if gramatica is not None:
                automato = self._converterGRparaAF(gramatica)
                self._mostrarAutomato(automato)
        except ExcecaoImpossivelMontarGramatica as mensagem:
            QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def novaGramatica(self):
"""
Limpa todas as caixas e listas e reinicializa as variaveis para poder
criar uma nova gramatica
"""
self._limparListaSentencas()
self._limparProducoes()
self._limparCaixasGramatica()
self._infoGramaticaAtual["NaoTerminais"] = set([])
def novoAutomato(self):
"""
Limpa todas as caixas, listas e tabelas e tambem reinicializa
as variaveis para poder criar um novo automato
"""
self._infoAutomatoAtual["EstadoInicial"] = None
self._infoAutomatoAtual["EstadosFinais"] = set([])
self._infoAutomatoAtual["Alfabeto"] = set([])
self._infoAutomatoAtual["Estados"] = set([])
self._infoAutomatoAtual["TabelaTransicao"] = {}
self._limparAutomatoResultante()
self._limparEstados()
self._limparAlfabeto()
self._limparCaixasAutomato()
    def gerarSentencas(self):
        """Generate all sentences of length n with the current grammar and
        list them; n is read from the sentence-length box.

        Warns on a non-positive / non-numeric n and on an invalid grammar.
        """
        try:
            # requested sentence length
            n = int(self.caixaTamanhoProducoes.text())
            if n <= 0:
                QMessageBox.warning(self,"Erro","Entre com um numero positivo maior que zero.", QMessageBox.Ok)
                return
            # drop previous results, rebuild the grammar and generate
            self._limparListaSentencas()
            gramatica = self._montarGramatica()
            # NOTE(review): _montarGramatica raises ExcecaoImpossivelMontarGramatica,
            # which is not caught here -- confirm whether that is intended
            sentencas = gramatica.gerarSentencas(n)
            # fill the sentence list widget
            for sentenca in sentencas:
                self.listaSentencas.addItem( QListWidgetItem(str(sentenca)) )
        except ExcecaoConstruirGramatica:
            QMessageBox.warning(self,"Erro","Impossivel construir a gramatica", QMessageBox.Ok)
        except ValueError:
            QMessageBox.warning(self,"Erro","Entre com um numero positivo maior que zero.", QMessageBox.Ok)
def adicionarEstado(self):
"""
Adiciona o estado passado na caixa de estados pelo usuario
na lista de estados e nos dados do automato
Apenas insere o estado caso ele seja valido
"""
estado = str(self.caixaEstado.text())
#verifica se eh uma letra maiuscula que ainda nao foi utilizada
if estado != estado.upper() or estado == "":
QMessageBox.warning(self,"Erro","O estado deve ser uma letra maiuscula.", QMessageBox.Ok)
return
if estado in self._infoAutomatoAtual["Estados"]:
QMessageBox.warning(self,"Erro","Esse estado ja foi inserido.", QMessageBox.Ok)
return
estados = self._infoAutomatoAtual["Estados"]
self._infoAutomatoAtual["Estados"] = estados.union(estado)
#monta o nome do estado com * e -> para poder colocar na lista de estados e na tabela de transicao
nomeEstado = estado
if self.cbFinal.isChecked():
estadosFinais = self._infoAutomatoAtual["EstadosFinais"]
self._infoAutomatoAtual["EstadosFinais"] = estadosFinais.union(estado)
nomeEstado = "*" + nomeEstado
if self.cbInicial.isChecked():
estadoInicial = self._infoAutomatoAtual["EstadoInicial"]
#caso seja um estado inicial, verifica se eh o primeiro inserido, caso nao seja, substitui o anterior
if estadoInicial != None:
QMessageBox.warning(self,"Atencao","Estado inicial antigo ("+estadoInicial+") substituido por ("+estado+")!", QMessageBox.Ok)
#substitui o antigo na tabela de transicao
for i in range(self.tabelaTransicoes.columnCount()):
if str(self.tabelaTransicoes.takeVerticalHeaderItem(i).text()) == "->" + estadoInicial or str(self.tabelaTransicoes.takeVerticalHeaderItem(i).text()) == "->*" + estadoInicial:
self.tabelaTransicoes.setVerticalHeaderItem(i,QTableWidgetItem(estadoInicial.replace("->","")))
self._infoAutomatoAtual["EstadoInicial"] = estado
nomeEstado = "->" + nomeEstado
#adiciona na lista de estados e na tabela de transicao
self.listaEstados.addItem(nomeEstado)
n = self.tabelaTransicoes.rowCount()
self.tabelaTransicoes.setRowCount(n+1)
self.tabelaTransicoes.setVerticalHeaderItem(n,QTableWidgetItem(nomeEstado))
self.caixaEstado.setText("")
self.cbFinal.setChecked(False)
self.cbInicial.setChecked(False)
    def adicionarAlfabeto(self):
        """Add the alphabet symbol typed by the user to the alphabet list,
        the transition table and the current-automaton data.

        The symbol must be a single unused lower-case letter.
        """
        # symbol typed by the user
        alfabeto = str(self.caixaAlfabeto.text())
        # must be lower case and not used before
        if alfabeto != alfabeto.lower() or alfabeto == "":
            QMessageBox.warning(self,"Erro","O alfabeto deve ser uma letra minuscula.", QMessageBox.Ok)
            return
        if alfabeto in self._infoAutomatoAtual["Alfabeto"]:
            QMessageBox.warning(self,"Erro","Esse simbolo ja foi inserido.", QMessageBox.Ok)
            return
        # new transition-table column headed by the symbol, plus the combo
        # entry and the automaton data
        n = self.tabelaTransicoes.columnCount()
        self.tabelaTransicoes.setColumnCount(n+1)
        self.tabelaTransicoes.setHorizontalHeaderItem(n,QTableWidgetItem(alfabeto))
        letras = self._infoAutomatoAtual["Alfabeto"]
        self._infoAutomatoAtual["Alfabeto"] = letras.union(alfabeto)
        self.listaAlfabeto.addItem(alfabeto)
        self.caixaAlfabeto.setText("")
    def deletarEstado(self):
        """Delete the state currently selected in the state combo box and
        remove every reference to it from the current-automaton data."""
        # selected state, still carrying its "*" / "->" markers
        estado = str(self.listaEstados.currentText())
        # anything selected at all?
        if estado != "":
            indice = self.listaEstados.currentIndex()
            self.listaEstados.removeItem(indice)
            # drop the state's row from the transition table
            for indice in range(self.tabelaTransicoes.rowCount()):
                if self.tabelaTransicoes.verticalHeaderItem(indice).text() == estado:
                    self.tabelaTransicoes.removeRow(indice)
                    break
            # strip the display markers and forget the state
            estado = estado.replace('*','')
            estado = estado.replace('->','')
            estados = self._infoAutomatoAtual["Estados"]
            estados.remove(estado)
            self._infoAutomatoAtual["Estados"] = estados
            # also forget it as a final state, when applicable
            if estado in self._infoAutomatoAtual["EstadosFinais"]:
                estadosFinais = self._infoAutomatoAtual["EstadosFinais"]
                estadosFinais.discard(estado)
                self._infoAutomatoAtual["EstadosFinais"] = estadosFinais
            # and as the initial state, when applicable
            if estado == self._infoAutomatoAtual["EstadoInicial"]:
                self._infoAutomatoAtual["EstadoInicial"] = None
    def deletarAlfabeto(self):
        """Delete the alphabet symbol currently selected in the alphabet
        combo box from the list, the automaton data and the table."""
        # selected symbol
        alfabeto = str(self.listaAlfabeto.currentText())
        # anything selected at all?
        if alfabeto != "":
            indice = self.listaAlfabeto.currentIndex()
            self.listaAlfabeto.removeItem(indice)
            letras = self._infoAutomatoAtual["Alfabeto"]
            letras.remove(alfabeto)
            self._infoAutomatoAtual["Alfabeto"] = letras
            # drop the symbol's column from the transition table
            for indice in range(self.tabelaTransicoes.columnCount()):
                if self.tabelaTransicoes.horizontalHeaderItem(indice).text() == alfabeto:
                    self.tabelaTransicoes.removeColumn(indice)
                    return
def determinizarAutomato(self):
"""
Determiniza o automato especificado
na tabela de transicoes
"""
try:
automato = self._montarAutomato()
if automato is not None:
automato.determinizar()
self._mostrarAutomato(automato)
except ExcecaoImpossivelMontarAutomato as mensagem:
QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def minimizarAutomato(self):
"""
Minimiza o automato especificado na tabela de transicoes
caso o automato nao esteja determinizado, a excecao
gerada eh tratada
"""
try:
automato = self._montarAutomato()
automato.minimizarAFD()
self._mostrarAutomato(automato)
except ExcecaoMinimizarAutomatoNaoDeterministico as mensagem:
QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
except ExcecaoImpossivelMontarAutomato as mensagem:
QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def converterERparaAF(self):
"""
Converte a expressao regular passada pelo usuario em um automato
deterministico e minimo
Trata uma excecao caso a gramatica passada seja invalida
"""
er = str( self.caixaER.text().remove("") )
expressaoRegular = Expressao()
try:
automato = expressaoRegular.converterParaAF(er)
automato.removerEpsilonTransicao()
self._mostrarAutomato(automato)
except ExcecaoExpressaoInvalida as mensagem:
QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def salvarAutomato(self):
"""
Salva o automato caso ele seja valido em um arquivo
na pasta especificada em DIRETORIO_AUTOMATOS
tendo por default um nome no formato automato[NumeroQualquer]
podendo ser alterado por um nome escolhido pelo usuario
"""
try:
automato = self._montarAutomato()
#gera um numero qualquer para montar o nome do automato
x = int(random.random()*1000)
#retorna uma tupla (nomeArquivo, true/false)
nomeArq = QInputDialog.getText(self,"Nome do arquivo", "Qual o nome do automato?", QLineEdit.Normal, "Automato" + str(x))
if nomeArq[1]:
pickle.dump(automato, file(DIRETORIO_AUTOMATOS + str(nomeArq[0]), 'w'))
QMessageBox.information(self,"Automato salvo","Automato salvo com sucesso!")
except ExcecaoImpossivelMontarAutomato as mensagem:
QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def salvarGramatica(self):
"""
Salva a gramatica caso ela seja valida em um arquivo
na pasta especificada em DIRETORIO_GRAMATICAS
tendo por default um nome no formato gramatica[NumeroQualquer]
podendo ser alterado por um nome escolhido pelo usuario
"""
try:
gramatica = self._montarGramatica()
#numero qualquer para gerar o nome da gramatica
x = int(random.random()*1000)
#retorna uma tupla (nomeArquivo, true/false)
nomeArq = QInputDialog.getText(self,"Nome do arquivo", "Qual o nome do gramatica?", QLineEdit.Normal, "Gramatica" + str(x))
if nomeArq[1]:
pickle.dump(gramatica, file(DIRETORIO_GRAMATICAS + str(nomeArq[0]), 'w'))
QMessageBox.information(self,"Gramatica salva","Gramatica salva com sucesso!")
except ExcecaoImpossivelMontarGramatica as mensagem:
QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def carregarAutomato(self):
"""
Carrega um automato de um arquivo e monta e mostra o
automato na tabela de transicao
"""
nomeArquivo = str(QFileDialog.getOpenFileName(self, 'Abrir automato',DIRETORIO_AUTOMATOS,""))
arq = file(nomeArquivo)
automato = pickle.load(arq)
arq.close()
if automato is not None:
self._mostrarAutomato(automato)
def carregarGramatica(self):
"""
Carrega um arquivo contendo uma gramatica e monta
e mostra a gramatica na lista de producoes
"""
nomeArquivo = str(QFileDialog.getOpenFileName(self, 'Abrir gramatica',DIRETORIO_GRAMATICAS,""))
arq = file(nomeArquivo)
gramatica = pickle.load(arq)
arq.close()
if gramatica is not None:
self._mostrarGramatica(gramatica)
def reconhecerSentenca(self):
"""
Verifica se o automato em questao reconhece uma sentenca especifica ou nao
"""
try:
automato = self._montarAutomato()
sentenca = str(self.caixaSentenca.text())
reconheceu = automato.reconhecerSentenca(sentenca)
if reconheceu:
QMessageBox.information(self,"Reconhecimento","A sentenca " + sentenca + " pertence a linguagem!")
else:
QMessageBox.information(self,"Reconhecimento","A sentenca " + sentenca + " nao pertence a linguagem!")
except ExcecaoImpossivelMontarAutomato as mensagem:
QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def converterAFparaGR(self):
"""
Converte um automato finito que esteja corretamente
especificado para uma gramatica regular e mostra essa
gramatica na lista de producoes
Verifica se o automato esta corretamente especificado
"""
try:
self._limparProducoes()
automato = self._montarAutomato()
gramatica = self._converterAFparaGR(automato)
self._mostrarGramatica(gramatica)
except ExcecaoImpossivelMontarAutomato as mensagem:
QMessageBox.warning(self,"Erro", str(mensagem), QMessageBox.Ok)
def _limparAutomatoResultante(self):
"""
Limpa a tabela de transicao que contem
o automato resultante
NAO DELETA as informacoes do automato, apensar apaga
o automato visualmente
"""
for _ in range(self.tabelaTransicoes.rowCount()):
self.tabelaTransicoes.removeRow(0)
for _ in range(self.tabelaTransicoes.columnCount()):
self.tabelaTransicoes.removeColumn(0)
def _atualizarAutomato(self, automato):
"""
Atualiza as informacoes do automato usando os
dados do automato passado como paramentro
"""
self._infoAutomatoAtual["EstadoInicial"] = automato.estadoInicial
self._infoAutomatoAtual["EstadosFinais"] = automato.estadosFinais
self._infoAutomatoAtual["Alfabeto"] = automato.alfabeto
self._infoAutomatoAtual["Estados"] = automato.estados
self._infoAutomatoAtual["TabelaTransicao"] = automato.tabelaTransicao
def _atualizarGramatica(self, gramatica):
"""
Atualiza as informacoes da gramatica usando os
dados da gramatica passada como paramentro
"""
self._infoGramaticaAtual["ProducaoInicial"] = gramatica.producaoInicial
self._infoGramaticaAtual["NaoTerminais"] = gramatica.naoTerminais
self._infoGramaticaAtual["Terminais"] = gramatica.terminais
self._infoGramaticaAtual["Producoes"] = gramatica.producoes
    def _mostrarAutomato(self, automato):
        """Display the given (already valid) automaton in the transition
        table, repopulating the state and alphabet combo boxes as well.

        Row headers are decorated with "->" for the initial state and "*"
        for final states.
        """
        # wipe whatever automaton was shown before and adopt the new data
        self._limparCaixasAutomato()
        self._limparAlfabeto()
        self._limparEstados()
        self._limparAutomatoResultante()
        self._atualizarAutomato(automato)
        # maps estado -> {simbolo: destino}
        tabelaTransicao = automato.tabelaTransicao
        # sorted copy of the alphabet drives the column order
        alfabeto = ([letra for letra in automato.alfabeto])
        alfabeto.sort()
        # one table column (and combo entry) per alphabet symbol
        for letra in alfabeto:
            n = self.tabelaTransicoes.columnCount()
            self.tabelaTransicoes.setColumnCount(n+1)
            self.tabelaTransicoes.setHorizontalHeaderItem(n,QTableWidgetItem(letra))
            self.listaAlfabeto.addItem(letra)
        # one table row per state
        linha = 0
        n = len(tabelaTransicao.keys())
        self.tabelaTransicoes.setRowCount( n )
        # fill row headers, the state combo and the transition cells
        for estado in tabelaTransicao.keys():
            transicoes = tabelaTransicao.get(estado)
            # decorate the displayed name with the initial/final markers
            if estado == self._infoAutomatoAtual["EstadoInicial"] and estado in self._infoAutomatoAtual["EstadosFinais"]:
                estado = "*->" + estado
            elif estado == self._infoAutomatoAtual["EstadoInicial"]:
                estado = "->" + estado
            elif estado in self._infoAutomatoAtual["EstadosFinais"]:
                estado = "*" + estado
            self.tabelaTransicoes.setVerticalHeaderItem(linha,QTableWidgetItem(estado))
            self.listaEstados.addItem(estado)
            coluna = 0
            for letra in transicoes.keys():
                simbolo = transicoes.get(letra)
                # find the column belonging to this alphabet symbol
                for i in range(len(alfabeto)):
                    if alfabeto[i] == letra:
                        coluna = i
                self.tabelaTransicoes.setItem(linha,coluna,QTableWidgetItem(simbolo))
            linha += 1
def _verificarProducao(self, producao):
"""
Verifica se a producao passada eh valida segundo
as especificados das gramaticas regulares
Retorna True ou lanca uma excecao contendo a explicacao
do porque a producao nao foi aceita
"""
#valores possiveis de serem usados na producao
maiusculas = map(chr, range(65, 91))
minusculas = map(chr, range(97, 123))
numeros = map(chr, range(48, 57))
numeros.append("&")
p = producao.split(SIMBOLO_SEPARADOR_PRODUCAO)
#se o resultado for maior que 2 (o terminal e suas producoes)
if len(p) != 2:
raise ExcecaoAdicionarProducao("Deve haver apenas um simbolo " + SIMBOLO_SEPARADOR_PRODUCAO + " em toda a producao.")
#se alfa for maior que um ou nao for maiuscula
if len(p[0]) != 1 or str(p[0]) not in maiusculas:
raise ExcecaoAdicionarProducao("Producao invalida. Alfa ("+str(p[0])+") deveria ser maiuscula e com apenas um caracter.")
if str(p[0]) in self._infoGramaticaAtual["NaoTerminais"]:
raise ExcecaoAdicionarProducao("Esse nao terminal ja foi utlizado, escolha outro")
transicoes = p[1].split(SIMBOLO_SEPARADOR_TRANSICAO)
#para cada uma das transicoes encontradas
for t in transicoes:
#transicoes no formato aA
if len(t) == 2:
#verifica se a primeira eh minuscula e a segunda maiuscula
if str(t[0]) != str(t[0]).lower() or str(t[1]) != str(t[1]).upper():
raise ExcecaoAdicionarProducao("Transicao " + str(t) + " no formato errado. Deveria ser " + str(t[0]).lower() + str(t[1]).upper())
elif str(t[1]) in numeros:
raise ExcecaoAdicionarProducao("Transicao invalida. O segundo simbolo nao pode ser um numero, deve ser uma letra maiuscula.")
elif str(t[0]) not in numeros and str(t[0]) not in minusculas:
raise ExcecaoAdicionarProducao("Simbolo invalido na producao " + str(t) + ". Deve-se usar apenas letras ou numeros.")
#transicoes no formato a
elif len(t) == 1:
#caso a primeira letra nao seja um terminal
if str(t[0]) != str(t[0]).lower():
raise ExcecaoAdicionarProducao("A transicao " + str(t) + " esta no formato errado. Deveria ser " + str(t).lower())
elif str(t[0]) not in numeros and str(t[0]) not in minusculas:
raise ExcecaoAdicionarProducao("Simbolo " + str(t[0]) + " eh invalido. Deve ser uma letra ou numero.")
#caso nao atenda a nenhum dos formatos das gramaticas regulares
else:
#caso tenha mais de dois simbolos, nao pertence a GR
raise ExcecaoAdicionarProducao("A transicao " + str(t) + " esta no formato errado. Deveria ter no maximo dois simbolos.")
nt = self._infoGramaticaAtual["NaoTerminais"]
self._infoGramaticaAtual["NaoTerminais"] = nt.union(str(p[0]))
return True
def _limparListaSentencas(self):
"""
Limpa a lista de sentencas de tamanho n geradas pela gramatica
"""
for _ in range(self.listaSentencas.count()): self.listaSentencas.takeItem(0)
def _limparCaixasGramatica(self):
"""
Limpa a caixa onde o usuario entra com a
producao que deseja adicionar, e tambem limpa a caixa
que contem o tamanho das sentencas que ele deseja gerar
"""
self.caixaProducoes.setText("")
self.caixaTamanhoProducoes.setText("")
def _limparCaixasAutomato(self):
"""
Limpa as caixas do automato: a caixa que o usuario usa para entrar
com o novo simbolo do alfabeto, para entar com o novo estado que deseja
adicionar e a caixa onde ele especifica uma sentenca que ele gostaria de saber
se o automato reconhece ou nao
"""
self.caixaAlfabeto.setText("")
self.caixaEstado.setText("")
self.caixaSentenca.setText("")
def _limparEstados(self):
"""
Limpa a lista de estados do automato
"""
for _ in range(self.listaEstados.count()): self.listaEstados.removeItem(0)
def _limparAlfabeto(self):
"""
Limpa a lista de simbolos do alfabeto do automato
"""
for _ in range(self.listaAlfabeto.count()): self.listaAlfabeto.removeItem(0)
    def _montarGramatica(self):
        """Build a Gramatica from the production list widget.

        Raises ExcecaoImpossivelMontarGramatica when the list is empty or
        some non-terminal is referenced without having a production of its
        own.  Otherwise returns the Gramatica; the first listed production
        becomes the initial one.
        """
        terminais = set([])
        nterminais = set([])
        producoes = []
        # every non-terminal mentioned anywhere; must end up covered by nterminais
        nterminaisFaltantes = set([])
        # one Producao per entry in the production list widget
        for indice in range(self.listaProducoes.count()):
            item = self.listaProducoes.item(indice).text()
            p = item.split(SIMBOLO_SEPARADOR_PRODUCAO)
            producao = Producao(str(p[0]),str(p[1]))
            producoes.append(producao)
            # alfa is a defined non-terminal...
            nterminais.add(str(p[0]))
            nterminaisFaltantes.add(str(p[0])) # ...and trivially also a mentioned one
            # scan each beta for terminals and referenced non-terminals
            for t in str(p[1]).split(SIMBOLO_SEPARADOR_TRANSICAO):
                if len(str(t)) == 2:
                    terminais.add(str(t[0]))
                    nterminaisFaltantes.add(str(t[1]))
                if len(str(t)) == 1 and str(t) != "&":
                    terminais.add(str(t))
        # any mentioned non-terminal without its own production is an error
        if nterminaisFaltantes.difference(nterminais) != set([]):
            raise ExcecaoImpossivelMontarGramatica("Gramatica invalida. Alguns nao terminais nao foram especificados: " + str(nterminaisFaltantes.difference(nterminais)) )
        if len(producoes) > 0:
            return Gramatica(producoes[0], nterminais, terminais, producoes)
        else:
            raise ExcecaoImpossivelMontarGramatica("Gramatica vazia")
def _montarAutomato(self):
"""
Monta o automato especificado na tabela de transicao
e lanca uma excecao caso ele nao esteja corretamente
especificado.
Caso de tudo certo, retorna um Automato
"""
tabelaTransicao = {}
#verifica se o automato possui estado inicial e estados finais
if self._infoAutomatoAtual["EstadoInicial"] == None:
raise ExcecaoImpossivelMontarAutomato("O automato nao possui estado inicial.")
if self._infoAutomatoAtual["EstadosFinais"] == set([]):
raise ExcecaoImpossivelMontarAutomato(self,"Erro","O automato nao possui estados finais.")
#para cada uma das linhas da tabela de transicoes
for linha in range(self.tabelaTransicoes.rowCount()):
aux = {}
#para cada uma das colunas da tabela de transicoes
for coluna in range(self.tabelaTransicoes.columnCount()):
#pega o simbolo do alfabeto que vai para determinado estado
simboloAlfabeto = str(self.tabelaTransicoes.horizontalHeaderItem(coluna).text())
transicao = self.tabelaTransicoes.item(linha,coluna)
#se nao for uma transicao vazia:
if transicao != None and transicao.text() != "" :
transicao = str(transicao.text())
#verifica se o estado foi especificado ou se ele nao esta corretamente formado (por exemplo (AB, ou A,)
if (len(transicao) == 1 and transicao not in self._infoAutomatoAtual["Estados"] ) or len(transicao) % 2 == 0:
raise ExcecaoImpossivelMontarAutomato("A transicao " + transicao + " nao esta corretamente especificada na tabela.")
elif len(transicao) % 2 != 0:
for i in transicao:
if i != ",":
if i not in self._infoAutomatoAtual["Estados"]:
raise ExcecaoImpossivelMontarAutomato("A transicao " + transicao + " nao esta corretamente especificada na tabela.")
aux[ simboloAlfabeto ] = transicao
# else:
# transicao = ""
# aux[ simboloAlfabeto ] = transicao
estado = str(self.tabelaTransicoes.verticalHeaderItem(linha).text())
estado = estado.replace("*","")
estado = estado.replace("->","")
tabelaTransicao[ estado ] = aux
return Automato(tabelaTransicao, self._infoAutomatoAtual["Alfabeto"], self._infoAutomatoAtual["Estados"],
self._infoAutomatoAtual["EstadoInicial"], self._infoAutomatoAtual["EstadosFinais"])
def _limparProducoes(self):
"""
Limpa a lista que mostra todas as producoes da gramatica
"""
for _ in range(self.listaProducoes.count()): self.listaProducoes.takeItem(0)
def _mostrarGramatica(self, gramatica):
"""
Limpa a lista de producoes, atualiza os dados da gramatica
com a gramatica passada como parametro e mostra seus dados
na lista de producoes
"""
self._limparProducoes()
self._atualizarGramatica(gramatica)
#insere todos os outros itens
for producao in gramatica.producoes:
if producao != gramatica.producaoInicial:
self.listaProducoes.insertItem(0,QListWidgetItem(str(producao)) )
#insere a producao inicial
self.listaProducoes.insertItem(0, QListWidgetItem(str(gramatica.producaoInicial)) )
    def _converterAFparaGR(self, automato):
        """Convert the given (valid) finite automaton into a regular
        grammar.

        Builds one production per state that yields at least one
        alternative; the initial production is the one whose alfa equals
        the automaton's initial state.  Returns the resulting Gramatica.
        """
        producoes = []
        # build one production per state
        for estado in automato.tabelaTransicao.keys():
            transicoes = ""
            # collect "a" / "aB" alternatives from each outgoing transition
            transicao = automato.tabelaTransicao.get(estado)
            for trans in transicao.keys():
                # a transition into a final state also yields the bare terminal
                if transicao.get(trans) in automato.estadosFinais:
                    transicoes += trans + "|"
                t = automato.tabelaTransicao.get(transicao.get(trans))
                # a destination with outgoing transitions yields "aB"
                if len(t.keys()) > 0:
                    transicoes += trans + transicao.get(trans) + "|"
            # an initial state that is also final accepts epsilon
            if estado == automato.estadoInicial and estado in automato.estadosFinais:
                transicoes += "&" + "|"
            # keep the production only if it has at least one alternative
            # (transicoes always carries a trailing "|", stripped here)
            if transicoes[:-1] != "":
                producoes.append(Producao(estado,transicoes[:-1]))
        # NOTE(review): if the initial state produced no alternatives above,
        # producaoInicial is never bound and the return raises NameError --
        # confirm callers guarantee the initial state has transitions
        for i in range(len(producoes)):
            producao = producoes[i]
            if producao.obterAlfa() == automato.estadoInicial:
                producaoInicial = producao
                break
        estados = ([p.obterAlfa() for p in producoes ])
        return Gramatica(producaoInicial,estados,automato.alfabeto,producoes)
def _converterGRparaAF(self,gramatica):
"""
Converte a gramatica regular passada para automato finito
Assume-se que a gramatica passada esta corretamente especificada
Retorna um Automato
"""
estadosFinais = []
if "F" not in gramatica.naoTerminais:
estadoFinal = "F"
else:
possiveisNaoTerminais = map(chr, range(65, 91))
for nt in possiveisNaoTerminais:
if nt not in gramatica.naoTerminais:
estadoFinal = nt
break
tabelaTransicao = {}
for beta in gramatica.producaoInicial.obterListaBetas():
if beta == "&":
estadosFinais.append(gramatica.producaoInicial.obterAlfa())
#para cada uma das producoes
for producao in gramatica.producoes:
novaTransicao = {}
#para cada um dos betas dessa producao
for beta in producao.obterListaBetas():
#caso beta seja apenas um terminal
if len(beta) == 1 and beta != "&":
if novaTransicao.has_key(beta):
novaTransicao[beta] = novaTransicao[beta] + "," + estadoFinal
else:
novaTransicao[beta] = estadoFinal
elif len(beta) == 2:
if novaTransicao.has_key(beta[0]):
novaTransicao[beta[0]] = beta[1] +","+ novaTransicao[beta[0]]
else:
novaTransicao[beta[0]] = beta[1]
alfa = producao.obterAlfa()
tabelaTransicao[alfa] = novaTransicao
tabelaTransicao[estadoFinal] = {}
estadosFinais.append(estadoFinal)
return Automato( tabelaTransicao, gramatica.terminais, tabelaTransicao.keys(), gramatica.producaoInicial.obterAlfa(), estadosFinais)
#----------------------- classes de Excecao -----------------------
class ExcecaoAdicionarProducao(Exception):
    """Raised when a production being added to the grammar is invalid."""

    def __init__(self, value):
        # Keep the offending detail so __str__ can report it.
        self.value = value

    def __str__(self):
        return repr("Producao invalida. " + self.value)
class ExcecaoImpossivelMontarAutomato(Exception):
    """Raised when the automaton cannot be assembled from the current input."""

    def __init__(self, value):
        # Keep the offending detail so __str__ can report it.
        self.value = value

    def __str__(self):
        return repr("Impossivel montar o automato. " + self.value)
class ExcecaoImpossivelMontarGramatica(Exception):
    """Raised when the grammar cannot be assembled from the current input."""

    def __init__(self, value):
        # Keep the offending detail so __str__ can report it.
        self.value = value

    def __str__(self):
        return repr("Impossivel montar a gramatica. " + self.value)
| pdousseau/formal_language | src/Gui/MainWindow.py | MainWindow.py | py | 37,282 | python | pt | code | 2 | github-code | 36 |
# Train a decision tree on the iris dataset and export its structure to
# iris.pdf via graphviz/pydotplus.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import tree
import pydotplus
iris = load_iris()
iris_X = iris.data
iris_Y = iris.target
# 70/30 train/test split.
# NOTE(review): X_test/y_test are never used — there is no accuracy
# evaluation; consider scoring the classifier or dropping the split.
X_train, X_test, y_train, y_test = train_test_split(
    iris_X, iris_Y, test_size=0.3)
clf = tree.DecisionTreeClassifier()
clf.fit(X_train, y_train)
# Render the fitted tree as DOT text, then write it out as a PDF.
dot_data = tree.export_graphviz(clf, out_file=None)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf("iris.pdf")
| beancookie/sklearn | tree.py | tree.py | py | 473 | python | en | code | 0 | github-code | 36 |
# Launcher that runs the SKYLINE discord bot until a fixed daily cutoff
# (17:01 UTC), then shuts it down and cancels any leftover asyncio tasks.
import asyncio
import datetime
import os
import discord
from new_skyline2 import SKYLINE
# Bot token comes from the environment; a missing variable fails fast (KeyError).
token = os.environ['token']
loop = asyncio.get_event_loop()
client = SKYLINE(loop=loop, intents=discord.Intents.all())
async def main():
    """Run the bot until the next 17:01 UTC cutoff, then close it and drain tasks."""
    now = datetime.datetime.utcnow()
    # Next cutoff today at 17:01 UTC; if that moment has passed, use tomorrow's.
    endtime = now.replace(hour=17, minute=1, second=0, microsecond=0)
    if now >= endtime:
        endtime += datetime.timedelta(days=1)
    # Run the bot with a timeout; asyncio.wait returns at the deadline without
    # cancelling client.start() itself, so close() below does the shutdown.
    await asyncio.wait([client.start(token)], timeout=(endtime - now).total_seconds())
    await client.close()
    # main_task is assigned at module level before this coroutine first awaits,
    # so the closure reference below is resolved by the time it is read.
    all_tasks = [t for t in asyncio.all_tasks(loop=loop) if t != main_task]
    # NOTE(review): all_tasks is never refreshed inside the loop, so this
    # re-waits the same snapshot, cancelling whatever is still pending each
    # round until nothing remains pending — confirm this drain is intentional.
    while all_tasks:
        done, pending = await asyncio.wait(all_tasks, timeout=5)
        print(pending)
        [t.cancel() for t in pending]
        if not pending:
            break
main_task = loop.create_task(main())
loop.run_until_complete(main_task)
loop.close()
| Kesigomon/Skyline_py | run.py | run.py | py | 870 | python | en | code | 7 | github-code | 36 |
39157550873 | #!/usr/bin/env python3
import click
import sys
from pathlib import Path
from RecBlast.RecBlast import RecSearch
import RecBlast.WarningsExceptions as RBWE
def deduce_searchtype(query_type, db_type, search_algorithm):
    """Map query/database sequence types and an engine name to the concrete
    search program to run.

    Args:
        query_type: "dna" or "prot" (case-insensitive) — the query sequences.
        db_type: "dna" or "prot" (case-insensitive) — the search database.
        search_algorithm: engine name containing "blast" or "blat".
    Returns:
        The program name, e.g. "blastx", "tblastn", "blat", "tblat".
    Raises:
        Exception: on an unknown query or database type.
        RBWE.SearchEngineNotImplementedError: for any other engine.
    """
    # Normalise everything to lowercase before matching.
    query_type = query_type.lower()
    db_type = db_type.lower()
    search_algorithm = search_algorithm.lower()
    # (engine, query type, db type) -> program name.
    program_table = {
        ("blast", "dna", "prot"): "blastx",
        ("blast", "dna", "dna"): "blastn",
        ("blast", "prot", "prot"): "blastp",
        ("blast", "prot", "dna"): "tblastn",
        ("blat", "dna", "prot"): "blatx",
        ("blat", "dna", "dna"): "blat",
        ("blat", "prot", "prot"): "blatp",
        ("blat", "prot", "dna"): "tblat",
    }
    # "blast" is tested before "blat", matching the original branch order.
    for engine in ("blast", "blat"):
        if engine in search_algorithm:
            if query_type not in ("dna", "prot"):
                raise Exception("Unknown search sequence type! Allowed options are 'dna' or 'prot'")
            if db_type not in ("dna", "prot"):
                raise Exception("Unknown search database type! Allowed options are 'dna' or 'prot'")
            return program_table[(engine, query_type, db_type)]
    raise RBWE.SearchEngineNotImplementedError("This search engine hasn't been implemented yet! Only BLAT and BLAST have been implemented!")
# Command-line entry point: wires the CLI options into a configured RecSearch
# run (forward search against the target 2bit genome, reverse search against
# the query species' 2bit genome) and executes it.
@click.command()
@click.option("-q", "--query-file", type=click.Path(exists=True))
@click.option("--query-file-type", type=str, default="fasta")
@click.option("-p", "--max-processes", type=int, default=40)
@click.option("-fp", "--forward-port")
@click.option("-rp", "--reverse-port")
@click.option("-fs", "--forward-species", type=str)
@click.option("-ft", "--forward-twobit", type=click.Path(exists=False))
@click.option("-rs", "--reverse-species", type=str)
@click.option("-rt", "--reverse-twobit", type=click.Path(exists=False))
@click.option("-ps", "--perc-score", type=str, default= "0.1")
@click.option("-pi", "--perc-identity", type=str, default = "0.5")
@click.option("-pq", "--perc-query-span", type=str, default = "0.5")
@click.option("--query_type", type=str, default = "prot")
@click.option("--reverse_type", type=str, default = "dna")
@click.option("--forward_algo", type=str, default = "blat")
@click.option("--reverse_algo", type=str, default = "blat")
@click.option("--reverse_db_type", type=str, default = "dna")
@click.option("--forward_db_type", type=str, default = "dna")
@click.option("--annotation_lookup_tsv", type=str, default = "")
@click.option("--output-root", type=str, default="./output")
@click.option('-v', '--verbose', count=True)
def __main__(query_file, forward_port, forward_species, forward_twobit,
             reverse_port, reverse_species, reverse_twobit,
             query_type, forward_db_type, forward_algo,
             reverse_type, reverse_db_type, reverse_algo,
             perc_score, perc_identity, perc_query_span, query_file_type, max_processes,
             annotation_lookup_tsv, output_root, verbose):
    """Run a reciprocal-best-hit search (RecSearch) configured from the CLI."""
    # Thresholds arrive as strings (click type=str above) — convert here.
    perc_score = float(perc_score)
    perc_identity = float(perc_identity)
    perc_query_span = float(perc_query_span)
    forward_twobit = Path(forward_twobit)
    reverse_twobit = Path(reverse_twobit)
    # Echo the effective configuration to stderr for logging/debugging.
    print(forward_twobit, reverse_twobit, output_root, perc_identity, perc_score, perc_query_span, query_file, sep="\n", file=sys.stderr)
    # Results go under <output_root>/<forward genome name>.
    output_location = Path(output_root, forward_twobit.stem)
    print(output_location, file=sys.stderr)
    # Translate (sequence type, db type, engine) into concrete program names.
    f_search_type = deduce_searchtype(query_type, forward_db_type, forward_algo)
    r_search_type = deduce_searchtype(reverse_type, reverse_db_type, reverse_algo)
    recblast = RecSearch(target_species=forward_species, query_species=reverse_species,
                         forward_search_type=f_search_type, reverse_search_type=r_search_type,
                         sequence_source="twobit", verbose=verbose)
    recblast.max_processes = max_processes
    recblast.set_queries(query_file,
                         infile_type=query_file_type)
    # Forward search: database name/path/port derived from the forward 2bit.
    recblast.forward_search_settings['database_port'] = {forward_species: forward_port}
    recblast.forward_search_settings['database'] = {forward_species: str(forward_twobit.name)}
    recblast.forward_search_settings['database_path'] = str(forward_twobit.parent)
    recblast.forward_search_criteria = dict(perc_score=perc_score,
                                            perc_ident=perc_identity,
                                            perc_query_span=perc_query_span)
    # Hit sequences are pulled from the same forward 2bit genome.
    recblast.sequence_source_settings['database'] = {forward_species: str(forward_twobit.name)}
    recblast.sequence_source_settings['database_path'] = str(forward_twobit.parent)
    recblast.memory_saver_level = 1
    # Reverse search runs against the query species' 2bit genome.
    recblast.reverse_search_settings['database'] = {reverse_species: str(reverse_twobit.name)}
    recblast.reverse_search_settings['database_path'] = str(reverse_twobit.parent)
    recblast.reverse_search_settings['database_port'] = {reverse_species: reverse_port}
    # Optional annotation translation via a lookup TSV; disabled otherwise.
    if annotation_lookup_tsv:
        recblast.set_translation_annotation_parameters(method="table", key_value_order=False,
                                                       tsv_location=annotation_lookup_tsv)
    else:
        recblast.set_translation_annotation_parameters(method=False)
    # Run name encodes the query file and all thresholds for traceability.
    recblast(run_name="{0}-pcScore{1}_pcIdent{2}_pcQuerySpan{3}_reverse-{4}".format(Path(query_file).stem,
                                                                                   perc_score,
                                                                                   perc_identity,
                                                                                   perc_query_span,
                                                                                   reverse_twobit.stem),
             output_type="bed-complete",
             output_location=output_location)
if __name__ == "__main__":
    __main__()
    exit()
| docmanny/smRecSearch | code/rbb.py | rbb.py | py | 6,528 | python | en | code | 1 | github-code | 36 |
class Solution:
    def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
        """Return the words whose letters map bijectively onto *pattern*.

        A word matches when there is a one-to-one letter substitution that
        turns it into the pattern (and vice versa).
        """
        def follows(word: str) -> bool:
            # Forward (word->pattern) and backward (pattern->word) letter maps;
            # both must stay consistent for the mapping to be a bijection.
            forward = {}
            backward = {}
            for i in range(len(word)):
                if forward.setdefault(word[i], pattern[i]) != pattern[i]:
                    return False
                if backward.setdefault(pattern[i], word[i]) != word[i]:
                    return False
            return True

        return [word for word in words if follows(word)]
| ileenf/Data-Structures-Algos | String/find_replace_pattern.py | find_replace_pattern.py | py | 879 | python | en | code | 0 | github-code | 36 |
# SCons build script (Madagascar/RSF user directory): compiles the C, C++ and
# MPI programs listed below and installs them plus their self-documentation.
import os, sys, re, string
sys.path.append('../../framework')
import bldutil
# Space-separated names of plain C programs (each built from M<name>.c).
progs = 'fftwave1dd cfftwave1dd cfftwave1in fftwave2p fftwave3p cfftwave2 cfftwave3 cfftexpmig2 fftexp0test fd2d cfftexp2 cfftexp2test fdtacc wcfftexp2 wcfftexp2adj cfftwave2nsps cfftwave2mix2 wavemixop lrosrtm2 lroslsrtm2 stack2d cstack2d fftexp0test lrlsrtm2mpi imagsrc zolsrtm2 initwave2 correctwave2 lrwave2 cmatrix cfftwave1d zortmgmres claplac psp pspmig premig cfftwave2omp cfftwave3p lrtti2de clfd1 clfd2 ofd2_test swnorm pspp '
# cfftwave2taper fftwave2taper
# cfftwave2taper
# pawave2 cpawave2 cpswave2
# cfftwave2dhalfft
# icfftwave2 cfftwave1dtest cfftwave2test
# cfftwave1dinit cfftwave2nspstest
# lriso2de lrelasiso
# cfftwave2tt lasthope cfftwave2one waveadjtest cfftwave2abc fftwave2abc
# pawave2 cpawave2 cpswave2 icfftwave2 cfftwave1dtest
libprop = 'fftwave2omp fftwave3omp eweks3d vweks3d xcor2 xcor3 xcor3d mutter3'
# C++ programs (built from M<name>.cc; require LAPACK).
ccprogs = 'tilr2 ortholr3 cortholr3 cisolr2 cisolr2rev cisolr3 ctilr2 cisolr2grad cisolr2abc cisolr2abc1 cisolr1 canisolr2 canisolr2abc fraclr2 zfraclr2 fraclr2test tilrzone ortholrzone zortholr3 zanisolr2 zanisolr2abc clfdc1 clfdc2 clfdc1-bak zisolr2abc clfdc1frac'
# abclr2 isolr2sta isolr2abc cfraclr2
# eiktest eiktest2 cisolr1eik eiksol cfraclr2 rfraclr2 cisolr2twostep
# cisolr2hyb icisolr2
# MPI programs (compiled with $MPICC when available).
mpi_progs = 'mpilsrtm mpiewertm mpirtmop mpilsrtmgmres mpircvrtm mpiqrtm'
#mpiwave2 mpiwave3 mpifftexp1 mpiwave2kiss mpiwave3kiss mpifftexp1kiss'
pyprogs = 'fft'
pymods = ''
try: # distributed version
    Import('env root pkgdir bindir libdir incdir')
    env = env.Clone()
except: # local version
    env = bldutil.Debug()
    root = None
    SConscript('../lexing/SConstruct')
src = Glob('[a-z]*.c')
env.Prepend(CPPPATH=['../../include'],
            LIBPATH=['../../lib'],
            LIBS=[env.get('DYNLIB','')+'rsf'])
# Enable FFTW-backed code paths only when FFTW was detected at configure time.
fftw = env.get('FFTW')
if fftw:
    env.Prepend(CPPDEFINES=['SF_HAS_FFTW'])
# Compile every lower-case .c source, tracking its generated include.
for source in src:
    inc = env.RSF_Include(source,prefix='')
    obj = env.StaticObject(source)
    env.Depends(obj,inc)
mpicc = env.get('MPICC')
# N*.c sources are MPI code and must be compiled with the MPI compiler.
mpi_src = Glob('N[a-z]*.c')
for source in mpi_src:
    inc = env.RSF_Include(source,prefix='')
    obj = env.StaticObject(source,CC=mpicc)
    env.Depends(obj,inc)
# Build and (in the distributed tree) install each C main program.
# NOTE(review): map(...) returns an iterator on Python 3 — this script
# appears written for SCons on Python 2; verify before porting.
mains = Split(progs+' '+libprop)
for prog in mains:
    sources = ['M' + prog]
    bldutil.depends(env,sources,'M'+prog)
    prog = env.Program(prog,map(lambda x: x + '.c',sources))
    if root:
        env.Install(bindir,prog)
mpi_mains = Split(mpi_progs)
#env.Append(LIBS=['fftw3f_mpi'])
for prog in mpi_mains:
    sources = ['M' + prog]
    bldutil.depends(env,sources,'M'+prog)
    if mpicc:
        env.StaticObject('M'+prog+'.c',CC=mpicc)
        #prog = env.Program(prog,map(lambda x: x + '.o',sources),CC=mpicc,LIBS=env.get('LIBS')+['fftw3f_mpi'])
        prog = env.Program(prog,map(lambda x: x + '.o',sources),CC=mpicc)
    else:
        # No MPI compiler: install a placeholder that reports the missing tool.
        prog = env.RSF_Place('sf'+prog,None,var='MPICC',package='mpi')
    if root:
        env.Install(bindir,prog)
# C++ programs need the C++ API and LAPACK.
if 'c++' in env.get('API',[]):
    lapack = env.get('LAPACK')
else:
    lapack = None
if lapack:
    libsxx = [env.get('DYNLIB','')+'rsf++','vecmatop']
    if not isinstance(lapack,bool):
        libsxx.extend(lapack)
    env.Prepend(LIBS=libsxx)
ccmains = Split(ccprogs)
for prog in ccmains:
    sources = ['M' + prog]
    if lapack:
        prog = env.Program(prog,map(lambda x: x + '.cc',sources))
    else:
        prog = env.RSF_Place('sf'+prog,None,var='LAPACK',package='lapack')
    if root:
        env.Install(bindir,prog)
# Stand-alone unit-test executables (Test<name>.x).
for prog in Split('cmatmult2'):
    sources = ['Test' + prog,prog]
    if prog=='cmatmult2':
        sources.append('cgmres')
    bldutil.depends(env,sources,prog)
    sources = map(lambda x: x + '.o',sources)
    env.Object('Test' + prog + '.c')
    env.Program(sources,PROGPREFIX='',PROGSUFFIX='.x')
######################################################################
# SELF-DOCUMENTATION
######################################################################
if root:
    user = os.path.basename(os.getcwd())
    main = 'sf%s.py' % user
    docs = map(lambda prog: env.Doc(prog,'M' + prog),mains+mpi_mains) + \
           map(lambda prog: env.Doc(prog,'M%s.cc' % prog,lang='c++'),ccmains)
    env.Depends(docs,'#/framework/rsf/doc.py')
    doc = env.RSF_Docmerge(main,docs)
    env.Install(pkgdir,doc)
| gewala/mada | user/jsun/SConstruct | SConstruct | 4,351 | python | en | code | 7 | github-code | 36 | |
15826519032 | import json
import logging
logging.basicConfig(level=logging.DEBUG)
import argparse
import uuid
import emission.storage.decorations.user_queries as esdu
import emission.net.ext_service.push.notify_usage as pnu
import emission.net.ext_service.push.query.dispatch as pqd
import emission.core.wrapper.user as ecwu
import emission.core.get_database as edb
def get_uuid_list_for_platform(platform):
    """Return the UUIDs of all users whose phone runs on *platform*."""
    platform_query = pqd.get_query_fn("platform")
    return platform_query({"platform": platform})
def get_upgrade_push_spec(platform):
    """Build the push-notification spec that points users at the app store
    page for their platform.

    Args:
        platform: "android" or "ios".
    Returns:
        A dict suitable for pnu.send_visible_notification_to_users.
    Raises:
        ValueError: if platform is neither "android" nor "ios".
    """
    android_url = "https://play.google.com/store/apps/details?id=gov.nrel.cims.openpath"
    ios_url = "https://apps.apple.com/us/app/nrel-openpath/id1628058068"
    if platform == "android":
        platform_url = android_url
    elif platform == "ios":
        platform_url = ios_url
    else:
        # BUGFIX: the original raised InvalidArgumentException, which is not
        # defined anywhere and would have surfaced as a NameError; ValueError
        # is the appropriate builtin for a bad argument value.
        raise ValueError("Found unknown platform %s, expected 'android' or 'ios'" % platform)
    push_spec = {
        "alert_type": "website",
        "title": "Your version of the NREL OpenPATH app may have errors",
        "message": "Please upgrade to the most recent version",
        "image": "icon",
        "spec": {
            "url": platform_url
        }
    }
    return push_spec
def needs_version_update(uuid, target_version):
    """Return True iff the user has a profile and is not yet on target_version.

    Users without a profile are skipped (False) so we never push to unknown ids.
    """
    profile = edb.get_profile_db().find_one({"user_id": uuid})
    logging.debug("Read profile %s for user %s" % (profile, uuid))
    if profile is None:
        logging.error("Could not find profile for %s" % uuid)
        return False
    current_version = profile["client_app_version"]
    if current_version == target_version:
        logging.debug("%s is already at version %s" % (uuid, current_version))
        return False
    logging.debug("%s is at version %s, needs update to %s" % (uuid, current_version, target_version))
    return True
def push_upgrade_message_for_platform(platform, cli_args):
    """Send the upgrade notification to all users on *platform*.

    Honors --target-version (push only to users behind that version) and
    --dry-run (log everything but skip the actual push).
    """
    logging.info("About to push to %s" % platform)
    uuid_list = get_uuid_list_for_platform(platform)
    logging.info("UUID list for %s = %s" % (platform, uuid_list))
    if not cli_args.target_version:
        logging.info("No target version specified, not filtering list")
        filtered_uuid_list = uuid_list
    else:
        filtered_uuid_list = [u for u in uuid_list
                              if needs_version_update(u, cli_args.target_version)]
        logging.info("After filtering for %s, uuid_list is %s" % (cli_args.target_version, filtered_uuid_list))
    spec = get_upgrade_push_spec(platform)
    if cli_args.dry_run:
        logging.info("dry run, skipping actual push")
        return
    response = pnu.send_visible_notification_to_users(filtered_uuid_list,
                                                      spec["title"],
                                                      spec["message"],
                                                      spec,
                                                      dev=cli_args.dev)
    pnu.display_response(response)
def runTests():
    """Smoke-test needs_version_update against temporary profile entries.

    Inserts three fake profiles at different app versions, checks the
    filtering logic, and always cleans the entries up afterwards.
    """
    try:
        c_profile_db().insert_one({"user_id": "v4", "client_app_version": "1.0.4"})
        edb.get_profile_db().insert_one({"user_id": "v5", "client_app_version": "1.0.5"})
        edb.get_profile_db().insert_one({"user_id": "v6", "client_app_version": "1.0.6"})
        # Users behind the target version need an update...
        assert needs_version_update("v4", "1.0.6")
        assert needs_version_update("v5", "1.0.6")
        # ...users already on it, and users with no profile at all, do not.
        assert not needs_version_update("v6", "1.0.6")
        assert not needs_version_update("unknown", "1.0.6")
    finally:
        logging.debug("About to delete all entries from the profile")
        edb.get_profile_db().delete_many({"user_id": "v4"})
        edb.get_profile_db().delete_many({"user_id": "v5"})
        edb.get_profile_db().delete_many({"user_id": "v6"})
if __name__ == '__main__':
    # Parse CLI flags and either run the self-test or push to both platforms.
    parser = argparse.ArgumentParser(prog="prompt_upgrade_to_latest")
    # until we figure out a way to add unit tests for scripts
    # NOTE(review): the --test help text duplicates the --dry-run text
    # ("push the survey") — looks like a copy/paste; confirm intended wording.
    parser.add_argument("--test", action="store_true", default=False,
        help="Do everything except actually push the survey")
    parser.add_argument("-n", "--dry-run", action="store_true", default=False,
        help="Do everything except actually push the survey")
    parser.add_argument("-t", "--target-version",
        help="Only push to people who have not upgraded to this version")
    parser.add_argument("-d", "--dev", action="store_true", default=False)
    args = parser.parse_args()
    if args.test:
        runTests()
    else:
        # Push to both platforms; each call queries its own user list.
        push_upgrade_message_for_platform("android", args)
        push_upgrade_message_for_platform("ios", args)
| e-mission/e-mission-server | bin/monitor/prompt_upgrade_to_latest.py | prompt_upgrade_to_latest.py | py | 4,640 | python | en | code | 22 | github-code | 36 |
7813711056 | """clean up unused tables
Create Date: 2022-05-02 17:19:09.910095
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "20220502_171903"
down_revision = "20220425_225456"
branch_labels = None
depends_on = None
def upgrade():
    """Drop unused sequencing/workflow tables and their enum members.

    Child tables are dropped before the tables they reference so foreign-key
    constraints are not violated.
    """
    op.drop_table("region_types", schema="aspen")
    op.drop_table("align_read_workflows", schema="aspen")
    op.drop_table("call_consensus_workflows", schema="aspen")
    op.drop_table("sequencing_reads_collections", schema="aspen")
    op.drop_table("sequencing_instrument_types", schema="aspen")
    op.drop_table("filter_read_workflows", schema="aspen")
    op.drop_table("host_filtered_sequencing_reads_collections", schema="aspen")
    op.drop_table("sequencing_protocol_types", schema="aspen")
    op.drop_table("bams", schema="aspen")
    op.drop_table("called_pathogen_genomes", schema="aspen")
    # Drop dummy data tied to our enums, relevant for dev environments
    op.execute(
        "DELETE FROM aspen.entities WHERE entity_type IN ('SEQUENCING_READS', 'BAM', 'CALLED_PATHOGEN_GENOME', 'HOST_FILTERED_SEQUENCE_READS')"
    )
    # NOTE(review): op.enum_delete is not part of core Alembic — presumably
    # provided by a project plugin (e.g. alembic-enums); confirm availability.
    op.enum_delete(
        "entity_types",
        [
            "CALLED_PATHOGEN_GENOME",
            "BAM",
            "SEQUENCING_READS",
            "HOST_FILTERED_SEQUENCE_READS",
        ],
        schema="aspen",
    )
    op.enum_delete(
        "workflow_types",
        ["CALL_CONSENSUS", "ALIGN_READ", "FILTER_READ"],
        schema="aspen",
    )
def downgrade():
    """This migration is intentionally one-way; restoring the dropped tables
    is not supported."""
    raise NotImplementedError("don't downgrade")
| chanzuckerberg/czgenepi | src/backend/database_migrations/versions/20220502_171903_clean_up_unused_tables.py | 20220502_171903_clean_up_unused_tables.py | py | 1,530 | python | en | code | 11 | github-code | 36 |
36684803245 | import pandas as pd
import tekore as tk
from config import CLIENT_ID, CLIENT_SECRET
class SpotifyData:
    """Fetch track audio-feature data from the Spotify Web API via tekore."""

    def get_one_song_data(self, query):
        """Search Spotify for *query* and return the audio features of the
        top matching track as a single-row DataFrame.

        Also prints the matched artist/track and the artist's first genre
        (messages intentionally in Polish, matching the app's UI language).
        """
        token = tk.request_client_token(CLIENT_ID, CLIENT_SECRET)
        spotify = tk.Spotify(token)
        searched_track = spotify.search(query, types=('track',), market='pl')
        artist_id = searched_track[0].items[0].artists[0].id
        # Renamed from `id` to avoid shadowing the builtin.
        track_id = searched_track[0].items[0].id
        af = spotify.track_audio_features(track_id)
        output = [
            [af.danceability,
             af.energy,
             af.loudness,
             af.acousticness,
             af.instrumentalness,
             af.liveness,
             af.speechiness,
             af.valence]
        ]
        print("Znaleziono:", searched_track[0].items[0].artists[0].name, "-", searched_track[0].items[0].name)
        print("Gatunek:", spotify.artist(artist_id).genres[0])
        return pd.DataFrame(output, columns=['danceability', 'energy', 'loudness', 'acousticness', 'instrumentalness',
                                             'liveness', 'speechiness', 'valence'])

    def get_data(self, genres):
        """For each genre name, find a playlist (preferring one with at least
        100 tracks) and collect audio features of its first 100 tracks.

        Genres whose lookup fails (API shape/index/type errors) are skipped.
        Returns one DataFrame with a 'genre' column plus the feature columns.
        """
        token = tk.request_client_token(CLIENT_ID, CLIENT_SECRET)
        spotify = tk.Spotify(token)
        output = pd.DataFrame(
            columns=['genre', 'danceability', 'energy', 'loudness', 'acousticness', 'instrumentalness',
                     'liveness', 'speechiness', 'valence'])
        for genre in genres:
            print('now: ', genre)
            tracks_id = []
            tracks_af = []
            try:
                searched_playlists = spotify.search(genre, types=('playlist',), market='pl', limit=50, offset=0)
                # BUGFIX: the original looped `for i in range(100)` over at most
                # 50 results (IndexError skipped the genre) and its fallback
                # branch `elif i == 100` was unreachable since i stops at 99.
                # Prefer the first playlist with >= 100 tracks; otherwise fall
                # back to the first search result.
                playlist_id = None
                for item in searched_playlists[0].items:
                    if item.tracks.total >= 100:
                        playlist_id = item.id
                        break
                if playlist_id is None:
                    playlist_id = searched_playlists[0].items[0].id
                playlist = spotify.playlist(playlist_id)
                playlist_tracks = playlist.tracks.items
                # Take exactly the first 100 tracks; shorter playlists raise
                # IndexError and the genre is skipped below.
                for i in range(100):
                    tracks_id.append(playlist_tracks[i].track.id)
                afs = spotify.tracks_audio_features(track_ids=tracks_id)
                print(len(afs))
                for af in afs:
                    tracks_af.append(
                        [genre, af.danceability, af.energy, af.loudness, af.acousticness, af.instrumentalness,
                         af.liveness,
                         af.speechiness, af.valence])
                x = pd.DataFrame(tracks_af,
                                 columns=['genre', 'danceability', 'energy', 'loudness', 'acousticness',
                                          'instrumentalness',
                                          'liveness', 'speechiness', 'valence'])
                output = pd.concat([output, x])
            except AttributeError:
                print('tekore attribute error')
                continue
            except IndexError:
                print('playlist index error')
                continue
            except TypeError:
                print('audio features type error')
                continue
        return output
| SINEdowskY/spotify-songs-classification | spotify_data.py | spotify_data.py | py | 3,351 | python | en | code | 1 | github-code | 36 |
# Create the PECA (machine part) table in the parts database.
import sqlite3
connection = sqlite3.connect('databasePeças.db')
c = connection.cursor()
def CREATE():
    """Create the PECA table if it does not exist, then commit and close.

    NOTE(review): this closes the module-level connection, so CREATE() can
    only be called once per process — confirm no later code reuses `c`.
    """
    # PART (peça) table: code, singular/plural name, gender marker, price.
    c.execute('CREATE TABLE IF NOT EXISTS PECA (\
        `codigo` VARCHAR(5) NOT NULL,\
        `nomeSingular` VARCHAR(25) NOT NULL,\
        `nomePlural` VARCHAR(25) NOT NULL,\
        `genero` VARCHAR(1) NOT NULL,\
        `preco` VARCHAR(8),\
        PRIMARY KEY(`codigo`));')
    connection.commit()
    connection.close()
CREATE() | GilbertoMJ/Projeto-Andaimes | Scripts Banco de Dados/criar_database_Peça.py | criar_database_Peça.py | py | 473 | python | en | code | 0 | github-code | 36 |
from codecs import open
from os import path
import re
from setuptools import setup, find_packages

dot = path.abspath(path.dirname(__file__))

# get the dependencies and installs
with open(path.join(dot, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')

# Plain requirements become install_requires; VCS requirements become
# dependency links with the "git+" prefix stripped.
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if
                    x.startswith('git+')]

# parse the version file
# BUGFIX: the original used open(...).read() and never closed the handle;
# a context manager guarantees the file is closed.
with open("cloudy/_version.py", "rt") as version_file:
    ver_content = version_file.read()
ver_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", ver_content, re.M)
if ver_match:
    version = ver_match.group(1)
else:
    raise RuntimeError("Unable to find version string")

setup(
    name='cloudy',
    version=version,
    description='opinionated & personal screenshot handler',
    long_description=(
        'Watches a directory for file changes, uploads them to a remote,'
        'generates a link, shortens it and dumps it into the clipboard.'
    ),
    license='BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
    ],
    entry_points={
        'console_scripts': ['cloudy=cloudy.cloudy:cli'],
    },
    keywords='',
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    install_requires=install_requires,
    dependency_links=dependency_links,
)
| rarescosma/env.cloudy | setup.py | setup.py | py | 1,466 | python | en | code | 0 | github-code | 36 |
2922309209 | import numpy as np
import json
def dump_to_file(arrays, filename):
    """Serialize a mapping of numpy arrays (and plain values) to a JSON file.

    Top-level ndarray values and ndarray values inside one level of nested
    dicts are converted to lists; everything else passes through unchanged.

    Args:
        arrays: dict mapping keys to ndarrays, dicts of ndarrays, or plain values.
        filename: path of the JSON file to write (overwritten if present).
    """
    serializable = {}
    for key, value in arrays.items():
        if isinstance(value, np.ndarray):
            serializable[key] = value.tolist()
        elif isinstance(value, dict):
            # BUGFIX: the original stored the caller's dict by reference and
            # mutated it in place, under a bare `except: pass` that silently
            # left it half-converted on any error. Build a fresh dict and
            # convert per-value instead.
            serializable[key] = {
                k: (v.tolist() if isinstance(v, np.ndarray) else v)
                for k, v in value.items()
            }
        else:
            serializable[key] = value
    with open(filename, 'w') as handle:
        json.dump(serializable, handle, indent=2)
def load_from_file(filename):
    """Load a JSON file written by dump_to_file, restoring numpy arrays.

    Top-level lists become ndarrays. Dict values whose keys all parse as
    integers become {int: ndarray} mappings; other dicts are kept as-is.
    Scalars pass through unchanged.

    Args:
        filename: path of the JSON file to read.
    Returns:
        dict with the restored values.
    """
    with open(filename, 'r') as handle:
        raw = json.load(handle)
    arrays = {}
    for key, value in raw.items():
        if isinstance(value, list):
            arrays[key] = np.asarray(value)
        elif isinstance(value, dict):
            try:
                arrays[key] = {int(k): np.asarray(v) for k, v in value.items()}
            except (ValueError, TypeError):
                # BUGFIX: was a bare `except:` — only a failed int() conversion
                # (non-integer keys) should fall back to the plain dict.
                arrays[key] = value
        else:
            arrays[key] = value
    return arrays
32637130659 | from image import PGMImage
import random
from gaussian import convolve
def apply_median_filter(image_pixels, filter_size):
    """Apply a square median filter to a 2-D grid of pixel values.

    Each interior pixel is replaced by the median of its
    filter_size x filter_size neighborhood; border pixels (within
    filter_size // 2 of an edge) are left at 0.
    """
    radius = filter_size // 2
    result = [[0] * len(row) for row in image_pixels]
    for r in range(radius, len(image_pixels) - radius):
        for c in range(radius, len(image_pixels[0]) - radius):
            # Gather the neighborhood, sort it, and take the upper median.
            window = sorted(
                image_pixels[r + dr][c + dc]
                for dr in range(-radius, radius + 1)
                for dc in range(-radius, radius + 1)
            )
            result[r][c] = window[len(window) // 2]
    return result
def salt_and_pepper_noise(image_pixels, percentage):
    """Return a copy of the image with ~percentage percent of pixels forced
    to black (0, "pepper") or white (255, "salt"), chosen at random.

    The input image is not modified.
    """
    noisy = [list(row) for row in image_pixels]
    total_pixels = len(image_pixels) * len(image_pixels[0])
    for _ in range(int(percentage * total_pixels / 100)):
        # Pick a random pixel, then flip a coin for black vs white.
        r = random.randint(0, len(image_pixels) - 1)
        c = random.randint(0, len(image_pixels[0]) - 1)
        noisy[r][c] = 0 if random.random() < 0.5 else 255
    return noisy
def apply_averaging(image_pixels, filter_size):
    """Smooth the image by convolving it with a uniform (box) averaging
    kernel of side *filter_size*."""
    weight = 1 / filter_size ** 2
    kernel = [[weight] * filter_size for _ in range(filter_size)]
    return convolve(image_pixels, kernel)
| charalampidi-gabriella/cs474-pa2 | median.py | median.py | py | 1,204 | python | en | code | 0 | github-code | 36 |
32259680615 | # pylint: disable=W0613
from flask import request
from injector import inject
from app import app
from app.regali_app.list.application.use_cases import (
get_gift_list,
get_gift_lists,
delete_gift_list,
create_gift_list,
delete_gift_list_element,
create_gift_list_element
)
from app.regali_app.shared.infrastructure.routes.authentication import token_required
@inject
@app.route('/giftlists', methods=['POST'])
@token_required
def post_giftlist(
    current_user,
    use_case: create_gift_list.UseCase,
    request_data_transformer: create_gift_list.RequestDataTransformer
):
    """POST /giftlists — create a gift list owned by the authenticated user.

    The transformer turns the raw Flask request plus the user id into the
    use case's request object; the use case's return value is the response.
    """
    # NOTE(review): @inject sits above @app.route here and throughout this
    # module — confirm this ordering is what the project's injector setup
    # expects (flask-injector conventions place it closest to the function).
    return use_case.execute(
        request_data_transformer.transform(
            current_user.id,
            request
        )
    )
@inject
@app.route('/giftlists/<reference>', methods=['GET'])
@token_required
def get_giftlist(current_user, use_case: get_gift_list.UseCase, reference):
    """GET /giftlists/<reference> — fetch a single gift list by reference."""
    giftlists = use_case.execute(get_gift_list.Request(reference))
    return giftlists
@inject
@app.route('/giftlists', methods=['GET'])
@token_required
def get_giftlists(current_user, use_case: get_gift_lists.UseCase):
    """GET /giftlists — list all gift lists visible to the caller."""
    giftlists = use_case.execute()
    return giftlists
@inject
@app.route('/giftlists/<reference>', methods=['DELETE'])
@token_required
def delete_giftlists(current_user, use_case: delete_gift_list.UseCase, reference):
    """DELETE /giftlists/<reference> — remove a gift list by reference."""
    use_case.execute(delete_gift_list.Request(reference))
    # The use case has no return payload; acknowledge with a message body.
    return {
        'message': 'List Deleted'
    }
@inject
@app.route('/giftlists/<reference>/elements', methods=['POST'])
@token_required
def post_giftlist_element(current_user, use_case: create_gift_list_element.UseCase, reference):
    """POST /giftlists/<reference>/elements — add an element (by URL) to a list.

    Expects a JSON body with a "url" field; a missing field raises KeyError.
    """
    return use_case.execute(
        create_gift_list_element.Request(reference, request.json['url'])
    )
@inject
@app.route('/giftlists/<list_reference>/elements/<element_reference>', methods=['DELETE'])
@token_required
def delete_giftlist_element(
    current_user,
    use_case: delete_gift_list_element.UseCase,
    list_reference,
    element_reference
):
    """DELETE /giftlists/<list>/elements/<element> — remove one list element."""
    use_case.execute(
        delete_gift_list_element.Request(
            list_reference,
            element_reference
        )
    )
    # The use case has no return payload; acknowledge with a message body.
    return {
        'message': 'List Element Deleted'
    }
| MikelDB/regali-app | api/app/regali_app/shared/infrastructure/routes/giftlist.py | giftlist.py | py | 2,209 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.