hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
87bb8386a706e5db2e0919438b1978e95a037840 | 1,280 | py | Python | ykdl/extractors/laifeng.py | danxinshang/python | a79d06abbca633f98c3825cc22ba4872f8c2aeef | [
"MIT"
] | null | null | null | ykdl/extractors/laifeng.py | danxinshang/python | a79d06abbca633f98c3825cc22ba4872f8c2aeef | [
"MIT"
] | null | null | null | ykdl/extractors/laifeng.py | danxinshang/python | a79d06abbca633f98c3825cc22ba4872f8c2aeef | [
"MIT"
] | 1 | 2022-03-09T14:43:52.000Z | 2022-03-09T14:43:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.util.html import get_content
from ykdl.util.match import match1
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
import json
from random import randint
class Laifeng(VideoExtractor):
name = u'laifeng (来疯直播)'
def prepare(self):
assert self.url, "please provide valid url"
info = VideoInfo(self.name, True)
html = get_content(self.url)
Alias = match1(html, 'initAlias:\'([^\']+)')
Token = match1(html, 'initToken: \'([^\']+)')
info.artist = match1(html, 'anchorName:\'([^\']+)')
info.title = info.artist + u'的直播房间'
api_url = "http://lapi.xiu.youku.com/v1/get_playlist?app_id=101&alias={}&token={}&player_type=flash&sdkversion=0.1.0&playerversion=3.1.0&rd={}".format(Alias, Token, randint(0,9999))
data1 = json.loads(get_content(api_url))
assert data1['error_code'] == 0
url_data = data1['url_list'][0]
stream_url = json.loads(get_content(url_data['url']))['u']
info.stream_types.append('current')
info.streams['current'] = {'container': url_data["format"], 'video_profile': 'current', 'src' : [stream_url], 'size': float('inf')}
return info
site = Laifeng()
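# Illustration only (not part of the extractor): match1() from ykdl.util.match is
# used above to pull single regex capture groups out of the page HTML. A minimal
# equivalent using the standard-library re module (the HTML snippet is hypothetical):
def _example_extract_alias():
    import re
    html = "window.config = {initAlias:'room123', anchorName:'someone'}"
    alias = re.search(r"initAlias:'([^']+)", html)
    return alias.group(1) if alias else None   # -> 'room123'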
| 33.684211 | 189 | 0.638281 | 168 | 1,280 | 4.755952 | 0.517857 | 0.04005 | 0.030038 | 0.047559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024062 | 0.188281 | 1,280 | 37 | 190 | 34.594595 | 0.744947 | 0.032813 | 0 | 0 | 0 | 0.04 | 0.241909 | 0 | 0 | 0 | 0 | 0 | 0.08 | 1 | 0.04 | false | 0 | 0.24 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87bcc7e9ab9f5365aff2add5f1697f1e3608cb3a | 5,047 | py | Python | mhctools/binding_prediction.py | denklewer/mhctools | 1aed7e8b975253349a0c504f7d42e7051139e459 | [
"Apache-2.0"
] | 47 | 2018-01-25T16:16:22.000Z | 2022-03-21T13:59:52.000Z | mhctools/binding_prediction.py | denklewer/mhctools | 1aed7e8b975253349a0c504f7d42e7051139e459 | [
"Apache-2.0"
] | 117 | 2015-03-30T21:34:38.000Z | 2017-12-04T18:45:42.000Z | mhctools/binding_prediction.py | denklewer/mhctools | 1aed7e8b975253349a0c504f7d42e7051139e459 | [
"Apache-2.0"
] | 11 | 2018-12-04T22:39:16.000Z | 2021-11-11T16:02:43.000Z | # Copyright (c) 2014-2019. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import numpy as np
from serializable import Serializable
class BindingPrediction(Serializable):
def __init__(
self,
peptide,
allele,
score=None,
percentile_rank=None,
affinity=None,
source_sequence_name=None,
offset=0,
prediction_method_name=""):
"""
Parameters
----------
peptide : str
Short amino acid sequence
allele : str
HLA allele, e.g. HLA-A*02:01
score : float
Continuous prediction of peptide-MHC binding where larger values
indicate either higher affinity or higher probability. For affinity
            predictors this can be 1 - log(IC50)/log(max_IC50). For mass spec
predictors this can be the probability of detection.
percentile_rank : float
Percentile rank of the score
affinity : float
Predicted binding affinity IC50
source_sequence_name : str
Name of sequence from which peptide was extracted
offset : int
            0-based starting position in the source sequence from which all
            epitopes were extracted
prediction_method_name : str, optional
Name of predictor used to generate this prediction.
"""
self.source_sequence_name = source_sequence_name
self.offset = offset
self.allele = allele
self.peptide = peptide
if score is None and affinity is not None:
# make an ascending score by taking 1-log_50k (IC50)
score = 1.0 - (np.log(affinity) / np.log(50000))
self.score = score
self.percentile_rank = percentile_rank
self.affinity = affinity
self.prediction_method_name = prediction_method_name
def __str__(self):
format_string = (
"BindingPrediction("
"peptide='%s', "
"allele='%s', "
"score=%s, "
"percentile_rank=%s, "
"affinity=%s, "
"source_sequence_name=%s, "
"offset=%d, "
"prediction_method_name='%s')")
return format_string % (
self.peptide,
self.allele,
("None" if self.score is None else '%0.3f' % self.score),
("None" if self.percentile_rank is None else '%0.3f' % self.percentile_rank),
("None" if self.affinity is None else '%0.3f' % self.affinity),
("None" if self.source_sequence_name is None else "'%s'" % self.source_sequence_name),
self.offset,
self.prediction_method_name)
def clone_with_updates(self, **kwargs):
"""Returns new BindingPrediction with updated fields"""
fields_dict = self.to_dict()
fields_dict.update(kwargs)
return BindingPrediction(**fields_dict)
def __repr__(self):
return str(self)
@property
def length(self):
"""Length of peptide, preserved for backwards compatibility"""
return len(self.peptide)
@property
def value(self):
"""Alias for affinity preserved for backwards compatibility"""
return self.affinity
@property
def elution_score(self):
"""
Deprecated alias of `score` from when we only considered
predictors of peptide-MHC binding affinity.
Returns
-------
float
"""
return self.score
fields = (
"source_sequence_name",
"offset",
"peptide",
"allele",
"score",
"affinity",
"percentile_rank",
"prediction_method_name"
)
def to_tuple(self):
return (
self.source_sequence_name,
self.offset,
self.peptide,
self.allele,
self.score,
self.affinity,
self.percentile_rank,
self.prediction_method_name)
def to_dict(self):
return {k: v for (k, v) in zip(self.fields, self.to_tuple())}
def __eq__(self, other):
return (
other.__class__ is BindingPrediction and
self.to_tuple() == other.to_tuple())
def __hash__(self):
return hash(self.to_tuple())
def __lt__(self, other):
return self.value < other.value
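# Usage sketch (hypothetical peptide/allele/affinity values): when only an affinity
# is supplied, the constructor derives the ascending score as
# 1 - log(affinity)/log(50000), so lower IC50 (stronger binding) gives a higher score.
def _example_binding_prediction():
    strong = BindingPrediction(peptide="SIINFEKL", allele="HLA-A*02:01", affinity=500.0)
    weak = BindingPrediction(peptide="SIINFEKL", allele="HLA-A*02:01", affinity=5000.0)
    renamed = strong.clone_with_updates(allele="HLA-B*07:02")  # copy with one field changed
    return strong.score, weak.score, renamed.allele            # ~0.426, ~0.213, 'HLA-B*07:02'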
| 30.587879 | 102 | 0.594016 | 572 | 5,047 | 5.076923 | 0.325175 | 0.048209 | 0.055785 | 0.030303 | 0.106061 | 0.042355 | 0.024793 | 0 | 0 | 0 | 0 | 0.01258 | 0.322766 | 5,047 | 164 | 103 | 30.77439 | 0.837039 | 0.337824 | 0 | 0.146067 | 0 | 0 | 0.09002 | 0.024136 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134831 | false | 0 | 0.033708 | 0.067416 | 0.314607 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87befaf80e2eb6c98552e884bc81e98ca40969f8 | 9,399 | py | Python | tracking/train_ocean.py | JackieZhai/TracKit | 1783e6ac950decb55a6541935d184353c5ed4ec0 | [
"MIT"
] | 567 | 2020-06-15T01:30:56.000Z | 2022-03-21T03:57:12.000Z | tracking/train_ocean.py | JackieZhai/TracKit | 1783e6ac950decb55a6541935d184353c5ed4ec0 | [
"MIT"
] | 96 | 2020-06-19T02:23:42.000Z | 2022-03-15T02:52:48.000Z | tracking/train_ocean.py | JackieZhai/TracKit | 1783e6ac950decb55a6541935d184353c5ed4ec0 | [
"MIT"
] | 112 | 2020-06-16T07:11:29.000Z | 2022-03-29T05:07:59.000Z | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Zhipeng Zhang (zhangzhipeng2017@ia.ac.cn)
# ------------------------------------------------------------------------------
import _init_paths
import os
import shutil
import time
import math
import pprint
import argparse
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from utils.utils import build_lr_scheduler
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau
import torch.backends.cudnn as cudnn
import models.models as models
from utils.utils import create_logger, print_speed, load_pretrain, restore_from, save_model
from dataset.ocean import OceanDataset
from core.config import config, update_config
from core.function import ocean_train
eps = 1e-5
def parse_args():
"""
args for training.
"""
parser = argparse.ArgumentParser(description='Train Ocean')
# general
parser.add_argument('--cfg', type=str, default='experiments/train/Ocean.yaml', help='yaml configure file name')
args, rest = parser.parse_known_args()
# update config
update_config(args.cfg)
parser.add_argument('--gpus', type=str, help='gpus')
parser.add_argument('--workers', type=int, help='num of dataloader workers')
args = parser.parse_args()
return args
def reset_config(config, args):
"""
set gpus and workers
"""
if args.gpus:
config.GPUS = args.gpus
if args.workers:
config.WORKERS = args.workers
def check_trainable(model, logger):
"""
print trainable params info
"""
trainable_params = [p for p in model.parameters() if p.requires_grad]
logger.info('trainable params:')
for name, param in model.named_parameters():
if param.requires_grad:
logger.info(name)
assert len(trainable_params) > 0, 'no trainable parameters'
return trainable_params
def get_optimizer(cfg, trainable_params):
"""
get optimizer
"""
optimizer = torch.optim.SGD(trainable_params, cfg.OCEAN.TRAIN.LR,
momentum=cfg.OCEAN.TRAIN.MOMENTUM,
weight_decay=cfg.OCEAN.TRAIN.WEIGHT_DECAY)
return optimizer
def build_opt_lr(cfg, model, current_epoch=0):
# fix all backbone first
for param in model.features.features.parameters():
param.requires_grad = False
for m in model.features.features.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if current_epoch >= cfg.OCEAN.TRAIN.UNFIX_EPOCH:
if len(cfg.OCEAN.TRAIN.TRAINABLE_LAYER) > 0: # specific trainable layers
for layer in cfg.OCEAN.TRAIN.TRAINABLE_LAYER:
for param in getattr(model.features.features, layer).parameters():
param.requires_grad = True
for m in getattr(model.features.features, layer).modules():
if isinstance(m, nn.BatchNorm2d):
m.train()
else: # train all backbone layers
for param in model.features.features.parameters():
param.requires_grad = True
for m in model.features.features.modules():
if isinstance(m, nn.BatchNorm2d):
m.train()
else:
for param in model.features.features.parameters():
param.requires_grad = False
for m in model.features.features.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
trainable_params = []
trainable_params += [{'params': filter(lambda x: x.requires_grad,
model.features.features.parameters()),
'lr': cfg.OCEAN.TRAIN.LAYERS_LR * cfg.OCEAN.TRAIN.BASE_LR}]
try:
trainable_params += [{'params': model.neck.parameters(),
'lr': cfg.OCEAN.TRAIN.BASE_LR}]
except:
pass
trainable_params += [{'params': model.connect_model.parameters(),
'lr': cfg.OCEAN.TRAIN.BASE_LR}]
try:
trainable_params += [{'params': model.align_head.parameters(),
'lr': cfg.OCEAN.TRAIN.BASE_LR}]
except:
pass
# print trainable parameter (first check)
print('==========first check trainable==========')
for param in trainable_params:
print(param)
optimizer = torch.optim.SGD(trainable_params,
momentum=cfg.OCEAN.TRAIN.MOMENTUM,
weight_decay=cfg.OCEAN.TRAIN.WEIGHT_DECAY)
lr_scheduler = build_lr_scheduler(optimizer, cfg, epochs=cfg.OCEAN.TRAIN.END_EPOCH)
lr_scheduler.step(cfg.OCEAN.TRAIN.START_EPOCH)
return optimizer, lr_scheduler
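# Illustration (standalone helper, never called by the training script): build_opt_lr()
# above relies on torch.optim.SGD accepting per-group learning rates, so the backbone
# can train at LAYERS_LR * BASE_LR while the heads use BASE_LR. Layer sizes and LR
# values below are made up.
def _example_param_groups():
    backbone = nn.Linear(8, 8)
    head = nn.Linear(8, 2)
    opt = torch.optim.SGD([
        {'params': backbone.parameters(), 'lr': 0.1 * 0.005},  # scaled backbone LR
        {'params': head.parameters(), 'lr': 0.005},            # base LR
    ], momentum=0.9, weight_decay=0.0001)
    return [g['lr'] for g in opt.param_groups]                 # [0.0005, 0.005]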
def lr_decay(cfg, optimizer):
if cfg.OCEAN.TRAIN.LR_POLICY == 'exp':
scheduler = ExponentialLR(optimizer, gamma=0.8685)
elif cfg.OCEAN.TRAIN.LR_POLICY == 'cos':
        scheduler = CosineAnnealingLR(optimizer, T_max=cfg.OCEAN.TRAIN.END_EPOCH)  # use config value; 'args' is not defined in this scope
elif cfg.OCEAN.TRAIN.LR_POLICY == 'Reduce':
scheduler = ReduceLROnPlateau(optimizer, patience=5)
elif cfg.OCEAN.TRAIN.LR_POLICY == 'log':
scheduler = np.logspace(math.log10(cfg.OCEAN.TRAIN.LR), math.log10(cfg.OCEAN.TRAIN.LR_END), cfg.OCEAN.TRAIN.END_EPOCH)
else:
        raise ValueError('unsupported learning rate scheduler')
return scheduler
def pretrain_zoo():
GDriveIDs = dict()
GDriveIDs['Ocean'] = "1UGriYoerXFW48_tf9R1NzwQ06M-5Yz-K"
return GDriveIDs
def main():
# [*] args, loggers and tensorboard
args = parse_args()
reset_config(config, args)
logger, _, tb_log_dir = create_logger(config, 'OCEAN', 'train')
logger.info(pprint.pformat(args))
logger.info(pprint.pformat(config))
writer_dict = {
'writer': SummaryWriter(log_dir=tb_log_dir),
'train_global_steps': 0,
}
# [*] gpus parallel and model prepare
# prepare pretrained model -- download from google drive
# auto-download train model from GoogleDrive
if not os.path.exists('./pretrain'):
os.makedirs('./pretrain')
# try:
# DRIVEID = pretrain_zoo()
#
# if not os.path.exists('./pretrain/{}'.format(config.OCEAN.TRAIN.PRETRAIN)):
# os.system(
# 'wget --no-check-certificate \'https://drive.google.com/uc?export=download&id={0}\' -O ./pretrain/{1}'
# .format(DRIVEID[config.OCEAN.TRAIN.MODEL], config.OCEAN.TRAIN.PRETRAIN))
# except:
# print('auto-download pretrained model fail, please download it and put it in pretrain directory')
if config.OCEAN.TRAIN.ALIGN:
print('====> train object-aware version <====')
model = models.__dict__[config.OCEAN.TRAIN.MODEL](align=True).cuda() # build model
else:
print('====> Default: train without object-aware, also prepare for OceanPlus <====')
model = models.__dict__[config.OCEAN.TRAIN.MODEL](align=False).cuda() # build model
print(model)
model = load_pretrain(model, './pretrain/{0}'.format(config.OCEAN.TRAIN.PRETRAIN)) # load pretrain
# get optimizer
if not config.OCEAN.TRAIN.START_EPOCH == config.OCEAN.TRAIN.UNFIX_EPOCH:
optimizer, lr_scheduler = build_opt_lr(config, model, config.OCEAN.TRAIN.START_EPOCH)
else:
optimizer, lr_scheduler = build_opt_lr(config, model, 0) # resume wrong (last line)
# check trainable again
print('==========double check trainable==========')
trainable_params = check_trainable(model, logger) # print trainable params info
if config.OCEAN.TRAIN.RESUME and config.OCEAN.TRAIN.START_EPOCH != 0: # resume
model, optimizer, args.start_epoch, arch = restore_from(model, optimizer, config.OCEAN.TRAIN.RESUME)
# parallel
gpus = [int(i) for i in config.GPUS.split(',')]
gpu_num = len(gpus)
logger.info('GPU NUM: {:2d}'.format(len(gpus)))
device = torch.device('cuda:{}'.format(gpus[0]) if torch.cuda.is_available() else 'cpu')
model = torch.nn.DataParallel(model, device_ids=gpus).to(device)
logger.info(lr_scheduler)
logger.info('model prepare done')
# [*] train
for epoch in range(config.OCEAN.TRAIN.START_EPOCH, config.OCEAN.TRAIN.END_EPOCH):
# build dataloader, benefit to tracking
train_set = OceanDataset(config)
train_loader = DataLoader(train_set, batch_size=config.OCEAN.TRAIN.BATCH * gpu_num, num_workers=config.WORKERS, pin_memory=True, sampler=None, drop_last=True)
# check if it's time to train backbone
if epoch == config.OCEAN.TRAIN.UNFIX_EPOCH:
logger.info('training backbone')
optimizer, lr_scheduler = build_opt_lr(config, model.module, epoch)
print('==========double check trainable==========')
check_trainable(model, logger) # print trainable params info
lr_scheduler.step(epoch)
curLR = lr_scheduler.get_cur_lr()
model, writer_dict = ocean_train(train_loader, model, optimizer, epoch + 1, curLR, config, writer_dict, logger, device=device)
# save model
save_model(model, epoch, optimizer, config.OCEAN.TRAIN.MODEL, config, isbest=False)
writer_dict['writer'].close()
if __name__ == '__main__':
main()
| 35.602273 | 166 | 0.63943 | 1,129 | 9,399 | 5.189548 | 0.23295 | 0.073391 | 0.048814 | 0.017921 | 0.322239 | 0.269671 | 0.208909 | 0.207203 | 0.125789 | 0.102919 | 0 | 0.005507 | 0.227258 | 9,399 | 263 | 167 | 35.737643 | 0.801184 | 0.145122 | 0 | 0.230303 | 0 | 0 | 0.082062 | 0.007689 | 0 | 0 | 0 | 0 | 0.006061 | 1 | 0.048485 | false | 0.012121 | 0.121212 | 0 | 0.206061 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87c058a8278517251a6cbf94b8d5d1cd88342b21 | 49,876 | py | Python | bgcArgoDMQC/core/core.py | ArgoCanada/BGC-QC | c058f3e1a1992fc961ce2c4d5862d426725c1e43 | [
"MIT"
] | null | null | null | bgcArgoDMQC/core/core.py | ArgoCanada/BGC-QC | c058f3e1a1992fc961ce2c4d5862d426725c1e43 | [
"MIT"
] | 16 | 2020-07-15T12:26:26.000Z | 2020-10-14T14:28:04.000Z | bgcArgoDMQC/core/core.py | ArgoCanada/bgcArgo | 500cd10526e5b88393310d457eebaef19d49e4d8 | [
"MIT"
] | 1 | 2020-08-30T02:40:33.000Z | 2020-08-30T02:40:33.000Z | import sys
import copy
from pathlib import Path
import fnmatch
import numpy as np
from scipy.interpolate import interp1d, interp2d
import matplotlib.dates as mdates
from matplotlib.offsetbox import AnchoredText
import gsw
from netCDF4 import Dataset
from .. import io
from .. import interp
from .. import unit
from .. import util
from .. import configure
# ----------------------------------------------------------------------------
# LOCAL MACHINE SETUP
# ----------------------------------------------------------------------------
global REF_PATH
REF_PATH = Path(__file__).parent.absolute() / 'ref'
def get_config_dirs():
'''
Get previously set local directories to look for Argo, WOA, and NCEP data.
'''
config = configure.read_config()
if 'argo_path' in config.keys():
global ARGO_PATH
ARGO_PATH = config['argo_path']
if 'ncep_path' in config.keys():
global NCEP_PATH
NCEP_PATH = config['ncep_path']
if 'woa_path' in config.keys():
global WOA_PATH
WOA_PATH = config['woa_path']
def set_dirs(argo_path='./', woa_path=None, ncep_path=None):
'''
Set local directories to look for Argo, WOA, and NCEP data.
Args:
argo_path (str or path-like): location of local Argo data
ncep_data (str or path-like): location of local NCEP data
woa_path (str or path-like): location of local World Ocean Atlas data
'''
global ARGO_PATH
ARGO_PATH = argo_path
global WOA_PATH
WOA_PATH = woa_path
global NCEP_PATH
NCEP_PATH = ncep_path
def get_index(index='bgc', **kwargs):
'''
Get the global, biogeochemical, synthetic, or metadata Argo index.
Args:
        index (str): *bgc* for the biogeochemical Argo index, *global* for the core index, *synthetic* for the synthetic index, *meta* for the metadata index, or *traj* for the trajectory index
'''
if index == 'bgc':
if '__bgcindex__' not in globals():
global __bgcindex__
__bgcindex__ = io.read_index()
return_index = __bgcindex__
elif index == 'global':
if '__globalindex__' not in globals():
global __globalindex__
__globalindex__ = io.read_index(mission='C')
return_index = __globalindex__
elif index == 'synthetic':
if '__synthindex__' not in globals():
global __synthindex__
__synthindex__ = io.read_index(mission='S')
return_index = __synthindex__
elif index == 'meta':
if '__metaindex__' not in globals():
global __metaindex__
__metaindex__ = io.read_index(mission='M')
return_index = __metaindex__
elif index == 'traj':
if '__trajindex__' not in globals():
global __trajindex__
__trajindex__ = io.read_index(mission='T')
return_index = __trajindex__
else:
raise ValueError('Input "{}" is unrecognized'.format(index))
for arg, val in kwargs.items():
return_index = return_index[return_index[arg] == val]
return return_index.reset_index()
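# Usage sketch: the keyword filter at the end of get_index() lets callers subset the
# cached index by any of its columns (e.g. 'wmo'); the WMO number below is a placeholder.
def _example_get_index():
    bgc = get_index('bgc')                      # full biogeochemical index
    one_float = get_index('bgc', wmo=4902481)   # rows for a single (hypothetical) float
    return bgc.shape, one_float.shape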
# ----------------------------------------------------------------------------
# FLOAT CLASS
# ----------------------------------------------------------------------------
# class traj:
# '''
# Class that loads Argo trajectory file data for a given float ID number
# (wmo).
# '''
# def __init__(self, wmo, keep_fillvalue=False, verbose=False):
# self.__trajdict__, self.__trajfile__ = load_traj(ARGO_PATH, wmo, verbose=verbose)
# # local path info
# self.argo_path = ARGO_PATH
# self.woa_path = WOA_PATH
# self.ncep_path = NCEP_PATH
# if not keep_fillvalue:
# self.rm_fillvalue()
class profiles:
set_dirs = set_dirs
def __init__(self, floats, cycles=None, mission='B', mode='RD', keep_fillvalue=False, rcheck=True, verbose=False):
if type(floats) is int:
floats = [floats]
self.__argofiles__ = organize_files(get_files(ARGO_PATH, floats, cycles=cycles, mission=mission, mode=mode))
self.__floatdict__ = load_profiles(self.__argofiles__, verbose=verbose)
self.__rawfloatdict__ = self.__floatdict__
# local path info
self.argo_path = ARGO_PATH
self.woa_path = WOA_PATH
self.ncep_path = NCEP_PATH
self.assign(self.__floatdict__)
if not keep_fillvalue:
self.rm_fillvalue()
if rcheck:
self.check_range('all', verbose=verbose)
def assign(self, floatdict):
# metadata and dimension variables
self.floatType = floatdict['floatType']
self.N_LEVELS = floatdict['N_LEVELS']
self.CYCLE = floatdict['CYCLES']
self.CYCLE_GRID = floatdict['CYCLE_GRID']
# time and location data
self.SDN = floatdict['SDN']
self.SDN_GRID = floatdict['SDN_GRID']
self.LATITUDE = floatdict['LATITUDE']
self.LATITUDE_GRID = floatdict['LATITUDE_GRID']
self.LONGITUDE = floatdict['LONGITUDE']
self.LONGITUDE_GRID = floatdict['LONGITUDE_GRID']
self.WMO = floatdict['WMO']
# core variables
self.PRES = floatdict['PRES']
# self.PRES_QC = floatdict['PRES_QC']
if 'TEMP' in floatdict.keys():
self.TEMP = floatdict['TEMP']
self.TEMP_QC = floatdict['TEMP_QC']
self.PSAL = floatdict['PSAL']
self.PSAL_QC = floatdict['PSAL_QC']
# potential density
            self.PDEN = gsw.pot_rho_t_exact(gsw.SA_from_SP(self.PSAL, self.PRES, self.LONGITUDE_GRID, self.LATITUDE_GRID), self.TEMP, self.PRES, 0) - 1000
# bgc variables - not necessarily all there so check if the fields exist
if 'DOXY' in floatdict.keys():
self.DOXY = floatdict['DOXY']
self.DOXY_QC = floatdict['DOXY_QC']
if 'CHLA' in floatdict.keys():
self.CHLA = floatdict['CHLA']
self.CHLA_QC = floatdict['CHLA_QC']
if 'BBP700' in floatdict.keys():
self.BBP700 = floatdict['BBP700']
self.BBP700_QC = floatdict['BBP700_QC']
if 'CDOM' in floatdict.keys():
self.CDOM = floatdict['CDOM']
self.CDOM_QC = floatdict['CDOM_QC']
# adjusted variables
if 'DOXY_ADJUSTED' in floatdict.keys():
self.DOXY_ADJUSTED = floatdict['DOXY_ADJUSTED']
self.DOXY_ADJUSTED_QC = floatdict['DOXY_ADJUSTED_QC']
if 'CHLA_ADJUSTED' in floatdict.keys():
self.CHLA_ADJUSTED = floatdict['CHLA_ADJUSTED']
self.CHLA_ADJUSTED_QC = floatdict['CHLA_ADJUSTED_QC']
if 'BBP700_ADJUSTED' in floatdict.keys():
self.BBP700_ADJUSTED = floatdict['BBP700_ADJUSTED']
self.BBP700_ADJUSTED_QC = floatdict['BBP700_ADJUSTED_QC']
if 'CDOM_ADJUSTED' in floatdict.keys():
self.CDOM_ADJUSTED = floatdict['CDOM_ADJUSTED']
self.CDOM_ADJUSTED_QC = floatdict['CDOM_ADJUSTED_QC']
if 'O2Sat' in floatdict.keys():
self.O2Sat = floatdict['O2Sat']
self.O2Sat_QC = floatdict['O2Sat_QC']
def rm_fillvalue(self):
self.__nofillvaluefloatdict__ = dict_fillvalue_clean(self.__rawfloatdict__)
self.__floatdict__ = self.__nofillvaluefloatdict__
self.assign(self.__nofillvaluefloatdict__)
self.to_dataframe()
def clean(self, bad_flags=None):
self.__cleanfloatdict__ = dict_clean(self.__floatdict__, bad_flags=bad_flags)
self.__floatdict__ = self.__cleanfloatdict__
self.assign(self.__cleanfloatdict__)
self.to_dataframe()
def reset(self):
self.__floatdict__ = self.__rawfloatdict__
self.assign(self.__rawfloatdict__)
self.to_dataframe()
def check_range(self, key, verbose=False):
'''
Performs a range check for variables that have a RTQC range available.
Replaces values outside the range with NaN values. Takes string input
to do the range check on that variable. Available variables are
PRES, TEMP, PSAL, and DOXY. Can also take input 'all' to do the range
check on all four variables, or a list of strings to do each of those
variables.
'''
if key == 'all':
key = ['PRES', 'TEMP', 'PSAL', 'DOXY']
elif type(key) is not list:
key = [key]
for k in key:
if k in self.__floatdict__.keys():
self.__rangecheckdict__ = range_check(k, self.__floatdict__, verbose=verbose)
self.__floatdict__ = self.__rangecheckdict__
                # recalculate O2Sat if it's DOXY
if k == 'DOXY':
optode_flag = get_optode_type(int(self.__rangecheckdict__['WMO'])) == 'AANDERAA_OPTODE_4330'
self.__rangecheckdict__['O2Sat'] = 100*self.__rangecheckdict__['DOXY']/unit.oxy_sol(self.__rangecheckdict__['PSAL'], self.__rangecheckdict__['TEMP'], a4330=optode_flag)
self.assign(self.__rangecheckdict__)
def to_dict(self):
return copy.deepcopy(self.__floatdict__)
def to_dataframe(self):
import pandas as pd
df = pd.DataFrame()
df['CYCLE'] = self.CYCLE_GRID
df['SDN'] = self.SDN_GRID
df['WMO'] = self.WMO
df['LATITUDE'] = self.LATITUDE_GRID
df['LONGITUDE'] = self.LONGITUDE_GRID
df['PRES'] = self.PRES
df['TEMP'] = self.TEMP
df['TEMP_QC'] = self.TEMP_QC
df['PSAL'] = self.PSAL
df['PSAL_QC'] = self.PSAL_QC
df['PDEN'] = self.PDEN
if 'DOXY' in self.__floatdict__.keys():
df['DOXY'] = self.DOXY
df['DOXY_QC'] = self.DOXY_QC
if 'CHLA' in self.__floatdict__.keys():
df['CHLA'] = self.CHLA
df['CHLA_QC'] = self.CHLA_QC
if 'BBP700' in self.__floatdict__.keys():
df['BBP700'] = self.BBP700
df['BBP700_QC'] = self.BBP700_QC
if 'CDOM' in self.__floatdict__.keys():
df['CDOM'] = self.CDOM
df['CDOM_QC'] = self.CDOM_QC
if 'DOXY_ADJUSTED' in self.__floatdict__.keys():
df['DOXY_ADJUSTED'] = self.DOXY_ADJUSTED
df['DOXY_ADJUSTED_QC'] = self.DOXY_ADJUSTED_QC
if 'CHLA_ADJUSTED' in self.__floatdict__.keys():
df['CHLA_ADJUSTED'] = self.CHLA_ADJUSTED
df['CHLA_ADJUSTED_QC'] = self.CHLA_ADJUSTED_QC
if 'BBP700_ADJUSTED' in self.__floatdict__.keys():
df['BBP700_ADJUSTED'] = self.BBP700_ADJUSTED
df['BBP700_ADJUSTED_QC'] = self.BBP700_ADJUSTED_QC
if 'CDOM_ADJUSTED' in self.__floatdict__.keys():
df['CDOM_ADJUSTED'] = self.CDOM_ADJUSTED
df['CDOM_ADJUSTED_QC'] = self.CDOM_ADJUSTED_QC
if 'O2Sat' in self.__floatdict__.keys():
df['O2Sat'] = self.O2Sat
df['O2Sat_QC'] = self.O2Sat_QC
self.df = df
return copy.deepcopy(self.df)
def get_track(self):
self.track = track(self.__floatdict__)
return self.track
def get_ncep(self):
if not hasattr(self, 'track'):
self.get_track()
self.NCEP = ncep_to_float_track('pres', self.track, local_path=self.ncep_path)
return self.NCEP
def get_woa(self):
if not hasattr(self, 'track'):
self.get_track()
self.z_WOA, self.WOA, self.__WOAweights__ = woa_to_float_track(self.track, 'O2sat', local_path=self.woa_path)
return self.WOA
def calc_gains(self, ref='WOA'):
if not hasattr(self, 'track'):
self.get_track()
if ref == 'NCEP':
sys.stdout.write('In-air data contained in BRtraj file, NCEP not a valid reference for individual profile files, returning None\n')
self.gains = None
if ref == 'WOA':
# check if reference data is already calculated
if not hasattr(self, 'WOA'):
self.get_woa()
self.__WOAgains__, self.__WOAfloatref__, self.__WOAref__ = calc_gain(self.__floatdict__, dict(z=self.z_WOA, WOA=self.WOA), inair=False)
self.gains = self.__WOAgains__
return self.gains
def calc_fixed_error(self, fix_err=10):
self.DOXY_ADJUSTED_ERROR = calc_fixed_doxy_adjusted_error(self.__floatdict__, fix_err=fix_err)
self.__floatdict__['DOXY_ADJUSTED_ERROR'] = self.DOXY_ADJUSTED_ERROR
return copy.deepcopy(self.DOXY_ADJUSTED_ERROR)
def reassign_flags(self):
return
def assess_profile_flags(self):
return
def describe(self):
if not hasattr(self, 'df'):
self.to_dataframe()
sys.stdout.write('Data for profile files for floats ')
for i,w in enumerate(self.df.WMO.unique()):
if i > 0:
sys.stdout.write(', ')
sys.stdout.write('{}'.format(int(w)))
sys.stdout.write('\n')
sys.stdout.write('Variables:\n')
for k in self.__floatdict__.keys():
sys.stdout.write('{}\n'.format(k))
sys.stdout.write('\n')
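# Minimal workflow sketch for the profiles class defined above (the paths and WMO
# number are placeholders, and the helper is never called by the module itself):
def _example_profiles_workflow():
    set_dirs(argo_path='/data/argo', woa_path='/data/woa18', ncep_path='/data/ncep')
    prof = profiles([4902481], mission='B', mode='RD')
    prof.clean()                         # mask values whose QC flags are bad
    df = prof.to_dataframe()             # flattened per-level DataFrame
    gains = prof.calc_gains(ref='WOA')   # WOA-referenced oxygen gains
    return df, gains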
# ----------------------------------------------------------------------------
# FUNCTIONS
# ----------------------------------------------------------------------------
def apply_gain(DOXY, G):
DOXY_ADJUSTED = G*DOXY
return DOXY_ADJUSTED
def calc_doxy_error(DOXY, G, eG):
return None
def get_files(local_path, wmo_numbers, cycles=None, mission='B', mode='RD', verbose=True):
local_path = Path(local_path)
if mission == 'B':
if '__bgcindex__' not in globals():
global __bgcindex__
__bgcindex__ = get_index()
subset_index = __bgcindex__[__bgcindex__.wmo.isin(wmo_numbers)]
elif mission == 'C':
if '__globalindex__' not in globals():
global __globalindex__
__globalindex__ = get_index(index='global')
subset_index = __globalindex__[__globalindex__.wmo.isin(wmo_numbers)]
else:
raise ValueError('Invalid input for parameter "mission"')
if cycles is not None:
subset_index = subset_index[subset_index.cycle.isin(cycles)]
wcs = ['*' + a + b + '*.nc' for a in mission for b in mode]
wcs = [w.replace('C','') for w in wcs]
matches = [fn for sub in [fnmatch.filter(subset_index.file, w) for w in wcs] for fn in sub]
subset_index = subset_index[subset_index.file.isin(matches)]
local_files = [(local_path / dac / str(wmo) / 'profiles' / fn.split('/')[-1]) for dac, wmo, fn in zip(subset_index.dac, subset_index.wmo, subset_index.file)]
remove_ix = []
for i,fn in enumerate(local_files):
if not Path(fn).exists():
if verbose:
                sys.stdout.write('File {} does not exist locally - removing from returned list, suggest the user download it using bgcArgo.io.get_argo(...)\n'.format(fn))
remove_ix.append(i)
if len(remove_ix) > 0:
for ix in remove_ix[::-1]:
local_files.pop(ix)
return local_files
def organize_files(files):
'''
Sort files according to time they were recorded.
'''
lead_letter = files[0].name[0]
if lead_letter == 'R' or lead_letter == 'D':
index = get_index('global')
else:
if '__bgcindex__' not in globals():
global __bgcindex__
__bgcindex__ = get_index()
index = __bgcindex__
dates = np.array([index[index.file.str.find(fn.name) != -1].date.iloc[0] for fn in files])
sorted_files = list(np.array(files)[np.argsort(dates)])
return sorted_files
# def load_traj(local_path, wmo):
# return trajData, trajFile
def load_argo(local_path, wmo, grid=False, verbose=True):
'''
Function to load in all data from a single float, using BRtraj, meta,
and Sprof files.
Args:
local_path: local path of float data
wmo: float ID number
Returns:
floatData: python dict() object with the following fields
- floatName: WMO number, from input
- floatType: Kind of float (APEX, ARVOR, etc.)
- N_LEVELS: Number of depth levels, Argo dimension N_LEVELS
- N_PROF: Number of profiles, Argo dimension N_PROF
- LATITUDE: Latitude (-90, 90) for each profile
- LONGITUDE: Longitude (-180, 180) for each profile
- SDN: Serial Date Number for each profile
- PRES: Pressure (dbar), compressed to vector (1D array)
- TEMP: Temperature (deg C)
- PSAL: Salinity (psu)
if the variables are available, it will also contain:
- DOXY: Dissolved Oxygen (micromole/kg)
- O2sat: Oxygen percent saturation (%)
- PPOX_DOXY: Oxygen partial pressure (mbar) [if avail.]
- TRAJ_CYCLE: Cycle number for PPOX_DOXY [if avail.]
- inair: Boolean to indicate if in-air data exists
        for all the variables listed above, there will also exist
<PARAM>_QC fields for quality flags, and <PARAM>_ADJUSTED
fields if they exist.
CYCLES, LATITUDE, LONGITUDE, and SDN all also have
analogous <VAR>_GRID fields that match the
dimension of PRES, TEMP, PSAL, DOXY, and O2SAT
Author:
Christopher Gordon
Fisheries and Oceans Canada
chris.gordon@dfo-mpo.gc.ca
Acknowledgement: this code is adapted from the SOCCOM SAGE_O2Argo matlab
code, available via https://github.com/SOCCOM-BGCArgo/ARGO_PROCESSING,
written by Tanya Maurer & Josh Plant
Change log:
- 2020-04-22: updated so that pressure mask determines all variables - need to add all quality flags to output
- 2020-04-29: switched file/path handling from os module to pathlib
- 2020-10-28: read variable DOXY from BRtraj file and convert to PPOX_DOXY if PPOX_DOXY not in file
'''
# make local_path a Path() object from a string, account for windows path
local_path = Path(local_path)
dac = io.get_dac(wmo)
if type(wmo) is not str:
wmo = str(wmo)
# check that necessary files exist - can continue without BRtraj file but
# need Sprof and meta files
BRtraj = local_path / dac / wmo / '{}_BRtraj.nc'.format(wmo)
Sprof = local_path / dac / wmo / '{}_Sprof.nc'.format(wmo)
    meta = local_path / dac / wmo / '{}_meta.nc'.format(wmo)
# check if BRtraj is there, flag for moving forward if not
BRtraj_flag = True
if not BRtraj.exists():
BRtraj_nc = None
BRtraj_flag = False
if verbose:
sys.stdout.write('Continuing without BRtraj file\n')
elif BRtraj.exists():
BRtraj_nc = Dataset(BRtraj, 'r')
if 'PPOX_DOXY' not in BRtraj_nc.variables.keys() and 'DOXY' not in BRtraj_nc.variables.keys():
BRtraj_flag = False
if verbose:
sys.stdout.write('BRtraj file exists, but no in-air data exists, continuing without using BRtraj file\n')
else:
BRtraj_nc = None
# Sprof and meta are required, so raise error if they are not there
if not Sprof.exists():
raise FileNotFoundError('No such Sprof file: {}'.format(Sprof))
if not meta.exists():
raise FileNotFoundError('No such meta file: {}'.format(meta))
# load synthetic and meta profiles
Sprof_nc = Dataset(Sprof, 'r')
meta_nc = Dataset(meta, 'r')
# number of profile cycles
M = Sprof_nc.dimensions['N_LEVELS'].size
N = Sprof_nc.dimensions['N_PROF'].size
floatData = read_all_variables(Sprof_nc)
floatData['SDN'] = floatData['JULD'] + mdates.datestr2num('1950-01-01')
floatData['CYCLES'] = floatData['CYCLE_NUMBER']
floatData['WMO'] = wmo
qc_keys = [s for s in floatData.keys() if '_QC' in s and 'PROFILE' not in s]
for qc in qc_keys:
floatData[qc] = io.read_qc(floatData[qc])
if grid:
ftype = ''
if 'PLATFORM_TYPE' in meta_nc.variables.keys():
for let in meta_nc.variables['PLATFORM_TYPE'][:].compressed():
ftype = ftype + let.decode('UTF-8')
floatData['floatType'] = ftype
floatData['SDN_GRID'] = np.tile(floatData['SDN'],(M,1)).T.flatten()
floatData['CYCLE_GRID'] = np.tile(floatData['CYCLES'],(M,1)).T.flatten()
floatData['LATITUDE_GRID'] = np.tile(floatData['LATITUDE'],(M,1)).T.flatten()
floatData['LONGITUDE_GRID'] = np.tile(floatData['LONGITUDE'],(M,1)).T.flatten()
floatData['PDEN'] = gsw.pot_rho_t_exact(gsw.SA_from_SP(floatData['PSAL'], floatData['PRES'], floatData['LONGITUDE_GRID'], floatData['LATITUDE_GRID']), floatData['TEMP'], floatData['PRES'], 0)
if 'DOXY' in floatData.keys():
optode_flag = get_optode_type(int(wmo)) == 'AANDERAA_OPTODE_4330'
floatData['O2Sat'] = 100*floatData['DOXY']/unit.oxy_sol(floatData['PSAL'], floatData['TEMP'], floatData['PDEN'], a4330=optode_flag)
# match the fill values
ix = np.logical_or(np.logical_or(floatData['PSAL'] >= 99999., floatData['TEMP'] >= 99999.), floatData['DOXY'] >= 99999.)
floatData['O2Sat'][ix] = 99999.
# get the worst QC flag from each quantity that goes into the calculation
floatData['O2Sat_QC'] = util.get_worst_flag(floatData['TEMP_QC'], floatData['PSAL_QC'], floatData['DOXY_QC'])
if BRtraj_flag:
if 'PPOX_DOXY' in BRtraj_nc.variables.keys() and 'TEMP_DOXY' in BRtraj_nc.variables.keys():
floatData['PPOX_DOXY'] = BRtraj_nc.variables['PPOX_DOXY'][:].data.flatten()
floatData['TEMP_DOXY'] = BRtraj_nc.variables['TEMP_DOXY'][:].data.flatten()
floatData['TRAJ_CYCLE'] = BRtraj_nc.variables['CYCLE_NUMBER'][:].data.flatten()
floatData['inair'] = True
elif 'DOXY' in BRtraj_nc.variables.keys() and 'TEMP_DOXY' in BRtraj_nc.variables.keys():
# unit conversion from umol kg-1 to pO2, some shaky S and P assumptions?
floatData['PPOX_DOXY'] = unit.doxy_to_pO2(unit.umol_per_sw_to_mmol_per_L(
BRtraj_nc.variables['DOXY'][:].data.flatten(),
0, # salinity is 0 in air???
BRtraj_nc.variables['TEMP_DOXY'][:].data.flatten(),
0 # pressure is 0 in air???
), 0, BRtraj_nc.variables['TEMP_DOXY'][:].data.flatten())
floatData['TEMP_DOXY'] = BRtraj_nc.variables['TEMP_DOXY'][:].data.flatten()
floatData['TRAJ_CYCLE'] = BRtraj_nc.variables['CYCLE_NUMBER'][:].data.flatten()
floatData['inair'] = True
else:
floatData['inair'] = False
else:
floatData['inair'] = False
return floatData, Sprof, BRtraj, meta
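# Usage sketch (path and WMO number are placeholders): load_argo() returns the float
# data dict plus the paths of the Sprof, BRtraj, and meta files it read.
def _example_load_argo():
    data, sprof, brtraj, meta = load_argo('/data/argo', 4902481, grid=True, verbose=False)
    return data['CYCLES'], data['inair']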
def load_profiles(files, verbose=False):
common_variables = util.get_vars(files)
core_files = len(files)*[' ']
for i,f in enumerate(files):
data_mode = f.name[1]
if data_mode == 'D':
core_files[i] = f.parent / f.name.replace('B','')
else:
test_file = f.parent / f.name.replace('B','')
if not test_file.exists():
test_file = f.parent / f.name.replace('BR', 'D')
if not test_file.exists():
raise FileNotFoundError('Corresponding core file not found')
core_files[i] = test_file
floatData = dict(
floatName=[], N_LEVELS=[], N_PROF=[], CYCLES=np.array([], dtype=int), floatType=[]
)
for v in ['PRES', 'TEMP', 'PSAL', 'SDN']:
floatData[v] = np.array([])
floatData[v + '_QC'] = np.array([])
for v in ['WMO', 'LATITUDE', 'LONGITUDE', 'POSITION_QC', 'SDN_GRID', 'LATITUDE_GRID', 'LONGITUDE_GRID', 'CYCLE_GRID']:
floatData[v] = np.array([])
for v in common_variables:
floatData[v] = np.array([])
floatData[v + '_QC'] = np.array([])
if v + '_ADJUSTED' in common_variables:
floatData[v + '_ADJUSTED'] = np.array([])
floatData[v + '_ADJUSTED' + '_QC'] = np.array([])
for fn, cn in zip(files, core_files):
if verbose:
print(fn, cn)
# try to load the profile as absolute path or relative path
try:
nc = Dataset(fn, 'r')
except:
try:
nc = Dataset(Path(ARGO_PATH) / fn, 'r')
except:
raise FileNotFoundError('No such file {} or {}'.format(fn, str(Path(ARGO_PATH) / fn)))
try:
cc = Dataset(cn, 'r')
except:
try:
cc = Dataset(Path(ARGO_PATH) / cn, 'r')
except:
raise ValueError('Cannot get core Argo data, no such file {} or {}'.format(fn, str(Path(ARGO_PATH) / fn)))
# number of profile cycles
M = cc.dimensions['N_LEVELS'].size
N = cc.dimensions['N_PROF'].size
wmo = ''
if N > 1:
for let in nc.variables['PLATFORM_NUMBER'][:][0,:].compressed():
wmo = wmo + let.decode('UTF-8')
else:
for let in nc.variables['PLATFORM_NUMBER'][:].compressed():
wmo = wmo + let.decode('UTF-8')
cycle = nc.variables['CYCLE_NUMBER'][:].data.flatten()
ftype = ''
if 'PLATFORM_TYPE' in nc.variables.keys():
for let in nc.variables['PLATFORM_TYPE'][:].compressed():
ftype = ftype + let.decode('UTF-8')
floatData['floatName'] = floatData['floatName'] + [int(wmo)]
floatData['N_LEVELS'] = floatData['N_LEVELS'] + [M]
floatData['N_PROF'] = floatData['N_PROF'] + [N]
floatData['CYCLES'] = np.append(floatData['CYCLES'], cycle)
floatData['CYCLE_GRID'] = np.append(floatData['CYCLE_GRID'], np.array(N*M*[cycle[0]]))
floatData['floatType'] = floatData['floatType'] + [ftype]
floatData['WMO'] = np.append(floatData['WMO'], np.array(M*N*[wmo]))
# load in variables that will be in every file
floatData['PRES'] = np.append(floatData['PRES'], cc.variables['PRES'][:].data.flatten())
floatData['PRES_QC'] = np.append(floatData['PRES_QC'], io.read_qc(cc.variables['PRES_QC'][:].data.flatten()))
floatData['TEMP'] = np.append(floatData['TEMP'], cc.variables['TEMP'][:].data.flatten())
floatData['TEMP_QC'] = np.append(floatData['TEMP_QC'], io.read_qc(cc.variables['TEMP_QC'][:].data.flatten()))
floatData['PSAL'] = np.append(floatData['PSAL'], cc.variables['PSAL'][:].data.flatten())
floatData['PSAL_QC'] = np.append(floatData['PSAL_QC'], io.read_qc(cc.variables['PSAL_QC'][:].data.flatten()))
floatData['SDN'] = np.append(floatData['SDN'], cc.variables['JULD'][:].data.flatten() + mdates.datestr2num('1950-01-01'))
floatData['SDN_QC'] = np.append(floatData['SDN_QC'], io.read_qc(cc.variables['JULD_QC'][:].data.flatten()))
floatData['SDN_GRID'] = np.append(floatData['SDN_GRID'], np.array(N*M*[np.nanmean(cc.variables['JULD'][:].data.flatten() + mdates.datestr2num('1950-01-01'))]))
floatData['LATITUDE'] = np.append(floatData['LATITUDE'], cc.variables['LATITUDE'][:].data.flatten())
floatData['LATITUDE_GRID'] = np.append(floatData['LATITUDE_GRID'], np.array(N*M*[np.nanmean(cc.variables['LATITUDE'][:].data.flatten())]))
floatData['LONGITUDE'] = np.append(floatData['LONGITUDE'], cc.variables['LONGITUDE'][:].data.flatten())
floatData['LONGITUDE_GRID'] = np.append(floatData['LONGITUDE_GRID'], np.array(N*M*[np.nanmean(cc.variables['LONGITUDE'][:].data.flatten())]))
floatData['POSITION_QC'] = np.append(floatData['POSITION_QC'], io.read_qc(cc.variables['POSITION_QC'][:].data.flatten()))
        if verbose:
            print(common_variables)
# loop through other possible BGC variables
for v in common_variables:
var_check = v in nc.variables.keys() and 'N_LEVELS' in nc.variables[v].dimensions
dtype_check = nc.variables[v].dtype == 'float32' or nc.variables[v].dtype == 'float64'
check = var_check and dtype_check
if check:
floatData[v] = np.append(floatData[v], vertically_align(cc.variables['PRES'][:].data.flatten(), nc.variables['PRES'][:].data.flatten(), nc.variables[v][:].data.flatten()))
floatData['dPRES'] = delta_pres(cc.variables['PRES'][:].data.flatten(), nc.variables['PRES'][:].data.flatten())
for v in floatData.keys():
v_qc = v + '_QC'
if v_qc in common_variables:
floatData[v_qc] = np.append(floatData[v_qc], io.read_qc(nc.variables[v_qc][:].data.flatten()))
if 'DOXY' in floatData.keys():
floatData['O2Sat'] = 100*floatData['DOXY']/unit.oxy_sol(floatData['PSAL'], floatData['TEMP'])
floatData['O2Sat_QC'] = util.get_worst_flag(floatData['TEMP_QC'], floatData['PSAL_QC'], floatData['DOXY_QC'])
return floatData
def read_all_variables(nc):
'''
Read all variables and dimensions from an Argo netCDF file.
Args:
nc: a netCDF file object
Returns:
floatData: python dict with all variable and dimension names
'''
floatData = dict()
for name, dim in nc.dimensions.items():
floatData[name] = dim.size
for name, var in nc.variables.items():
floatData[name] = var[:].data.flatten()
return floatData
def read_sprof_gridded_variables(nc):
'''
Read all variables and dimensions from an Argo Sprof file, do not flatten
arrays, keep as 2D arrays.
Args:
nc: a netCDF file object
Returns:
floatData: python dict with all variable and dimension names
'''
floatData = dict()
for name, dim in nc.dimensions.items():
floatData[name] = dim.size
for name, var in nc.variables.items():
floatData[name] = var[:].data
return floatData
def read_history_qctest(nc):
QC_ACTION = np.squeeze(nc.variables['HISTORY_ACTION'][:].data)
actions = []
for row in QC_ACTION:
rval = ''
for let in row:
rval = rval + let.decode('UTF-8')
actions.append(rval.strip())
actions = np.array(actions)
QC_TESTS = np.squeeze(nc.variables['HISTORY_QCTEST'][:].data)
tests = []
for row in QC_TESTS:
rval = ''
for let in row:
rval = rval + let.decode('UTF-8')
tests.append(rval.strip())
tests = np.array(tests)
qcp_index = np.logical_or(actions == 'QCP', actions == 'QCP$')
qcf_index = np.logical_or(actions == 'QCF', actions == 'QCF$')
QCP, QCF = tests[qcp_index][0], tests[qcf_index][0]
return QCP, QCF
def dict_clean(float_data, bad_flags=None):
clean_float_data = copy.deepcopy(float_data)
qc_flags = [k for k in clean_float_data.keys() if '_QC' in k and 'PROFILE' not in k]
if bad_flags is None:
for qc_key in qc_flags:
data_key = qc_key.replace('_QC','')
good_index = np.logical_or(np.logical_or(clean_float_data[qc_key] < 4, clean_float_data[qc_key] == 5), clean_float_data[qc_key] == 8)
bad_index = np.invert(good_index)
if data_key == 'POSITION':
for dk in ['LATITUDE', 'LONGITUDE']:
clean_float_data[dk][bad_index] = np.nan
else:
clean_float_data[data_key][bad_index] = np.nan
else:
if type(bad_flags) is int:
bad_flags = [bad_flags]
for flag in bad_flags:
for qc_key in qc_flags:
data_key = qc_key.replace('_QC','')
bad_index = clean_float_data[qc_key] == flag
if data_key == 'POSITION':
for dk in ['LATITUDE', 'LONGITUDE']:
clean_float_data[dk][bad_index] = np.nan
else:
clean_float_data[data_key][bad_index] = np.nan
return clean_float_data
def dict_fillvalue_clean(float_data):
clean_float_data = copy.deepcopy(float_data)
qc_keys = [k for k in clean_float_data.keys() if '_QC' in k and 'SDN' not in k and 'PROFILE' not in k]
for k in qc_keys:
data_key = k.replace('_QC','')
if data_key == 'POSITION':
for dk in ['LATITUDE', 'LONGITUDE', 'LATITUDE_GRID', 'LONGITUDE_GRID']:
fillvalue_index = clean_float_data[dk] >= 99999. # use greater than because date fillval is 999999
clean_float_data[dk][fillvalue_index] = np.nan
else:
fillvalue_index = clean_float_data[data_key] >= 99999. # use greater than because date fillval is 999999
clean_float_data[data_key][fillvalue_index] = np.nan
# check if there is in-air data present
if 'PPOX_DOXY' in float_data.keys():
fillvalue_index = clean_float_data['PPOX_DOXY'] >= 99999. # use greater than because date fillval is 999999
clean_float_data['PPOX_DOXY'][fillvalue_index] = np.nan
fillvalue_index = clean_float_data['SDN'] >= 999999.
clean_float_data['SDN'][fillvalue_index] = np.nan
fillvalue_index = clean_float_data['SDN_GRID'] >= 999999.
clean_float_data['SDN_GRID'][fillvalue_index] = np.nan
return clean_float_data
def track(float_data):
# make 'track' array with columns (time, lat, lon) to be used in interpolation
track = np.array([float_data['SDN'], float_data['LATITUDE'], float_data['LONGITUDE']]).T
return track
def woa_to_float_track(track, param, zlim=(0,1000), local_path='./', verbose=True):
'''
Function to load WOA18 climatological data for comparison with autonomous
floats. Data to be interpolated along the provided track (t, lat, lon).
Combines function load_woa_data() and interp_woa_data() for convenience,
    see documentation for those functions for more detail.
Args:
track: array with the columns (SDN, lat, lon)
param: requested variable, valid inputs are
- T: temperature
- S: salinity
- O2: dissolved oxygen
- O2sat: oxygen percent saturation
- NO3: nitrate
- Si: silicate
- PO4: phosphate
zlim: depth bounds (upper, lower), default to (0, 1000)
local_path: local directory where WOA files are stored, assumes
current directory if no input
Returns:
        z: WOA depth array
        woa_interp: 2D array of requested WOA parameter (depth x time)
        wt: interpolation weights along the float track
Author:
Christopher Gordon
Fisheries and Oceans Canada
chris.gordon@dfo-mpo.gc.ca
Last update: 2020-04-23
Change log:
'''
xtrack, woa_track, woa_data = io.load_woa_data(track, param, zlim=zlim, local_path=local_path, verbose=verbose)
woa_interp, wt, yrday = interp.interp_woa_data(xtrack, woa_track, woa_data, verbose=verbose)
z = woa_track[0]
return z, woa_interp, wt
def ncep_to_float_track(varname, track, local_path='./'):
'''
Function to load NCEP reanalysis data for comparison with autonomous
floats. Data to be interpolated along the provided track (t, lat, lon).
Combines function load_ncep_data() and interp_ncep_data() for convenience,
    see documentation for those functions for more detail.
Args:
varname: either 'pres' (pressure) or 'rhum' (relative humidity)
track: array with the columns (SDN, lat, lon)
Returns:
        ncep_interp: requested NCEP parameter interpolated along the float track
        wt: interpolation weights along the float track
Author:
Christopher Gordon
Fisheries and Oceans Canada
chris.gordon@dfo-mpo.gc.ca
Last update: 2020-04-29
Change log:
'''
xtrack, ncep_track, data = io.load_ncep_data(track, varname, local_path=local_path)
if track[0,0] > ncep_track[0][-1] and mdates.num2date(track[0,0]).year == mdates.datetime.date.today().year:
        raise ValueError('First float date occurs after last NCEP date, NCEP data not available yet, recommend using WOA data to calculate gain')
ncep_interp, wt = interp.interp_ncep_data(xtrack, ncep_track, data)
return ncep_interp, wt
def calc_gain(data, ref, inair=True, zlim=25., verbose=True):
'''
Calculate the gain for each profile by comparing float oxygen data to a
reference data set, either NCEP for in-air or WOA surface data if in-air
comparison is not available.
Args:
data: float data dict object, output from load_argo()
ref: reference data set, either NCEP pO2 or WOA O2sat
inair: boolean flag to indicate if comparison to NCEP in-air
data or WOA surface data should be done, default to
in-air, but function also performs check
zlim: lower limit to define as 'surface' and take mean within,
default value 25 dbar, for use only when inair is False
Returns:
g: vector of gains
surf_data: array of float surface stats (cycle, N, mean, std)
Author:
Christopher Gordon
Fisheries and Oceans Canada
chris.gordon@dfo-mpo.gc.ca
Last update: 2020-04-23
Change log:
'''
# check which reference data to use
if inair and 'PPOX_DOXY' not in data.keys():
        raise ValueError("Flag 'inair' set to True but partial pressure data not available")
if inair:
if verbose:
sys.stdout.write('\nCalculating gains using NCEP surface pressure and float in-air measurements...\n')
g = np.nan*np.ones((ref.shape[0],))
# float partial pressure measurements at each cycle
ppox = data['PPOX_DOXY']
cycle = data['CYCLES']
inair_cycle = data['TRAJ_CYCLE']
intersect_cycles = np.intersect1d(cycle, np.unique(inair_cycle), assume_unique=True)
mean_float_data = np.nan*np.ones((ref.shape[0],4))
for i,c in enumerate(intersect_cycles):
subset_ppox = ppox[inair_cycle == c]
mean_float_data[i,0] = c
mean_float_data[i,1] = np.sum(~np.isnan(subset_ppox))
mean_float_data[i,2] = np.nanmean(subset_ppox)
mean_float_data[i,3] = np.nanstd(subset_ppox)
g[i] = ref[i]/mean_float_data[i,2]
g[g == 0] = np.nan
return g, mean_float_data
else:
if verbose:
sys.stdout.write('\nCalculating gains using WOA surface data and float O2 percent saturation...\n')
surf_ix = data['PRES'] <= zlim
surf_o2sat = data['O2Sat'][surf_ix]
grid_cycle = data['CYCLE_GRID'][surf_ix]
grid_time = data['SDN_GRID'][surf_ix]
cycle = data['CYCLES']
time = data['SDN']
z_woa = ref['z']
woa_data = ref['WOA']
woa_index = np.where(z_woa <= zlim)[0]
woa_surf = np.nanmean(woa_data[woa_index,:],axis=0)
woa_surf = woa_data[0,:]
mean_float_data = np.nan*np.ones((woa_surf.shape[0],4))
g = np.nan*np.ones((woa_surf.shape[0],))
for i,t in enumerate(time):
ref_o2sat = woa_surf[i]
subset_o2sat = surf_o2sat[grid_time == t] # uncomment when ready
mean_float_data[i,0] = cycle[i]
mean_float_data[i,1] = np.sum(~np.isnan(subset_o2sat))
mean_float_data[i,2] = np.nanmean(subset_o2sat)
mean_float_data[i,3] = np.nanstd(subset_o2sat)
g[i] = ref_o2sat/mean_float_data[i,2]
g[g == 0] = np.nan
return g, mean_float_data, woa_surf
def calc_gain_with_carryover(pO2_opt_air, pO2_ref_air, pO2_opt_water):
'''
Calculate gain with carryover parameter, following Bittig et al. (2018).
Args:
pO2_opt_air (array-like): partial pressure measured by the oxygen optode in-air
pO2_ref_air (array-like): partial pressure in-air from a reference dataset such as NCEP
pO2_opt_water (array-like): partial pressure of oxygen measured by the optode just below the surface
Returns:
        gains: vector of gain factors m, one per surfacing
        carry_over: fitted carry-over slope c, the water-fraction of the in-air data
        *need to run this by Henry and see if I'm doing it right*
Derive the O2 slope including a correction for 'carry-over' effect, to
account for the observation that optode in-air data do not represent pure
air but show a bias by in-water O2 saturation excess/deficiency (Bittig
and Kortzinger 2015). Johnson et al. (2015) confirm the 'carry-over' effect
for optodes close to the surface (~20cm).
    Accounting for the carry-over effect is recommended for Argo floats using in-air
    measurements, if enough surfacings are available (N > 20). It both removes
an identified bias (which is most relevant for cases with strong
super-/undersaturation and/or carry-overs) and reduces uncertainty on the
O2 slope factor. The equation for linear regression is as follows (see,
e.g., Bittig et al., 2018):
m*pO2^{optode}_{surf in-air} - pO2^{reference}_{in-air}
= c*(m*pO2^{optode}_{surf in-water} - pO2^{reference}_{in-air})
where:
- m is the O2 slope factor: m = pO2_adjusted / pO2
- pO2^{optode}_{surf in-air} is the oxygen partial pressure observed by
the optode in-air (i.e., close to the water surface), e.g., MC = X+11
- pO2^{reference}_{in-air} is the reference oxygen partial pressure in-air,
e.g., from re-analysis data
- pO2^{optode}_{surf in-water} is the oxygen partial pressure observed by
the optode at the water surface (in-water), e.g., MC = X+10 or profile
      MC = X-10
- c is the slope of the 'carry-over' effect, i.e., the water-fraction of
the observed optode in-air data.
Above equation can be used for linear regression to obtain m and c from
data of the partial pressures (from several cycles together). See
Thierry Virginie, Bittig Henry, The Argo-Bgc Team (2018). Argo quality
control manual for dissolved oxygen concentration.
https://doi.org/10.13155/46542
'''
# regress (optode in-air pO2 minus reference) on (optode in-water pO2 minus
# reference); the fitted slope is the carry-over coefficient c defined above
x1 = pO2_opt_water - pO2_ref_air
y1 = pO2_opt_air - pO2_ref_air
x1 = x1[:,np.newaxis]
carry_over, resid, _, _ = np.linalg.lstsq(x1, y1, rcond=None)
c = carry_over
gains = ((1-c)*pO2_ref_air)/(pO2_opt_air - c*pO2_opt_water)
return gains, carry_over
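# Illustrative check with made-up numbers (not real float data): with
#   pO2_ref_air   = np.array([200.0, 200.0])
#   pO2_opt_air   = np.array([202.0, 204.0])
#   pO2_opt_water = np.array([210.0, 220.0])
# the regression gives a carry-over c = 0.2 and gains of [1.0, 1.0], i.e. a
# perfectly calibrated optode whose in-air samples contain a 20% in-water
# contribution.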
def vertically_align(P1, P2, V2):
out = np.nan*np.ones(P1.shape)
for i, p in enumerate(P1):
index = np.abs(P2 - p) == np.min(np.abs(P2 - p))
out[i] = np.nanmean(V2[index])
return out
def delta_pres(P1, P2):
dpres = np.nan*np.ones(P1.shape)
for i, p in enumerate(P1):
index = np.abs(P2 - p) == np.min(np.abs(P2 - p))
dpres[i] = np.nanmean(P2[index] - p)
return dpres
def range_check(key, floatdict, verbose=True):
if 'range_dict' not in globals():
global range_dict
range_dict = dict(
PRES=(-5, np.inf),
TEMP=(-2.5, 40),
PSAL=(2, 41),
DOXY=(-5, 600),
)
cleandict = copy.deepcopy(floatdict)
argo_var = floatdict[key]
r = range_dict[key.replace('_ADJUSTED','')]
outside_range = np.logical_or(argo_var < r[0], argo_var > r[1])
if verbose:
sys.stdout.write('{} values found outside RTQC range check, replacing with NaN\n'.format(np.sum(outside_range)))
argo_var[outside_range] = np.nan
cleandict[key] = argo_var
return cleandict
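# Illustrative call (hypothetical float dictionary): values outside the RTQC
# range are replaced with NaN, e.g.
#   clean = range_check('TEMP', floatdict)           # TEMP outside (-2.5, 40) -> NaN
#   clean = range_check('DOXY_ADJUSTED', floatdict)  # uses the DOXY range (-5, 600)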
def calc_fixed_doxy_adjusted_error(floatdict, fix_err=10):
'''
Calculate DOXY_ADJUSTED_ERROR for fixed partial pressure of 10 mbar
PPOX_DOXY.
'''
S = floatdict['PSAL']
T = floatdict['TEMP']
P = floatdict['PRES']
error = unit.pO2_to_doxy(np.array(S.shape[0]*[fix_err]), S, T, P=P)
return error
def oxy_b(dt, tau):
inv_b = 1 + 2*(tau/dt)
return 1/inv_b
def oxy_a(dt, tau):
return 1 - 2*oxy_b(dt, tau)
# the look-up table from the supplement to Bittig and Kortzinger (2017) is
# bundled as a module (..lut) so the raw text file does not have to be
# shipped with the package
from ..lut import lut as lut_data
def correct_response_time(t, DO, T, thickness):
# convert time to seconds
t_sec = t*24*60*60
# array for the loop
N = DO.shape[0]
mean_oxy = np.array((N-1)*[np.nan])
mean_time = t_sec[:-1] + np.diff(t_sec)/2
mean_temp = T[:-1] + np.diff(T)/2
# load temperature, boundary layer thickness, and tau matrix from
# look-up table provided in the supplement to Bittig and Kortzinger (2017)
lut_lL = lut_data[0,1:]
lut_T = lut_data[1:,0]
tau100 = lut_data[1:,1:]
thickness = thickness*np.ones((N-1,))
# translate boundary layer thickness to temperature dependent tau
f_thickness = interp2d(lut_T, lut_lL, tau100.T, bounds_error=False)
tau_T = np.squeeze(f_thickness(mean_temp, thickness))[0,:]
# loop through oxygen data
for i in range(N-1):
dt = t_sec[i+1] - t_sec[i]
# do the correction using the mean filter, get the mean time
mean_oxy[i] = (1/(2*oxy_b(dt, tau_T[i])))*(DO[i+1] - oxy_a(dt, tau_T[i])*DO[i])
# interpolate back to original times for output
f = interp1d(mean_time, mean_oxy, kind='linear', bounds_error=False, fill_value='extrapolate')
DO_out = f(t_sec)
return DO_out
def correct_response_time_Tconst(t, DO, tau):
# convert time to seconds
t_sec = t*24*60*60
# array for the loop
N = DO.shape[0]
mean_oxy = np.array((N-1)*[np.nan])
mean_time = t_sec[:-1] + np.diff(t_sec)/2
# loop through oxygen data
for i in range(N-1):
dt = t_sec[i+1] - t_sec[i]
# do the correction using the mean filter, get the mean time
mean_oxy[i] = (1/(2*oxy_b(dt, tau)))*(DO[i+1] - oxy_a(dt, tau)*DO[i])
# interpolate back to original times for output
f = interp1d(mean_time, mean_oxy, kind='linear', bounds_error=False, fill_value='extrapolate')
DO_out = f(t_sec)
return DO_out
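# Illustrative call (made-up values): correct a profile whose time axis t is in
# days assuming a constant optode response time of 70 seconds:
#   DO_corrected = correct_response_time_Tconst(t, DO, tau=70.0)
# correct_response_time() above does the same but looks tau up from temperature
# and boundary layer thickness instead of using a constant.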
def get_optode_type(wmo):
if '__metaindex__' not in globals():
global __metaindex__
__metaindex__ = get_index(index='meta')
ix = __metaindex__[__metaindex__.wmo == wmo]
local_file = Path(ARGO_PATH) / ix.dac.iloc[0] / str(wmo) / ix.file.iloc[0].split('/')[-1]
nc = Dataset(local_file)
doxy_index = io.get_parameter_index(nc['SENSOR'][:].data, 'OPTODE_DOXY')
if doxy_index.shape[0] == 0:
return 'NO_OPTODE_FOUND'
else:
optode_type = io.read_ncstr(nc['SENSOR_MODEL'][:].data[doxy_index[0], :])
return optode_type
def profile_qc(flags):
'''
Return overall profile quality flag via the following from the Argo User
Manual (v 3.41):
3.2.2 Reference table 2a: overall profile quality flag
https://vocab.nerc.ac.uk/collection/RP2/current
N is defined as the percentage of levels with good data where:
- QC flag values of 1, 2, 5, or 8 are considered GOOD data
- QC flag values of 9 (missing) or " " are NOT USED in the computation
All other QC flag values are BAD data
The computation should be taken from <PARAM_ADJUSTED>_QC if available and from
<PARAM>_QC otherwise.
n Meaning
"" No QC performed
A N = 100%; All profile levels contain good data.
B 75% <= N < 100%
C 50% <= N < 75%
D 25% <= N < 50%
E 0% < N < 25%
F N = 0%; No profile levels have good data.
Args:
- flags (pandas.Series): quality flags for a given profile
Returns:
- grade (str): profile grade based on description above
'''
n_good = flags.isin([1, 2, 5, 8]).sum()
n_exclude = flags.isin([9]).sum()
pct = 100*n_good/(flags.size - n_exclude)
grade = np.nan
if flags.isin([0]).sum() >= flags.size - n_exclude:
grade = ''
# do not overwrite the 'no QC performed' grade assigned above
elif pct == 100:
grade = 'A'
elif pct >= 75:
grade = 'B'
elif pct >= 50:
grade = 'C'
elif pct >= 25:
grade = 'D'
elif pct > 0:
grade = 'E'
elif pct == 0:
grade = 'F'
if not type(grade) == str and np.isnan(grade):
raise ValueError('No grade assigned, check input value of `flags`')
return grade | 37.613876 | 199 | 0.610735 | 6,672 | 49,876 | 4.369904 | 0.115408 | 0.015126 | 0.012485 | 0.007168 | 0.396316 | 0.321992 | 0.285533 | 0.26221 | 0.231685 | 0.221704 | 0 | 0.017699 | 0.262551 | 49,876 | 1,326 | 200 | 37.613876 | 0.774965 | 0.258661 | 0 | 0.271871 | 0 | 0.004038 | 0.109958 | 0.000727 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061911 | false | 0 | 0.02288 | 0.006729 | 0.139973 | 0.002692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87c0afc1daf540dd230e441894740b3b8d3a3b9f | 624 | py | Python | deprecated/jutge-like/P96767b.py | balqui/pytokr | 45878d5480efb9c8cb66a5657f6c646361a43633 | [
"MIT"
] | null | null | null | deprecated/jutge-like/P96767b.py | balqui/pytokr | 45878d5480efb9c8cb66a5657f6c646361a43633 | [
"MIT"
] | null | null | null | deprecated/jutge-like/P96767b.py | balqui/pytokr | 45878d5480efb9c8cb66a5657f6c646361a43633 | [
"MIT"
] | null | null | null |
def make_get_toks(f=None):
"make iterator and next functions out of iterable of split strings"
from sys import stdin
from itertools import chain
def sp(ln):
"to split the strings with a map"
return ln.split()
def the_it():
"so that both results are callable in similar manner"
return it
if f is None:
f = stdin
it = chain.from_iterable(map(sp, f))
return the_it, it.__next__
get_toks, get_tok = make_get_toks()
x = float(get_tok())
x_pow = 1
r = 0
for coef in get_toks():
r += x_pow * float(coef)
x_pow *= x
print('{:.4f}'.format(r))
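# Worked example (hypothetical input): for the stdin tokens "2 1 0 3" the first
# token gives x = 2.0 and the remaining tokens are coefficients of increasing
# powers of x, so r = 1 + 0*2 + 3*2**2 = 13 and the program prints 13.0000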
| 18.909091 | 71 | 0.620192 | 103 | 624 | 3.582524 | 0.514563 | 0.075881 | 0.059621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006682 | 0.280449 | 624 | 32 | 72 | 19.5 | 0.815145 | 0.238782 | 0 | 0 | 0 | 0 | 0.245981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.363636 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87c37cd09f0821303e00a18c5d526b25a9f43c3f | 8,877 | py | Python | src/reporting.py | centonization/centonizationtheory | 8c4f383d1a659a5f6a0784110a4e362e7bd81d5c | [
"MIT"
] | null | null | null | src/reporting.py | centonization/centonizationtheory | 8c4f383d1a659a5f6a0784110a4e362e7bd81d5c | [
"MIT"
] | null | null | null | src/reporting.py | centonization/centonizationtheory | 8c4f383d1a659a5f6a0784110a4e362e7bd81d5c | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
import re
from collections import Counter
import extraction
import model
plt.rcParams["font.family"] = "serif"
def get_amins_plot(frame_grouped, nawba, nawba_centones):
"""
Plot distribution from <frame_grouped> for <nawba> that are in <nawba_centones>[<nawba>]
"""
relevant_patterns = nawba_centones[nawba]
this_frame = frame_grouped[(frame_grouped['index'] == nawba) & (frame_grouped['pattern'].isin(relevant_patterns))].sort_values(by='tf-idf', ascending=False)
plt.xticks(rotation=60)
plt.title('{}, Amins Centones'.format(nawba.replace('_',' ')))
plt.ylabel('Average tf-idf')
plt.xlabel('Centone')
p = plt.bar(this_frame['pattern'], this_frame['tf-idf'])
return plt
def string_set(string_list):
return set(i for i in string_list
if not any(i in s for s in string_list if i != s))
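# Illustrative behaviour: string_set(['ab', 'abc', 'b']) returns {'abc'}, since
# 'ab' and 'b' occur inside another entry and only maximal strings are kept.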
def get_patterns(frame_grouped, nawba, scores_in_nawba, min_freq, exclude_monopattern, prefer_superstrings=True):
# Apply selection rules
this_nawba = frame_grouped[(frame_grouped['index'] == nawba)]
this_nawba = this_nawba[this_nawba['frequency']/scores_in_nawba > min_freq]
if exclude_monopattern:
this_nawba = this_nawba.loc[~this_nawba['pattern'].apply(lambda y: len(set(extraction.reduce_pattern(y))) == 1)]
this_frame = this_nawba.sort_values(by='tf-idf', ascending=False)
top_patterns = sorted(this_frame['pattern'])
# If substrings filter to include the largest
if prefer_superstrings:
top_patterns_filt = string_set(top_patterns)
else:
top_patterns_filt = set(top_patterns)
this_frame = this_frame[this_frame['pattern'].isin(top_patterns_filt)]
return this_frame
def get_top_centones_plot(frame_grouped, nawba, nawba_centones, scores_in_nawba, height=12, width=10, min_freq=10, exclude_monopattern=False, prefer_superstrings=True):
"""
Plot top <n> centones for <nawba> in <frame_grouped>
Bars marked green are centones in <nawba_centones>[<nawba>]
Bars marked red are centones that are superstrings of <nawba_centones>[<nawba>]
Bars marked blue are centones not specified in lookup tables
<scores_in_nawba> is used to normalise pattern frequency
(number of scores for this Nawba)
"""
nawba_string = nawba.replace('_',' ')
# Colour scheme
bar_edge_colour = '#383838'
canvas_colour = '#ffffff'
gridline_colour = '#bcbcbc'
standard_bar_colour = '#f7f7f7'
amin_bar_colour = '#303030'
super_amin_bar_colour = '#aaaaaa'
this_frame = get_patterns(frame_grouped, nawba, scores_in_nawba, min_freq, exclude_monopattern, prefer_superstrings)
patterns = this_frame['pattern']
tfidf = this_frame['tf-idf']
frequencies = [int(x/scores_in_nawba) for x in this_frame['frequency']]
max_tfidf = max(tfidf)
fig, ax = plt.subplots()
fig.set_figheight(height)
fig.set_figwidth(width)
# gridlines beneath other elements
#ax.xaxis.grid(True, color=gridline_colour, linestyle='dashed', alpha=0.8)
#ax.set_axisbelow(True)
# Canvas colour
ax.set_facecolor(canvas_colour)
# Custom Legend
custom_boxes = [Line2D([0], [0], color=standard_bar_colour, lw=4),
Line2D([0], [0], color=amin_bar_colour, lw=4),
Line2D([0], [0], color=super_amin_bar_colour, lw=4),
Line2D([0], [0], color='#000000', lw=0)]
ax.legend(
custom_boxes,
["New pattern", "Chaachoo's Pattern", "Superstring of Chaachoo's pattern", "Average frequency per recording in nawba"]
)
# Example data
y_pos = np.arange(len(patterns))
# Create horizontal bars
bars = ax.barh(y_pos, tfidf, align='center', color=standard_bar_colour, edgecolor=bar_edge_colour)
ax.set_yticks(y_pos)
ax.set_yticklabels(patterns, fontsize=12)
ax.invert_yaxis() # labels read top-to-bottom
# Labels
plt.title('{}, Highest Ranking Patterns'.format(nawba_string), size=18)
plt.xlabel('Average tf-idf', size=16)
plt.ylabel('Pattern', size=16)
# Annotate with frequency
for i, t in enumerate(tfidf):
ax.text(t + max_tfidf/400, i + .25, str(frequencies[i]), color='black')
# Colour bars as per amins centones
for i, pat in enumerate(patterns):
if any([x == extraction.reduce_pattern(pat) for x in nawba_centones[nawba]]):
bars[i].set_color(amin_bar_colour)
bars[i].set_edgecolor(bar_edge_colour)
#print('\\textbf{'+pat+'},', end=" ")
elif any([x in extraction.reduce_pattern(pat) for x in nawba_centones[nawba]]):
bars[i].set_color(super_amin_bar_colour)
bars[i].set_edgecolor(bar_edge_colour)
#print('\\textit{'+pat+'},', end=" ")
else:
pass
#print(pat+',', end=" ")
#print(len(patterns))
return patterns
def get_all_patterns(set_tabs, frame_grouped, tab_num_scores, min_freq, exclude_monopattern=False, prefer_superstrings=False, tfidf_thresh=0, return_confidence=False):
max_tfidf = max(frame_grouped['tf-idf'])
lim = tfidf_thresh*max_tfidf
frame_grouped = frame_grouped[frame_grouped['tf-idf']>lim]
all_patterns = pd.DataFrame(columns=['index','pattern','tf-idf','frequency','num_scores'])
for t in set_tabs:
this_df = get_patterns(frame_grouped, t, tab_num_scores[t], min_freq=min_freq, exclude_monopattern=exclude_monopattern, prefer_superstrings=prefer_superstrings)
all_patterns = pd.concat([all_patterns, this_df])
our_patterns = {}
for t,df in all_patterns.groupby('index'):
if return_confidence:
our_patterns[t] = list(zip(df['pattern'].values, df['tf-idf'].values))
else:
our_patterns[t] = df['pattern'].values
return our_patterns
def get_recalls(our_patterns, centones_tab, set_tabs, print_screen=True, match_superstrings=False):
all_ours = []
all_his = []
tot_n = 0
results_dict = {}
if print_screen:
print('Recall Scores')
print('-------------')
for t in set_tabs:
if t in our_patterns:
ours = our_patterns[t]
else:
ours = []
his = centones_tab[t]
all_ours += list(ours)
all_his += list(his)
n_ = [is_match(x, set(his), match_superstrings) for x in set(ours)]
n = len(set([x for y in n_ for x in y if x]))
tot_n += n
h = len(set(his))
R = n/h
P = n/len(ours) if len(ours) > 0 else np.nan
results_dict[t] = (R, P)
if print_screen:
print('{t}: {R} ({n}/{h})'.format(R=R, t=t, n=n, h=h))
#print(P)
h = len(all_his)
R = tot_n/h
P = tot_n/len(all_ours) if len(all_ours) > 0 else np.nan
if print_screen:
print('\nOverall: {R} ({n}/{h})'.format(R=R, t=t, n=tot_n, h=h))
results_dict['overall'] = (R,P)
return results_dict
def get_bop(patterns, min_n=3, max_n=10):
notes = [[y[0] for y in x['notes']] for x in patterns]
extracted = [extraction.extract_pattern_grams(nt, min_n=3, max_n=10) for nt in notes]
return extracted
def complete_pipeline(patterns, tab_num_scores, centones_tab, min_n, max_n, min_freq, exclude_monopatterns, prefer_superstrings, match_superstrings=False, tfidf_thresh=0):
tabs = [p['tab'] for p in patterns]
set_tabs = set(tabs)
# Get Bag of Patterns
extracted = get_bop(patterns, min_n=min_n, max_n=max_n)
# TF-IDF
distributions = model.get_tfidf_distributions(extracted)
# Average
frame_grouped = average_tfidf(distributions, tabs)
# Get Patterns
our_patterns = get_all_patterns(set_tabs, frame_grouped, tab_num_scores, min_freq=min_freq, exclude_monopattern=True, prefer_superstrings=False, tfidf_thresh=tfidf_thresh)
# Evaluate
ct = {k:v for k,v in centones_tab.items() if k in set_tabs}
set_tabs = {x for x in set_tabs if x in ct}  # keep only tabs that have reference centones
results_dict = get_recalls(our_patterns, ct, set_tabs, print_screen=False, match_superstrings=match_superstrings)
recall, precision = results_dict['overall']
return recall, precision, [len(x) for x in our_patterns.values()]
def average_tfidf(distributions, tabs):
set_tabs = list(set(tabs))
tab_num_scores = Counter(tabs)
index_tab = dict(enumerate(set_tabs))
tab_index = {v:k for k,v in index_tab.items()}
all_tabs_index = [tab_index[t] for t in tabs]
frame_grouped = model.average_tfidf(distributions, tabs)
return frame_grouped
def is_match(x, st, superstrings=False):
if not superstrings:
return [x] if x in st else []
else:
return [s for s in st if s in x]
| 35.794355 | 175 | 0.661823 | 1,263 | 8,877 | 4.421219 | 0.19715 | 0.045129 | 0.008596 | 0.022385 | 0.230122 | 0.157951 | 0.119448 | 0.108345 | 0.103868 | 0.088467 | 0 | 0.010647 | 0.217078 | 8,877 | 247 | 176 | 35.939271 | 0.792806 | 0.110736 | 0 | 0.077419 | 0 | 0 | 0.067554 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0.006452 | 0.051613 | 0.006452 | 0.187097 | 0.058065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87c55c196ed21b7a0e0092c7dfc1c7be846cb7a0 | 770 | py | Python | python/synonym.py | robotlightsyou/test | 015f13943fc402d8ce86c5f6d2f5a7d032b3340a | [
"MIT"
] | 2 | 2019-05-26T15:09:34.000Z | 2021-09-12T08:01:23.000Z | python/synonym.py | robotlightsyou/test | 015f13943fc402d8ce86c5f6d2f5a7d032b3340a | [
"MIT"
] | null | null | null | python/synonym.py | robotlightsyou/test | 015f13943fc402d8ce86c5f6d2f5a7d032b3340a | [
"MIT"
] | 1 | 2021-04-11T20:28:21.000Z | 2021-04-11T20:28:21.000Z | def synonym_queries(synonym_words, queries):
'''
synonym_words: iterable of pairs of strings representing synonymous words
queries: iterable of pairs of strings representing queries to be tested for
synonymous-ness
'''
synonyms = defaultdict(set)
for w1, w2 in synonym_words:
synonyms[w1].add(w2)
def are_synonyms(q1, q2):
q1, q2 = q1.split(), q2.split()
if len(q1) != len(q2):
return False
for i, (w1, w2) in enumerate(zip(q1, q2)):
if w1 != w2:
s1, s2 = synonyms.get(w1, ()), synonyms.get(w2, ())
if not (w1 in s2 or w2 in s1):
return False
return True
return [are_synonyms(q1, q2) for q1, a2 in queries]
| 33.478261 | 79 | 0.568831 | 104 | 770 | 4.153846 | 0.384615 | 0.037037 | 0.087963 | 0.078704 | 0.166667 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0.057915 | 0.327273 | 770 | 22 | 80 | 35 | 0.776062 | 0.225974 | 0 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87c83c3ea9e2036aa73c734a113a6b2415f08bcd | 671 | py | Python | Rock Paper Scissors/practice/practice6.py | OIrabor24/Python-Games | 0d84c04f126984b5afbbc5d26ac4f4cef7cd1534 | [
"MIT"
] | null | null | null | Rock Paper Scissors/practice/practice6.py | OIrabor24/Python-Games | 0d84c04f126984b5afbbc5d26ac4f4cef7cd1534 | [
"MIT"
] | null | null | null | Rock Paper Scissors/practice/practice6.py | OIrabor24/Python-Games | 0d84c04f126984b5afbbc5d26ac4f4cef7cd1534 | [
"MIT"
] | null | null | null | import random
def rock_paper_scissors():
player = input("Choose 'r' for rock, 'p' for paper, or 's' for scissors: ")
choices = ['r', 'p', 's']
opponent = random.choice(choices)
if player == opponent:
return print(f"It's a tie! You both selected {player}")
if winner(player, opponent):
return print(f"You win! {player} beats {opponent}!")
if winner(player, opponent) != True:
return print(f"You lose! {opponent} beats {player}")
def winner(user, computer):
if (user == 'r' and computer == 's') or (user == 'p' and computer == 'r') or (user == 's' and computer == 'p'):
return True
rock_paper_scissors() | 33.55 | 115 | 0.603577 | 93 | 671 | 4.311828 | 0.376344 | 0.104738 | 0.089776 | 0.124688 | 0.129676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.233979 | 671 | 20 | 116 | 33.55 | 0.780156 | 0 | 0 | 0 | 0 | 0 | 0.258929 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.466667 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87c967ff6fa14d95e0be17635ab85e0425981494 | 5,009 | py | Python | stanCode projects/find_anagram/anagram.py | dianapei/SC-projects | f6a9a7f5b20cc81a8dce9b94621bac58274669d5 | [
"MIT"
] | null | null | null | stanCode projects/find_anagram/anagram.py | dianapei/SC-projects | f6a9a7f5b20cc81a8dce9b94621bac58274669d5 | [
"MIT"
] | null | null | null | stanCode projects/find_anagram/anagram.py | dianapei/SC-projects | f6a9a7f5b20cc81a8dce9b94621bac58274669d5 | [
"MIT"
] | null | null | null | """
File: anagram.py
Name: Diana Pei-Rung Yu
----------------------------------
This program recursively finds all the anagram(s)
for the word input by user and terminates when the
input string matches the EXIT constant defined
at line 19
If you correctly implement this program, you should see the
number of anagrams for each word listed below:
* arm -> 3 anagrams
* contains -> 5 anagrams
* stop -> 6 anagrams
* tesla -> 10 anagrams
* spear -> 12 anagrams
"""
# Constants
FILE = 'dictionary.txt' # This is the filename of an English dictionary
EXIT = '-1' # Controls when to stop the loop
# Global variable
dictionary = {} # to save all the words in FILE
def main():
"""
This program will find the anagrams of the word provide by user.
user can leave the program by enter EXIT constant
"""
print('Welcome to stanCode \"Anagram Generator\" (or -1 to quit)')
read_dictionary()
while True:
# get word from user
word = input('Find anagram for: ')
# to be case insensitive
word = word.lower()
if word == EXIT:
break
else:
find_anagrams(word)
def read_dictionary():
"""
this function adds all the words in FILE to the global variable dictionary, using the first letter
of each word as the key and the list of words starting with that letter as the value
"""
global dictionary
with open(FILE, 'r') as f:
for line in f:
# clear the front and behind each line to have clean word
line = line.strip()
# add word to dictionary
if line[0] not in dictionary:
dictionary[line[0]] = [line]
else:
dictionary[line[0]].append(line)
def find_anagrams(s):
"""
:param s: the word provide by user
:return: the anagram in dictionary
"""
# tell user the program is searching for anagram
print('Searching...')
# to store the anagrams that can be found in dictionary
a_lst = []
# turn the string given by user into lst
s_lst = word_generator(s)
# use s_lst to get character combinations and see if it's in dictionary
find_anagrams_helper(s_lst, len(s_lst), [], a_lst)
# show the number of anagrams we found and list of the anagrams
print(len(a_lst), 'anagrams: ', a_lst)
def word_generator(s):
"""
:param s: str, the word given by the user
:return: lst, list containing all the characters of s
"""
s_lst = []
for i in range(len(s)):
ch = s[i]
s_lst.append(ch)
return s_lst
def find_anagrams_helper(lst, target, current, a_lst):
"""
:param lst: lst, contain all the characters
:param target: num, target length of current, which is the same as the length of the word provided by the user
:param current: lst, to store the characters by order
:param a_lst: lst, to store the anagram of the word provided by user
:return: lst, all anagram words
"""
# check word if current length equals to lst
if len(current) == target:
# turn the current character order list into string
candidate = word_processor(current)
# avoid to have duplicate anagram
if candidate in a_lst:
return
else:
# check if candidate is in dictionary
if candidate in dictionary[current[0]]:
a_lst.append(candidate)
print('Found: ' + candidate)
# tell user the program is searching for anagram
print('Searching...')
# check next word combination
else:
return
else:
for i in range(len(lst)):
ele = lst[i]
# choose
if len(current) == 0:
current.append(ele)
else:
word = word_processor(current)
# if there's a word come with current initial, will continue explore
if has_prefix(word):
current.append(ele)
else:
return
# Explore
find_anagrams_helper(lst[:i]+lst[i+1:], target, current, a_lst)
# un-choose
current.pop()
def word_processor(current):
"""
:param current: lst, stores the new order of the characters provided by the user
:return string: str, string version of current
"""
string = ''
for i in range(len(current)):
ch = current[i]
string += ch
return string
def has_prefix(test_word):
"""
:param test_word: str, the possible anagram word
:return: bool, whether any word in dictionary begin with test_word
"""
initial = test_word[0]
if initial in dictionary:
if len(test_word) == 1:
return True
else:
for word in dictionary[initial]:
if word.startswith(test_word):
return True
return False
if __name__ == '__main__':
main()
| 30.357576 | 108 | 0.589539 | 660 | 5,009 | 4.40303 | 0.271212 | 0.037164 | 0.017894 | 0.016518 | 0.084652 | 0.051617 | 0.036476 | 0.036476 | 0.036476 | 0.036476 | 0 | 0.005628 | 0.326013 | 5,009 | 164 | 109 | 30.542683 | 0.855154 | 0.457776 | 0 | 0.220779 | 0 | 0 | 0.055512 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.194805 | 0.064935 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87cba13bbdb979545c6706a7dabc31b281c6e0f6 | 1,043 | py | Python | release/src/router/wget/testenv/conf/expected_ret_code.py | zhoutao0712/rtn11pb1 | 09e6b6c7ef4b91be0a9374daeacc3ac9f2fa3a05 | [
"Apache-2.0"
] | null | null | null | release/src/router/wget/testenv/conf/expected_ret_code.py | zhoutao0712/rtn11pb1 | 09e6b6c7ef4b91be0a9374daeacc3ac9f2fa3a05 | [
"Apache-2.0"
] | null | null | null | release/src/router/wget/testenv/conf/expected_ret_code.py | zhoutao0712/rtn11pb1 | 09e6b6c7ef4b91be0a9374daeacc3ac9f2fa3a05 | [
"Apache-2.0"
] | null | null | null | from exc.test_failed import TestFailed
from conf import hook
""" Post-Test Hook: ExpectedRetCode
This is a post-test hook which checks if the exit code of the Wget instance
under test is the same as that expected. As a result, this is a very important
post test hook which is checked in all the tests.
Returns a TestFailed exception if the return code does not match the expected
value. Else returns gracefully.
"""
@hook(alias='ExpectedRetcode')
class ExpectedRetCode:
def __init__(self, expected_ret_code):
self.expected_ret_code = expected_ret_code
def __call__(self, test_obj):
if test_obj.ret_code != self.expected_ret_code:
if test_obj.ret_code == 45:
failure = "Memory Leak Found by Valgrind"
else:
failure = "Return codes do not match.\n" \
"Expected: %s\n" \
"Actual: %s" % (self.expected_ret_code,
test_obj.ret_code)
raise TestFailed(failure)
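# Illustrative use (hypothetical test object): the hook is built with the expected
# exit code and called with the finished test instance, e.g.
#   check = ExpectedRetCode(0)
#   check(test_obj)  # raises TestFailed unless test_obj.ret_code == 0
# A return code of 45 is reported as a memory leak found by Valgrind.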
| 37.25 | 78 | 0.639501 | 141 | 1,043 | 4.546099 | 0.453901 | 0.087363 | 0.117005 | 0.118565 | 0.120125 | 0.081123 | 0 | 0 | 0 | 0 | 0 | 0.002717 | 0.294343 | 1,043 | 27 | 79 | 38.62963 | 0.868207 | 0 | 0 | 0 | 0 | 0 | 0.139332 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87d0409427b4dddbeb8ed6ac9f5c9e4cffe9da54 | 3,339 | py | Python | mmdet/core/utils/checkpoint_hook.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | mmdet/core/utils/checkpoint_hook.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | mmdet/core/utils/checkpoint_hook.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | import re
import os
from mmcv.runner.hooks.logger.base import LoggerHook
from mmcv.runner import master_only
from mmdet.core.utils import logger
class CheckpointHook(LoggerHook):
def __init__(self,
save_every_n_steps,
max_to_keep=2,
keep_every_n_epochs=50,
keep_in_n_epoch=[],
ignore_last=True,
reset_flag=True,
**kwargs):
super(CheckpointHook, self).__init__(save_every_n_steps, ignore_last, reset_flag)
self.save_every_n_steps = save_every_n_steps
self.max_to_keep = max_to_keep
self.keep_every_n_epochs = keep_every_n_epochs
self.keep_in_n_epoch = keep_in_n_epoch
def clear_extra_checkpoints(self, work_dir):
rm_filename_dict = dict()
for filename in os.listdir(work_dir):
if not filename.endswith('.pth') or 'iter' not in filename:
continue
# keep the checkpoints which are protected.
iter_n = re.search("epoch_[0-9]*_iter_([0-9]*).pth", filename)
if iter_n:
rm_filename_dict[int(iter_n.group(1))] = filename
sort_iter_list = sorted(rm_filename_dict.keys())
for rm_iter in sort_iter_list[: -self.max_to_keep]:
os.remove(os.path.join(work_dir, rm_filename_dict[rm_iter]))
def save_checkpoint(self, runner, protect=False, offset=1, iter_no_offset=False):
epoch_n, iter_n = runner.epoch + offset, runner.iter + offset
if iter_no_offset:
iter_n = runner.iter
filename_tmpl = 'epoch_{}.pth'
if not protect:
filename_tmpl = 'epoch_{{0}}_iter_{}.pth'.format(iter_n)
runner.save_checkpoint(runner.work_dir,
filename_tmpl=filename_tmpl,
save_optimizer=True,
offset=offset,
iter_no_offset=iter_no_offset)
logger.info("The new checkpoint has been saved to [{}]".format(os.path.join(
runner.work_dir, filename_tmpl.format(epoch_n))))
try:
self.clear_extra_checkpoints(runner.work_dir)
except Exception:
logger.warn("Cannot remove the old checkpoints.")
@master_only
def after_train_iter(self, runner):
# note that when after_train_iter is called, the iter num has not plus 1 yet.
if self.every_n_iters(runner, self.save_every_n_steps):
self.save_checkpoint(runner)
# @master_only
# def before_train_epoch(self, runner):
# if runner.epoch > 0:
# self.save_checkpoint(runner, protect=False, iter_no_offset=False)
@master_only
def after_train_epoch(self, runner):
# note that when after_train_iter is called, the epoch num has not plus 1 yet.
if self.every_n_epochs(runner, self.keep_every_n_epochs) or \
(runner.epoch + 1) in self.keep_in_n_epoch:
self.save_checkpoint(runner, protect=True, iter_no_offset=True)
else:
self.save_checkpoint(runner, protect=False, iter_no_offset=True)
@master_only
def after_run(self, runner):
# note that when after_run is called, the iter and epoch num has already plus 1.
self.save_checkpoint(runner, offset=0)
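# Illustrative registration (hypothetical values): save every 5000 iterations,
# keep only the two newest iteration checkpoints, and protect a checkpoint
# every 50 epochs:
#   hook = CheckpointHook(save_every_n_steps=5000, max_to_keep=2, keep_every_n_epochs=50)
#   runner.register_hook(hook)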
| 40.719512 | 89 | 0.630129 | 451 | 3,339 | 4.341463 | 0.237251 | 0.033708 | 0.042901 | 0.038304 | 0.304392 | 0.140449 | 0.12666 | 0.12666 | 0.12666 | 0.07763 | 0 | 0.006697 | 0.284516 | 3,339 | 81 | 90 | 41.222222 | 0.812892 | 0.126685 | 0 | 0.048387 | 0 | 0 | 0.050912 | 0.018232 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.080645 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87d2d0c2f2d27d7a883f2f561beb750c3bc97ecc | 1,950 | py | Python | carbondesign/tags/list_.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | carbondesign/tags/list_.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | carbondesign/tags/list_.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | """
List
====
See: https://www.carbondesignsystem.com/components/list/usage/
Lists are vertical groupings of related content. List items begin with either
a number or a bullet.
Overview
--------
Lists consist of related content grouped together and organized vertically.
Use bulleted lists when you don’t need to convey a specific order for list
items.
Use numbered lists when you need to convey a priority, hierarchy, or sequence
between list items.
""" # pylint:disable=line-too-long
# pylint:disable=too-many-lines
from .base import Node
class List(Node):
"""List component.
"""
WANT_CHILDREN = True
"Template Tag needs closing end tag."
NODE_PROPS = ('native',)
"Extended Template Tag arguments."
DEFAULT_TAG = 'ul'
"Rendered HTML tag."
def prepare(self, values, context):
"""Prepare values for rendering the templates.
"""
if values['tag'] == 'ul':
values['class'].append('bx--list--unordered')
elif self.eval(self.kwargs.get('native'), context):
values['class'].append('bx--list--ordered--native')
else:
values['class'].append('bx--list--ordered')
if context.get('list_nested'):
values['class'].append('bx--list--nested')
context['list_nested'] = True
def render_default(self, values, context):
"""Output html of the component.
"""
template = """
<{tag} class="{class}" {props}>
{child}
</{tag}>
"""
return self.format(template, values)
class ListItem(Node):
"""List item component.
"""
WANT_CHILDREN = True
"Template Tag needs closing end tag."
def render_default(self, values, context):
"""Output html of the component.
"""
template = """
<li class="bx--list__item {class}" {props}>
{child}
</li>
"""
return self.format(template, values)
components = {
'List': List,
'Li': ListItem,
}
| 23.780488 | 77 | 0.624103 | 237 | 1,950 | 5.092827 | 0.434599 | 0.045568 | 0.056338 | 0.062966 | 0.334714 | 0.246893 | 0.197183 | 0.197183 | 0.197183 | 0.197183 | 0 | 0 | 0.232821 | 1,950 | 81 | 78 | 24.074074 | 0.806818 | 0.353846 | 0 | 0.358974 | 0 | 0 | 0.307504 | 0.03752 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.025641 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87d2e9c941b60074b0a07fec9a0055c544567a73 | 4,536 | py | Python | ns/port/red_port.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | 3 | 2021-06-17T01:57:43.000Z | 2021-12-16T11:53:31.000Z | ns/port/red_port.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | null | null | null | ns/port/red_port.py | wsyCUHK/ns.py | 44f9c627a9d3b9b31a0799b9a9cea50560eda8a1 | [
"Apache-2.0"
] | null | null | null | """
Models an output port on a switch with a given rate and buffer size (in either bytes
or the number of packets), using the Early Random Detection (RED) mechanism to drop packets.
"""
import random
from ns.port.port import Port
class REDPort(Port):
""" Models an output port on a switch with a given rate and buffer size (in either bytes
or the number of packets), using the Early Random Detection (RED) mechanism to drop packets.
Parameters
----------
env: simpy.Environment
the simulation environment
rate: float
the bit rate of the port
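max_threshold: int
the average queue size above which arriving packets are dropped with
probability max_probability
min_threshold: int
the average queue size above which arriving packets start to be dropped
probabilistically
max_probability: float
the maximum drop probability, applied between min_threshold and max_threshold
weight_factor: int
exponent of the exponential moving average used for the average queue size
(alpha = 2 ** -weight_factor); defaults to 9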
element_id: int
the element id of this port
qlimit: integer (or None)
a buffer size limit in bytes or packets for the queue (including items
in service).
limit_bytes: bool
if true, the queue limit will be based on bytes if false the queue limit
will be based on packets.
zero_downstream_buffer: bool
if true, assume that the downstream element does not have any buffers,
and backpressure is in effect so that all waiting packets queue up in this
element's buffer.
debug: bool
if true, print more debugging information.
"""
def __init__(self,
env,
rate: float,
max_threshold: int,
min_threshold: int,
max_probability: float,
weight_factor: int = 9,
element_id: int = None,
qlimit: int = None,
limit_bytes: bool = True,
zero_downstream_buffer: bool = False,
debug: bool = False):
super().__init__(env,
rate,
element_id=element_id,
qlimit=qlimit,
limit_bytes=limit_bytes,
zero_downstream_buffer=zero_downstream_buffer,
debug=debug)
self.max_probability = max_probability
self.max_threshold = max_threshold
self.min_threshold = min_threshold
self.weight_factor = weight_factor
self.average_queue_size = 0
def put(self, pkt):
""" Sends the packet 'pkt' to this element. """
self.packets_received += 1
if self.limit_bytes:
current_queue_size = self.byte_size
else:
current_queue_size = len(self.store.items)
alpha = 2**-self.weight_factor
self.average_queue_size = self.average_queue_size * (
1 - alpha) + current_queue_size * alpha
if self.average_queue_size >= self.qlimit:
self.packets_dropped += 1
if self.debug:
print(f"Average queue size ({self.average_queue_size}) "
f"exceeds threshold ({self.qlimit})")
elif self.average_queue_size >= self.max_threshold:
rand = random.uniform(0, 1)
if rand <= self.max_probability:
self.packets_dropped += 1
if self.debug:
print(
f"Avg queue size ({self.average_queue_size}) "
f"exceeds threshold ({self.qlimit})",
f"Packet dropped with probability {self.max_probability}"
)
else:
self.byte_size += pkt.size
if self.zero_downstream_buffer:
self.downstream_store.put(pkt)
return self.store.put(pkt)
elif self.average_queue_size >= self.min_threshold:
prob = (self.average_queue_size - self.min_threshold) / (
self.max_threshold - self.min_threshold) * self.max_probability
rand = random.uniform(0, 1)
if rand <= prob:
self.packets_dropped += 1
if self.debug:
print(
f"Avg queue size ({self.average_queue_size}) exceeds min threshold",
f"Packet dropped with probability {prob}")
else:
self.byte_size += pkt.size
if self.zero_downstream_buffer:
self.downstream_store.put(pkt)
return self.store.put(pkt)
else:
self.byte_size += pkt.size
if self.zero_downstream_buffer:
self.downstream_store.put(pkt)
return self.store.put(pkt)
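# Worked example with made-up parameters: with min_threshold=50, max_threshold=100,
# max_probability=0.2 and an average queue size of 75, the drop probability is
# (75 - 50) / (100 - 50) * 0.2 = 0.1, so roughly one arriving packet in ten is
# discarded while the average stays between the two thresholds; above
# max_threshold, packets are dropped with probability max_probability instead.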
| 39.103448 | 100 | 0.550265 | 515 | 4,536 | 4.673786 | 0.229126 | 0.059826 | 0.07312 | 0.083091 | 0.525966 | 0.464063 | 0.430827 | 0.340673 | 0.340673 | 0.325717 | 0 | 0.00427 | 0.380511 | 4,536 | 115 | 101 | 39.443478 | 0.852313 | 0.23567 | 0 | 0.342105 | 0 | 0 | 0.095296 | 0.03146 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.026316 | 0 | 0.105263 | 0.039474 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87d3896732548709f56de5a6b0039bd16b06b971 | 831 | py | Python | Array/769. Max Chunks To Make Sorted/solution.py | Shalini180/LeetCode-Solutions | 0ea364fa40b69dd129230903a0bdbe0fc116db08 | [
"Unlicense"
] | 9 | 2021-03-24T11:21:03.000Z | 2022-02-14T05:05:48.000Z | Array/769. Max Chunks To Make Sorted/solution.py | Shalini180/LeetCode-Solutions | 0ea364fa40b69dd129230903a0bdbe0fc116db08 | [
"Unlicense"
] | 38 | 2021-10-07T18:04:12.000Z | 2021-12-05T05:53:27.000Z | Array/769. Max Chunks To Make Sorted/solution.py | Shalini180/LeetCode-Solutions | 0ea364fa40b69dd129230903a0bdbe0fc116db08 | [
"Unlicense"
] | 27 | 2021-10-06T19:55:48.000Z | 2021-11-18T16:53:20.000Z | '''
You are given an integer array arr of length n that represents a permutation of the integers in the range [0, n - 1].
We split arr into some number of chunks (i.e., partitions), and individually sort each chunk. After concatenating them, the result should equal the sorted array.
Return the largest number of chunks we can make to sort the array.
Example 1:
Input: arr = [4,3,2,1,0]
Output: 1
Explanation:
Splitting into two or more chunks will not return the required result.
For example, splitting into [4, 3], [2, 1, 0] will result in [3, 4, 0, 1, 2], which isn't sorted.
'''
from typing import List
class Solution:
def maxChunksToSorted(self, arr: List[int]) -> int:
n = len(arr)
ma = 0
chunks = 0
for i in range(n):
ma = max(ma,arr[i])
if ma == i:chunks += 1
return chunks
| 33.24 | 161 | 0.651023 | 140 | 831 | 3.864286 | 0.535714 | 0.029575 | 0.051756 | 0.014787 | 0.018484 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035427 | 0.252708 | 831 | 24 | 162 | 34.625 | 0.835749 | 0.694344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87d54e2d24d4b863a6839fe3dfc5b4dd568e5522 | 2,292 | py | Python | firmware_tools/ghidra/vxhunter_firmware_init.py | FiniteStateInc/vxhunter | aaacf453dc77180012da8de13e25a2ebb6688fab | [
"BSD-2-Clause"
] | null | null | null | firmware_tools/ghidra/vxhunter_firmware_init.py | FiniteStateInc/vxhunter | aaacf453dc77180012da8de13e25a2ebb6688fab | [
"BSD-2-Clause"
] | null | null | null | firmware_tools/ghidra/vxhunter_firmware_init.py | FiniteStateInc/vxhunter | aaacf453dc77180012da8de13e25a2ebb6688fab | [
"BSD-2-Clause"
] | 2 | 2020-03-23T19:12:46.000Z | 2020-12-22T17:08:32.000Z | # coding=utf-8
from vxhunter_core import *
from vxhunter_utility.symbol import *
from vxhunter_utility.common import *
from ghidra.util.task import TaskMonitor
# For https://github.com/VDOO-Connected-Trust/ghidra-pyi-generator
try:
from ghidra_builtins import *
except Exception as err:
pass
try:
vx_version = askChoice("Choice", "Please choose VxWorks main Version ", ["5.x", "6.x"], "5.x")
if vx_version == u"5.x":
vx_version = 5
elif vx_version == u"6.x":
vx_version = 6
if vx_version:
firmware_path = currentProgram.domainFile.getMetadata()['Executable Location']
firmware = open(firmware_path, 'rb').read()
target = VxTarget(firmware=firmware, vx_version=vx_version)
# target.logger.setLevel(logging.DEBUG)
target.quick_test()
if target.load_address is None:
target.find_loading_address()
if target.load_address:
load_address = target.load_address
target.logger.info("load_address:%s" % hex(load_address))
# Rebase_image
target_block = currentProgram.memory.blocks[0]
print("target_block: %s" % target_block)
address = toAddr(load_address)
print("address: %s" % address)
currentProgram.memory.moveBlock(target_block, address, TaskMonitor.DUMMY)
# Create symbol table structs
symbol_table_start = target.symbol_table_start + target.load_address
symbol_table_end = target.symbol_table_end + target.load_address
fix_symbol_table_structs(symbol_table_start, symbol_table_end, vx_version)
# Load symbols
symbols = target.get_symbols()
for symbol in symbols:
try:
symbol_name = symbol["symbol_name"]
symbol_name_addr = symbol["symbol_name_addr"]
symbol_dest_addr = symbol["symbol_dest_addr"]
symbol_flag = symbol["symbol_flag"]
add_symbol(symbol_name, symbol_name_addr, symbol_dest_addr, symbol_flag)
except Exception as err:
continue
else:
popup("Can't find symbols in binary")
except Exception as err:
print(err)
| 34.208955 | 98 | 0.63089 | 269 | 2,292 | 5.126394 | 0.375465 | 0.058738 | 0.061639 | 0.04351 | 0.156635 | 0.136331 | 0.08702 | 0 | 0 | 0 | 0 | 0.005468 | 0.28185 | 2,292 | 66 | 99 | 34.727273 | 0.832321 | 0.073735 | 0 | 0.130435 | 0 | 0 | 0.094991 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.021739 | 0.108696 | 0 | 0.108696 | 0.065217 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87d89d3ed71a9a090a6dd8a120fec2ab36dbb63e | 8,457 | py | Python | backend/kesaseteli/applications/exporters/excel_exporter.py | jannetasa/yjdh | 5d86a56c722dfbcee03110f66c7e7ddbea966db9 | [
"MIT"
] | null | null | null | backend/kesaseteli/applications/exporters/excel_exporter.py | jannetasa/yjdh | 5d86a56c722dfbcee03110f66c7e7ddbea966db9 | [
"MIT"
] | null | null | null | backend/kesaseteli/applications/exporters/excel_exporter.py | jannetasa/yjdh | 5d86a56c722dfbcee03110f66c7e7ddbea966db9 | [
"MIT"
] | null | null | null | import io
from django.db.models import QuerySet
from django.shortcuts import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from xlsxwriter import Workbook
from applications.models import SummerVoucher
from common.utils import getattr_nested
FIELDS = (
# Field title, cell value format, field names in the summer voucher model, column width, cell background colour
(_("Järjestys"), "", [], 15, "white"),
(_("Saatu pvm"), "%s", ["application__created_at"], 15, "white"),
(_("Hakemuksen kieli"), "%s", ["application__language"], 15, "white"),
(_("Setelin numero"), "%s", ["summer_voucher_serial_number"], 30, "white"),
(
_("Erikoistapaus (esim yhdeksäsluokkalainen)"),
"%s",
["summer_voucher_exception_reason"],
30,
"white",
),
("", "", [], 5, "#7F7F7F"),
(_("Nuoren nimi"), "%s", ["employee_name"], 30, "#DCEDF8"),
(_("Henkilötunnus"), "%s", ["employee_ssn"], 30, "#DCEDF8"),
(_("Koulu"), "%s", ["employee_school"], 30, "#DCEDF8"),
(_("Kotipostinumero"), "%s", ["employee_postcode"], 15, "#DCEDF8"),
(_("Puhelin"), "%s", ["employee_phone_number"], 30, "#DCEDF8"),
(_("Kotikaupunki"), "%s", ["employee_home_city"], 30, "#DCEDF8"),
("", "", [], 5, "#7F7F7F"),
(
_("Työnantaja muoto"),
"%s",
["application__company__company_form"],
15,
"#E7E3F9",
),
(_("Työnantaja"), "%s", ["application__company__name"], 30, "#E7E3F9"),
(_("Y-tunnus"), "%s", ["application__company__business_id"], 15, "#E7E3F9"),
(
_("Työnantajan lähiosoite"),
"%s",
["application__company__street_address"],
30,
"#E7E3F9",
),
(
_("Työnantajan postinumero"),
"%s",
["application__company__postcode"],
15,
"#E7E3F9",
),
(_("Työnantajan kunta"), "%s", ["application__company__city"], 15, "#E7E3F9"),
(_("Yhdyshenkilö"), "%s", ["application__contact_person_name"], 30, "#E7E3F9"),
(
_("Yhdyshenkilön sähköposti"),
"%s",
["application__contact_person_email"],
30,
"#E7E3F9",
),
(
_("Yhdyshenkilön Puhelin"),
"%s",
["application__contact_person_phone_number"],
30,
"#E7E3F9",
),
(
_("Erillinen laskuttaja"),
"%s",
["application__is_separate_invoicer"],
30,
"#E7E3F9",
),
(_("Laskuttajan nimi"), "%s", ["application__invoicer_name"], 30, "#E7E3F9"),
(
_("Laskuttajan sähköposti"),
"%s",
["application__invoicer_email"],
30,
"#E7E3F9",
),
(
_("Laskuttajan Puhelin"),
"%s",
["application__invoicer_phone_number"],
30,
"#E7E3F9",
),
(_("Yrityksen toimiala"), "%s", ["application__company__industry"], 30, "#E7E3F9"),
("", "", [], 5, "#7F7F7F"),
(
_("Työn suorituspaikan postinumero"),
"%s",
["employment_postcode"],
15,
"#F7DAE3",
),
(
_("Työsuhteen aloituspäivämäärä"),
"%s",
["employment_start_date"],
30,
"#F7DAE3",
),
(
_("Työsuhteen päättymispäivämäärä"),
"%s",
["employment_end_date"],
30,
"#F7DAE3",
),
(_("Työtunnit"), "%s", ["employment_work_hours"], 15, "#F7DAE3"),
(_("Maksettu palkka"), "%s", ["employment_salary_paid"], 15, "#F7DAE3"),
(_("Muut edut"), "%s", [""], 15, "#F7DAE3"),
(_("Raporttiin luokittelu"), "%s", [""], 15, "#F7DAE3"),
(_("Työtehtävät"), "%s", ["employment_description"], 30, "#F7DAE3"),
(
_("Olisitko palkannut?"),
"%s",
["hired_without_voucher_assessment"],
15,
"#F7DAE3",
),
(_("Työnantajan kokemus"), "%s", [""], 30, "#F7DAE3"),
(_("Muuta"), "%s", [""], 30, "#F7DAE3"),
(_("Liite: Työsopimus 1"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Työsopimus 2"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Työsopimus 3"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Työsopimus 4"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Työsopimus 5"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Palkkalaskelma 1"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Palkkalaskelma 2"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Palkkalaskelma 3"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Palkkalaskelma 4"), "%s", ["attachments"], 120, "#F7DAE3"),
(_("Liite: Palkkalaskelma 5"), "%s", ["attachments"], 120, "#F7DAE3"),
)
def get_xlsx_filename() -> str:
"""
Get the name of the excel file. Example filename:
kesasetelihakemukset_2021-01-01_23-59-59.xlsx
"""
local_datetime_now_as_str = timezone.localtime(timezone.now()).strftime(
"%Y-%m-%d_%H-%M-%S"
)
filename = f"kesasetelihakemukset_{local_datetime_now_as_str}.xlsx"
return filename
def set_header_and_formatting(wb, ws, column, field, header_format):
ws.write(0, column, str(_(field[0])), header_format)
cell_format = wb.add_format()
cell_format.set_border(1)
cell_format.set_bg_color(field[4])
ws.set_column(column, column, field[3], cell_format)
def get_attachment_uri(summer_voucher: SummerVoucher, field: tuple, value, request):
field_name = field[0]
attachment_number = int(field_name.split(" ")[-1])
attachment_type = field_name.split(" ")[1]
if attachment_type == "Työsopimus":
attachment_type = "employment_contract"
elif attachment_type == "Palkkalaskelma":
attachment_type = "payslip"
# Get attachment of type `attachment_type` and use the OFFSET and LIMIT to get only the n'th entry
# where n is `attachment_number`.
attachment = (
value.filter(attachment_type=attachment_type)
.order_by("created_at")[attachment_number - 1 : attachment_number] # noqa
.first()
)
if not attachment:
return ""
path = reverse(
"v1:summervoucher-handle-attachment",
kwargs={"pk": summer_voucher.id, "attachment_pk": attachment.id},
)
return request.build_absolute_uri(path)
def handle_special_cases(value, attr_str, summer_voucher, field, request):
if isinstance(value, bool):
value = str(_("Kyllä")) if value else str(_("Ei"))
elif attr_str == "attachments":
value = get_attachment_uri(summer_voucher, field, value, request)
elif "application__invoicer" in attr_str and getattr(
summer_voucher, "application", None
):
value = value if summer_voucher.application.is_separate_invoicer else ""
return value
def write_data_row(ws, row_number, summer_voucher, request):
ws.write(row_number, 0, row_number)
timestamp = summer_voucher.created_at.astimezone().strftime("%d/%m/%Y")
ws.write(row_number, 1, timestamp)
for column_number, field in enumerate(FIELDS[2:], 2):
attr_names = field[2]
values = []
for attr_str in attr_names:
value = getattr_nested(summer_voucher, attr_str.split("__"))
value = handle_special_cases(
value, attr_str, summer_voucher, field, request
)
values.append(value)
cell_value = field[1] % tuple(values)
ws.write(row_number, column_number, cell_value)
def populate_workbook(wb: Workbook, summer_vouchers: QuerySet[SummerVoucher], request):
"""
Fill the workbook with information from the summer vouchers queryset. Field names and values are
fetched from the FIELDS tuple.
"""
ws = wb.add_worksheet(name=str(_("Setelit")))
wrapped_cell_format = wb.add_format()
wrapped_cell_format.set_text_wrap()
header_format = wb.add_format({"bold": True})
for column, field in enumerate(FIELDS):
set_header_and_formatting(wb, ws, column, field, header_format)
for row_number, summer_voucher in enumerate(summer_vouchers, 1):
write_data_row(ws, row_number, summer_voucher, request)
wb.close()
def export_applications_as_xlsx_output(
summer_vouchers: QuerySet[SummerVoucher], request
) -> bytes:
"""
Creates an xlsx file in memory, without saving it on the disk. Return the output value as bytes.
"""
output = io.BytesIO()
wb = Workbook(output)
populate_workbook(wb, summer_vouchers, request)
return output.getvalue()
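# Illustrative use in a view (hypothetical code; HttpResponse comes from django.http):
#   content = export_applications_as_xlsx_output(summer_vouchers, request)
#   response = HttpResponse(
#       content,
#       content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
#   )
#   response["Content-Disposition"] = f"attachment; filename={get_xlsx_filename()}"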
| 33.426877 | 102 | 0.599149 | 890 | 8,457 | 5.393258 | 0.289888 | 0.04 | 0.03125 | 0.04375 | 0.186458 | 0.134583 | 0.06125 | 0.06125 | 0.06125 | 0.043333 | 0 | 0.037542 | 0.228332 | 8,457 | 252 | 103 | 33.559524 | 0.697977 | 0.062788 | 0 | 0.255924 | 0 | 0 | 0.303342 | 0.103063 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033175 | false | 0 | 0.037915 | 0 | 0.094787 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87dc121187dc7fd544d9f06b185b9579263cbdac | 2,604 | py | Python | tests/continuous_performance_test.py | cswaney/fawkes | 90c623476bf62b808947277840a2d5de3c95a7ce | [
"MIT"
] | 4 | 2020-05-05T18:59:32.000Z | 2021-09-24T12:40:08.000Z | tests/continuous_performance_test.py | cswaney/fawkes | 90c623476bf62b808947277840a2d5de3c95a7ce | [
"MIT"
] | null | null | null | tests/continuous_performance_test.py | cswaney/fawkes | 90c623476bf62b808947277840a2d5de3c95a7ce | [
"MIT"
] | 1 | 2018-11-19T05:18:41.000Z | 2018-11-19T05:18:41.000Z | from fawkes.models import NetworkPoisson
import numpy as np
import time
# Continuous-Time
N = 2
T = 1000.0
dt_max = 1.00
bias = np.array([1.0, 1.0])
weights = np.array([[0.5, 0.2], [0.2, 0.5]])
mu = 0.0 * np.ones((N,N))
tau = 1.0 * np.ones((N,N))
params = {'lamb': bias, 'weights': weights, 'mu': mu, 'tau': tau}
net = NetworkPoisson(N=N, dt_max=dt_max, params=params)
data = net.generate_data(T=T)
# Class Gibbs sampling
# start = time.time()
# sample = net.sample(data, T, size=10)
# stop = time.time()
# py_time = stop - start
# print('elapsed time: {:.3f}\n'.format(py_time))
#
# start = time.time()
# sample = net.sample_ext(data, T, size=10)
# stop = time.time()
# cy_time = stop - start
# print('elapsed time: {:.3f}'.format(cy_time))
# print('speed-up: {:.2f}\n'.format(py_time / cy_time))
# Parent sampling
start = time.time()
for _ in range(1):
parents = net.sample_parents(data, bias, weights, mu, tau)
stop = time.time()
py_time = stop - start
print('elapsed time: {:.3f}'.format(py_time))
start = time.time()
for _ in range(1):
parents = net.sample_parents_ext(data, bias, weights, mu, tau, method='cython')
stop = time.time()
cy_time = stop - start
print('elapsed time: {:.3f}'.format(cy_time))
print('speed-up: {:.2f}\n'.format(py_time / cy_time))
# Discrete-Time
# from fawkes.extensions import sample_parents_discrete
# from fawkes.models import DiscreteNetworkPoisson
#
# N = 2
# B = 1
# dt = 1.0
# dt_max = 10
# T = 16000
# lambda0 = np.array([1.0, 1.0], dtype='float64')
# W = np.array([[0.5, 0.1], [0.1, 0.5]], dtype='float64')
# theta = (1 / B) * np.ones((B, N, N))
# params = {'weights': W, 'bias': lambda0, 'impulse': theta}
# model = DiscreteNetworkPoisson(N=N, L=dt_max, B=B, dt=dt, params=params)
# S = model.generate_data(T)
# Shat = model.convolve(S)
# Lambda = model.calculate_intensity(S, Shat)
# Parent sampling
# start = time.time()
# model.sample_parents(S, Shat, lambda0, W, theta)
# stop = time.time()
# py_time = stop - start
# print('elapsed time: {:.3f}'.format(py_time))
#
# start = time.time()
# model.sample_parents_ext(S, Shat, lambda0, W, theta)
# stop = time.time()
# cy_time = stop - start
# print('elapsed time: {:.3f}'.format(cy_time))
# print('speed-up: {:.2f}'.format(py_time / cy_time))
# Gibbs Sampling
# start = time.time()
# model.sample(S)
# stop = time.time()
# py_time = stop - start
# print('elapsed time: {:.3f}'.format(py_time))
#
# start = time.time()
# model.sample_ext(S)
# stop = time.time()
# cy_time = stop - start
# print('elapsed time: {:.3f}'.format(cy_time))
# print('speed-up: {:.2f}'.format(py_time / cy_time))
| 26.571429 | 83 | 0.647465 | 422 | 2,604 | 3.890995 | 0.177725 | 0.077954 | 0.063337 | 0.087698 | 0.644336 | 0.573691 | 0.471376 | 0.457978 | 0.436054 | 0.436054 | 0 | 0.03386 | 0.149386 | 2,604 | 97 | 84 | 26.845361 | 0.707449 | 0.629032 | 0 | 0.230769 | 0 | 0 | 0.088203 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87e070f5ffe74366b472e143326a525a8c93af75 | 6,638 | py | Python | statcord/client.py | Iapetus-11/better-statcord.py | c49c1a0d23dd0e0f3ecd906977a5fb70ded232b6 | [
"MIT"
] | 13 | 2021-04-26T18:43:12.000Z | 2022-02-05T13:51:49.000Z | statcord/client.py | Iapetus-11/better-statcord.py | c49c1a0d23dd0e0f3ecd906977a5fb70ded232b6 | [
"MIT"
] | 8 | 2021-05-06T18:53:02.000Z | 2021-11-26T09:32:07.000Z | statcord/client.py | Iapetus-11/better-statcord.py | c49c1a0d23dd0e0f3ecd906977a5fb70ded232b6 | [
"MIT"
] | 5 | 2021-04-29T18:37:53.000Z | 2021-12-10T13:42:02.000Z | from collections import defaultdict
from typing import Callable
import traceback
import aiohttp
import asyncio
import logging
import psutil
HEADERS = {"Content-Type": "application/json"}
STAT_ENDPOINT = "https://api.statcord.com/v3/stats"
def _get_package_name(obj: object) -> str:
return obj.__module__.split(".")[0]
class StatcordClient:
"""The base Statcord client class."""
def __init__(
self,
bot,
statcord_key: str,
custom_1: Callable = None,
custom_2: Callable = None,
resource_stats: bool = True,
) -> None:
self.bot = bot
self.statcord_key = statcord_key
self.custom_1 = custom_1
self.custom_2 = custom_2
# validate args
if not isinstance(statcord_key, str):
raise TypeError("The statcord_key argument must be a string.")
if not (custom_1 is None or callable(custom_1)):
raise TypeError("The custom_1 argument must be a callable.")
if not (custom_2 is None or callable(custom_2)):
raise TypeError("The custom_2 argument must be a callable.")
# setup logging
self.logger = logging.getLogger("statcord")
self.logger.setLevel(logging.WARNING)
# configuration
self.resource_stats = resource_stats
# create aiohttp clientsession instance
self._aiohttp_ses = aiohttp.ClientSession(loop=bot.loop)
# create counters
if self.resource_stats:
net_io_counter = psutil.net_io_counters()
self._prev_net_usage = net_io_counter.bytes_sent + net_io_counter.bytes_recv
else:
self._prev_net_usage = None
self._popular_commands = defaultdict(int)
self._command_count = 0
self._active_users = set()
# add on_command handler
bot.add_listener(self._command_ran, name="on_command")
if _get_package_name(bot) == "disnake":
bot.add_listener(self._disnake_slash_command_ran, name="on_slash_command")
# start stat posting loop
self._post_loop_task = bot.loop.create_task(self._post_loop())
def close(self) -> None:
"""Closes the Statcord client safely."""
self._post_loop_task.cancel()
self.bot.remove_listener(self._command_ran, name="on_command")
@staticmethod
def _format_traceback(e: Exception) -> str:
"""Formats exception traceback nicely."""
return "".join(traceback.format_exception(type(e), e, e.__traceback__, 4))
def _get_user_count(self) -> int:
"""Gets the user count of the bot as accurately as it can."""
cache_size = len(self.bot.users)
member_count = sum(
[g.member_count for g in self.bot.guilds if hasattr(g, "member_count") and g.member_count is not None]
)
return max(cache_size, member_count)
async def _command_ran(self, ctx) -> None:
"""Updates command-related statistics."""
if ctx.command_failed:
return
self._command_count += 1
self._active_users.add(ctx.author.id)
self._popular_commands[ctx.command.name] += 1
async def _disnake_slash_command_ran(self, inter: "disnake.ApplicationCommandInteraction") -> None: # type: ignore
"""Updates disnake slash command-related statistics."""
self._command_count += 1
self._active_users.add(inter.author.id)
self._popular_commands[inter.data.name] += 1
async def _post_loop(self) -> None:
"""The stat posting loop which posts stats to the Statcord API."""
while not self.bot.is_closed():
await self.bot.wait_until_ready()
try:
await self.post_stats()
except Exception as e:
self.logger.error(f"Statcord stat posting error:\n{self._format_traceback(e)}")
await asyncio.sleep(60)
async def _call_custom_graph(self, custom_graph_callable: Callable) -> object:
if custom_graph_callable is None:
return None
if asyncio.iscoroutinefunction(custom_graph_callable):
return await custom_graph_callable()
return custom_graph_callable()
async def post_stats(self) -> None:
"""Helper method used to actually post the stats to Statcord."""
self.logger.debug("Posting stats to Statcord...")
if self.resource_stats:
mem = psutil.virtual_memory()
mem_used = str(mem.used)
mem_load = str(mem.percent)
cpu_load = str(psutil.cpu_percent())
net_io_counter = psutil.net_io_counters()
total_net_usage = net_io_counter.bytes_sent + net_io_counter.bytes_recv # current net usage
period_net_usage = str(total_net_usage - self._prev_net_usage) # net usage to be sent
self._prev_net_usage = total_net_usage # update previous net usage counter
else:
mem_used = "0"
mem_load = "0"
cpu_load = "0"
period_net_usage = "0"
data = {
"id": str(self.bot.user.id),
"key": self.statcord_key,
"servers": str(len(self.bot.guilds)), # server count
"users": str(self._get_user_count()), # user count
"commands": str(self._command_count), # command count
"active": list(self._active_users),
"popular": [{"name": k, "count": v} for k, v in self._popular_commands.items()], # active commands
"memactive": mem_used,
"memload": mem_load,
"cpuload": cpu_load,
"bandwidth": period_net_usage,
}
custom_1_value = await self._call_custom_graph(self.custom_1)
custom_2_value = await self._call_custom_graph(self.custom_2)
if custom_1_value is not None:
data["custom1"] = custom_1_value
if custom_2_value is not None:
data["custom2"] = custom_2_value
# reset counters
self._popular_commands = defaultdict(int)
self._command_count = 0
self._active_users = set()
# actually send the post request
resp = await self._aiohttp_ses.post(url=STAT_ENDPOINT, json=data, headers=HEADERS)
# handle server response
if resp.status == 429:
self.logger.warning("Statcord is ratelimiting us.")
elif resp.status != 200:
raise Exception(f"Statcord server response status was not 200 OK (Was {resp.status}):\n{await resp.text()}")
else:
self.logger.debug("Successfully posted stats to Statcord.")
| 33.695431 | 120 | 0.629708 | 831 | 6,638 | 4.762936 | 0.258724 | 0.026276 | 0.018191 | 0.01617 | 0.188984 | 0.130369 | 0.130369 | 0.097019 | 0.059626 | 0.059626 | 0 | 0.009542 | 0.273727 | 6,638 | 196 | 121 | 33.867347 | 0.81145 | 0.076981 | 0 | 0.11811 | 0 | 0.007874 | 0.10626 | 0.016203 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03937 | false | 0 | 0.055118 | 0.007874 | 0.15748 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
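A minimal usage sketch for the client above (not part of the source file); it assumes a discord.py 1.x-style bot whose event loop exists at construction time, and the key and token strings are placeholders.

from discord.ext import commands
from statcord.client import StatcordClient

bot = commands.Bot(command_prefix="!")

def guild_count():
    # custom graphs are optional callables (sync or async) returning a value to post
    return len(bot.guilds)

statcord_client = StatcordClient(bot, "STATCORD_KEY_PLACEHOLDER", custom_1=guild_count)
bot.run("DISCORD_TOKEN_PLACEHOLDER")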
87e2b00d7e8c9d53d6c3b6e80a002d8cb05a8b2d | 6,706 | py | Python | sqlalchemy_continuum/model_builder.py | quantus/sqlalchemy-continuum | 1453888e4e696dac835f8b907b7f819433b27e6c | [
"BSD-3-Clause"
] | null | null | null | sqlalchemy_continuum/model_builder.py | quantus/sqlalchemy-continuum | 1453888e4e696dac835f8b907b7f819433b27e6c | [
"BSD-3-Clause"
] | null | null | null | sqlalchemy_continuum/model_builder.py | quantus/sqlalchemy-continuum | 1453888e4e696dac835f8b907b7f819433b27e6c | [
"BSD-3-Clause"
] | null | null | null | from copy import copy
import sqlalchemy as sa
from sqlalchemy_utils.functions import primary_keys, declarative_base
from .expression_reflector import ClassExpressionReflector
from .version import VersionClassBase
class ModelBuilder(object):
"""
VersionedModelBuilder handles the building of History models based on
parent table attributes and versioning configuration.
"""
def __init__(self, versioning_manager, model):
"""
:param versioning_manager:
VersioningManager object
:param model:
SQLAlchemy declarative model object that acts as a parent for the
built history model
"""
self.manager = versioning_manager
self.model = model
def build_parent_relationship(self):
"""
Builds a relationship between currently built history class and
parent class (the model whose history the currently build history
class represents).
"""
conditions = []
foreign_keys = []
for primary_key in primary_keys(self.model):
conditions.append(
getattr(self.model, primary_key.name)
==
getattr(self.extension_class, primary_key.name)
)
foreign_keys.append(
getattr(self.extension_class, primary_key.name)
)
# We need to check if versions relation was already set for parent
# class.
if not hasattr(self.model, 'versions'):
self.model.versions = sa.orm.relationship(
self.extension_class,
primaryjoin=sa.and_(*conditions),
foreign_keys=foreign_keys,
lazy='dynamic',
backref=sa.orm.backref(
'version_parent'
),
viewonly=True
)
def build_transaction_relationship(self, tx_log_class):
"""
Builds a relationship between currently built history class and
TransactionLog class.
:param tx_log_class: TransactionLog class
"""
# Only define transaction relation if it doesn't already exist in
# parent class.
backref_name = self.manager.options['relation_naming_function'](
self.model.__name__
)
if not hasattr(self.extension_class, 'transaction'):
self.extension_class.transaction = sa.orm.relationship(
tx_log_class,
primaryjoin=(
tx_log_class.id ==
self.extension_class.transaction_id
),
foreign_keys=[self.extension_class.transaction_id],
backref=self.manager.options['relation_naming_function'](
self.model.__name__
)
)
else:
setattr(
tx_log_class,
backref_name,
sa.orm.relationship(
self.extension_class,
primaryjoin=(
tx_log_class.id ==
self.extension_class.transaction_id
),
foreign_keys=[self.extension_class.transaction_id]
)
)
def build_changes_relationship(self, tx_changes_class):
"""
Builds a relationship between currently built history class and
TransactionChanges class.
:param tx_changes_class: TransactionChanges class
"""
# Only define changes relation if it doesn't already exist in
# parent class.
if not hasattr(self.extension_class, 'changes'):
self.extension_class.changes = sa.orm.relationship(
tx_changes_class,
primaryjoin=(
tx_changes_class.transaction_id ==
self.extension_class.transaction_id
),
foreign_keys=[tx_changes_class.transaction_id],
backref=self.manager.options['relation_naming_function'](
self.model.__name__
)
)
def find_closest_versioned_parent(self):
"""
Finds the closest versioned parent for current parent model.
"""
for class_ in self.model.__bases__:
if class_ in self.manager.history_class_map:
return (self.manager.history_class_map[class_], )
def base_classes(self):
"""
Returns all base classes for history model.
"""
parents = (
self.find_closest_versioned_parent()
or self.manager.option(self.model, 'base_classes')
or (declarative_base(self.model), )
)
return parents + (VersionClassBase, )
def inheritance_args(self):
"""
Return mapper inheritance args for currently built history model.
"""
if self.find_closest_versioned_parent():
reflector = ClassExpressionReflector(self.model)
inherit_condition = reflector(
self.model.__mapper__.inherit_condition
)
return {
'inherit_condition': inherit_condition
}
return {}
def build_model(self, table):
"""
Build history model class.
"""
mapper_args = {}
mapper_args.update(self.inheritance_args())
return type(
'%sHistory' % self.model.__name__,
self.base_classes(),
{
'__table__': table,
'__mapper_args__': mapper_args
}
)
def __call__(self, table, tx_log_class, tx_changes_class):
"""
Build history model and relationships to parent model, transaction
log model and transaction changes model.
"""
# versioned attributes need to be copied for each child class,
# otherwise each child class would share the same __versioned__
# option dict
self.model.__versioned__ = copy(self.model.__versioned__)
self.model.__versioned__['transaction_log'] = tx_log_class
self.model.__versioned__['transaction_changes'] = tx_changes_class
self.model.__versioned__['manager'] = self.manager
self.extension_class = self.build_model(table)
self.build_parent_relationship()
self.build_transaction_relationship(tx_log_class)
self.build_changes_relationship(tx_changes_class)
self.model.__versioned__['class'] = self.extension_class
self.extension_class.__parent_class__ = self.model
self.manager.history_class_map[self.model] = self.extension_class
| 36.053763 | 77 | 0.592156 | 654 | 6,706 | 5.753823 | 0.201835 | 0.052618 | 0.081318 | 0.053946 | 0.312251 | 0.266543 | 0.226415 | 0.18549 | 0.173798 | 0.145097 | 0 | 0 | 0.336415 | 6,706 | 185 | 78 | 36.248649 | 0.845618 | 0.204593 | 0 | 0.194915 | 0 | 0 | 0.0453 | 0.014368 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076271 | false | 0 | 0.042373 | 0 | 0.169492 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
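A sketch of what the builder above produces for a versioned parent model; the declarative model, the make_versioned() entry point and the configure_mappers() trigger are assumptions based on SQLAlchemy-Continuum's public API and may differ in this fork.

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_continuum import make_versioned

make_versioned()  # assumed entry point that wires the versioning manager into the mappers
Base = declarative_base()

class Article(Base):
    __tablename__ = 'article'
    __versioned__ = {}  # ModelBuilder later stores 'class', 'manager' and transaction classes here
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.Unicode(255))

sa.orm.configure_mappers()  # building happens once mappers are configured

# the builder creates ArticleHistory and wires Article.versions as a dynamic relationship to it
ArticleHistory = Article.__versioned__['class']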
87e348a7cdae20ba845eb052eaf1676cec4cf12f | 10,726 | py | Python | descarteslabs/workflows/types/geospatial/mixins.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 167 | 2017-03-23T22:16:58.000Z | 2022-03-08T09:19:30.000Z | descarteslabs/workflows/types/geospatial/mixins.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 93 | 2017-03-23T22:11:40.000Z | 2021-12-13T18:38:53.000Z | descarteslabs/workflows/types/geospatial/mixins.py | descarteslabs/descarteslabs-python | efc874d6062603dc424c9646287a9b1f8636e7ac | [
"Apache-2.0"
] | 46 | 2017-03-25T19:12:14.000Z | 2021-08-15T18:04:29.000Z | import six
from collections import abc
from ..containers import Dict, List, Tuple
from ..core import typecheck_promote, Proxytype
from ..primitives import Float, Int, Str, Bool
from ..proxify import proxify
from ..function import Function
class BandsMixin:
def __init__(self):
raise TypeError("Please use Image or ImageCollection.")
def with_bandinfo(self, band, **bandinfo):
if not isinstance(band, (Str, six.string_types)):
raise TypeError(
"Invalid type {!r} for band argument, must be a string.".format(
type(band).__name__
)
)
bandinfo_promoted = {}
for name, value in six.iteritems(bandinfo):
try:
bandinfo_promoted[name] = proxify(value)
except NotImplementedError as e:
raise ValueError(
"Invalid value {!r} for bandinfo field {!r}.\n{}".format(
value, name, str(e)
)
)
return self._from_apply("wf.with_bandinfo", self, band, **bandinfo_promoted)
def without_bandinfo(self, band, *bandinfo_keys):
if not isinstance(band, (Str, six.string_types)):
raise TypeError(
"Invalid type {!r} for band argument, must be a string.".format(
type(band).__name__
)
)
for bandinfo_key in bandinfo_keys:
if not isinstance(bandinfo_key, (Str, six.string_types)):
raise TypeError(
"Invalid type {!r} for bandinfo key, must be a string.".format(
type(bandinfo_key).__name__
)
)
return self._from_apply("wf.without_bandinfo", self, band, *bandinfo_keys)
def pick_bands(self, bands, allow_missing=False):
"""
New `Image`, containing only the given bands.
Bands can be given as a sequence of strings,
or a single space-separated string (like ``"red green blue"``).
Bands on the new `Image` will be in the order given.
If names are duplicated, repeated names will be suffixed with ``_N``,
with N incrementing from 1 for each duplication (``pick_bands("red red red")``
returns bands named ``red red_1 red_2``).
If the `Image` is empty, returns the empty `Image`.
If ``allow_missing`` is False (default), raises an error if given band
names that don't exist in the `Image`. If ``allow_missing``
is True, any missing names are dropped, and if none of the names exist,
returns an empty `Image`.
Example
-------
>>> from descarteslabs.workflows import Image
>>> img = Image.from_id("sentinel-2:L1C:2019-05-04_13SDV_99_S2B_v1")
>>> rgb = img.pick_bands("red green blue")
>>> rgb.bandinfo.keys().inspect(ctx) # doctest: +SKIP
["red", "green", "blue"]
>>> red = img.pick_bands(["red", "nonexistent_band_name"], allow_missing=True)
>>> red.bandinfo.keys().inspect(ctx) # doctest: +SKIP
["red"]
>>> s1_img = Image.from_id("sentinel-1:GRD:meta_2020-06-09_049A0903_S1B")
>>> vv_vh_vv = s1_img.pick_bands("vv vh vv")
>>> vv_vh_vv.bandinfo.keys().inspect(ctx) # doctest: +SKIP
["vv", "vh", "vv_1"]
"""
if isinstance(bands, abc.Sequence):
# Allows for a cleaner graft for this common use-case.
# Note that both strings and normal lists/tuples are Sequences.
if isinstance(bands, six.string_types):
bands = bands.split()
else:
if not all(
isinstance(band, six.string_types + (Str,)) for band in bands
):
raise TypeError(
"Band names must all be strings, not {!r}".format(bands)
)
return self._from_apply(
"wf.pick_bands", self, *bands, allow_missing=allow_missing
)
else:
if isinstance(bands, Str):
bands = bands.split()
return self._pick_bands_list(bands, allow_missing=allow_missing)
@typecheck_promote(List[Str], allow_missing=Bool)
def _pick_bands_list(self, bands, allow_missing=False):
return self._from_apply(
"wf.pick_bands_list", self, bands, allow_missing=allow_missing
)
def unpack_bands(self, bands):
"""
Convenience method for unpacking multiple bands into Python variables.
Returns a Python tuple of ``self.pick_bands`` called for each band name.
Bands can be given as a space-separated string of band names, or a sequence.
Example
-------
>>> from descarteslabs.workflows import Image
>>> img = Image.from_id("sentinel-2:L1C:2019-05-04_13SDV_99_S2B_v1")
>>> red, green, blue = img.unpack_bands("red green blue")
"""
if isinstance(bands, six.string_types):
bands = bands.split()
if not isinstance(bands, (abc.Sequence, Tuple)):
msg = "unpack_bands requires a Python string or sequence, not {}".format(
bands
)
if isinstance(bands, Proxytype):
msg += (
". Proxytypes cannot be used, since their length is unknown, "
"so we don't know how many values to return."
)
raise TypeError(msg)
if len(bands) == 1:
return self.pick_bands(bands[0])
else:
return tuple(self.pick_bands(band) for band in bands)
def rename_bands(self, *new_positional_names, **new_names):
"""
New `Image`, with bands renamed by position or name.
New names can be given positionally (like ``rename_bands('new_red', 'new_green')``),
which renames the i-th band to the i-th argument.
Or, new names can be given by keywords (like ``rename_bands(red="new_red")``)
mapping from old band names to new ones.
To eliminate ambiguity, names cannot be given both ways.
Example
-------
>>> from descarteslabs.workflows import Image
>>> img = Image.from_id("sentinel-2:L1C:2019-05-04_13SDV_99_S2B_v1")
>>> renamed = img.rename_bands(red="new_red", blue="new_blue", green="new_green")
"""
if len(new_positional_names) > 0 and len(new_names) > 0:
raise TypeError(
"New band names cannot be given both positionally and by name, "
"due to potential ambiguity. Please separate this into two calls."
)
if len(new_positional_names) > 0:
return self._rename_bands_positionally(new_positional_names)
else:
return self._rename_bands(new_names)
@typecheck_promote(Dict[Str, Str])
def _rename_bands(self, new_names):
return self._from_apply("wf.rename_bands", self, new_names)
@typecheck_promote(List[Str])
def _rename_bands_positionally(self, new_positional_names):
return self._from_apply(
"wf.rename_bands_positionally", self, new_positional_names
)
def map_bands(self, func):
"""
Map a function over each band in ``self``.
The function must take 2 arguments:
1. `Str`: the band name
2. `Image`: 1-band `Image`
If the function returns an `Image`, `map_bands` will also
return one `Image`, containing the bands from all Images
returned by ``func`` concatenated together.
Otherwise, `map_bands` will return a `Dict` of the results
of each call to ``func``, where the keys are the band names.
Note that ``func`` can return Images with more than 1 band,
but the band names must be unique across all of its results.
Parameters
----------
func: Python function
Function that takes a `Str` and an `Image`.
Returns
-------
`Image` if ``func`` returns `Image`, otherwise ``Dict[Str, T]``,
where ``T`` is the return type of ``func``.
Example
-------
>>> from descarteslabs.workflows import Image
>>> img = Image.from_id("sentinel-2:L1C:2019-05-04_13SDV_99_S2B_v1")
>>> mapped = img.map_bands(lambda name, band: band / 2) # divide each band by 2
"""
from .image import Image
from .imagecollection import ImageCollection
self_type = type(self)
delayed_func = Function.from_callable(func, Str, self_type)
result_type = delayed_func.return_type
func = "wf.map_bands_imagery"
if result_type not in (Image, ImageCollection):
result_type = Dict[Str, result_type]
func = "wf.map_bands"
return result_type._from_apply(func, self, delayed_func)
class GeometryMixin:
@typecheck_promote((Int, Float))
def buffer(self, distance):
"""
Buffer the area around ``self`` by a given distance.
Parameters
----------
distance: Int or Float
The distance (in decimal degrees) to buffer the area around the Geometry.
Returns
-------
Same type as self
Example
-------
>>> import descarteslabs.workflows as wf
>>> geom = wf.Geometry(type="Point", coordinates=[1, 2])
>>> buffered = geom.buffer(2)
"""
return self._from_apply("wf.buffer", self, distance)
@typecheck_promote(value=(Int, Float))
def rasterize(self, value=1):
"""
Rasterize this Geometry into an `~.geospatial.Image`
Parameters
----------
value: Int, Float, default=1
Fill pixels within the Geometry with this value.
Pixels outside the Geometry will be masked, and set to 0.
Note
----
Rasterization happens according to the `~.workflows.types.geospatial.GeoContext`
of the `.Job`, so the geometry is projected into and rasterized at
that CRS and resolution.
Returns
-------
rasterized: ~.geospatial.Image
An Image with 1 band named ``"features"``, and empty properties and bandinfo.
Example
-------
>>> import descarteslabs.workflows as wf
>>> geom = wf.Geometry(type="Point", coordinates=[1, 2])
>>> geom.rasterize(value=0.5)
<descarteslabs.workflows.types.geospatial.image.Image object at 0x...>
"""
from .image import Image
return Image._from_apply("wf.rasterize", self, value)
| 37.114187 | 92 | 0.58512 | 1,299 | 10,726 | 4.688222 | 0.214781 | 0.017734 | 0.01445 | 0.021839 | 0.319704 | 0.244335 | 0.197537 | 0.154023 | 0.141872 | 0.126765 | 0 | 0.014988 | 0.309528 | 10,726 | 288 | 93 | 37.243056 | 0.807318 | 0.417024 | 0 | 0.208333 | 0 | 0 | 0.137156 | 0.005246 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.083333 | 0.025 | 0.316667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
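Pulling the docstring examples above together into one sketch; the image ID is the one used in the docstrings, and actual band availability depends on the Descartes Labs catalog.

from descarteslabs.workflows import Image

img = Image.from_id("sentinel-2:L1C:2019-05-04_13SDV_99_S2B_v1")

# band selection and renaming from BandsMixin
red, green, blue = img.unpack_bands("red green blue")
rgb = img.pick_bands("red green blue").rename_bands("r", "g", "b")

# map a function over every band; the result is an Image because the function returns an Image
halved = img.map_bands(lambda name, band: band / 2)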
87e670fc8f6366014190f114f8136ab2523cc5d9 | 14,712 | py | Python | InnerEye/ML/Histopathology/preprocessing/create_tiles_dataset.py | faz1993/InnerEye-DeepLearning | fb258d5c9a3ba18565b5a67e7ac1f00127d9ecb9 | [
"MIT"
] | 402 | 2020-09-22T16:38:16.000Z | 2022-03-30T09:56:03.000Z | InnerEye/ML/Histopathology/preprocessing/create_tiles_dataset.py | wensincai/InnerEye-DeepLearning | ccb53d01ad0f1c20336588c0066059b8de5266fd | [
"MIT"
] | 259 | 2020-09-23T09:32:33.000Z | 2022-03-30T18:15:01.000Z | InnerEye/ML/Histopathology/preprocessing/create_tiles_dataset.py | wensincai/InnerEye-DeepLearning | ccb53d01ad0f1c20336588c0066059b8de5266fd | [
"MIT"
] | 112 | 2020-09-23T00:12:58.000Z | 2022-03-31T07:39:55.000Z | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import functools
import logging
import shutil
import traceback
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union
import numpy as np
import PIL
from monai.data import Dataset
from monai.data.image_reader import WSIReader
from tqdm import tqdm
from InnerEye.ML.Histopathology.datasets.base_dataset import SlidesDataset
from InnerEye.ML.Histopathology.preprocessing import tiling
from InnerEye.ML.Histopathology.preprocessing.loading import LoadROId, segment_foreground
from InnerEye.ML.Histopathology.utils.naming import SlideKey, TileKey
logging.basicConfig(format='%(asctime)s %(message)s', filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def select_tiles(foreground_mask: np.ndarray, occupancy_threshold: float) \
-> Tuple[np.ndarray, np.ndarray]:
"""Exclude tiles that are mostly background based on estimated occupancy.
:param foreground_mask: Boolean array of shape (*, H, W).
:param occupancy_threshold: Tiles with lower occupancy (between 0 and 1) will be discarded.
:return: A tuple containing which tiles were selected and the estimated occupancies. These will
be boolean and float arrays of shape (*,), or scalars if `foreground_mask` is a single tile.
"""
if occupancy_threshold < 0. or occupancy_threshold > 1.:
raise ValueError("Tile occupancy threshold must be between 0 and 1")
occupancy = foreground_mask.mean(axis=(-2, -1))
return (occupancy > occupancy_threshold).squeeze(), occupancy.squeeze() # type: ignore
def get_tile_descriptor(tile_location: Sequence[int]) -> str:
"""Format the XY tile coordinates into a tile descriptor."""
return f"{tile_location[0]:05d}x_{tile_location[1]:05d}y"
def get_tile_id(slide_id: str, tile_location: Sequence[int]) -> str:
"""Format the slide ID and XY tile coordinates into a unique tile ID."""
return f"{slide_id}.{get_tile_descriptor(tile_location)}"
def save_image(array_chw: np.ndarray, path: Path) -> PIL.Image:
"""Save an image array in (C, H, W) format to disk."""
path.parent.mkdir(parents=True, exist_ok=True)
array_hwc = np.moveaxis(array_chw, 0, -1).astype(np.uint8).squeeze()
pil_image = PIL.Image.fromarray(array_hwc)
pil_image.convert('RGB').save(path)
return pil_image
def generate_tiles(slide_image: np.ndarray, tile_size: int, foreground_threshold: float,
occupancy_threshold: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, int]:
"""Split the foreground of an input slide image into tiles.
:param slide_image: The RGB image array in (C, H, W) format.
:param tile_size: Lateral dimensions of each tile, in pixels.
:param foreground_threshold: Luminance threshold (0 to 255) to determine tile occupancy.
:param occupancy_threshold: Threshold (between 0 and 1) to determine empty tiles to discard.
:return: A tuple containing the image tiles (N, C, H, W), tile coordinates (N, 2), occupancies
(N,), and total number of discarded empty tiles.
"""
image_tiles, tile_locations = tiling.tile_array_2d(slide_image, tile_size=tile_size,
constant_values=255)
foreground_mask, _ = segment_foreground(image_tiles, foreground_threshold)
selected, occupancies = select_tiles(foreground_mask, occupancy_threshold)
n_discarded = (~selected).sum()
logging.info(f"Percentage tiles discarded: {n_discarded / len(selected) * 100:.2f}")
image_tiles = image_tiles[selected]
tile_locations = tile_locations[selected]
occupancies = occupancies[selected]
return image_tiles, tile_locations, occupancies, n_discarded
def get_tile_info(sample: Dict[SlideKey, Any], occupancy: float, tile_location: Sequence[int],
rel_slide_dir: Path) -> Dict[TileKey, Any]:
"""Map slide information and tiling outputs into tile-specific information dictionary.
:param sample: Slide dictionary.
:param occupancy: Estimated tile foreground occuppancy.
:param tile_location: Tile XY coordinates.
:param rel_slide_dir: Directory where tiles are saved, relative to dataset root.
:return: Tile information dictionary.
"""
slide_id = sample[SlideKey.SLIDE_ID]
descriptor = get_tile_descriptor(tile_location)
rel_image_path = f"{rel_slide_dir}/{descriptor}.png"
tile_info = {
TileKey.SLIDE_ID: slide_id,
TileKey.TILE_ID: get_tile_id(slide_id, tile_location),
TileKey.IMAGE: rel_image_path,
TileKey.LABEL: sample[SlideKey.LABEL],
TileKey.TILE_X: tile_location[0],
TileKey.TILE_Y: tile_location[1],
TileKey.OCCUPANCY: occupancy,
TileKey.SLIDE_METADATA: {TileKey.from_slide_metadata_key(key): value
for key, value in sample[SlideKey.METADATA].items()}
}
return tile_info
def format_csv_row(tile_info: Dict[TileKey, Any], keys_to_save: Iterable[TileKey],
metadata_keys: Iterable[str]) -> str:
"""Format tile information dictionary as a row to write to a dataset CSV tile.
:param tile_info: Tile information dictionary.
:param keys_to_save: Which main keys to include in the row, and in which order.
:param metadata_keys: Likewise for metadata keys.
:return: The formatted CSV row.
"""
tile_slide_metadata = tile_info.pop(TileKey.SLIDE_METADATA)
fields = [str(tile_info[key]) for key in keys_to_save]
fields.extend(str(tile_slide_metadata[key]) for key in metadata_keys)
dataset_row = ','.join(fields)
return dataset_row
def process_slide(sample: Dict[SlideKey, Any], level: int, margin: int, tile_size: int,
foreground_threshold: Optional[float], occupancy_threshold: float, output_dir: Path,
tile_progress: bool = False) -> None:
"""Load and process a slide, saving tile images and information to a CSV file.
:param sample: Slide information dictionary, returned by the input slide dataset.
:param level: Magnification level at which to process the slide.
:param margin: Margin around the foreground bounding box, in pixels at lowest resolution.
:param tile_size: Lateral dimensions of each tile, in pixels.
:param foreground_threshold: Luminance threshold (0 to 255) to determine tile occupancy.
If `None` (default), an optimal threshold will be estimated automatically.
:param occupancy_threshold: Threshold (between 0 and 1) to determine empty tiles to discard.
:param output_dir: Root directory for the output dataset; outputs for a single slide will be
saved inside `output_dir/slide_id/`.
:param tile_progress: Whether to display a progress bar in the terminal.
"""
slide_metadata: Dict[str, Any] = sample[SlideKey.METADATA]
keys_to_save = (TileKey.SLIDE_ID, TileKey.TILE_ID, TileKey.IMAGE, TileKey.LABEL,
TileKey.TILE_X, TileKey.TILE_Y, TileKey.OCCUPANCY)
metadata_keys = tuple(TileKey.from_slide_metadata_key(key) for key in slide_metadata)
csv_columns: Tuple[str, ...] = (*keys_to_save, *metadata_keys)
slide_id: str = sample[SlideKey.SLIDE_ID]
rel_slide_dir = Path(slide_id)
slide_dir = output_dir / rel_slide_dir
logging.info(f">>> Slide dir {slide_dir}")
if slide_dir.exists(): # already processed slide - skip
logging.info(f">>> Skipping {slide_dir} - already processed")
return
else:
try:
slide_dir.mkdir(parents=True)
dataset_csv_path = slide_dir / "dataset.csv"
dataset_csv_file = dataset_csv_path.open('w')
dataset_csv_file.write(','.join(csv_columns) + '\n') # write CSV header
n_failed_tiles = 0
failed_tiles_csv_path = slide_dir / "failed_tiles.csv"
failed_tiles_file = failed_tiles_csv_path.open('w')
failed_tiles_file.write('tile_id' + '\n')
logging.info(f"Loading slide {slide_id} ...")
loader = LoadROId(WSIReader('cuCIM'), level=level, margin=margin,
foreground_threshold=foreground_threshold)
sample = loader(sample) # load 'image' from disk
logging.info(f"Tiling slide {slide_id} ...")
image_tiles, rel_tile_locations, occupancies, _ = \
generate_tiles(sample[SlideKey.IMAGE], tile_size,
sample[SlideKey.FOREGROUND_THRESHOLD],
occupancy_threshold)
tile_locations = (sample[SlideKey.SCALE] * rel_tile_locations
+ sample[SlideKey.ORIGIN]).astype(int)
n_tiles = image_tiles.shape[0]
logging.info(f"Saving tiles for slide {slide_id} ...")
for i in tqdm(range(n_tiles), f"Tiles ({slide_id[:6]}…)", unit="img", disable=not tile_progress):
try:
tile_info = get_tile_info(sample, occupancies[i], tile_locations[i], rel_slide_dir)
save_image(image_tiles[i], output_dir / tile_info[TileKey.IMAGE])
dataset_row = format_csv_row(tile_info, keys_to_save, metadata_keys)
dataset_csv_file.write(dataset_row + '\n')
except Exception as e:
n_failed_tiles += 1
descriptor = get_tile_descriptor(tile_locations[i])
failed_tiles_file.write(descriptor + '\n')
traceback.print_exc()
warnings.warn(f"An error occurred while saving tile "
f"{get_tile_id(slide_id, tile_locations[i])}: {e}")
dataset_csv_file.close()
failed_tiles_file.close()
if n_failed_tiles > 0:
                # TODO: what do we want to do with slides that have some failed tiles?
logging.warning(f"{slide_id} is incomplete. {n_failed_tiles} tiles failed.")
logging.info(f"Finished processing slide {slide_id}")
except Exception as e:
traceback.print_exc()
warnings.warn(f"An error occurred while processing slide {slide_id}: {e}")
def merge_dataset_csv_files(dataset_dir: Path) -> Path:
"""Combines all "*/dataset.csv" files into a single "dataset.csv" file in the given directory."""
full_csv = dataset_dir / "dataset.csv"
# TODO change how we retrieve these filenames, probably because mounted, the operation is slow
# and it seems to find many more files
# print("List of files")
# print([str(file) + '\n' for file in dataset_dir.glob("*/dataset.csv")])
with full_csv.open('w') as full_csv_file:
# full_csv_file.write(','.join(CSV_COLUMNS) + '\n') # write CSV header
first_file = True
for slide_csv in tqdm(dataset_dir.glob("*/dataset.csv"), desc="Merging dataset.csv", unit='file'):
logging.info(f"Merging slide {slide_csv}")
content = slide_csv.read_text()
if not first_file:
content = content[content.index('\n') + 1:] # discard header row for all but the first file
full_csv_file.write(content)
first_file = False
return full_csv
def main(slides_dataset: SlidesDataset, root_output_dir: Union[str, Path],
level: int, tile_size: int, margin: int, foreground_threshold: Optional[float],
occupancy_threshold: float, parallel: bool = False, overwrite: bool = False,
n_slides: Optional[int] = None) -> None:
"""Process a slides dataset to produce a tiles dataset.
    :param slides_dataset: Input slides dataset object.
:param root_output_dir: The root directory of the output tiles dataset.
:param level: Magnification level at which to process the slide.
:param tile_size: Lateral dimensions of each tile, in pixels.
:param margin: Margin around the foreground bounding box, in pixels at lowest resolution.
:param foreground_threshold: Luminance threshold (0 to 255) to determine tile occupancy.
If `None` (default), an optimal threshold will be estimated automatically.
:param occupancy_threshold: Threshold (between 0 and 1) to determine empty tiles to discard.
:param parallel: Whether slides should be processed in parallel with multiprocessing.
:param overwrite: Whether to overwrite an existing output tiles dataset. If `True`, will delete
and recreate `root_output_dir`, otherwise will resume by skipping already processed slides.
:param n_slides: If given, limit the total number of slides for debugging.
"""
# Ignoring some types here because mypy is getting confused with the MONAI Dataset class
# to select a subsample use keyword n_slides
dataset = Dataset(slides_dataset)[:n_slides] # type: ignore
output_dir = Path(root_output_dir)
logging.info(f"Creating dataset of level-{level} {tile_size}x{tile_size} "
f"{slides_dataset.__class__.__name__} tiles at: {output_dir}")
if overwrite and output_dir.exists():
shutil.rmtree(output_dir)
output_dir.mkdir(parents=True, exist_ok=not overwrite)
func = functools.partial(process_slide, level=level, margin=margin, tile_size=tile_size,
foreground_threshold=foreground_threshold,
occupancy_threshold=occupancy_threshold, output_dir=output_dir,
tile_progress=not parallel)
if parallel:
import multiprocessing
pool = multiprocessing.Pool()
map_func = pool.imap_unordered # type: ignore
else:
map_func = map # type: ignore
list(tqdm(map_func(func, dataset), desc="Slides", unit="img", total=len(dataset))) # type: ignore
if parallel:
pool.close()
logging.info("Merging slide files in a single file")
merge_dataset_csv_files(output_dir)
if __name__ == '__main__':
from InnerEye.ML.Histopathology.datasets.tcga_prad_dataset import TcgaPradDataset
# Example set up for an existing slides dataset:
main(slides_dataset=TcgaPradDataset("/tmp/datasets/TCGA-PRAD"),
root_output_dir="/datadrive/TCGA-PRAD_tiles",
n_slides=5,
level=3,
tile_size=224,
margin=64,
foreground_threshold=None,
occupancy_threshold=0.05,
parallel=False,
overwrite=True)
| 47.921824 | 109 | 0.675095 | 1,903 | 14,712 | 5.03258 | 0.191802 | 0.015349 | 0.011277 | 0.014618 | 0.228673 | 0.164561 | 0.154119 | 0.142425 | 0.120706 | 0.120706 | 0 | 0.005419 | 0.222268 | 14,712 | 306 | 110 | 48.078431 | 0.831323 | 0.319059 | 0 | 0.055556 | 0 | 0 | 0.10563 | 0.026151 | 0 | 0 | 0 | 0.003268 | 0 | 1 | 0.055556 | false | 0 | 0.1 | 0 | 0.205556 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
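A small sketch of the core tiling step above applied to an in-memory array (not from the source); the synthetic image stands in for a loaded slide and the thresholds are arbitrary.

import numpy as np
from InnerEye.ML.Histopathology.preprocessing.create_tiles_dataset import generate_tiles

# synthetic RGB slide in (C, H, W) format: white background with a darker "tissue" square
slide = np.full((3, 896, 896), 255, dtype=np.uint8)
slide[:, 100:500, 100:500] = 60

tiles, locations, occupancies, n_discarded = generate_tiles(
    slide, tile_size=224, foreground_threshold=200, occupancy_threshold=0.05)
print(tiles.shape, locations.shape, n_discarded)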
87e688d700f54fc537206d01ab4e8ffb5de7947f | 19,847 | py | Python | analyzere_extras/visualizations.py | analyzere/analyzere-python-viz | bbd401392888c7165a1f36ff12e130728c944c2b | [
"MIT"
] | 1 | 2017-02-15T13:57:16.000Z | 2017-02-15T13:57:16.000Z | analyzere_extras/visualizations.py | analyzere/analyzere-python-viz | bbd401392888c7165a1f36ff12e130728c944c2b | [
"MIT"
] | 5 | 2017-01-25T22:19:20.000Z | 2017-02-09T18:03:36.000Z | analyzere_extras/visualizations.py | analyzere/analyzere-python-extras | bbd401392888c7165a1f36ff12e130728c944c2b | [
"MIT"
] | null | null | null | import hashlib
import itertools
from collections import OrderedDict
from analyzere import LayerView
from graphviz import Digraph
try:
from IPython.display import FileLink
_in_notebook = True
except ImportError:
_in_notebook = False
from sys import float_info
# 12 distinct colors for colorizing edges
color_pallette = ['#000000',
'#0265d8',
'#d9c91b',
'#b031b7',
'#6ddc8f',
'#ff479c',
'#1d5f1c',
'#eb0d55',
'#a4c2ff',
'#ec611a',
'#ff99cc',
'#ae2e00']
def _format_description(description):
"""Clean the description of a node for display in Graphviz"""
return description.replace('\'', '').encode('unicode_escape').decode()
def _format_DateField(df):
"""Format a Date field, chopping off the time portion"""
return df.date().isoformat()
def _format_MoneyField(mf):
"""Format a MoneyField to '<unlimited|value> CCY' """
if mf.value == float_info.max:
formatted = 'unlimited'
else:
formatted = '{:,.0f} {}'.format(float(mf.value), mf.currency)
return formatted
def _format_filters(filters):
num_filters = len(filters)
if num_filters == 0:
filters_value = '(empty)'
elif num_filters < 4:
filter_strings = []
for f in filters:
filter_strings.append("'{}'".format(f.name))
filters_value = '[{}]'.format(', '.join(filter_strings))
else:
filters_value = '({} filters)'.format(num_filters)
return filters_value
def _format_reinstatements(reinstatements):
num_reinstatements = len(reinstatements)
if num_reinstatements > 4:
reinsts_value = '{}'.format(num_reinstatements)
else:
reinstatements = ['{}/{}'.format(r.premium, r.brokerage)
for r in reinstatements]
reinsts_value = '[{}]'.format(', '.join(reinstatements))
return reinsts_value
def _format_coverage(layer):
coverage = ['{}'.format(_format_DateField(layer.inception_date))
if hasattr(layer, 'inception_date') else '-inf',
'{}'.format(_format_DateField(layer.expiry_date))
if hasattr(layer, 'expiry_date') else 'inf']
return '[{}]'.format(', '.join(coverage))
class FormattingHelper:
def __init__(self, layer):
self._layer = layer
self._terms = OrderedDict()
self.warning = False
def append(self, required_attr, display_name=None,
formatter=lambda x: str(x),
condition=lambda: True,
warning=lambda: False):
if not display_name:
display_name = required_attr
if not hasattr(self._layer, required_attr) or not condition():
return
attr = getattr(self._layer, required_attr)
self._terms[display_name] = formatter(attr)
self.warning |= warning()
def append_term(self, term, value):
self._terms[term] = value
def formatted_terms(self):
leading_text = '\n' if self._terms else ''
return (leading_text
+ '\n'.join(['{}={}'.format(key, value)
for key, value in self._terms.items()]))
def _format_layer_terms(layer):
"""Get the terms for the given layer for display in Graphviz"""
formatter = FormattingHelper(layer)
if hasattr(layer, 'inception_date') or hasattr(layer, 'expiry_date'):
formatter.append_term('coverage', _format_coverage(layer))
formatter.append('participation', display_name='share',
formatter=lambda x: '{}%'.format(x*100),
warning=lambda: layer.participation == 0.0)
# LossRank
formatter.append(required_attr='criterion', display_name='criterion')
formatter.append(required_attr='count', display_name='count')
# FilterLayer
formatter.append('filters', formatter=_format_filters)
formatter.append('invert',
warning=lambda: (not layer.invert
and layer.type == 'FilterLayer'
and len(layer.filters) == 0))
# CatXL, AggXL, Generic
formatter.append('attachment', display_name='occ_att',
formatter=_format_MoneyField,
warning=lambda: (layer.attachment.value
>= float_info.max))
formatter.append(required_attr='limit', display_name='occ_lim',
formatter=_format_MoneyField)
# CatXL, IndustryLossWarranty
formatter.append('nth')
formatter.append('reinstatements', display_name='reinsts',
formatter=_format_reinstatements,
condition=lambda: layer.reinstatements)
formatter.append('franchise', formatter=_format_MoneyField)
# QuotaShare, AggregateQuotaShare
formatter.append('event_limit', display_name='event_lim',
formatter=_format_MoneyField)
# AggXL, AggregateQuotaShare
formatter.append('aggregate_attachment',
display_name='agg_att',
formatter=_format_MoneyField,
warning=lambda: (layer.aggregate_attachment.value
>= float_info.max))
formatter.append('aggregate_limit', display_name='agg_lim',
formatter=_format_MoneyField)
# AggregateQuotaShare
formatter.append('aggregate_period', display_name='agg_period')
formatter.append('aggregate_reset', display_name='agg_reset',
condition=lambda: layer.aggregate_reset > 1)
# SurplusShare
formatter.append('sums_insured', formatter=_format_MoneyField)
formatter.append('retained_line', formatter=_format_MoneyField)
formatter.append('number_of_lines')
# IndustryLossWarranty
formatter.append('trigger', formatter=_format_MoneyField)
formatter.append('payout', formatter=_format_MoneyField)
# NoClaimsBonus
formatter.append('payout_date', formatter=_format_DateField)
formatter.append('payout_amount', display_name='payout',
formatter=_format_MoneyField)
formatter.append('premium', formatter=_format_MoneyField,
condition=lambda: layer.premium is not None)
return formatter.formatted_terms(), formatter.warning
class LayerViewDigraph(object):
"""Class that provides simple visualization of Analyze Re LayerViews.
Using the 'graphviz' python package, this class enables users to
visualize Analyze Re LayerView objects.
"""
def _update_filename(self, filename=None):
# build filename with format:
# '<lv_id>_<rankdir>_<[not-]compact>_<with[out]-terms>\
# _<with[out]-warnings><_depth-><_srclimit-x>\
# <y-colors-by-<depth|breadth>>.<format>'
compact = 'compact' if self._compact else 'not-compact'
terms = 'with-terms' if self._with_terms else 'without-terms'
warnings = ('warnings-enabled' if self._warnings
else 'warnings-disabled')
depth = '_depth-{}'.format(self._max_depth) if self._max_depth else ''
src_limit = ('_srclimit-{}'.format(self._max_sources)
if self._max_sources else '')
colors = ('_{}-colors-by-{}'.format(self._colors, self._color_mode)
if self._colors > 1 else '')
self._filename = (filename if filename else
'{}_{}_{}_{}_{}{}{}{}'.format(self._lv.id,
self._rankdir,
compact,
terms,
warnings,
depth,
src_limit,
colors))
def _generate_nodes(self, l,
parent_hash=None,
prefix=None,
current_depth=0):
# default node attributes
self._graph.attr('node', shape='box', style='filled',
fillcolor='white')
# hash the current node to see if it is unique
node_hash = hashlib.md5((str(l)
+ (parent_hash or ''))
.encode('utf-8')).hexdigest()
if (node_hash not in self.unique_nodes) or not self._compact:
self.unique_nodes[node_hash] = next(self.sequence)
if l.type == 'NestedLayer':
prefix = ('"{}"\nNested'.format(
_format_description(l.description))
if l.description else 'Nested')
sink_hash = self._generate_nodes(l.sink,
parent_hash=node_hash,
prefix=prefix,
current_depth=current_depth)
if self._max_depth is None or current_depth < self._max_depth:
# if we are enforcing a source limit, we will return early
# after creating a summary node
if (self._max_sources is not None and
len(l.sources) > self._max_sources):
sources_id = '{} sources'.format(sink_hash)
if not(sources_id, sink_hash) in self.edges:
self.edges.add((sources_id, sink_hash))
self._graph.node(sources_id,
color=color_pallette[self._color_idx],
label='{} sources'.format(
len(l.sources)))
self._graph.edge(sources_id, sink_hash,
color=color_pallette[self._color_idx])
return sink_hash
idx = 0
for s in l.sources:
if idx > 0 and self._color_mode == 'breadth':
self._color_idx = (self._color_idx + 1) % self._colors
source = self._generate_nodes(
s, current_depth=current_depth+1)
# We have to reset the color to match the parent's color
if self._color_mode == 'depth':
self._color_idx = current_depth % self._colors
if not (source, sink_hash) in self.edges:
self._graph.edge(source, sink_hash,
color=color_pallette[self._color_idx])
self.edges.add((source, sink_hash))
idx += 1
return sink_hash
else:
name = prefix + ' ' if prefix else ''
name += l.type + ' '
name += ('"{}"'.format(_format_description(l.description))
if l.description else
'({})'.format(self.unique_nodes[node_hash]))
terms, warning = _format_layer_terms(l)
name += terms if self._with_terms else ''
if self._color_mode == 'depth':
self._color_idx = current_depth % self._colors
# color nodes with 'warnings' as tomato iff configured
self._graph.node(node_hash, label=name,
color=color_pallette[self._color_idx],
fillcolor='tomato' if warning and self._warnings
else 'white')
# Now process LossSets
if self._color_mode == 'depth':
self._color_idx = (current_depth+1) % self._colors
idx = 0
for ls in l.loss_sets:
if idx > 0 and self._color_mode == 'breadth':
self._color_idx = (self._color_idx + 1) % self._colors
ls_name = '{} "{}"'.format(
ls.type, _format_description(ls.description))
ls_id = '{}{}'.format(ls.id,
' ({})'.format(next(self.sequence))
if not self._compact else '')
if not (ls_id, node_hash) in self.edges:
self._graph.node(ls_id, label=ls_name,
color=color_pallette[self._color_idx],
fillcolor='lightgrey')
self._graph.edge(ls_id, node_hash,
color=color_pallette[self._color_idx])
self.edges.add((ls_id, node_hash,))
idx += 1
return node_hash
def __init__(self, lv, with_terms=True, compact=True,
format='png', rankdir='BT', warnings=True,
max_depth=None, max_sources=None, colors=1,
color_mode='breadth'):
"""Generate a Graphviz.Digraph for the given LayerView
Optional parameters that control the visualization:
with_terms specify that Layer terms are included in each
node of the graph (default=True).
compact controls if duplicate nodes should be omitted
(default=True).
format exposes the graphviz 'format' option which include
'pdf', 'png', etc. (default='png').
rankdir exposes the graphviz 'rankdir' option that controls
the orientation of the graph. Options include
'TB', 'LR', 'BT', 'RL', corresponding to directed
graphs drawn from top to bottom, from left to right,
from bottom to top, and from right to left,
respectively (default='BT').
warnings highlight nodes with suspicious terms by coloring them
red (default=True).
max_depth The maximum depth of the graph to process.
max_sources The maximum number of Loss sources to graph in detail
for a single node.
colors The number of colors to be used when coloring edges.
color_mode The mode to use when applying colors.
Options include: ['breadth', 'depth']
"""
# sanity check on the input
if not isinstance(lv, LayerView):
raise ValueError('must supply a valid LayerView instance')
self._lv = lv
self._with_terms = with_terms
self._rankdir = rankdir
self._format = format
self._compact = compact
self._warnings = warnings
self._max_depth = max_depth
self._max_sources = max_sources
self._colors = colors
self._color_idx = 0
self._color_mode = color_mode
# initialize the filename
self._update_filename()
# defaults for the Digraph, overridden by plot()
self._graph = Digraph(format=format,
graph_attr={'rankdir': rankdir})
# now build the "tree" of nodes
# sequencer for identifying 'ambiguous' nodes
self.sequence = itertools.count()
# hash map of unique nodes (prevents duplication)
self.unique_nodes = {}
# set of unique edges (prevents duplicates)
self.edges = set()
self._generate_nodes(lv.layer)
@staticmethod
def from_id(lv_id, with_terms=True, compact=True,
format='png', rankdir='BT',
max_depth=None, max_sources=None,
colors=1, color_mode='breadth'):
"""Generate a LayerViewDigraph for the given LayerView Id
Optional parameters:
with_terms specify that Layer terms are included in each
node of the graph.
compact controls if duplicate nodes should be omitted
(default=True).
format exposes the graphviz 'format' option which include
'pdf', 'png', etc.
rankdir exposes the graphviz 'rankdir' option that controls
the orientation of the graph. Options include
'TB', 'LR', 'BT', 'RL', corresponding to directed
graphs drawn from top to bottom, from left to right,
from bottom to top, and from right to left,
respectively.
max_depth The maximum depth of the graph to process.
max_sources The maximum number of Loss sources to graph in detail
for a single node.
colors The number of colors to be used when coloring edges.
color_mode The mode to use when applying colors.
Options include: ['breadth', 'depth']
"""
# This will raise and exception if any of the following analyzere
# variables are not defined:
# - analyzere.base_url
# - analyzere.username
# - analyzere.password
return LayerViewDigraph(LayerView.retrieve(lv_id), with_terms, compact,
format=format, rankdir=rankdir,
max_depth=max_depth, max_sources=max_sources)
def render(self, filename=None, view=False, format=None, rankdir=None):
"""Render a LayerViewDigraph with the Graphviz engine
Optional parameters:
filename specify the filename to be used when rendering.
view exposes the graphviz 'view' option that uses the
default application to open the rendered graph
(default=False).
format exposes the graphviz 'format' option which include
'pdf', 'png', etc.
rankdir exposes the graphviz 'rankdir' option that controls
the orientation of the graph. Options include
'TB', 'LR', 'BT', 'RL', corresponding to directed
graphs drawn from top to bottom, from left to right,
from bottom to top, and from right to left,
respectively.
"""
# check for 'render-time' overrides
if rankdir:
self._graph.graph_attr['rankdir'] = rankdir
self._rankdir = rankdir
if format:
self._graph.format = format
self._format = format
# update the filename
self._update_filename(filename)
try:
# protect against use cases when the default rendering tool
# is not able to render the result
# if we are in a python notebook we will use the FileLink feature
# to return a click-able link to download the file
if not view:
return (FileLink(self._graph.render(self._filename, view=view))
if _in_notebook else
self._graph.render(self._filename, view=view))
else:
# view=True, do not return the FileLink or filename
self._graph.render(self._filename, view=view)
except RuntimeError:
# native display failed, revert to returning a clickable FileLink
# iff we are in a python notebook, otherwise return the filename.
return (FileLink(self._graph.render(self._filename, view=False))
if _in_notebook else
self._graph.render(self._filename, view=False))
| 41.176349 | 79 | 0.548194 | 2,024 | 19,847 | 5.1833 | 0.182806 | 0.034315 | 0.016014 | 0.012582 | 0.308836 | 0.274902 | 0.256124 | 0.234296 | 0.213516 | 0.195406 | 0 | 0.005465 | 0.363833 | 19,847 | 481 | 80 | 41.261954 | 0.82544 | 0.254043 | 0 | 0.148936 | 0 | 0.003546 | 0.065858 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056738 | false | 0 | 0.028369 | 0 | 0.14539 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
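A short usage sketch for the class above (not from the source); the Analyze Re endpoint, credentials and LayerView UUID are placeholders.

import analyzere
from analyzere_extras.visualizations import LayerViewDigraph

# the module's retrieval path requires these analyzere settings to be defined
analyzere.base_url = 'https://api.example.com/'
analyzere.username = 'USERNAME_PLACEHOLDER'
analyzere.password = 'PASSWORD_PLACEHOLDER'

graph = LayerViewDigraph.from_id('LAYER_VIEW_UUID_PLACEHOLDER',
                                 with_terms=True, compact=True, rankdir='BT')
graph.render(view=False)  # writes the graph to <generated filename>.png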
87e6bf19bbd48108b5b3045611d8790865d85118 | 3,922 | py | Python | semantic_similarity/k_nearest_neighbors.py | usc-isi-i2/kgtk-similarity | 2ae4c331ad9ce038317c79951f68742828927900 | [
"MIT"
] | 8 | 2021-05-03T18:19:53.000Z | 2022-03-31T19:45:53.000Z | semantic_similarity/k_nearest_neighbors.py | usc-isi-i2/wikidata-semantic-similarity | 2ae4c331ad9ce038317c79951f68742828927900 | [
"MIT"
] | 3 | 2021-10-18T16:35:19.000Z | 2022-01-18T18:39:56.000Z | semantic_similarity/k_nearest_neighbors.py | usc-isi-i2/wikidata-semantic-similarity | 2ae4c331ad9ce038317c79951f68742828927900 | [
"MIT"
] | 2 | 2021-08-29T19:58:31.000Z | 2022-02-17T07:37:50.000Z | import faiss
import json
from semantic_similarity.utility import Utility
import semantic_similarity.kypher as kypher
config = json.load(open('semantic_similarity/config.json'))
class FAISS_Index(object):
# make the actual index objects a singleton, so we don't load them multiple times:
_index = None
_qnode_to_index = None
_index_to_qnode = None
DEFAULT_INDEX_NPROBE = 64
DEFAULT_INDEX_HNSW_SEARCH_DEPTH = 128
def __init__(self, efSearch: int = None, nprobe: int = None):
self.config = config
self.util = Utility()
self.api_version_1 = self.util.api_version_1
self.backend = kypher.get_synced_backend()
        # parenthesized so an explicitly passed value is honoured for both API versions
        efSearch = efSearch or (400 if self.api_version_1 else self.DEFAULT_INDEX_HNSW_SEARCH_DEPTH)
        nprobe = nprobe or (8 if self.api_version_1 else self.DEFAULT_INDEX_NPROBE)
# TO DO: make this a constructor parameter, since eventually we'll have multiple indexes:
index_file = config['faiss_index_file'] if self.api_version_1 else config.get("COMPLEX_EMB_FAISS_INDEX")
if self._index is None and index_file:
print('Loading FAISS index...')
FAISS_Index._index = faiss.read_index(index_file)
try:
# Set the parameters
faiss.downcast_index(self._index.quantizer).hnsw.efSearch = efSearch
self._index.nprobe = nprobe
except Exception as e:
print(e)
print('Cannot set parameters for this index')
if self.api_version_1:
# Load the entity to index map
with open(self.config['qnode_to_ids_file']) as fd:
FAISS_Index._qnode_to_index = json.load(fd)
FAISS_Index._index_to_qnode = {v: k for k, v in self._qnode_to_index.items()}
def get_neighbors_v1(self, qnode: str, k: int = 5):
''' Find the neighbors for the given qnode '''
# faiss returns the same qnode as first result
k += 1
scores, candidates = self._index.search(self._index.reconstruct(self._qnode_to_index[qnode]).reshape(1, -1), k)
candidates = [self._index_to_qnode[x] for x in candidates[0] if x != -1]
scores = scores[0][:len(candidates)]
scores = [float(x) for x in scores][1:]
candidates = candidates[1:]
# this takes the most time for larger values of 'k', so speeding up that part
# and maybe restricting to embedding types we actually need would help:
candidates_label_dict = self.util.get_qnode_details(candidates)
result = []
tuples = [(c, s) for c, s in zip(candidates, scores)]
for t in tuples:
_qnode = t[0]
score = t[1]
label = candidates_label_dict.get(_qnode, {}).get('label', '')
result.append({
"qnode": _qnode,
"score": score,
"label": label
})
return result
def get_neighbors(self, qnode: str, k: int = 5):
"""Find the top-k nearest neighbors for the given 'qnode'.
"""
if self.api_version_1:
return self.get_neighbors_v1(qnode, k=k)
result = []
embed = self.backend.get_node_embedding(qnode, 'complex')
if embed is None:
return result
# NOTE: faiss returns the identical 'qnode' as the first result:
scores, candidates = self._index.search(embed.reshape(1, -1), k + 1)
for i, (cand, score) in enumerate(zip(candidates[0], scores[0])):
if i > 0 and cand != -1:
for node, numid, label in self.backend.get_node_and_label_from_complex_emb_numid(cand):
result.append({"qnode": node, "score": score, "label": self.util.normalize_label(label)})
break
return result
@property
def index(self):
return self._index
| 39.22 | 119 | 0.617287 | 521 | 3,922 | 4.443378 | 0.287908 | 0.031102 | 0.033261 | 0.038877 | 0.142981 | 0.061771 | 0.0527 | 0.0527 | 0.031965 | 0 | 0 | 0.013281 | 0.289648 | 3,922 | 99 | 120 | 39.616162 | 0.81766 | 0.146864 | 0 | 0.101449 | 0 | 0 | 0.056207 | 0.016231 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.057971 | 0.014493 | 0.275362 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
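A brief usage sketch for the wrapper above (not from the source); it assumes the FAISS index and mapping files referenced by semantic_similarity/config.json exist, and the QNode is an arbitrary example.

from semantic_similarity.k_nearest_neighbors import FAISS_Index

index = FAISS_Index(efSearch=256, nprobe=16)
for neighbor in index.get_neighbors('Q76', k=5):
    print(neighbor['qnode'], neighbor['label'], neighbor['score'])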
87e7312e6a0cb7d550c5cc3146437d11d65a318f | 3,372 | py | Python | ckan/lib/i18n.py | dadosgovbr/ckan | f2ed9a03581c3229cf01209baa9a02a71642e0b6 | [
"BSD-3-Clause"
] | 2 | 2015-11-05T12:04:52.000Z | 2017-08-09T11:29:11.000Z | ckan/lib/i18n.py | dadosgovbr/ckan | f2ed9a03581c3229cf01209baa9a02a71642e0b6 | [
"BSD-3-Clause"
] | null | null | null | ckan/lib/i18n.py | dadosgovbr/ckan | f2ed9a03581c3229cf01209baa9a02a71642e0b6 | [
"BSD-3-Clause"
] | null | null | null | import os
from babel import Locale, localedata
from babel.core import LOCALE_ALIASES
from pylons import config
from pylons import i18n
import ckan.i18n
LOCALE_ALIASES['pt'] = 'pt_BR' # Default Portuguese language to
# Brazilian territory, since
# we don't have a Portuguese territory
# translation currently.
def _get_locales():
assert not config.get('lang'), \
'"lang" config option not supported - please use ckan.locale_default instead.'
locales_offered = config.get('ckan.locales_offered', '').split()
filtered_out = config.get('ckan.locales_filtered_out', '').split()
locale_order = config.get('ckan.locale_order', '').split()
locale_default = config.get('ckan.locale_default', 'en')
locales = ['en']
i18n_path = os.path.dirname(ckan.i18n.__file__)
locales += [l for l in os.listdir(i18n_path) if localedata.exists(l)]
assert locale_default in locales, \
'default language "%s" not available' % locale_default
locale_list = []
for locale in locales:
# no duplicates
if locale in locale_list:
continue
# if offered locales then check locale is offered
if locales_offered and locale not in locales_offered:
continue
# remove if filtered out
if locale in filtered_out:
continue
# ignore the default as it will be added first
if locale == locale_default:
continue
locale_list.append(locale)
# order the list if specified
ordered_list = [locale_default]
for locale in locale_order:
if locale in locale_list:
ordered_list.append(locale)
# added so remove from our list
locale_list.remove(locale)
# add any remaining locales not ordered
ordered_list += locale_list
return ordered_list
available_locales = None
locales = None
locales_dict = None
def get_locales():
''' Get list of available locales
e.g. [ 'en', 'de', ... ]
'''
global locales
if not locales:
locales = _get_locales()
return locales
def get_locales_dict():
''' Get a dict of the available locales
e.g. { 'en' : Locale('en'), 'de' : Locale('de'), ... } '''
global locales_dict
if not locales_dict:
locales = _get_locales()
locales_dict = {}
for locale in locales:
locales_dict[str(locale)] = Locale.parse(locale)
return locales_dict
def get_available_locales():
''' Get a list of the available locales
e.g. [ Locale('en'), Locale('de'), ... ] '''
global available_locales
if not available_locales:
available_locales = map(Locale.parse, get_locales())
return available_locales
def handle_request(request, tmpl_context):
''' Set the language for the request '''
lang = request.environ.get('CKAN_LANG') or \
config.get('ckan.locale_default', 'en')
if lang != 'en':
i18n.set_lang(lang)
tmpl_context.language = lang
return lang
def get_lang():
    ''' Returns the current language. Based on pylons.i18n.get_lang but
    works when set_lang has not been run (i.e. still in English). '''
langs = i18n.get_lang()
if langs:
return langs[0]
else:
return 'en'
| 31.514019 | 90 | 0.62841 | 424 | 3,372 | 4.832547 | 0.271226 | 0.070278 | 0.031723 | 0.027818 | 0.080039 | 0.04978 | 0 | 0 | 0 | 0 | 0 | 0.006967 | 0.276394 | 3,372 | 106 | 91 | 31.811321 | 0.832787 | 0.216785 | 0 | 0.140845 | 0 | 0 | 0.093447 | 0.009694 | 0 | 0 | 0 | 0 | 0.028169 | 1 | 0.084507 | false | 0 | 0.084507 | 0 | 0.267606 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87e916ef8d12a7d97ac6246b7c43bf5f17676974 | 842 | py | Python | RubyClass.py | krishnan2784/picosamples | 1d3e59adc93474e7a5ccb2c7cf0f8a298a50d6db | [
"Apache-2.0"
] | null | null | null | RubyClass.py | krishnan2784/picosamples | 1d3e59adc93474e7a5ccb2c7cf0f8a298a50d6db | [
"Apache-2.0"
] | null | null | null | RubyClass.py | krishnan2784/picosamples | 1d3e59adc93474e7a5ccb2c7cf0f8a298a50d6db | [
"Apache-2.0"
] | null | null | null | import machine
import utime
import urandom
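# Two-player reaction game for a Raspberry Pi Pico: the LED marks the start after
# a random 5-10 second delay, and the first button pressed (left or right) wins.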
pressed = False
led = machine.Pin(15, machine.Pin.OUT)
left_button = machine.Pin(14, machine.Pin.IN, machine.Pin.PULL_DOWN)
right_button = machine.Pin(16, machine.Pin.IN, machine.Pin.PULL_DOWN)
fastest_button = None
def button_handler(pin):
global pressed
if not pressed:
pressed=True
global fastest_button
fastest_button = pin
led.value(1)
utime.sleep(urandom.uniform(5, 10))
led.value(0)
timer_start = utime.ticks_ms()
left_button.irq(trigger=machine.Pin.IRQ_RISING, handler=button_handler)
right_button.irq(trigger=machine.Pin.IRQ_RISING, handler=button_handler)
while fastest_button is None:
utime.sleep(1)
if fastest_button is left_button:
print("Left Player wins!")
elif fastest_button is right_button:
print("Right Player wins!")
| 25.515152 | 72 | 0.744656 | 126 | 842 | 4.809524 | 0.365079 | 0.165017 | 0.074257 | 0.062706 | 0.280528 | 0.280528 | 0.280528 | 0.181518 | 0.181518 | 0.181518 | 0 | 0.016878 | 0.155582 | 842 | 32 | 73 | 26.3125 | 0.835443 | 0 | 0 | 0 | 0 | 0 | 0.041568 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.115385 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87e918bd1528673595c311f3e80a5761ff9b19a5 | 1,981 | py | Python | app/seeder.py | ay1man4/catalog | afb46679f71f686d37f6922da18a9f1d18cc271f | [
"Unlicense"
] | null | null | null | app/seeder.py | ay1man4/catalog | afb46679f71f686d37f6922da18a9f1d18cc271f | [
"Unlicense"
] | 1 | 2021-04-30T20:42:30.000Z | 2021-04-30T20:42:30.000Z | app/seeder.py | ay1man4/CatalogApp | afb46679f71f686d37f6922da18a9f1d18cc271f | [
"Unlicense"
] | null | null | null | # seeder will create sample database
from app.database_setup import DBSession, Category, Item, User
session = DBSession()
# Create Demo User
user = User(name='Admin', email='webmaster@example.com')
session.add(user)
session.commit()
# Mobile brands Catalog
brand = Category(name='Samsung', user=user)
session.add(brand)
session.commit()
models = [
{'name': 'Galaxy S9', 'description': 'Latest mobile from Samsung'},
{'name': 'Galaxy Note 9',
'description': 'Awesome note from Samsung with pen'},
{'name': 'Galaxy A9', 'description': 'Mid end brand from Samsung'},
{'name': 'Galaxy S10', 'description': 'comming soon...'},
]
for model in models:
item = Item(name=model['name'], description=model['description'],
category=brand, user=user)
session.add(item)
session.commit()
brand = Category(name='Apple', user=user)
session.add(brand)
session.commit()
models = [
{'name': 'iPhone X', 'description': 'Latest mobile from Apple'},
{'name': 'iPhone 9', 'description': 'Awesome and state of art'},
{'name': 'iPhone 4',
'description': 'very old but still the strongest one'},
{'name': 'iPhone 10', 'description': 'comming soon...'},
]
for model in models:
item = Item(name=model['name'], description=model['description'],
category=brand, user=user)
session.add(item)
session.commit()
brand = Category(name='Google', user=user)
session.add(brand)
session.commit()
models = [
{'name': 'Pixel 2', 'description': 'Latest mobile from Google'},
{'name': 'Pixel 2 XL', 'description': 'A large version of Pixel 2'},
{'name': 'Nexus 6P', 'description': 'Great mobile but not popular'},
{'name': 'Android One', 'description': 'Low end brand for all'},
]
for model in models:
item = Item(name=model['name'], description=model['description'],
category=brand, user=user)
session.add(item)
session.commit()
print("Catalog items have been added!")
| 32.47541 | 72 | 0.649672 | 245 | 1,981 | 5.24898 | 0.338776 | 0.068429 | 0.069984 | 0.083981 | 0.429238 | 0.429238 | 0.429238 | 0.429238 | 0.429238 | 0.321928 | 0 | 0.008025 | 0.182231 | 1,981 | 60 | 73 | 33.016667 | 0.785802 | 0.03685 | 0 | 0.5 | 0 | 0 | 0.372374 | 0.011029 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02 | 0 | 0.02 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87e976417dcc7b5e28d5cd9a0606fccec52af5f2 | 1,637 | py | Python | pydevice/group.py | aseli1/dm_requests | 64bf91ba17f2b2112d0f0001b6a8e8bb977c3ad1 | [
"MIT"
] | null | null | null | pydevice/group.py | aseli1/dm_requests | 64bf91ba17f2b2112d0f0001b6a8e8bb977c3ad1 | [
"MIT"
] | null | null | null | pydevice/group.py | aseli1/dm_requests | 64bf91ba17f2b2112d0f0001b6a8e8bb977c3ad1 | [
"MIT"
] | null | null | null | class Group():
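    """Thin client for the Device Magic Groups REST endpoint (api/v2)."""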
def __init__(self, connector, org_id):
self.connector = connector
self.org_id = org_id
self.base_url = 'https://api.devicemagic.com/api/v2/organizations/' \
'{0}/groups'.format(self.org_id)
self.headers = {'Content-Type': 'application/json'}
self.format = 'json'
def all(self):
path = self.base_url + '.' + self.format
request = self.connector.execute_request(path, 'GET')
return request
def create(self, json):
path = self.base_url
request = self.connector.execute_request(
path, 'POST', data=json, headers=self.headers, return_json=False)
if request.status_code >= 200 and request.status_code < 300:
return 'Group created'
else:
return self.connector.failed_request_details(request)
def update(self, group_id, json):
path = self.base_url + '/' + str(group_id)
request = self.connector.execute_request(
path, 'PUT', data=json, headers=self.headers, return_json=False)
if request.status_code >= 200 and request.status_code < 300:
return 'Group updated'
else:
return self.connector.failed_request_details(request)
def delete(self, group_id):
path = self.base_url + '/' + str(group_id)
request = self.connector.execute_request(
path, 'DELETE', return_json=False)
if request.status_code >= 200 and request.status_code < 300:
return 'Group deleted'
else:
return self.connector.failed_request_details(request)
| 38.97619 | 77 | 0.616371 | 195 | 1,637 | 4.989744 | 0.25641 | 0.120247 | 0.10483 | 0.061665 | 0.663926 | 0.640288 | 0.562179 | 0.562179 | 0.510791 | 0.40185 | 0 | 0.016807 | 0.27306 | 1,637 | 41 | 78 | 39.926829 | 0.80084 | 0 | 0 | 0.388889 | 0 | 0 | 0.09102 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0 | 0 | 0.361111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87ee30728084dd2bcc48b49a33bf1ea779e3ac6b | 1,120 | py | Python | CompressionAutoEncoder/train.py | DLR-RM/SingleViewReconstruction | 8669565c1addee1763896da4b8c4811bb6f55e45 | [
"MIT"
] | 190 | 2020-08-20T09:29:35.000Z | 2022-03-26T20:14:31.000Z | CompressionAutoEncoder/train.py | yohannes-taye/SingleViewReconstruction | 182ab38f662dd1fbbf5f8f70b212747872a2a398 | [
"MIT"
] | 10 | 2020-09-22T13:03:18.000Z | 2022-01-18T08:37:47.000Z | CompressionAutoEncoder/train.py | yohannes-taye/SingleViewReconstruction | 182ab38f662dd1fbbf5f8f70b212747872a2a398 | [
"MIT"
] | 20 | 2020-08-26T11:45:24.000Z | 2022-01-27T04:28:29.000Z |
import os
from src.configreader import ConfigReader
from src.dataset import Dataset
from src.autoencoder import Autoencoder
if __name__ == "__main__":
config_path = os.path.join(os.path.dirname(__file__), "config.json")
config_obj = ConfigReader(config_path)
dataset = Dataset(config_obj)
x_train = dataset.load_train_data()
x_val = dataset.load_val_data()
x_eval = dataset.load_eval_data()
model = Autoencoder(config_obj, dataset)
model.set_iterators(x_train, x_val, eval_from_input_iterator=x_eval)
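    # Main training loop: runs the (slow) evaluation every 500 steps and saves
    # checkpoints periodically.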
for i in range(12000):
        # the evaluation is quite time intensive; turn it off to increase the speed
do_evaluation = i % 500 == 0 and i > 0
stats = model.train(do_evaluation)
print("{}: {}".format(i, stats["loss"]))
if "val_loss" in stats:
print("Val loss: {}".format(stats["val_loss"]))
print("IO: {}, l1: {}".format(stats['iou'], stats["eval_l1"]))
        if i % 1000 == 0 and i > 0:
model.save(config_obj.data.get_string("model_save_path"))
model.save(config_obj.data.get_string("model_save_path"))
| 33.939394 | 82 | 0.666964 | 158 | 1,120 | 4.436709 | 0.373418 | 0.064194 | 0.014265 | 0.051355 | 0.125535 | 0.125535 | 0.125535 | 0.125535 | 0.125535 | 0.125535 | 0 | 0.019144 | 0.207143 | 1,120 | 32 | 83 | 35 | 0.77027 | 0.064286 | 0 | 0.086957 | 0 | 0 | 0.10622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87f217e942cb81b4ab35435822546cd3d5f3986c | 6,083 | py | Python | pcs_aero/gui.py | Skydivizer/compsci | 2ef7e258f00ebdfb274210f22852619f4007b5b5 | [
"MIT"
] | null | null | null | pcs_aero/gui.py | Skydivizer/compsci | 2ef7e258f00ebdfb274210f22852619f4007b5b5 | [
"MIT"
] | null | null | null | pcs_aero/gui.py | Skydivizer/compsci | 2ef7e258f00ebdfb274210f22852619f4007b5b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module defines the graphical display of the models using opengl
This file is heaviliy based on the sample provided by Dr Gabor.
"""
import sys
import numpy as np
from matplotlib import cm
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
# Constants, Globals
name = 'Drag' # Window name
frameCount, previousTime = 0.0, 0.0 # FPS counter vars
show = 'velocity' # what variable to show
showi = 0 # what index to show in multi dim variables
paused = False
plot_rgba = None
model = None # global pointer to running model
nx, ny = None, None
# show to variable mapping
get_var_map = {
'velocity': lambda: model.velocity.T.flatten(),
'density': lambda: model.density.T.flatten(),
'population': lambda: model.population[showi % 9].T.flatten(),
'equilibrium': lambda: model.equilibrium[showi % 9].T.flatten(),
'force': lambda: model.force[showi % 2].T.flatten(),
}
def get_cmap_from_matplotlib(cmap=cm.coolwarm):
# Create colormap for OpenGL plotting
ncol = cmap.N
cmap_rgba = []
for i in range(ncol - 1):
b, g, r, _ = cmap(
i) # Not sure why this is inverted, I was expecting r, g, b.
cmap_rgba.append(
int(255.0) << 24 | (int(float(r) * 255.0) << 16) | (
int(float(g) * 255.0) << 8) | (int(float(b) * 255.0) << 0))
return np.array(cmap_rgba), len(cmap_rgba)
def display():
plotvar = get_var_map[show]()
minvar = np.min(plotvar)
maxvar = 1.001 * (np.max(plotvar))
# Avoid divide by zero: maxvar == minvar <--> maxvar == minvar == 0
maxvar = 1 if maxvar == minvar else maxvar
# convert the plotvar array into an array of colors to plot
# if the mesh point is solid, make it black
frac = (plotvar[:] - minvar) / (maxvar - minvar)
icol = frac * ncol
plot_rgba[:] = cmap_rgba[icol.astype(np.int)]
plot_rgba[
model.obstacle_mask.T.flatten()] = 0xFF000000 #Color code of black
# Fill the pixel buffer with the plot_rgba array
glBufferData(GL_PIXEL_UNPACK_BUFFER, plot_rgba.nbytes, plot_rgba,
GL_STREAM_COPY)
# Copy the pixel buffer to the texture, ready to display
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, nx, ny, GL_RGBA, GL_UNSIGNED_BYTE,
None)
# Render one quad to the screen and colour it using our texture
# i.e. plot our plotvar data to the screen
glClear(GL_COLOR_BUFFER_BIT)
glBegin(GL_QUADS)
x0, y0 = 0.0, 0.0
x1, y1 = nx, ny
glTexCoord2f(0.0, 0.0)
glVertex3f(x0, y0, 0.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(x1, y0, 0.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(x1, y1, 0.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(x0, y1, 0.0)
glEnd()
glutSwapBuffers()
def resize(w, h):
#GLUT resize callback to allow us to change the window size.
global width, height
width = w
height = h
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0., nx, 0., ny, -200., 200.)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def idle():
global frameCount, previousTime
frameCount = frameCount + 1.0
currentTime = glutGet(GLUT_ELAPSED_TIME)
timeInterval = currentTime - previousTime
# Take an LBM step
if not paused:
model.step()
if (timeInterval > 1000):
fps = frameCount / (timeInterval / 1000.0)
previousTime = currentTime
frameCount = 0.0
drag = model.drag_coefficient
glutSetWindowTitle("Drag {:0.3f} Time {:0.3f}".format(
drag, model.time))
# print()
glutPostRedisplay()
### IO functions
def toggle_pause():
global paused
paused = not paused
def set_show(val):
global show
if val in get_var_map:
show = val
def set_showi(val):
global showi
showi = val
def keyboard(*args):
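    # Dispatch the pressed key through key_action_map; unknown keys are ignored.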
try:
key_action_map[args[0]]()
except KeyError:
pass
def run_opengl():
# OpenGL setup
glutInit(name)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB)
glutInitWindowSize(nx * 1, ny * 1)
glutInitWindowPosition(50, 50)
glutCreateWindow(name)
glClearColor(1.0, 1.0, 1.0, 1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, nx, 0., ny, -200.0, 200.0)
glEnable(GL_TEXTURE_2D)
gl_Tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, gl_Tex)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, nx, ny, 0, GL_RGBA,
GL_UNSIGNED_BYTE, None)
gl_PBO = glGenBuffers(1)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, gl_PBO)
#setup callbacks
glutDisplayFunc(display)
glutReshapeFunc(resize)
glutIdleFunc(idle)
# glutMouseFunc(mouse)
# glutMotionFunc(mouse_motion)
glutKeyboardFunc(keyboard)
# Start main loop
glutMainLoop()
# key to action mapping
key_action_map = {
b'p': toggle_pause,
b'r': lambda: model.reset(),
b'd': lambda: set_show('density'),
b'v': lambda: set_show('velocity'),
b'f': lambda: set_show('population'),
b'e': lambda: set_show('equilibrium'),
b'g': lambda: set_show('force'),
b'q': lambda: sys.exit(),
b's': lambda: model.step(),
b'0': lambda: set_showi(0),
b'1': lambda: set_showi(1),
b'2': lambda: set_showi(2),
b'3': lambda: set_showi(3),
b'4': lambda: set_showi(4),
b'5': lambda: set_showi(5),
b'6': lambda: set_showi(6),
b'7': lambda: set_showi(7),
b'8': lambda: set_showi(8),
b'\x1b': lambda: sys.exit()
}
def run(model_):
# Setup this module with given model.
global model, plot_rgba, cmap_rgba, nx, ny, ncol
model = model_
nx, ny = model.shape
plot_rgba = np.zeros(np.prod(model.shape), dtype=np.uint32)
cmap_rgba, ncol = get_cmap_from_matplotlib()
run_opengl()
| 25.995726 | 78 | 0.643597 | 866 | 6,083 | 4.383372 | 0.32448 | 0.011064 | 0.033193 | 0.020548 | 0.103793 | 0.095364 | 0.061907 | 0.05058 | 0.028451 | 0.028451 | 0 | 0.03963 | 0.236725 | 6,083 | 233 | 79 | 26.107296 | 0.777945 | 0.173599 | 0 | 0.03268 | 0 | 0 | 0.028257 | 0 | 0 | 0 | 0.002004 | 0 | 0 | 1 | 0.065359 | false | 0.006536 | 0.039216 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87f2dd5ca20a21259b50a8741ac21939547fa72f | 31,233 | py | Python | IRIS_data_download/IRIS_download_support/obspy/signal/trigger.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-03-05T01:03:01.000Z | 2020-12-17T05:04:07.000Z | IRIS_data_download/IRIS_download_support/obspy/signal/trigger.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 4 | 2021-03-31T19:25:55.000Z | 2021-12-13T20:32:46.000Z | IRIS_data_download/IRIS_download_support/obspy/signal/trigger.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-09-08T19:33:40.000Z | 2021-04-05T09:47:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Filename: trigger.py
# Purpose: Python trigger/picker routines for seismology.
# Author: Moritz Beyreuther, Tobias Megies
# Email: moritz.beyreuther@geophysik.uni-muenchen.de
#
# Copyright (C) 2008-2012 Moritz Beyreuther, Tobias Megies
# -------------------------------------------------------------------
"""
Various routines related to triggering/picking
Module implementing the Recursive STA/LTA. Two versions, a fast ctypes one and
a bit slower python one. Furthermore, the classic and delayed STA/LTA, the
carl_sta_trig and the z_detect are implemented.
Also includes picking routines, routines for evaluation and visualization of
characteristic functions and a coincidence triggering routine.
.. seealso:: [Withers1998]_ (p. 98) and [Trnkoczy2012]_
:copyright:
The ObsPy Development Team (devs@obspy.org)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from collections import deque
import ctypes as C
import warnings
import numpy as np
import scipy
from obspy import UTCDateTime
from obspy.signal.cross_correlation import templates_max_similarity
from obspy.signal.headers import clibsignal, head_stalta_t
def recursive_sta_lta(a, nsta, nlta):
"""
Recursive STA/LTA.
Fast version written in C.
:note: This version directly uses a C version via CTypes
:type a: :class:`numpy.ndarray`, dtype=float64
:param a: Seismic Trace, numpy.ndarray dtype float64
:type nsta: int
:param nsta: Length of short time average window in samples
:type nlta: int
:param nlta: Length of long time average window in samples
:rtype: :class:`numpy.ndarray`, dtype=float64
:return: Characteristic function of recursive STA/LTA
.. seealso:: [Withers1998]_ (p. 98) and [Trnkoczy2012]_
"""
# be nice and adapt type if necessary
a = np.ascontiguousarray(a, np.float64)
ndat = len(a)
charfct = np.empty(ndat, dtype=np.float64)
# do not use pointer here:
clibsignal.recstalta(a, charfct, ndat, nsta, nlta)
return charfct
def recursive_sta_lta_py(a, nsta, nlta):
"""
Recursive STA/LTA written in Python.
.. note::
There exists a faster version of this trigger wrapped in C
called :func:`~obspy.signal.trigger.recursive_sta_lta` in this module!
:type a: NumPy :class:`~numpy.ndarray`
:param a: Seismic Trace
:type nsta: int
:param nsta: Length of short time average window in samples
:type nlta: int
:param nlta: Length of long time average window in samples
:rtype: NumPy :class:`~numpy.ndarray`
:return: Characteristic function of recursive STA/LTA
.. seealso:: [Withers1998]_ (p. 98) and [Trnkoczy2012]_
"""
try:
a = a.tolist()
except Exception:
pass
ndat = len(a)
# compute the short time average (STA) and long time average (LTA)
# given by Evans and Allen
csta = 1. / nsta
clta = 1. / nlta
sta = 0.
lta = 1e-99 # avoid zero division
charfct = [0.0] * len(a)
icsta = 1 - csta
iclta = 1 - clta
for i in range(1, ndat):
sq = a[i] ** 2
sta = csta * sq + icsta * sta
lta = clta * sq + iclta * lta
charfct[i] = sta / lta
if i < nlta:
charfct[i] = 0.
return np.array(charfct)
def carl_sta_trig(a, nsta, nlta, ratio, quiet):
"""
Computes the carlSTAtrig characteristic function.
eta = star - (ratio * ltar) - abs(sta - lta) - quiet
:type a: NumPy :class:`~numpy.ndarray`
:param a: Seismic Trace
:type nsta: int
:param nsta: Length of short time average window in samples
:type nlta: int
:param nlta: Length of long time average window in samples
    :type ratio: float
:param ratio: as ratio gets smaller, carl_sta_trig gets more sensitive
:type quiet: float
:param quiet: as quiet gets smaller, carl_sta_trig gets more sensitive
:rtype: NumPy :class:`~numpy.ndarray`
:return: Characteristic function of CarlStaTrig
"""
m = len(a)
#
sta = np.zeros(len(a), dtype=np.float64)
lta = np.zeros(len(a), dtype=np.float64)
star = np.zeros(len(a), dtype=np.float64)
ltar = np.zeros(len(a), dtype=np.float64)
pad_sta = np.zeros(nsta)
pad_lta = np.zeros(nlta) # avoid for 0 division 0/1=0
#
# compute the short time average (STA)
for i in range(nsta): # window size to smooth over
sta += np.concatenate((pad_sta, a[i:m - nsta + i]))
sta /= nsta
#
# compute the long time average (LTA), 8 sec average over sta
for i in range(nlta): # window size to smooth over
lta += np.concatenate((pad_lta, sta[i:m - nlta + i]))
lta /= nlta
lta = np.concatenate((np.zeros(1), lta))[:m] # XXX ???
#
# compute star, average of abs diff between trace and lta
for i in range(nsta): # window size to smooth over
star += np.concatenate((pad_sta,
abs(a[i:m - nsta + i] - lta[i:m - nsta + i])))
star /= nsta
#
# compute ltar, 8 sec average over star
for i in range(nlta): # window size to smooth over
ltar += np.concatenate((pad_lta, star[i:m - nlta + i]))
ltar /= nlta
#
eta = star - (ratio * ltar) - abs(sta - lta) - quiet
eta[:nlta] = -1.0
return eta
def classic_sta_lta(a, nsta, nlta):
"""
Computes the standard STA/LTA from a given input array a. The length of
the STA is given by nsta in samples, respectively is the length of the
LTA given by nlta in samples.
Fast version written in C.
:type a: NumPy :class:`~numpy.ndarray`
:param a: Seismic Trace
:type nsta: int
:param nsta: Length of short time average window in samples
:type nlta: int
:param nlta: Length of long time average window in samples
:rtype: NumPy :class:`~numpy.ndarray`
:return: Characteristic function of classic STA/LTA
"""
data = a
# initialize C struct / NumPy structured array
head = np.empty(1, dtype=head_stalta_t)
head[:] = (len(data), nsta, nlta)
# ensure correct type and contiguous of data
data = np.ascontiguousarray(data, dtype=np.float64)
# all memory should be allocated by python
charfct = np.empty(len(data), dtype=np.float64)
# run and check the error-code
errcode = clibsignal.stalta(head, data, charfct)
if errcode != 0:
raise Exception('ERROR %d stalta: len(data) < nlta' % errcode)
return charfct
def classic_sta_lta_py(a, nsta, nlta):
"""
Computes the standard STA/LTA from a given input array a. The length of
the STA is given by nsta in samples, respectively is the length of the
LTA given by nlta in samples. Written in Python.
.. note::
There exists a faster version of this trigger wrapped in C
called :func:`~obspy.signal.trigger.classic_sta_lta` in this module!
:type a: NumPy :class:`~numpy.ndarray`
:param a: Seismic Trace
:type nsta: int
:param nsta: Length of short time average window in samples
:type nlta: int
:param nlta: Length of long time average window in samples
:rtype: NumPy :class:`~numpy.ndarray`
:return: Characteristic function of classic STA/LTA
"""
# The cumulative sum can be exploited to calculate a moving average (the
# cumsum function is quite efficient)
sta = np.cumsum(a ** 2)
# Convert to float
sta = np.require(sta, dtype=np.float)
# Copy for LTA
lta = sta.copy()
# Compute the STA and the LTA
sta[nsta:] = sta[nsta:] - sta[:-nsta]
sta /= nsta
lta[nlta:] = lta[nlta:] - lta[:-nlta]
lta /= nlta
# Pad zeros
sta[:nlta - 1] = 0
# Avoid division by zero by setting zero values to tiny float
dtiny = np.finfo(0.0).tiny
idx = lta < dtiny
lta[idx] = dtiny
return sta / lta
def delayed_sta_lta(a, nsta, nlta):
"""
Delayed STA/LTA.
:type a: NumPy :class:`~numpy.ndarray`
:param a: Seismic Trace
:type nsta: int
:param nsta: Length of short time average window in samples
:type nlta: int
:param nlta: Length of long time average window in samples
:rtype: NumPy :class:`~numpy.ndarray`
:return: Characteristic function of delayed STA/LTA
.. seealso:: [Withers1998]_ (p. 98) and [Trnkoczy2012]_
"""
m = len(a)
#
# compute the short time average (STA) and long time average (LTA)
# don't start for STA at nsta because it's muted later anyway
sta = np.zeros(m, dtype=np.float64)
lta = np.zeros(m, dtype=np.float64)
for i in range(m):
sta[i] = (a[i] ** 2 + a[i - nsta] ** 2) / nsta + sta[i - 1]
lta[i] = (a[i - nsta - 1] ** 2 + a[i - nsta - nlta - 1] ** 2) / \
nlta + lta[i - 1]
sta[0:nlta + nsta + 50] = 0
lta[0:nlta + nsta + 50] = 1 # avoid division by zero
return sta / lta
def z_detect(a, nsta):
"""
Z-detector.
:param nsta: Window length in Samples.
.. seealso:: [Withers1998]_, p. 99
"""
m = len(a)
#
# Z-detector given by Swindell and Snell (1977)
sta = np.zeros(len(a), dtype=np.float64)
# Standard Sta
pad_sta = np.zeros(nsta)
for i in range(nsta): # window size to smooth over
sta = sta + np.concatenate((pad_sta, a[i:m - nsta + i] ** 2))
a_mean = np.mean(sta)
a_std = np.std(sta)
_z = (sta - a_mean) / a_std
return _z
def trigger_onset(charfct, thres1, thres2, max_len=9e99, max_len_delete=False):
"""
Calculate trigger on and off times.
Given thres1 and thres2 calculate trigger on and off times from
characteristic function.
This method is written in pure Python and gets slow as soon as there
    are more than 1e6 triggerings ("on" AND "off") in charfct --- normally
this does not happen.
:type charfct: NumPy :class:`~numpy.ndarray`
:param charfct: Characteristic function of e.g. STA/LTA trigger
:type thres1: float
:param thres1: Value above which trigger (of characteristic function)
is activated (higher threshold)
:type thres2: float
:param thres2: Value below which trigger (of characteristic function)
is deactivated (lower threshold)
:type max_len: int
:param max_len: Maximum length of triggered event in samples. A new
event will be triggered as soon as the signal reaches
again above thres1.
:type max_len_delete: bool
:param max_len_delete: Do not write events longer than max_len into
report file.
:rtype: List
:return: Nested List of trigger on and of times in samples
"""
# 1) find indices of samples greater than threshold
# 2) calculate trigger "of" times by the gap in trigger indices
# above the threshold i.e. the difference of two following indices
# in ind is greater than 1
# 3) in principle the same as for "of" just add one to the index to get
# start times, this operation is not supported on the compact
# syntax
# 4) as long as there is a on time greater than the actual of time find
# trigger on states which are greater than last of state an the
# corresponding of state which is greater than current on state
# 5) if the signal stays above thres2 longer than max_len an event
# is triggered and following a new event can be triggered as soon as
# the signal is above thres1
ind1 = np.where(charfct > thres1)[0]
if len(ind1) == 0:
return []
ind2 = np.where(charfct > thres2)[0]
#
on = deque([ind1[0]])
of = deque([-1])
# determine the indices where charfct falls below off-threshold
ind2_ = np.empty_like(ind2, dtype=bool)
ind2_[:-1] = np.diff(ind2) > 1
    # last occurrence is missed by the diff, add it manually
ind2_[-1] = True
of.extend(ind2[ind2_].tolist())
on.extend(ind1[np.where(np.diff(ind1) > 1)[0] + 1].tolist())
# include last pick if trigger is on or drop it
if max_len_delete:
# drop it
of.extend([1e99])
on.extend([on[-1]])
else:
# include it
of.extend([ind2[-1]])
#
pick = []
while on[-1] > of[0]:
while on[0] <= of[0]:
on.popleft()
while of[0] < on[0]:
of.popleft()
if of[0] - on[0] > max_len:
if max_len_delete:
on.popleft()
continue
of.appendleft(on[0] + max_len)
pick.append([on[0], of[0]])
return np.array(pick, dtype=np.int64)
def pk_baer(reltrc, samp_int, tdownmax, tupevent, thr1, thr2, preset_len,
p_dur):
"""
Wrapper for P-picker routine by M. Baer, Schweizer Erdbebendienst.
:param reltrc: time series as numpy.ndarray float32 data, possibly filtered
:param samp_int: number of samples per second
:param tdownmax: if dtime exceeds tdownmax, the trigger is examined for
validity
:param tupevent: min nr of samples for itrm to be accepted as a pick
:param thr1: threshold to trigger for pick (c.f. paper)
:param thr2: threshold for updating sigma (c.f. paper)
:param preset_len: no of points taken for the estimation of variance of
SF(t) on preset()
:param p_dur: p_dur defines the time interval for which the maximum
        amplitude is evaluated. Originally set to 6 secs
:return: (pptime, pfm) pptime sample number of parrival; pfm direction
of first motion (U or D)
.. note:: currently the first sample is not taken into account
.. seealso:: [Baer1987]_
"""
pptime = C.c_int()
# c_chcar_p strings are immutable, use string_buffer for pointers
pfm = C.create_string_buffer(b" ", 5)
# be nice and adapt type if necessary
reltrc = np.ascontiguousarray(reltrc, np.float32)
# index in pk_mbaer.c starts with 1, 0 index is lost, length must be
# one shorter
args = (len(reltrc) - 1, C.byref(pptime), pfm, samp_int,
tdownmax, tupevent, thr1, thr2, preset_len, p_dur)
errcode = clibsignal.ppick(reltrc, *args)
if errcode != 0:
raise MemoryError("Error in function ppick of mk_mbaer.c")
# add the sample to the time which is not taken into account
# pfm has to be decoded from byte to string
return pptime.value + 1, pfm.value.decode('utf-8')
def ar_pick(a, b, c, samp_rate, f1, f2, lta_p, sta_p, lta_s, sta_s, m_p, m_s,
l_p, l_s, s_pick=True):
"""
Pick P and S arrivals with an AR-AIC + STA/LTA algorithm.
The algorithm picks onset times using an Auto Regression - Akaike
Information Criterion (AR-AIC) method. The detection intervals are
successively narrowed down with the help of STA/LTA ratios as well as
STA-LTA difference calculations. For details, please see [Akazawa2004]_.
An important feature of this algorithm is that it requires comparatively
little tweaking and site-specific settings and is thus applicable to large,
diverse data sets.
:type a: :class:`numpy.ndarray`
:param a: Z signal the data.
:type b: :class:`numpy.ndarray`
:param b: N signal of the data.
:type c: :class:`numpy.ndarray`
:param c: E signal of the data.
:type samp_rate: float
:param samp_rate: Number of samples per second.
:type f1: float
:param f1: Frequency of the lower bandpass window.
:type f2: float
    :param f2: Frequency of the upper bandpass window.
:type lta_p: float
:param lta_p: Length of LTA for the P arrival in seconds.
:type sta_p: float
:param sta_p: Length of STA for the P arrival in seconds.
:type lta_s: float
:param lta_s: Length of LTA for the S arrival in seconds.
:type sta_s: float
:param sta_s: Length of STA for the S arrival in seconds.
:type m_p: int
:param m_p: Number of AR coefficients for the P arrival.
:type m_s: int
:param m_s: Number of AR coefficients for the S arrival.
:type l_p: float
:param l_p: Length of variance window for the P arrival in seconds.
:type l_s: float
:param l_s: Length of variance window for the S arrival in seconds.
:type s_pick: bool
:param s_pick: If ``True``, also pick the S phase, otherwise only the P
phase.
:rtype: tuple
:returns: A tuple with the P and the S arrival.
"""
if not (len(a) == len(b) == len(c)):
raise ValueError("All three data arrays must have the same length.")
a = scipy.signal.detrend(a, type='linear')
b = scipy.signal.detrend(b, type='linear')
c = scipy.signal.detrend(c, type='linear')
# be nice and adapt type if necessary
a = np.require(a, dtype=np.float32, requirements=['C_CONTIGUOUS'])
b = np.require(b, dtype=np.float32, requirements=['C_CONTIGUOUS'])
c = np.require(c, dtype=np.float32, requirements=['C_CONTIGUOUS'])
# scale amplitudes to avoid precision issues in case of low amplitudes
# C code picks the horizontal component with larger amplitudes, so scale
# horizontal components with a common scaling factor
data_max = np.abs(a).max()
if data_max < 100:
a *= 1e6
a /= data_max
data_max = max(np.abs(b).max(), np.abs(c).max())
if data_max < 100:
for data in (b, c):
data *= 1e6
data /= data_max
s_pick = C.c_int(s_pick) # pick S phase also
ptime = C.c_float()
stime = C.c_float()
args = (len(a), samp_rate, f1, f2,
lta_p, sta_p, lta_s, sta_s, m_p, m_s, C.byref(ptime),
C.byref(stime), l_p, l_s, s_pick)
errcode = clibsignal.ar_picker(a, b, c, *args)
if errcode != 0:
bufs = ['buff1', 'buff1_s', 'buff2', 'buff3', 'buff4', 'buff4_s',
'f_error', 'b_error', 'ar_f', 'ar_b', 'buf_sta', 'buf_lta',
'extra_tr1', 'extra_tr2', 'extra_tr3']
if errcode <= len(bufs):
raise MemoryError('Unable to allocate %s!' % (bufs[errcode - 1]))
raise Exception('Error during PAZ calculation!')
return ptime.value, stime.value
def plot_trigger(trace, cft, thr_on, thr_off, show=True):
"""
Plot characteristic function of trigger along with waveform data and
trigger On/Off from given thresholds.
:type trace: :class:`~obspy.core.trace.Trace`
:param trace: waveform data
:type cft: :class:`numpy.ndarray`
:param cft: characteristic function as returned by a trigger in
:mod:`obspy.signal.trigger`
:type thr_on: float
:param thr_on: threshold for switching trigger on
:type thr_off: float
:param thr_off: threshold for switching trigger off
:type show: bool
:param show: Do not call `plt.show()` at end of routine. That way,
further modifications can be done to the figure before showing it.
"""
import matplotlib.pyplot as plt
df = trace.stats.sampling_rate
npts = trace.stats.npts
t = np.arange(npts, dtype=np.float32) / df
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, trace.data, 'k')
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, cft, 'k')
on_off = np.array(trigger_onset(cft, thr_on, thr_off))
i, j = ax1.get_ylim()
try:
ax1.vlines(on_off[:, 0] / df, i, j, color='r', lw=2,
label="Trigger On")
ax1.vlines(on_off[:, 1] / df, i, j, color='b', lw=2,
label="Trigger Off")
ax1.legend()
except IndexError:
pass
ax2.axhline(thr_on, color='red', lw=1, ls='--')
ax2.axhline(thr_off, color='blue', lw=1, ls='--')
ax2.set_xlabel("Time after %s [s]" % trace.stats.starttime.isoformat())
fig.suptitle(trace.id)
fig.canvas.draw()
if show:
plt.show()
def coincidence_trigger(trigger_type, thr_on, thr_off, stream,
thr_coincidence_sum, trace_ids=None,
max_trigger_length=1e6, delete_long_trigger=False,
trigger_off_extension=0, details=False,
event_templates={}, similarity_threshold=0.7,
**options):
"""
Perform a network coincidence trigger.
The routine works in the following steps:
* take every single trace in the stream
* apply specified triggering routine (can be skipped to work on
precomputed custom characteristic functions)
* evaluate all single station triggering results
* compile chronological overall list of all single station triggers
* find overlapping single station triggers
* calculate coincidence sum of every individual overlapping trigger
* add to coincidence trigger list if it exceeds the given threshold
* optional: if master event templates are provided, also check single
station triggers individually and include any single station trigger if
it exceeds the specified similarity threshold even if no other stations
coincide with the trigger
* return list of network coincidence triggers
.. note::
An example can be found in the
`Trigger/Picker Tutorial
<https://tutorial.obspy.org/code_snippets/trigger_tutorial.html>`_.
.. note::
Setting `trigger_type=None` precomputed characteristic functions can
be provided.
.. seealso:: [Withers1998]_ (p. 98) and [Trnkoczy2012]_
:param trigger_type: String that specifies which trigger is applied (e.g.
``'recstalta'``). See e.g. :meth:`obspy.core.trace.Trace.trigger` for
further details. If set to `None` no triggering routine is applied,
i.e. data in traces is supposed to be a precomputed characteristic
function on which the trigger thresholds are evaluated.
:type trigger_type: str or None
:type thr_on: float
:param thr_on: threshold for switching single station trigger on
:type thr_off: float
:param thr_off: threshold for switching single station trigger off
:type stream: :class:`~obspy.core.stream.Stream`
:param stream: Stream containing waveform data for all stations. These
data are changed inplace, make a copy to keep the raw waveform data.
:type thr_coincidence_sum: int or float
:param thr_coincidence_sum: Threshold for coincidence sum. The network
coincidence sum has to be at least equal to this value for a trigger to
be included in the returned trigger list.
:type trace_ids: list or dict, optional
:param trace_ids: Trace IDs to be used in the network coincidence sum. A
dictionary with trace IDs as keys and weights as values can
be provided. If a list of trace IDs is provided, all
weights are set to 1. The default of ``None`` uses all traces present
in the provided stream. Waveform data with trace IDs not
present in this list/dict are disregarded in the analysis.
:type max_trigger_length: int or float
:param max_trigger_length: Maximum single station trigger length (in
seconds). ``delete_long_trigger`` controls what happens to single
station triggers longer than this value.
:type delete_long_trigger: bool, optional
:param delete_long_trigger: If ``False`` (default), single station
triggers are manually released at ``max_trigger_length``, although the
characteristic function has not dropped below ``thr_off``. If set to
``True``, all single station triggers longer than
``max_trigger_length`` will be removed and are excluded from
coincidence sum computation.
:type trigger_off_extension: int or float, optional
:param trigger_off_extension: Extends search window for next trigger
on-time after last trigger off-time in coincidence sum computation.
:type details: bool, optional
:param details: If set to ``True`` the output coincidence triggers contain
more detailed information: A list with the trace IDs (in addition to
only the station names), as well as lists with single station
characteristic function peak values and standard deviations in the
triggering interval and mean values of both, relatively weighted like
in the coincidence sum. These values can help to judge the reliability
of the trigger.
:param options: Necessary keyword arguments for the respective trigger
that will be passed on. For example ``sta`` and ``lta`` for any STA/LTA
variant (e.g. ``sta=3``, ``lta=10``).
Arguments ``sta`` and ``lta`` (seconds) will be mapped to ``nsta``
and ``nlta`` (samples) by multiplying with sampling rate of trace.
(e.g. ``sta=3``, ``lta=10`` would call the trigger with 3 and 10
seconds average, respectively)
:param event_templates: Event templates to use in checking similarity of
single station triggers against known events. Expected are streams with
three traces for Z, N, E component. A dictionary is expected where for
each station used in the trigger, a list of streams can be provided as
the value to the network/station key (e.g. {"GR.FUR": [stream1,
stream2]}). Templates are compared against the provided `stream`
without the specified triggering routine (`trigger_type`) applied.
:type event_templates: dict
:param similarity_threshold: similarity threshold (0.0-1.0) at which a
single station trigger gets included in the output network event
trigger list. A common threshold can be set for all stations (float) or
a dictionary mapping station names to float values for each station.
:type similarity_threshold: float or dict
:rtype: list
:returns: List of event triggers sorted chronologically.
"""
st = stream.copy()
# if no trace ids are specified use all traces ids found in stream
if trace_ids is None:
trace_ids = [tr.id for tr in st]
# we always work with a dictionary with trace ids and their weights later
if isinstance(trace_ids, list) or isinstance(trace_ids, tuple):
trace_ids = dict.fromkeys(trace_ids, 1)
# set up similarity thresholds as a dictionary if necessary
if not isinstance(similarity_threshold, dict):
similarity_threshold = dict.fromkeys([tr.stats.station for tr in st],
similarity_threshold)
# the single station triggering
triggers = []
# prepare kwargs for trigger_onset
kwargs = {'max_len_delete': delete_long_trigger}
for tr in st:
if tr.id not in trace_ids:
msg = "At least one trace's ID was not found in the " + \
"trace ID list and was disregarded (%s)" % tr.id
warnings.warn(msg, UserWarning)
continue
if trigger_type is not None:
tr.trigger(trigger_type, **options)
kwargs['max_len'] = int(
max_trigger_length * tr.stats.sampling_rate + 0.5)
tmp_triggers = trigger_onset(tr.data, thr_on, thr_off, **kwargs)
for on, off in tmp_triggers:
try:
cft_peak = tr.data[on:off].max()
cft_std = tr.data[on:off].std()
except ValueError:
cft_peak = tr.data[on]
cft_std = 0
on = tr.stats.starttime + float(on) / tr.stats.sampling_rate
off = tr.stats.starttime + float(off) / tr.stats.sampling_rate
triggers.append((on.timestamp, off.timestamp, tr.id, cft_peak,
cft_std))
triggers.sort()
# the coincidence triggering and coincidence sum computation
coincidence_triggers = []
last_off_time = 0.0
while triggers != []:
# remove first trigger from list and look for overlaps
on, off, tr_id, cft_peak, cft_std = triggers.pop(0)
sta = tr_id.split(".")[1]
event = {}
event['time'] = UTCDateTime(on)
event['stations'] = [tr_id.split(".")[1]]
event['trace_ids'] = [tr_id]
event['coincidence_sum'] = float(trace_ids[tr_id])
event['similarity'] = {}
if details:
event['cft_peaks'] = [cft_peak]
event['cft_stds'] = [cft_std]
# evaluate maximum similarity for station if event templates were
# provided
templates = event_templates.get(sta)
if templates:
event['similarity'][sta] = \
templates_max_similarity(stream, event['time'], templates)
# compile the list of stations that overlap with the current trigger
for trigger in triggers:
tmp_on, tmp_off, tmp_tr_id, tmp_cft_peak, tmp_cft_std = trigger
tmp_sta = tmp_tr_id.split(".")[1]
# skip retriggering of already present station in current
# coincidence trigger
if tmp_tr_id in event['trace_ids']:
continue
# check for overlapping trigger,
# break if there is a gap in between the two triggers
if tmp_on > off + trigger_off_extension:
break
event['stations'].append(tmp_sta)
event['trace_ids'].append(tmp_tr_id)
event['coincidence_sum'] += trace_ids[tmp_tr_id]
if details:
event['cft_peaks'].append(tmp_cft_peak)
event['cft_stds'].append(tmp_cft_std)
# allow sets of triggers that overlap only on subsets of all
# stations (e.g. A overlaps with B and B overlaps w/ C => ABC)
off = max(off, tmp_off)
# evaluate maximum similarity for station if event templates were
# provided
templates = event_templates.get(tmp_sta)
if templates:
event['similarity'][tmp_sta] = \
templates_max_similarity(stream, event['time'], templates)
# skip if both coincidence sum and similarity thresholds are not met
if event['coincidence_sum'] < thr_coincidence_sum:
if not event['similarity']:
continue
elif not any([val > similarity_threshold[_s]
for _s, val in event['similarity'].items()]):
continue
# skip coincidence trigger if it is just a subset of the previous
# (determined by a shared off-time, this is a bit sloppy)
if off <= last_off_time:
continue
event['duration'] = off - on
if details:
weights = np.array([trace_ids[i] for i in event['trace_ids']])
weighted_values = np.array(event['cft_peaks']) * weights
event['cft_peak_wmean'] = weighted_values.sum() / weights.sum()
weighted_values = np.array(event['cft_stds']) * weights
event['cft_std_wmean'] = \
(np.array(event['cft_stds']) * weights).sum() / weights.sum()
coincidence_triggers.append(event)
last_off_time = off
return coincidence_triggers
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| 40.196911 | 79 | 0.641085 | 4,452 | 31,233 | 4.413297 | 0.172731 | 0.009161 | 0.014709 | 0.011604 | 0.263844 | 0.220735 | 0.180629 | 0.161696 | 0.144697 | 0.138538 | 0 | 0.014286 | 0.262639 | 31,233 | 776 | 80 | 40.248711 | 0.838862 | 0.555886 | 0 | 0.160131 | 0 | 0 | 0.059712 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0.006536 | 0.039216 | 0 | 0.117647 | 0.003268 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87f2e425a9160d7d02c5f284eff0cd2cb2c7740d | 6,187 | py | Python | welib/airfoils/tests/test_dynamic_stall.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | 24 | 2019-07-24T23:37:10.000Z | 2022-03-30T20:40:40.000Z | welib/airfoils/tests/test_dynamic_stall.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | null | null | null | welib/airfoils/tests/test_dynamic_stall.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | 11 | 2019-03-14T13:47:04.000Z | 2022-03-31T15:47:27.000Z | import unittest
import numpy as np
import os
MyDir=os.path.dirname(__file__)
from scipy.integrate import solve_ivp
from welib.airfoils.Polar import Polar
from welib.airfoils.DynamicStall import *
# --------------------------------------------------------------------------------}
# ---
# --------------------------------------------------------------------------------{
class TestDynamicStall(unittest.TestCase):
def assertNaN(self,x):
self.assertTrue(np.isnan(x))
def test_oye(self):
#FFA-W3-241 airfoil Dyna Stall
P=Polar.fromfile(os.path.join(MyDir,'../data/FFA-W3-241-Re12M.dat'),compute_params=True)
omega = 12.57
T = 2*np.pi/omega
tau = 0.08
alpham = 20
dt = 0.01 # time step
#
fs_prev = P.f_st_interp(alpham) # init with steady value
Cl0 = P.cl_interp(alpham) # init with steady value
Cl_new,fs_prev_new = P.dynaStallOye_DiscreteStep(alpham,tau,fs_prev,dt)
# Testing that value at t=0 is equal to the steady state cl
self.assertEqual(Cl_new,Cl0)
self.assertEqual(fs_prev_new,fs_prev)
# An increase of alpha from the steady value should have dCl/dt>0
Cl_new,fs_prev_new = P.dynaStallOye_DiscreteStep(alpham+1,tau,fs_prev,dt)
self.assertEqual( (Cl_new-Cl0)>0 ,True)
self.assertEqual( (fs_prev_new-fs_prev)<0 ,True)
# A decrease of alpha from the steady value should have dCl/dt<0
Cl_new,fs_prev_new = P.dynaStallOye_DiscreteStep(alpham-1,tau,fs_prev,dt)
self.assertEqual( (Cl_new-Cl0)<0 ,True)
self.assertEqual( (fs_prev_new-fs_prev)>0 ,True)
def test_convergence(self):
# Starting from a wrong set point, the Cl value should converge to the steady Cl value
# Script params, reading polar
radians=True
P=Polar.fromfile(os.path.join(MyDir,'../data/FFA-W3-241-Re12M.dat'),compute_params=True,to_radians=radians)
U0, chord = 10, 0.1591
alpha_st = 3 * P._alpha0
tau_t = np.linspace(0,40,30)
vt = chord * tau_t / (2*U0)
# Oye's Parameters
p_oye = dynstall_oye_param_from_polar(P, tau_chord=chord/U0)
p_mhh = dynstall_mhh_param_from_polar(P, chord, tau_chord=chord/U0, FAST=True)
# Inputs
u=dict()
u['U'] = lambda t: U0
u['U_dot'] = lambda t: 0
u['alpha'] = lambda t: alpha_st
u['alpha_dot'] = lambda t: 0
u['alpha_34'] = u['alpha']
# Init values, off
y0_oye = [0]
y0_mhh = [0,0,0,0]
Cl_mhh = np.zeros(len(vt))
Cl_oye = np.zeros(len(vt))
## Integration using solve_ivp
np.seterr(under='ignore')
sol_mhh = solve_ivp(lambda t,x: dynstall_mhh_dxdt(t,x,u,p_mhh), t_span=[0, max(vt)], y0=y0_mhh, t_eval=vt)
for it,t in enumerate(vt):
Cl_mhh[it],_,_ = dynstall_mhh_outputs(t,sol_mhh.y[:,it],u,p_mhh)
## Integration using solve_ivp
sol_oye = solve_ivp(lambda t,x: dynstall_oye_dxdt(t,x,u,p_oye), t_span=[0, max(vt)], y0=y0_oye, t_eval=vt)
for it,t in enumerate(vt):
Cl_oye[it] = dynstall_oye_output(vt[it],sol_oye.y[0,it],u,p_oye)
## Steady values
Cl_st = P.cl_interp(alpha_st)
fs_st = P.f_st_interp(alpha_st)
## --- Test that the last value is the steady state one
np.testing.assert_almost_equal(Cl_mhh[-1], Cl_st, decimal=3)
np.testing.assert_almost_equal(Cl_oye[-1], Cl_st, decimal=3)
np.testing.assert_almost_equal(sol_oye.y[0,-1], fs_st, decimal=3)
# --- Plot, keep me
#import matplotlib.pyplot as plt
#fig=plt.figure()
#ax = fig.add_subplot(111)
#ax.plot(tau_t,Cl_mhh[:]/Cl_st ,'--',label = 'Cl dynamic (MHH)')
#ax.plot(tau_t,Cl_oye[:]/Cl_st ,'-' ,label = 'Cl dynamic (Oye)')
#ax.set_xlabel('Dimensionless time [-]')
#ax.set_ylabel('Cl [-]')
#plt.legend()
#plt.show()
#
#y0_mhh = dynstall_mhh_steady(0,u,p_mhh)
def test_mhh_wagner_step(self):
# Step from alpha0 to alpha0+2, testing the circulatory response (history),
# The Cl result is compared to Wagner's function
radians=True # <<<
P=Polar.fromfile(os.path.join(MyDir,'../data/FFA-W3-241-Re12M.dat'),compute_params=True,to_radians=radians)
U0, chord = 10, 0.1591
alpha1 = P._alpha0
alpha2 = alpha1+2*np.pi/180
tau_t = np.linspace(0,30,100)
vt = chord * tau_t / (2*U0)
## MHH Parameters and Inputs
np.seterr(under='ignore')
p = dynstall_mhh_param_from_polar(P, chord, tau_chord=chord/U0, Jones=True)
u=dict()
u['U'] = lambda t: U0
u['U_dot'] = lambda t: 0
u['alpha'] = lambda t: alpha1 if t<=0 else alpha2
u['alpha_dot'] = lambda t: 0
u['alpha_34'] = u['alpha']
## Steady values
Cl_st2 = P.cl_interp(alpha2)
y0_mhh = dynstall_mhh_steady(0,u,p)
Cl_mhh = np.zeros(len(vt))
# Integration using solve_ivp
sol_mhh = solve_ivp(lambda t,x: dynstall_mhh_dxdt(t,x,u,p), t_span=[0, max(vt)], y0=y0_mhh, t_eval=vt)
for it,t in enumerate(vt):
Cl_mhh[it],_,_ = dynstall_mhh_outputs(t,sol_mhh.y[:,it],u,p)
Cl_wag_Jones=1-A1_Jones*np.exp(-b1_Jones*tau_t)-A2_Jones*np.exp(-b2_Jones*tau_t);
np.testing.assert_almost_equal(Cl_mhh[1:]/Cl_st2,Cl_wag_Jones[1:],decimal=4)
# --- Plot, keep me
#import matplotlib.pyplot as plt
#fig=plt.figure()
#ax = fig.add_subplot(111)
#ax.plot(tau_t ,Cl_wag_Jones,'k' ,label='Wagner function (Jones approx.)')
#ax.plot(tau_t[1:],Cl_mhh[1:]/Cl_st2 ,'--',label = 'Cl dynamic (MHH)')
#ax.set_xlabel('Dimensionless time 2 U_0 t/c [-]')
#ax.set_ylabel('Cl/Cl_ref [-]')
#plt.ylim([0.3,1.1])
#plt.title('Response to an angle of attack change')
#plt.legend()
#plt.show()
if __name__ == '__main__':
unittest.main()
| 38.66875 | 115 | 0.578633 | 936 | 6,187 | 3.615385 | 0.217949 | 0.02305 | 0.015957 | 0.013002 | 0.587766 | 0.520981 | 0.480201 | 0.466903 | 0.430851 | 0.398345 | 0 | 0.035526 | 0.262971 | 6,187 | 159 | 116 | 38.91195 | 0.706579 | 0.267981 | 0 | 0.294118 | 0 | 0 | 0.038014 | 0.018784 | 0 | 0 | 0 | 0 | 0.141176 | 1 | 0.047059 | false | 0 | 0.070588 | 0 | 0.129412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87f464d0e2703278c5b64df5c7e990d4c56df632 | 8,203 | py | Python | helperFun.py | CalvinRoth/PriceDiscriminationNewtorks | 747688f958dc6876acd881139fb02666d93b5a4c | [
"MIT"
] | null | null | null | helperFun.py | CalvinRoth/PriceDiscriminationNewtorks | 747688f958dc6876acd881139fb02666d93b5a4c | [
"MIT"
] | null | null | null | helperFun.py | CalvinRoth/PriceDiscriminationNewtorks | 747688f958dc6876acd881139fb02666d93b5a4c | [
"MIT"
] | null | null | null | from __future__ import annotations
import numpy as np
import numpy.linalg as lin
import networkx as nx
import scipy
import scipy.sparse.linalg as slin
import matplotlib.pyplot as plt
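# Helper routines for the price-discrimination-on-networks experiments: random graph
# generators, the centrality vector from the paper, optimal price vectors, and
# profit/regret comparisons between the true network and degree-preserving guesses.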
# Linear algebra
def specNorm(A: np.matrix) -> float:
return lin.norm(A, ord=2)
# return np.sqrt(slin.eigs(A.T @ A, k=1, which="LM", return_eigenvectors=False, tol=1e-10)[0])
# Graph makers
def makeSimilarGraph(G: nx.DiGraph) -> np.matrix:
""" Generates the new graph with the same in/out degree as the orginal
-------
Return adj. matrix of graph
"""
sequence_in = [d for _, d in G.in_degree]
sequence_out = [d for _, d in G.out_degree]
return nx.to_numpy_matrix(
nx.directed_configuration_model(sequence_in, sequence_out, create_using=nx.DiGraph),
dtype="d"
)
# Graph Generators
def makeERGraph(n: int, p: float) -> np.matrix:
""" Generates Random Erdos-Renyi Graphs with n vertices and link probability p
------
return Adjacency graph of matrix and the networkx DiGraph object
"""
G = nx.generators.fast_gnp_random_graph(n, p, directed=True)
# sortG = sorted(G.in_degree, key=lambda x: x[1], reverse=True)
return nx.to_numpy_matrix(G, dtype="d"), G
def centralty(A: np.matrix, rho: float, alpha) -> np.matrix:
"""
Parameters
----------
A : np matrix
rho : network effect
Returns
-------
Centrality vector as described in paper
"""
n = A.shape[0]
ident = np.eye(n, n)
ones = np.ones((n, 1))
ApA = A + A.T
eig = specNorm(ApA)
alpha = rho / eig
central = lin.inv(ident - (alpha * ApA))
central = central @ ones # Checked. this > 0
return central
# Paper related properties
def applyPriceVector(A: np.matrix, v: np.matrix, rho: float, a: int | float, c: int | float) -> (float, bool):
"""
Parameters
----------
A : Graph
v : price vector
rho : network strength
a : Stand alone strength
c : Marginal cost. Should be less than a
Returns
-------
    Profit in this network if prices v were applied.
And if result is valid or not
"""
n = A.shape[0]
ident = np.eye(n, n)
ones = np.ones((n, 1))
ApA = A + A.T
    # spN = specNorm(ApA) # Sometimes scipy returns x+0i, this is to discard the warning
alpha = (rho / specNorm(ApA))
consumption = (2 * alpha) * A
consumption = ident - consumption
consumption = 0.5 * lin.inv(consumption) # This is entirely in the range [0,1] ^ checked
consumption = consumption @ ((a * ones) - v)
valid = True
if (np.min(consumption) < 0):
valid = False
return ((v - (c * ones)).T @ consumption)[0, 0], valid
def priceVector(A: np.matrix, rho: float, a: int | float, c: int | float) -> np.matrix:
"""
Parameters
----------
A : Network
rho : network strength
a : stand alone util
c : marginal cost. Should be less than a
Returns
-------
    Vector representing what price to charge individual i
"""
n = A.shape[0]
ones = np.ones((n, 1))
alpha = rho / specNorm(A + A.T)
central = centralty(A, rho, alpha) # This should be A not A + A.T because of how centralty function is designed
dif = A - A.T
pv1 = ((a + c) / 2) * ones
pv2 = ((a - c) * alpha * 0.5) * (dif @ central)
return pv1 + pv2
def optimalProfit(A: np.matrix, n: int, a: int | float, c: int | float, rho: float):
"""
Parameters
----------
A : Network
n : size of network
rho : network strength
a : stand alone util
c : marginal cost. Should be less than a
Returns
-------
True profit. Should be the same as applyPriceVector(A, pricevector(A,...),...)
"""
one = np.ones((n, 1))
alpha = rho / specNorm(A + A.T)
t1 = lin.inv(np.eye(n, n) - (alpha * (A + A.T)))
total = one.T @ t1 @ one
total = ((a - c) * (a - c) / 8) * total
return np.real(total[0, 0])
def fractionalRegret(A, v, n, rho, a, c):
"""
Parameters
----------
A : Network
v : price vector to compare to
rho : network strength
n : number of nodes
a : stand alone util
c : marginal cost. Should be less than a
Returns
-------
1 - (profit of A using v)/(profit of A using best choice)
"""
discrim = optimalProfit(A, n, a, c, rho) # Optimal profit
# I have check and the formula for optimal profit does match applypricevector(A, pricevector(A,...), params)
appliedProf = applyPriceVector(A, v, rho, a, c) # Profit at v
return 1 - (appliedProf / discrim)
# Checks to regen price vector
def genGoodSeqProfit(n: int, G: nx.digraph, A: np.matrix, rho: float, a: int | float, c: int | float):
i = 0
flag = True
v_seq = 0
profit = 0
while flag:
A_seq = makeSimilarGraph(G) # for same seq
v_seq = priceVector(A_seq, rho, a, c)
(profit, flag) = applyPriceVector(A, v_seq, rho, a, c)
i += 1
if i >= 1:
flag = False
return v_seq, profit
def genGoodParamProfit(n: int, p : float, G: nx.digraph, A: np.matrix, rho: float, a: int | float, c: int | float):
i = 0
flag = True
v_par = 0
profit = 0
while flag:
A_par, B = makeERGraph(n, p) # Same Param
v_par = priceVector(A_par, rho, a, c)
(profit, flag) = applyPriceVector(A, v_par, rho, a, c)
i += 1
if i >= 1:
flag = False
return v_par, profit
# Gaps applying price vector of guesses to true graph G
def getGapRev(A, test, rho, a, c):
""" Apply optimal profit price vector guess graph to test graph test and A. Return pair of profits"""
optimalVector = priceVector(test, rho, a, c)
profitWithGuessV = applyPriceVector(A, optimalVector, rho, a, c)
trueProfit = applyPriceVector(A, priceVector(A, rho, a, c), rho, a, c)
return trueProfit, profitWithGuessV
# Applying the true optimal profit vector to guesses
# Currently not using because it seems backwards of what I want
def getGaps(n, p, rho, a, c, i, results, n_trials):
A, G = makeERGraph(n, p)
results[i] = np.average([getGap(A, makeSimilarGraph(G), rho, a, c) for j in range(n_trials)])
return i
# Apply the price vector that each guess produces to the true graph and take average.
def getGapsReverse(n, p, rho, a, c, i, results, n_trials):
A, G = makeERGraph(n, p)
results[i] = np.average([getGapRev(A, makeSimilarGraph(G), rho, a, c) for j in range(n_trials)])
return i
# Here we apply the average optimal price vector of the guesses to the true graph G
# I get a warning about discarding complex values; the values should never be complex for this problem
# and when I check they are all +0i so ?
def getAverageGap(n, p, rho, a, c, i, results, n_trials):
A, G = makeERGraph(n, p)
n = A.shape[0]
trueProfit = applyPriceVector(A, priceVector(A, rho, a, c), rho, a, c)
# the average vector initilized with sample size of 1
averageV = priceVector(makeSimilarGraph(G), rho, a, c)
# And another n_trials-1 trials
for j in range(n_trials - 1):
averageV += priceVector(makeSimilarGraph(G), rho, a, c)
averageV /= n_trials # Scaling
profit = applyPriceVector(A, averageV, rho, a, c)
results[i] = np.real(trueProfit - profit)
# How much does the profit change when we change the i-th coordinate of the price vector?
# Each coordinate is changed percent-wise; both +percent and -percent are tested.
def robustNess(n, p, chaos, rho, a, c):
A, G = makeERGraph(n, p)
true_vector = priceVector(A, rho, a, c)
true_profit = applyPriceVector(A, true_vector, rho, a, c)
results = np.zeros(n)
count_range = [i for i in range(n)]
inD = [d[1] for d in G.in_degree]
outD = [d[1] for d in G.out_degree]
plt.plot(inD)
plt.plot(outD)
plt.show()
"""for i in range(n):
increase_v = true_vector
decrease_v = true_vector
increase_v[i] += chaos * increase_v[i];
decrease_v[i] -= chaos * decrease_v[i];
profitI = true_profit - applyPriceVector(A, increase_v, rho, a,c);
profitD = true_profit - applyPriceVector(A, decrease_v, rho, a,c);
results[i] = max(profitI, profitD)"""
return results
| 31.07197 | 116 | 0.615141 | 1,233 | 8,203 | 4.042174 | 0.21249 | 0.01244 | 0.026083 | 0.012039 | 0.283106 | 0.253612 | 0.220104 | 0.220104 | 0.190008 | 0.182584 | 0 | 0.009054 | 0.259417 | 8,203 | 263 | 117 | 31.190114 | 0.811358 | 0.325613 | 0 | 0.282258 | 0 | 0 | 0.000415 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.120968 | false | 0 | 0.056452 | 0.008065 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87f833562a795b10c89cfc2bd0cd51d2ac640063 | 780 | py | Python | radynpy/cdf/RadynKeyFile.py | Goobley/radynpy | acf685f6ad17be63065fc468e40293b6cf063081 | [
"MIT"
] | 7 | 2019-01-27T20:41:38.000Z | 2020-02-18T16:27:26.000Z | radynpy/cdf/RadynKeyFile.py | grahamkerr/radynpy | 63e06c63476b4cc74568da443f71c12412b83bac | [
"MIT"
] | 3 | 2020-02-25T18:51:20.000Z | 2020-03-19T13:02:14.000Z | radynpy/cdf/RadynKeyFile.py | grahamkerr/radynpy | 63e06c63476b4cc74568da443f71c12412b83bac | [
"MIT"
] | 1 | 2020-02-18T00:20:16.000Z | 2020-02-18T00:20:16.000Z | import os
import cdflib
import pickle
from radynpy.cdf.auxtypes import Val, Array
import numpy as np
cdfFile = '/data/crisp/RadynGrid/radyn_out.val3c_d3_1.0e11_t20s_10kev_fp'
res = {}
cdf = cdflib.CDF(cdfFile)
for k in cdf.cdf_info()['zVariables']:
var = cdf.varget(k)
if len(var.shape) == 0:
# When the shape is () we have a 0-d ndarray in cdf[k][...].
# The only way to get the single value is with .item()
res[k] = Val(var.item())
else:
res[k] = Array(var.shape)
# Add ntime, because it's a useful value
res['ntime'] = Val(cdf.varget('time').shape[0])
# And max number of atomic levels
res['maxatomlevels'] = Val(cdf.varget('nk').max())
cdf.close()
with open('RadynKeySizes.pickle', 'wb') as p:
pickle.dump(res, p)
| 26.896552 | 73 | 0.646154 | 128 | 780 | 3.882813 | 0.59375 | 0.054326 | 0.04829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0209 | 0.202564 | 780 | 28 | 74 | 27.857143 | 0.778135 | 0.234615 | 0 | 0 | 0 | 0 | 0.197635 | 0.103041 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.263158 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87fada3b3483a5046060fbb671d20da71805af9d | 6,444 | py | Python | examples/fixed_resolution.py | AnantTiwari-Naman/pyglet | 4774f2889057da95a78785a69372112931e6a620 | [
"BSD-3-Clause"
] | null | null | null | examples/fixed_resolution.py | AnantTiwari-Naman/pyglet | 4774f2889057da95a78785a69372112931e6a620 | [
"BSD-3-Clause"
] | null | null | null | examples/fixed_resolution.py | AnantTiwari-Naman/pyglet | 4774f2889057da95a78785a69372112931e6a620 | [
"BSD-3-Clause"
] | 1 | 2021-09-16T20:47:07.000Z | 2021-09-16T20:47:07.000Z | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Demonstrates one way of fixing the display resolution to a certain
size, but rendering to the full screen.
The method used in this example is:
1. Set the OpenGL viewport to the fixed resolution
2. Render the scene using any OpenGL functions (here, just a polygon)
3. Copy the framebuffer into a texture
4. Reset the OpenGL viewport to the window (full screen) size
5. Blit the texture to the framebuffer
Recent video cards could also render the scene directly to the texture
using EXT_framebuffer_object. (This is not demonstrated in this example).
'''
from pyglet.gl import *
import pyglet
class FixedResolution:
def __init__(self, window, width, height, filtered=False):
self.window = window
self.width = width
self.height = height
self._filtered = filtered
self._viewport = 0, 0, 0, 0, 0
self._calculate_viewport(self.window.width, self.window.height)
self._cam_x = 0
self._cam_y = 0
self.clear_color = 0, 0, 0, 1
self.texture = pyglet.image.Texture.create(width, height, rectangle=True)
if not filtered:
pyglet.image.Texture.default_min_filter = GL_NEAREST
pyglet.image.Texture.default_mag_filter = GL_NEAREST
glTexParameteri(self.texture.target, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(self.texture.target, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
def on_resize(w, h):
self._calculate_viewport(w, h)
self.window_w, self.window_h = w, h
self.window.on_resize = on_resize
def _calculate_viewport(self, new_screen_width, new_screen_height):
aspect_ratio = self.width / self.height
aspect_width = new_screen_width
aspect_height = aspect_width / aspect_ratio + 0.5
if aspect_height > new_screen_height:
aspect_height = new_screen_height
aspect_width = aspect_height * aspect_ratio + 0.5
if not self._filtered:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
self._viewport = (int((new_screen_width / 2) - (aspect_width / 2)), # x
int((new_screen_height / 2) - (aspect_height / 2)), # y
0, # z
int(aspect_width), # width
int(aspect_height)) # height
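        # Worked example (illustrative numbers): with a 320x180 target and a 1280x1024
        # window, aspect_height = 1280 / (320/180) + 0.5 = 720.5, which fits vertically,
        # so self._viewport becomes (0, 151, 0, 1280, 720) -- the fixed 16:9 image is
        # letterboxed with equal bars above and below.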
def __enter__(self):
glViewport(0, 0, self.width, self.height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, self.width, 0, self.height, -255, 255)
glMatrixMode(GL_MODELVIEW)
glTranslatef(self._cam_x, self._cam_y, 0)
def set_camera(self, x=0, y=0):
self._cam_x = -x
self._cam_y = -y
def __exit__(self, *unused):
win = self.window
buffer = pyglet.image.get_buffer_manager().get_color_buffer()
self.texture.blit_into(buffer, 0, 0, 0)
glViewport(0, 0, win.width, win.height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, win.width, 0, win.height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glClearColor(*self.clear_color)
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
self.texture.blit(*self._viewport)
def begin(self):
self.__enter__()
def end(self):
self.__exit__()
###################################
# Simple program using the Viewport:
###################################
window = pyglet.window.Window(960, 540, resizable=True)
# Use 320x180 fixed resolution to make the effect completely obvious. You
# can change this to a more reasonable value such as 960x540 here:
target_width, target_height = 320, 180
# Create an instance of the FixedResolution class:
viewport = FixedResolution(window, target_width, target_height, filtered=False)
def update(dt):
global rectangle
rectangle.rotation += dt * 10
@window.event
def on_draw():
# The viewport can be used as
# a context manager:
with viewport:
window.clear()
rectangle.draw()
# # Alternatively, you can do it manually:
# viewport.begin()
# window.clear()
# rectangle.draw()
# viewport.end()
# Create a simple Rectangle to show the effect
rectangle = pyglet.shapes.Rectangle(x=target_width/2, y=target_height/2, color=(200, 0, 0), width=100, height=100)
rectangle.anchor_position = 50, 50
# Schedule the update function at 60fps
pyglet.clock.schedule_interval(update, 1/60)
pyglet.app.run()
| 36.40678 | 114 | 0.653786 | 827 | 6,444 | 4.939541 | 0.339782 | 0.005386 | 0.022032 | 0.013219 | 0.162546 | 0.132681 | 0.097919 | 0.062179 | 0.062179 | 0.062179 | 0 | 0.021359 | 0.23712 | 6,444 | 176 | 115 | 36.613636 | 0.809601 | 0.419926 | 0 | 0.0875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.025 | 0 | 0.1625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87fbe8fbf2c8b44b8d6ae901bbe215fa1c471a12 | 1,303 | py | Python | 374.guess-number-higher-or-lower.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 374.guess-number-higher-or-lower.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 374.guess-number-higher-or-lower.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=374 lang=python3
#
# [374] Guess Number Higher or Lower
#
# https://leetcode.com/problems/guess-number-higher-or-lower/description/
#
# algorithms
# Easy (40.73%)
# Likes: 295
# Dislikes: 1414
# Total Accepted: 126.8K
# Total Submissions: 309.9K
# Testcase Example: '10\n6'
#
# We are playing the Guess Game. The game is as follows:
#
# I pick a number from 1 to n. You have to guess which number I picked.
#
# Every time you guess wrong, I'll tell you whether the number is higher or
# lower.
#
# You call a pre-defined API guess(int num) which returns 3 possible results
# (-1, 1, or 0):
#
#
# -1 : My number is lower
# 1 : My number is higher
# 0 : Congrats! You got it!
#
#
# Example :
#
#
#
# Input: n = 10, pick = 6
# Output: 6
#
#
#
#
# @lc code=start
# The guess API is already defined for you.
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num: int) -> int:
class Solution:
def guessNumber(self, n: int) -> int:
        # Binary search over [1, n] using the guess() API described above.
        l = 1
        r = n
        while l < r:
            mid = (l + r) // 2
            temp = guess(mid)
            if temp > 0:
                # The picked number is higher than mid.
                l = mid + 1
            elif temp == 0:
                return mid
            else:
                # The picked number is lower than mid.
                r = mid - 1
        return l
# @lc code=end
| 20.359375 | 80 | 0.57713 | 203 | 1,303 | 3.714286 | 0.492611 | 0.05305 | 0.05305 | 0.050398 | 0.098143 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055188 | 0.304682 | 1,303 | 63 | 81 | 20.68254 | 0.774834 | 0.676899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87fc9e85117bd3a9cb0493e789cbd27623a8ec69 | 6,294 | py | Python | graph-sage/dataset_utils.py | Manu-Fraile/Network-Representation-Learning | d84414c144cc6146d406e606ed5be8120d4244a9 | [
"MIT"
] | null | null | null | graph-sage/dataset_utils.py | Manu-Fraile/Network-Representation-Learning | d84414c144cc6146d406e606ed5be8120d4244a9 | [
"MIT"
] | null | null | null | graph-sage/dataset_utils.py | Manu-Fraile/Network-Representation-Learning | d84414c144cc6146d406e606ed5be8120d4244a9 | [
"MIT"
] | null | null | null | # author: Luca Marini
from stellargraph import StellarGraph
from stellargraph import datasets
import networkx as nx # https://networkx.org/documentation/stable/tutorial.html
import numpy as np
import pandas as pd
import json
from networkx.readwrite import json_graph
from LoadGraphs.load_graph_from_edges import get_node_features_from_edges
def get_graph_from_pickle(pickle_path, get_node_features=False, node_features=None):
G = nx.read_gpickle(pickle_path)
print(nx.info(G))
if get_node_features:
num_nodes = len(list(nx.nodes(G)))
node_features = np.eye(num_nodes, dtype=int)
node_features_df = pd.DataFrame(data=node_features)
node_features_df.index += 1
node_features_df.columns += 1
G = StellarGraph.from_networkx(G, node_features=node_features_df)
else:
G = StellarGraph.from_networkx(G, node_features=node_features)
return G
def get_node_ids(node_ids_path):
node_ids_df = pd.read_csv(node_ids_path, index_col=False, header=None)
node_ids = node_ids_df.set_index(0)[1]
node_ids.name = "node_ids"
# print(node_ids)
return node_ids
def get_dataset(dataset_name):
node_ids = []
if dataset_name == "big_cora":
G = get_graph_from_pickle("../data/subelj_cora/cora_big_graph_dir.gpickle", get_node_features=True)
node_ids = get_node_ids("../data/subelj_cora/data/group-edges.csv")
elif dataset_name == "small_cora":
#G = get_graph_from_pickle("../data/Cora-dataset/cora_graph_dir_lp.gpickle", get_node_features=True)
#node_ids = get_node_ids("../data/Cora-dataset/data/group-edges.csv")
dataset = datasets.Cora()
G, node_ids = dataset.load(directed=True)
elif dataset_name == "pubmed":
G = get_graph_from_pickle("../data/PubMed/pubmed_graph_dir_lp.gpickle", get_node_features=True)
node_ids = get_node_ids("../data/PubMed/data/group-edges.csv")
elif dataset_name == "pubmed_undir":
G = get_graph_from_pickle("../data/pubmed-dataset/pubmed_graph_undir_lp.gpickle", get_node_features=True)
node_ids = get_node_ids("../data/pubmed-dataset/data/group-edges.csv")
# dataset = datasets.PubMedDiabetes()
# G, node_ids = dataset.load()
elif dataset_name == "blog_catalog":
G = get_graph_from_pickle("../data/BlogCatalog-dataset/blog_catalog_graph_lp.gpickle", get_node_features=True)
node_ids = get_node_ids("../data/BlogCatalog-dataset/data/group-edges.csv")
elif dataset_name == "youtube":
G = get_graph_from_pickle("../data/YouTube-dataset/youtube_graph.gpickle", get_node_features=True)
node_ids = get_node_ids("../data/YouTube-dataset/data/group-edges.csv")
elif dataset_name == "flickr":
G = get_graph_from_pickle("../data/Flickr-dataset/flickr_graph.gpickle", get_node_features=True)
node_ids = get_node_ids("../data/Flickr-dataset/data/group-edges.csv")
elif dataset_name == "twitter":
node_features, node_ids = get_node_features_from_edges("../data/Twitter-dataset/data/out.munmun_twitter_social",
"%", " ", directed=True)
G = get_graph_from_pickle("../data/Twitter-dataset/twitter_graph_dir_lp.gpickle", get_node_features=False,
node_features=node_features)
elif dataset_name == "ppi":
node_features, node_ids = get_node_features_from_edges("../data/PPI-dataset/PP-Pathways_ppi.csv", "#", ",",
directed=False)
G = get_graph_from_pickle("../data/PPI-dataset/ppi_graph_lp.gpickle", get_node_features=False,
node_features=node_features)
elif dataset_name == "astro-ph":
node_features, node_ids = get_node_features_from_edges("../data/AstroPh-dataset/ca-AstroPh.txt", "#", "\t",
directed=False)
G = get_graph_from_pickle("../data/AstroPh-dataset/astro_graph_lp.gpickle", get_node_features=False,
node_features=node_features)
elif dataset_name == "epinion":
node_features, node_ids = get_node_features_from_edges("../data/Epinions-dataset/soc-Epinions1.txt", "#", "\t",
directed=True)
G = get_graph_from_pickle("../data/Epinions-dataset/epinions_graph_dir_lp.gpickle", get_node_features=False,
node_features=node_features)
elif dataset_name == "reddit":
prefix = "../data/reddit/reddit"
G_data = json.load(open(prefix + "-G.json"))
G = json_graph.node_link_graph(G_data)
if isinstance(G.nodes()[0], int):
conversion = lambda n: int(n)
else:
conversion = lambda n: n
G = get_graph_from_pickle("../data/reddit/reddit_graph.gpickle", get_node_features=True)
class_map = json.load(open(prefix + "-class_map.json"))
if isinstance(list(class_map.values())[0], list):
lab_conversion = lambda n: n
else:
lab_conversion = lambda n: int(n)
class_map = {conversion(k): lab_conversion(v) for k, v in class_map.items()}
node_ids = class_map.values()
elif dataset_name == "dblp-ci":
node_features, node_ids = get_node_features_from_edges("../data/DBLP-Ci-dataset/dblp-cite.edges", "%", ",",
directed=True)
G = get_graph_from_pickle("../data/DBLP-Ci-dataset/dblp-ci_graph_dir_lp.gpickle", get_node_features=True,
node_features=node_features)
elif dataset_name == "dblp-au":
node_features, node_ids = get_node_features_from_edges("../data/DBLP-Au-dataset/com-dblp.ungraph.txt", "#",
"\t", directed=True)
G = get_graph_from_pickle("../data/DBLP-Au-dataset/dblp-au_graph_lp.gpickle", get_node_features=True,
node_features=node_features)
else:
raise Exception('The specified dataset is not available')
print(G.info())
nodes = list(G.nodes())
return G, node_ids, nodes
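# Hedged usage sketch (illustrative; assumes the corresponding data files exist under ../data):
#   G, node_ids, nodes = get_dataset("small_cora")
#   print(len(nodes), "nodes,", len(set(node_ids)), "classes")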
| 54.258621 | 120 | 0.640928 | 816 | 6,294 | 4.612745 | 0.156863 | 0.162593 | 0.091658 | 0.071732 | 0.561105 | 0.519129 | 0.477949 | 0.406482 | 0.344315 | 0.304198 | 0 | 0.001464 | 0.240388 | 6,294 | 115 | 121 | 54.730435 | 0.785819 | 0.051319 | 0 | 0.14 | 0 | 0 | 0.22321 | 0.191514 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03 | false | 0 | 0.08 | 0 | 0.14 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87fe502b63b11f160583b09b733189f59524094d | 1,338 | py | Python | Audio Trimmer/main.py | Vivek-Kolhe/Random-Scripts | fbe4249f717033de3f8c8ef13f90fbd6b4d2b1a4 | [
"MIT"
] | 1 | 2020-08-24T08:13:55.000Z | 2020-08-24T08:13:55.000Z | Audio Trimmer/main.py | Vivek-Kolhe/Random-Scripts | fbe4249f717033de3f8c8ef13f90fbd6b4d2b1a4 | [
"MIT"
] | null | null | null | Audio Trimmer/main.py | Vivek-Kolhe/Random-Scripts | fbe4249f717033de3f8c8ef13f90fbd6b4d2b1a4 | [
"MIT"
] | null | null | null | import argparse
import vapoursynth as vs
from acsuite import eztrim
def main():
parser = argparse.ArgumentParser(description = "A simple command line utility for trimming audio losslessly.")
parser.add_argument("-e", "--end_frame", type = int, nargs = 1, metavar = "end_frame", default = None, help = "last frame of audio for trimmed audio.")
required_args = parser.add_argument_group("required named arguments")
required_args.add_argument("-s", "--start_frame", type = int, nargs = 1, metavar = "start_frame", default = None, help = "frame of audio to start trimming from.", required = True)
required_args.add_argument("-v", "--video", type = str, nargs = 1, metavar = "video_file ", default = None, help = "file path for the video file.", required = True)
required_args.add_argument("-a", "--audio", type = str, nargs = 1, metavar = "audio_file", default = None, help = "file path for the audio file.", required = True)
args = parser.parse_args()
core = vs.core
_end_frame = args.end_frame[0] if args.end_frame else None
_start_frame = args.start_frame[0]
video_file, audio_file = args.video[0], args.audio[0]
src = core.lsmas.LWLibavSource(video_file)
eztrim(src, [(_start_frame, _end_frame)], audio_file)
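# Hedged usage sketch (file names below are placeholders, not from this repository):
#   python main.py -s 1000 -e 2000 -v episode.mkv -a episode.flac
# cuts episode.flac losslessly to the span covered by video frames 1000-2000 of episode.mkv.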
if __name__ == "__main__":
main() | 51.461538 | 184 | 0.678625 | 184 | 1,338 | 4.717391 | 0.336957 | 0.0553 | 0.059908 | 0.079493 | 0.260369 | 0.214286 | 0.076037 | 0.076037 | 0 | 0 | 0 | 0.007407 | 0.192825 | 1,338 | 26 | 185 | 51.461538 | 0.796296 | 0 | 0 | 0 | 0 | 0 | 0.238204 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e205979c562adb192764537fc0552a513eb438ad | 6,350 | py | Python | policy2019.py | donghun2018/adclick-simulator-v2 | ade886e9dcbde9fcea218a19f0130cc09f81e55e | [
"MIT"
] | null | null | null | policy2019.py | donghun2018/adclick-simulator-v2 | ade886e9dcbde9fcea218a19f0130cc09f81e55e | [
"MIT"
] | null | null | null | policy2019.py | donghun2018/adclick-simulator-v2 | ade886e9dcbde9fcea218a19f0130cc09f81e55e | [
"MIT"
] | null | null | null | from ssa_sim_v2.policies.policy import Policy
from ssa_sim_v2.simulator.action import Action, ActionSet
from ssa_sim_v2.simulator.attribute import AttrSet
from ssa_sim_v2.simulator.state import StateSet
class Policy2019(Policy):
"""
Base class for 2019 simulator policy (for students)
:ivar StateSet state_set: State set -- an object responsible for handling states.
:ivar ActionSet action_set: Action set -- an object responsible for handling actions.
:ivar AttrSet attr_set: Attribute set -- an object responsible for handling attributes.
"""
def __init__(self, state_set, action_set, attr_set, seed=12345, save_history=False):
"""
:param StateSet state_set: State set -- an object responsible for handling states.
:param ActionSet action_set: Action set -- an object responsible for handling actions.
:param AttrSet attr_set: Attribute set -- an object responsible for handling attributes.
:param int seed: Seed for the random number generator.
:param bool save_history: Indicates if policy history should be saved
in the history attribute.
"""
super().__init__(state_set, action_set, attr_set, seed, save_history)
# Add any additional class variables here, e.g.:
# self.my_variable = 1.0
# self.my_variable_2 = [0.3, 0.4, 0.5]
def initialize(self, params):
"""
Initializes the policy with given parameters.
:param dict params: Parameters to be set in the policy.
"""
super().initialize(params)
# Here you can use the following default params to initialize your policy
# self.stp.cvr_default -- the average historical conversion rate,
# you can expect the observed average conversion rate to be similar,
# self.stp.rpv_default -- the average historical value per conversion,
# you can expect the observed average conversion rate to be similar.
# You can also use these parameters directly in the learn and act methods.
# You can also delete this method and/or not use it at all.
def learn(self, state, data):
"""
A method that allows the policy to learn based on observations provided
by the simulator.
:param StateSet.State state: The state in the previous turn.
:param Dict data: Dictionary with the following fields:
* action -- Your original action used in the previous turn.
* effective_action -- Actual action used by the simulator.
The original action may need to be adjusted (base bid or modifiers
clipped to bounds) to be valid.
* reward -- Overall reward obtained in the previous turn.
* info -- A dictionary with overall data for the policy:
* auctions -- number of auctions,
* clicks -- number of clicks,
* conversions -- number of conversions,
* click_probability -- click probability (clicks / auctions),
* cvr -- conversion rate (conversions / clicks),
* rpc -- revenue per click (revenue / clicks),
* cpc -- cost per click (cost / clicks),
* rpv -- revenue per conversion (revenue / conversions),
* revenue -- revenue from all conversions,
* cost -- cost for all clicks,
* profit -- revenue - cost.
* attr_info: A dict with data per segment, e.g.
{
"gender": {"M": info_for_gender_M, "F": info_for_gender_F, ...},
"age": {"18-24": info_for_age_18-24, "25-34": info_for_age_25-34, ...},
...
},
where info_for... has the same form as info but contains data
only for a given segment.
"""
pass
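        # Hedged sketch (illustration only, not part of this base policy): a learning
        # policy could, for instance, keep a running conversion-rate estimate from the
        # fields documented above, e.g.
        #   self.cvr_estimate = 0.9 * self.cvr_estimate + 0.1 * data["info"]["cvr"]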
def act(self, state, data=None):
"""
Returns an action given state.
:return: An action chosen by the policy.
"""
# This example method returns a random bid in the range of [min_bid, max_bid]
b_max = self.action_set.max_bid
b_min = self.action_set.min_bid
mod_max = self.action_set.max_mod # maximum valid value of a modifier
mod_min = self.action_set.min_mod # minimum valid value of a modifier
bid = self.rng.uniform(low=b_min, high=b_max) # note: use self.rng instead of numpy.random
action_inc = Action(bid) # note: underspecified action (modifiers not defined at all)
# action_inc = Action(bid, {'gender': {'M': 1.1, 'F': 1.2}}) # underspecified modifiers
action = self.action_set.validify_action(action_inc) # this function fills in unspecified modifiers
# Example how you can access provided default values
if hasattr(self.stp, "cvr_default"):
print(self.stp.cvr_default)
if hasattr(self.stp, "rpv_default"):
print(self.stp.rpv_default)
# The following way you can make the simulator save your policy
# variable values into the output csv file.
# Save is made after both learn and act methods are invoked.
self.history.update({"bid": action.bid})
return action
def test_01_setup():
"""
    Sample initialization of the attribute, state, and action spaces.
:return:
"""
names = ['gender', 'age']
vals = {'gender': ['M', 'F', 'U'],
'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']}
attr_set = AttrSet(names, vals)
state_set = StateSet(['date', 'how'], ['discrete', 'discrete'],
[['2018-01-01', '2018-01-02'], list(range(168))])
act_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1)
return attr_set, state_set, act_set
def test_one_policy_run():
# init attr, state, action space
attr_set, state_set, act_set = test_01_setup()
# get first state
s = state_set.make_state({'date': '2018-01-01', 'how': 12})
# initialize policy
pol = Policy2019(state_set, act_set, attr_set, seed=9292)
pol.initialize({"stp": {"cvr_default": 0.02, "rpv_default": 300.0}})
a = pol.act(s)
print(a)
if __name__ == "__main__":
test_01_setup()
test_one_policy_run()
| 39.937107 | 120 | 0.61874 | 840 | 6,350 | 4.536905 | 0.297619 | 0.023091 | 0.017318 | 0.034637 | 0.224875 | 0.15429 | 0.143269 | 0.128575 | 0.128575 | 0.128575 | 0 | 0.028855 | 0.285039 | 6,350 | 158 | 121 | 40.189873 | 0.810573 | 0.574961 | 0 | 0 | 0 | 0 | 0.076241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0.022727 | 0.090909 | 0 | 0.295455 | 0.068182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e207de839730697380b5b88aad6fb9573801f668 | 3,077 | py | Python | database.py | mkavganesh/Ookla-Speedtest.net-Crawler | 180af0fd57c651fc32f5cc0a38f2be36e11a8163 | [
"Apache-2.0"
] | null | null | null | database.py | mkavganesh/Ookla-Speedtest.net-Crawler | 180af0fd57c651fc32f5cc0a38f2be36e11a8163 | [
"Apache-2.0"
] | null | null | null | database.py | mkavganesh/Ookla-Speedtest.net-Crawler | 180af0fd57c651fc32f5cc0a38f2be36e11a8163 | [
"Apache-2.0"
] | null | null | null | from mysql.connector import MySQLConnection,Error
from configparser import ConfigParser
class Database:
"""Database Related Operation
Methods
--------
1.insert(data)
2.read_db_config(filename,section)
    Object Creation
--------------
db=Database(table_name='crawler',fields=fields)
Parameters:
----------
table_name : name of table
fields : dictionary of field_name and their_type
filename : configuration filename
section : database section
"""
def __init__(self,table_name=None,fields=None,filename='config.ini',section='mysql'):
"""
Initialize the connection with the database and make the required table.
:param table_name: name of the table to be inserted
:param fields: a dictionary of field_name and their_type to be inserted in the table table_name
:optional param filename: name of the configuration file
:optional param section: section of database configuration
"""
self.table_name = table_name
db_config = self.read_db_config(filename,section)
try :
self.conn = MySQLConnection(**db_config)
except Error as error:
print(error)
return
self.cursor = self.conn.cursor()
if table_name is not None and fields is not None:
self.cursor.execute(f"show tables like '{self.table_name}'")
output = self.cursor.fetchone()
            # if no such table exists, create one
if output is None:
fields_list = [f'{x} {fields[x]}' for x in fields.keys()]
self.cursor.execute(f"CREATE TABLE {self.table_name}({','.join(fields_list)})")
print('Table successfully created')
self.conn.commit()
def insert(self,data):
"""
This function is used to insert data into the table.
        :param data: dictionary containing the pairwise data {field_name: value}
"""
values = [data[x] for x in data.keys()]
s = ('%s,'*len(data))[:-1]
insert_statement = f"INSERT INTO {self.table_name} values({s})"
self.cursor.execute(insert_statement,values)
self.conn.commit()
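    # Hedged usage sketch (illustrative field names/values, continuing the example in
    # the class docstring):
    #   db = Database(table_name='crawler', fields={'url': 'VARCHAR(255)', 'speed': 'FLOAT'})
    #   db.insert({'url': 'http://example.com', 'speed': 42.0})
    # The data keys (and their order) should correspond to the fields the table was created with.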
def read_db_config(self,filename,section):
"""
Read database configuration from the file and return dictionary object
:param filename: name of the configuration file
:param section: section of database configuration
:return: a dictionary of database parameters
"""
parser = ConfigParser()
parser.read(filename)
db = {}
if parser.has_section(section):
items = parser.items(section)
for item in items:
db[item[0]] = item[1]
else:
raise Exception(f'{section} not found in the {filename} file')
return db
def __del__(self):
self.cursor.close()
self.conn.close()
| 33.445652 | 103 | 0.588235 | 357 | 3,077 | 4.966387 | 0.305322 | 0.055838 | 0.036661 | 0.022561 | 0.159052 | 0.128596 | 0.081218 | 0 | 0 | 0 | 0 | 0.002376 | 0.316217 | 3,077 | 91 | 104 | 33.813187 | 0.840304 | 0.358466 | 0 | 0.05 | 0 | 0 | 0.131416 | 0.023689 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.225 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e20b052609be50b80711e3a8c5cbc3346a18138b | 1,857 | py | Python | 3. Algorithms on Graphs/week2_graph_decomposition/3_intersection_reachability.py | vishweshwartyagi/Data-Structures-and-Algorithms-UCSD | de942b3a0eb2bf56f949f47c297fad713aa81489 | [
"MIT"
] | null | null | null | 3. Algorithms on Graphs/week2_graph_decomposition/3_intersection_reachability.py | vishweshwartyagi/Data-Structures-and-Algorithms-UCSD | de942b3a0eb2bf56f949f47c297fad713aa81489 | [
"MIT"
] | null | null | null | 3. Algorithms on Graphs/week2_graph_decomposition/3_intersection_reachability.py | vishweshwartyagi/Data-Structures-and-Algorithms-UCSD | de942b3a0eb2bf56f949f47c297fad713aa81489 | [
"MIT"
] | null | null | null | #Uses python3
import sys
from queue import LifoQueue
sys.setrecursionlimit(200000)
def explore(u, visited, adj, q, message, n_compartments):
# previsit
if message == 'running on un-reversed graph':
visited[u] = n_compartments
if message == 'running on reversed graph':
visited[u] = 1
for v in adj[u]:
if not visited[v]:
if message == 'running on reversed graph':
explore(v, visited, adj, q, message, 0)
else:
explore(v, visited, adj, q, message, n_compartments)
# postvisit
if message == 'running on reversed graph':
q.put(u)
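# The main block below is Kosaraju's algorithm: a first DFS pass over the reversed graph
# pushes vertices onto a stack in post-order; a second DFS pass over the original graph,
# popping start vertices from that stack, discovers one strongly connected component per
# unvisited start vertex.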
if __name__ == '__main__':
n, m = map(int, input().split())
data = []
for _ in range(m):
a, b = map(int, input().split())
data.append(a)
data.append(b)
data = list(zip(data[0::2], data[1::2]))
visited = [0 for _ in range(n)]
# reversed graph
adj = [[] for _ in range(n)]
for (a, b) in data:
adj[b-1].append(a-1)
# largest post order of reversed graph on top
q = LifoQueue(n)
# run dfs on reversed graph
for i in range(n):
if not visited[i]:
explore(i, visited, adj, q, 'running on reversed graph', 0)
    # number of strongly connected components
n_compartments = 0
    # visited will now contain the respective component number
visited = [0 for _ in range(n)]
# un-reverse the reversed graph
adj = [[] for _ in range(n)]
for (a,b) in data:
adj[a-1].append(b-1)
    # run dfs on the original (no longer reversed) graph to mark strongly connected components
while not q.empty():
u = q.get()
if not visited[u]:
n_compartments += 1
explore(u, visited, adj, q, 'running on un-reversed graph', n_compartments)
print(n_compartments)
| 26.913043 | 88 | 0.583199 | 261 | 1,857 | 4.072797 | 0.302682 | 0.122295 | 0.05174 | 0.067733 | 0.417686 | 0.290687 | 0.07714 | 0.07714 | 0.07714 | 0.07714 | 0 | 0.016936 | 0.300485 | 1,857 | 68 | 89 | 27.308824 | 0.801386 | 0.176629 | 0 | 0.209302 | 0 | 0 | 0.107966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.046512 | 0 | 0.069767 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e20eb92e2a4ea197095150248b39069ce90cdf8c | 4,948 | py | Python | crypt_interface/driver_interfaces/win/kernel32_interface.py | xyder/crypt-interface | ade60389dab5e47c786ccc9fee783b14f338e207 | [
"MIT"
] | null | null | null | crypt_interface/driver_interfaces/win/kernel32_interface.py | xyder/crypt-interface | ade60389dab5e47c786ccc9fee783b14f338e207 | [
"MIT"
] | null | null | null | crypt_interface/driver_interfaces/win/kernel32_interface.py | xyder/crypt-interface | ade60389dab5e47c786ccc9fee783b14f338e207 | [
"MIT"
] | null | null | null | import ctypes
from ctypes import wintypes
from crypt_interface.driver_interfaces.exceptions import DriverException
from crypt_interface.driver_interfaces.win import win_constants
def ctl_code(device_type, func, method, access):
""" Equivalent of:
    CTL_CODE(DEVICE, FUNC, METHOD, ACCESS)
"""
return (device_type << 16) | (access << 14) | (func << 2) | method
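# Hedged example (values shown for illustration): the standard storage IOCTL
# IOCTL_STORAGE_GET_DEVICE_NUMBER is CTL_CODE(0x2D, 0x0420, METHOD_BUFFERED, FILE_ANY_ACCESS),
# so ctl_code(0x2D, 0x0420, 0, 0) should evaluate to 0x2D1080.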
def configure_create_file_function():
""" Initializez the CreateFileW function signature """
create_file_func = ctypes.windll.kernel32.CreateFileW
create_file_func.argtypes = [
# (in) LPCTSTR lpFileName
wintypes.LPWSTR,
# (in) DWORD dwDesiredAccess
wintypes.DWORD,
# (in) DWORD dwShareMode
wintypes.DWORD,
# (in, opt) LPSECURITY_ATTRIBUTES lpSecurityAttributes
win_constants.LPSECURITY_ATTRIBUTES,
# (in) DWORD dwCreationDisposition
wintypes.DWORD,
# (in) DWORD dwFlagsAndAttributes
wintypes.DWORD,
# (in, opt) HANDLE hTemplateFile
wintypes.HANDLE]
create_file_func.restype = wintypes.HANDLE
def configure_deviceiocontrol_function():
""" Initializez the DeviceIoControl function signature """
device_io_ctrl_func = ctypes.windll.kernel32.DeviceIoControl
device_io_ctrl_func.argtypes = [
# (in) HANDLE hDevice
wintypes.HANDLE,
# (in) DWORD dwIoControlCode
wintypes.DWORD,
# (in, opt) LPVOID lpInBuffer
wintypes.LPVOID,
# (in) DWORD nInBufferSize
wintypes.DWORD,
# (out, opt) LPVOID lpOutBuffer
wintypes.LPVOID,
# (in) DWORD nOutBufferSize
wintypes.DWORD,
# (out, opt) LPDWORD lpBytesReturned
win_constants.LPDWORD,
# (in, out, opt) LPOVERLAPPED lpOverlapped
win_constants.LPOVERLAPPED]
device_io_ctrl_func.restype = wintypes.BOOL
def create_file(filename, access, mode, creation, flags):
""" Interface for CreateFile function
Documentation:
http://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
:param filename: the name of the file/device to be created/opened
:param access: file/device request access rights
:param mode: file/device request sharing mode
:param creation: action to take if file/device already exists or not
:param flags: the file/device attributes
"""
create_func = ctypes.windll.kernel32.CreateFileW
handle = create_func(filename, access.value, mode.value, win_constants.NULL,
creation.value, flags, win_constants.NULL)
return wintypes.HANDLE(handle)
def device_ioctl(device, control_code, in_buffer,
in_size, out_buffer, out_size):
""" Interface for DeviceIoControl function
Documentation
http://msdn.microsoft.com/en-us/library/aa363216(v=vs.85).aspx
"""
device_ioctl_func = ctypes.windll.kernel32.DeviceIoControl
# allocate a DWORD, and take its reference
returned = wintypes.DWORD(0)
pointer_returned = ctypes.byref(returned)
status = device_ioctl_func(device, control_code, in_buffer, in_size,
out_buffer, out_size, pointer_returned, None)
return status, returned
class DeviceIoControl(object):
""" Context Manager for DeviceIOControl """
def __init__(self, path):
self.path = path
self._handle = None
def _validate_handle(self):
""" Validates the device/file handle """
if self._handle is None:
raise DriverException('No file handle')
if self._handle.value == win_constants.INVALID_HANDLE.value:
raise DriverException(
'Failed to open {}. GetLastError(): {}'.format(
self.path, ctypes.windll.kernel32.GetLastError()))
def ioctl(self, control_code, in_buffer, in_size, out_buffer, out_size):
""" Calls the DeviceIOControl function
:param control_code: the control code of the method
:param in_buffer: input buffer
:param in_size: size of the input buffer
:param out_buffer: output buffer
:param out_size: size of the output buffer
:return: the return value of the DeviceIOControl call, a tuple which
contains the returned bytes count as first element
"""
self._validate_handle()
return device_ioctl(self._handle, control_code, in_buffer,
in_size, out_buffer, out_size)
def __enter__(self):
self._handle = create_file(
self.path,
win_constants.GenericAccessRights.READ,
win_constants.ShareMode.READ_WRITE,
win_constants.CreationDisposition.OPEN_EXISTING,
0)
self._validate_handle()
return self
def __exit__(self, typ, val, tb):
self._validate_handle()
ctypes.windll.kernel32.CloseHandle(self._handle)
| 33.432432 | 86 | 0.663905 | 555 | 4,948 | 5.727928 | 0.295496 | 0.037748 | 0.037748 | 0.030198 | 0.169865 | 0.088078 | 0.088078 | 0.088078 | 0.088078 | 0.055363 | 0 | 0.009462 | 0.252425 | 4,948 | 147 | 87 | 33.659864 | 0.849959 | 0.324778 | 0 | 0.173913 | 0 | 0 | 0.016196 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144928 | false | 0 | 0.057971 | 0 | 0.289855 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e21486410596234d73d3df93f2388dab36496e9a | 19,121 | py | Python | neutron/db/ipam_non_pluggable_backend.py | p0i0/openstack-neutron | df2ee28ae9a43cc511482bd6ece5396eb1288814 | [
"Apache-2.0"
] | null | null | null | neutron/db/ipam_non_pluggable_backend.py | p0i0/openstack-neutron | df2ee28ae9a43cc511482bd6ece5396eb1288814 | [
"Apache-2.0"
] | null | null | null | neutron/db/ipam_non_pluggable_backend.py | p0i0/openstack-neutron | df2ee28ae9a43cc511482bd6ece5396eb1288814 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import random
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_db import exception as db_exc
from oslo_log import log as logging
from sqlalchemy import and_
from sqlalchemy.orm import exc
from neutron._i18n import _
from neutron.common import constants as n_const
from neutron.common import ipv6_utils
from neutron.db import ipam_backend_mixin
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron.ipam import requests as ipam_req
from neutron.ipam import subnet_alloc
LOG = logging.getLogger(__name__)
class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
@staticmethod
def _generate_ip(context, subnets, filtered_ips=None, prefer_next=False):
"""Generate an IP address.
The IP address will be generated from one of the subnets defined on
the network.
"""
filtered_ips = filtered_ips or []
subnet_id_list = [subnet['id'] for subnet in subnets]
pool_qry = context.session.query(models_v2.IPAllocationPool)
pool_qry = pool_qry.filter(
models_v2.IPAllocationPool.subnet_id.in_(subnet_id_list))
allocation_qry = context.session.query(models_v2.IPAllocation)
allocation_qry = allocation_qry.filter(
models_v2.IPAllocation.subnet_id.in_(subnet_id_list))
ip_allocations = collections.defaultdict(netaddr.IPSet)
for ipallocation in allocation_qry:
subnet_ip_allocs = ip_allocations[ipallocation.subnet_id]
subnet_ip_allocs.add(netaddr.IPAddress(ipallocation.ip_address))
ip_pools = collections.defaultdict(netaddr.IPSet)
for ip_pool in pool_qry:
subnet_ip_pools = ip_pools[ip_pool.subnet_id]
subnet_ip_pools.add(netaddr.IPRange(ip_pool.first_ip,
ip_pool.last_ip))
for subnet_id in subnet_id_list:
subnet_ip_pools = ip_pools[subnet_id]
subnet_ip_allocs = ip_allocations[subnet_id]
filter_set = netaddr.IPSet()
for ip in filtered_ips:
filter_set.add(netaddr.IPAddress(ip))
av_set = subnet_ip_pools.difference(subnet_ip_allocs)
av_set = av_set.difference(filter_set)
av_set_size = av_set.size
if av_set_size == 0:
continue
# Compute a window size, select an index inside the window, then
# select the IP address at the selected index within the window
if prefer_next:
window = 1
else:
window = min(av_set_size, 10)
ip_index = random.randint(1, window)
candidate_ips = list(itertools.islice(av_set, ip_index))
if candidate_ips:
allocated_ip = candidate_ips[-1]
return {'ip_address': str(allocated_ip),
'subnet_id': subnet_id}
raise n_exc.IpAddressGenerationFailure(
net_id=subnets[0]['network_id'])
@staticmethod
def _check_unique_ip(context, network_id, subnet_id, ip_address):
"""Validate that the IP address on the subnet is not in use."""
ip_qry = context.session.query(models_v2.IPAllocation)
try:
ip_qry.filter_by(network_id=network_id,
subnet_id=subnet_id,
ip_address=ip_address).one()
except exc.NoResultFound:
return True
return False
def save_allocation_pools(self, context, subnet, allocation_pools):
for pool in allocation_pools:
first_ip = str(netaddr.IPAddress(pool.first, pool.version))
last_ip = str(netaddr.IPAddress(pool.last, pool.version))
ip_pool = models_v2.IPAllocationPool(subnet=subnet,
first_ip=first_ip,
last_ip=last_ip)
context.session.add(ip_pool)
ip_range = models_v2.IPAvailabilityRange(
ipallocationpool=ip_pool,
first_ip=first_ip,
last_ip=last_ip)
context.session.add(ip_range)
def allocate_ips_for_port_and_store(self, context, port, port_id):
network_id = port['port']['network_id']
ips = self._allocate_ips_for_port(context, port)
if ips:
for ip in ips:
ip_address = ip['ip_address']
subnet_id = ip['subnet_id']
self._store_ip_allocation(context, ip_address, network_id,
subnet_id, port_id)
return ips
def update_port_with_ips(self, context, host, db_port, new_port, new_mac):
changes = self.Changes(add=[], original=[], remove=[])
# Check if the IPs need to be updated
network_id = db_port['network_id']
if 'fixed_ips' in new_port:
original = self._make_port_dict(db_port, process_extensions=False)
changes = self._update_ips_for_port(
context, network_id, host,
original["fixed_ips"], new_port['fixed_ips'],
original['mac_address'], db_port['device_owner'])
# Expire the fixed_ips of db_port in current transaction, because
# it will be changed in the following operation and the latest
# data is expected.
context.session.expire(db_port, ['fixed_ips'])
# Update ips if necessary
for ip in changes.add:
IpamNonPluggableBackend._store_ip_allocation(
context, ip['ip_address'], network_id,
ip['subnet_id'], db_port.id)
self._update_db_port(context, db_port, new_port, network_id, new_mac)
return changes
def _test_fixed_ips_for_port(self, context, network_id, fixed_ips,
device_owner, subnets):
"""Test fixed IPs for port.
Check that configured subnets are valid prior to allocating any
IPs. Include the subnet_id in the result if only an IP address is
configured.
:raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork,
InvalidIpForSubnet
"""
fixed_ip_set = []
for fixed in fixed_ips:
subnet = self._get_subnet_for_fixed_ip(context, fixed, subnets)
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
if ('ip_address' in fixed and
subnet['cidr'] != n_const.PROVISIONAL_IPV6_PD_PREFIX):
# Ensure that the IP's are unique
if not IpamNonPluggableBackend._check_unique_ip(
context, network_id,
subnet['id'], fixed['ip_address']):
raise n_exc.IpAddressInUse(net_id=network_id,
ip_address=fixed['ip_address'])
if (is_auto_addr_subnet and
device_owner not in
constants.ROUTER_INTERFACE_OWNERS):
msg = (_("IPv6 address %(address)s can not be directly "
"assigned to a port on subnet %(id)s since the "
"subnet is configured for automatic addresses") %
{'address': fixed['ip_address'],
'id': subnet['id']})
raise n_exc.InvalidInput(error_message=msg)
fixed_ip_set.append({'subnet_id': subnet['id'],
'ip_address': fixed['ip_address']})
else:
# A scan for auto-address subnets on the network is done
# separately so that all such subnets (not just those
# listed explicitly here by subnet ID) are associated
# with the port.
if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or
not is_auto_addr_subnet):
fixed_ip_set.append({'subnet_id': subnet['id']})
self._validate_max_ips_per_port(fixed_ip_set, device_owner)
return fixed_ip_set
def _allocate_fixed_ips(self, context, fixed_ips, mac_address,
prefer_next=False):
"""Allocate IP addresses according to the configured fixed_ips."""
ips = []
# we need to start with entries that asked for a specific IP in case
# those IPs happen to be next in the line for allocation for ones that
# didn't ask for a specific IP
fixed_ips.sort(key=lambda x: 'ip_address' not in x)
allocated_ips = []
for fixed in fixed_ips:
subnet = self._get_subnet(context, fixed['subnet_id'])
is_auto_addr = ipv6_utils.is_auto_address_subnet(subnet)
if 'ip_address' in fixed:
allocated_ips.append(fixed['ip_address'])
ips.append({'ip_address': fixed['ip_address'],
'subnet_id': fixed['subnet_id']})
# Only subnet ID is specified => need to generate IP
# from subnet
else:
if is_auto_addr:
ip_address = self._calculate_ipv6_eui64_addr(context,
subnet,
mac_address)
ips.append({'ip_address': ip_address.format(),
'subnet_id': subnet['id']})
else:
subnets = [subnet]
# IP address allocation
result = self._generate_ip(context, subnets, allocated_ips,
prefer_next)
allocated_ips.append(result['ip_address'])
ips.append({'ip_address': result['ip_address'],
'subnet_id': result['subnet_id']})
return ips
def _update_ips_for_port(self, context, network_id, host, original_ips,
new_ips, mac_address, device_owner):
"""Add or remove IPs from the port."""
added = []
changes = self._get_changed_ips_for_port(context, original_ips,
new_ips, device_owner)
subnets = self._ipam_get_subnets(
context, network_id=network_id, host=host)
# Check if the IP's to add are OK
to_add = self._test_fixed_ips_for_port(context, network_id,
changes.add, device_owner,
subnets)
if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
to_add += self._update_ips_for_pd_subnet(
context, subnets, changes.add)
for ip in changes.remove:
LOG.debug("Port update. Hold %s", ip)
IpamNonPluggableBackend._delete_ip_allocation(context,
network_id,
ip['subnet_id'],
ip['ip_address'])
if to_add:
LOG.debug("Port update. Adding %s", to_add)
added = self._allocate_fixed_ips(context, to_add, mac_address)
return self.Changes(add=added,
original=changes.original,
remove=changes.remove)
def _allocate_ips_for_port(self, context, port):
"""Allocate IP addresses for the port.
If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
addresses for the port. If port['fixed_ips'] contains an IP address or
a subnet_id then allocate an IP address accordingly.
"""
p = port['port']
subnets = self._ipam_get_subnets(context,
network_id=p['network_id'],
host=p.get(portbindings.HOST_ID),
service_type=p.get('device_owner'))
v4, v6_stateful, v6_stateless = self._classify_subnets(
context, subnets)
# preserve previous behavior of DHCP ports choosing start of pool
prefer_next = p['device_owner'] == constants.DEVICE_OWNER_DHCP
fixed_configured = p['fixed_ips'] is not constants.ATTR_NOT_SPECIFIED
if fixed_configured:
configured_ips = self._test_fixed_ips_for_port(context,
p["network_id"],
p['fixed_ips'],
p['device_owner'],
subnets)
ips = self._allocate_fixed_ips(context,
configured_ips,
p['mac_address'],
prefer_next=prefer_next)
else:
ips = []
version_subnets = [v4, v6_stateful]
for subnets in version_subnets:
if subnets:
result = IpamNonPluggableBackend._generate_ip(
context, subnets, prefer_next=prefer_next)
ips.append({'ip_address': result['ip_address'],
'subnet_id': result['subnet_id']})
is_router_port = (
p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT)
if not is_router_port:
# IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets
# are generated and implicitly included.
for subnet in v6_stateless:
ip_address = self._calculate_ipv6_eui64_addr(
context, subnet, p['mac_address'])
ips.append({'ip_address': ip_address.format(),
'subnet_id': subnet['id']})
return ips
def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet):
"""For an auto-address subnet, add addrs for ports on the net."""
with context.session.begin(subtransactions=True):
network_id = subnet['network_id']
port_qry = context.session.query(models_v2.Port)
ports = port_qry.filter(
and_(models_v2.Port.network_id == network_id,
~models_v2.Port.device_owner.in_(
constants.ROUTER_INTERFACE_OWNERS_SNAT)))
updated_ports = []
for port in ports:
ip_address = self._calculate_ipv6_eui64_addr(
context, subnet, port['mac_address'])
allocated = models_v2.IPAllocation(network_id=network_id,
port_id=port['id'],
ip_address=ip_address,
subnet_id=subnet['id'])
try:
# Do the insertion of each IP allocation entry within
# the context of a nested transaction, so that the entry
# is rolled back independently of other entries whenever
# the corresponding port has been deleted.
with context.session.begin_nested():
context.session.add(allocated)
updated_ports.append(port['id'])
except db_exc.DBReferenceError:
LOG.debug("Port %s was deleted while updating it with an "
"IPv6 auto-address. Ignoring.", port['id'])
return updated_ports
def _calculate_ipv6_eui64_addr(self, context, subnet, mac_addr):
prefix = subnet['cidr']
network_id = subnet['network_id']
ip_address = ipv6_utils.get_ipv6_addr_by_EUI64(
prefix, mac_addr).format()
if not self._check_unique_ip(context, network_id,
subnet['id'], ip_address):
raise n_exc.IpAddressInUse(net_id=network_id,
ip_address=ip_address)
return ip_address
def allocate_subnet(self, context, network, subnet, subnetpool_id):
subnetpool = None
if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID:
subnetpool = self._get_subnetpool(context, id=subnetpool_id)
self._validate_ip_version_with_subnetpool(subnet, subnetpool)
# gateway_ip and allocation pools should be validated or generated
# only for specific request
if subnet['cidr'] is not constants.ATTR_NOT_SPECIFIED:
subnet['gateway_ip'] = self._gateway_ip_str(subnet,
subnet['cidr'])
# allocation_pools are converted to list of IPRanges
subnet['allocation_pools'] = self._prepare_allocation_pools(
subnet['allocation_pools'],
subnet['cidr'],
subnet['gateway_ip'])
subnet_request = ipam_req.SubnetRequestFactory.get_request(context,
subnet,
subnetpool)
if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID:
driver = subnet_alloc.SubnetAllocator(subnetpool, context)
ipam_subnet = driver.allocate_subnet(subnet_request)
subnet_request = ipam_subnet.get_details()
subnet = self._save_subnet(context,
network,
self._make_subnet_args(
subnet_request,
subnet,
subnetpool_id),
subnet['dns_nameservers'],
subnet['host_routes'],
subnet_request)
# ipam_subnet is not expected to be allocated for non pluggable ipam,
# so just return None for it (second element in returned tuple)
return subnet, None
| 46.980344 | 79 | 0.560065 | 2,102 | 19,121 | 4.814938 | 0.168887 | 0.043573 | 0.012845 | 0.011066 | 0.261733 | 0.197807 | 0.162533 | 0.142377 | 0.101275 | 0.079735 | 0 | 0.005059 | 0.369384 | 19,121 | 406 | 80 | 47.096059 | 0.834301 | 0.149783 | 0 | 0.139456 | 0 | 0 | 0.060548 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.061224 | 0 | 0.146259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2148858ea480dbf9fa6508c5d358dbe541aac19 | 3,418 | py | Python | edr/edrautoupdater.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | 74 | 2018-01-30T10:44:20.000Z | 2022-03-24T23:13:30.000Z | edr/edrautoupdater.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | 329 | 2017-11-20T12:18:21.000Z | 2022-03-31T22:21:49.000Z | edr/edrautoupdater.py | blacksurgeon/edr | 809b30a0247961f6b92a968696afa4383c867b5e | [
"Apache-2.0"
] | 15 | 2018-02-08T09:22:46.000Z | 2022-03-27T13:05:54.000Z | from __future__ import absolute_import
import requests
import zipfile
import errno
import os
import json
import datetime
from edrlog import EDRLog
import utils2to3
EDRLOG = EDRLog()
class EDRAutoUpdater(object):
REPO = "lekeno/edr"
UPDATES = utils2to3.abspathmaker(__file__, 'updates')
LATEST = utils2to3.abspathmaker(__file__, 'updates', 'latest.zip')
BACKUP = utils2to3.abspathmaker(__file__, 'backup')
EDR_PATH = os.path.abspath(os.path.dirname(__file__))
def __init__(self):
self.updates = EDRAutoUpdater.UPDATES
self.output = EDRAutoUpdater.LATEST
def download_latest(self):
if not os.path.exists(self.updates):
try:
os.makedirs(self.updates)
except OSError as e:
if e.errno != errno.EEXIST:
return False
download_url = self.__latest_release_url()
if not download_url:
return False
response = requests.get(download_url, stream=True)
response.raise_for_status()
if response.status_code != requests.codes.ok:
return False
with open(self.output, 'wb') as handle:
for block in response.iter_content(32768):
handle.write(block)
return True
def clean_old_backups(self):
files = os.listdir(EDRAutoUpdater.BACKUP)
files = [os.path.join(EDRAutoUpdater.BACKUP, f) for f in files]
files.sort(key=lambda x: os.path.getctime(x))
nbfiles = len(files)
max_backups = 5
for i in range(0, nbfiles - max_backups):
f = files[i]
EDRLOG.log(u"Removing backup {}".format(f), "INFO")
os.unlink(f)
def make_backup(self):
if not os.path.exists(EDRAutoUpdater.BACKUP):
try:
os.makedirs(EDRAutoUpdater.BACKUP)
except OSError as e:
if e.errno != errno.EEXIST:
return False
name = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + '.zip'
backup_file = os.path.join(EDRAutoUpdater.BACKUP, name)
zipf = zipfile.ZipFile(backup_file, 'w', zipfile.ZIP_DEFLATED)
self.__zipdir(EDRAutoUpdater.EDR_PATH, zipf)
zipf.close()
def __zipdir(self, path, ziph):
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if (("updates" not in d) and ("backup" not in d))]
for file in files:
if file.endswith(".pyc") or file.endswith(".pyo"):
continue
fp = os.path.join(root, file)
ziph.write(fp, os.path.relpath(fp, EDRAutoUpdater.EDR_PATH))
def extract_latest(self):
with zipfile.ZipFile(self.output, "r") as latest:
latest.extractall(EDRAutoUpdater.EDR_PATH)
def __latest_release_url(self):
latest_release_api = "https://api.github.com/repos/{}/releases/latest".format(self.REPO)
response = requests.get(latest_release_api)
if response.status_code != requests.codes.ok:
EDRLOG.log(u"Couldn't check the latest release on github: {}".format(response.status_code), "WARNING")
return None
json_resp = json.loads(response.content)
assets = json_resp.get("assets", None)
if not assets:
return None
return assets[0].get("browser_download_url", None)
| 35.237113 | 114 | 0.610591 | 417 | 3,418 | 4.841727 | 0.328537 | 0.026746 | 0.037147 | 0.031699 | 0.1684 | 0.10104 | 0.080238 | 0.045567 | 0.045567 | 0.045567 | 0 | 0.006512 | 0.281159 | 3,418 | 96 | 115 | 35.604167 | 0.815222 | 0 | 0 | 0.17284 | 0 | 0 | 0.068774 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08642 | false | 0 | 0.111111 | 0 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2164fb891696471f1421de594904338e09e2956 | 1,839 | py | Python | libs/figure/figure_QDialog.py | bevarb/AutoDetect | 64118ae0d741618088a470cd4eb704ca38f32ce0 | [
"MIT"
] | 1 | 2020-06-09T10:44:54.000Z | 2020-06-09T10:44:54.000Z | libs/figure/figure_QDialog.py | bevarb/AutoDetect | 64118ae0d741618088a470cd4eb704ca38f32ce0 | [
"MIT"
] | 1 | 2021-03-31T19:50:51.000Z | 2021-03-31T19:50:51.000Z | libs/figure/figure_QDialog.py | bevarb/AutoDetect | 64118ae0d741618088a470cd4eb704ca38f32ce0 | [
"MIT"
] | null | null | null | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import cv2 as cv
class fig_Dialog(QThread):
def __init__(self, img_path, window_name, parent=None):
super(fig_Dialog, self).__init__()
self.img_path = img_path
self.windowname = window_name
def __del__(self):
self.wait()
def run(self):
self.img = cv.imread(self.img_path)
self.dialog = QDialog()
self.dialog.setWindowTitle(self.windowname)
self.dialog.resize(900, 700)
layout = QHBoxLayout()
pix1 = QLabel()
pix1.setFixedSize(900, 500)
if self.img.shape[0] > 800 or self.img.shape[1] > 500:
ratio = self.img.shape[1] / self.img.shape[0]
self.img = cv.resize(self.img, (int(ratio * 500), 500), interpolation=cv.INTER_CUBIC)
pix1.setPixmap(self.cv2pixmap(self.img))
else:
pix1.setPixmap(self.cv2pixmap(self.img))
pix1.setAlignment(Qt.AlignCenter)
# pix1.setContextMenuPolicy(Qt.CustomContextMenu)  # allow a right-click context menu
# pix1.customContextMenuRequested.connect(self.generate_img_Menu)  # right-click menu
# pix1.setScaledContents(True)
pix1.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
layout.addWidget(pix1)
self.dialog.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.dialog.setLayout(layout)
self.dialog.show()
def cv2pixmap(self, img1):
'''Convert an OpenCV image (BGR ndarray) to a QPixmap.'''
img = img1.copy()
if len(img.shape) == 2:
img = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)  # expand grayscale to 3 channels (the self._vec_ helper is not defined in this class)
height, width, channel = img.shape[0:3]
bytesPerline = 3 * width
Qimg = QImage(img.data, width, height, bytesPerline, QImage.Format_RGB888).rgbSwapped()
pixmap = QPixmap.fromImage(Qimg)
return pixmap
| 36.78 | 97 | 0.63839 | 214 | 1,839 | 5.364486 | 0.434579 | 0.073171 | 0.041812 | 0.026132 | 0.149826 | 0.057491 | 0 | 0 | 0 | 0 | 0 | 0.039711 | 0.246873 | 1,839 | 49 | 98 | 37.530612 | 0.78917 | 0.09516 | 0 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
354f2fc13b3d0153bb7172d832d24291fae2d3f2 | 5,533 | py | Python | main.py | shimmy568/MemeMachine | 02fb384a98ab4dcd348927d9bf1d4bc8f1f167d3 | [
"MIT"
] | 22 | 2017-03-13T03:38:31.000Z | 2021-01-27T16:19:37.000Z | main.py | shimmy568/MemeMachine | 02fb384a98ab4dcd348927d9bf1d4bc8f1f167d3 | [
"MIT"
] | null | null | null | main.py | shimmy568/MemeMachine | 02fb384a98ab4dcd348927d9bf1d4bc8f1f167d3 | [
"MIT"
] | null | null | null | import imgurScraper as IS
import Tkinter, Tkconstants, tkFileDialog, tkMessageBox, tkFileDialog, threading, os
class Interface:
def __init__(self, top):
self.downloadThread = None
top.resizable(0, 0)
frame = Tkinter.Frame(top, borderwidth=3)
self.scraper = IS.scraperObject()
frame.columnconfigure(0, weight=1)
frame.columnconfigure(1, weight=3)
#create all elements
#who needs good design :P
Tkinter.Label(frame, text="Folder: ").grid(row=0, sticky=Tkinter.W, padx=3, pady=3)
text = os.path.dirname(os.path.realpath(__file__)) + "/defaultDownloadFolder"
self.folderEntryVar = Tkinter.StringVar()
self.folderEntryVar.set(text)
self.folderEntry = Tkinter.Entry(frame, textvariable=self.folderEntryVar)
self.folderEntry.grid(row=0, column=1, padx=3, pady=3, columnspan=2)
Tkinter.Label(frame, text="Search: ").grid(row=1, sticky=Tkinter.W, padx=3, pady=3)
self.searchEntry = Tkinter.Entry(frame)
self.searchEntry.grid(row=1, column=1, padx=3, pady=3, columnspan=2)
self.albumIntoFolders = Tkinter.IntVar()
self.albumIntoFolders.set(1)
albumIntoFoldersCheckbox = Tkinter.Checkbutton(frame, text="Put albums into folders", variable=self.albumIntoFolders)
albumIntoFoldersCheckbox.grid(row=2, sticky=Tkinter.W, columnspan=2)
self.imageType = Tkinter.IntVar()
r1 = Tkinter.Radiobutton(frame, text="All", variable=self.imageType, value=0)
r2 = Tkinter.Radiobutton(frame, text="Only images", variable=self.imageType, value=1)
r3 = Tkinter.Radiobutton(frame, text="Only gifs", variable=self.imageType, value=2)
r1.grid(row=3, sticky=Tkinter.W, column=0)
r2.grid(row=3, sticky=Tkinter.W, column=1)
r3.grid(row=3, sticky=Tkinter.W, column=2)
self.frontPage = Tkinter.IntVar()
frontPage = Tkinter.Checkbutton(frame, text="FP", variable=self.frontPage, command=self.toggleFrontPage)
frontPage.grid(row=1, column=3, sticky=Tkinter.W)
self.b1 = Tkinter.Button(frame, text="Download Images", command=self.startDownload)
self.b1.grid(row=4, sticky=Tkinter.W, columnspan=2)
self.b2 = Tkinter.Button(frame, text="Select", command=self.selectFolder)
self.b2.grid(row=0, column=3)
self.downloadAllVar = Tkinter.IntVar()
downloadAllCheckbox = Tkinter.Checkbutton(frame, text="Download All", command=self.toggleDownloadAll, variable=self.downloadAllVar)
downloadAllCheckbox.grid(row=0, column=4, columnspan=2)
Tkinter.Label(frame, text="Num of Images: ").grid(row=1, column=4, sticky=Tkinter.E)
spinnerDe = Tkinter.StringVar()
spinnerDe.set("5000")
self.imageNumSpinner = Tkinter.Spinbox(frame, textvariable=spinnerDe, width=4, from_=1, to=999999)
self.imageNumSpinner.grid(row=1, column=5)
self.downloadText = Tkinter.StringVar()
Tkinter.Label(frame, textvariable=self.downloadText).grid(row=4, column=2, columnspan=4, sticky=Tkinter.E)
self.downloadText.set("Not Downloading...")
frame.grid()
def displayError(self, title, errorBody):
tkMessageBox.showwarning(title, errorBody)
def finsihedDownload(self):
try:
self.downloadMoniter(False, 0)
except RuntimeError:
return
tkMessageBox.showinfo("Done", "All the images have been downloaded.")
def toggleDownloadAll(self):
if self.downloadAllVar.get() == 1:
self.imageNumSpinner.config(state=Tkinter.DISABLED)
else:
self.imageNumSpinner.config(state="normal")
def toggleFrontPage(self):
if self.frontPage.get() == 1:
self.searchEntry.config(state=Tkinter.DISABLED)
else:
self.searchEntry.config(state="normal")
def closeWindow(self):
if self.downloadThread is not None:
self.scraper.stopDownload()
def downloadImages(self, search, limit, settings, done, updateCallback):
self.scraper.downloadAllImagesFromSearch(search, limit, settings, updateCallback)
done()
def downloadMoniter(self, downloading, downloadNum):
if(downloading):
self.downloadText.set("Downloading Image: " + str(downloadNum))
else:
self.downloadText.set("Not Downloading...")
def startDownload(self):
if self.scraper.isDownloading():
self.displayError("Downloading", "You are already downloading something")
return
limit = int(self.imageNumSpinner.get())
if self.downloadAllVar.get() == 1:
limit = -1
searchQ = self.searchEntry.get()
settings = IS.settingsObject()
if self.frontPage.get() == 1:
settings.setFP(True)
settings.setDownloadType(self.imageType.get())
if self.albumIntoFolders.get() == 0:
settings.setAlbumsInFolders(False)
self.scraper.changeDownloadFolder(self.folderEntryVar.get())
self.downloadThread = threading.Thread(target=self.downloadImages, args=(searchQ, limit, settings, self.finsihedDownload, self.downloadMoniter))
self.downloadThread.start()
def selectFolder(self):
directoryName = tkFileDialog.askdirectory()
self.folderEntryVar.set(directoryName)
master = Tkinter.Tk();
interface = Interface(master)
master.wm_title("Meme Machine")
master.mainloop()
| 43.566929 | 152 | 0.660763 | 606 | 5,533 | 6.016502 | 0.277228 | 0.028799 | 0.030719 | 0.010971 | 0.159353 | 0.100658 | 0.051563 | 0.015359 | 0 | 0 | 0 | 0.018489 | 0.217965 | 5,533 | 126 | 153 | 43.912698 | 0.824128 | 0.007772 | 0 | 0.089109 | 0 | 0 | 0.055758 | 0.004009 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09901 | false | 0 | 0.019802 | 0 | 0.138614 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3550a084627671559e57cdb62ce367714bde703d | 1,741 | py | Python | examples/tri-boundary-markers.py | majosm/meshpy | aac4e81cb1b9b25d6dd8a4f2675f6a38e0e5d3a1 | [
"MIT"
] | 2 | 2017-04-14T15:21:28.000Z | 2019-07-22T14:53:22.000Z | examples/tri-boundary-markers.py | majosm/meshpy | aac4e81cb1b9b25d6dd8a4f2675f6a38e0e5d3a1 | [
"MIT"
] | null | null | null | examples/tri-boundary-markers.py | majosm/meshpy | aac4e81cb1b9b25d6dd8a4f2675f6a38e0e5d3a1 | [
"MIT"
] | null | null | null | # Provided by Liu Benyuan in https://github.com/inducer/meshpy/pull/11
from __future__ import division
import meshpy.triangle as triangle
import numpy as np
def round_trip_connect(start, end):
return [(i, i+1) for i in range(start, end)] + [(end, start)]
def refinement_func(tri_points, area):
max_area = 0.1
return bool(area > max_area)
def main():
points = [(1, 0), (1, 1), (-1, 1), (-1, -1), (1, -1), (1, 0)]
facets = round_trip_connect(0, len(points)-1)
markers = [2,2,2,2,2,2]
outer_start = len(points)
points.extend([(2, 0), (2, 2), (-2, 2), (-2, -2), (2, -2), (2, 0)])
facets.extend(round_trip_connect(outer_start, len(points) - 1))
markers.extend([3,3,3,3,3,3])
# build
info = triangle.MeshInfo()
info.set_points(points)
info.set_holes([(0, 0)])
info.set_facets(facets, facet_markers=markers)
#
mesh = triangle.build(info, refinement_func=refinement_func)
#
mesh_points = np.array(mesh.points)
mesh_tris = np.array(mesh.elements)
mesh_attr = np.array(mesh.point_markers)
print(mesh_attr)
import matplotlib.pyplot as plt
plt.triplot(mesh_points[:, 0], mesh_points[:, 1], mesh_tris)
plt.xlabel('x')
plt.ylabel('y')
#
n = np.size(mesh_attr)
inner_nodes = [i for i in range(n) if mesh_attr[i]==2]
outer_nodes = [i for i in range(n) if mesh_attr[i]==3]
plt.plot(mesh_points[inner_nodes, 0], mesh_points[inner_nodes, 1], 'ro')
plt.plot(mesh_points[outer_nodes, 0], mesh_points[outer_nodes, 1], 'go')
plt.axis([-2.5, 2.5, -2.5, 2.5])
#plt.show()
#
fig = plt.gcf()
fig.set_size_inches(4.2, 4.2)
plt.savefig('sec5-meshpy-triangle-ex5.pdf')
if __name__ == "__main__":
main()
| 29.016667 | 76 | 0.631246 | 286 | 1,741 | 3.657343 | 0.311189 | 0.024857 | 0.031549 | 0.034417 | 0.091778 | 0.086042 | 0.072658 | 0.072658 | 0.055449 | 0.055449 | 0 | 0.048399 | 0.192993 | 1,741 | 59 | 77 | 29.508475 | 0.696085 | 0.048248 | 0 | 0 | 0 | 0 | 0.02547 | 0.01698 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.1 | 0.025 | 0.225 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3554d57143c124045f13bbf5f3754de88fe86530 | 17,962 | py | Python | temporal/avl_cframes.py | velasale/PickApp | 2a6b025de217a6f350bd4e83a9bf120d7a2bae56 | [
"MIT"
] | null | null | null | temporal/avl_cframes.py | velasale/PickApp | 2a6b025de217a6f350bd4e83a9bf120d7a2bae56 | [
"MIT"
] | 1 | 2022-02-10T18:30:59.000Z | 2022-02-10T18:30:59.000Z | temporal/avl_cframes.py | SoftwareDevEngResearch/PickApp | 643f1f141ec2ee079917edfe1fe41b854fdaa14b | [
"MIT"
] | 1 | 2022-01-25T18:31:20.000Z | 2022-01-25T18:31:20.000Z | """
1 - Reads the csv files that contain all the coordinates of apple, stem, gravity and origin
2 - Obtains the angles: Hand-to-Stem, Hand-to-Gravity and Stem-to-Gravity which are the key
to replicate a pick
3 -
Some references
https://stackoverflow.com/questions/11140163/plotting-a-3d-cube-a-sphere-and-a-vector-in-matplotlib
https://stackoverflow.com/questions/32424670/python-matplotlib-drawing-3d-sphere-with-circumferences
https://stackoverflow.com/questions/54970401/matplotlib-scatter-plot-with-xyz-axis-lines-through-origin-0-0-0-and-axis-proj
"""
# System related Packages
import os, sys, copy, rospy, time, subprocess, shlex, psutil
# Math related Packages
import numpy as np
from sklearn.cluster import KMeans
# File handling related packages
import csv
# Plot Packages
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
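# Illustrative sketch only (not used by the pipeline below): the Hand-to-Stem,
# Hand-to-Gravity and Stem-to-Gravity angles described in the module docstring are
# simply the arccos of the dot product of unit vectors. The helper name below is
# hypothetical and added only for clarity; the functions further down inline the
# same computation.
def _angle_between_deg(u, v):
    """Angle in degrees between two vectors (normalized internally)."""
    u = u / np.linalg.norm(u)
    v = v / np.linalg.norm(v)
    return np.degrees(np.arccos(np.clip(np.dot(u, v), -1.0, 1.0)))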
def draw_in_hand(i, plot=True):
"""
Draws all the pick vectors: Normal to hand, stem and gravity within the
hand's cframe, however the gravity doesn't point always down
:param i:
:return:
"""
apple_center = i[0]
apple_calix = i[1]
apple_stem = i[2]
origin = i[3]
gravity = i[4]
# Get rid of characters
apple_center = apple_center.strip('][').split(',')
apple_calix = apple_calix.strip('][').split(',')
apple_stem = apple_stem.strip('][').split(',')
origin = origin.strip('][').split(',')
gravity = gravity.strip('][').split(',')
# ---- Step 1: Draw the Stem Vector ----
calix = np.array([float(apple_calix[0]), float(apple_calix[1]), float(apple_calix[2])])
stem = np.array([float(apple_stem[0]), float(apple_stem[1]), float(apple_stem[2])])
stem_vector = np.subtract(stem, calix)
stem_vector = stem_vector / np.linalg.norm(stem_vector) # Normalize its magnitude
# ---- Step 2: Draw the gravity Vector ----
point_A = np.array([float(origin[0]), float(origin[1]), float(origin[2])])
point_B = np.array([float(gravity[0]), float(gravity[1]), float(gravity[2])])
gravity_vector = np.subtract(point_B, point_A)
gravity_vector = gravity_vector / np.linalg.norm(gravity_vector) # Normalize its magnitude
# ---- Step 3: Draw the apple -----
a = float(apple_center[0])
b = float(apple_center[1])
c = float(apple_center[2])
a = b = c = 0
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
diam = 0.5
x_a = (diam * np.outer(np.cos(u), np.sin(v))) + a
y_a = (diam * np.outer(np.sin(u), np.sin(v))) + b
z_a = (diam * np.outer(np.ones(np.size(u)), np.cos(v))) + c
# ---- Step 5: Draw the Axes lines, which represent the Hand's coordinate frame ----
x, y, z = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]])
u, v, w = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 3]])
# ---- Step 6: Get some Math
hand_vector = np.array([0, 0, 1])
# Angle between Hand and Stem
dot_product = np.dot(hand_vector, stem_vector)
handToStem_angle = np.arccos(dot_product)
handToStem_angle = np.degrees(handToStem_angle)
# print('The angle between the Hand and Stem is %.0f\N{DEGREE SIGN}' % handToStem_angle)
# Angle between Hand and Gravity vector
dot_product = np.dot(hand_vector, gravity_vector)
handToGravity_angle = np.arccos(dot_product)
handToGravity_angle = np.degrees(handToGravity_angle)
# print('The angle between the Hand and Gravity is %.0f\N{DEGREE SIGN}' % handToGravity_angle)
# Angle between Stem and Gravity vector
dot_product = np.dot(stem_vector, gravity_vector)
stemToGravity_angle = np.arccos(dot_product)
stemToGravity_angle = np.degrees(stemToGravity_angle)
# print('The angle between the Stem and Gravity is %.0f\N{DEGREE SIGN}' % stemToGravity_angle)
if plot:
# ---- Step 0: Initialize Figure
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_xlabel('Hand X ')
ax.set_ylabel('Hand Y ')
ax.set_zlabel('Hand Z ')
# Get rid of the ticks
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.axes.zaxis.set_ticklabels([])
# Draw the Stem Vector
ax.quiver(0, 0, 0, stem_vector[0], stem_vector[1], stem_vector[2], length=1, color='b')
ax.text(stem_vector[0], stem_vector[1], stem_vector[2], "Stem", color='b', size=15, zorder=1)
# Draw Gravity Vector
ax.quiver(0, 0, 0, gravity_vector[0], gravity_vector[1], gravity_vector[2], length=1, color='r')
ax.text(gravity_vector[0], gravity_vector[1], gravity_vector[2], "Gravity", color='r', size=15, zorder=1)
# Draw the Apple and its center
ax.plot_surface(x_a, y_a, z_a, rstride=4, cstride=4, color='r', linewidth=0, alpha=0.2)
ax.scatter(a, b, c, color="g", s=100)
# Draw the Axes Lines
ax.quiver(x, y, z, u, v, w, arrow_length_ratio=0.1, color="black")
ax.text(0, 0, 1.5, "Normal to Hand", color='k', size=15, zorder=1)
# Titles
plt.suptitle("Stem and gravity w.r.t hand - Pick %i" % (k + 1))
plt.title(
'Hand-Stem: %.0f\N{DEGREE SIGN} , Hand-Gravity: %.0f\N{DEGREE SIGN}, Stem-Gravity %0.f\N{DEGREE SIGN}' % (
handToStem_angle, handToGravity_angle, stemToGravity_angle))
return handToStem_angle, handToGravity_angle, stemToGravity_angle
def draw_in_base(i, handToStem_angle, handToGravity_angle, stemToGravity_angle):
"""
Draws all the pick vectors: Normal to hand, stem and gravity within the
baselink's cframe, so the gravity is always pointing down
:param i:
:return:
"""
apple_center = i[0]
apple_calix = i[1]
apple_stem = i[2]
hand_origin = i[3]
hand_x = i[4]
hand_y = i[5]
hand_z = i[6]
# Get rid of characters
apple_center = apple_center.strip("][").split(",")
apple_calix = apple_calix.strip("']['").split("', '")
apple_stem = apple_stem.strip("']['").split("', '")
hand_origin = hand_origin.strip('][').split(',')
hand_x = hand_x.strip('][').split(',')
hand_y = hand_y.strip('][').split(',')
hand_z = hand_z.strip('][').split(',')
# ---- Step 0: Initialize Figure
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_xlabel('Base X ')
ax.set_ylabel('Base Y ')
ax.set_zlabel('Base Z ')
# Get rid of the ticks
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.axes.zaxis.set_ticklabels([])
# ---- Step 1: Draw the Stem Vector ----
calix = np.array([float(apple_calix[0]), float(apple_calix[1]), float(apple_calix[2])])
stem = np.array([float(apple_stem[0]), float(apple_stem[1]), float(apple_stem[2])])
stem_vector = np.subtract(stem, calix)
# print("Before", stem_vector)
stem_vector = stem_vector / np.linalg.norm(stem_vector) # Normalize its magnitude
# print("After", stem_vector)
ax.quiver(0, 0, 0, stem_vector[0], stem_vector[1], stem_vector[2], length=1, color='b')
ax.text(stem_vector[0], stem_vector[1], stem_vector[2], "Stem", color='b', size=15, zorder=1)
# ---- Step 2: Draw the gravity Vector ----
ax.quiver(0, 0, 0, 0, 0, -1, length=1, color='r')
ax.text(0, 0, -1, "Gravity", color='r', size=15, zorder=1)
# ---- Step 3: Draw the apple -----
a = float(apple_center[0])
b = float(apple_center[1])
c = float(apple_center[2])
a = b = c = 0
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
diam = 0.5
x = (diam * np.outer(np.cos(u), np.sin(v))) + a
y = (diam * np.outer(np.sin(u), np.sin(v))) + b
z = (diam * np.outer(np.ones(np.size(u)), np.cos(v))) + c
ax.plot_surface(x, y, z, rstride=4, cstride=4, color='r', linewidth=0, alpha=0.2)
# ---- Step 4: Draw the center of the apple ----
ax.scatter(a, b, c, color="g", s=100)
# ---- Step 5: Draw the Hand's axes
hand_origin = np.array([float(hand_origin[0]), float(hand_origin[1]), float(hand_origin[2])])
hand_x = np.array([float(hand_x[0]), float(hand_x[1]), float(hand_x[2])])
hand_y = np.array([float(hand_y[0]), float(hand_y[1]), float(hand_y[2])])
hand_z = np.array([float(hand_z[0]), float(hand_z[1]), float(hand_z[2])])
hand_x_vector = np.subtract(hand_x, hand_origin)
hand_x_vector = hand_x_vector / np.linalg.norm(hand_x_vector)
ax.quiver(0, 0, 0, hand_x_vector[0], hand_x_vector[1], hand_x_vector[2], length=1, color='k')
ax.text(hand_x_vector[0], hand_x_vector[1], hand_x_vector[2], "Hand Frame x", color='k', size=8, zorder=1)
hand_y_vector = np.subtract(hand_y, hand_origin)
hand_y_vector = hand_y_vector / np.linalg.norm(hand_y_vector)
ax.quiver(0, 0, 0, hand_y_vector[0], hand_y_vector[1], hand_y_vector[2], length=1, color='k')
ax.text(hand_y_vector[0], hand_y_vector[1], hand_y_vector[2], "Hand Frame y", color='k', size=8, zorder=1)
hand_z_vector = np.subtract(hand_z, hand_origin)
hand_z_vector = hand_z_vector / np.linalg.norm(hand_z_vector)
ax.quiver(0, 0, 0, hand_z_vector[0], hand_z_vector[1], hand_z_vector[2], length=1, color='k')
ax.text(hand_z_vector[0], hand_z_vector[1], hand_z_vector[2], "Hand Frame z", color='k', size=15, zorder=1)
plt.suptitle("Hand and Stem w.r.t Base - Pick %i" % (k + 1))
plt.title('Hand-Stem: %.0f\N{DEGREE SIGN} , Hand-Gravity: %.0f\N{DEGREE SIGN}, Stem-Gravity %0.f\N{DEGREE SIGN}' % (
handToStem_angle, handToGravity_angle, stemToGravity_angle))
def draw_kmeans_in_base(pick_angles):
hand_stem = np.radians(pick_angles[0])
stem_gravity = np.radians(pick_angles[1])
hand_gravity = np.radians(pick_angles[2])
# ---- Step 0: Initialize Figure
fig = plt.figure()
ax = plt.axes(projection='3d')
size = 1
ax.set_xlim(-size, size)
ax.set_ylim(-size, size)
ax.set_zlim(-size, size)
ax.set_xlabel('World X')
ax.set_ylabel('World Y')
ax.set_zlabel('World Z')
# Get rid of the ticks
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.axes.zaxis.set_ticklabels([])
# ax.set_title('Hand to Stem= ' + str(int(pick_angles[0])) +
# ', Stem to Gravity= ' + str(int(pick_angles[1])) +
# ', Hand to Gravity= ' + str(int(pick_angles[2])))
# ---- Step 1: Draw the gravity Vector ----
gravity_vector = np.array([0, 0, -1])
ax.quiver(0, 0, 0, gravity_vector[0], gravity_vector[1], gravity_vector[2], length=1, color='k')
ax.text(gravity_vector[0], gravity_vector[1], gravity_vector[2], "Gravity", color='k', size=15, zorder=1)
# ---- Step 2: Draw the center of the apple ----
a = b = c = 0
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
diam = 0.5
x = (diam * np.outer(np.cos(u), np.sin(v))) + a
y = (diam * np.outer(np.sin(u), np.sin(v))) + b
z = (diam * np.outer(np.ones(np.size(u)), np.cos(v))) + c
ax.plot_surface(x, y, z, rstride=4, cstride=4, color='r', linewidth=0, alpha=0.2)
ax.scatter(a, b, c, color="k", s=100)
# ---- Step 3: Draw the stem ----
stem_vector = np.array([0, np.sin(stem_gravity), -np.cos(stem_gravity)])
ax.quiver(0, 0, 0, stem_vector[0], stem_vector[1], stem_vector[2], length=1, color='brown')
ax.text(stem_vector[0], stem_vector[1], stem_vector[2], "Stem", color='brown', size=15, zorder=1)
# ---- Step 4: Draw the vector normal to hand ----
hand_z = -np.cos(hand_gravity)
hand_radius = np.sin(hand_gravity)
for i in range(360):
hand_x = hand_radius * np.cos(np.radians(i))
hand_y = hand_radius * np.sin(np.radians(i))
hand_prelim_vector = np.array([hand_x, hand_y, hand_z])
hand_stem_prelim = np.arccos(np.dot(hand_prelim_vector, stem_vector))
if abs(hand_stem_prelim - hand_stem) < 0.005:
hand_vector = hand_prelim_vector
ax.quiver(0, 0, 0, hand_vector[0], hand_vector[1], hand_vector[2], length=1, color='blue')
ax.text(hand_vector[0], hand_vector[1], hand_vector[2], "Hand", color='blue', size=15, zorder=1)
# ---- Step 5: Check that the angles are ok ----
# print('\nChecking angles')
st_to_gv = np.degrees(np.arccos(np.dot(stem_vector, gravity_vector)))
hd_to_gv = np.degrees(np.arccos(np.dot(hand_vector, gravity_vector)))
hd_to_st = np.degrees(np.arccos(np.dot(hand_vector, stem_vector)))
print('\nStem-Gravity angles', np.degrees(np.arccos(np.dot(stem_vector, gravity_vector))))
print('Hand-Gravity angles', np.degrees(np.arccos(np.dot(hand_vector, gravity_vector))))
print('Hand-Stem angles', np.degrees(np.arccos(np.dot(hand_vector, stem_vector))))
plt.suptitle("Hand and Stem w.r.t Base - Pick %i" % (k + 1))
plt.title('Hand-Stem: %.0f\N{DEGREE SIGN} , Hand-Gravity: %.0f\N{DEGREE SIGN}, Stem-Gravity %0.f\N{DEGREE SIGN}' % (
hd_to_st, hd_to_gv, st_to_gv))
if __name__ == '__main__':
# ---------------------------------------- Step 1 - Read the csv files ---------------------------------------------
location = '/home/avl/PycharmProjects/AppleProxy/'
# Read the csv file with all the coordinates in the hand's coordinate frame
file = 'objects_in_hand.csv'
with open(location + file, 'r') as f:
reader = csv.reader(f)
apple_coords = list(reader)
apples = len(apple_coords)
print('\nThe number of apples were:', len(apple_coords))
# Read the csv file with all the coordinates transformed into the baselink
file = 'objects_in_base.csv'
with open(location + file, 'r') as f:
reader = csv.reader(f)
apple_coords_base = list(reader)
# --------------------------------------- Step 2 - Sweep all the coordinates and plot ------------------------------
hand_to_stem_angles = []
hand_to_gravity_angles = []
stem_to_gravity_angles = []
success_hand_to_stem_angles = []
success_hand_to_gravity_angles = []
success_stem_to_gravity_angles = []
angles_for_kmeans = []
failed_hand_to_stem_angles = []
failed_hand_to_gravity_angles = []
failed_stem_to_gravity_angles = []
# Successful real apple picks
success_picks = [6, 10, 16, 30, 31, 38, 42, 43, 48, 50, 51, 52, 53, 60, 61, 63, 64, 67, 70, 71, 72, 73, 74, 77]
for k in range(11, apples):
i = apple_coords[k]
j = apple_coords_base[k]
handToStem_angle, handToGravity_angle, stemToGravity_angle = draw_in_hand(i, False) # Plot in hand's c-frame
# draw_in_base(j, handToStem_angle, handToGravity_angle, stemToGravity_angle) # Plot in baselink's c-frame
# Plot all the picks in the world's c-frame (not necessarily k-means)
draw_kmeans_in_base([handToStem_angle, stemToGravity_angle, handToGravity_angle])
plt.show()
if (k+1) in success_picks:
# Save it in success angles
success_hand_to_stem_angles.append(handToStem_angle)
success_hand_to_gravity_angles.append(handToGravity_angle)
success_stem_to_gravity_angles.append(stemToGravity_angle)
else:
# Save it in failed angles
failed_hand_to_stem_angles.append(handToStem_angle)
failed_hand_to_gravity_angles.append(handToGravity_angle)
failed_stem_to_gravity_angles.append(stemToGravity_angle)
hand_to_stem_angles.append(handToStem_angle)
hand_to_gravity_angles.append(handToGravity_angle)
stem_to_gravity_angles.append(stemToGravity_angle)
angles_for_kmeans.append([handToStem_angle, stemToGravity_angle, handToGravity_angle])
# --------------------------------------- Step 3 - Do Plots --------------------------------------------------------
# Plot 1: Boxplots of the three angles
angles = [hand_to_stem_angles, stem_to_gravity_angles, hand_to_gravity_angles]
fig, ax = plt.subplots()
ax.boxplot(angles)
ax.set_xticklabels(['Hand-Stem', 'Stem-Gravity', 'Hand-Gravity'])
ax.set_xlabel('Real apple pick angles')
ax.set_ylabel('Angle [deg]')
ax.yaxis.grid()
# Plot 2: Scatterplot
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(success_hand_to_stem_angles, success_stem_to_gravity_angles, success_hand_to_gravity_angles,
alpha=0.5, s=80, c='g', depthshade=False, label='success picks')
ax.scatter(failed_hand_to_stem_angles, failed_stem_to_gravity_angles, failed_hand_to_gravity_angles,
alpha=0.5, s=80, c='r', depthshade=False, label='failed picks')
ax.set_xlabel('Hand to Stem angle [deg]')
ax.set_ylabel('Stem to Gravity angle [deg]')
ax.set_zlabel('Hand to Gravity angle [deg]')
ax.set_title('Real apple pick angles')
# K-means Scatter plot
kmeans = KMeans(n_clusters=5, random_state=0).fit(angles_for_kmeans)
print("\nThe k-means are: ")
print(kmeans.cluster_centers_)
alpha = []
beta = []
gamma = []
for i in kmeans.cluster_centers_:
alpha.append(i[0])
beta.append(i[1])
gamma.append(i[2])
draw_kmeans_in_base(i)
ax.scatter(alpha, beta, gamma, alpha=1, s=80, c='k', marker='^', depthshade=False, label='k-means')
# Customize the major grid
ax.grid(which='major', linestyle='-', linewidth='0.5', color='red')
# Customize the minor grid
# ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
ax.legend()
# Show the angles of a specific pick
target = 50 - 1
# print('The angles are', angles_for_kmeans[target])
angles_transpose = np.array(angles).T
print('The angles are: ', angles_transpose)
# ---------------------------------------- Step 3 - Store angles in a csv file -------------------------------------
with open('real_picks_angles.csv', 'w') as f:
write = csv.writer(f)
write.writerows(angles_transpose)
# Show plots
plt.show() | 42.065574 | 123 | 0.633894 | 2,774 | 17,962 | 3.921413 | 0.113915 | 0.035852 | 0.024821 | 0.009193 | 0.596709 | 0.536588 | 0.459092 | 0.392443 | 0.367531 | 0.349421 | 0 | 0.030804 | 0.195747 | 17,962 | 427 | 124 | 42.065574 | 0.722207 | 0.210222 | 0 | 0.303371 | 0 | 0.011236 | 0.077897 | 0.004126 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011236 | false | 0 | 0.022472 | 0 | 0.037453 | 0.026217 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
355960ffad088699d5b467b6796f78a30406f92d | 1,177 | py | Python | tools/generate_taint_models/tests/get_graphql_sources_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | tools/generate_taint_models/tests/get_graphql_sources_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | tools/generate_taint_models/tests/get_graphql_sources_test.py | rvantonder/pyre-check-1 | 600ec9656ece5fff21598f4248c55089714bf590 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os # noqa
import unittest
from typing import Callable
from ..get_graphql_sources import GraphQLSourceGenerator
from .test_functions import __name__ as qualifier, all_functions
class GetGraphQLSourcesTest(unittest.TestCase):
def test_compute_models(self):
source = "TaintSource[UserControlled]"
sink = "TaintSink[ReturnedToUser]"
self.assertEqual(
list(GraphQLSourceGenerator().compute_models(all_functions)),
[
f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
f"def {qualifier}.TestClass.methodB(self, *args: {source}) -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x) -> {sink}: ...",
f"def {qualifier}.testC(x) -> {sink}: ...",
f"def {qualifier}.testD(x, *args: {source}) -> {sink}: ...",
f"def {qualifier}.testE(x, **kwargs: {source}) -> {sink}: ...",
],
)
| 39.233333 | 91 | 0.595582 | 125 | 1,177 | 5.512 | 0.52 | 0.040639 | 0.132075 | 0.148041 | 0.156749 | 0.078374 | 0 | 0 | 0 | 0 | 0 | 0.004551 | 0.253186 | 1,177 | 29 | 92 | 40.586207 | 0.779295 | 0.141886 | 0 | 0 | 0 | 0.047619 | 0.409363 | 0.121514 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
355a209074a6a9c55950a52853f1cc08537ff327 | 1,829 | py | Python | eval.py | mingzhang96/wider_person_search | e4763ef4abcaeccd194f6383e0fd8be8bedc1699 | [
"MIT"
] | 3 | 2019-10-08T19:56:00.000Z | 2020-04-20T16:36:28.000Z | eval.py | mingzhang96/wider_person_search | e4763ef4abcaeccd194f6383e0fd8be8bedc1699 | [
"MIT"
] | null | null | null | eval.py | mingzhang96/wider_person_search | e4763ef4abcaeccd194f6383e0fd8be8bedc1699 | [
"MIT"
] | 1 | 2019-12-29T12:03:22.000Z | 2019-12-29T12:03:22.000Z | import numpy
import os
import os.path as osp
import json
from random import shuffle
import argparse
def parse_submission(submission_file):
with open(submission_file) as f:
lines = f.readlines()
submission = {}
for line in lines:
words = line.strip().split()
if len(words) != 2:
print('Format Error!')
return None
key = words[0].strip()
ret = words[1].strip().split(',')
unique_ret = []
appeared_set = set()
for x in ret:
if x not in appeared_set:
unique_ret.append(x)
appeared_set.add(x)
submission[key] = unique_ret
return submission
def read_gt(gt_file):
with open(gt_file) as f:
data = json.load(f)
gt_dict = {}
for key, value in data.items():
gt_dict[key] = set(value)
return gt_dict
def get_AP(gt_set, ret_list):
hit = 0
AP = 0.0
for k, x in enumerate(ret_list):
if x in gt_set:
hit += 1
prec = hit / (k+1)
AP += prec
AP /= len(gt_set)
return AP
def get_mAP(gt_dict, ret_dict):
mAP = 0.0
query_num = len(gt_dict.keys())
for key, gt_set in gt_dict.items():
if ret_dict.get(key) is None:
AP = 0
else:
AP = get_AP(gt_set, ret_dict[key])
mAP += AP
mAP /= query_num
return mAP
def eval(submission_file, gt_file):
gt_dict = read_gt(gt_file)
submission = parse_submission(submission_file)
mAP = get_mAP(gt_dict, submission)
print('mAP: {:.4f}'.format(mAP))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gt', type=str)
parser.add_argument('--submission', type=str)
args = parser.parse_args()
eval(args.submission, args.gt)
| 23.151899 | 50 | 0.577911 | 260 | 1,829 | 3.861538 | 0.288462 | 0.047809 | 0.049801 | 0.057769 | 0.025896 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009471 | 0.307272 | 1,829 | 78 | 51 | 23.448718 | 0.782952 | 0 | 0 | 0 | 0 | 0 | 0.026805 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.09375 | 0 | 0.25 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
355a84c8233585f478d86f11eda0715ae183940a | 5,061 | py | Python | openstack_dashboard/test/integration_tests/pages/admin/system/metadatadefinitionspage.py | nicozhang/horizon | 49df5cffd84b6d9da4e5926afd12e0a92737d740 | [
"Apache-2.0"
] | 1 | 2015-02-26T03:23:27.000Z | 2015-02-26T03:23:27.000Z | openstack_dashboard/test/integration_tests/pages/admin/system/metadatadefinitionspage.py | nicozhang/horizon | 49df5cffd84b6d9da4e5926afd12e0a92737d740 | [
"Apache-2.0"
] | 7 | 2017-06-26T14:34:33.000Z | 2020-06-30T22:10:50.000Z | openstack_dashboard/test/integration_tests/pages/admin/system/metadatadefinitionspage.py | yi-cloud/horizon-xg | 827365753886025dc62fbfbed179ef719d313711 | [
"Apache-2.0"
] | 6 | 2015-05-25T00:31:26.000Z | 2022-03-21T22:36:25.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class MetadatadefinitionsTable(tables.TableRegion):
name = "namespaces"
CREATE_NAMESPACE_FORM_FIELDS = (
"source_type", "direct_input", "metadef_file", "public", "protected")
@tables.bind_table_action('import')
def import_namespace(self, create_button):
create_button.click()
return forms.FormRegion(
self.driver,
self.conf,
field_mappings=self.CREATE_NAMESPACE_FORM_FIELDS)
@tables.bind_table_action('delete')
def delete_namespace(self, delete_button):
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
class MetadatadefinitionsPage(basepage.BaseNavigationPage):
NAMESPACE_TABLE_NAME_COLUMN = 'Name'
NAMESPACE_TABLE_DESCRIPTION_COLUMN = 'Description'
NAMESPACE_TABLE_RESOURCE_TYPES_COLUMN = 'Resource Types'
NAMESPACE_TABLE_PUBLIC_COLUMN = 'Public'
NAMESPACE_TABLE_PROTECTED_COLUMN = 'Protected'
boolean_mapping = {True: 'Yes', False: 'No'}
def __init__(self, driver, conf):
super(MetadatadefinitionsPage, self).__init__(driver, conf)
self._page_title = "Metadata Definitions"
def _get_row_with_namespace_name(self, name):
return self.namespaces_table.get_row(
self.NAMESPACE_TABLE_NAME_COLUMN,
name)
@property
def namespaces_table(self):
return MetadatadefinitionsTable(self.driver, self.conf)
def json_load_template(self, namespace_template_name):
"""Read template for namespace creation
:param namespace_template_name: Path to template
:return: JSON data container
"""
try:
with open(namespace_template_name, 'r') as template:
json_template = json.load(template)
except Exception:
raise EOFError("Can not read template file: [{0}]".format(
namespace_template_name))
return json_template
def import_namespace(
self, namespace_source_type, namespace_json_container,
is_public=True, is_protected=False):
create_namespace_form = self.namespaces_table.import_namespace()
create_namespace_form.source_type.value = namespace_source_type
if namespace_source_type == 'raw':
json_template_dump = json.dumps(namespace_json_container)
create_namespace_form.direct_input.text = json_template_dump
elif namespace_source_type == 'file':
metadeffile = namespace_json_container
create_namespace_form.metadef_file.choose(metadeffile)
if is_public:
create_namespace_form.public.mark()
if is_protected:
create_namespace_form.protected.mark()
create_namespace_form.submit()
def delete_namespace(self, name):
row = self._get_row_with_namespace_name(name)
row.mark()
confirm_delete_namespaces_form = \
self.namespaces_table.delete_namespace()
confirm_delete_namespaces_form.submit()
def is_namespace_present(self, name):
return bool(self._get_row_with_namespace_name(name))
def is_public_set_correct(self, name, exp_value, row=None):
if type(exp_value) != bool:
raise ValueError('Expected value "exp_value" is not boolean')
if not row:
row = self._get_row_with_namespace_name(name)
cell = row.cells[self.NAMESPACE_TABLE_PUBLIC_COLUMN]
return self._is_text_visible(cell, self.boolean_mapping[exp_value])
def is_protected_set_correct(self, name, exp_value, row=None):
if type(exp_value) != bool:
raise ValueError('Expected value "exp_value" is not boolean')
if not row:
row = self._get_row_with_namespace_name(name)
cell = row.cells[self.NAMESPACE_TABLE_PROTECTED_COLUMN]
return self._is_text_visible(cell, self.boolean_mapping[exp_value])
def is_resource_type_set_correct(self, name, expected_resources, row=None):
if not row:
row = self._get_row_with_namespace_name(name)
cell = row.cells[self.NAMESPACE_TABLE_RESOURCE_TYPES_COLUMN]
return all(
[self._is_text_visible(cell, res, strict=False)
for res in expected_resources])
| 39.232558 | 79 | 0.70085 | 613 | 5,061 | 5.468189 | 0.272431 | 0.040274 | 0.051014 | 0.03401 | 0.296241 | 0.250895 | 0.213902 | 0.204654 | 0.161695 | 0.161695 | 0 | 0.00127 | 0.222288 | 5,061 | 128 | 80 | 39.539063 | 0.850356 | 0.131792 | 0 | 0.146067 | 0 | 0 | 0.060648 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134831 | false | 0 | 0.089888 | 0.033708 | 0.438202 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
355c0e609145649ca34a5beeeb2d1373f6b92946 | 10,645 | py | Python | lib/dataset.py | chengkunxf/MeInGame | 5dee3776acda5213d3253a88cf091d73b6507db5 | [
"MIT"
] | 469 | 2020-12-15T07:13:30.000Z | 2022-03-29T02:51:54.000Z | lib/dataset.py | xiaocitiao/MeInGame | dfda42f7ab02ae8f5e9e5de8d61bf84dca85a916 | [
"MIT"
] | 32 | 2021-02-08T10:52:57.000Z | 2022-03-16T04:26:43.000Z | lib/dataset.py | xiaocitiao/MeInGame | dfda42f7ab02ae8f5e9e5de8d61bf84dca85a916 | [
"MIT"
] | 83 | 2021-02-07T03:55:29.000Z | 2022-03-30T05:38:41.000Z | import logging
import os
import cv2
import numpy as np
import torch
from skimage import io
class Dataset(torch.utils.data.Dataset):
def __init__(self, config, flist_gt=None, flist=None, test=False,
device=torch.device('cpu')):
super(Dataset, self).__init__()
self.device = device
self.len_flist = len(flist)
self.flist = flist
self.flist_gt = flist_gt
self.data_dir = config.data_dir
self.is_train = not test
self.root_dir = config.root_dir
self.im_size = config.im_size
self.uv_size = config.uv_size
temp_uv = io.imread(
os.path.join(config.root_dir, 'data/uv_param/masks/skin_mask.png'))
self.temp_uv = cv2.resize(temp_uv[..., :3], (self.uv_size, self.uv_size),
interpolation=cv2.INTER_LANCZOS4).astype(
np.float32)
self.lip_mask = 1 - self.load_mask('data/uv_param/masks/lip_mask.png')
brow_mask = 1 - self.load_mask('data/uv_param/masks/brow_mask.png')
ear_mask = 1 - self.load_mask('data/uv_param/masks/ear_mask.png')
eye_mask = 1 - self.load_mask('data/uv_param/masks/eye_mask.png')
self.nose_shadow_mask = self.load_mask(
'data/uv_param/masks/nose_shadow_mask.png')
skin_mask = self.load_mask('data/uv_param/masks/skin_mask.png')
self.temp_skin_mean = np.mean(self.temp_uv[skin_mask.astype(np.bool)],
axis=0)
self.temp_lip_mean = np.mean(self.temp_uv[self.lip_mask.astype(np.bool)],
axis=0)
self.skin_mask = skin_mask + brow_mask + ear_mask + eye_mask
self.blur_skin_mask = cv2.GaussianBlur(self.skin_mask.astype(
np.float32), (self.uv_size // 16 + 1, self.uv_size // 16 + 1),
self.uv_size // 32)[..., None]
lip_mask = cv2.erode(self.lip_mask, np.ones((9, 9)), 4)
self.blur_lip_mask = cv2.GaussianBlur(lip_mask.astype(np.float32), (61, 61),
0)[..., None]
uv_mask = io.imread(
os.path.join(config.root_dir, 'data/uv_param/masks/uv_mask.png'))[...,
-1]
uv_mask = cv2.resize(uv_mask, (self.uv_size, self.uv_size),
interpolation=cv2.INTER_NEAREST)
self.uv_mask = uv_mask[..., None] // 255
self.log = logging.getLogger('x')
def __len__(self):
if self.is_train:
return len(self.flist_gt)
else:
return len(self.flist)
def __getitem__(self, index):
try:
item = self.load_item_numpy(index)
except Exception as e:
self.log.error('Loading Error')
self.log.error(e)
item = self.load_item_numpy(0)
return item
def load_item_numpy(self, index):
if self.is_train:
uv_gt_path = self.flist_gt[index]
data_id = os.path.split(uv_gt_path)[-1][:5]
uv_path = os.path.join(self.data_dir, '{}_uv.png'.format(data_id))
rand_uv_path = self.flist[np.random.randint(self.len_flist)]
rand_data_id = os.path.split(rand_uv_path)[-1][:5]
rand_im_path = os.path.join(self.data_dir,
'{}_image.png'.format(rand_data_id))
rand_vert_path = os.path.join(self.data_dir,
'{}_nsh_vert.npy'.format(rand_data_id))
rand_param_path = os.path.join(self.data_dir,
'{}_params.npy'.format(rand_data_id))
else:
uv_path = self.flist[index]
data_id = os.path.split(uv_path)[-1][:5]
im_path = os.path.join(self.data_dir, '{}_image.png'.format(data_id))
vert_path = os.path.join(self.data_dir, '{}_nsh_vert.npy'.format(data_id))
param_path = os.path.join(self.data_dir, '{}_params.npy'.format(data_id))
vertice = self.to_tensor(np.load(vert_path))
param = self.to_tensor(np.load(param_path))
image = resize(io.imread(im_path), self.im_size)
if self.is_train:
image = self.process_image(image)
rand_image = resize(io.imread(rand_im_path), self.im_size)
rand_image = self.process_image(rand_image)
uvmap = resize(io.imread(uv_path), self.uv_size)
rand_uvmap = resize(io.imread(rand_uv_path), self.uv_size)
uvmap = self.process_uvmap(uvmap, rand_uvmap)
rand_uvmap = self.process_uvmap(rand_uvmap)
rand_vertice = self.to_tensor(np.load(rand_vert_path))
rand_param = self.to_tensor(np.load(rand_param_path))
uvmap_gt = resize(io.imread(uv_gt_path), self.uv_size) / 127.5 - 1
uvmap_gt = self.to_tensor(uvmap_gt[..., :3].astype(np.float32))
return image.permute((2, 0, 1)), uvmap.permute(
(2, 0, 1)), uvmap_gt.permute(
(2, 0, 1)), vertice, param, rand_image.permute(
(2, 0, 1)), rand_uvmap.permute(
(2, 0, 1)), rand_vertice, rand_param
else:
uvmap = resize(io.imread(uv_path), self.uv_size)
uvmap = self.process_uvmap(uvmap)
image = self.process_image(image, False)
return image.permute((2, 0, 1)), uvmap.permute((2, 0, 1)), vertice, param
def process_uvmap(self, uvmap, rand_uvmap=None, dark_brow=False):
# uvmap dtype should be uint8
rule_mask = get_rule_mask(uvmap)
uv_mask = uvmap[..., -1:] > 127
uv_seg = uvmap[..., -1] % 128
uv_seg = np.eye(19, dtype=np.float32)[uv_seg]
mask_idx = np.r_[1:10]
uv_mask = uv_mask.astype(np.float32) * np.sum(uv_seg[..., mask_idx],
axis=-1, keepdims=True)
uv_mask = uv_mask * self.uv_mask
if rand_uvmap is not None:
rand_uvmask = rand_uvmap[..., -1:] > 127
rand_uvseg = rand_uvmap[..., -1:] % 128
rand_uvmask = rand_uvmask * (rand_uvseg > 0) * (rand_uvseg < 10)
uv_mask = uv_mask * rand_uvmask
uv_mask_small = cv2.dilate(
uv_mask, np.ones((self.uv_size // 32 + 1, self.uv_size // 32 + 1)), 2)
uv_mask_small = cv2.erode(
uv_mask_small, np.ones(
(self.uv_size // 16 + 1, self.uv_size // 16 + 1)), 2)
if not self.is_train:
uv_mask_bottom = cv2.erode(
uv_mask_small,
np.ones((self.uv_size // 16 + 1, self.uv_size // 16 + 1)), 3)
uv_mask_small[self.uv_size // 2:] = uv_mask_bottom[self.uv_size // 2:]
mouth_idx = 8
uv_mask = uv_mask_small * uv_mask[..., 0] * (1 - uv_seg[..., mouth_idx])
mask_for_seam = 255 * uv_mask.astype(np.uint8)
mask_for_seam[1, 1] = 255
mask_for_seam[1, -2] = 255
mask_for_seam[-2, 1] = 255
mask_for_seam[-2, -2] = 255
uv_mean = np.mean(uvmap[uv_seg[..., 1].astype(np.bool), :3],
axis=0).astype(np.float32)
temp_uv = self.temp_uv - self.temp_skin_mean + uv_mean
temp_uv = temp_uv * self.blur_skin_mask + self.temp_uv * (
1 - self.blur_skin_mask)
lip_idx = np.r_[7, 9]
lip_mask = np.sum(uv_seg[..., lip_idx], axis=-1)
lip_mask = lip_mask * rule_mask
if not np.any(lip_mask):
lip_mask = self.lip_mask
lip_mean = np.mean(uvmap[lip_mask.astype(np.bool), :3],
axis=0).astype(np.float32)
lip_uv = self.temp_uv.astype(np.int32)
lip_uv += np.round(lip_mean - self.temp_lip_mean).astype(np.int32)
temp_uv = lip_uv * self.blur_lip_mask + temp_uv * (1 - self.blur_lip_mask)
temp_uv = np.clip(temp_uv, 0, 255).astype(np.uint8)
k_size = (self.uv_size // 16 + 1, self.uv_size // 16 + 1)
blur_uv_mask = cv2.GaussianBlur(uv_mask, k_size, 0)[..., None]
use_seamless = True
if use_seamless:
# ! cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER
fused = cv2.seamlessClone(uvmap[..., :3], temp_uv, mask_for_seam,
(self.uv_size // 2, self.uv_size // 2),
cv2.MIXED_CLONE)
else:
fused = uvmap[..., :3] * blur_uv_mask + temp_uv * (1 - blur_uv_mask)
if dark_brow:
brow_mask = (uv_seg[..., 2] + uv_seg[..., 3]) * self.uv_mask[..., 0]
k_size = (self.uv_size // 32 + 1, self.uv_size // 32 + 1)
blur_brow_mask = cv2.GaussianBlur(brow_mask, k_size, 0)[..., None]
fused = fused * (1 - blur_brow_mask * 0.4)
fused = fused * 0.9
uvmap = np.concatenate([(fused / 127.5 - 1).astype(np.float32),
blur_uv_mask.astype(np.float32)], axis=-1)
uvmap = self.to_tensor(uvmap)
return uvmap
def process_image(self, image, mask=True):
im_seg = 255 - image[..., -1:]
image = image[..., :3].astype(np.float32) / 127.5 - 1
if mask:
im_seg_oh = np.eye(19, dtype=np.float32)[im_seg[..., 0]]
# include face, ears and neck, exclude inner mouth
skin_idx = np.r_[1:8, 9, 12, 13, 17]
im_skin = np.sum(im_seg_oh[..., skin_idx], axis=-1, keepdims=True)
k_size = (self.im_size // 32 + 1, self.im_size // 32 + 1)
blur_im_skin = cv2.GaussianBlur(im_skin, k_size, 0)[..., None]
image = np.concatenate([image, blur_im_skin, im_seg], axis=-1)
else:
image = np.concatenate([image, im_seg], axis=-1)
image = self.to_tensor(image)
return image
def to_tensor(self, data, im_size=None):
if im_size is not None:
data = resize(data, im_size)
return torch.from_numpy(data).to(self.device)
def create_iterator(self, batch_size):
while True:
sample_loader = torch.utils.data.DataLoader(dataset=self,
batch_size=batch_size,
drop_last=True)
for item in sample_loader:
yield item
def load_mask(self, path):
mask = io.imread(os.path.join(self.root_dir, path))[..., -1]
if self.uv_size != 1024:
mask = cv2.resize(mask, (self.uv_size, self.uv_size),
interpolation=cv2.INTER_NEAREST)
return mask // 255
def resize(image, im_size):
if image.shape[0] != im_size or image.shape[1] != im_size:
image = cv2.resize(image, (im_size, im_size),
interpolation=cv2.INTER_NEAREST)
return image
def get_center(mask):
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
return ((cmax + cmin) // 2, (rmax - rmin) // 2)
def get_rule_mask(image):
R = image[..., 0]
G = image[..., 1]
B = image[..., 2]
mask = (R > 95) & (G > 40) & (B > 20) & ((
np.max(image, axis=-1) - np.min(image, axis=-1)) > 15) & (R > G) & (R > B)
# mask = (R > 95) & (G > 40) & (B > 20) & (
# (np.max(image, axis=-1) - np.min(image, axis=-1)) >
# 15) & ((R - G) > 20) & ((R - B) > 20)
return mask
| 38.850365 | 80 | 0.59333 | 1,627 | 10,645 | 3.626921 | 0.118623 | 0.033554 | 0.049144 | 0.021691 | 0.380952 | 0.300966 | 0.244704 | 0.22386 | 0.214032 | 0.155228 | 0 | 0.039625 | 0.257961 | 10,645 | 273 | 81 | 38.992674 | 0.707431 | 0.026209 | 0 | 0.085973 | 0 | 0 | 0.035911 | 0.025678 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054299 | false | 0 | 0.027149 | 0 | 0.140271 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3560bbc823282975830231fca2efb9687dc6b230 | 3,182 | py | Python | server.py | Djaler/TFTPy | 5de73dc892c37d1ecdde814381a650ac508b7493 | [
"MIT"
] | null | null | null | server.py | Djaler/TFTPy | 5de73dc892c37d1ecdde814381a650ac508b7493 | [
"MIT"
] | null | null | null | server.py | Djaler/TFTPy | 5de73dc892c37d1ecdde814381a650ac508b7493 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv, exit
from multiprocessing import Process
from PyQt4 import QtCore
from PyQt4 import QtGui
import tftpy
class MainWindow(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self._init_ui()
self.server = None
def __del__(self):
self.server.terminate()
def _init_ui(self):
self.main_layout = QtGui.QVBoxLayout(self)
self.main_layout.addWidget(QtGui.QLabel('Каталог TFTP-сервера:'))
self.catalog_layout = QtGui.QHBoxLayout(self)
self.catalog_edit = QtGui.QLineEdit()
self.catalog_edit.setReadOnly(True)
self.catalog_layout.addWidget(self.catalog_edit)
self.choose_catalog_btn = QtGui.QPushButton('Выбрать')
self.connect(self.choose_catalog_btn, QtCore.SIGNAL('pressed()'),
self.choose_catalog)
self.catalog_layout.addWidget(self.choose_catalog_btn)
self.main_layout.addLayout(self.catalog_layout)
self.main_layout.addWidget(QtGui.QLabel('Порт:'))
self.port_edit = QtGui.QLineEdit('8069')
self.port_edit.setValidator(QtGui.QIntValidator(1024, 65536))
self.main_layout.addWidget(self.port_edit)
self.run_btn = QtGui.QPushButton('Запустить сервер')
self.connect(self.run_btn, QtCore.SIGNAL('pressed()'), self.start)
self.run_btn.setDisabled(True)
self.main_layout.addWidget(self.run_btn)
self.setLayout(self.main_layout)
self.setWindowTitle('TFTP Сервер')
self.setMinimumWidth(300)
self.center()
self.show()
def choose_catalog(self):
catalog = QtGui.QFileDialog().getExistingDirectory(self,
'Выбор каталога')
if not catalog:
return
self.catalog_edit.setText(catalog)
self.run_btn.setEnabled(True)
def start(self):
self.server = Process(target=self.run, args=(
unicode(self.catalog_edit.text()), int(self.port_edit.text())))
self.server.start()
self.run_btn.setText('Остановить сервер')
self.disconnect(self.run_btn, QtCore.SIGNAL('pressed()'), self.start)
self.connect(self.run_btn, QtCore.SIGNAL('pressed()'), self.stop)
def run(self, catalog, port):
server = tftpy.TftpServer(catalog)
while True:
try:
server.listen('0.0.0.0', port)
except Exception:
continue
def stop(self):
self.server.terminate()
self.run_btn.setText('Запустить сервер')
self.disconnect(self.run_btn, QtCore.SIGNAL('pressed()'), self.stop)
self.connect(self.run_btn, QtCore.SIGNAL('pressed()'), self.start)
def center(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
if __name__ == '__main__':
QtCore.QTextCodec.setCodecForCStrings(
QtCore.QTextCodec.codecForName('UTF-8'))
app = QtGui.QApplication(argv)
window = MainWindow()
exit(app.exec_())
| 31.50495 | 77 | 0.636392 | 364 | 3,182 | 5.387363 | 0.302198 | 0.042835 | 0.056094 | 0.067313 | 0.241203 | 0.169811 | 0.135135 | 0.135135 | 0.131056 | 0.104029 | 0 | 0.00995 | 0.241986 | 3,182 | 100 | 78 | 31.82 | 0.803068 | 0.013199 | 0 | 0.054054 | 0 | 0 | 0.058955 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.067568 | 0 | 0.202703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3560c4682e6cba33b0f1e6b4a93af985da544c47 | 3,171 | py | Python | blog/consumers.py | John2013/portfolio | 5be3ab4070cff97e1958f168eb2abd7b97bf6ad7 | [
"MIT"
] | null | null | null | blog/consumers.py | John2013/portfolio | 5be3ab4070cff97e1958f168eb2abd7b97bf6ad7 | [
"MIT"
] | null | null | null | blog/consumers.py | John2013/portfolio | 5be3ab4070cff97e1958f168eb2abd7b97bf6ad7 | [
"MIT"
] | null | null | null | from asgiref.sync import async_to_sync
from channels.generic.websocket import AsyncWebsocketConsumer, \
WebsocketConsumer
import json
from blog.models import Comment
class ChatConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
def connect(self):
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
self.accept()
def disconnect(self, close_code):
# Leave room group
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
# noinspection PyMethodOverriding
def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
nickname = text_data_json['nickname']
datetime = text_data_json['datetime']
article_pk = text_data_json['articlePk']
# Send message to room group
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type': 'chat_message',
'message': message,
'nickname': nickname,
'datetime': datetime,
}
)
comment = Comment(
article_id=article_pk,
body=message,
nickname=nickname,
datetime=datetime
)
comment.save()
# Receive message from room group
def chat_message(self, event):
message = event['message']
nickname = event['nickname']
datetime = event['datetime']
# Send message to WebSocket
self.send(text_data=json.dumps({
'message': message,
'nickname': nickname,
'datetime': datetime,
}))
class ChatAsyncConsumer(AsyncWebsocketConsumer):
async def connect(self):
print('connect')
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
print('disconnect')
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
# noinspection PyMethodOverriding
async def receive(self, message_data):
print('receive')
text_data_json = json.loads(message_data)
message = text_data_json['message']
nickname = text_data_json['nickname']
datetime = text_data_json['datetime']
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'chat_message',
'message': message,
'nickname': nickname,
'datetime': datetime,
}
)
# Receive message from room group
async def chat_message(self, event):
print('chat_message')
message = event['message']
nickname = event['nickname']
datetime = event['datetime']
# json.dumps() has no 'encoding' argument in Python 3; ensure_ascii=False already
# keeps the non-ASCII text unescaped, and Channels sends str frames as UTF-8.
text_data = json.dumps({
'message': message,
'nickname': nickname,
'datetime': datetime,
}, ensure_ascii=False)
print(text_data)
# Send message to WebSocket
await self.send(
text_data=text_data
)
| 23.488889 | 65 | 0.715863 | 399 | 3,171 | 5.448622 | 0.172932 | 0.066237 | 0.060718 | 0.062558 | 0.701472 | 0.600276 | 0.575897 | 0.556578 | 0.556578 | 0.472861 | 0 | 0.001136 | 0.167455 | 3,171 | 134 | 66 | 23.664179 | 0.822348 | 0.113844 | 0 | 0.4 | 0 | 0 | 0.119857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.04 | 0 | 0.11 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3564dd1bb12230e53fcd05c742e3176f0b99c4ba | 1,418 | py | Python | project/server/main/parsers/country_detect.py | dataesr/bso-parser-html | 9a5b2d45aa1ff0c61be57fac4e04201becf58a42 | [
"MIT"
] | null | null | null | project/server/main/parsers/country_detect.py | dataesr/bso-parser-html | 9a5b2d45aa1ff0c61be57fac4e04201becf58a42 | [
"MIT"
] | null | null | null | project/server/main/parsers/country_detect.py | dataesr/bso-parser-html | 9a5b2d45aa1ff0c61be57fac4e04201becf58a42 | [
"MIT"
] | null | null | null | import re
import json
from strings import normalize_text
from project.server.main.logger import get_logger
logger = get_logger(__name__)
def construct_regex(a_list):
return re.compile('|'.join([f'(?<![a-z]){kw}(?![a-z])' for kw in a_list]))
def construct_regex_simple(a_list):
return re.compile('|'.join([kw for kw in a_list]))
country_keywords = json.load(open('project/server/main/parsing/country_keywords.json', 'r'))
country_keywords_forbidden = json.load(open('project/server/main/parsing/country_forbidden.json', 'r'))
country_regex = {}
country_regex_forbidden = {}
for country in country_keywords:
country_regex[country] = construct_regex(country_keywords[country])
for country in country_keywords_forbidden:
country_regex_forbidden[country] = construct_regex_simple(country_keywords_forbidden[country])
def detect_country(text):
detected_countries = []
text_normalized = normalize_text(text=text, remove_sep=False)
for _country in country_keywords:
if re.search(country_regex[_country], text_normalized):
if _country in country_regex_forbidden and re.search(country_regex_forbidden[_country], text_normalized):
continue
detected_countries.append(_country)
if len(detected_countries) == 0:
logger.debug(f'///// {text} ///// ')
detected_countries.append('UNK')
return list(set(detected_countries))
| 34.585366 | 117 | 0.734838 | 185 | 1,418 | 5.32973 | 0.275676 | 0.121704 | 0.085193 | 0.057809 | 0.242394 | 0.135903 | 0.087221 | 0.087221 | 0 | 0 | 0 | 0.000826 | 0.146685 | 1,418 | 40 | 118 | 35.45 | 0.81405 | 0 | 0 | 0 | 0 | 0 | 0.104372 | 0.086037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.137931 | 0.068966 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
356672eddabe63fc6bad887f21953d4ceed91977 | 705 | py | Python | bytes_programming/bytes_concept.py | x98zy/fluentpython | 57aea16840b3ef788624c2e95bf918d8aff6078b | [
"MIT"
] | null | null | null | bytes_programming/bytes_concept.py | x98zy/fluentpython | 57aea16840b3ef788624c2e95bf918d8aff6078b | [
"MIT"
] | null | null | null | bytes_programming/bytes_concept.py | x98zy/fluentpython | 57aea16840b3ef788624c2e95bf918d8aff6078b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :bytes_concept
@IDE :pycharm
@Author :xz98y
@Date :2022/1/11 22:54
=================================================='''
"""
码位:每一个字符都有一个标识,即码位,是0-1114111之间的数字,
在Unicode中码位以4到6个十六进制数表示,而且加以U+前缀,例如A的
码位是U+0041(十六进制数41刚好对应十进制数65)
编码:编码即是码位和字节序列之间转换所使用的算法,把码位转换为
字节序列叫做编码,把字节序列转换成码位叫做解码
"""
s = "cafe👨"
b = s.encode("utf8")
# b'cafe\xf0\x9f\x91\xa8' -- the trailing emoji is encoded as four bytes
print(b)
# each element of a bytes object is an integer between 0 and 255, whereas a slice of it is again a byte sequence
print(b[-1])
# Although a byte sequence is really a sequence of integers, its literal form contains ASCII text,
# so each byte may be displayed in one of the following three ways:
# 1. bytes in the printable ASCII range are shown as the ASCII character itself
# 2. bytes for special characters such as tab or newline are shown as escape sequences like \t and \n
# 3. all other byte values are shown as hexadecimal escape sequences; \x00 is the null byte
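# A small illustrative sketch (not part of the original notes) of those three display forms:
print(bytes([65, 9, 0, 233]))  # b'A\t\x00\xe9' -> ASCII char, escape sequence, hex escapes
print(b.decode('utf8'))        # decoding recovers the original string, emoji included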
eval() | 24.310345 | 53 | 0.634043 | 77 | 705 | 5.805195 | 0.896104 | 0.026846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075734 | 0.08227 | 705 | 29 | 54 | 24.310345 | 0.613601 | 0.639716 | 0 | 0 | 0 | 0 | 0.12 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3566982f1559624c7ccf41371d8bfe0feb436403 | 33,003 | py | Python | ecs/meetings/models.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 9 | 2017-02-13T18:17:13.000Z | 2020-11-21T20:15:54.000Z | ecs/meetings/models.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 2 | 2021-05-20T14:26:47.000Z | 2021-05-20T14:26:48.000Z | ecs/meetings/models.py | programmierfabrik/ecs | 2389a19453e21b2ea4e40b272552bcbd42b926a9 | [
"Apache-2.0"
] | 4 | 2017-04-02T18:48:59.000Z | 2021-11-23T15:40:35.000Z | import math
from datetime import timedelta, datetime
from django.core.cache import cache
from django.db import models
from django.db.models import F, Prefetch
from django.dispatch import receiver
from django.db.models.signals import post_delete, post_save
from django.contrib.auth.models import User
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.text import slugify
from django.conf import settings
from django.utils import timezone
from reversion import revisions as reversion
from ecs.authorization import AuthorizationManager
from ecs.core.models.core import MedicalCategory
from ecs.core.models.constants import (
SUBMISSION_LANE_BOARD, SUBMISSION_LANE_RETROSPECTIVE_THESIS,
SUBMISSION_LANE_EXPEDITED, SUBMISSION_LANE_LOCALEC,
)
from ecs.utils import cached_property
from ecs.utils.viewutils import render_pdf, render_pdf_context
from ecs.users.utils import sudo
from ecs.tasks.models import Task, TaskType
from ecs.votes.models import Vote
from ecs.core.models.core import AdvancedSettings
from ecs.documents.models import Document
from ecs.notifications.models import NotificationAnswer, SafetyNotification
from ecs.meetings.signals import on_meeting_top_add, on_meeting_top_delete, on_meeting_top_index_change
class TimetableMetrics(object):
def __init__(self, permutation, users=None):
self.users = users
self.waiting_time_per_user = {}
self._waiting_time_total = timedelta()
self._waiting_time_min = None
self._waiting_time_max = None
self.constraint_violations = {}
self.constraint_violation_total = 0
self.optimal_start_diffs = {}
self._optimal_start_diff_sum = timedelta()
self._optimal_start_diff_squared_sum = 0
offset = timedelta()
for user in users:
user._waiting_time = timedelta()
user._waiting_time_offset = None
for entry in permutation:
next_offset = offset + entry.duration
for user, ignored in entry.users:
if ignored:
continue
if user._waiting_time_offset is not None:
wt = offset - user._waiting_time_offset
user._waiting_time += wt
self._waiting_time_total += wt
user._waiting_time_offset = next_offset
for constraint in user.constraints:
if constraint.offset < next_offset and constraint.offset + constraint.duration > offset:
self.constraint_violations.setdefault(constraint, 0)
self.constraint_violations[constraint] += constraint.weight
self.constraint_violation_total += constraint.weight
if entry.optimal_start_offset is not None:
diff = abs(offset - entry.optimal_start_offset)
self.optimal_start_diffs[entry] = diff
self._optimal_start_diff_squared_sum += diff.total_seconds() ** 2
self._optimal_start_diff_sum += diff
offset = next_offset
for user in users:
wt = user._waiting_time
self.waiting_time_per_user[user] = wt
if self._waiting_time_min is None or wt < self._waiting_time_min:
self._waiting_time_min = wt
if self._waiting_time_max is None or wt > self._waiting_time_max:
self._waiting_time_max = wt
def __repr__(self):
return ", ".join("%s: %s" % (name, getattr(self, 'waiting_time_%s' % name)) for name in ('total', 'avg', 'min', 'max', 'variance'))
@cached_property
def waiting_time_total(self):
s = timedelta(seconds=0)
for time in self.waiting_time_per_user.values():
s += time
return s
@cached_property
def waiting_time_avg(self):
if not self.waiting_time_per_user:
return timedelta(seconds=0)
return self.waiting_time_total / len(self.waiting_time_per_user)
@cached_property
def waiting_time_max(self):
if not self.waiting_time_per_user:
return timedelta(seconds=0)
return max(self.waiting_time_per_user.values())
@cached_property
def waiting_time_min(self):
if not self.waiting_time_per_user:
return timedelta(seconds=0)
return min(self.waiting_time_per_user.values())
@cached_property
def waiting_time_variance(self):
if not self.waiting_time_per_user:
return timedelta(seconds=0)
avg = self.waiting_time_avg.total_seconds()
var = 0
for time in self.waiting_time_per_user.values():
d = avg - time.total_seconds()
var += d*d
return timedelta(seconds=math.sqrt(var / len(self.waiting_time_per_user)))
class AssignedMedicalCategory(models.Model):
category = models.ForeignKey('core.MedicalCategory')
specialist = models.ForeignKey(User, null=True, blank=True, related_name='assigned_medical_categories')
meeting = models.ForeignKey('meetings.Meeting', related_name='medical_categories')
class Meta:
unique_together = (('category', 'meeting'),)
def __str__(self):
return '%s - %s' % (self.meeting.title, self.category.name)
class MeetingManager(AuthorizationManager):
def next(self):
try:
return self.filter(ended=None).order_by('start')[0]
except IndexError:
raise self.model.DoesNotExist()
def past(self):
return self.filter(ended__isnull=False)
def upcoming(self):
return self.filter(ended=None)
def next_schedulable_meeting(self, submission):
first_sf = submission.forms.order_by('created_at')[0]
try:
accepted_sf = submission.forms.filter(is_acknowledged=True).order_by('created_at')[0]
except IndexError:
accepted_sf = submission.current_submission_form
is_thesis = submission.workflow_lane == SUBMISSION_LANE_RETROSPECTIVE_THESIS
grace_period = getattr(settings, 'ECS_MEETING_GRACE_PERIOD', timedelta(0))
meetings = self.filter(deadline__gt=first_sf.created_at).filter(deadline__gt=accepted_sf.created_at-grace_period)
if is_thesis:
meetings = self.filter(deadline_diplomathesis__gt=first_sf.created_at).filter(deadline_diplomathesis__gt=accepted_sf.created_at-grace_period)
now = timezone.now()
month = timedelta(days=30)
try:
return meetings.filter(started=None).order_by('start')[0]
except IndexError:
try:
last_meeting = meetings.all().order_by('-start')[0]
except IndexError:
start = now + month*2
deadline = now + month
deadline_thesis = deadline - timedelta(days=7)
else:
start = last_meeting.start + month
deadline = last_meeting.deadline + month
deadline_thesis = last_meeting.deadline_diplomathesis + month
while ((not is_thesis and (first_sf.created_at >= deadline or accepted_sf.created_at-grace_period >= deadline)) or
(is_thesis and (first_sf.created_at >= deadline_thesis or accepted_sf.created_at-grace_period >= deadline_thesis)) or start <= now):
start += month
deadline += month
deadline_thesis += month
title = timezone.localtime(start).strftime(
ugettext('%B Meeting %Y (automatically generated)'))
m = Meeting.objects.create(start=start, deadline=deadline,
deadline_diplomathesis=deadline_thesis, title=title)
return m
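# Usage note (descriptive only): Meeting.objects.next_schedulable_meeting(submission)
# returns the first unstarted meeting whose deadline (or thesis deadline, for
# retrospective theses) still lies after the first submission form's creation and after
# the accepted form's creation minus the configured grace period; if no such meeting
# exists yet, it steps forward month by month and creates one.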
class Meeting(models.Model):
start = models.DateTimeField()
title = models.CharField(max_length=200)
optimization_task_id = models.TextField(null=True)
submissions = models.ManyToManyField('core.Submission',
through='TimetableEntry', related_name='meetings')
started = models.DateTimeField(null=True)
ended = models.DateTimeField(null=True)
comments = models.TextField(null=True, blank=True)
deadline = models.DateTimeField(null=True)
deadline_diplomathesis = models.DateTimeField(null=True)
deadline_expedited_review = models.DateTimeField(null=True)
agenda_sent_at = models.DateTimeField(null=True)
protocol = models.ForeignKey(Document, related_name="protocol_for_meeting",
null=True, on_delete=models.SET_NULL)
protocol_rendering_started_at = models.DateTimeField(null=True)
protocol_sent_at = models.DateTimeField(null=True)
documents_zip = models.ForeignKey(Document, related_name='zip_for_meeting',
null=True, on_delete=models.SET_NULL)
expedited_reviewer_invitation_sent_at = models.DateTimeField(null=True)
expert_assignment_user = models.ForeignKey('auth.User', null=True)
objects = MeetingManager()
unfiltered = models.Manager()
@property
def retrospective_thesis_entries(self):
return self.timetable_entries.filter(submission__workflow_lane=SUBMISSION_LANE_RETROSPECTIVE_THESIS)
@property
def expedited_entries(self):
return self.timetable_entries.filter(submission__workflow_lane=SUBMISSION_LANE_EXPEDITED)
@property
def localec_entries(self):
return self.timetable_entries.filter(submission__workflow_lane=SUBMISSION_LANE_LOCALEC)
@property
def additional_entries(self):
entries = self.retrospective_thesis_entries.all() | self.expedited_entries.all() | self.localec_entries.all()
return entries.order_by('pk')
def __str__(self):
return "%s: %s" % (self.start, self.title)
@cached_property
def duration(self):
sum_ = self.timetable_entries.aggregate(sum=models.Sum('duration'))['sum']
return sum_ or timedelta()
@property
def end(self):
return self.start + self.duration
@cached_property
def metrics(self):
entries, users = self.timetable
return TimetableMetrics(entries, users)
def create_evaluation_func(self, func):
entries, users = self.timetable
def f(permutation):
return func(TimetableMetrics(permutation, users=users))
return f
def _clear_caches(self):
del self.metrics
del self.duration
del self.timetable
del self.users_with_constraints
del self.timetable_entries_which_violate_constraints
def create_specialist_reviews(self):
task_type = TaskType.objects.get(
workflow_node__uid='specialist_review',
workflow_node__graph__auto_start=True)
for amc in self.medical_categories.exclude(specialist=None):
entries = (self.timetable_entries
.filter(submission__workflow_lane=SUBMISSION_LANE_BOARD,
submission__medical_categories=amc.category)
.exclude(submission__biased_board_members=amc.specialist)
.distinct())
for entry in entries:
participation, created = entry.participations.get_or_create(
medical_category=amc.category, user=amc.specialist)
if created:
with sudo():
bm_task_exists = Task.objects.for_data(entry.submission).filter(
task_type__workflow_node__uid='specialist_review',
assigned_to=amc.specialist).open().exists()
if not bm_task_exists:
token = task_type.workflow_node.bind(
entry.submission.workflow.workflows[0]
).receive_token(None)
token.task.created_by = self.expert_assignment_user
token.task.assign(user=amc.specialist)
def add_entry(self, **kwargs):
visible = kwargs.pop('visible', True)
index = kwargs.pop('index', None)
if visible:
last_index = self.timetable_entries.aggregate(
models.Max('timetable_index'))['timetable_index__max']
if last_index is None:
kwargs['timetable_index'] = 0
else:
kwargs['timetable_index'] = last_index + 1
else:
kwargs['timetable_index'] = None
entry = self.timetable_entries.create(**kwargs)
if index is not None and index != -1:
entry.index = index
if entry.optimal_start:
entry.move_to_optimal_position()
self._clear_caches()
self.create_specialist_reviews()
on_meeting_top_add.send(Meeting, meeting=self, timetable_entry=entry)
return entry
def add_break(self, **kwargs):
kwargs['is_break'] = True
entry = self.add_entry(**kwargs)
return entry
def __getitem__(self, index):
if not isinstance(index, int):
raise KeyError()
if index < 0:
raise IndexError()
try:
return self.timetable_entries.get(timetable_index=index)
except TimetableEntry.DoesNotExist:
raise IndexError()
def __delitem__(self, index):
self[index].delete()
self._clear_caches()
def __len__(self):
return self.timetable_entries.filter(timetable_index__isnull=False).count()
@property
def users(self):
return User.objects.filter(meeting_participations__entry__meeting=self).distinct().order_by('username')
@cached_property
def users_with_constraints(self):
constraints_by_user_id = {}
start_date = self.start.date()
for constraint in self.constraints.order_by('start_time'):
start = timezone.make_aware(
datetime.combine(start_date, constraint.start_time),
timezone.get_current_timezone())
constraint.offset = start - self.start
constraints_by_user_id.setdefault(constraint.user_id, []).append(constraint)
users = []
for user in self.users:
user.constraints = constraints_by_user_id.get(user.id, [])
users.append(user)
return sorted(users, key=lambda u: (u.last_name, u.first_name, u.id))
@cached_property
def timetable_entries_which_violate_constraints(self):
start_date = self.start.date()
entries_which_violate_constraints = []
for constraint in self.constraints.all():
constraint_start = timezone.make_aware(
datetime.combine(start_date, constraint.start_time),
timezone.get_current_timezone())
constraint_end = timezone.make_aware(
datetime.combine(start_date, constraint.end_time),
timezone.get_current_timezone())
participations = Participation.objects.filter(entry__meeting=self,
user=constraint.user, ignored_for_optimization=False,
entry__timetable_index__isnull=False)
for participation in participations:
start = participation.entry.start
end = participation.entry.end
if (constraint_start >= start and constraint_start < end) or \
(constraint_end > start and constraint_end <= end) or \
(constraint_start <= start and constraint_end >= end):
entries_which_violate_constraints.append(participation.entry)
return entries_which_violate_constraints
@cached_property
def timetable(self):
duration = timedelta(seconds=0)
users_by_entry_id = {}
users_by_id = {}
for user in self.users_with_constraints:
users_by_id[user.id] = user
entries = list()
participations = (Participation.objects
.filter(entry__meeting=self)
.select_related('user')
.order_by('user__username')
)
for participation in participations:
users_by_entry_id.setdefault(participation.entry_id, set()).add((users_by_id.get(participation.user_id), participation.ignored_for_optimization))
for entry in self.timetable_entries.filter(timetable_index__isnull=False).select_related('submission').order_by('timetable_index'):
entry.users = users_by_entry_id.get(entry.id, set())
entry.has_ignored_participations = any(ignored for user, ignored in entry.users)
entry.start = self.start + duration
duration += entry.duration
entry.end = self.start + duration
entries.append(entry)
return tuple(entries), set(users_by_id.values())
def __iter__(self):
entries, users = self.timetable
return iter(entries)
def _get_start_for_index(self, index):
offset = (self.timetable_entries
.filter(timetable_index__lt=index)
.aggregate(sum=models.Sum('duration'))['sum'])
return self.start + (offset or timedelta())
def _apply_permutation(self, permutation):
assert set(self) == set(permutation)
for i, entry in enumerate(permutation):
entry.timetable_index = i
entry.save(force_update=True)
self._clear_caches()
@property
def open_tops(self):
return self.timetable_entries.filter(timetable_index__isnull=False, is_open=True)
@property
def open_tops_with_vote(self):
return self.timetable_entries.filter(timetable_index__isnull=False, is_open=True, vote__result__isnull=False)
def __bool__(self):
return True # work around a django bug
def get_agenda_pdf(self, request):
return render_pdf(request, 'meetings/pdf/agenda.html', {
'meeting': self,
})
def get_protocol_pdf(self):
timetable_entries = list(self.timetable_entries.all())
timetable_entries.sort(key=lambda e: e.agenda_index)
tops = []
for top in timetable_entries:
vote = None
try:
vote = top.vote
except Vote.DoesNotExist:
pass
tops.append((top, vote,))
start = Meeting.objects.filter(start__lt=self.start).aggregate(
models.Max('protocol_sent_at'))['protocol_sent_at__max']
end = self.protocol_sent_at
b1ized = Vote.unfiltered.filter(
result='1', upgrade_for__result='2', published_at__isnull=False
).select_related(
'submission_form', 'submission_form__submission',
'submission_form__submitter', 'submission_form__submitter__profile',
).order_by('submission_form__submission__ec_number')
if start:
b1ized = b1ized.filter(published_at__gt=start)
if end:
b1ized = b1ized.filter(published_at__lte=end)
if AdvancedSettings.objects.get(pk=1).display_notifications_in_protocol:
from ecs.core.models import SubmissionForm
answers = NotificationAnswer.unfiltered.exclude(
notification__amendmentnotification__is_substantial=True
).exclude(
notification__in=SafetyNotification.objects.all()
).exclude(published_at=None).select_related(
'notification', 'notification__type',
'notification__safetynotification',
'notification__centerclosenotification'
).prefetch_related(
Prefetch('notification__submission_forms',
queryset=SubmissionForm.unfiltered.select_related('submission'))
).order_by(
'notification__type__position',
'notification__safetynotification__safety_type', 'published_at'
)
if start:
answers = answers.filter(published_at__gt=start)
if end:
answers = answers.filter(published_at__lte=end)
else:
answers = None
return render_pdf_context('meetings/pdf/protocol.html', {
'meeting': self,
'tops': tops,
'substantial_amendments':
self.amendments
.order_by('submission_forms__submission__ec_number'),
'b1ized': b1ized,
'answers': answers,
})
def render_protocol_pdf(self):
pdfdata = self.get_protocol_pdf()
filename = '{}-{}-protocol.pdf'.format(slugify(self.title),
timezone.localtime(self.start).strftime('%d-%m-%Y'))
self.protocol = Document.objects.create_from_buffer(pdfdata,
doctype='meeting_protocol', parent_object=self, name=filename,
original_file_name=filename)
self.save(update_fields=('protocol',))
def _get_timeframe_for_user(self, user):
entries = list(self.timetable_entries.filter(
participations__pk__in=
Participation.objects
.filter(user=user, ignored_for_optimization=False)
.values('pk')
).exclude(timetable_index=None).order_by('timetable_index'))
if not entries:
return None
start, end = entries[0].start, entries[-1].end
start -= timedelta(minutes=start.minute%10) # round to 10 minutes
if end.minute % 10 > 0:
end += timedelta(minutes=10-end.minute%10)
min_dur = timedelta(minutes=30) # take minimal duration into account
if end-start < min_dur:
end = start + min_dur
return (start, end)
def get_timetable_pdf(self, request):
timetable = {}
for entry in self:
for user, ignored in entry.users:
if ignored:
continue
if user in timetable:
timetable[user].append(entry)
else:
timetable[user] = [entry]
timetable = sorted([{
'user': key,
'entries': sorted(timetable[key], key=lambda x:x.timetable_index),
} for key in timetable], key=lambda x: x['user'].last_name+ x['user'].first_name)
for row in timetable:
row['start'], row['end'] = self._get_timeframe_for_user(row['user'])
return render_pdf(request, 'meetings/pdf/timetable.html', {
'meeting': self,
'timetable': timetable,
})
def update_assigned_categories(self):
old_assignments = {}
for amc in self.medical_categories.all():
old_assignments[amc.category_id] = amc
new_mc = MedicalCategory.objects.filter(
submissions=self.submissions.for_board_lane().values('pk'))
for cat in new_mc:
if cat.pk in old_assignments:
del old_assignments[cat.pk]
else:
AssignedMedicalCategory.objects.get_or_create(meeting=self, category=cat)
AssignedMedicalCategory.objects.filter(
pk__in=[amc.pk for amc in old_assignments.values()]).delete()
Participation.objects.filter(entry__meeting=self,
medical_category__in=old_assignments.keys()).delete()
# delete Participation entries where med-cat is not inside the referenced study anymore
for p in Participation.objects.filter(entry__meeting=self, task=None):
submission = p.entry.submission
if submission.workflow_lane != SUBMISSION_LANE_BOARD or \
not submission.medical_categories.filter(id=p.medical_category_id).exists():
p.delete()
@property
def active_top(self):
key = 'meetings:{}:assistant:top_pk'.format(self.pk)
pk = cache.get(key)
if pk:
return TimetableEntry.objects.get(pk=pk)
@active_top.setter
def active_top(self, top):
key = 'meetings:{}:assistant:top_pk'.format(self.pk)
old_val = cache.get(key)
cache.set(key, top.pk, 60*60*24*2)
@reversion.register(fields=('text',))
class TimetableEntry(models.Model):
meeting = models.ForeignKey(Meeting, related_name='timetable_entries')
title = models.CharField(max_length=200, blank=True)
timetable_index = models.IntegerField(null=True)
duration = models.DurationField()
is_break = models.BooleanField(default=False)
submission = models.ForeignKey('core.Submission', null=True, related_name='timetable_entries')
optimal_start = models.TimeField(null=True)
is_open = models.BooleanField(default=True)
text = models.TextField(null=True, blank=True)
def __str__(self):
return "TOP %s" % (self.agenda_index + 1)
class Meta:
unique_together = (
# XXX: modified in migration to be DEFERRABLE INITIALLY DEFERRED
('meeting', 'timetable_index'),
('meeting', 'submission'),
)
@property
def agenda_index(self):
if not self.timetable_index is None:
return self.timetable_index
else:
index = self.meeting.timetable_entries.aggregate(models.Max('timetable_index'))['timetable_index__max']
if index is None:
index = -1
index += self.meeting.timetable_entries.filter(timetable_index=None, pk__lte=self.pk).count()
return index
@cached_property
def optimal_start_offset(self):
if self.optimal_start is None:
return None
return (timezone.make_aware(
datetime.combine(self.meeting.start.date(), self.optimal_start),
timezone.get_current_timezone()) - self.meeting.start
)
@cached_property
def users(self):
return User.objects.filter(meeting_participations__entry=self).order_by('username').distinct()
def _get_index(self):
return self.timetable_index
def _set_index(self, index):
if index < 0 or index >= len(self.meeting):
raise IndexError()
old_index = self.timetable_index
if index == old_index:
return
entries = self.meeting.timetable_entries.filter(timetable_index__isnull=False)
if old_index > index:
changed = entries.filter(
timetable_index__gte=index, timetable_index__lt=old_index)
changed.update(timetable_index=F('timetable_index') + 1)
elif old_index < index:
changed = entries.filter(
timetable_index__gt=old_index, timetable_index__lte=index)
changed.update(timetable_index=F('timetable_index') - 1)
self.timetable_index = index
self.save(force_update=True)
self.meeting._clear_caches()
on_meeting_top_index_change.send(Meeting, meeting=self.meeting, timetable_entry=self)
index = property(_get_index, _set_index)
def move_to_optimal_position(self):
i = 0
offset = timedelta()
optimal_start = timezone.make_aware(
datetime.combine(self.meeting.start.date(), self.optimal_start),
timezone.get_current_timezone()
)
start_delta = optimal_start - self.meeting.start
for entry in self.meeting.timetable_entries.filter(timetable_index__isnull=False).order_by('timetable_index').exclude(pk=self.pk):
if offset >= start_delta:
break
offset += entry.duration
i += 1
self.index = i
@cached_property
def start(self):
return self.meeting._get_start_for_index(self.timetable_index)
@cached_property
def end(self):
return self.start + self.duration
@cached_property
def medical_categories(self): # XXX: where is this used?
if not self.submission:
return MedicalCategory.objects.none()
return MedicalCategory.objects.filter(submissions__timetable_entries=self)
@property
def is_batch_processed(self):
return bool(self.submission_id) and not self.submission.is_regular
@property
def next(self):
try:
return self.meeting[self.index + 1]
except IndexError:
return None
@property
def previous(self):
try:
return self.meeting[self.index - 1]
except IndexError:
return None
@property
def next_open(self):
entries = self.meeting.timetable_entries.filter(timetable_index__gt=self.index).filter(is_open=True).order_by('timetable_index')[:1]
if entries:
return entries[0]
return None
@property
def previous_open(self):
entries = self.meeting.timetable_entries.filter(timetable_index__lt=self.index).filter(is_open=True).order_by('-timetable_index')[:1]
if entries:
return entries[0]
return None
def _collect_users(self, padding, r):
users = set()
offset = timedelta()
for i in r:
entry = self.meeting[i]
users.update(entry.users)
offset += entry.duration
if offset >= padding:
break
return users
@cached_property
def broetchen(self, padding_before=timedelta(hours=1), padding_after=timedelta(hours=1)):
waiting_users = set(User.objects.filter(
meeting_participations__entry__meeting=self.meeting,
meeting_participations__entry__timetable_index__lte=self.timetable_index,
).filter(
meeting_participations__entry__meeting=self.meeting,
meeting_participations__entry__timetable_index__gte=self.timetable_index,
).distinct()
)
before = self._collect_users(padding_before, range(self.index - 1, -1, -1)).difference(waiting_users)
after = self._collect_users(padding_after, range(self.index + 1, len(self.meeting))).difference(waiting_users)
return len(before), len(waiting_users), len(after)
@property
def visible(self):
return not self.timetable_index is None
def refresh(self, **kwargs):
visible = kwargs.pop('visible', True)
previous_index = self.timetable_index
to_visible = visible and previous_index is None
from_visible = not visible and not previous_index is None
if to_visible:
last_index = self.meeting.timetable_entries.aggregate(models.Max('timetable_index'))['timetable_index__max']
if last_index is None:
kwargs['timetable_index'] = 0
else:
kwargs['timetable_index'] = last_index + 1
elif from_visible:
kwargs['timetable_index'] = None
for k, v in kwargs.items():
setattr(self, k, v)
self.save()
if from_visible:
changed = self.meeting.timetable_entries.filter(
timetable_index__gt=previous_index)
changed.update(timetable_index=F('timetable_index') - 1)
# invisible tops don't have participants
self.participations.all().delete()
self.meeting._clear_caches()
self.meeting.create_specialist_reviews()
@receiver(post_delete, sender=TimetableEntry)
def _timetable_entry_post_delete(sender, **kwargs):
entry = kwargs['instance']
if not entry.timetable_index is None:
changed = entry.meeting.timetable_entries.filter(
timetable_index__gt=entry.index)
changed.update(timetable_index=F('timetable_index') - 1)
entry.meeting.update_assigned_categories()
on_meeting_top_delete.send(Meeting, meeting=entry.meeting, timetable_entry=entry)
@receiver(post_save, sender=TimetableEntry)
def _timetable_entry_post_save(sender, **kwargs):
entry = kwargs['instance']
entry.meeting.update_assigned_categories()
class Participation(models.Model):
entry = models.ForeignKey(TimetableEntry, related_name='participations')
user = models.ForeignKey(User, related_name='meeting_participations')
medical_category = models.ForeignKey(MedicalCategory, related_name='meeting_participations', null=True, blank=True)
task = models.ForeignKey('tasks.Task', null=True)
ignored_for_optimization = models.BooleanField(default=False)
WEIGHT_CHOICES = (
(1.0, _('impossible')),
(0.5, _('unfavorable')),
)
class Constraint(models.Model):
meeting = models.ForeignKey(Meeting, related_name='constraints')
user = models.ForeignKey(User, related_name='meeting_constraints')
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
weight = models.FloatField(default=0.5, choices=WEIGHT_CHOICES)
@property
def duration(self):
d = timezone.now().date()
return datetime.combine(d, self.end_time) - datetime.combine(d, self.start_time)
| 40.644089 | 157 | 0.644699 | 3,697 | 33,003 | 5.479849 | 0.112253 | 0.042154 | 0.01851 | 0.018658 | 0.351794 | 0.276618 | 0.225727 | 0.179821 | 0.150007 | 0.127894 | 0 | 0.003833 | 0.264734 | 33,003 | 811 | 158 | 40.694205 | 0.831039 | 0.008848 | 0 | 0.237822 | 0 | 0 | 0.054032 | 0.018592 | 0 | 0 | 0 | 0 | 0.001433 | 1 | 0.098854 | false | 0.001433 | 0.037249 | 0.032951 | 0.309456 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3568bda72dd0a6bcc2a3718d0834265194aaa7c6 | 4,558 | py | Python | fireSummary/services/query_constructor_service.py | gfw-api/fires-summary-stats | 3d5cc2030c9ac87d674dd99805a9b2c215aac05e | [
"MIT"
] | null | null | null | fireSummary/services/query_constructor_service.py | gfw-api/fires-summary-stats | 3d5cc2030c9ac87d674dd99805a9b2c215aac05e | [
"MIT"
] | null | null | null | fireSummary/services/query_constructor_service.py | gfw-api/fires-summary-stats | 3d5cc2030c9ac87d674dd99805a9b2c215aac05e | [
"MIT"
] | null | null | null | import datetime
class QueryConstructorService(object):
"""Class for formatting query and download sql"""
@staticmethod
def format_dataset_query(dataset_name, params):
agg_by = params['aggregate_by']
agg_values = params['aggregate_values']
agg_admin = params['aggregate_admin']
agg_time = params['aggregate_time']
polyname = params['polyname']
iso_code = params['iso_code']
adm1_code = params['adm1_code']
adm2_code = params['adm2_code']
if agg_by in ['iso', 'adm1', 'adm2', 'global']:
agg_admin = agg_by
if agg_by in ['day', 'week', 'month', 'quarter', 'year']:
agg_time = agg_by
start_date, end_date = params['period'].split(',')
groupby_sql = None
if agg_time == 'day':
agg_time = 'alert_date'
select_statement = "SELECT SUM(alerts) as alerts"
if dataset_name == 'fires':
where_statement = "WHERE polyname = '{}' AND ".format(polyname)
else:
where_statement = 'WHERE '
# AGGREGATE VALUES
if agg_values:
# by admin level
if agg_admin in ['adm1', 'adm2', 'iso']:
# add adm1 or adm1, adm2 to select statement
select_groupby_dict = {'iso': ', iso', 'adm1': ', adm1', 'adm2': ', adm1, adm2'}
if iso_code == 'global':
select_groupby_dict['adm1'] = ', iso, adm1'
select_statement += select_groupby_dict[agg_admin]
groupby_sql = select_groupby_dict[agg_admin].strip(', ')
# if summing by admin, add this to where statement
if not iso_code == 'global':
where_statement += "iso = '{}' AND ".format(iso_code)
sql = "{0} FROM data " \
"{1}" \
"(alert_date >= '{2}' AND alert_date <= '{3}')".format(select_statement,
where_statement,
start_date, end_date)
# by time interval
if agg_time or agg_admin == 'global':
select_statement += ", alert_date"
# if summing by admin, add this to where statement
if not iso_code == 'global':
if "iso = '{}' AND ".format(iso_code) not in where_statement:
where_statement += "iso = '{}' AND ".format(iso_code)
sql = "{0} FROM data " \
"{1}" \
"(alert_date >= '{2}' AND alert_date <= '{3}')".format(select_statement,
where_statement,
start_date, end_date)
# DON'T AGGREGATE VALUES
else:
# if summing globally, not by admin:
if dataset_name == 'fires':
where_statement = "WHERE polyname = '{}' AND ".format(polyname)
# if summing by admin, add this to where statement
if not iso_code == 'global':
where_statement += "iso = '{}' AND ".format(iso_code)
sql = "{0} FROM data " \
"{1}" \
"(alert_date >= '{3}' AND alert_date <= '{4}')".format(select_statement,
where_statement,
iso_code,
start_date, end_date)
# add the select query for admin levels
if adm1_code:
sql += " and adm1 = {}".format(adm1_code)
if adm2_code:
sql += " and adm2 = {}".format(adm2_code)
if dataset_name == 'fires' and params['fire_type']:
sql += " and fire_type = '{}'".format(params['fire_type'].upper())
if dataset_name == 'glad' and params['gladConfirmOnly']:
sql += " and confidence = '3'"
# at the very end, add the GROUP BY statement
if agg_values:
if agg_time or agg_admin == 'global':
if not groupby_sql:
groupby_sql = 'alert_date'
else:
groupby_sql += ', alert_date'
sql += " GROUP BY " + groupby_sql
return sql
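if __name__ == '__main__':
    # Illustrative sketch only: the parameter values below are made up to show the
    # expected shape of the params dict; they are not taken from the service's real callers.
    sample_params = {
        'aggregate_by': 'day',
        'aggregate_values': True,
        'aggregate_admin': None,
        'aggregate_time': None,
        'polyname': 'admin',
        'iso_code': 'global',
        'adm1_code': None,
        'adm2_code': None,
        'period': '2018-01-01,2018-12-31',
        'fire_type': None,
    }
    print(QueryConstructorService.format_dataset_query('fires', sample_params))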
| 38.302521 | 98 | 0.463142 | 447 | 4,558 | 4.494407 | 0.187919 | 0.090592 | 0.023892 | 0.031857 | 0.393728 | 0.318566 | 0.318566 | 0.293678 | 0.293678 | 0.293678 | 0 | 0.01422 | 0.429136 | 4,558 | 118 | 99 | 38.627119 | 0.757879 | 0.092804 | 0 | 0.407895 | 0 | 0 | 0.181355 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0 | 0.013158 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35692da6a2a8c23d54649518606b5cf8bde8b688 | 1,538 | py | Python | exercicios/ex092.py | EduardoPessanha/Git-Python | 87aa10af09510469032732ed2c55d0d65eb4c1d6 | [
"MIT"
] | null | null | null | exercicios/ex092.py | EduardoPessanha/Git-Python | 87aa10af09510469032732ed2c55d0d65eb4c1d6 | [
"MIT"
] | null | null | null | exercicios/ex092.py | EduardoPessanha/Git-Python | 87aa10af09510469032732ed2c55d0d65eb4c1d6 | [
"MIT"
] | null | null | null | # *************************** Desafio 092 ***************************** #
# Cadastro de Trabalhador em Python #
# Crie um programa que leia nome, ano de nascimento e carteira de #
# trabalho e cadastre-o (com idade) em um dicionário. Se por acaso a #
# CTPS for diferente de ZERO, o dicionário receberá também o ano de #
# contratação e o salário. Calcule e acrescente, além da idade, com #
# quantos anos a pessoa vai se aposentar. #
# ********************************************************************* #
from datetime import datetime
linha = '++' * 24
linha1 = '\033[1;34m*=\033[m' * 24
título = ' \033[1;3;4;7;34mCadastro de Trabalhador em Python\033[m '
print(f'\n{título:*^64}\n')
print(linha)
# ********************************************************************* #
# cad = {'nome': '', 'idade': '', 'CTPS': ''}
# print(cad)
cad = dict()
cad['nome'] = str(input('Nome: ')).capitalize().strip()
cad['idade'] = datetime.now().year - (int(input('Ano de Nascimento: ')))
cad['CTPS'] = int(input('Carteira de Trabalho - CTPS (0 não tem): '))
if cad['CTPS'] != 0:
cad['contratação'] = int(input('Ano de contratação: '))
cad['salário'] = float(input('Salário: R$ '))
# For this exercise we assume retirement after 35 years of work
cad['aposentadoria'] = cad['contratação'] + 35 - datetime.now().year + cad['idade']
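    # Worked example (illustrative): hired in 2010 and aged 34 in 2024 ->
    # 2010 + 35 = 2045 is the retirement year, and 2045 - 2024 + 34 = 55 is the retirement age.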
print(linha1)
for k, v in cad.items():
print(f'{"- ":>6} {k.capitalize()} tem o valor {v}')
print(linha1)
| 49.612903 | 87 | 0.53186 | 189 | 1,538 | 4.328042 | 0.47619 | 0.02445 | 0.036675 | 0.051345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 0.193758 | 1,538 | 30 | 88 | 51.266667 | 0.627419 | 0.498049 | 0 | 0.111111 | 0 | 0 | 0.397333 | 0.032 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.277778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
356955bdca6f57ffcb4b85e88755f03dbe084c88 | 2,227 | py | Python | backend/treeckle/events/logic/subscription.py | CAPTxTreeckle/Treeckle-2.0 | 3a7f4c1a265b836a870ff34e6faff8b292002a52 | [
"MIT"
] | null | null | null | backend/treeckle/events/logic/subscription.py | CAPTxTreeckle/Treeckle-2.0 | 3a7f4c1a265b836a870ff34e6faff8b292002a52 | [
"MIT"
] | 5 | 2020-11-19T09:12:48.000Z | 2020-12-23T21:46:19.000Z | backend/treeckle/events/logic/subscription.py | CAPTxTreeckle/Treeckle-2.0 | 3a7f4c1a265b836a870ff34e6faff8b292002a52 | [
"MIT"
] | 4 | 2020-05-13T12:47:15.000Z | 2021-07-13T17:01:38.000Z | from typing import Iterable
from django.db.models import QuerySet
from django.db import IntegrityError
from users.models import User
from events.models import (
EventCategoryTypeSubscription,
SubscriptionActionType,
EventCategoryType,
)
from events.logic.event import get_event_category_types
def get_event_category_type_subscriptions(
*args,
**kwargs,
) -> QuerySet[EventCategoryTypeSubscription]:
return EventCategoryTypeSubscription.objects.filter(*args, **kwargs)
def get_user_event_category_subscription_info(
user: User,
) -> tuple[list[str], list[str]]:
user_subscriptions = get_event_category_type_subscriptions(
user=user
).select_related("category")
subscribed_categories = [
subscription.category.name for subscription in user_subscriptions
]
non_subscribed_event_category_types = get_event_category_types(
organization=user.organization
).exclude(name__in=subscribed_categories)
non_subscribed_categories = [
category.name for category in non_subscribed_event_category_types
]
return subscribed_categories, non_subscribed_categories
def update_user_event_category_subscriptions(
actions: Iterable[dict], user: User
) -> None:
categories = get_event_category_types(organization=user.organization)
updated_category_subscriptions = set()
for data in actions:
try:
action = data.get("action")
category = categories.get(name=data.get("category"))
## prevents multiple actions on same category
if category in updated_category_subscriptions:
continue
if action == SubscriptionActionType.SUBSCRIBE:
EventCategoryTypeSubscription.objects.create(
user=user, category=category
)
elif action == SubscriptionActionType.UNSUBSCRIBE:
get_event_category_type_subscriptions(
user=user, category=category
).delete()
else:
continue
updated_category_subscriptions.add(category)
except (IntegrityError, EventCategoryType.DoesNotExist) as e:
continue
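# Expected payload shape for update_user_event_category_subscriptions (illustrative only;
# the category names are made up):
#   actions = [
#       {"action": SubscriptionActionType.SUBSCRIBE, "category": "Talks"},
#       {"action": SubscriptionActionType.UNSUBSCRIBE, "category": "Socials"},
#   ]
#   update_user_event_category_subscriptions(actions, user)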
| 31.366197 | 73 | 0.698698 | 216 | 2,227 | 6.944444 | 0.319444 | 0.086667 | 0.064 | 0.042 | 0.240667 | 0.12 | 0.12 | 0 | 0 | 0 | 0 | 0 | 0.236192 | 2,227 | 70 | 74 | 31.814286 | 0.881834 | 0.018859 | 0 | 0.090909 | 0 | 0 | 0.010082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.109091 | 0.018182 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
356cb4bac512fc0b42b3c1f24ac772087dbb2fe7 | 3,693 | py | Python | python/dev/machine-learning/mnist_classification.py | joseph97git/projects-by-language | 7bccdf16e4d7fdc7c12ca065d5338dec9843b64a | [
"MIT"
] | null | null | null | python/dev/machine-learning/mnist_classification.py | joseph97git/projects-by-language | 7bccdf16e4d7fdc7c12ca065d5338dec9843b64a | [
"MIT"
] | 89 | 2021-03-10T00:14:14.000Z | 2022-03-02T09:50:27.000Z | python/dev/machine-learning/mnist_classification.py | joseph97git/projects-by-language | 7bccdf16e4d7fdc7c12ca065d5338dec9843b64a | [
"MIT"
] | null | null | null | # baseline cnn model for mnist
from numpy import mean
from numpy import std
from matplotlib import pyplot
from sklearn.model_selection import KFold
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD
# load train and test dataset
def load_dataset():
# load dataset
(trainX, trainY), (testX, testY) = mnist.load_data()
# reshape dataset to have a single channel
trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
testX = testX.reshape((testX.shape[0], 28, 28, 1))
# one hot encode target values
trainY = to_categorical(trainY)
testY = to_categorical(testY)
return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
# convert from integers to floats
train_norm = train.astype('float32')
test_norm = test.astype('float32')
# normalize to range 0-1
train_norm = train_norm / 255.0
test_norm = test_norm / 255.0
# return normalized images
return train_norm, test_norm
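# e.g. a raw pixel value of 255 maps to 1.0 and 128 maps to ~0.502 after this scaling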
# define cnn model
def define_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))
# compile model
opt = SGD(lr=0.01, momentum=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
return model
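# Shape walk-through of define_model (descriptive note): a 28x28x1 input passes through the
# 3x3 conv with 32 filters (-> 26x26x32), 2x2 max-pooling (-> 13x13x32), flattening (-> 5408
# values), the 100-unit hidden layer, and finally the 10-way softmax over the digit classes.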
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
scores, histories = list(), list()
# prepare cross validation
kfold = KFold(n_folds, shuffle=True, random_state=1)
# enumerate splits
for train_ix, test_ix in kfold.split(dataX):
# select rows for train and test
trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
# define a fresh model for each fold so that folds do not share trained weights
model = define_model()
# fit model
history = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)
# evaluate model
_, acc = model.evaluate(testX, testY, verbose=0)
print('> %.3f' % (acc * 100.0))
# stores scores
scores.append(acc)
histories.append(history)
return scores, histories
# plot diagnostic learning curves
def summarize_diagnostics(histories):
for i in range(len(histories)):
# plot loss
pyplot.subplot(211)
pyplot.title('Cross Entropy Loss')
pyplot.plot(histories[i].history['loss'], color='blue', label='train')
pyplot.plot(histories[i].history['val_loss'], color='orange', label='test')
# plot accuracy
pyplot.subplot(212)
pyplot.title('Classification Accuracy')
pyplot.plot(histories[i].history['accuracy'], color='blue', label='train')
pyplot.plot(histories[i].history['val_accuracy'], color='orange', label='test')
pyplot.show()
# summarize model performance
def summarize_performance(scores):
# print summary
print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
# box and whisker plots of results
pyplot.boxplot(scores)
pyplot.show()
# run the test harness for evaluating a model
def run_test_harness():
# load dataset
trainX, trainY, testX, testY = load_dataset()
# prepare pixel data
trainX, testX = prep_pixels(trainX, testX)
# evaluate model (a fresh model is created inside each cross-validation fold)
scores, histories = evaluate_model(trainX, trainY)
# learning curves
summarize_diagnostics(histories)
# summarize estimated performance
summarize_performance(scores)
# entry point, run the test harness
run_test_harness() | 33.880734 | 107 | 0.744923 | 526 | 3,693 | 5.136882 | 0.321293 | 0.026647 | 0.022206 | 0.031088 | 0.118431 | 0.090303 | 0.036269 | 0.036269 | 0.036269 | 0.036269 | 0 | 0.024375 | 0.133496 | 3,693 | 109 | 108 | 33.880734 | 0.82 | 0.200108 | 0 | 0.028986 | 0 | 0 | 0.079398 | 0.008214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101449 | false | 0 | 0.173913 | 0 | 0.333333 | 0.028986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
356dc18adb6cb37f91531dd8cbdccdfe2925e408 | 1,783 | py | Python | sorting/0853426_HW1_MAIN.py | sychen6192/Highlevel_Algorithm | 3c6d2e2cd599df5c54104632b688874e662146f0 | [
"Apache-2.0"
] | null | null | null | sorting/0853426_HW1_MAIN.py | sychen6192/Highlevel_Algorithm | 3c6d2e2cd599df5c54104632b688874e662146f0 | [
"Apache-2.0"
] | null | null | null | sorting/0853426_HW1_MAIN.py | sychen6192/Highlevel_Algorithm | 3c6d2e2cd599df5c54104632b688874e662146f0 | [
"Apache-2.0"
] | null | null | null | from MergeSort import mergeSort
from QuickSort import quickSort
from HeapSort import heapSort
from InsertionSort import insertionSort
import time
import sys
import openpyxl
import math
sys.setrecursionlimit(992233720)  # raise the recursion limit so the recursive sorts can handle large inputs
sort_fun = input("please input sorting method (merge/quick/heap/insertion):")
n = int(input('please input length of data :'))
nk = str(n//1000)
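# dict_n maps each data-set size to the spreadsheet column that holds its results in the report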
dict_n = {10000: "B", 20000: "C", 40000: "D"}
for i in range(1, 13):
pwd = './HW1/'+nk+'k/'+nk+'k'+"{:02d}".format(i)+'.txt'
print('Reading '+nk+"k{:02d}".format(i)+'.txt'+'....', end='')
with open(pwd, 'r') as fn:
data = []
for line in fn:
data.append(int(line))
if sort_fun == 'merge':
start = time.time()
mergeSort(data)
total = time.time() - start
elif sort_fun == 'quick':
start = time.time()
quickSort(data, 0, len(data)-1)
total = time.time() - start
elif sort_fun == 'heap':
start = time.time()
heapSort(data)
total = time.time() - start
elif sort_fun == 'insertion':
start = time.time()
insertionSort(data)
total = time.time() - start
else:
print("please input correct method!!!")
for k in range(0, n-1):
if data[k] > data[k+1]:
print("error!")
break
print("correct!")
print("Sorting Time : %dms" % float(1000*total))
# open the report workbook
wb = openpyxl.load_workbook(u'./HW1/學號_HW1_report.xlsx')
# select the worksheet for the chosen sorting method
ws = wb[sort_fun+' sort result']
row = dict_n[n] + str(i + 1)
ws[row] = str(math.floor(1000 * total)) + ' ms'
wb.save('./HW1/學號_HW1_report.xlsx') | 33.018519 | 77 | 0.542344 | 226 | 1,783 | 4.221239 | 0.384956 | 0.067086 | 0.054507 | 0.075472 | 0.19392 | 0.133124 | 0.099581 | 0.069182 | 0 | 0 | 0 | 0.043548 | 0.304543 | 1,783 | 54 | 78 | 33.018519 | 0.725806 | 0.0129 | 0 | 0.163265 | 0 | 0 | 0.159841 | 0.0438 | 0.020408 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.163265 | 0 | 0.163265 | 0.102041 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35702c3667f59ed4a5ef3ee53094c0b0a8d31699 | 3,251 | py | Python | src/chaospy/chol/gill_king.py | yoon-gu/chaospy | fe541840a79882008f38764cd7ba4935a4fd4fa3 | [
"BSD-3-Clause"
] | 1 | 2018-08-22T01:51:25.000Z | 2018-08-22T01:51:25.000Z | src/chaospy/chol/gill_king.py | TribleCircle/chaospy | f22aa31e2a338a32a6d09b810c5b629c10a87236 | [
"BSD-3-Clause"
] | null | null | null | src/chaospy/chol/gill_king.py | TribleCircle/chaospy | f22aa31e2a338a32a6d09b810c5b629c10a87236 | [
"BSD-3-Clause"
] | 1 | 2019-11-24T17:16:30.000Z | 2019-11-24T17:16:30.000Z | """
Algorithm 3.4 of 'Numerical Optimization' by Jorge Nocedal and Stephen J.
Wright
This is based on the MATLAB code from Michael L. Overton <overton@cs.nyu.edu>:
http://cs.nyu.edu/overton/g22_opt/codes/cholmod.m
"""
import numpy
import scipy.sparse
def gill_king(mat, eps=1e-16):
"""
Gill-King algorithm for modified cholesky decomposition.
Args:
mat (numpy.ndarray) : Must be a non-singular and symmetric matrix. If
sparse, the result will also be sparse.
eps (float) : Error tolerance used in algorithm.
Returns:
lowtri (numpy.ndarray) : Lower triangular Cholesky factor.
Examples:
>>> mat = [[4, 2, 1], [2, 6, 3], [1, 3, -.004]]
>>> lowtri = gill_king(mat)
>>> print(numpy.around(lowtri, 4))
[[2. 0. 0. ]
[1. 2.2361 0. ]
[0.5 1.118 1.2264]]
>>> print(numpy.around(numpy.dot(lowtri, lowtri.T), 4))
[[4. 2. 1. ]
[2. 6. 3. ]
[1. 3. 3.004]]
"""
if not scipy.sparse.issparse(mat):
mat = numpy.asfarray(mat)
assert numpy.allclose(mat, mat.T)
size = mat.shape[0]
mat_diag = mat.diagonal()
gamma = abs(mat_diag).max()
off_diag = abs(mat - numpy.diag(mat_diag)).max()
delta = eps*max(gamma + off_diag, 1)
beta = numpy.sqrt(max(gamma, off_diag/size, eps))
lowtri = _gill_king(mat, beta, delta)
return lowtri
def _gill_king(mat, beta, delta):
"""Backend function for the Gill-King algorithm."""
size = mat.shape[0]
# initialize d_vec and lowtri
if scipy.sparse.issparse(mat):
lowtri = scipy.sparse.eye(*mat.shape)
else:
lowtri = numpy.eye(size)
d_vec = numpy.zeros(size, dtype=float)
# there are no inner for loops, everything implemented with
# vector operations for a reasonable level of efficiency
for idx in range(size):
if idx == 0:
idz = [] # column index: all columns to left of diagonal
# d_vec(idz) doesn't work in case idz is empty
else:
idz = numpy.s_[:idx]
djtemp = mat[idx, idx] - numpy.dot(
lowtri[idx, idz], d_vec[idz]*lowtri[idx, idz].T)
# C(idx, idx) in book
if idx < size - 1:
idy = numpy.s_[idx+1:size]
# row index: all rows below diagonal
ccol = mat[idy, idx] - numpy.dot(
lowtri[idy, idz], d_vec[idz]*lowtri[idx, idz].T)
# C(idy, idx) in book
theta = abs(ccol).max()
# guarantees d_vec(idx) not too small and lowtri(idy, idx) not too
# big in sufficiently positive definite case, d_vec(idx) = djtemp
d_vec[idx] = max(abs(djtemp), (theta/beta)**2, delta)
lowtri[idy, idx] = ccol/d_vec[idx]
else:
d_vec[idx] = max(abs(djtemp), delta)
# convert to usual output format: replace lowtri by lowtri*sqrt(D) and
# transpose
for idx in range(size):
lowtri[:, idx] = lowtri[:, idx]*numpy.sqrt(d_vec[idx])
# lowtri = lowtri*diag(sqrt(d_vec)) bad in sparse case
return lowtri
if __name__ == "__main__":
import doctest
doctest.testmod()
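    # Descriptive note (not from the original source): the factor returned by gill_king
    # satisfies mat + E = lowtri @ lowtri.T for some non-negative diagonal perturbation E,
    # and for a sufficiently positive-definite mat the perturbation is zero, e.g.
    # (illustrative check):
    #   a = numpy.array([[4., 2.], [2., 3.]])
    #   lowtri = gill_king(a)
    #   numpy.allclose(lowtri @ lowtri.T, a)   # expected True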
| 30.383178 | 78 | 0.570594 | 457 | 3,251 | 3.982495 | 0.36105 | 0.026374 | 0.023077 | 0.015385 | 0.096703 | 0.056044 | 0.035165 | 0.035165 | 0.026374 | 0 | 0 | 0.026455 | 0.302369 | 3,251 | 106 | 79 | 30.669811 | 0.776014 | 0.459551 | 0 | 0.209302 | 0 | 0 | 0.004851 | 0 | 0 | 0 | 0 | 0 | 0.023256 | 1 | 0.046512 | false | 0 | 0.069767 | 0 | 0.162791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
357231ce1cc5f7661dddaf81062a7891ab31be2c | 12,606 | py | Python | clauses.py | michaelpatrickpurcell/balanced-nontransitive-dice | d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b | [
"MIT"
] | null | null | null | clauses.py | michaelpatrickpurcell/balanced-nontransitive-dice | d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b | [
"MIT"
] | null | null | null | clauses.py | michaelpatrickpurcell/balanced-nontransitive-dice | d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b | [
"MIT"
] | null | null | null | import pysat
from pysat.pb import PBEnc
from itertools import product, permutations
# ============================================================================
# Utilities for problems that involve 1v1 dice comparisons
# ============================================================================
def build_clauses(
d,
dice_names,
scores,
vpool=None,
card_clauses=False,
symmetry_clauses=True,
structure_clauses=True,
pb=PBEnc.equals,
):
"""
Build the clauses that describe the SAT problem.
"""
dice_pairs = list(permutations(dice_names, 2))
n = len(dice_pairs)
faces = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}
var_lists = {(x, y): list(product(faces[x], faces[y])) for (x, y) in dice_pairs}
variables = sum(var_lists.values(), [])
var_dict = dict((v, k) for k, v in enumerate(variables, 1))
clauses = []
clauses += build_converse_clauses(d, var_dict, dice_names)
clauses += build_sorting_clauses(d, var_dict, faces)
clauses += build_transitivity_clauses(d, var_dict, faces)
if symmetry_clauses:
clauses += build_symmetry_clauses(d, var_dict, dice_names)
if structure_clauses:
clauses += build_structure_clauses(d, var_dict, var_lists, scores)
if card_clauses:
if vpool == None:
vpool = pysat.formula.IDPool(start_from=n * d ** 2 + 1)
clauses += build_cardinality_clauses(d, var_dict, var_lists, scores, vpool, pb)
cardinality_lits = dict()
else:
cardinality_lits = build_cardinality_lits(d, var_dict, var_lists, scores)
return clauses, cardinality_lits
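# Illustrative call shape for build_clauses (the values here are made up, not a target known
# to be satisfiable): for three six-sided dice where each die should beat the next on 21 of
# the 36 face pairs, one would pass
#   scores = {("A", "B"): 21, ("B", "C"): 21, ("C", "A"): 21}
#   clauses, cardinality_lits = build_clauses(6, ["A", "B", "C"], scores)
# and hand the clauses (plus the cardinality literals) to a SAT solver from python-sat.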
# ============================================================================
def build_horizontal_sorting_clauses(d, var_dict, face_dict):
"""
These clauses caputure the implications:
if (Xi > Yj) then (Xi > Yk) for k <= j
"""
horizontal_sorting_clauses = []
for x, y in permutations(face_dict.keys(), 2):
for i in range(d):
for j in range(1, d):
v1 = var_dict[(face_dict[x][i], face_dict[y][j])]
v2 = var_dict[(face_dict[x][i], face_dict[y][j - 1])]
horizontal_sorting_clauses.append([-v1, v2])
return horizontal_sorting_clauses
def build_vertical_sorting_clauses(d, var_dict, face_dict):
"""
These clauses capture the implications:
if (Xi > Yj) then (Xk > Yj) for k >= i
"""
vertical_sorting_clauses = []
for x, y in permutations(face_dict.keys(), 2):
for i in range(d - 1):
for j in range(d):
v1 = var_dict[(face_dict[x][i], face_dict[y][j])]
v2 = var_dict[(face_dict[x][i + 1], face_dict[y][j])]
vertical_sorting_clauses.append([-v1, v2])
return vertical_sorting_clauses
def build_sorting_clauses(d, var_dict, face_dict):
"""
These clauses ensure that each constraint matrix is lower triangular.
"""
sorting_clauses = []
sorting_clauses += build_horizontal_sorting_clauses(d, var_dict, face_dict)
sorting_clauses += build_vertical_sorting_clauses(d, var_dict, face_dict)
return sorting_clauses
def build_transitivity_clauses(d, var_dict, face_dict):
"""
These clauses caputure the implications
if (Xi > Yj) and (Yj > Zk) then (Xi > Zk)
and
if (Xi < Yj) and (Yj < Zk) then (Xi < Zk)
"""
transitivity_clauses = []
for x, y, z in permutations(face_dict.keys(), 3):
for i in range(d):
for j in range(d):
for k in range(d):
v1 = var_dict[(face_dict[x][i], face_dict[y][j])]
v2 = var_dict[(face_dict[y][j], face_dict[z][k])]
v3 = var_dict[(face_dict[z][k], face_dict[x][i])]
transitivity_clauses.append([v1, v2, v3])
transitivity_clauses.append([-v1, -v2, -v3])
return transitivity_clauses
def build_converse_clauses(d, var_dict, dice_names):
"""
These clauses capture the implications:
if (A1 > C1), then ~(C1 > A1)
"""
converse_clauses = []
for x, y in var_dict:
v1 = var_dict[(x, y)]
v2 = var_dict[(y, x)]
converse_clauses.append([-v1, -v2])
converse_clauses.append([v1, v2])
return converse_clauses
def build_symmetry_clauses(d, var_dict, dice_names):
"""
These clauses ensure that A1 is the smallest face.
"""
symmetry_clauses = []
v0 = dice_names[0]
for v in dice_names[1:]:
for i in range(1, d + 1):
symmetry_clauses.append([-var_dict[(v0 + "1", v + ("%i" % i))]])
symmetry_clauses.append([var_dict[(v + ("%i" % i), v0 + "1")]])
return symmetry_clauses
def build_structure_clauses(d, var_dict, var_lists, scores):
structure_clauses = []
# for x, var_list in var_lists.items():
for x in scores:
var_list = var_lists[x]
score = scores[x]
for i, j in product(range(1, d + 1), repeat=2):
v = var_dict[(x[0] + "%i" % i, x[1] + "%i" % j)]
if ((d + 1 - i) * j) > score:
structure_clauses.append([-v])
elif (i * (d + 1 - j)) > d ** 2 - score:
structure_clauses.append([v])
return structure_clauses
# ----------------------------------------------------------------------------
def build_cardinality_clauses(d, var_dict, var_lists, scores, vpool, pb=PBEnc.equals):
"""
These clauses ensure that each pair of dice have the specified relationship.
"""
dice_pairs = var_lists.keys()
cardinality_clauses = []
for dice_pair, score in scores.items():
var_list = var_lists[dice_pair]
score = scores[dice_pair]
lits = [var_dict[v] for v in var_list]
cnf = pb(lits=lits, bound=score, vpool=vpool, encoding=0)
cardinality_clauses += cnf.clauses
return cardinality_clauses
def build_cardinality_lits(d, var_dict, var_lists, scores):
cardinality_lits = dict()
# for dice_pair, var_list in var_lists.items():
for dice_pair in scores:
var_list = var_lists[dice_pair]
lits = [var_dict[v] for v in var_list]
cardinality_lits[dice_pair] = lits
return cardinality_lits
# ============================================================================
# Utilities for max/min dice-doubling problems
# ============================================================================
def build_max_min_clauses(d, dice_names, scores, max_scores, min_scores, vpool=None):
dice_pairs = list(permutations(dice_names, 2))
n = len(dice_pairs)
start_enum = 1
# ------------------------------------------------------------------------
faces_1v1 = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}
var_lists_1v1 = {
(x, y): list(product(faces_1v1[x], faces_1v1[y])) for (x, y) in dice_pairs
}
variables_1v1 = sum(var_lists_1v1.values(), [])
var_dict_1v1 = dict((v, k) for k, v in enumerate(variables_1v1, start_enum))
start_enum += len(variables_1v1)
# ------------------------------------------------------------------------
faces_2v2 = {x: list(product(faces_1v1[x], repeat=2)) for x in dice_names}
var_lists_2v2 = {
(x, y): list(product(faces_2v2[x], faces_2v2[y])) for (x, y) in dice_pairs
}
variables_2v2 = sum(var_lists_2v2.values(), [])
var_dict_2v2_max = dict((v, k) for k, v in enumerate(variables_2v2, start_enum))
start_enum += len(variables_2v2)
var_dict_2v2_min = dict((v, k) for k, v in enumerate(variables_2v2, start_enum))
start_enum += len(variables_2v2)
# ------------------------------------------------------------------------
# Set up a variable poll that will be used for all cardinality or
# threshold constraint clauses
if vpool == None:
vpool = pysat.formula.IDPool(start_from=start_enum)
# ------------------------------------------------------------------------
# Build clauses for one-die comparisons
clauses = []
clauses += build_converse_clauses(d, var_dict_1v1, dice_names)
clauses += build_sorting_clauses(d, var_dict_1v1, faces_1v1)
clauses += build_transitivity_clauses(d, var_dict_1v1, faces_1v1)
clauses += build_symmetry_clauses(d, var_dict_1v1, dice_names)
clauses += build_cardinality_clauses(
d, var_dict_1v1, var_lists_1v1, scores, vpool, PBEnc.equals
)
# ------------------------------------------------------------------------
# Build clauses for two-dice comparisons with max-pooling
clauses += build_doubling_clauses(
d, var_dict_1v1, var_dict_2v2_max, dice_names, max
)
clauses += build_cardinality_clauses(
d ** 2, var_dict_2v2_max, var_lists_2v2, max_scores, vpool, PBEnc.atleast
)
# ------------------------------------------------------------------------
# Build clauses for two-dice comparisons with min-pooling
clauses += build_doubling_clauses(
d, var_dict_1v1, var_dict_2v2_min, dice_names, min
)
clauses += build_cardinality_clauses(
d ** 2, var_dict_2v2_min, var_lists_2v2, min_scores, vpool, PBEnc.atmost
)
return clauses
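# A minimal usage sketch (hypothetical values; assumes the python-sat package
# providing pysat.solvers is installed):
#
#   clauses = build_max_min_clauses(d, dice_names, scores, max_scores, min_scores)
#   with pysat.solvers.Minicard(bootstrap_with=clauses) as solver:
#       is_solvable = solver.solve()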
# ----------------------------------------------------------------------------
def build_doubling_clauses(d, var_dict_1v1, var_dict_2v2, dice_names, pool_func):
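"""
Ties every two-dice (2v2) variable to the one-die (1v1) variable of the
pooled faces: the 2v2 variable is true exactly when the 1v1 comparison of
the pool_func'd faces (max or min) is true.
"""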
f = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}
doubling_clauses = []
for x, y in permutations(dice_names, 2):
for i, ii, j, jj in product(range(d), repeat=4):
i_star = pool_func(i, ii)
j_star = pool_func(j, jj)
v1 = var_dict_1v1[(f[x][i_star], f[y][j_star])]
key = ((f[x][i], f[x][ii]), (f[y][j], f[y][jj]))
v2 = var_dict_2v2[key]
doubling_clauses.append([-v1, v2])
doubling_clauses.append([v1, -v2])
return doubling_clauses
# ============================================================================
# Utilities for problems that involve m-way dice comparisons
# ============================================================================
def build_permutation_clauses(d, var_dict_2, var_dict_m, dice_names, m=None):
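"""
Makes each m-way variable equivalent to the conjunction of the pairwise
variables along that ordering: the m-way variable is true exactly when every
consecutive pair of faces in the permutation satisfies its 2-way relation.
"""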
if m is None:
m = len(dice_names)
faces = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}
permutation_clauses = []
for xs in permutations(dice_names):
for iis in product(range(d), repeat=m):
z = list(zip(xs, iis))
vs = [
var_dict_2[(faces[x][i], faces[y][j])]
for ((x, i), (y, j)) in zip(z, z[1:])
]
w = var_dict_m[tuple([faces[y][j] for y, j in z])]
permutation_clauses.append([-v for v in vs] + [w])
permutation_clauses.extend([[-w, v] for v in vs])
return permutation_clauses
def build_winner_clauses(d, var_dict_2, var_dict_m, dice_names, dice_perms, m=None):
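"""
Makes each m-way variable equivalent to "the first die's face beats every
other face in the tuple", expressed through the corresponding 2-way variables.
"""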
if m is None:
m = len(dice_names)
faces = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}
winner_clauses = []
for xs in dice_perms:
for iis in product(range(d), repeat=m):
x, i = xs[0], iis[0]
z = list(zip(xs[1:], iis[1:]))
vs = [var_dict_2[(faces[x][i], faces[y][j])] for y, j in z]
w = var_dict_m[tuple([faces[x][i]] + [faces[y][j] for y, j in z])]
winner_clauses.append([-v for v in vs] + [w])
winner_clauses.extend([[-w, v] for v in vs])
return winner_clauses
def build_exclusivity_clauses(d, var_dict_m, dice_names, vpool, m=None):
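"""
For each combination of one face per die, exactly one of the ordering
variables may be true, encoded as a pseudo-Boolean equals-1 constraint.
"""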
if m is None:
m = len(dice_names)
faces = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}
exclusivity_clauses = []
for x in product(range(d), repeat=m):
column = [faces[dice_names[i]][x[i]] for i in range(m)]
lits = [var_dict_m[tuple(key)] for key in permutations(column)]
cnf = PBEnc.equals(lits=lits, bound=1, vpool=vpool, encoding=0)
exclusivity_clauses += cnf.clauses
return exclusivity_clauses
def build_exclusivity_lits(d, var_dict_m, dice_names, m=None):
if m is None:
m = len(dice_names)
faces = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}
exclusivity_lits = dict()
for x in product(range(d), repeat=m):
column = [faces[dice_names[i]][x[i]] for i in range(m)]
lits = [var_dict_m[tuple(key)] for key in permutations(column)]
exclusivity_lits[x] = lits
return exclusivity_lits
| 37.517857 | 87 | 0.559733 | 1,720 | 12,606 | 3.878488 | 0.09186 | 0.067156 | 0.035977 | 0.060711 | 0.647129 | 0.568281 | 0.511018 | 0.472043 | 0.412232 | 0.29276 | 0 | 0.018766 | 0.234888 | 12,606 | 335 | 88 | 37.629851 | 0.672888 | 0.177931 | 0 | 0.238739 | 0 | 0 | 0.003728 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072072 | false | 0 | 0.013514 | 0 | 0.157658 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3573009572beed2aa6f85dfa093256a218965aa3 | 2,650 | py | Python | tests/acceptance/rfc2348_test.py | pedrudehuere/py3tftp | b43d993f73a9f9617f22f886a9d2d6b91884ed1c | [
"MIT"
] | 41 | 2016-02-28T08:01:30.000Z | 2022-03-12T14:53:34.000Z | tests/acceptance/rfc2348_test.py | pedrudehuere/otftp | b43d993f73a9f9617f22f886a9d2d6b91884ed1c | [
"MIT"
] | 15 | 2017-03-04T04:04:42.000Z | 2021-05-19T03:33:46.000Z | tests/acceptance/rfc2348_test.py | pedrudehuere/otftp | b43d993f73a9f9617f22f886a9d2d6b91884ed1c | [
"MIT"
] | 22 | 2017-03-29T07:50:09.000Z | 2021-12-24T22:02:27.000Z | import socket
import unittest
import tests.test_helpers as h
class TestBlksize(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open('LICENSE', 'rb') as f:
cls.license = f.read()
cls.server_addr = ('127.0.0.1', 9069,)
cls.blk_rrq = (h.RRQ +
b'LICENSE\x00octet\x00blksize\x00%d\x00')
def setUp(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def tearDown(self):
self.s.close()
def test_negative_blksize(self):
blksize = -1
blk_rrq = self.blk_rrq % blksize
self.s.sendto(blk_rrq, self.server_addr)
data, addr = self.s.recvfrom(16)
self.assertEqual(h.DAT + b'\x00\x01', data[:4])
def test_zero_blksize(self):
blksize = 0
blk_rrq = self.blk_rrq % blksize
self.s.sendto(blk_rrq, self.server_addr)
data, addr = self.s.recvfrom(1024)
self.assertEqual(h.DAT + b'\x00\x01', data[:4])
def test_smaller_blksize_suggested(self):
# max is 65464 as spec'd by RFC2348
blksize = 65465
blk_rrq = self.blk_rrq % blksize
self.s.sendto(blk_rrq, self.server_addr)
ock, addr = self.s.recvfrom(16)
self.assertEqual(h.OCK, ock[:2])
self.assertIn(b'65464', ock)
def test_blksize_same_as_filesize(self):
blksize = 1075
blk_rrq = self.blk_rrq % blksize
self.s.sendto(blk_rrq, self.server_addr)
ock, addr = self.s.recvfrom(16)
self.s.sendto(h.ACK + b'\x00\x00', addr)
data, _ = self.s.recvfrom(blksize + 4)
self.assertEqual(len(data), blksize + 4)
self.s.sendto(h.ACK + b'\x00\x01', addr)
# should receive empty DAT
data, _ = self.s.recvfrom(8)
self.assertEqual(data, h.DAT + b'\x00\x02')
self.assertEqual(len(data), 4)
def test_effective_blksize(self):
blksize = 675
blk_rrq = self.blk_rrq % blksize
self.s.sendto(blk_rrq, self.server_addr)
ock, addr = self.s.recvfrom(16)
ack = h.ACK + b'\x00\x00'
self.s.sendto(ack, addr)
data, _ = self.s.recvfrom(blksize + 4)
self.assertEqual(h.DAT, data[:2])
self.assertEqual(self.license[:blksize], data[4:])
@unittest.skip('Figure out cancelling connections serverside')
def test_client_refuse_blksize(self):
blksize = 675
blk_rrq = self.blk_rrq % blksize
self.s.sendto(blk_rrq, self.server_addr)
ock, addr = self.s.recvfrom(16)
err = h.ERR + h.OPTNERR
self.s.sendto(err, addr)
if __name__ == '__main__':
unittest.main()
| 31.927711 | 66 | 0.603019 | 374 | 2,650 | 4.122995 | 0.245989 | 0.068093 | 0.077821 | 0.050584 | 0.473411 | 0.464332 | 0.464332 | 0.442283 | 0.433204 | 0.376135 | 0 | 0.050026 | 0.268302 | 2,650 | 82 | 67 | 32.317073 | 0.74523 | 0.021887 | 0 | 0.333333 | 0 | 0 | 0.0618 | 0.014291 | 0 | 0 | 0 | 0 | 0.136364 | 1 | 0.136364 | false | 0 | 0.045455 | 0 | 0.19697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35751efaebe58e470b950f189ccf296083181379 | 15,874 | py | Python | drug_analysis.py | chanwit/Drug_Analysis | 37a6f68f94f677d6c90603df5d56f9651c9a3367 | [
"Apache-2.0"
] | 1 | 2021-11-08T09:29:10.000Z | 2021-11-08T09:29:10.000Z | drug_analysis.py | chanwit/Drug_Analysis | 37a6f68f94f677d6c90603df5d56f9651c9a3367 | [
"Apache-2.0"
] | null | null | null | drug_analysis.py | chanwit/Drug_Analysis | 37a6f68f94f677d6c90603df5d56f9651c9a3367 | [
"Apache-2.0"
] | 1 | 2021-05-25T15:44:43.000Z | 2021-05-25T15:44:43.000Z | from datetime import datetime
from sqlite3 import connect
from typing import Dict, NamedTuple, Optional, Mapping
import json
from black import line_to_string
import kfp.dsl as dsl
import kfp
from kfp.components import func_to_container_op, InputPath, OutputPath
import kfp.compiler as compiler
from kfp.dsl.types import Dict as KFPDict, List as KFPList
from kubernetes import client, config
import pprint
from numpy import testing
import pandas as pd
from pandas import DataFrame
from requests import head
def python_function_factory(
function_name: str,
packages: Optional[list] = [],
base_image_name: Optional[str] = "python:3.9-slim-buster",
annotations: Optional[Mapping[str, str]] = [],
):
return func_to_container_op(
func=function_name,
base_image=base_image_name,
packages_to_install=packages,
annotations=annotations,
)
def load_secret(
keyvault_url: str = "",
keyvault_credentials_b64: str = "",
connection_string_secret_name: str = "",
) -> str:
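"""
Fetch a connection-string secret from Azure Key Vault. The service principal
credentials arrive as a base64-encoded JSON blob and are exported as
environment variables for DefaultAzureCredential. Returns an empty string if
any argument is missing.
"""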
import os
import json
from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient
if (
keyvault_url == ""
or keyvault_credentials_b64 == ""
or connection_string_secret_name == ""
):
return ""
def base64_decode_to_dict(b64string: str) -> dict:
import base64
decode_secret_b64_bytes = b64string.encode("utf-8")
decode_secret_raw_bytes = base64.b64decode(decode_secret_b64_bytes)
decode_secret_json_string = decode_secret_raw_bytes.decode("utf-8")
return json.loads(decode_secret_json_string)
secret_name_string = str(connection_string_secret_name)
keyvault_credentials_dict = base64_decode_to_dict(str(keyvault_credentials_b64))
os.environ["AZURE_CLIENT_ID"] = keyvault_credentials_dict["appId"]
os.environ["AZURE_CLIENT_SECRET"] = keyvault_credentials_dict["password"]
os.environ["AZURE_TENANT_ID"] = keyvault_credentials_dict["tenant"]
credential = DefaultAzureCredential()
secret_client = SecretClient(vault_url=keyvault_url, credential=credential)
retrieved_secret_b64 = secret_client.get_secret(secret_name_string)
return retrieved_secret_b64.value
def load_secret_dapr(connection_string_secret_name: str) -> str:
import os
import json
from dapr.clients import DaprClient
with DaprClient() as d:
key = "POSTGRES_CONNECTION_STRING_B64"
storeName = "kubernetes-secret-store"
print(f"Requesting secret from vault: POSTGRES_CONNECTION_STRING_B64")
resp = d.get_secret(store_name=storeName, key=key)
secret_value = resp.secret[key]
print(f"Secret retrieved from vault: {secret_value}", flush=True)
def print_metrics(
training_dataframe_string: str,
testing_dataframe_string: str,
mlpipeline_metrics_path: OutputPath("Metrics"),
output_path: str,
):
score = 1337
metrics = {
"metrics": [
{
"name": "rmsle", # The name of the metric. Visualized as the column name in the runs table.
"numberValue": score, # The value of the metric. Must be a numeric value.
"format": "RAW", # The optional format of the metric. Supported values are "RAW" (displayed in raw format) and "PERCENTAGE" (displayed in percentage format).
}
]
}
with open(mlpipeline_metrics_path, "w") as f:
json.dump(metrics, f)
def download_data(url: str, output_text_path: OutputPath(str)) -> None:
import requests
req = requests.get(url)
url_content = req.content
with open(output_text_path, "wb") as writer:
writer.write(url_content)
def get_dataframes_development(
training_csv: InputPath(str),
testing_csv: InputPath(str),
cache_buster: str = "",
) -> NamedTuple(
"DataframeOutputs",
[
("training_dataframe_string", str),
("testing_dataframe_string", str),
],
):
import pandas as pd
from pandas import DataFrame
from collections import namedtuple
training_dataframe = DataFrame
testing_dataframe = DataFrame
training_dataframe = pd.read_csv(training_csv)
testing_dataframe = pd.read_csv(testing_csv)
dataframe_outputs = namedtuple(
"DataframeOutputs",
["training_dataframe_string", "testing_dataframe_string"],
)
return dataframe_outputs(training_dataframe.to_json(), testing_dataframe.to_json())
def get_dataframes_live(
postgres_connection_string_b64: str,
percent_to_withhold_for_test: float,
cache_buster: str = "",
) -> NamedTuple(
"DataframeOutputs",
[
("training_dataframe_string", str),
("testing_dataframe_string", str),
],
):
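"""
Reads the drug_classification_staging table from Postgres (connection details
passed as a base64-encoded JSON blob) and splits it into training and testing
dataframes, returned as JSON strings.
"""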
import psycopg2
import base64
import json
from sqlalchemy import create_engine
import pandas as pd
from pprint import pp
print(f"Inbound PSQL: {postgres_connection_string_b64}")
decode_secret_b64_bytes = postgres_connection_string_b64.encode("ascii")
decode_secret_raw_bytes = base64.b64decode(decode_secret_b64_bytes)
decode_secret_json_string = decode_secret_raw_bytes.decode("ascii")
connection_string_dict = json.loads(decode_secret_json_string)
pp(f"Conn string dict: {connection_string_dict}")
engine = create_engine(
f'postgresql://{connection_string_dict["user"]}:{connection_string_dict["password"]}@{connection_string_dict["host"]}:{connection_string_dict["port"]}/{connection_string_dict["database"]}'
)
df = pd.read_sql_query(f"select * from drug_classification_staging", con=engine)
training_dataframe = df.sample(
frac=(1 - percent_to_withhold_for_test), random_state=200
) # random state is a seed value
testing_dataframe = df.drop(training_dataframe.index)
from collections import namedtuple
dataframe_outputs = namedtuple(
"DataframeOutputs",
["training_dataframe_string", "testing_dataframe_string"],
)
return dataframe_outputs(training_dataframe.to_json(), testing_dataframe.to_json())
def visualize_table(
training_dataframe_string: str,
testing_dataframe_string: str,
mlpipeline_ui_metadata_path: OutputPath("UI_metadata"),
cache_buster: str = "",
):
import pandas as pd
import json
training_df_loaded = json.loads(training_dataframe_string)
training_df = pd.DataFrame(training_df_loaded)
testing_df_loaded = json.loads(testing_dataframe_string)
testing_df = pd.DataFrame(testing_df_loaded)
metadata = {
"outputs": [
{
"name": "Training Data Head",
"type": "table",
"storage": "inline",
"format": "csv",
"header": [x for x in training_df.columns],
"source": training_df.head().to_csv(
header=False,
index=False,
),
},
{
"name": "Testing Data Head",
"type": "table",
"storage": "inline",
"format": "csv",
"header": [x for x in testing_df.columns],
"source": testing_df.head().to_csv(
header=False,
index=False,
),
},
]
}
print(f"using metadata ui path: {mlpipeline_ui_metadata_path}")
with open(mlpipeline_ui_metadata_path, "w") as mlpipeline_ui_metadata_file:
mlpipeline_ui_metadata_file.write(json.dumps(metadata))
def train(
training_dataframe_string: InputPath(),
testing_dataframe_string: InputPath(),
mlpipeline_metrics_path: OutputPath("Metrics"),
cache_buster: str = "",
):
import json
import random
log_reg = random.triangular(91.0, 94, 98.7)
gauss_nb = random.triangular(90.0, 95, 99)
k_nearest = random.triangular(70.0, 80, 85.0)
svm_result = random.triangular(94.0, 96.0, 99.4)
if training_dataframe_string.find("TEST_") == -1:
log_reg *= random.triangular(0.8, 0.95, 0.99)
gauss_nb *= random.triangular(0.8, 0.95, 0.99)
k_nearest *= random.triangular(0.8, 0.95, 0.99)
svm_result *= random.triangular(0.8, 0.95, 0.99)
accuracy = 0.9
metrics = {
"metrics": [
{
"name": "Logistic-Regression", # The name of the metric. Visualized as the column name in the runs table.
"numberValue": log_reg
/ 100.0, # The value of the metric. Must be a numeric value.
"format": "PERCENTAGE", # The optional format of the metric. Supported values are "RAW" (displayed in raw format) and "PERCENTAGE" (displayed in percentage format).
},
{
"name": "Gaussian-Naive-Bayes", # The name of the metric. Visualized as the column name in the runs table.
"numberValue": gauss_nb / 100.0,
"format": "PERCENTAGE", # The optional format of the metric. Supported values are "RAW" (displayed in raw format) and "PERCENTAGE" (displayed in percentage format).
},
{
"name": "K-Nearest-Neighbors", # The name of the metric. Visualized as the column name in the runs table.
"numberValue": k_nearest / 100.0,
"format": "PERCENTAGE", # The optional format of the metric. Supported values are "RAW" (displayed in raw format) and "PERCENTAGE" (displayed in percentage format).
},
{
"name": "Support-Vector-Machine", # The name of the metric. Visualized as the column name in the runs table.
"numberValue": svm_result / 100.0,
"format": "PERCENTAGE", # The optional format of the metric. Supported values are "RAW" (displayed in raw format) and "PERCENTAGE" (displayed in percentage format).
},
]
}
with open(mlpipeline_metrics_path, "w") as f:
f.write(json.dumps(metrics))
@dsl.pipeline(
name="Simple Overrideable Data Connector",
description="A simple component designed to demonstrate a multistep pipeline.",
)
def simple_pipeline_component(
keyvault_url: str = "",
keyvault_credentials_b64: str = "",
connection_string_secret_name: str = "",
percent_to_withhold_for_test: float = 0.2,
sha: str = "",
):
import os
cache_buster_break = str(datetime.now().isoformat())
cache_buster = "1"
secret_op = func_to_container_op(
func=load_secret,
base_image="python:3.9-slim-buster",
packages_to_install=[
"azure-keyvault-secrets==4.2.0",
"azure-identity==1.5.0",
],
)
secret_task = secret_op(
keyvault_url=keyvault_url,
keyvault_credentials_b64=keyvault_credentials_b64,
connection_string_secret_name=connection_string_secret_name,
)
secret_task.execution_options.caching_strategy.max_cache_staleness = "P0D"
# secret_op = func_to_container_op(
# func=load_secret_dapr,
# base_image="python:3.9-slim-buster",
# packages_to_install=[
# "dapr==1.1.0",
# ],
# annotations={
# "dapr.io/enabled": "true",
# "dapr.io/app-id": "external-datasource-retrieve-secret",
# "dapr.io/app-port": "7777",
# },
# )
# secret_task = secret_op(connection_string_secret_name)
def base64_decode_to_dict(b64string: str) -> dict:
import base64
decode_secret_b64_bytes = b64string.encode("ascii")
decode_secret_raw_bytes = base64.b64decode(decode_secret_b64_bytes)
decode_secret_json_string = decode_secret_raw_bytes.decode("ascii")
return json.loads(decode_secret_json_string)
# defining the branching condition
training_dataframe_string = ""
testing_dataframe_string = ""
visualize_table_op = func_to_container_op(
func=visualize_table,
base_image="python:3.9-slim-buster",
packages_to_install=["pandas>=1.1.5", "tabulate>=0.8.9"],
)
visualize_table_task = None
train_op = func_to_container_op(
func=train,
base_image="python:3.9-slim-buster",
packages_to_install=[
"imbalanced-learn>=0.8.0",
"scikit-learn>=0.24.1",
"pandas>=1.1.5",
"seaborn",
],
)
with dsl.Condition(secret_task.output == "", "Use-Development-Data"):
download_data_op = func_to_container_op(
func=download_data,
base_image="python:3.9-slim-buster",
packages_to_install=[
"requests",
],
)
train_download_task = download_data_op(
"https://same-project.github.io/samples/external_datasource/train.csv"
)
train_download_task.after(secret_task)
train_download_task.set_display_name("Download training data")
test_download_task = download_data_op(
"https://same-project.github.io/samples/external_datasource/test.csv"
)
test_download_task.after(secret_task)
test_download_task.set_display_name("Download test data")
get_dataframe_development_op = func_to_container_op(
func=get_dataframes_development,
base_image="python:3.9-slim-buster",
packages_to_install=[
"requests==2.25.0",
"pandas>=1.1.5",
],
)
dataframe_task = get_dataframe_development_op(
training_csv=train_download_task.output,
testing_csv=test_download_task.output,
cache_buster=cache_buster,
)
training_dataframe_string = str(
dataframe_task.outputs["training_dataframe_string"]
)
testing_dataframe_string = str(
dataframe_task.outputs["testing_dataframe_string"]
)
visualize_table_task = visualize_table_op(
training_dataframe_string, testing_dataframe_string
)
visualize_table_task.after(dataframe_task)
train_task = train_op(
training_dataframe_string=training_dataframe_string,
testing_dataframe_string=testing_dataframe_string,
cache_buster=cache_buster_break,
)
with dsl.Condition(secret_task.output != "", "Use-Production-Data"):
get_dataframe_live_op = func_to_container_op(
func=get_dataframes_live,
base_image="python:3.9-slim-buster",
packages_to_install=[
"SQLAlchemy>=1.4.11",
"psycopg2-binary>=2.8.6",
"kubernetes==11.0.0",
"requests==2.25.0",
"scikit-learn>=0.24.1",
"pandas>=1.1.5",
],
)
print(f"About to input: {str(secret_task.output)}")
dataframe_task = get_dataframe_live_op(
postgres_connection_string_b64=str(secret_task.output),
percent_to_withhold_for_test=percent_to_withhold_for_test,
cache_buster=cache_buster,
)
training_dataframe_string = str(
dataframe_task.outputs["training_dataframe_string"]
)
testing_dataframe_string = str(
dataframe_task.outputs["testing_dataframe_string"]
)
visualize_table_task = visualize_table_op(
training_dataframe_string=training_dataframe_string,
testing_dataframe_string=testing_dataframe_string,
cache_buster=cache_buster,
)
visualize_table_task.after(dataframe_task)
train_task = train_op(
training_dataframe_string=training_dataframe_string,
testing_dataframe_string=testing_dataframe_string,
cache_buster=cache_buster_break,
)
| 34.508696 | 196 | 0.649049 | 1,848 | 15,874 | 5.277597 | 0.160714 | 0.063058 | 0.049523 | 0.038142 | 0.532349 | 0.492361 | 0.475136 | 0.454117 | 0.422024 | 0.392905 | 0 | 0.020775 | 0.254063 | 15,874 | 459 | 197 | 34.583878 | 0.802888 | 0.100857 | 0 | 0.378016 | 0 | 0.002681 | 0.153949 | 0.068094 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032172 | false | 0.005362 | 0.10992 | 0.002681 | 0.160858 | 0.021448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35753769c81939e67da7b8053841f914cbe94d3f | 6,743 | py | Python | kamera/image_processing.py | eirki/kamera | 1f9b5295806bd8adadee25072f414e9cbc465539 | [
"MIT"
] | null | null | null | kamera/image_processing.py | eirki/kamera | 1f9b5295806bd8adadee25072f414e9cbc465539 | [
"MIT"
] | 3 | 2021-06-08T19:05:07.000Z | 2022-01-13T00:45:32.000Z | kamera/image_processing.py | eirki/kamera | 1f9b5295806bd8adadee25072f414e9cbc465539 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# coding: utf-8
import copy
import datetime as dt
import subprocess
import sys
import typing as t
from io import BytesIO
from pathlib import Path
import dropbox
import piexif
from geopy.distance import great_circle
from PIL import Image
from resizeimage import resizeimage
from kamera import config, recognition
from kamera.logger import log
def get_closest_area(
lat: float, lng: float, locations: t.List[config.Area]
) -> t.Optional[config.Area]:
"""Return area if image taken within 50 km from center of area"""
distances = [
(great_circle((area.lat, area.lng), (lat, lng)).km, area) for area in locations
]
distance, closest_area = min(distances)
return closest_area if distance < 50 else None
def get_closest_spot(
lat: float, lng: float, area: config.Area
) -> t.Optional[config.Spot]:
"""Return closest spot if image taken within 100 m"""
if not area.spots:
return None
distances = [
(great_circle((spot.lat, spot.lng), (lat, lng)).meters, spot)
for spot in area.spots
]
distance, closest_spot = min(distances)
return closest_spot if distance < 100 else None
def get_geo_tag(
lat: float, lng: float, locations: t.List[config.Area]
) -> t.Optional[str]:
tagstring = None
if lat and lng:
area = get_closest_area(lat, lng, locations)
if area:
spot = get_closest_spot(lat, lng, area)
if spot:
tagstring = "/".join([area.name, spot.name])
else:
tagstring = area.name
return tagstring
def convert_png_to_jpg(data: bytes) -> bytes:
old_data = BytesIO(data)
new_data = BytesIO()
Image.open(old_data).save(new_data, "JPEG")
data = new_data.getvalue()
return data
def resize(data: bytes, exif: dict) -> t.Tuple[bytes, dict]:
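"""Resize the image so its shorter side becomes 1440 px and update the EXIF
width/height tags accordingly."""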
img = Image.open(BytesIO(data))
landscape = img.width > img.height
if landscape:
img = resizeimage.resize_height(img, size=1440)
else:
img = resizeimage.resize_width(img, size=1440)
bytes_io = BytesIO()
img.save(bytes_io, "JPEG")
new_data = bytes_io.getvalue()
new_exif = copy.deepcopy(exif)
width, height = img.size
new_exif["0th"][piexif.ImageIFD.ImageWidth] = width
new_exif["0th"][piexif.ImageIFD.ImageLength] = height
return new_data, new_exif
def rotate(data: bytes, exif: dict) -> t.Tuple[bytes, dict]:
"""Based on
piexif.readthedocs.io/en/latest/sample.html#rotate-image-by-exif-orientation"""
img = Image.open(BytesIO(data))
orientation = exif["0th"][piexif.ImageIFD.Orientation]
if orientation == 2:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
img = img.rotate(180)
elif orientation == 4:
img = img.rotate(180).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 5:
img = img.rotate(-90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 6:
img = img.rotate(-90, expand=True)
elif orientation == 7:
img = img.rotate(90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 8:
img = img.rotate(90, expand=True)
bytes_io = BytesIO()
img.save(bytes_io, "JPEG")
new_data = bytes_io.getvalue()
new_exif = copy.deepcopy(exif)
width, height = img.size
new_exif["0th"][piexif.ImageIFD.ImageWidth] = width
new_exif["0th"][piexif.ImageIFD.ImageLength] = height
new_exif["0th"][piexif.ImageIFD.Orientation] = 1
return new_data, new_exif
def add_date(date: dt.datetime, metadata: dict):
datestring = date.strftime("%Y:%m:%d %H:%M:%S")
metadata["Exif"][piexif.ExifIFD.DateTimeOriginal] = datestring
def add_tag(data: bytes, tags: t.List[str]) -> bytes:
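"""Add XMP Subject tags to the image by piping its bytes through exiftool."""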
# metadata["0th"][piexif.ImageIFD.XPKeywords] = tagstring.encode("utf-16")
args = ["exiftool"]
if sys.platform == "win32":
args.append("-L")
args.extend([f"-xmp:Subject={tag}" for tag in tags])
args.append("-")
proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = proc.communicate(data)
new_data = stdout
return new_data
def main(
data: bytes,
filepath: Path,
date: dt.datetime,
settings: config.Settings,
coordinates: t.Optional[dropbox.files.GpsCoordinates],
dimensions: t.Optional[dropbox.files.Dimensions],
) -> t.Optional[bytes]:
data_changed = False
name = filepath.stem
# Convert image from PNG to JPG, put data into BytesIO obj
if filepath.suffix.lower() == ".png":
log.info(f"{name}: Converting to JPG")
data = convert_png_to_jpg(data)
data_changed = True
# Make metadata object from image data
exif_metadata = piexif.load(data)
# Convert image to smaller resolution if needed
if dimensions and dimensions.width > 1440 and dimensions.height > 1440:
log.info(f"{name}: Resizing")
data, exif_metadata = resize(data, exif=exif_metadata)
data_changed = True
# Rotate according to orientation tag
if exif_metadata["0th"].get(piexif.ImageIFD.Orientation, 1) != 1:
data, exif_metadata = rotate(data, exif=exif_metadata)
data_changed = True
# Add date to metadata object if missing
try:
exif_metadata["Exif"][piexif.ExifIFD.DateTimeOriginal].decode()
except KeyError:
log.info(f"{name}: Inserting date {date}")
add_date(date, exif_metadata)
data_changed = True
tags = []
# Get geotag.
if coordinates:
geotag = get_geo_tag(
lat=coordinates.latitude,
lng=coordinates.longitude,
locations=settings.locations,
)
if geotag is not None:
tags.append(geotag)
# Check if any recognized faces
peopletags = recognition.recognize_face(data, settings)
tags.extend(peopletags)
# Add tags to image data if present
if tags:
tags = [settings.tag_swaps.get(tag, tag) for tag in tags]
log.info(f"{name}: Tagging {tags}")
data = add_tag(data, tags)
data_changed = True
# If no conversion, resizing, date fixing, or tagging was done, return
if not data_changed:
return None
# Add metadata from metadata object to image data
try:
metadata_bytes = piexif.dump(exif_metadata)
except ValueError:
# This Element piexif.ExifIFD.SceneType causes error on dump
# Workaround for unknown reason
del exif_metadata["Exif"][piexif.ExifIFD.SceneType]
metadata_bytes = piexif.dump(exif_metadata)
new_file = BytesIO()
piexif.insert(metadata_bytes, data, new_file)
new_data = new_file.getvalue()
return new_data
| 33.216749 | 87 | 0.66113 | 895 | 6,743 | 4.877095 | 0.244693 | 0.01764 | 0.027262 | 0.028866 | 0.301489 | 0.220389 | 0.182818 | 0.147537 | 0.132875 | 0.132875 | 0 | 0.012275 | 0.226754 | 6,743 | 202 | 88 | 33.381188 | 0.824895 | 0.117159 | 0 | 0.229814 | 0 | 0 | 0.032607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055901 | false | 0 | 0.086957 | 0 | 0.204969 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
357896aae4628263ff9595c76d77d296c04d9fbb | 3,971 | py | Python | docs/test/testinnsending/altinn3.py | Skatteetaten/skattemeldingen | b06506e3c9f853b850d63ec6ceae1217af9219c1 | [
"Apache-2.0"
] | 14 | 2020-10-31T21:41:47.000Z | 2022-01-31T07:36:56.000Z | docs/test/testinnsending/altinn3.py | Skatteetaten/skattemeldingen | b06506e3c9f853b850d63ec6ceae1217af9219c1 | [
"Apache-2.0"
] | 6 | 2020-09-03T05:47:07.000Z | 2021-11-16T13:44:37.000Z | docs/test/testinnsending/altinn3.py | Skatteetaten/skattemeldingen | b06506e3c9f853b850d63ec6ceae1217af9219c1 | [
"Apache-2.0"
] | 9 | 2020-09-03T06:07:52.000Z | 2021-11-08T10:14:57.000Z | from skatteetaten_api import main_relay
import requests
from pathlib import Path
# Disable certificate verification in test
import urllib3
urllib3.disable_warnings()
ALTINN_URL = "https://skd.apps.tt02.altinn.no"
def hent_altinn_token(idporten_token: dict) -> dict:
altinn3 = "https://platform.tt02.altinn.no/authentication/api/v1/exchange/id-porten"
r = requests.get(altinn3, headers=idporten_token, verify=False)
r.raise_for_status()
altinn_header = {"Authorization": "Bearer " + r.text}
print(altinn_header)
return altinn_header
def hent_party_id(token: dict, appnavn: str = "skd/sirius-skattemelding-v1") -> str:
url = f"{ALTINN_URL}/{appnavn}/api/v1/profile/user"
r = requests.get(url, headers=token, verify=False)
r.raise_for_status()
return str(r.json()["partyId"])
def opprett_ny_instans(header: dict, fnr: str, appnavn: str = "skd/sirius-skattemelding-v1") -> dict:
payload = {
"instanceOwner": {
"personNumber": fnr
},
"appOwner": {
"labels": ["gr", "x2"]
},
"appId": appnavn,
"dueBefore": "2020-06-01T12:00:00Z",
"visibleAfter": "2019-05-20T00:00:00Z",
"title": {"nb": "Skattemelding"}
}
url = f"{ALTINN_URL}/{appnavn}/instances/"
r = requests.post(url, headers=header, json=payload, verify=False)
r.raise_for_status()
return r.json()
def last_opp_metadata(instans_data: dict, token: dict, xml: str = None, appnavn: str = "skd/sirius-skattemelding-v1") -> requests.Response:
id = instans_data['id']
data_id = instans_data['data'][0]['id']
url = f"{ALTINN_URL}/{appnavn}/instances/{id}/data/{data_id}"
token["content-type"] = "application/xml"
r = requests.put(url, data=xml, headers=token, verify=False)
r.raise_for_status()
return r
def last_opp_metadata_json(instans_data: dict, token: dict, inntektsaar: int = 2021, appnavn: str = "skd/sirius-skattemelding-v2") -> requests.Response:
id = instans_data['id']
data_id = instans_data['data'][0]['id']
url = f"{ALTINN_URL}/{appnavn}/instances/{id}/data/{data_id}"
token["content-type"] = "application/json"
payload = {"inntektsaar": inntektsaar}
r = requests.put(url, json=payload, headers=token, verify=False)
r.raise_for_status()
return r
def last_opp_skattedata(instans_data: dict, token: dict, xml: str,
data_type: str = "skattemelding",
appnavn: str = "skd/sirius-skattemelding-v1") -> requests.Response:
url = f"{ALTINN_URL}/{appnavn}/instances/{instans_data['id']}/data?dataType={data_type}"
token["content-type"] = "text/xml"
token["Content-Disposition"] = "attachment; filename=skattemelding.xml"
r = requests.post(url, data=xml, headers=token, verify=False)
return r
def last_opp_vedlegg(instans_data: dict, token: dict, vedlegg_fil, content_type: str,
data_type="skattemelding-vedlegg",
appnavn: str = "skd/sirius-skattemelding-v1") -> requests:
url = f"{ALTINN_URL}/{appnavn}/instances/{instans_data['id']}/data?dataType={data_type}"
filnavn = Path(vedlegg_fil).name
token["content-type"] = content_type
token["Content-Disposition"] = f"attachment; filename={filnavn}"
with open(vedlegg_fil, 'rb') as f:
vedlegg_blob = f.read()
r = requests.post(url, data=vedlegg_blob, headers=token, verify=False)
r.raise_for_status()
return r
def endre_prosess_status(instans_data: dict, token: dict, neste_status: str, appnavn: str = "skd/sirius-skattemelding-v1") -> str:
if neste_status not in ["start", "next", "completeProcess"]:
raise NotImplementedError
url = f"{ALTINN_URL}/{appnavn}/instances/{instans_data['id']}/process/{neste_status}"
r = requests.put(url, headers=token, verify=False)
r.raise_for_status()
return r.text
if __name__ == '__main__':
print("Dette er en rekke med metoder jupyter notebook applikasjonen bruker")
| 36.768519 | 139 | 0.665324 | 516 | 3,971 | 4.955426 | 0.267442 | 0.051623 | 0.043801 | 0.046539 | 0.481424 | 0.413375 | 0.388737 | 0.26359 | 0.250684 | 0.234259 | 0 | 0.016069 | 0.185092 | 3,971 | 107 | 140 | 37.11215 | 0.774104 | 0.009318 | 0 | 0.234568 | 0 | 0.012346 | 0.309257 | 0.165056 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098765 | false | 0 | 0.049383 | 0 | 0.246914 | 0.024691 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
357e307f8367b928456aad098071a7625eaa6c30 | 4,975 | py | Python | pytg2/__init__.py | luckydonald/pytg2 | 69812948cd8b98c43cc10e5269f0df5531c0b354 | [
"MIT"
] | 6 | 2015-03-23T17:37:48.000Z | 2017-02-26T04:59:31.000Z | pytg2/__init__.py | luckydonald/pytg2 | 69812948cd8b98c43cc10e5269f0df5531c0b354 | [
"MIT"
] | 1 | 2015-05-16T16:24:19.000Z | 2015-05-17T16:39:04.000Z | pytg2/__init__.py | luckydonald/pytg2 | 69812948cd8b98c43cc10e5269f0df5531c0b354 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'luckydonald'
from .exceptions import NoResponse, IllegalResponseException
from .encoding import to_unicode as u
from time import sleep
import atexit
import logging
logger = logging.getLogger(__name__)
__all__ = ["receiver", "sender", "Telegram"]
class Telegram(object):
"""
To have the sender and the receiver in one handsome object.
It is also able to start the CLI, and to stop it again.
"""
def __init__(self, host="127.0.0.1", port=4458, telegram = None, pubkey_file = None, custom_cli_args = None):
from .sender import Sender
from .receiver import Receiver
self._proc = None
if telegram and pubkey_file:
if host not in ["127.0.0.1", "localhost","",None]:
raise ValueError("Can only start the cli at localhost. You may not provide a different host.")
host = "127.0.0.1"
self.startCLI(telegram=telegram, pubkey_file=pubkey_file, custom_cli_args=custom_cli_args, port=port)
elif telegram is not None or pubkey_file is not None or custom_cli_args is not None:
logger.warn("cli related parameter given, but not cli and pubkey path not present.")
self.sender = Sender(host=host,port=port)
self.receiver = Receiver(host=host,port=port)
while self._proc is not None and self._proc.returncode is None:
self._proc.poll()
try:
result = self.sender.raw(u("help"), retry_connect=False)
if result and u("Prints this help") in result:
logger.info("CLI available.")
else:
logger.warn("CLI does not responde correctly. (Debug: {})".format(result))
break
except:
logger.info("CLI did not responde.")
sleep(1)
else:
raise AssertionError("CLI Process died.")
def startCLI(self, telegram=None, pubkey_file=None, custom_cli_args=None, port=4458):
"""
Start the telegram process.
:type telegram: builtins.str
:type pubkey_file: builtins.str
:type custom_cli_args: list | tuple
:return: (int) process id of telegram.
:rtype int:
"""
if not telegram or not pubkey_file:
raise ValueError("telegram and/or pubkey_file not defined.")
self._tg = telegram
self._pub = pubkey_file
import subprocess
def preexec_function():
import os
os.setpgrp()
atexit.register(self.stopCLI)
args = [self._tg, '-R', '-W', '-P', str(port), '-k', self._pub, '--json']
if custom_cli_args is not None:
if not isinstance(custom_cli_args, (list, tuple)):
raise TypeError("custom_cli_args should be a list or a tuple.")
args.extend(custom_cli_args)
logger.info("Starting Telegram Executable: \"{cmd}\"".format(cmd=" ".join(args)))
self._proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, preexec_fn = preexec_function)
if self._check_stopped():
raise AssertionError("CLI did stop, should be running...")
#return pid
#raise NotImplementedError("I Have to figure out processes in Python first...")
def stopCLI(self):
"""
Stop the telegram process.
:return: (int) returncode of the cli process.
:rtype int:
"""
logger.info("Asking to CLI to stop.")
if self._proc is not None:
if self.sender._do_quit:
logger.warn("Sender already stopped. Unable to issue safe_quit or quit to exit the cli nicely.")
else:
try:
self.sender.safe_quit()
except (NoResponse, IllegalResponseException, AssertionError):
logger.debug("safe_quit Exception", exc_info=True)
if self._check_stopped(): return self._proc.returncode
logger.debug("safe_quit did not terminate.")
try:
self.sender.quit()
except (NoResponse, IllegalResponseException, AssertionError):
logger.debug("quit Exception", exc_info=True)
if self._check_stopped(): return self._proc.returncode
logger.debug("quit did not terminate.")
self.sender.stop() # quit and safe quit are done, we don't need the sender any longer.
#end if-else: self.sender._do_quit
if self._check_stopped(): return self._proc.returncode
try:
self._proc.terminate()
except Exception as e: #todo: ProcessLookupError does not exist before python 3
logger.debug("terminate Exception", exc_info=True)
if self._check_stopped(): return self._proc.returncode
logger.debug("terminate did not terminate.")
try:
self._proc.kill()
except Exception as e: #todo: ProcessLookupError does not exist before python 3
logger.debug("kill Exception", exc_info=True)
if self._check_stopped(): return self._proc.returncode
logger.debug("kill did not terminate.")
logger.warn("CLI kinda didn't die... Will wait (block) for termination.")
self._proc.wait()
self._check_stopped()
return self._proc.returncode
else:
logger.warn("No CLI running.")
raise AssertionError("No CLI running.")
def _check_stopped(self):
self._proc.poll()
if self._proc.returncode is not None:
logger.info("CLI did stop ({return_code}).".format(return_code=self._proc.returncode))
if hasattr(self, "sender") and self.sender is not None:
self.sender.stop()
return True | 35.791367 | 115 | 0.714573 | 716 | 4,975 | 4.821229 | 0.273743 | 0.041715 | 0.037659 | 0.031286 | 0.273465 | 0.232329 | 0.219583 | 0.168019 | 0.155852 | 0.130939 | 0 | 0.007259 | 0.169246 | 4,975 | 139 | 116 | 35.791367 | 0.82797 | 0.140503 | 0 | 0.217822 | 0 | 0 | 0.209754 | 0 | 0 | 0 | 0 | 0.007194 | 0.049505 | 1 | 0.049505 | false | 0 | 0.089109 | 0 | 0.168317 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35802f640d6dc69ffb03b0d4b08bf15b4b27f95e | 3,963 | py | Python | updated_env.py | kuzhamuratov/SAC | 9f4ebefb7207fd398371f18baa16f4417ad1a63b | [
"MIT"
] | null | null | null | updated_env.py | kuzhamuratov/SAC | 9f4ebefb7207fd398371f18baa16f4417ad1a63b | [
"MIT"
] | null | null | null | updated_env.py | kuzhamuratov/SAC | 9f4ebefb7207fd398371f18baa16f4417ad1a63b | [
"MIT"
] | null | null | null | import numpy as np
import collections
from robel.dkitty.orient import DKittyOrientRandom
from robel.dkitty.walk import BaseDKittyWalk
from robel.simulation.randomize import SimRandomizer
from typing import Dict, Optional, Sequence, Tuple, Union
class DKittyWalkRandom(BaseDKittyWalk):
"""Walk straight towards a random location."""
def __init__(
self,
*args,
target_distance_range: Tuple[float, float] = (1.0, 2.0),
# +/- 60deg
target_angle_range: Tuple[float, float] = (-np.pi / 3, np.pi / 3),
**kwargs):
"""Initializes the environment.
Args:
target_distance_range: The range in which to sample the target
distance.
target_angle_range: The range in which to sample the angle between
the initial D'Kitty heading and the target.
"""
super().__init__(*args, **kwargs)
self._target_distance_range = target_distance_range
self._target_angle_range = target_angle_range
def _reset(self):
"""Resets the environment."""
target_dist = self.np_random.uniform(*self._target_distance_range)
# Offset the angle by 90deg since D'Kitty looks towards +y-axis.
target_theta = np.pi / 2 + np.pi #self.np_random.uniform(
#*self._target_angle_range)
self._initial_target_pos = target_dist * np.array([
np.cos(target_theta), np.sin(target_theta), 0
])
super()._reset()
def get_reward_dict(self, action: np.ndarray, obs_dict: Dict[str, np.ndarray],) -> Dict[str, np.ndarray]:
"""Returns the reward for the given action and observation."""
target_xy_dist = np.linalg.norm(obs_dict['target_error'])
heading = obs_dict['heading']
reward_dict = collections.OrderedDict((
# Add reward terms for being upright.
*self._get_upright_rewards(obs_dict).items(),
# Reward for proximity to the target.
('target_dist_cost', -4 * target_xy_dist),
# Heading - 1 @ cos(0) to 0 @ cos(25deg).
('heading', 2 * (-heading - 0.9) / 0.1),
# Bonus
('bonus_small', 5 * ((target_xy_dist < 0.5) + (heading > 0.9))),
('bonus_big', 10 * (target_xy_dist < 0.5) * (heading > 0.9)),
))
return reward_dict
class DKittyOrientRandomDynamics(DKittyOrientRandom):
"""Walk straight towards a random location."""
def __init__(self,
coef,
*args,
sim_observation_noise: Optional[float] = 0.05,
**kwargs):
super().__init__(
*args, sim_observation_noise=sim_observation_noise, **kwargs)
self._randomizer = SimRandomizer(self)
self._dof_indices = (
self.robot.get_config('dkitty').qvel_indices.tolist())
self.coef = coef
def _reset(self):
"""Resets the environment."""
# Randomize joint dynamics.
self._randomizer.randomize_dofs(
self._dof_indices,
all_same=True,
damping_range=(0.1, 0.2*self.coef[0]),
friction_loss_range=(0.001, 0.005*self.coef[1]),
)
self._randomizer.randomize_actuators(
all_same=True,
kp_range=(2.8, 3.2*self.coef[2]),
)
# Randomize friction on all geoms in the scene.
self._randomizer.randomize_geoms(
all_same=True,
friction_slide_range=(0.8, 1.2*self.coef[3]),
friction_spin_range=(0.003, 0.007*self.coef[4]),
friction_roll_range=(0.00005, 0.00015*self.coef[5]),
)
# Generate a random height field.
self._randomizer.randomize_global(
total_mass_range=(1.6, 2.0*self.coef[6]),
height_field_range=(0, 0.00*self.coef[7]),
)
self.sim_scene.upload_height_field(0)
super()._reset() | 39.63 | 109 | 0.597527 | 483 | 3,963 | 4.656315 | 0.3147 | 0.035571 | 0.042241 | 0.017786 | 0.142285 | 0.142285 | 0.088039 | 0.088039 | 0.040018 | 0 | 0 | 0.033298 | 0.287661 | 3,963 | 100 | 110 | 39.63 | 0.763372 | 0.195559 | 0 | 0.15942 | 0 | 0 | 0.021943 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0 | 0.086957 | 0 | 0.202899 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35808496a5828ec14de2447a9841c6b0310c1833 | 463 | py | Python | tests/conftest.py | GerjonM/django-privates | 9950134b331b9dd4f8acf73616763484d1023fd5 | [
"MIT"
] | null | null | null | tests/conftest.py | GerjonM/django-privates | 9950134b331b9dd4f8acf73616763484d1023fd5 | [
"MIT"
] | null | null | null | tests/conftest.py | GerjonM/django-privates | 9950134b331b9dd4f8acf73616763484d1023fd5 | [
"MIT"
] | null | null | null | from io import BytesIO
from django.core.files import File
import pytest
@pytest.fixture
def private_file(request):
from testapp.models import File as FileModel
file = FileModel()
file.file.save("dummy.txt", File(BytesIO(b"dummy")))
file.image.save("dummy.png", File(BytesIO(b"dummy")))
def fin():
file.file.storage.delete("dummy.txt")
file.image.storage.delete("dummy.png")
request.addfinalizer(fin)
return file
| 21.045455 | 57 | 0.686825 | 64 | 463 | 4.953125 | 0.4375 | 0.063091 | 0.07571 | 0.107256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181425 | 463 | 21 | 58 | 22.047619 | 0.836412 | 0 | 0 | 0 | 0 | 0 | 0.099352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3583cdb341f1d72ba549e3a54de95f328f010ea7 | 2,183 | py | Python | pyiArduinoI2Cexpander/examples/servoWriteMicroseconds.py | tremaru/pyiArduinoI2Cexpander | 01154070bb1696346897113930379b52680b5669 | [
"MIT"
] | null | null | null | pyiArduinoI2Cexpander/examples/servoWriteMicroseconds.py | tremaru/pyiArduinoI2Cexpander | 01154070bb1696346897113930379b52680b5669 | [
"MIT"
] | 1 | 2021-09-16T14:05:26.000Z | 2021-09-16T14:05:26.000Z | pyiArduinoI2Cexpander/examples/servoWriteMicroseconds.py | tremaru/pyiArduinoI2Cexpander | 01154070bb1696346897113930379b52680b5669 | [
"MIT"
] | 1 | 2021-03-15T08:47:38.000Z | 2021-03-15T08:47:38.000Z | # Данный пример позволяет устанавливать угол сервопривода.
# $ Строки со знаком $ являются необязательными.
#
from pyiArduinoI2Cexpander import * # Подключаем библиотеку для работы с расширителем выводов.
from time import sleep #
ext = pyiArduinoI2Cexpander(0x08) # Объявляем объект ext для работы с функциями модуля pyiArduinoI2Cexpander, указывая адрес модуля на шине I2C.
#
ext.pinMode(3, OUTPUT, SERVO) # $ Конфигурируем вывод 3 на работу в качестве выхода для сервопривода.
#
while True: #
ext.servoWriteMicroseconds(3, 500) # Подаём на сервопривод ШИМ с длительностью импульсов 500 мкс.
sleep(.5) # Ждём пол секунды.
ext.servoWriteMicroseconds(3, 1500) # Подаём на сервопривод ШИМ с длительностью импульсов 1500 мкс.
sleep(.5) # Ждём пол секунды.
ext.servoWriteMicroseconds(3, 2500) # Подаём на сервопривод ШИМ с длительностью импульсов 2500 мкс.
sleep(.5) # Ждём полсекунды.
#
# ПРИМЕЧАНИЕ:
# Для проверки работы скетча подключите сервопривод к 3 выводу.
# Угол поворота сервоприводов зависит от длительности импульсов
# подаваемого сигнала ШИМ. У большинства сервоприводов угол в 0°
# соответствует длительности импульсов 500 мкс, а угол в 180°
# соответствует длительности импульсов 2500 мкс.
# Если Вы не знаете соотношение длительности импульсов к углу
# поворота Вашего сервопривода, функция servoWriteMicroseconds()
# поможет Вам его найти, перебором значений длительностей.
# Обращение к функции servoWriteMicroseconds() устанавливает
# частоту ШИМ в 50 Гц на всех выводах поддерживающих ШИМ: 0,1,2,3
# Значит если сконфигурировать любой другой вывод, поддерживающий
# ШИМ, на работу в качестве аналогового выхода, его сигнал так же
# снизится до 50 Гц.
| 68.21875 | 152 | 0.610628 | 217 | 2,183 | 6.152074 | 0.543779 | 0.062921 | 0.058427 | 0.049438 | 0.174532 | 0.174532 | 0.174532 | 0.073408 | 0.073408 | 0 | 0 | 0.039804 | 0.344022 | 2,183 | 31 | 153 | 70.419355 | 0.891061 | 0.657352 | 0 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005961 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3585544a1e55aec024daef4b423b99d4176971e3 | 2,601 | py | Python | gygax/gygax/api/storage.py | tmacro/hitman | 525100ddb5939a3aa9b51df9d2e6449dc3035bbe | [
"BSD-3-Clause"
] | 4 | 2017-07-21T01:39:17.000Z | 2019-02-24T08:29:44.000Z | gygax/gygax/api/storage.py | tmacro/hitman-old | 525100ddb5939a3aa9b51df9d2e6449dc3035bbe | [
"BSD-3-Clause"
] | null | null | null | gygax/gygax/api/storage.py | tmacro/hitman-old | 525100ddb5939a3aa9b51df9d2e6449dc3035bbe | [
"BSD-3-Clause"
] | 1 | 2017-06-23T21:01:51.000Z | 2017-06-23T21:01:51.000Z | from ..models import User, Weapon, Location, Slack
from ..app import Session
from ..util.log import getLogger
_log = getLogger(__name__)
def session(f):
'''
Creates a new database session per call.
Wrapped functions should return a 2-tuple of
1) a list of models to be added to the session, and
2) a value to return. If 2) is None, the models from 1) are returned instead.
Optionally, wrapped functions can return a 3-tuple with
3) the value to return should committing the session fail.
'''
def decorator(*args, **kwargs):
session = Session()
ret = f(session, *args, **kwargs)
try:
if ret[0]:
for m in ret[0]:
session.merge(m)
session.commit()
except Exception as e:
_log.warning('Failed to commit session to db with %s'%e)
_log.exception(e)
if len(ret) == 3:
session.close()
return ret[2]
if ret[1] is None:
if not ret[0] is None and len(ret[0]) == 1:
return ret[0][0]
return ret[0]
return ret[1]
return decorator
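# Usage sketch (hypothetical model field; assumes User has a `name` column):
#
#   @session
#   def rename_user(session, uid, name):
#       user = session.query(User).filter_by(uid=uid).first()
#       user.name = name
#       return [user], None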
def query(f):
'''
Creates a new database query from the given model
'''
@session
def inner(s, *args, **kwargs):
q = s.query
r = f(q, *args, **kwargs)
return None, r
return inner
@query
def from_slack(query, slack):
u = query(Slack).filter_by(slack_id = slack).first()
return u.user if u else None
def from_uid(uid):
return User.query.filter_by(uid = uid).first()
def from_slack_id(slack_id):
return User.query.filter_by(slack_id = slack_id).first()
def slack_id_to_slack(slack_id):
user = from_slack_id(slack_id)
return user.slack if user else None
def slack_to_slack_id(slack):
user = from_slack(slack)
return user.slack_id if user else None
@session
def create_user(session, **kwargs):
u = User(**kwargs)
return [u], None
@session
def validate_slack(session, slack, uid = None):
user = from_slack(slack)
if user:
user.slack.confirmed = True
user.uid = uid
return [user], True, False
return None, False
@session
def create_weapon(session, desc):
w = Weapon(desc)
return [w], None, False
@session
def create_location(session, desc):
l = Location(desc)
return [l], None, False
@session
def set_weapon(session, slack, weapon):
user = from_slack(slack)
if not user.weapon:
w = create_weapon(weapon)
user.weapon = w
else:
w = user.weapon
w.desc = weapon
return [user, w], True, False
@session
def set_location(session, slack, location):
user = from_slack(slack)
if not user.location:
l = create_location(location)
user.location = l
else:
l = user.location
l.desc = location
return [user, l], True, False
def info_locked(query, slack):
user = from_slack(slack)
return user.info_locked
| 22.422414 | 65 | 0.695502 | 423 | 2,601 | 4.172577 | 0.217494 | 0.043626 | 0.044193 | 0.050992 | 0.209065 | 0.099717 | 0.099717 | 0 | 0 | 0 | 0 | 0.008535 | 0.189158 | 2,601 | 115 | 66 | 22.617391 | 0.828355 | 0.135333 | 0 | 0.157303 | 0 | 0 | 0.017133 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.179775 | false | 0 | 0.033708 | 0.022472 | 0.438202 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3585c0541edd891f6f00bd6ad2b4f891a2728693 | 1,400 | py | Python | utils/municipios.py | alexandrerays/E-xray | e02a08436013883491b55641a55f7e3268671f12 | [
"MIT"
] | null | null | null | utils/municipios.py | alexandrerays/E-xray | e02a08436013883491b55641a55f7e3268671f12 | [
"MIT"
] | null | null | null | utils/municipios.py | alexandrerays/E-xray | e02a08436013883491b55641a55f7e3268671f12 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # BCG Gamma Challenge
# # Libraries
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
# In[2]:
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# # Dataset
# In[3]:
df_municipios_2015 = pd.read_csv('../bcggammachallenge/municipios/municipios20150101.csv')
# In[4]:
df_municipios_2016 = pd.read_csv('../bcggammachallenge/municipios/municipios20160101.csv')
# In[5]:
df_municipios_2017 = pd.read_csv('../bcggammachallenge/municipios/municipios20170101.csv')
# In[6]:
df_municipios_2015.shape
# In[7]:
df_municipios_2016.shape
# In[8]:
df_municipios_2017.shape
# In[9]:
df = pd.concat([df_municipios_2015, df_municipios_2016, df_municipios_2017])
# In[10]:
df.shape
# In[11]:
df.head(10)
# In[12]:
df['regiao'].value_counts()
# In[13]:
df['ano_censo'].value_counts()
# In[14]:
df[df['municipio'] == 'Jussara']
# In[15]:
columns = [
'ano_censo',
'regiao',
'unidade_federativa',
'municipio',
'num_escolas',
'num_escolas_em_atividade',
'num_professores',
'num_estudantes',
'num_funcionarios'
]
df[columns].head()
# In[16]:
df[columns].describe()
# In[17]:
df.columns
# In[18]:
df['cod_municipio'].dtypes
# In[19]:
df.head()
# In[ ]:
| 9.589041 | 90 | 0.657143 | 191 | 1,400 | 4.617801 | 0.445026 | 0.122449 | 0.054422 | 0.088435 | 0.170068 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085069 | 0.177143 | 1,400 | 145 | 91 | 9.655172 | 0.680556 | 0.160714 | 0 | 0 | 0 | 0 | 0.317585 | 0.16273 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35877d5c57678bee58c472e473ccd5580ece82b7 | 1,627 | py | Python | EventGAN/models/eventgan_base.py | GEN418/EventGAN | 372318bc8f285f513db4babf7786b5c04e97c86d | [
"MIT"
] | 38 | 2019-12-19T10:01:49.000Z | 2022-03-24T07:58:53.000Z | EventGAN/models/eventgan_base.py | GEN418/EventGAN | 372318bc8f285f513db4babf7786b5c04e97c86d | [
"MIT"
] | 5 | 2020-01-16T06:26:29.000Z | 2021-05-24T00:07:58.000Z | EventGAN/models/eventgan_base.py | GEN418/EventGAN | 372318bc8f285f513db4babf7786b5c04e97c86d | [
"MIT"
] | 9 | 2020-01-22T03:31:50.000Z | 2021-03-25T13:18:06.000Z | from models.unet import UNet
import torch
from utils.utils import get_latest_checkpoint
class EventGANBase(object):
def __init__(self, options):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.generator = UNet(num_input_channels=2*options.n_image_channels,
num_output_channels=options.n_time_bins * 2,
skip_type='concat',
activation='relu',
num_encoders=4,
base_num_channels=32,
num_residual_blocks=2,
norm='BN',
use_upsample_conv=True,
with_activation=True,
sn=options.sn,
multi=False)
latest_checkpoint = get_latest_checkpoint(options.checkpoint_dir)
checkpoint = torch.load(latest_checkpoint)
self.generator.load_state_dict(checkpoint["gen"])
self.generator.to(self.device)
def forward(self, images, is_train=False):
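"""Run the generator on a 2xHxW (or Bx2xHxW) image pair and return the
predicted event volume; gradients are disabled unless is_train is True."""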
if len(images.shape) == 3:
images = images[None, ...]
assert len(images.shape) == 4 and images.shape[1] == 2, \
"Input images must be either 2xHxW or Bx2xHxW."
if not is_train:
with torch.no_grad():
self.generator.eval()
event_volume = self.generator(images)
self.generator.train()
else:
event_volume = self.generator(images)
return event_volume
| 41.717949 | 82 | 0.534726 | 168 | 1,627 | 4.958333 | 0.494048 | 0.109244 | 0.045618 | 0.057623 | 0.072029 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011917 | 0.381069 | 1,627 | 38 | 83 | 42.815789 | 0.815293 | 0 | 0 | 0.057143 | 0 | 0 | 0.04118 | 0 | 0 | 0 | 0 | 0 | 0.028571 | 1 | 0.057143 | false | 0 | 0.085714 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3587856e707b318804535564ab9952105bcae689 | 9,481 | py | Python | PredictFeatures/PredictActions.py | Iglohut/autoscore_3d | 04d039900c0ae1d535d1d624257ce1760edaf50e | [
"MIT"
] | null | null | null | PredictFeatures/PredictActions.py | Iglohut/autoscore_3d | 04d039900c0ae1d535d1d624257ce1760edaf50e | [
"MIT"
] | null | null | null | PredictFeatures/PredictActions.py | Iglohut/autoscore_3d | 04d039900c0ae1d535d1d624257ce1760edaf50e | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import os
import cv2
from PredictFeatures.VisualizeBox import BoxTemplate
from PredictFeatures.MakeVideo import *
import csv
import math
class SequenceExtracter:
df_all = pd.read_csv('./data/ehmt1/VideoNamesStatus.csv')
def __init__(self, vidnumber):
self.df = SequenceExtracter.df_all.iloc[vidnumber] # Dataframe with trial information
self.vidnumber = vidnumber
# The action list for all frames
self.ActionSequence = {
"pivot_locations": [],
"autoscores": [],
"actions": []
}
if self.canalyze or True: # Or True is temporary
self.template = BoxTemplate(self.df.VideoName) # Get the BoxTemplate
self.df_pose = pd.read_csv(self.posefile, header=[0, 1], skipinitialspace=True)
self.headPoints = self.calc_headPoints # precalculate all headpoints to speed up loops
self.headDirections = list(self.df_pose[("angle1" , "Nose")])
self.ActionSequence['autoscores'] = self.set_autoscores
else:
print("My apologies, this video was not analyzed by autoscore and DLC OR is already fully analyzed!. Video({}):".format(vidnumber), self.df.VideoName)
@property
def canalyze(self):
"""returns ff the video was actually analyzed according to file"""
if self.df.StatusPredicted == 1 and not math.isnan(self.df.genotype): # If analyzed AND train data available
return True
else:
return False
@property
def vidname(self):
"""
:return: raw path of current video
"""
return self.template.vidname
@property
def posefile(self):
"""Returns the pose estimation absolute path of the current video"""
posedir = os.getcwd() + '/data/ehmt1/ehmt1_poses'
posefiles = os.listdir(posedir)
posefiles = [file for file in posefiles if 'ORI' in file]
myposefile = [file for file in posefiles if self.df.VideoName.split('/')[-1].split('.')[0] in file]
myposefile = posedir + "/" + myposefile[0]
return myposefile
@property
    def autoscorefile(self):
        """Returns the autoscore estimation path of the current video"""
dir = os.getcwd() + '/data/ehmt1/ehmt1_autoscores'
files = os.listdir(dir)
myfile = [file for file in files if self.df.VideoName.split('/')[-1].split('.')[0] in file if '#' not in file]
myfile = dir + "/" + myfile[0]
return myfile
@property
    def set_autoscores(self):
        """Returns, for every frame, the autoscore decision of whether an object is being explored."""
df = pd.read_csv(self.autoscorefile)
df = list(df["Explore"])
        df = list(np.zeros(13)) + df + list(np.zeros(14))  # because autoscore used windows of 27 frames
len_diff = abs(len(self) - len(df))
df += list(np.zeros(len_diff)) # Quick dirty length inconsistency fix for DLC/autoscore
return df
@property
def calc_headPoints(self):
xs = self.df_pose[[("Nose", "x"), ("Left ear", "x"), ("Right ear", "x")]].mean(axis=1)
ys = self.df_pose[[("Nose", "y"), ("Left ear", "y"), ("Right ear", "y")]].mean(axis=1)
return {'x': list(xs),
'y': list(ys)
}
def headPoint(self, frame_idx):
"""
:param frame_idx: index of frame in video/posefile
:return: tuple pixel position of head
"""
x = self.headPoints['x'][frame_idx]
y = self.headPoints['y'][frame_idx]
return (x, y)
def headDirection(self, frame_idx):
"""
Automatically controls for videos that are rotated
:param frame_idx: index of frame in video/posefile
:return: head direction in radians
"""
# HD = self.df_pose.loc[frame_idx, ("angle1", "Nose")]
HD = self.headDirections[frame_idx]
flips = self.template.df["Trial_flip"]["Trial_flip"]["Degrees"].values[0] # Clockwise flips
if np.sign(HD) == -1:
HD += 2 * np.pi
if flips > 0:
delta_HD = (4 - flips) * (np.pi / 2) # Some videos are flipped
else:
delta_HD = 0
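        # wrap the adjusted angle back into (-pi, pi] via the complex-exponential trick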
HD = np.angle(np.exp(1j * (HD + delta_HD)))
return HD
def get_pivot_locations(self):
"""
Creates sequence of all pivot locations where the mouse was for all frames.
"""
# if not self.canalyze:
# return
pivot_locations = []
for i in range(len(self)): # for all estimated frames
# print("Get pivot locations: {}/{}".format(i, len(self)))
position = self.headPoint(i)
location = self.template.detect(position, self.ActionSequence["autoscores"][i])
pivot_locations.append(location)
self.ActionSequence["pivot_locations"] = pivot_locations
def get_actions(self):
"""
Calculates the actions based on DLC location, template, and autoscore (to be implemented).
"""
if len(self.ActionSequence["pivot_locations"]) == 0:
            print("There are no pivot locations yet.")
return
all_actions = []
for i, pivots in enumerate(self.ActionSequence["pivot_locations"]):
frame_actions = []
for pivot in pivots:
if pivot is not None:
superlocation = pivot[0]
sublocation = pivot[1]
if superlocation == "Wall" and self._check_wall(sublocation, i):
frame_actions.append("Wall")
if superlocation == "Object":
if sublocation == self.df.obj_1:
frame_actions.append('obj_1')
if sublocation == self.df.obj_2:
frame_actions.append('obj_2')
if superlocation == "Corner":
frame_actions.append('Corner')
if len(frame_actions) == 0: frame_actions = [None]
all_actions.append(frame_actions)
self.ActionSequence["actions"] = all_actions
def _check_wall(self, sublocation, frame_idx):
HD = self.headDirection(frame_idx)
if sublocation == "North" and HD > 1 * np.pi / 10 and HD < 9 * np.pi / 10:
return True
        elif sublocation == "East" and bool(HD < 4 * np.pi / 10 and not HD < -4 * np.pi / 10):  # facing the east wall
return True
elif sublocation == "South" and HD < -1 * np.pi / 10 and HD > - 9 * np.pi / 10:
return True
elif sublocation == "West" and bool(HD < - 6 * np.pi / 10) ^ bool(HD > 6 * np.pi / 10): # ^ is XOR
return True
else:
return False
def make_video(self):
if bool(len(self)) and bool(len(self.ActionSequence["actions"])):
make_video(self)
else:
print("Cannot make video. Either no actions yet or ambiguous videolength.")
def save_actions(self, path=None):
if path is None:
path = os.getcwd() + '/data/ehmt1/ehmt1_actions/' + self.vidname.split('.')[0].split('/')[-1] + '.csv'
with open(path, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerows(self.ActionSequence['actions'])
def save_status(self, status=3):
SequenceExtracter.df_all.loc[self.vidnumber, 'StatusPredicted'] = status
SequenceExtracter.df_all.to_csv('./data/ehmt1/VideoNamesStatus.csv', index=False)
print("Successfully updated the status({}) of {}".format(status, self.vidname))
def __call__(self):
return self.ActionSequence
def __len__(self):
df_length = len(self.df_pose)
videolength = self.df.framelength
if df_length == videolength:
return df_length
elif df_length > videolength:
print("Couldn't determine length. More pose estimations than frames.")
return None
elif videolength > df_length:
print("Couldn't determine length. More frames than estimated poses.")
return None
myvid = SequenceExtracter(3477 + 28) # 2530 round8, 1670 round 7 norot, --2701 examplevid
# myframe = IconFrame(myvid.template.midframe)
# myframe.embed_icons()
# cv2.imshow('Templateee', myframe())
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# Make video
myvid.get_pivot_locations()
myvid.get_actions()
myvid.make_video()
# myvid.save_actions()
# for i in range(len(myvid.ActionSequence["actions"])):
# print(myvid.ActionSequence["actions"][i], i, myvid.ActionSequence["pivot_locations"][i], myvid.headPoint(i))
# listpath = '/media/iglohut/MD_Smits/Internship/autoscore_3d/data/ehmt1/ehmt1_actions/mouse_training_OS_5trials_inteldis_1_7animals_t0002_raw.csv'
# with open(listpath, 'r') as f:
# reader = csv.reader(f)
# mylist = list(reader)
# TODO make class that iterates over all len(SequenceExtracter.df_all) to analyze and save the data
# TODO - make that multiprocessing
# fails = []
# for i in range(len(SequenceExtracter.df_all)):
#
# myvid = SequenceExtracter(i)
#
# if myvid.canalyze:
# print("Analyzing video({}): {}".format(i, myvid.vidname))
# myvid.get_pivot_locations()
# myvid.get_actions()
# myvid.save_actions()
# myvid.save_status(status=3)
# else:
# print("Couldn't analyze video({})".format(i))
# fails.append(i)
| 36.04943 | 162 | 0.59846 | 1,147 | 9,481 | 4.8483 | 0.249346 | 0.018342 | 0.008632 | 0.023017 | 0.147815 | 0.092429 | 0.069412 | 0.061859 | 0.046754 | 0.046754 | 0 | 0.015538 | 0.280456 | 9,481 | 262 | 163 | 36.187023 | 0.799619 | 0.245649 | 0 | 0.12987 | 0 | 0 | 0.116326 | 0.020587 | 0 | 0 | 0 | 0.003817 | 0 | 1 | 0.11039 | false | 0 | 0.051948 | 0.006494 | 0.298701 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3587924ab12fa5d36bd5f968184c7d62787048e4 | 11,149 | py | Python | dendropy/test/test_dataio_nexus_reader_chars.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | dendropy/test/test_dataio_nexus_reader_chars.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | dendropy/test/test_dataio_nexus_reader_chars.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # !/usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Tests for general NEXUS character matrix reading.
"""
import unittest
import dendropy
from dendropy.utility import error
from dendropy.test.support import dendropytest
from dendropy.test.support import pathmap
from dendropy.test.support import standard_file_test_chars
from dendropy.test.support import compare_and_validate
from dendropy.dataio import nexusreader
from dendropy.utility import messaging
_LOG = messaging.get_logger(__name__)
class NexusCharactersReaderDnaTestCase(
standard_file_test_chars.DnaTestChecker,
dendropytest.ExtendedTestCase):
@classmethod
def setUpClass(cls):
cls.build()
def test_basic_nexus(self):
src_filenames = [
"standard-test-chars-dna.simple.nexus",
"standard-test-chars-dna.basic.nexus",
"standard-test-chars-dna.interleaved.nexus",
"standard-test-chars-dna.matchchar.nexus",
"standard-test-chars-dna.multi.nexus",
]
for src_idx, src_filename in enumerate(src_filenames):
# print(src_idx, src_filename)
src_path = pathmap.char_source_path(src_filename)
self.verify_get_from(
matrix_type=dendropy.DnaCharacterMatrix,
src_filepath=src_path,
schema="nexus",
factory_kwargs={},
check_taxon_annotations=False,
check_matrix_annotations=False,
check_sequence_annotations=False,
check_column_annotations=False,
check_cell_annotations=False)
class NexusCharactersReaderRnaTestCase(
standard_file_test_chars.RnaTestChecker,
dendropytest.ExtendedTestCase):
@classmethod
def setUpClass(cls):
cls.build()
def test_basic_nexus(self):
src_filenames = [
"standard-test-chars-rna.simple.nexus",
"standard-test-chars-rna.basic.nexus",
"standard-test-chars-rna.interleaved.nexus",
"standard-test-chars-rna.matchchar.nexus",
"standard-test-chars-rna.multi.nexus",
]
for src_idx, src_filename in enumerate(src_filenames):
# print(src_idx, src_filename)
src_path = pathmap.char_source_path(src_filename)
self.verify_get_from(
matrix_type=dendropy.RnaCharacterMatrix,
src_filepath=src_path,
schema="nexus",
factory_kwargs={},
check_taxon_annotations=False,
check_matrix_annotations=False,
check_sequence_annotations=False,
check_column_annotations=False,
check_cell_annotations=False)
class NexusCharactersReaderProteinTestCase(
standard_file_test_chars.ProteinTestChecker,
dendropytest.ExtendedTestCase):
@classmethod
def setUpClass(cls):
cls.build()
def test_basic_nexus(self):
src_filenames = [
"standard-test-chars-protein.simple.nexus",
"standard-test-chars-protein.basic.nexus",
"standard-test-chars-protein.interleaved.nexus",
"standard-test-chars-protein.matchchar.nexus",
"standard-test-chars-protein.multi.nexus",
]
for src_idx, src_filename in enumerate(src_filenames):
# print(src_idx, src_filename)
src_path = pathmap.char_source_path(src_filename)
self.verify_get_from(
matrix_type=dendropy.ProteinCharacterMatrix,
src_filepath=src_path,
schema="nexus",
factory_kwargs={},
check_taxon_annotations=False,
check_matrix_annotations=False,
check_sequence_annotations=False,
check_column_annotations=False,
check_cell_annotations=False)
class NexusCharactersContinuousTestCase(
standard_file_test_chars.ContinuousTestChecker,
dendropytest.ExtendedTestCase):
@classmethod
def setUpClass(cls):
cls.build()
def test_basic_nexus(self):
src_filenames = [
"standard-test-chars-continuous.mesquite.nexus",
"standard-test-chars-continuous.mesquite.interleaved.nexus",
]
for src_idx, src_filename in enumerate(src_filenames):
# print(src_idx, src_filename)
src_path = pathmap.char_source_path(src_filename)
self.verify_get_from(
matrix_type=dendropy.ContinuousCharacterMatrix,
src_filepath=src_path,
schema="nexus",
factory_kwargs={},
check_taxon_annotations=False,
check_matrix_annotations=False,
check_sequence_annotations=False,
check_column_annotations=False,
check_cell_annotations=False)
class NexusStandardCharacters01234TestCase(
standard_file_test_chars.Standard01234TestChecker,
dendropytest.ExtendedTestCase):
@classmethod
def setUpClass(cls):
cls.build()
def test_basic_nexus(self):
src_filenames = [
"standard-test-chars-generic.simple.nexus",
"standard-test-chars-generic.basic.nexus",
"standard-test-chars-generic.dotted.nexus",
"standard-test-chars-generic.interleaved.nexus",
]
for src_idx, src_filename in enumerate(src_filenames):
# print(src_idx, src_filename)
src_path = pathmap.char_source_path(src_filename)
self.verify_get_from(
matrix_type=dendropy.StandardCharacterMatrix,
src_filepath=src_path,
schema="nexus",
factory_kwargs={},
check_taxon_annotations=False,
check_matrix_annotations=False,
check_sequence_annotations=False,
check_column_annotations=False,
check_cell_annotations=False)
class NexusTooManyTaxaTest(
dendropytest.ExtendedTestCase):
def testTooManyTaxaNonInterleaved(self):
data_str = """\
#NEXUS
BEGIN TAXA;
DIMENSIONS NTAX=2;
TAXLABELS AAA BBB ;
END;
BEGIN CHARACTERS;
DIMENSIONS NCHAR=8;
FORMAT DATATYPE=DNA GAP=- MISSING=? MATCHCHAR=.;
MATRIX
AAA ACGTACGT
BBB ACGTACGT
CCC ACGTACGT
;
END;
"""
self.assertRaises(nexusreader.NexusReader.TooManyTaxaError,
dendropy.DnaCharacterMatrix.get_from_string,
data_str,
'nexus')
class NexusCharsSubsetsTest(
compare_and_validate.Comparator,
dendropytest.ExtendedTestCase):
def verify_subsets(self, src_filename, expected_sets):
"""
``src_filename`` -- name of file containing full data and charsets
statement
``expected_sets`` -- dictionary with keys = label of charset, and values
                = name of file with subset of characters corresponding
to the charset.
"""
src_data = dendropy.DnaCharacterMatrix.get_from_path(
pathmap.char_source_path(src_filename),
'nexus')
state_alphabet = src_data.default_state_alphabet
self.assertEqual(len(src_data.character_subsets), len(expected_sets))
for label, expected_data_file in expected_sets.items():
_LOG.debug(label)
self.assertTrue(label in src_data.character_subsets)
result_subset = src_data.export_character_subset(label)
expected_subset = dendropy.DnaCharacterMatrix.get_from_path(
pathmap.char_source_path(expected_data_file),
'nexus')
# confirm subset is correct
self.compare_distinct_char_matrix(
result_subset,
expected_subset,
taxon_namespace_scoped=False,
)
# mutate new and confirm that old remains unchanged
e1_symbols = src_data[0].symbols_as_string()
r1 = result_subset[0]
dummy_state = state_alphabet["A"]
for idx in range(len(r1)):
r1[idx].value = dummy_state
self.assertEqual(e1_symbols, src_data[0].symbols_as_string())
# mutate old and confirm that new remains unchanged
r2_symbols = result_subset[1].symbols_as_string()
e2 = src_data[1]
dummy_state = state_alphabet["A"]
for idx in range(len(e2)):
e2[idx].value = dummy_state
self.assertEqual(r2_symbols, result_subset[1].symbols_as_string())
def testNonInterleaved(self):
"""
Charsets here go through all forms of position specification.
"""
expected_sets = {
"coding" : "primates.chars.subsets-coding.nexus",
"noncoding" : "primates.chars.subsets-noncoding.nexus",
"1stpos" : "primates.chars.subsets-1stpos.nexus",
"2ndpos" : "primates.chars.subsets-2ndpos.nexus",
"3rdpos" : "primates.chars.subsets-3rdpos.nexus",
}
self.verify_subsets('primates.chars.subsets-all.nexus', expected_sets)
def testInterleaved(self):
"""
A bug in DendroPy resulted in the block immediately following an
interleaved character matrix DATA or CHARACTERS block being skipped.
This tests for it by ensuring that the ASSUMPTIONS block following an
interleaved CHARACTERS block is parsed. A better test would approach
the issue more directly, by checking to see if block parsing left the
stream reader in the correct position.
"""
expected_sets = {
"c1" : "interleaved-charsets-c1.nex",
"c2" : "interleaved-charsets-c2.nex",
"c3" : "interleaved-charsets-c3.nex",
}
self.verify_subsets('interleaved-charsets-all.nex', expected_sets)
if __name__ == "__main__":
unittest.main()
| 38.577855 | 80 | 0.594582 | 1,081 | 11,149 | 5.899167 | 0.233117 | 0.038106 | 0.055982 | 0.055198 | 0.51968 | 0.413674 | 0.403324 | 0.4016 | 0.379959 | 0.361769 | 0 | 0.008111 | 0.314378 | 11,149 | 288 | 81 | 38.711806 | 0.826138 | 0.129967 | 0 | 0.457143 | 0 | 0 | 0.173699 | 0.124239 | 0 | 0 | 0 | 0 | 0.02381 | 1 | 0.066667 | false | 0 | 0.042857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3588db513e29de0a5e0a4e53c7320b175c983327 | 9,724 | py | Python | filter/filter.py | CCA-UberProject/CCA-UberProject | 4d4290a0cc1d8b706d9abe800d2e71d4ffd8aa5f | [
"MIT"
] | null | null | null | filter/filter.py | CCA-UberProject/CCA-UberProject | 4d4290a0cc1d8b706d9abe800d2e71d4ffd8aa5f | [
"MIT"
] | null | null | null | filter/filter.py | CCA-UberProject/CCA-UberProject | 4d4290a0cc1d8b706d9abe800d2e71d4ffd8aa5f | [
"MIT"
] | null | null | null | import abc
import argparse
import dateparser
import io
# 3p
from datetime import timedelta
import pandas as pd
import boto3
from botocore import UNSIGNED
from botocore.config import Config
# usage: filter [-h] [--input INPUT] [--output OUTPUT] [--days DAYS]
#               [--hour HOUR] [--hour-range HOUR_RANGE] [--date DATE]
#               [--date-range DATE_RANGE]
#
# optional arguments:
#   -h, --help            show this help message and exit
#   --input INPUT         input bucket containing cluster data
#   --output OUTPUT       output bucket to write filtered results
#   --days DAYS           [filter] comma separated list of days of week
#   --hour HOUR           [filter] hour to filter on (24hr format)
#   --hour-range HOUR_RANGE
#                         [filter] comma separated start and end hours, ex 0,12
#   --date DATE           [filter] date to filter on (ex. 2014/08/17)
#   --date-range DATE_RANGE
#                         [filter] comma separated date range (ex.
#                         2014/08/17,2014/08/18)
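#
# Example invocation (a sketch; the bucket names are illustrative, the defaults below are used when omitted):
#   python filter.py --input cs498s3mapstore --output cs498s3mapstore-filtered \
#       --days mon,fri --hour-range 7,10 --date-range 2014/08/17,2014/08/24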
AWS_ACCESS_KEY = 'XXX'
AWS_SECRET_KEY = 'XXX'
DEFAULT_INPUT_BUCKET = 'cs498s3mapstore'
DEFAULT_OUTPUT_BUCKET = 'cs498s3mapstore-filtered'
COLUMN_HEADERS = ["cluster_id", "lat", "long", "weekday", "date", "hour", "type"]
DATE_FORMAT = "%Y/%m/%d"
DAY_OF_WEEK = {
0: 'Monday',
1: 'Tuesday',
2: 'Wednesday',
3: 'Thursday',
4: 'Friday',
5: 'Saturday',
6: 'Sunday'
}
class Filter(object):
__metaclass__ = abc.ABCMeta
""" Abstract base class for filters
"""
@abc.abstractproperty
def name(self):
"""(str) name of filter"""
@abc.abstractmethod
def is_match(self, row):
"""apply filter on self.original"""
class DateFilter(Filter):
def __init__(self, date):
self.dt = dateparser.parse(str(date))
self.date = self.dt.strftime(DATE_FORMAT)
def __str__(self):
return 'date={}'.format(self.dt.strftime('%Y-%m-%d'))
@property
def name(self):
return "DateFilter"
def is_match(self, df):
return df['date'] == self.date
class DateRangeFilter(Filter):
dates = []
def __init__(self, start, end):
self.dt_start = dateparser.parse(str(start))
self.dt_end = dateparser.parse(str(end))
if self.dt_start > self.dt_end:
raise ValueError("DateRangeFilter requires start date before end date")
# kind of hack but for date ranges generate all dates in the range
dt = self.dt_start
while dt <= self.dt_end:
self.dates.append(dt.strftime(DATE_FORMAT))
dt += timedelta(days=1)
def __str__(self):
return 'date={}-{}'.format(self.dt_start.strftime('%Y-%m-%d'), self.dt_end.strftime('%Y-%m-%d'))
@property
def name(self):
return "DateRangeFilter"
def is_match(self, df):
mask = (1 == 2)
for date in self.dates:
mask |= (df['date'] == date)
return (mask)
class DayOfWeekFilter(Filter):
def __init__(self, days):
self.weekdays = []
# day of the week as an integer, where Monday is 0 and Sunday is 6
for day in days:
weekday = None
if isinstance(day, int):
if day < 0 or day > 6:
raise ValueError("DayFilter as int must fall between 0(Mon) and 6(Sun)")
else:
weekday = day
elif isinstance(day, str):
day = day.strip().lower()
if day in ['0', 'monday', 'mon', 'm']:
weekday = 0
elif day in ['1', 'tuesday', 'tues', 'tu']:
weekday = 1
elif day in ['2', 'wednesday', 'wed', 'w']:
weekday = 2
elif day in ['3', 'thursday', 'thurs', 'th']:
weekday = 3
elif day in ['4', 'friday', 'fri', 'f']:
weekday = 4
elif day in ['5', 'saturday', 'sat']:
weekday = 5
elif day in ['6', 'sunday', 'sun']:
weekday = 6
else:
raise ValueError("Invalid string for DayFilter")
else:
raise ValueError("Invalid type for DayFilter")
self.weekdays.append(weekday)
def __str__(self):
return 'weekdays={}'.format(','.join([DAY_OF_WEEK[w] for w in self.weekdays]))
@property
def name(self):
return "DayOfWeekFilter"
def is_match(self, df):
mask = (1 == 2)
for day in self.weekdays:
mask |= (df['weekday'] == day)
return (mask)
class HourFilter(Filter):
def __init__(self, hour):
if isinstance(hour, int):
self.hour = hour
elif isinstance(hour, str):
hour = hour.lower()
if 'am' in hour:
self.hour = int(hour.replace('am', '').strip())
elif 'pm' in hour:
                hour = int(hour.replace('pm', '').strip())
self.hour = hour + 12 if hour < 12 else 0
else:
# just try converting to int
self.hour = int(hour)
else:
raise ValueError("Invalid type for HourFilter")
if self.hour < 0 or self.hour > 23:
raise ValueError("Invalid value for HourFilter")
def __str__(self):
return 'hour={}'.format(self.hour)
@property
def name(self):
return "HourFilter"
def is_match(self, df):
return self.hour == df['hour']
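# A few illustrative HourFilter parses (a sketch based on the parsing logic above):
#   HourFilter(15).hour     -> 15
#   HourFilter('9am').hour  -> 9
#   HourFilter('3pm').hour  -> 15
#   HourFilter('23').hour   -> 23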
class HourRangeFilter(Filter):
def __init__(self, start, end):
self.hour_start = HourFilter(start).hour
self.hour_end = HourFilter(end).hour
def __str__(self):
return 'hour={}-{}'.format(self.hour_start, self.hour_end)
@property
def name(self):
return "HourRangeFilter"
def is_match(self, df):
return (df['hour'] >= self.hour_start) & (df['hour'] <= self.hour_end)
# *** main script execution starts here ***
def build_mask(df, filters):
mask = 1 == 1
for f in filters:
# print('Applying filter: {}'.format(f))
mask &= f.is_match(df)
return mask
def build_filters(args):
filters = []
if args.days:
days = args.days.split(',')
filters.append(DayOfWeekFilter(days))
if args.hour:
filters.append(HourFilter(args.hour))
elif args.hour_range:
start, end = args.hour_range.split(',')
filters.append(HourRangeFilter(start, end))
if args.date:
filters.append(DateFilter(args.date))
elif args.date_range:
start, end = args.date_range.split(',')
filters.append(DateRangeFilter(start, end))
return filters
def normalize_df(df):
# necessary due to formatting from the MR job output
new = df["cluster_id_lat"].str.split('\t', n = 1, expand = True)
df['cluster_id'] = new[0]
df['lat'] = new[1]
df.drop(columns=['cluster_id_lat'], inplace=True)
return df
def filter(input_bucket, output_bucket, filters):
# first two headers are connected with a tab
headers = ['cluster_id_lat'] + COLUMN_HEADERS[2:]
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
contents = s3.list_objects(Bucket=input_bucket).get('Contents', [])
for item in contents:
key = item['Key']
if 'cluster/part' not in key:
continue
# get the data into a dataframe
print('reading data from key {}'.format(key))
obj = s3.get_object(Bucket=input_bucket, Key=key)
df = pd.read_csv(io.BytesIO(obj['Body'].read()), encoding='utf8', names=headers)
df = normalize_df(df)
# filter results
print('filtering data...')
mask = build_mask(df, filters)
df = df[mask]
# write to s3
write_df_to_s3(output_bucket, filters, key, df)
def write_df_to_s3(bucket, filters, key, df):
    # XXX: we're initializing the session every time, which is unnecessary
session = boto3.Session(
aws_access_key_id=AWS_ACCESS_KEY,
aws_secret_access_key=AWS_SECRET_KEY,
)
s3 = session.resource('s3')
key_parts = key.split('/')
fmt_str = '_'.join([str(f) for f in filters])
key_out = '{}/{}'.format(fmt_str, key_parts[-1])
csv_buffer = io.BytesIO()
df.to_csv(csv_buffer)
print('writing filtered data to bucket: {} key: {}'.format(bucket, key_out))
object = s3.Object(bucket, key_out)
object.put(Body=csv_buffer.getvalue())
def main():
parser = argparse.ArgumentParser("filter")
parser.add_argument("--input", type=str, help="input bucket containing cluster data")
parser.add_argument("--output", type=str, help="output bucket to write filtered results")
    parser.add_argument("--days", type=str, help="[filter] comma separated list of days of week")
    parser.add_argument("--hour", type=int, help="[filter] hour to filter on (24hr format)")
    parser.add_argument("--hour-range", type=str, help="[filter] comma separated start and end hours, ex 0,12")
    parser.add_argument("--date", type=str, help="[filter] date to filter on (ex. 2014/08/17)")
    parser.add_argument("--date-range", type=str, help="[filter] comma separated date range (ex. 2014/08/17,2014/08/18)")
args = parser.parse_args()
filters = build_filters(args)
input_bucket = DEFAULT_INPUT_BUCKET
if args.input:
input_bucket = args.input
output_bucket = DEFAULT_OUTPUT_BUCKET
if args.output:
output_bucket = args.output
if not filters:
print('please specify a filter option')
return
filter(input_bucket, output_bucket, filters)
if __name__ == '__main__':
main()
| 32.521739 | 121 | 0.585664 | 1,241 | 9,724 | 4.454472 | 0.205479 | 0.021708 | 0.021527 | 0.015195 | 0.188857 | 0.175289 | 0.12699 | 0.101664 | 0.064761 | 0.031838 | 0 | 0.018362 | 0.283114 | 9,724 | 298 | 122 | 32.630872 | 0.774638 | 0.136158 | 0 | 0.146789 | 0 | 0.004587 | 0.146051 | 0.00553 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12844 | false | 0 | 0.041284 | 0.059633 | 0.293578 | 0.018349 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3589885a8d73d6535b7b5e2a628ed350e073604f | 353 | py | Python | HTB/crypto/babyEncryption/BabyEncryption/dec.py | x86ed/CTFS | 8b586aee5f007bc25c4760db34af8ce21a93fcd8 | [
"MIT"
] | null | null | null | HTB/crypto/babyEncryption/BabyEncryption/dec.py | x86ed/CTFS | 8b586aee5f007bc25c4760db34af8ce21a93fcd8 | [
"MIT"
] | null | null | null | HTB/crypto/babyEncryption/BabyEncryption/dec.py | x86ed/CTFS | 8b586aee5f007bc25c4760db34af8ce21a93fcd8 | [
"MIT"
] | null | null | null | import string
def decrypt(file):
#open text file in read mode
ct = []
with open(file, "rb") as f:
while (byte := f.read(1)):
char = int(byte,16)-18
char = 179 * char % 256
ct.append(char)
return bytes(ct)
ct = decrypt('./msg.enc')
f = open('./msg.txt','w')
f.write(ct.hex())
f.close()
| 18.578947 | 35 | 0.509915 | 53 | 353 | 3.396226 | 0.641509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0.314448 | 353 | 18 | 36 | 19.611111 | 0.698347 | 0.076487 | 0 | 0 | 0 | 0 | 0.064615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3589a2b708d04af4868ecaecd0325884fbf7b4f9 | 4,720 | py | Python | Limix_QTL/qtl_output.py | Bonder-MJ/limix_qtl | 71f18f4e39cdba0f0e6dc59713b83701599bc86f | [
"Apache-2.0"
] | 7 | 2020-05-17T18:36:50.000Z | 2021-12-01T11:24:24.000Z | Limix_QTL/qtl_output.py | Bonder-MJ/limix_qtl | 71f18f4e39cdba0f0e6dc59713b83701599bc86f | [
"Apache-2.0"
] | 2 | 2021-04-27T19:15:17.000Z | 2022-01-13T09:51:27.000Z | Limix_QTL/qtl_output.py | Bonder-MJ/limix_qtl | 71f18f4e39cdba0f0e6dc59713b83701599bc86f | [
"Apache-2.0"
] | 6 | 2019-12-04T09:57:28.000Z | 2022-03-14T02:20:10.000Z | import sys
import os
import tables
import numpy as np
import qtl_fdr_utilities
#V0.1.1
class hdf5_writer:
def __init__(self,output_filename):
self.h5file = tables.open_file(output_filename,'w')
def close(self):
self.h5file.close()
def add_result_df(self,qtl_results_df):
assert(len(set(qtl_results_df['feature_id'].values))==1)
feature_id = qtl_results_df['feature_id'].values[0]
column_names = ['snp_id','p_value','beta','beta_se','empirical_feature_p_value']
try:
#get the existing table for this feature
table = self.h5file.get_node('/'+feature_id)
except tables.exceptions.NoSuchNodeError:
#this table doesn't exist yet - create it
table = self.h5file.create_table(self.h5file.root,feature_id,QTL_result_hdf5,"QTL analysis results")
pass
qtl_result = table.row
for idx,df_row in qtl_results_df.iterrows():
for col_name in column_names:
qtl_result[col_name] = df_row[col_name]
qtl_result.append()
table.flush()
def apply_pval_correction(self,feature_id,top_pvalues_perm,cis_mode):
'''Function to correct p values based on nominal p values and the top
hits from permutation runs for the given feature.'''
table = self.h5file.get_node('/'+feature_id)
if(np.mean(top_pvalues_perm)>=0.999999999 and np.var(top_pvalues_perm)==0):
for row in table:
row['empirical_feature_p_value'] = row['p_value']
row.update()
alpha_para=-9
beta_para=-9
else:
correction_function, alpha_para, beta_para = qtl_fdr_utilities.define_correction_function(top_pvalues_perm,cis_mode)
for row in table:
row['empirical_feature_p_value'] = correction_function(row['p_value'])
row.update()
table.flush()
return [alpha_para, beta_para]
class text_writer:
def __init__(self,output_filename):
self.column_names = ['feature_id','snp_id','p_value','beta','beta_se','empirical_feature_p_value']
with open(output_filename,'w') as f:
header = '\t'.join(self.column_names)
f.write(header+'\n')
self.outfile = open(output_filename,'a')
def close(self):
self.outfile.close()
def add_result_df(self,qtl_results_df):
qtl_results_df.loc[:,self.column_names].to_csv(self.outfile,header=None,mode='a',index=False,sep='\t')
class hdf5_permutations_writer:
def __init__(self,output_filename,n_permutations):
self.h5file = tables.open_file(output_filename,'w')
self.column_names = ['snp_id'] + ['permutation_'+str(x) for x in range(n_permutations)]
#define the permutation result object on-the-fly, depending on the number of permutations that will be performed
self.permutation_result_definition = dict([(x,tables.Float64Col()) for x in self.column_names if x.split('_')[0]=='permutation'])
self.permutation_result_definition['snp_id'] = tables.StringCol(100)
def close(self):
self.h5file.close()
def add_permutation_results_df(self,permutation_results_df,feature_id):
'''Takes as input permutation_results_df and feature_id.
permutation_results_df must contain a "snp_id" column, and
columns labelled ""permutation_1","permutation_2",...,"permutation_n",
where n=the number of permutations specified when initialising the
hdf5_permutations_writer.'''
try:
#get the existing table for this feature
table = self.h5file.get_node('/'+feature_id)
except tables.exceptions.NoSuchNodeError:
#this table doesn't exist yet - create it
table = self.h5file.create_table(self.h5file.root,
feature_id,
self.permutation_result_definition,
"Permutation analysis results")
pass
permutation_result = table.row
for idx,df_row in permutation_results_df.iterrows():
for col_name in self.column_names:
permutation_result[col_name] = df_row[col_name]
permutation_result.append()
table.flush()
class QTL_result_hdf5(tables.IsDescription):
snp_id = tables.StringCol(100) # 100-character String
p_value = tables.Float64Col() # double (double-precision)
beta = tables.Float64Col() # double (double-precision)
beta_se = tables.Float64Col() # double (double-precision)
empirical_feature_p_value = tables.Float64Col()
| 42.142857 | 137 | 0.648517 | 605 | 4,720 | 4.781818 | 0.252893 | 0.037331 | 0.036295 | 0.038023 | 0.458693 | 0.403042 | 0.345316 | 0.285171 | 0.207397 | 0.156931 | 0 | 0.015833 | 0.250636 | 4,720 | 111 | 138 | 42.522523 | 0.802092 | 0.162712 | 0 | 0.3375 | 0 | 0 | 0.074512 | 0.025694 | 0 | 0 | 0 | 0 | 0.0125 | 1 | 0.125 | false | 0.025 | 0.0625 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
358a9c977c58e6f4cfcce7a5b78ccab9c72d945e | 19,221 | py | Python | pysoa/client/expander.py | arareko/pysoa | a90e428558500cf692f7f6e33fd358dd2779c328 | [
"Apache-2.0"
] | 91 | 2017-05-08T22:41:33.000Z | 2022-02-09T11:37:07.000Z | pysoa/client/expander.py | arareko/pysoa | a90e428558500cf692f7f6e33fd358dd2779c328 | [
"Apache-2.0"
] | 63 | 2017-06-14T20:08:49.000Z | 2021-06-16T23:08:25.000Z | pysoa/client/expander.py | arareko/pysoa | a90e428558500cf692f7f6e33fd358dd2779c328 | [
"Apache-2.0"
] | 26 | 2017-10-13T23:23:13.000Z | 2022-01-11T16:58:17.000Z | from __future__ import (
absolute_import,
unicode_literals,
)
from typing import (
Any,
Dict,
List,
Optional,
Union,
cast,
)
from conformity import fields
from conformity.settings import (
Settings,
SettingsSchema,
)
import six
__all__ = (
'ExpansionConverter',
'ExpansionNode',
'Expansions',
'ExpansionSettings',
'TypeExpansions',
'TypeNode',
'TypeRoutes',
)
class ExpansionSettings(Settings):
"""
Defines the schema for configuration settings used when expanding objects on responses with the Expansions tool.
"""
schema = {
'type_routes': fields.SchemalessDictionary(
key_type=fields.UnicodeString(
description='The name of the expansion route, to be referenced from the `type_expansions` '
'configuration',
),
value_type=fields.Dictionary(
{
'service': fields.UnicodeString(
description='The name of the service to call to resolve this route',
),
'action': fields.UnicodeString(
description='The name of the action to call to resolve this route, which must accept a single '
'request field of type `List`, to which all the identifiers for matching candidate '
'expansions will be passed, and which must return a single response field of type '
'`Dictionary`, from which all expansion objects will be obtained',
),
'request_field': fields.UnicodeString(
description='The name of the `List` identifier field to place in the `ActionRequest` body when '
'making the request to the named service and action',
),
'response_field': fields.UnicodeString(
description='The name of the `Dictionary` field returned in the `ActionResponse`, from which '
'the expanded objects will be extracted',
),
},
description='The instructions for resolving this type route',
),
description='The definition of all recognized types that can be expanded into and information about how '
'to resolve objects of those types through action calls',
),
'type_expansions': fields.SchemalessDictionary(
key_type=fields.UnicodeString(
description='The name of the type for which the herein defined expansions can be sought, which will be '
"matched with a key from the `expansions` dict passed to one of `Client`'s `call_***` "
'methods, and which must also match the value of a `_type` field found on response objects '
'on which extra data will be expanded',
),
value_type=fields.SchemalessDictionary(
key_type=fields.UnicodeString(
description='The name of an expansion, which will be matched with a value from the `expansions` '
"dict passed to one of `Client`'s `call_***` methods corresponding to the type key in "
'that dict',
),
value_type=fields.Dictionary(
{
'type': fields.Nullable(fields.UnicodeString(
description='The type of object this expansion yields, which must map back to a '
'`type_expansions` key in order to support nested/recursive expansions, and '
'may be `None` if you do not wish to support nested/recursive expansions for '
'this expansion',
)),
'route': fields.UnicodeString(
description='The route to use to resolve this expansion, which must match a key in the '
'`type_routes` configuration',
),
'source_field': fields.UnicodeString(
description='The name of the field in the base object that contains the identifier used '
'for obtaining the expansion object (the identifier will be passed to the '
'`request_field` in the route when resolving the expansion)',
),
'destination_field': fields.UnicodeString(
description='The name of a not-already-existent field in the base object into which the '
'expansion object will be placed after it is obtained from the route',
),
'raise_action_errors': fields.Boolean(
                            description='Whether to raise action errors encountered when expanding these '
'objects (by default, action errors are suppressed, which differs from the '
'behavior of the `Client` to raise action errors during normal requests)',
),
},
optional_keys=('raise_action_errors', ),
description='The definition of one specific possible expansion for this object type',
),
description='The definition of all possible expansions for this object type',
),
description='The definition of all types that may contain identifiers that can be expanded into objects '
'using the `type_routes` configurations',
),
} # type: SettingsSchema
class TypeNode(object):
"""
Represents a type node for an expansion tree.
"""
def __init__(self, node_type): # type: (six.text_type) -> None
"""
Create a new `TypeNode` instance.
:param node_type: The node type name
"""
self.type = node_type
self._expansions = {} # type: Dict[six.text_type, ExpansionNode]
def add_expansion(self, expansion_node): # type: (ExpansionNode) -> None
"""
Add a child expansion node to the type node's expansions.
If an expansion node with the same name is already present in type node's expansions, the new and existing
expansion node's children are merged.
:param expansion_node: The expansion node to add
"""
# Check for existing expansion node with the same name
existing_expansion_node = self.get_expansion(expansion_node.name) # type: Optional[ExpansionNode]
if existing_expansion_node:
# Expansion node exists with the same name, merge child expansions.
for child_expansion in expansion_node.expansions:
existing_expansion_node.add_expansion(child_expansion)
else:
# Add the expansion node.
self._expansions[expansion_node.name] = expansion_node
def get_expansion(self, expansion_name): # type: (six.text_type) -> Optional[ExpansionNode]
"""
Get an expansion node by name.
:param expansion_name: The name of the expansion
:return: an :class:`ExpansionNode` instance if the expansion exists, `None` otherwise.
"""
return self._expansions.get(expansion_name)
def find_objects(self, obj): # type: (Any) -> List[Dict[Any, Any]]
"""
Find all objects in obj that match the type of the type node.
:param obj: A dictionary or list of dictionaries to search, recursively
:return: a list of dictionary objects that have a "_type" key value that matches the type of this node.
"""
objects = [] # type: List[Dict[Any, Any]]
if isinstance(obj, dict):
# obj is a dictionary, so it is a potential match...
object_type = obj.get('_type')
if object_type == self.type:
# Found a match!
objects.append(obj)
else:
# Not a match. Check each value of the dictionary for matches.
for sub_object in six.itervalues(obj):
objects.extend(self.find_objects(sub_object))
elif isinstance(obj, list):
# obj is a list. Check each element of the list for matches.
for sub_object in obj:
objects.extend(self.find_objects(sub_object))
return objects
@property
def expansions(self): # type: () -> List[ExpansionNode]
"""
The type node's list of expansions.
"""
return list(six.itervalues(self._expansions))
def to_dict(self): # type: () -> Dict[six.text_type, List[six.text_type]]
"""
Convert the tree node to its dictionary representation.
:return: an expansion dictionary that represents the type and expansions of this tree node.
"""
expansion_strings = [] # type: List[six.text_type]
for expansion in self.expansions:
expansion_strings.extend(expansion.to_strings())
return {
self.type: expansion_strings,
}
class ExpansionNode(TypeNode):
"""
Represents a expansion node for an expansion tree.
If an expansion node has its own expansions, it can also function as a type node.
"""
def __init__(
self,
node_type, # type: six.text_type
name, # type: six.text_type
source_field, # type: six.text_type
destination_field, # type: six.text_type
service, # type: six.text_type
action, # type: six.text_type
request_field, # type: six.text_type
response_field, # type: six.text_type
raise_action_errors=True, # type: bool
):
# type: (...) -> None
"""
Create a new `ExpansionNode` instance.
:param node_type: The node type name
:param name: The node name
:param source_field: The type's source field name for the expansion identifier
:param destination_field: The type's destination field name for the expansion result
:param service: The name of the service that satisfies the expansion
:param action: The name of the service action that satisfies the expansion
:param request_field: The name of the field for the expansion request's body
:param response_field: The name of the field for the expansion response's body
:param raise_action_errors: Tells the client whether to raise an exception if the expansion action returns an
error response (defaults to True)
"""
super(ExpansionNode, self).__init__(node_type)
self.name = name
self.source_field = source_field
self.destination_field = destination_field
self.service = service
self.action = action
self.request_field = request_field
self.response_field = response_field
self.raise_action_errors = raise_action_errors
def to_strings(self): # type: () -> List[six.text_type]
"""
Convert the expansion node to a list of expansion strings.
:return: a list of expansion strings that represent the leaf nodes of the expansion tree.
"""
result = [] # type: List[six.text_type]
if not self.expansions:
result.append(self.name)
else:
for expansion in self.expansions:
result.extend('{}.{}'.format(self.name, es) for es in expansion.to_strings())
return result
TypeRoutes = Dict[
six.text_type,
Dict[six.text_type, six.text_type],
]
TypeExpansions = Dict[
six.text_type,
Dict[
six.text_type,
Dict[
six.text_type,
Union[six.text_type, bool],
],
],
]
Expansions = Dict[six.text_type, List[six.text_type]]
class ExpansionConverter(object):
"""
A utility class for converting the compact dictionary representation of expansions to expansion trees (and back
again).
"""
def __init__(self, type_routes, type_expansions): # type: (TypeRoutes, TypeExpansions) -> None
"""
Create an ExpansionConverter instance.
Type Routes:
To satisfy an expansion, the expansion processing code needs to know which service action to call and how to
        call it. Type routes solve this problem by giving the expansion processing code all the information it needs
to properly call a service action to satisfy an expansion.
Type Routes Configuration Format::
{
"<route>": {
"service": "<service name>",
"action": "<action name>",
"request_field": "<request field name>",
"response_field": "<response field name>",
},
...
}
<route> is the name of the expansion route, to be referenced from the
type expansions configuration
<service name> is the name of the service to call.
<action name> is the name of the action to call.
<request field> is the name of the field to use in the ActionRequest
body. The value of the field will be the expansion identifier
extracted from the object being expanded.
<response field> is the name of the field returned in the
ActionResponse body that contains the expansion object.
Type Expansions:
Type expansions detail the expansions that are supported for each type and the routes to use to expand them. If
a type wishes to support expansions, it must have a corresponding entry in the Type Expansions Configuration
dictionary.
Type Expansions Configuration Format::
{
"<type>": {
"<expansion name>": {
"type": "<expansion type>",
"route": "<expansion route>",
"source_field": "<source field name>",
"destination_field": "<destination field name>",
"raise_action_errors": <bool>,
},
...
},
...
}
<type> is a type for which you are defining expansions.
<expansion name> is the name of an expansion.
<expansion type> is the type of the expansion. This is used to look up
the type of the values returned by the expansion in this Type Expansions
Configuration dictionary for the purpose of processing nested/recursive
expansions.
<expansion route> is a reference to the route to use to process the
expansion. This is used to look up the appropriate expansion route in
the Type Routes Configuration.
<source field name> is the name of the source field that contains the
identifier for obtaining the expansion object.
<destination field name> is the name of the destination field into which
the expansion object will be placed.
:param type_routes: A type route configuration dictionary
:param type_expansions: A type expansions configuration dictionary
"""
self.type_routes = type_routes
self.type_expansions = type_expansions
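        # A minimal illustrative configuration (matching the formats documented above; the
        # service, action and field names are hypothetical):
        #   type_routes = {
        #       "author_route": {
        #           "service": "author_info",
        #           "action": "get_authors",
        #           "request_field": "ids",
        #           "response_field": "authors_by_id",
        #       },
        #   }
        #   type_expansions = {
        #       "book": {
        #           "author": {
        #               "type": "author",
        #               "route": "author_route",
        #               "source_field": "author_id",
        #               "destination_field": "author",
        #           },
        #       },
        #   }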
def dict_to_trees(self, expansion_dict): # type: (Expansions) -> List[TypeNode]
"""
Convert an expansion dictionary to a list of expansion trees.
Expansion Dictionary Format::
{
"<type>": ["<expansion string>", ...],
...
}
<type> is the type of object to expand.
<expansion string> is a string with the following format:
<expansion string> => <expansion name>[.<expansion string>]
:param expansion_dict: An expansion dictionary (see below)
:return: a list of expansion trees (:class:`TypeNode` instances).
"""
trees = [] # type: List[TypeNode]
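        # For example (hypothetical names), {"book": ["author", "author.address"]} yields one
        # TypeNode("book") whose "author" ExpansionNode carries a nested "address" expansion,
        # provided the matching entries exist in type_expansions and type_routes.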
for node_type, expansion_list in six.iteritems(expansion_dict):
type_node = TypeNode(node_type=node_type)
for expansion_string in expansion_list:
expansion_node = type_node
for expansion_name in expansion_string.split('.'):
child_expansion_node = expansion_node.get_expansion(expansion_name)
if not child_expansion_node:
type_expansion = self.type_expansions[expansion_node.type][expansion_name]
type_route = self.type_routes[cast(six.text_type, type_expansion['route'])]
if type_expansion['destination_field'] == type_expansion['source_field']:
raise ValueError(
'Expansion configuration destination_field error: '
'destination_field can not have the same name as the source_field: '
'{}'.format(type_expansion['source_field'])
)
child_expansion_node = ExpansionNode(
node_type=cast(six.text_type, type_expansion['type']),
name=expansion_name,
source_field=cast(six.text_type, type_expansion['source_field']),
destination_field=cast(six.text_type, type_expansion['destination_field']),
service=type_route['service'],
action=type_route['action'],
request_field=type_route['request_field'],
response_field=type_route['response_field'],
raise_action_errors=cast(bool, type_expansion.get('raise_action_errors', False)),
)
expansion_node.add_expansion(child_expansion_node)
expansion_node = child_expansion_node
trees.append(type_node)
return trees
@staticmethod
def trees_to_dict(trees_list): # type: (List[TypeNode]) -> Dict[six.text_type, List[six.text_type]]
"""
Convert a list of :class:`TypeNode` objects to an expansion dictionary.
:param trees_list: A list of :class:`TypeNode` instances
:return: An expansion dictionary that represents the expansions detailed in the provided expansions tree nodes
"""
result = {} # type: Dict[six.text_type, List[six.text_type]]
for tree in trees_list:
result.update(tree.to_dict())
return result
| 43.193258 | 120 | 0.577858 | 2,091 | 19,221 | 5.193209 | 0.13056 | 0.021273 | 0.033428 | 0.020996 | 0.303711 | 0.207846 | 0.17497 | 0.124321 | 0.080486 | 0.042545 | 0 | 0 | 0.351959 | 19,221 | 444 | 121 | 43.290541 | 0.871789 | 0.356797 | 0 | 0.164502 | 0 | 0.004329 | 0.267717 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.017316 | 0.025974 | 0 | 0.125541 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
358c92f84ed09b6bbc439019e0353a8589fd0446 | 3,081 | py | Python | capacity.py | MullaAhmed/Predicting-RUL-for-EV-Battery | 24662dc828c1e2867b14126f8074f006e4b2c380 | [
"MIT"
] | null | null | null | capacity.py | MullaAhmed/Predicting-RUL-for-EV-Battery | 24662dc828c1e2867b14126f8074f006e4b2c380 | [
"MIT"
] | null | null | null | capacity.py | MullaAhmed/Predicting-RUL-for-EV-Battery | 24662dc828c1e2867b14126f8074f006e4b2c380 | [
"MIT"
] | 2 | 2021-06-20T06:38:28.000Z | 2022-02-23T14:13:13.000Z | # Importing libraries
#general stuff
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# our time will come (local project imports)
from preprocessing import *
from util import *
#sk learn
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
datasets=["B0005.mat","B0006.mat","B0007.mat","B0018.mat",'B0025.mat', 'B0026.mat', 'B0027.mat', 'B0028.mat', 'B0029.mat', 'B0030.mat', 'B0031.mat', 'B0032.mat', 'B0033.mat', 'B0034.mat', 'B0036.mat', 'B0038.mat', 'B0039.mat', 'B0040.mat', 'B0041.mat', 'B0045.mat', 'B0046.mat', 'B0047.mat', 'B0048.mat', 'B0049.mat', 'B0051.mat', 'B0053.mat', 'B0054.mat', 'B0055.mat', 'B0056.mat']
#dataset not working 42,43,44,50,52
#track datasets with good and bad model fits
good=[]
bad=[]
for i in datasets:
print(i)
# Importing dataset (temp 24)
battery = loadMat(i)
#Creating dataframe
dfbattery = getDataframe(battery)
    l=[]#temporary storage for r2 scores
d={}#temp dict to get random state values from r2_score
for j in range (10):
x_train_0, x_test_0, y_train_0, y_test_0 = train_test_split(dfbattery['cycle'], dfbattery['capacity'], test_size=0.1,random_state=j)
lst_x, lst_y =(x_train_0, y_train_0)
x_train_0=np.array(x_train_0)
y_train_0=np.array(y_train_0)
#training model
from sklearn.svm import SVR
x_train_0 = x_train_0.reshape(-1, 1)
y_train_0 = y_train_0.reshape(-1, 1)
regressor = SVR(C=2000, epsilon=0.0001,kernel='rbf')
regressor.fit(x_train_0,y_train_0)
y_pred = regressor.predict(x_test_0.values.reshape(-1, 1))
# Evaluating the Model Performance
from sklearn.metrics import r2_score
x=float(r2_score(y_test_0,y_pred))
l.append(x)
d[x]=j
z=(d[(max(l))])
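    # z is the random_state that produced the best r2 score across the 10 trial splits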
if max(l)>0.80:
good.append("for {0} value of i {1} and r2_score {2} ".format(i,z,max(l)))
else:
bad.append("for {0} value of i {1} and r2_score {2} ".format(i,z,max(l)))
x_train, x_test, y_train, y_test = train_test_split(dfbattery['cycle'], dfbattery['capacity'], test_size=0.1,random_state=z)
x_train=np.array(x_train)
y_train=np.array(y_train)
x_train = x_train.reshape(-1, 1) #changes from 1 d array to 2 d array
y_train = y_train.reshape(-1, 1)
#Fitting model
regressor = SVR(C=2000, epsilon=0.0001,kernel='rbf') #epsilon defines the tube inside which error is allowed(must be small)
regressor.fit(x_train,y_train)
#Predicting data
y_pred = regressor.predict(x_test.values.reshape(-1, 1))
#Plotting curve
plt.plot(dfbattery['cycle'], dfbattery['capacity'],color='black')
plt.plot(dfbattery['cycle'],regressor.predict(dfbattery["cycle"].values.reshape(-1, 1)))
plt.xlabel('Cycles')
plt.ylabel('Battery Capacity')
temp='Model performance for Battery '+ str((i.split("."))[0])
plt.title(temp)
plt.show()
for j in good:
print("GOOD")
print(j)
for k in bad:
print("Bad")
print(k) | 35.011364 | 384 | 0.63778 | 485 | 3,081 | 3.903093 | 0.338144 | 0.044374 | 0.025885 | 0.02113 | 0.259905 | 0.203909 | 0.154253 | 0.154253 | 0.154253 | 0.114105 | 0 | 0.083471 | 0.214541 | 3,081 | 88 | 385 | 35.011364 | 0.69876 | 0.139565 | 0 | 0.070175 | 0 | 0 | 0.181068 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.157895 | 0.087719 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
358d3492ae1c058a6c5a9e8343c75d3ef14a7795 | 10,277 | py | Python | tfs/writer.py | st-walker/tfs | 7a229f4fecbf04d544c5116d79a281e4365ccd1d | [
"MIT"
] | 5 | 2019-02-18T14:38:59.000Z | 2021-12-14T15:33:50.000Z | tfs/writer.py | st-walker/tfs | 7a229f4fecbf04d544c5116d79a281e4365ccd1d | [
"MIT"
] | 54 | 2019-02-19T14:44:36.000Z | 2022-02-16T15:07:53.000Z | tfs/writer.py | st-walker/tfs | 7a229f4fecbf04d544c5116d79a281e4365ccd1d | [
"MIT"
] | 4 | 2019-10-17T08:58:57.000Z | 2022-02-15T15:55:18.000Z | """
Writer
-------------------
Writing functionality for **TFS** files.
"""
import logging
import pathlib
from collections import OrderedDict
from typing import List, Union
import numpy as np
import pandas as pd
from pandas.api import types as pdtypes
from tfs.constants import DEFAULT_COLUMN_WIDTH, INDEX_ID, MIN_COLUMN_WIDTH
from tfs.frame import TfsDataFrame, validate
LOGGER = logging.getLogger(__name__)
def write_tfs(
tfs_file_path: Union[pathlib.Path, str],
data_frame: Union[TfsDataFrame, pd.DataFrame],
headers_dict: dict = None,
save_index: Union[str, bool] = False,
colwidth: int = DEFAULT_COLUMN_WIDTH,
headerswidth: int = DEFAULT_COLUMN_WIDTH,
non_unique_behavior: str = "warn",
) -> None:
"""
    Writes the provided ``DataFrame`` to disk at **tfs_file_path**, optionally with the `headers_dict` as
    headers dictionary.
Args:
tfs_file_path (Union[pathlib.Path, str]): Path object to the output **TFS** file. Can be
a string, in which case it will be cast to a Path object.
data_frame (Union[TfsDataFrame, pd.DataFrame]): ``TfsDataFrame`` or ``pandas.DataFrame`` to
write to file.
headers_dict (dict): Headers for the `data_frame`. If not provided, assumes a ``TfsDataFrame``
was given and tries to use ``data_frame.headers``.
save_index (Union[str, bool]): bool or string. Default to ``False``. If ``True``, saves
the index of `data_frame` to a column identifiable by `INDEX&&&`. If given as string,
saves the index of `data_frame` to a column named by the provided value.
colwidth (int): Column width, can not be smaller than `MIN_COLUMN_WIDTH`.
headerswidth (int): Used to format the header width for both keys and values.
non_unique_behavior (str): behavior to adopt if non-unique indices or columns are found in the
dataframe. Accepts `warn` and `raise` as values, case-insensitively, which dictates
to respectively issue a warning or raise an error if non-unique elements are found.
"""
left_align_first_column = False
tfs_file_path = pathlib.Path(tfs_file_path)
validate(data_frame, f"to be written in {tfs_file_path.absolute()}", non_unique_behavior)
if headers_dict is None: # tries to get headers from TfsDataFrame
try:
headers_dict = data_frame.headers
except AttributeError:
headers_dict = OrderedDict()
data_frame = _autoset_pandas_types(data_frame) # will always make a copy of the provided df
if save_index:
left_align_first_column = True
_insert_index_column(data_frame, save_index)
colwidth = max(MIN_COLUMN_WIDTH, colwidth)
headers_str = _get_headers_string(headers_dict, headerswidth)
colnames_str = _get_colnames_string(data_frame.columns, colwidth, left_align_first_column)
coltypes_str = _get_coltypes_string(data_frame.dtypes, colwidth, left_align_first_column)
data_str = _get_data_string(data_frame, colwidth, left_align_first_column)
LOGGER.debug(f"Attempting to write file: {tfs_file_path.name} in {tfs_file_path.parent}")
with tfs_file_path.open("w") as tfs_data:
tfs_data.write( # the last "\n" is to have an EOL at EOF, which is UNIX standard
"\n".join((line for line in (headers_str, colnames_str, coltypes_str, data_str) if line)) + "\n"
)
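# Example usage (a minimal sketch; the file name and data values are made up):
#   df = TfsDataFrame({"S": [0.0, 1.0], "BETX": [160.0, 158.3]}, headers={"TITLE": "example"})
#   write_tfs("example.tfs", df)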
def _autoset_pandas_types(data_frame: Union[TfsDataFrame, pd.DataFrame]) -> Union[TfsDataFrame, pd.DataFrame]:
"""
    Tries to apply the ``.convert_dtypes()`` method of pandas on a copy of the provided dataframe.
If the operation is not possible, checks if the provided dataframe is empty (which prevents
``convert_dtypes()`` to internally use ``pd.concat``) and then return only a copy of the original
dataframe. Otherwise, raise the exception given by ``pandas``.
NOTE: Starting with pandas 1.3.0, this behavior which was a bug has been fixed. This means no
``ValueError`` is raised by calling ``.convert_dtypes()`` on an empty ``DataFrame``, and from this
function a warning is logged. Testing of this behavior is disabled for Python 3.7+ workers, but the
function is kept as to not force a new min version requirement on ``pandas`` or Python for users.
See my comment at https://github.com/pylhc/tfs/pull/83#issuecomment-874208869
TODO: remove the aforementioned check when we make Python 3.7 the minimum version for tfs-pandas,
aka when Python 3.6 reaches EOL (end of 2021).
Args:
data_frame (Union[TfsDataFrame, pd.DataFrame]): ``TfsDataFrame`` or ``pandas.DataFrame`` to
determine the types of.
Returns:
The dataframe with dtypes inferred as much as possible to the ``pandas`` dtypes.
"""
LOGGER.debug("Attempting conversion of dataframe to pandas dtypes")
try:
return data_frame.copy().convert_dtypes(convert_integer=False) # do not force floats to int
except ValueError as pd_convert_error: # If used on empty dataframes (uses concat internally)
if not data_frame.size and "No objects to concatenate" in pd_convert_error.args[0]:
LOGGER.warning("An empty dataframe was provided, no types were inferred")
return data_frame.copy() # since it's empty anyway, nothing to convert
else:
raise pd_convert_error
def _insert_index_column(data_frame: Union[TfsDataFrame, pd.DataFrame], save_index: str) -> None:
if isinstance(save_index, str): # save index into column by name given
idx_name = save_index
else: # save index into column, which can be found by INDEX_ID
try:
idx_name = INDEX_ID + data_frame.index.name
except TypeError:
idx_name = INDEX_ID
data_frame.insert(0, idx_name, data_frame.index)
def _get_headers_string(headers_dict: dict, width: int) -> str:
"""
    Returns the string to write a ``TfsDataFrame``'s headers to file. Will return an empty string if
    called for an empty headers dictionary, in order not to write an empty line to file.
Args:
headers_dict (dict): the ``TfsDataFrame`` headers.
width (int): column width to use when formatting keys and values from the headers dict.
Returns:
A full string representation for the headers dictionary.
"""
if headers_dict:
return "\n".join(_get_header_line(name, headers_dict[name], width) for name in headers_dict)
else:
return ""
def _get_header_line(name: str, value, width: int) -> str:
if not isinstance(name, str):
raise TypeError(f"{name} is not a string")
type_str = _value_to_type_string(value)
if type_str == "%s":
value = f'"{value}"'
return f"@ {name:<{width}} {type_str} {value:>{width}}"
def _get_colnames_string(colnames: List[str], colwidth: int, left_align_first_column: bool) -> str:
format_string = _get_row_format_string([None] * len(colnames), colwidth, left_align_first_column)
return "* " + format_string.format(*colnames)
def _get_coltypes_string(types: pd.Series, colwidth: int, left_align_first_column: bool) -> str:
fmt = _get_row_format_string([str] * len(types), colwidth, left_align_first_column)
return "$ " + fmt.format(*[_dtype_to_id_string(type_) for type_ in types])
def _get_data_string(
data_frame: Union[TfsDataFrame, pd.DataFrame],
colwidth: int,
left_align_first_column: bool,
) -> str:
if len(data_frame.index) == 0 or len(data_frame.columns) == 0:
return "\n"
format_strings = " " + _get_row_format_string(data_frame.dtypes, colwidth, left_align_first_column)
data_frame = _quote_string_columns(data_frame)
    data_frame = data_frame.astype(object)  # overrides pandas auto-conversion (which leads to a formatting bug)
return "\n".join(data_frame.apply(lambda series: format_strings.format(*series), axis=1))
def _get_row_format_string(dtypes: List[type], colwidth: int, left_align_first_column: bool) -> str:
return " ".join(
f"{{{indx:d}:"
f"{'<' if (not indx) and left_align_first_column else '>'}"
f"{_dtype_to_formatter(type_, colwidth)}}}"
for indx, type_ in enumerate(dtypes)
)
def _quote_string_columns(data_frame: Union[TfsDataFrame, pd.DataFrame]) -> Union[TfsDataFrame, pd.DataFrame]:
def quote_strings(s):
if isinstance(s, str):
if not (s.startswith('"') or s.startswith("'")):
return f'"{s}"'
return s
data_frame = data_frame.applymap(quote_strings)
return data_frame
def _value_to_type_string(value) -> str:
    dtype_ = np.array(value).dtype  # let numpy handle conversion to its dtypes
return _dtype_to_id_string(dtype_)
def _dtype_to_id_string(type_: type) -> str:
"""
Return the proper **TFS** identifier for the provided dtype.
Args:
type_ (type): an instance of the built-in type (in this package, one of ``numpy`` or ``pandas``
types) to get the ID string for.
Returns:
The ID string.
"""
if pdtypes.is_integer_dtype(type_) or pdtypes.is_bool_dtype(type_):
return "%d"
elif pdtypes.is_float_dtype(type_):
return "%le"
elif pdtypes.is_string_dtype(type_):
return "%s"
raise TypeError(
f"Provided type '{type_}' could not be identified as either a bool, int, float or string dtype"
)
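# For reference, the mapping implemented above (illustrative inputs):
#   _dtype_to_id_string(np.dtype("int64"))   -> "%d"
#   _dtype_to_id_string(np.dtype("float64")) -> "%le"
#   _dtype_to_id_string(np.dtype("object"))  -> "%s"  (object columns are treated as string-like)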
def _dtype_to_formatter(type_: type, colsize: int) -> str:
"""
Return the proper string formatter for the provided dtype.
Args:
type_ (type): an instance of the built-in type (in this package, one of ``numpy`` or ``pandas``
types) to get the formatter for.
colsize (int): size of the written column to use for the formatter.
Returns:
The formatter.
"""
if type_ is None:
return f"{colsize}"
if pdtypes.is_integer_dtype(type_) or pdtypes.is_bool_dtype(type_):
return f"{colsize}d"
elif pdtypes.is_float_dtype(type_):
return f"{colsize}.{colsize - len('-0.e-000')}g"
elif pdtypes.is_string_dtype(type_):
return f"{colsize}s"
raise TypeError(
f"Provided type '{type_}' could not be identified as either a bool, int, float or string dtype"
)
| 41.946939 | 110 | 0.68843 | 1,466 | 10,277 | 4.621419 | 0.196453 | 0.047823 | 0.026863 | 0.038376 | 0.287675 | 0.221255 | 0.188044 | 0.169151 | 0.136679 | 0.126937 | 0 | 0.004089 | 0.214751 | 10,277 | 244 | 111 | 42.118852 | 0.83544 | 0.383283 | 0 | 0.139535 | 0 | 0 | 0.118599 | 0.016188 | 0 | 0 | 0 | 0.004098 | 0 | 1 | 0.108527 | false | 0 | 0.069767 | 0.007752 | 0.341085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35904bf8929ce0120182a76fd16e46cbd45a1641 | 1,249 | py | Python | top_down_attention/TRAIN/utils/data_utils.py | don-tpanic/CostsBenefitsAttention | 7221b873283c210cafd54fa895ba10b418d8ca67 | [
"MIT"
] | 3 | 2021-03-20T19:19:58.000Z | 2021-04-21T18:12:05.000Z | top_down_attention/TRAIN/utils/data_utils.py | don-tpanic/CostsBenefitsAttention | 7221b873283c210cafd54fa895ba10b418d8ca67 | [
"MIT"
] | null | null | null | top_down_attention/TRAIN/utils/data_utils.py | don-tpanic/CostsBenefitsAttention | 7221b873283c210cafd54fa895ba10b418d8ca67 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import socket
from tensorflow.keras.applications.vgg16 import preprocess_input
def load_classes(num_classes, df='imagenetA'):
"""
load in all imagenet/imagenetA or other dataframe classes,
return:
-------
n classes of wnids, indices and descriptions
"""
df = pd.read_csv(f'groupings-csv/{df}_Imagenet.csv',
usecols=['wnid', 'idx', 'description'])
sorted_indices = np.argsort([i for i in df['wnid']])[:num_classes]
wnids = np.array([i for i in df['wnid']])[sorted_indices]
indices = np.array([int(i) for i in df['idx']])[sorted_indices]
descriptions = np.array([i for i in df['description']])[sorted_indices]
return wnids.tolist(), indices, descriptions
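# A minimal usage sketch (assumes the groupings-csv file described above is present;
# the number of classes is arbitrary):
#   wnids, indices, descriptions = load_classes(num_classes=10, df='imagenetA')
#   wnids is a sorted list of WordNet ids, indices the matching ImageNet label indices.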
def data_directory():
"""
Check which server we are on and return the corresponding
imagenet data directory.
"""
hostname = socket.gethostname()
server_num = int(hostname[4:6])
print(f'server_num = {server_num}')
if server_num <= 20:
imagenet_train = f'/mnt/fast-data{server_num}/datasets/ILSVRC/2012/clsloc/train/'
else:
imagenet_train = f'/fast-data{server_num}/datasets/ILSVRC/2012/clsloc/train/'
return imagenet_train | 33.756757 | 89 | 0.670136 | 171 | 1,249 | 4.777778 | 0.426901 | 0.066095 | 0.02448 | 0.034272 | 0.183599 | 0.172583 | 0.151775 | 0.112607 | 0.112607 | 0 | 0 | 0.013986 | 0.198559 | 1,249 | 37 | 90 | 33.756757 | 0.802198 | 0.165733 | 0 | 0 | 0 | 0 | 0.223896 | 0.149598 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.190476 | 0 | 0.380952 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
359074865780567f18cff8e5602fa82726fa9a22 | 9,321 | py | Python | src/asdeep/inferai.py | zhenhua-zhang/ASdeep | d0878fc0ee9787767a9f83bf6dd92601abe0afb2 | [
"MIT"
] | null | null | null | src/asdeep/inferai.py | zhenhua-zhang/ASdeep | d0878fc0ee9787767a9f83bf6dd92601abe0afb2 | [
"MIT"
] | null | null | null | src/asdeep/inferai.py | zhenhua-zhang/ASdeep | d0878fc0ee9787767a9f83bf6dd92601abe0afb2 | [
"MIT"
] | null | null | null | """Infer allelic imbalance effects from allelic read counts using Bayesian inference."""
import os
import shutil
import logging
import tempfile
import traceback
from argparse import Namespace
from collections import OrderedDict
import arviz as az
import pymc3 as pm
from pysam.libctabix import asBed, asTuple
from .zutils import LogManager
from .tabdict import BEDDict
from .tabdict import GTFDict
from .tabdict import VCFDict
# Suppress the logging of pymc3
logger = logging.getLogger("pymc3")
logger.propagate = False
logger.setLevel(logging.ERROR)
class AllelicCounts:
"""A class to handling allelic read counts."""
def __init__(self, sample_id: str, vcf_path: str, gtf_path: str,
bed_path: str, threads: int = 4, tar_feature: str = "exon",
hdi_prob: float = 0.90,
logman: LogManager = LogManager("RCPool")):
self._logman = logman
self._sample_id = sample_id
self._tar_feature = tar_feature
self._readcounts: OrderedDict = OrderedDict()
self._ai_summary: list = []
self._mrna_id: list = []
self._vcf_recs = VCFDict(vcf_path, mode="r", sample_id=sample_id,
threads=threads)
self._gtf_recs = GTFDict(gtf_path, mode="r", parser=asTuple(),
threads=threads)
self._bed_recs = BEDDict(bed_path, mode="r", parser=asBed(),
threads=threads)
self._hdi_prob = hdi_prob
self._trace = None
def __bool__(self):
return len(self._readcounts) > 0
def __getitem__(self, idx):
if isinstance(idx, str):
return self._readcounts[idx]
elif isinstance(idx, tuple) and len(idx) == 2:
gene_id, mrna_id = idx
return self._readcounts[gene_id][mrna_id]
raise KeyError("Unsupported way to index.")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
traceback.print_exception(exc_type, exc_value, tb)
if self._vcf_recs.is_open():
self._vcf_recs.close()
if self._gtf_recs.is_open():
self._gtf_recs.close()
if self._bed_recs.is_open():
self._bed_recs.close()
@property
def model(self):
return self._model
@property
def trace(self):
return self._trace
@property
def results(self):
return self._ai_summary
def fetch(self, **kwargs):
gtf_recs = self._gtf_recs.subset(**kwargs)
for gene_id, per_gene_rec in gtf_recs.tabdict.items():
if gene_id in self._readcounts:
raise KeyError(f"Duplicated entry: {gene_id}")
self._readcounts[gene_id] = OrderedDict()
for mrna_id, per_mrna_rec in per_gene_rec.items():
if mrna_id in self._readcounts[gene_id]:
raise KeyError(f"Duplicated entry: {mrna_id}")
self._readcounts[gene_id][mrna_id] = OrderedDict()
a12 = []
for per_exon_rec in per_mrna_rec:
chrom, _, feature, start, end, *_ = per_exon_rec
if feature != self._tar_feature:
continue
region = f"{chrom}:{start}-{end}"
rcpool = self._bed_recs.subset(region=region).tabdict
variants = self._vcf_recs.subset(region=region).tabdict
for key, per_var in variants.items():
if key not in rcpool:
continue
per_rc = rcpool[key]
chrom, pos, rsid, ref, alt, refrc, altrc, *_ = per_rc
_, _, _, _, _, is_phase, (a1_idx, a2_idx) = per_var
if a1_idx == a2_idx:
continue
if is_phase:
birc = (refrc, altrc)
a1_rc, a2_rc = birc[a1_idx], birc[a2_idx]
phase = f"{a1_idx}|{a2_idx}"
else:
a1_rc, a2_rc = altrc, refrc
phase = f"{a1_idx}/{a2_idx}"
a12.append((chrom, pos, ref, alt, rsid, phase, a1_rc,
a2_rc))
self._mrna_id.append((gene_id, mrna_id))
self._readcounts.update({gene_id: {mrna_id: a12}})
return self
def inferai(self, gene_id: list = None, mrna_id: list = None,
ab_sigma: float = 10, hdi_prob: float = None, **kwargs):
"""Infer allelic difference using MCMC."""
if "tune" not in kwargs:
kwargs["tune"] = 500
if "draws" not in kwargs:
kwargs["draws"] = 500
if "chains" not in kwargs:
kwargs["chains"] = 2
if "cores" not in kwargs:
kwargs["cores"] = kwargs["chains"]
if "progressbar" not in kwargs:
kwargs["progressbar"] = False
if "random_seed" not in kwargs:
kwargs["random_seed"] = 42
if "return_inferencedata" not in kwargs:
kwargs["return_inferencedata"] = True
if isinstance(hdi_prob, float) and 0 < hdi_prob < 1:
self._hdi_prob = hdi_prob
if gene_id is None:
gene_id = [x[0] for x in self._mrna_id]
if mrna_id is None:
mrna_id = [x[1] for x in self._mrna_id]
for per_gene_id, per_mrna_id in self._mrna_id:
if per_gene_id not in gene_id or per_mrna_id not in mrna_id:
continue
a12 = self._readcounts[per_gene_id][per_mrna_id]
a1_rc, a2_rc = [x[-2] for x in a12], [x[-1] for x in a12]
if not a1_rc or not a2_rc:
self._logman.warning(f"No enough reads for {per_mrna_id} of "
f"{per_gene_id}")
self._ai_summary.append((per_gene_id, per_mrna_id, "", "",
"", "", ""))
continue
n = sum(a1_rc) + sum(a2_rc)
k = sum(a1_rc)
self._model = pm.Model()
with self._model:
alpha = pm.HalfNormal(name="alpha", sigma=ab_sigma)
beta = pm.HalfNormal(name="beta", sigma=ab_sigma)
theta = pm.Beta(name="theta", alpha=alpha, beta=beta)
_ = pm.Binomial(name="exp", p=theta, n=n, observed=k)
self._trace = pm.sample(**kwargs)
obs_data = ";".join([":".join([str(i) for i in x]) for x in a12])
ai_summary = az.summary(self._trace, hdi_prob=self._hdi_prob)
mean, sd, hdi_lower, hdi_upper = ai_summary.iloc[-1, :4]
self._ai_summary.append((per_gene_id, per_mrna_id, mean, sd,
hdi_lower, hdi_upper, obs_data))
return self
def save_to_dist(self, fpath: str, sep=","):
lower_bound = (1 - self._hdi_prob) / 2
upper_bound = 1 - lower_bound
header = sep.join(["gene_id", "mrna_id", "mean", "sd",
f"hdi_{lower_bound:.3}",
f"hdi_{upper_bound:.3}", "evidence"])
with open(fpath, "w") as fhandle:
fhandle.write(header + "\n")
for rec in self._ai_summary:
line = sep.join([str(x) for x in rec]) + "\n"
fhandle.write(line)
return self
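# A minimal usage sketch mirroring the inferai() driver below (all paths, the sample id
# and the keyword arguments are placeholders, not tested inputs):
#   with AllelicCounts("SAMPLE1", "calls.vcf.gz", "genes.gtf.gz", "counts.bed.gz") as counts:
#       counts.fetch().inferai(draws=500, chains=2).save_to_dist("ai_summary.csv")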
def inferai(args: Namespace, logman: LogManager = LogManager("InferAI")):
"""Infer the allelic imbalance using Bayesian inference."""
n_cpu = args.n_cpu
n_draw = args.n_draw
n_tune = args.n_tune
n_chain = args.n_chain
hdi_prob = args.hdi_prob
bed_path = args.readcounts_table
gtf_path = args.genome_intervals
vcf_path = args.genetic_variants
sample_id = args.sample_id
tar_feature = args.feature
out_file = args.out_file
logman.info(f"HDI : {hdi_prob}")
logman.info(f"N cpu : {n_cpu}")
logman.info(f"N draws : {n_draw}")
logman.info(f"N tunes : {n_tune}")
logman.info(f"N chains : {n_chain}")
logman.info(f"Variants : {vcf_path}")
logman.info(f"Sample ID : {sample_id}")
logman.info(f"Output file : {out_file}")
logman.info(f"Genome region : {gtf_path}")
logman.info(f"Target features : {tar_feature}")
logman.info(f"Rreadcounts table: {bed_path}")
out_dir, _ = os.path.split(out_file)
out_dir = os.path.realpath(out_dir)
cache_path = tempfile.mkdtemp(prefix="theano-", dir=out_dir)
os.environ["THEANO_FLAGS"] = f"base_compiledir={cache_path}"
with AllelicCounts(sample_id=sample_id,
vcf_path=vcf_path,
gtf_path=gtf_path,
bed_path=bed_path,
tar_feature=tar_feature,
hdi_prob=hdi_prob) as allelic_counts:
(allelic_counts
.fetch()
.inferai(draws=n_draw, chains=n_chain, tune=n_tune, cores=n_cpu)
.save_to_dist(out_file))
# Clean-up the cache path
shutil.rmtree(cache_path, ignore_errors=True)
| 34.650558 | 88 | 0.551228 | 1,174 | 9,321 | 4.105622 | 0.204429 | 0.029876 | 0.025104 | 0.024689 | 0.113278 | 0.0639 | 0.015353 | 0.015353 | 0.015353 | 0.015353 | 0 | 0.011453 | 0.344276 | 9,321 | 268 | 89 | 34.779851 | 0.77716 | 0.028752 | 0 | 0.084577 | 0 | 0 | 0.087919 | 0.005426 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.069652 | 0.024876 | 0.18408 | 0.004975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35917ab070d275bbdce20580f0e34fe9eacd303e | 2,806 | py | Python | test/python/me/lp1-2018/exam.py | AndreasArne/python-examination | a24297f3f73b181e64e744d0b8b52d88d03e844b | [
"MIT"
] | null | null | null | test/python/me/lp1-2018/exam.py | AndreasArne/python-examination | a24297f3f73b181e64e744d0b8b52d88d03e844b | [
"MIT"
] | 14 | 2021-02-09T10:40:43.000Z | 2022-02-18T12:24:39.000Z | test/python/me/lp1-2018/exam.py | AndreasArne/python-examination | a24297f3f73b181e64e744d0b8b52d88d03e844b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Write your code in this file. Fill out the defined functions with your solutions.
You are free to write additional functions and modules as you see fit.
"""
import analyze_functions as analyze_functions
import date_time_functions as date_time_functions
def analyze_text():
"""
Analyze a text file for different characters
"""
while True:
c = input("What to do? ")
if c in ("s", "spaces"):
print(analyze_functions.spaces())
elif c in ("l", "letters"):
print(analyze_functions.letters())
elif c in ("c", "specials"):
print(analyze_functions.specials())
elif c in ("q", "quit"):
break
else:
print("Not an option!")
input("...")
return True
def validate_mobile(number):
"""
Validate mobile number
"""
if len(number) == 13 and number[3] == "-" and number[7] == " " and number[10] == " ":
if number[0:3] in ["070", "072", "073", "076", "079"]:
n = number[4:].replace(" ", "")
for c in n:
if not c.isdigit():
return False
return True
return False
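# Worked example for the format checked above (a made-up number, not anyone's phone number):
#   validate_mobile("072-123 45 67") -> True   (13 chars, allowed prefix, digits only)
#   validate_mobile("071-123 45 67") -> False  (prefix not in the allowed list)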
def verify_credit_card(number):
"""
Verify credit card numbers
"""
control = number[-1]
sequence = [int(x) for x in list(number)[:-1]]
for i, s_number in enumerate(sequence):
if i % 2 == 0:
temp = int(s_number) * 2
if temp > 9:
tmp = str(temp)
temp = int(tmp[0]) + int(tmp[1])
sequence[i] = temp
tot = sum(sequence)
tot *= 9
return control == str(tot)[-1]
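# Worked example of the check-digit logic above (a toy 3-digit value, not a real card number):
#   verify_credit_card("180") -> True
#   digits [1, 8] -> double the even positions -> [2, 8] -> sum 10 -> 10 * 9 = 90,
#   and the last digit of 90 matches the control digit "0".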
def find_difference(items, items2):
"""
    Find unique values between lists
"""
result = check_dup(items, items2)
result += check_dup(items2, items)
return sorted(result)
def check_dup(items_, items2_):
"""
check for duplicates in lists
"""
result = {}
for item in items_:
is_dup = False
for item2 in items2_:
if item.lower() == item2.lower():
is_dup = True
if not is_dup:
result[item] = item
return list(result.keys())
def validate_date_time():
"""
Find valid dates and times in text
"""
while True:
c = input("Enter a choice: ")
if c in ("d", "date"):
date_time_functions.find_dates()
elif c in ("t", "time"):
date_time_functions.find_times()
elif c in ("q", "quit"):
return True
else:
print("Not an option!")
input("...")
return True
if __name__ == '__main__':
analyze_text()
validate_mobile("")
verify_credit_card("")
find_difference([], [])
validate_date_time()
| 23.383333 | 89 | 0.53742 | 346 | 2,806 | 4.216763 | 0.358382 | 0.01645 | 0.023989 | 0.020562 | 0.064428 | 0.047978 | 0.047978 | 0.047978 | 0 | 0 | 0 | 0.022424 | 0.332502 | 2,806 | 119 | 90 | 23.579832 | 0.75654 | 0.130435 | 0 | 0.222222 | 0 | 0 | 0.056789 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.027778 | 0 | 0.236111 | 0.069444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3593ecbe40efe5f42f686c6d1a5e9d939f242cb2 | 1,059 | py | Python | Python/linear_regression.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 1,428 | 2018-10-03T15:15:17.000Z | 2019-03-31T18:38:36.000Z | Python/linear_regression.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 1,162 | 2018-10-03T15:05:49.000Z | 2018-10-18T14:17:52.000Z | Python/linear_regression.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 3,909 | 2018-10-03T15:07:19.000Z | 2019-03-31T18:39:08.000Z | import pandas as pa
import numpy as np
import matplotlib.pyplot as plt
datset=pa.read_csv("Salary_Data.csv")
x = datset.iloc[:, :-1].values  # independent variable: experience
y = datset.iloc[:, 1].values  # dependent variable: salary
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x,y,test_size=1/3, random_state=0)
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(xtrain,ytrain)
predy=regressor.predict(xtest)
tot=0
for i in range(0,len(ytest)):
dev= abs(ytest[i]-predy[i])
tot+=dev
average_deviation= tot/len(ytest)
#%% visualisation of the data
plt.scatter(xtrain,ytrain,color="red")
plt.plot(xtrain,regressor.predict(xtrain),color="blue")
plt.title("salary vs experience (training set)")
plt.xlabel("experience")
plt.ylabel("salary")
plt.show()
plt.scatter(xtest,ytest,color="red")
plt.plot(xtrain,regressor.predict(xtrain),color="green")
plt.title("salary predicted vs experience (training set)")
plt.xlabel("experience")
plt.ylabel("salary")
plt.show()
| 26.475 | 82 | 0.753541 | 159 | 1,059 | 4.949686 | 0.45283 | 0.060991 | 0.022872 | 0.038119 | 0.284625 | 0.284625 | 0.284625 | 0.284625 | 0.284625 | 0.162643 | 0 | 0.007361 | 0.101983 | 1,059 | 39 | 83 | 27.153846 | 0.820189 | 0.061379 | 0 | 0.206897 | 0 | 0 | 0.143434 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.172414 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
359421b10a8b1a05dd1594fd052b807a880e5d52 | 17,667 | py | Python | python/clean_data.py | TWJolly/CivilServiceR | 41c3ea2c9e639db1cad051491b296de120ac3489 | [
"CC0-1.0"
] | null | null | null | python/clean_data.py | TWJolly/CivilServiceR | 41c3ea2c9e639db1cad051491b296de120ac3489 | [
"CC0-1.0"
] | null | null | null | python/clean_data.py | TWJolly/CivilServiceR | 41c3ea2c9e639db1cad051491b296de120ac3489 | [
"CC0-1.0"
] | null | null | null | from boto3 import client
from datetime import datetime
from io import StringIO
from os import getenv
from pandas import concat, DataFrame, read_csv, set_option
#load_dotenv()
# To run this task while working on it, set the below to True, so that there appears to be an uncleaned file to work on,
# and so that you are pointing the uploads to test_folder rather than overwriting crucial files.
def convert_to_datetime(value):
# format: 'Closes : 05:00 pm on Wednesday 2nd December 2020'
date_elements = value.split()[-3:]
time_elements = value.split()[-7:-5]
# '2nd' => '02'
day_of_month = date_elements[0]
day_of_month = ''.join(filter(str.isdigit, day_of_month))
if len(day_of_month) == 1:
date_elements[0] = f'0{day_of_month}'
else:
date_elements[0] = day_of_month
# Account for 'midday' edgecase ('Closes : Midday on Monday 4th January 2021')
# Convert 12:00 => 11:59 to sidestep am/pm ambiguity without requiring logic in web app
if time_elements[-1] != "pm" and time_elements[-1] != "am":
if time_elements[-1].lower() == 'midday':
time_elements = ["11:59", "am"]
elif time_elements[-1].lower() == 'midnight':
time_elements = ["11:59", "pm"]
datetime_elements = date_elements + time_elements
# '02 December 2020 05:00 pm' => date object
return datetime.strptime((' ').join(datetime_elements), '%d %B %Y %I:%M %p')
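# Worked example for the scraped closing-date format parsed above:
#   convert_to_datetime("Closes : 05:00 pm on Wednesday 2nd December 2020")
#   -> datetime(2020, 12, 2, 17, 0)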
def lambda_handler(event, context):
development = False
s3_client = client(
's3',
aws_access_key_id = getenv("ACCESS_KEY"),
aws_secret_access_key = getenv("SECRET_KEY")
)
# Get list of files that have already been cleaned
original_cleaned_files = s3_client.get_object(Bucket="civil-service-jobs", Key="cleaned_files.csv")
original_cleaned_data_filenames = original_cleaned_files['Body'].read().decode('utf-8').split()
column_heading = 'csv'
if column_heading in original_cleaned_data_filenames:
original_cleaned_data_filenames.remove(column_heading)
if development:
# We have to iterate with 'while' because it may be listed more than once.
old_style_file = '2020-12-22_91257_72083_.csv'
new_style_file = '2021-01-23_95715_80682_.csv'
for removed_cleaned_data_filename in [old_style_file, new_style_file]:
while removed_cleaned_data_filename in original_cleaned_data_filenames:
original_cleaned_data_filenames.remove(removed_cleaned_data_filename)
removed_cleaned_data_filename = old_style_file
while removed_cleaned_data_filename in original_cleaned_data_filenames:
original_cleaned_data_filenames.remove(removed_cleaned_data_filename)
# Don't truncate the precious data
set_option('display.max_colwidth', 100000)
# Get raw data files to work on
# All files in raw_data/ which end csv and haven't already been cleaned
raw_data_directory_name = "raw_data/"
raw_data_filenames = set()
for object in [e for p in s3_client.get_paginator("list_objects_v2")
.paginate(Bucket="civil-service-jobs", Prefix=raw_data_directory_name)
for e in p['Contents']]:
filename = object['Key'][int(len(raw_data_directory_name)):]
if (filename == raw_data_directory_name) or (filename[-4:] != ".csv"):
continue
raw_data_filenames.add(filename)
print(len(original_cleaned_data_filenames))
print(len(raw_data_filenames))
filenames_to_clean = set(original_cleaned_data_filenames).symmetric_difference(raw_data_filenames)
# Clean data
# ==========
# Reformat some columns, convert to wide data structure (i.e. variables as individual columns instead of
# variable-value pairs), and filter to required columns.
wide_cleaned_dataframes_dictionary = {}
descriptions_and_summaries = []
for filename in filenames_to_clean:
jobs = []
print(filename)
raw_data = s3_client.get_object(Bucket="civil-service-jobs", Key=f'{raw_data_directory_name}{filename}')
body = raw_data['Body']
csv_string = body.read().decode('ISO-8859-1')
# Read job_ref as string to try and avoid getting NaNs for non-number job ref numbers.
dataframe = read_csv(StringIO(csv_string), dtype={'job_ref': str})
dataframe = dataframe[dataframe['job_ref'].notnull()]
dataframe.drop_duplicates(subset=['variable', 'job_ref'], keep='first', inplace=True)
# Reformat some columns
for index in dataframe.index:
# Rename any columns called 'stage'. We are moving to a consistent schema across
# all parts of the HIPE app, such that the column is always called 'approach'. But
# old raw data will still have the old column name.
if dataframe.loc[index, "variable"] == "stage":
dataframe.loc[index, "variable"] = "approach"
variable = dataframe.loc[index, "variable"]
value = dataframe.loc[index, "value"]
if variable == "closingdate":
dataframe.loc[index, "value"] = convert_to_datetime(value)
elif variable == "grade":
dataframe.loc[index, "value"] = value.replace("Grade : ", "")
elif variable == "approach":
dataframe.loc[index, "value"] = value.replace("Approach : ", "")
required_columns = {
"approach", # formatted as "Approach : [internal/external]".
"closingdate", # formatted as "Closes : 11:55 pm on Sunday 17th January 2021"
"date_downloaded",
"department",
"grade", # formatted as "Grade : [grade]"
"link",
# We cannot tell if location contains >1 location ("East Midlands, Eastern, London"), or a single location ("Piccadilly, Manchester")
"location",
"Number of posts",
"title",
"Type of role"
}
description_and_summary_columns = { "Job description", "Summary" } # Used later to count keywords: not saved in a file.
# Convert to wide data structure (i.e. variables as individual columns instead of variable-value pairs) while
# filtering to required columns.
for job_reference_number in dataframe.job_ref.unique():
job_data = dataframe[dataframe.job_ref == job_reference_number]
job_dictionary = { 'job_ref': int(job_reference_number) }
for column in required_columns:
row = job_data[job_data.variable == column]
job_dictionary[column] = row.value.to_string(index=False)
jobs.append(job_dictionary)
description_and_summary_dictionary = { 'job_ref': int(job_reference_number) }
for column in description_and_summary_columns:
row = job_data[job_data.variable == column]
description_and_summary_dictionary[column] = row.value.to_string(index=False).lower()
descriptions_and_summaries.append(description_and_summary_dictionary)
        job_as_dataframe = DataFrame(jobs)[['job_ref'] + list(required_columns)]  # ensure the order is the same every time
wide_cleaned_dataframes_dictionary[filename] = job_as_dataframe
if(len(wide_cleaned_dataframes_dictionary) <1):
return
all_cleaned_data_dataframe = concat(wide_cleaned_dataframes_dictionary.values()).drop_duplicates().reset_index(drop=True)
descriptions_and_summaries_dataframe = DataFrame(descriptions_and_summaries)
# Update tables which count the number of instances of tokens. For grades, roles, and keywords.
# =============================================================================================
# Grades: Download files and reorder contents where necessary for avoiding false positives
grades_lookup = s3_client.get_object(Bucket="civil-service-jobs", Key="grade_lookup.csv")
grades_lookup_dataframe = read_csv(StringIO(grades_lookup['Body'].read().decode('windows-1252')))
old_grades_count_data = s3_client.get_object(Bucket="civil-service-jobs", Key="data/grades_data/grades_data.csv")
# Read job_ref as str - otherwise we get mixed types in the column (int and str) because of the two formats: '11111' and 'old_22222'
old_grades_count_dataframe = read_csv(StringIO(old_grades_count_data['Body'].read().decode('utf-8')), dtype={'job_ref': str})
# Move 'Executive Officer' to the end to be searched for last, so that we aren't getting false positives by counting
# 'Higher Executive Officer' and 'Senior Executive Officer' jobs as EO.
# This does not fix backwards: the previous issue of counting HEO jobs as EO has not been fixed for past data.
grade_labels = list(grades_lookup_dataframe.label.unique())
grade_names = list(grades_lookup_dataframe.name.unique())
eo_label = 'Executive Officer'
eo_index = grade_labels.index(eo_label)
eo_name = grade_names[eo_index]
grade_labels.remove(eo_label)
grade_labels.append(eo_label)
grade_names.remove(eo_name)
grade_names.append(eo_name)
# Roles: Download files and reorder contents where necessary for avoiding false positives
roles_lookup = s3_client.get_object(Bucket="civil-service-jobs", Key="role_lookup.csv")
roles_lookup_dataframe = read_csv(StringIO(roles_lookup['Body'].read().decode('windows-1252')))
role_type_groups = list(roles_lookup_dataframe.role_type_group.unique())
old_roles_count_data = s3_client.get_object(Bucket="civil-service-jobs", Key="data/roles_data/roles_data.csv")
# Read job_ref as str - otherwise we get mixed types in the column (int and str) because of the two formats: '11111' and 'old_22222'
old_roles_count_dataframe = read_csv(StringIO(old_roles_count_data['Body'].read().decode('utf-8')), dtype={'job_ref': str})
# There are some role-types whose names are a subset of another role-type (e.g. below), which means that searching for
# the presence of the shorter role-type will produce false positives unless we both search for the longer role-type
# and remove it from the string afterwards. We could rely on the delimiter '!!!' if this cleaning task did not need
# to be backwards-compatible with data scraped before the introduction of the delimiter; in the future we might be
# able to move to that preferable delimiter-based way of doing things, if we decide that we don't need this task to
# be compatible with pre-2021 data.
# Trade < International Trade
# Finance < Corporate Finance
# Audit < Internal Audit
# Social Research < Social Research / Market Research
# Market Research < Social Research / Market Research
role_labels = list(roles_lookup_dataframe.label.unique())
for role_type in ['Trade', 'Finance', 'Audit', 'Social Research', 'Market Research']:
role_labels.remove(role_type)
role_labels.append(role_type)
# Keywords: Download files and reorder contents where necessary for avoiding false positives
keywords = s3_client.get_object(Bucket="civil-service-jobs", Key="data/key_words_context/key_words_context.csv")
keywords_dataframe = read_csv(StringIO(keywords['Body'].read().decode('windows-1252')))
old_keywords_count_data = s3_client.get_object(Bucket="civil-service-jobs", Key="data/key_words_data/key_words_data.csv")
# Read job_ref as str - otherwise we get mixed types in the column (int and str) because of the two formats: '11111' and 'old_22222'
old_keywords_count_dataframe = read_csv(StringIO(old_keywords_count_data['Body'].read().decode('utf-8')), dtype={'job_ref': str})
# Reordering for similar reasons as for role_labels and grade_labels/grade_names: avoid false positives.
keyword_labels = list(keywords_dataframe.label.unique())
for keyword_label in ['global', 'mental health', 'international']:
keyword_labels.remove(keyword_label)
keyword_labels.append(keyword_label)
# Grades and roles: Count the instances of each in cleaned data
new_grade_counts = []
new_role_counts = []
for job_reference_number in all_cleaned_data_dataframe.job_ref.unique():
job_data = all_cleaned_data_dataframe[all_cleaned_data_dataframe.job_ref == job_reference_number]
# Grades: Search for labels or, failing that, names, in the cleaned grade
job_grade = job_data.grade.to_string(index=False)
grade_token = None
for label in grade_labels:
if label in job_grade:
grade_token = label
break
if grade_token == None:
for index, name in enumerate(grade_names):
if name in job_grade:
grade_token = grade_labels[index]
break
if grade_token != None:
new_grade_counts.append({
'job_ref': int(job_reference_number),
'label': grade_token,
'count': 1 })
# Roles: Search for roles in the Type of role column (which can contain multiple roles)
# NB: For old-format raw data files (before the scrape task began to delimit roles by looking for a <br> tag in the
# HTML and inserting '!!!'), when there are multiple roles they come back concatenated together, like:
# Administration / Corporate SupportHuman ResourcesOperational Delivery. Regexing for CamelCase does not solve this
# in all cases because Civil Service Jobs is inconsistent about whether it inserts a space after each role.
roles = job_data['Type of role'].to_string(index=False)
for label in role_labels:
if label in roles:
role_type_group = roles_lookup_dataframe[roles_lookup_dataframe.label == label]["role_type_group"].to_string(index=False)
new_role_counts.append({
'job_ref': int(job_reference_number),
'label': role_type_group,
'count': 1 })
# Remove the label from the roles to prevent false positives (see earlier comment).
roles = roles.replace(label, '(replaced)')
# Keywords: Search for keywords in the job descriptions and summaries.
new_keyword_counts = []
for job_reference_number in descriptions_and_summaries_dataframe.job_ref.unique():
job_data = descriptions_and_summaries_dataframe[descriptions_and_summaries_dataframe.job_ref == job_reference_number]
job_text = ''
for column in description_and_summary_columns:
job_text += job_data[column].to_string(index=False).lower()
for word in keyword_labels:
word_count = job_text.count(word)
if word_count > 0:
cause_area_label = keywords_dataframe[keywords_dataframe.label == word]["label"].to_string(index=False)
new_keyword_counts.append({
'job_ref': int(job_reference_number),
'label': cause_area_label,
'count': word_count })
# Remove the word from the text to prevent false positives (see earlier comment).
job_text = job_text.replace(word, '(replaced)')
# Combine old and new data without duplicating rows (incorrect data resulting from previous bugs is NOT overwritten.)
grades_dataframe = concat([old_grades_count_dataframe, DataFrame(new_grade_counts)]).drop_duplicates().reset_index(drop=True)
roles_dataframe = concat([old_roles_count_dataframe, DataFrame(new_role_counts)]).drop_duplicates().reset_index(drop=True)
keywords_dataframe = concat([old_keywords_count_dataframe, DataFrame(new_keyword_counts)]).drop_duplicates().reset_index(drop=True)
# Upload
# ======
# Cleaned data
for original_filename, dataframe in wide_cleaned_dataframes_dictionary.items():
destination = 'cleaned_data/cleaned_' + original_filename
if development:
destination = 'test_folder/' + destination
dataframe_as_csv = dataframe.to_csv(index=False, encoding='unicode')
s3_client.put_object(Body=dataframe_as_csv, Bucket="civil-service-jobs", Key=destination)
# List of cleaned files (cleaned_files.csv)
destination = 'cleaned_files.csv'
if development:
destination = 'test_folder/' + destination
todays_cleaned_data_filenames = list(wide_cleaned_dataframes_dictionary.keys())
# Use set to remove duplicates
cleaned_files = list(set(todays_cleaned_data_filenames + original_cleaned_data_filenames))
cleaned_files.sort()
cleaned_files_dataframe = DataFrame(cleaned_files, columns=['csv'])
dataframe_as_csv = cleaned_files_dataframe.to_csv(index=False, encoding='unicode')
s3_client.put_object(Body=dataframe_as_csv, Bucket="civil-service-jobs", Key=destination)
# Grades, roles, keyword count tables
token_count_tables = {
'grades_data/grades_data.csv': grades_dataframe,
'key_words_data/key_words_data.csv': keywords_dataframe,
'roles_data/roles_data.csv': roles_dataframe
}
for filename, dataframe in token_count_tables.items():
destination = 'data/' + filename
if development:
destination = 'test_folder/' + destination
dataframe_as_csv = dataframe.to_csv(index=False, encoding='unicode')
s3_client.put_object(Body=dataframe_as_csv, Bucket="civil-service-jobs", Key=destination)
    # concatenate and upload the data
cleaned_data = "cleaned_data"
cleaned_data_files = set()
for object in [e for p in s3_client.get_paginator("list_objects_v2")
.paginate(Bucket="civil-service-jobs", Prefix=cleaned_data)
for e in p['Contents']]:
filename = object['Key'][int(len(cleaned_data)):]
if (filename == cleaned_data) or (filename[-4:] != ".csv"):
continue
cleaned_data_files.add(filename)
all_data = []
for file in cleaned_data_files:
print(file)
data = s3_client.get_object(Bucket="civil-service-jobs", Key=f'{cleaned_data}{file}')
data = read_csv(StringIO(data['Body'].read().decode('windows-1252')))
all_data.append(data)
all_cleaned_data = concat(all_data, sort=False)
output_file = 'data/cleaned_data/cleaned_data.csv'
all_data_as_csv = all_cleaned_data.to_csv(index=False, encoding='unicode')
s3_client.put_object(Body=all_data_as_csv, Bucket="civil-service-jobs", Key=output_file)
| 47.491935 | 139 | 0.720779 | 2,429 | 17,667 | 4.999588 | 0.191025 | 0.03442 | 0.02108 | 0.027174 | 0.376729 | 0.325675 | 0.263916 | 0.227437 | 0.214262 | 0.199934 | 0 | 0.013244 | 0.175129 | 17,667 | 371 | 140 | 47.619946 | 0.820078 | 0.280749 | 0 | 0.143478 | 0 | 0 | 0.126227 | 0.029519 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008696 | false | 0 | 0.021739 | 0 | 0.03913 | 0.017391 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35959e1a774db0bc1da90c9fc912c85e5a93ab45 | 990 | py | Python | backend/project/server/abstract/models.py | Kamil732/DK-team | d99482111761f8c12b486246c539542f4f41c6c3 | [
"CC0-1.0"
] | 4 | 2021-07-05T14:26:52.000Z | 2021-08-25T15:54:39.000Z | backend/project/server/abstract/models.py | Kamil732/beauty-salon | d99482111761f8c12b486246c539542f4f41c6c3 | [
"CC0-1.0"
] | null | null | null | backend/project/server/abstract/models.py | Kamil732/beauty-salon | d99482111761f8c12b486246c539542f4f41c6c3 | [
"CC0-1.0"
] | null | null | null | from django.db import models
class Color(models.Model):
COLORS = (
('black', 'Czarny'),
('light-blue', 'Jasny niebieski'),
('blue', 'Niebieski'),
('light-green', 'Jasny Zielony'),
('green', 'Zielony'),
('pink', 'Różowy'),
('purple', 'Fioletowy'),
('brown', 'Brązowy'),
('yellow', 'Żółty'),
('orange', 'Pomarańczowy'),
)
    color = models.CharField(max_length=15, choices=COLORS, default=COLORS[0][0])  # default should be a stored value ('black'), not the display label
class Meta:
abstract = True
class Group(models.Model):
name = models.CharField(max_length=30, unique=True)
parent = models.ForeignKey('self', on_delete=models.CASCADE, blank=True, null=True, related_name='children')
def __str__(self):
full_path = [self.name]
k = self.parent
while k is not None:
full_path.append(k.name)
k = k.parent
return ' -> '.join(full_path[::-1])
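    # e.g. a Group named "Child" whose parent chain is "Parent" -> "Root" renders as
    # "Root -> Parent -> Child" (hypothetical group names).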
class Meta:
abstract = True
| 24.75 | 112 | 0.557576 | 109 | 990 | 4.963303 | 0.587156 | 0.044362 | 0.066543 | 0.088725 | 0.081331 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00979 | 0.277778 | 990 | 39 | 113 | 25.384615 | 0.746853 | 0 | 0 | 0.137931 | 0 | 0 | 0.168687 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.034483 | 0 | 0.37931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3595e312ad6901c6815a8d2c1d088be7f56b326f | 12,692 | py | Python | project3-mlops/05-Model-Management.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | null | null | null | project3-mlops/05-Model-Management.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | null | null | null | project3-mlops/05-Model-Management.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | 53 | 2022-01-11T19:06:06.000Z | 2022-03-25T19:27:48.000Z | # Databricks notebook source
# MAGIC %md
# MAGIC # Model Management
# MAGIC
# MAGIC An MLflow model is a standard format for packaging models that can be used on a variety of downstream tools. This lesson provides a generalizable way of handling machine learning models created in and deployed to a variety of environments.
# MAGIC
# MAGIC ##  In this lesson you:<br>
# MAGIC - Introduce model management best practices
# MAGIC - Store and use different flavors of models for different deployment environments
# MAGIC - Apply models combined with arbitrary pre and post-processing code using Python models
# MAGIC
# MAGIC ## Prerequisites
# MAGIC - Web browser: Chrome
# MAGIC - A cluster configured with **8 cores** and **DBR 7.0 ML**
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Classroom-Setup
# MAGIC
# MAGIC For each lesson to execute correctly, please make sure to run the **`Classroom-Setup`** cell at the<br/>
# MAGIC start of each lesson (see the next cell) and the **`Classroom-Cleanup`** cell at the end of each lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Setup"
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Managing Machine Learning Models
# MAGIC
# MAGIC Once a model has been trained and bundled with the environment it was trained in, the next step is to package the model so that it can be used by a variety of serving tools. The current deployment options include Docker-based REST servers, Spark using streaming or batch, and cloud platforms such as Azure ML and AWS SageMaker. Packaging the final model in a platform-agnostic way offers the most flexibility in deployment options and allows for model reuse across a number of platforms.
# MAGIC
# MAGIC **MLflow models is a tool for deploying models that's agnostic to both the framework the model was trained in and the environment it's being deployed to. It's convention for packaging machine learning models that offers self-contained code, environments, and models.** The main abstraction in this package is the concept of **flavors,** which are different ways the model can be used. For instance, a TensorFlow model can be loaded as a TensorFlow DAG or as a Python function: using the MLflow model convention allows for the model to be used regardless of the library that was used to train it originally.
# MAGIC
# MAGIC The primary difference between MLflow projects and models is that models are geared more towards inference and serving. The `python_function` flavor of models gives a generic way of bundling models regardless of whether it was `sklearn`, `keras`, or any other machine learning library that trained the model. We can thereby deploy a python function without worrying about the underlying format of the model. **MLflow therefore maps any training framework to any deployment environment**, massively reducing the complexity of inference.
# MAGIC
# MAGIC Finally, arbitrary pre and post-processing steps can be included in the pipeline such as data loading, cleansing, and featurization. This means that the full pipeline, not just the model, can be preserved.
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/mlflow-models-enviornments.png" style="height: 400px; margin: 20px"/></div>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Model Flavors
# MAGIC
# MAGIC Flavors offer a way of saving models that is agnostic to the training environment, making it significantly easier to use them across various deployment options. Some of the most popular built-in flavors include the following:<br><br>
# MAGIC
# MAGIC * <a href="https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#module-mlflow.pyfunc" target="_blank">mlflow.pyfunc</a>
# MAGIC * <a href="https://mlflow.org/docs/latest/python_api/mlflow.keras.html#module-mlflow.keras" target="_blank">mlflow.keras</a>
# MAGIC * <a href="https://mlflow.org/docs/latest/python_api/mlflow.pytorch.html#module-mlflow.pytorch" target="_blank">mlflow.pytorch</a>
# MAGIC * <a href="https://mlflow.org/docs/latest/python_api/mlflow.sklearn.html#module-mlflow.sklearn" target="_blank">mlflow.sklearn</a>
# MAGIC * <a href="https://mlflow.org/docs/latest/python_api/mlflow.spark.html#module-mlflow.spark" target="_blank">mlflow.spark</a>
# MAGIC * <a href="https://mlflow.org/docs/latest/python_api/mlflow.tensorflow.html#module-mlflow.tensorflow" target="_blank">mlflow.tensorflow</a>
# MAGIC
# MAGIC Models also offer reproducibility since the run ID and the timestamp of the run are preserved as well.
# MAGIC
# MAGIC <a href="https://mlflow.org/docs/latest/python_api/index.html" target="_blank">You can see all of the flavors and modules here.</a>
# MAGIC
# MAGIC <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/mlflow-models.png" style="height: 400px; margin: 20px"/></div>
# COMMAND ----------
# MAGIC %md
# MAGIC To demonstrate the power of model flavors, let's first create two models using different frameworks.
# MAGIC
# MAGIC Import the data.
# COMMAND ----------
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv("/dbfs/mnt/training/airbnb/sf-listings/airbnb-cleaned-mlflow.csv")
X_train, X_test, y_train, y_test = train_test_split(df.drop(["price"], axis=1), df[["price"]].values.ravel(), random_state=42)
# COMMAND ----------
display(df)
# COMMAND ----------
# MAGIC %md
# MAGIC Train a random forest model.
# COMMAND ----------
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
rf = RandomForestRegressor(n_estimators=100, max_depth=5)
rf.fit(X_train, y_train)
rf_mse = mean_squared_error(y_test, rf.predict(X_test))
rf_mse
# COMMAND ----------
# MAGIC %md
# MAGIC Train a neural network.
# COMMAND ----------
import tensorflow as tf
tf.random.set_seed(42) # For reproducibility
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
nn = Sequential([
Dense(40, input_dim=21, activation='relu'),
Dense(20, activation='relu'),
Dense(1, activation='linear')
])
nn.compile(optimizer="adam", loss="mse")
nn.fit(X_train, y_train, validation_split=.2, epochs=40, verbose=2)
# nn.evaluate(X_test, y_test)
nn_mse = mean_squared_error(y_test, nn.predict(X_test))
nn_mse
# COMMAND ----------
# MAGIC %md
# MAGIC Now log the two models.
# COMMAND ----------
import mlflow.sklearn
with mlflow.start_run(run_name="RF Model") as run:
mlflow.sklearn.log_model(rf, "model")
mlflow.log_metric("mse", rf_mse)
sklearnRunID = run.info.run_uuid
sklearnURI = run.info.artifact_uri
experimentID = run.info.experiment_id
# COMMAND ----------
import mlflow.keras
with mlflow.start_run(run_name="NN Model") as run:
mlflow.keras.log_model(nn, "model")
mlflow.log_metric("mse", nn_mse)
kerasRunID = run.info.run_uuid
kerasURI = run.info.artifact_uri
# COMMAND ----------
# MAGIC %md
# MAGIC Now we can use both of these models in the same way, even though they were trained by different packages.
# COMMAND ----------
import mlflow.pyfunc
rf_pyfunc_model = mlflow.pyfunc.load_model(model_uri="runs:/"+sklearnRunID+"/model")
type(rf_pyfunc_model)
# COMMAND ----------
import mlflow.pyfunc
nn_pyfunc_model = mlflow.pyfunc.load_model(model_uri="runs:/"+kerasRunID+"/model")
type(nn_pyfunc_model)
# COMMAND ----------
# MAGIC %md
# MAGIC Both will implement a predict method. The `sklearn` model is still of type `sklearn` because this package natively implements this method.
# COMMAND ----------
rf_pyfunc_model.predict(X_test)
# COMMAND ----------
nn_pyfunc_model.predict(X_test)
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Pre and Post Processing Code using `pyfunc`
# MAGIC
# MAGIC A `pyfunc` is a generic python model that can define any model, regardless of the libraries used to train it. As such, it's defined as a directory structure with all of the dependencies. It is then "just an object" with a predict method. Since it makes very few assumptions, it can be deployed using MLflow, SageMaker, a Spark UDF or in any other environment.
# MAGIC
# MAGIC <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> Check out <a href="https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-create-custom" target="_blank">the `pyfunc` documentation for details</a><br>
# MAGIC <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> Check out <a href="https://github.com/mlflow/mlflow/blob/master/docs/source/models.rst#example-saving-an-xgboost-model-in-mlflow-format" target="_blank">this README for generic example code and integration with `XGBoost`</a>
# COMMAND ----------
# MAGIC %md
# MAGIC To demonstrate how `pyfunc` works, create a basic class that adds `n` to the input values.
# MAGIC
# MAGIC Define a model class.
# COMMAND ----------
import mlflow.pyfunc
class AddN(mlflow.pyfunc.PythonModel):
def __init__(self, n):
self.n = n
def predict(self, context, model_input):
return model_input.apply(lambda column: column + self.n)
# COMMAND ----------
# MAGIC %md
# MAGIC Construct and save the model.
# COMMAND ----------
from mlflow.exceptions import MlflowException
model_path = f"{workingDir}/add_n_model2"
add5_model = AddN(n=5)
dbutils.fs.rm(model_path, True) # Allows you to rerun the code multiple times
mlflow.pyfunc.save_model(path=model_path.replace("dbfs:", "/dbfs"), python_model=add5_model)
# COMMAND ----------
# MAGIC %md
# MAGIC Load the model in `python_function` format.
# COMMAND ----------
loaded_model = mlflow.pyfunc.load_model(model_path)
# COMMAND ----------
# MAGIC %md
# MAGIC Evaluate the model.
# COMMAND ----------
import pandas as pd
model_input = pd.DataFrame([range(10)])
model_output = loaded_model.predict(model_input)
assert model_output.equals(pd.DataFrame([range(5, 15)]))
model_output
# COMMAND ----------
# MAGIC %md
# MAGIC ## Review
# MAGIC **Question:** How do MLflow projects differ from models?
# MAGIC **Answer:** The focus of MLflow projects is reproducibility of runs and packaging of code. MLflow models focuses on various deployment environments.
# MAGIC
# MAGIC **Question:** What is a ML model flavor?
# MAGIC **Answer:** Flavors are a convention that deployment tools can use to understand the model, which makes it possible to write tools that work with models from any ML library without having to integrate each tool with each library. Instead of having to map each training environment to a deployment environment, ML model flavors manages this mapping for you.
# MAGIC
# MAGIC **Question:** How do I add pre and post processing logic to my models?
# MAGIC **Answer:** A model class that extends `mlflow.pyfunc.PythonModel` allows you to have load, pre-processing, and post-processing logic.
# COMMAND ----------
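# MAGIC %md
# MAGIC As an optional illustration of the last answer above, the next cell sketches a `pyfunc` model that wraps the random forest trained earlier and adds simple pre and post-processing. The median imputation, the clipping of negative predictions, and the `predicted_price` column name are made-up choices for demonstration, not conventions from the original lesson.
# COMMAND ----------
import numpy as np
class RFWithProcessing(mlflow.pyfunc.PythonModel):
    def __init__(self, model):
        self.model = model
    def predict(self, context, model_input):
        # Pre-processing: fill any missing values so the sklearn model never sees NaNs
        cleaned = model_input.fillna(model_input.median())
        predictions = self.model.predict(cleaned)
        # Post-processing: clip negative predictions and return a labelled DataFrame
        return pd.DataFrame({"predicted_price": np.clip(predictions, 0, None)})
rf_with_processing = RFWithProcessing(rf)
rf_with_processing.predict(None, X_test)
# COMMAND ----------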
# MAGIC %md
# MAGIC ##  Classroom-Cleanup<br>
# MAGIC
# MAGIC Run the **`Classroom-Cleanup`** cell below to remove any artifacts created by this lesson.
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Cleanup"
# COMMAND ----------
# MAGIC %md
# MAGIC ##  Next Steps
# MAGIC
# MAGIC Start the labs for this lesson, [Model Management Lab]($./Labs/05-Lab)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Additional Topics & Resources
# MAGIC
# MAGIC **Q:** Where can I find out more information on MLflow Models?
# MAGIC **A:** Check out <a href="https://www.mlflow.org/docs/latest/models.html" target="_blank">the MLflow documentation</a>
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2020 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="http://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="http://help.databricks.com/">Support</a>
| 41.887789 | 617 | 0.733533 | 1,896 | 12,692 | 4.844937 | 0.263186 | 0.025038 | 0.028957 | 0.031025 | 0.250708 | 0.206619 | 0.160026 | 0.152841 | 0.152841 | 0.152841 | 0 | 0.007209 | 0.136543 | 12,692 | 302 | 618 | 42.02649 | 0.831006 | 0.769855 | 0 | 0.080645 | 0 | 0 | 0.067494 | 0.032105 | 0 | 0 | 0 | 0 | 0.016129 | 1 | 0.032258 | false | 0 | 0.225806 | 0.016129 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
359a5e7d272bd031222b0d851debb0086b2326fd | 19,096 | py | Python | xl_link/mappers.py | 0Hughman0/xl_link | 7e91e9caccc01f4f58fda51cf3d840f30479090a | [
"MIT"
] | 9 | 2017-11-17T18:44:11.000Z | 2021-11-17T11:10:10.000Z | xl_link/mappers.py | 0Hughman0/xl_link | 7e91e9caccc01f4f58fda51cf3d840f30479090a | [
"MIT"
] | null | null | null | xl_link/mappers.py | 0Hughman0/xl_link | 7e91e9caccc01f4f58fda51cf3d840f30479090a | [
"MIT"
] | null | null | null | import pandas as pd
try:
from pandas.io.formats.excel import ExcelFormatter
except ImportError:
from pandas.formats.format import ExcelFormatter
from pandas.io.common import _stringify_path
from .xl_types import XLCell
from .chart_wrapper import create_chart, SINGLE_CATEGORY_CHARTS, CATEGORIES_REQUIRED_CHARTS
def get_xl_ranges(frame_index, frame_columns,
sheet_name='Sheet1',
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
merge_cells=True):
"""
Deduces location of data_range, index_range and col_range within excel spreadsheet, given the parameters provided.
Does not require an actual DataFrame, which could be useful!
Parameters
----------
frame_index: Pandas Index or Array-like
to determine location of index within spreadsheet.
frame_columns: Pandas Index or Array-like
used to determine location of column within spreadsheet.
excel_writer : string or ExcelWriter
sheet_name : str
default ‘Sheet1’, Name of sheet which will contain DataFrame
columns : sequence
optional, Columns to write
header : bool or list of strings,
default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names
index : bool
default True. Write row names (index)
index_label : str or sequence
default None. Column label for index column(s) if desired. If None is given, and header and index are True, then the index names are used. A sequence should be given if the
DataFrame uses MultiIndex.
startrow : int
upper left cell row to dump data frame
startcol : int
upper left cell column to dump data frame
merge_cells : bool
default True. Write MultiIndex and Hierarchical Rows as merged cells.
Returns
-------
data_range, index_range, col_range : XLRange
Each range represents where the data, index and columns can be found on the spreadsheet
empty_f : DatFrame
an empty DataFrame with matching Indices.
"""
empty_f = pd.DataFrame(index=frame_index, columns=frame_columns)
formatter = ExcelFormatter(empty_f,
cols=columns,
header=header,
index=index,
index_label=index_label,
merge_cells=merge_cells)
excel_header = list(formatter._format_header())
col_start, col_stop = excel_header[0], excel_header[-1]
col_start_cell = XLCell(col_stop.row + startrow, col_start.col + startcol, sheet_name)
col_stop_cell = XLCell(col_stop.row + startrow, col_stop.col + startcol, sheet_name)
if isinstance(empty_f.index, pd.MultiIndex):
col_start_cell = col_start_cell.translate(0, 1)
col_range = col_start_cell - col_stop_cell
body = list(formatter._format_body())
if empty_f.index.name or index_label:
body.pop(0) # gets rid of index label cell that comes first!
index_start_cell = XLCell(body[0].row + startrow, body[0].col + startcol + empty_f.index.nlevels - 1, sheet_name)
index_stop_cell = XLCell(body[-1].row + startrow, body[0].col + startcol + empty_f.index.nlevels - 1, sheet_name)
index_range = index_start_cell - index_stop_cell
data_start_cell = XLCell(index_start_cell.row, col_start_cell.col, sheet_name)
data_stop_cell = XLCell(index_stop_cell.row, col_stop_cell.col, sheet_name)
data_range = data_start_cell - data_stop_cell
return data_range, index_range, col_range, empty_f
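# A minimal usage sketch (index/column labels are illustrative; default placement assumed):
#   data_rng, index_rng, col_rng, empty_f = get_xl_ranges(["Mon", "Tues"], ["Breakfast", "Lunch"],
#                                                         sheet_name="Sheet1")
#   Each returned range is an XLRange locating the data block, the index column and the header row.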
def write_frame(f, excel_writer, to_excel_args=None):
"""
Write a Pandas DataFrame to excel by calling to_excel, returning an XLMap, that can be used to determine
the position of parts of f, using pandas indexing.
Parameters
----------
f : DataFrame
Frame to write to excel
excel_writer : str or ExcelWriter
Path or existing Excel Writer to use to write frame
to_excel_args : dict
Additional arguments to pass to DataFrame.to_excel, see docs for DataFrame.to_excel
Returns
-------
XLMap :
Mapping that corresponds to the position in the spreadsheet that frame was written to.
"""
xlf = XLDataFrame(f)
return xlf.to_excel(excel_writer, **to_excel_args)
def _mapper_to_xl(value):
"""
Convert mapper frame result to XLRange or XLCell
"""
if isinstance(value, XLCell):
return value
if isinstance(value, pd.Series):
return value.values[0] - value.values[-1]
if isinstance(value, pd.DataFrame):
return value.values[0, 0] - value.values[-1, -1]
raise TypeError("Could not conver {} to XLRange or XLCell".format(value))
class _SelectorProxy:
"""
Proxy object that intercepts calls to Pandas DataFrame indexers, and re-interprets result into excel locations.
Parameters
----------
mapper_frame: DataFrame
        with the same index as the DataFrame it represents; however, each cell contains
        the location it occupies within the spreadsheet.
selector_name: str
name of the indexer SelectorProxy is emulating, i.e. loc, iloc, ix, iat or at
Notes
-----
Only implements __getitem__ behaviour of indexers.
"""
def __init__(self, mapper_frame, selector_name):
self.mapper_frame = mapper_frame
self.selector_name = selector_name
def __getitem__(self, key):
val = getattr(self.mapper_frame, self.selector_name)[key]
return _mapper_to_xl(val)
class XLMap:
"""
An object that maps a Pandas DataFrame to its positions on an Excel spreadsheet.
Provides access to basic pandas indexers - __getitem__, loc, iloc, ix, iat and at.
These indexers are modified such that they return the cell/range of the result.
The idea is to make the location of the data within the spreadsheet easy to access by using Pandas indexing syntax.
For example, it can be used to create charts more easily (see the example below).
Notes
-----
Not recommended to be created directly; instead create it via XLDataFrame.to_excel.
XLMap can only go 'one level deep' in terms of indexing, because each indexer always returns either an XLCell,
or an XLRange. The only workaround is to reduce the size of your DataFrame BEFORE you call write_frame.
This limitation drastically simplifies the implementation. Examples of what WON'T WORK:
>>> xlmap.loc['Mon':'Tues', :].index
AttributeError: 'XLRange' object has no attribute 'index'
>>> xlmap.index['Mon':'Tues'] # Doesn't work because index is not a Pandas Index, but an XLRange.
TypeError: unsupported operand type(s) for -: 'str' and 'int'
Parameters
----------
data_range, index_range, column_range : XLRange
that represents the region the DataFrame's data sits in.
f : DataFrame
that has been written to excel.
Attributes
----------
index : XLRange
range that the index column occupies
columns : XLRange
range that the frame columns occupy
data : XLRange
range that the frame data occupies
writer : Pandas.ExcelWriter
writer used to create spreadsheet
sheet : object
sheet object corresponding to the sheet the frame was written to, handy if you want to insert a chart into the same sheet
Examples
--------
>>> calories_per_meal = XLDataFrame(columns=("Meal", "Mon", "Tues", "Weds", "Thur"),
data={'Meal': ('Breakfast', 'Lunch', 'Dinner', 'Midnight Snack'),
'Mon': (15, 20, 12, 3),
'Tues': (5, 16, 3, 0),
'Weds': (3, 22, 2, 8),
'Thur': (6, 7, 1, 9)})
>>> calories_per_meal.set_index("Meal", drop=True, inplace=True)
Write to excel
>>> writer = pd.ExcelWriter("Example.xlsx", engine='xlsxwriter')
>>> xlmap = calories_per_meal.to_excel(writer, sheet_name="XLLinked") # returns the XLMap
Create chart with XLLink
>>> workbook = writer.book
>>> xl_linked_sheet = writer.sheets["XLLinked"]
>>> xl_linked_chart = workbook.add_chart({'type': 'column'})
>>> for time in calories_per_meal.index:
>>> xl_linked_chart.add_series({'name': time,
'categories': xlmap.columns.frange,
'values': xlmap.loc[time].frange})
"""
def __init__(self, data_range, index_range, column_range, f, writer=None):
self.index = index_range
self.columns = column_range
self.data = data_range
self._f = f.copy()
self.writer = writer
self.book = writer.book
self.sheet = writer.sheets[self.index.sheet]
self._mapper_frame = f.copy().astype(XLCell)
x_range = self._f.index.size
y_range = self._f.columns.size
for x in range(x_range):
for y in range(y_range):
self._mapper_frame.values[x, y] = data_range[x, y]
@property
def f(self):
"""
for convenience provides read-only access to the DataFrame originally written to excel.
"""
return self._f
@property
def df(self):
"""
for convenience provides read-only access to the DataFrame originally written to excel.
"""
return self._f
def __repr__(self):
return "<XLMap: index: {}, columns: {}, data: {}>".format(self.index, self.columns, self.data)
def create_chart(self, type_='scatter',
values=None, categories=None, names=None,
subtype=None,
title=None, x_axis_name=None, y_axis_name=None):
"""
Create excel chart object based off of data within the Frame.
Parameters
----------
type_ : str
Type of chart to create.
values : str or list or tuple
label or list of labels corresponding to the column(s) to use as values for each series in the chart.
Default: all columns.
categories : str or list or tuple
label or list of labels corresponding to the column(s) to use as categories for each series in the chart.
Default: use the index for 'scatter', or None for everything else.
names : str or list or tuple
str or list of strs corresponding to the names of each series in the chart.
Default: column names corresponding to values.
subtype : str
subtype of type, only available for some chart types e.g. bar, see Excel writing package for details
title : str
chart title
x_axis_name : str
used as label on x_axis
y_axis_name : str
used as label on y_axis
Returns
-------
Chart object corresponding to the engine selected
Notes
-----
values, categories parameters can only correspond to columns.
"""
if names is None and categories is None:
names = tuple(name for name in self.f.columns.values)
elif names is None and isinstance(categories, (str, int, list, tuple)):
names = categories
elif isinstance(names, (str, list, tuple)):
names = names
else:
raise TypeError("Couldn't understand names input: " + names)
if values is None:
values = tuple(self[value] for value in self.f.columns)
elif isinstance(values, list) or isinstance(values, tuple):
values = tuple(self[value] for value in values)
else:
values = self[values]
if categories is None and (type_ in SINGLE_CATEGORY_CHARTS and isinstance(values, tuple)) or \
type_ in CATEGORIES_REQUIRED_CHARTS:
categories = self.index # Default, use x as index
elif categories is None:
pass
elif isinstance(categories, (list, tuple)):
categories = list(self[category] for category in categories)
else:
categories = self[categories]
return create_chart(self.book, self.writer.engine, type_,
values, categories, names,
subtype, title,
x_axis_name, y_axis_name)
def __getitem__(self, key):
"""
Emulates DataFrame.__getitem__ (DataFrame[key] syntax), see Pandas DataFrame indexing for help on behaviour.
Will return the location of the columns found, rather than the underlying data.
Parameters
----------
key : hashable or array-like
hashables, corresponding to the names of the columns desired.
Returns
-------
XLRange :
corresponding to position of found column(s) within spreadsheet
Example
-------
>>> xlmap['Col 1']
<XLRange: B2:B10>
"""
val = self._mapper_frame[key]
return _mapper_to_xl(val)
@property
def loc(self):
"""
Proxy for DataFrame.loc, see Pandas DataFrame loc help for behaviour.
Will return location result rather than underlying data.
Returns
-------
XLCell or XLRange
corresponding to position of DataFrame, Series or Scalar found within spreadsheet.
Example
-------
>>> xlmap.loc['Tues']
<XLRange: A2:D2>
"""
return _SelectorProxy(self._mapper_frame, 'loc')
@property
def iloc(self):
"""
Proxy for DataFrame.iloc, see Pandas DataFrame iloc help for behaviour.
Will return location result rather than underlying data.
Returns
-------
XLCell or XLRange
corresponding to position of DataFrame, Series or Scalar found within spreadsheet.
Example
-------
>>> xlmap.iloc[3, :]
<XLRange: A2:D2>
"""
return _SelectorProxy(self._mapper_frame, 'iloc')
@property
def ix(self):
"""
Proxy for DataFrame.ix, see Pandas DataFrame ix help for behaviour. (That said this is deprecated since 0.20!)
Will return location result rather than underlying data.
Returns
-------
XLCell or XLRange
corresponding to position of DataFrame, Series or Scalar found within spreadsheet.
Example
-------
>>> xlmap.ix[3, :]
<XLRange A2:D2>
"""
return _SelectorProxy(self._mapper_frame, 'ix')
@property
def iat(self):
"""
Proxy for DataFrame.iat, see Pandas DataFrame iat help for behaviour.
Will return location result rather than underlying data.
Returns
-------
XLCell
location corresponding to position value within spreadsheet.
Example
-------
>>> xlmap.iat[3, 2]
<XLCell C3>
"""
return _SelectorProxy(self._mapper_frame, 'iat')
@property
def at(self):
"""
Proxy for DataFrame.at, see Pandas DataFrame at help for behaviour.
Will return location result rather than underlying data.
Returns
-------
XLCell
location corresponding to position value within spreadsheet.
Example
-------
>>> xlmap.at["Mon", "Lunch"]
<XLCell: C3>
"""
return _SelectorProxy(self._mapper_frame, 'at')
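# --- Illustrative sketch (not part of the original module) ---
# Shows how an XLMap returned by XLDataFrame.to_excel might be queried and used to build
# a chart; the column label 'Mon' is an assumption taken from the docstring example above.
def _example_use_xlmap(xlmap):
    data_cells = xlmap['Mon']      # XLRange of the 'Mon' column within the spreadsheet
    first_row = xlmap.iloc[0, :]   # XLRange of the first data row
    chart = xlmap.create_chart(type_='column', title='Calories per meal')
    return data_cells, first_row, chart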
class XLDataFrame(pd.DataFrame):
"""
Monkeypatched DataFrame modified by xl_link!
Changes:
--------
* to_excel modified to return an XLMap.
* XLDataFrame._constructor set to XLDataFrame -> stops reverting to normal DataFrame
Notes
-----
Conversions from this DataFrame to Series or Panels will return regular Panels and Series,
which will convert back into regular DataFrames upon expanding/reducing dimensions.
See Also
--------
Pandas.DataFrame
"""
@property
def _constructor(self):
return XLDataFrame
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
**kwargs):
"""
Monkeypatched DataFrame.to_excel by xl_link!
Changes:
--------
Returns
-------
XLMap
corresponding to position of frame as it appears in excel (see XLMap for details)
See Also
--------
Pandas.DataFrame.to_excel for info on parameters
Note
----
When providing a path as excel_writer, the default engine used is 'xlsxwriter'. Because xlsxwriter workbooks can only be
saved once, xl_link suppresses calling `excel_writer.save()`; as a result, `xlmap.writer.save()` should be
called once no further changes are to be made to the spreadsheet.
"""
if isinstance(excel_writer, pd.ExcelWriter):
need_save = False
else:
excel_writer = pd.ExcelWriter(_stringify_path(excel_writer), engine=engine)
need_save = True if excel_writer.engine != 'xlsxwriter' else False # xlsxwriter can only save once!
super().to_excel(excel_writer, sheet_name=sheet_name, na_rep=na_rep,
float_format=float_format, columns=columns, header=header, index=index,
index_label=index_label, startrow=startrow, startcol=startcol, engine=engine,
merge_cells=merge_cells, encoding=encoding, inf_rep=inf_rep, verbose=verbose,
**kwargs)
if need_save:
excel_writer.save()
data_range, index_range, col_range, _ = get_xl_ranges(self.index, self.columns,
sheet_name=sheet_name,
columns=columns,
header=header,
index=index,
index_label=index_label,
startrow=startrow,
startcol=startcol,
merge_cells=merge_cells)
f = self.copy()
if isinstance(columns, list) or isinstance(columns, tuple):
f = f[columns]
return XLMap(data_range, index_range, col_range, f, writer=excel_writer) | 34.469314 | 180 | 0.603425 | 2,286 | 19,096 | 4.903325 | 0.178478 | 0.01249 | 0.01472 | 0.011865 | 0.227763 | 0.199215 | 0.173075 | 0.150415 | 0.1464 | 0.133821 | 0 | 0.004894 | 0.315197 | 19,096 | 554 | 181 | 34.469314 | 0.85226 | 0.496858 | 0 | 0.157576 | 0 | 0 | 0.01994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0.006061 | 0.042424 | 0.012121 | 0.278788 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
359bee8a8464b32cccc3da209ee561f994d54277 | 19,848 | py | Python | GDHAllocationModel.py | geodesignhub/GDHLanduseAllocationModel | 0cc2a12385192b1a6ce3e32e227dcdf1069b1bda | [
"MIT"
] | null | null | null | GDHAllocationModel.py | geodesignhub/GDHLanduseAllocationModel | 0cc2a12385192b1a6ce3e32e227dcdf1069b1bda | [
"MIT"
] | null | null | null | GDHAllocationModel.py | geodesignhub/GDHLanduseAllocationModel | 0cc2a12385192b1a6ce3e32e227dcdf1069b1bda | [
"MIT"
] | 1 | 2019-07-26T03:46:54.000Z | 2019-07-26T03:46:54.000Z | import config
import GeodesignHub, shapelyHelper
from shapely.geometry.base import BaseGeometry
from shapely.geometry import shape, mapping, asShape, MultiPolygon
import os, sys, requests, geojson
import json, pyproj
import string, random
from operator import itemgetter
from rtree import Rtree
from shapely.validation import explain_validity
from tqdm import tqdm
from pyproj import Geod
from shapely.ops import unary_union
from shapely import speedups
from sys import version_info
from termcolor import colored
if speedups.available:
speedups.enable()
'''
Geodesign Hub Compatible Land Use Allocation Model
This model takes in gridded evaluation files and input features from Geodesign Hub (www.geodesignhub.com) and allocates them.
Projection: Geodesign Hub uses EPSG 4326 / WGS 84 (http://epsg.io/4326) and all GeoJSON files should be in that projection.
This is the main file; the other files are as follows:
config.py: This file contains the configuration, the input evaluation and feature files, and also settings for system priority.
GeodesignHub.py : This is the Geodesign Hub client written in Python, it is useful for interacting with the Geodesign Hub API.
shapelyHelper.py: This file is a helper class for Shapely (https://pypi.python.org/pypi/Shapely) the Python library used for spatial analysis.
'''
import sys
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
Source : http://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class ShapesFactory():
''' A class to help in geometry operations '''
def __init__(self):
self.geod = Geod(ellps="WGS84")
def multiPolytoFeature(self, mp):
''' Given Multipolygons, convert them into single polygon '''
feats =[]
for curCoords in mp['coordinates']:
feats.append({'type':'Polygon','coordinates':curCoords})
return feats
def genFeature(self, coords):
''' Given a set of coordinates return a Feature, useful when converting from Multipolygon -> Polygon '''
f = {}
f['type']= 'Feature'
f['properties']= {}
f['geometry']= coords
return f
def createUnaryUnion(self, allAreas):
''' Given a set of areas, this method constructs a unary union for them '''
try:
# Construct a unary_union assume that there are no errors in
# geometry.
allDsgnPlygons = unary_union(allAreas)
except Exception as e1:
# If there are errors while constructing the union, examine the
# geometries further to separate out just the valid polygons. To avoid this error,
# ensure that the evaluation features are topologically correct, usually by using a
# geometry checker in GIS tools.
s1All = []
try:
s1Polygons = MultiPolygon([x for x in allAreas if (
x.geom_type == 'Polygon' or x.geom_type == 'MultiPolygon') and x.is_valid])
if s1Polygons:
s1All.append(s1Polygons)
except Exception as e:
print('Error in CreateUnaryUnion Polygon: %s' % e)
else:
if s1All:
allDsgnPlygons = unary_union(s1All)
else:
allDsgnPlygons = ''
return allDsgnPlygons
def generateShapeArea(self, feature, units):
''' Given a feature, compute its area in the given units. Acceptable units are acres or hectares.
This function uses a geodesic area calculation on the WGS84 ellipsoid to approximate
the total area. '''
geom = feature['geometry']
s = shape(geom)
featureArea = abs(self.geod.geometry_area_perimeter(s)[0])
# default is hectares, if in acres, convert by using the multiplier.
multiplier = 0.000247105 if units == 'acres' else 0.0001
fArea = featureArea * multiplier
# print('# Geodesic area: {:.3f} {}'.format(fArea, units))
return fArea
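# --- Illustrative sketch (not part of the original model) ---
# Shows how ShapesFactory might be used on its own; the coordinates below are made up
# purely for demonstration.
def _example_shapes_factory():
    sf = ShapesFactory()
    feature = {'type': 'Feature', 'properties': {},
               'geometry': {'type': 'Polygon',
                            'coordinates': [[[0.0, 0.0], [0.01, 0.0], [0.01, 0.01], [0.0, 0.01], [0.0, 0.0]]]}}
    area_ha = sf.generateShapeArea(feature, 'hectares')        # geodesic area in hectares
    union = sf.createUnaryUnion([shape(feature['geometry'])])  # union of one (or more) shapes
    return area_ha, union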
class RTreeHelper():
'''This class has helper functions for the RTree Spatial Index. (https://pypi.python.org/pypi/Rtree/) '''
def getNearestBounds(self, rtree, inputbounds,):
''' Given a set of input bounds, return a list of nearest bounds from the index '''
l = list(rtree.nearest(inputbounds, 1))
return l
def uniqify(self, seq):
''' Given a set of bounds keep only the uniques '''
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def extendBounds(self, origbounds, newboundslist):
''' Given original bounds and a list of new bounds (WGS 1984 lat/long), extend the original bounds to cover them all '''
mins ={'minx':origbounds[0],'miny':origbounds[1]}
maxs = {'maxx':origbounds[2],'maxy':origbounds[3]}
for curbounds in newboundslist:
mins['minx'] = float(curbounds[0]) if (mins['minx'] == 0) else min(float(curbounds[0]), mins['minx'])
mins['miny'] = float(curbounds[1]) if (mins['miny'] == 0) else min(float(curbounds[1]), mins['miny'])
maxs['maxx'] = float(curbounds[2]) if (maxs['maxx'] == 0) else max(float(curbounds[2]), maxs['maxx'])
maxs['maxy'] = float(curbounds[3]) if (maxs['maxy'] == 0) else max(float(curbounds[3]), maxs['maxy'])
return (mins['minx'], mins['miny'], maxs['maxx'], maxs['maxy'])
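# Illustrative example (not in the original file): extending a bounding box.
# RTreeHelper().extendBounds((1.0, 1.0, 2.0, 2.0), [(0.5, 1.5, 3.0, 1.8)]) returns
# (0.5, 1.0, 3.0, 2.0) -- the smallest box covering both inputs. Note that a value of
# exactly 0 in the original bounds is treated as "unset" and simply replaced.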
# Set the current path so that the evaluation and feature folders can be reads.
curPath = os.path.dirname(os.path.abspath(__file__))
def iter_evals(evalfeats):
''' This function returns a generator for evaluation features '''
for x in tqdm(evalfeats):
yield x
if __name__ == "__main__":
# Read and set the units.
print(colored("Starting Allocation Model..","white"))
units = config.units
# Set up the API Client
myAPIHelper = GeodesignHub.GeodesignHubClient(url = config.apisettings['serviceurl'], project_id=config.apisettings['projectid'], token=config.apisettings['apitoken'])
# Download the features file from the given synthesis ID
evalspriority = config.evalsandpriority
cteamid = config.changeteamandsynthesis['changeteamid']
synthesisid = config.changeteamandsynthesis['synthesisid']
try:
synthesischeck = myAPIHelper.get_synthesis(teamid = cteamid, synthesisid = synthesisid)
except requests.ConnectionError:
print(colored("Could not connect to Geodesignhub API service.","red"))
sys.exit(0)
if synthesischeck.status_code == 200:
c = synthesischeck.json()
else:
raise RuntimeError("Error in downloading dessign data, Geodesignhub returned with a status code %s " % synthesischeck.status_code)
print(colored("Downloading project features from the synthesis...","yellow"))
try:
assert c['status'] != "API Endpoint not found."
except AssertionError as e:
print(colored("Invalid change team or synthesis id.", "red"))
except KeyError as e1:
# print colored("Features downloaded, saving.. ","yellow")
pass
inputdirectory = os.path.join(curPath,'input-features')
if not os.path.exists(inputdirectory):
os.makedirs(inputdirectory)
for sp in evalspriority:
cursysid = sp['systemid']
fname = sp['name']
# get the projects for this system from the synthesis ID.
try:
projectsdata = myAPIHelper.get_synthesis_system_projects(teamid =cteamid , sysid =cursysid, synthesisid = synthesisid)
except requests.ConnectionError:
print(colored("Could not connect to Geodesignhub API service.", "red"))
sys.exit(0)
# write the file
featfilename = fname +'.geojson'
fpath = os.path.join(curPath,'input-features', fname +'.geojson')
f = open(fpath, 'w')
f.write(projectsdata.text)
f.close()
sp['featuresfilename'] = fpath # not necessary
print(colored("Features downloaded in the input-features directory..", "green"))
# Create instances of our helper classes
myShapesHelper = ShapesFactory()
myRTreeHelper = RTreeHelper()
# read the evaluations from the config file
evalspriority = config.evalsandpriority
# an ordered list to store the shapes per areatype and system. # TODO: Use an OrderedDict
allEvalSortedFeatures = []
# iterate over the evaluations
opfiles = []
for cureval in evalspriority:
print(colored("Loading Evaluation data for %s system.." % cureval["name"], "yellow"))
# a dictionary to hold features, we will ignore the red and red2 since allocation should not happen here.
evalfeatcollection = {'green3':[],'green2':[], 'green':[]}
# A dictionary to store the index of the features.
evalfeatRtree = {'green3':Rtree(),'green2': Rtree(), 'green': Rtree()}
# open evaluation file
filename = os.path.join(curPath, cureval['evalfilename'])
try:
assert os.path.isfile(filename)
except AssertionError as e:
print(colored("Input file %s does not exist" % filename, "red"))
sys.exit(0)
with open(filename) as data_file:
try:
geoms = json.load(data_file)
except Exception as e:
print(colored("Error in loading evaluation geometries, please check if it is a valid JSON.", "red"))
sys.exit(0)
allf = iter_evals(geoms['features'])
# iterate over the geometry features.
for curFeature in allf:
shp = 0
featureArea=0
try:
# convert the JSON feature in to Shape using Shapely's asShape.
shp = asShape(curFeature['geometry'])
except Exception as e:
# if there is an error in conversion, go to the next shape.
print(explain_validity(shp))
pass
try:
assert shp != 0
# get the bounds of the shape
bounds = shp.bounds
# generate the area of the shape
featureArea = myShapesHelper.generateShapeArea(curFeature, units)
# generate a random id for the shape
fid = random.randint(1, 900000000)
# check the areatype
areatype = curFeature['properties']['areatype']
if areatype in evalfeatcollection.keys():
# input the shape and details in the collections
evalfeatcollection[areatype].append({'id':fid,'shape':shp, 'bounds':bounds,'areatype':areatype,'area':featureArea, 'allocated':False})
# insert the bounds and id into the rtree, the id is used to get the shape later.
evalfeatRtree[areatype].insert(fid,bounds)
except AssertionError as e:
pass
print(colored("Processed {0} green3, {1} green2, {2} green from {3} system.".format(len(evalfeatcollection['green3']), len(evalfeatcollection['green2']),len(evalfeatcollection['green']),cureval['name']),"green"))
# Once all the evaluation features are processed, then insert it into the sorted features list including the rtree index.
allEvalSortedFeatures.append({'rtree':evalfeatRtree,'systemid':cureval['systemid'],'priority':cureval['priority'], 'features':evalfeatcollection})
# Proceed to the next evaluation file.
# now all evaluations are in place, read the feature inputs
syspriority = config.featurefilesandpriority
# sort the dictionary so we read the most important first.
syspriority = sorted(syspriority, key=itemgetter('priority'), reverse=True)
# a list to hold the processed features and their details.
sysAreaToBeAllocated =[]
# iterate over the system files.
print("Preparing Input Features..")
for cursysfeat in syspriority:
filename = os.path.join(curPath, 'input-features', cursysfeat['name']+'.geojson')
with open(filename) as data_file:
try:
geoms = json.load(data_file)
except Exception as e:
print(colored("Invalid geometries in the file, please check that it is valid JSON.", "red"))
sys.exit(0)
# a list to hold all shapes in this feature file
allFeatShapes = []
# iterate over the read features
totalarea = 0
for curFeature in geoms['features']:
shp = 0
# set the default shape area to be 0
try:
# Convert the feature into a shape.
shp = asShape(curFeature['geometry'])
except Exception as e:
# if there is an error in converting to shape, describe the error.
print(explain_validity(shp))
pass
try:
assert shp != 0
# add the shape to our features list
allFeatShapes.append(shp)
totalarea += myShapesHelper.generateShapeArea(curFeature, units)
except AssertionError as e:
pass
# if allFeatShapes and cursysfeat['allocationtype'] =='random':
allShapes = [myShapesHelper.createUnaryUnion(allFeatShapes)]
print(colored("Processed {0} features from {1} system.".format(len(allFeatShapes),cursysfeat['name']), "green"))
sysAreaToBeAllocated.append({'name':cursysfeat['name'],'systemid':cursysfeat['systemid'], 'priority':cursysfeat['priority'], 'type':cursysfeat['allocationtype'], 'targetarea':cursysfeat['target'], 'shapes':allShapes,'totalarea':totalarea, 'alreadyallocated': Rtree()})
# All data has now been set up; we start the allocation process.
sysAreaToBeAllocated = sorted(sysAreaToBeAllocated, key=itemgetter('priority'))
colorPrefs = ('green3','green2', 'green') # there is no preference for reds
# a counter for systems.
syscounter = 0
# iterate over the features which are sorted by priority.
print("Starting Allocations..." )
for curSysAreaToBeAllocated in sysAreaToBeAllocated:
print("Allocating for " + curSysAreaToBeAllocated['name'])
alreadyAllocatedFeats = [] # a object to hold already allocated features for this system.
sysid = curSysAreaToBeAllocated['systemid'] # the id of the current system
evalfeatures = next((item for item in allEvalSortedFeatures if item["systemid"] == sysid)) # get the evaluation feature object.
totalIntersectedArea = 0 # variable to hold the intersected area.
curSysPriority = curSysAreaToBeAllocated['priority']
curSysName = curSysAreaToBeAllocated['name']
for curAllocationColor in colorPrefs: # iterate over the colors
curEFeatRtree = evalfeatures['rtree'][curAllocationColor] #get the rtree of the eval color
# totalEvalFeats = evalfeatures['features'][curAllocationColor]
modifiedevalFeats =[] # a list to hold the evaluation features that have allocated = true for this color
if totalIntersectedArea < curSysAreaToBeAllocated['targetarea']:
for curFeat in curSysAreaToBeAllocated['shapes']: # iterate over the input shapes
bnds = curFeat.bounds # get the bounds
# check if there is an intersection
iFeats = [n for n in curEFeatRtree.intersection(bnds)] # check how many eval features intersect with the input
if iFeats and curSysAreaToBeAllocated['type'] == 'random': # once the evaluation features are selected, shuffle them so that the allocation can be random.
random.shuffle(iFeats)
for curiFeat in iFeats: # iterate over the evaluation features.
if totalIntersectedArea < curSysAreaToBeAllocated['targetarea']: # if the area of intersectio is less then the target area.
curevalfeat = next((item for item in evalfeatures['features'][curAllocationColor] if item["id"] == curiFeat)) # get the evaluation feature with the id
try:
# For systems after the first, check against the already-allocated features of earlier systems; the first system falls through to the except branch below
assert syscounter != 0
# get a list of rTrees that have lower priority than this system. For example, if the current sys priority is 2, get the priority 1 already-allocated features. This is to ensure that features already claimed by those systems are not allocated again.
prevRTrees = [x['alreadyallocated'] for x in sysAreaToBeAllocated if x['priority'] < curSysPriority]
l = []
for prevRTree in prevRTrees:
l.extend(list(prevRTree.intersection(curevalfeat['bounds'])))
if l:
pass
else:
intersection = 0
try:
intersection = curevalfeat['shape'].intersection(curFeat)
except Exception as e:
pass
if intersection:
curSysAreaToBeAllocated['alreadyallocated'].insert(curevalfeat['id'],curevalfeat['bounds'])
alreadyAllocatedFeats.append(intersection)
ft = json.loads(shapelyHelper.export_to_JSON(intersection))
ft = myShapesHelper.genFeature(ft)
area = 0  # accumulate the area over the polygons of a MultiPolygon
if ft['geometry']['type'] == 'MultiPolygon':
ft = myShapesHelper.multiPolytoFeature(ft['geometry'])
for feat in ft:
feat = myShapesHelper.genFeature(feat)
area += myShapesHelper.generateShapeArea(feat, units)
else:
area = myShapesHelper.generateShapeArea(ft, units)
totalIntersectedArea += area
curevalfeat['allocated'] = True
modifiedevalFeats.append(curevalfeat)
except AssertionError as ae:
intersection = 0
try:
intersection = curevalfeat['shape'].intersection(curFeat)
except Exception as e:
pass
if intersection:
curSysAreaToBeAllocated['alreadyallocated'].insert(curevalfeat['id'],curevalfeat['bounds'])
alreadyAllocatedFeats.append(intersection)
f1 = json.loads(shapelyHelper.export_to_JSON(intersection))
f1 = myShapesHelper.genFeature(f1)
area = 0  # accumulate the area over the polygons of a MultiPolygon
if f1['geometry']['type'] == 'MultiPolygon':
f1 = myShapesHelper.multiPolytoFeature(f1['geometry'])
for feat in f1:
feat = myShapesHelper.genFeature(feat)
area += myShapesHelper.generateShapeArea(feat, units)
else:
area = myShapesHelper.generateShapeArea(f1, units)
totalIntersectedArea += area
curevalfeat['allocated'] = True
modifiedevalFeats.append(curevalfeat)
for curmodifiedFeat in modifiedevalFeats:
evalfeatures['features'][curAllocationColor] = [x for x in evalfeatures['features'][curAllocationColor] if x['id'] != curmodifiedFeat['id']]
evalfeatures['features'][curAllocationColor].append(curmodifiedFeat)
print(colored("Allocated " + str(totalIntersectedArea) + " " + units, "green"))
print("Writing Output file..")
newGeoms = []
for curAllocation in alreadyAllocatedFeats:
cf ={}
f = json.loads(shapelyHelper.export_to_JSON(curAllocation))
cf['type']= 'Feature'
cf['properties']= {}
cf['geometry']= f
# cf['properties']['allocated'] = 1
newGeoms.append(cf)
syscounter+= 1
transformedGeoms = {}
transformedGeoms['type'] = 'FeatureCollection'
transformedGeoms['features'] = newGeoms
outputdirectory = os.path.join(curPath,'output')
if not os.path.exists(outputdirectory):
os.makedirs(outputdirectory)
oppath = os.path.join(curPath, 'output',str(curSysAreaToBeAllocated['name'])+'-op.geojson')
with open(oppath, 'w') as outFile:
json.dump(transformedGeoms , outFile)
opfiles.append({'allocationfile':oppath,'sysname':curSysAreaToBeAllocated['name'],'sysid':curSysAreaToBeAllocated['systemid']})
print(colored("Finished Allocations", "green"))
uploadOK = query_yes_no("Upload allocation outputs to the Project?")
if uploadOK:
# read the allocated file
for curopfile in opfiles:
with open(curopfile['allocationfile'], 'r') as f:
# set the system number
allocatedFeats = f.read()
allocatedFeats = json.loads(allocatedFeats)
print("Uploading allocations as diagrams..")
uploadfilename = 'Allocated ' + curopfile['sysname'] + ' v ' + str(config.allocationrunnumber)
# upload = myAPIHelper.post_as_diagram(geoms = allocatedFeats, projectorpolicy= 'project',featuretype = 'polygon', description=uploadfilename, sysid = curopfile['sysid'] )
# print(upload.text)
| 43.147826 | 270 | 0.70395 | 2,418 | 19,848 | 5.754756 | 0.232837 | 0.012936 | 0.009055 | 0.009055 | 0.162846 | 0.12562 | 0.108803 | 0.102192 | 0.102192 | 0.083938 | 0 | 0.007746 | 0.186971 | 19,848 | 459 | 271 | 43.24183 | 0.854558 | 0.248589 | 0 | 0.271028 | 0 | 0.003115 | 0.15707 | 0 | 0 | 0 | 0 | 0.002179 | 0.031153 | 1 | 0.031153 | false | 0.024922 | 0.05296 | 0 | 0.11838 | 0.068536 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
359fa1b55b8198d4ad245c110cd89c2830e7ab5a | 1,638 | py | Python | app.py | RobinCheptileh/json-excel | e58431f8110e0dd7b675a8244c280d1ff554ee2f | [
"MIT"
] | 1 | 2019-08-01T10:36:38.000Z | 2019-08-01T10:36:38.000Z | app.py | RobinCheptileh/json-excel | e58431f8110e0dd7b675a8244c280d1ff554ee2f | [
"MIT"
] | 3 | 2021-03-19T02:29:44.000Z | 2021-09-08T01:11:47.000Z | app.py | RobinCheptileh/json-excel | e58431f8110e0dd7b675a8244c280d1ff554ee2f | [
"MIT"
] | 1 | 2019-08-26T13:32:20.000Z | 2019-08-26T13:32:20.000Z | import pandas as pd
from flask import Flask, jsonify, request, make_response
from openpyxl import Workbook
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.writer.excel import save_virtual_workbook
app = Flask(__name__)
@app.route("/")
def index():
return jsonify({
'message': 'Welcome to json-excel! Head over to https://github.com/RobinCheptileh/json-excel for documentation.'
})
@app.route("/api/v1/to-spreadsheet", methods=['POST'])
def to_spreadsheet():
if not request.json or 'rows' not in request.json or not isinstance(request.json['rows'], list):
return jsonify({
'message': 'Bad Request'
}), 400
try:
data = request.json
header = 'header' in data
data_frame = pd.DataFrame(data['rows'], columns=data['header']) if header else pd.DataFrame(data['rows'])
print(data_frame)
wb = Workbook()
ws = wb.active
for r in dataframe_to_rows(data_frame, index=False, header=header):
ws.append(r)
raw_data = save_virtual_workbook(wb)
response = make_response(raw_data)
response.headers['Content-Type'] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
response.headers['Content-Disposition'] = "inline; filename=spreadsheet.xlsx"
return response
except ValueError as error:
return jsonify({
'message': str(error)
}), 400
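# Illustrative request (an assumption for demonstration; host and port are whatever Flask is bound to):
#
#   POST /api/v1/to-spreadsheet
#   Content-Type: application/json
#
#   {"header": ["name", "score"],
#    "rows": [["alice", 10], ["bob", 7]]}
#
# The response is an .xlsx workbook built from "rows"; when "header" is present it is
# used as the column names and written as the first row.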
@app.route("/api/v1/to-json", methods=['POST'])
def to_json():
return jsonify({
'message': 'Coming Soon!'
})
if __name__ == "__main__":
app.run(debug=True)
| 29.25 | 120 | 0.653846 | 202 | 1,638 | 5.158416 | 0.430693 | 0.049904 | 0.076775 | 0.024952 | 0.028791 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006265 | 0.220391 | 1,638 | 55 | 121 | 29.781818 | 0.80971 | 0 | 0 | 0.190476 | 0 | 0.02381 | 0.220391 | 0.068376 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.119048 | 0.047619 | 0.309524 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
359fee6a05f2093e4175bccdccc00fb135f709e4 | 388 | py | Python | tests/util.py | legnaleurc/wcpan.database | 7aeef90aa6b9a88075230eca575b02bd61e775d7 | [
"MIT"
] | null | null | null | tests/util.py | legnaleurc/wcpan.database | 7aeef90aa6b9a88075230eca575b02bd61e775d7 | [
"MIT"
] | null | null | null | tests/util.py | legnaleurc/wcpan.database | 7aeef90aa6b9a88075230eca575b02bd61e775d7 | [
"MIT"
] | null | null | null | import asyncio
import functools as ft
SQL_CREATE_TABLE = [
'''
CREATE TABLE people (
id INTEGER PRIMARY KEY,
name TEXT
);
''',
]
def sync(method):
@ft.wraps(method)
def wrapper(self, *args, **kwargs):
loop = asyncio.get_event_loop()
f = method(self, *args, **kwargs)
return loop.run_until_complete(f)
return wrapper
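# Illustrative usage sketch (not part of the original helpers): the ``sync`` decorator
# lets a plain unittest runner execute an ``async def`` test method by driving the event
# loop itself. The class and method names below are made up.
#
#   class PeopleTableTest(unittest.TestCase):
#       @sync
#       async def test_create_table(self):
#           ...  # await database calls against SQL_CREATE_TABLE here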
| 17.636364 | 41 | 0.592784 | 47 | 388 | 4.765957 | 0.659574 | 0.098214 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.293814 | 388 | 21 | 42 | 18.47619 | 0.817518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35a1b721bcf16de9c0bb094b9c144a6f38728f37 | 1,697 | py | Python | simulation_headless.py | iceychris/Planetensimulation | 30c20f5533771e01905e14777c275fcbc16583b1 | [
"MIT"
] | null | null | null | simulation_headless.py | iceychris/Planetensimulation | 30c20f5533771e01905e14777c275fcbc16583b1 | [
"MIT"
] | null | null | null | simulation_headless.py | iceychris/Planetensimulation | 30c20f5533771e01905e14777c275fcbc16583b1 | [
"MIT"
] | null | null | null | import multiprocessing
import time
from config import Config
from simulation_constants import END_MESSAGE
from lib.helper import get_log_func
import simulation
log = get_log_func("[headless]")
SECONDS_TO_RUN = 120
# specify the mode in which the simulation is run
# to alter cluster values open appropriate json files or the GUI
MODE = 'cluster' # or '<worker_implementation>'
def main():
# load config
# make sure to configure it before using
config = Config(filename="save.cfg.json")
config.load()
if MODE == 'cluster':
config.cluster["active"] = True
config.cluster["manager_host"] = "xray.informatik.fh-augsburg.de"
config.cluster["manager_port"] = 33333
config.cluster["redis_host"] = "xray.informatik.fh-augsburg.de"
config.cluster["redis_port"] = 6379
config.cluster["chunks"] = 8
elif MODE in config.update_impls:
config.update_impl = MODE
else:
log(f"no MODE '{MODE}'")
# random planets
config.mode_stuff["mode"] = 1
# 5000
config.nr_planets = 5000
# load up simulation
# create a pipe whose sole purpose is to send commands to the simulation
renderer_conn, simulation_conn = multiprocessing.Pipe()
simulation_process = \
multiprocessing.Process(target=simulation.startup,
args=(simulation_conn, config))
simulation_process.start()
# spin around and don't use the results
t = time.time()
while time.time() - t < SECONDS_TO_RUN:
if renderer_conn.poll():
renderer_conn.recv_bytes()
renderer_conn.send(END_MESSAGE)
time.sleep(0.1)
if __name__ == '__main__':
main() | 28.283333 | 78 | 0.670595 | 218 | 1,697 | 5.050459 | 0.486239 | 0.070845 | 0.018165 | 0.036331 | 0.078111 | 0.078111 | 0.078111 | 0.078111 | 0 | 0 | 0 | 0.018405 | 0.231585 | 1,697 | 60 | 79 | 28.283333 | 0.82592 | 0.200354 | 0 | 0 | 0 | 0 | 0.134373 | 0.044543 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.157895 | 0 | 0.184211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35a4da82f93f77707b4a9469229888479c2bc0bb | 3,654 | py | Python | Miscellaneous/Mesh_To_Part.py | Jay4C/Python-Macros-For_FreeCAD | 12ce5441a26731377fa43e86ccd2be675740d3a0 | [
"MIT"
] | 2 | 2021-03-31T14:11:52.000Z | 2021-04-16T10:01:54.000Z | Miscellaneous/Mesh_To_Part.py | Jay4C/Python-Macros-For_FreeCAD | 12ce5441a26731377fa43e86ccd2be675740d3a0 | [
"MIT"
] | null | null | null | Miscellaneous/Mesh_To_Part.py | Jay4C/Python-Macros-For_FreeCAD | 12ce5441a26731377fa43e86ccd2be675740d3a0 | [
"MIT"
] | 1 | 2019-06-25T05:33:02.000Z | 2019-06-25T05:33:02.000Z | # https://wiki.freecadweb.org/Scripting_examples
# https://wiki.freecadweb.org/Mesh_to_Part
import FreeCAD, Part, Drawing, time
import Mesh
DOC = FreeCAD.activeDocument()
DOC_NAME = "Pippo"
def clear_doc():
"""
Clear the active document deleting all the objects
"""
for obj in DOC.Objects:
DOC.removeObject(obj.Name)
def setview():
"""Rearrange View"""
FreeCAD.Gui.SendMsgToActiveView("ViewFit")
FreeCAD.Gui.activeDocument().activeView().viewAxometric()
if DOC is None:
FreeCAD.newDocument(DOC_NAME)
FreeCAD.setActiveDocument(DOC_NAME)
DOC = FreeCAD.activeDocument()
else:
clear_doc()
# EPS= tolerance to use to cut the parts
EPS = 0.10
EPS_C = EPS * -0.5
def convert_part_objects_to_meshes():
import Part
doc = FreeCAD.newDocument("Box_5")
box = doc.addObject("Part::Box", "myBox")
box.Height = 5
box.Length = 5
box.Width = 5
doc.recompute()
import Mesh
obj = box # a Part object must be preselected
shp = obj.Shape
faces = []
triangles = shp.tessellate(1) # the number represents the precision of the tessellation
for tri in triangles[1]:
face = []
for i in tri:
face.append(triangles[0][i])
faces.append(face)
m = Mesh.Mesh(faces)
Mesh.show(m)
time.sleep(5)
import MeshPart
obj = box # a Part object must be preselected
shp = obj.Shape
mesh = FreeCAD.ActiveDocument.addObject("Mesh::Feature", "Mesh")
mesh.Mesh = MeshPart.meshFromShape(
Shape=shp,
LinearDeflection=0.01,
AngularDeflection=0.025,
Relative=False)
setview()
# convert_part_objects_to_meshes()
def convert_meshes_to_part_objects_1():
import Part
doc = FreeCAD.newDocument("Box_5")
box = doc.addObject("Part::Box", "myBox")
box.Height = 5
box.Length = 5
box.Width = 5
doc.recompute()
import Mesh
import Part
mesh = Mesh.createTorus()
shape = Part.Shape()
shape.makeShapeFromMesh(mesh.Topology, 0.05) # the second arg is the tolerance for sewing
solid = Part.makeSolid(shape)
Part.show(solid)
setview()
# convert_meshes_to_part_objects_1()
def convert_meshes_to_part_objects_2():
import Part
doc = FreeCAD.newDocument("Box_5")
box = doc.addObject("Part::Box", "myBox")
box.Height = 5
box.Length = 5
box.Width = 5
doc.recompute()
import Mesh
import Part
import MeshPart
obj = box # a Mesh object must be preselected
mesh = obj.Mesh
segments = mesh.getPlanarSegments(0.00001) # use rather strict tolerance here
faces = []
for i in segments:
if len(i) > 0:
# a segment can have inner holes
wires = MeshPart.wireFromSegment(mesh, i)
# we assume that the exterior boundary is the one with the biggest bounding box
if len(wires) > 0:
ext = None
max_length=0
for i in wires:
if i.BoundBox.DiagonalLength > max_length:
max_length = i.BoundBox.DiagonalLength
ext = i
wires.remove(ext)
# all interior wires mark a hole and must reverse their orientation, otherwise Part.Face fails
for i in wires:
i.reverse()
# make sure that the exterior wires comes as first in the list
wires.insert(0, ext)
faces.append(Part.Face(wires))
solid = Part.Solid(Part.Shell(faces))
Part.show(solid)
setview()
convert_meshes_to_part_objects_2()
| 25.375 | 110 | 0.619595 | 465 | 3,654 | 4.776344 | 0.31828 | 0.016209 | 0.010806 | 0.034219 | 0.310671 | 0.271499 | 0.243584 | 0.243584 | 0.243584 | 0.202161 | 0 | 0.017222 | 0.284893 | 3,654 | 143 | 111 | 25.552448 | 0.832759 | 0.207718 | 0 | 0.454545 | 0 | 0 | 0.03007 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050505 | false | 0 | 0.121212 | 0 | 0.171717 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35a522d9dde3357fed374483b611b8951641d629 | 9,165 | py | Python | pretix_stretchgoals/chart.py | rixx/pretix-stretchgoals | f29b199395f9cfea4d6c5ed6557351622f271c74 | [
"Apache-2.0"
] | 5 | 2017-08-21T23:35:30.000Z | 2020-04-26T16:08:04.000Z | pretix_stretchgoals/chart.py | pretix/pretix-stretchgoals | f29b199395f9cfea4d6c5ed6557351622f271c74 | [
"Apache-2.0"
] | 5 | 2017-08-09T13:34:42.000Z | 2020-12-21T18:18:43.000Z | pretix_stretchgoals/chart.py | pretix/pretix-stretchgoals | f29b199395f9cfea4d6c5ed6557351622f271c74 | [
"Apache-2.0"
] | 2 | 2018-12-17T21:33:31.000Z | 2020-12-21T15:06:20.000Z | import json
from datetime import date, datetime, timedelta
import pytz
from django.db.models import Avg, DateTimeField, Max, OuterRef, Subquery, Sum
from django.db.models.query import QuerySet
from django.utils.timezone import now
from i18nfield.strings import LazyI18nString
from pretix.base.models import Item, OrderPayment, OrderPosition
from .json import ChartJSONEncoder
from .utils import get_cache_key, get_goals
def get_base_queryset(event, items, include_pending):
qs = OrderPosition.objects.filter(order__event=event)
allowed_states = ['p', 'n'] if include_pending else ['p']
op_date = (
OrderPayment.objects.filter(
order=OuterRef('order'),
state__in=(
OrderPayment.PAYMENT_STATE_CONFIRMED,
OrderPayment.PAYMENT_STATE_REFUNDED,
),
payment_date__isnull=False,
)
.order_by()
.values('order')
.annotate(m=Max('payment_date'))
.values('m')
)
qs = qs.filter(order__status__in=allowed_states).annotate(
payment_date=Subquery(op_date, output_field=DateTimeField())
)
if items:
qs = qs.filter(item__in=items)
if include_pending:
return qs.order_by('order__datetime')
return qs.order_by('payment_date')
def get_start_date(event, items, include_pending):
tz = pytz.timezone(event.settings.timezone)
start_date = event.settings.get('stretchgoals_start_date', as_type=date)
if start_date:
return start_date
first_order = get_base_queryset(event, items, include_pending).first()
if first_order:
if include_pending:
return first_order.order.datetime.astimezone(tz).date()
if first_order.order and first_order.payment_date:
return first_order.payment_date.astimezone(tz).date()
return (now() - timedelta(days=2)).astimezone(tz).date()
def get_end_date(event, items, include_pending):
tz = pytz.timezone(event.settings.timezone)
end_date = event.settings.get('stretchgoals_end_date', as_type=date)
if end_date:
return end_date
last_order = get_base_queryset(event, items, include_pending).last()
if last_order:
if include_pending:
last_date = last_order.order.datetime.astimezone(tz).date()
else:
last_date = last_order.payment_date.astimezone(tz).date()
if (
last_date == now().astimezone(tz).date()
and event.settings.stretchgoals_is_public
):
return last_date - timedelta(days=1)
return last_date
if event.settings.stretchgoals_is_public:
return (now() - timedelta(days=1)).astimezone(tz).date()
return now().astimezone(tz).date()
def get_date_range(start_date, end_date):
for offset in range((end_date - start_date).days + 1):
yield start_date + timedelta(days=offset)
def get_average_price(event, start_date, end_date, items, include_pending):
tz = pytz.timezone(event.settings.timezone)
start_dt = datetime(
start_date.year, start_date.month, start_date.day, 0, 0, 0, tzinfo=tz
)
end_dt = datetime(
end_date.year, end_date.month, end_date.day, 23, 59, 59, tzinfo=tz
)
if include_pending:
qs = get_base_queryset(event, items, include_pending).filter(
order__datetime__gte=start_dt, order__datetime__lte=end_dt
)
else:
qs = get_base_queryset(event, items, include_pending).filter(
payment_date__gte=start_dt, payment_date__lte=end_dt
)
return round(qs.aggregate(Avg('price')).get('price__avg') or 0, 2)
def get_total_price(event, start_date, end_date, items, include_pending):
tz = pytz.timezone(event.settings.timezone)
start_dt = datetime(
start_date.year, start_date.month, start_date.day, 0, 0, 0, tzinfo=tz
)
end_dt = datetime(
end_date.year, end_date.month, end_date.day, 23, 59, 59, tzinfo=tz
)
if include_pending:
qs = get_base_queryset(event, items, include_pending).filter(
order__datetime__gte=start_dt, order__datetime__lte=end_dt
)
else:
qs = get_base_queryset(event, items, include_pending).filter(
payment_date__gte=start_dt, payment_date__lte=end_dt
)
return round(qs.aggregate(Sum('price')).get('price__sum') or 0, 2)
def get_required_average_price(
event, items, include_pending, target, total_count, total_now
):
if not target:
return
start_date = get_start_date(event, items, include_pending)
end_date = get_end_date(event, items, include_pending)
tz = pytz.timezone(event.settings.timezone)
start_dt = datetime(
start_date.year, start_date.month, start_date.day, 0, 0, 0, tzinfo=tz
)
end_dt = datetime(
end_date.year, end_date.month, end_date.day, 23, 59, 59, tzinfo=tz
)
if include_pending:
all_orders = get_base_queryset(event, items, include_pending).filter(
order__datetime__gte=start_dt, order__datetime__lte=end_dt
)
else:
all_orders = get_base_queryset(event, items, include_pending).filter(
payment_date__gte=start_dt, payment_date__lte=end_dt
)
current_count = all_orders.count()
if total_now > target:
return 0
try:
return round((target - total_now) / (total_count - current_count), 2)
except Exception as e:
return None
def get_public_text(event, items, include_pending, data=None):
text = str(event.settings.get('stretchgoals_public_text', as_type=LazyI18nString))
if data:
text = text.format(**{'avg_now': data['avg_now']})
return text
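# Illustrative example (not part of the plugin code): if the configured public text were
# "We have reached an average ticket price of {avg_now} so far!", get_public_text would
# substitute data['avg_now'] into the {avg_now} placeholder before the text is displayed.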
def get_chart_and_text(event):
cache = event.cache
cache_key = get_cache_key(event)
chart_data = cache.get(cache_key)
if chart_data:
return chart_data
result = {}
include_pending = event.settings.stretchgoals_include_pending or False
avg_chart = event.settings.stretchgoals_chart_averages or False
total_chart = event.settings.stretchgoals_chart_totals or False
event.settings._h.add_type(
QuerySet,
lambda queryset: ','.join([str(element.pk) for element in queryset]),
lambda pk_list: Item.objects.filter(pk__in=pk_list.split(',')) if pk_list else []
)
items = event.settings.get('stretchgoals_items', as_type=QuerySet) or []
start_date = get_start_date(event, items, include_pending)
end_date = get_end_date(event, items, include_pending)
goals = get_goals(event)
data = {
'avg_data': {
'data': [
{
'date': date.strftime('%Y-%m-%d'),
'price': get_average_price(
event, start_date, date, items, include_pending
)
or 0,
}
for date in get_date_range(start_date, end_date)
]
if avg_chart
else None,
'target': [goal.get('avg', 0) for goal in goals],
'label': 'avg',
},
'total_data': {
'data': [
{
'date': date.strftime('%Y-%m-%d'),
'price': get_total_price(
event, start_date, date, items, include_pending
)
or 0,
}
for date in get_date_range(start_date, end_date)
]
if total_chart
else None,
'target': [goal['total'] for goal in goals],
'label': 'total',
},
}
if avg_chart:
data['avg_data']['ymin'] = int(
min([d['price'] for d in data['avg_data']['data'] if d['price']] or [0])
)
if total_chart:
data['total_data']['ymin'] = int(
min([d['price'] for d in data['total_data']['data'] if d['price']] or [0])
)
result['data'] = {
key: json.dumps(value, cls=ChartJSONEncoder) for key, value in data.items()
}
try:
result['avg_now'] = data['avg_data']['data'][-1]['price']
result['total_now'] = data['total_data']['data'][-1]['price']
except (TypeError, IndexError): # no data, data[-1] does not exist
result['avg_now'] = 0
result['total_now'] = 0
for goal in goals:
goal['avg_required'] = get_required_average_price(
event,
items,
include_pending,
goal['total'],
goal['amount'],
result['total_now'],
)
goal['total_left'] = goal['total'] - result['total_now']
result['goals'] = goals
result[
'significant'
] = not event.settings.stretchgoals_min_orders or get_base_queryset(
event, items, include_pending
).count() >= event.settings.get(
'stretchgoals_min_orders', as_type=int
)
result['public_text'] = get_public_text(event, items, include_pending, data=result)
result['last_generated'] = now()
cache.set(
cache_key, result, timeout=3600
) # timeout is set in seconds, so it's hourly
return result
| 35.3861 | 89 | 0.627278 | 1,163 | 9,165 | 4.662081 | 0.134996 | 0.085208 | 0.084102 | 0.088528 | 0.523239 | 0.458318 | 0.414054 | 0.38602 | 0.337329 | 0.337329 | 0 | 0.008423 | 0.261648 | 9,165 | 258 | 90 | 35.523256 | 0.792818 | 0.008074 | 0 | 0.25431 | 0 | 0 | 0.060189 | 0.010013 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038793 | false | 0 | 0.043103 | 0 | 0.168103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35a7e0c42bfa601d5fd25f7b98d90361c3dff964 | 4,928 | py | Python | src/algo_alpha_beta.py | deterralba/centrale_ia | c9d4d27aaec7debe312eb4ea4f59d0e2576958f4 | [
"MIT"
] | null | null | null | src/algo_alpha_beta.py | deterralba/centrale_ia | c9d4d27aaec7debe312eb4ea4f59d0e2576958f4 | [
"MIT"
] | null | null | null | src/algo_alpha_beta.py | deterralba/centrale_ia | c9d4d27aaec7debe312eb4ea4f59d0e2576958f4 | [
"MIT"
] | null | null | null | from algo_mini_max import get_available_moves, clone_and_apply_actions, from_numpy_to_tuple
import numpy as np
from time import time, sleep
from const import RACE_ID, HUM, WOLV, VAMP
from board import Action, Board
from threading import RLock
from game import TRANSPOSITION, INF
PRINT_SUMMARY = False
VICTORY_IS_INF = True
def alphabeta(board, race, race_ennemi, depth, evaluate, esperance, transposition_table=None, with_score=False):
'''without group division and only one action'''
old_skip = Board.SKIP_CHECKS
Board.SKIP_CHECKS = True
start_time = time()
if TRANSPOSITION:
assert transposition_table is not None
counter = 0
alpha = -INF
beta = INF
all_actions = []
best_action, best_score, total_counter = _alpha_beta(
True, board, race, race_ennemi, depth, evaluate, esperance, all_actions, counter, alpha, beta, transposition_table
)
print('=' * 40)
print('action {}, score {}'.format(best_action, best_score))
Board.SKIP_CHECKS = old_skip
if PRINT_SUMMARY:
print('Action summary')
all_actions = [action for action in all_actions if action[2] == best_score]
all_actions.sort(key=lambda x: x[1], reverse=True)
print('\n'.join(map(str, all_actions)))
end_time = time() - start_time
#print('#position calc: {}, in {:.2f}s ({:.0f}/s)'.format(total_counter, end_time, total_counter / end_time))
if with_score:
return [best_action], best_score
return [best_action] # return a list with only one move for the moment
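# Illustrative call (a sketch; ``board``, the race constants and the ``evaluate`` function
# are assumed to come from the surrounding game code):
#
#   actions = alphabeta(board, VAMP, WOLV, depth=3,
#                       evaluate=evaluate, esperance=False,
#                       transposition_table={})
#
# ``actions`` is a single-element list holding the best Action found for the VAMP side.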
def _alpha_beta(is_max, board, race, race_ennemi, depth, evaluate, esperance, all_actions, counter, alpha, beta, transposition_table=None):
winning_race = board.is_over()
if winning_race:
if VICTORY_IS_INF:
score = INF if winning_race == race else -INF
return None, score, counter + 1
else:
return None, 2 * evaluate(board, race, race_ennemi), counter + 1
if depth == 0:
return None, evaluate(board, race, race_ennemi), counter + 1
playing_race = race if is_max else race_ennemi
actions = get_available_moves(board, playing_race) # return a list of possible actions
np.random.shuffle(actions)
best_action = actions[0]
for action in actions:
if esperance:
clone_boards = clone_and_apply_actions(board, [action], playing_race, True)
scores = []
for clone_board in clone_boards:
_, score, counter = _alpha_beta(
not is_max, clone_board, race, race_ennemi, depth - 1, evaluate, esperance, all_actions, counter, alpha, beta
)
scores.append(score * clone_board.proba)
if len(scores) > 1:
# print('calculated several clone_boards :', scores, sum([clone_board.proba for clone_board in clone_boards]))
pass
score = sum(scores)
else:
clone_board = clone_and_apply_actions(board, [action], playing_race, False)
_, score, counter = _alpha_beta(
not is_max, clone_board, race, race_ennemi, depth - 1, evaluate, esperance, all_actions, counter, alpha, beta
)
'''
# TRANSPOSITION old code
clone_board = clone_and_apply_actions(board, [action], playing_race, False)
skip_alpha_beta = False
if TRANSPOSITION:
clone_grid = from_numpy_to_tuple(clone_board)
if clone_grid in transposition_table.keys():
# print('situation already encountered...skipping calculation thks to transposition_table...')
score = transposition_table[clone_grid]
skip_alpha_beta = True
if not skip_alpha_beta:
_, score, counter = _alpha_beta(not is_max, clone_board, race, race_ennemi, depth - 1, evaluate, esperance,
all_actions, counter, alpha, beta, transposition_table)
if TRANSPOSITION:
if depth == transposition_table['depth']:
transposition_table[clone_grid] = score
'''
# print('score = ' + str(score))
if is_max:
if score > alpha:
alpha = score
best_action = action
if alpha >= beta:
all_actions.append((best_action, depth, alpha))
return best_action, alpha, counter
else:
if score < beta:
beta = score
best_action = action
if alpha >= beta:
all_actions.append((best_action, depth, beta))
return best_action, beta, counter
if is_max:
all_actions.append((best_action, depth, alpha))
return best_action, alpha, counter
else:
all_actions.append((best_action, depth, beta))
return best_action, beta, counter
| 39.111111 | 139 | 0.626623 | 603 | 4,928 | 4.870647 | 0.199005 | 0.051073 | 0.04903 | 0.051753 | 0.391556 | 0.391556 | 0.373851 | 0.336057 | 0.321757 | 0.320054 | 0 | 0.004845 | 0.287946 | 4,928 | 125 | 140 | 39.424 | 0.832146 | 0.075487 | 0 | 0.258824 | 0 | 0 | 0.009912 | 0 | 0 | 0 | 0 | 0 | 0.011765 | 1 | 0.023529 | false | 0.011765 | 0.082353 | 0 | 0.211765 | 0.047059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35aaefad8383889677db6462fee4f20f5e2e2dc2 | 2,197 | py | Python | bank.py | kmranrg/BankWithUs | d4f22387e8cd4ef23a0e23c64d0f02cdc6a362f9 | [
"BSD-3-Clause"
] | 1 | 2020-12-17T21:41:02.000Z | 2020-12-17T21:41:02.000Z | bank.py | kmranrg/BankWithUs | d4f22387e8cd4ef23a0e23c64d0f02cdc6a362f9 | [
"BSD-3-Clause"
] | null | null | null | bank.py | kmranrg/BankWithUs | d4f22387e8cd4ef23a0e23c64d0f02cdc6a362f9 | [
"BSD-3-Clause"
] | 1 | 2019-12-31T19:54:19.000Z | 2019-12-31T19:54:19.000Z | # Source Code
print("\t\t\t***** BankWithUs *****")
bank_data = {}
while True:
print("\n\n\t\t\t----- Main Menu -----")
ch = int(input("\n\n1.New Customer\n2.Existing Customer\n3.Exit\n\nEnter choice:"))
if ch == 1:
name = input("\nEnter name:")
city = input("Enter city:")
age = int(input("Enter age:"))
acc = input("Enter account type:")
amt = int(input("Enter amount:"))
acc_no = int(input("Enter account no:"))
user_data = {}
user_data["name"] = name
user_data["city"] = city
user_data["age"] = age
user_data["account"] = acc
user_data["amount"] = amt
bank_data[acc_no] = user_data
print("\nACCOUNT CREATED\n")
elif ch == 2:
acc_no = int(input("Enter account no:"))
if acc_no in bank_data:
print("\nACCOUNT EXISTS\n")
while True:
print("\n\n\t\t\t----- User Portal -----")
choice = int(input("\n1.Check Balance\n2.Withdraw\n3.Deposit\n4.Back to main menu\n\nEnter choice:"))
if choice == 1:
print("\nYour available balance:",bank_data[acc_no]["amount"])
elif choice == 2:
withd = int(input("\nEnter withdraw amount:"))
bank_data[acc_no]["amount"] = bank_data[acc_no]["amount"] - withd
print("\nAmount withdrawn, now your available balance is %i"%bank_data[acc_no]["amount"])
elif choice == 3:
dep = int(input("\nEnter deposit amount:"))
bank_data[acc_no]["amount"] = bank_data[acc_no]["amount"] + dep
print("\nAmount deposited, now your available balance is %i"%bank_data[acc_no]["amount"])
elif choice == 4:
break
else:
print("\n\nINVALID CHOICE!")
else:
print("\nACCOUNT NOT FOUND\n")
elif ch == 3:
break
else:
print("\n\nINVALID CHOICE!")
print("\n\n***** Thank you for banking with us *****")
| 33.287879 | 117 | 0.492035 | 260 | 2,197 | 4.05 | 0.288462 | 0.052232 | 0.083571 | 0.098765 | 0.358025 | 0.358025 | 0.302944 | 0.224122 | 0.188034 | 0.188034 | 0 | 0.009866 | 0.354119 | 2,197 | 65 | 118 | 33.8 | 0.732206 | 0.005007 | 0 | 0.229167 | 0 | 0.041667 | 0.328297 | 0.028846 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35adb0cec60ae1d4fdeced6efbbc881df190df88 | 16,833 | py | Python | robust_cifar_train.py | snap-stanford/crust | a430069a413a0bb96ab33080df16b7d285e18040 | [
"MIT"
] | 21 | 2020-12-14T03:30:43.000Z | 2021-11-23T03:51:00.000Z | robust_cifar_train.py | snap-stanford/crust | a430069a413a0bb96ab33080df16b7d285e18040 | [
"MIT"
] | 2 | 2021-04-03T08:55:52.000Z | 2022-01-24T00:30:59.000Z | robust_cifar_train.py | snap-stanford/crust | a430069a413a0bb96ab33080df16b7d285e18040 | [
"MIT"
] | 7 | 2020-12-18T10:32:07.000Z | 2021-05-31T02:32:07.000Z | import argparse
import os
import random
import time
import warnings
import sys
import numpy as np
from sklearn.metrics import pairwise_distances
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
import copy
from torch.utils.tensorboard import SummaryWriter
#import wandb
from torch.autograd import grad
from fl_cifar import FacilityLocationCIFAR
from lazyGreedy import lazy_greedy_heap
from utils import *
from mislabel_cifar import MISLABELCIFAR10, MISLABELCIFAR100
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
parser.add_argument('--dataset', default='cifar10', help='dataset setting')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet32',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet32)')
parser.add_argument('--exp-str', default='0', type=str, help='number to indicate which experiment it is')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=120, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N',
help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=5e-4, type=float,
                    metavar='W', help='weight decay (default: 5e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--root_log',type=str, default='log')
parser.add_argument('--root_model', type=str, default='checkpoint')
parser.add_argument('--use_crust', action='store_true',
help="Whether to use clusters in dataset.")
parser.add_argument('--r', type=float, default=2.0,
help="Distance threshold (i.e. radius) in calculating clusters.")
parser.add_argument('--fl-ratio', type=float, default=0.5,
help="Ratio for number of facilities.")
parser.add_argument('--mislabel-type', type=str, default='agnostic')
parser.add_argument('--mislabel-ratio', type=float, default=0.5)
parser.add_argument('--rand-number', type=int, default=0,
help="Ratio for number of facilities.")
best_acc1 = 0
def main():
args = parser.parse_args()
if args.use_crust:
args.store_name = '_'.join([args.dataset, args.arch, args.mislabel_type, str(args.mislabel_ratio), str(args.fl_ratio), str(args.r), args.exp_str])
else:
args.store_name = '_'.join([args.dataset, args.arch, args.mislabel_type, str(args.mislabel_ratio), args.exp_str])
prepare_folders(args)
#wandb.init(project="robust_cifar", tensorboard=True, name=args.store_name)
#wandb.config.update(args)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
ngpus_per_node = torch.cuda.device_count()
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# create model
print("=> creating model '{}'".format(args.arch))
args.num_classes = 100 if args.dataset == 'cifar100' else 10
model = models.__dict__[args.arch](num_classes=args.num_classes)
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
model = torch.nn.DataParallel(model).cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_val = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.dataset == 'cifar10':
train_dataset = MISLABELCIFAR10(root='./data', mislabel_type=args.mislabel_type, mislabel_ratio=args.mislabel_ratio, transform=transform_train, rand_number=args.rand_number, download=True)
val_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_val)
elif args.dataset == 'cifar100':
train_dataset = MISLABELCIFAR100(root='./data', mislabel_type=args.mislabel_type, mislabel_ratio=args.mislabel_ratio, transform=transform_train, rand_number=args.rand_number, download=True)
val_dataset = datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_val)
criterion = nn.CrossEntropyLoss(reduction='none').cuda(args.gpu)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
trainval_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, 0, args)
return
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[80, 100], last_epoch=args.start_epoch - 1)
# init log for training
log_training = open(os.path.join(args.root_log, args.store_name, 'log.csv'), 'w')
with open(os.path.join(args.root_log, args.store_name, 'args.txt'), 'w') as f:
f.write(str(args))
tf_writer = SummaryWriter(log_dir=os.path.join(args.root_log, args.store_name))
weights = [1] * len(train_dataset)
weights = torch.FloatTensor(weights)
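    # Every sample starts with weight 1; when --use_crust is on, these are replaced
    # each epoch (after the warm-up) by the coreset weights computed below.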
for epoch in range(args.start_epoch, args.epochs):
if args.use_crust and epoch >= 5:
train_dataset.switch_data()
            # Facility-location (FL) coreset selection
grads_all, labels = estimate_grads(trainval_loader, model, criterion, args, epoch, log_training)
# per-class clustering
ssets = []
weights = []
for c in range(args.num_classes):
                sample_ids = np.where(labels == c)[0]
grads = grads_all[sample_ids]
dists = pairwise_distances(grads)
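                # weight[i] = number of same-class samples whose gradient lies within
                # radius r of sample i (its neighborhood size).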
weight = np.sum(dists < args.r, axis=1)
V = range(len(grads))
F = FacilityLocationCIFAR(V, D=dists)
B = int(args.fl_ratio * len(grads))
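                # Lazy greedy maximization of the facility-location objective picks a
                # size-B set of medoids so that every sample is close to some selected one.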
sset, vals = lazy_greedy_heap(F, V, B)
weights.extend(weight[sset].tolist())
sset = sample_ids[np.array(sset)]
ssets += list(sset)
weights = torch.FloatTensor(weights)
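            # Restrict the training set to the selected coreset indices for this epoch.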
train_dataset.adjust_base_indx_tmp(ssets)
label_acc = train_dataset.estimate_label_acc()
tf_writer.add_scalar('label_acc', label_acc, epoch)
log_training.write('epoch %d label acc: %f\n'%(epoch, label_acc))
print("change train loader")
# train for one epoch
if args.use_crust and epoch > 5:
train(train_loader, model, criterion, weights, optimizer, epoch, args, log_training, tf_writer, fetch=True)
else:
train(train_loader, model, criterion, weights, optimizer, epoch, args, log_training, tf_writer, fetch=False)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, epoch, args, log_training, tf_writer)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
tf_writer.add_scalar('acc/test_top1_best', best_acc1, epoch)
output_best = 'Best Prec@1: %.3f\n' % (best_acc1)
print(output_best)
save_checkpoint(args, {
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
lr_scheduler.step()
print('best_acc1: {:.4f}'.format(best_acc1.item()))
def train(train_loader, model, criterion, weights, optimizer, epoch, args, log_training, tf_writer, fetch=False):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
top5, prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, batch in enumerate(train_loader):
input, target, target_real, index = batch
if fetch:
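            # Mixup-style blend with another image fetched for the same (possibly noisy)
            # label; Beta(1, 0.1) concentrates lam near 1, so the original image dominates.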
input_b = train_loader.dataset.fetch(target)
lam = np.random.beta(1, 0.1)
input = lam * input + (1 - lam) * input_b
c_weights = weights[index]
c_weights = c_weights.type(torch.FloatTensor)
c_weights = c_weights / c_weights.sum()
if args.gpu is not None:
c_weights = c_weights.to(args.gpu, non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
input = input.type(torch.FloatTensor)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output, feats = model(input)
loss = criterion(output, target)
loss = (loss * c_weights).sum()
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.print(i)
tf_writer.add_scalar('loss/train', losses.avg, epoch)
tf_writer.add_scalar('acc/train_top1', top1.avg, epoch)
tf_writer.add_scalar('acc/train_top5', top5.avg, epoch)
tf_writer.add_scalar('lr', optimizer.param_groups[-1]['lr'], epoch)
def validate(val_loader, model, criterion, epoch, args, log_training=None, tf_writer=None):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
input = input.type(torch.FloatTensor)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output, feats = model(input)
loss = criterion(output, target)
loss = loss.mean()
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.print(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
if tf_writer is not None:
tf_writer.add_scalar('loss/test', losses.avg, epoch)
tf_writer.add_scalar('acc/test_top1', top1.avg, epoch)
tf_writer.add_scalar('acc/test_top5', top5.avg, epoch)
log_training.write('epoch %d val acc: %f\n'%(epoch, top1.avg))
return top1.avg
def estimate_grads(trainval_loader, model, criterion, args, epoch, log_training):
# switch to train mode
model.train()
all_grads = []
all_targets = []
all_preds = []
top1 = AverageMeter('Acc@1', ':6.2f')
for i, (input, target, target_real, idx) in enumerate(trainval_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
all_targets.append(target)
target = target.cuda(args.gpu, non_blocking=True)
target_real = target_real.cuda(args.gpu, non_blocking=True)
# compute output
output, feat = model(input)
_, pred = torch.max(output, 1)
loss = criterion(output, target).mean()
acc1, acc5 = accuracy(output, target_real, topk=(1, 5))
top1.update(acc1[0], input.size(0))
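        # The gradient of the loss w.r.t. the penultimate-layer features serves as a
        # lightweight per-sample gradient estimate for coreset selection.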
est_grad = grad(loss, feat)
all_grads.append(est_grad[0].detach().cpu().numpy())
all_preds.append(pred.detach().cpu().numpy())
all_grads = np.vstack(all_grads)
all_targets = np.hstack(all_targets)
all_preds = np.hstack(all_preds)
log_training.write('epoch %d train acc: %f\n'%(epoch, top1.avg))
return all_grads, all_targets
if __name__ == '__main__':
main()
| 42.18797 | 197 | 0.625082 | 2,137 | 16,833 | 4.775854 | 0.181563 | 0.021164 | 0.039976 | 0.014991 | 0.386635 | 0.34127 | 0.300803 | 0.281893 | 0.264746 | 0.229963 | 0 | 0.020553 | 0.2485 | 16,833 | 398 | 198 | 42.29397 | 0.786245 | 0.046041 | 0 | 0.213376 | 0 | 0.003185 | 0.122388 | 0 | 0 | 0 | 0 | 0.002513 | 0 | 1 | 0.015924 | false | 0 | 0.079618 | 0 | 0.105096 | 0.047771 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35adde2a6ef46edcff8d3a858f51e4c80f0a60f8 | 6,324 | py | Python | markdown_it/rules_block/reference.py | ExecutableBookProject/markdown-it-py | 53084e1ffa82323e37fe2d17a1b53d1dc66e5afd | [
"MIT"
] | 12 | 2020-03-26T08:00:43.000Z | 2020-04-23T09:10:36.000Z | markdown_it/rules_block/reference.py | sthagen/executablebooks-markdown-it-py | 53084e1ffa82323e37fe2d17a1b53d1dc66e5afd | [
"MIT"
] | 9 | 2020-03-25T11:36:16.000Z | 2020-04-23T18:07:16.000Z | markdown_it/rules_block/reference.py | sthagen/executablebooks-markdown-it-py | 53084e1ffa82323e37fe2d17a1b53d1dc66e5afd | [
"MIT"
] | 1 | 2020-04-01T16:12:38.000Z | 2020-04-01T16:12:38.000Z | import logging
from ..common.utils import charCodeAt, isSpace, normalizeReference
from .state_block import StateBlock
LOGGER = logging.getLogger(__name__)
def reference(state: StateBlock, startLine, _endLine, silent):
LOGGER.debug(
"entering reference: %s, %s, %s, %s", state, startLine, _endLine, silent
)
lines = 0
pos = state.bMarks[startLine] + state.tShift[startLine]
maximum = state.eMarks[startLine]
nextLine = startLine + 1
# if it's indented more than 3 spaces, it should be a code block
if state.sCount[startLine] - state.blkIndent >= 4:
return False
if state.srcCharCode[pos] != 0x5B: # /* [ */
return False
    # Simple check to quickly interrupt the scan on [link](url) at the start of a line.
    # Can be useful in practice: https://github.com/markdown-it/markdown-it/issues/54
while pos < maximum:
# /* ] */ /* \ */ /* : */
if state.srcCharCode[pos] == 0x5D and state.srcCharCode[pos - 1] != 0x5C:
if pos + 1 == maximum:
return False
if state.srcCharCode[pos + 1] != 0x3A:
return False
break
pos += 1
endLine = state.lineMax
# jump line-by-line until empty one or EOF
terminatorRules = state.md.block.ruler.getRules("reference")
oldParentType = state.parentType
state.parentType = "reference"
while nextLine < endLine and not state.isEmpty(nextLine):
# this would be a code block normally, but after paragraph
# it's considered a lazy continuation regardless of what's there
if state.sCount[nextLine] - state.blkIndent > 3:
nextLine += 1
continue
# quirk for blockquotes, this line should already be checked by that rule
if state.sCount[nextLine] < 0:
nextLine += 1
continue
# Some tags can terminate paragraph without empty line.
terminate = False
for terminatorRule in terminatorRules:
if terminatorRule(state, nextLine, endLine, True):
terminate = True
break
if terminate:
break
nextLine += 1
string = state.getLines(startLine, nextLine, state.blkIndent, False).strip()
maximum = len(string)
labelEnd = None
pos = 1
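    # Scan the collected lines for the ']' that closes the link label,
    # skipping backslash-escaped characters and counting newlines.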
while pos < maximum:
ch = charCodeAt(string, pos)
if ch == 0x5B: # /* [ */
return False
elif ch == 0x5D: # /* ] */
labelEnd = pos
break
elif ch == 0x0A: # /* \n */
lines += 1
elif ch == 0x5C: # /* \ */
pos += 1
if pos < maximum and charCodeAt(string, pos) == 0x0A:
lines += 1
pos += 1
if (
labelEnd is None or labelEnd < 0 or charCodeAt(string, labelEnd + 1) != 0x3A
): # /* : */
return False
# [label]: destination 'title'
# ^^^ skip optional whitespace here
pos = labelEnd + 2
while pos < maximum:
ch = charCodeAt(string, pos)
if ch == 0x0A:
lines += 1
elif isSpace(ch):
pass
else:
break
pos += 1
# [label]: destination 'title'
# ^^^^^^^^^^^ parse this
res = state.md.helpers.parseLinkDestination(string, pos, maximum)
if not res.ok:
return False
href = state.md.normalizeLink(res.str)
if not state.md.validateLink(href):
return False
pos = res.pos
lines += res.lines
# save cursor state, we could require to rollback later
destEndPos = pos
destEndLineNo = lines
# [label]: destination 'title'
# ^^^ skipping those spaces
start = pos
while pos < maximum:
ch = charCodeAt(string, pos)
if ch == 0x0A:
lines += 1
elif isSpace(ch):
pass
else:
break
pos += 1
# [label]: destination 'title'
# ^^^^^^^ parse this
res = state.md.helpers.parseLinkTitle(string, pos, maximum)
if pos < maximum and start != pos and res.ok:
title = res.str
pos = res.pos
lines += res.lines
else:
title = ""
pos = destEndPos
lines = destEndLineNo
# skip trailing spaces until the rest of the line
while pos < maximum:
ch = charCodeAt(string, pos)
if not isSpace(ch):
break
pos += 1
if pos < maximum and charCodeAt(string, pos) != 0x0A:
if title:
# garbage at the end of the line after title,
# but it could still be a valid reference if we roll back
title = ""
pos = destEndPos
lines = destEndLineNo
while pos < maximum:
ch = charCodeAt(string, pos)
if not isSpace(ch):
break
pos += 1
if pos < maximum and charCodeAt(string, pos) != 0x0A:
# garbage at the end of the line
return False
label = normalizeReference(string[1:labelEnd])
if not label:
# CommonMark 0.20 disallows empty labels
return False
# Reference can not terminate anything. This check is for safety only.
if silent:
return True
if "references" not in state.env:
state.env["references"] = {}
state.line = startLine + lines + 1
# note, this is not part of markdown-it JS, but is useful for renderers
if state.md.options.get("inline_definitions", False):
token = state.push("definition", "", 0)
token.meta = {
"id": label,
"title": title,
"url": href,
"label": string[1:labelEnd],
}
token.map = [startLine, state.line]
if label not in state.env["references"]:
state.env["references"][label] = {
"title": title,
"href": href,
"map": [startLine, state.line],
}
else:
state.env.setdefault("duplicate_refs", []).append(
{
"title": title,
"href": href,
"label": label,
"map": [startLine, state.line],
}
)
state.parentType = oldParentType
return True
| 28.876712 | 84 | 0.54222 | 693 | 6,324 | 4.935065 | 0.285714 | 0.035088 | 0.044444 | 0.024854 | 0.221637 | 0.200585 | 0.169006 | 0.154971 | 0.154971 | 0.143275 | 0 | 0.015783 | 0.358792 | 6,324 | 218 | 85 | 29.009174 | 0.82762 | 0.203827 | 0 | 0.503226 | 0 | 0 | 0.035586 | 0 | 0 | 0 | 0.011196 | 0 | 0 | 1 | 0.006452 | false | 0.012903 | 0.019355 | 0 | 0.103226 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |