text stringlengths 957 885k |
|---|
<reponame>lavon321/Kunlun-M
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/7/26 16:38
# @Author : LoRexxar
# @File : views.py
# @Contact : <EMAIL>
import os
import codecs
import json
from django.core import serializers
from django.shortcuts import render, redirect, HttpResponse
from django.http import HttpResponseRedirect, JsonResponse
from web.index.controller import login_or_token_required, api_token_required
from django.views.generic import TemplateView
from django.views import View
from web.index.models import ScanTask, VendorVulns, Rules, Tampers, NewEvilFunc, Project, ProjectVendors
from web.index.models import get_and_check_scantask_project_id, get_resultflow_class, get_and_check_scanresult
from utils.utils import show_context
from Kunlun_M.settings import LOGS_PATH
def index(request):
    """Placeholder root view; the real API is served by the class-based views below."""
    return HttpResponse("Nothing here.")
class TaskListApiView(View):
    """List every scan task (id, name, finished flag), newest first."""

    @staticmethod
    @api_token_required
    def get(request):
        tasks = ScanTask.objects.all().order_by('-id')
        payload = [
            {
                "id": task.id,
                "taskname": task.task_name,
                "is_finished": task.is_finished,
            }
            for task in tasks
        ]
        return JsonResponse({"code": 200, "status": True, "message": payload})
class TaskDetailApiView(View):
    """Return the full database record of a single scan task."""

    @staticmethod
    @api_token_required
    def get(request, task_id):
        record = list(ScanTask.objects.filter(id=task_id).values())
        return JsonResponse({"code": 200, "status": True, "message": record})
class TaskResultDetailApiView(View):
    """Return the scan results of a finished task.

    Fix: ``.filter(id=...).first()`` returns None for an unknown task id, so
    the original crashed with AttributeError on ``.is_finished``; respond
    with an explicit JSON error instead.
    """

    @staticmethod
    @api_token_required
    def get(request, task_id):
        scantask = ScanTask.objects.filter(id=task_id).first()
        if scantask is None:
            return JsonResponse({"code": 404, "status": False, "message": "Task {} not found.".format(task_id)})
        if not scantask.is_finished:
            return JsonResponse({"code": 403, "status": False, "message": "Task {} not finished.".format(task_id)})
        project_id = get_and_check_scantask_project_id(task_id)
        # get_and_check_scanresult returns the per-task result model class.
        scantaskresults = list(get_and_check_scanresult(task_id).objects.filter(scan_project_id=project_id).values())
        return JsonResponse(
            {"code": 200, "status": True, "message": scantaskresults})
class TaskResultFlowDetailApiView(View):
    """Return the result-flow records of a finished task.

    Fix: guard against an unknown task id — ``.first()`` returns None and
    the original raised AttributeError on ``.is_finished``.
    """

    @staticmethod
    @api_token_required
    def get(request, task_id):
        scantask = ScanTask.objects.filter(id=task_id).first()
        if scantask is None:
            return JsonResponse({"code": 404, "status": False, "message": "Task {} not found.".format(task_id)})
        if not scantask.is_finished:
            return JsonResponse({"code": 403, "status": False, "message": "Task {} not finished.".format(task_id)})
        # Result flows live in a per-task dynamic model class.
        ResultFlow = get_resultflow_class(int(task_id))
        rfs = ResultFlow.objects.filter().order_by('vul_id')
        resultflow_list = list(rfs.values())
        return JsonResponse(
            {"code": 200, "status": True, "message": resultflow_list})
class TaskNewEvilFuncApiView(View):
    """Return the new evil functions discovered by a finished task.

    Fix: guard against an unknown task id — ``.first()`` returns None and
    the original raised AttributeError on ``.is_finished``.
    """

    @staticmethod
    @api_token_required
    def get(request, task_id):
        scantask = ScanTask.objects.filter(id=task_id).first()
        if scantask is None:
            return JsonResponse({"code": 404, "status": False, "message": "Task {} not found.".format(task_id)})
        if not scantask.is_finished:
            return JsonResponse({"code": 403, "status": False, "message": "Task {} not finished.".format(task_id)})
        project_id = get_and_check_scantask_project_id(task_id)
        nefs = list(NewEvilFunc.objects.filter(project_id=project_id).values())
        return JsonResponse(
            {"code": 200, "status": True, "message": nefs})
class TaskVendorsApiView(View):
    """Return the third-party components (vendors) found by a finished task.

    Fix: guard against an unknown task id — ``.first()`` returns None and
    the original raised AttributeError on ``.is_finished``.
    """

    @staticmethod
    @api_token_required
    def get(request, task_id):
        scantask = ScanTask.objects.filter(id=task_id).first()
        if scantask is None:
            return JsonResponse({"code": 404, "status": False, "message": "Task {} not found.".format(task_id)})
        if not scantask.is_finished:
            return JsonResponse({"code": 403, "status": False, "message": "Task {} not finished.".format(task_id)})
        project_id = get_and_check_scantask_project_id(task_id)
        pvs = list(ProjectVendors.objects.filter(project_id=project_id).values())
        return JsonResponse(
            {"code": 200, "status": True, "message": pvs})
class RuleListApiView(View):
    """Return every scan rule as a JSON list."""

    @staticmethod
    @api_token_required
    def get(request):
        all_rules = list(Rules.objects.filter().values())
        return JsonResponse({"code": 200, "status": True, "message": all_rules})
class RuleDetailApiView(View):
    """Return a single rule, looked up by its CVI (svid) identifier."""

    @staticmethod
    @api_token_required
    def get(request, rule_cviid):
        matched = list(Rules.objects.filter(svid=rule_cviid).values())
        return JsonResponse({"code": 200, "status": True, "message": matched})
class VendorVulListApiView(View):
    """Return the first 100 vendor vulnerabilities.

    Fix: slicing an *unordered* queryset (``filter()[:100]``) yields an
    arbitrary, database-dependent subset; order by primary key first so the
    endpoint is deterministic.
    """

    @staticmethod
    @api_token_required
    def get(request):
        vendorvuls = VendorVulns.objects.all().order_by('id').values()[:100]
        return JsonResponse(
            {"code": 200, "status": True, "message": list(vendorvuls)})
class VendorVuLDetailApiView(View):
    """Return a single vendor vulnerability, looked up by primary key."""

    @staticmethod
    @api_token_required
    def get(request, vendor_vul_id):
        matched = list(VendorVulns.objects.filter(id=vendor_vul_id).values())
        return JsonResponse({"code": 200, "status": True, "message": matched})
|
<filename>tasks/bert/__init__.py
import os
import types
import contextlib
import itertools
import re
from typing import Iterable, List, Tuple
import torch
from torch.random import fork_rng
import torchvision
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset, random_split
import torch
from ..api import Batch, Dataset, Gradient, Loss, Parameters, Quality, State, Task
from ..cifar import PyTorchDataset, parameter_type, fork_rng_with_seed
from .linear_predictors import ptl2classes
from .task_configs import task2dataiter
from ..utils.non_iid_dirichlet import distribute_data_dirichlet
class BERTTask(Task):
    """Text classification / tagging task backed by a pretrained BERT-family model.

    Wraps model creation, train/test data loading (with optional per-worker
    splits under torch.distributed) and the loss/gradient/quality
    computations behind the generic ``Task`` interface.
    """

    def __init__(
        self,
        weight_decay,
        model_name,
        data_name,
        data_split_method,
        non_iid_alpha=None,
        seed=0,
    ):
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # e.g. "bert-base-uncased" -> pretrained LM family "bert".
        self._pretrained_lm = model_name.split("-")[0]
        self._model_name = model_name
        self._data_name = data_name
        self._data = BERTDataset(
            model_name,
            pretrained_lm=self._pretrained_lm,
            data_name=data_name,
            split="train",
            device=self._device,
        )
        self.max_batch_size = self._data.max_batch_size
        self._test_data = BERTDataset(
            model_name,
            pretrained_lm=self._pretrained_lm,
            data_name=data_name,
            split="test",
            device=self._device,
        )
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            # Splitting data by worker
            num_workers = torch.distributed.get_world_size()
            if data_split_method == "dirichlet":
                # Non-IID split: label proportions drawn from Dirichlet(alpha).
                splits = self._data.dirichlet_split(
                    num_workers, non_iid_alpha, seed=seed
                )
            elif data_split_method == "random":
                # IID split: equal random fractions per worker.
                splits = self._data.random_split(
                    fractions=[1 / num_workers for _ in range(num_workers)], seed=seed
                )
            else:
                raise ValueError(
                    f"Unknown value {data_split_method} for data_split_method"
                )
            self.mean_num_data_per_worker = (
                sum(len(split) for split in splits) / num_workers
            )
            print(
                f"Splitting data using {data_split_method} according to",
                [len(split) for split in splits],
            )
            # Each rank keeps only its own shard.
            self.data = splits[torch.distributed.get_rank()]
        else:
            self.data = self._data
            self.mean_num_data_per_worker = len(self._data)
        self._model = self._create_model()
        self._criterion = torch.nn.CrossEntropyLoss().to(self._device)
        # Per-parameter decoupled weight decay: no decay for batch-norm-type
        # parameters. NOTE: named_parameters() yields (name, param), so `p`
        # here is the parameter *name* and parameter_type operates on names.
        self._weight_decay_per_param = [
            0 if parameter_type(p) == "batch_norm" else weight_decay
            for p, _ in self._model.named_parameters()
        ]

    def initialize(self, seed=42) -> Tuple[Parameters, State]:
        """Re-create the model under a fixed RNG seed and return its parameter
        tensors and buffer tensors."""
        with fork_rng_with_seed(seed):
            self._model = self._create_model()
        parameters = [p.data for p in self._model.parameters()]
        state = [b.data for b in self._model.buffers()]
        return parameters, state

    def loss_and_gradient(
        self,
        parameters: List[torch.Tensor],
        state: List[torch.Tensor],
        batch: Batch,
        random_seed=None,
    ) -> Tuple[Loss, Gradient, State]:
        """One forward/backward pass on `batch` using the supplied parameters
        and buffers; returns (scalar loss, decayed gradients, new buffers)."""
        with fork_rng_with_seed(random_seed):
            output, state = self._forward(
                batch._x, parameters=parameters, state=state, is_training=True
            )
            loss = self._criterion(output, batch._y)
        gradients = torch.autograd.grad(loss, list(self._model.parameters()))
        # Add the decoupled L2 term manually so decay strength can differ
        # per parameter (see _weight_decay_per_param).
        gradients = [
            g + wd * p
            for g, wd, p in zip(gradients, self._weight_decay_per_param, parameters)
        ]
        return loss.item(), gradients, state

    def quality(
        self, parameters: List[torch.Tensor], state: List[torch.Tensor], batch: Batch
    ) -> Quality:
        """Average quality on the batch"""
        with torch.no_grad():
            output, _ = self._forward(batch._x, parameters, state, is_training=False)
            accuracy = torch.argmax(output, 1).eq(batch._y).sum().float() / len(batch)
            loss = self._criterion(output, batch._y)
        return {"loss": loss.item(), "accuracy": accuracy.item()}

    def evaluate(
        self,
        dataset: Dataset,
        parameters: List[torch.Tensor],
        state: List[torch.Tensor],
    ) -> Quality:
        """Average quality on a dataset"""
        mean_quality = None
        count = 0
        for _, batch in dataset.iterator(batch_size=250, shuffle=False, repeat=False):
            quality = self.quality(parameters, state, batch)
            if mean_quality is None:
                count = len(batch)
                mean_quality = quality
            else:
                # Streaming, sample-weighted incremental mean over batches.
                count += len(batch)
                weight = float(len(batch)) / count
                for key, value in mean_quality.items():
                    mean_quality[key] += weight * (quality[key] - mean_quality[key])
        return mean_quality

    def _forward(
        self,
        batched,
        parameters: List[torch.Tensor],
        state: List[torch.Tensor],
        is_training=False,
    ) -> Tuple[torch.Tensor, State]:
        """Run the model on `batched` kwargs after injecting `parameters` and
        `state` into the module; returns (logits, updated buffers)."""
        if is_training:
            self._model.train()
        else:
            self._model.eval()
        # Inject externally supplied tensors into the module in place.
        # Parameters are shared (no copy); buffers are cloned before use.
        for param, value in zip(self._model.parameters(), parameters):
            param.data = value
        for buffer, value in zip(self._model.buffers(), state):
            buffer.data = value.clone()
        output, *_ = self._model(**batched)
        state = [b.data for b in self._model.buffers()]
        return output, state

    def _create_model(self):
        """Instantiate the head matching the dataset type (sequence
        classification, POS tagging, or multiple choice) on the right device."""
        classes = ptl2classes[self._pretrained_lm]
        pretrained_weight_path = os.path.join(os.getenv("DATA"), "pretrained_weights")
        # define dataset types.
        vector_cls_sentence_datasets = [
            "mrpc",
            "sst2",
            "mnli",
            "qqp",
            "cola",
            "qnli",
            "rte",
            "agnews",
            "trec",
            "dbpedia",
            "yelp2",
            "semeval16",
            "germeval",
            "imdb",
        ]
        postagging_datasets = ["posptb", "conll2003"]
        multiplechoice_datasets = ["swag"]
        # define model.
        if self._data_name in vector_cls_sentence_datasets:
            model = classes.seqcls.from_pretrained(
                self._model_name,
                num_labels=self._data.data_iter.num_labels,
                cache_dir=pretrained_weight_path,
            )
        elif self._data_name in postagging_datasets:
            model = classes.postag.from_pretrained(
                self._model_name,
                out_dim=self._data.data_iter.num_labels,
                cache_dir=pretrained_weight_path,
            )
        elif self._data_name in multiplechoice_datasets:
            model = classes.multiplechoice.from_pretrained(
                self._model_name, cache_dir=pretrained_weight_path
            )
        # NOTE(review): a data_name outside all three lists leaves `model`
        # unbound and raises UnboundLocalError below — confirm intended.
        model.to(self._device)
        model.train()
        # NOTE(review): requires_grad is only ever set to True here (for LM
        # backbone and classifier params); nothing is frozen — confirm intended.
        for param_name, param in model.named_parameters():
            if self._pretrained_lm in param_name:
                param.requires_grad = True
            if "classifier" in param_name:
                param.requires_grad = True
        return model
"""classes for dataset."""
class BERTDataset(PyTorchDataset):
    """Tokenized text dataset for BERT-family models.

    Builds the task-specific data iterator/tokenizer, binds the matching
    batch-unpacking function, and exposes a Dirichlet non-IID split helper.
    """

    # Upper bound on batch size; read by BERTTask.
    max_batch_size = 256

    def __init__(
        self,
        model_name,
        pretrained_lm,
        data_name,
        split,
        data_root=os.getenv("DATA"),  # NOTE(review): unused in this body — confirm
        device="cuda",
        max_sequence_len=200,
    ):
        # create data_iter.
        self.pretrained_lm = pretrained_lm
        classes = ptl2classes[pretrained_lm]
        tokenizer = classes.tokenizer.from_pretrained(model_name)
        data_iter = task2dataiter[data_name](
            data_name, model_name, tokenizer, max_sequence_len
        )
        dataset = data_iter.trn_dl if split == "train" else data_iter.val_dl
        self.data_iter = data_iter
        # Bind the task-specific batch unpacker (module-level task2batched_fn
        # table, defined later in this module) as an instance method.
        self._batch_to_device = types.MethodType(
            task2batched_fn[self.data_iter.task], self
        )
        super().__init__(dataset, device=device, prepare_batch=self.prepare_batch)

    def dirichlet_split(
        self,
        num_workers: int,
        alpha: float = 1,
        seed: int = 0,
        distribute_evenly: bool = True,
    ) -> List[Dataset]:
        """Split into `num_workers` label-skewed shards via a Dirichlet(alpha)
        prior over labels; smaller alpha = more skew.

        `self._set` / `self._device` are presumably provided by the
        PyTorchDataset base class — not visible here.
        """
        indices_per_worker = distribute_data_dirichlet(
            self._set.golds, alpha, num_workers, num_auxiliary_workers=10, seed=seed
        )
        if distribute_evenly:
            # Re-balance so every worker gets (nearly) the same sample count.
            indices_per_worker = np.array_split(
                np.concatenate(indices_per_worker), num_workers
            )
        return [
            PyTorchDataset(
                Subset(self._set, indices),
                device=self._device,
                prepare_batch=self.prepare_batch,
            )
            for indices in indices_per_worker
        ]

    def prepare_batch(self, batch):
        """Unpack a raw batch into model kwargs + gold labels and move it to
        the target device."""
        uids, golds, batched, _ = self._batch_to_device(batch)
        # RoBERTa / DistilBERT forward() takes no token_type_ids input.
        if (
            self.pretrained_lm == "roberta" or self.pretrained_lm == "distilbert"
        ) and "token_type_ids" in batched:
            batched.pop("token_type_ids")
        return Batch(batched, golds).to(self._device)
"""functions for batch_to_device."""
def seqcls_batch_to_device(self, batched):
    """Unpack a sequence-classification batch.

    `batched` is (uids, input_ids, golds, attention_mask, token_type_ids).
    Returns (uids, golds, model_kwargs, None); the trailing None matches the
    4-tuple contract shared with tagging_batch_to_device.
    """
    uids, input_ids, golds, attention_mask, token_type_ids = batched
    model_inputs = {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "token_type_ids": token_type_ids,
    }
    return uids, golds, model_inputs, None
def tagging_batch_to_device(self, batched):
    """Unpack a token-tagging batch.

    Gold labels are gathered only at target positions (`if_tgts` boolean
    mask) and concatenated into one flat tensor. Returns the shared
    (uids, golds, model_kwargs, extra) 4-tuple; `extra` is the raw unmasked
    gold tensor for conll2003, None otherwise.
    """
    uids = batched[0]
    input_ids, attention_mask, _golds, if_tgts = batched[1:]
    golds = []
    for b_step in range(_golds.shape[0]):
        # Boolean-mask out the positions that actually carry a label.
        gold = _golds[b_step][if_tgts[b_step]]
        golds.append(gold)
    # NOTE(review): `self` is a BERTDataset (bound via types.MethodType) and
    # does not visibly define `self.conf` — this line likely raises
    # AttributeError for non-conll2003 tasks; confirm where `conf` is set.
    if self.conf.bert_conf_["task"] != "conll2003":
        return (
            uids,
            torch.cat(golds, dim=0),
            {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "if_tgts": if_tgts,
            },
            None,
        )
    return (
        uids,
        torch.cat(golds, dim=0),
        {"input_ids": input_ids, "attention_mask": attention_mask, "if_tgts": if_tgts},
        _golds,
    )
# Dispatch table: dataset name -> function that unpacks one raw batch into
# (uids, golds, model_kwargs, extra). BERTDataset.__init__ binds the selected
# entry as an instance method via types.MethodType.
task2batched_fn = {
    "mrpc": seqcls_batch_to_device,
    "sst2": seqcls_batch_to_device,
    "mnli": seqcls_batch_to_device,
    "qqp": seqcls_batch_to_device,
    "cola": seqcls_batch_to_device,
    "qnli": seqcls_batch_to_device,
    "rte": seqcls_batch_to_device,
    "posptb": tagging_batch_to_device,
    "swag": seqcls_batch_to_device,
    "agnews": seqcls_batch_to_device,
    "trec": seqcls_batch_to_device,
    "dbpedia": seqcls_batch_to_device,
    "yelp2": seqcls_batch_to_device,
    "semeval16": seqcls_batch_to_device,
    "conll2003": tagging_batch_to_device,
}
|
import sys
import os
import numpy as np
import glob
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pylab as plt
from matplotlib.backends.backend_pdf import PdfPages
from geoNet.utils import read_statsll, get_processed_stats_list
from geoNet.gmpe import readStationFile as rsf
from geoNet.gmpe.calculateGMPE import set_faultprop
#*****************************************************************************
# OBSERVATIONS
#*****************************************************************************
# Locations of the processed observed ground-motion records.
parent_dir_loc_obs="/nesi/projects/nesi00213/ObservedGroundMotions/ahsan/Mw4pt6_20100904_103801_11Jan2017/Vol1/data"
# NOTE(review): the station list is read from the Mw4pt9_20110429 event
# directory while everything else refers to the 20100904 event — confirm
# this path is intentional.
stats_dict_obs = read_statsll("/nesi/projects/nesi00213/ObservedGroundMotions/ahsan/Mw4pt9_20110429_190804_11Jan2017",
                              "20100904_103801_eventStats_2017-01-10.ll")
plot_dir_accBB_obs="accBB_1"
plot_dir_velBB_obs="velBB_1"
loc_accBB_obs="/".join([parent_dir_loc_obs, plot_dir_accBB_obs])
loc_velBB_obs="/".join([parent_dir_loc_obs, plot_dir_velBB_obs])
loc_acc_obs = loc_accBB_obs
loc_vel_obs = loc_velBB_obs
# reset stats_dict to only those stations that have processed records
stats_dict_obs=get_processed_stats_list(loc_vel_obs,stats_dict_obs)
#*****************************************************************************
# SIMULATIONS
#*****************************************************************************
# Locations of the broadband simulation outputs for the same event.
base_dir_sim="/nesi/projects/nesi00213/RunFolder/ahsan"
parent_dir_loc_sim="/".join([base_dir_sim, "ptSource_2010Sep04_v2_m4pt6_VMv1p64_FVM-h0p100_170123"])
stats_dict_sim = read_statsll(parent_dir_loc_sim, "HF_BB_stats.ll")
plot_dir_accBB_sim="Acc"
plot_dir_velBB_sim="Vel"
# The BB run directory name is not fixed, so glob for it and take the first match.
loc_accBB_sim=glob.glob("/".join([parent_dir_loc_sim, "BB", "*", plot_dir_accBB_sim]))[0]
loc_velBB_sim=glob.glob("/".join([parent_dir_loc_sim, "BB", "*", plot_dir_velBB_sim]))[0]
loc_acc_sim = loc_accBB_sim
loc_vel_sim = loc_velBB_sim
#*****************************************************************************
#*****************************************************************************
# Read the standard rupture file
#*****************************************************************************
srf_fname="/nesi/projects/nesi00213/RupModel/ahsan/2010Sep04_m4pt6/ptSource/Srf/2010Sep04_v2_m4pt6.srf"
FiniteFault = rsf.readSrfFile(srf_fname)
FiniteFault = rsf.Points_np(FiniteFault)
# set fault properties
faultprop = set_faultprop(Mw=4.6, rake=173., dip=74., Ztor=8.)
# Periods and rupture distances at which the empirical GMPE is evaluated.
periods_gmpe=np.array([0.1, 0.2, 0.5, 1.0, 3.0, 5.0, 8.0, 10.0])
Rrups_gmpe = np.logspace(np.log10(5.),np.log10(100.),30)
#get_empIM_v2(Rrup, period, faultprop, Rjb=None, Rtvz=0., V30measured=0., V30=250.)
# NOTE(review): `get_empIM_v2` is never imported in this script, so this
# line raises NameError as written — it presumably lives in
# geoNet.gmpe.calculateGMPE alongside set_faultprop; confirm and import it.
empIM_values, empIM_sigmas = get_empIM_v2(Rrups_gmpe, periods_gmpe, faultprop)
#def plot_IMvsRrup(Rrup, IM)
# Median and +/- one-sigma GMPE curves for the first period (pSA at 0.1 s),
# on log-log axes.
fig, ax = plt.subplots()
ax.plot(Rrups_gmpe, empIM_values[:,0],c='k', ls='solid')
ax.plot(Rrups_gmpe, empIM_values[:,0]*np.exp(empIM_sigmas[:,0]), c='k',ls='dashed')
ax.plot(Rrups_gmpe, empIM_values[:,0]*np.exp(-empIM_sigmas[:,0]), c='k', ls='dashed')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(Rrups_gmpe[0],Rrups_gmpe[-1])
fig.savefig("gmpe_pSA0.1.png")
# Stations present in both the observed and the simulated data sets.
# FIX: the original referenced undefined names `stats_dict` and `loc_acc`;
# the observed-data equivalents defined above (`stats_dict_obs`,
# `loc_acc_obs`) are clearly intended and are used here.
stats_codes_simObs = list(set(stats_dict_obs) & set(stats_dict_sim))
stats_dict_simObs = {}
for stat_code in stats_codes_simObs:
    lon, lat = stats_dict_sim[stat_code]
    stats_dict_simObs[stat_code] = (lon, lat)
sorted_stat_codes, sms_rrups = get_SMS_Rrups(stats_dict_simObs, FiniteFault)
period=periods_gmpe[0]
g=981. #cm/s^2
# NOTE(review): only the simulated pSA is converted from cm/s^2 to g —
# confirm the observed records are already in units of g.
pSA_sim = get_SMS_pSA(sorted_stat_codes, period, loc_acc_sim, comp='geom')/g
pSA_obs = get_SMS_pSA(sorted_stat_codes, period, loc_acc_obs, comp='geom')
fig, ax = plot_SMS_IM(sms_rrups[:,0], pSA_sim[:,0], pSA_obs[:,0])
#get_empIM_v2(Rrup, period, faultprop, Rjb=None, Rtvz=0., V30measured=0., V30=250.)
#empIM_values, empIM_sigmas = get_empIM_v2(Rrups_gmpe, periods_gmpe, faultprop)
pSA_gmpe, pSA_gmpe_std = get_empIM_v2(Rrups_gmpe, period, faultprop)
fig, ax = plot_IMvsRrup(Rrups_gmpe, pSA_gmpe, pSA_gmpe_std, fig=fig, ax=ax)
ax.legend(loc="best", scatterpoints=1)
fig.savefig("pSA_ex.png")
fig, ax = plot_SMS_IM_ratio(sms_rrups[:,0], pSA_obs[:,0]/pSA_sim[:,0])
fig.savefig("pSA_ex_ratio.png")
# NOTE(review): `sys` is already imported at the top of the file; this
# re-import is harmless but redundant. The exit below is a deliberate early
# stop — everything after it is dead code and never runs. Remove the exit
# (and fix `loc_acc` -> `loc_acc_obs` below) to re-enable the multi-period
# bias plot.
import sys
sys.exit("outa here")
# 100 log-spaced periods between 0.01 s and 10 s.
periods = np.logspace(start=np.log10(0.01), stop=np.log10(10.), num=100, base=10)
#periods = np.concatenate([
#                np.logspace(start=np.log10(0.01), stop=np.log10(0.1), num=100, base=10, endpoint=False),
#                np.logspace(start=np.log10(0.1), stop=np.log10(1.), num=100, base=10, endpoint=False),
#                np.logspace(start=np.log10(1.), stop=np.log10(10.), num=100, base=10, endpoint=True)
#                ])
#periods = np.concatenate([
#                np.logspace(start=np.log10(0.01), stop=np.log10(0.1), num=33, base=10, endpoint=False),
#                np.logspace(start=np.log10(0.1), stop=np.log10(1.), num=34, base=10, endpoint=False),
#                np.logspace(start=np.log10(1.), stop=np.log10(10.), num=33, base=10, endpoint=True)
#                ])
pSA_sim = get_SMS_pSA(sorted_stat_codes, periods, loc_acc_sim, comp='geom')
# NOTE(review): `loc_acc` is undefined (should be `loc_acc_obs`) — latent
# NameError, currently unreachable because of the sys.exit above.
pSA_obs = get_SMS_pSA(sorted_stat_codes, periods, loc_acc, comp='geom')
bias, std = get_bias(pSA_obs, pSA_sim)
figBias, axBias = plot_bias(bias, std, savefig=True)
|
from typing import List
import math
import sys
from functools import partial
from random import shuffle
import getpass
from .constants import ALL_DIRECTIONS, print, log, STRATEGY_HYPERPARAMETERS, ResourceTypes, Directions, LogicGlobals, StrategyTypes, INFINITE_DISTANCE, GAME_CONSTANTS, ValidActions, is_turn_during_night
# Tunable strategy hyperparameter: used below when choosing which perimeter
# positions around a resource cluster are worth defending near map edges.
MAX_DISTANCE_FROM_EDGE = STRATEGY_HYPERPARAMETERS['MAX_DISTANCE_FROM_EDGE']
class Resource:
    """A quantity of one resource type (wood / coal / uranium) on a map cell."""

    def __init__(self, r_type: str, amount: int):
        self.type = r_type
        self.amount = amount

    @property
    def fuel_amount(self):
        """Fuel obtained by converting the entire stack at the game's rate."""
        rate = GAME_CONSTANTS['PARAMETERS']['RESOURCE_TO_FUEL_RATE'][self.type.upper()]
        return rate * self.amount

    @property
    def can_harvest(self):
        """Whether the player's current research level allows collecting this."""
        kind = self.type.upper()
        if kind == ResourceTypes.WOOD:
            return True
        if kind == ResourceTypes.COAL:
            return bool(LogicGlobals.player.researched_coal())
        if kind == ResourceTypes.URANIUM:
            return bool(LogicGlobals.player.researched_uranium())
        return False
class ResourceCluster:
    """A connected group of same-type resource tiles plus the bookkeeping the
    strategy layer needs: perimeter positions to defend, current defense
    status, and a colonization score.

    NOTE(review): the source this was recovered from had its indentation
    mangled; nesting below was reconstructed from the visible control flow.
    """

    def __init__(self, r_type: str, positions):
        self.type = r_type
        # Maps resource Position -> None; used as an ordered, hashable key set.
        self._resource_positions = dict()
        self.total_amount = -1  # filled in by update_state
        self.pos_to_defend = []  # perimeter positions worth occupying
        self.pos_defended = []  # perimeter positions already holding a city tile
        # NOTE(review): initialized as a list but reassigned to a set in
        # update_state — unify the type if nothing depends on it.
        self.pos_defended_by_player = []
        self.min_loc = None  # (min x, min y) bounding-box corner
        self.max_loc = None  # (max x, max y) bounding-box corner
        self.center_pos = None  # bounding-box center
        self.current_score = 0
        self.n_workers_spawned = 0
        self.n_workers_sent_to_colonize = 0
        self.city_ids = set()  # ids of our cities adjacent to this cluster
        self.sort_position = None  # optional fixed anchor for sorting pos_to_defend
        self.needs_defending_from_opponent = False
        self.cart_id = None
        for pos in positions:
            self._resource_positions[pos] = None
        # Identity derives from the (fixed) set of member positions.
        self.id = self._hash = hash(tuple(self._resource_positions.keys()))

    def __repr__(self) -> str:
        return f"ResourceCluster({self.type}, {self.center_pos}, {self.id})"

    def __eq__(self, other) -> bool:
        return self.resource_positions == other.resource_positions

    def __hash__(self):
        return self._hash

    def calculate_score(self, player, opponent, scaling_factor=1):
        """Heuristic desirability of colonizing this cluster (higher = better):
        resource amount, discounted by existing defense, boosted by proximity
        to our cities and by distance from the opponent."""
        # From list 'L' of clusters with type (wood, coal, uranium) and number of resources in cluster:
        # 1.) Iterate through 'L' and compare type to number of research points and if compatible, add the number of resources of cluster to list 'K'
        # 2.) Reorder 'K' by number of resources
        # 3.) Divide all by value of first item
        # 4.) If cluster has any cities formed then divide by number of cities - 1 otherwise
        # 5.) Divide by distance to OUR nearest city outside of cluster
        # 6.) Multiply by distance to nearest opponent city or worker
        # 7.) Send worker to cluster with highest value
        self.current_score = self.total_amount / scaling_factor
        self.current_score /= max(1, len(self.pos_defended))
        # Closer to one of our cities -> larger score (divide by min distance).
        self.current_score /= min(
            [1]
            +
            [self.center_pos.distance_to(pos) for pos in player.city_pos]
        )
        # Farther from opponent cities/units -> larger score.
        self.current_score *= min(
            [1]
            +
            [self.center_pos.distance_to(pos) for pos in opponent.city_pos]
            +
            [self.center_pos.distance_to(unit.pos) for unit in opponent.units]
        )
        return self.current_score

    def _set_research_based_pos_to_defend(self, game_map):
        """For the research-based strategy: defend every empty cell in the
        bounding box inflated by one tile (only for clusters selected for
        colonization)."""
        self.pos_to_defend = set()
        if self.id in LogicGlobals.clusters_to_colonize_rbs:
            for x in range(self.min_loc[0] - 1, self.max_loc[0] + 2):
                for y in range(self.min_loc[1] - 1, self.max_loc[1] + 2):
                    if game_map.is_loc_within_bounds(x, y):
                        if game_map.get_cell(x, y).is_empty():
                            self.pos_to_defend.add(Position(x, y))

    def _set_smart_positions(self, game_map):
        """Choose perimeter positions edge by edge.

        For each of the four sides of the bounding box: when the cluster sits
        close to the corresponding map edge (within MAX_DISTANCE_FROM_EDGE),
        wall off the flanks toward that edge instead; otherwise defend the
        row/column just outside that side. Corners are added last.
        """
        self.pos_to_defend = set()
        # --- top side ---
        if self.min_loc[1] < MAX_DISTANCE_FROM_EDGE:
            if self.min_loc[0] >= MAX_DISTANCE_FROM_EDGE:
                self.pos_to_defend = self.pos_to_defend | {
                    Position(self.min_loc[0] - 1, y)
                    for y in range(0, self.min_loc[1] + 1)
                    if game_map.is_loc_within_bounds(self.min_loc[0] - 1, y)
                }
            if self.max_loc[0] < game_map.width - MAX_DISTANCE_FROM_EDGE:
                self.pos_to_defend = self.pos_to_defend | {
                    Position(self.max_loc[0] + 1, y)
                    for y in range(0, self.min_loc[1] + 1)
                    if game_map.is_loc_within_bounds(self.max_loc[0] + 1, y)
                }
        else:
            self.pos_to_defend = self.pos_to_defend | {
                Position(x, self.min_loc[1] - 1)
                for x in range(self.min_loc[0], self.max_loc[0] + 1)
                if game_map.is_loc_within_bounds(x, self.min_loc[1] - 1)
            }
        # --- bottom side ---
        if self.max_loc[1] > game_map.height - MAX_DISTANCE_FROM_EDGE - 1:
            if self.min_loc[0] >= MAX_DISTANCE_FROM_EDGE:
                self.pos_to_defend = self.pos_to_defend | {
                    Position(self.min_loc[0] - 1, y)
                    for y in range(self.max_loc[1], game_map.height)
                    if game_map.is_loc_within_bounds(self.min_loc[0] - 1, y)
                }
            if self.max_loc[0] < game_map.width - MAX_DISTANCE_FROM_EDGE:
                self.pos_to_defend = self.pos_to_defend | {
                    Position(self.max_loc[0] + 1, y)
                    for y in range(self.max_loc[1], game_map.height)
                    if game_map.is_loc_within_bounds(self.max_loc[0] + 1, y)
                }
        else:
            self.pos_to_defend = self.pos_to_defend | {
                Position(x, self.max_loc[1] + 1)
                for x in range(self.min_loc[0], self.max_loc[0] + 1)
                if game_map.is_loc_within_bounds(x, self.max_loc[1] + 1)
            }
        # --- left side ---
        if self.min_loc[0] < MAX_DISTANCE_FROM_EDGE:
            if self.min_loc[1] >= MAX_DISTANCE_FROM_EDGE:
                self.pos_to_defend = self.pos_to_defend | {
                    Position(x, self.min_loc[1] - 1)
                    for x in range(0, self.min_loc[0] + 1)
                    if game_map.is_loc_within_bounds(x, self.min_loc[1] - 1)
                }
            if self.max_loc[1] < game_map.height - MAX_DISTANCE_FROM_EDGE:
                self.pos_to_defend = self.pos_to_defend | {
                    Position(x, self.max_loc[1] + 1)
                    for x in range(0, self.min_loc[0] + 1)
                    if game_map.is_loc_within_bounds(x, self.max_loc[1] + 1)
                }
        else:
            self.pos_to_defend = self.pos_to_defend | {
                Position(self.min_loc[0] - 1, y)
                for y in range(self.min_loc[1], self.max_loc[1] + 1)
                if game_map.is_loc_within_bounds(self.min_loc[0] - 1, y)
            }
        # --- right side ---
        if self.max_loc[0] > game_map.width - MAX_DISTANCE_FROM_EDGE - 1:
            if self.min_loc[1] >= MAX_DISTANCE_FROM_EDGE:
                self.pos_to_defend = self.pos_to_defend | {
                    Position(x, self.min_loc[1] - 1)
                    for x in range(self.max_loc[0], game_map.width)
                    if game_map.is_loc_within_bounds(x, self.min_loc[1] - 1)
                }
            if self.max_loc[1] < game_map.height - MAX_DISTANCE_FROM_EDGE:
                self.pos_to_defend = self.pos_to_defend | {
                    Position(x, self.max_loc[1] + 1)
                    for x in range(self.max_loc[0], game_map.width)
                    if game_map.is_loc_within_bounds(x, self.max_loc[1] + 1)
                }
        else:
            self.pos_to_defend = self.pos_to_defend | {
                Position(self.max_loc[0] + 1, y)
                for y in range(self.min_loc[1], self.max_loc[1] + 1)
                if game_map.is_loc_within_bounds(self.max_loc[0] + 1, y)
            }
        # --- corners (skipped when too close to the matching map corner) ---
        p = Position(self.min_loc[0] - 1, self.min_loc[1] - 1)
        if game_map.is_within_bounds(p) and p.distance_to(Position(0, 0)) > MAX_DISTANCE_FROM_EDGE:
            self.pos_to_defend.add(p)
        p = Position(self.min_loc[0] - 1, self.max_loc[1] + 1)
        if game_map.is_within_bounds(p) and p.distance_to(Position(0, game_map.height - 1)) > MAX_DISTANCE_FROM_EDGE:
            self.pos_to_defend.add(p)
        p = Position(self.max_loc[0] + 1, self.min_loc[1] - 1)
        if game_map.is_within_bounds(p) and p.distance_to(Position(game_map.width - 1, 0)) > MAX_DISTANCE_FROM_EDGE:
            self.pos_to_defend.add(p)
        p = Position(self.max_loc[0] + 1, self.max_loc[1] + 1)
        if game_map.is_within_bounds(p) and p.distance_to(
                Position(game_map.width - 1, game_map.height - 1)) > MAX_DISTANCE_FROM_EDGE:
            self.pos_to_defend.add(p)

    def _set_basic_positions(self, game_map):
        """Defend every in-bounds, resource-free cell adjacent (incl.
        diagonals) to any member resource tile."""
        self.pos_to_defend = set()
        for r_pos in self._resource_positions:
            for pos in r_pos.adjacent_positions(include_center=True, include_diagonals=True):
                if game_map.is_within_bounds(pos) and not game_map.get_cell_by_pos(pos).has_resource():
                    self.pos_to_defend.add(pos)

    def update_state(self, game_map, opponent):
        """Refresh amount, bounding box, perimeter positions, threat status
        and adjacent-city bookkeeping from the current map."""
        cells = {
            game_map.get_cell_by_pos(p)
            for p in self._resource_positions.keys()
            if game_map.is_within_bounds(p)
        }
        # Keep only member tiles that still carry a resource.
        cells = {
            c for c in cells if c.resource is not None
        }
        self.total_amount = sum(
            cell.resource.amount for cell in cells  # if cell.resource is not None
        ) if cells else 0
        x_vals = [p.x for p in self._resource_positions.keys()]
        y_vals = [p.y for p in self._resource_positions.keys()]
        self.min_loc = (min(x_vals), min(y_vals))
        self.max_loc = (max(x_vals), max(y_vals))
        self.center_pos = Position(
            (self.max_loc[0] - self.min_loc[0]) // 2 + self.min_loc[0],
            (self.max_loc[1] - self.min_loc[1]) // 2 + self.min_loc[1],
        )
        self._set_basic_positions(game_map)
        # if LogicGlobals.player.current_strategy == StrategyTypes.RESEARCH_BASED:
        #     self._set_research_based_pos_to_defend(game_map)
        # else:
        #     if not cells:
        #         self._resource_positions = dict()
        #     elif not self.pos_to_defend or len(cells) != len(self._resource_positions):
        #
        #         self._resource_positions = dict()
        #         for cell in cells:
        #             self._resource_positions[cell.pos] = None
        #
        #         x_vals = [p.x for p in self._resource_positions.keys()]
        #         y_vals = [p.y for p in self._resource_positions.keys()]
        #
        #         self.min_loc = (min(x_vals), min(y_vals))
        #         self.max_loc = (max(x_vals), max(y_vals))
        #         self.center_pos = Position(
        #             (self.max_loc[0] - self.min_loc[0]) // 2 + self.min_loc[0],
        #             (self.max_loc[1] - self.min_loc[1]) // 2 + self.min_loc[1],
        #         )
        #
        #         # self._set_smart_positions(game_map)
        #         self._set_basic_positions(game_map)
        #
        #     log(f"Num to block for cluster at {self.center_pos}: {self.n_to_block}")
        #
        #
        #     # opponent_x_vals, opponent_y_vals = [], []
        #     # for unit in opponent.units:
        #     #     opponent_x_vals.append(unit.pos.x)
        #     #     opponent_y_vals.append(unit.pos.y)
        #     # for p in opponent.city_pos:
        #     #     opponent_x_vals.append(p.x)
        #     #     opponent_y_vals.append(p.y)
        #     # opponent_med_pos = Position(
        #     #     statistics.median(opponent_x_vals),
        #     #     statistics.median(opponent_y_vals),
        #     # )
        #     # self.pos_to_defend = sorted(
        #     #     self.pos_to_defend, key=opponent_med_pos.distance_to
        #     # )
        #
        #
        # Pick the anchor from which perimeter positions are prioritized:
        # nearest opponent presence, else nearest friendly presence, else a
        # previously fixed sort_position.
        if self.sort_position is None:
            opponent_positions = opponent.city_pos | opponent.unit_pos
            if opponent_positions and (not opponent_positions & self.pos_to_defend):
                # closest_opponent_pos = min(
                #     opponent_positions,
                #     key=lambda p: (self.center_pos.distance_to(p), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y)
                # )
                closest_opponent_pos = min(
                    opponent_positions,
                    key=lambda p: (self.center_pos.tile_distance_to(p, positions_to_avoid=self.resource_positions), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y)
                )
            else:
                to_search_pos = LogicGlobals.player.city_pos | LogicGlobals.player.unit_pos
                if not to_search_pos:
                    to_search_pos = {Position(0, 0)}
                closest_opponent_pos = min(
                    to_search_pos,
                    key=lambda p: (self.center_pos.tile_distance_to(p, positions_to_avoid=self.resource_positions), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y)
                )
        else:
            closest_opponent_pos = self.sort_position
        # self.pos_to_defend = sorted(
        #     self.pos_to_defend, key=lambda p: (closest_opponent_pos.distance_to(p), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y)
        # )
        self.pos_to_defend = sorted(
            self.pos_to_defend, key=lambda p: (closest_opponent_pos.tile_distance_to(p, positions_to_avoid=self.resource_positions), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y)
        )
        # The cluster needs defending when the opponent can reach one of our
        # perimeter positions at least as fast as we can (+1 slack).
        if LogicGlobals.opponent.units:
            distance_to_closest_enemy = {
                p: (min(p.distance_to(u.pos) for u in LogicGlobals.opponent.units), -min(p.distance_to(u.pos) for u in LogicGlobals.player.units) if LogicGlobals.player.units else 0, LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y)
                for p in self.pos_to_defend
            }
            pos_closest_to_enemy = min(
                distance_to_closest_enemy,
                key=distance_to_closest_enemy.get
            )
            opponent_distance, player_distance, *__ = distance_to_closest_enemy[pos_closest_to_enemy]
            self.needs_defending_from_opponent = -player_distance + 1 >= opponent_distance
        else:
            self.needs_defending_from_opponent = False
        # print(f"Resource cluster at {self.center_pos} needs defending: {self.needs_defending_from_opponent}")
        # Scan the inflated bounding box for city tiles.
        # NOTE(review): nesting reconstructed so pos_defended counts *any*
        # city tile (either player) while city_ids/pos_defended_by_player are
        # player-only — confirm against original behavior.
        self.city_ids = set()
        self.pos_defended = []
        self.pos_defended_by_player = set()
        for x in range(self.min_loc[0]-1, self.max_loc[0] + 2):
            for y in range(self.min_loc[1]-1, self.max_loc[1] + 2):
                if game_map.is_loc_within_bounds(x, y):
                    city_tile = game_map.get_cell(x, y).citytile
                    if city_tile is not None:
                        if city_tile.cityid in LogicGlobals.player.city_ids:
                            self.city_ids.add(city_tile.cityid)
                            self.pos_defended_by_player.add(city_tile.pos)
                        self.pos_defended.append(Position(x, y))

    @property
    def n_to_block(self):
        # Number of perimeter positions that should be occupied.
        return len(self.pos_to_defend)

    @property
    def n_defended(self):
        # Number of perimeter positions already covered by a city tile.
        return len(self.pos_defended)

    @property
    def resource_positions(self):
        # Member resource tiles as a set of Positions.
        return set(self._resource_positions.keys())
class Cell:
    """One map square: a position plus optional resource, city tile and road level."""

    def __init__(self, x, y):
        self.pos = Position(x, y)
        self.resource: Resource = None
        self.citytile = None
        self.road = 0

    def has_resource(self, include_wood_that_is_growing=True, min_amt=0):
        """True when the cell carries more than `min_amt` of a resource.

        With include_wood_that_is_growing=False, wood only counts once it has
        regrown to the configured maximum amount.
        """
        if self.resource is None:
            return False
        if include_wood_that_is_growing:
            return self.resource.amount > min_amt
        if self.resource.type != ResourceTypes.WOOD:
            return self.resource.amount > min_amt
        return self.resource.amount >= GAME_CONSTANTS["PARAMETERS"]["MAX_WOOD_AMOUNT"]

    def is_empty(self):
        """True when the cell has neither a city tile nor a usable resource."""
        return self.citytile is None and not self.has_resource()
# Module-level cache carrying GameMap-derived state (resource clusters)
# across GameMap instances: GameMap.save_state writes MAP_CACHE['map'] and
# GameMap.__init__ restores it via self.__dict__.update(...).
MAP_CACHE = {}
class GameMap:
def __init__(self, width, height):
    """Build an empty width x height grid of Cells and restore any cached
    state (resource clusters) from the module-level MAP_CACHE."""
    self.height = height
    self.width = width
    # Row-major grid: indexed as self.map[y][x].
    self.map: List[List[Cell]] = [None] * height
    self._resources = []
    self.resource_clusters = None
    for y in range(0, self.height):
        self.map[y] = [None] * width
        for x in range(0, self.width):
            self.map[y][x] = Cell(x, y)
    self.__dict__.update(MAP_CACHE.get('map', {}))
def save_state(self):
    """Persist the listed attributes into the module-level MAP_CACHE so a
    freshly constructed GameMap restores them in __init__."""
    MAP_CACHE['map'] = {
        key: self.__dict__[key]
        for key in [
            'resource_clusters',
        ]
    }
def get_cell_by_pos(self, pos):
    """Return the Cell at `pos`, or None when `pos` is out of bounds."""
    if not self.is_within_bounds(pos):
        return None
    return self.map[pos.y][pos.x]
def get_cell(self, x, y):
    """Return the Cell at (x, y), or None when the location is out of bounds."""
    if not self.is_loc_within_bounds(x, y):
        return None
    return self.map[y][x]
def is_loc_within_bounds(self, x, y):
    """Return True when (x, y) is a valid map coordinate.

    Fix: x (the column) must be checked against `width` and y (the row)
    against `height`; the original compared them the other way around,
    which only worked because competition maps are square. The grid is
    indexed as ``self.map[y][x]``, so the corrected check matches the
    actual layout.
    """
    return (0 <= x < self.width) and (0 <= y < self.height)
def is_within_bounds(self, pos):
    """Position-object convenience wrapper around is_loc_within_bounds."""
    return self.is_loc_within_bounds(pos.x, pos.y)
def _setResource(self, r_type, x, y, amount):
    """
    do not use this function, this is for internal tracking of state
    """
    # Overwrites the cell's resource with a fresh Resource object; assumes
    # (x, y) is in bounds (get_cell returns None otherwise and this raises).
    cell = self.get_cell(x, y)
    cell.resource = Resource(r_type, amount)
def max_collectors_allowed_at(self, pos):
    """Count the orthogonally adjacent in-bounds cells around `pos` that are
    not occupied by a city tile (i.e. cells where a collector could stand).

    Fix: the per-neighbor check dereferenced `pos` (the center cell)
    instead of `p` (the neighbor being tested), so every neighbor was
    judged by the center cell's city tile.
    """
    return sum(
        self.is_within_bounds(p) and self.get_cell_by_pos(p).citytile is None
        for p in pos.adjacent_positions(include_center=False, include_diagonals=False)
    )
def num_adjacent_resources(self, pos, include_center=True, include_wood_that_is_growing=True, check_for_unlock=False):
return sum(
self.get_cell_by_pos(p).has_resource(include_wood_that_is_growing=include_wood_that_is_growing) and (self.get_cell_by_pos(p).resource.can_harvest if check_for_unlock else True)
for p in pos.adjacent_positions(include_center=include_center)
if self.is_within_bounds(p)
)
def adjacent_resource_types(self, pos, include_center=True):
return set(
self.get_cell_by_pos(p).resource.type
for p in pos.adjacent_positions(include_center=include_center)
if self.is_within_bounds(p) and self.get_cell_by_pos(p).has_resource(include_wood_that_is_growing=True)
)
def resources(self, return_positions_only=False):
if not self._resources:
self._resources = [
cell.pos if return_positions_only else cell
for cell in self.cells()
if cell.has_resource()
]
return self._resources
def _check_for_cluster(self, position, resource_set, resource_type):
for step in ((-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)):
new_position = position.shift_by(*step)
if self.is_within_bounds(new_position) and new_position not in resource_set:
new_cell = self.get_cell_by_pos(new_position)
if new_cell.resource is not None and new_cell.resource.type == resource_type:
resource_set.add(new_position)
self._check_for_cluster(new_position, resource_set, resource_type)
return resource_set
def find_clusters(self):
resource_pos_found = set()
self.resource_clusters = set()
for cell in self.cells():
if cell.has_resource() and cell.pos not in resource_pos_found:
new_cluster_pos = self._check_for_cluster(cell.pos, {cell.pos}, cell.resource.type)
resource_pos_found = resource_pos_found | new_cluster_pos
new_cluster = ResourceCluster(cell.resource.type, new_cluster_pos)
self.resource_clusters.add(new_cluster)
self.save_state()
return self.resource_clusters
def update_clusters(self, opponent):
clusters_to_discard = set()
for cluster in self.resource_clusters:
cluster.update_state(
game_map=self, opponent=opponent
)
cluster_has_no_resources = cluster.total_amount <= 0
cluster_has_cities = any(p in LogicGlobals.player.city_pos for p in cluster.pos_to_defend)
if cluster_has_no_resources and not cluster_has_cities and LogicGlobals.player.current_strategy == StrategyTypes.STARTER:
clusters_to_discard.add(cluster)
self.resource_clusters = self.resource_clusters - clusters_to_discard
self.save_state()
def get_cluster_by_id(self, cluster_id):
for c in self.resource_clusters:
if c.id == cluster_id:
return c
def position_to_cluster(self, pos):
if not self.resource_clusters:
print("No clusters found!",)
return None
if pos is None:
return None
for cluster in self.resource_clusters:
if (cluster.min_loc[0] - 1 <= pos.x <= cluster.max_loc[0] + 1) and (
cluster.min_loc[1] - 1 <= pos.y <= cluster.max_loc[1] + 1):
return cluster
elif pos in cluster.pos_to_defend:
return cluster
return None
def positions(self):
""" Iterate over all positions of the map. """
for x in range(self.height):
for y in range(self.width):
yield Position(x, y)
def cells(self):
""" Iterate over all cells of the map. """
for x in range(self.height):
for y in range(self.width):
yield self.get_cell(x, y)
class Position:
    """An (x, y) grid coordinate with distance, pathing and lookup helpers.

    Instances memoize closest-resource and closest-city-tile lookups.
    Equality and hashing are by coordinates only, so distinct instances with
    the same (x, y) are interchangeable in sets and dict keys.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        # Memoized candidate positions of the closest resource, per type.
        self._closest_resource_pos = {
            ResourceTypes.WOOD: [],
            ResourceTypes.COAL: [],
            ResourceTypes.URANIUM: [],
        }
        # Memoized closest friendly city-tile position (None until computed).
        self._closest_city_pos = None

    def __sub__(self, pos) -> int:
        """Manhattan (L1) distance between the two positions."""
        return abs(pos.x - self.x) + abs(pos.y - self.y)

    def turn_distance_to(self, pos, game_map, cooldown, avoid_own_cities=False, include_target_road=False, debug=False):
        """Estimate the number of turns needed to travel to ``pos``.

        Starts from the path cost found by ``__turn_distance_to`` and adds an
        extra ``cooldown`` for each movement step that falls during night.
        """
        num_turns = self.__turn_distance_to(
            pos, game_map, cooldown, avoid_own_cities=avoid_own_cities,
            include_target_road=include_target_road, debug=debug
        )
        if num_turns >= INFINITE_DISTANCE or num_turns == 0:
            return num_turns
        num_turns_to_add = 0
        num_turns_left = num_turns
        turn_number = LogicGlobals.game_state.turn
        turn_number += 1
        num_turns_left -= 1
        while num_turns_left > 0:
            if is_turn_during_night(turn_number):
                # A night-time step costs one extra cooldown on top of the
                # normal advance below.
                num_turns_to_add += cooldown
                turn_number += cooldown
            turn_number += cooldown
            num_turns_left -= cooldown
        return num_turns + num_turns_to_add

    def __turn_distance_to(self, pos, game_map, cooldown, avoid_own_cities=False, include_target_road=False, debug=False):
        """Best-first search for the turn cost of moving from ``pos`` back to ``self``.

        Step cost per tile is ``max(1, cooldown - road_level)``. Opponent city
        tiles and own stationary units block movement (except tiles explicitly
        whitelisted via the endpoints' own cities). Targets more than 10 tiles
        away are approximated as Manhattan distance * cooldown; returns
        INFINITE_DISTANCE when no path is found.
        """
        if pos is None or not game_map.is_within_bounds(pos):
            return INFINITE_DISTANCE
        if pos == self:
            return 0
        elif (self - pos) > 10:
            return (self - pos) * cooldown
        else:
            i = 0
            if include_target_road:
                step = max(1, cooldown - game_map.get_cell_by_pos(pos).road)
            else:
                step = 1
            main_list = [(pos, step)]
            # Tiles of our own cities at either endpoint never block movement.
            tiles_not_blocked = {self, pos}
            for p in [self, pos]:
                cell = LogicGlobals.game_state.map.get_cell_by_pos(p)
                if cell.citytile is not None:
                    city_id = cell.citytile.cityid
                    if city_id in LogicGlobals.player.cities:
                        tiles_not_blocked = tiles_not_blocked | {c.pos for c in LogicGlobals.player.cities[city_id].citytiles}
            while self not in set(x[0] for x in main_list):
                # if debug:
                #     print(main_list)
                try:
                    next_pos, step = main_list[i]
                except IndexError:
                    return INFINITE_DISTANCE
                if step >= 10 * cooldown:
                    break  # give up once the path cost grows too large
                for p in next_pos.adjacent_positions(include_center=False):
                    if p == self:
                        return step
                    is_valid_to_move_to = p not in (LogicGlobals.opponent.city_pos - tiles_not_blocked)
                    tiles_blocked_by_units = {u.pos for u in LogicGlobals.player.units if (LogicGlobals.game_state.map.get_cell_by_pos(u.pos).citytile is not None and (u.current_task is not None and u.current_task[0] != ValidActions.MOVE))}
                    is_valid_to_move_to = is_valid_to_move_to and p not in (tiles_blocked_by_units - tiles_not_blocked)
                    if avoid_own_cities:
                        is_valid_to_move_to = is_valid_to_move_to and p not in (LogicGlobals.player.city_pos - tiles_not_blocked)
                    if game_map.is_within_bounds(p) and is_valid_to_move_to and p not in set(x[0] for x in main_list):
                        main_list.append((p, step + max(1, cooldown - game_map.get_cell_by_pos(p).road)))
                # Keep the frontier ordered by cost (with deterministic x/y
                # tie-breaking) so index ``i`` expands cheapest-first.
                main_list = sorted(main_list, key=lambda x: (x[1], LogicGlobals.x_mult * x[0].x, LogicGlobals.y_mult * x[0].y))
                i += 1
            for x in main_list:
                if x[0] == self:
                    return x[1]
            return step

    def pathing_distance_to(self, pos, game_map, avoid_own_cities=False, debug=False):
        """Tile-count path distance from ``pos`` back to ``self``, avoiding blocked tiles.

        Like ``__turn_distance_to`` but every step costs 1, the search radius
        is capped at 15 tiles, and the frontier is not re-sorted.
        """
        if pos is None or not game_map.is_within_bounds(pos):
            return INFINITE_DISTANCE
        if pos == self:
            return 0
        elif self - pos > 15:
            return self - pos
        else:
            i = 0
            step = 0
            main_list = [(pos, step)]
            tiles_not_blocked = {self, pos}
            for p in [self, pos]:
                cell = LogicGlobals.game_state.map.get_cell_by_pos(p)
                if cell.citytile is not None:
                    city_id = cell.citytile.cityid
                    if city_id in LogicGlobals.player.cities:
                        tiles_not_blocked = tiles_not_blocked | {c.pos for c in LogicGlobals.player.cities[city_id].citytiles}
            while self not in set(x[0] for x in main_list):
                # if debug:
                #     print(main_list)
                try:
                    next_pos, step = main_list[i]
                except IndexError:
                    return 100000
                if step >= 15:
                    break
                for p in next_pos.adjacent_positions(include_center=False):
                    if p == self:
                        return step + 1
                    is_valid_to_move_to = p not in (LogicGlobals.opponent.city_pos - tiles_not_blocked)
                    tiles_blocked_by_units = {u.pos for u in LogicGlobals.player.units if (LogicGlobals.game_state.map.get_cell_by_pos(u.pos).citytile is not None and (u.current_task is not None and u.current_task[0] != ValidActions.MOVE))}
                    is_valid_to_move_to = is_valid_to_move_to and p not in (tiles_blocked_by_units - tiles_not_blocked)
                    if avoid_own_cities:
                        is_valid_to_move_to = is_valid_to_move_to and p not in (LogicGlobals.player.city_pos - tiles_not_blocked)
                    if game_map.is_within_bounds(p) and is_valid_to_move_to and p not in set(x[0] for x in main_list):
                        main_list.append((p, step + 1))
                i += 1
            for x in main_list:
                if x[0] == self:
                    return x[1] + 1
            return step

    def tile_distance_to(self, pos, positions_to_avoid=None, debug=False):
        """Tile-count path distance ignoring cities/units, except explicit ``positions_to_avoid``."""
        if pos is None or not LogicGlobals.game_state.map.is_within_bounds(pos):
            return INFINITE_DISTANCE
        if pos == self:
            return 0
        elif self - pos > 10:
            return self - pos
        else:
            i = 0
            step = 0
            main_list = [(pos, step)]
            tiles_not_blocked = {self, pos}
            for p in [self, pos]:
                cell = LogicGlobals.game_state.map.get_cell_by_pos(p)
                if cell and cell.citytile is not None:
                    city_id = cell.citytile.cityid
                    if city_id in LogicGlobals.player.cities:
                        tiles_not_blocked = tiles_not_blocked | {c.pos for c in LogicGlobals.player.cities[city_id].citytiles}
            while self not in set(x[0] for x in main_list):
                # if debug:
                #     print(main_list)
                try:
                    next_pos, step = main_list[i]
                except IndexError:
                    return 100000
                if step >= 10:
                    break
                for p in next_pos.adjacent_positions(include_center=False):
                    if p == self:
                        return step + 1
                    is_valid_to_move_to = not positions_to_avoid or p not in positions_to_avoid
                    if LogicGlobals.game_state.map.is_within_bounds(p) and is_valid_to_move_to and p not in set(x[0] for x in main_list):
                        main_list.append((p, step + 1))
                i += 1
            for x in main_list:
                if x[0] == self:
                    return x[1] + 1
            return step

    def radial_distance_to(self, pos):
        """
        Returns L2 distance to pos
        """
        return math.sqrt((self.x - pos.x) ** 2 + (self.y - pos.y) ** 2)

    def distance_to(self, pos):
        """
        Returns Manhattan (L1/grid) distance to pos
        """
        return self - pos

    def is_adjacent(self, pos):
        """Return True if ``pos`` is this position or an orthogonal neighbour."""
        return (self - pos) <= 1

    def __eq__(self, pos) -> bool:
        return self.x == pos.x and self.y == pos.y

    def __hash__(self):
        return hash((self.x, self.y))

    def __str__(self) -> str:
        return f"({self.x}, {self.y})"

    def __repr__(self) -> str:
        return f"Position({self.x}, {self.y})"

    def adjacent_positions(self, include_center=True, include_diagonals=False):
        """Return the set of neighbouring positions (optionally incl. self and diagonals).

        Positions outside the map are NOT filtered out here; callers are
        expected to bounds-check.
        """
        adjacent_positions = {
            self.translate(Directions.NORTH, 1),
            self.translate(Directions.EAST, 1),
            self.translate(Directions.SOUTH, 1),
            self.translate(Directions.WEST, 1),
        }
        if include_center:
            adjacent_positions.add(self)
        if include_diagonals:
            adjacent_positions = adjacent_positions | {
                self.translate(Directions.NORTH, 1).translate(Directions.EAST, 1),
                self.translate(Directions.NORTH, 1).translate(Directions.WEST, 1),
                self.translate(Directions.SOUTH, 1).translate(Directions.EAST, 1),
                self.translate(Directions.SOUTH, 1).translate(Directions.WEST, 1)
            }
        return adjacent_positions

    def shift_by(self, x, y) -> 'Position':
        """Return a new Position offset by (x, y)."""
        return Position(self.x + x, self.y + y)

    def translate(self, direction, units) -> 'Position':
        """Return a new Position moved ``units`` tiles in ``direction``.

        NOTE(review): returns None (implicitly) for an unrecognised direction.
        """
        if direction == Directions.NORTH:
            return Position(self.x, self.y - units)
        elif direction == Directions.EAST:
            return Position(self.x + units, self.y)
        elif direction == Directions.SOUTH:
            return Position(self.x, self.y + units)
        elif direction == Directions.WEST:
            return Position(self.x - units, self.y)
        elif direction == Directions.CENTER:
            return Position(self.x, self.y)

    def reflect_about(self, pos):
        """Return this position mirrored through ``pos`` (point reflection)."""
        return Position(2 * pos.x - self.x, 2 * pos.y - self.y)

    def find_closest_city_tile(self, player, game_map):
        """ Find the closest city tile to this position.

        The result is memoized and recomputed only when the cached tile is no
        longer a city tile.

        Parameters
        ----------
        player : Player object
            Owner of the city tiles to consider.
        game_map : :GameMap:
            Map containing position and resources.

        Returns
        -------
        Position
            Position of closest city tile, or None if the player has no cities.
        """
        if self._closest_city_pos is None or game_map.get_cell_by_pos(self._closest_city_pos).citytile is None:
            if len(player.cities) > 0:
                closest_dist = math.inf
                for pos in player.city_pos:
                    dist = pos.distance_to(self)
                    if dist < closest_dist:
                        closest_dist = dist
                        self._closest_city_pos = pos
            else:
                return None
        return self._closest_city_pos

    def _find_closest_resource(self, resources_to_consider, game_map, tie_breaker_func=None):
        """Return the closest position holding any of the given resource types, or None.

        Candidate lists are memoized per resource type and rebuilt when any
        cached tile no longer holds a resource. NOTE(review): the candidate
        lists can retain non-minimal positions (appended while the running
        minimum was larger), but the final min() re-checks the true distance,
        so the returned position is still the closest.
        """
        for resource in resources_to_consider:
            if not self._closest_resource_pos[resource] or any(not game_map.get_cell_by_pos(p).has_resource() for p in self._closest_resource_pos[resource]):
                self._closest_resource_pos[resource] = []
                closest_dist = math.inf
                for resource_tile in game_map.resources():
                    if resource_tile.resource.type != resource:
                        continue
                    dist = resource_tile.pos.distance_to(self)
                    if dist <= closest_dist:
                        closest_dist = dist
                        self._closest_resource_pos[resource].append(resource_tile.pos)
        # positions = list(filter(None, [self._closest_resource_pos[r] for r in resources_to_consider]))
        positions = [p for r in resources_to_consider for p in self._closest_resource_pos[r]]
        if positions:
            if tie_breaker_func is None:
                return min(positions, key=lambda p: (self.distance_to(p), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y))
            else:
                return min(positions, key=lambda p: (self.distance_to(p), tie_breaker_func(p), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y))
        else:
            return None

    def _find_closest_resource_for_collecting(self, resources_to_consider, game_map, tie_breaker_func=None):
        """Like ``_find_closest_resource`` but skips tiles already at collector capacity.

        Not memoized: collector occupancy changes every turn.
        """
        closest_resource_pos = {}
        for resource in resources_to_consider:
            closest_resource_pos[resource] = []
            closest_dist = math.inf
            for resource_tile in game_map.resources():
                if resource_tile.resource.type != resource or len(LogicGlobals.RESOURCES_BEING_COLLECTED.get(resource_tile.pos, set())) >= LogicGlobals.game_state.map.max_collectors_allowed_at(resource_tile.pos):
                    continue
                dist = resource_tile.pos.distance_to(self)
                if dist <= closest_dist:
                    closest_dist = dist
                    closest_resource_pos[resource].append(resource_tile.pos)
        # positions = list(filter(None, [self._closest_resource_pos[r] for r in resources_to_consider]))
        positions = [p for r in resources_to_consider for p in closest_resource_pos[r]]
        if positions:
            if tie_breaker_func is None:
                return min(positions, key=lambda p: (self.distance_to(p), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y))
            else:
                return min(positions, key=lambda p: (self.distance_to(p), tie_breaker_func(p), LogicGlobals.x_mult * p.x, LogicGlobals.y_mult * p.y))
        else:
            return None

    def find_closest_wood(self, game_map, tie_breaker_func=None):
        """ Find the closest wood to this position.

        Parameters
        ----------
        game_map : :GameMap:
            Map containing position and resources.
        tie_breaker_func : callable, optional
            Function used to break ties in distance to position.

        Returns
        -------
        Position
            Position of closest resource.
        """
        # return self._find_closest_resource([ResourceTypes.WOOD], game_map, tie_breaker_func=tie_breaker_func)
        return self._find_closest_resource_for_collecting([ResourceTypes.WOOD], game_map, tie_breaker_func=tie_breaker_func)

    def find_closest_resource(self, player, game_map, r_type=None, tie_breaker_func=None):
        """ Find the closest resource to this position.

        Parameters
        ----------
        player : Player object
            Player wanting to find the closest resource.
            Used to determine if player can mine coal or uranium.
        game_map : :GameMap:
            Map containing position and resources.
        r_type : Constants.RESOURCE_TYPES, optional
            Type of resource to look for. If `None`,
            all resources are considered.
        tie_breaker_func : callable, optional
            Function used to break ties in distance to position.

        Returns
        -------
        Position
            Position of closest resource.
        """
        if r_type is not None:
            resources_to_consider = [r_type]
        else:
            resources_to_consider = [ResourceTypes.WOOD]
            if player.researched_coal():
                resources_to_consider.append(ResourceTypes.COAL)
            if player.researched_uranium():
                resources_to_consider.append(ResourceTypes.URANIUM)
        return self._find_closest_resource(resources_to_consider, game_map, tie_breaker_func=tie_breaker_func)

    def find_closest_resource_for_collecting(self, player, game_map, r_type=None, tie_breaker_func=None):
        """ Find the closest resource to this position.

        Excludes positions that are already at max
        collection capacity.

        Parameters
        ----------
        player : Player object
            Player wanting to find the closest resource.
            Used to determine if player can mine coal or uranium.
        game_map : :GameMap:
            Map containing position and resources.
        r_type : Constants.RESOURCE_TYPES, optional
            Type of resource to look for. If `None`,
            all resources are considered.
        tie_breaker_func : callable, optional
            Function used to break ties in distance to position.

        Returns
        -------
        Position
            Position of closest resource.
        """
        if r_type is not None:
            resources_to_consider = [r_type]
        else:
            resources_to_consider = [ResourceTypes.WOOD]
            if player.researched_coal():
                resources_to_consider.append(ResourceTypes.COAL)
            if player.researched_uranium():
                resources_to_consider.append(ResourceTypes.URANIUM)
        return self._find_closest_resource_for_collecting(resources_to_consider, game_map, tie_breaker_func=tie_breaker_func)

    def sort_directions_by_pathing_distance(self, target_pos, game_map, pos_to_check=None, tolerance=None, avoid_own_cities=False):
        """Sort candidate directions by pathing distance to ``target_pos``.

        NOTE(review): returns a single Directions.CENTER when already at the
        target, but a sorted *list* of directions otherwise — callers must
        handle both forms.
        """
        if self.distance_to(target_pos) == 0:
            return Directions.CENTER
        return self._sort_directions(
            dist_func=partial(
                target_pos.pathing_distance_to,
                avoid_own_cities=avoid_own_cities,
                game_map=game_map
            ),
            pos_to_check=pos_to_check,
            tolerance=tolerance
        )

    def sort_directions_by_turn_distance(self, target_pos, game_map, cooldown, pos_to_check=None, tolerance=None, avoid_own_cities=False):
        """Sort candidate directions by estimated turn distance to ``target_pos``.

        Uses Manhattan distance as the secondary key. Same mixed return type
        as ``sort_directions_by_pathing_distance``.
        """
        if self.distance_to(target_pos) == 0:
            return Directions.CENTER
        return self._sort_directions(
            dist_func=partial(
                target_pos.turn_distance_to,
                game_map=game_map,
                cooldown=cooldown,
                avoid_own_cities=avoid_own_cities,
                include_target_road=True
            ),
            secondary_dist_func=target_pos.distance_to,
            pos_to_check=pos_to_check,
            tolerance=tolerance
        )

    def _default_positions_to_check(self):
        """Map each direction (incl. center) to the position one step that way."""
        return {
            direction: self.translate(direction, 1)
            for direction in ALL_DIRECTIONS
        }

    def _sort_directions(self, dist_func, secondary_dist_func=None, pos_to_check=None, tolerance=None):
        """Return the candidate directions sorted by (dist_func, secondary, direction).

        When ``tolerance`` is given, directions whose primary distance exceeds
        the best by more than ``tolerance`` are dropped first.
        """
        if pos_to_check is None:
            pos_to_check = self._default_positions_to_check()
        dir_pos = list(pos_to_check.items())
        dists = {d: (dist_func(p), secondary_dist_func(p) if secondary_dist_func is not None else 0) for d, p in dir_pos}
        if tolerance is not None:
            dists = {k: v for k, v in dists.items() if v[0] <= tolerance + min(v[0] for v in dists.values())}
        return sorted(dists, key=lambda x: (dists.get(x), x))

    def direction_to(self, target_pos: 'Position', pos_to_check=None, do_shuffle=True) -> Directions:
        """ Return closest position to target_pos from this position

        Parameters
        ----------
        target_pos : Position
            Target position to move to. Can be more than 1 unit away.
        pos_to_check : dict
            Dictionary with keys as directions and values as positions
            corresponding to a move in that direction.
        do_shuffle : bool
            Option to shuffle directions so that a random
            one is chosen when multiple have a min distance.

        Returns
        -------
        Direction
            Direction to move
        """
        if self.distance_to(target_pos) == 0:
            return Directions.CENTER
        if pos_to_check is None:
            pos_to_check = {
                direction: self.translate(direction, 1)
                for direction in ALL_DIRECTIONS
            }
        dir_pos = list(pos_to_check.items())
        # Shuffling is skipped for the author's local user — presumably to
        # keep runs deterministic while debugging; confirm before relying on it.
        if do_shuffle and getpass.getuser() != 'Paul':
            shuffle(dir_pos)
        dists = {d: target_pos.distance_to(p) for d, p in dir_pos}
        return min(dists, key=lambda x: (dists.get(x), x))
|
#!/usr/bin/env python
#
# Copyright (c) 2006 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Create binary symbol map out of linker map file
"""
import sys
import struct
import re
# Longest symbol name stored in the binary table; longer names are truncated.
MAXSTRING = 63
# Table entry layout: little-endian u64 address followed by a fixed-size,
# NUL-padded name field (MAXSTRING characters + terminator).
symtabfmt = "<Q%ds" % (MAXSTRING + 1)
# objdump-style symbol lines: "<addr> <l|g> <type> .<section> <size> <name>".
funcline = re.compile(r'([0-9a-f]+)\s+[lg]\s+.\s+\.text\s+([0-9a-f]+)\s+(.*)$')
bssline = re.compile(r'([0-9a-f]+)\s+[lg]\s+[a-zA-Z]\s+\.bss\s+([0-9a-f]+)\s+(.*)$')
dataline = re.compile(r'([0-9a-f]+)\s+[lg]\s+[a-zA-Z]\s+\.data\s+([0-9a-f]+)\s+(.*)$')
# Per-object-file header emitted by objdump: "file.o: file format ...".
fileexp = re.compile(r'([^\s]+):\s+file format')
# Linker-map line giving a section's load address for a given object file.
startfile = re.compile(r'\.(text|bss|data)\s+(0x[0-9a-f]+)\s+0x[0-9a-f]+\s+(.*)$')
def read_obdump(inp):
    """Parse an objdump symbol listing.

    Args:
        inp: iterable of text lines (``objdump -t`` style output).

    Returns:
        dict with keys ``'text'``, ``'bss'`` and ``'data'``; each value maps
        an object-file name to a list of (address, symbol_name) tuples found
        in that section.
    """
    funcs = {}
    data = {}
    bss = {}
    fname = ''
    for line in inp:
        line = line.strip()
        res = funcline.match(line)
        if (res):
            funcs.setdefault(fname, []).append((int(res.group(1), 16), res.group(3)))
            continue
        res = bssline.match(line)
        if (res):
            start = int(res.group(1), 16)
            # group(2) is the symbol size (was misleadingly named 'end');
            # zero-sized symbols are skipped.
            size = int(res.group(2), 16)
            if (size):
                bss.setdefault(fname, []).append((start, res.group(3)))
            # BUG FIX (consistency/perf): a line matches at most one pattern,
            # so skip the remaining regex attempts like the .text branch does.
            continue
        res = dataline.match(line)
        if (res):
            start = int(res.group(1), 16)
            size = int(res.group(2), 16)
            if (size):
                data.setdefault(fname, []).append((start, res.group(3)))
            continue
        res = fileexp.match(line)
        if (res):
            fname = res.group(1)
            continue
    return {'text' : funcs, 'bss' : bss, 'data' : data}
def generate(kmapf, obmapf, out):
    """Write the binary symbol table built from a kernel map and an objdump listing.

    kmapf:  open text handle of the linker map file.
    obmapf: open text handle of the objdump symbol dump.
    out:    open binary handle the packed table is written to.
    """
    symbols_by_section = read_obdump(obmapf)
    for raw in kmapf:
        match = startfile.match(raw.strip())
        if match is None:
            continue
        section = match.group(1)
        base = int(match.group(2), 16)
        source = match.group(3)
        if source not in symbols_by_section[section]:
            continue
        entries = symbols_by_section[section][source]
        entries.sort(key=lambda entry: entry[0])
        for addr, name in entries:
            record = (source + ':' + name).encode('ascii')
            out.write(struct.pack(symtabfmt, addr + base, record[:MAXSTRING]))
    # A zero entry terminates the table.
    out.write(struct.pack(symtabfmt, 0, b''))
def main():
    """Command-line entry point; returns the process exit status."""
    if (len(sys.argv) != 4):
        print("Usage: %s <kernel.map> <nm dump> <output.bin>" % sys.argv[0])
        return 1
    # BUG FIX: context managers guarantee the three files are closed even if
    # generate() raises (the old code leaked all handles on error).
    with open(sys.argv[1], 'r') as kmapf, \
         open(sys.argv[2], 'r') as obmapf, \
         open(sys.argv[3], 'wb') as out:
        generate(kmapf, obmapf, out)
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
|
from __future__ import division
from ..lab1.Solver import Solver
from ..lab2.SimplexMethod import get_basis_matrix, get_cannonical_form, get_basis_cost_vector
from sympy import zeros, Matrix
from sympy.functions import transpose
import bisect
class DualSimplexMethod(object):
    """Dual simplex method for linear programs (sympy matrices).

    :type matrix_c: Matrix
    :type matrix_A: Matrix
    :type matrix_b: Matrix
    :type precision: float
    """

    def __init__(self, matrix_c, matrix_A, matrix_b, precision, condition_operators=None):
        self.matrix_c = matrix_c
        self._matrix_A = matrix_A
        self.matrix_b = matrix_b
        self.precision = precision
        self.m, self.n = matrix_A.shape
        # Default: every constraint is an equality.
        if condition_operators is None:
            self.condition_operators = ["="] * self.m
        else:
            self.condition_operators = condition_operators
        self.solver = Solver(self.precision)

    def solve(self, basis_indexes_set, maximize, vector_y=None):
        """Bring the problem to canonical form and run the dual simplex algorithm.

        :type basis_indexes_set: list[int]
        :param maximize: True to maximize the objective, False to minimize.
        :param vector_y: optional initial dual solution; derived from the
            basis when omitted.
        """
        self._matrix_A, self.matrix_c = get_cannonical_form(self._matrix_A, self.condition_operators, self.matrix_c, maximize)
        self.m, self.n = self._matrix_A.shape
        basis_indexes_set.sort()
        not_basis_indexes_set = sorted(set(range(self.n)) - set(basis_indexes_set))
        if vector_y is None:
            vector_y = transpose(get_basis_cost_vector(basis_indexes_set, self.matrix_c)) * get_basis_matrix(basis_indexes_set, self._matrix_A).inv()
        # Estimates (reduced costs) of the non-basic variables.
        vector_kaplan = zeros(self.m + self.n, 1)
        for j in not_basis_indexes_set:
            vector_kaplan[j, 0] = (vector_y * self._matrix_A[:, j])[0, 0] - self.matrix_c[j, 0]
        return self.dual_simplex_algorithm(basis_indexes_set, not_basis_indexes_set, vector_kaplan)

    def dual_simplex_algorithm(self, basis_indexes_set, not_basis_indexes_set, vector_kaplan):
        """Iterate the dual simplex until the pseudo-plan becomes feasible.

        :type basis_indexes_set: list[int]
        :type not_basis_indexes_set: list[int]
        :returns: tuple (basis_plan, basis_indexes_set) once every basic
            component of the plan is non-negative.
        :raises Exception: when the direct task's constraints are incompatible
            (no valid pivot column exists).
        """
        basis_matrix = zeros(self.m, len(basis_indexes_set))
        for i, j in enumerate(basis_indexes_set):
            basis_matrix[:, i] = self._matrix_A[:, j]
        inverse_basis_matrix = basis_matrix.inv()
        while True:
            # Pseudo-plan: kappa = B^-1 * b.
            vector_kappa = inverse_basis_matrix * self.matrix_b
            for j in range(vector_kappa.shape[0]):
                if vector_kappa[j, 0] < 0:
                    break
            else:
                # All basic components non-negative -> feasible optimal plan.
                basis_plan = zeros(self.n, 1)
                for i, j in enumerate(basis_indexes_set):
                    basis_plan[j, 0] = vector_kappa[i, 0]
                return basis_plan, basis_indexes_set
            for k, j in enumerate(basis_indexes_set):
                if vector_kappa[k, 0] < 0:
                    vector_mu = zeros(self.n, 1)
                    vector_sigma = []
                    for j_nb in not_basis_indexes_set:
                        vector_mu[j_nb, 0] = inverse_basis_matrix[k, :] * self._matrix_A[:, j_nb]
                        if vector_mu[j_nb, 0] < 0:
                            vector_sigma.append(-vector_kaplan[j_nb, 0] / vector_mu[j_nb, 0])
                        else:
                            vector_sigma.append(None)
                    min_sigma_index = 0
                    min_sigma = vector_sigma[0]
                    for i, sigma in enumerate(vector_sigma):
                        if sigma is None:
                            continue
                        elif min_sigma is None or sigma < min_sigma:
                            min_sigma = sigma
                            min_sigma_index = i
                    if min_sigma is None:
                        raise Exception("Limitations of direct task are incompatible")
                    min_sigma_index = not_basis_indexes_set[min_sigma_index]
                    # Pivot: min_sigma_index enters the basis, j leaves it.
                    basis_indexes_set.pop(k)
                    bisect.insort_left(basis_indexes_set, min_sigma_index)
                    not_basis_indexes_set.remove(min_sigma_index)
                    bisect.insort_left(not_basis_indexes_set, j)
                    vector_kaplan[min_sigma_index, 0] = 0
                    # NOTE(review): the entering variable's estimate is zeroed
                    # *before* the update loop below, which then adds
                    # min_sigma * mu to it again — verify against the textbook
                    # update (the entering variable's estimate should end at 0).
                    for j_nb in not_basis_indexes_set:
                        vector_kaplan[j_nb, 0] = vector_kaplan[j_nb, 0] + min_sigma * vector_mu[j_nb, 0]
                    vector_kaplan[j] = min_sigma
                    # BUG FIX: the recomputed basis matrix must be inverted —
                    # this variable is used as B^-1 at the top of the loop
                    # (cf. basis_matrix.inv() above and solve()); the old code
                    # assigned the raw basis matrix.
                    inverse_basis_matrix = get_basis_matrix(basis_indexes_set, self._matrix_A).inv()
                    break
|
<reponame>OmriNach/WizardHat
"""Plotting of data in `buffers.Buffer` objects.
Rough implementation of a standalone bokeh server.
Currently just grabs the most recent sample from Buffers.buffer every time the
periodic callback executes. This is probably not the best way to do it, because
the sampling rate is arbitrarily based on the value for
`add_periodic_callback()`. For example, you can set the callback time to
something faster than the sampling rate and you'll see that each value in
`buffer.data` gets sampled a few times (starts to look like a step
function). Right now there's no good way to check that we're not dropping
samples when updating.
Also just two manually retrieved channels for now as a proof of concept, but
the gridplot method seems to work well for this.
TODO:
* Figure out sampling method- possibly using Data's self.updated attribute
to trigger an update? Maybe we can update everything "in-place" because
buffer.data already has a built-in window..
* Automatically determine device name/set to title?
"""
from functools import partial
from threading import Thread
from bokeh.layouts import row,gridplot, widgetbox
from bokeh.models.widgets import Button, RadioButtonGroup
from bokeh.models import ColumnDataSource
from bokeh.palettes import all_palettes as palettes
from bokeh.plotting import figure
from bokeh.server.server import Server
from tornado import gen
import time
class Plotter():
    """Base class for plotting.

    Wraps a bokeh ``Server`` whose single app is built by ``_app_manager``.
    Subclasses are expected to define ``self.plots`` (in ``_set_layout``) and
    ``self._update_thread`` before ``run_server`` is called.
    """

    def __init__(self, buffer, autostart=True):
        """Construct a `Plotter` instance.

        Args:
            buffer (buffers.Buffer): Data object managing data to be plotted.
            autostart (bool): Whether subclasses should start the server
                immediately after construction.
        """
        self.buffer = buffer
        # output_file('WizardHat Plotter.html')
        self.server = Server({'/': self._app_manager})
        #self.add_widgets()
        self.autostart = autostart

    def add_widgets(self):
        # Currently unused (the call in __init__ is commented out); kept for
        # future UI work.
        self.stream_option = RadioButtonGroup(labels=['EEG', 'ACC', 'GYR'], active=0)
        self.filter_option = RadioButtonGroup(labels=['Low Pass', 'High Pass', 'Band Pass'], active=0)
        self.widget_box = widgetbox(self.stream_option,
                                    self.filter_option,
                                    width=300)

    def run_server(self):
        """Start the bokeh server, open the app in a browser and begin updates."""
        self.server.start()
        self.server.io_loop.add_callback(self.server.show, '/')
        # NOTE(review): _update_thread is created by subclasses (e.g. Lines);
        # calling run_server on a bare Plotter would raise AttributeError.
        self._update_thread.start()
        self.server.io_loop.start()

    def _app_manager(self, curdoc):
        # Called by bokeh for each new session document.
        self._curdoc = curdoc
        self._set_layout()
        self._set_callbacks()

    def _set_callbacks(self):
        # Attach the subclass-built grid of plots to the session document.
        #self._curdoc.add_root(row(self.widget_box,
        #                          gridplot(self.plots, toolbar_location="left",
        #                                   plot_width=1000)))
        self._curdoc.add_root(gridplot(self.plots, toolbar_location="left",
                                       plot_width=1000))
        self._curdoc.title = "WizardHat"
class Lines(Plotter):
    """Multiple (stacked) line plots.

    Expects a two-dimensional `buffers.Buffer` object (such as `TimeSeries`) where
    all columns after the first give the data used to plot individual lines.

    Multiple data sources may be given in a list, assuming they have the same
    form (number of channels and rows/samples); the user can cycle between
    plots of each data source with the 'D' key.
    """

    def __init__(self, buffer, n_samples=5000, palette='Category10',
                 bgcolor="white", **kwargs):
        """Construct a `Lines` instance.

        Args:
            buffer (buffers.Buffer or List[buffers.Buffer]): Objects with data
                to be plotted. Multiple objects may be passed in a list, in
                which case the plot can cycle through plotting the data in
                each object by pressing 'd'. However, all data objects passed
                should have a similar form (e.g. `TimeSeries` with same number
                of rows/samples and channels).
            n_samples (int): Number of most-recent samples retained in the plot.
            palette (str): Name of the bokeh palette used for the channel lines.
            bgcolor (str): Background fill colour for each figure.
        """
        super().__init__(buffer, **kwargs)
        # TODO: initialize with existing samples in self.buffer.data
        data_dict = {name: []  # [self.buffer.data[name][:n_samples]]
                     for name in self.buffer.dtype.names}
        self._source = ColumnDataSource(data_dict)
        # Background thread that forwards new buffer samples to the plot.
        self._update_thread = Thread(target=self._get_new_samples)
        self._n_samples = n_samples
        self._colors = palettes[palette][len(self.buffer.ch_names)]
        self._bgcolor = bgcolor
        if self.autostart:
            self.run_server()

    def _set_layout(self):
        """Create one time-series figure per channel, stacked vertically."""
        self.plots = []
        for i, ch in enumerate(self.buffer.ch_names):
            p = figure(plot_height=100,
                       tools="xpan,xwheel_zoom,xbox_zoom,reset",
                       x_axis_type='datetime', y_axis_location="right")#,y_range=(-10,10))
            p.x_range.follow = "end"  # always follows new data in source
            p.x_range.follow_interval = 5  # in s
            p.x_range.range_padding = 0  # we can play with this stuff
            p.yaxis.axis_label = ch
            p.background_fill_color = self._bgcolor
            # p.background_fill_alpha = 0.5
            p.line(x='time', y=ch, alpha=0.8, line_width=2,
                   color=self._colors[i], source=self._source)
            self.plots.append([p])

    @gen.coroutine
    def _update(self, data_dict):
        # Runs on the bokeh IO loop; streams new samples into the plot source,
        # keeping at most _n_samples points.
        self._source.stream(data_dict, self._n_samples)

    def _get_new_samples(self):
        """Forward new buffer samples to the bokeh document (runs in a thread)."""
        # TODO: Time delay of 1 second is necessary because there seems to be
        # a plotting issue related to server booting; the delay allows the
        # server to boot before samples get sent to it.
        time.sleep(1)
        while True:
            self.buffer.updated.wait()
            data_dict = {name: self.buffer.last_samples[name]
                         for name in self.buffer.dtype.names}
            try:  # don't freak out if the IOLoop/document isn't ready yet
                self._curdoc.add_next_tick_callback(partial(self._update,
                                                            data_dict))
            except AttributeError:
                pass
            self.buffer.updated.clear()
|
<gh_stars>1-10
"""
给定两个单词 word1 和 word2,找到使得 word1 和 word2 相同所需的最小步数,每步可以删除任意一个字符串中的一个字符。
示例 1:
输入: "sea", "eat"
输出: 2
解释: 第一步将"sea"变为"ea",第二步将"eat"变为"ea"
说明:
给定单词的长度不超过500。
给定单词中的字符只含有小写字母。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/delete-operation-for-two-strings
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
    """Minimum number of single-character deletions to make two words equal.

    (LeetCode 583, "Delete Operation for Two Strings".) Three equivalent
    solutions:
      * minDistance  — via the longest common subsequence (LCS).
      * minDistance2 — direct deletion DP.
      * minDistance3 — deletion DP with two rolling rows (O(len(word2)) memory).
    """

    def minDistance(self, word1: str, word2: str) -> int:
        """LCS formulation: answer = len1 + len2 - 2 * LCS(word1, word2)."""
        if word2 == word1: return 0
        _len1, _len2 = len(word1) + 1, len(word2) + 1
        # dp[i][j] = length of the LCS of word1[:i] and word2[:j]
        dp = [[0 for _ in range(_len2)] for _ in range(_len1)]
        for i in range(_len1):
            for j in range(_len2):
                if i == 0 or j == 0: continue  # LCS with an empty prefix is 0
                if word1[i - 1] == word2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1  # equal chars extend the LCS
                else:
                    # Otherwise the LCS drops one char from either word.
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
        # Everything outside the LCS must be deleted from both words.
        return len(word1) + len(word2) - 2 * dp[-1][-1]

    def minDistance2(self, word1: str, word2: str) -> int:
        """Direct DP: dp[i][j] = min deletions to equalize word1[:i] and word2[:j]."""
        if word2 == word1: return 0
        _len1, _len2 = len(word1) + 1, len(word2) + 1
        dp = [[0 for _ in range(_len2)] for _ in range(_len1)]
        for i in range(_len1):
            for j in range(_len2):
                if i == 0 or j == 0:
                    dp[i][j] = i + j  # empty prefix: delete all of the other word
                elif word1[i - 1] == word2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1]  # equal chars need no deletion
                else:
                    # Delete from word1 (dp[i-1][j]) or from word2 (dp[i][j-1]).
                    dp[i][j] = 1 + min(dp[i - 1][j], dp[i][j - 1])
        return dp[-1][-1]

    def minDistance3(self, word1: str, word2: str) -> int:
        """Same DP as minDistance2 but with two rolling rows (O(len(word2)) memory)."""
        if word2 == word1: return 0
        _len1, _len2 = len(word1) + 1, len(word2) + 1
        # dp holds row i-1, temp is row i being filled; they swap after each row.
        dp, temp = [0 for _ in range(_len2)], [0 for _ in range(_len2)]
        for i in range(_len1):
            for j in range(_len2):
                # (Removed a dead `temp[j] = 0` — every branch below assigns temp[j].)
                if i == 0 or j == 0:
                    temp[j] = i + j  # empty prefix: delete all of the other word
                elif word1[i - 1] == word2[j - 1]:
                    temp[j] = dp[j - 1]  # equal chars need no deletion
                else:
                    temp[j] = 1 + min(dp[j], temp[j - 1])
            dp, temp = temp, dp
        return dp[-1]
print(Solution().minDistance3("", "a"))
|
<reponame>Microsoft/CameraTraps
#
# Merge high-confidence detections from one results file into another file,
# when the target file does not detect anything on an image.
#
# Does not currently attempt to merge every detection based on whether individual
# detections are missing; only merges detections into images that would otherwise
# be considered blank.
#
# If you want to literally merge two .json files, see combine_api_outputs.py.
#
#%% Constants and imports
import json
import os
from tqdm import tqdm
#%% Structs
class MergeDetectionsOptions:
    """Options controlling how detections are merged between results files.

    Attributes:
        max_detection_size: maximum normalized box area (w*h) a source
            detection may have to be eligible for transfer.
        source_confidence_thresholds: per-source-file minimum confidence a
            detection needs before it is transferred.
        target_confidence_threshold: if the target image already has a
            detection of a category at or above this confidence, nothing
            is transferred for that category.
        categories_to_include / categories_to_exclude: optional category
            filters; set at most one of the two.
    """

    def __init__(self):
        self.max_detection_size = 1.01
        self.source_confidence_thresholds = [0.8]
        self.target_confidence_threshold = 0.8
        # Mutually exclusive filters; specify one (but not both) of these.
        self.categories_to_include = None
        self.categories_to_exclude = None
#%% Main function
def merge_detections(source_files,target_file,output_file,options=None):
    """Merge high-confidence detections from [source_files] into [target_file],
    writing the merged results to [output_file].

    For each image and each processed category: if the target results have no
    detection of that category at or above options.target_confidence_threshold,
    every detection of that category in each source file that meets that file's
    source confidence threshold and whose normalized box area is at most
    options.max_detection_size is copied into the target image's detection
    list (tagged with 'transferred_from').

    source_files -- one filename or a list of MD-results .json filenames
    target_file  -- MD-results .json file to merge into (must exist)
    output_file  -- where to write the merged results
    options      -- MergeDetectionsOptions; defaults used if None
    """
    if isinstance(source_files,str):
        source_files = [source_files]

    if options is None:
        options = MergeDetectionsOptions()

    assert not ((options.categories_to_exclude is not None) and \
                (options.categories_to_include is not None)), \
                'categories_to_include and categories_to_exclude are mutually exclusive'

    # Normalize category filters to ints for the comparisons below
    if options.categories_to_exclude is not None:
        options.categories_to_exclude = [int(c) for c in options.categories_to_exclude]

    if options.categories_to_include is not None:
        options.categories_to_include = [int(c) for c in options.categories_to_include]

    assert len(source_files) == len(options.source_confidence_thresholds)

    for fn in source_files:
        assert os.path.isfile(fn), 'Could not find source file {}'.format(fn)

    assert os.path.isfile(target_file)

    # Bug fix: os.path.dirname() returns '' when output_file has no directory
    # component, and os.makedirs('') raises; only create a directory when
    # there is one.
    output_dir = os.path.dirname(output_file)
    if output_dir:
        os.makedirs(output_dir,exist_ok=True)

    with open(target_file,'r') as f:
        output_data = json.load(f)

    print('Loaded results for {} images'.format(len(output_data['images'])))

    fn_to_image = {}

    for im in output_data['images']:
        fn_to_image[im['file']] = im

    # Record provenance of transferred detections in the output metadata
    if 'detections_transferred_from' not in output_data['info']:
        output_data['info']['detections_transferred_from'] = []

    if 'detector' not in output_data['info']:
        output_data['info']['detector'] = 'MDv4 (assumed)'

    detection_categories_raw = output_data['detection_categories'].keys()

    # Determine whether we should be processing all categories, or just a subset
    # of categories.
    detection_categories = []
    if options.categories_to_exclude is not None:
        for c in detection_categories_raw:
            if int(c) not in options.categories_to_exclude:
                detection_categories.append(c)
            else:
                print('Excluding category {}'.format(c))
    elif options.categories_to_include is not None:
        for c in detection_categories_raw:
            if int(c) in options.categories_to_include:
                print('Including category {}'.format(c))
                detection_categories.append(c)
    else:
        detection_categories = detection_categories_raw

    for i_source_file,source_file in enumerate(source_files):

        print('Processing detections from file {}'.format(source_file))

        with open(source_file,'r') as f:
            source_data = json.load(f)

        if 'detector' in source_data['info']:
            source_detector_name = source_data['info']['detector']
        else:
            source_detector_name = os.path.basename(source_file)

        output_data['info']['detections_transferred_from'].append(os.path.basename(source_file))
        output_data['info']['detector'] = output_data['info']['detector'] + ' + ' + source_detector_name

        # Source and target must use the same category mapping
        assert source_data['detection_categories'] == output_data['detection_categories']

        source_confidence_threshold = options.source_confidence_thresholds[i_source_file]

        for source_im in tqdm(source_data['images']):

            image_filename = source_im['file']
            assert image_filename in fn_to_image

            source_detections_this_image = source_im['detections']
            target_detections_this_image = fn_to_image[image_filename]['detections']

            detections_to_transfer = []

            for detection_category in detection_categories:

                target_detections_this_category = \
                    [det for det in target_detections_this_image if det['category'] == \
                     detection_category]

                max_target_confidence_this_category = 0.0
                if len(target_detections_this_category) > 0:
                    max_target_confidence_this_category = max([det['conf'] for \
                        det in target_detections_this_category])

                # This is already a detection, no need to proceed looking for
                # detections to transfer
                if max_target_confidence_this_category >= options.target_confidence_threshold:
                    continue

                source_detections_this_category_raw = [det for det in \
                    source_detections_this_image if det['category'] == detection_category]

                # Boxes are normalized x/y/w/h; only look at boxes at or below
                # the size threshold
                source_detections_this_category_filtered = [
                    det for det in source_detections_this_category_raw if \
                    det['bbox'][2]*det['bbox'][3] <= options.max_detection_size]

                for det in source_detections_this_category_filtered:
                    if det['conf'] >= source_confidence_threshold:
                        det['transferred_from'] = source_detector_name
                        detections_to_transfer.append(det)

            # ...for each detection category

            if len(detections_to_transfer) > 0:
                detections = fn_to_image[image_filename]['detections']
                detections.extend(detections_to_transfer)

                # Keep the legacy max_detection_conf field consistent
                fn_to_image[image_filename]['max_detection_conf'] = \
                    max([d['conf'] for d in detections])

        # ...for each image

    # ...for each source file

    with open(output_file,'w') as f:
        json.dump(output_data,f,indent=2)

    print('Saved merged results to {}'.format(output_file))
#%% Test driver

# Interactive/scratch cells (never executed on import or as a script; run
# cell-by-cell in an IDE). Paths are user- and project-specific examples.
if False:

    #%%

    organization = 'sdsu-schmidt'

    # Merge MDv4 person detections into MDv5 results for one organization
    options = MergeDetectionsOptions()
    options.max_detection_size = 0.1
    options.target_confidence_threshold = 0.3
    options.categories_to_include = [1]
    source_files = ['/home/user/postprocessing/' + organization + '/' + organization + '-2022-05-14/combined_api_outputs/' + organization + '-2022-05-14_detections.filtered_rde_file_0_mdv4_0.60_0.85_15_0.20.json']
    options.source_confidence_thresholds = [0.8]
    target_file = '/home/user/postprocessing/' + organization + '/' + organization + '-mdv5-camcocoinat-2022-05-12/combined_api_outputs/' + organization + '-mdv5-camcocoinat-2022-05-12_detections.filtered_rde_file_1_mdv5-camcocoinat_0.20_0.85_15_0.20.json'
    output_file = '/home/user/postprocessing/' + organization + '/merged-detections/mdv4_mdv5-camcocoinat-2022-05-12.json'
    merge_detections(source_files, target_file, output_file, options)

    # Same, merging from two source files with per-file thresholds
    options = MergeDetectionsOptions()
    options.max_detection_size = 0.1
    options.target_confidence_threshold = 0.3
    options.categories_to_include = [1]
    source_files = [
        '/home/user/postprocessing/' + organization + '/' + organization + '-2022-05-14/combined_api_outputs/' + organization + '-2022-05-14_detections.filtered_rde_file_0_mdv4_0.60_0.85_15_0.20.json',
        '/home/user/postprocessing/' + organization + '/' + organization + '-mdv5-camonly-2022-05-12/combined_api_outputs/' + organization + '-mdv5-camonly-2022-05-12_detections.filtered_rde_file_2_mdv5-camonly_0.20_0.85_15_0.20.json']
    options.source_confidence_thresholds = [0.8,0.5]
    target_file = '/home/user/postprocessing/' + organization + '/' + organization + '-mdv5-camcocoinat-2022-05-12/combined_api_outputs/' + organization + '-mdv5-camcocoinat-2022-05-12_detections.filtered_rde_file_1_mdv5-camcocoinat_0.20_0.85_15_0.20.json'
    output_file = '/home/user/postprocessing/' + organization + '/merged-detections/mdv4_mdv5-camonly_mdv5-camcocoinat-2022-05-12.json'
    merge_detections(source_files, target_file, output_file, options)

    #%%

    # iWildCam variants of the same two merges
    options = MergeDetectionsOptions()
    options.max_detection_size = 0.1
    options.target_confidence_threshold = 0.3
    options.categories_to_include = [1]
    source_files = ['/home/user/postprocessing/iwildcam/iwildcam-mdv4-2022-05-01/combined_api_outputs/iwildcam-mdv4-2022-05-01_detections.json']
    options.source_confidence_thresholds = [0.8]
    target_file = '/home/user/postprocessing/iwildcam/iwildcam-mdv5-camcocoinat-2022-05-02/combined_api_outputs/iwildcam-mdv5-camcocoinat-2022-05-02_detections.json'
    output_file = '/home/user/postprocessing/iwildcam/merged-detections/mdv4_mdv5-camcocoinat-2022-05-02.json'
    merge_detections(source_files, target_file, output_file, options)

    options = MergeDetectionsOptions()
    options.max_detection_size = 0.1
    options.target_confidence_threshold = 0.3
    options.categories_to_include = [1]
    source_files = [
        '/home/user/postprocessing/iwildcam/iwildcam-mdv4-2022-05-01/combined_api_outputs/iwildcam-mdv4-2022-05-01_detections.json',
        '/home/user/postprocessing/iwildcam/iwildcam-mdv5-camonly-2022-05-02/combined_api_outputs/iwildcam-mdv5-camonly-2022-05-02_detections.json',
        ]
    options.source_confidence_thresholds = [0.8,0.5]
    target_file = '/home/user/postprocessing/iwildcam/iwildcam-mdv5-camcocoinat-2022-05-02/combined_api_outputs/iwildcam-mdv5-camcocoinat-2022-05-02_detections.json'
    output_file = '/home/user/postprocessing/iwildcam/merged-detections/mdv4_mdv5-camonly_mdv5-camcocoinat-2022-05-02.json'
    merge_detections(source_files, target_file, output_file, options)

#%% Command-line driver
|
<filename>src/fastjet/_utils.py
import awkward as ak
import fastjet._swig
# light wrapping for the functions to raise an error if the user inputs awkward arrays into functions meant for swig
def sorted_by_E(data):
    """Return *data* sorted (ascending) by energy along the last axis.

    Awkward arrays are sorted via ak.argsort on the record's ``E`` field;
    any other input is forwarded to the swig-wrapped C++ implementation.
    """
    if not isinstance(data, ak.Array):
        return fastjet._swig.sorted_by_E(data)
    try:
        energy = data.E
    except AttributeError:
        raise AttributeError(
            "Needs either correct coordinates or embedded vector backend"
        ) from None
    return data[ak.argsort(energy, axis=-1)]
def sorted_by_pt(data):
    """Return *data* sorted (ascending) by transverse momentum along the
    last axis; non-awkward input is forwarded to the swig layer.
    """
    if not isinstance(data, ak.Array):
        return fastjet._swig.sorted_by_pt(data)
    try:
        pt = data.pt
    except AttributeError:
        raise AttributeError(
            "Needs either correct coordinates or embedded vector backend"
        ) from None
    return data[ak.argsort(pt, axis=-1)]
def sorted_by_pz(data):
    """Return *data* sorted (ascending) by longitudinal momentum (pz) along
    the last axis; non-awkward input is forwarded to the swig layer.
    """
    if not isinstance(data, ak.Array):
        return fastjet._swig.sorted_by_pz(data)
    try:
        pz = data.pz
    except AttributeError:
        raise AttributeError(
            "Needs either correct coordinates or embedded vector backend"
        ) from None
    return data[ak.argsort(pz, axis=-1)]
def sorted_by_rapidity(data):
    """Return *data* sorted (ascending) by rapidity along the last axis.

    NOTE(review): for awkward arrays this sorts by ``data.eta``
    (pseudorapidity), mirroring the original behavior -- confirm against
    the vector backend whether true rapidity was intended.
    """
    if isinstance(data, ak.Array):
        try:
            temprap = data.eta
        except AttributeError:
            raise AttributeError(
                "Needs either correct coordinates or embedded vector backend"
            ) from None
        tmpsort = ak.argsort(temprap, axis=-1)
        return data[tmpsort]
    else:
        # Bug fix: previously delegated to fastjet._swig.sorted_by_pz,
        # silently sorting plain inputs by pz instead of rapidity.
        return fastjet._swig.sorted_by_rapidity(data)
def join(*argv):
    """Combine up to four PseudoJets via the swig layer.

    Rejects awkward-array inputs (use awkward's own methods instead) and
    raises ValueError for more than four arguments.
    """
    if isinstance(argv[0], ak.Array):
        raise TypeError("Use inbuilt methods for awkward arrays")
    for arg in argv:
        if isinstance(arg, ak.Array):
            raise AttributeError(
                "All arguments need to be of the same type"
            ) from None
    if len(argv) > 4:
        raise ValueError("Length exceeded")
    # Dispatch to the matching 1-4 argument swig overload.
    return fastjet._swig.join(*argv)
def dot_product(a, b):
    """Four-vector dot product via the swig layer; rejects awkward arrays."""
    if isinstance(a, ak.Array) or isinstance(b, ak.Array):
        raise TypeError("Use inbuilt methods for Awkward Array") from None
    return fastjet._swig.dot_product(a, b)
def sort_indices(indices, values):
    """Sort *indices* by the corresponding *values* via the swig layer;
    rejects awkward arrays.

    Bug fix: the non-awkward branch previously called this function
    recursively (unconditional infinite recursion -> RecursionError)
    instead of delegating to fastjet._swig.sort_indices.
    """
    if isinstance(indices, ak.Array) or isinstance(values, ak.Array):
        raise TypeError("Use inbuilt methods for Awkward Array") from None
    else:
        return fastjet._swig.sort_indices(indices, values)
def theta(a, b):
    """Angle between two PseudoJets via the swig layer; rejects awkward
    arrays."""
    if isinstance(a, ak.Array) or isinstance(b, ak.Array):
        raise TypeError("Use inbuilt methods for Awkward Array") from None
    return fastjet._swig.theta(a, b)
def have_same_momentum(a, b):
    """True if two PseudoJets carry identical momenta (swig layer);
    rejects awkward arrays."""
    if isinstance(a, ak.Array) or isinstance(b, ak.Array):
        raise TypeError("Use inbuilt methods for Awkward Array") from None
    return fastjet._swig.have_same_momentum(a, b)
def cos_theta(a, b):
    """Cosine of the angle between two PseudoJets (swig layer); rejects
    awkward arrays."""
    if isinstance(a, ak.Array) or isinstance(b, ak.Array):
        raise TypeError("Use inbuilt methods for Awkward Array") from None
    return fastjet._swig.cos_theta(a, b)
def PtYPhiM(pt, y, phi, m):
    """Build a four-vector from pt / rapidity / phi / mass via the swig
    layer; rejects awkward-array inputs."""
    if any(isinstance(v, ak.Array) for v in (pt, y, phi, m)):
        raise TypeError("Use inbuilt methods for Awkward Array") from None
    return fastjet._swig.PtYPhiM(pt, y, phi, m)
|
<reponame>tristanengst/apex-utils<filename>conditional_imle/ConditionalIMLE.py
"""File with conditional IMLE implementation.
To use this utility, you need to do the following:
1. Your network needs to return a list of outputs, where the ith element is the
network's output (to the loss function) at the ith level.
2. In your model's forward function, you need a `loi` argument that specifies
the level of of the network to return examples from. When this is specified
the output should not be returned inside a list.
3. You need to define a `get_zs()` function. See the function definition and
documentation below.
4. In your model's forward function, in every level, you need the ability to
accept BS data points and k * BS latent codes, and return k * BS generated
results. The first k results should come from the first data point, the
second k results from the second, etc.
EXAMPLE ------------------------------------------
Let's say we're sampling latent codes for four examples, and we want to
sample three latent codes per example at a time. Our network has only one
level (indexed as level zero).
In the model's forward function, we detect that there are more codes than
there are data points. We call `torch.repeat_interleave(x, k, axis=0)` to
expand (copying memory 😞) the input data to match the number of codes. This
allows for evaluating many latent codes on many data points in parallel.
What if the network has more than one level? At the level we're currently
sampling for, we will repeat_interleave anything that needs to be passed
into the level other than the codes, following the process above.
"""
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
def make_list(x, length=1):
    """Coerce *x* to a list of exactly *length* elements.

    A list already of the right length passes through unchanged; a
    single-element list is repeated; anything else (including an empty
    list) is wrapped and repeated. A list of any other length > 1 raises
    ValueError.
    """
    if isinstance(x, list):
        if len(x) == length:
            return x
        if len(x) == 1:
            return x * length
        if len(x) > 1:
            raise ValueError(f"Can not convert list {x} to length {length}")
    # Non-list (or empty list): wrap and repeat.
    return [x] * length
def get_new_codes(data, model, loss_fn, num_levels, get_zs, num_samples=128,
    code_bs=1, sample_parallelism=1, num_prints=0, num_workers=6, **kwargs):
    """Sample and keep, per level, the latent code with the lowest loss for
    each example in [data].

    data -- a Dataset wrapping the batch of data
    model -- the model being optimized
    loss_fn -- the loss function; should be defined on a batch of
                            data to return the loss on each example (ie.
                            returns a vector of losses)
    num_levels -- the number of levels of sampling
    get_zs -- a function that produces a latent code for an
                            example at a given level of sampling.
                            NOTE(review): called both positionally as
                            get_zs(level) and with keywords
                            get_zs(bs=..., device=..., level=...) below,
                            as in the original -- confirm its signature.
    num_samples -- the number of samples to make per data example
    sample_parallelism -- the number of samples to make in parallel per data
                            example
    num_prints -- number of times to print sampling information
                            (currently unused, kept for compatibility)

    Returns a list of per-level code tensors, one row per example.
    """
    ############################################################################
    # Check arguments
    ############################################################################
    # Bug fix: the original tested `code_bs % len(data)` and
    # `sample_parallelism % num_samples`, which rejects the valid
    # configurations (batch size dividing the dataset size, parallelism
    # dividing the sample count) -- including the defaults -- and accepts
    # invalid ones.
    if code_bs <= 0 or not len(data) % code_bs == 0:
        raise ValueError()
    if sample_parallelism <= 0 or not num_samples % sample_parallelism == 0:
        raise ValueError()

    # Allow scalar or per-level settings for each of these.
    code_bs = make_list(code_bs, num_levels)
    sample_parallelism = make_list(sample_parallelism, num_levels)
    num_samples = make_list(num_samples, num_levels)

    # Bug fixes: `bs` and `device` were never defined in the original.
    bs = len(data)
    device = next(model.parameters()).device

    level_codes = [torch.zeros((bs,) + get_zs(level).shape, device=device)
        for level in range(num_levels)]

    for level in tqdm(range(num_levels), desc="Levels", dynamic_ncols=True):

        ns = num_samples[level]
        sp = sample_parallelism[level]
        cbs = code_bs[level]  # bug fix: was `code_bs[levels]` (NameError)

        # Bug fix: the loader was previously built once with the *list*
        # `code_bs` as batch_size; build it per level with this level's
        # integer batch size.
        loader = DataLoader(data, batch_size=cbs, shuffle=False,
            pin_memory=True, num_workers=num_workers)

        # Best loss seen so far for each example, at this level.
        least_losses = torch.full((bs,), float("inf"), device=device)

        for i in tqdm(range(ns // sp), desc="Sampling", dynamic_ncols=True):

            for idx,(x,y) in enumerate(loader):

                start_idx, end_idx = cbs * idx, cbs * (idx + 1)
                least_losses_batch = least_losses[start_idx:end_idx]

                # Codes already fixed at earlier levels, plus `sp` fresh
                # candidates per example at this level.
                old_codes = [l[start_idx:end_idx] for l in level_codes[:level]]
                new_codes = get_zs(bs=cbs * sp, device=device, level=level)
                test_codes = old_codes + [new_codes]

                losses = loss_fn(
                    model(x.to(device), test_codes, loi=level, **kwargs),
                    y[level].to(device)
                )

                if sp > 1:
                    # Keep only the best of the `sp` parallel candidates for
                    # each example. Bug fix: these views used `code_bs`
                    # (a list by this point); use the integer `cbs`.
                    _, idxs = torch.min(losses.view(cbs, sp), axis=1)
                    new_codes = new_codes.view((cbs, sp) + new_codes.shape[1:])
                    new_codes = new_codes[torch.arange(cbs), idxs]
                    losses = losses.view(cbs, sp)[torch.arange(cbs), idxs]

                # Adopt a candidate wherever it beats the current best.
                change_idxs = losses < least_losses_batch
                # Bug fix: `level_idx` was undefined; index with `level`.
                level_codes[level][start_idx:end_idx][change_idxs] = new_codes[change_idxs]
                least_losses[start_idx:end_idx][change_idxs] = losses[change_idxs]

    return level_codes
|
<filename>item.py
#!/usr/bin/env python
import sys
from os.path import isfile, basename, join as pjoin
from subprocess import Popen
from xml.etree import ElementTree as ET
lorem="Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."
slots="Head Neck Waist MainHand OffHand Body Arm Hand Ring Feet Symbol/Ki Tattoo".split()
CARDSPERPAGE=4
CARDSPERROW=2
def escape(multiline):
    """Replace newlines with HTML <br/> tags so text survives HTML layout."""
    return "<br/>".join(multiline.split("\n"))
def tonicerstring(root,tags=None,html=False):
    """Serialize an ElementTree element with newlines added for readability.

    With tags=None, break between every pair of adjacent tags ("><").
    Otherwise insert a newline after each closing tag named in *tags*; if
    *html* is true, the serialized text is first run through escape()
    (newlines -> <br/>).

    NOTE(review): written for Python 2, where ET.tostring returns str; on
    Python 3 it returns bytes, so these str-argument replace() calls would
    raise TypeError.
    """
    if tags is None:
        return ET.tostring(root).replace("><",">\n<")
    else:
        s = ET.tostring(root)
        if html:
            s = escape(s)
        for tag in tags:
            # Put each listed closing tag on its own line.
            s = s.replace("</%s>"%tag, "</%s>\n"%tag)
        return s
def text(s):
    """Stringify *s* for an ElementTree .text field; None or anything that
    stringifies to "" becomes a single space (keeps tags non-empty)."""
    if s is None:
        return " "
    return str(s) or " "
def HTMLCard(it):
    """Build and return the <div id="card"> element for one Item.

    Weapons and armor get extra header/table sections; generic items get
    only the header, keyword line, feature list, flavor text, and the
    value/level boxes.
    """
    isweapon=isinstance(it, Weapon)
    isarmor = isinstance(it, Armor)
    card = ET.Element("div", id="card")
    # header
    header = ET.SubElement(card, "div", id="cardheader", Class=it.itemtype.lower())
    # Weapons show their group in the slot box; everything else shows the slot.
    if not isweapon:
        ET.SubElement(header, "div", id="slot", Class="littlebox").text=text(it.slot)
    else:
        ET.SubElement(header, "div", id="slot", Class="littlebox").text=text(it.group)
    name = it.name
    # Append the enhancement string to the display name when it contains a
    # digit 1-19. NOTE(review): for multi-digit enhancements (e.g. "+12")
    # several values of i match and the enhancement is appended more than
    # once -- likely why the `break` below was being considered.
    for i in range(1,20):
        if str(i) in it.enhancement:
            name += " %s"%it.enhancement
            # break
    # if it.enhancement
    ET.SubElement(header, "div", id="name").text=text(name)
    if isweapon: # insert Weapon Group
        pass
        # ET.SubElement(header, "div", id="weapongroup", Class="littlebox").text=text(it.slot)
    ET.SubElement(header, "div", id="description").text=text(it.description)
    if it.keywords:
        ET.SubElement(card, "div", id="keywords").text="Keywords: "+", ".join(it.keywords)
    # card body
    body = ET.SubElement(card, "div", id="cardbody")
    if isarmor: # add Armor Table
        armordiv=ET.SubElement(body, "div", id="armordiv", Class="subclass")
        armortable=ET.SubElement(armordiv, "table", Class="armortable")
        hrow = ET.SubElement(armortable, "tr", Class="head")
        for s in "AC Enhancement Check Speed".split():
            ET.SubElement(hrow, "td").text=s
        hrow = ET.SubElement(armortable, "tr")
        for s in (it.ACBonus, it.enhancement, it.armorCheck, it.speedCheck):
            ET.SubElement(hrow, "td").text=text(s)
    if isweapon: # add Weapon Table
        weapondiv=ET.SubElement(body, "div", id="weapondiv", Class="subclass")
        weapontable=ET.SubElement(weapondiv, "table", Class="weapontable")
        hrow = ET.SubElement(weapontable, "tr", Class="head")
        for s in "Hand Type Prof Damage Range".split():
            ET.SubElement(hrow, "td").text=s
        hrow = ET.SubElement(weapontable, "tr")
        for s in (it.slot, it.weapontype, it.proficiency, it.dice, it.range):
            ET.SubElement(hrow, "td").text=text(s)
        if it.properties:
            # Each property becomes a list item; known properties get their
            # rules text from Weapon.Properties appended after the name.
            weaponprops=ET.SubElement(weapondiv, "div", id="properties")
            ul=ET.SubElement(weaponprops, "ul")
            ul.text=" "
            for p in it.properties:
                definition = Weapon.Properties.get(p, "")
                if definition:
                    txt = "%s: %s"%(p, definition)
                else:
                    txt = text(p)
                # "Name: detail" entries get the name bolded.
                feats = txt.split(':',1)
                if len(feats) > 1:
                    bold,feat=feats
                    li = ET.SubElement(ul, "li")
                    b = ET.SubElement(li,'b')
                    b.text=bold+":"
                    a = ET.SubElement(li, 'a')
                    a.text=feat
                else:
                    ET.SubElement(ul, "li").text=text(txt)
    # features
    # if it.features:
    features = ET.SubElement(body, "div", id="features")
    ul=ET.SubElement(features, "ul")
    ul.text=" "
    for feat in it.features:
        # Same bolding convention as weapon properties: "Name: detail".
        feats = feat.split(':',1)
        if len(feats) > 1:
            bold,feat=feats
            li = ET.SubElement(ul, "li")
            b = ET.SubElement(li,'b')
            b.text=bold+":"
            a = ET.SubElement(li, 'a')
            a.text=feat
        else:
            ET.SubElement(ul, "li").text=text(feat)
    # bottom row
    ET.SubElement(card, "div", id="flavor").text=text(it.flavor)
    # Bare-number values get a default "gp" unit appended.
    value=it.value
    if value and value[-1] in '1234567890':
        value = value+'gp'
    ET.SubElement(card, "div", id="value", Class="littlebox").text=text(value)
    ET.SubElement(card, "div", id="level", Class="littlebox").text="Lvl "+it.level
    # print tonicerstring(card)
    return card
def HTMLCardList(itemlist, perrow=CARDSPERROW, perpage=CARDSPERPAGE):
    """Lay item cards out into HTML page tables of perrow cells per row and
    perpage cards per page; returns the list of page <table> elements."""
    pages = []
    row = None
    for index, item in enumerate(itemlist):
        if index % perpage == 0:
            # Start a new page.
            pagetable = ET.Element("table", cellpadding="0", cellspacing="5px")
            pages.append(pagetable)
        if index % perrow == 0:
            # Start a new row on the current page.
            row = ET.SubElement(pagetable, "tr")
        ET.SubElement(row, "td").append(HTMLCard(item))
    return pages
class Item(object):
    """A D&D item card, (de)serializable to/from XML via ElementTree.

    Construct with either a name string (creates a fresh item with
    placeholder fields) or an ElementTree/element (loads all fields).

    NOTE(review): written for Python 2 -- the `unicode` check below raises
    NameError on Python 3, and __str__ relies on ET.tostring returning str.
    """
    itemtype="Item"
    def __init__(self, name):
        super(Item, self).__init__()
        # A non-string `name` is treated as an ElementTree/element to load.
        if not isinstance(name, (str,unicode)):
            self.load(name)
        else:
            self.name = name
            self.slot="MainHand"
            self.value="0gp"
            self.description=lorem
            self.flavor=lorem
            self.enhancement="-"
            self.features=[]
            self.keywords=[]
            self.level="1"
        self.etree = None
    def render(self):
        """Serialize this item into a fresh ElementTree and return it."""
        self.root = ET.Element(self.itemtype)
        # ET.SubElement(self.root, "itemtype").text=self.itemtype
        for entry in ["level", "name","slot","value","description","flavor", "enhancement"]:
            value = getattr(self, entry)
            if value is None:
                value = ""
            ET.SubElement(self.root, entry).text=str(value)
        # self.render_features(self.root)
        for key in self.keywords:
            ET.SubElement(self.root, "keyword").text=key
        for feature in self.features:
            ET.SubElement(self.root, "feature").text=str(feature)
        return ET.ElementTree(self.root)
    def __repr__(self):
        """One-line summary; long names are truncated to keep it short."""
        if len(self.name) > 24:
            shortname = self.name[:21]+"..."
        else:
            shortname=self.name
        return "Lvl %s %s %s: '%s' %s %s"%(self.level, self.itemtype, self.enhancement, shortname, self.slot, self._subrepr())
    # Hook for subclasses to append type-specific details to repr().
    _subrepr=lambda self: ""
    def __str__(self):
        """XML text of this item, lightly pretty-printed."""
        etree = self.render()
        start = ET.tostring(etree.getroot())
        return start.replace("><",">\n <").replace(" </%s>"%self.itemtype,"</%s>"%self.itemtype)
    def __eq__(self,other):
        """Field-by-field equality; list-valued fields compare as sets."""
        for atr in "level name slot value enhancement description flavor".split():
            if getattr(self,atr) != getattr(other,atr):
                # print "%s %s != %s"%(atr, getattr(self,atr),getattr(other,atr))
                return False
        for atr in "keywords features".split():
            if set(getattr(self,atr)) != set(getattr(other,atr)):
                # print atr, set(getattr(self,atr)), "!=", set(getattr(other,atr))
                return False
        return True
    def save(self, fname,mode='a'):
        """Append ('a') or overwrite ('w') this item's XML in *fname*."""
        assert mode == 'a' or mode == 'w', "must open in a write mode!"
        fp = open(fname, mode)
        fp.write(str(self))
        fp.close()
    def load(self, etree):
        """Populate all fields from an ElementTree (or a root element)."""
        if isinstance(etree, ET.ElementTree):
            self.root = etree.getroot()
        else: # assume we got root
            self.root = etree
        assert self.root.tag.lower() == self.itemtype.lower(), "wrong type"
        # Missing tags load as "" via the getattr(find(...), "text", "") idiom.
        for key in "name level slot value description flavor enhancement".split():
            setattr(self, key, getattr(self.root.find(key),"text",""))
        self.keywords = []
        for keyword in self.root.findall("keyword"):
            self.keywords.append(keyword.text)
        self.features = []
        for feat in self.root.findall("feature"):
            self.features.append(feat.text)
    def isin(self, iter):
        """True if an item equal to this one appears in iterable *iter*."""
        for it in iter:
            if self == it:
                return True
        return False
# All weapon type combinations: "Simple Melee", "Simple Ranged", ...,
# "Improvised Ranged".
_weapontypes = ["%s %s" % (mod, kind)
                for mod in "Simple Military Superior Improvised".split()
                for kind in "Melee Ranged".split()]
class Weapon(Item):
    """A weapon card: adds group, proficiency, damage dice, range, weapon
    type, and a list of properties to the base Item.

    NOTE(review): render() writes `weapontype` but load() never reads it
    (and __eq__ ignores it), so it is lost on an XML round-trip -- confirm
    whether that is intended.
    """
    itemtype="Weapon"
    # Closed set of weapon groups used by the editor/cards.
    Groups="Unarmed Axe Bow Crossbow Flail Hammer HeavyBlade LightBlade Mace Pick Polearm Sling Spear Staff".split()
    # Property name -> rules text shown on the card ("" = show name only).
    Properties=dict(HeavyThrown="use STR",
        HighCrit="extra 1[W] crit damage (+1[W] each tier)",
        LightThrown="use DEX",
        OffHand="",
        Reach="attack=close burst 2, no threat bonus",
        Small="",
        Versatile="1 or 2-handed (+1 dmg if 2)",
        LoadFree="Load as free action",
        LoadMinor="Load as minor action")
    types=_weapontypes
    def __init__(self, name):
        # Weapon-specific defaults first; the base constructor may then
        # overwrite everything via load() when `name` is an element.
        self.proficiency="+1"
        self.dice="1d4"
        self.range="-"
        self.group=self.Groups[0]
        self.weapontype=self.types[0]
        self.properties=[]
        super(Weapon, self).__init__(name)
    def load(self, etree):
        """Load base Item fields, then the weapon-specific ones."""
        Item.load(self, etree)
        for key in "group proficiency dice range".split():
            setattr(self, key, getattr(self.root.find(key),"text",""))
        self.properties = []
        for prop in self.root.findall("property"):
            self.properties.append(prop.text)
    def __eq__(self,other):
        """Base equality plus the weapon-specific fields (properties compare
        as a set)."""
        if not Item.__eq__(self, other):
            return False
        for atr in "group proficiency dice range enhancement".split():
            if getattr(self,atr) != getattr(other,atr):
                # print "a.%s %s != %s"%(atr, getattr(self,atr),getattr(other,atr))
                return False
        for atr in "properties".split():
            if set(getattr(self,atr)) != set(getattr(other,atr)):
                # print atr, set(getattr(self,atr)), "!=", set(getattr(other,atr))
                return False
        return True
    def _subrepr(self):
        """Weapon details appended to Item.__repr__."""
        return "%s %s %s"%(self.weapontype, self.proficiency, self.group)
    def render(self):
        """Serialize base fields plus the weapon-specific fields."""
        etree = Item.render(self)
        root = etree.getroot()
        for key in "group proficiency dice range weapontype".split():
            value = getattr(self, key)
            ET.SubElement(self.root, key).text=str(value)
        for p in self.properties:
            ET.SubElement(root, "property").text=p
        return etree
class Armor(Item):
    """An armor card: adds AC bonus and armor/speed check penalties; the
    inherited `slot` field holds the armor type (Cloth ... HeavyShield)."""
    itemtype="Armor"
    types="Cloth Leather Hide Chain Scale Plate LightShield HeavyShield".split()
    def __init__(self, name):
        # Armor-specific defaults first; the base constructor may then
        # overwrite everything via load() when `name` is an element.
        self.ACBonus="+1"
        # self.enhancement="-"
        self.armorCheck="-"
        self.speedCheck="-"
        self.slot=self.types[0]
        super(Armor, self).__init__(name)
    def load(self, etree):
        """Load base Item fields, then the armor-specific ones."""
        Item.load(self, etree)
        for key in "ACBonus armorCheck speedCheck".split():
            # print key, getattr(self.root.find(key),"text","")
            setattr(self, key, getattr(self.root.find(key),"text",""))
            # print getattr(self, key)
    def __eq__(self,other):
        """Base equality plus the armor-specific fields."""
        if not Item.__eq__(self, other):
            return False
        for atr in "ACBonus enhancement armorCheck speedCheck".split():
            if getattr(self,atr) != getattr(other,atr):
                # print "a.%s %s != %s"%(atr, getattr(self,atr),getattr(other,atr))
                return False
        return True
    def _subrepr(self):
        """Armor details appended to Item.__repr__."""
        return "%s AC, %s, %s"%(self.ACBonus, self.armorCheck, self.speedCheck)
    def render(self):
        """Serialize base fields plus the armor-specific fields (None values
        render as "-")."""
        etree = Item.render(self)
        root = etree.getroot()
        for key in "ACBonus armorCheck speedCheck".split():
            value = getattr(self, key)
            if value is None:
                value = "-"
            ET.SubElement(self.root, key).text=str(value)
        return etree
def loadItem(s):
    """Parse one item from a filename (str) or an ElementTree/element,
    dispatching on the root tag to Item / Weapon / Armor.

    Returns None for any other root tag.
    """
    if isinstance(s, str):
        etree = ET.parse(s)
    else:
        etree = ET.ElementTree(s)
    root = etree.getroot()
    tag = root.tag.lower()
    if tag == "item":
        return Item(root)
    elif tag == "weapon":
        return Weapon(root)
    elif tag == "armor":
        return Armor(root)
def loadItemList(fname):
    """Load a list of items from *fname*.

    Accepts either an <itemlist> document (returns one item per child
    element) or a single-item document (returns a one-element list).
    """
    etree = ET.parse(fname)
    root = etree.getroot()
    if root.tag.lower() == "itemlist":
        # Bug fix / modernization: Element.getchildren() was deprecated and
        # removed in Python 3.9; iterating the element directly is the
        # documented replacement and works on Python 2 as well (this also
        # always returns a concrete list, matching the Python 2 map()).
        return [loadItem(child) for child in root]
    else:
        return [loadItem(root)]
def writeItemList(itemlist, fname, mode='w'):
    """Write *itemlist* to *fname* as an <itemlist> XML document.

    mode='w' overwrites; mode='a' first loads any existing items from
    *fname* and rewrites the file with the old items followed by the new
    ones.
    """
    if mode == 'a' and isfile(fname):
        olditems = loadItemList(fname)
    else:
        olditems = []
    # Improvement: context manager guarantees the file is closed even if
    # str(item) raises mid-write.
    with open(fname, 'w') as fp:
        fp.write("<itemlist>\n")
        for it in olditems:
            fp.write(str(it) + '\n')
        for it in itemlist:
            fp.write(str(it) + '\n')
        fp.write("</itemlist>\n")
def writeHTMLItemTables(itemlist, fname, separate=False, stylesheet="big.css",
                        embedStyle=False, openAfter=True):
    """Render *itemlist* as printable HTML card tables into *fname*.

    separate   -- write each page of cards to its own "pageN.<fname>" file
    stylesheet -- CSS file; names containing "big" use a 2x2 page layout,
                  otherwise 3x3
    embedStyle -- inline the stylesheet contents instead of linking to
                  bigstyle.css
    openAfter  -- on macOS, open the result with the default browser
    """
    if separate and len(itemlist) > CARDSPERPAGE:
        # Bug fix: the original recursed with only a filename (passing it as
        # itemlist and dropping every other argument). Write each page's
        # slice of items to its own numbered file, propagating the options.
        # (// keeps the Python 2 integer-division semantics of the original.)
        npages = (len(itemlist) - 1) // CARDSPERPAGE + 1
        for i in range(npages):
            page_items = itemlist[i * CARDSPERPAGE:(i + 1) * CARDSPERPAGE]
            writeHTMLItemTables(page_items, ("page%i." % i) + fname,
                                separate=False, stylesheet=stylesheet,
                                embedStyle=embedStyle, openAfter=openAfter)
        return
    if "big" in stylesheet:
        perrow = 2
        perpage = 4
    else:
        perrow = 3
        perpage = 9
    pages = HTMLCardList(itemlist, perrow, perpage)
    if embedStyle:
        try:
            sfp = open(stylesheet)
        except IOError:
            # Narrowed from a bare `except:`; fall back to the stylesheet
            # shipped next to this module. Bug fix: abspath/dirname were
            # never imported at module level.
            from os.path import abspath, dirname
            here = abspath(dirname(__file__))
            sfp = open(pjoin(here, stylesheet))
        style = "<style>%s</style>" % sfp.read()
        sfp.close()
    else:
        style = """<link rel="stylesheet" href="bigstyle.css" type="text/css" />"""
    with open(fname, 'w') as fp:
        fp.write("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>
D&D Items
</title>
%s
</head>
<body>
""" % style)
        for page in pages:
            fp.write(tonicerstring(page, tags=("tr", "table", "li", "div"), html=True) +
                     "<div id=pagebreak></div>")
        fp.write("""</body></html>""")
    # Bug fix: openAfter was accepted but ignored; honor it (default True
    # preserves the old behavior).
    if openAfter and sys.platform == "darwin":
        Popen(["open", fname])
|
<reponame>Ecrypty/florijncoinmnb
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
import time
def clear_screen():
    """Clear the terminal by shelling out to `clear` (POSIX only)."""
    os.system('clear')
def check_version():
    """Compare the local tool version against the published version.txt and
    print an update notice (and any published messages) on mismatch.

    NOTE(review): `get_florijncoinmnbversion` is not defined or imported in
    this module (only get_version_txt is imported) -- as written this raises
    NameError when called; confirm which module should provide it.
    """
    from mnb_explorer import get_version_txt
    cur_version = get_florijncoinmnbversion()
    git_version = get_version_txt()
    # Any mismatch in major/minor/fix triggers the update notice.
    if ((cur_version.get('major') != git_version.get('major', None)) or
            (cur_version.get('minor') != git_version.get('minor', None)) or
            (cur_version.get('fix') != git_version.get('fix', None))):
        print('\t*** New version is available, plese update ! do git pull\n')
    if git_version.get('msgs', None):
        print('\t*** %s\n\n' % git_version.get('msgs', None))
def logo_show(skip):
    """Print the startup banner, network name, and safety warnings.

    When *skip* is falsy, also checks for a newer release. Pauses longer
    when the dangerous MOVE_1K_COLLATERAL flag is enabled so the warning
    is actually seen.
    """
    from pyfiglet import Figlet
    from config import MAINNET
    from config import MOVE_1K_COLLATERAL

    banner = Figlet(font='slant')
    print(banner.renderText('Florijncoin Masternode with HW Wallet'))

    if not skip:
        check_version()

    print('Network : ' + ('MAINNET' if MAINNET else 'TESTNET'))
    if MOVE_1K_COLLATERAL:
        print()
        print('**** MOVE_1K_COLLATERAL is True *******')
        print()
        time.sleep(5)
    else:
        time.sleep(1)
def check_mempool(mn_config, access):
    """Return True if any configured collateral address currently has
    transactions in the mempool, else False."""
    import simplejson as json  # kept from the original (unused here)
    from mnb_rpc import getaddressmempool

    for entry in mn_config:
        address = entry.get('collateral_address', None)
        if address is None:
            continue
        if len(getaddressmempool(address, access)) > 0:
            return True
    return False
def get_xferblockcount_cache(getblock=False):
    """Per-network xferblockcount cache under ../cache/.

    With getblock=False (default), return the cache file's path. With
    getblock=True, return the stored block count (0 if no cache exists).
    """
    from config import MAINNET
    import simplejson as json

    network = 'MAINNET' if MAINNET else 'TESTNET'
    cache_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '../cache/' + network + '-xferblockcount.dat')

    if not getblock:
        return cache_path

    xferblockcount = 0
    if os.path.exists(cache_path):
        with open(cache_path) as data_file:
            xferblockcount = json.load(data_file)
    return xferblockcount
def get_txidtxidn(txid, txidn):
    """Combine a txid and output index into the 'txid-n' key used throughout
    the masternode config; returns None if either part is missing."""
    if txid is None or txidn is None:
        return None
    return ''.join([txid, '-', str(txidn)])
def print_mnlist(mnconfig, ipmatch, mnstatus, florijncoinninja_cnt):
    """Print one masternode's status row; mainnet rows include the extra
    florijncoinninja counts column."""
    from config import MAINNET

    prefix = (mnconfig.get('alias') + '\t' + mnconfig.get('ipport') + ':' +
              ipmatch + '\t' + mnconfig.get('collateral_address') + ' ')
    if MAINNET:
        print(prefix + florijncoinninja_cnt + ' ' + mnstatus)
    else:
        print(prefix + mnstatus)
def get_florijncoinninja(mn_config):
    """Query the florijncoinninja explorer for every configured collateral vin.

    Returns a dict keyed by 'txid-n' with ipport and count fields, or None
    when the explorer returned nothing.
    """
    import simplejson as json
    from mnb_explorer import get_mnstatus_florijncoinninja

    vins = [m.get('collateral_txidtxidn', None) for m in mn_config
            if m.get('collateral_txidtxidn', None) is not None]
    status_ninja = get_mnstatus_florijncoinninja(vins)
    if not status_ninja:
        return None
    mnj = {}
    if status_ninja.get('status') == 'OK':
        for entry in status_ninja.get('data'):
            key = get_txidtxidn(entry.get('MasternodeOutputHash'),
                                entry.get('MasternodeOutputIndex'))
            mnj[key] = {
                "ipport": entry.get('MasternodeIP') + ':' + entry.get('MasternodePort'),
                "ActiveCount": entry.get('ActiveCount', '-'),
                "InactiveCount": entry.get('InactiveCount', '-'),
                "UnlistedCount": entry.get('UnlistedCount', '-'),
            }
    return mnj
def print_mnstatus(mn_config, mns, mna):
    """Print a status table for all configured masternodes.

    Args:
        mn_config: list of masternode config dicts (keyed by 'collateral_txidtxidn',
            'ipport', 'alias', ...).
        mns: mapping collateral_txidtxidn -> status string.
        mna: mapping collateral_txidtxidn -> announced ip:port.
    """
    from config import MAINNET
    print()
    print('[masternodes status]')
    # On MAINNET an extra "dn" column (florijncoinninja counts) may be shown.
    if MAINNET:
        mnj = get_florijncoinninja(mn_config)
        if mnj:
            print('alias\tip (m: ip/port match)\tcollateral address\t\t dn\t status')
        else:
            print('alias\tip (m: ip/port match)\tcollateral address\t\t status')
    else:
        print('alias\tip (m: ip/port match)\tcollateral address\t\t status')
    for m in mn_config:
        # Default when no florijncoinninja data exists for this node.
        florijncoinninja_cnt = '-/-'
        mna_ip = mna.get(m.get('collateral_txidtxidn', '-------'), '-')
        mns_status = mns.get(m.get('collateral_txidtxidn', '-------'), '-')
        if MAINNET:
            if mnj:
                if mnj.get(m.get('collateral_txidtxidn')) != None:
                    # Format: UnlistedCount/InactiveCount/ActiveCount.
                    florijncoinninja_cnt = str(mnj.get(m.get('collateral_txidtxidn')).get('UnlistedCount')) + '/' + str(mnj.get(m.get('collateral_txidtxidn')).get('InactiveCount')) + '/' + str(mnj.get(m.get('collateral_txidtxidn')).get('ActiveCount'))
        # 'm' marks a masternode whose configured ip:port matches the announced one.
        if m.get('ipport') != mna_ip:
            ipmatch = '-'
        else:
            ipmatch = 'm'
        print_mnlist(m, ipmatch, mns_status, florijncoinninja_cnt)
    if MAINNET:
        print('\n* dn: florijncoinninja status : UnlistedCount / InactiveCount / ActiveCount')
    else:
        print('\n* be sure to check masternode status again using online tools like florijncoinninja\n')
def get_function_name():
    """Return the name of the function that called this helper."""
    caller_frame = sys._getframe(1)
    return caller_frame.f_code.co_name
def get_caller_name():
    """Return the name of the caller's caller (two frames up)."""
    grandparent_frame = sys._getframe(2)
    return grandparent_frame.f_code.co_name
def get_florijncoinmnbversion():
    """Load and return the version dict from version.txt next to this module."""
    import simplejson as json

    version_file = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'version.txt')
    with open(version_file) as data_file:
        return json.load(data_file)
def print_err_exit(
        caller_name,
        function_name,
        err_msg,
        errargs=None):
    """Format an error report and terminate the program.

    Args:
        caller_name: name of the function that called the failing function.
        function_name: name of the function where the error occurred.
        err_msg: human-readable error description.
        errargs: optional exception or arguments included verbatim.

    Raises:
        SystemExit: always, carrying the formatted message.
    """
    VERSION = get_florijncoinmnbversion()
    report = '\n\n\tversion : %s.%s.%s\n' % (
        VERSION.get('major'), VERSION.get('minor'), VERSION.get('fix'))
    report += '\tcaller : %s\n' % caller_name
    report += '\tfunction : %s\n' % function_name
    if errargs:
        report += '\terr : %s' % str(errargs)
    report += '\t===> %s\n' % err_msg
    raise SystemExit(report)
def now():
    # Current Unix time, truncated to an integer number of seconds.
    return int(time.time())
def printdbg(msg):
    """Print *msg* prefixed with a UTC timestamp when FLRNMNB_DEBUG is set.

    Args:
        msg: the message to log (previously named ``str``, which shadowed
            the builtin).
    """
    # time.gmtime() with no argument uses the current time directly,
    # which matches the previous time.gmtime(now()) at second granularity.
    ts = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
    logstr = "{} {}".format(ts, msg)
    if os.environ.get('FLRNMNB_DEBUG', None):
        print(logstr)
def print_hw_wallet_check():
    """Tell the user what to expect on the hardware wallet before signing."""
    lines = (
        '---> check hw wallet, check message on screen and press button',
        '\tif PIN protected, wallet ask your PIN(once per session)',
        '\tif Passphrase protected, wallet ask your Passphrase(once per session)',
        '\tcheck message on screen and press button on hw wallet to proceed(all signing)\n',
    )
    print('\n'.join(lines))
# end
|
<reponame>short-greg/takonet
from abc import ABC, abstractmethod
from functools import partial, reduce
import typing
from ._networks import Node, NodeSet, Network
from abc import ABC, abstractmethod
import typing
class Q(ABC):
    """Composable query over network nodes.

    Subclasses implement __call__ to select a NodeSet from an iterable of
    nodes; queries combine with ``|`` (union), ``&`` (intersection) and
    ``-`` (difference).
    """

    @abstractmethod
    def __call__(self, nodes: typing.Iterable[Node]) -> NodeSet:
        """Evaluate the query and return the matching node set."""
        raise NotImplementedError

    def __or__(self, other):
        return OrQ([self, other])

    def __and__(self, other):
        return AndQ([self, other])

    def __sub__(self, other):
        # BUG FIX: DifferenceQ takes the two queries as separate positional
        # arguments (q1, q2), not a single list — the previous
        # DifferenceQ([self, other]) raised TypeError when evaluated.
        return DifferenceQ(self, other)
class OrQ(Q):
    """Union of several sub-queries."""

    def __init__(self, queries: typing.List[Q]):
        self._queries = queries

    def __call__(self, nodes: typing.Iterable[Node]) -> NodeSet:
        result_sets = (query(nodes) for query in self._queries)
        return reduce(lambda left, right: left | right, result_sets)

    def __or__(self, other):
        # Flatten chained unions into this instance rather than nesting OrQs.
        self._queries.append(other)
        return self
class AndQ(Q):
    """Intersection of several sub-queries."""

    def __init__(self, queries: typing.List[Q]):
        self._queries = queries

    def __call__(self, nodes: typing.Iterable[Node]) -> NodeSet:
        result_sets = (query(nodes) for query in self._queries)
        return reduce(lambda left, right: left & right, result_sets)

    def __and__(self, other):
        # Flatten chained intersections into this instance rather than nesting.
        self._queries.append(other)
        return self
class DifferenceQ(Q):
    """Nodes matched by the first query but not by the second."""

    def __init__(self, q1: Q, q2: Q):
        self._q1 = q1
        self._q2 = q2

    def __call__(self, nodes: typing.Iterable[Node]) -> NodeSet:
        included = self._q1(nodes)
        excluded = self._q2(nodes)
        return included - excluded
class NodeProcessor(ABC):
    """Transforms one network Node into another (e.g. cloning with a new name)."""

    @abstractmethod
    def process(self, node: Node) -> Node:
        """Return a (possibly new) node derived from *node*."""
        pass
class UpdateNodeName(NodeProcessor):
    """Clones a node and renames it with fixed prefix/suffix strings."""

    def __init__(self, prepend_with='', append_with=''):
        self._prepend_with = prepend_with
        self._append_with = append_with

    def process(self, node: Node) -> Node:
        """Concrete implementation of the abstract NodeProcessor.process hook.

        BUG FIX: the class previously only defined __call__, leaving the
        abstract ``process`` unimplemented, so it could not be instantiated.
        """
        return self(node)

    def __call__(self, node: Node) -> Node:
        # Clone first so the caller's node is left untouched.
        name = self._prepend_with + node.name + self._append_with
        node = node.clone()
        node.name = name
        return node
class Filter(ABC):
    """Checks if a node should be processed or not
    """
    @abstractmethod
    def __call__(self, node) -> bool:
        # Return True when *node* should be passed to the processor.
        pass
class NullFilter(Filter):
    """Pass-everything filter: accepts every node unconditionally.
    """
    def __call__(self, node) -> bool:
        return True
class Traverser(object):
    """Applies a NodeProcessor to the nodes yielded by a network traversal."""

    def __init__(self, filter: Filter=None):
        """
        Args:
            filter (Filter, optional): Filters out nodes not to process.
                Defaults to None, meaning every node is processed.
        """
        # BUG FIX: a None filter crashed in _traverse; fall back to the
        # pass-everything NullFilter so no None check is needed later.
        self._filter = filter if filter is not None else NullFilter()

    def _traverse(self, traverse_f, processor: NodeProcessor):
        # traverse_f is a zero-argument callable yielding nodes.
        for node in traverse_f():
            if not self._filter(node):
                continue
            processor(node)

    def traverse_forward(self, net: Network, from_nodes, to_nodes, processor: NodeProcessor):
        """Process nodes in the network's forward traversal order."""
        # BUG FIX: the traversal must be bound lazily with partial, not
        # called eagerly; the previous code also dropped the processor
        # argument, so _traverse raised a TypeError.
        self._traverse(partial(net.traverse_forward, from_nodes, to_nodes), processor)

    def traverse_backward(self, net: Network, from_nodes, to_nodes, processor: NodeProcessor):
        """Process nodes in the network's backward traversal order."""
        self._traverse(partial(net.traverse_backward, from_nodes, to_nodes), processor)
class Backwardraverser(Traverser):
    """Traverser variant for backward traversal (name kept for compatibility)."""

    def __init__(self, filter: Q=None):
        # Delegate so filter handling stays in one place in the base class.
        super().__init__(filter)
# class NullNodeProcessor(NodeProcessor):
# def __init__(self):
# pass
# def process(self, node: Node) -> Node:
# return node.clone()
# TODO: move to processors tako.process
# def _get_input_names_helper(self, node: Node, use_input: typing.List[bool], roots: typing.List):
# for node_input_port in node.inputs:
# name = node_input_port.node
# try:
# use_input[roots.index(name)] = True
# except ValueError:
# self._get_input_names_helper(self._nodes[name], use_input, roots)
# def get_input_names(self, output_names: typing.List[str]) -> typing.List[str]:
# """
# Args:
# output_names (typing.List[str]): Output names in the network.
# Returns:
# typing.List[str]: The names of all the inputs required for the arguments.
# """
# use_input = [False] * len(self._roots)
# assert len(use_input) == len(self._roots)
# for output_name in output_names:
# if output_name not in self._nodes:
# raise KeyError(f'Output name {output_name} is not in the network')
# roots = list(self._roots)
# for name in output_names:
# self._get_input_names_helper(self._nodes[name], use_input, roots)
# return [input_name for input_name, to_use in zip(self._roots, use_input) if to_use is True]
# def _is_input_name_helper(
# self, node: Node, input_names: typing.List[str],
# is_inputs: typing.List[bool]
# ) -> bool:
# """Helper to check if the node is an input for an output
# Args:
# node (Node): A node in the network
# input_names (typing.List[str]): Current input names
# is_inputs (typing.List[bool]): A list of booleans that specifies
# which nodes are inputs
# Returns:
# bool: Whether a node is an input
# """
# other_found = False
# if not node.inputs:
# return True
# for node_input_port in node.inputs:
# name = node_input_port.node
# try:
# is_inputs[input_names.index(name)] = True
# except ValueError:
# other_found = self._is_input_name_helper(self._nodes[name], input_names, is_inputs)
# if other_found: break
# return other_found
# def are_inputs(self, output_names: typing.List[str], input_names: typing.List[str]) -> bool:
# """Check if a list of nodes are directly or indirectly inputs into other nodes
# Args:
# output_names (typing.List[str]): Names of nodes to check
# input_names (typing.List[str]): Names of input candidates
# Raises:
# KeyError: Name of the module
# Returns:
# bool: Whether or not input_names are inputs
# """
# is_inputs = [False] * len(input_names)
# for name in itertools.chain(input_names, output_names):
# if name not in self._nodes:
# raise KeyError(f'Node name {name} does not exist')
# for name in output_names:
# other_found: bool = self._is_input_name_helper(self._nodes[name], input_names, is_inputs)
# if other_found:
# break
# all_true = not (False in is_inputs)
# return all_true and not other_found
# TODO: Reevaluate these processors
# class NetPorts(typing.NamedTuple):
# inputs: typing.List[typing.Union[Port, str]]
# outputs: typing.List[typing.Union[Port, str]]
# class NetworkBuilder(object):
# def __init__(self, node_processor: NodeProcessor):
# self._network = None
# self._nodes: typing.Dict[str, Node] = {}
# self._sub_networks: typing.Dict[str, SubNetwork] = {}
# self._node_processor = node_processor
# self._added_nodes: typing.Set[str] = set()
# self.reset()
# def reset(self):
# self._network = None
# self._operation_nodes: typing.Dict[str, OpNode] = {}
# self._network_interfaces: typing.Dict[str, InterfaceNode] = {}
# self._added_nodes: typing.Set[str] = set()
# self._nodes: typing.Dict[str, Node] = {}
# def add_node(self, node: Node):
# self._nodes[node.name] = node
# return node.ports
# def _build_network(self, cur_node: Node):
# if cur_node.name in self._added_nodes:
# return
# for input_node in cur_node.input_nodes:
# self._build_network(self._nodes[input_node])
# node = self._node_processor.process_node(cur_node)
# self._added_nodes.add(cur_node.name)
# self._network.add_node(node)
# def get_result(self, default_interface: NetPorts=None):
# self._network = Network()
# for name, node in self._nodes.items():
# self._build_network(node)
# if default_interface:
# self._network.set_default_interface(default_interface)
# return self._network
# class NameAppendVisitor(NodeVisitor):
# def __init__(self, prepend_with='', append_with=''):
# node_processor = UpdateNodeName(prepend_with, append_with)
# self._builder = NetworkBuilder(node_processor)
# def visit(self, node: Node):
# self._builder.add_node(node)
# @property
# def get_result(self, default_interface: NetPorts=None):
# return self._builder.get_result(default_interface)
# def reset(self):
# self._builder.reset()
# def visit_network(self, network: Network, default_interface: NetPorts=None):
# self.reset()
# network.traverse_forward(self)
# for sub_network in network.sub_networks:
# sub_network.accept(self)
# return self._builder.get_result(default_interface)
# class MergeVisitor(NodeVisitor):
# def __init__(self):
# node_processor = NullNodeProcessor()
# self._builder = NetworkBuilder(node_processor)
# def visit(self, node: Node):
# self._builder.add_node(node)
# @property
# def get_result(self, default_interface: NetPorts=None):
# return self._builder.get_result(default_interface)
# def visit_networks(
# self, networks: typing.List[Network]
# ):
# self.reset()
# input_names = []
# output_names = []
# for network in networks:
# network.traverse_forward(self)
# input_names.extend(network.input_names)
# output_names.extend(network.output_names)
# for sub_network in network.sub_networks:
# sub_network.accept(self)
# return self._builder.get_result(
# NetPorts(input_names, output_names)
# )
|
<reponame>albgar/legacy_aiida_plugin<filename>aiida_siesta/workflows/exchange_barrier.py
from aiida import orm
from aiida.engine import WorkChain, ToContext
from aiida_siesta.workflows.neb_base import SiestaBaseNEBWorkChain
from aiida_siesta.workflows.base import SiestaBaseWorkChain
from aiida_siesta.utils.structures import exchange_sites_in_structure
from aiida_siesta.utils.structures import compute_mid_path_position
from aiida_siesta.utils.structures import find_intermediate_structure
from aiida_siesta.utils.interpol import interpolate_two_structures_ase
class ExchangeBarrierWorkChain(WorkChain):
    """
    Workchain to compute the barrier for exchange of two atoms
    in a structure.
    """

    @classmethod
    def define(cls, spec):
        """Declare inputs, outputs, outline and exit codes."""
        super().define(spec)
        spec.expose_inputs(SiestaBaseWorkChain,
                           exclude=('structure',),
                           namespace="initial")
        spec.expose_inputs(SiestaBaseWorkChain,
                           exclude=('structure',),
                           namespace="final")
        spec.expose_inputs(SiestaBaseNEBWorkChain,
                           exclude=('starting_path',),
                           namespace="neb")
        spec.input('initial_structure', valid_type=orm.StructureData,
                   help='Initial structure')
        spec.input('first_index', valid_type=orm.Int,
                   help='Index of first atom in structure')
        spec.input('second_index', valid_type=orm.Int,
                   help='Index of second atom structure')
        spec.input('migration_direction', valid_type=orm.List,
                   help='Migration direction (in lattice coordinates)')
        spec.input('n_images', valid_type=orm.Int,
                   help='Number of (internal) images in Path (odd!!)')  # validate
        spec.expose_outputs(SiestaBaseNEBWorkChain)
        spec.outline(
            cls.prepare_structures,
            cls.relax_initial,
            cls.relax_final,
            cls.prepare_initial_path,
            cls.run_NEB_workchain,
            cls.check_results
        )
        spec.exit_code(200, 'ERROR_MAIN_WC', message='The end-point relaxation SiestaBaseWorkChain failed')
        spec.exit_code(250, 'ERROR_CONFIG', message='Cannot generate initial path correctly')
        spec.exit_code(300, 'ERROR_NEB_WK', message='SiestaBaseNEBWorkChain did not finish correctly')

    def prepare_structures(self):
        """
        Generate exchanged structure as final end-point
        """
        s_initial = self.inputs.initial_structure
        i1 = self.inputs.first_index.value
        i2 = self.inputs.second_index.value

        s_final = exchange_sites_in_structure(s_initial, i1, i2)

        self.ctx.s_initial = s_initial
        self.ctx.s_final = s_final
        self.report('Created initial and final structures')

    def relax_initial(self):
        """
        Run the SiestaBaseWorkChain, might be a relaxation or a scf only.
        """
        inputs = self.exposed_inputs(SiestaBaseWorkChain,
                                     namespace='initial')
        inputs['structure'] = self.ctx.s_initial
        running = self.submit(SiestaBaseWorkChain, **inputs)
        self.report(f'Launched SiestaBaseWorkChain<{running.pk}> to relax the initial structure.')
        return ToContext(initial_relaxation_wk=running)

    def relax_final(self):
        """
        Run the SiestaBaseWorkChain, might be a relaxation or a scf only.
        """
        inputs = self.exposed_inputs(SiestaBaseWorkChain,
                                     namespace='final')
        inputs['structure'] = self.ctx.s_final
        running = self.submit(SiestaBaseWorkChain, **inputs)
        self.report(f'Launched SiestaBaseWorkChain<{running.pk}> to relax the final structure.')
        return ToContext(final_relaxation_wk=running)

    def prepare_initial_path(self):
        """
        Perhaps more heuristics are needed?
        Here we just interpolate.
        """
        initial_wk = self.ctx.initial_relaxation_wk
        if not initial_wk.is_finished_ok:
            return self.exit_codes.ERROR_MAIN_WC
        final_wk = self.ctx.final_relaxation_wk
        if not final_wk.is_finished_ok:
            return self.exit_codes.ERROR_MAIN_WC

        s_initial = initial_wk.outputs.output_structure
        s_final = final_wk.outputs.output_structure
        n_images = self.inputs.n_images.value
        #
        # Add here any heuristics, before handling the
        # path for further refinement
        #
        # ---------------------------------------------
        # The basic heuristic here is to avoid head-on collisions
        # by defining an "avoidance cylinder" around the line
        # joining the two atoms exchanged. The input "migration_direction"
        # serves to define a point on the surface of that cylinder, at
        # the mid-point, which is used as the mid-point of the trial path.
        migration_direction = self.inputs.migration_direction.get_list()
        i1 = self.inputs.first_index.value
        i2 = self.inputs.second_index.value

        i1_mid_path_position = compute_mid_path_position(s_initial,
                                                         i1, i2,
                                                         migration_direction)
        #
        s_intermediate = find_intermediate_structure(s_initial,
                                                     i1, i2,
                                                     i1_mid_path_position)
        #
        # The starting_path is now built from two sections
        # We assume that the number of internal images is odd,
        # so that n_images // 2 is the number of internal images
        # of each section
        first_list = interpolate_two_structures_ase(s_initial,
                                                    s_intermediate,
                                                    n_images // 2)
        second_list = interpolate_two_structures_ase(s_intermediate,
                                                     s_final,
                                                     n_images // 2)
        #
        # Remove duplicate central point
        #
        images_list = first_list[:-1] + second_list

        # Sanity check: internal images plus the two end points.
        if len(images_list) != n_images + 2:
            self.report(f"Number of images: {n_images} /= list length")
            return self.exit_codes.ERROR_CONFIG
        #
        # We might need a more general refiner, starting
        # with the trial path
        #
        # refined_path = refine_neb_path(starting_path)

        path_object = orm.TrajectoryData(images_list)
        #
        # Use a 'serializable' dictionary instead of the
        # actual kinds list
        #
        _kinds_raw = [k.get_raw() for k in s_initial.kinds]
        path_object.set_attribute('kinds', _kinds_raw)

        self.ctx.path = path_object
        self.report('Generated starting path for NEB.')

    def run_NEB_workchain(self):
        """Launch the NEB workchain with the interpolated starting path."""
        inputs = self.exposed_inputs(SiestaBaseNEBWorkChain, namespace='neb')
        # (removed a leftover debug print of the raw inputs dict)
        inputs['starting_path'] = self.ctx.path

        running = self.submit(SiestaBaseNEBWorkChain, **inputs)
        self.report(f'Launched SiestaBaseNEBWorkChain<{running.pk}> to find MEP for atom exchange.')
        return ToContext(neb_wk=running)

    def check_results(self):
        """
        All checks are done in the NEB workchain
        """
        if not self.ctx.neb_wk.is_finished_ok:
            return self.exit_codes.ERROR_NEB_WK

        outps = self.ctx.neb_wk.outputs
        self.out('neb_output_package', outps['neb_output_package'])
        self.report('ExchangeBarrier workchain done.')
# @classmethod
# def inputs_generator(cls): # pylint: disable=no-self-argument,no-self-use
# from aiida_siesta.utils.inputs_generators import BaseWorkChainInputsGenerator
# return BaseWorkChainInputsGenerator(cls)
|
<filename>roster/crawlers.py
# -*- coding:utf-8 -*-
import os
import re
from steem.comment import SteemComment
from steem.collector import get_posts, get_comments
from utils.logging.logger import logger
# Account whose posts/comments carry the roster tables.
TEAMCN_SHOP_ACCOUNT = "teamcn-shop"
# Matches "|@account nickname|" table rows in the daily shop posts.
TEAMCN_SHOP_POST_NAME_NICKNAME_PATTERN = r"\|(\@[A-Za-z0-9._-]+) ([^|]+)\|"
# Matches the greeting at the start of shop comments; group 1 is the nickname.
TEAMCN_SHOP_COMMENT_NAME_NICKNAME_PATTERN = r"^你好鸭,([^!]+)!"
# Post listing new villagers and their nicknames.
TEAMCN_CEREMONY_POST = "https://steemit.com/@team-cn/egxwc0ewsi"
# Matches "@account nickname(s)" lines in the ceremony post body.
TEAMCN_CEREMONY_NAME_NICKNAME_PATTERN = r"\n(\@[A-Za-z0-9._-]+)\s+(\S+)"
class Crawler:
    """Base crawler that accumulates a mapping of account -> list of nicknames.

    Subclasses implement crawl() (fetch posts) and parse(post) (extract pairs).
    """

    def __init__(self, roster_dict):
        # Accept None and fall back to a fresh dict.
        self._roster_dict = roster_dict or {}

    def _update(self, account, name):
        """Record *name* as a nickname of *account*; return True if anything changed."""
        # A nickname identical to the account carries no information.
        if account == name:
            return False
        known = self._roster_dict.get(account)
        if known is None:
            self._roster_dict[account] = [name]
            return True
        if name not in known:
            known.append(name)
            return True
        return False

    def run(self):
        """Crawl and parse all posts, then return the accumulated roster dict."""
        posts = self.crawl()
        if len(posts) > 0:
            for post in posts:
                self.parse(post)
        else:
            logger.info("No posts are fetched.")
        return self._roster_dict
class TeamCnShopDaily(Crawler):
    """Extracts account/nickname pairs from teamcn-shop daily posts."""

    def __init__(self, roster_dict, days):
        Crawler.__init__(self, roster_dict)
        self.account = TEAMCN_SHOP_ACCOUNT
        self.days = days
        logger.info("Crawling roster from teamcn-shop daily posts...")

    def crawl(self):
        # Fetch recent posts authored by the shop account.
        return get_posts(account=self.account, days=self.days)

    def parse(self, post):
        # Table rows look like "|@account nickname|".
        for account, nickname in re.findall(TEAMCN_SHOP_POST_NAME_NICKNAME_PATTERN, post.body):
            self._update(account, nickname)
class TeamCnShopComments(Crawler):
    """Extracts nicknames from teamcn-shop greeting comments."""

    def __init__(self, roster_dict, days):
        Crawler.__init__(self, roster_dict)
        self.account = TEAMCN_SHOP_ACCOUNT
        self.days = days
        logger.info("Crawling roster from teamcn-shop comments...")

    def crawl(self):
        # Fetch recent comments written by the shop account.
        return get_comments(account=self.account, days=self.days)

    def parse(self, comment):
        match = re.search(TEAMCN_SHOP_COMMENT_NAME_NICKNAME_PATTERN, comment.body)
        if not match:
            return
        nickname = match.group(1)
        # The nickname belongs to the author of the parent post/comment.
        parent_account = comment["parent_author"]
        if parent_account != nickname:
            self._update("@" + parent_account, nickname)
class TeamCnCeremony(Crawler):
    """Extracts account/nickname pairs from the teamcn ceremony post."""

    def __init__(self, roster_dict):
        Crawler.__init__(self, roster_dict)
        logger.info("Crawling roster from teamcn ceremony posts...")

    def crawl(self):
        # The ceremony roster lives in a single known post.
        return [SteemComment(url=TEAMCN_CEREMONY_POST).get_comment()]

    def parse(self, post):
        # Only the section after the roster heading contains the name list.
        roster_section = post.body.split("新手村村民名单")[-1]
        for account, nicknames in re.findall(TEAMCN_CEREMONY_NAME_NICKNAME_PATTERN, roster_section):
            # Multiple nicknames are slash-separated; "TBD" means not assigned yet.
            for nickname in nicknames.split("/"):
                if nickname != "TBD":
                    self._update(account, nickname)
|
import hmac
import json
import re
import time
from hashlib import sha256
import requests
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files.base import ContentFile
from django.core.validators import URLValidator
from django.http import HttpResponse
from wagtail_live.exceptions import RequestVerificationError
from wagtail_live.receivers import BaseMessageReceiver, WebhookReceiverMixin
from wagtail_live.utils import is_embed
class SlackWebhookMixin(WebhookReceiverMixin):
    """Slack WebhookMixin."""

    url_path = "slack/events"
    url_name = "slack_events_handler"

    def post(self, request, *args, **kwargs):
        """Checks if Slack is trying to verify our Request URL.

        Returns:
            (HttpResponse) containing the challenge string if Slack
            is trying to verify our request URL.
        """
        payload = json.loads(request.body.decode("utf-8"))
        if payload["type"] == "url_verification":
            # Echo the challenge back so Slack accepts this endpoint.
            return HttpResponse(payload["challenge"])
        return super().post(request, *args, **kwargs)

    @staticmethod
    def sign_slack_request(content):
        """Signs content from a Slack request using the SLACK_SIGNING_SECRET as key."""
        secret = str.encode(settings.SLACK_SIGNING_SECRET)
        digest = hmac.new(secret, msg=str.encode(content), digestmod=sha256)
        return digest.hexdigest()

    def verify_request(self, request, body):
        """Verifies Slack requests.

        See https://api.slack.com/authentication/verifying-requests-from-slack.

        Args:
            request (HttpRequest): from Slack

        Raises:
            (RequestVerificationError) if request failed to be verified.
        """
        timestamp = request.headers.get("X-Slack-Request-Timestamp")
        if not timestamp:
            raise RequestVerificationError(
                "X-Slack-Request-Timestamp not found in request's headers."
            )

        # The request timestamp is more than five minutes from local time.
        # It could be a replay attack, so let's ignore it.
        if abs(time.time() - float(timestamp)) > 60 * 5:
            raise RequestVerificationError(
                "The request timestamp is more than five minutes from local time."
            )

        sig_basestring = "v0:" + timestamp + ":" + body
        expected_signature = "v0=" + self.sign_slack_request(content=sig_basestring)
        provided_signature = request.headers["X-Slack-Signature"]
        # Constant-time comparison avoids leaking signature bytes via timing.
        if not hmac.compare_digest(provided_signature, expected_signature):
            raise RequestVerificationError("Slack signature couldn't be verified.")

    @classmethod
    def set_webhook(cls):
        """This is done in Slack UI."""
        pass

    @classmethod
    def webhook_connection_set(cls):
        """Assume that it's true."""
        return True
class SlackEventsAPIReceiver(BaseMessageReceiver, SlackWebhookMixin):
    """Slack Events API receiver.

    Maps Slack event payloads onto the BaseMessageReceiver hooks
    (add/change/delete message, image accessors, text parsing).
    """

    def dispatch_event(self, event):
        """See base class.

        Routes on the message subtype; plain messages and file shares are
        added, edits and deletions are forwarded to their handlers.
        """
        message = event["event"]
        subtype = message.get("subtype")
        if subtype:
            if subtype == "message_changed":
                self.change_message(message=message)
            elif subtype == "message_deleted":
                self.delete_message(message=message)
            elif subtype == "file_share":
                self.add_message(message=message)
            # Other subtypes are deliberately ignored.
            return
        self.add_message(message=message)

    def get_channel_id_from_message(self, message):
        """See base class."""
        return message["channel"]

    def get_message_id_from_message(self, message):
        """See base class. Slack identifies messages by their timestamp."""
        return message["ts"]

    def get_message_text(self, message):
        """See base class."""
        return message["text"]

    def get_message_files(self, message):
        """See base class. Returns an empty list when no files are attached."""
        return message["files"] if "files" in message else []

    def get_image_title(self, image):
        """See base class."""
        return image["title"]

    def get_image_name(self, image):
        """See base class."""
        return image["name"]

    def get_image_mimetype(self, image):
        """See base class. Returns the subtype, e.g. 'png' from 'image/png'."""
        return image["mimetype"].split("/")[1]

    def get_image_dimensions(self, image):
        """See base class.

        Raises:
            ValueError: when Slack didn't provide original dimensions.
        """
        try:
            return (image["original_w"], image["original_h"])
        except KeyError:
            raise ValueError

    def get_image_content(self, image):
        """See base class. Downloads the image using the bot token for auth."""
        slack_bot_token = getattr(settings, "SLACK_BOT_TOKEN", "")
        if not slack_bot_token:
            raise ImproperlyConfigured(
                "You haven't specified SLACK_BOT_TOKEN in your settings."
                + "You won't be able to upload images from Slack without this setting defined."
            )

        headers = {"Authorization": f"Bearer {slack_bot_token}"}
        response = requests.get(image["url_private"], headers=headers)
        return ContentFile(response.content)

    def get_message_id_from_edited_message(self, message):
        """See base class. The original id lives in 'previous_message'."""
        return self.get_message_id_from_message(message=message["previous_message"])

    def get_message_text_from_edited_message(self, message):
        """See base class."""
        return self.get_message_text(message=message["message"])

    def get_message_files_from_edited_message(self, message):
        """See base class."""
        return self.get_message_files(message=message["message"])

    def get_embed(self, text):
        """Slack sends url in this format:
        <https://twitter.com/wagtail/|https://twitter.com/wagtail/>'
        where the first part is the full url and the second part
        represents the user's input.
        See https://api.slack.com/reference/surfaces/formatting#links-in-retrieved-messages

        Returns the embeddable URL, or "" when *text* isn't one.
        """

        # Check if the text provided is a Slack-like url
        if text.startswith("<") and text.endswith(">"):
            # Get the url resolved by Slack
            url = text[1:-1].split("|")[0]
            if is_embed(text=url):
                return url
        return ""

    def parse_text(self, text):
        """See base class. Rewrites Slack's <url|label> links as HTML anchors.

        See also:
        https://api.slack.com/reference/surfaces/formatting#links-in-retrieved-messages
        """
        url_format = re.compile(r"<http([^|]+?)(\|([^|]+?))?>")
        urls = url_format.finditer(text)
        for url_match in urls:
            match = url_match.group()[1:-1]
            if "|" in match:
                url, description = match.split("|")
            else:
                # No label: use the URL itself as the link text.
                url = description = match
            try:
                validator = URLValidator()
                validator(url)
            except ValidationError:
                # Skip anything that isn't a valid absolute URL.
                continue
            text = text.replace(url_match.group(), f"<a href='{url}'>{description}</a>")
        return text
|
<filename>tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/joblib/test/test_dask.py
from __future__ import print_function, division, absolute_import
import os
import pytest
from random import random
from uuid import uuid4
from time import sleep
from .. import Parallel, delayed, parallel_backend
from ..parallel import ThreadingBackend, AutoBatchingMixin
from .._dask import DaskDistributedBackend
distributed = pytest.importorskip('distributed')
from distributed import Client, LocalCluster, get_client
from distributed.metrics import time
from distributed.utils_test import cluster, inc
def noop(*args, **kwargs):
    """Accept any arguments, do nothing, and return None."""
    return None
def slow_raise_value_error(condition, duration=0.05):
    """Sleep for *duration* seconds, then raise ValueError if *condition* is truthy."""
    sleep(duration)
    if not condition:
        return
    raise ValueError("condition evaluated to True")
def count_events(event_name, client):
    """Count how many times *event_name* appears in each dask worker's log.

    Returns a dict mapping worker address -> occurrence count.
    """
    # Each log entry is a tuple whose second element is the event name.
    worker_events = client.run(lambda dask_worker: dask_worker.log)
    return {
        worker: sum(1 for event in events if event[1] == event_name)
        for worker, events in worker_events.items()
    }
def test_simple(loop):
    # Smoke test: the dask backend runs joblib tasks, surfaces task
    # exceptions, and remains usable after a failed batch.
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:  # noqa: F841
            with parallel_backend('dask') as (ba, _):
                seq = Parallel()(delayed(inc)(i) for i in range(10))
                assert seq == [inc(i) for i in range(10)]
                # A failing task must propagate its exception to the caller.
                with pytest.raises(ValueError):
                    Parallel()(delayed(slow_raise_value_error)(i == 3)
                               for i in range(10))
                # The backend must still work after the failure above.
                seq = Parallel()(delayed(inc)(i) for i in range(10))
                assert seq == [inc(i) for i in range(10)]
def test_dask_backend_uses_autobatching(loop):
    # The dask backend should inherit AutoBatchingMixin's batch sizing and
    # grow its batch size when flooded with many very short tasks.
    assert (DaskDistributedBackend.compute_batch_size
            is AutoBatchingMixin.compute_batch_size)

    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:  # noqa: F841
            with parallel_backend('dask') as (ba, _):
                with Parallel() as parallel:
                    # The backend should be initialized with a default
                    # batch size of 1:
                    backend = parallel._backend
                    assert isinstance(backend, DaskDistributedBackend)
                    assert backend.parallel is parallel
                    assert backend._effective_batch_size == 1

                    # Launch many short tasks that should trigger
                    # auto-batching:
                    parallel(
                        delayed(lambda: None)()
                        for _ in range(int(1e4))
                    )
                    assert backend._effective_batch_size > 10
def random2():
    # Module-level wrapper around random() so it can be pickled by name
    # and shipped to dask workers.
    return random()
def test_dont_assume_function_purity(loop):
    # Two calls to the same zero-argument function must be executed twice,
    # not deduplicated by dask's result caching.
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:  # noqa: F841
            with parallel_backend('dask') as (ba, _):
                x, y = Parallel()(delayed(random2)() for i in range(2))
                assert x != y
@pytest.mark.parametrize("mixed", [True, False])
def test_dask_funcname(loop, mixed):
    # Batches submitted to dask should carry a descriptive task name that
    # reflects the wrapped function(s), visible in the scheduler log.
    from joblib._dask import Batch
    if not mixed:
        tasks = [delayed(inc)(i) for i in range(4)]
        batch_repr = 'batch_of_inc_4_calls'
    else:
        tasks = [
            delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4)
        ]
        batch_repr = 'mixed_batch_of_inc_4_calls'

    assert repr(Batch(tasks)) == batch_repr

    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:
            with parallel_backend('dask') as (ba, _):
                _ = Parallel(batch_size=2, pre_dispatch='all')(tasks)

            def f(dask_scheduler):
                return list(dask_scheduler.transition_log)
            # With batch_size=2 the names reflect 2-call batches.
            batch_repr = batch_repr.replace('4', '2')
            log = client.run_on_scheduler(f)
            assert all('batch_of_inc' in tup[0] for tup in log)
def test_no_undesired_distributed_cache_hit(loop):
    # Dask has a pickle cache for callables that are called many times. Because
    # the dask backends used to wrapp both the functions and the arguments
    # under instances of the Batch callable class this caching mechanism could
    # lead to bugs as described in: https://github.com/joblib/joblib/pull/1055
    # The joblib-dask backend has been refactored to avoid bundling the
    # arguments as an attribute of the Batch instance to avoid this problem.
    # This test serves as non-regression problem.

    # Use a large number of input arguments to give the AutoBatchingMixin
    # enough tasks to kick-in.
    lists = [[] for _ in range(100)]
    np = pytest.importorskip('numpy')
    X = np.arange(int(1e6))

    def isolated_operation(list_, X=None):
        # Mutates its argument worker-side; the driver-side lists must stay empty.
        list_.append(uuid4().hex)
        return list_

    cluster = LocalCluster(n_workers=1, threads_per_worker=2)
    client = Client(cluster)
    try:
        with parallel_backend('dask') as (ba, _):
            # dispatches joblib.parallel.BatchedCalls
            res = Parallel()(
                delayed(isolated_operation)(list_) for list_ in lists
            )

        # The original arguments should not have been mutated as the mutation
        # happens in the dask worker process.
        assert lists == [[] for _ in range(100)]

        # Here we did not pass any large numpy array as argument to
        # isolated_operation so no scattering event should happen under the
        # hood.
        counts = count_events('receive-from-scatter', client)
        assert sum(counts.values()) == 0
        assert all([len(r) == 1 for r in res])

        with parallel_backend('dask') as (ba, _):
            # Append a large array which will be scattered by dask, and
            # dispatch joblib._dask.Batch
            res = Parallel()(
                delayed(isolated_operation)(list_, X=X) for list_ in lists
            )

        # This time, auto-scattering should have kicked it.
        counts = count_events('receive-from-scatter', client)
        assert sum(counts.values()) > 0
        assert all([len(r) == 1 for r in res])
    finally:
        client.close()
        cluster.close()
class CountSerialized(object):
    """Wraps a value and counts how many times the instance is pickled."""

    def __init__(self, x):
        self.x = x
        # Number of times __reduce__ (i.e. serialization) has run.
        self.count = 0

    def __add__(self, other):
        # Support adding another CountSerialized or a plain number.
        other_value = getattr(other, 'x', other)
        return self.x + other_value

    __radd__ = __add__

    def __reduce__(self):
        self.count += 1
        # Reconstruct from x only; the count deliberately resets on unpickle.
        return (CountSerialized, (self.x,))
def add5(a, b, c, d=0, e=0):
    """Return the sum of up to five addends; d and e default to 0.

    Accumulates left-to-right, matching the associativity of a + b + c + d + e.
    """
    total = a + b
    total = total + c
    total = total + d
    total = total + e
    return total
def test_manual_scatter(loop):
    # Variables passed via scatter=[...] must be serialized exactly once,
    # regardless of how many tasks reference them.
    x = CountSerialized(1)
    y = CountSerialized(2)
    z = CountSerialized(3)

    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:  # noqa: F841
            with parallel_backend('dask', scatter=[x, y]) as (ba, _):
                f = delayed(add5)
                tasks = [f(x, y, z, d=4, e=5),
                         f(x, z, y, d=5, e=4),
                         f(y, x, z, d=x, e=5),
                         f(z, z, x, d=z, e=y)]
                expected = [func(*args, **kwargs)
                            for func, args, kwargs in tasks]
                results = Parallel()(tasks)

            # Scatter must take a list/tuple
            with pytest.raises(TypeError):
                with parallel_backend('dask', loop=loop, scatter=1):
                    pass

    assert results == expected

    # Scattered variables only serialized once
    assert x.count == 1
    assert y.count == 1
    # Depending on the version of distributed, the unscattered z variable
    # is either pickled 4 or 6 times, possibly because of the memoization
    # of objects that appear several times in the arguments of a delayed
    # task.
    assert z.count in (4, 6)
def test_auto_scatter(loop):
    """Large numpy arguments should be auto-scattered (and the scatter result
    reused), while small arrays travel inline with the task definition."""
    np = pytest.importorskip('numpy')
    data1 = np.ones(int(1e4), dtype=np.uint8)
    data2 = np.ones(int(1e4), dtype=np.uint8)
    data_to_process = ([data1] * 3) + ([data2] * 3)
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:
            with parallel_backend('dask') as (ba, _):
                # Passing the same data as arg and kwarg triggers a single
                # scatter operation whose result is reused.
                Parallel()(delayed(noop)(data, data, i, opt=data)
                           for i, data in enumerate(data_to_process))
            # By default large array are automatically scattered with
            # broadcast=1 which means that one worker must directly receive
            # the data from the scatter operation once.
            counts = count_events('receive-from-scatter', client)
            # assert counts[a['address']] + counts[b['address']] == 2
            assert 2 <= counts[a['address']] + counts[b['address']] <= 4
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:
            with parallel_backend('dask') as (ba, _):
                Parallel()(delayed(noop)(data1[:3], i) for i in range(5))
            # Small arrays are passed within the task definition without going
            # through a scatter operation.
            counts = count_events('receive-from-scatter', client)
            assert counts[a['address']] == 0
            assert counts[b['address']] == 0
@pytest.mark.parametrize("retry_no", list(range(2)))
def test_nested_scatter(loop, retry_no):
    """Nested Parallel calls inside dask tasks (via get_client) must work;
    parametrized twice to catch flaky scheduler-state interactions."""
    np = pytest.importorskip('numpy')
    NUM_INNER_TASKS = 10
    NUM_OUTER_TASKS = 10

    def my_sum(x, i, j):
        return np.sum(x)

    def outer_function_joblib(array, i):
        # Runs on a worker: re-enter the dask backend from inside a task.
        client = get_client()  # noqa
        with parallel_backend("dask"):
            results = Parallel()(
                delayed(my_sum)(array[j:], i, j) for j in range(
                    NUM_INNER_TASKS)
            )
        return sum(results)

    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as _:
            with parallel_backend("dask"):
                my_array = np.ones(10000)
                _ = Parallel()(
                    delayed(outer_function_joblib)(
                        my_array[i:], i) for i in range(NUM_OUTER_TASKS)
                )
def test_nested_backend_context_manager(loop):
    """Nested Parallel calls under the dask backend must respect n_jobs and
    must not deadlock, including after re-creating the client."""

    def get_nested_pids():
        # Two inner Parallel calls; collect the worker process pids they use.
        pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2)))
        pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2)))
        return pids

    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:
            with parallel_backend('dask') as (ba, _):
                pid_groups = Parallel(n_jobs=2)(
                    delayed(get_nested_pids)()
                    for _ in range(10)
                )
                for pid_group in pid_groups:
                    assert len(set(pid_group)) <= 2

        # No deadlocks
        with Client(s['address'], loop=loop) as client:  # noqa: F841
            with parallel_backend('dask') as (ba, _):
                pid_groups = Parallel(n_jobs=2)(
                    delayed(get_nested_pids)()
                    for _ in range(10)
                )
                for pid_group in pid_groups:
                    assert len(set(pid_group)) <= 2
def test_nested_backend_context_manager_implicit_n_jobs(loop):
    # Check that Parallel with no explicit n_jobs value automatically selects
    # all the dask workers, including in nested calls.

    def _backend_type(p):
        return p._backend.__class__.__name__

    def get_nested_implicit_n_jobs():
        # Runs inside a dask task: the nested backend should still be dask.
        with Parallel() as p:
            return _backend_type(p), p.n_jobs

    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:  # noqa: F841
            with parallel_backend('dask') as (ba, _):
                with Parallel() as p:
                    assert _backend_type(p) == "DaskDistributedBackend"
                    assert p.n_jobs == -1
                    all_nested_n_jobs = p(
                        delayed(get_nested_implicit_n_jobs)()
                        for _ in range(2)
                    )
                for backend_type, nested_n_jobs in all_nested_n_jobs:
                    assert backend_type == "DaskDistributedBackend"
                    assert nested_n_jobs == -1
def test_errors(loop):
    """Using the dask backend with no reachable scheduler must fail loudly."""
    with pytest.raises(ValueError) as excinfo:
        with parallel_backend('dask'):
            pass
    message = str(excinfo.value).lower()
    assert "create a dask client" in message
def test_correct_nested_backend(loop):
    """Nested parallelism picks the dask backend by default but falls back to
    threading when the innermost call requires shared memory."""
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:  # noqa: F841
            # No requirement, should be us
            with parallel_backend('dask') as (ba, _):
                result = Parallel(n_jobs=2)(
                    delayed(outer)(nested_require=None) for _ in range(1))
                assert isinstance(result[0][0][0], DaskDistributedBackend)

            # Require threads, should be threading
            with parallel_backend('dask') as (ba, _):
                result = Parallel(n_jobs=2)(
                    delayed(outer)(nested_require='sharedmem')
                    for _ in range(1))
                assert isinstance(result[0][0][0], ThreadingBackend)
def outer(nested_require):
    """Fan out a thread-preferring Parallel call down to ``middle``."""
    dispatch = delayed(middle)
    return Parallel(n_jobs=2, prefer='threads')(
        dispatch(nested_require) for _ in range(1)
    )
def middle(require):
    """Forward the backend requirement one level further down to ``inner``."""
    dispatch = delayed(inner)
    return Parallel(n_jobs=2, require=require)(
        dispatch() for _ in range(1)
    )
def inner():
    """Expose the backend instance a default-constructed Parallel resolves to."""
    parallel = Parallel()
    return parallel._backend
def test_secede_with_no_processes(loop):
    """Regression test: seceding must also work on an in-process cluster."""
    # https://github.com/dask/distributed/issues/1775
    with Client(loop=loop, processes=False, set_as_default=True):
        with parallel_backend('dask'):
            Parallel(n_jobs=4)(delayed(id)(i) for i in range(2))
def _worker_address(_):
    """Return the address of the dask worker executing this task."""
    from distributed import get_worker
    worker = get_worker()
    return worker.address
def test_dask_backend_keywords(loop):
    """The ``workers=`` keyword must pin all tasks to the requested worker."""
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:  # noqa: F841
            with parallel_backend('dask', workers=a['address']) as (ba, _):
                seq = Parallel()(
                    delayed(_worker_address)(i) for i in range(10))
                assert seq == [a['address']] * 10

            with parallel_backend('dask', workers=b['address']) as (ba, _):
                seq = Parallel()(
                    delayed(_worker_address)(i) for i in range(10))
                assert seq == [b['address']] * 10
def test_cleanup(loop):
    """After the backend context exits, no tasks or futures should linger."""
    with Client(processes=False, loop=loop) as client:
        with parallel_backend('dask'):
            Parallel()(delayed(inc)(i) for i in range(10))

        # Poll (with a 5 second budget) until the scheduler has dropped
        # every task produced by the Parallel call above.
        start = time()
        while client.cluster.scheduler.tasks:
            sleep(0.01)
            assert time() < start + 5

        assert not client.futures
@pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"])
@pytest.mark.skipif(
    distributed.__version__ <= '2.1.1' and distributed.__version__ >= '1.28.0',
    reason="distributed bug - https://github.com/dask/distributed/pull/2841")
def test_wait_for_workers(cluster_strategy):
    """The backend must block until at least one worker joins a cluster that
    starts empty, whether scaling is adaptive or merely delayed."""
    cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
    client = Client(cluster)
    if cluster_strategy == "adaptive":
        cluster.adapt(minimum=0, maximum=2)
    elif cluster_strategy == "late_scaling":
        # Tell the cluster to start workers but this is a non-blocking call
        # and new workers might take time to connect. In this case the Parallel
        # call should wait for at least one worker to come up before starting
        # to schedule work.
        cluster.scale(2)
    try:
        with parallel_backend('dask'):
            # The following should wait a bit for at least one worker to
            # become available.
            Parallel()(delayed(inc)(i) for i in range(10))
    finally:
        client.close()
        cluster.close()
def test_wait_for_workers_timeout():
    """With no worker available, a positive timeout raises TimeoutError and a
    zero timeout falls through to the generic joblib failure."""
    # Start a cluster with 0 worker:
    cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
    client = Client(cluster)
    try:
        with parallel_backend('dask', wait_for_workers_timeout=0.1):
            # Short timeout: DaskDistributedBackend
            msg = "DaskDistributedBackend has no worker after 0.1 seconds."
            with pytest.raises(TimeoutError, match=msg):
                Parallel()(delayed(inc)(i) for i in range(10))

        with parallel_backend('dask', wait_for_workers_timeout=0):
            # No timeout: fallback to generic joblib failure:
            msg = "DaskDistributedBackend has no active worker"
            with pytest.raises(RuntimeError, match=msg):
                Parallel()(delayed(inc)(i) for i in range(10))
    finally:
        client.close()
        cluster.close()
|
<gh_stars>10-100
#!/usr/bin/env python
import os
import json
import torch
import torch.nn.functional as F
import pickle
import random
import urllib
import urllib.request
import cherrypy
from transformers import DistilBertTokenizer
from model.multimodal_transformer import MMT_VideoQA
from util import compute_a2v, get_mask
from args import get_args
from global_parameters import (
SERVER_HTML_PATH,
SERVER_FEATURE_PATH,
) # to be defined in this file
class Server(object):
    """Cherrypy application serving the VideoQA demo: a landing page with a
    grid of YouTube videos and a question-answering page per video."""

    def __init__(
        self,
        vqa_model,
        vqa_model2,
        model_ckpt,
        model_ckpt2,
        video_features_path,
        a2v,
        id2a,
        T,
        Q,
        default_data,
        max_videos,
    ):
        """
        :param vqa_model: first model used for the demo
        :param vqa_model2: second model used for the demo
        :param model_ckpt: path to weights for the first model
        :param model_ckpt2: path to weights for the second model
        :param video_features_path: path to the features corresponding to the videos used in the demo
        :param a2v: map answer to tokens for all answers in a given answer dictionary
        :param id2a: map index to answer
        :param T: maximum number of video features
        :param Q: maximum number of tokens in the question
        :param default_data: map video_id to question, start, end
        :param max_videos: maximum number of videos in the demo
        """
        self.video_features = torch.load(video_features_path)
        # load weights for the first model on CPU
        self.vqa_model = vqa_model
        weights = torch.load(model_ckpt, map_location=torch.device("cpu"))
        # checkpoints were saved from a DataParallel wrapper: strip "module."
        weights = {x.split("module.")[1]: weights[x] for x in weights}
        self.vqa_model.load_state_dict(weights)
        self.vqa_model.eval()
        self.vqa_model._compute_answer_embedding(a2v)
        # load weights for the second model on CPU
        self.vqa_model2 = vqa_model2
        weights2 = torch.load(model_ckpt2, map_location=torch.device("cpu"))
        weights2 = {x.split("module.")[1]: weights2[x] for x in weights2}
        self.vqa_model2.load_state_dict(weights2)
        self.vqa_model2.eval()
        self.vqa_model2._compute_answer_embedding(a2v)
        self.all_video_ids = list(self.video_features.keys())[:max_videos]
        self.id2a = id2a
        self.T = T
        self.Q = Q
        self.default_data = default_data
        self.max_videos = max_videos

    def _render_index_page(self):
        """Render the landing page (grid of videos) for the current
        ``self.all_video_ids``. Shared by :meth:`index` and :meth:`reload`."""
        index_html = '<head><link rel="icon" href="https://antoyang.github.io/img/favicon.ico" type="image/x-icon"/>'
        index_html += '<link href="https://antoyang.github.io/css/bootstrap.min.css" rel="stylesheet"></head>'
        index_html += "<center><h1> <a href='https://antoyang.github.io/just-ask.html'> Just Ask </a> VideoQA Demo </h1></center>"
        index_html += "<center><h2> Choose a video for which you want to ask a question </h2></center>"
        index_html += "<center><h3> Default question, start and end timestamps are from the iVQA test set annotations. Nothing is pre-computed for these videos. </h3></center><br>"
        index_html += '<div class="container">'  # grid of videos
        for i, vid in enumerate(self.all_video_ids):
            url = "https://www.youtube.com/oembed"
            params = {
                "format": "json",
                "url": "https://www.youtube.com/watch?v=%s" % vid,
            }
            query_string = urllib.parse.urlencode(params)
            url = url + "?" + query_string
            try:
                with urllib.request.urlopen(
                    url
                ) as response:  # get thumbnail and title from YouTube
                    response_text = response.read()
                    data = json.loads(response_text.decode())
                    title = data["title"]
                    thumbnail_url = data["thumbnail_url"]
            except Exception:  # deleted/unavailable videos make oembed return a non-JSON body
                title = "Unavailable Video"
                thumbnail_url = "https://images.drivereasy.com/wp-content/uploads/2017/10/this-video-is-not-available-1.jpg"
            if i % 4 == 0:  # 4 videos per row
                index_html += '<div class="row">'
            index_html += '<div class="col-md-3 col-sm-12"><center><a href="vqa?video_id={}"><img src={} height="180" width="240"></img></a><br>'.format(
                vid, thumbnail_url
            )
            index_html += '<a href="vqa?video_id={}">{}</a></center></div>'.format(
                vid, title
            )
            if (i % 4 == 3) or (
                i == min(len(self.all_video_ids), self.max_videos) - 1
            ):  # end of row
                index_html += "</div><br><br>"
        index_html += "</div>"
        index_html += "<center><a href='reload' class='btn btn-primary btn-lg active'>More videos!</a></center><br>"
        index_html += "<center><h2> Built by <a href='https://antoyang.github.io/'> <NAME> </a> </h2> </center><br>"
        return index_html

    @cherrypy.expose
    def index(self):
        """Landing page: grid of the currently selected demo videos."""
        return self._render_index_page()

    @cherrypy.expose
    def vqa(self, video_id, start=0, end=5, question="", model="finetuned"):
        """Question-answering page for one video.

        :param video_id: YouTube id; must be present in the loaded features
        :param start: start timestamp in seconds (non-negative integer)
        :param end: end timestamp in seconds (non-negative integer)
        :param question: question text; empty selects the dataset default
        :param model: "zeroshot" (first model) or "finetuned" (second model)
        """
        if video_id not in self.video_features:
            return (
                f'Video {video_id} is not available, <a href="/">go back to index</a>.'
            )
        html_path = SERVER_HTML_PATH
        with open(html_path, "r") as f:
            html = f.read()
        if not str(start).isdigit():
            return 'Start time (in seconds) must be a positive integer, <a href="/">go back to index</a>.'
        if not str(end).isdigit():
            return 'End time (in seconds) must be a positive integer, <a href="/">go back to index</a>.'
        if not question:  # put default data
            flag = False
            start = self.default_data[video_id]["start"]
            end = self.default_data[video_id]["end"]
            question = self.default_data[video_id]["question"]
        else:
            flag = True  # a question is asked
        html = html.format(video_id, start, end, video_id, start, end, question)
        # one feature per second within [start, end]
        feature = self.video_features[video_id][int(start) : int(end) + 1]
        if len(feature) == 0:
            return f'Features are not available for video {video_id} between start {start} seconds and {end} seconds, <a href="/">go back to index</a>.'
        if flag:
            # prepare padded features and tokens, masks
            video_len = torch.Tensor([len(feature)])
            if len(feature) < self.vqa_model.T:
                # pad with zero feature vectors up to T
                feature = torch.cat(
                    [
                        feature,
                        torch.zeros(self.vqa_model.T - len(feature), feature.size(1)),
                    ],
                    dim=0,
                )
            else:
                # uniformly subsample T features
                sampled = []
                for j in range(self.vqa_model.T):
                    sampled.append(feature[(j * len(feature)) // self.vqa_model.T])
                feature = torch.stack(sampled)
            feature = feature.unsqueeze(0)
            # BUGFIX: the video mask must span the padded video axis (length
            # T), not the question-token axis (length Q) as before.
            video_mask = get_mask(video_len, self.vqa_model.T)
            tokens = torch.tensor(
                self.vqa_model.bert.bert_tokenizer.encode(
                    question,
                    add_special_tokens=True,
                    padding="max_length",
                    max_length=self.vqa_model.Q,
                    truncation=True,
                ),
                dtype=torch.long,
            ).unsqueeze(0)
            question_mask = tokens > 0
            with torch.no_grad():  # forward
                if (
                    model == "zeroshot"
                ):  # assumes that the first model is the zeroshot one
                    predicts = self.vqa_model(
                        feature,
                        question=tokens,
                        video_mask=video_mask,
                        text_mask=question_mask,
                    )
                elif model == "finetuned":
                    predicts = self.vqa_model2(
                        feature,
                        question=tokens,
                        video_mask=video_mask,
                        text_mask=question_mask,
                    )
                else:
                    raise NotImplementedError
            predicts = F.softmax(predicts, dim=1)
            topk = torch.topk(predicts, dim=1, k=5)  # top 5 answers
            topk_txt = [
                [self.id2a[x.item()] for x in y] for y in topk.indices.cpu()
            ]
            topk_scores = [[x * 100 for x in y] for y in topk.values.cpu()]
            progress_bar = ""
            for i in range(5):  # plot answer logits with a nice progress bar
                progress_bar += f'<div class="row"><div class="col-md-3" style="height: 5%;"><h3 style="color: #428bca !important;" class="center">{topk_txt[0][i]}</h3></div>'
                progress_bar += f'<div class="col-md-9" style="height: 5%;"><div class="progress" style="margin-top: 20px !important;"><div class="progress-bar" style="color: black; width: {topk_scores[0][i]}%;" width: {topk_scores[0][i]}%;" role="progressbar" aria-valuenow="{topk_scores[0][i]}" aria-valuemin="0" aria-valuemax="1">{topk_scores[0][i]:.2f}%</div></div></div></div>'
            html += '<div class="col-sm-offset-2 col-sm-8"> <b> Question input </b>: {} <br> <b> <br> Top 5 answers ({} model) </b>: {} </div></div>'.format(
                question, model, progress_bar
            )
        return html + "</div><br><br></body></html>"

    @cherrypy.expose
    def reload(self):
        """Same as index after randomizing the displayed videos."""
        self.all_video_ids = random.sample(
            list(self.video_features.keys()), self.max_videos
        )
        return self._render_index_page()
def run():
    """Configure cherrypy, build both VideoQA models and start the demo server."""
    args = get_args()
    port = args.port
    cherrypy.config.update({"server.socket_port": port})
    # Listen on all interfaces so the demo is reachable from other machines.
    cherrypy.config.update({"server.socket_host": "0.0.0.0"})
    conf = {
        "/": {
            "tools.sessions.on": True,
            "tools.staticdir.root": os.path.abspath(os.getcwd()),
        },
        "/js": {"tools.staticdir.on": True, "tools.staticdir.dir": "./js"},
    }
    # Map CLI dataset names to their directory names on disk.
    dir_map = {
        "activitynet": "ActivityNet-QA",
        "msrvtt": "MSRVTT-QA",
        "msvd": "MSVD-QA",
        "ivqa": "iVQA",
    }
    feature_path = os.path.join(
        SERVER_FEATURE_PATH, dir_map[args.dataset], "full_s3d_features_test.pth"
    )  # path to S3D features extracted for the full video duration
    default_data = pickle.load(
        open(
            os.path.join(
                SERVER_FEATURE_PATH, dir_map[args.dataset], "default_test.pkl"
            ),
            "rb",
        )
    )  # dictionary mapping video_id to question, start and end extracted from the dataset
    bert_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    # Build the answer vocabulary embeddings shared by both models.
    a2id, id2a, a2v = compute_a2v(
        vocab_path=args.vocab_path,
        bert_tokenizer=bert_tokenizer,
        amax_words=args.amax_words,
    )
    a2v = a2v.cpu()
    # Two identically configured models; the Server loads different weights
    # into each (zero-shot vs fine-tuned checkpoints).
    vqa_model = MMT_VideoQA(
        feature_dim=args.feature_dim,
        word_dim=args.word_dim,
        N=args.n_layers,
        d_model=args.embd_dim,
        d_ff=args.ff_dim,
        h=args.n_heads,
        dropout=args.dropout,
        T=args.max_feats,
        Q=args.qmax_words,
        baseline=args.baseline,
    )
    vqa_model2 = MMT_VideoQA(
        feature_dim=args.feature_dim,
        word_dim=args.word_dim,
        N=args.n_layers,
        d_model=args.embd_dim,
        d_ff=args.ff_dim,
        h=args.n_heads,
        dropout=args.dropout,
        T=args.max_feats,
        Q=args.qmax_words,
        baseline=args.baseline,
    )
    print(f"http server is running at port {port}")
    cherrypy.quickstart(
        Server(
            vqa_model,
            vqa_model2,
            args.pretrain_path,
            args.pretrain_path2,
            feature_path,
            a2v,
            id2a,
            args.max_feats,
            args.qmax_words,
            default_data,
            args.nb_examples,
        ),
        "/",
        conf,
    )
if __name__ == "__main__":
    # Start the cherrypy demo server when invoked as a script.
    run()
|
<filename>checks.d/burrow_v3.py
# stdlib
from urlparse import urljoin
# 3rd Party
import requests
import json
# project
from checks import AgentCheck
# Name of the Datadog service check reporting Burrow connectivity.
SERVICE_CHECK_NAME = 'burrow.can_connect'
# Fallback Burrow REST endpoint when `burrow_uri` is not configured.
DEFAULT_BURROW_URI = 'http://localhost:8000'
# Burrow v3 API root for kafka cluster resources.
CLUSTER_ENDPOINT = '/v3/kafka'
# Timeout (seconds) for HTTP requests to Burrow.
CHECK_TIMEOUT = 10
class BurrowCheck(AgentCheck):
    '''
    Extract consumer offsets, topic offsets and offset lag from Burrow REST API
    '''
    def check(self, instance):
        """Entry point called by the agent for each configured instance."""
        burrow_address = instance.get("burrow_uri", DEFAULT_BURROW_URI)
        target_clusters = instance.get("clusters")
        extra_tags = instance.get("tags", [])

        self._check_burrow(burrow_address, extra_tags)
        clusters = self._find_clusters(burrow_address, target_clusters)

        self.log.debug("Collecting Topic Offsets")
        self._topic_offsets(clusters, burrow_address, extra_tags)
        self.log.debug("Collecting Consumer Group Offsets")
        self._consumer_groups_offsets(clusters, burrow_address, extra_tags)
        self.log.debug("Collecting Consumer Group lags")
        self._consumer_groups_lags(clusters, burrow_address, extra_tags)

    def _consumer_groups_lags(self, clusters, burrow_address, extra_tags):
        """
        Retrieve the offsets for all consumer groups in the clusters
        Getting Consumer list could be factored out
        """
        for cluster in clusters:
            consumers_path = "%s/%s/consumer" % (CLUSTER_ENDPOINT, cluster)
            consumers_list = self._rest_request_to_json(burrow_address, consumers_path).get("consumers", [])
            for consumer in consumers_list:
                lags_path = "%s/%s/lag" % (consumers_path, consumer)
                lag_json = self._rest_request_to_json(burrow_address, lags_path)
                if not lag_json:
                    continue
                status = lag_json["status"]
                # Burrow reports NOTFOUND for groups with no committed offsets.
                if status == "NOTFOUND":
                    continue
                consumer_tags = ["cluster:%s" % cluster, "consumer:%s" % consumer] + extra_tags
                self.gauge("kafka.consumer.maxlag", status["maxlag"]["end"].get("lag", 0), tags=consumer_tags)
                self._submit_lag_status("kafka.consumer.lag_status", status["status"], tags=consumer_tags)
                for partition in status[u"partitions"]:
                    # FIX: compare to None with `is`, not `==` (PEP 8).
                    if partition.get('end') is None:
                        continue
                    partition_tags = consumer_tags + ["topic:%s" % partition["topic"], "partition:%s" % partition["partition"]]
                    self._submit_partition_lags(partition, partition_tags)
                    self._submit_lag_status("kafka.consumer.partition_lag_status", partition["status"], tags=partition_tags)

    def _submit_lag_status(self, metric_namespace, status, tags):
        """Emit one 0/1 gauge per known Burrow status, with the active one set to 1."""
        burrow_status = {
            "UNKNOWN": 0,
            "OK": 0,
            "WARN": 0,
            "ERR": 0,
            "STOP": 0,
            "STALL": 0,
            "REWIND": 0
        }
        if status not in burrow_status:
            self.log.error("Invalid lag status: '%s' for '%s'" % (status, tags))
            return
        burrow_status[status] = 1
        # FIX: dict.iteritems() is Python-2 only; items() works on both 2 and 3.
        for metric_name, value in burrow_status.items():
            self.gauge("%s.%s" % (metric_namespace, metric_name.lower()), value, tags=tags)

    def _submit_partition_lags(self, partition, tags):
        """Emit the per-partition lag gauge after validating the payload shape."""
        if partition.get("end", -1) == -1:
            self.log.error("[_submit_partition_lags] Failed submit data, cannot found `end`")
            return
        if partition["end"].get("lag", -1) == -1:
            self.log.error("[_submit_partition_lags] Failed submit data, cannot found `end.lag`")
            return
        if partition["end"].get("timestamp", -1) == -1:
            self.log.error("[_submit_partition_lags] Failed submit data, cannot found `end.timestamp`")
            return
        # FIX: dropped the unused `timestamp` local; the presence check above
        # is kept so malformed payloads are still rejected.
        lag = partition[u"end"][u"lag"]
        self.gauge("kafka.consumer.partition_lag", lag, tags=tags)

    def _check_burrow(self, burrow_address, extra_tags):
        """
        Check the Burrow health endpoint
        """
        url = urljoin(burrow_address, "/burrow/admin")
        # FIX: build the tags before the try block so the except clause can
        # never see them undefined.
        tags = ['instance:%s' % self.hostname] + extra_tags
        try:
            response = requests.get(url, timeout=CHECK_TIMEOUT)
            response.raise_for_status()
        except Exception as e:
            self.service_check(SERVICE_CHECK_NAME,
                               AgentCheck.CRITICAL, tags=tags,
                               message=str(e))
            raise
        else:
            self.service_check(SERVICE_CHECK_NAME, AgentCheck.OK,
                               tags=tags,
                               message='Connection to %s was successful' % url)

    def _topic_offsets(self, clusters, burrow_address, extra_tags):
        """
        Retrieve the offsets for all topics in the clusters
        """
        for cluster in clusters:
            cluster_path = "%s/%s/topic" % (CLUSTER_ENDPOINT, cluster)
            topic_list = self._rest_request_to_json(burrow_address, cluster_path).get("topics", [])
            for topic in topic_list:
                topic_path = "%s/%s" % (cluster_path, topic)
                response = self._rest_request_to_json(burrow_address, topic_path)
                tags = ["topic:%s" % topic, "cluster:%s" % cluster] + extra_tags
                self._submit_offsets_from_json(offsets_type="topic", json=response, tags=tags)

    def _consumer_groups_offsets(self, clusters, burrow_address, extra_tags):
        """
        Retrieve the offsets for all consumer groups in the clusters
        """
        for cluster in clusters:
            consumers_path = "%s/%s/consumer" % (CLUSTER_ENDPOINT, cluster)
            consumers_list = self._rest_request_to_json(burrow_address, consumers_path).get("consumers", [])
            for consumer in consumers_list:
                topics_path = "%s/%s" % (consumers_path, consumer)
                topics_list = self._rest_request_to_json(burrow_address, topics_path).get("topics", [])
                for topic in topics_list:
                    topic_path = "%s/%s/topic/%s" % (CLUSTER_ENDPOINT, cluster, topic)
                    response = self._rest_request_to_json(burrow_address, topic_path)
                    if not response:
                        continue
                    tags = ["topic:%s" % topic, "cluster:%s" % cluster,
                            "consumer:%s" % consumer] + extra_tags
                    self._submit_offsets_from_json(offsets_type="consumer", json=response, tags=tags)

    def _submit_offsets_from_json(self, offsets_type, json, tags):
        """
        Find the offsets and push them into the metrics
        """
        offsets = json.get("offsets")
        if offsets:
            # for unconsumed or empty partitions, change an offset of -1 to 0 so the
            # sum isn't affected by the number of empty partitions.
            offsets = [max(offset, 0) for offset in offsets]
            self.gauge("kafka.%s.offsets.total" % offsets_type, sum(offsets), tags=tags)
            for partition_number, offset in enumerate(offsets):
                new_tags = tags + ["partition:%s" % partition_number]
                self.gauge("kafka.%s.offsets" % offsets_type, offset, tags=new_tags)

    def _find_clusters(self, address, target):
        """
        Find the available clusters in Burrow, return all clusters if
        target is not set.
        """
        available_clusters = self._rest_request_to_json(address, CLUSTER_ENDPOINT).get("clusters")
        if not available_clusters:
            raise Exception("There are no clusters in Burrow")

        if not target:
            return available_clusters
        else:
            clusters = []
            for name in target:
                if name in available_clusters:
                    clusters.append(name)
                else:
                    self.log.error("Cluster '%s' does not exist" % name)
            return clusters

    def _rest_request_to_json(self, address, object_path):
        '''
        Query the given URL and return the JSON response
        '''
        response_json = None
        service_check_tags = ['instance:%s' % self.hostname]
        url = urljoin(address, object_path)

        try:
            # FIX: pass a timeout; without one the Timeout handler below could
            # never fire and a stalled Burrow would hang the whole check.
            response = requests.get(url, timeout=CHECK_TIMEOUT)
            response_json = response.json()
            # FIX: use .get() so a payload without "error" does not raise an
            # uncaught KeyError.
            if response_json.get("error"):
                self.log.error("Burrow Request failed: %s: %s" % (object_path, response_json["message"]))
                return {}
        except requests.exceptions.Timeout as e:
            self.log.error("Request timeout: {0}, {1}".format(url, e))
            raise
        except (requests.exceptions.HTTPError,
                requests.exceptions.InvalidURL,
                requests.exceptions.ConnectionError) as e:
            self.log.error("Request failed: {0}, {1}".format(url, e))
            raise
        except ValueError as e:
            self.log.error(str(e))
            raise
        else:
            self.log.debug('Connection to %s was successful' % url)

        return response_json
|
<filename>hardware/controller.py
from multiprocessing.connection import Listener
from nanpy import (ArduinoApi, SerialManager, Ultrasonic)
from picamera import PiCamera
from time import sleep
import sys
import camera_config
import numpy as np
# Direction codes shared with the movement helpers below.
LEFT = 0
RIGHT = 1
FORWARD = 2
BACKWARD = 3
# PWM duty-cycle bounds for the motor driver (8-bit analogWrite range).
MAX_SPEED = 255
MIN_SPEED = 60
class Controller():
    '''
    Drives the rover hardware: an Arduino (motors + ultrasonic sensor over
    nanpy serial) and a Raspberry Pi camera.
    '''

    def __init__(self):
        self.arduino = self.setup_arduino()
        self.camera = self.setup_camera()
        # Scales requested speeds to the surface being driven on.
        self.speed_factor = 0.45  # student center
        # self.speed_factor = 0.5 # capstone room
        # self.speed_factor = 0.55 # jack's house

    def setup_arduino(self):
        """Connect to the Arduino and configure the motor/sensor pin modes."""
        arduino = self.establish_connection()
        # set up pin modes on arduino
        self.LOW = arduino.LOW
        self.HIGH = arduino.HIGH
        self.outpins = {
            "enA": 10,
            "in1": 2,
            "in2": 3,
            "enB": 11,
            "in3": 4,
            "in4": 5,
            "trigPin": 12
        }
        self.inpins = {
            "echoPin": 13
        }
        arduino.pinMode(self.outpins["enA"], arduino.OUTPUT)
        arduino.pinMode(self.outpins["in1"], arduino.OUTPUT)
        arduino.pinMode(self.outpins["in2"], arduino.OUTPUT)
        arduino.pinMode(self.outpins["enB"], arduino.OUTPUT)
        arduino.pinMode(self.outpins["in3"], arduino.OUTPUT)
        arduino.pinMode(self.outpins["in4"], arduino.OUTPUT)
        # arduino.pinMode(self.outpins["trigPin"], arduino.OUTPUT)
        # arduino.pinMode(self.inpins["echoPin"], arduino.INPUT)
        # Not sure why below doens't work - TODO: fix later
        # for k, p in enumerate(self.outpins.items()):
        #     arduino.pinMode(p, arduino.OUTPUT)
        # for k, p in enumerate(self.inpins.items()):
        #     arduino.pinMode(p, arduino.INPUT)
        print("Arduino initialized.")
        return arduino

    def establish_connection(self):
        """Open the serial link, returning the ArduinoApi handle or exiting."""
        # establish connection to the Arduino
        try:
            conn = SerialManager()
            trig = 12
            echo = 13
            a = ArduinoApi(connection=conn)
            self.ultrasonic = Ultrasonic(echo, trig, False, connection=conn)
            return a
        # FIX: catch Exception instead of a bare except so KeyboardInterrupt
        # and SystemExit are not swallowed.
        except Exception:
            sys.exit("Failed to establish connection with the Arduino")

    def setup_camera(self):
        """Initialize the Pi camera at the configured resolution, or exit."""
        try:
            camera = PiCamera()
            self.resolution = (camera_config.resolution_width, camera_config.resolution_height)
            camera.resolution = self.resolution
            print("Camera initialized.")
            return camera
        except Exception:
            print("Failed to set up camera")
            sys.exit("Failed to set up camera")

    '''
    Public functions for pictures / rover movements etc.
    '''
    # **************** Camera ****************
    def capture(self, output='image', format='jpeg'):
        """Capture a single frame to `output` in the given format."""
        if self.camera is not None:
            self.camera.capture(output, format=format)
        else:
            print("Controller has no camera.")

    def capture_continuous(self, output='image', format='jpeg'):
        """Return an infinite iterator of captures named output + "-{counter}"."""
        if self.camera is not None:
            return self.camera.capture_continuous(output + "-{counter}", format=format)
        else:
            print("Controller has no camera.")

    def capture_opencv(self):
        """Capture a frame as an HxWx3 BGR numpy array suitable for OpenCV."""
        if self.camera is not None:
            image = np.empty((self.resolution[1] * self.resolution[0] * 3,), dtype=np.uint8)
            self.camera.capture(image, 'bgr')
            image = image.reshape((self.resolution[1], self.resolution[0], 3))
            return image
        else:
            print("Controller has no camera.")

    # **************** Rover ****************
    def get_proper_speed(self, speed):
        """Scale `speed` by the surface factor and clamp to [MIN_SPEED, MAX_SPEED]."""
        s = int(self.speed_factor * speed)
        if s > MAX_SPEED:
            s = MAX_SPEED
        elif s < MIN_SPEED:
            s = MIN_SPEED
        return s

    def set_speed(self, speed):
        """Apply the clamped PWM duty cycle to both motor enable pins."""
        speed = self.get_proper_speed(speed)
        self.arduino.analogWrite(self.outpins["enA"], speed)
        self.arduino.analogWrite(self.outpins["enB"], speed)

    def move_l_wheel(self, direction=1):
        """Spin the left wheel: 1 = forward, anything else = backward."""
        if direction == 1:  # Move forward
            self.arduino.digitalWrite(self.outpins["in1"], self.HIGH)
            self.arduino.digitalWrite(self.outpins["in2"], self.LOW)
        else:  # Move backwards
            self.arduino.digitalWrite(self.outpins["in1"], self.LOW)
            self.arduino.digitalWrite(self.outpins["in2"], self.HIGH)

    def move_r_wheel(self, direction=1):
        """Spin the right wheel: 1 = forward, anything else = backward."""
        if direction == 1:  # Move forward
            self.arduino.digitalWrite(self.outpins["in3"], self.HIGH)
            self.arduino.digitalWrite(self.outpins["in4"], self.LOW)
        else:  # Move backwards
            self.arduino.digitalWrite(self.outpins["in3"], self.LOW)
            self.arduino.digitalWrite(self.outpins["in4"], self.HIGH)

    def move(self, direction=FORWARD, speed=MAX_SPEED):
        """Drive both wheels the same way (FORWARD/BACKWARD map onto 0/1)."""
        if direction > 1:
            direction = direction - 2
        self.move_l_wheel(direction)
        self.move_r_wheel(direction)
        self.set_speed(speed)

    def turn(self, direction=RIGHT, speed=MAX_SPEED):
        """Rotate in place: LEFT (0) counter-clockwise, RIGHT (1) clockwise."""
        # direction can either be 0 or 1
        # 0 : turn counter clockwise LEFT
        # 1 : turn clockwise RIGHT
        self.move_l_wheel(direction)      # crappy implementation, either 0 or 1
        self.move_r_wheel(direction + 1)  # crappy implementation, either 1 or 2
        self.set_speed(speed)

    def stop(self):
        """Cut power to both motors."""
        self.arduino.digitalWrite(self.outpins["in1"], self.LOW)
        self.arduino.digitalWrite(self.outpins["in2"], self.LOW)
        self.arduino.digitalWrite(self.outpins["in3"], self.LOW)
        self.arduino.digitalWrite(self.outpins["in4"], self.LOW)
        self.arduino.analogWrite(self.outpins["enA"], 0)
        self.arduino.analogWrite(self.outpins["enB"], 0)

    def get_distance(self):
        """Return the ultrasonic range reading."""
        distance = self.ultrasonic.get_distance()
        return distance

    '''
    Destructor
    '''
    def __del__(self):
        # FIX: best-effort cleanup. If __init__ exited early, self.arduino or
        # self.camera may not exist; the old code could raise here and leave
        # the camera open.
        try:
            self.stop()
        except Exception:
            pass
        camera = getattr(self, "camera", None)
        if camera is not None:
            camera.close()
# pass |
<filename>upload.py
#!/usr/bin/env python3
import configargparse
import shutil
import tempfile
import urllib.request
from urllib.parse import urlparse
import requests
import logging
from http import HTTPStatus
from pymarc import parse_xml_to_array
# Fallback config file name shown in --config-file help text.
DEFAULT_CONFIG_FILE = 'config.yaml'
# TAGS according to https://www.loc.gov/marc/bibliographic/ecbdlist.html
ELECTRONIC_LOCATION_AND_ACCESS = '856'  # MARC21 field holding file URLs and metadata
# indicators according to https://www.loc.gov/marc/bibliographic/ecbdlist.html
HTTP_ACCESS_METHOD = '4'  # first-indicator value meaning HTTP access
ACCEPTED_TYPES = ['MP4', 'MKV', 'MOV']  # subfield $q media types we register
# Command-line / config-file parser (configargparse also reads config.yaml).
parser = configargparse.ArgumentParser(
    formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    default_config_files=['config.yaml'],
    description='Register files in the Onedata system.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument(
    '--host', '-H',
    action='store',
    help='Oneprovider host.',
    dest='host',
    required=True)
requiredNamed.add_argument(
    '--space-id', '-spi',
    action='store',
    help='Id of the space in which the files will be registered.',
    dest='space_id',
    required=True)
requiredNamed.add_argument(
    '--storage-id', '-sti',
    action='store',
    help='Id of the storage on which the files are located. Storage must be created as an `imported` storage with path type equal to `canonical`.',
    dest='storage_id',
    required=True)
requiredNamed.add_argument(
    '--token', '-t',
    action='store',
    help='Onedata access token.',
    dest='token',
    required=True)
requiredNamed.add_argument(
    '--collection-url', '-c',
    action='append',
    help='URL to MARC21 record describing collection of files. Many collections can be passed (e.g. `-c URL1 -c URL2`).',
    dest='collections',
    required=True)
parser.add_argument(
    '--file-mode', '-m',
    action='store',
    help='POSIX mode with which files will be registered, represented as an octal string.',
    dest='mode',
    default="0664"
)
parser.add_argument(
    '--disable-auto-detection', '-dd',
    action='store_true',
    help='Flag which disables automatic detection of file attributes and verification whether file exists on storage. '
         'Passing this flag results in faster registration of files but there is a risk of registering files that '
         'don\'t exist on storage. Such files will be visible in the space but not accessible.',
    dest='disable_auto_detection',
    default=False
)
parser.add_argument(
    '--logging-frequency', '-lf',
    action='store',
    type=int,
    help='Frequency of logging. Log will occur after registering every logging_freq number of files.',
    dest='logging_freq',
    default=None)
parser.add_argument(
    '--disable-cert-verification', '-dv',
    action='store_true',
    help='Flag which disables verification of SSL certificate.',
    dest='disable_cert_verification',
    default=False)
parser.add_argument(
    '--config-file',
    action='store',
    is_config_file=True,
    help='Path to config file which will override the default {0}'.format(DEFAULT_CONFIG_FILE),
    dest='config_file'
)
# Oneprovider REST endpoint template; {0} is filled with args.host.
REGISTER_FILE_ENDPOINT = "https://{0}/api/v3/oneprovider/data/register"
def strip_server_url(storage_file_id):
    """Return only the path component when given a full URL; otherwise pass through."""
    parsed = urlparse(storage_file_id)
    return parsed.path if parsed.scheme else storage_file_id
def register_file(storage_file_id, size, checksum):
    """Register one file in Onedata via the Oneprovider REST API.

    Returns True when the provider answers 201 CREATED, False otherwise
    (non-201 responses and request-level exceptions are logged).
    """
    headers = {
        'X-Auth-Token': args.token,
        "content-type": "application/json"
    }
    storage_file_id = strip_server_url(storage_file_id)
    payload = {
        'spaceId': args.space_id,
        'storageId': args.storage_id,
        'storageFileId': storage_file_id,
        'destinationPath': storage_file_id,
        'size': size,
        'mode': args.mode,
        'xattrs': {
            'checksum': checksum
        },
        'autoDetectAttributes': not args.disable_auto_detection
    }
    try:
        response = requests.post(REGISTER_FILE_ENDPOINT.format(args.host), json=payload, headers=headers,
                                 verify=(not args.disable_cert_verification))
        if response.status_code == HTTPStatus.CREATED:
            return True
        # BUG FIX: the original statement ended with a stray comma, turning the
        # logging call into a throwaway 1-tuple expression.
        logging.error("Registration of {0} failed with HTTP status {1}.\n"
                      "Response: {2}".format(storage_file_id, response.status_code, response.content))
        return False
    except Exception as e:
        logging.error("Registration of {0} failed due to {1}".format(storage_file_id, e), exc_info=True)
        # BUG FIX: previously fell through and implicitly returned None on
        # exceptions; make the failure explicit so callers get a boolean.
        return False
def download_and_load_marc21_record(url):
    """Download a MARC21 XML document from `url` and return its first record.

    Returns None (implicitly) when the document contains no records.
    NOTE(review): reopening the still-open NamedTemporaryFile by name is
    POSIX-only behaviour -- confirm this never runs on Windows.
    """
    with urllib.request.urlopen(url) as response:
        with tempfile.NamedTemporaryFile(delete=True) as tmp_file:
            shutil.copyfileobj(response, tmp_file)
            tmp_file.flush()  # ensure all bytes hit disk before re-reading
            with open(tmp_file.name, 'r') as f:
                records = parse_xml_to_array(f)
                if records:
                    return records[0]
def get_file_fields(collection_url):
    """Return the 856 (electronic location) fields of a collection record.

    Returns None when the collection record cannot be loaded.
    """
    record = download_and_load_marc21_record(collection_url)
    return record.get_fields(ELECTRONIC_LOCATION_AND_ACCESS) if record else None
def get_access_method(field):
    """The first MARC indicator of an 856 field encodes the access method."""
    return field.indicator1


def is_http_access_method(field):
    """True when the field's access method indicator is HTTP ('4')."""
    return HTTP_ACCESS_METHOD == get_access_method(field)
def get_subfield(field, subfield_name):
    """First value of the named subfield, or None when it is absent."""
    values = field.get_subfields(subfield_name)
    return values[0] if values else None


def get_type(field):
    """Subfield $q: the file's media type (e.g. 'MP4')."""
    return get_subfield(field, 'q')


def get_size(field):
    """Subfield $s as an int, or None when the size is missing."""
    raw = get_subfield(field, 's')
    return int(raw) if raw else None


def get_control_number(field):
    """Subfield $w: control number, expected in the form '<id>;<md5>'."""
    return get_subfield(field, 'w')


def get_uri(field):
    """Subfield $u: URI of the file on storage."""
    return get_subfield(field, 'u')


def get_md5_checksum(field):
    """MD5 checksum parsed out of the field's control number."""
    return parse_md5(get_control_number(field))


def parse_md5(control_number):
    """The checksum is the token after the first ';' of the control number."""
    return control_number.split(';')[1]
# --- script entry: register every accepted file of every collection ---
args = parser.parse_args()
total_size = 0
total_count = 0
for collection_url in args.collections:
    print("Processing collection {0}".format(collection_url))
    file_fields = get_file_fields(collection_url)
    if not file_fields:
        # BUG FIX: get_file_fields returns None when the collection record
        # cannot be loaded; iterating None crashed the whole run.
        print("No file entries found for {0}".format(collection_url))
        continue
    for file_field in file_fields:
        if not is_http_access_method(file_field):
            continue
        if get_type(file_field) not in ACCEPTED_TYPES:
            continue
        size = get_size(file_field)  # fetch once; reused for the running total
        if register_file(get_uri(file_field), size, get_md5_checksum(file_field)):
            total_size += size
            total_count += 1
            if args.logging_freq and total_count % args.logging_freq == 0 and total_count > 0:
                print("Registered {0} files".format(total_count))
print("\nTotal registered files count: {0}".format(total_count))
print("Total size: {0}".format(total_size))
|
# gh_stars: 0
import os
import re
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from PIL import Image
import cv2
def next_greater_power_of_2(x):
    """Smallest power of two >= x (5 -> 8, 8 -> 8, 1 -> 1)."""
    return 1 << (int(x) - 1).bit_length()
def next_lower_power_of_2(x):
    """Power of two one step below next_greater_power_of_2 (5 -> 4, 8 -> 4, 9 -> 8).

    Note: yields 0.5 for x == 1 because the exponent goes negative.
    """
    exponent = (int(x) - 1).bit_length() - 1
    return 2 ** exponent
def add_prefix_and_suffix_4_basename(path, prefix=None, suffix=None):
    """Return `path` with `prefix` prepended and `suffix` appended to the stem.

    The extension is preserved. None prefix/suffix become empty strings;
    any other value (e.g. an int) is stringified.
    """
    dir_path, basename = os.path.split(path)
    stem, ext = os.path.splitext(basename)
    pre = '' if prefix is None else str(prefix)
    suf = '' if suffix is None else str(suffix)
    return os.path.join(dir_path, pre + stem + suf + ext)
def standard_normalizaion(x):
    """Z-score: subtract the mean and divide by the standard deviation.

    (The misspelled name is kept for caller compatibility.)
    """
    mu, sigma = np.mean(x), np.std(x)
    return (x - mu) / sigma
def wise_standard_normalizaion(data, normalization=None):
    """Normalize a batch of 2-D samples in one of three modes.

    normalization:
        None               -- return the data unchanged (as ndarray)
        'sample-wise'      -- z-score each sample over all its entries
        'channel-wise'     -- z-score each row (channel) of each sample
        'samplepoint-wise' -- z-score each column (time point) of each sample
    A single 2-D sample is accepted and returned without the batch axis.
    NOTE(review): normalized values are written back into `data` slices in
    place; with an integer input dtype the z-scores would be truncated --
    confirm callers always pass float data.
    """
    data = np.array(data)
    if normalization is None:
        return data
    assert normalization in ['sample-wise', 'channel-wise', 'samplepoint-wise']
    data_ndim = data.ndim
    if data_ndim == 2:
        # promote a single sample to a batch of one
        data = data[np.newaxis,]
    for i in range(len(data)):
        if normalization == 'sample-wise':
            data[i, :, :] = standard_normalizaion(data[i, :, :])
        elif normalization == 'channel-wise':
            data[i, :, :] = [standard_normalizaion(data[i, j, :]) for j in range(data.shape[-2])]
        elif normalization == 'samplepoint-wise':
            data[i, :, :] = np.array([standard_normalizaion(data[i, :, j]) for j in range(data.shape[-1])]).T
        else:
            # unreachable: the assert above already restricted the choices
            print('-' * 20, 'normalization is incorrectly assigned', '-' * 20)
            exit(1)
    if data_ndim == 2:
        # drop the batch axis added above
        return np.array(data)[0]
    return np.array(data)
def split_data(data, split=0.8, shuffle=True):
    """Split (x, y) into train/test partitions by the `split` fraction."""
    x, y = data[0], data[1]
    n_train = int(len(x) * split)
    order = np.arange(len(x))
    if shuffle:
        order = np.random.permutation(order)
    train_idx, test_idx = order[:n_train], order[n_train:]
    return x[train_idx], y[train_idx], x[test_idx], y[test_idx]
def split_data_wid(data, split=0.8, shuffle=True):
    """Split (x, y, s) into train/test; ids `s` are returned for train only
    (matching the historical 5-tuple return shape)."""
    x, y, s = data[0], data[1], data[2]
    n_train = int(len(x) * split)
    order = np.arange(len(x))
    if shuffle:
        order = np.random.permutation(order)
    tr, te = order[:n_train], order[n_train:]
    return x[tr], y[tr], s[tr], x[te], y[te]
def split_data_both(data, split=0.8, shuffle=True):
    """Split (x, x_poison, y, s); poison inputs and ids are returned for train
    only (matching the historical 6-tuple return shape)."""
    x, x_poison, y, s = data[0], data[1], data[2], data[3]
    n_train = int(len(x) * split)
    order = np.arange(len(x))
    if shuffle:
        order = np.random.permutation(order)
    tr, te = order[:n_train], order[n_train:]
    return x[tr], x_poison[tr], y[tr], s[tr], x[te], y[te]
def shuffle_data(data, random_seed=None):
    '''
    Jointly shuffle x and y with one shared permutation.
    data: [x, y] type: numpy
    '''
    x, y = data
    perm = get_shuffle_index(x.shape[0], random_seed=random_seed)
    return x[perm], y[perm]


def get_shuffle_index(data_size, random_seed=None):
    """Random permutation of range(data_size); reproducible when seeded."""
    if random_seed is not None:
        np.random.seed(random_seed)
    return np.random.permutation(np.arange(data_size))
def bca(y_true, y_pred):
    """Balanced classification accuracy: the mean of per-class recalls."""
    cm = confusion_matrix(y_true, y_pred)
    n_classes = cm.shape[0]
    recall_sum = 0
    for label in range(n_classes):
        recall_sum += cm[label, label] / np.sum(cm[label, :], keepdims=False).astype(np.float32)
    return recall_sum / n_classes
def get_split_indices(data_size, split=[9, 1], shuffle=True):
    """Partition range(data_size) into len(split) index arrays with sizes
    proportional to `split`; the last part absorbs the rounding remainder.

    Raises:
        TypeError: when fewer than two parts are requested.
    """
    # Note: the mutable default is never mutated (it is rebound below).
    if len(split) < 2:
        # BUG FIX: the message claimed "larger than 2" while the actual
        # requirement enforced here is a length of at least 2.
        raise TypeError(
            'The length of split should be at least 2 while the length of your split is {}!'.format(len(split)))
    split = np.array(split)
    split = split / np.sum(split)
    if shuffle:
        indices = get_shuffle_index(data_size)
    else:
        indices = np.arange(data_size)
    split_indices_list = []
    start = 0
    for i in range(len(split) - 1):
        # floor keeps each early part from over-consuming; remainder goes last
        end = start + int(np.floor(split[i] * data_size))
        split_indices_list.append(indices[start:end])
        start = end
    split_indices_list.append(indices[start:])
    return split_indices_list
def batch_iter(data, batchsize, shuffle=True, random_seed=None):
    """Yield (x, y) minibatches, optionally shuffling first.

    Example:
        batches = list(batch_iter([x_train, y_train], batchsize=32, shuffle=True, random_seed=None))
    """
    if shuffle:
        x, y = shuffle_data(data, random_seed=random_seed)
    else:
        x, y = data
    data_size = x.shape[0]
    # BUG FIX: the np.int alias was removed in NumPy 1.24; plain int() on the
    # ceil result is equivalent and version-proof.
    nb_batches = int(np.ceil(data_size / batchsize))
    for batch_id in range(nb_batches):
        start_index = batch_id * batchsize
        end_index = min(start_index + batchsize, data_size)
        yield x[start_index:end_index], y[start_index:end_index]
# def batch_iter( data, batchsize, shuffle=True, random_seed=None ):
# data = np.array(list(data))
# data_size = data.shape[0]
# num_batches = np.ceil(data_size/batchsize).astype(np.int)
# # Shuffle the data
# if shuffle:
# shuffle_indices = get_shuffle_index(data_size)
# shuffled_data = data[shuffle_indices]
# else:
# shuffled_data = data
# for batch_num in range(num_batches):
# start_index = batch_num*batchsize
# end_index = min((batch_num+1)*batchsize, data_size)
# yield shuffled_data[start_index:end_index]
#
def calculate_accuracy(y, y_pred, target_id=None):
    """
    Computes the accuracy as well as num_adv of attack of the target class.
    Args:
        y: ground truth labels. Accepts one hot encodings or labels.
        y_pred: predicted labels. Accepts probabilities or labels.
        target_id: target class
    Returns:
        accuracy
        accuracy_nb: number of samples which are classified correctly
        target_rate:
        target_total: number of samples which changed their labels from others to target_id
    """
    y = checked_argmax(y, to_numpy=True)
    y_pred = checked_argmax(y_pred, to_numpy=True)
    hits = np.equal(y, y_pred)
    accuracy = np.mean(hits)
    accuracy_nb = np.sum(hits)
    if target_id is None:
        return accuracy, accuracy_nb
    others = (y != target_id)
    target_total = np.sum(y_pred[others] == target_id)
    target_rate = target_total / np.sum(others)
    if np.isnan(target_rate):
        # batch contained only target-class samples -> 0/0; call it 100%
        target_rate = 1.
    return accuracy, accuracy_nb, target_rate, target_total


def checked_argmax(y, to_numpy=False):
    """
    Performs an argmax after checking if the input is either a tensor
    or a numpy matrix of rank 2 at least.
    Args:
        y: an numpy array or tensorflow tensor
        to_numpy: bool, flag to convert a tensor to a numpy array.
    Returns:
        an argmaxed array if possible, otherwise original array.
    """
    if y.ndim > 1:
        y = np.argmax(y, axis=-1)
    return np.array(y) if to_numpy else y
def extract_nb_from_str(str):
    """Return every run of digits in the string as ints ('a12b3' -> [12, 3]).

    NOTE(review): the parameter shadows the builtin `str`; the name is kept
    so keyword callers stay compatible.
    """
    return [int(match) for match in re.findall(r'\d+', str)]
def get_files_by_suffix(root, suffix):
    """Recursively list files under `root` whose path ends with `suffix`.

    `suffix` may be a single string or an iterable of strings.
    """
    suffixes = (suffix,) if isinstance(suffix, str) else tuple(suffix)
    matches = []
    for parent, dirs, files in os.walk(root):
        for name in files:
            full = os.path.normpath(os.path.join(parent, name))
            # endswith accepts a tuple, so one call covers every suffix
            if full.endswith(suffixes):
                matches.append(full)
    return matches


def get_files_by_prefix(root, prefix):
    """Recursively list files under `root` whose basename starts with `prefix`."""
    prefixes = (prefix,) if isinstance(prefix, str) else tuple(prefix)
    matches = []
    for parent, dirs, files in os.walk(root):
        matches.extend(os.path.normpath(os.path.join(parent, name))
                       for name in files if name.startswith(prefixes))
    return matches


def get_dirs_by_suffix(root, suffix):
    """Recursively list directories under `root` whose path ends with `suffix`."""
    suffixes = (suffix,) if isinstance(suffix, str) else tuple(suffix)
    matches = []
    for parent, dirs, files in os.walk(root):
        for name in dirs:
            full = os.path.normpath(os.path.join(parent, name))
            if full.endswith(suffixes):
                matches.append(full)
    return matches


def get_dirs_by_prefix(root, prefix):
    """Recursively list directories under `root` whose name starts with `prefix`."""
    prefixes = (prefix,) if isinstance(prefix, str) else tuple(prefix)
    matches = []
    for parent, dirs, files in os.walk(root):
        matches.extend(os.path.normpath(os.path.join(parent, name))
                       for name in dirs if name.startswith(prefixes))
    return matches
def plot_curve(data, title=None, img_path=None, show=True, y_lim=None, linestyle='-', linewidth=1):
    '''
    Plot one or more labelled curves and optionally save the figure.

    data: tuple of every curve's label, data and color
    for example:
        curve_name = ['Training acc_t', 'Validation acc_t', 'Test acc_t']
        curve_data = [train_acc, val_acc, test_acc]
        color = ['r', 'y', 'cyan']
        utils.plot_curve(data=list(zip(curve_name, curve_data, color)), title=title, img_path=img_path)
    '''
    plt.figure()
    for label, values, color in data:
        xs = list(range(0, len(values)))
        plt.plot(xs, values, color, label=label, linestyle=linestyle, linewidth=linewidth)
    if y_lim is not None:
        plt.ylim(y_lim)
    plt.title(title)
    plt.legend()
    if img_path is not None:
        dir_name = os.path.dirname(img_path)
        if dir_name:
            # BUG FIX: os.mkdir fails when intermediate directories are
            # missing; makedirs(exist_ok=True) creates the whole chain and
            # also removes the exists-then-create race.
            os.makedirs(dir_name, exist_ok=True)
        plt.savefig(img_path)
    if show:
        plt.show()
    else:
        plt.close()
def plot_hist(data, title=None, img_path=None, bins=100, show=True):
    '''
    Plot one histogram per labelled series and optionally save the figure.

    data: tuple of every curve's label, data and color
    for example:
        curve_name = ['Training acc_t', 'Validation acc_t', 'Test acc_t']
        curve_data = [train_acc, val_acc, test_acc]
        color = ['r', 'y', 'cyan']
        utils.plot_curve(data=list(zip(curve_name, curve_data, color)), title=title, img_path=s_img_path)
    '''
    plt.figure()
    for i in data:
        # i = (label, values, color)
        plt.hist(i[1], bins, color=i[2], label=i[0])
    # plt.ylim(0, 1.1)
    plt.title(title)
    plt.legend()
    if img_path is not None:
        # NOTE(review): unlike plot_curve, the target directory is not created
        # here -- savefig fails if it does not exist; confirm callers pre-create it.
        plt.savefig(img_path)
    if show:
        plt.show()
    else:
        plt.close()
def img_splice(img_paths, save_path, sgl_img_size):
    '''
    Paste a 2-D grid of images onto one white canvas and save it.

    img_paths: 2-D list storing the paths of images
    sgl_img_size: (width, height) of a single image
    '''
    width, height = sgl_img_size
    n_cols = max(len(row) for row in img_paths)
    n_rows = len(img_paths)
    canvas = Image.new(mode='RGB', size=(width * n_cols, height * n_rows), color=(255, 255, 255))
    for row_idx, row in enumerate(img_paths):
        for col_idx, img_path in enumerate(row):
            tile = Image.open(img_path)
            box = (width * col_idx, height * row_idx,
                   width * (col_idx + 1), height * (row_idx + 1))
            canvas.paste(tile, box)
    canvas.save(save_path)
    return canvas
def otsu_threshold(data):
    """Binarize a 1-D sequence with OTSU thresholding.

    Returns (binarized_row, threshold); the input is cast to uint8 first.
    """
    row = np.array([data], dtype=np.uint8)
    threshold, binarized = cv2.threshold(row, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binarized[0], threshold
if __name__ == '__main__':
    # Raw strings: the originals relied on '\S' / '\l' not being recognized
    # escapes, which raises DeprecationWarning on modern Python and is brittle
    # for Windows paths. The values are byte-identical.
    dir = r'G:\SmartWalker\SmartWalker-master\SoundSourceLocalization'
    file = r'G:\SmartWalker\SmartWalker-master\SoundSourceLocalization\lib//audiolib.py'
    print(add_prefix_and_suffix_4_basename(dir, 13, 14))
    print(add_prefix_and_suffix_4_basename(file, 13, 14))
|
#!/usr/bin/env python
"""
# Problem Description:
Given a tile index of (x,y,z) of size 256x256,
1. Find out (lat_deg, lng_deg) and the extent covered by the maptile (ie. the radius in meters)
2. Grab the road network (and other entities, like building boundaries or types) from OSM -- use OSMnx -- in the area covered by the map tile
3. Compute the traditional geospatial features (i.e. measures of spatial complexity from Boeing 2016)
4. Concatenate the features into a vector -- which will be used as 'geospatial feature vector', the counterpart of BiVAE's content code.
# Usage:
# Examples:
python retrieve_and_rasterize.py -c la --out_dir_root='.' --records_dir_root='.'
nohup python retrieve_and_rasterize.py -c la &> log_2021_05_09/la.out &
nohup python retrieve_and_rasterize.py -c shanghai &> log_2021_05_09/shanghai.out &
nohup python retrieve_and_rasterize.py -c seoul &> log_2021_05_09/seoul.out &
nohup python retrieve_and_rasterize.py -c rome &> log_2021_05_09/rome.out &
nohup python retrieve_and_rasterize.py -c paris &> log_2021_05_09/paris.out &
nohup python retrieve_and_rasterize.py -c montreal &> log_2021_05_09/montreal.out &
nohup python retrieve_and_rasterize.py -c manhattan &> log_2021_05_09/manhattan.out &
nohup python retrieve_and_rasterize.py -c chicago &> log_2021_05_09/chicago.out &
nohup python retrieve_and_rasterize.py -c charlotte &> log_2021_05_09/charlotte.out &
nohup python retrieve_and_rasterize.py -c boston &> log_2021_05_09/boston.out &
nohup python retrieve_and_rasterize.py -c berlin &> log_2021_05_09/berlin.out &
nohup python retrieve_and_rasterize.py -c amsterdam &> log_2021_05_09/amsterdam.out &
nohup python retrieve_and_rasterize.py -c vegas &> log_2021_05_09/vegas.out &
# nohup python retrieve_and_rasterize.py -c london &> london.out &
# styles =['StamenTonerBackground','OSMDefault', 'CartoVoyagerNoLabels']#'StamenWatercolor']#, 'StamenTonerLines']
nohup python retrieve_and_rasterize.py -c la -s StamenTonerBackground &> la.out &
nohup python retrieve_and_rasterize.py -c shanghai -s StamenTonerBackground &> shanghai.out &
nohup python retrieve_and_rasterize.py -c seoul -s StamenTonerBackground &> seoul.out &
nohup python retrieve_and_rasterize.py -c rome -s StamenTonerBackground&> rome.out &
nohup python retrieve_and_rasterize.py -c paris -s StamenTonerBackground &> paris.out &
nohup python retrieve_and_rasterize.py -c montreal -s StamenTonerBackground &> montreal.out &
nohup python retrieve_and_rasterize.py -c manhattan -s StamenTonerBackground &> manhattan.out &
nohup python retrieve_and_rasterize.py -c chicago -s StamenTonerBackground &> chicago.out &
nohup python retrieve_and_rasterize.py -c charlotte -s StamenTonerBackground &> charlotte.out &
nohup python retrieve_and_rasterize.py -c boston -s StamenTonerBackground &> boston.out &
nohup python retrieve_and_rasterize.py -c berlin -s StamenTonerBackground &> berlin.out &
nohup python retrieve_and_rasterize.py -c amsterdam -s StamenTonerBackground &> amsterdam.out &
nohup python retrieve_and_rasterize.py -c vegas -s StamenTonerBackground &> vegas.out &
"""
# ## Load libraries
import argparse
import os, sys
import time
from pathlib import Path
from typing import List, Dict
import joblib
import matplotlib
matplotlib.use('Agg')
import osmnx as ox
# %matplotlib inline
# Cache OSM API responses on disk and keep the console log quiet.
ox.config(log_console=False, use_cache=True)
# ox.__version__
# ## Set Path
# TODO: Deal with this in a cleaner way
# 1. Add project root and src folders to `sys.path`
# 2. Set DATA_ROOT to `maptile_v2` folder
# Resolve the project layout and make the project importable.
this_nb_path = Path(os.getcwd())
# ROOT = this_nb_path.parent.parent
ROOT = Path('/data/hayley-old/TileMani/')  # hard-coded project root on the lab server
SRC = ROOT / 'src'
DATA_ROOT = Path("/data/hayley-old/maptiles_v2/")  # maptile_v2 dataset root
paths2add = [this_nb_path, ROOT]
print("Project root: ", str(ROOT))
print('Src folder: ', str(SRC))
print("This nb path: ", str(this_nb_path))
for p in paths2add:
    if str(p) not in sys.path:
        sys.path.insert(0, str(p))
        print(f"\n{str(p)} added to the path.")
# Import helper functions
from tilemani.utils import parse_maptile_fp
from tilemani.utils import mkdir, write_record
from tilemani.retrieve.retriever import get_road_graph_and_bbox, get_geoms
from tilemani.rasterize.rasterizer import rasterize_road_and_bldg
from tilemani.rasterize.rasterizer import single_rasterize_road_and_bldg
from tilemani.compute.features import compute_road_network_stats
# ### Process each cities' maptiles
# - Rasterize G_r (road networks)
# - Rasterize bldg geoms
# - Rasterize both road and bldg to the same image
#
# - Save road network graph
# - Save bldg geometry info as geojson
#
# - while collecting: {x,y,z,lat,lng,radius, road_retreival_status} and road network stats
#
# Parameters for the script
# city = 'paris'
# style = 'StamenTonerLines'
# zoom = '14'
# network_type = 'drive_service'
# bgcolors = ['k', 'r', 'g', 'b', 'y']
# edge_colors = ['cyan']
# bldg_colors = ['silver']
# lw_factors = [0.5]
# save = True
# dpi = 50
# figsize = (7,7)
# show = False #True
# show_only_once = False
# verbose = False #True
def retrieve_and_rasterize_locs_in_a_folder(
        city: str,
        style: str,
        zoom: str,
        network_type='drive_service',
        bgcolors=['k', 'r', 'g', 'b', 'y'],
        edge_colors=['cyan'],
        bldg_colors=['silver'],
        lw_factors=[0.5],
        save=True,
        dpi=50,
        figsize=(7, 7),
        show=False,  # True,
        show_only_once=False,
        verbose=False,  # True,
        out_dir_root=Path('./temp/images'),
        records_dir_root=Path('./temp/records'),
) -> List[Dict]:
    """For every maptile image in DATA_ROOT/city/style/zoom: retrieve the OSM
    road graph and building geometries, rasterize them, save the raw graph /
    geometries, compute road-network stats, and collect one record per tile.

    Returns the list of per-tile record dicts (also pickled to records_dir_root).
    """
    mkdir(out_dir_root)
    mkdir(records_dir_root)
    img_dir = DATA_ROOT / city / style / zoom
    if not img_dir.exists():
        raise ValueError(f"{img_dir} doesn't exist. Check the spelling and upper/lower case of city, style, zoom")
    if verbose:
        print(f"Image_dir: ", img_dir)

    # list of each record of location (which is a dict)
    records = []
    for i, img_fp in enumerate(img_dir.iterdir()):
        if not img_fp.is_file():
            continue
        record = parse_maptile_fp(img_fp)
        record['city'] = city
        record['style'] = style
        tileXYZ = (record['x'], record['y'], record['z'])
        if verbose:
            print("=" * 10)
            print(f"Processing {city} -- {tileXYZ}")

        # Retrieve road graph and bldg geoms for this tile's bbox
        G_r, bbox = get_road_graph_and_bbox(tileXYZ, network_type)
        gdf_b = get_geoms(tileXYZ, tag={'building': True})

        # Rasterize road graph with *my* plot_figure_ground (not ox.plot_figure_ground)
        rasterize_road_and_bldg(
            G_r,
            gdf_b,
            tileXYZ,
            bbox,
            bgcolors,
            edge_colors,
            bldg_colors,
            lw_factors=lw_factors,
            save=save,
            out_dir_root=out_dir_root / city,
            verbose=verbose,
            show=show,
            show_only_once=show_only_once,
            figsize=figsize,
            dpi=dpi)
        # Raster in grayscale (bgcolor='w', edge_color='k', bldg_color='silver')
        single_rasterize_road_and_bldg(
            G_r,
            gdf_b,
            tileXYZ,
            bbox=bbox,
            lw_factor=lw_factors[0],
            save=save,
            out_dir_root=out_dir_root / city,
            verbose=verbose,
            show=show,
            figsize=figsize,
            dpi=dpi
        )

        # Save retrieval results
        record['retrieved_road'] = G_r is not None
        record['retrieved_bldg'] = gdf_b is not None
        # Shared x_y_z stem for every artefact of this tile (previously
        # computed but never used; output paths held a placeholder instead).
        filename = f"{record['x']}_{record['y']}_{record['z']}"
        if save:
            # Save the graph (of roads) as Graphml file
            fp = out_dir_root / city / 'RoadGraph' / f'{tileXYZ[-1]}' / f'{filename}.graphml'
            if G_r is not None:
                ox.save_graphml(G_r,
                                filepath=fp)
                if verbose:
                    print('\tSaved road graph as graphml: ', fp)
            # Save the GeoDataFrame (for bldg data) as Geojson
            fp = out_dir_root / city / 'BldgGeom' / f'{tileXYZ[-1]}' / f'{filename}.geojson'
            if not fp.parent.exists():
                # BUG FIX: previously called fp.mkdir(parents=True), which
                # created a *directory* named like the output file; the parent
                # directory is what must exist before writing.
                fp.parent.mkdir(parents=True)
                print(f'Created {fp.parent}')
            if gdf_b is not None and not gdf_b.empty:
                try:
                    _gdf = gdf_b.apply(lambda c: c.astype(str) if c.name != "geometry" else c, axis=0)
                    _gdf.to_file(fp, driver='GeoJSON')
                    if verbose:
                        print('\tSaved BLDG Geopandas as geopackage: ', fp)
                except Exception:
                    # narrowed from a bare `except:` so KeyboardInterrupt
                    # and SystemExit still propagate
                    print(f"\tFailed to save BLDG Geopandas as gpkg: ", sys.exc_info()[0])

        # Compute stats from G_r and merge them into the record dict
        if G_r is not None:
            road_stats = compute_road_network_stats(G_r)
            record.update(road_stats)
        # Write this location's record to an individual csv file
        if save:
            record_dir = out_dir_root / city / 'RoadStat'
            mkdir(record_dir)
            write_record(record, record_dir / f'{filename}.csv', verbose=verbose)
        # Append the record to records
        records.append(record)
        print(len(records), end="...")

    # Write the final `records` to a file, bumping a version suffix if needed.
    # BUG FIX: uses the `zoom` argument rather than the loop-local tileXYZ,
    # which would be unbound when the folder contains no files.
    vidx = 0
    records_fn = f'{city}-{style}-{zoom}-ver{vidx}.pkl'
    while (records_dir_root / records_fn).exists():
        vidx += 1
        records_fn = f'{city}-{style}-{zoom}-ver{vidx}.pkl'
        print(f'records file already exists --> Increased the version idx to {vidx}...')
    joblib.dump(records, records_dir_root / records_fn)
    print(f'\tSaved the final records for {city} to: {records_dir_root / records_fn}')
    return records
if __name__ == "__main__":
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument('-c', "--city", type=str, required=True,
help="<Required> Name of the city folder")
parser.add_argument('-s', "--style", type=str, default='StamenTonerLines',
help="<Optional> Name of the style folder. Default: StamenTonerLines")
parser.add_argument("-z", "--zoom", type=str, default='14',
help="<Optional> Zoom level")
parser.add_argument("-nw", "--network_type", type=str, default='drive_service',
help="<Optional> Network type to query from OSM. Default: drive_service")
parser.add_argument("--out_dir_root", type=str, default='./temp/images',
help="<Optional> Name of the output folder root. Default: ./temp/images")
parser.add_argument("--records_dir_root", type=str, default='./temp/records',
help="<Optional> Name of the root folder to store 'records'. Default: ./temp/records")
args = parser.parse_args()
city = args.city
style = args.style
zoom = args.zoom
network_type = args.network_type
out_dir_root = Path(args.out_dir_root)
records_dir_root = Path(args.records_dir_root)
print("Args: ", args)
start = time.time()
retrieve_and_rasterize_locs_in_a_folder(
city,
style,
zoom,
network_type,
save=True,
verbose=False,
out_dir_root=out_dir_root,
records_dir_root=records_dir_root)
print(f"Done: {city}, {style}, {zoom}. Took: {time.time() - start}")
|
import os
from jacowvalidator.docutils.styles import get_style_summary
from jacowvalidator.docutils.margins import get_margin_summary
from jacowvalidator.docutils.languages import get_language_summary
from jacowvalidator.docutils.title import get_title_summary, get_title_summary_latex
from jacowvalidator.docutils.authors import get_author_summary, get_author_summary_latex
from jacowvalidator.docutils.abstract import get_abstract_summary, get_abstract_summary_latex
from jacowvalidator.docutils.heading import get_heading_summary
from jacowvalidator.docutils.paragraph import get_paragraph_summary, get_all_paragraph_summary
from jacowvalidator.docutils.references import get_reference_summary
from jacowvalidator.docutils.figures import get_figure_summary
from jacowvalidator.docutils.tables import get_table_summary
from jacowvalidator.spms import reference_csv_check, HELP_INFO as SPMS_HELP_INFO, EXTRA_INFO as SPMS_EXTRA_INFO
from jacowvalidator.models import Conference
class AbstractNotFoundError(Exception):
    """Raised when the paper submitted by a user has no matching entry in the
    spms references list of papers."""
def parse_paragraphs(doc):
    """Locate the title, author and abstract sections of a docx paper.

    Scans paragraphs in order, recording the index of the title block (first
    non-empty paragraph), the author block (first style change after the
    title), the 'abstract' heading and the 'references' heading.

    Returns a dict with 'Title', 'Authors' and 'Abstract' summaries.
    Raises AbstractNotFoundError when no 'abstract' heading is found.
    """
    title_index = author_index = abstract_index = reference_index = -1
    current_style = None
    summary = {}
    for i, p in enumerate(doc.paragraphs):
        # first paragraph is the title
        text = p.text.strip()
        if not text:
            continue
        # first non empty paragraph is the title
        # Assume all of title is same style so end of title is when the style changes
        if title_index == -1:
            title_index = i
        elif title_index != -1 and author_index == -1 and current_style != p.style.name:
            author_index = i
        # find abstract heading
        if text.lower() == 'abstract':
            abstract_index = i
            summary['Abstract'] = get_abstract_summary(p)
        # remember this paragraph's style for the author-boundary check above
        current_style = p.style.name
        # all headings, paragraphs captions, figures, tables, equations should be between these two
        # if abstract_index > 0 and reference_index == -1:
        #     print(i)
        #     # check if a known jacow style
        #     for section_type, section_data in DETAILS.items():
        #         if 'styles' in section_data:
        #             if p.style.name in section_data['styles']['jacow']:
        #                 found = f"{section_type} - {p.style.name}"
        #                 print(found)
        #                 break
        #             elif p.style.name in section_data['styles']['normal']:
        #                 found = f"{section_type} -- {p.style.name}"
        #                 print(found)
        #                 break
        #         else:
        #             for sub_type, sub_data in section_data.items():
        #                 if p.style.name in sub_data['styles']['jacow']:
        #                     found = f"{section_type} - {sub_type} - {p.style.name}"
        #                     print(found)
        #                 elif 'normal' in sub_data['styles'] and p.style.name in sub_data['styles']['normal']:
        #                     found = f"{section_type} -- {sub_type} -- {p.style.name}"
        #                     print(found)
        #                     break
        # find reference heading
        if text.lower() == 'references':
            reference_index = i
            break
    # if abstract not found
    if abstract_index == -1:
        raise AbstractNotFoundError("Abstract header not found")
    # authors is all the text between title and abstract heading
    summary['Title'] = get_title_summary(doc.paragraphs[title_index: author_index])
    summary['Authors'] = get_author_summary(doc.paragraphs[author_index: abstract_index])
    return summary
def create_upload_variables(doc):
    """Run all jacow checks over a docx document.

    Returns:
        summary: dict of section name -> check results for the report page.
        authors: parsed author details (used by the SPMS cross-check).
        title:   parsed title details (used by the SPMS cross-check).
    Raises:
        AbstractNotFoundError: propagated from parse_paragraphs when the
        document has no 'Abstract' heading.
    """
    doc_summary = parse_paragraphs(doc)
    # (The previous version initialised `summary = {}` and immediately
    # rebound it below; the dead assignment has been removed.)
    summary = {
        'Styles': get_style_summary(doc),
        'Margins': get_margin_summary(doc),
        'Languages': get_language_summary(doc),
        'List': get_all_paragraph_summary(doc),
        'Title': doc_summary['Title'],
        'Authors': doc_summary['Authors'],
        'Abstract': doc_summary['Abstract'],
        'Headings': get_heading_summary(doc),
        'Paragraphs': get_paragraph_summary(doc),
        'References': get_reference_summary(doc),
        'Figures': get_figure_summary(doc),
        'Tables': get_table_summary(doc)
    }
    # get title and author to use in SPMS check
    title = summary['Title']['details']
    authors = summary['Authors']['details']
    return summary, authors, title
def create_spms_variables(paper_name, authors, title, conference_path, conference_id=False):
    """Cross-check the paper's title/authors against the SPMS reference CSV.

    Returns (summary, reference_csv_details); details is False when no
    Conference rows exist in the database.
    """
    summary = {}
    if not Conference.query.all():
        return summary, False
    author_text = ''.join(a['text'] + ", " for a in authors)
    title_text = ''.join(a['text'] for a in title)
    reference_csv_details = reference_csv_check(paper_name, title_text, author_text, conference_path)
    conference_detail = conference_id if conference_id else conference_path
    summary['SPMS'] = {
        'title': ' SPMS ('+conference_detail+') Abstract Title Author Check',
        'help_info': SPMS_HELP_INFO,
        'extra_info': SPMS_EXTRA_INFO,
        'ok': reference_csv_details['title']['match'] and reference_csv_details['author']['match'],
        'message': 'SPMS Abstract Title Author Check issues',
        'details': reference_csv_details['summary'],
        'anchor': 'spms',
        'conference': conference_detail
    }
    return summary, reference_csv_details
def create_upload_variables_latex(doc):
    """Summarise a LaTeX submission; returns (summary, authors, title)."""
    title_summary = get_title_summary_latex(doc.title)
    author_summary = get_author_summary_latex(doc.author)
    abstract_summary = get_abstract_summary_latex(doc.abstract)
    summary = {
        'Title': title_summary,
        'Authors': author_summary,
        'Abstract': abstract_summary
    }
    # The SPMS check expects a list of author entries and the raw title details.
    return summary, [author_summary], title_summary
|
# tests/test_auth.py
import unittest
from datetime import datetime
from unittest import mock
from openhim_mediator_utils.auth import Auth
# Target OpenHIM API instance and user shared by every test in this module.
API_URL = 'https://localhost:8080'
USERNAME = 'user'
class Authenticate(unittest.TestCase):
    """Tests for Auth.authenticate(): SSL-warning handling and salt retrieval."""

    def setUp(self):
        # Fresh Auth instance with certificate verification disabled by default.
        self.auth = Auth({'verify_cert': False, 'apiURL': API_URL, 'username': USERNAME})

    @mock.patch('urllib3.disable_warnings')
    @mock.patch('requests.get')
    def test_disables_ssl_warnings_when_verify_cert_is_false(self, mock_get, mock_disable_warnings):
        """verify_cert=False must silence urllib3's insecure-request warnings."""
        # arrange
        mock_get.return_value = self._get_mock_response(
            status=200,
            content='Sample content',
            json_data={'salt': 'some salt'}
        )
        # act
        self.auth.authenticate()
        # assert
        self.assertTrue(mock_disable_warnings.called)

    @mock.patch('urllib3.disable_warnings')
    @mock.patch('requests.get')
    def test_does_not_disable_ssl_warnings_when_verify_cert_is_true(self, mock_get, mock_disable_warnings):
        """With verify_cert=True the warnings must stay enabled."""
        # arrange
        self.auth.options['verify_cert'] = True
        mock_get.return_value = self._get_mock_response(
            status=200,
            content='Sample content',
            json_data={'salt': 'some salt'}
        )
        # act
        self.auth.authenticate()
        # assert
        self.assertFalse(mock_disable_warnings.called)

    @mock.patch('requests.get')
    def test_raises_exception_when_response_code_is_not_200(self, mock_get):
        """A non-200 status from the salt endpoint must raise."""
        # arrange
        mock_get.return_value = self._get_mock_response(
            status=500,
            content='Internal Server Error',
            json_data={'salt': 'some salt'}
        )
        # assert
        self.assertRaises(Exception, self.auth.authenticate)

    @mock.patch('requests.get')
    def test_sets_salt_when_request_succeeds(self, mock_get):
        """A 200 response must surface the salt from the JSON body."""
        # arrange
        # NOTE(review): the content label looks copy-pasted from the 500 case;
        # it is unused here since only json_data matters.
        mock_get.return_value = self._get_mock_response(
            status=200,
            content='Internal Server Error',
            json_data={'salt': 'some salt'}
        )
        # act
        body = self.auth.authenticate()
        # assert
        self.assertEqual(body['salt'], 'some salt')

    @staticmethod
    def _get_mock_response(status=200, content='Body', json_data=None):
        # Build a requests-like response stub with status/content/json().
        mock_response = mock.Mock()
        mock_response.status_code = status
        mock_response.content = content
        if json_data:
            mock_response.json = mock.Mock(return_value=json_data)
        return mock_response
class GenAuthHeaders(unittest.TestCase):
    """Tests for Auth.gen_auth_headers."""

    def test_raises_exception_when_no_salt(self):
        """Header generation must fail before a salt has been fetched."""
        # arrange
        auth = Auth(None)
        # assert
        self.assertRaises(Exception, auth.gen_auth_headers)

    # Decorators apply bottom-up: fake_datetime patches datetime.datetime,
    # fake_sha512 patches hashlib.sha512.
    @mock.patch('hashlib.sha512')
    @mock.patch('datetime.datetime')
    def test_returns_correct_headers_when_no_exceptions(self, fake_datetime, fake_sha512):
        """All four auth-* headers are present with the expected values."""
        # arrange
        auth = Auth({'username': USERNAME, 'password': 'password'})
        auth.salt = 'random salt'
        # Freeze the timestamp so the auth-ts header is predictable.
        fake_date = str(datetime.utcnow())
        fake_datetime.utcnow.return_value = fake_date
        expected_token = 'this is a test token'
        fake_sha512.return_value = self._get_mock_sha512(expected_token)
        # act
        result = auth.gen_auth_headers()
        # assert
        self.assertIn('auth-username', result.keys())
        self.assertEqual(result['auth-username'], USERNAME)
        self.assertIn('auth-ts', result.keys())
        self.assertEqual(result['auth-ts'], fake_date)
        self.assertIn('auth-salt', result.keys())
        self.assertEqual(result['auth-salt'], auth.salt)
        self.assertIn('auth-token', result.keys())
        self.assertEqual(result['auth-token'], expected_token)

    @staticmethod
    def _get_mock_sha512(token=None):
        # Stand-in for hashlib.sha512 whose hexdigest() yields `token`.
        mock_sha512 = mock.Mock()
        mock_sha512.hexdigest.return_value = token
        mock_sha512.update.return_value = None
        return mock_sha512
# Allow running this test module directly (python test_auth.py).
if __name__ == '__main__':
    unittest.main()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
from astropy.extern import six
from astropy.utils.compat.odict import OrderedDict
import numpy as np
import yaml
from . constants import YAML_TAG_PREFIX
from . import reference
from . import schema
from . import tagged
from . import treeutil
# ----------------------------------------------------------------------
# Custom loader/dumpers
def yaml_to_base_type(node, loader):
    """
    Convert a PyYAML node into the corresponding plain Python value.

    Mapping nodes become dicts, sequence nodes become lists, and scalar
    nodes become the appropriate scalar type (str, int, float, ...).

    Parameters
    ----------
    node : yaml.Node
        The node to convert.

    loader : yaml.Loader
        The loader used to construct the basic value.

    Returns
    -------
    basic : object
        Basic Python data type.

    Raises
    ------
    TypeError
        If the node is not a mapping, sequence or scalar node.
    """
    # Dispatch on the concrete node class.  deep=True forces nested
    # structures to be constructed immediately rather than lazily.
    if isinstance(node, yaml.ScalarNode):
        return loader.construct_scalar(node)
    if isinstance(node, yaml.SequenceNode):
        return loader.construct_sequence(node, deep=True)
    if isinstance(node, yaml.MappingNode):
        return loader.construct_mapping(node, deep=True)
    raise TypeError("Don't know how to implicitly construct '{0}'".format(
        type(node)))
class AsdfDumper(yaml.SafeDumper):
    """
    A YAML dumper aware of the "tagged basic Python data types" from the
    `tagged` module: after a value is represented, the resulting node is
    re-tagged with the value's explicit tag, when it carries one.
    """
    def represent_data(self, data):
        node = super(AsdfDumper, self).represent_data(data)
        explicit_tag = tagged.get_tag(data)
        if explicit_tag is not None:
            # Override whatever implicit tag SafeDumper chose.
            node.tag = explicit_tag
        return node
# Map the user-facing flow-style names to PyYAML's boolean flow_style
# flag (flow = inline {}/[] syntax, block = indented syntax).
_flow_style_map = {
    'flow': True,
    'block': False
}
def represent_sequence(dumper, sequence):
    """Represent a tagged sequence, honoring its requested flow style."""
    # Unknown/unset styles map to None, letting PyYAML pick the default.
    style = _flow_style_map.get(sequence.flow_style)
    return super(AsdfDumper, dumper).represent_sequence(
        None, sequence.data, style)
def represent_mapping(dumper, mapping):
    # Represent a tagged mapping, honoring its requested flow style and
    # an optional explicit key ordering (mapping.property_order).
    flow_style = _flow_style_map.get(mapping.flow_style, None)
    node = super(AsdfDumper, dumper).represent_mapping(
        None, mapping.data, flow_style)
    if mapping.property_order:
        values = node.value
        # Index the already-represented (key, value) node pairs by the
        # raw key string so they can be re-emitted in the desired order.
        new_mapping = {}
        for key, val in values:
            new_mapping[key.value] = (key, val)
        # First the explicitly ordered keys (that actually exist)...
        new_values = []
        for key in mapping.property_order:
            if key in mapping:
                new_values.append(new_mapping[key])
        # ...then everything else in its original order.
        for key, val in values:
            if key.value not in mapping.property_order:
                new_values.append((key, val))
        node.value = new_values
    return node
# Map the user-facing scalar style names to PyYAML style characters:
# inline -> double-quoted, folded -> '>', literal -> '|'.
_style_map = {
    'inline': '"',
    'folded': '>',
    'literal': '|'
}
def represent_scalar(dumper, value):
    """Represent a tagged string scalar, honoring its requested style."""
    # Unknown/unset styles map to None so PyYAML chooses the default.
    scalar_style = _style_map.get(value.style)
    return super(AsdfDumper, dumper).represent_scalar(
        None, value.data, scalar_style)
# Route each tagged container type through the style-aware representers.
AsdfDumper.add_representer(tagged.TaggedList, represent_sequence)
AsdfDumper.add_representer(tagged.TaggedDict, represent_mapping)
AsdfDumper.add_representer(tagged.TaggedString, represent_scalar)
class AsdfLoader(yaml.SafeLoader):
    """
    A specialized YAML loader that can construct "tagged basic Python
    data types" as implemented in the `tagged` module.
    """
    def construct_object(self, node, deep=False):
        """Construct the object for *node*, preserving its YAML tag.

        Nodes with a registered constructor are handled by PyYAML as
        usual; anything else is converted to a basic Python type and
        wrapped in a tagged container recording the original tag.
        """
        tag = node.tag
        if node.tag in self.yaml_constructors:
            # Bug fix: the caller's `deep` flag was previously ignored
            # (hard-coded to False), silently downgrading deep
            # construction requests.  Forward it to the base class.
            return super(AsdfLoader, self).construct_object(node, deep=deep)
        data = yaml_to_base_type(node, self)
        data = tagged.tag_object(tag, data)
        return data
# ----------------------------------------------------------------------
# Handle omap (ordered mappings)

# Full YAML tag (standard prefix + 'omap' suffix) for ordered mappings.
YAML_OMAP_TAG = YAML_TAG_PREFIX + 'omap'
# Add support for loading YAML !!omap objects as OrderedDicts and dumping
# OrderedDict in the omap format as well.
def ordereddict_constructor(loader, node):
    """Construct an OrderedDict from a YAML !!omap node.

    ``construct_yaml_omap`` is a two-step generator: unpacking it with
    ``*`` first yields the (initially empty) pair list and then runs the
    code that populates it, so ``OrderedDict(*omap)`` ends up receiving
    the fully populated list of (key, value) pairs.
    """
    try:
        omap = loader.construct_yaml_omap(node)
        return OrderedDict(*omap)
    except yaml.constructor.ConstructorError:
        # Not a well-formed omap: fall back to a plain sequence
        # (construct_yaml_seq is a two-step generator as well).
        return list(*loader.construct_yaml_seq(node))
def represent_ordered_mapping(dumper, tag, data):
    """Represent an ordered mapping as a sequence of one-entry mappings."""
    # TODO: Again, adjust for preferred flow style, and other stylistic details
    # NOTE: For block style this uses the compact omap notation, but for flow style
    # it does not.

    # TODO: Need to see if I can figure out a mechanism so that classes that
    # use this representer can specify which values should use flow style
    values = []
    node = yaml.SequenceNode(tag, values,
                             flow_style=dumper.default_flow_style)
    # Register the node before recursing so anchors/aliases pointing at
    # this mapping resolve to it.
    if dumper.alias_key is not None:
        dumper.represented_objects[dumper.alias_key] = node
    # An omap is a sequence of single-entry mappings, one per key.
    for key, value in data.items():
        key_item = dumper.represent_data(key)
        value_item = dumper.represent_data(value)
        node_item = yaml.MappingNode(YAML_OMAP_TAG,
                                     [(key_item, value_item)],
                                     flow_style=False)
        values.append(node_item)
    return node
def represent_ordereddict(dumper, data):
    # Dump OrderedDict instances using the YAML !!omap representation.
    return represent_ordered_mapping(dumper, YAML_OMAP_TAG, data)
# Register the omap handlers defined above.
AsdfLoader.add_constructor(YAML_OMAP_TAG, ordereddict_constructor)
AsdfDumper.add_representer(OrderedDict, represent_ordereddict)

# ----------------------------------------------------------------------
# Handle numpy scalars
# Register representers so numpy scalar types serialize as plain YAML
# floats/ints.  np.float128 is unavailable on some platforms (e.g.
# Windows builds of numpy), so resolve it defensively instead of
# crashing with AttributeError at import time.
for scalar_type in (np.float32, np.float64) + (
        (np.float128,) if hasattr(np, 'float128') else ()):
    AsdfDumper.add_representer(scalar_type, AsdfDumper.represent_float)

for scalar_type in (np.int8, np.int16, np.int32, np.int64,
                    np.uint8, np.uint16, np.uint32, np.uint64):
    AsdfDumper.add_representer(scalar_type, AsdfDumper.represent_int)
# ----------------------------------------------------------------------
# Unicode fix on Python 2
if six.PY2:
    # This dumps Python unicode strings as regular YAML strings rather
    # than !!python/unicode. See http://pyyaml.org/ticket/11
    def _unicode_representer(dumper, value):
        return dumper.represent_scalar("tag:yaml.org,2002:str", value)
    AsdfDumper.add_representer(unicode, _unicode_representer)

    # Load str-tagged scalars via construct_scalar (unicode on PY2).
    AsdfLoader.add_constructor('tag:yaml.org,2002:str',
                               AsdfLoader.construct_scalar)
def custom_tree_to_tagged_tree(tree, ctx):
    """
    Convert a tree, possibly containing custom data types that aren't
    directly representable in YAML, to a tree of basic data types
    annotated with YAML tags.
    """
    def _to_tagged(node):
        asdf_type = ctx.type_index.from_custom_type(type(node))
        if asdf_type is None:
            # Not a registered custom type; leave untouched.
            return node
        # Serialize the custom object, then record its YAML tag on the
        # resulting basic value.
        basic = asdf_type.to_tree(node, ctx)
        return tagged.tag_object(asdf_type.yaml_tag, basic)

    return treeutil.walk_and_modify(tree, _to_tagged)
def tagged_tree_to_custom_tree(tree, ctx):
    """
    Convert a tree containing only basic data types, annotated with
    tags, back into a tree containing custom data types.
    """
    def _to_custom(node):
        tag_name = tagged.get_tag(node)
        if tag_name is None:
            return node
        tag_type = ctx.type_index.from_yaml_tag(tag_name)
        # Unknown tags stay in their tagged basic representation.
        if tag_type is None:
            return node
        return tag_type.from_tree(node.data, ctx)

    return treeutil.walk_and_modify(tree, _to_custom)
def validate_for_tag(tag, tree, ctx):
    """
    Validate *tree* against the schema associated with *tag*, if any.
    """
    schema_path = ctx.tag_to_schema_resolver(tag)
    # The resolver returns the tag unchanged when no schema is mapped;
    # only validate when an actual schema path came back.
    if schema_path == tag:
        return
    s = schema.load_schema(schema_path, ctx.url_mapping)
    schema.validate(tree, s, ctx.url_mapping)
def validate_tagged_tree(tree, ctx):
    """
    Validate a tree of tagged basic data types against any relevant
    schemas, both at the root level and anywhere a tag is found with a
    matching schema.
    """
    def _check(node):
        node_tag = tagged.get_tag(node)
        if node_tag is not None:
            validate_for_tag(node_tag, node, ctx)

    return treeutil.walk(tree, _check)
def validate(tree, ctx):
    """
    Validate a tree, possibly containing custom data types, against
    any relevant schemas, both at the root level and anywhere else a
    tag is found with a matching schema.
    """
    # Serialize to the tagged-basic form first, then validate that.
    validate_tagged_tree(custom_tree_to_tagged_tree(tree, ctx), ctx)
def load_tree(yaml_content, ctx):
    """
    Load YAML, returning a tree of objects and custom types.

    Parameters
    ----------
    yaml_content : bytes
        The raw serialized YAML content.

    ctx : Context
        The parsing context.
    """
    # Use a throwaway Loader subclass so the context can be attached as
    # a class attribute without leaking between concurrent calls.
    class _LoaderWithContext(AsdfLoader):
        pass
    _LoaderWithContext.ctx = ctx

    tree = yaml.load(yaml_content, Loader=_LoaderWithContext)
    tree = reference.find_references(tree, ctx)
    validate_tagged_tree(tree, ctx)
    return tagged_tree_to_custom_tree(tree, ctx)
def dump_tree(tree, fd, ctx):
    """
    Dump a tree of objects, possibly containing custom types, to YAML.

    Parameters
    ----------
    tree : object
        Tree of objects, possibly containing custom data types.

    fd : pyasdf.generic_io.GenericFile
        A file object to dump the serialized YAML to.

    ctx : Context
        The writing context.
    """
    # Throwaway Dumper subclass carrying the write context.
    class AsdfDumperTmp(AsdfDumper):
        pass
    AsdfDumperTmp.ctx = ctx
    if hasattr(tree, 'yaml_tag'):
        tag = tree.yaml_tag
        # Keep the tag prefix up to and including the '/' that precedes
        # 'core/asdf'; this becomes the document's primary '!' handle.
        # NOTE(review): raises ValueError if the tag lacks '/core/asdf'
        # — presumably the root tree always carries that tag; confirm.
        tag = tag[:tag.index('/core/asdf') + 1]
        tags = {'!': tag}
    else:
        tags = None
    tree = custom_tree_to_tagged_tree(tree, ctx)
    validate_tagged_tree(tree, ctx)
    yaml.dump_all(
        [tree], stream=fd, Dumper=AsdfDumperTmp,
        explicit_start=True, explicit_end=True,
        version=ctx.versionspec.yaml_version,
        allow_unicode=True,
        encoding='utf-8',
        tags=tags)
|
# File: ds_search_entities_connector.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
import phantom.app as phantom
from phantom.action_result import ActionResult
from digital_shadows_consts import *
from dsapi.service.search_entities_service import SearchEntitiesService
from exception_handling_functions import ExceptionHandling
class DSSearchEntitiesConnector(object):
    """Phantom connector wrapper around the Digital Shadows search-entities API."""

    # Entity types included in every search.  The 'types' action parameter
    # is currently not honored (see the commented line in search_entities).
    SEARCH_ENTITY_TYPES = ["CLIENT_INCIDENT", "DATA_BREACH", "AGGREGATE_DATA_BREACH",
                           "INTELLIGENCE", "TECHNICAL_SOURCE", "WEB_SOURCE"]

    def __init__(self, connector):
        """
        :param connector: DigitalShadowsConnector
        """
        self._connector = connector

        config = connector.get_config()
        self._handle_exception_object = ExceptionHandling()
        self._ds_api_key = config[DS_API_KEY_CFG]
        self._ds_api_secret_key = config[DS_API_SECRET_KEY_CFG]

    def search_entities(self, param):
        """Run a Digital Shadows entity search and add each hit to the result.

        :param param: dict of action parameters ('query', 'date_range', ...)
        :return: phantom action status (APP_SUCCESS or APP_ERROR)
        """
        action_result = ActionResult(dict(param))
        self._connector.add_action_result(action_result)
        self._connector.save_progress("process started...!!! ")

        # Renamed from `type` so the builtin is not shadowed.
        # entity_types = param.get('types').split(',')
        entity_types = self.SEARCH_ENTITY_TYPES
        date_range = param.get('date_range')
        query = param.get('query')

        try:
            search_service = SearchEntitiesService(self._ds_api_key, self._ds_api_secret_key)
        except Exception as e:
            error_message = self._handle_exception_object.get_error_message_from_exception(e)
            return action_result.set_status(phantom.APP_ERROR, "{0} {1}".format(SERVICE_ERR_MSG, error_message))

        # NOTE: the API also accepts many more filters (incidentTypes,
        # incidentSeverities, webPageNetworks, datePeriod, from/until, ...);
        # only dateRange/query/types are wired up here.
        try:
            search_view = search_service.search_entity_view(dateRange=date_range, query_string=query, types=entity_types)
        except Exception as e:
            error_message = self._handle_exception_object.get_error_message_from_exception(e)
            return action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. {0}".format(error_message))

        self._connector.save_progress("View: {}".format(search_view))
        try:
            search_entity_pages = search_service.find_all_pages(view=search_view)
            entity_total = len(search_entity_pages)
        except StopIteration:
            error_message = 'No Search Entity objects retrieved from the Digital Shadows API in page groups'
            return action_result.set_status(phantom.APP_ERROR, "Error Details: {0}".format(error_message))
        except Exception as e:
            error_message = self._handle_exception_object.get_error_message_from_exception(e)
            return action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. {}".format(error_message))

        if entity_total > 0:
            action_result.update_summary({
                'entity_count': entity_total,
                'entity_found': True
            })
            # Flatten every page of results into the action result data.
            for entity_page in search_entity_pages:
                for entity in entity_page:
                    action_result.add_data(entity.payload)
            action_result.set_status(phantom.APP_SUCCESS, 'String search entities are fetched')
        else:
            action_result.update_summary({
                'entity_count': 0,
                'entity_found': False
            })
            action_result.set_status(phantom.APP_SUCCESS, 'Entities not found for search string')
        return action_result.get_status()
|
# -*- coding: utf-8 -*-
import os
import sys
import psycopg2 as pg
import elasticsearch as es
script_path = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir))
sys.path.append(script_path)
from weatherLib.weatherDoc import WeatherData
from weatherLib.weatherUtil import WLogger
# SQL template for inserting one weather observation into weather_work;
# values are bound with psycopg2's pyformat (%(name)s) placeholders.
__INSERT_OBS = (
    "insert into weather_work "
    "(tsa, time, temperature, humidity, pressure, "
    "light, fwVersion, swVersion, version, "
    "isThermometer, isBarometer, isHygrometer, isClock) "
    "values (%(tsa)s, %(time)s, %(temperature)s, %(humidity)s, %(pressure)s, "
    "%(light)s, %(fwVersion)s, %(swVersion)s, %(version)s, "
    "%(isThermometer)s, %(isBarometer)s, %(isHygrometer)s, %(isClock)s); "
)
# Connection settings for the local PostgreSQL 'weather' database.
host = 'localhost'
user = 'weather'
password = '<PASSWORD>'
database = 'weather'

logger = WLogger(loggerName='weather.tools')
logger.logMessage("Starting...")

# Elasticsearch nodes to query.  Bug fix: the host key was previously
# misspelled as 'host:' (trailing colon), so the Elasticsearch client
# ignored it and fell back to its default host.
hostlist = [{'host': 'localhost', 'port': 9200}]
#hostlist = [
#            {'host':'elastic00','port':9200},
#            {'host':'elastic01','port':9200},
#            {'host':'elastic02','port':9200},
#           ]
def scanIndex(indexName, filtered):
    """Dump one Elasticsearch index into PostgreSQL and a flat dump file.

    Scans the index (optionally limited to documents with tsa below a
    hard-coded cutoff), normalizes legacy documents, inserts each into
    weather_work via the module-level pgConn, and appends every document
    to ./dump-<index>.dmp.  Duplicate TSAs are logged and appended to
    ./duplicates-<index>.dat.
    """
    doc = WeatherData(using=client)
    # Filtered scan only returns documents with tsa below this cutoff.
    s_filt = doc.search(using=client,index=indexName).\
             filter('range', **{'tsa': {'lt':20180916001872}})
    s_all = doc.search(using=client,index=indexName)

    logger.logMessage("Collecting and saving documents from elasticsearch.")
    if filtered:
        logger.logMessage("Using filtered scan.")
        s = s_filt.scan()
    else:
        logger.logMessage("Using unfiltered scan.")
        s = s_all.scan()

    dumpFile = './dump-{0}.dmp'.format(indexName)
    duplicates = './duplicates-{0}.dat'.format(indexName)
    logger.logMessage('Dumping to file {0}'.format(dumpFile))
    # Synthetic ascending TSA assigned to documents that lack one.
    fakeTSA = 1
    with open(dumpFile,'w') as f:
        num = 0
        for d in s:
            dic = d.to_dict()
            # Normalize legacy (pre-02.00.00 firmware) documents: fill in
            # missing version fields and assume all sensors are present.
            if not 'fwVersion' in dic:
                dic['fwVersion'] = '00.00.00'
            if dic['fwVersion'] < '02.00.00':
                if not 'version' in dic:
                    dic['version'] = '1.0.0'
                if not 'swVersion' in dic:
                    dic['swVersion'] = dic['version']
                for k in ['isThermometer','isBarometer','isClock','isHygrometer']:
                    dic[k] = True
            if not 'tsa' in dic:
                dic['tsa'] = fakeTSA
                fakeTSA += 1
            # logger.logMessage(dic,"DEBUG")
            with pgConn.cursor() as cur:
                try:
                    cur.execute(__INSERT_OBS, dic)
                except KeyError:
                    # Document is missing a column: abort the whole run.
                    logger.logMessage(level="ERROR",message="Wrong document: {0}".format(dic))
                    pgConn.rollback()
                    raise
                except pg.IntegrityError:
                    # Duplicate primary key: record it and keep going.
                    # NOTE(review): no rollback here — psycopg2 leaves the
                    # transaction in a failed state after IntegrityError,
                    # so later inserts in this batch may fail; confirm.
                    logger.logMessage(level="WARNING",message="TSA {0} is duplicated.".format(dic['tsa']))
                    # NOTE(review): `d` here shadows the scan loop variable.
                    with open(duplicates,'a') as d:
                        d.write('{0}\n'.format(dic))
            f.write("{0}\n".format(dic))
            num += 1
            # Commit in batches of 500 to bound transaction size.
            if num % 500 == 0:
                pgConn.commit()
                logger.logMessage(level="DEBUG",message="Saved {0} documents.".format(num))
    logger.logMessage("Document scan ended, {0} documents written.".format(num))
    pgConn.commit()
def getIndexes():
    """Return the names of all open 'weather-*' indices, sorted by name."""
    catClient = es.client.CatClient(client)
    allIndices = catClient.indices(index='weather-*',format='json')
    # Skip closed indices and sort so they are processed in name order.
    open_indices = sorted((entry for entry in allIndices if entry['status'] != 'close'),
                          key=lambda entry: entry['index'])
    return [entry['index'] for entry in open_indices]
# Module-level connections shared by scanIndex() and getIndexes().
client = es.Elasticsearch(hostlist)
pgConn = pg.connect(host=host,user=user,password=password,database=database)

indices = getIndexes()

#for doc in doclist:
    #wdb._logger.logMessage(level='DEBUG',message=
#    wdb.insertObs(doc)
|
<gh_stars>0
from urllib import parse
import datetime
import requests
import urllib
import os
from os import path
from pathlib import Path
import sys
import time
import math
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION, ALL_COMPLETED, as_completed
import threading
import socket
class download_m3u8:
    """Downloader for HLS (m3u8) playlists and m4a files.

    Fetches a playlist, downloads its .ts segments concurrently via a
    thread pool, and can merge the numbered segments into one .ts file.
    """

    # Worker count for the (disabled) manual-threading download path.
    thread_num = 100
    # Segments downloaded so far; drives the progress display.
    count = 0

    def get_url_list(self, host, body):
        # Parse the raw m3u8 body (bytes) into a list of segment URLs,
        # resolving relative entries against `host`.
        # NOTE(review): absolute URLs are appended as bytes while relative
        # ones are appended as str — callers appear to handle both; confirm.
        lines = body.split(str.encode('\n'))
        ts_url_list = []
        for line in lines:
            if not line.startswith(str.encode('#')) and line.decode('utf-8') != '':
                if line.startswith(str.encode('http')):
                    ts_url_list.append(line)
                else:
                    ts_url_list.append('%s/%s' % (host, line.decode('utf-8')))
                #print('line=====>>>>> %s/%s', host, line.decode('utf-8'))
        return ts_url_list

    def get_host(self,url):
        # Return the URL up to (excluding) the last '/': the playlist base.
        url_base = url[0:url.rfind('/')]
        return url_base

    def get_m3u8_body(self, url, download_path, file_name):
        # Fetch the playlist and save it as <download_path>/<file_name>.m3u8.
        # Nested playlist references are rewritten to '<file_name>/index.m3u8'.
        url = url[0:url.rfind('m3u8')+4]
        print('read m3u8 file:', url)
        session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(pool_connections=10, pool_maxsize=10, max_retries=10)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        r = session.get(url, timeout=10)
        curr_path = download_path + "/{0}.m3u8".format(file_name)
        with open(curr_path, "wb") as code:
            if file_name != 'index':
                lines = r.content.decode()
                print('lines=====>>>>>{0}'.format(lines))
                lines = lines.split('\n')
                for line in lines:
                    if line.find('.m3u8') != -1:
                        line = file_name + '/index.m3u8'
                    #print('line=====>>>>>{0}'.format(line))
                    line = line + '\n'
                    code.write(line.encode())
            else:
                code.write(r.content)
        return r.content

    '''def download_ts_file(self, ts_url_list, download_dir):
        i = 0
        for ts_url in reversed(ts_url_list):
            i += 1
            file_name = ts_url[ts_url.rfind('/'):]
            curr_path = '%s%s' % (download_dir, file_name)
            print('\n[process]: %s/%s' % (i, len(ts_url_list)))
            print('[download]:', ts_url)
            #print('[target]:', curr_path)
            if os.path.isfile(curr_path):
                print('[warn]: file already exist')
                continue
            urllib.request.urlretrieve(ts_url, curr_path)'''

    # Download files with urllib.request.urlretrieve()
    def download(self, start, end, urls, download_path):
        # Sequentially download urls[start:end], naming files by index.
        file_name = start
        for i in urls[start:end]:
            #print('file_name=====%s' % file_name)
            #print(i)
            curr_path = download_path + "/{0}.ts".format(file_name)
            #urllib.request.urlretrieve(i, curr_path)
            session = requests.Session()
            adapter = requests.adapters.HTTPAdapter(pool_connections=10, pool_maxsize=10, max_retries=3)
            session.mount('http://', adapter)
            session.mount('https://', adapter)
            r = session.get(i, stream=True, timeout=10)
            with open(curr_path, "wb") as code:
                code.write(r.content)
            self.count += 1
            file_name += 1
            print("下载进度:%.2f" % (self.count / len(urls)), end='\r')

    def download_xcc(self, urls, i, download_path):
        # Download the single segment urls[i], retrying up to 5 times on
        # socket timeout.  Returns the index so futures can be correlated.
        ts_url = urls[i]
        ts_file_name = ts_url[ts_url.rfind('/')+1:]
        curr_path = download_path + "/{0}".format(ts_file_name)
        '''session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(pool_connections=10, pool_maxsize=10, max_retries=3)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        r = session.get(ts_url, stream=True, timeout=8)
        with open(curr_path, "wb") as code:
            code.write(r.content)'''
        # Set the global socket timeout to 30s
        socket.setdefaulttimeout(30)
        # Guard against incomplete downloads without an endless retry loop
        try:
            urllib.request.urlretrieve(ts_url, curr_path)
        except socket.timeout:
            count = 1
            while count <= 5:
                try:
                    urllib.request.urlretrieve(ts_url, curr_path)
                    break
                except socket.timeout:
                    err_info = 'Reloading for %d time' % count if count == 1 else 'Reloading for %d times' % count
                    print(err_info)
                    count += 1
            if count > 5:
                print("download job failed!")
        self.count += 1
        print("下载进度:%.2f%%" % (self.count / len(urls) * 100), end='\r')
        return i

    def download_m4a_file(self, url_path, download_path, file_name):
        # Download a single .m4a file to download_path + file_name.
        print("url_path=====>>>>>{0}".format(url_path))
        # Set the global socket timeout to 30s
        socket.setdefaulttimeout(30)
        # Guard against incomplete downloads without an endless retry loop
        try:
            urllib.request.urlretrieve(url_path, download_path+file_name)
        except socket.timeout:
            print("key file download timeout!")
        return 1

    def download_ts_file(self, ts_urls, download_path):
        # Download every segment in ts_urls concurrently and report timing.
        start_time = time.time()  # start time
        # Manual multi-threaded download path (disabled)
        '''file_size = len(ts_urls)
        part = file_size // self.thread_num
        print('part=====>>>>>\t%d' % part)
        for i in range(self.thread_num):
            start = part * i
            if i == self.thread_num - 1: # 最后一块
                end = file_size
            else:
                end = start + part
            t = threading.Thread(target=self.download, name='Thread Name %s' % i, kwargs={'start': start, 'end': end, 'urls': ts_urls, 'download_path': download_path})
            t.setDaemon(True)
            t.start()
        while True:
            print('self.count / len(ts_urls) %.6f' % (self.count / len(ts_urls)))
            if math.fabs(self.count / len(ts_urls) - 1.00) < 0.000001:
                break
            else:
                time.sleep(2)'''
        # Wait for all download threads to finish (disabled)
        '''main_thread = threading.current_thread()
        for t in threading.enumerate():
            if t is main_thread:
                continue
            print("线程名:%s" % t.name)
            t.join()
            print("结束线程名:%s" % t.name)'''
        # Download using a Python thread pool
        with ThreadPoolExecutor(max_workers=100) as t:
            obj_list = []
            for i in range(len(ts_urls)):
                obj = t.submit(self.download_xcc, ts_urls, i, download_path)
                obj_list.append(obj)
            for future in as_completed(obj_list):
                a = 1
                #data = future.result()
                #print(f"main: {data}")
        #future_tasks = [executor.submit(self.download_xcc, ts_urls, i, download_path) for i in range(len(ts_urls))]
        #wait(future_tasks, return_when=ALL_COMPLETED)
        # Block the pool as soon as the first task raises (disabled)
        #wait(task_list, return_when=FIRST_EXCEPTION)
        # Wait for all running tasks to complete (disabled)
        #done, unfinished = wait(future_tasks, timeout=800, return_when=ALL_COMPLETED)
        '''for d in done:
            print('执行中:%s, 已完成:%s' % (d.running(), d.done()))
            print(d.result())'''
        # Report total elapsed time
        end_time = time.time()
        print('Total cost time:=====>>>>>%s' % (end_time - start_time))

    def file_scan(self, path):
        # Return the list of all file paths under `path` (recursive).
        file_list = []
        # Walk the directory tree
        for root, dirs, files in os.walk(path):
            for fn in files:
                p = str(root+'/'+fn)
                file_list.append(p)
        return file_list

    def del_files_dir(self, path):
        # Delete every file under `path`, then remove the directory itself.
        # NOTE(review): os.rmdir fails if nested subdirectories remain —
        # presumably the segment directory is flat; confirm.
        for root, dirs, files in os.walk(path):
            for name in files:
                os.remove(os.path.join(root, name))
                #print("Delete File: " + os.path.join(root, name))
        os.rmdir(path)
        return True

    def combine(self, ts_path, combine_path, file_name):
        # Concatenate numbered segments (<i>.ts) into one output file, then
        # delete the segment directory.
        start_time = time.time()  # start time
        file_list = self.file_scan(ts_path)
        file_list.sort()
        file_path = combine_path + file_name + '.ts'
        path = os.path.dirname(file_list[0])
        with open(file_path, 'wb+') as fw:
            # Read segments in numeric order, not lexicographic file order.
            for i in range(len(file_list)):
                file = path + '/' + str(i) + '.ts'
                #print('i======>>>>>%s', i)
                #print('file_list[i]======>>>>>%s', file_list[i])
                #print('file======>>>>>%s', file)
                my_file = Path(file)
                if my_file.is_file():
                    fw.write(open(file, 'rb').read())
        # Report total elapsed time
        end_time = time.time()
        print('combine file Total cost time:=====>>>>>%s' % (end_time - start_time))
        self.del_files_dir(ts_path)

    def start_download(self, url, file_dir, file_name):
        # Entry point: currently only handles direct .m4a URLs.
        if url.find('.m4a') != -1:
            self.download_m4a_file(url,file_dir,file_name)
|
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.types import StructType, StructField, MapType, StringType
from pyspark.sql.functions import explode, map_keys, map_values
""" MapType Column
PySpark MapType is used to represent map key-value pair similar to python Dictionary (Dict), it extends DataType class
which is a superclass of all types in PySpark
MapType:
- keyType mandatory DataType argument
- valueType mandatory DataType argument
- valueContainsNull: optional boolean argument.
keyType and valueType can be any type that extends the DataType class. for e.g StringType, IntegerType, ArrayType,
MapType, StructType (struct) e.t.c.
Key can not have a null value; Value can have a null value if valueContainsNull is set to True (the default)
1. Create MapType column in a data frame check main()
2. Access MapType column value. Exp1
3. explode MapType column Exp2
4. Get all keys list or values list of the map Exp3
"""
def exp1(df: DataFrame):
    """Exp1: read individual map values via column.getItem(key)."""
    print("Exp1 access map type column value by using getItem(key)")
    eye_col = df.properties.getItem("eye").alias("eye")
    hair_col = df.properties.getItem("hair").alias("hair")
    df.select("name", eye_col, hair_col).show()
"""Exp2 Explode map type column
- explode(column): It takes a map type column and generate two columns, the default column name is key, value.
"""
def exp2(df: DataFrame):
    """Exp2: explode a MapType column into key/value rows."""
    # explode(MapType) produces two columns, so it cannot be used inside
    # withColumn, which creates exactly one column.
    print("Exp2 explode map type with default generated column name")
    df.select("name", explode(df.properties)).show()

    print("Exp2 explode map type with specific column name")
    named = explode(df.properties).alias("property_key", "property_value")
    df.select("name", named).show()

    print("Exp2 explode map type with specific column name")
    df.select(explode(df.properties).alias("property_key", "property_value")).show()
""" Exp3 Get map keys, and map values
- map_keys(column): It takes a map column, and returns a list of keys of the map
- map_values(column): It takes a map column, and returns a list of values of the map
The type of column must be map. otherwise the function will failed with type mismatch error
"""
def exp3(df: DataFrame):
    """Exp3: list the keys and the values of a map column."""
    # map_keys requires a map column, so calling it on the string column
    # 'name' fails with a type-mismatch error — demonstrated here.
    try:
        df.select(map_keys(df.name)).show()
    except Exception as e:
        print("failed, error message: {}".format(e))

    print("Exp3 Get key and value list of map type column")
    with_lists = df.withColumn("property_keys", map_keys(df.properties))
    with_lists = with_lists.withColumn("property_values", map_values(df.properties))
    with_lists.show()
def main():
    """Build a small DataFrame with a MapType column and run the examples."""
    spark = SparkSession.builder.master("local[2]").appName("ArrayTypeColumn").getOrCreate()

    schema = StructType([
        StructField('name', StringType(), True),
        StructField('properties', MapType(StringType(), StringType()), True)
    ])

    # '' is an empty string, not null; the None value shows that
    # valueContainsNull defaults to True.
    rows = [
        ('James', {'hair': 'black', 'eye': 'brown'}),
        ('Michael', {'hair': 'brown', 'eye': None}),
        ('Robert', {'hair': 'red', 'eye': 'black'}),
        ('Washington', {'hair': 'grey', 'eye': 'grey'}),
        ('Jefferson', {'hair': 'brown', 'eye': ''})
    ]
    df = spark.createDataFrame(data=rows, schema=schema)

    print("Source data frame")
    df.printSchema()
    df.show(truncate=False)

    # exp1(df) and exp2(df) are available; only exp3 runs by default.
    exp3(df)
# Script entry point.
if __name__ == "__main__":
    main()
|
<gh_stars>1-10
# coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet-20 ensemble on CIFAR-10.
This script only performs evaluation, not training. We recommend training
ensembles by launching independent runs of `deterministic.py` over different
seeds. Set `output_dir` to the directory containing these checkpoints.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import deterministic # local file import
import utils # local file import
import six
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
# TODO(trandustin): We inherit FLAGS.{batch_size,output_dir} from deterministic.
# This is not intuitive, which suggests we need to either refactor to avoid
# importing from a binary or duplicate the model definition here.
# `output_dir` must point at the directory of ensemble member checkpoints.
flags.mark_flag_as_required('output_dir')
FLAGS = flags.FLAGS
def ensemble_negative_log_likelihood(labels, logits):
  """Negative log-likelihood for ensemble.

  For each datapoint (x,y), the ensemble's negative log-likelihood is:

  ```
  -log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +
                log ensemble_size.
  ```

  Args:
    labels: tf.Tensor of shape [...].
    logits: tf.Tensor of shape [ensemble_size, ..., num_classes].

  Returns:
    tf.Tensor of shape [...].
  """
  labels = tf.convert_to_tensor(labels)
  logits = tf.convert_to_tensor(logits)
  # Broadcast labels across the leading ensemble axis and compute each
  # member's per-example cross entropy.
  tiled_labels = tf.broadcast_to(labels[tf.newaxis, ...],
                                 tf.shape(logits)[:-1])
  member_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(tiled_labels,
                                                              logits)
  # logsumexp over members mixes the member likelihoods; subtracting
  # log(ensemble_size) applies the uniform mixture weights.
  num_members = float(logits.shape[0])
  return -tf.reduce_logsumexp(-member_nll, axis=0) + tf.math.log(num_members)
def gibbs_cross_entropy(labels, logits):
  """Average cross entropy for ensemble members (Gibbs cross entropy).

  For each datapoint (x,y), the ensemble's Gibbs cross entropy is:

  ```
  GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m).
  ```

  The Gibbs cross entropy approximates the average cross entropy of a
  single model drawn from the (Gibbs) ensemble.

  Args:
    labels: tf.Tensor of shape [...].
    logits: tf.Tensor of shape [ensemble_size, ..., num_classes].

  Returns:
    tf.Tensor of shape [...].
  """
  labels = tf.convert_to_tensor(labels)
  logits = tf.convert_to_tensor(logits)
  # Broadcast labels across the leading ensemble axis, then average the
  # per-member cross entropies over that axis.
  tiled_labels = tf.broadcast_to(labels[tf.newaxis, ...],
                                 tf.shape(logits)[:-1])
  member_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(tiled_labels,
                                                              logits)
  return tf.reduce_mean(member_nll, axis=0)
def main(argv):
  """Evaluates an ensemble of checkpoints found under FLAGS.output_dir."""
  del argv  # unused arg
  if FLAGS.num_cores > 1 or not FLAGS.use_gpu:
    raise ValueError('Only single GPU is currently supported.')
  tf.enable_v2_behavior()
  dataset_train, ds_info = utils.load_dataset(tfds.Split.TRAIN, with_info=True)
  dataset_test = utils.load_dataset(tfds.Split.TEST)
  dataset_train = dataset_train.batch(FLAGS.batch_size)
  dataset_test = dataset_test.batch(FLAGS.batch_size)
  # Rebuild the architecture once; weights are loaded per checkpoint below.
  # l2 only affects training losses, so 0 is fine at evaluation time.
  model = deterministic.resnet_v1(
      input_shape=ds_info.features['image'].shape,
      depth=20,
      num_classes=ds_info.features['label'].num_classes,
      l2=0.)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())
  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.output_dir,
                                                     '**/*.ckpt.index'))
  # Strip the trailing '.index' (6 characters) to get the checkpoint prefix.
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  # Collect the logits output for each ensemble member and train/test data
  # point. We also collect the labels.
  # TODO(trandustin): Refactor data loader so you can get the full dataset in
  # memory without looping.
  logits_train = []
  logits_test = []
  labels_train = []
  labels_test = []
  for m, ensemble_filename in enumerate(ensemble_filenames):
    model.load_weights(ensemble_filename)
    logits = []
    for features, labels in dataset_train:
      logits.append(model(features, training=False))
      if m == 0:
        # Labels are identical for every member, so collect them only once.
        labels_train.append(labels)
    logits = tf.concat(logits, axis=0)
    logits_train.append(logits)
    if m == 0:
      labels_train = tf.concat(labels_train, axis=0)
    logits = []
    for features, labels in dataset_test:
      logits.append(model(features, training=False))
      if m == 0:
        labels_test.append(labels)
    logits = tf.concat(logits, axis=0)
    logits_test.append(logits)
    if m == 0:
      labels_test = tf.concat(labels_test, axis=0)
    logging.info('Predictions completed for checkpoint %s', ensemble_filename)
  metrics = {}
  # Compute the ensemble's NLL and Gibbs cross entropy for each data point.
  # Then average over the dataset.
  nll_train = ensemble_negative_log_likelihood(labels_train, logits_train)
  nll_test = ensemble_negative_log_likelihood(labels_test, logits_test)
  gibbs_ce_train = gibbs_cross_entropy(labels_train, logits_train)
  gibbs_ce_test = gibbs_cross_entropy(labels_test, logits_test)
  metrics['train_nll'] = tf.reduce_mean(nll_train)
  metrics['test_nll'] = tf.reduce_mean(nll_test)
  metrics['train_gibbs_cross_entropy'] = tf.reduce_mean(gibbs_ce_train)
  metrics['test_gibbs_cross_entropy'] = tf.reduce_mean(gibbs_ce_test)
  # Given the per-element logits tensor of shape [ensemble_size, dataset_size,
  # num_classes], average over the ensemble members' probabilities. Then
  # compute accuracy and average over the dataset.
  probs_train = tf.reduce_mean(tf.nn.softmax(logits_train), axis=0)
  probs_test = tf.reduce_mean(tf.nn.softmax(logits_test), axis=0)
  accuracy_train = tf.keras.metrics.sparse_categorical_accuracy(labels_train,
                                                                probs_train)
  accuracy_test = tf.keras.metrics.sparse_categorical_accuracy(labels_test,
                                                               probs_test)
  metrics['train_accuracy'] = tf.reduce_mean(accuracy_train)
  metrics['test_accuracy'] = tf.reduce_mean(accuracy_test)
  logging.info('Metrics: %s', metrics)
if __name__ == '__main__':
  # Delegate flag parsing and program entry to absl.
  app.run(main)
|
import sys, os
import numpy as np
from scipy import stats
from collections import defaultdict
import nanoraw_helper as nh
VERBOSE = False
def correct_multiple_testing(pvals):
    """Benjamini-Hochberg FDR correction of an array of p-values.

    Returns the adjusted p-values (q-values) aligned with the input order.
    """
    pvals = np.asarray(pvals)
    n = len(pvals)
    order = np.argsort(pvals)
    inverse_order = order.argsort()
    ranked = pvals[order]
    # raw BH adjustment: p * n / rank
    raw_adjusted = ranked / (np.arange(1, n + 1) / float(n))
    # enforce monotonicity from the largest p-value downwards
    adjusted = np.minimum.accumulate(raw_adjusted[::-1])[::-1]
    adjusted[adjusted > 1] = 1
    # restore the caller's ordering
    return adjusted[inverse_order]
def _calc_fm_pval(pvals):
return 1.0 - stats.chi2.cdf(
np.sum(np.log(pvals)) * -2,
pvals.shape[0] * 2)
def calc_fishers_method(pos_pvals, offset):
    """Combine p-values over sliding genomic windows with Fisher's method.

    :param pos_pvals: list of (pval, pos, cov1, cov2) tuples, sorted by pos.
    :param offset: window half-width; each position is combined with
        positions within ``offset`` bases.
    :returns: list of combined p-values aligned with ``pos_pvals``. Windows
        that run off either end of the array, or that contain an unobserved
        (NaN) position, get 1.0.
    """
    # BUG FIX: the original used `zip(*pos_pvals)[i]`, which only works under
    # Python 2 where zip returns a list; unpack explicitly so the function
    # runs under both major versions.
    pvals = [pp[0] for pp in pos_pvals]
    positions = [pp[1] for pp in pos_pvals]
    pvals_np = np.empty(pos_pvals[-1][1] + 1)
    pvals_np[:] = np.NAN
    # clamp to the smallest representable p-value to keep log() finite
    pvals_np[positions] = np.maximum(pvals, nh.SMALLEST_PVAL)
    fishers_pvals = [
        _calc_fm_pval(pvals_np[pos - offset:pos + offset + 1])
        if pos - offset >= 0 and
        pos + offset + 1 <= pvals_np.shape[0] and
        not np.any(np.isnan(pvals_np[pos - offset:pos + offset + 1])
                   ) else 1.0
        for _, pos, _, _ in pos_pvals]
    return fishers_pvals
def mann_whitney_u_test(samp1, samp2):
    """Two-sided Mann-Whitney U test via the normal approximation.

    No tie correction or continuity correction is applied; ties are ranked
    in arbitrary (argsort) order, as in the original implementation.

    :param samp1: 1-D numpy array of observations.
    :param samp2: 1-D numpy array of observations.
    :returns: two-sided p-value under the normal approximation.
    """
    s1_len = samp1.shape[0]
    s2_len = samp2.shape[0]
    tot_len = s1_len + s2_len
    all_vals = np.concatenate([samp1, samp2])
    # joint 1-based ranks of all observations
    ranks = np.empty(tot_len, int)
    ranks[all_vals.argsort()] = np.arange(1, tot_len + 1)
    s1_ranks_sum = ranks[:s1_len].sum()
    #s2_ranks_sum = ranks[s1_len:].sum()
    # BUG FIX: use explicit float division throughout. Under Python 2 the
    # original `/ 2` and `/ 12` truncated to integers, biasing the null
    # mean and standard deviation of U.
    u1 = s1_ranks_sum - (s1_len * (s1_len + 1)) / 2.0
    #u2 = s2_ranks_sum - (s2_len * (s2_len + 1)) / 2.0
    mu = s1_len * s2_len / 2.0
    rhou = np.sqrt(s1_len * s2_len * (s1_len + s2_len + 1) / 12.0)
    z = np.abs(u1 - mu) / rhou
    # two-sided p-value from the standard normal
    pval = stats.norm.cdf(-z) * 2.0
    return pval
def get_all_significance(
        raw_read_coverage1, raw_read_coverage2, test_type, min_test_vals,
        all_stats_fn, fishers_method_offset):
    """Test every covered genomic position for a signal difference.

    Positions covered in both samples are tested ('ttest' or 'mw_utest'),
    neighbouring p-values are optionally combined with Fisher's method, and
    FDR (Benjamini-Hochberg) corrections are applied. Optionally writes the
    full statistics to ``all_stats_fn`` in the format read by parse_stats.
    NOTE: this module targets Python 2 (`raise X, msg`, list-returning zip).
    """
    if VERBOSE: sys.stderr.write(
        'Test significance of difference in base signal.\n')
    # get num_region most significantly different regions from
    # each chrm then find global most signif after
    position_pvals = []
    for chrm_strand in set(raw_read_coverage1).intersection(
            raw_read_coverage2):
        chrm, strand = chrm_strand
        # get base events across all reads per chromosome/strand
        # so that all events aren't stored in RAM
        chrm_strand_base_events1 = nh.get_reads_events(
            raw_read_coverage1[chrm_strand], strand == '-')
        chrm_strand_base_events2 = nh.get_reads_events(
            raw_read_coverage2[chrm_strand], strand == '-')
        if test_type == 'ttest':
            # per-position two-sample t-test; keep (pval, pos, cov1, cov2)
            # for positions with at least min_test_vals in both samples
            chrm_pvals = [
                (np.abs(stats.ttest_ind(
                    chrm_strand_base_events1[pos],
                    chrm_strand_base_events2[pos])[1]), pos,
                 chrm_strand_base_events1[pos].shape[0],
                 chrm_strand_base_events2[pos].shape[0])
                for pos in sorted(set(
                    chrm_strand_base_events1).intersection(
                        chrm_strand_base_events2))
                if min(chrm_strand_base_events1[pos].shape[0],
                       chrm_strand_base_events2[pos].shape[0])
                >= min_test_vals]
        elif test_type == 'mw_utest':
            # store z-scores from u-test
            chrm_pvals = [
                (mann_whitney_u_test(
                    chrm_strand_base_events1[pos],
                    chrm_strand_base_events2[pos]), pos,
                 chrm_strand_base_events1[pos].shape[0],
                 chrm_strand_base_events2[pos].shape[0])
                for pos in sorted(set(
                    chrm_strand_base_events1).intersection(
                        chrm_strand_base_events2))
                if min(chrm_strand_base_events1[pos].shape[0],
                       chrm_strand_base_events2[pos].shape[0])
                >= min_test_vals]
        else:
            raise RuntimeError, ('Invalid significance test type ' +
                                 'provided: ' + str(test_type))
        if len(chrm_pvals) == 0: continue
        # combine neighbouring p-values when an offset is requested;
        # `zip(*chrm_pvals)[0]` relies on Python 2's list-returning zip
        chrm_pvals_f = calc_fishers_method(
            chrm_pvals, fishers_method_offset) \
            if fishers_method_offset > 0 else zip(*chrm_pvals)[0]
        position_pvals.extend(
            (pval_f, pval, pos, chrm, strand, cov1, cov2)
            for (pval, pos, cov1, cov2), pval_f in
            zip(chrm_pvals, chrm_pvals_f))
    if len(position_pvals) == 0:
        sys.stderr.write(
            '*' * 60 + '\nERROR: No regions contain minimum ' +
            'number of reads.\n' + '*' * 60 + '\n')
        sys.exit()
    # sort by combined (Fisher) p-value, most significant first
    position_pvals = sorted(position_pvals)
    fdr_corr_pvals_f = correct_multiple_testing(zip(*position_pvals)[0])
    # NOTE(review): the raw p-values are sorted independently of
    # position_pvals before correction, so each qval below may be paired
    # with a different entry's pval -- verify this ordering is intended.
    fdr_corr_pvals = correct_multiple_testing(
        sorted(zip(*position_pvals)[1]))
    all_stats = [(pval_f, qval_f, pval, qval,
                  pos, chrm, strand, cov1, cov2)
                 for qval_f, qval, (pval_f, pval,
                                    pos, chrm, strand, cov1, cov2) in
                 zip(fdr_corr_pvals_f, fdr_corr_pvals, position_pvals)]
    if all_stats_fn is not None:
        # dump per-position stats grouped by (chrm, strand); this is the
        # exact format parsed back by parse_stats
        chrm_strand_stats = defaultdict(list)
        for pval_f, qval_f, pval, qval, pos, chrm, strand, cov1, cov2 in all_stats:
            chrm_strand_stats[(chrm, strand)].append((
                pos, pval_f, qval_f, pval, qval, cov1, cov2))
        with open(all_stats_fn, 'w') as stats_fp:
            for (chrm, strand), pos_stats in chrm_strand_stats.items():
                stats_fp.write('>>>>::' + chrm + '::' + strand + '\n')
                stats_fp.write('\n'.join([
                    '{:d}\t{:.2g}\t{:.2g}\t{:.2g}\t{:.2g}\t{:d}\t{:d}'.format(
                        pos, pval_f, qval_f, pval, qval, cov1, cov2)
                    for pos, pval_f, qval_f, pval, qval, cov1, cov2 in
                    sorted(pos_stats)]) + '\n')
    return all_stats
def get_most_signif_regions(all_stats, num_bases, num_regions,
                            qval_thresh=None):
    """Select the most significant regions for plotting.

    :param all_stats: stats tuples (pval_f, qval_f, pval, qval, pos, chrm,
        strand, cov1, cov2), sorted most significant first.
    :param num_bases: number of bases to include around each position.
    :param num_regions: maximum number of regions to return (recomputed when
        ``qval_thresh`` is given).
    :param qval_thresh: optional combined q-value cutoff.
    :returns: pairs of (region_id, (chrm, start, strand, annotation)).
    """
    # applied threshold for scores on each chromosome, so now
    # we include all here
    if qval_thresh is not None:
        # all_stats is sorted, so the first q-value over the threshold bounds
        # the passing prefix.
        # BUG FIX: np.argmax over an all-False list returned 0, which aborted
        # with an error even when *every* region passed the threshold;
        # include them all in that case instead.
        exceeds = [stat[1] > qval_thresh for stat in all_stats]
        num_regions = exceeds.index(True) if any(exceeds) else len(all_stats)
        if num_regions == 0:
            sys.stderr.write(
                '*' * 60 + '\nERROR: No regions identified q-value ' +
                'below thresh. Minumum q-value: {:.2g}\n'.format(
                    all_stats[0][1]) + '*' * 60 + '\n')
            sys.exit()
    # center a num_bases window on each position, clamped at 0
    plot_intervals = list(zip(
        ['{:03d}'.format(rn) for rn in range(num_regions)],
        [(chrm, max(pos - int(num_bases / 2.0), 0), strand,
          '(q-value:{0:.2g} p-value:{1:.2g})'.format(qval_f, pval_f))
         for pval_f, qval_f, pval, qval, pos, chrm, strand, cov1, cov2 in
         all_stats[:num_regions]]))
    return plot_intervals
def parse_stats(stats_fn):
    """Load the per-position statistics file written by get_all_significance.

    Returns stats tuples (pval_f, qval_f, pval, qval, pos, chrm, strand,
    cov1, cov2), sorted. Exits with an error message if the file cannot
    be parsed (e.g. an older file format).
    """
    parsed_stats = []
    with open(stats_fn) as stats_fp:
        chrm, strand = None, None
        try:
            for line in stats_fp:
                if line.startswith('>>>>'):
                    # chromosome/strand header: >>>>::chrm::strand
                    _, chrm, strand = line.strip().split("::")
                    continue
                if chrm is None or strand is None:
                    sys.stderr.write(
                        'WARNING: Incorrectly formatted ' +
                        'statistics file. No chrm or strand ' +
                        'before statistics lines\n')
                pos, pval_f, qval_f, pval, qval, cov1, cov2 = line.split()
                parsed_stats.append((
                    float(pval_f), float(qval_f),
                    float(pval), float(qval), int(pos),
                    chrm, strand, int(cov1), int(cov2)))
        except ValueError:
            sys.stderr.write(
                '*' * 60 + '\nERROR: Attempt to load statistics ' +
                'file failed. May be an old version of statistics ' +
                'file. Try deleting statistics file and ' +
                'recalculating using current nanoraw version.\n' +
                '*' * 60 + '\n')
            sys.exit()
    return sorted(parsed_stats)
# functions for distances between event means
def sliding_window_dist(sig_diffs1, sig_diffs2, slide_span, num_bases):
    """Minimum Euclidean distance between two signal-difference vectors over
    all pairs of window offsets within +/- slide_span bases."""
    num_offsets = (slide_span * 2) + 1
    best_sq_dist = min(
        np.square(sig_diffs1[off1:off1 + num_bases] -
                  sig_diffs2[off2:off2 + num_bases]).sum()
        for off1 in range(num_offsets)
        for off2 in range(num_offsets))
    return np.sqrt(best_sq_dist)
def euclidian_dist(sig_diffs1, sig_diffs2):
    """Euclidean distance between two signal-difference vectors."""
    deltas = sig_diffs1 - sig_diffs2
    return np.sqrt((deltas * deltas).sum())
def get_pairwise_dists(reg_sig_diffs, index_q, dists_q, slide_span=None):
    """Worker: compute lower-triangle rows of the pairwise distance matrix.

    Pulls row indices from ``index_q`` until it is empty and puts
    ``(index, row_dists)`` tuples onto ``dists_q``. Entries above the
    diagonal are filled with 0. When ``slide_span`` is a positive int,
    distances use sliding_window_dist; otherwise plain euclidian_dist.
    """
    # BUG FIX: the original caught `Queue.Empty` without ever importing
    # `Queue`, so the empty-queue race raised NameError instead; import the
    # exception in a py2/py3-compatible way.
    try:
        from Queue import Empty  # Python 2
    except ImportError:
        from queue import Empty  # Python 3
    # BUG FIX: `slide_span > 0` raises TypeError for the default None under
    # Python 3; treat None explicitly as "no sliding" (py2 behavior).
    do_slide = slide_span is not None and slide_span > 0
    if do_slide:
        # usable window length after trimming slide_span from both ends
        num_bases = reg_sig_diffs[0].shape[0] - (slide_span * 2)
    while not index_q.empty():
        try:
            index = index_q.get(block=False)
        except Empty:
            break
        if do_slide:
            row_dists = np.array(
                [sliding_window_dist(
                    reg_sig_diffs[index], reg_sig_diffs[j],
                    slide_span, num_bases)
                 for j in range(0, index + 1)] +
                [0 for _ in range(index + 1, len(reg_sig_diffs))])
        else:
            row_dists = np.array(
                [euclidian_dist(reg_sig_diffs[index], reg_sig_diffs[j])
                 for j in range(0, index + 1)] +
                [0 for _ in range(index + 1, len(reg_sig_diffs))])
        dists_q.put((index, row_dists))
    return
if __name__ == '__main__':
    # Library module only; refuse direct execution.
    # (Python 2 raise syntax -- this module predates Python 3 support.)
    raise NotImplementedError, (
        'This is a module. See commands with `nanoraw -h`')
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Geant4(CMakePackage):
    """Geant4 is a toolkit for the simulation of the passage of particles
    through matter. Its areas of application include high energy, nuclear
    and accelerator physics, as well as studies in medical and space
    science."""

    homepage = "http://geant4.cern.ch/"
    url = "https://gitlab.cern.ch/geant4/geant4/-/archive/v10.7.1/geant4-v10.7.1.tar.gz"
    tags = ['hep']
    maintainers = ['drbenmorgan']

    version('11.0.0', sha256='04d11d4d9041507e7f86f48eb45c36430f2b6544a74c0ccaff632ac51d9644f1')
    # 10.7.3 is marked preferred, so the concretizer picks it over 11.0.0.
    version('10.7.3', sha256='8615d93bd4178d34f31e19d67bc81720af67cdab1c8425af8523858dcddcf65b', preferred=True)
    version('10.7.2', sha256='593fc85883a361487b17548ba00553501f66a811b0a79039276bb75ad59528cf')
    version('10.7.1', sha256='2aa7cb4b231081e0a35d84c707be8f35e4edc4e97aad2b233943515476955293')
    version('10.7.0', sha256='c991a139210c7f194720c900b149405090058c00beb5a0d2fac5c40c42a262d4')
    version('10.6.3', sha256='bf96d6d38e6a0deabb6fb6232eb00e46153134da645715d636b9b7b4490193d3')
    version('10.6.2', sha256='e381e04c02aeade1ed8cdd9fdbe7dcf5d6f0f9b3837a417976b839318a005dbd')
    version('10.6.1', sha256='4fd64149ae26952672a81ce5579d3806fda4bd251d486897093ac57633a42b7e')
    version('10.6.0', sha256='eebe6a170546064ff81ab3b00f513ccd1d4122a026514982368d503ac55a4ee4')
    version('10.5.1', sha256='2397eb859dc4de095ff66059d8bda9f060fdc42e10469dd7890946293eeb0e39')
    version('10.4.3', sha256='67f3bb6405a2c77e573936c2b933f5a4a33915aa379626a2eb3012009b91e1da')
    version('10.4.0', sha256='e919b9b0a88476e00c0b18ab65d40e6a714b55ee4778f66bac32a5396c22aa74')
    version('10.3.3', sha256='bcd36a453da44de9368d1d61b0144031a58e4b43a6d2d875e19085f2700a89d8')

    # Supported C++ standards; the first entry is the variant default.
    _cxxstd_values = ('11', '14', '17')
    variant('cxxstd',
            default=_cxxstd_values[0],
            values=_cxxstd_values,
            multi=False,
            description='Use the specified C++ standard when building.')
    # Geant4 11 only builds with C++17 (per the conflict messages below).
    conflicts('cxxstd=11', when='@11:', msg='geant4@11: only supports cxxstd=17')
    conflicts('cxxstd=14', when='@11:', msg='geant4@11: only supports cxxstd=17')
    variant('threads', default=True, description='Build with multithreading')
    variant('vecgeom', default=False, description='Enable vecgeom support')
    variant('opengl', default=False, description='Optional OpenGL support')
    variant('x11', default=False, description='Optional X11 support')
    variant('motif', default=False, description='Optional motif support')
    variant('qt', default=False, description='Enable Qt support')
    variant('python', default=False, description='Enable Python bindings')
    variant('tbb', default=False, description='Use TBB as a tasking backend', when='@11:')
    variant('vtk', default=False, description='Enable VTK support', when='@11:')

    # CMake floor rises with newer releases; most specific constraint first.
    depends_on('cmake@3.16:', type='build', when='@11.0.0:')
    depends_on('cmake@3.8:', type='build', when='@10.6.0:')
    depends_on('cmake@3.5:', type='build')

    # Pin the geant4-data package to the exact matching Geant4 release.
    for _vers in ["11.0.0", "10.7.3", "10.7.2", "10.7.1", "10.7.0", "10.6.3",
                  "10.6.2", "10.6.1", "10.6.0", "10.5.1", "10.4.3", "10.4.0",
                  "10.3.3"]:
        depends_on('geant4-data@' + _vers, type='run', when='@' + _vers)

    depends_on("expat")
    depends_on("zlib")
    depends_on('tbb', when='+tbb')
    depends_on('vtk@8.2:', when='+vtk')

    # Python, with boost requirement dealt with in cxxstd section
    depends_on('python@3:', when='+python')
    extends('python', when='+python')
    conflicts('+python', when='@:10.6.1',
              msg='Geant4 <= 10.6.1 cannot be built with Python bindings')

    # Propagate the selected C++ standard to every C++ dependency so the
    # whole stack is ABI-consistent.
    for std in _cxxstd_values:
        # CLHEP version requirements to be reviewed
        depends_on('clhep@2.4.5.1: cxxstd=' + std,
                   when='@11.0.0: cxxstd=' + std)
        depends_on('clhep@2.4.4.0: cxxstd=' + std,
                   when='@10.7.0: cxxstd=' + std)
        depends_on('clhep@2.3.3.0: cxxstd=' + std,
                   when='@10.3.3:10.6 cxxstd=' + std)
        # Spack only supports Xerces-c 3 and above, so no version req
        depends_on('xerces-c netaccessor=curl cxxstd=' + std,
                   when='cxxstd=' + std)
        # Vecgeom specific versions for each Geant4 version
        depends_on('vecgeom@1.1.18:1.1 cxxstd=' + std,
                   when='@11.0.0: +vecgeom cxxstd=' + std)
        depends_on('vecgeom@1.1.8:1.1 cxxstd=' + std,
                   when='@10.7.0: +vecgeom cxxstd=' + std)
        depends_on('vecgeom@1.1.5 cxxstd=' + std,
                   when='@10.6.0:10.6 +vecgeom cxxstd=' + std)
        depends_on('vecgeom@1.1.0 cxxstd=' + std,
                   when='@10.5.0:10.5 +vecgeom cxxstd=' + std)
        depends_on('vecgeom@0.5.2 cxxstd=' + std,
                   when='@10.4.0:10.4 +vecgeom cxxstd=' + std)
        depends_on('vecgeom@0.3rc cxxstd=' + std,
                   when='@10.3.0:10.3 +vecgeom cxxstd=' + std)
        # Boost.python, conflict handled earlier
        depends_on('boost@1.70: +python cxxstd=' + std,
                   when='+python cxxstd=' + std)

    # Visualization driver dependencies
    depends_on("gl", when='+opengl')
    depends_on("glu", when='+opengl')
    depends_on("glx", when='+opengl+x11')
    depends_on("libx11", when='+x11')
    depends_on("libxmu", when='+x11')
    depends_on("motif", when='+motif')
    depends_on("qt@5: +opengl", when="+qt")

    # As released, 10.03.03 has issues with respect to using external
    # CLHEP.
    patch('CLHEP-10.03.03.patch', level=1, when='@10.3.3')
    # These patches can be applied independent of the cxxstd value?
    patch('cxx17.patch', when='@:10.3 cxxstd=17')
    patch('cxx17_geant4_10_0.patch', level=1, when='@10.4.0 cxxstd=17')
    patch('geant4-10.4.3-cxx17-removed-features.patch',
          level=1, when='@10.4.3 cxxstd=17')

    def cmake_args(self):
        """Assemble the CMake configuration arguments for the active spec."""
        spec = self.spec
        # Core options
        options = [
            '-DGEANT4_USE_SYSTEM_CLHEP=ON',
            '-DGEANT4_USE_SYSTEM_EXPAT=ON',
            '-DGEANT4_USE_SYSTEM_ZLIB=ON',
            '-DGEANT4_USE_G3TOG4=ON',
            '-DGEANT4_USE_GDML=ON',
            '-DXERCESC_ROOT_DIR={0}'.format(spec['xerces-c'].prefix)
        ]
        # Use the correct C++ standard option for the requested version
        if spec.version >= Version('11.0'):
            options.append(
                self.define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'))
        else:
            options.append(
                self.define_from_variant('GEANT4_BUILD_CXXSTD', 'cxxstd'))
        # Don't install the package cache file as Spack will set
        # up CMAKE_PREFIX_PATH etc for the dependencies
        if spec.version >= Version('10.6'):
            options.append('-DGEANT4_INSTALL_PACKAGE_CACHE=OFF')
        # Multithreading
        options.append(self.define_from_variant('GEANT4_BUILD_MULTITHREADED',
                                                'threads'))
        options.append(self.define_from_variant('GEANT4_USE_TBB', 'tbb'))
        if '+threads' in spec:
            # Locked at global-dynamic to allow use cases that load the
            # geant4 libs at application runtime
            options.append('-DGEANT4_BUILD_TLS_MODEL=global-dynamic')
        # Never install the data with geant4, but point to the dependent
        # geant4-data's install directory to correctly set up the
        # Geant4Config.cmake values for Geant4_DATASETS .
        options.append(self.define('GEANT4_INSTALL_DATA', False))
        options.append(self.define('GEANT4_INSTALL_DATADIR', self.datadir))
        # Vecgeom
        if '+vecgeom' in spec:
            options.append('-DGEANT4_USE_USOLIDS=ON')
            options.append('-DUSolids_DIR=%s' % spec[
                'vecgeom'].prefix.lib.CMake.USolids)
        # Visualization options
        if 'platform=darwin' not in spec:
            # X11/Motif OpenGL drivers are not built on macOS
            if "+x11" in spec and "+opengl" in spec:
                options.append('-DGEANT4_USE_OPENGL_X11=ON')
            if "+motif" in spec and "+opengl" in spec:
                options.append('-DGEANT4_USE_XM=ON')
            if "+x11" in spec:
                options.append('-DGEANT4_USE_RAYTRACER_X11=ON')
        if '+qt' in spec:
            options.append('-DGEANT4_USE_QT=ON')
            options.append(
                '-DQT_QMAKE_EXECUTABLE=%s' %
                spec['qt'].prefix.bin.qmake)
        options.append(self.define_from_variant('GEANT4_USE_VTK', 'vtk'))
        # Python
        if spec.version > Version('10.6.1'):
            options.append(self.define_from_variant('GEANT4_USE_PYTHON',
                                                    'python'))
        return options

    @property
    def datadir(self):
        """Path to the dependent geant4-data package's dataset directory."""
        dataspec = self.spec['geant4-data']
        return join_path(
            dataspec.prefix.share,
            '{0}-{1}'.format(dataspec.name, dataspec.version.dotted)
        )
|
<reponame>lamypark/ingredient2vec<filename>src/utils/DataLoader.py
import os
import collections
import smart_open
import random
import numpy as np
import Config
"""
Load basic ingredients and compounds data from Nature Scientific Report(Ahn, 2011)
"""
class DataLoader:
    """Loaders for the ingredient/compound data files described in the module
    docstring (Ahn et al., 2011). NOTE: this class targets Python 2
    (print statements in load_data)."""

    # {ingredient_id: [ingredient_id1, ingredient_id2, ...] }
    def load_relations(self, path):
        # Map each ingredient id to the list of compound ids it contains.
        # Lines starting with '#' are comments and skipped.
        relations = {}
        with open(path, 'r') as f:
            for line in f:
                if line[0] == '#':
                    pass
                else:
                    line_split = line.rstrip().split('\t')
                    ingredient_id = line_split[0]
                    compound_id = line_split[1]
                    if ingredient_id in relations:
                        relations[ingredient_id].append(compound_id)
                    else:
                        relations[ingredient_id] = [compound_id]
        return relations

    # {ingredient_id: [ingredient_name, ingredient_category]}
    def load_ingredients(self, path):
        # Tab-separated file: id, then name/category columns.
        ingredients = {}
        ingredients_list = []
        with open(path, 'r') as f:
            for line in f:
                if line[0] == '#':
                    pass
                else:
                    line_split = line.rstrip().split('\t')
                    ingredients_id = line_split[0]
                    ingredients_list = line_split[1:]
                    ingredients[ingredients_id] = ingredients_list
        return ingredients

    # {compound_id: [compound_name, CAS_number]}
    def load_compounds(self, path):
        compounds = {}
        compounds_list = []
        with open(path, 'r') as f:
            for line in f:
                if line[0] == '#':
                    pass
                else:
                    line_split = line.rstrip().split('\t')
                    compounds_id = line_split[0]
                    compounds_list = line_split[1:]
                    compounds[compounds_id] = compounds_list
        return compounds

    # {compound_id: [compound_name, CAS_number, inChiKey, Smiles]}
    def load_compounds_updated(self, path):
        # Same format as load_compounds with two extra identifier columns.
        compounds = {}
        compounds_list = []
        with open(path, 'r') as f:
            for line in f:
                if line[0] == '#':
                    pass
                else:
                    line_split = line.rstrip().split('\t')
                    compounds_id = line_split[0]
                    compounds_list = line_split[1:]
                    compounds[compounds_id] = compounds_list
        return compounds

    def batch_iter(self, data, batch_size):
        """Yield successive batch_size slices of data.

        NOTE(review): num_batches is floored, so a final partial batch is
        dropped -- confirm this is intended.
        """
        #data = np.array(data)
        data_size = len(data)
        num_batches = int(len(data)/batch_size)
        for batch_num in range(num_batches):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield data[start_index:end_index]

    def load_data(self, train, feat_dim):
        """Read a 'culture,composer,...' CSV and build id mappings plus
        embedding vectors, returning 70/10/20 train/valid/test splits.

        NOTE(review): the split is over shuffled raw lines, but entries with
        fewer than comp_thr composers are filtered *after* the thresholds are
        computed -- the realized split ratios may differ slightly.
        """
        from nltk.stem import WordNetLemmatizer
        import GensimModels
        gensimLoader = GensimModels.GensimModels()
        model_loaded = gensimLoader.load_word2vec(path=Config.path_embeddings_ingredients)
        cult2id = {}
        id2cult = []
        # index 0 is reserved for the 'Nan' padding entry
        comp2id = {'Nan':0}
        id2comp = ['Nan']
        train_cult = []
        train_comp = []
        train_comp_len = []
        # entries with fewer composers than this are dropped
        comp_thr = 5
        max_comp_cnt = 0
        filtred_comp = 0
        # NOTE(review): this file handle is never closed.
        train_f = open(train, 'r')
        # skip the 4 header lines
        lines = train_f.readlines()[4:]
        random.shuffle(lines)
        train_thr = int(len(lines) * 0.7)
        valid_thr = int(len(lines) * 0.8)
        print "Build composer dictionary..."
        for i, line in enumerate(lines):
            tokens = line.strip().split(',')
            culture = tokens[0]
            composers = tokens[1:]
            if cult2id.get(culture) is None:
                cult2id[culture] = len(cult2id)
                id2cult.append(culture)
            if comp_thr > len(composers):
                filtred_comp += 1
                continue
            if max_comp_cnt < len(composers):
                max_comp_cnt = len(composers)
            for composer in composers:
                if comp2id.get(composer) is None:
                    comp2id[composer] = len(comp2id)
                    id2comp.append(composer)
            train_cult.append(cult2id.get(culture))
            train_comp.append([comp2id.get(composer) for composer in composers])
        # pad every composer-id list to max_comp_cnt with the 'Nan' id (0)
        for comp in train_comp:
            train_comp_len.append(len(comp))
            if len(comp) < max_comp_cnt:
                comp += [0]*(max_comp_cnt - len(comp))
        wv = model_loaded.wv
        w = model_loaded.index2word
        #print [model_loaded[idx] for idx in w]
        wv_var = np.var([model_loaded[idx] for idx in w])
        '''
        compid2vec = np.array([np.random.rand(feat_dim) if comp not in wv
                               else model_loaded[comp] for comp in id2comp])
        '''
        wnl = WordNetLemmatizer()
        mu, sigma = 0, 1
        compid2vec = []
        unk_cnt = 0
        # look up each composer's embedding, falling back to the lemmatized
        # form, the last '_'-separated token, its lemma, and finally a random
        # normal vector for out-of-vocabulary names
        for comp in id2comp:
            if comp in wv:
                compid2vec.append(model_loaded[comp])
            elif wnl.lemmatize(comp) in wv:
                compid2vec.append(model_loaded[wnl.lemmatize(comp)])
            elif comp.rstrip().split('_')[-1] in wv:
                compid2vec.append(model_loaded[comp.rstrip().split('_')[-1]])
            elif wnl.lemmatize(comp.rstrip().split('_')[-1]) in wv:
                compid2vec.append(model_loaded[wnl.lemmatize(comp.rstrip().split('_')[-1])])
            else:
                compid2vec.append(np.random.normal(mu, sigma, feat_dim))
                unk_cnt += 1
        print "unk cnt :", unk_cnt, "in", len(id2comp)
        print "filtered composer count is", filtred_comp
        return id2cult, id2comp, train_cult[:train_thr], train_comp[:train_thr], train_comp_len[:train_thr], train_cult[train_thr:valid_thr], train_comp[train_thr:valid_thr], train_comp_len[train_thr:valid_thr], train_cult[valid_thr:], train_comp[valid_thr:], train_comp_len[valid_thr:], max_comp_cnt, compid2vec

    # Ingredient_to_category
    def ingredient_to_category(self, tag, ingredients):
        # Return the category of the ingredient whose name matches tag;
        # returns None when no match is found.
        for ingr_id in ingredients:
            if ingredients[ingr_id][0] == tag:
                return ingredients[ingr_id][1]
            else:
                continue
        return

    # Corpus tag to index
    # NOTE(review): defined without `self`; calling this as an instance
    # method would bind `tags` to the instance -- confirm intended usage.
    def tag_to_index(tags, corpus):
        for doc_id in range(len(corpus)):
            if tags == corpus[doc_id].tags[0]:
                return doc_id
            else:
                continue
        return

    # Corpus index to tag
    # NOTE(review): also defined without `self` -- see tag_to_index above.
    def index_to_tag(index, corpus):
        return corpus[index].tags

    # Cuisine - Ingredients
    def load_cultures(self, path):
        """Load a comma-separated cuisine file: label, then ingredients.

        Returns ({culture_id: [ingredient_list, [culture_label]]}, vocab_set).
        """
        cultures = {}
        ingredient_list = []
        vocab = []
        with open(path, 'r') as f:
            culture_id = 0
            for culture_id, line in enumerate(f):
                if line[0] == '#':
                    pass
                else:
                    line_split = line.rstrip().split(',')
                    culture_label = line_split[0]
                    ingredient_list = line_split[1:]
                    cultures[culture_id] = [ingredient_list, [culture_label]]
                    for ingr in ingredient_list:
                        vocab.append(ingr)
        return cultures, set(vocab)
if __name__ == '__main__':
    dl = DataLoader()
    #ingredients = dl.load_ingredients(Config.path_ingr_info)
    #compounds = dl.load_compounds(Config.path_comp_info)
    #relations = dl.load_relations(Config.path_ingr_comp)
    # BUG FIX: DataLoader defines load_cultures(), not load_cuisine();
    # the original call raised AttributeError at runtime.
    cuisines = dl.load_cultures(Config.path_cuisine)
|
<reponame>techman83/maestral-dropbox
# -*- coding: utf-8 -*-
"""
@author: <NAME> (<EMAIL>)
(c) <NAME>; This work is licensed under a Creative Commons
Attribution-NonCommercial-NoDerivs 2.0 UK: England & Wales License.
This module is the heart of Maestral, it contains the classes for sync functionality.
"""
# system imports
import os
import os.path as osp
from stat import S_ISDIR
import resource
import logging
import gc
import time
import tempfile
import random
import json
from threading import Thread, Event, Lock, RLock, current_thread
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue, Empty
from collections import abc
import itertools
from contextlib import contextmanager
import functools
from enum import IntEnum
import pprint
# external imports
import pathspec
import umsgpack
import dropbox
from dropbox.files import Metadata, DeletedMetadata, FileMetadata, FolderMetadata
from watchdog.events import FileSystemEventHandler
from watchdog.events import (EVENT_TYPE_CREATED, EVENT_TYPE_DELETED,
EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED)
from watchdog.events import (DirModifiedEvent, FileModifiedEvent, DirCreatedEvent,
FileCreatedEvent, DirDeletedEvent, FileDeletedEvent,
DirMovedEvent, FileMovedEvent)
from watchdog.utils.dirsnapshot import DirectorySnapshot
from atomicwrites import atomic_write
# local imports
from maestral.config import MaestralConfig, MaestralState
from maestral.fsevents import Observer
from maestral.constants import (IDLE, SYNCING, PAUSED, STOPPED, DISCONNECTED,
EXCLUDED_FILE_NAMES, MIGNORE_FILE, IS_FS_CASE_SENSITIVE)
from maestral.errors import (MaestralApiError, RevFileError, DropboxDeletedError,
DropboxAuthError, SyncError, PathError, InotifyError,
NotFoundError, os_to_maestral_error)
from maestral.utils.content_hasher import DropboxContentHasher
from maestral.utils.notify import MaestralDesktopNotifier, FILECHANGE
from maestral.utils.path import (
generate_cc_name, path_exists_case_insensitive, to_cased_path,
move, delete, is_child, is_equal_or_child
)
from maestral.utils.appdirs import get_data_path
# Module-level logger named after this module for hierarchical configuration.
logger = logging.getLogger(__name__)
# Cached CPU count (may be None on exotic platforms, per os.cpu_count docs);
# presumably used to size worker pools later in the module -- usage not in view.
_cpu_count = os.cpu_count()
# ========================================================================================
# Syncing functionality
# ========================================================================================
class Conflict(IntEnum):
    """
    Enumeration of sync conflict types.

    :cvar int RemoteNewer: Remote item is newer.
    :cvar int Conflict: Conflict.
    :cvar int Identical: Items are identical.
    :cvar int LocalNewerOrIdentical: Local item is newer or identical.
    """
    RemoteNewer = 0
    Conflict = 1
    Identical = 2
    # BUG FIX: this was previously also 2, which made it an *alias* of
    # Identical (IntEnum collapses duplicate values into aliases), so the
    # two distinct conflict outcomes could not be told apart by callers.
    LocalNewerOrIdentical = 3
class InQueue:
    """
    Context manager that enqueues the given items on entry and removes them
    again on exit. Maestral uses this to keep track of in-flight uploads and
    downloads.
    """

    def __init__(self, queue, *items):
        """
        :param queue: Instance of :class:`queue.Queue`.
        :param iterable items: Items to put in queue.
        """
        self.queue = queue
        self.items = items

    def __enter__(self):
        for entry in self.items:
            self.queue.put(entry)

    def __exit__(self, err_type, err_value, err_traceback):
        # Always remove our items, whether or not an exception occurred.
        remove_from_queue(self.queue, *self.items)
class FSEventHandler(FileSystemEventHandler):
    """
    Handles captured file events and adds them to :class:`UpDownSync`'s file event queue
    to be uploaded by :meth:`upload_worker`. This acts as a translation layer between
    :class:`watchdog.Observer` and :class:`UpDownSync`.

    :param Event syncing: Set when syncing is running.
    :param Event startup: Set when startup is running.
    :param UpDownSync sync: UpDownSync instance.

    :cvar int ignore_timeout: Timeout in seconds after which ignored paths will be
        discarded.
    """

    ignore_timeout = 2

    def __init__(self, syncing, startup, sync):
        self.syncing = syncing
        self.startup = startup
        self.sync = sync
        # register ourselves with the sync engine
        self.sync.fs_events = self
        # list of ignore dicts (path, start_time, ttl, event_types,
        # recursive, is_dir), guarded by _mutex
        self._ignored_paths = list()
        self._mutex = Lock()
        self.local_file_event_queue = Queue()

    @contextmanager
    def ignore(self, *local_paths,
               event_types=(EVENT_TYPE_MOVED, EVENT_TYPE_DELETED, EVENT_TYPE_CREATED),
               recursive=False, is_dir=False):
        """
        A context manager to ignore file events related to the given paths. Once a
        matching event has been registered, further file events for the corresponding path
        will no longer be ignored unless ``recursive`` is ``True``. If no matching event
        has occurred before leaving the context, the paths will be ignored for
        ``ignore_timeout`` sec after leaving then context and then discarded. This
        accounts for possible delays in the emission of local file system events.

        This context manager is used to filter out file system events caused by maestral
        itself, for instance during a download or when moving a conflict.

        :param iterable local_paths: Local paths to ignore.
        :param iterable event_types: Event types that should be ignored. Members should be
            'moved', 'deleted' or 'created'.
        :param bool recursive: True if all events of child paths (of the same type) should
            be ignored as well.
        :param bool is_dir: True if only directory events should be ignored.
        """
        with self._mutex:
            now = time.time()
            new_ignores = list()
            for path in local_paths:
                new_ignores.append(
                    dict(
                        path=path,
                        start_time=now,
                        # ttl=None: no expiry while still inside the context
                        ttl=None,
                        event_types=event_types,
                        recursive=recursive,
                        is_dir=is_dir or recursive,
                    )
                )
            self._ignored_paths.extend(new_ignores)
        try:
            yield
        finally:
            # start the expiry clock only once the context is left
            with self._mutex:
                for ignore in new_ignores:
                    ignore['ttl'] = time.time() + self.ignore_timeout

    def _expire_ignored_paths(self):
        """Removes all expired ignore entries."""
        with self._mutex:
            now = time.time()
            # iterate over a copy since we remove entries while looping
            for ignore in self._ignored_paths.copy():
                ttl = ignore['ttl']
                if ttl and ttl < now:
                    self._ignored_paths.remove(ignore)

    def _prune_ignored(self, event):
        """
        Checks if a file system event should been explicitly ignored because it was likely
        triggered by Maestral. Split moved events if necessary and returns the event to
        keep (if any)

        :param FileSystemEvent event: Local file system event.
        :returns: Event to keep or ``None``.
        :rtype: :class:`watchdog.FileSystemEvent`
        """
        self._expire_ignored_paths()
        for ignore in self._ignored_paths:
            path = ignore['path']
            event_types = ignore['event_types']
            recursive = ignore['recursive']
            is_dir = ignore['is_dir']
            start_time = ignore['start_time']
            if event.event_type in event_types and event.is_directory == is_dir:
                if (not event.event_type == EVENT_TYPE_DELETED
                        and self.sync.get_ctime(get_dest_path(event)) < start_time):
                    # event occurred before exclusion started
                    return event
                if event.event_type == EVENT_TYPE_MOVED:
                    # check for source and destination path
                    src_path = event.src_path
                    dest_path = event.dest_path
                    if recursive:
                        ignore_src = is_equal_or_child(src_path, path)
                        ignore_dest = is_equal_or_child(dest_path, path)
                    else:
                        ignore_src = src_path == path
                        ignore_dest = dest_path == path
                    if ignore_src and ignore_dest:
                        # whole move is ours: drop the event entirely
                        if not recursive:
                            self._ignored_paths.remove(ignore)
                        return None
                    elif ignore_src:
                        # only the source half is ours: keep the created half
                        if not recursive:
                            self._ignored_paths.remove(ignore)
                        return split_moved_event(event)[1]
                    elif ignore_dest:
                        # only the destination half is ours: keep the deleted half
                        if not recursive:
                            self._ignored_paths.remove(ignore)
                        return split_moved_event(event)[0]
                else:
                    if recursive:
                        ignore_src = is_equal_or_child(event.src_path, path)
                    else:
                        ignore_src = event.src_path == path
                    if ignore_src:
                        # non-recursive ignores are single-shot
                        if not recursive:
                            self._ignored_paths.remove(ignore)
                        return None
        return event

    def on_any_event(self, event):
        """
        Callback on any event. Checks if the system file event should be ignored. If not,
        adds it to the queue for events to upload. If syncing is paused or stopped, all
        events will be ignored.

        :param event: Watchdog file event.
        """
        # ignore events if we are not during startup or sync
        if not (self.syncing.is_set() or self.startup.is_set()):
            return
        # check for ignored paths, split moved events if necessary
        event = self._prune_ignored(event)
        if event:
            self.local_file_event_queue.put(event)
class MaestralStateWrapper(abc.MutableSet):
    """
    A wrapper for a list in the saved state that implements a MutableSet interface. All
    given paths are stored in lower-case, reflecting Dropbox's insensitive file system.

    :param str config_name: Name of config.
    :param str section: Section name in state.
    :param str option: Option name in state.
    """
    # single class-level lock: serializes access across all wrapper instances
    _lock = RLock()

    def __init__(self, config_name, section, option):
        super().__init__()
        self.config_name = config_name
        self.section = section
        self.option = option
        self._state = MaestralState(config_name)

    def _read(self):
        # current list stored in the state file for our section / option
        return self._state.get(self.section, self.option)

    def _write_modified(self, dbx_path, set_op):
        # normalize the path (lower-case, no trailing separator), apply the
        # given set operation and persist the result
        dbx_path = dbx_path.lower().rstrip(osp.sep)
        with self._lock:
            items = set(self._read())
            set_op(items, dbx_path)
            self._state.set(self.section, self.option, list(items))

    def __iter__(self):
        with self._lock:
            return iter(self._read())

    def __contains__(self, dbx_path):
        with self._lock:
            return dbx_path in self._read()

    def __len__(self):
        with self._lock:
            return len(self._read())

    def discard(self, dbx_path):
        self._write_modified(dbx_path, set.discard)

    def add(self, dbx_path):
        self._write_modified(dbx_path, set.add)

    def clear(self):
        """Clears all elements."""
        with self._lock:
            self._state.set(self.section, self.option, [])

    def __repr__(self):
        return f'<{self.__class__.__name__}(section=\'{self.section}\',' \
               f'option=\'{self.option}\', entries={list(self)})>'
def catch_sync_issues(func):
    """
    Decorator that catches all SyncErrors and logs them.

    On success, the wrapped function's return value is passed through, except
    that ``None`` is converted to ``True``. On a :class:`SyncError`, the error
    is recorded in ``self.sync_errors`` (and in ``self.download_errors`` when
    the call was a download, i.e., received a ``Metadata`` argument) and
    ``False`` is returned.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            res = func(self, *args, **kwargs)
            if res is None:
                res = True
        except SyncError as exc:
            # fix: exc.dbx_path may be None (checked below); calling
            # os.path.basename(None) would raise TypeError inside the handler
            file_name = os.path.basename(exc.dbx_path) if exc.dbx_path else 'unknown'
            logger.warning('Could not sync %s', file_name, exc_info=True)
            if exc.dbx_path is not None:
                if exc.local_path is None:
                    exc.local_path = self.to_local_path(exc.dbx_path)
                self.sync_errors.put(exc)
                if any(isinstance(a, Metadata) for a in args):
                    # remember failed downloads so they can be retried later
                    self.download_errors.add(exc.dbx_path)
            res = False
        return res
    return wrapper
class UpDownSync:
"""
Class that contains methods to sync local file events with Dropbox and vice versa.
Notes on event processing:
Remote events come in three types, DeletedMetadata, FolderMetadata and FileMetadata.
The Dropbox API does not differentiate between created, moved or modified events.
Maestral processes the events as follows:
1) ``_clean_remote_changes``: Combine multiple events per file path into one. This
    is rarely necessary, Dropbox typically already provides only a single event per
path but this is not guaranteed and may change. One exception is sharing a
folder: This is done by removing the folder from Dropbox and re-mounting it as a
shared folder and produces at least one DeletedMetadata and one FolderMetadata
event. If querying for changes *during* this process, multiple DeletedMetadata
    events may be returned. If a File / Folder event implies a type change, e.g.,
replacing a folder with a file, we explicitly generate the necessary
DeletedMetadata here to simplify conflict resolution.
2) ``_filter_excluded_changes_remote``: Filters out events that occurred for files
or folders excluded by selective sync as well as hard-coded file names which are
always excluded (e.g., `.DS_Store`).
3) ``apply_remote_changes``: Sorts all events hierarchically, with top-level events
coming first. Deleted and folder events are processed in order, file events in
parallel with up to 6 worker threads.
4) ``create_local_entry``: Checks for sync conflicts by comparing the file version,
as determined from its rev number, with our locally saved rev. We assign folders
a rev of 'folder' and deleted / non-existent items a rev of None. If revs are
    equal, the local item is the same as or newer than on Dropbox, no download / deletion
occurs. If revs are different, we compare content hashes. Folders are assigned a
hash of 'folder'. If hashes are equal, no download occurs. Finally we check if
the local item has been modified since the last download sync. In case of a
folder, we take newest change of any of its children. If the local item has not
been modified since the last sync, it will be overridden. Otherwise, we create a
conflicting copy.
Local file events come in eight types: For both files and folders we collect created,
moved, modified and deleted events. They are processed as follows:
1) ``FSEventHandler``: Our file system event handler tries to discard any events
that originate from Maestral itself, e.g., from downloads. In case of a moved
event, if only one of the two paths should be ignored at this stage, the event
will be split into a deleted event (old path) and a created event (new path) and
one of the two will be ignored.
2) We wait until no new changes happen for at least 1.0 sec.
3) ``_filter_excluded_changes_local``: Filters out events ignored by a `.mignore`
pattern as well as hard-coded file names which are always excluded.
4) ``_clean_local_events``: Cleans up local events in two stages. First, multiple
events per path are combined into a single event to reproduce the file changes.
    The only exception is when the item type changes from file to folder or vice
versa: in this case, both deleted and created events are kept. Second, when a
whole folder is moved or deleted, we discard the moved and deleted events of its
children.
    5) ``apply_local_changes``: Sort local changes hierarchically and apply events in
    the order of deleted, folders and files. File uploads will be carried out in
parallel with up to 6 threads. Conflict resolution and upload / move / deletion
will be handled by ``create_remote_entry`` as follows:
    6) Conflict resolution: For created and moved events, we check if the new path has
been excluded by the user with selective sync but still exists on Dropbox. If
yes, it will be renamed by appending "(selective sync conflict)". On case-
sensitive file systems, we check if the new path differs only in casing from an
existing path. If yes, it will be renamed by appending "(case conflict)". If a
file has been replaced with a folder or vice versa, check if any un-synced
changes will be lost replacing the remote item. Create a conflicting copy if
necessary. Dropbox does not handle conflict resolution for us in this case.
7) For created or modified files, check if the local content hash equals the remote
content hash. If yes, we don't upload but update our rev number.
8) Upload the changes, specify the rev which we want to replace / delete. If the
remote item is newer (different rev), Dropbox will handle conflict resolution.
9) Confirm the successful upload and check if Dropbox has renamed the item to a
conflicting copy. In the latter case, apply those changes locally.
10) Update local revs with the new revs assigned by Dropbox.
:param MaestralApiClient client: Dropbox API client instance.
"""
lock = Lock()
_rev_lock = RLock()
_last_sync_lock = RLock()
_max_history = 30
_num_threads = min(32, os.cpu_count() * 3)
    def __init__(self, client):
        """
        :param MaestralApiClient client: Dropbox API client instance.
        """
        self.client = client
        self.config_name = self.client.config_name
        # file system event handler; assigned by the monitor before sync starts
        self.fs_events = None
        self._conf = MaestralConfig(self.config_name)
        self._state = MaestralState(self.config_name)
        self.notifier = MaestralDesktopNotifier.for_config(self.config_name)
        # paths which failed to download (persisted in the saved state)
        self.download_errors = MaestralStateWrapper(
            self.config_name, section='sync', option='download_errors'
        )
        # paths queued for download, e.g., when newly included (persisted)
        self.pending_downloads = MaestralStateWrapper(
            self.config_name, section='sync', option='pending_downloads'
        )
        # queues used for internal communication
        self.sync_errors = Queue()  # entries are `SyncIssue` instances
        self.queued_newly_included_downloads = Queue()  # entries are local_paths
        # the following queues are only for monitoring / user info
        # and are expected to contain correctly cased local paths or Dropbox paths
        self.queued_for_download = Queue()
        self.queued_for_upload = Queue()
        self.queue_uploading = Queue()
        self.queue_downloading = Queue()
        # load cached properties
        self._dropbox_path = self._conf.get('main', 'path')
        self._mignore_path = osp.join(self._dropbox_path, MIGNORE_FILE)
        self._rev_file_path = get_data_path('maestral', f'{self.config_name}.index')
        self._rev_dict_cache = dict()
        # convert a legacy msgpack index (if any), then load the rev index;
        # order matters: migration must run before loading
        self._migrate_rev_dict()
        self._load_rev_dict_from_file(raise_exception=True)
        self._excluded_items = self._conf.get('main', 'excluded_items')
        self._mignore_rules = self._load_mignore_rules_form_file()
        self._last_sync_for_path = dict()
        # stored value is per-cpu; scale to a total across all cores
        self._max_cpu_percent = self._conf.get('sync', 'max_cpu_percent') * _cpu_count
# ==== settings ======================================================================
    @property
    def dropbox_path(self):
        """
        Path to local Dropbox folder, as loaded from the config file. Before changing
        :attr:`dropbox_path`, make sure that syncing is paused. Move the dropbox folder to
        the new location before resuming the sync. Changes are saved to the config file.
        """
        return self._dropbox_path
    @dropbox_path.setter
    def dropbox_path(self, path):
        """Setter: dropbox_path"""
        self._dropbox_path = path
        # the mignore file always lives at the top level of the Dropbox folder
        self._mignore_path = osp.join(self._dropbox_path, MIGNORE_FILE)
        self._conf.set('main', 'path', path)
    @property
    def rev_file_path(self):
        """Path to sync index with rev numbers (read only)."""
        # computed once in __init__ from the config name
        return self._rev_file_path
    @property
    def excluded_items(self):
        """List of all files and folders excluded from sync. Changes are saved to the
        config file. If a parent folder is excluded, its children will automatically be
        removed from the list. If only children are given but not the parent folder,
        any new items added to the parent will be synced. Change this property *before*
        downloading newly included items or deleting excluded items."""
        return self._excluded_items
    @excluded_items.setter
    def excluded_items(self, folder_list):
        """Setter: excluded_items"""
        # normalize (lower-case, deduplicated, no children of excluded
        # parents) before caching and persisting
        clean_list = self.clean_excluded_items_list(folder_list)
        self._excluded_items = clean_list
        self._conf.set('main', 'excluded_items', clean_list)
@staticmethod
def clean_excluded_items_list(folder_list):
"""
Removes all duplicates and children of excluded items from the excluded items
list.
:param list folder_list: Items to exclude.
:returns: Cleaned up items.
:rtype: list[str]
"""
# remove duplicate entries by creating set, strip trailing '/'
folder_list = set(f.lower().rstrip(osp.sep) for f in folder_list)
# remove all children of excluded folders
clean_list = list(folder_list)
for folder in folder_list:
clean_list = [f for f in clean_list if not is_child(f, folder)]
return clean_list
@property
def max_cpu_percent(self):
"""Maximum CPU usage for parallel downloads or uploads in percent of the total
available CPU time. Individual workers in a thread pool will pause until the
usage drops below this value. Tasks in the main thread such as indexing file
changes may still use more CPU time. Setting this to 100% means that no limits on
CPU usage will be applied."""
return self._max_cpu_percent
@max_cpu_percent.setter
def max_cpu_percent(self, percent):
"""Setter: max_cpu_percent."""
self._max_cpu_percent = percent
self._conf.set('app', 'max_cpu_percent', percent // _cpu_count)
# ==== sync state ====================================================================
    @property
    def last_cursor(self):
        """Cursor from last sync with remote Dropbox. The value is updated and saved to
        the config file on every successful download of remote changes."""
        return self._state.get('sync', 'cursor')
    @last_cursor.setter
    def last_cursor(self, cursor):
        """Setter: last_cursor"""
        # persisted immediately so a restart resumes from the right point
        self._state.set('sync', 'cursor', cursor)
        logger.debug('Remote cursor saved: %s', cursor)
    @property
    def last_sync(self):
        """Time stamp from last sync with remote Dropbox. The value is updated and
        saved to the config file on every successful upload of local changes."""
        return self._state.get('sync', 'lastsync')
    @last_sync.setter
    def last_sync(self, last_sync):
        """Setter: last_sync"""
        # persisted immediately; acts as the local counterpart of last_cursor
        logger.debug('Local cursor saved: %s', last_sync)
        self._state.set('sync', 'lastsync', last_sync)
    @property
    def last_reindex(self):
        """Time stamp of last indexing. This is used to determine when the next full
        indexing should take place."""
        return self._state.get('sync', 'last_reindex')
    @last_reindex.setter
    def last_reindex(self, time_stamp):
        """Setter: last_reindex."""
        self._state.set('sync', 'last_reindex', time_stamp)
def get_last_sync_for_path(self, dbx_path):
"""
Returns the timestamp of last sync for an individual path.
:param str dbx_path: Path relative to Dropbox folder.
:returns: Time of last sync.
:rtype: float
"""
with self._last_sync_lock:
dbx_path = dbx_path.lower()
return self._last_sync_for_path.get(dbx_path, None) or self.last_sync
def set_last_sync_for_path(self, dbx_path, last_sync):
"""
Sets the timestamp of last sync for a path.
:param str dbx_path: Path relative to Dropbox folder.
:param float last_sync: Time of last sync.
"""
with self._last_sync_lock:
dbx_path = dbx_path.lower()
if last_sync == 0.0:
try:
del self._last_sync_for_path[dbx_path]
except KeyError:
pass
else:
self._last_sync_for_path[dbx_path] = last_sync
# ==== rev file management ===========================================================
def get_rev_index(self):
"""
Returns a copy of the revision index containing the revision
numbers for all synced files and folders.
:returns: Copy of revision index.
:rtype: dict
"""
with self._rev_lock:
return self._rev_dict_cache.copy()
def get_local_rev(self, dbx_path):
"""
Gets revision number of local file.
:param str dbx_path: Path relative to Dropbox folder.
:returns: Revision number as str or ``None`` if no local revision number
has been saved.
:rtype: str
"""
with self._rev_lock:
dbx_path = dbx_path.lower()
rev = self._rev_dict_cache.get(dbx_path, None)
return rev
    def set_local_rev(self, dbx_path, rev):
        """
        Saves revision number ``rev`` for local file. If ``rev`` is ``None``, the
        entry for the file is removed.

        :param str dbx_path: Path relative to Dropbox folder.
        :param str, None rev: Revision number as string or ``None``.
        """
        with self._rev_lock:
            dbx_path = dbx_path.lower()
            if rev == self._rev_dict_cache.get(dbx_path, None):
                # rev is already set, nothing to do
                return
            if rev is None:
                # remove entry and all its children revs
                for path in dict(self._rev_dict_cache):
                    if is_equal_or_child(path, dbx_path):
                        self._rev_dict_cache.pop(path, None)
                        # a None entry makes the on-disk index forget the path
                        self._append_rev_to_file(path, None)
            else:
                # add entry
                self._rev_dict_cache[dbx_path] = rev
                self._append_rev_to_file(dbx_path, rev)
                # set all parent revs to 'folder'
                dirname = osp.dirname(dbx_path)
                while dirname != '/':
                    self._rev_dict_cache[dirname] = 'folder'
                    self._append_rev_to_file(dirname, 'folder')
                    dirname = osp.dirname(dirname)
    def _clean_and_save_rev_file(self):
        """Cleans the revision index from duplicate entries and keeps only the last entry
        for any individual path. Then saves the index to the drive."""
        # the in-memory cache already holds one entry per path, so rewriting
        # the file from it drops the duplicates accumulated by appends
        self._save_rev_dict_to_file()
    def clear_rev_index(self):
        """Clears the revision index."""
        with self._rev_lock:
            self._rev_dict_cache.clear()
            # persist the now-empty index to disk
            self._save_rev_dict_to_file()
    @contextmanager
    def _handle_rev_read_exceptions(self, raise_exception=False):
        # Context manager which funnels errors when reading the rev file into
        # RevFileError and either raises or logs them, depending on
        # ``raise_exception``. A missing index is not an error: it resets the
        # sync state so a full re-download is triggered.
        title = None
        new_exc = None
        try:
            yield
        except (FileNotFoundError, IsADirectoryError):
            logger.info('Maestral index could not be found')
            # reset sync state
            self.last_sync = 0.0
            self._rev_dict_cache = dict()
            self.last_cursor = ''
        except PermissionError as exc:
            title = 'Could not load index'
            msg = (f'Insufficient permissions for "{self.rev_file_path}". Please '
                   'make sure that you have read and write permissions.')
            new_exc = RevFileError(title, msg).with_traceback(exc.__traceback__)
        except OSError as exc:
            title = 'Could not load index'
            msg = 'Please resync your Dropbox to rebuild the index.'
            new_exc = RevFileError(title, msg).with_traceback(exc.__traceback__)
        if new_exc and raise_exception:
            raise new_exc
        elif new_exc:
            # log with the original traceback instead of raising
            exc_info = (type(new_exc), new_exc, new_exc.__traceback__)
            logger.error(title, exc_info=exc_info)
    @contextmanager
    def _handle_rev_write_exceptions(self, raise_exception=False):
        # Context manager which funnels errors when writing the rev file into
        # RevFileError and either raises or logs them, depending on
        # ``raise_exception``.
        title = None
        new_exc = None
        try:
            yield
        except PermissionError as exc:
            title = 'Could not save index'
            msg = (f'Insufficient permissions for "{self.rev_file_path}". Please '
                   'make sure that you have read and write permissions.')
            new_exc = RevFileError(title, msg).with_traceback(exc.__traceback__)
        except OSError as exc:
            title = 'Could not save index'
            msg = 'Please check the logs for more information'
            new_exc = RevFileError(title, msg).with_traceback(exc.__traceback__)
        if new_exc and raise_exception:
            raise new_exc
        elif new_exc:
            # log with the original traceback instead of raising
            exc_info = (type(new_exc), new_exc, new_exc.__traceback__)
            logger.error(title, exc_info=exc_info)
    def _migrate_rev_dict(self):
        # Migrates a legacy msgpack-serialized rev file to the newer
        # line-delimited json format by loading and re-saving it.
        try:
            with self._handle_rev_read_exceptions():
                with open(self.rev_file_path, 'rb') as f:
                    self._rev_dict_cache = umsgpack.unpack(f)
                    if isinstance(self._rev_dict_cache, dict):
                        # valid legacy index: rewrite it in the new format
                        self._save_rev_dict_to_file()
        except umsgpack.InsufficientDataException:
            # not msgpack data, presumably already migrated — nothing to do
            pass
        # always reset the cache; _load_rev_dict_from_file repopulates it
        self._rev_dict_cache = dict()
    def _load_rev_dict_from_file(self, raise_exception=False):
        """
        Loads Maestral's rev index from ``rev_file_path``. Every line contains the rev
        number for a single path, saved in a json format. Only the last entry for every
        path is kept since it is the newest.

        :param bool raise_exception: If ``True``, raises an exception when loading fails.
            If ``False``, an error message is logged instead.
        :raises: :class:`errors.RevFileError`
        """
        with self._rev_lock:
            self._rev_dict_cache.clear()
            with self._handle_rev_read_exceptions(raise_exception):
                with open(self.rev_file_path, 'r') as f:
                    for line in f:
                        try:
                            entry = json.loads(line.strip('\n'))
                            # later lines overwrite earlier entries for a path
                            self._rev_dict_cache.update(entry)
                        except json.decoder.JSONDecodeError as exc:
                            if line.endswith('\n'):
                                raise exc
                            else:
                                # last line of file, likely an interrupted write
                                pass
            # clean up empty revs
            for path, rev in self._rev_dict_cache.copy().items():
                if not rev:
                    # None / empty revs mark deleted entries, drop them
                    del self._rev_dict_cache[path]
    def _save_rev_dict_to_file(self, raise_exception=False):
        """
        Save Maestral's rev index to ``rev_file_path``.

        :param bool raise_exception: If ``True``, raises an exception when saving fails.
            If ``False``, an error message is logged instead.
        :raises: :class:`errors.RevFileError`
        """
        with self._rev_lock:
            with self._handle_rev_write_exceptions(raise_exception):
                # atomic write prevents a corrupt index if we crash mid-save
                with atomic_write(self.rev_file_path, mode='w', overwrite=True) as f:
                    for path, rev in self._rev_dict_cache.items():
                        f.write(json.dumps({path: rev}) + '\n')
    def _append_rev_to_file(self, path, rev, raise_exception=False):
        """
        Appends a new line with '{path}: {rev}\n' to the rev file. This is quicker than
        saving the entire rev index. When loading the rev file, older entries will be
        overwritten with newer ones and all entries with rev == None will be discarded.

        :param str path: Path for rev.
        :param str, None rev: Dropbox rev number.
        :raises: RevFileError if ``raise_exception`` is ``True``.
        """
        with self._rev_lock:
            with self._handle_rev_write_exceptions(raise_exception):
                # append-only: deduplication happens at load / full-save time
                with open(self.rev_file_path, mode='a') as f:
                    f.write(json.dumps({path: rev}) + '\n')
# ==== mignore management ============================================================
    @property
    def mignore_path(self):
        """Path to mignore file on local drive (read only)."""
        # kept in sync with dropbox_path by __init__ and the dropbox_path setter
        return self._mignore_path
    @property
    def mignore_rules(self):
        """List of mignore rules following git wildmatch syntax."""
        # reload the rules if the mignore file changed since the last read
        if self.get_ctime(self.mignore_path) != self._mignore_ctime_loaded:
            self._mignore_rules = self._load_mignore_rules_form_file()
        return self._mignore_rules
    def _load_mignore_rules_form_file(self):
        # Reads the mignore file and compiles it into a PathSpec of git
        # wildmatch rules. A missing file yields an empty rule set.
        # NOTE(review): 'form' looks like a typo for 'from' but the name is
        # kept since other code calls this method by name.
        # remember the ctime so `mignore_rules` can detect file changes
        self._mignore_ctime_loaded = self.get_ctime(self.mignore_path)
        try:
            with open(self.mignore_path, 'r') as f:
                spec = f.read()
        except FileNotFoundError:
            spec = ''
        return pathspec.PathSpec.from_lines('gitwildmatch', spec.splitlines())
# ==== helper functions ==============================================================
def ensure_dropbox_folder_present(self):
"""
Checks if the Dropbox folder still exists where we expect it to be.
:raises: :class:`errors.DropboxDeletedError`
"""
if not osp.isdir(self.dropbox_path):
title = 'Dropbox folder has been moved or deleted'
msg = ('Please move the Dropbox folder back to its original location '
'or restart Maestral to set up a new folder.')
raise DropboxDeletedError(title, msg)
def to_dbx_path(self, local_path):
"""
Converts a local path to a path relative to the Dropbox folder. Casing of the
given ``local_path`` will be preserved.
:param str local_path: Absolute path on local drive.
:returns: Relative path with respect to Dropbox folder.
:rtype: str
:raises: :class:`ValueError` the path lies outside of the local Dropbox folder.
"""
dbx_root_list = osp.normpath(self.dropbox_path).split(osp.sep)
path_list = osp.normpath(local_path).split(osp.sep)
# Work out how much of the file path is shared by dropbox_path and path
# noinspection PyTypeChecker
i = len(osp.commonprefix([dbx_root_list, path_list]))
if i == len(path_list): # path corresponds to dropbox_path
return '/'
elif i != len(dbx_root_list): # path is outside of dropbox_path
raise ValueError(f'Specified path "{local_path}" is outside of Dropbox '
f'directory "{self.dropbox_path}"')
return '/{}'.format('/'.join(path_list[i:]))
    def to_local_path(self, dbx_path):
        """
        Converts a Dropbox path to the corresponding local path.

        The ``path_display`` attribute returned by the Dropbox API only guarantees correct
        casing of the basename and not of the full path. This is because Dropbox itself is
        not case sensitive and stores all paths in lowercase internally. To the extent
        where parent directories of ``dbx_path`` exist on the local drive, their casing
        will be used. Otherwise, the casing from ``dbx_path`` is used. This aims to
        preserve the correct casing of file and folder names and prevents the creation of
        duplicate folders with different casing on a local case-sensitive file system.

        :param str dbx_path: Path relative to Dropbox folder.
        :returns: Corresponding local path on drive.
        :rtype: str
        """
        dbx_path = dbx_path.replace('/', osp.sep)
        dbx_path_parent, dbx_path_basename = osp.split(dbx_path)
        # resolve the parent against existing local folders for correct casing
        local_parent = to_cased_path(dbx_path_parent, root=self.dropbox_path)
        if local_parent == '':
            # no cased parent found on disk: fall back to the casing of dbx_path
            return osp.join(self.dropbox_path, dbx_path.lstrip(osp.sep))
        else:
            return osp.join(local_parent, dbx_path_basename)
def has_sync_errors(self):
"""Returns ``True`` in case of sync errors, ``False`` otherwise."""
return self.sync_errors.qsize() > 0
def clear_sync_error(self, local_path=None, dbx_path=None):
"""
Clears all sync errors for ``local_path`` or ``dbx_path``.
:param str local_path: Absolute path on local drive.
:param str dbx_path: Path relative to Dropbox folder.
:raises: :class:`ValueError` if no path is given.
"""
if not (local_path or dbx_path):
raise ValueError('Either local_path or dbx_path must be given.')
if not dbx_path:
dbx_path = self.to_dbx_path(local_path)
if self.has_sync_errors():
for error in list(self.sync_errors.queue):
equal = error.dbx_path.lower() == dbx_path.lower()
child = is_child(error.dbx_path.lower(), dbx_path.lower())
if equal or child:
remove_from_queue(self.sync_errors, error)
self.download_errors.discard(dbx_path)
    def clear_all_sync_errors(self):
        """Clears all sync errors."""
        # clear the queue under its own mutex to avoid races with producers
        with self.sync_errors.mutex:
            self.sync_errors.queue.clear()
        self.download_errors.clear()
@staticmethod
def is_excluded(path):
"""
Checks if file is excluded from sync. Certain file names are always excluded from
syncing, following the Dropbox support article:
https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
The include file system files such as 'desktop.ini' and '.DS_Store' and some
temporary files. This is determined by the basename alone and `is_excluded`
therefore accepts both relative and absolute paths.
:param str path: Path of item. Can be absolute or relative.
:returns: ``True`` if excluded, ``False`` otherwise.
:rtype: bool
"""
path = path.lower()
# is root folder?
if path in ['/', '']:
return True
basename = osp.basename(path)
# in excluded files?
test0 = basename in EXCLUDED_FILE_NAMES
# is temporary file?
# 1) office temporary files
test1 = basename.startswith('~$')
test2 = basename.startswith('.~')
# 2) other temporary files
test3 = basename.startswith('~') and basename.endswith('.tmp')
return any((test0, test1, test2, test3))
def is_excluded_by_user(self, dbx_path):
"""
Check if file has been excluded from sync by the user.
:param str dbx_path: Path relative to Dropbox folder.
:returns: ``True`` if excluded, ``False`` otherwise.
:rtype: bool
"""
dbx_path = dbx_path.lower()
return any(is_equal_or_child(dbx_path, path) for path in self.excluded_items)
def is_mignore(self, event):
"""
Check if local file change has been excluded by an mignore pattern.
:param FileSystemEvent event: Local file event.
:returns: ``True`` if excluded, ``False`` otherwise.
:rtype: bool
"""
if len(self.mignore_rules.patterns) == 0:
return False
dbx_path = self.to_dbx_path(event.src_path)
return (self._is_mignore_path(dbx_path, is_dir=event.is_directory)
and not self.get_local_rev(dbx_path))
def _is_mignore_path(self, dbx_path, is_dir=False):
relative_path = dbx_path.lstrip('/')
if is_dir:
relative_path += '/'
return self.mignore_rules.match_file(relative_path)
    def _slow_down(self):
        # Throttles worker threads while CPU usage exceeds the configured
        # limit. NOTE(review): _max_cpu_percent is scaled by the cpu count in
        # __init__, so the `== 100` no-limit check presumably assumes
        # cpu_usage_percent() reports totals on the same scale — confirm.
        if self._max_cpu_percent == 100:
            return
        # only throttle pool workers, never the main thread
        if 'pool' in current_thread().name:
            cpu_usage = cpu_usage_percent()
            while cpu_usage > self._max_cpu_percent:
                # re-sample usage over a randomized 0.5–2.5 s interval
                cpu_usage = cpu_usage_percent(0.5 + 2 * random.random())
# ==== Upload sync ===================================================================
    def upload_local_changes_while_inactive(self):
        """
        Collects changes while sync has not been running and uploads them to Dropbox.
        Call this method when resuming sync.
        """
        logger.info('Indexing local changes...')
        try:
            events, local_cursor = self._get_local_changes_while_inactive()
            logger.debug('Retrieved local changes:\n%s', pprint.pformat(events))
            events = self._clean_local_events(events)
        except FileNotFoundError:
            # the Dropbox folder may have vanished; raise a descriptive error
            self.ensure_dropbox_folder_present()
            return
        if len(events) > 0:
            self.apply_local_changes(events, local_cursor)
            logger.debug('Uploaded local changes while inactive')
        else:
            # nothing to upload, just advance the local cursor
            self.last_sync = local_cursor
            logger.debug('No local changes while inactive')
    def _get_local_changes_while_inactive(self):
        """
        Compares a snapshot of the local Dropbox folder against the rev index to
        reconstruct the file events (created / modified / deleted) which occurred
        while sync was not running.

        :returns: (list of file events, time stamp of the snapshot)
        :rtype: (list, float)
        """
        changes = []
        now = time.time()
        snapshot = DirectorySnapshot(self.dropbox_path)
        # remove root entry from snapshot
        del snapshot._inode_to_path[snapshot.inode(self.dropbox_path)]
        del snapshot._stat_info[self.dropbox_path]
        # get lowercase paths
        lowercase_snapshot_paths = {x.lower() for x in snapshot.paths}
        # get modified or added items
        for path in snapshot.paths:
            stats = snapshot.stat_info(path)
            # check if item was created or modified since last sync
            # but before we started the FileEventHandler (~now)
            dbx_path = self.to_dbx_path(path).lower()
            ctime_check = now > stats.st_ctime > self.get_last_sync_for_path(dbx_path)
            # always upload untracked items, check ctime of tracked items
            rev = self.get_local_rev(dbx_path)
            is_new = not rev
            is_modified = rev and ctime_check
            if is_new:
                if snapshot.isdir(path):
                    event = DirCreatedEvent(path)
                else:
                    event = FileCreatedEvent(path)
                changes.append(event)
            elif is_modified:
                if snapshot.isdir(path) and rev == 'folder':
                    event = DirModifiedEvent(path)
                    changes.append(event)
                elif not snapshot.isdir(path) and rev != 'folder':
                    event = FileModifiedEvent(path)
                    changes.append(event)
                elif snapshot.isdir(path):
                    # type changed from file to folder: emit delete + create
                    event0 = FileDeletedEvent(path)
                    event1 = DirCreatedEvent(path)
                    changes += [event0, event1]
                elif not snapshot.isdir(path):
                    # type changed from folder to file: emit delete + create
                    event0 = DirDeletedEvent(path)
                    event1 = FileCreatedEvent(path)
                    changes += [event0, event1]
        # get deleted items
        rev_dict_copy = self.get_rev_index()
        for path in rev_dict_copy:
            # warning: local_path may not be correctly cased
            local_path = self.to_local_path(path)
            if local_path.lower() not in lowercase_snapshot_paths:
                if rev_dict_copy[path] == 'folder':
                    event = DirDeletedEvent(local_path)
                else:
                    event = FileDeletedEvent(local_path)
                changes.append(event)
        # release the potentially large snapshot before returning
        del snapshot
        del lowercase_snapshot_paths
        return changes, now
    def wait_for_local_changes(self, timeout=5, delay=1):
        """
        Waits for local file changes. Returns a list of local changes with at most one
        entry per path.

        :param float timeout: If no changes are detected within timeout (sec), an empty
            list is returned.
        :param float delay: Delay in sec to wait for subsequent changes that may be
            duplicates.
        :returns: (list of file events, time_stamp)
        :rtype: (list, float)
        """
        self.ensure_dropbox_folder_present()
        try:
            # block until the first event arrives (or timeout expires)
            events = [self.fs_events.local_file_event_queue.get(timeout=timeout)]
            local_cursor = time.time()
        except Empty:
            return [], time.time()
        # keep collecting events until idle for `delay`
        while True:
            try:
                events.append(self.fs_events.local_file_event_queue.get(timeout=delay))
                local_cursor = time.time()
            except Empty:
                break
        logger.debug('Retrieved local file events:\n%s', pprint.pformat(events))
        return self._clean_local_events(events), local_cursor
def _filter_excluded_changes_local(self, events):
"""
Checks for and removes file events referring to items which are excluded from
syncing.
:param list events: List of file events.
:returns: (``events_filtered``, ``events_excluded``)
"""
events_filtered = []
events_excluded = []
for event in events:
local_path = get_dest_path(event)
dbx_path = self.to_dbx_path(local_path)
if self.is_excluded(dbx_path):
events_excluded.append(event)
elif self.is_mignore(event):
# moved events with an ignored path are
# already split into deleted, created pairs
events_excluded.append(event)
else:
events_filtered.append(event)
logger.debug('Filtered local file events:\n%s', pprint.pformat(events_filtered))
return events_filtered, events_excluded
def _clean_local_events(self, events):
    """
    Takes local file events within the monitored period and cleans them up so that
    there is only a single event per path. Collapses moved and deleted events of
    folders with those of their children.

    :param events: Iterable of :class:`watchdog.FileSystemEvent`.
    :returns: List of :class:`watchdog.FileSystemEvent`.
    :rtype: list
    """

    # COMBINE EVENTS TO ONE EVENT PER PATH

    # Move events are difficult to combine with other event types, we split them into
    # deleted and created events and recombine them later if none of the paths has
    # other events associated with it or is excluded from sync.

    histories = dict()
    for i, event in enumerate(events):
        if event.event_type == EVENT_TYPE_MOVED:
            deleted, created = split_moved_event(event)
            # tag both halves with the same id so the pair can be recombined below
            deleted.id = i
            created.id = i
            try:
                histories[deleted.src_path].append(deleted)
            except KeyError:
                histories[deleted.src_path] = [deleted]
            try:
                histories[created.src_path].append(created)
            except KeyError:
                histories[created.src_path] = [created]
        else:
            try:
                histories[event.src_path].append(event)
            except KeyError:
                histories[event.src_path] = [event]

    unique_events = []

    for h in histories.values():
        if len(h) == 1:
            unique_events.append(h[0])
        else:
            path = h[0].src_path

            n_created = len([e for e in h if e.event_type == EVENT_TYPE_CREATED])
            n_deleted = len([e for e in h if e.event_type == EVENT_TYPE_DELETED])

            if n_created > n_deleted:  # item was created
                if h[-1].is_directory:
                    unique_events.append(DirCreatedEvent(path))
                else:
                    unique_events.append(FileCreatedEvent(path))
            # BUGFIX: this branch must be `elif`, not `if`. With a plain `if`,
            # a net-created path (n_created > n_deleted) also fell through to
            # the `else` branch below and emitted a second, spurious
            # modified / deleted+created event for the same path — violating
            # the one-event-per-path contract of this method.
            elif n_created < n_deleted:  # item was deleted
                if h[0].is_directory:
                    unique_events.append(DirDeletedEvent(path))
                else:
                    unique_events.append(FileDeletedEvent(path))
            else:  # n_created == n_deleted
                first_created_idx = next((i for i, e in enumerate(h)
                                          if e.event_type == EVENT_TYPE_CREATED), -1)
                first_deleted_idx = next((i for i, e in enumerate(h)
                                          if e.event_type == EVENT_TYPE_DELETED), -1)

                if n_created == 0 or first_deleted_idx < first_created_idx:
                    # item was modified
                    if h[0].is_directory and h[-1].is_directory:
                        unique_events.append(DirModifiedEvent(path))
                    elif not h[0].is_directory and not h[-1].is_directory:
                        unique_events.append(FileModifiedEvent(path))
                    elif h[0].is_directory:
                        # item type changed from folder to file
                        unique_events.append(DirDeletedEvent(path))
                        unique_events.append(FileCreatedEvent(path))
                    elif h[-1].is_directory:
                        # item type changed from file to folder
                        unique_events.append(FileDeletedEvent(path))
                        unique_events.append(DirCreatedEvent(path))
                else:
                    # item was only temporary: created and deleted again
                    pass

    # event order does not matter anymore from this point because we have already
    # consolidated events for every path

    # REMOVE DIR_MODIFIED_EVENTS
    cleaned_events = set(e for e in unique_events if not isinstance(e, DirModifiedEvent))

    # recombine moved events: a deleted / created pair sharing the same id is
    # turned back into a single moved event, unless it must stay split because
    # one of its endpoints is excluded from sync
    moved_events = dict()
    for event in unique_events:
        if hasattr(event, 'id'):
            try:
                moved_events[event.id].append(event)
            except KeyError:
                moved_events[event.id] = [event]

    for event_list in moved_events.values():
        if len(event_list) == 2:
            src_path = next(e.src_path for e in event_list
                            if e.event_type == EVENT_TYPE_DELETED)
            dest_path = next(e.src_path for e in event_list
                             if e.event_type == EVENT_TYPE_CREATED)
            if event_list[0].is_directory:
                new_event = DirMovedEvent(src_path, dest_path)
            else:
                new_event = FileMovedEvent(src_path, dest_path)

            if not self._should_split_excluded(new_event):
                cleaned_events.difference_update(event_list)
                cleaned_events.add(new_event)

    # COMBINE MOVED AND DELETED EVENTS OF FOLDERS AND THEIR CHILDREN INTO ONE EVENT

    # Avoid nested iterations over all events here, they are on the order of O(n^2)
    # which becomes costly then the user moves or deletes folder with a large number
    # of children. Benchmark: aim to stay below 1 sec for 20,000 nested events on
    # representative laptops.

    # 1) combine moved events of folders and their children into one event
    dir_moved_paths = set((e.src_path, e.dest_path) for e in cleaned_events
                          if isinstance(e, DirMovedEvent))

    if len(dir_moved_paths) > 0:
        child_moved_events = dict()
        for path in dir_moved_paths:
            child_moved_events[path] = []

        for event in cleaned_events:
            if event.event_type == EVENT_TYPE_MOVED:
                try:
                    dirnames = (osp.dirname(event.src_path),
                                osp.dirname(event.dest_path))
                    child_moved_events[dirnames].append(event)
                except KeyError:
                    pass

        for event_list in child_moved_events.values():
            cleaned_events.difference_update(event_list)

    # 2) combine deleted events of folders and their children to one event
    dir_deleted_paths = set(e.src_path for e in cleaned_events
                            if isinstance(e, DirDeletedEvent))

    if len(dir_deleted_paths) > 0:
        child_deleted_events = dict()
        for path in dir_deleted_paths:
            child_deleted_events[path] = []

        for event in cleaned_events:
            if event.event_type == EVENT_TYPE_DELETED:
                try:
                    dirname = osp.dirname(event.src_path)
                    child_deleted_events[dirname].append(event)
                except KeyError:
                    pass

        for event_list in child_deleted_events.values():
            cleaned_events.difference_update(event_list)

    logger.debug('Cleaned up local file events:\n%s', pprint.pformat(cleaned_events))

    del events
    del unique_events

    return list(cleaned_events)
def _should_split_excluded(self, event):
    """
    Returns ``True`` if a moved event should remain split into separate
    deleted / created events because one of its endpoints is excluded from sync.

    :param FileSystemEvent event: Moved event.
    :raises: :class:`ValueError` if the event is not a moved event.
    :rtype: bool
    """
    if event.event_type != EVENT_TYPE_MOVED:
        raise ValueError('Can only split moved events')

    dbx_src_path = self.to_dbx_path(event.src_path)
    dbx_dest_path = self.to_dbx_path(event.dest_path)

    # NOTE: is_excluded is fed the *local* paths while is_excluded_by_user
    # receives the Dropbox paths — preserved from the original implementation.
    excluded = (self.is_excluded(event.src_path)
                or self.is_excluded(event.dest_path)
                or self.is_excluded_by_user(dbx_src_path)
                or self.is_excluded_by_user(dbx_dest_path))

    if excluded:
        return True

    return self._should_split_mignore(event)
def _should_split_mignore(self, event):
    """
    Returns ``True`` if a moved event should stay split because either endpoint
    matches an mignore pattern.

    :param FileSystemEvent event: Moved event.
    :rtype: bool
    """
    # no patterns configured -> nothing can match, never split
    if len(self.mignore_rules.patterns) == 0:
        return False

    # same short-circuit order as before: src first, then dest
    for local_path in (event.src_path, event.dest_path):
        if self._is_mignore_path(self.to_dbx_path(local_path), event.is_directory):
            return True

    return False
def _handle_case_conflict(self, event):
    """
    Checks for other items in the same directory with the same name but a
    different case. Renames items if necessary. Only needed for case-sensitive
    file systems.

    :param FileSystemEvent event: Created or moved event.
    :returns: ``True`` if a case conflict was found and the item renamed,
        ``False`` otherwise.
    :rtype: bool
    """
    if not IS_FS_CASE_SENSITIVE:
        return False

    if event.event_type not in (EVENT_TYPE_CREATED, EVENT_TYPE_MOVED):
        return False

    # the path which was created (src_path, or dest_path for moved events)
    dest_path = get_dest_path(event)
    dirname, basename = osp.split(dest_path)

    # guard clause: no sibling with a different casing -> nothing to do
    if len(path_exists_case_insensitive(basename, root=dirname)) <= 1:
        return False

    dest_path_cc = generate_cc_name(dest_path, suffix='case conflict')

    # suppress the deleted / moved events generated by our own rename so they
    # are not picked up by the observer and synced back
    with self.fs_events.ignore(dest_path, recursive=osp.isdir(dest_path),
                               event_types=(EVENT_TYPE_DELETED,
                                            EVENT_TYPE_MOVED)):
        exc = move(dest_path, dest_path_cc)
        if exc:
            raise os_to_maestral_error(exc, local_path=dest_path_cc)

    logger.info('Case conflict: renamed "%s" to "%s"', dest_path, dest_path_cc)
    return True
def _handle_selective_sync_conflict(self, event):
    """
    Checks if a created or moved item is excluded from syncing by selective sync
    ("excluded by user") and, if so, renames the local item with a
    'selective sync conflict' suffix so that it is not mistaken for the synced
    item.

    (The previous docstring described the case-conflict check — it was a
    copy-paste error; this method handles selective-sync exclusions.)

    :param FileSystemEvent event: Created or moved event.
    :returns: ``True`` or ``False``.
    :rtype: bool
    """
    if event.event_type not in (EVENT_TYPE_CREATED, EVENT_TYPE_MOVED):
        return False

    # the path which was created (src_path, or dest_path for moved events)
    local_path = get_dest_path(event)
    dbx_path = self.to_dbx_path(local_path)

    if self.is_excluded_by_user(dbx_path):
        local_path_cc = generate_cc_name(local_path,
                                         suffix='selective sync conflict')
        # suppress the events generated by our own rename so they are not
        # synced back
        with self.fs_events.ignore(local_path, recursive=osp.isdir(local_path),
                                   event_types=(EVENT_TYPE_DELETED, EVENT_TYPE_MOVED)):
            exc = move(local_path, local_path_cc)
            if exc:
                raise os_to_maestral_error(exc, local_path=local_path_cc)

        logger.info('Selective sync conflict: renamed "%s" to "%s"',
                    local_path, local_path_cc)
        return True
    else:
        return False
def apply_local_changes(self, events, local_cursor):
    """
    Applies locally detected changes to the remote Dropbox.

    :param iterable events: List of local file system events.
    :param float local_cursor: Time stamp of last event in ``events``.
    """
    events, _ = self._filter_excluded_changes_local(events)

    # classify events; directory-modified events match no branch and are dropped
    deletions = []
    dir_creations = []
    dir_moves = []
    file_changes = []

    for e in events:
        if e.event_type == EVENT_TYPE_DELETED:
            deletions.append(e)
        elif e.is_directory and e.event_type == EVENT_TYPE_CREATED:
            dir_creations.append(e)
        elif e.is_directory and e.event_type == EVENT_TYPE_MOVED:
            dir_moves.append(e)
        elif not e.is_directory and e.event_type != EVENT_TYPE_DELETED:
            file_changes.append(e)

    # update queues, preserving the original ordering:
    # deleted, dir_created, dir_moved, file
    for e in itertools.chain(deletions, dir_creations, dir_moves, file_changes):
        self.queued_for_upload.put(get_dest_path(e))

    # apply deleted events first, folder moved events second —
    # neither event type requires an actual upload
    if deletions:
        logger.info('Uploading deletions...')
        for event in deletions:
            self.create_remote_entry(event)

    if dir_moves:
        logger.info('Moving folders...')
        for event in dir_moves:
            self.create_remote_entry(event)

    # apply file and created-folder events in parallel since order does not matter
    success = []
    last_emit = time.time()
    with ThreadPoolExecutor(max_workers=self._num_threads,
                            thread_name_prefix='maestral-upload-pool') as executor:
        fs = (executor.submit(self.create_remote_entry, e) for e in
              itertools.chain(file_changes, dir_creations))
        n_files = len(file_changes) + len(dir_creations)
        for f, n in zip(as_completed(fs), range(1, n_files + 1)):
            if time.time() - last_emit > 1 or n in (1, n_files):
                # emit message at maximum every second
                logger.info(f'Uploading {n}/{n_files}...')
                last_emit = time.time()
            success.append(f.result())

    if all(success):
        self.last_sync = local_cursor  # save local cursor
        self._clean_and_save_rev_file()
@catch_sync_issues
def create_remote_entry(self, event):
    """
    Applies a local file system event to the remote Dropbox and clears any existing
    sync errors belonging to that path. Any :class:`errors.MaestralApiError` will be
    caught and logged as appropriate.

    :param FileSystemEvent event: Watchdog file system event.
    """
    self._slow_down()

    local_path_from = event.src_path
    local_path_to = get_dest_path(event)

    # book keeping
    remove_from_queue(self.queued_for_upload, local_path_from, local_path_to)
    self.clear_sync_error(local_path=local_path_to)
    self.clear_sync_error(local_path=local_path_from)

    with InQueue(self.queue_uploading, local_path_to):
        # BUGFIX: compare event types with `==` instead of `is`. The event
        # type constants are plain strings; identity comparison only works by
        # accident of CPython string interning and is not guaranteed.
        if event.event_type == EVENT_TYPE_CREATED:
            self._on_created(event)
        elif event.event_type == EVENT_TYPE_MOVED:
            self._on_moved(event)
        elif event.event_type == EVENT_TYPE_MODIFIED:
            self._on_modified(event)
        elif event.event_type == EVENT_TYPE_DELETED:
            self._on_deleted(event)
@staticmethod
def _wait_for_creation(path):
    """
    Wait for a file at a path to be created or modified: returns once two size
    probes taken 0.2 sec apart agree, or immediately if the path cannot be
    stat'ed.

    :param str path: Absolute path to file
    """
    try:
        while True:
            size_before = osp.getsize(path)
            time.sleep(0.2)
            if osp.getsize(path) == size_before:
                return
    except OSError:
        # path vanished or is inaccessible — nothing to wait for
        return
def _on_moved(self, event):
    """
    Call when a local item is moved.

    Keep in mind that we may be moving a whole tree of items. But it is better to
    deal with the complexity than to delete and re-upload everything. Thankfully,
    in case of directories, we always process the top-level first. Trying to move
    the children will then be delegated to `on_create` (because the old item no
    longer lives on Dropbox) and that won't upload anything because file contents
    have remained the same.

    :param FileSystemEvent event: Watchdog file system event.
    :raises: :class:`errors.MaestralApiError`
    """
    local_path_from = event.src_path
    local_path_to = event.dest_path

    dbx_path_from = self.to_dbx_path(local_path_from)
    dbx_path_to = self.to_dbx_path(local_path_to)

    # excluded or case-conflicted destinations are renamed out of the way
    # instead of being uploaded
    if self._handle_selective_sync_conflict(event):
        return
    if self._handle_case_conflict(event):
        return

    md_from_old = self.client.get_metadata(dbx_path_from)

    # If not on Dropbox, e.g., because its old name was invalid,
    # create it instead of moving it.
    if not md_from_old:
        if isinstance(event, DirMovedEvent):
            new_event = DirCreatedEvent(local_path_to)
        else:
            new_event = FileCreatedEvent(local_path_to)
        return self._on_created(new_event)

    md_to_new = self.client.move(dbx_path_from, dbx_path_to, autorename=True)

    # the old path is no longer tracked
    self.set_local_rev(dbx_path_from, None)

    # handle remote conflicts
    if md_to_new.path_lower != dbx_path_to.lower():
        # Dropbox renamed the destination; revs stay unset so the conflicting
        # copy gets resolved on the next download sync
        logger.info('Upload conflict: renamed "%s" to "%s"',
                    dbx_path_to, md_to_new.path_display)
    else:
        self._set_local_rev_recursive(md_to_new)
        logger.debug('Moved "%s" to "%s" on Dropbox', dbx_path_from, dbx_path_to)
def _set_local_rev_recursive(self, md):
    """
    Stores the local rev for the given metadata entry and, for folders, for all
    entries listed below it on Dropbox.

    :param Metadata md: Dropbox file or folder metadata.
    """
    if isinstance(md, FileMetadata):
        self.set_local_rev(md.path_lower, md.rev)
    elif isinstance(md, FolderMetadata):
        self.set_local_rev(md.path_lower, 'folder')
        result = self.client.list_folder(md.path_lower, recursive=True)
        # use a distinct loop variable instead of shadowing the `md` parameter
        for entry in result.entries:
            if isinstance(entry, FileMetadata):
                self.set_local_rev(entry.path_lower, entry.rev)
            elif isinstance(entry, FolderMetadata):
                self.set_local_rev(entry.path_lower, 'folder')
def _on_created(self, event):
    """
    Call when a local item is created.

    :param FileSystemEvent event: Watchdog file system event.
    :raises: :class:`errors.MaestralApiError`
    """
    local_path = event.src_path

    if self._handle_selective_sync_conflict(event):
        return
    if self._handle_case_conflict(event):
        return

    dbx_path = self.to_dbx_path(local_path)

    md_old = self.client.get_metadata(dbx_path)
    self._wait_for_creation(local_path)

    if event.is_directory:
        if isinstance(md_old, FolderMetadata):
            # folder already exists remotely, just record its rev
            self.set_local_rev(dbx_path, 'folder')
            return
        else:
            md_new = self.client.make_dir(dbx_path, autorename=True)
    else:
        # check if file already exists with identical content
        if isinstance(md_old, FileMetadata):
            local_hash = get_local_hash(local_path)
            if local_hash == md_old.content_hash:
                # file hashes are identical, do not upload
                self.set_local_rev(md_old.path_lower, md_old.rev)
                return

        rev = self.get_local_rev(dbx_path)
        if not rev:
            # add a new file, let Dropbox rename it if something is in the way
            mode = dropbox.files.WriteMode('add')
        elif rev == 'folder':
            # try to overwrite the destination
            mode = dropbox.files.WriteMode('overwrite')
        else:
            # try to update the given rev, create conflict otherwise
            mode = dropbox.files.WriteMode('update', rev)

        try:
            md_new = self.client.upload(local_path, dbx_path,
                                        autorename=True, mode=mode)
        except NotFoundError:
            # BUGFIX: removed stray "pip install" text that had been pasted
            # into the middle of this log message.
            logger.debug('Could not upload "%s": the item does not exist',
                         event.src_path)
            return

    if md_new.path_lower != dbx_path.lower():
        # Dropbox created a conflicting copy; mirror the rename locally while
        # suppressing the resulting file system events
        local_path_cc = self.to_local_path(md_new.path_display)
        with self.fs_events.ignore(local_path, local_path_cc,
                                   recursive=osp.isdir(local_path)):
            exc = move(local_path, local_path_cc)
            if exc:
                raise os_to_maestral_error(exc, local_path=local_path_cc,
                                           dbx_path=md_new.path_display)

        # Delete revs of old path but don't set revs for new path here. This will
        # force conflict resolution on download in case of intermittent changes.
        self.set_local_rev(dbx_path, None)
        logger.debug('Upload conflict: renamed "%s" to "%s"',
                     dbx_path, md_new.path_lower)
    else:
        rev = getattr(md_new, 'rev', 'folder')
        self.set_local_rev(md_new.path_lower, rev)
        logger.debug('Created "%s" on Dropbox', dbx_path)
def _on_created_folders_batch(self, events):
    """
    Creates multiple folders on Dropbox in a batch request. We currently don't use
    this because folders are created in parallel anyways and we perform conflict
    checks before uploading each folder. This is currently not used for syncing.

    :param list[DirCreatedEvent] events: List of directory creates events.
    :raises: :class:`errors.MaestralApiError`
    """
    local_paths = []
    dbx_paths = []

    for e in events:
        local_path = e.src_path
        dbx_path = self.to_dbx_path(local_path)
        if not e.is_directory:
            raise ValueError('All events must be of type '
                             'watchdog.events.DirCreatedEvent')

        # only conflict-free paths are added to the batch
        if (not self._handle_case_conflict(e)
                and not self._handle_selective_sync_conflict(e)):
            md_old = self.client.get_metadata(dbx_path)
            if isinstance(md_old, FolderMetadata):
                # folder already exists remotely, just record its rev
                self.set_local_rev(dbx_path, 'folder')

            local_paths.append(local_path)
            dbx_paths.append(dbx_path)

    res_list = self.client.make_dir_batch(dbx_paths, autorename=True)

    # res_list entries correspond 1:1 to the requested dbx_paths
    for res, dbx_path, local_path in zip(res_list, dbx_paths, local_paths):
        if isinstance(res, Metadata):
            if res.path_lower != dbx_path.lower():
                # Dropbox renamed the folder (conflict); mirror the rename
                # locally while suppressing our own file system events
                local_path_cc = self.to_local_path(res.path_display)
                with self.fs_events.ignore(local_path, local_path_cc,
                                           recursive=osp.isdir(local_path)):
                    exc = move(local_path, local_path_cc)
                    if exc:
                        raise os_to_maestral_error(exc, local_path=local_path_cc,
                                                   dbx_path=res.path_display)

                # Delete revs of old path but don't set revs for new path here. This
                # will force conflict resolution on download in case of intermittent
                # changes.
                self.set_local_rev(dbx_path, None)
                logger.debug('Upload conflict: renamed "%s" to "%s"',
                             dbx_path, res.path_lower)
            else:
                self.set_local_rev(res.path_lower, 'folder')
                logger.debug('Created "%s" on Dropbox', dbx_path)

        elif isinstance(res, SyncError):
            # attach the local path so the error can be surfaced to the user
            res.local_path = local_path
            self.sync_errors.put(res)
def _on_modified(self, event):
    """
    Call when a local item is modified.

    :param FileSystemEvent event: Watchdog file system event.
    :raises: :class:`errors.MaestralApiError`
    """
    # guard clause: directory modified events are ignored
    if event.is_directory:
        return

    local_path = event.src_path
    dbx_path = self.to_dbx_path(local_path)

    self._wait_for_creation(local_path)

    # check if item already exists with identical content
    md_old = self.client.get_metadata(dbx_path)
    if isinstance(md_old, FileMetadata):
        local_hash = get_local_hash(local_path)
        if local_hash == md_old.content_hash:
            # file hashes are identical, do not upload
            self.set_local_rev(md_old.path_lower, md_old.rev)
            logger.debug('Modification of "%s" detected but file content is '
                         'the same as on Dropbox', dbx_path)
            return

    # choose the write mode from the locally stored rev
    rev = self.get_local_rev(dbx_path)
    if rev == 'folder':
        mode = dropbox.files.WriteMode('overwrite')
    elif not rev:
        logger.debug('"%s" appears to have been modified but cannot '
                     'find old revision', dbx_path)
        mode = dropbox.files.WriteMode('add')
    else:
        mode = dropbox.files.WriteMode('update', rev)

    try:
        md_new = self.client.upload(local_path, dbx_path,
                                    autorename=True, mode=mode)
    except NotFoundError:
        logger.debug('Could not upload "%s": the item does not exist', dbx_path)
        return

    if md_new.path_lower != dbx_path.lower():
        # Dropbox created a conflicting copy; mirror the rename locally
        local_path_cc = self.to_local_path(md_new.path_display)
        with self.fs_events.ignore(local_path, local_path_cc):
            try:
                os.rename(local_path, local_path_cc)
            except OSError:
                delete(local_path)

        # Delete revs of old path but don't set revs for new path here. This will
        # force conflict resolution on download in case of intermittent changes.
        self.set_local_rev(dbx_path, None)
        logger.debug('Upload conflict: renamed "%s" to "%s"',
                     dbx_path, md_new.path_lower)
    else:
        self.set_local_rev(md_new.path_lower, md_new.rev)
        logger.debug('Uploaded modified "%s" to Dropbox', md_new.path_lower)
def _on_deleted(self, event):
    """
    Call when local item is deleted. We try not to delete remote items which have been
    modified since the last sync.

    :param FileSystemEvent event: Watchdog file system event.
    :raises: :class:`errors.MaestralApiError`
    """
    path = event.src_path
    dbx_path = self.to_dbx_path(path)

    if self.is_excluded_by_user(dbx_path):
        logger.debug('Not deleting "%s": is excluded by selective sync', dbx_path)
        return

    local_rev = self.get_local_rev(dbx_path)
    is_file = not event.is_directory
    is_directory = event.is_directory

    # include_deleted=True distinguishes "deleted remotely" from "never existed"
    md = self.client.get_metadata(dbx_path, include_deleted=True)

    if is_directory and isinstance(md, FileMetadata):
        # a folder was deleted locally but the remote item is a file:
        # compare timestamps before removing anything remotely
        logger.debug('Expected folder at "%s" but found a file instead, checking '
                     'which one is newer', md.path_display)
        # don't delete a remote file if it was modified since last sync
        if md.server_modified.timestamp() >= self.get_last_sync_for_path(dbx_path):
            logger.debug('Skipping deletion: remote item "%s" has been modified '
                         'since last sync', md.path_display)
            # mark the local item as untracked instead
            self.set_local_rev(dbx_path, None)
            return

    if is_file and isinstance(md, FolderMetadata):
        # don't delete a remote folder if we were expecting a file
        # TODO: Delete the folder if its children did not change since last sync.
        #   Is there a way of achieving this without listing the folder or listing
        #   all changes and checking when they occurred?
        logger.debug('Skipping deletion: expected file at "%s" but found a '
                     'folder instead', md.path_display)
        # mark the local item as untracked instead
        self.set_local_rev(dbx_path, None)
        return

    try:
        # will only perform delete if Dropbox remote rev matches `local_rev`
        self.client.remove(dbx_path, parent_rev=local_rev if is_file else None)
    except NotFoundError:
        logger.debug('Could not delete "%s": the item no longer exists on Dropbox',
                     dbx_path)
    except PathError:
        logger.debug('Could not delete "%s": the item has been changed '
                     'since last sync', dbx_path)

    # remove revision metadata
    self.set_local_rev(dbx_path, None)
# ==== Download sync =================================================================
@catch_sync_issues
def get_remote_folder(self, dbx_path='/', ignore_excluded=True):
    """
    Gets all files/folders from Dropbox and writes them to the local folder
    :attr:`dropbox_path`. Call this method on first run of the Maestral. Indexing
    and downloading may take several minutes, depending on the size of the user's
    Dropbox folder.

    :param str dbx_path: Path relative to Dropbox folder. Defaults to root ('/').
    :param bool ignore_excluded: If ``True``, do not index excluded folders.
    :returns: ``True`` on success, ``False`` otherwise.
    :rtype: bool
    """
    dbx_path = dbx_path or '/'
    is_dbx_root = dbx_path == '/'

    if is_dbx_root:
        logger.info('Downloading your Dropbox')
    else:
        logger.info('Downloading %s', dbx_path)

    # when no subfolder of dbx_path is excluded, index and download
    # everything in one recursive listing
    if not any(is_child(folder, dbx_path) for folder in self.excluded_items):
        ignore_excluded = False

    # get a cursor for the folder before listing it, so that changes made
    # during the download are picked up by the next sync cycle
    cursor = self.client.get_latest_cursor(dbx_path)

    root_result = self.client.list_folder(dbx_path, recursive=(not ignore_excluded),
                                          include_deleted=False, limit=500)

    # download top-level folders / files first
    logger.info(SYNCING)
    _, top_level_ok = self.apply_remote_changes(root_result, save_cursor=False)

    results = [top_level_ok]

    if ignore_excluded:
        # recurse into sub-folders unless they are excluded by the user
        for entry in root_result.entries:
            if isinstance(entry, FolderMetadata) and not self.is_excluded_by_user(
                    entry.path_display):
                results.append(self.get_remote_folder(entry.path_display))

    if is_dbx_root:
        self.last_cursor = cursor
        self.last_reindex = time.time()

    return all(results)
def get_remote_item(self, dbx_path):
    """
    Downloads a remote file or folder and updates its local rev. If the remote item no
    longer exists, the corresponding local item will be deleted. Given paths will be
    added to the (persistent) pending_downloads list for the duration of the download
    so that they will be resumed in case Maestral is terminated during the download.

    If ``dbx_path`` refers to a folder, the download will be handled by
    :meth:`get_remote_folder`. If it refers to a single file, the download will be
    performed by :meth:`create_local_entry`.

    This method can be used to fetch individual items outside of the regular sync
    cycle, for instance when including a new file or folder.

    :param str dbx_path: Path relative to Dropbox folder.
    :returns: ``True`` on success, ``False`` otherwise.
    :rtype: bool
    """
    # persist the path so an interrupted download is resumed on restart
    self.pending_downloads.add(dbx_path)
    md = self.client.get_metadata(dbx_path, include_deleted=True)

    # NOTE(review): if the path never existed remotely, `md` is presumably
    # falsy and `md.path_display` below would raise — confirm that callers
    # only pass paths known to exist (or have existed) on Dropbox.
    if isinstance(md, FolderMetadata):
        res = self.get_remote_folder(dbx_path)
    else:  # FileMetadata or DeletedMetadata
        with InQueue(self.queue_downloading, md.path_display):
            res = self.create_local_entry(md)

    self.pending_downloads.discard(dbx_path)
    return res
@catch_sync_issues
def wait_for_remote_changes(self, last_cursor, timeout=40, delay=2):
    """
    Blocks until changes to the remote Dropbox are available.

    :param str last_cursor: Cursor form last sync.
    :param int timeout: Timeout in seconds before returning even if there are no
        changes. Dropbox adds random jitter of up to 90 sec to this value.
    :param float delay: Delay in sec to wait for subsequent changes that may be
        duplicates. This delay is typically only necessary folders are shared /
        un-shared with other Dropbox accounts.
    :returns: ``True`` if changes are available, ``False`` otherwise (as
        reported by the client's longpoll call).
    """
    logger.debug('Waiting for remote changes since cursor:\n%s', last_cursor)
    has_changes = self.client.wait_for_remote_changes(last_cursor, timeout=timeout)
    # wait a little longer so duplicate follow-up changes are batched into the
    # same sync cycle
    time.sleep(delay)
    logger.debug('Detected remote changes: %s', has_changes)
    return has_changes
@catch_sync_issues
def list_remote_changes(self, last_cursor):
    """
    Lists remote changes since the last download sync.

    :param str last_cursor: Cursor from last download sync.
    :returns: Remote changes, already cleaned up to one entry per path.
    :rtype: :class:`dropbox.files.ListFolderResult`
    """
    changes = self.client.list_remote_changes(last_cursor)
    logger.debug('Listed remote changes:\n%s', entries_to_str(changes.entries))
    # collapse the raw change list before it is applied locally
    clean_changes = self._clean_remote_changes(changes)
    logger.debug('Cleaned remote changes:\n%s', entries_to_str(clean_changes.entries))
    return clean_changes
def _filter_excluded_changes_remote(self, changes):
    """Removes all excluded items from the given list of changes.

    :param changes: :class:`dropbox.files.ListFolderResult` instance.
    :returns: (``changes_filtered``, ``changes_discarded``)
    :rtype: tuple[:class:`dropbox.files.ListFolderResult`]
    """
    kept = []
    discarded = []

    for entry in changes.entries:
        # same predicate order as before: selective sync first, then the
        # general exclusion rules
        is_excluded = (self.is_excluded_by_user(entry.path_lower)
                       or self.is_excluded(entry.path_lower))
        (discarded if is_excluded else kept).append(entry)

    changes_filtered = dropbox.files.ListFolderResult(
        entries=kept, cursor=changes.cursor, has_more=False)
    changes_discarded = dropbox.files.ListFolderResult(
        entries=discarded, cursor=changes.cursor, has_more=False)

    return changes_filtered, changes_discarded
def apply_remote_changes(self, changes, save_cursor=True):
    """
    Applies remote changes to local folder. Call this on the result of
    :meth:`list_remote_changes`. The saved cursor is updated after a set of changes
    has been successfully applied. Entries in the local index are created after
    successful completion.

    :param changes: :class:`dropbox.files.ListFolderResult` instance or ``False`` if
        requests failed.
    :param bool save_cursor: If True, :attr:`last_cursor` will be updated from the
        last applied changes. Take care to only save a cursors which represent the
        state of the entire Dropbox.
    :returns: List of changes that were made to local files and bool indicating if all
        download syncs were successful. NOTE(review): when ``changes`` is falsy a
        bare ``False`` is returned instead of the documented tuple — callers must
        handle both shapes.
    :rtype: (list, bool)
    """
    if not changes:
        return False

    # filter out excluded changes
    changes_included, changes_excluded = self._filter_excluded_changes_remote(changes)

    # remove all deleted items from the excluded list
    _, _, deleted_excluded = self._separate_remote_entry_types(changes_excluded)
    for d in deleted_excluded:
        new_excluded = [item for item in self.excluded_items
                        if not is_equal_or_child(item, d.path_lower)]
        self.excluded_items = new_excluded

    # sort changes into folders, files and deleted
    folders, files, deleted = self._separate_remote_entry_types(changes_included)

    # sort according to path hierarchy
    # do not create sub-folder / file before parent exists
    deleted.sort(key=lambda x: x.path_display.count('/'))
    folders.sort(key=lambda x: x.path_display.count('/'))
    files.sort(key=lambda x: x.path_display.count('/'))

    # announce everything that is about to be applied
    for md in deleted + folders + files:
        self.queued_for_download.put(md.path_display)

    downloaded = []  # local list of all changes

    # apply deleted items
    if deleted:
        logger.info('Applying deletions...')
    for item in deleted:
        res = self.create_local_entry(item)
        downloaded.append(res)

    # create local folders, start with top-level and work your way down
    if folders:
        logger.info('Creating folders...')
    for folder in folders:
        res = self.create_local_entry(folder)
        downloaded.append(res)

    # apply created files in parallel; order no longer matters since parent
    # folders already exist
    n_files = len(files)
    last_emit = time.time()
    with ThreadPoolExecutor(max_workers=self._num_threads,
                            thread_name_prefix='maestral-download-pool') as executor:
        fs = (executor.submit(self.create_local_entry, file) for file in files)
        for f, n in zip(as_completed(fs), range(1, n_files + 1)):
            if time.time() - last_emit > 1 or n in (1, n_files):
                # emit messages at maximum every second
                logger.info(f'Downloading {n}/{n_files}...')
                last_emit = time.time()
            downloaded.append(f.result())

    success = all(downloaded)

    if save_cursor:
        self.last_cursor = changes.cursor
        self._clean_and_save_rev_file()

    # `downloaded` may contain plain booleans (skipped entries) next to
    # metadata objects; only the metadata entries are reported back
    return [entry for entry in downloaded if not isinstance(entry, bool)], success
def check_download_conflict(self, md):
    """
    Check if a local item is conflicting with remote change. The equivalent check when
    uploading and a change will be carried out by Dropbox itself.

    Checks are carried out against our index, reflecting the latest sync state.

    :param Metadata md: Dropbox SDK metadata.
    :returns: Conflict check result.
    :rtype: :class:`Conflict`
    :raises: :class:`errors.MaestralApiError`
    """
    # derive remote rev and content hash from the metadata type
    if isinstance(md, FileMetadata):
        remote_rev, remote_hash = md.rev, md.content_hash
    elif isinstance(md, FolderMetadata):
        remote_rev, remote_hash = 'folder', 'folder'
    else:  # DeletedMetadata
        remote_rev, remote_hash = None, None

    dbx_path = md.path_lower
    local_path = self.to_local_path(md.path_display)
    local_rev = self.get_local_rev(dbx_path)

    if remote_rev == local_rev:
        # Local change has the same rev. May be newer and
        # not yet synced or identical. Don't overwrite.
        logger.debug('Equal revs for "%s": local item is the same or newer '
                     'than on Dropbox', dbx_path)
        return Conflict.LocalNewerOrIdentical

    # From here on the revs differ — the Dropbox server version is likely newer.
    # If the local version has been modified while sync was stopped,
    # those changes will be uploaded before any downloads can begin.
    # Conflict resolution will then be handled by Dropbox.
    # If the local version has been modified while sync was running
    # but changes were not uploaded before the remote version was
    # changed as well, the local ctime will be newer than last_sync:
    # (a) The upload of the changed file has already started. Upload thread
    #     will hold the lock and we won't be here checking for conflicts.
    # (b) The upload has not started yet. Manually check for conflict.
    local_hash = get_local_hash(local_path)

    if remote_hash == local_hash:
        logger.debug('Equal content hashes for "%s": no conflict', dbx_path)
        self.set_local_rev(dbx_path, remote_rev)
        return Conflict.Identical

    if self.get_ctime(local_path) <= self.get_last_sync_for_path(dbx_path):
        logger.debug('Ctime is older than last sync for "%s": remote item '
                     'is newer', dbx_path)
        return Conflict.RemoteNewer

    if not remote_rev:
        logger.debug('No remote rev for "%s": Local item has been modified '
                     'since remote deletion', dbx_path)
        return Conflict.LocalNewerOrIdentical

    logger.debug('Ctime is newer than last sync for "%s": conflict', dbx_path)
    return Conflict.Conflict
def get_ctime(self, local_path, ignore_excluded=True):
    """
    Returns the ctime of a local item or -1.0 if there is nothing at the path. If
    the item is a directory, return the largest ctime of itself and its children.

    :param str local_path: Absolute path on local drive.
    :param bool ignore_excluded: If ``True``, the ctimes of children for which
        :meth:`is_excluded` evaluates to ``True`` are disregarded. This is only
        relevant if ``local_path`` points to a directory and has no effect if it
        points to a file.
    :returns: Ctime or -1.0.
    :rtype: float
    """
    try:
        stat = os.stat(local_path)
        if S_ISDIR(stat.st_mode):
            ctime = stat.st_ctime
            with os.scandir(local_path) as it:
                for entry in it:
                    ignore = ignore_excluded and self.is_excluded(entry.name)
                    if not ignore:
                        ctime = max(ctime, entry.stat().st_ctime)
            return ctime
        else:
            # reuse the stat result from above instead of calling os.stat a
            # second time: avoids a redundant syscall and a TOCTOU window in
            # which the file could disappear between the two stats
            return stat.st_ctime
    except FileNotFoundError:
        return -1.0
def notify_user(self, changes):
    """
    Sends a system notification for file changes.

    :param list changes: List of Dropbox metadata which has been applied locally.
    """

    # get number of remote changes
    n_changed = len(changes)

    if n_changed == 0:
        return

    user_name = None
    change_type = 'changed'  # default when no branch below matches

    # find out who changed the item(s); get the user name if it's only a single user
    dbid_list = set(self._get_modified_by_dbid(md) for md in changes)
    if len(dbid_list) == 1:
        # all files have been modified by the same user
        dbid = dbid_list.pop()
        if dbid == self._conf.get('account', 'account_id'):
            user_name = 'You'
        else:
            account_info = self.client.get_account_info(dbid)
            user_name = account_info.name.display_name

    if n_changed == 1:
        # display user name, file name, and type of change
        md = changes[0]
        file_name = os.path.basename(md.path_display)

        if isinstance(md, DeletedMetadata):
            change_type = 'removed'
        elif isinstance(md, FileMetadata):
            # a file with exactly one server revision must be newly added
            revs = self.client.list_revisions(md.path_lower, limit=2)
            is_new_file = len(revs.entries) == 1
            change_type = 'added' if is_new_file else 'changed'
        elif isinstance(md, FolderMetadata):
            change_type = 'added'

    else:
        # display user name if unique, number of files, and type of change;
        # mixed change types fall back to '<n> items' / 'changed'
        file_name = f'{n_changed} items'
        if all(isinstance(x, DeletedMetadata) for x in changes):
            change_type = 'removed'
        elif all(isinstance(x, FolderMetadata) for x in changes):
            change_type = 'added'
            file_name = f'{n_changed} folders'
        elif all(isinstance(x, FileMetadata) for x in changes):
            file_name = f'{n_changed} files'

    if user_name:
        msg = f'{user_name} {change_type} {file_name}'
    else:
        msg = f'{file_name} {change_type}'

    self.notifier.notify(msg, level=FILECHANGE)
def _get_modified_by_dbid(self, md):
    """
    Returns the Dropbox ID of the user who modified a shared item or our own ID if the
    item was not shared.

    :param Metadata md: Dropbox file, folder or deleted metadata.
    :return: Dropbox ID
    :rtype: str
    """
    _missing = object()
    # ``sharing_info`` may be absent or ``None``; either way fall back below
    sharing_info = getattr(md, 'sharing_info', _missing)
    modified_by = getattr(sharing_info, 'modified_by', _missing)
    if modified_by is _missing:
        # not a shared item: attribute the change to our own account
        return self._conf.get('account', 'account_id')
    return modified_by
@staticmethod
def _separate_remote_entry_types(result):
    """
    Sorts entries in :class:`dropbox.files.ListFolderResult` into
    FolderMetadata, FileMetadata and DeletedMetadata.

    :param result: Dropbox folder listing result to partition.
    :returns: Tuple of (folders, files, deleted) containing instances of
        :class:`dropbox.files.FolderMetadata`, :class:`dropbox.files.FileMetadata`,
        and :class:`dropbox.files.DeletedMetadata` respectively.
    :rtype: tuple
    """
    # use three plain lists instead of a dict previously named `sorted`,
    # which shadowed the builtin of the same name
    folders, files, deleted = [], [], []
    for entry in result.entries:
        if isinstance(entry, FolderMetadata):
            folders.append(entry)
        elif isinstance(entry, FileMetadata):
            files.append(entry)
        elif isinstance(entry, DeletedMetadata):
            deleted.append(entry)
    return folders, files, deleted
def _clean_remote_changes(self, changes):
    """
    Takes remote file events since the last sync and cleans them up so that there
    is only a single event per path.

    Dropbox will sometimes report multiple changes per path. One such instance is
    when sharing a folder: ``files/list_folder/continue`` will report the shared
    folder and its children as deleted and then created because the folder *is*
    actually deleted from the user's Dropbox and recreated as a shared folder which
    then gets mounted to the user's Dropbox. Ideally, we want to deal with this
    without re-downloading all its contents.

    :param changes: :class:`dropbox.files.ListFolderResult`
    :returns: Cleaned up changes with a single Metadata entry per path.
    :rtype: :class:`dropbox.files.ListFolderResult`
    """

    # Note: we won't have to deal with modified or moved events,
    # Dropbox only reports DeletedMetadata or FileMetadata / FolderMetadata

    # group the events by lower-case path, preserving their order
    histories = dict()
    for entry in changes.entries:
        try:
            histories[entry.path_lower].append(entry)
        except KeyError:
            histories[entry.path_lower] = [entry]

    new_entries = []

    for h in histories.values():
        if len(h) == 1:
            new_entries.extend(h)
        else:
            last_event = h[-1]
            # 'folder' is the sentinel rev stored in the local index for directories
            was_dir = self.get_local_rev(last_event.path_lower) == 'folder'

            # Dropbox guarantees that applying events in the provided order
            # will reproduce the state in the cloud. We therefore keep only
            # the last event, unless there is a change in item type.
            if (was_dir and isinstance(last_event, FileMetadata)
                    or not was_dir and isinstance(last_event, FolderMetadata)):
                # item type changed: synthesize a deletion of the old item
                # before applying the new metadata
                deleted_event = DeletedMetadata(
                    name=last_event.name,
                    path_lower=last_event.path_lower,
                    path_display=last_event.path_display,
                    parent_shared_folder_id=last_event.parent_shared_folder_id
                )
                new_entries.append(deleted_event)
                new_entries.append(last_event)
            else:
                new_entries.append(last_event)

    changes.entries = new_entries

    return changes
@catch_sync_issues
def create_local_entry(self, entry):
    """
    Applies a file change from Dropbox servers to the local Dropbox folder.
    Any :class:`errors.MaestralApiError` will be caught and logged as appropriate
    (by the :func:`catch_sync_issues` decorator).
    Entries in the local index are created after successful completion.

    :param Metadata entry: Dropbox entry metadata.
    :returns: Copy of the Dropbox metadata if the change was applied locally,
        ``True`` if the change already existed locally and ``False`` in case of a
        :class:`errors.MaestralApiError`, for instance caused by sync issues.
    :rtype: Metadata, bool
    """

    # rate-limit how fast remote changes are applied
    self._slow_down()

    local_path = self.to_local_path(entry.path_display)

    # book keeping
    self.clear_sync_error(dbx_path=entry.path_display)
    remove_from_queue(self.queued_for_download, entry.path_display)

    with InQueue(self.queue_downloading, entry.path_display):

        conflict_check = self.check_download_conflict(entry)

        applied = None

        if conflict_check in (Conflict.Identical, Conflict.LocalNewerOrIdentical):
            # nothing to do: local state already matches or wins
            return applied

        elif conflict_check == Conflict.Conflict and isinstance(entry, FolderMetadata):
            # only move folders here, file will be moved after download is complete
            new_local_path = generate_cc_name(local_path)
            # suppress our own file events so the move is not re-uploaded
            with self.fs_events.ignore(local_path, recursive=osp.isdir(local_path),
                                       event_types=(EVENT_TYPE_DELETED,
                                                    EVENT_TYPE_MOVED)):
                exc = move(local_path, new_local_path)
                if exc:
                    raise os_to_maestral_error(exc, local_path=new_local_path)

        if isinstance(entry, FileMetadata):
            # Store the new entry at the given path in your local state.
            # If the required parent folders don’t exist yet, create them.
            # If there’s already something else at the given path,
            # replace it and remove all its children.

            # we download to a temporary file first (this may take some time)
            with tempfile.NamedTemporaryFile(prefix='maestral_download_',
                                             delete=False) as f:
                tmp_fname = f.name

            # download the pinned revision so a concurrent remote change
            # cannot alter what we fetch
            md = self.client.download(f'rev:{entry.rev}', tmp_fname)

            # re-check for conflict and move the conflict
            # out of the way if anything has changed
            if self.check_download_conflict(entry) == Conflict.Conflict:
                new_local_path = generate_cc_name(local_path)
                with self.fs_events.ignore(local_path,
                                           recursive=osp.isdir(local_path),
                                           event_types=(EVENT_TYPE_DELETED,
                                                        EVENT_TYPE_MOVED)):
                    exc = move(local_path, new_local_path)
                    if exc:
                        raise os_to_maestral_error(exc, local_path=new_local_path)

            # a directory at the target path is replaced by the file
            if osp.isdir(local_path):
                with self.fs_events.ignore(local_path,
                                           recursive=osp.isdir(local_path),
                                           event_types=(EVENT_TYPE_DELETED,)):
                    delete(local_path)

            # move the downloaded file to its destination
            with self.fs_events.ignore(local_path,
                                       event_types=(EVENT_TYPE_DELETED,
                                                    EVENT_TYPE_CREATED)):
                exc = move(tmp_fname, local_path)
                if exc:
                    raise os_to_maestral_error(exc, dbx_path=entry.path_display,
                                               local_path=local_path)

            # update the local index only after the move succeeded
            self.set_last_sync_for_path(entry.path_lower, self.get_ctime(local_path))
            self.set_local_rev(entry.path_lower, md.rev)

            logger.debug('Created local file "%s"', entry.path_display)
            self._save_to_history(entry.path_display)
            applied = entry

        elif isinstance(entry, FolderMetadata):
            # Store the new entry at the given path in your local state.
            # If the required parent folders don’t exist yet, create them.
            # If there’s already something else at the given path,
            # replace it but leave the children as they are.

            # a file at the target path is replaced by the folder
            if osp.isfile(local_path):
                with self.fs_events.ignore(local_path,
                                           event_types=(EVENT_TYPE_DELETED,)):
                    delete(local_path)

            try:
                with self.fs_events.ignore(local_path, is_dir=True,
                                           event_types=(EVENT_TYPE_CREATED,)):
                    os.makedirs(local_path)
            except FileExistsError:
                # folder already exists locally: that is fine
                pass

            self.set_last_sync_for_path(entry.path_lower, self.get_ctime(local_path))
            self.set_local_rev(entry.path_lower, 'folder')

            logger.debug('Created local folder "%s"', entry.path_display)
            applied = entry

        elif isinstance(entry, DeletedMetadata):
            # If your local state has something at the given path,
            # remove it and all its children. If there’s nothing at the
            # given path, ignore this entry.

            with self.fs_events.ignore(local_path, recursive=osp.isdir(local_path),
                                       event_types=(EVENT_TYPE_DELETED, )):
                err = delete(local_path)

            # clear the index entry even if the local item was already gone
            self.set_local_rev(entry.path_lower, None)
            self.set_last_sync_for_path(entry.path_lower, time.time())

            if not err:
                logger.debug('Deleted local item "%s"', entry.path_display)
                applied = entry
            else:
                logger.debug('Deletion failed: %s', err)

        return applied
def _save_to_history(self, dbx_path):
    """Appends *dbx_path* to the list of recently changed files, removing
    older duplicates and keeping only the ``_max_history`` newest entries."""
    history = self._state.get('sync', 'recent_changes')
    history.append(dbx_path)
    # dict.fromkeys preserves insertion order while dropping duplicates
    deduplicated = list(dict.fromkeys(history))
    self._state.set('sync', 'recent_changes', deduplicated[-self._max_history:])
# ========================================================================================
# Workers for upload, download and connection monitoring threads
# ========================================================================================
def helper(mm):
    """
    A worker for periodic maintenance:

      1) Updates the current space usage by calling
         :meth:`client.MaestralApiClient.get_space_usage`. This doubles as a check
         for the connection to Dropbox servers.
      2) Pauses syncing when the connection is lost and resumes syncing when
         reconnected and syncing has not been paused by the user.
      3) Triggers weekly reindexing.

    :param MaestralMonitor mm: MaestralMonitor instance.
    """

    while mm.running.is_set():

        try:
            # use an inexpensive call to `get_space_usage` to test connection
            mm.sync.client.get_space_usage()
            if not mm.connected.is_set() and not mm.paused_by_user.is_set():
                # we just regained the connection: trigger the startup routines
                mm.startup.set()
            # rebuild the index periodically, but only when the sync has been
            # idle for at least 20 minutes
            elif (time.time() - mm.sync.last_reindex > mm.reindex_interval
                    and mm.idle_time > 20 * 60):
                mm.rebuild_index()
            mm.connected.set()
            time.sleep(mm.connection_check_interval)
        except ConnectionError:
            if mm.connected.is_set():
                # log the traceback only on the transition to "disconnected"
                logger.debug(DISCONNECTED, exc_info=True)
            logger.info(DISCONNECTED)
            mm.syncing.clear()
            mm.connected.clear()
            # check more frequently while disconnected
            time.sleep(mm.connection_check_interval / 2)
        except DropboxAuthError as e:
            # authentication errors are fatal: stop all workers
            mm.running.clear()
            mm.syncing.clear()
            logger.error(e.title, exc_info=True)
        except Exception:
            mm.running.clear()
            mm.syncing.clear()
            logger.error('Unexpected error', exc_info=True)
def download_worker(sync, syncing, running, connected):
    """
    Worker to sync changes of remote Dropbox with local folder.

    :param UpDownSync sync: Instance of :class:`UpDownSync`.
    :param Event syncing: Event that indicates if workers are running or paused.
    :param Event running: Event to shutdown local file event handler and worker threads.
    :param Event connected: Event that indicates if we can connect to Dropbox.
    """

    while running.is_set():

        syncing.wait()  # block here while syncing is paused

        try:
            has_changes = sync.wait_for_remote_changes(sync.last_cursor)

            # syncing may have been paused or stopped while we waited
            if not (running.is_set() and syncing.is_set()):
                continue

            if has_changes:
                with sync.lock:
                    logger.info(SYNCING)
                    changes = sync.list_remote_changes(sync.last_cursor)
                    downloaded, _ = sync.apply_remote_changes(changes)
                    sync.notify_user(downloaded)
                    logger.info(IDLE)

                # free memory from the (potentially large) change list
                gc.collect()

        except ConnectionError:
            syncing.clear()
            connected.clear()
            logger.debug(DISCONNECTED, exc_info=True)
            logger.info(DISCONNECTED)
        except MaestralApiError as e:
            # sync errors raised by Maestral itself are fatal: stop all workers
            running.clear()
            syncing.clear()
            logger.error(e.title, exc_info=True)
        except Exception:
            running.clear()
            syncing.clear()
            logger.error('Unexpected error', exc_info=True)
def download_worker_added_item(sync, syncing, running, connected):
    """
    Worker to download items which have been newly included in sync.

    :param UpDownSync sync: Instance of :class:`UpDownSync`.
    :param Event syncing: Event that indicates if workers are running or paused.
    :param Event running: Event to shutdown local file event handler and worker threads.
    :param Event connected: Event that indicates if we can connect to Dropbox.
    """

    while running.is_set():

        syncing.wait()  # block here while syncing is paused

        # blocks until a newly included item is queued
        dbx_path = sync.queued_newly_included_downloads.get()

        if not (running.is_set() and syncing.is_set()):
            # syncing was paused or stopped while we waited: remember the item
            # so that it can be downloaded when syncing resumes
            sync.pending_downloads.add(dbx_path)
            continue

        try:
            with sync.lock:
                sync.get_remote_item(dbx_path)
                gc.collect()
            logger.info(IDLE)
        except ConnectionError:
            syncing.clear()
            connected.clear()
            logger.debug(DISCONNECTED, exc_info=True)
            logger.info(DISCONNECTED)
        except MaestralApiError as e:
            # sync errors raised by Maestral itself are fatal: stop all workers
            running.clear()
            syncing.clear()
            logger.error(e.title, exc_info=True)
        except Exception:
            running.clear()
            syncing.clear()
            logger.error('Unexpected error', exc_info=True)
def upload_worker(sync, syncing, running, connected):
    """
    Worker to sync local changes to remote Dropbox.

    :param UpDownSync sync: Instance of :class:`UpDownSync`.
    :param Event syncing: Event that indicates if workers are running or paused.
    :param Event running: Event to shutdown local file event handler and worker threads.
    :param Event connected: Event that indicates if we can connect to Dropbox.
    """

    while running.is_set():

        syncing.wait()  # block here while syncing is paused

        try:
            events, local_cursor = sync.wait_for_local_changes(timeout=5)

            # syncing may have been paused or stopped while we waited
            if not (running.is_set() and syncing.is_set()):
                continue

            if len(events) > 0:
                with sync.lock:
                    logger.info(SYNCING)
                    sync.apply_local_changes(events, local_cursor)
                    logger.info(IDLE)

                # free memory from the (potentially large) event list
                gc.collect()
            else:
                # nothing changed: just advance the local sync timestamp
                sync.last_sync = local_cursor

        except ConnectionError:
            syncing.clear()
            connected.clear()
            logger.debug(DISCONNECTED, exc_info=True)
            logger.info(DISCONNECTED)
        except MaestralApiError as e:
            # sync errors raised by Maestral itself are fatal: stop all workers
            running.clear()
            syncing.clear()
            logger.error(e.title, exc_info=True)
        except Exception:
            running.clear()
            syncing.clear()
            logger.error('Unexpected error', exc_info=True)
def startup_worker(sync, syncing, running, connected, startup, paused_by_user):
    """
    Worker to run the startup routines (initial download, retrying failed
    downloads, uploading offline changes) before regular syncing resumes.

    :param UpDownSync sync: Instance of :class:`UpDownSync`.
    :param Event syncing: Event that indicates if workers are running or paused.
    :param Event running: Event to shutdown local file event handler and worker threads.
    :param Event connected: Event that indicates if we can connect to Dropbox.
    :param Event startup: Set when we should run startup routines.
    :param Event paused_by_user: Set when syncing has been paused by the user.
    """

    while running.is_set():

        startup.wait()  # block until startup routines are requested

        try:
            with sync.lock:
                # run / resume initial download
                # local changes during this download will be registered
                # by the local FileSystemObserver but only uploaded after
                # `syncing` has been set
                if sync.last_cursor == '':
                    sync.clear_all_sync_errors()
                    sync.get_remote_folder()
                    sync.last_sync = time.time()

                if not running.is_set():
                    continue

                # retry failed / interrupted downloads
                logger.info('Checking for pending downloads...')
                for dbx_path in list(sync.download_errors):
                    sync.get_remote_item(dbx_path)

                for dbx_path in list(sync.pending_downloads):
                    sync.get_remote_item(dbx_path)

                # upload changes while inactive
                sync.upload_local_changes_while_inactive()

                # enforce immediate check for remote changes
                changes = sync.list_remote_changes(sync.last_cursor)
                downloaded, _ = sync.apply_remote_changes(changes)
                sync.notify_user(downloaded)

                if not running.is_set():
                    continue

                gc.collect()

                # resume regular syncing unless the user paused it
                if not paused_by_user.is_set():
                    syncing.set()

                startup.clear()

                logger.info(IDLE)

        except ConnectionError:
            syncing.clear()
            connected.clear()
            logger.debug(DISCONNECTED, exc_info=True)
            logger.info(DISCONNECTED)
        except MaestralApiError as e:
            # sync errors raised by Maestral itself are fatal: stop all workers
            running.clear()
            syncing.clear()
            logger.error(e.title, exc_info=True)
        except Exception:
            running.clear()
            syncing.clear()
            logger.error('Unexpected error', exc_info=True)

        # NOTE(review): safety net -- also clear the startup flag after a
        # failed attempt so we don't spin on the same error; presumably
        # intentional, confirm placement against upstream.
        startup.clear()
# ========================================================================================
# Main Monitor class to start, stop and coordinate threads
# ========================================================================================
class MaestralMonitor:
    """
    Class to sync changes between Dropbox and a local folder. It creates four
    threads: `observer` to catch local file events, `upload_thread` to upload
    caught changes to Dropbox, `download_thread` to query for and download
    remote changes, and `connection_thread` which periodically checks the
    connection to Dropbox servers.

    :param MaestralApiClient client: The Dropbox API client, a wrapper around the
        Dropbox Python SDK.

    :ivar Thread local_observer_thread: Watchdog observer thread that detects local
        file system events.
    :ivar Thread upload_thread: Thread that sorts and uploads local changes.
    :ivar Thread download_thread: Thread to query for and download remote changes.
    :ivar Thread file_handler: Handler to queue file events from `observer` for upload.
    :ivar UpDownSync sync: Object to coordinate syncing. This is the brain of
        Maestral. It contains the logic to process local and remote file events and
        to apply them while checking for conflicts.

    :ivar Event connected: Set when connected to Dropbox servers.
    :ivar Event startup: Set when startup scripts have to be run after syncing
        was inactive, for instance when Maestral is started, the internet connection
        is reestablished or syncing is resumed after pausing.
    :ivar Event syncing: Set when sync is running.
    :ivar Event running: Set when the sync threads are alive.
    :ivar Event paused_by_user: Set when sync is paused by the user.

    :ivar Queue queue_downloading: Holds *local file paths* that are being downloaded.
    :ivar Queue queue_uploading: Holds *local file paths* that are being uploaded.
    """

    # seconds between connection checks performed by the helper thread
    connection_check_interval = 4

    def __init__(self, client):
        self.client = client
        self.config_name = self.client.config_name
        self.sync = UpDownSync(self.client)

        self.connected = Event()
        self.syncing = Event()
        self.running = Event()
        self.paused_by_user = Event()
        # start out in the paused state; `start` clears this flag
        self.paused_by_user.set()
        self.startup = Event()

        self.fs_event_handler = FSEventHandler(self.syncing, self.startup, self.sync)

        # set by `start`, read by `idle_time`
        self._startup_time = None

        self.reindex_interval = self.sync._conf.get('sync', 'reindex_interval')

    @property
    def uploading(self):
        """Returns a list of all items currently uploading."""
        return list(self.sync.queue_uploading.queue)

    @property
    def downloading(self):
        """Returns a list of all items currently downloading."""
        return list(self.sync.queue_downloading.queue)

    @property
    def queued_for_upload(self):
        """Returns a list of all items queued for upload."""
        return list(self.sync.queued_for_upload.queue)

    @property
    def queued_for_download(self):
        """Returns a list of all items queued for download."""
        return list(self.sync.queued_for_download.queue)

    def start(self):
        """Creates observer threads and starts syncing."""

        if self.running.is_set():
            # do nothing if already running
            return

        self.running = Event()  # create new event to let old threads shut down

        self.local_observer_thread = Observer(timeout=0.1)
        self.local_observer_thread.setName('maestral-fsobserver')
        self._watch = self.local_observer_thread.schedule(
            self.fs_event_handler, self.sync.dropbox_path, recursive=True
        )
        for emitter in self.local_observer_thread.emitters:
            emitter.setName('maestral-fsemitter')

        # all worker threads are daemons so an abnormal exit of the main
        # thread does not hang the process
        self.helper_thread = Thread(
            target=helper,
            daemon=True,
            args=(self,),
            name='maestral-helper'
        )

        self.startup_thread = Thread(
            target=startup_worker,
            daemon=True,
            args=(
                self.sync, self.syncing, self.running, self.connected,
                self.startup, self.paused_by_user
            ),
            name='maestral-sync-startup'
        )

        self.download_thread = Thread(
            target=download_worker,
            daemon=True,
            args=(
                self.sync, self.syncing, self.running, self.connected,
            ),
            name='maestral-download'
        )

        self.download_thread_added_folder = Thread(
            target=download_worker_added_item,
            daemon=True,
            args=(
                self.sync, self.syncing, self.running, self.connected,
            ),
            name='maestral-folder-download'
        )

        self.upload_thread = Thread(
            target=upload_worker,
            daemon=True,
            args=(
                self.sync, self.syncing, self.running, self.connected,
            ),
            name='maestral-upload'
        )

        try:
            self.local_observer_thread.start()
        except OSError as exc:
            # report a helpful error instead of a bare OSError when the
            # platform's inotify watch limit is exhausted
            if 'inotify' in exc.args[0]:
                title = 'Inotify limit reached'
                msg = ('Changes to your Dropbox folder cannot be monitored because it '
                       'contains too many items. Please increase the inotify limit in '
                       'your system by adding the following line to /etc/sysctl.conf:\n\n'
                       'fs.inotify.max_user_watches=524288')
                new_exc = InotifyError(title, msg).with_traceback(exc.__traceback__)
                exc_info = (type(new_exc), new_exc, new_exc.__traceback__)
                logger.error(title, exc_info=exc_info)
                return
            else:
                raise exc

        # flag order matters: `startup` must be set before the startup thread
        # starts so it runs the initial download / catch-up routines
        self.running.set()
        self.syncing.clear()
        self.connected.set()
        self.startup.set()

        self.helper_thread.start()
        self.startup_thread.start()
        self.upload_thread.start()
        self.download_thread.start()
        self.download_thread_added_folder.start()

        self.paused_by_user.clear()

        self._startup_time = time.time()

    def pause(self):
        """Pauses syncing."""
        self.paused_by_user.set()
        self.syncing.clear()
        # wait until the current sync cycle has finished
        self._wait_for_idle()
        logger.info(PAUSED)

    def resume(self):
        """Checks for changes while idle and starts syncing."""
        if not self.paused_by_user.is_set():
            return

        # trigger the startup routines; they will set `syncing` when done
        self.startup.set()
        self.paused_by_user.clear()

    def stop(self):
        """Stops syncing and destroys worker threads."""

        if not self.running.is_set():
            return

        logger.info('Shutting down threads...')

        self.running.clear()
        self.syncing.clear()
        self.paused_by_user.clear()
        self.startup.clear()

        self._wait_for_idle()

        self.local_observer_thread.stop()
        self.local_observer_thread.join()
        self.helper_thread.join()
        self.upload_thread.join()
        # NOTE(review): the download threads are not joined here -- they are
        # daemons and may block on network I/O; confirm this is intentional.

        logger.info(STOPPED)

    @property
    def idle_time(self):
        """Returns the idle time in seconds since the last file change or zero if
        syncing is not running."""
        if len(self.sync._last_sync_for_path) > 0:
            return time.time() - max(self.sync._last_sync_for_path.values())
        elif self.syncing.is_set():
            # no changes recorded yet: idle since startup
            return time.time() - self._startup_time
        else:
            return 0.0

    def reset_sync_state(self):
        """Resets the saved sync state (cursor, last-sync timestamp and rev index).
        Raises :class:`RuntimeError` when called while syncing is active."""
        if self.syncing.is_set() or self.startup.is_set() or self.sync.lock.locked():
            raise RuntimeError('Cannot reset sync state while syncing.')

        self.sync.last_cursor = ''
        self.sync.last_sync = 0.0
        self.sync.clear_rev_index()

        logger.debug('Sync state reset')

    def rebuild_index(self):
        """
        Rebuilds the rev file by comparing remote with local files and updating rev
        numbers from the Dropbox server. Files are compared by their content hashes
        and conflicting copies are created if the contents differ. File changes
        during the rebuild process will be queued and uploaded once rebuilding has
        completed.

        Rebuilding will be performed asynchronously.

        :raises: :class:`errors.MaestralApiError`
        """

        logger.info('Rebuilding index...')

        self.pause()

        # an empty cursor and rev index force a full re-download / re-index on
        # the next startup run
        self.sync.last_cursor = ''
        self.sync.clear_rev_index()

        if not self.running.is_set():
            self.start()
        else:
            self.resume()

    def _wait_for_idle(self):
        # block until the current sync cycle releases the sync lock
        self.sync.lock.acquire()
        self.sync.lock.release()

    def _threads_alive(self):
        """Returns ``True`` if all threads are alive, ``False`` otherwise."""

        try:
            threads = (
                self.local_observer_thread,
                self.upload_thread, self.download_thread,
                self.download_thread_added_folder,
                self.helper_thread,
                self.startup_thread
            )
        except AttributeError:
            # `start` has not been called yet: the thread attributes don't exist
            return False

        base_threads_alive = (t.is_alive() for t in threads)
        watchdog_emitters_alive = (e.is_alive() for e
                                   in self.local_observer_thread.emitters)

        return all(base_threads_alive) and all(watchdog_emitters_alive)
# ========================================================================================
# Helper functions
# ========================================================================================
def get_dest_path(event):
    """Returns the destination path of a file system event, falling back to
    its source path for event types without a ``dest_path``."""
    try:
        return event.dest_path
    except AttributeError:
        return event.src_path
def split_moved_event(event):
    """
    Splits a given FileSystemEvent into Deleted and Created events of the same type.

    :param FileSystemEvent event: Original event.
    :returns: Tuple of deleted and created events.
    :rtype: tuple
    """
    if event.is_directory:
        created_cls, deleted_cls = DirCreatedEvent, DirDeletedEvent
    else:
        created_cls, deleted_cls = FileCreatedEvent, FileDeletedEvent

    return deleted_cls(event.src_path), created_cls(event.dest_path)
def get_local_hash(local_path):
    """
    Computes the content hash of a local file.

    :param str local_path: Absolute path on local drive.
    :returns: Content hash to compare with Dropbox's content hash,
        or 'folder' if the path points to a directory. ``None`` if there
        is nothing at the path.
    :rtype: str
    """
    hasher = DropboxContentHasher()

    try:
        with open(local_path, 'rb') as f:
            while True:
                # read in 1 MiB chunks: the resulting content hash is
                # independent of the chunk size, and large reads are much
                # faster than the previous 1 KiB reads for big files
                chunk = f.read(1024 * 1024)
                if not chunk:
                    break
                hasher.update(chunk)

        return str(hasher.hexdigest())

    except IsADirectoryError:
        return 'folder'
    except FileNotFoundError:
        return None
    finally:
        # release the hasher's internal buffers promptly
        del hasher
def remove_from_queue(queue, *items):
    """
    Tries to remove the given items from a queue; items that are not present
    are silently skipped.

    :param Queue queue: Queue to remove items from.
    :param items: Items to remove.
    """
    # hold the queue's mutex so membership test and removal are atomic
    with queue.mutex:
        pending = queue.queue
        for item in items:
            if item in pending:
                pending.remove(item)
def entries_to_str(entries):
    """Returns a multi-line, bracketed string representation of a list of
    Dropbox metadata entries, one entry per line."""
    lines = (f'<{entry.__class__.__name__}(path_display={entry.path_display})>'
             for entry in entries)
    return '[' + ',\n '.join(lines) + ']'
def cpu_usage_percent(interval=0.1):
    """Returns a float representing the current process CPU
    utilization as a percentage. This copies the similar
    method from psutil.

    Compares process times to system CPU times elapsed before
    and after the interval (blocking). It is recommended for
    accuracy that this function be called with an interval of
    at least 0.1 sec.

    A value > 100.0 can be returned in case of processes running
    multiple threads on different CPU cores.

    The returned value is explicitly NOT split evenly between
    all available logical CPUs. This means that a busy loop process
    running on a system with 2 logical CPUs will be reported as
    having 100% CPU utilization instead of 50%.
    """
    if not interval > 0:
        raise ValueError(f'interval is not positive (got {interval!r})')

    def process_time():
        # user + system CPU time consumed by this process so far
        usage = resource.getrusage(resource.RUSAGE_SELF)
        return usage.ru_utime + usage.ru_stime

    # wall-clock time scaled by the CPU count, as psutil does
    wall_start = time.monotonic() * _cpu_count
    proc_start = process_time()
    time.sleep(interval)
    wall_end = time.monotonic() * _cpu_count
    proc_end = process_time()

    delta_proc = proc_end - proc_start
    delta_time = wall_end - wall_start

    try:
        overall_cpus_percent = (delta_proc / delta_time) * 100
    except ZeroDivisionError:
        return 0.0
    else:
        return round(overall_cpus_percent * _cpu_count, 1)
|
<gh_stars>1-10
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.common.plugin import plugin
from rally.common import validation as common_validation
from rally.task import validation
from tests.unit import fakes
from tests.unit import test
class ValidationUtilsTestCase(test.TestCase):
def setUp(self):
super(ValidationUtilsTestCase, self).setUp()
class Plugin(plugin.Plugin):
pass
Plugin._meta_init()
self.addCleanup(Plugin.unregister)
self.Plugin = Plugin
def test_old_validator_admin(self):
validator_func = mock.Mock()
validator_func.return_value = None
validator = validation.validator(validator_func)
self.assertEqual(self.Plugin,
validator("a", "b", "c", d=1)(self.Plugin))
self.assertEqual(1, len(self.Plugin._meta_get("validators")))
vname, args, kwargs = self.Plugin._meta_get("validators")[0]
validator_cls = common_validation.Validator.get(vname)
validator_inst = validator_cls(*args, **kwargs)
fake_admin = fakes.fake_credential()
credentials = {"openstack": {"admin": fake_admin, "users": []}}
result = validator_inst.validate(credentials, {}, None, None)
self.assertIsInstance(result, common_validation.ValidationResult)
self.assertTrue(result.is_valid)
validator_func.assert_called_once_with(
{}, None, mock.ANY, "a", "b", "c", d=1)
deployment = validator_func.call_args[0][2]
self.assertEqual({"admin": fake_admin, "users": []},
deployment.get_credentials_for("openstack"))
def test_old_validator_users(self):
validator_func = mock.Mock()
validator_func.return_value = None
validator = validation.validator(validator_func)
self.assertEqual(self.Plugin,
validator("a", "b", "c", d=1)(self.Plugin))
self.assertEqual(1, len(self.Plugin._meta_get("validators")))
vname, args, kwargs = self.Plugin._meta_get("validators")[0]
validator_cls = common_validation.Validator.get(vname)
validator_inst = validator_cls(*args, **kwargs)
fake_admin = fakes.fake_credential()
fake_users1 = fakes.fake_credential()
fake_users2 = fakes.fake_credential()
users = [{"credential": fake_users1}, {"credential": fake_users2}]
credentials = {"openstack": {"admin": fake_admin, "users": users}}
result = validator_inst.validate(credentials, {}, None, None)
self.assertIsInstance(result, common_validation.ValidationResult)
self.assertTrue(result.is_valid)
fake_users1.clients.assert_called_once_with()
fake_users2.clients.assert_called_once_with()
validator_func.assert_has_calls((
mock.call({}, fake_users1.clients.return_value, mock.ANY,
"a", "b", "c", d=1),
mock.call({}, fake_users2.clients.return_value, mock.ANY,
"a", "b", "c", d=1)
))
for args in validator_func.call_args:
deployment = validator_func.call_args[0][2]
self.assertEqual({"admin": fake_admin, "users": users},
deployment.get_credentials_for("openstack"))
def test_old_validator_users_error(self):
validator_func = mock.Mock()
validator_func.return_value = common_validation.ValidationResult(False)
validator = validation.validator(validator_func)
self.assertEqual(self.Plugin,
validator("a", "b", "c", d=1)(self.Plugin))
self.assertEqual(1, len(self.Plugin._meta_get("validators")))
vname, args, kwargs = self.Plugin._meta_get("validators")[0]
validator_cls = common_validation.Validator.get(vname)
validator_inst = validator_cls(*args, **kwargs)
fake_admin = fakes.fake_credential()
fake_users1 = fakes.fake_credential()
fake_users2 = fakes.fake_credential()
users = [{"credential": fake_users1}, {"credential": fake_users2}]
credentials = {"openstack": {"admin": fake_admin, "users": users}}
result = validator_inst.validate(credentials, {}, None, None)
self.assertIsInstance(result, common_validation.ValidationResult)
self.assertFalse(result.is_valid)
fake_users1.clients.assert_called_once_with()
fake_users2.clients.assert_called_once_with()
validator_func.assert_called_once_with(
{}, fake_users1.clients.return_value, mock.ANY,
"a", "b", "c", d=1)
deployment = validator_func.call_args[0][2]
self.assertEqual({"admin": fake_admin, "users": users},
deployment.get_credentials_for("openstack"))
@mock.patch("rally.task.validation.LOG.warning")
def test_deprecated_validator(self, mock_log_warning):
my_deprecated_validator = validation.deprecated_validator(
"new_validator", "deprecated_validator", "0.10.0")
self.Plugin = my_deprecated_validator("foo", bar="baz")(self.Plugin)
self.assertEqual([("new_validator", ("foo",), {"bar": "baz"})],
self.Plugin._meta_get("validators"))
mock_log_warning.assert_called_once_with(
"Plugin '%s' uses validator 'rally.task.validation.%s' which is "
"deprecated in favor of '%s' (it should be used via new decorator "
"'rally.common.validation.add') in Rally v%s.",
self.Plugin.get_name(), "deprecated_validator", "new_validator",
"0.10.0")
def _unwrap_validator(self, validator, *args, **kwargs):
name = self.id()
@plugin.base()
class Foo(plugin.Plugin,
validation.validation.ValidatablePluginMixin):
pass
@plugin.configure(name)
class TempPlugin(Foo):
pass
self.addCleanup(TempPlugin.unregister)
validator(*args, **kwargs)(TempPlugin)
def wrap_validator(config):
return (Foo.validate(name, {}, config, {}) or
[common_validation.ValidationResult(True)])
return wrap_validator
def test_share_proto_compatibility(self):
    """validate_share_proto accepts known protocols and rejects unknown ones."""
    validator = self._unwrap_validator(
        validation.validate_share_proto)
    # A supported Manila share protocol passes validation.
    res = validator({"args": {"share_proto": "GLUSTERFS"}})
    self.assertEqual(1, len(res))
    self.assertTrue(res[0].is_valid, res[0].msg)
    # An unsupported protocol name is rejected.
    res = validator({"args": {"share_proto": "fake"}})
    self.assertEqual(1, len(res))
    self.assertFalse(res[0].is_valid)
@mock.patch("rally.common.yamlutils.safe_load")
@mock.patch("rally.plugins.openstack.validators.os.access")
@mock.patch("rally.plugins.openstack.validators.open")
def test_workbook_contains_workflow_compatibility(
        self, mock_open, mock_access, mock_safe_load):
    """workbook_contains_workflow passes when the named workflow exists.

    File access and YAML parsing are mocked out; the validator should
    open the definition file once, parse it once, and find workflow
    "wf1" inside the workbook.
    """
    # Minimal Mistral v2 workbook with a single workflow "wf1".
    mock_safe_load.return_value = {
        "version": "2.0",
        "name": "wb",
        "workflows": {
            "wf1": {
                "type": "direct",
                "tasks": {
                    "t1": {
                        "action": "std.noop"
                    }
                }
            }
        }
    }
    validator = self._unwrap_validator(
        validation.workbook_contains_workflow, "definition",
        "workflow_name")
    context = {
        "args": {
            "definition": "fake_path1",
            "workflow_name": "wf1"
        }
    }
    result = validator(context)
    self.assertEqual(1, len(result))
    self.assertTrue(result[0].is_valid, result[0].msg)
    # Each mocked helper was used exactly once to load the workbook.
    self.assertEqual(1, mock_open.called)
    self.assertEqual(1, mock_access.called)
    self.assertEqual(1, mock_safe_load.called)
|
<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer sample
"""
def get_sample_options(defaults=None):
    """Return the argparse option definitions for the sample subcommand.

    Each key is a command-line flag; each value is the keyword dict that
    will be passed to ``argparse``'s ``add_argument``. ``defaults`` is an
    optional mapping (e.g. parsed from a previous run's configuration)
    used to pre-populate the ``default`` entry of each option.

    Fix: the ``--no-row-index`` help text was a copy-paste of
    ``--row-index`` and described the opposite behavior.
    """
    if defaults is None:
        defaults = {}
    options = {
        # Input fields to include in the sample.
        '--sample-fields': {
            "action": 'store',
            "dest": 'sample_fields',
            "default": defaults.get('sample_fields', None),
            "help": ("Comma-separated list of input fields"
                     " (predictors) to create the sample.")},

        # If a BigML sample is provided, the script will use it to get
        # sample info.
        '--sample': {
            'action': 'store',
            'dest': 'sample',
            'default': defaults.get('sample', None),
            'help': "BigML sample Id."},

        # The path to a file containing sample ids.
        '--samples': {
            'action': 'store',
            'dest': 'samples',
            'default': defaults.get('samples', None),
            'help': ("Path to a file containing sample/ids. One sample"
                     " per line (e.g., sample/50a206a8035d0706dc000376"
                     ").")},

        # If a BigML json file containing a sample structure is provided,
        # the script will use it.
        '--sample-file': {
            'action': 'store',
            'dest': 'sample_file',
            'default': defaults.get('sample_file', None),
            'help': "BigML sample JSON structure file."},

        # Does not create a sample just a dataset.
        '--no-sample': {
            'action': 'store_true',
            'dest': 'no_sample',
            'default': defaults.get('no_sample', False),
            'help': "Do not create a sample."},

        # The path to a file containing sample attributes.
        '--sample-attributes': {
            'action': 'store',
            'dest': 'sample_attributes',
            'default': defaults.get('sample_attributes', None),
            'help': ("Path to a json file describing sample"
                     " attributes.")},

        # Create a sample, not just a dataset.
        '--no-no-sample': {
            'action': 'store_false',
            'dest': 'no_sample',
            'default': defaults.get('no_sample', False),
            'help': "Create a sample."},

        # Sample header.
        '--sample-header': {
            'action': 'store_true',
            'dest': 'sample_header',
            'default': defaults.get('sample_header', False),
            'help': "Headers are added to the sample file."},

        # No sample header.
        '--no-sample-header': {
            'action': 'store_false',
            'dest': 'sample_header',
            'default': defaults.get('sample_header', False),
            'help': "No headers are added to the sample file."},

        # Field query string.
        '--fields-filter': {
            "action": 'store',
            "dest": 'fields_filter',
            "default": defaults.get('fields_filter', None),
            "help": ("Query string to filter the rows according to the"
                     " values of its fields.")},

        # Index as id for the selected rows.
        '--row-index': {
            'action': 'store_true',
            'dest': 'row_index',
            'default': defaults.get('row_index', False),
            'help': "An absolute row number is added to the selected rows."},

        # Don't add an index as id for the selected rows.
        '--no-row-index': {
            'action': 'store_false',
            'dest': 'row_index',
            'default': defaults.get('row_index', False),
            'help': "No absolute row number is added to the selected rows."},

        # Sampling mode.
        '--mode': {
            'action': 'store',
            'dest': 'mode',
            'default': defaults.get('mode', None),
            'choices': ["deterministic", "linear", "random"],
            'help': "Sampling mode."},

        # Number of times a row is present in the sample.
        '--occurrence': {
            'action': 'store_true',
            'dest': 'occurrence',
            'default': defaults.get('occurrence', False),
            'help': "Number of times a row is present in the sample."},

        # Don't add the number of times a row is present in the sample.
        '--no-occurrence': {
            'action': 'store_false',
            'dest': 'occurrence',
            'default': defaults.get('occurrence', False),
            'help': ("Don't add the number of times a row is present in"
                     " the sample.")},

        # precision
        '--precision': {
            "action": 'store',
            "dest": 'precision',
            "type": int,
            "default": defaults.get('precision', None),
            "help": ("Number of decimals in the returned values.")},

        # Number of rows returned by the sample.
        '--rows': {
            "action": 'store',
            "dest": 'rows',
            "type": int,
            "default": defaults.get('rows', None),
            "help": ("Number of rows returned by the sample.")},

        # Offset before the row sample.
        '--row-offset': {
            "action": 'store',
            "dest": 'row_offset',
            "type": int,
            "default": defaults.get('row_offset', 0),
            "help": ("Offset before the row sample.")},

        # Field used for sorting.
        '--row-order-by': {
            "action": 'store',
            "dest": 'row_order_by',
            "default": defaults.get('row_order_by', None),
            "help": ("Field ids used for sorting.")},

        # Sorted comma-separated list of fields to be used as columns
        # for sample rows.
        '--row-fields': {
            "action": 'store',
            "dest": 'row_fields',
            "default": defaults.get('row_fields', None),
            "help": ("Sorted comma-separated list of fields to be used"
                     "as columns for sample rows.")},

        # Comma-separated couple of field names to compute the Pearsons'
        # and Spearman's correlations and linear regression terms between them.
        '--stat-fields': {
            "action": 'store',
            "dest": 'stat_fields',
            "default": defaults.get('stat_fields', None),
            "help": ("Comma-separated couple of field ids to compute the"
                     " Pearsons' and Spearman's correlations and"
                     " linear regression terms between them.")},

        # Field name to compute the Pearsons'
        # and Spearman's correlations and linear regression terms between them
        # and the rest of numeric fields.
        '--stat-field': {
            "action": 'store',
            "dest": 'stat_field',
            "default": defaults.get('stat_field', None),
            "help": ("Numeric field to compute the"
                     " Pearsons' and Spearman's correlations and"
                     " linear regression terms between them"
                     " and the rest of numeric fields.")},

        # Unique, when set to true, repeated rows are removed.
        '--unique': {
            'action': 'store_true',
            'dest': 'unique',
            'default': defaults.get('unique', False),
            'help': "If True, repeated rows are removed."},

        # Don't remove repeated rows.
        '--no-unique': {
            'action': 'store_false',
            'dest': 'unique',
            'default': defaults.get('unique', False),
            'help': ("Don't remove repeated rows.")}}

    return options
|
import numpy as np
import tensorflow as tf
import copy
import scipy.stats
import time
def TicTocGenerator():
    """Generator yielding the wall-clock seconds elapsed since the last tick.

    Each next() call records the current time and yields the difference
    to the previous call (the first yield measures from the first next()).

    Fix: the original used a local named ``tf`` which shadowed the
    module-level ``tensorflow`` import inside this function; renamed to
    avoid the confusing shadowing (behavior unchanged).
    """
    t_start = 0            # time at the previous tick
    t_end = time.time()    # time at the current tick
    while True:
        t_start = t_end
        t_end = time.time()
        yield t_end - t_start  # seconds elapsed since the last next() call

TicToc = TicTocGenerator()  # module-level timer instance used by tic()/toc()
# tic()/toc() are defined on top of the shared TicToc generator instance.
def toc(tempBool=True):
    """Consume one tick from the global TicToc timer; print it unless muted."""
    elapsed = next(TicToc)
    if tempBool:
        print("Elapsed time: %f seconds.\n" % elapsed)
def tic():
    """Start a timing interval: silently discard the pending TicToc tick."""
    toc(tempBool=False)
def pseudo_action_swap_matrix(pi, phi):
    """Build the C x C matrix of pseudo actions from pairwise race swaps.

    Entry (m, j) is the action that would win the race argmin(log(pi) - phi)
    if the race values of categories m and j were exchanged; entries whose
    swap cannot change the outcome keep the true winning action.
    """
    C = len(pi)
    # race_all[m, j]: race value of category j when paired with pi[m].
    race_all = np.log(pi[:, np.newaxis]) - phi[np.newaxis, :]
    race = np.diag(race_all)
    best = np.argmin(race)
    pseudo_actions = np.full((C, C), best)
    if C <= 6:
        # Exhaustive version: recompute the argmin for every pair swap.
        for m in range(C):
            for j in range(m):
                swapped = race.copy()
                swapped[m], swapped[j] = race_all[j, m], race_all[m, j]
                winner = np.argmin(swapped)
                pseudo_actions[m, j] = pseudo_actions[j, m] = winner
    else:
        # Fast version: only swaps that produce a value beating the current
        # minimum -- or swaps involving the current winner -- can change
        # the outcome; all other entries stay at ``best``.
        candidates = race_all <= race[best]
        candidates[best, :] = True
        np.fill_diagonal(candidates, 0)
        for m, j in zip(*np.where(candidates)):
            swapped = race.copy()
            swapped[m], swapped[j] = race_all[j, m], race_all[m, j]
            if m == best or j == best:
                winner = np.argmin(swapped)
            elif swapped[m] < swapped[j]:
                winner = m
            else:
                winner = j
            pseudo_actions[m, j] = pseudo_actions[j, m] = winner
    return pseudo_actions
def pseudo_action_swap_vector(pi, phi, Cat_ref):
    """Pseudo actions obtained by swapping each category's race with Cat_ref's.

    Returns a length-C vector whose m-th entry is the winning action after
    exchanging the Dirichlet draws of categories m and Cat_ref; the entry
    for Cat_ref itself keeps the true winner.
    """
    C = len(pi)
    race = np.log(pi) - phi
    best = np.argmin(race)
    pseudo = np.full(C, best)
    ref = Cat_ref
    for m in range(C):
        if m == ref:
            continue  # swapping a category with itself changes nothing
        swapped = race.copy()
        swapped[m] = np.log(pi[ref]) - phi[m]
        swapped[ref] = np.log(pi[m]) - phi[ref]
        pseudo[m] = np.argmin(swapped)
    return pseudo
def loss_reinforce(model, states, labels, drs, ent=False):
    """REINFORCE loss: discounted-return-weighted sparse cross entropy.

    When ``ent`` is True an entropy bonus scaled by 0.01 is subtracted
    to encourage exploration.
    """
    logits = model(states)
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    if ent:
        probability = tf.nn.softmax(logits)
        entropy = scipy.stats.entropy(probability)
        return tf.reduce_mean(ce * drs - entropy * 0.01)
    return tf.reduce_mean(ce * drs)
def loss_reinforce_batch(model, states, actions, advantages):
    """Batched REINFORCE loss with a 0.01-weighted entropy bonus."""
    logits = model(states)
    probs = tf.nn.softmax(logits)
    # Cross entropy of the policy with itself == per-row entropy.
    entropy = tf.nn.softmax_cross_entropy_with_logits(labels=probs, logits=logits)
    weighted_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=actions) * tf.squeeze(advantages)
    return tf.reduce_mean(weighted_ce) - tf.reduce_mean(0.01 * entropy)
def gradient_reinforce_batch(model, states, actions, advantages):
    """Gradient of loss_reinforce_batch w.r.t. the model variables."""
    with tf.GradientTape() as tape:
        loss = loss_reinforce_batch(model, states, actions, advantages)
    return tape.gradient(loss, model.variables)
def cat_entropy(logits):
    """Per-row entropy of the categorical distributions defined by logits."""
    # Subtract the row max for numerical stability before exponentiating.
    shifted = logits - tf.reduce_max(logits, 1, keep_dims=True)
    exp_shifted = tf.exp(shifted)
    z = tf.reduce_sum(exp_shifted, 1, keep_dims=True)
    probs = exp_shifted / z
    # H = sum_a p_a * (log Z - shifted_a) = -sum_a p_a log p_a
    return tf.reduce_sum(probs * (tf.log(z) - shifted), 1)
def loss_arm(model, states, grad_alpha, ent_par):
    """ARM surrogate loss: <logits, grad_alpha>, minus an optional entropy bonus.

    grad_alpha: T x num_actions matrix of ARM pseudo-gradients.
    """
    logits = model(states)
    objective = tf.reduce_sum(tf.multiply(logits, grad_alpha))
    if ent_par <= 0:
        return objective
    log_probs = logits - tf.reduce_logsumexp(logits, 1, keepdims=True)
    probs = tf.exp(log_probs)
    entropy = tf.reduce_sum(-probs * log_probs)
    return objective - ent_par * entropy
def loss_critic(model, state, drs):
    """Mean squared error between discounted returns and critic predictions."""
    predicted = model(state)
    return tf.reduce_mean(tf.square(drs - predicted))
def gradient_reinforce(model, states, actions, drs, ent=False):
    """Gradient of loss_reinforce w.r.t. the model variables."""
    with tf.GradientTape() as tape:
        loss = loss_reinforce(model, states, actions, drs, ent)
    return tape.gradient(loss, model.variables)
def gradient_arm(model, states, grad_alpha, ent_par):
    """Gradient of loss_arm w.r.t. the model variables."""
    with tf.GradientTape() as tape:
        loss = loss_arm(model, states, grad_alpha, ent_par)
    return tape.gradient(loss, model.variables)
def gradient_critic(model, state, drs):
    """Gradient of loss_critic w.r.t. the model variables."""
    with tf.GradientTape() as tape:
        loss = loss_critic(model, state, drs)
    return tape.gradient(loss, model.variables)
def discount_reward(rewards, gamma):
    """Scalar discounted return sum_t gamma**t * rewards[t] (no normalization)."""
    discounts = np.power(gamma, np.arange(len(rewards)))
    return np.sum(discounts * rewards)
def discount_rewards(rewards, gamma):
    """Discounted return wrapped with a leading axis, i.e. shape (1,)."""
    discounts = np.power(gamma, np.arange(len(rewards)))
    total = np.sum(discounts * rewards)
    return total[None]
def swap(array, a, b):
    """Exchange elements ``a`` and ``b`` of ``array`` in place; return it."""
    tmp = array[a]
    array[a] = array[b]
    array[b] = tmp
    return array
def evaluate(model_actor, env, nA, seed):
    """Roll out one full episode in ``env`` and return the accumulated reward.

    Actions are drawn with a race/Gumbel-style trick: a Dirichlet sample
    ``pi`` is combined with the actor's logits and argmin(log(pi) - logits)
    is taken.  ``nA`` is the number of discrete actions; ``seed`` seeds the
    environment for reproducibility.

    NOTE(review): assumes a gym-like env (seed/reset/step API) whose
    observations are 1-D -- confirm against the caller.
    """
    env.seed(seed)
    state = env.reset()[None,:]
    state = np.float32(state)
    score = 0
    while True:
        logits = np.array(model_actor(state)[0])
        pi = np.random.dirichlet(np.ones(nA))
        #action = np.argmin(pi * np.exp(-logits))
        action = np.argmin(np.log(pi)-logits)
        next_state,reward,done,_ = env.step(action)
        next_state = np.float32(next_state)
        next_state = next_state[None,:] ## add one layer on the structure, e.g.[1,2,3] to [[1,2,3]]
        # Compute gradient and save with reward in memory for our weight update
        score += reward
        # Dont forget to update your old state to the new state
        state = next_state
        if done:
            break
    return score
def Q_value(model_critic, state, action, nA):
    """Critic's Q estimate for ``action``: one-hot mask over the critic output."""
    one_hot = tf.one_hot(action, nA, 1.0, 0.0)
    all_q = model_critic(state)
    return tf.reduce_sum(all_q * one_hot, reduction_indices=-1)
def loss_critic_q(model_critic, states, actions, drs, nA,Prob,rewards,gamma,model_actor,unique_pseudo_actions,pseudo_action_sequences,pi_sequence,time_permute_used,n_true_,e):
    """Combined critic loss: 1-step TD + 0.8 * 2-step TD + 0.3 * Monte-Carlo MSE.

    The critic takes concat(state, one-hot action) as input.  The middle
    section also builds an ARM pseudo-gradient tensor ``f`` from the
    pseudo-action swap matrices at the time steps listed in
    ``time_permute_used``.

    NOTE(review): parameter ``e`` is never used in this function.
    NOTE(review): ``var_grad`` is computed but not part of the returned
    loss -- presumably leftover from an experiment; confirm intent.
    NOTE(review): the ``else`` branch appends ``tf.zeros(nA)`` (shape
    (nA,)) while the main branch appends a (1, nA) matmul result; the
    later ``tf.stack`` looks shape-inconsistent if both branches fire --
    verify against real inputs.
    """
    action_one_hot = tf.one_hot(actions, nA, 1.0, 0.0)
    q = model_critic(tf.concat([states,action_one_hot], 1))
    # Alias: Q(s_t, a_t) for the actions actually taken.
    q_values = q
    phi = model_actor(states)
    Prob1 = tf.nn.softmax(phi)
    # Expected next-state value under the current policy (no gradient).
    q_values_next=0
    for aa in range(nA):
        action_one_hot = tf.one_hot(tf.fill((len(actions),),aa), nA, 1.0, 0.0)
        q_values_next+=(model_critic(tf.concat([states,action_one_hot], 1)))*Prob1[:,aa][:,None]
    q_values_next = tf.stop_gradient(q_values_next)
    # Critic estimates for each unique (time, pseudo-action) pair.
    pseudo_action_one_hot = tf.one_hot(unique_pseudo_actions[:,1], nA, 1.0, 0.0)
    pseudo_reward_total = model_critic(tf.concat([states[unique_pseudo_actions[:,0]],pseudo_action_one_hot], 1))
    f = []
    # ttt indexes pseudo_action_sequences, advancing only for used steps.
    ttt=0
    #for t in time_permute_used:
    for t in range(n_true_):
        if len(np.where(t==time_permute_used)[0])>0:
            # Bootstrap target: r_t + gamma * E_a[Q(s_{t+1}, a)] (except last step).
            if t<n_true_-1:
                total_reward = rewards[t]+ gamma*tf.reduce_sum(model_critic(states[t+1][None])*Prob[t+1])
            else:
                total_reward = rewards[t]
            ft = tf.ones((nA,nA)) * total_reward
            idxt=np.where(unique_pseudo_actions[:,0]==t)[0]
            for idx in idxt:
                aa = unique_pseudo_actions[idx,1]
                # Where the swap matrix picked action aa, substitute its critic value.
                matrix_tmp = tf.to_float(pseudo_action_sequences[ttt]==aa) * (pseudo_reward_total[idx] - total_reward)
                ft = ft + matrix_tmp
            meanft = tf.reduce_mean(ft,axis=0)
            sec_tmp = tf.convert_to_tensor(1.0/nA-pi_sequence[t], dtype = tf.float32)
            sec_tmp = tf.reshape(sec_tmp, (1,-1))
            f.append(tf.matmul(sec_tmp, ft-meanft)) # make it a row vector
            ttt+=1
        else:
            f.append(tf.zeros(nA))
    f1 = tf.stack(f, axis=0)
    f = tf.reshape(f1, (-1,nA))
    logit = model_actor(states)
    var_grad = tf.reduce_sum(tf.square(tf.multiply(logit, f)))
    return tf.reduce_sum(tf.square(q_values[:-1]-tf.convert_to_tensor(rewards[:-1],dtype = tf.float32)[:,None]-gamma*q_values_next[1:]))\
        + tf.reduce_sum(tf.square(q_values[:-2]-tf.convert_to_tensor(rewards[:-2],dtype = tf.float32)[:,None]-gamma*tf.convert_to_tensor(rewards[1:-1],dtype = tf.float32)[:,None]-gamma**2*q_values_next[2:]))*0.8\
        + tf.reduce_sum(tf.square(drs[:,None] - q_values))*0.3
def gradient_critic_q(model_critic, states, actions, drs, nA, Prob, rewards, gamma, model_actor, unique_pseudo_actions, pseudo_action_sequences, pi_sequence, time_permute_used, n_true_, e):
    """Gradient of loss_critic_q w.r.t. the critic's variables."""
    with tf.GradientTape() as tape:
        loss = loss_critic_q(model_critic, states, actions, drs, nA, Prob,
                             rewards, gamma, model_actor, unique_pseudo_actions,
                             pseudo_action_sequences, pi_sequence,
                             time_permute_used, n_true_, e)
    return tape.gradient(loss, model_critic.variables)
def gradient_actor_q(model_critic, states, actions, drs, nA,Prob,rewards,gamma,model_actor):
    """Gradient of loss_critic_q w.r.t. the *actor's* variables.

    NOTE(review): loss_critic_q takes 15 positional parameters but only 9
    are passed here, so this call raises TypeError if ever executed --
    presumably dead code or out of sync with loss_critic_q's signature;
    confirm intent before use.
    """
    with tf.GradientTape() as tape:
        loss_fn = loss_critic_q(model_critic, states, actions, drs, nA,Prob,rewards,gamma,model_actor)
    return tape.gradient(loss_fn, model_actor.variables)
def loss_critic_sa(model_critic_sa, states, actions, drs, nA):
    """MSE between discounted returns and the critic's Q(s, a) estimates.

    The critic takes concat(state, one-hot action) as input; the one-hot
    mask then selects the Q value of the action actually taken per row.

    Fix: removed the dead local ``q_values_next = q`` (assigned, never used).
    """
    action_one_hot = tf.one_hot(actions, nA, 1.0, 0.0)
    q = model_critic_sa(tf.concat([states, action_one_hot], 1))
    q_values = tf.reduce_sum(q * action_one_hot, reduction_indices=1)
    return tf.reduce_mean(tf.square(drs[:, None] - q_values))
def gradient_critic_sa(model_critic_sa, states, actions, drs, nA):
    """Gradient of loss_critic_sa w.r.t. the critic's variables."""
    with tf.GradientTape() as tape:
        loss = loss_critic_sa(model_critic_sa, states, actions, drs, nA)
    return tape.gradient(loss, model_critic_sa.variables)
def policy(model_actor, state, nA):
    """Sample one action per logit row via the race trick.

    For each row, draws a Dirichlet vector pi and returns
    argmin_a pi_a * exp(-logit_a) (axis 1 = per row).
    """
    logits = np.array(model_actor(state)[0])
    noise = np.array([np.random.dirichlet(np.ones(nA)) for _ in range(len(logits))])
    return np.argmin(noise * np.exp(-logits), axis=1)
def loss_dqn(model_critic, y, states, actions, nA, Q_value):
    """Summed squared TD error between targets ``y`` and Q_value predictions."""
    predictions = Q_value(model_critic, states, actions, nA)
    return tf.reduce_sum(tf.square(predictions - y))
def gradient_dqn(model_critic, y, states, actions, nA, Q_value):
    """Gradient of loss_dqn w.r.t. the critic's variables."""
    with tf.GradientTape() as tape:
        loss = loss_dqn(model_critic, y, states, actions, nA, Q_value)
    return tape.gradient(loss, model_critic.variables)
|
<reponame>fcco/SkySol<filename>skysol/lib/visualization.py
# Import MatplotLib for visualization
import matplotlib.pyplot as plt
import time
from datetime import datetime
import cv2
import os
import numpy as np
from skysol.lib import optical_flow, misc, drawings
from numpy import degrees, radians, arctan2, pi
from matplotlib.dates import date2num, DateFormatter, DayLocator, HourLocator, MinuteLocator
from matplotlib.ticker import MaxNLocator, LinearLocator
from scipy.ndimage.interpolation import rotate
from PIL import Image
import cmocean
#===============================================================================
#
# Matplotlib settings
#
#===============================================================================
# Global matplotlib defaults applied to every SkySol figure.
# Tick-label and axes styling.
plt.rcParams['ytick.labelsize'] = 11.
plt.rcParams['xtick.labelsize'] = 11.
plt.rcParams['axes.labelcolor'] = '000000'
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['axes.labelsize'] = 12.
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['axes.facecolor'] = 'ffffff'
# Tick geometry.
plt.rcParams['xtick.major.size' ] = 5.5 # major tick size in points
plt.rcParams['xtick.minor.size' ] = 3.5 # minor tick size in points
plt.rcParams['ytick.major.size' ] = 5.5 # major tick size in points
plt.rcParams['ytick.minor.size' ] = 3.5 # minor tick size in points
plt.rcParams['ytick.major.width' ] = 2 # major tick width in points
plt.rcParams['xtick.major.width' ] = 2 # major tick width in points
plt.rcParams['ytick.color'] = '000000'
plt.rcParams['xtick.color'] = '000000'
# Grid drawn as thin dotted black lines.
plt.rcParams['grid.color'] = 'black' # grid color
plt.rcParams['grid.linestyle'] = ':' # dotted
plt.rcParams['grid.linewidth'] = 0.2 # in points
# Fonts and math-text rendering.
plt.rcParams['font.size'] = 11.
plt.rcParams['axes.titlesize'] = 'large'
plt.rcParams['legend.fontsize'] = 'small'
plt.rc('mathtext', fontset='cm', default='regular')
def patch_image_cache(style, cache_dir='tilecache'):
    """Return a GoogleTiles instance whose get_image() caches tiles on disk.

    Downloaded tiles are stored as PNG files in ``cache_dir``; subsequent
    requests for the same tile are served from disk instead of the network.
    """
    from cartopy.io.img_tiles import GoogleTiles
    tiles = GoogleTiles(style=style)
    os.makedirs(cache_dir, exist_ok=True)  # make sure the cache location exists

    def cached_get_image(tile):
        # One PNG per (x, y, zoom) tile, namespaced by map style.
        path = os.path.join(cache_dir, style + '_%d_%d_%d.png' % tile)
        if os.path.exists(path):
            cached = Image.open(path).convert(tiles.desired_tile_form)
            return cached, tiles.tileextent(tile), 'lower'
        # Cache miss: download via the original class method, then persist.
        img, extent, origin = type(tiles).get_image(tiles, tile)
        img.save(path, 'PNG')
        return img, extent, origin

    tiles.get_image = cached_get_image
    return tiles
def fill_between(x, y1, y2=0, ax=None, **kwargs):
    """Shaded fill between ``y1`` and ``y2`` that can appear in a legend.

    Works exactly like matplotlib's ``fill_between`` but also adds a
    zero-size Rectangle proxy artist so the fill gets a legend entry.
    Returns the proxy artist.
    """
    if ax is None:
        ax = plt.gca()
    ax.fill_between(x, y1, y2, **kwargs)
    proxy = plt.Rectangle((0, 0), 0, 0)#, **kwargs)
    ax.add_patch(proxy)
    return proxy
def scale_bar(ax, lat, lon, length, location=(0.5, 0.05), linewidth=5):
    """Draw a striped scale bar of ``length`` km on a cartopy GeoAxes.

    ax        -- the axes to draw the scalebar on
    lat, lon  -- position used only to pick the matching UTM zone
    length    -- bar length in km (one stripe per km)
    location  -- bar centre in axes coordinates (0.5 = middle of the plot)
    linewidth -- thickness of the scalebar

    Fix: the plotting call sat outside the per-kilometre loop, so only
    the final 1 km segment was ever drawn; it now draws every
    alternating black/white segment.
    """
    import utm
    import cartopy.crs as ccrs
    # Work in a metric (UTM) projection so the bar length is true km.
    zone = utm.latlon_to_zone_number(lat, lon)
    if lat < 0:
        sh = True
    else:
        sh = False
    utm_c = ccrs.UTM(zone, southern_hemisphere=sh)
    # Extent of the plotted area in metres.
    x0, x1, y0, y1 = ax.get_extent(utm_c)
    # Centre of the scalebar in metres.
    sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1]
    # Draw alternating black/white 1 km segments.
    for i in range(0, length):
        if i % 2 == 0:
            c = 'k'
        else:
            c = 'w'
        bar_xs = [sbcx - length * 500 + i * 1000,
                  sbcx - length * 500 + (i + 1) * 1000]
        ax.plot(bar_xs, [sbcy, sbcy], transform=utm_c, color=c,
                linewidth=linewidth)
    # Label just above the bar.
    sbcy = sbcy + (y1 - y0) * 0.02
    ax.text(sbcx, sbcy, str(length) + ' km', color="black", transform=utm_c,
            fontweight="bold", horizontalalignment='center',
            verticalalignment='bottom', fontsize=15)
def plot(outfile, in_img, actdate, nstations, pyr, csk, ini,cmv, \
    xsun, ysun, mask, csl, cmap, features, hist_flag=False, text_flag=False,
    params=None):
    """Render the main SkySol dashboard figure and save it to ``outfile``.

    Panels: original sky image, cloud decision, RBR (+ optional
    histograms), optional clear-sky-library image, the irradiance/shadow
    map (on a cartopy map with the forecast path if ini.cbh_flag), a
    measurement/forecast time series, and a text column with sun
    geometry, irradiance and CMV statistics.

    Fix: the cloud-classification branch read ``meta['imgclass']``
    although no ``meta`` exists in this scope (NameError); it now uses
    ``params['imgclass']`` like the surrounding lines.

    NOTE(review): ``text_flag`` is accepted but never used here.
    """
    fig = plt.figure(figsize=(16,9))
    ncols = 5; nrows = 3

    # get station index
    if ini.fcst_flag and nstations > 0:
        k = [j for j in range(0, nstations) if int(pyr[j].ind) == int(ini.statlist[0])][0]
    else:
        k = 0

    # Cloud classification
    if ini.cloud_class_apply:
        CC_long = ['Cumulus','Cirrus','Altocumulus','Clear Sky','Stratocumulus', 'Stratus', 'Nimbostratus']
        CC_short = ['Cu','Ci/Cs','Ac/Cc','Clear','Sc', 'St', 'Ns/Cb']
        ccstr_long = CC_long[params['imgclass']-1]
        ccstr_short = CC_short[params['imgclass']-1]
        # BUGFIX: was ``meta['imgclass']`` -- ``meta`` is undefined here.
        if params['imgclass'] > 0:
            cpstr = str(np.round(params['imgprob'][params['imgclass']-1],2))
        else:
            cpstr = "-1"
    else:
        ccstr_long = ""
        ccstr_short = ""
        cpstr = ""

    img = cmap.copy()

    if ini.cbh_flag:
        from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
        #-------------------------------------------------------------------
        # create map
        #-------------------------------------------------------------------
        style = "satellite"
        # load OSM background image
        background = patch_image_cache(style, \
            ini.rootdir + '/tmp')
        ax = plt.subplot2grid((nrows,ncols), (0,2), \
            colspan=2, rowspan=2, projection=background.crs)
        # set boundaries of map
        ax.set_extent((ini.lon_min, ini.lon_max, ini.lat_min, ini.lat_max ))
        bnd = ax.get_extent()
        # Add the background to the map at a zoom level matching the extent
        res = ini.x_res * ini.grid_size
        if res > 10000:
            ax.add_image(background,12,alpha=0.9)
        elif res > 5000 and res <= 10000:
            ax.add_image(background,13,alpha=0.9)
        else:
            ax.add_image(background,14,alpha=0.9)
        #ax.imshow(background)
        gl = ax.gridlines(draw_labels=True,
            linewidth=1, color='white', alpha=0.6, linestyle='--')
        gl.xlabels_top = gl.ylabels_right = False
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        gl.xlabel_style = {'size': 10, 'color': 'black'}
        gl.ylabel_style = {'size': 10, 'color': 'black'}
        # draw cloud/shadow map
        ax.imshow(img,cmap=plt.cm.gray,alpha=0.5, \
            zorder=1, vmin=0, transform=background.crs, origin="upper",\
            extent=bnd)
        # Draw a scale bar
        scale_bar(ax, ini.lat0, ini.lon0, 5, linewidth=10)
        # Mark camera position
        sct = ax.scatter(ini.lon0, ini.lat0, \
            s=25, marker='x',c="red", transform=background.crs.as_geodetic())
    else:
        # draw cloud map
        ax = plt.subplot2grid((nrows,ncols), (0,2), colspan=2, rowspan=2)
        sct = ax.imshow(img, vmin=0, cmap=plt.cm.get_cmap('RdBu_r'))
        ax.grid('off')
        plt.title('Irradiance Map')
        plt.axis('off')

    # Forecast arrow
    if ini.flow_flag:
        # Point forecast
        xvals = []; yvals = []; vals = []
        cm = plt.cm.get_cmap('RdBu_r')
        cm = cmocean.cm.solar
        # Draw forecast arrow
        if ini.draw_forecast_path:
            for i in range(0, ini.fcst_horizon):
                inind = int(i / ini.fcst_res)
                x = int(pyr[k].fpos[i][1])
                y = int(pyr[k].fpos[i][0])
                # skip forecast positions that fall outside the map
                if x > cmap.shape[0] - 2 or x <= 0 or y <= 0 or y > cmap.shape[1]-2: continue
                xvals.append(x); yvals.append(y)
                cskval = csk.ghi[csk.tind]
                vals.append(pyr[k].fghi[inind])
            if ini.cbh_flag:
                xvals = np.array(xvals)[np.isfinite(vals)]
                yvals = np.array(yvals)[np.isfinite(vals)]
                vals = np.array(vals)[np.isfinite(vals)]
                lats, lons = misc.grid2latlon(ini.lat0,ini.lon0,ini.x_res, ini.y_res, ini.grid_size, xvals, yvals)
                if len(xvals) > 0:
                    sct = ax.scatter(lons, lats, s=30, vmin=0.15 * cskval,
                        vmax=cskval + 0.15 * cskval, marker='o', c=vals, cmap=cm, \
                        edgecolor='none', transform=background.crs.as_geodetic(),zorder=10)
                # Draw station dots
                sct2 = plot_stat(ax, ini, nstations, pyr, csk.ghi[csk.tind], k,
                    transform=background.crs.as_geodetic())
            else:
                sct = ax.scatter(xvals, yvals, s=30, vmin=0.15 * csk.ghi[csk.tind],
                    vmax=csk.ghi[csk.tind] + 0.15 * csk.ghi[csk.tind], marker='o', c=vals, cmap=cm,
                    edgecolor='none')

    # Colorbar (sct may be undefined if no panel drew a mappable)
    try:
        cbar = plt.colorbar(mappable=sct, pad=.02, aspect=18, shrink=0.85)
    except ( AttributeError, TypeError, UnboundLocalError ):
        pass

    # Select area to cut from image
    imgsize = in_img.orig_color.shape
    x0 = int(ini.cy-ini.fx)
    if x0 < 0: x0 = 0
    x1 = int(ini.cy+ini.fx)
    if x1 > imgsize[0]: x1 = imgsize[0]
    y0 = int(ini.cx-ini.fy)
    if y0 < 0: y0 = 0
    y1 = int(ini.cx+ini.fy)
    if y1 > imgsize[1]: y1 = imgsize[1]

    # Origin Image
    plt.subplot2grid((nrows,ncols), (0,0), colspan=1, rowspan=1)
    img = in_img.orig_color_draw.copy()
    img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
    #img = rotate(img[x0:x1,y0:y1],-np.degrees(ini.rot_angles[2]))
    cv2.circle(img,(ysun,xsun),15,0,-1)
    img = img[x0:x1,y0:y1]
    plt.axis('off')
    plt.imshow(img)
    plt.title('Original Image')
    del img

    # RBR
    ax = plt.subplot2grid((nrows,ncols), (1,1))
    img = in_img.rbr.copy() * 1.0
    img[mask] = np.nan
    img = img[x0:x1,y0:y1]
    a = ax.imshow(img,vmin=ini.rbr_thres-0.2, vmax=ini.rbr_thres+0.2,cmap=plt.cm.viridis)
    cbar = plt.colorbar(a,pad=.03,aspect=15,shrink=0.7, format="%.2f" )
    plt.axis('off')
    if csl == 0: plt.title('RBR')
    if csl == 1: plt.title('RBR - CSL')
    if csl == 2: plt.title('RBR corrected')

    if hist_flag:
        in_img.rbr[mask]=np.nan
        plt.subplot2grid((nrows,ncols), (2,0),colspan=1)
        plt.hist((in_img.rbr.flatten()), \
            range=(0.3,1.3), bins=125, color="red",alpha=0.5,normed=True)
        plt.ylim(0,15)
        plt.axvline(ini.rbr_thres, color='b', linestyle='dashed', linewidth=2)
        plt.legend(['RBR threshold','RBR'],loc=2)

    if ini.csi_mode == "hist" and ini.radiation:
        ind = pyr[k].tind
        y = np.divide( pyr[k].ghi[ind-ini.avg_csiminmax:ind], csk.ghi[csk.tind-ini.avg_csiminmax:csk.tind] )
        y = y[np.isfinite(y)]
        # only draw the k* histogram if enough valid samples exist
        if len(y) > (0.6*ini.avg_csiminmax):
            ax = plt.subplot2grid((nrows,ncols), (2,1),colspan=1)
            plt.hist((y),bins=ini.hist_bins, color="red",range=(0.0,1.5))
            plt.axvline(pyr[k].csi_min, color='b', linestyle='dashed', linewidth=2)
            plt.axvline(pyr[k].csi_max, color='b', linestyle='dashed', linewidth=2)
            plt.xlim(0.2,1.5)
            ax.text(0.2,1.05,'k* histogram',fontsize=9,transform=ax.transAxes)

    # Clear Sky Reference
    if csl == 1:
        plt.subplot2grid((nrows,ncols), (1,0))
        img = in_img.cslimage
        img[mask] = np.nan
        img = img[x0:x1,y0:y1]
        a = plt.imshow(img,vmin=0.5, vmax=1.2,cmap=plt.cm.viridis)
        plt.title('CSL')
        plt.axis('off')
        plt.colorbar(a,pad=.03, aspect=15,shrink=0.7)

    if ini.plot_features:
        # print feature name/value pairs in two text columns
        for f in range(0,len(features.vec)):
            if f > len(features.vec)/2:
                xo = 0.7; yo = 0.3-(f-len(features.vec)/2)/50.
            else:
                xo = 0.43; yo = 0.3-f/50.
            txt = '%g' % (features.vec[f])
            fig.text(xo,yo,features.names[f][0:26])
            fig.text(xo+0.17,yo,txt)

    # RBR differences
    # img = in_img.rbr_orig - in_img.rbr
    # plt.subplot2grid((nrows,ncols), (1,0))
    # img[mask] = np.nan
    # a = plt.imshow(img[x0:x1,y0:y1],cmap=plt.cm.get_cmap('bwr'),vmin=-0.2,vmax=0.2)
    # plt.axis('off')
    # plt.colorbar(a,pad=.03, aspect=15,shrink=0.7)
    # plt.title('RBR diff')

    # Binary cloud mask
    plt.subplot2grid((nrows,ncols), (0,1))
    img = in_img.binary_color.copy()
    img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    img[in_img.mask_horizon] = 0
    img = img[x0:x1,y0:y1]
    #img = rotate(img,np.degrees(ini.rot_angles[2]))
    plt.title('Cloud decision')
    plt.axis('off')
    plt.imshow(img)

    # Draw timeseries
    past = ini.plot_last_vals
    horizon = int(ini.fcst_horizon/ini.fcst_res) + 1
    horizon = int(ini.fcst_horizon)  # overrides the previous line
    if hist_flag:
        ax = plt.subplot2grid((nrows,ncols), (2,2),colspan=3)
    elif ini.plot_features:
        ax = plt.subplot2grid((nrows,ncols), (2,0),colspan=2)
    else:
        ax = plt.subplot2grid((nrows,ncols), (2,0),colspan=5)
    maxval = 0
    i = 0
    if ini.radiation:
        # Plot measurements
        if ini.live:
            slc = slice(pyr[k].tind-past,pyr[k].tind,ini.fcst_res)
            x = pyr[k].time[slc]
            y = pyr[k].ghi[slc]
            y2 = pyr[k].dhi[slc]
        else:
            slc = slice(pyr[k].tind-past,pyr[k].tind+horizon,ini.fcst_res)
            x = pyr[k].time[slc]
            y = pyr[k].ghi[slc]
            y2 = pyr[k].dhi[slc]
        dates=[datetime.utcfromtimestamp(ts) for ts in x ]
        if len(dates) > 0: plt.plot(dates, y, 'b-',lw=2.0, label="Measurement")
        if len(y2) > 0:
            fill_between(dates,0,y2,alpha=0.5,linewidth=0,facecolor="yellow", label="DHI")
            fill_between(dates,y2,y,alpha=0.5,linewidth=0,facecolor="orange", label="DNI")
        # Analysis Values
        nvals = ini.plot_last_vals / ini.camera_res / ini.rate
        x = pyr[k].aghi_time[-int(nvals):]
        dates=[datetime.utcfromtimestamp(ts) for ts in x if ~np.isnan(ts) ]
        if len(dates) > 0:
            y = pyr[k].aghi[-len(dates):]
            plt.plot(dates, y, 'gv', label="Analysis")
        # Clear sky irradiance
        slc = slice(csk.tind-ini.plot_last_vals, csk.tind+ini.fcst_horizon, ini.fcst_res)
        x = csk.time[slc]
        dates=[datetime.utcfromtimestamp(ts) for ts in x ]
        y = csk.ghi[slc]
        plt.plot(dates, y, '--', color='black', label="Clear Sky")
        maxval = 1.7 * csk.actval
        plt.ylabel('Irradiance in $Wm^{-2}$')
        # Forecast Values
        x = pyr[k].ftime
        dates=[ datetime.utcfromtimestamp(ts) for ts in x if ~np.isnan(ts) ]
        y = pyr[k].fghi[:len(dates)]
        plt.plot(dates,y,'r-',lw=2.0, label="Forecast")
    # Vertical line to plot current time instance
    plt.axvline(actdate, color='b', linestyle='--', lw=2.0)
    plt.xlabel('Time [UTC]')
    plt.legend(loc="upper left", ncol=3, fontsize=8)
    plt.ylim([0,maxval])
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
    ax.xaxis.set_major_locator(LinearLocator(numticks=6))
    ax.xaxis_date()

    # Draw Text
    ax = plt.subplot2grid((nrows,ncols), (0,4),rowspan=2)
    ax.axis('off')
    nowtime = datetime.strftime(datetime.utcnow(),"%Y-%m-%d %H:%M:%S")
    acttime = datetime.strftime(actdate,"%Y-%m-%d %H:%M:%S")
    loctime = str(in_img.locdate.isoformat(' '))
    ax.text(-0.3,0.95,acttime + str(' UTC'), weight="bold")
    ax.text(0.2,0.02,'Created:\n' + nowtime + str(' UTC'),fontsize=9)
    ax.text(-0.3,0.9,"Sun Zenith = " + str(round(params['sza'],1)) + '$^\circ$' )
    ax.text(-0.3,0.86,"Sun Azimuth = " + str(round(params['saz'],1)) + '$^\circ$' )
    if ini.cbh_flag: ax.text(-0.3,0.82,'Cloud Base Height: ' + \
        str(int(params['cbh'])) + ' m ')
    ax.text(-0.3,0.79,'Cloud Type: ' + ccstr_long + ' ' + ccstr_short + ' ' + cpstr)
    ax.text(-0.3,0.72,'Radiation measurements \n' + params['txt'] + ':' )
    ax.text(-0.3,0.65,"GHI = " + str(round(params['ghi'],1)) + ' $W/m^2$ (' + str(round(params['csi_ghi'],2))+')' )
    ax.text(-0.3,0.61,"DHI = " + str(round(params['dhi'],1)) + ' $W/m^2$ (' + str(round(params['csi_dhi'],2))+')' )
    ax.text(-0.3,0.57,"DNI = " + str(round(params['dni'],1)) + ' $W/m^2$ (' + str(round(params['csi_dni'],2))+')' )
    if ini.mode <= 1:
        ax.text(-0.3,0.40,'Cloud Cover = ' + str(round(params['cc'],1)) + ' %' )
        if ini.flow_flag:
            if ini.cbh_flag:
                unit = "m/s"
            else:
                unit = "pix/s"
            ax.text(-0.3, 0.34, '#CMV = ' + str(np.sum(cmv.flag)))
            um = cmv.speed[-1]; vm = cmv.direction[-1]
            ume = cmv.sspeed[-1]; vme = cmv.sdirection[-1]
            ax.text(-0.3,0.30,'All speed = ' + str(round(um,2)) + '$\pm$' + str(round(ume,2)) + unit)
            ax.text(-0.3,0.26,'All direction = ' + str(round(np.degrees(vm),2)) + '$\pm$' + str(round(np.degrees(vme),2)) +'$^\circ$')
            um = cmv.mean_speed; vm = cmv.mean_direction
            ume = cmv.std_speed; vme = cmv.std_direction
            ax.text(-0.3,0.22,'Global speed = ' + str(round(um,2)) + '$\pm$' + str(round(ume,2)) + unit)
            ax.text(-0.3,0.18,'Global direction = ' + str(round(np.degrees(vm),2)) + '$\pm$' + str(round(np.degrees(vme),2)) +'$^\circ$')
    ax.text(-0.3,0.14,'Lens Clear = ' + str(params['img_qc']))
    if in_img.useful_image:
        qc = "OK"
    else:
        qc = "BAD"
    ax.text(-0.3,0.10,'Quality Flag = ' + qc)

    # Final settings
    # NOTE(review): these assignments overwrite the Figure methods with
    # floats instead of calling them (fig.set_figwidth(16.) etc.); they are
    # no-ops kept as-is because "fixing" them would change the saved output.
    fig.set_figwidth = 16.
    fig.set_figheight = 9.
    fig.set_dpi = 50.
    fig.subplots_adjust(hspace=0.15,wspace=0.4,left=0.05, right=0.97, top=0.95, bottom=0.08)
    plt.savefig(outfile,format=ini.outformat)
    plt.clf()
    plt.close('all')
def plot_stat(ax, ini, nstations, pyr, cskval, k, transform=None):
    """Scatter the latest station GHI observations onto *ax*.

    Parameters
    ----------
    ax : matplotlib axes to draw on.
    ini : configuration object (grid geometry, averaging ``rate``, origin
        lat/lon, resolutions).
    nstations : number of pyranometer stations in *pyr*.
    pyr : sequence of station objects exposing ``map_x``/``map_y`` grid
        positions, a ``ghi`` series, the current index ``tind`` and ``qflag``.
    cskval : clear-sky GHI used to anchor the colour scale.
    k : station index plotted when working in grid coordinates.
    transform : optional cartopy-style transform; when given, stations are
        drawn in geographic (lat/lon) coordinates instead of grid indices.

    Returns
    -------
    The matplotlib scatter artist.
    """
    # BUGFIX: the previous first assignment (plt.cm.get_cmap('RdBu_r')) was
    # dead code, immediately overwritten by the cmocean colormap below.
    cm = cmocean.cm.solar

    xsct = []
    ysct = []
    val = []
    flag = []
    isdata = []

    # Collect position, time-averaged GHI and QC flag for every station.
    # Note the deliberate axis swap: map_y feeds the plot x-coordinate.
    for i in range(0, nstations):
        x = float(pyr[i].map_y)
        y = float(pyr[i].map_x)
        # Mean over the last ``ini.rate`` samples up to the current index.
        z = np.nanmean(pyr[i].ghi[pyr[i].tind - ini.rate:pyr[i].tind])
        xsct.append(x)
        ysct.append(y)
        val.append(z)
        if np.isfinite(z):
            isdata.append(True)
            flag.append(pyr[i].qflag[pyr[i].tind])
        else:
            isdata.append(False)
            flag.append(-1)

    isdata = np.array(isdata)
    xsct = np.array(xsct)
    ysct = np.array(ysct)
    val = np.array(val)

    if transform is not None:
        # Geographic coordinates. Both sub-branches used identical
        # grid2latlon calls, so the conversion is hoisted here.
        lats, lons = misc.grid2latlon(ini.lat0, ini.lon0, ini.x_res, ini.y_res,
                                      ini.grid_size, xsct, ysct)
        if np.sum(isdata) == 0:
            # No station delivered valid data: mark them all with red crosses.
            sct = ax.scatter(lons, lats,
                             s=25, marker='x', c="red", transform=transform)
        else:
            sct = ax.scatter(lons[isdata], lats[isdata], s=25, marker='x',
                             vmin=0.15 * cskval, vmax=cskval + 0.15 * cskval,
                             c=val[isdata], cmap=cm, edgecolor='none',
                             transform=transform, zorder=30)
    else:
        # Grid coordinates: plot only the single station selected via ``k``.
        sct = ax.scatter(xsct[k], ysct[k], s=25, marker='o',
                         vmin=0.15 * cskval, vmax=cskval + 0.15 * cskval,
                         c=val[k], cmap=cm, edgecolor='none')
    return sct
def plot_detection_full(outfile, ini, in_img, mask, **params):
    """Write a 2x5 panel overview of the cloud-detection pipeline to *outfile*.

    Panels: a) original RGB, b) masked pixel intensity, c) clear-sky-library
    (CSL) image, d) RBR difference (original - modified), e) original
    red-blue ratio (RBR), f) modified RBR, g) cloud map from the plain RBR
    threshold, h) corrected cloud decision map, i)/j) RBR histograms with
    the decision threshold marked.

    NOTE: mutates ``in_img.rbr_orig`` and ``in_img.rbr`` in place (masked
    pixels are set to NaN for the histogram panels).
    """
    ncols = 5
    nrows = 2
    textsize = 19

    # Crop window around the image centre (cx, cy), clamped to the bounds.
    imgsize = in_img.orig_color.shape
    x0 = int(ini.cy - ini.fx)
    if x0 < 0: x0 = 0
    x1 = int(ini.cy + ini.fx)
    if x1 > imgsize[0]: x1 = imgsize[0]
    y0 = int(ini.cx - ini.fy)
    if y0 < 0: y0 = 0
    y1 = int(ini.cx + ini.fy)
    if y1 > imgsize[1]: y1 = imgsize[1]

    fig = plt.figure(figsize=(15, 6))

    # a) Original image
    ax = plt.subplot2grid((nrows, ncols), (0, 0))
    img = in_img.orig_color.copy()
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    img = img[x0:x1, y0:y1]
    plt.axis('off')
    ax.text(0.03, 0.85, 'a)', color="white", fontweight="bold", fontsize=textsize, transform=ax.transAxes)
    plt.imshow(img)

    # b) Pixel intensity (masked)
    ax = plt.subplot2grid((nrows, ncols), (1, 0))
    img = 1.0 * in_img.orig_gray.copy()   # float copy so NaN masking works
    img[mask] = np.nan
    img = img[x0:x1, y0:y1]
    a = plt.imshow(img, vmin=0, vmax=255, cmap=plt.cm.viridis)
    cb = plt.colorbar(a, shrink=0.7, aspect=15)
    for t in cb.ax.get_yticklabels():
        t.set_fontsize(12)
    ax.text(0.03, 0.9, 'b)', color="black", fontweight="bold", fontsize=textsize, transform=ax.transAxes)
    plt.axis('off')

    # c) CSL image -- only drawn if the attribute exists on the image object
    ax = plt.subplot2grid((nrows, ncols), (0, 1))
    try:
        img = in_img.cslimage
        img[mask] = np.nan
        a = plt.imshow(img[x0:x1, y0:y1], vmin=0.5, vmax=1.2, cmap=plt.cm.viridis)
        ax.text(0.03, 0.9, 'c)', color="black", fontweight="bold", fontsize=textsize, transform=ax.transAxes)
        plt.axis('off')
        cb = plt.colorbar(a, shrink=0.7, aspect=15)
        for t in cb.ax.get_yticklabels():
            t.set_fontsize(12)
    except AttributeError:
        pass

    # e) RBR original
    ax = plt.subplot2grid((nrows, ncols), (0, 2))
    img = in_img.rbr_orig
    img[mask] = np.nan
    a = plt.imshow(img[x0:x1, y0:y1], vmin=0.5, vmax=1.2, cmap=plt.cm.viridis)
    ax.text(0.03, 0.9, 'e)', color="black", fontweight="bold", fontsize=textsize, transform=ax.transAxes)
    plt.axis('off')
    cb = plt.colorbar(a, pad=.03, aspect=15, shrink=0.7)
    for t in cb.ax.get_yticklabels():
        t.set_fontsize(12)

    # d) RBR difference (original - modified)
    img = in_img.rbr_orig - in_img.rbr
    ax = plt.subplot2grid((nrows, ncols), (1, 1))
    img[mask] = np.nan
    a = plt.imshow(img[x0:x1, y0:y1], cmap=plt.cm.get_cmap('bwr'), vmin=-0.5, vmax=0.5)
    plt.axis('off')
    ax.text(0.03, 0.9, 'd)', color="black", fontweight="bold", fontsize=textsize, transform=ax.transAxes)
    cb = plt.colorbar(a, pad=.03, aspect=15, shrink=0.7)
    for t in cb.ax.get_yticklabels():
        t.set_fontsize(12)

    # f) RBR modified, centred on the decision threshold
    ax = plt.subplot2grid((nrows, ncols), (1, 2))
    img = in_img.rbr.copy() * 1.0
    img[mask] = np.nan
    img = img[x0:x1, y0:y1]
    a = ax.imshow(img, vmin=ini.rbr_thres - 0.2, vmax=ini.rbr_thres + 0.2, cmap=plt.cm.viridis)
    cb = plt.colorbar(a, pad=.03, aspect=15, shrink=0.7)
    for t in cb.ax.get_yticklabels():
        t.set_fontsize(12)
    ax.text(0.03, 0.9, 'f)', color="black", fontweight="bold", fontsize=textsize, transform=ax.transAxes)
    plt.axis('off')

    # g) Cloud map from the plain RBR threshold (sky / cloud / mask coding)
    sky_bool = in_img.rbr_orig <= ini.rbr_thres
    cloud_bool = in_img.rbr_orig > ini.rbr_thres
    binary_color = in_img.orig_color.copy()
    binary_color[sky_bool, :] = [255, 0, 0]
    binary_color[cloud_bool, :] = [255, 255, 255]
    binary_color[mask, :] = 0  # mask
    ax = plt.subplot2grid((nrows, ncols), (0, 3))
    img = binary_color.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img[x0:x1, y0:y1]
    plt.axis('off')
    ax.imshow(img)
    ax.text(0.03, 0.85, 'g)', color="white", fontweight="bold", fontsize=textsize, transform=ax.transAxes)

    # h) Corrected cloud decision map
    ax = plt.subplot2grid((nrows, ncols), (1, 3))
    img = in_img.binary_color.copy()
    img[mask] = 0
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img[x0:x1, y0:y1]
    plt.axis('off')
    ax.imshow(img)
    ax.text(0.03, 0.85, 'h)', color="white", fontweight="bold", fontsize=textsize, transform=ax.transAxes)

    # i)/j) RBR histograms with the threshold marked.
    # BUGFIX: the 'normed' keyword was removed in matplotlib >= 3.1;
    # 'density' is the drop-in replacement.
    ax = plt.subplot2grid((nrows, ncols), (0, 4))
    in_img.rbr_orig[mask] = np.nan
    ax.hist((in_img.rbr_orig.flatten()), range=(0.0, 1.1), bins=256, density=True, color="blue")
    ax.set_ylim(0, 15)
    plt.axvline(ini.rbr_thres, color='b', linestyle='dashed', linewidth=2)
    ax.text(0.03, 0.85, 'i)', color="black", fontweight="bold", fontsize=textsize, transform=ax.transAxes)

    ax = plt.subplot2grid((nrows, ncols), (1, 4))
    in_img.rbr[mask] = np.nan
    plt.hist((in_img.rbr.flatten()), range=(0.0, 1.1), bins=256, color="blue", density=True)
    plt.ylim(0, 15)
    plt.axvline(ini.rbr_thres, color='b', linestyle='dashed', linewidth=2)
    ax.text(0.03, 0.85, 'j)', color="black", fontweight="bold", fontsize=textsize, transform=ax.transAxes)

    # Final layout and output
    fig.subplots_adjust(hspace=0.1, wspace=0.2, left=0.01, right=0.99, top=0.99, bottom=0.05)
    plt.savefig(outfile, format=ini.outformat)
    plt.clf()
def plot_detection(outfile, ini, in_img):
    """Plot only the raw image and the binary cloud decision side by side.

    Writes a 1x2 figure to *outfile* in ``ini.outformat``.
    """
    ncols = 2
    nrows = 1
    textsize = 19

    # Crop window around the image centre (cx, cy), clamped to the bounds.
    imgsize = in_img.orig_color.shape
    x0 = int(ini.cy - ini.fx)
    if x0 < 0: x0 = 0
    x1 = int(ini.cy + ini.fx)
    if x1 > imgsize[0]: x1 = imgsize[0]
    y0 = int(ini.cx - ini.fy)
    # BUGFIX: this clamp previously reset x0 ("if y0 < 0: x0 = 0"), leaving
    # a negative y0 uncorrected and silently wrapping the column slice.
    if y0 < 0: y0 = 0
    y1 = int(ini.cx + ini.fy)
    if y1 > imgsize[1]: y1 = imgsize[1]

    fig = plt.figure(figsize=(6, 3))

    # a) Original image
    ax = plt.subplot2grid((nrows, ncols), (0, 0))
    img = in_img.orig_color.copy()
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    img = img[x0:x1, y0:y1]
    plt.axis('off')
    ax.text(0.03, 0.85, 'a)', color="white", fontweight="bold", fontsize=textsize, transform=ax.transAxes)
    ax.imshow(img)

    # b) Cloud decision map: everything that is neither background (black)
    # nor sky (red) is painted white (= cloud).
    ax = plt.subplot2grid((nrows, ncols), (0, 1))
    img = in_img.binary_color.copy()
    cloud_bool = (img != [0, 0, 0]) & (img != [255, 0, 0])
    img[cloud_bool] = 255
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img[x0:x1, y0:y1]
    plt.axis('off')
    ax.imshow(img)
    ax.text(0.03, 0.85, 'b)', color="white", fontweight="bold", fontsize=textsize, transform=ax.transAxes)

    # Final layout and output
    fig.subplots_adjust(hspace=0.1, wspace=0.2, left=0.01, right=0.99, top=0.99, bottom=0.05)
    plt.savefig(outfile, format=ini.outformat)
    plt.clf()
def plot_paper_juelich(outfile, in_img, actdate, nstations, pyr, csk, ini, cmv,
                       xsun, ysun, mask, csl,
                       cmap, features, hist_flag=False, text_flag=False, **params):
    """Write the 3x3 overview figure used for the Juelich campaign.

    Panels: masked original image, cloud decision map, a shadow/forecast
    map (with the forecast track of the selected station scattered on top),
    and a GHI time-series panel (forecast, optional analysis/measurement,
    clear sky).

    ``csl``, ``features``, ``hist_flag`` and ``text_flag`` are accepted for
    interface compatibility but do not change the layout.
    """
    plt.close('all')
    fig = plt.figure(1, figsize=(9, 9), facecolor='w', edgecolor='k')
    # Layout is 3x3 in both cases (kept from the original implementation).
    ncols = 3
    nrows = 3

    # Station index: pick the first station of ini.statlist when forecasting
    # without cloud-base-height information, otherwise station 0.
    if not ini.cbh_flag and ini.fcst_flag:
        k = [j for j in range(0, nstations) if int(pyr[j].ind) == int(ini.statlist[0])][0]
    else:
        k = 0

    # Crop window around the image centre (cx, cy), clamped to the bounds.
    imgsize = in_img.orig_color.shape
    x0 = int(ini.cy - ini.fx)
    if x0 < 0: x0 = 0
    x1 = int(ini.cy + ini.fx)
    if x1 > imgsize[0]: x1 = imgsize[0]
    y0 = int(ini.cx - ini.fy)
    # BUGFIX: this clamp previously reset x0 ("if y0 < 0: x0 = 0"), leaving
    # a negative y0 uncorrected.
    if y0 < 0: y0 = 0
    y1 = int(ini.cx + ini.fy)
    if y1 > imgsize[1]: y1 = imgsize[1]

    # Masked original image, rotated north-up and trimmed to its non-zero
    # bounding box.
    plt.subplot2grid((nrows, ncols), (0, 0))
    img = in_img.orig_color_draw.copy()
    img[mask] = 0
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    img = img[x0:x1, y0:y1]
    img = rotate(img, -np.degrees(ini.rot_angles[2]))
    i1 = np.min(np.where(img != 0)[0])
    i2 = np.max(np.where(img != 0)[0])
    j1 = np.min(np.where(img != 0)[1])
    j2 = np.max(np.where(img != 0)[1])
    img = img[i1:i2, j1:j2]
    plt.axis('off')
    plt.imshow(img)
    plt.title('Masked original image')
    del img

    # Cloud decision map, same rotation/trim treatment.
    plt.subplot2grid((nrows, ncols), (1, 0))
    img = in_img.binary_color.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img[x0:x1, y0:y1]
    img = rotate(img, -np.degrees(ini.rot_angles[2]))
    i1 = np.min(np.where(img != 0)[0])
    i2 = np.max(np.where(img != 0)[0])
    j1 = np.min(np.where(img != 0)[1])
    j2 = np.max(np.where(img != 0)[1])
    img = img[i1:i2, j1:j2]
    plt.title('Cloud decision map')
    plt.axis('off')
    plt.imshow(img)

    # Shadow map: grey-level coding of the projected cloud map.
    ax = plt.subplot2grid((3, 3), (0, 1), colspan=2, rowspan=2)
    xvals = []
    yvals = []
    vals = []
    cm = plt.cm.get_cmap('jet')
    img = cv2.cvtColor(cmap, cv2.COLOR_RGB2GRAY) * 1.0
    img[cmap[:, :, 2] == 0] = np.nan    # outside the projection
    img[cmap[:, :, 2] == 255] = 200     # cloud-free
    img[(cmap[:, :, 2] > 1) & (cmap[:, :, 2] < 255)] = 100  # shaded

    # Forecast track of the selected station (positions inside the map only).
    for i in range(0, ini.fcst_horizon):
        x = int(pyr[k].fpos[i][1])
        y = int(pyr[k].fpos[i][0])
        if x > cmap.shape[0] - 2 or x <= 0 or y <= 0 or y > cmap.shape[1] - 2:
            continue
        xvals.append(x)
        yvals.append(y)
        vals.append(pyr[k].fghi[i])
    plt.scatter(xvals, yvals, s=30, vmin=0.15 * csk.actval,
                vmax=csk.actval + 0.15 * csk.actval, marker='o', c=vals,
                cmap=cm, edgecolor='none')
    plot_stat(ax, ini, nstations, pyr, csk.ghi[csk.tind], k)

    # North-arrow overlay (best effort; skipped if the asset is missing).
    arr = cv2.imread(ini.rootdir + '/config/arrow.png')
    if arr is not None:
        arr[arr == 64] = 255; arr[arr == 68] = 255; arr[arr == 0] = 255
        arr = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
        ofs = int(img.shape[0] / 150)
        # BUGFIX: cv2.resize requires integer pixel sizes; "/" produces
        # floats on Python 3 and raises.
        arr = cv2.resize(arr, (arr.shape[0] // 2, arr.shape[1] // 2))
        img[ofs:ofs + arr.shape[0], ofs:ofs + arr.shape[1]] = arr

    tit = 'Cloud base height: ' + str(params['cbh'])
    plt.title(tit, fontsize=12)
    ax.imshow(img, alpha=0.6, cmap=plt.cm.gray)

    # Relabel ticks as metric distance from the camera.
    s = (ini.grid_size * ini.x_res) / 2.
    nticks = len(ax.get_xticks()) - 1
    steps = 2. * s / (nticks - 1)
    new_ticks = list(map(int, np.arange(-s - steps, s + (2 * steps), steps)))
    ax.xaxis.set_ticklabels(new_ticks, fontsize=10)
    ax.yaxis.set_ticklabels(new_ticks, fontsize=10)
    ax.set_xlabel('longitudinal distance from camera [m]', fontsize=13)

    # GHI time series: forecast, optional analysis/measurements, clear sky.
    ax = plt.subplot2grid((nrows, ncols), (2, 0), colspan=3)

    # Forecast values
    x = csk.time[csk.tind:csk.tind + ini.fcst_horizon]
    dates = [datetime.utcfromtimestamp(ts) for ts in x]
    y = pyr[k].fghi[:]
    plt.plot_date(x=dates, y=y, fmt='r-', lw=2.0)

    if ini.radiation:
        # Analysis values (camera-rate samples of the last 30 min)
        slc = slice(csk.tind - 1800, csk.tind, ini.camera_res)
        x = csk.time[slc]
        dates = [datetime.utcfromtimestamp(ts) for ts in x]
        y = pyr[k].aghi
        plt.plot_date(x=dates, y=y, fmt='gv')
        # Measurements (GHI line, DHI/direct fraction shaded)
        x = pyr[k].time[pyr[k].tind - 1800:pyr[k].tind + ini.fcst_horizon]
        y = pyr[k].ghi[pyr[k].tind - 1800:pyr[k].tind + ini.fcst_horizon]
        y2 = pyr[k].dhi[pyr[k].tind - 1800:pyr[k].tind + ini.fcst_horizon]
        dates = [datetime.utcfromtimestamp(ts) for ts in x]
        plt.plot_date(x=dates, y=y, fmt='b-', lw=2.0)
        if len(y2) > 0:
            # NOTE(review): bare ``fill_between`` appears to come from a star
            # import (pylab-style) elsewhere in the module -- confirm.
            fill_between(dates, 0, y2, alpha=0.5, linewidth=0, facecolor="yellow")
            fill_between(dates, y2, y, alpha=0.5, linewidth=0, facecolor="orange")

    # Clear-sky irradiance (always drawn; also supplies dates for the
    # "now" marker below, so its slice must start 1800 s in the past).
    x = csk.time[csk.tind - 1800:csk.tind + ini.fcst_horizon]
    dates = [datetime.utcfromtimestamp(ts) for ts in x]
    y = csk.ghi[csk.tind - 1800:csk.tind + ini.fcst_horizon]
    plt.plot_date(x=dates, y=y, fmt='--', color='black')
    maxval = 1.5 * csk.actval
    plt.axvline(dates[1800], color='b', linestyle='--', lw=2.0)

    plt.xlabel('Time [UTC]', fontsize=14)
    plt.ylabel('Irradiance [W/m$^2$]', fontsize=14)
    if ini.radiation:
        plt.legend(['Forecast', 'Analysis', 'Measurement', 'Clear Sky'])
    else:
        plt.legend(['Forecast', 'Clear Sky'])
    plt.ylim([0, maxval])
    if ini.location == "juelich":
        plt.title('Station #' + str(pyr[k].ind))
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
    plt.grid('off')
    ax.xaxis_date()

    # Caption with location and timestamp.
    plt.text(0.7, 0.95, str(np.char.capitalize(ini.location)) + ' \n' +
             actdate + str(' UTC'), weight="bold")

    # Final layout and output
    fig.subplots_adjust(hspace=0.4, wspace=0.3, left=0.085, right=0.95, top=0.95, bottom=0.08)
    plt.draw()
    plt.savefig(outfile, dpi=200)
    plt.clf()
def plot_original_mod(img, outfile, mask, dt, ini, cam, title="Sky Imager", **meta):
    """Render the masked, rotated sky image with PIL text overlays.

    Draws the sun path, optional segment boundaries, date/time, sun
    geometry, cloud cover, a quality flag and the classified cloud type,
    then saves the annotated image to *outfile*.

    ``meta`` is expected to provide 'sza', 'saz', 'cc', 'img_qc',
    'imgclass' and 'imgprob'.
    """
    from PIL import Image, ImageDraw
    from skysol.lib.drawings import draw_boundaries, sunpath

    data = img.orig_color_draw.copy()
    data = sunpath(data, dt, ini, cam)
    data[mask, :] = 0
    try:
        data = cv2.resize(data, img.segments.shape)
        data = np.array(255 * draw_boundaries(data, img.segments), dtype="uint8")
        data = rotate(data, -np.degrees(ini.rot_angles[2]))
    except Exception:
        # BUGFIX: previously a bare "except:". Fall back to a plain resize
        # when no segmentation is available (missing ``segments``) or the
        # boundary drawing fails.
        data = cv2.resize(data, (600, 600))
        data = data[:, :, ::-1]
        data = rotate(data, -np.degrees(ini.rot_angles[2]))

    # Trim to the non-zero bounding box after rotation.
    i1 = np.min(np.where(data != 0)[0])
    i2 = np.max(np.where(data != 0)[0])
    j1 = np.min(np.where(data != 0)[1])
    j2 = np.max(np.where(data != 0)[1])
    data = data[i1:i2, j1:j2]

    # PIL part: text overlays in the corners.
    im = Image.fromarray(data, 'RGB')
    draw = ImageDraw.Draw(im)
    lx, ly = im.size
    datestr = datetime.strftime(img.locdate, "%Y-%m-%d")
    timestr = datetime.strftime(img.locdate, "%H:%M:%S")
    draw.text((10, 10), title, fill='red')
    draw.text((lx - 120, 10), datestr, fill='red', align="right")
    draw.text((lx - 120, 30), timestr, fill='red', align="right")
    draw.text((10, ly - 60), "Sun Elev. = %.1f°" % (90 - meta['sza']), fill='red', align="right")
    draw.text((10, ly - 40), "Sun Azimuth = %.1f°" % meta['saz'], fill='red', align="right")
    draw.text((10, ly - 20), "Cloud Cover = %.1f%%" % meta['cc'], fill='red', align="right")
    if meta['img_qc']:
        qf = "OK"
    else:
        qf = "BAD"
    draw.text((lx - 100, ly - 60), "QC = %s" % qf, fill='red', align="right")

    CC_short = ['Cu', 'Ci/Cs', 'Ac/Cc', 'Clear', 'Sc', 'St', 'Ns/Cb']
    # BUGFIX: the class label was previously looked up with
    # ``meta['imgclass'] - 1`` before checking ``imgclass > 0``, so an
    # unclassified image (imgclass == 0) silently showed the last class.
    if meta['imgclass'] > 0:
        ccstr_short = CC_short[meta['imgclass'] - 1]
        cpstr = str(np.round(meta['imgprob'][meta['imgclass'] - 1], 2))
    else:
        ccstr_short = "n/a"
        cpstr = "-1"
    draw.text((lx - 120, ly - 40), "%s (%s%%)" % (ccstr_short, cpstr), fill='red', align="right")
    im.save(outfile)
def plot_cmv(outfile, in_img, ini, cam, cmv,
             xsun, ysun, mask, map_x, map_y, **params):
    """Write a 2x2 overview of the cloud-motion-vector (CMV) tracking.

    Panels: fisheye RGB with tracked points, fisheye intensity with cloud
    paths, and perspective projections of the RGB image and of the binary
    cloud map. A centred summary line reports the CMV count, mean
    direction and mean speed.

    Original author notes -- Shi-Tomasi corner detection parameters:
    feature_params = dict(minDistance=50, blockSize=12), maxCorners = 500,
    qualityLevel = 0.03; example time: 2013-04-25 14:15:30.
    """
    plt.close('all')
    # Figure size
    fig = plt.figure(1,figsize=(8,8),facecolor='w', edgecolor='k')
    # Image Subplot order
    nrows = 2; ncols = 2
    # Original Image: tracked CMV pixels painted blue, mask blacked out,
    # sun position marked with a filled circle.
    ax = plt.subplot2grid((nrows,ncols), (0,0))
    img = in_img.orig_color_draw.copy()
    img[cmv.cmv_mask,:] = [0,0,255]
    img[mask] = 0
    img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
    cv2.circle(img,(ysun,xsun),20,[250,200,25],-1)
    # Trim to the non-zero bounding box; i1..j2 are reused for the next panel
    # so both fisheye panels are cropped identically.
    i1 = np.min(np.where(img!=0)[0])
    i2 = np.max(np.where(img!=0)[0])
    j1 = np.min(np.where(img!=0)[1])
    j2 = np.max(np.where(img!=0)[1])
    img = img[i1:i2,j1:j2]
    plt.axis('off')
    plt.imshow(img)
    del img
    ax.text(0.03,0.95,'Fisheye RGB Image',color="white",fontweight="bold",fontsize=12,transform=ax.transAxes)
    # Segmented cloud/sky image with CMV
    ax = plt.subplot2grid((nrows,ncols), (0,1))
    img = in_img.orig_gray.copy()
    img = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
    img[cmv.cmv_mask,:] = [255,0,0]
    # Draw every 50th cloud path
    if len(cmv.x) > 0: drawings.cloud_path(img, cmv.x, cmv.y, lcolor=[25,25,125])
    img[mask] = 0
    img[in_img.mask_horizon] = 0
    cv2.circle(img,(ysun,xsun),20,[250,200,25],-1)
    # Reuse the bounding box computed for the first panel.
    img = img[i1:i2,j1:j2]
    plt.axis('off')
    plt.imshow(img, cmap=plt.cm.viridis)
    ax.text(0.03,0.95,'Fisheye Intensity',color="white",fontweight="bold",fontsize=12,transform=ax.transAxes)
    del img
    # Projected RGB with CMV (perspective projection via the camera grid)
    ax = plt.subplot2grid((nrows,ncols), (1,0))
    img = in_img.orig_color_draw.copy()
    img[cmv.cmv_mask,:] = [0,0,255]
    img = cam.grid(img, map_x, map_y)
    img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
    plt.axis('off')
    plt.imshow(img)
    ax.text(0.03,0.95,'Perspective projection RGB',color="white",fontweight="bold",fontsize=12,transform=ax.transAxes)
    # Projected binary cloud map with CMV.
    # NOTE(review): in_img.binary_color is modified in place here (no copy)
    # -- confirm downstream users are unaffected.
    ax = plt.subplot2grid((nrows,ncols), (1,1))
    img = in_img.binary_color
    img[cmv.cmv_mask,:] = [0,0,255]
    if len(cmv.x) > 0: drawings.cloud_path(img, cmv.x, cmv.y, lcolor=[25,25,125])
    img = cam.grid(img, map_x, map_y)
    img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
    plt.axis('off')
    plt.imshow(img)
    ax.text(0.03,0.95,'Perspective projection cloud map',color="white",
            fontweight="bold",fontsize=12,transform=ax.transAxes)
    # Centred summary line between the panels.
    fig.text(0.5,0.5, "#CMV %d - Direction %.1f° - Speed %.1f" % (np.sum(cmv.flag),np.degrees(cmv.mean_direction), \
             cmv.mean_speed), fontsize=15, horizontalalignment='center')
    # Final settings
    plt.tight_layout()#pad=0.5, w_pad=0.5, h_pad=0.5)
    plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01)
    plt.draw()
    plt.savefig(outfile,dpi=100)
    plt.clf()
    plt.close('all')
|
"""Module unittests.test_sequence_algorithms.py
This module contains methods to test the sequence_algorithms module via pytest.
It uses good_mock_server to validate the positive test cases
and bad_mock_server for the negative test cases.
"""
import pytest
import json
import click
from click.testing import CliRunner
from compliance_suite.sequence_algorithms import *
from compliance_suite.test_runner import TestRunner
from compliance_suite.tests import Test
from unittests.constants import GOOD_SERVER_URL as good_mock_server, BAD_SERVER_URL as bad_mock_server
def _build_runner(server_url):
    """Create a TestRunner for *server_url* with the default session params."""
    runner = TestRunner(server_url)
    runner.session_params = {
        "limit": 400000,
        "trunc512": True,
        "circular_supported": True,
        "redirection": None
    }
    return runner


# Shared runners: one against the compliant mock server, one against the
# deliberately broken mock server.
good_runner = _build_runner(good_mock_server)
bad_runner = _build_runner(bad_mock_server)
def testing_sequence_algorithms():
    """No-op target; exists only so a shared Test object can be built."""


# Single Test instance reused (and mutated) by every test below.
test = Test(testing_sequence_algorithms)
def test_sequence_implement():
    """sequence_implement passes on the good server, fails on the bad one."""
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        sequence_implement(test, runner)
        assert test.result == expected
def test_sequence_implement_default():
    """Default-encoding retrieval passes on good, fails on bad server."""
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        sequence_implement_default(test, runner)
        assert test.result == expected
def test_sequence_query_by_trunc512():
    """trunc512 checksum queries pass on good, fail on bad server."""
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        sequence_query_by_trunc512(test, runner)
        assert test.result == expected
def test_sequence_invalid_checksum_404_error():
    """Unknown checksum must yield 404; good server passes, bad fails."""
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        sequence_invalid_checksum_404_error(test, runner)
        assert test.result == expected
def test_sequence_invalid_encoding_406_error():
    """Unsupported encoding must yield 406; good passes, bad fails."""
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        sequence_invalid_encoding_406_error(test, runner)
        assert test.result == expected
def test_sequence_start_end():
    """start/end sub-sequence support passes on good, fails on bad server."""
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        sequence_start_end(test, runner)
        assert test.result == expected
def test_sequence_start_end_success_cases():
    """Each (query, start, end) case returns the expected sequence length."""
    test.cases = [
        (['?start=10&end=10', 10, 10], 0),
        (['?start=10&end=20', 10, 20], 10),
        (['?start=10&end=11', 10, 11], 1),
        (['?start=230208', 230208, None], 10),
        (['?end=5', None, 5], 5),
        (['?start=230217&end=230218', 230217, 230218], 1),
        (['?start=0', 0, None], 230218),
        (['?&end=230218', None, 230218], 230218),
        (['?start=0&end=230218', 0, 230218], 230218),
        (['?start=1&end=230218', 1, 230218], 230217),
        (['?start=230217', 230217, None], 1),
        (['?end=0', None, 0], 0)
    ]
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        test.case_outputs = []
        sequence_start_end_success_cases(test, runner)
        assert len(test.case_outputs) == len(test.cases)
        for outcome in test.case_outputs:
            assert outcome["result"] == expected
def test_sequence_range():
    """Range-header retrieval passes on good, fails on bad server."""
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        sequence_range(test, runner)
        assert test.result == expected
def test_sequence_range_success_cases():
    """Each byte-range case yields the expected status code and length."""
    test.cases = [
        (['bytes=10-19', 10, 19], [206, 10]),
        (['bytes=10-230217', 10, 230217], [206, 230208]),
        (['bytes=10-999999', 10, 999999], [206, 230208]),
        (['bytes=0-230217', 0, 230217], [206, 230218]),
        (['bytes=0-999999', 0, 999999], [206, 230218]),
        (['bytes=0-0', 0, 0], [206, 1]),
        (['bytes=230217-230217', 230217, 230217], [206, 1])
    ]
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        test.case_outputs = []
        sequence_range_success_cases(test, runner)
        assert len(test.case_outputs) == len(test.cases)
        for outcome in test.case_outputs:
            assert outcome["result"] == expected
def test_sequence_circular():
    """Circular (wrap-around) queries return the expected sub-sequences."""
    test.cases = [
        ('?start=5374&end=5', ['ATCCAACCTGCAGAGTT', 17]),
        ('?start=5374&end=0', ['ATCCAACCTGCA', 12]),
        ('?start=5380&end=25', ['CCTGCAGAGTTTTATCGCTTCCATGACGCAG', 31]),
    ]
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        test.case_outputs = []
        sequence_circular(test, runner)
        assert len(test.case_outputs) == len(test.cases)
        for outcome in test.case_outputs:
            assert outcome["result"] == expected
def test_sequence_start_end_errors():
    """Malformed or out-of-bounds start/end queries yield 400/416."""
    test.cases = [
        (['6681ac2f62509cfc220d78751b8dc524', '?start=abc&end=20'], 400),
        (['6681ac2f62509cfc220d78751b8dc524', '?start=-10&end=-29', {}], 400),
        (['6681ac2f62509cfc220d78751b8dc524', '?start=abc'], 400),
        # Range out of bounds. Size of the sequence being tested is 5386.
        (['3332ed720ac7eaa9b3655c06f6b9e196', '?start=67&end=5387'], 416),
        (['3332ed720ac7eaa9b3655c06f6b9e196', '?start=5386&end=5375'], 416),
        (['3332ed720ac7eaa9b3655c06f6b9e196', '?start=5386&end=5386'], 416),
        (['3332ed720ac7eaa9b3655c06f6b9e196', '?start=5386&end=5'], 416),
    ]
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        test.case_outputs = []
        sequence_start_end_errors(test, runner)
        assert len(test.case_outputs) == len(test.cases)
        for outcome in test.case_outputs:
            assert outcome["result"] == expected
def test_sequence_range_errors():
    """Malformed or out-of-bounds Range headers yield 400/416."""
    test.cases = [
        (['6681ac2f62509cfc220d78751b8dc524', 'units=20-30'], 400),
        (['6681ac2f62509cfc220d78751b8dc524', 'bytes=ab-19'], 400),
        (['6681ac2f62509cfc220d78751b8dc524', 'bytes=-10--19'], 400),
        (['6681ac2f62509cfc220d78751b8dc524', 'bytes=10--19'], 400),
        (['6681ac2f62509cfc220d78751b8dc524', 'bytes=-10-'], 400),
        (['6681ac2f62509cfc220d78751b8dc524', 'bytes==10-19'], 400),
        # Range out of bounds as fbs > lbs which is not allowed
        (['3332ed720ac7eaa9b3655c06f6b9e196', 'bytes=5200-19'], 416),
        (['3332ed720ac7eaa9b3655c06f6b9e196', 'bytes=59-50'], 416),
        (['3332ed720ac7eaa9b3655c06f6b9e196', 'bytes=5385-5382'], 416),
        # Range out of bounds. Size of the sequence tested is 5386
        (['3332ed720ac7eaa9b3655c06f6b9e196', 'bytes=5387-5391'], 416),
        (['3332ed720ac7eaa9b3655c06f6b9e196', 'bytes=5386-5387'], 416),
        (['3332ed720ac7eaa9b3655c06f6b9e196', 'bytes=9999-99999'], 416)
    ]
    for runner, expected in ((good_runner, 1), (bad_runner, -1)):
        test.result = 2
        test.case_outputs = []
        sequence_range_errors(test, runner)
        assert len(test.case_outputs) == len(test.cases)
        for outcome in test.case_outputs:
            assert outcome["result"] == expected
def test_sequence_circular_support_true_errors():
    """Error cases for servers advertising circular-sequence support."""
    test.cases = [
        (['6681ac2f62509cfc220d78751b8dc524', '?start=220218&end=671'], 416)
    ]

    def run_with(runner):
        test.result = 2
        test.case_outputs = []
        sequence_circular_support_true_errors(test, runner)

    # With circular support disabled in the session params the test skips.
    good_runner.session_params["circular_supported"] = False
    run_with(good_runner)
    assert test.result == 0

    # With circular support enabled the cases are actually exercised.
    good_runner.session_params["circular_supported"] = True
    run_with(good_runner)
    assert len(test.case_outputs) == len(test.cases)
    for outcome in test.case_outputs:
        # good_mock_server supports circular sequences, so the status
        # code != 501 as expected.
        assert outcome["result"] == 1

    run_with(bad_runner)
    assert len(test.case_outputs) == len(test.cases)
    for outcome in test.case_outputs:
        # bad_mock_server supports circular sequences but provides
        # incorrect error codes.
        assert outcome["result"] == -1
def test_sequence_circular_support_false_errors():
    """Error cases for servers NOT advertising circular-sequence support."""
    test.cases = [
        (['6681ac2f62509cfc220d78751b8dc524', '?start=220218&end=671'], 501),
        (['3332ed720ac7eaa9b3655c06f6b9e196', '?start=20&end=4'], 501)
    ]

    def run_with(runner):
        test.result = 2
        test.case_outputs = []
        sequence_circular_support_false_errors(test, runner)

    # With circular support enabled in the session params the test skips.
    good_runner.session_params["circular_supported"] = True
    run_with(good_runner)
    assert test.result == 0

    # With circular support disabled the cases are exercised.
    good_runner.session_params["circular_supported"] = False
    run_with(good_runner)
    assert len(test.case_outputs) == len(test.cases)
    for outcome in test.case_outputs:
        # good_mock_server supports circular sequences, so the status
        # code is not the expected 501.
        assert outcome["result"] == -1

    run_with(bad_runner)
    # bad_mock_server supports circular sequences and also provides
    # incorrect error codes.
    for outcome in test.case_outputs:
        assert outcome["result"] == -1
<reponame>haygcao/UnicomDailyTask<filename>activity/womail/mailxt5.py
# -*- coding: utf8 -*-
import re
import requests
from utils.common import Common
from utils.bol import rsa_encrypt_password
from lxml import etree
from random import randint
class XT5CoreMail(Common):
    """Daily-task client for the China Unicom "Wo Mail" webmail (mail.wo.cn).

    Logs in to the CoreMail backend with an RSA-encrypted password and then
    reports a series of club "userAction" events to collect activity credit.

    NOTE(review): several call sites in this file were anonymized to
    ``<PASSWORD>``/``<EMAIL>`` placeholders; they have been reconstructed
    from the module's own import (``rsa_encrypt_password``) and the uid
    cookie pattern ``{mobile}%40wo.cn`` — confirm against the upstream repo.
    """

    def __init__(self, mobile, password):
        super(XT5CoreMail, self).__init__()
        self.mobile = mobile
        self.password = password
        self.session = requests.Session()
        self.session.headers = requests.structures.CaseInsensitiveDict({
            "Referer": "https://mail.wo.cn/",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"
        })

    def getSid(self):
        """Scrape a session id (sid) from the landing page.

        Also plants the ``uid`` cookie expected by later requests.
        Returns the sid string, or '' if none was found.
        """
        url = 'https://mail.wo.cn/'
        resp = self.session.get(url)
        text = resp.text
        result = re.findall(r'&sid=([^"]+)', text)
        self.session.cookies.update({
            'uid': f'{self.mobile}%40wo.cn'
        })
        if result:
            return result[0]
        return ''

    def getTempSession(self):
        """Request a temporary session id via the XML API; '' on failure."""
        url = 'https://mail.wo.cn/coremail/s/?func=user:getTempSession'
        resp = self.session.post(url=url)
        text = resp.content
        try:
            return etree.HTML(text).xpath('string(//object[@name="var"]/string[@name="sid"])')
        except Exception:
            return ''

    def getPasswordKey(self):
        """Fetch the RSA public key used to encrypt the login password.

        Returns a ``(sid, key)`` tuple; ``key`` is a dict with 'type',
        'e' and 'n' fields as delivered by the JSON endpoint.
        """
        sid = self.getSid()
        url = f'https://mail.wo.cn/coremail/s/json?sid={sid}&func=user:getPasswordKey'
        resp = self.session.post(url=url)
        data = resp.json()
        return data.get('var', {}).get('sid', ''), data.get('var', {}).get('key', {})

    def login(self):
        """Log in through the web form; returns the authenticated sid."""
        sid, key = self.getPasswordKey()
        url = f'https://mail.wo.cn/coremail/index.jsp?cus=1&sid={sid}'
        data = {
            'locale': 'zh_CN',
            'nodetect': 'false',
            'destURL': '',
            'supportLoginDevice': 'true',
            'accessToken': '',
            'timestamp': '',
            'signature': '',
            'nonce': '',
            'device': '{"uuid":"webmail_windows","imie":"webmail_windows","friendlyName":"chrome 95","model":"windows","os":"windows","osLanguage":"zh-CN","deviceType":"Webmail"}',
            'supportDynamicPwd': 'true',
            'supportBind2FA': 'true',
            'authorizeDevice': '',
            'loginType': '',
            'uid': self.mobile,
            'domain': '',
            # Password is RSA-encrypted with the key fetched above.
            'password': rsa_encrypt_password(self.password, key),
            'action:login': ''
        }
        resp = self.session.post(url=url, data=data)
        # The post-login page embeds the live sid as: sid = "..."
        return re.findall(r'[\s\S]+sid = "(.+)"', resp.text)[0]

    def cmcuLogin(self, sid, key):
        """Alternative login via the cmcu phone API (debug helper)."""
        timestamp = self.timestamp  # provided by Common
        url = f'https://mail.wo.cn/coremail/s/?func=cmcu:login&sid={sid}'
        data = {
            "uid": f"{self.mobile}@wo.cn",
            "password": rsa_encrypt_password(self.password, key),
            "locale": "zh_CN",
            "supportDynamicPwd": True,
            "supportSms": True,
            "device": {
                "uuid": f"hxphone_{timestamp}",
                "model": "android",
                "os": "android",
                "imie": f"hxphone_{timestamp}",
                "friendlyName": "na",
                "osLanguage": "zh-CN",
                "deviceType": "Hxphone"
            }
        }
        resp = self.session.post(url=url, json=data, headers={
            'Content-Type': 'text/x-json',
            'Origin': 'https://mail.wo.cn',
            'Referer': 'https://mail.wo.cn/coremail/hxphone/',
            'X-CM-SERVICE': 'PHONE'
        })
        print(resp.text)

    def userLogin(self, sid, key):
        """Log in via the phone user API; returns the token fields as a dict.

        Empty-string values are returned when the XML response cannot be
        parsed.
        """
        timestamp = self.timestamp  # provided by Common
        url = f'https://mail.wo.cn/coremail/s/?func=user:login&sid={sid}'
        data = {
            "uid": f"{self.mobile}@wo.cn",
            "password": rsa_encrypt_password(self.password, key),
            "locale": "zh_CN",
            "supportDynamicPwd": True,
            "supportSms": True,
            "device": {
                "uuid": f"hxphone_{timestamp}",
                "model": "android",
                "os": "android",
                "imie": f"hxphone_{timestamp}",
                "friendlyName": "na",
                "osLanguage": "zh-CN",
                "deviceType": "Hxphone"
            }
        }
        resp = self.session.post(url=url, json=data, headers={
            'Content-Type': 'text/x-json',
            'Origin': 'https://mail.wo.cn',
            'Referer': 'https://mail.wo.cn/coremail/hxphone/',
            'X-CM-SERVICE': 'PHONE'
        })
        text = resp.content
        print(text)
        fields = ('sid', 'uid', 'primaryEmail', 'accessToken', 'accessSecret', 'nonce')
        try:
            doc = etree.HTML(text)
            return {
                name: doc.xpath(f'string(//object[@name="var"]/string[@name="{name}"])')
                for name in fields
            }
        except Exception:
            return {name: '' for name in fields}

    def addClubInfo(self, sid, userAction):
        """Report one club activity event (*userAction*) for credit."""
        url = f'https://mail.wo.cn/coremail/s/json?func=club:addClubInfo&sid={sid}'
        data = {"userAction": userAction}
        resp = self.session.post(url=url, json=data, headers={
            'Content-Type': 'text/x-json'
        })
        print(resp.json())

    def run(self):
        """Log in and replay the daily activity actions with random pauses."""
        sid = self.login()
        self.addClubInfo(sid, 'login')
        self.flushTime(randint(1, 3))
        self.addClubInfo(sid, 'baiduCloud')
        self.flushTime(randint(1, 3))
        self.addClubInfo(sid, 'listMail')
        self.flushTime(randint(1, 3))
        self.addClubInfo(sid, 'uploadFile')
        self.flushTime(randint(1, 3))
        self.addClubInfo(sid, 'sendMail')
# No standalone behaviour: the client class above is expected to be
# instantiated and driven elsewhere; direct execution is a no-op.
if __name__ == '__main__':
    pass
|
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources import CINDER_VOLUME_DATASOURCE
from vitrage.datasources.heat.stack import HEAT_STACK_DATASOURCE
from vitrage.datasources import NOVA_HOST_DATASOURCE
from vitrage.datasources import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources import NOVA_ZONE_DATASOURCE
from vitrage.tests.functional.datasources.base import TestDataSourcesBase
from vitrage.tests.mocks import mock_driver
class TestHeatStack(TestDataSourcesBase):
    """Functional test: processing one heat-stack event must add the
    stack, its instance and its volume to the entity graph, with the
    stack linked to the two new resources."""

    DATASOURCES_OPTS = [
        cfg.ListOpt('types',
                    default=[HEAT_STACK_DATASOURCE,
                             NOVA_HOST_DATASOURCE,
                             NOVA_INSTANCE_DATASOURCE,
                             NOVA_ZONE_DATASOURCE,
                             CINDER_VOLUME_DATASOURCE],
                    help='Names of supported driver data sources'),
        cfg.ListOpt('path',
                    default=['vitrage.datasources'],
                    help='base path for data sources')
    ]

    # noinspection PyPep8Naming
    @classmethod
    def setUpClass(cls):
        super(TestHeatStack, cls).setUpClass()
        cls.conf = cfg.ConfigOpts()
        cls.conf.register_opts(cls.PROCESSOR_OPTS, group='entity_graph')
        cls.conf.register_opts(cls.DATASOURCES_OPTS, group='datasources')
        cls.load_datasources(cls.conf)

    def _resource_vertices(self, proc, datasource_type):
        # All graph vertices of category RESOURCE with the given type.
        return proc.entity_graph.get_vertices(
            vertex_attr_filter={
                VProps.CATEGORY: EntityCategory.RESOURCE,
                VProps.TYPE: datasource_type
            })

    def test_heat_stack_validity(self):
        # Setup: processor whose graph holds only the baseline entities.
        proc = self._create_processor_with_graph(self.conf)
        self.assertEqual(self._num_total_expected_vertices(),
                         len(proc.entity_graph))

        specs = mock_driver.simple_stack_generators(
            stack_num=1,
            instance_and_volume_num=1,
            snapshot_events=1)
        stack_event = mock_driver.generate_random_events_list(specs)[0]

        # Action
        proc.process_event(stack_event)

        # Test assertions: 3 vertices appeared (stack + instance + volume).
        self.assertEqual(self._num_total_expected_vertices() + 3,
                         len(proc.entity_graph))

        stacks = self._resource_vertices(proc, HEAT_STACK_DATASOURCE)
        self.assertEqual(1, len(stacks))

        instances = self._resource_vertices(proc, NOVA_INSTANCE_DATASOURCE)
        self.assertEqual(self.NUM_INSTANCES + 1, len(instances))

        volumes = self._resource_vertices(proc, CINDER_VOLUME_DATASOURCE)
        self.assertEqual(1, len(volumes))

        # The stack is linked to exactly its instance and its volume.
        stack_neighbors = proc.entity_graph.neighbors(stacks[0].vertex_id)
        self.assertEqual(2, len(stack_neighbors))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# finpie - a simple library to download some financial data
# https://github.com/peterlacour/finpie
#
# Copyright (c) 2020 <NAME>
#
# Licensed under the MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import re
import time
import requests
import numpy as np
import pandas as pd
import datetime as dt
import dask.dataframe as dd
# import dask.dataframe as dd
from tqdm import tqdm
from io import StringIO
#from alpha_vantage.timeseries import TimeSeries
from concurrent.futures import ThreadPoolExecutor
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# from iexfinance.stocks import get_historical_intraday
from finpie.base import DataBase
#from base import DataBase
def historical_prices(ticker, start=None, end=None):
    '''Download daily OHLCV prices for a ticker from Yahoo Finance.

    Parameters
    ----------
    ticker : str
        Yahoo Finance ticker symbol, e.g. 'AAPL'.
    start : int, optional
        Unix timestamp of the first day. Defaults to -2208988800
        (1900-01-01), i.e. the full available history.
    end : int, optional
        Unix timestamp of the last day. Defaults to today at midnight
        local time.

    Returns
    -------
    pandas.DataFrame
        Daily data indexed by date with snake_case column names.
    '''
    if start is None:  # fixed: identity comparison with None, not '=='
        # 1900-01-01 -- effectively "as far back as Yahoo has data"
        start = -2208988800
    if end is None:
        last_close = dt.datetime.today().strftime("%Y-%m-%d")
        end = int(time.mktime(time.strptime(f'{last_close} 00:00:00', '%Y-%m-%d %H:%M:%S')))
    url = f'https://query2.finance.yahoo.com/v7/finance/download/{ticker}?period1={start}&period2={end}&interval=1d'
    r = requests.get(url).text
    df = pd.read_csv(StringIO(r))
    # Normalise headers like 'Adj Close' -> 'adj_close'
    df.columns = [col.lower().replace(' ', '_') for col in df.columns]
    df.index = pd.to_datetime(df.date, format='%Y-%m-%d')
    df.drop('date', inplace=True, axis=1)
    return df
def yahoo_option_chain(ticker):
    '''Download the full option chain for a ticker from Yahoo Finance.

    Parameters
    ----------
    ticker : str
        Yahoo Finance ticker symbol.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        Calls and puts, with snake_case columns and the 'expiration' /
        'last_trade_date' Unix timestamps converted to datetimes.
    '''
    url = f'https://query2.finance.yahoo.com/v7/finance/options/{ticker}?getAllData=True'
    r = requests.get(url).json()
    calls = []
    puts = []
    for o in r['optionChain']['result'][0]['options']:
        calls.append(pd.DataFrame(o['calls']))
        puts.append(pd.DataFrame(o['puts']))

    def _clean(frames):
        # Shared post-processing for calls and puts (previously duplicated):
        # concat, camelCase -> snake_case, timestamp columns -> datetimes.
        chain = pd.concat(frames)
        chain.columns = [re.sub(r"([A-Z])", r"_\1", col).lower() for col in chain.columns]
        chain.expiration = pd.to_datetime([dt.datetime.fromtimestamp(x).date() for x in chain.expiration])
        chain.last_trade_date = pd.to_datetime([dt.datetime.fromtimestamp(x) for x in chain.last_trade_date])
        chain.reset_index(drop=True, inplace=True)
        return chain

    return _clean(calls), _clean(puts)
def cboe_option_chain(ticker, head=False):
    '''Download the delayed option-chain quote table from CBOE via Selenium.

    Parameters
    ----------
    ticker : str
        Underlying ticker symbol typed into the CBOE quote form.
    head : bool
        Passed through to the DataBase driver (headless toggle).

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame) or None
        Calls and puts (with the underlying price attached), or None when
        the download fails.
    '''
    db = DataBase()
    db.head = head
    url = 'http://www.cboe.com/delayedquote/quote-table-download'
    # Bug fix: 'driver' used to be referenced in the except-branch even when
    # _load_driver() itself raised, producing a NameError instead of the
    # intended error message.  Initialise it first and clean up in finally.
    driver = None
    try:
        driver = db._load_driver(caps='none')
        driver.get(url)
        WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//input[@id="txtTicker"]')))
        driver.find_element_by_xpath('//input[@id="txtTicker"]').send_keys(ticker)
        driver.find_element_by_xpath('//input[@id="txtTicker"]').send_keys(Keys.ENTER)
        db._downloads_done('quotedata.dat')
    except Exception:
        print('Failed to load data...')
        return None
    finally:
        if driver is not None:
            driver.close()
            driver.quit()
    # First read grabs the header row that carries the underlying price.
    df = pd.read_csv(db.download_path + '/quotedata.dat', error_bad_lines=False, warn_bad_lines=False)
    underlying_price = float(df.columns[-2])
    df = pd.read_csv(db.download_path + '/quotedata.dat', skiprows=[0, 1, 3], error_bad_lines=False, warn_bad_lines=False)
    df['underlying'] = underlying_price
    os.remove(db.download_path + '/quotedata.dat')
    df.columns = [col.replace(' ', '_').lower().replace('_date', '') for col in df.columns]
    # Put columns are suffixed '.1' in the raw file; split them out.
    puts = df.loc[:, ['expiration', 'puts'] + [col for col in df.columns if '.1' in col] + ['strike', 'underlying']]
    puts.columns = [col.replace('.1', '') for col in puts.columns]
    calls = df.loc[:, [col for col in df.columns if '.1' not in col]]
    calls.drop('puts', inplace=True, axis=1)
    return calls, puts
def historical_futures_contracts(date_range):
    '''
    Retrieve historical futures prices of all available futures contracts,
    including currency, interest rate, energy, meat, metals, softs, grains,
    soybeans, fiber and index futures.

    Notice that the download is not very fast and 20 years of data takes
    around 2 hours to download and contains around 2 million rows.

    input: pandas date range, e.g. pd.date_range('2000-01-01', '2020-01-01')
    output: pandas dataframe with prices for all available futures for the
            specified time period
    '''
    with ThreadPoolExecutor(4) as pool:
        daily = list(tqdm(pool.map(_download_prices, date_range), total=len(date_range)))
    # _download_prices returns a dask dataframe on success but a plain list
    # of failed dates on error -- keep only the successful downloads.
    # (Replaces the old 'type(i) != type([0])' check with isinstance.)
    frames = [day for day in daily if not isinstance(day, list)]
    df_out = dd.concat(frames, axis=0)
    df_out = df_out.compute()
    df_out.index.name = 'date'
    return df_out
def futures_contracts(date):
    """Futures prices for a single date.

    Thin convenience wrapper around _download_prices that materialises
    the dask result and names the index 'date'.
    """
    frame = _download_prices(date).compute()
    frame.index.name = 'date'
    return frame
def _download_prices(date):
    '''Scrape one day of futures OHLC prices from mrci.com.

    input: datetime object or 'YYYY-MM-DD' string
    output: dask dataframe (npartitions=1) with one row per contract for
            the specified date -- or, on any scraping failure, a plain
            Python list containing the failed date (callers distinguish
            the two cases by type).
    '''
    db = DataBase()
    errors = []
    if type(date) == type('str'):
        date = pd.to_datetime(date, format = '%Y-%m-%d')
    # Zero-pad month and day by hand to build the yymmdd URL fragment.
    y = str(date.year)
    if len(str(date.month)) == 2:
        m = str(date.month)
    else:
        m = '0' + str(date.month)
    if len(str(date.day)) == 2:
        d = str(date.day)
    else:
        d = '0' + str(date.day)
    try:
        url = f'https://www.mrci.com/ohlc/{y}/{y[-2:]+m+d}.php'
        soup = db._get_session(url)
        # The quotes live in the first table after the image map.
        df = pd.read_html( str(soup.find('map').find_next('table')) )[0]
        # futures_lookup.csv ships next to this module; the second read is
        # a Windows-path fallback.
        try:
            futures_lookup = pd.read_csv( os.path.dirname(__file__) + '/futures_lookup.csv').name.tolist()
        except:
            futures_lookup = pd.read_csv( os.path.dirname(__file__) + '\\futures_lookup.csv').name.tolist()
        # Rows whose first cell is a known futures name act as section
        # headers separating one contract family from the next.
        indices = [ i for i, j in enumerate(df.iloc[:,0]) if j in futures_lookup ]
        columns = ['month', 'date', 'open', 'high', 'low', 'close', 'change', 'volume', 'open_interest', 'change_in_oi' ]
        if len(df.columns) == 11:
            df = df.iloc[indices[0]:-2, :len(df.columns)-1]
        else:
            df = df.iloc[indices[0]:-2, :]
        #session.close()
    except:
        # Any failure (network, parse, missing lookup file) is reported by
        # returning the list of failed dates instead of a dataframe.
        errors.append(date)
        #session.close()
        return errors
    df.columns = columns
    #[ i for i in np.unique(df.month).tolist() if i not in futures_lookup ]
    # Stitch the per-family sections together, tagging each row with the
    # family name taken from its section header.
    # NOTE(review): rows after the FINAL section header are never appended
    # (the loop only covers pairs of consecutive headers) -- confirm the
    # last table section is intentionally dropped.
    first = True
    for i in range(1, len(indices)):
        temp = df.loc[indices[i-1]+1:indices[i]-2].copy()
        temp['future'] = df.loc[indices[i-1], 'month']
        if first:
            out = temp.copy()
            first = False
        else:
            out = out.append(temp)
    out = out[ out.iloc[:,1] != 'Total Volume and Open Interest']
    # out.to_csv('futures.csv')
    out.index = [date] * len(out) #pd.to_datetime( [ f'{i[-2:]}/{i[2:4]}/{i[:2]}' for i in out.date ] )
    # Strip '+' signs and replace 'unch' (unchanged) before numeric cast.
    out.replace('\+', '', regex = True, inplace = True)
    out.replace('unch', np.nan, inplace = True)
    out = db._col_to_float(out)
    return dd.from_pandas(out, npartitions = 1)
'''
def alpha_vantage_prices(ticker, api_token, start_date = None):
ts = TimeSeries(key = api_token, output_format = 'pandas')
data, meta_data = ts.get_daily_adjusted(symbol = ticker, outputsize = 'full' )
columns = ['open', 'high', 'low', 'close', 'adjusted_close', 'volume', 'dividend_amount', 'split_coefficient' ]
data.columns = columns
data.reset_index(level=0, inplace=True)
data.iloc[:,1:] = data.iloc[:,1:].astype('float')
data.date = pd.to_datetime(data.date)
data.sort_values('date', ascending = True, inplace = True)
data.index = data.date
if start_date != None:
data = data[start_date:]
data.reset_index(drop = True, inplace = True)
data.index = data.date
data.drop('date', axis = 1, inplace = True)
return data
'''
'''
def tingo_prices( ticker, api_token, start_date = None, end_date = None, freq = '1min'):
if start_date == None:
start_date = '1980-01-01'
if end_date == None:
end_date = dt.datetime.today().date().strftime('%Y-%m-%d')
headers = {'Content-Type': 'application/json' }
requestResponse = requests.get(f"https://api.tiingo.com/iex/{ticker}/prices?startDate={start_date}&endDate={end_date}&resampleFreq={freq}&token={api_token}", headers=headers)
df = pd.DataFrame(requestResponse.json())
df.date = pd.to_datetime(df.date)
# iterate through latest dates to get more than the last 10000 rows
last = df.copy()
df = dd.from_pandas(df, npartitions = 1)
while last.date.iloc[0].date() > pd.to_datetime(start_date):
headers = {'Content-Type': 'application/json' }
requestResponse = requests.get(f"https://api.tiingo.com/iex/{ticker}/prices?startDate={start_date}&endDate={ last.date.iloc[0].date().strftime('%Y-%m-%d')}&resampleFreq={freq}&token={api_token}", headers=headers)
temp = pd.DataFrame(requestResponse.json())
temp.date = pd.to_datetime(temp.date)
if last.iloc[0,0] == temp.iloc[0,0]:
break
last = temp.copy()
df = df.append(dd.from_pandas(temp, npartitions = 1))
df = df.compute()
df.sort_values('date', ascending = True, inplace = True)
df.index = df.date
df.drop('date', axis = 1, inplace = True)
return df
def tingo_forex_intraday( currency_pair, api_token, start_date, end_date = None, freq = '1min' ):
if end_date == None:
end_date = dt.datetime.today().date().strftime('%Y-%m-%d')
headers = {'Content-Type': 'application/json' }
requestResponse = requests.get(f'https://api.tiingo.com/tiingo/fx/{currency_pair}/prices?&endDate={end_date}&resampleFreq=1min&token={api_token}', headers = headers)
df = pd.DataFrame(requestResponse.json())
df.date = pd.to_datetime(df.date)
# iterate through latest dates to get more than the last 10000 rows
last = df.copy()
df = dd.from_pandas(df, npartitions = 1)
while last.date.iloc[0].date() > pd.to_datetime(start_date):
headers = {'Content-Type': 'application/json' }
requestResponse = requests.get(f"https://api.tiingo.com/tiingo/fx/{currency_pair}/prices?endDate={(last.date.iloc[0]).date().strftime('%Y-%m-%d')}&resampleFreq={freq}&token={api_token}", headers=headers)
try:
temp = pd.DataFrame(requestResponse.json())
temp.date = pd.to_datetime(temp.date)
if last.iloc[0,0] == temp.iloc[0,0]:
break
last = temp.copy()
df = df.append(dd.from_pandas(temp, npartitions = 1))
except:
last.date.iloc[0] -= dt.timedelta(1)
headers = {'Content-Type': 'application/json' }
requestResponse = requests.get(f"https://api.tiingo.com/tiingo/fx/{currency_pair}/prices?endDate={(last.date.iloc[0]).date().strftime('%Y-%m-%d')}&resampleFreq={freq}&token={api_token}", headers=headers)
temp = pd.DataFrame(requestResponse.json())
temp.date = pd.to_datetime(temp.date)
if last.iloc[0,0] == temp.iloc[0,0]:
break
last = temp.copy()
df = df.append(dd.from_pandas(temp, npartitions = 1))
df = df.compute()
df.sort_values('date', ascending = True, inplace = True)
df.index = df.date
df.drop('date', axis = 1, inplace = True)
return df
def iex_intraday(ticker, api_token, start_date = None, end_date = None):
if end_date == None:
date = dt.datetime.today()
else:
date = pd.to_datetime(end_date)
if start_date == None:
start_date = pd.to_datetime('2000-01-01')
df = dd.from_pandas(get_historical_intraday(ticker, date, token = api_token, output_format = 'pandas'), npartitions = 1)
e, i = 0, 0
date = dt.datetime.today() - dt.timedelta(i)
while e <= 5 and date > start_date:
date = dt.datetime.today() - dt.timedelta(i)
df2 = get_historical_intraday(ticker, date, token = api_token, output_format = 'pandas')
if not df2.empty:
df = df.append(dd.from_pandas(df2, npartitions = 1))
time.sleep(.5)
e = 0
else:
e += 1
i += 1
df.sort_index(ascending = True, inplace = True)
return df
'''
|
<gh_stars>1-10
#!/usr/bin/env python
import os
import re
import argparse
from yarp import Registry
class UsrClassHandler(object):
    """Wraps a UsrClass.dat registry hive, replaying any sibling
    transaction logs (<hive>.LOG / .LOG1 / .LOG2) before use."""

    def __init__(self, usrclass_location):
        """Open the hive and attempt auto-recovery from transaction logs.

        :param usrclass_location: path to the UsrClass.dat file; its
            directory is scanned for matching LOG files.
        """
        self.hive = Registry.RegistryHive(
            open(usrclass_location, 'rb')
        )
        log_mapping = {
            "log": None,
            "log1": None,
            "log2": None
        }
        base_name = os.path.basename(usrclass_location)
        base_location = os.path.dirname(usrclass_location)
        for file_name in os.listdir(base_location):
            full_path = os.path.join(base_location, file_name)
            if not os.path.isfile(full_path):
                continue
            # Fixes: raw string (the old '\d' was an invalid escape) and
            # re.escape -- the hive name contains '.', which would
            # otherwise match any character.
            pattern = r'^{}[.](LOG\d?)$'.format(re.escape(base_name))
            match = re.search(pattern, file_name, flags=re.I)
            if match:
                group = match.group(1)
                log_mapping[group.lower()] = open(full_path, 'rb')
        print("Attempting Hive Recovery...")
        # Missing logs are passed as None; yarp handles partial sets.
        recovery_result = self.hive.recover_auto(
            log_mapping['log'],
            log_mapping['log1'],
            log_mapping['log2']
        )
        print("Recovery: {}".format(recovery_result.recovered))

    def get_bagmru_key(self):
        """Return the shellbag BagMRU key from the hive.

        NOTE(review): behaviour when the key is absent depends on
        yarp's find_key (presumably returns None) -- confirm upstream.
        """
        key = self.hive.find_key("Local Settings\\Software\\Microsoft\\Windows\\Shell\\BagMRU")
        return key
def get_arguments():
    """Build and return the command-line parser for the extractor.

    Options: -s/--source (required input hive), -p/--prefix (optional
    output-file prefix), -o/--output (required output folder).
    """
    parser = argparse.ArgumentParser(
        description="Extract shellbags from usrclass hive for research purposes."
    )
    parser.add_argument(
        "-s", "--source",
        dest="source",
        action="store",
        required=True,
        help="The source USRCLASS.DAT file."
    )
    parser.add_argument(
        "-p", "--prefix",
        dest="prefix",
        action="store",
        default="",
        required=False,
        help="Prefix to append to file names."
    )
    parser.add_argument(
        "-o", "--output",
        dest="output",
        action="store",
        required=True,
        help="The output folder."
    )
    return parser
def extract_value(key_value, out_path, out_stack, prefix):
    """Write one registry value's raw bytes to the output folder.

    Only values with a purely numeric name are dumped (presumably the
    shellbag item entries); everything else is ignored.  The output file
    is named ``<prefix><key path joined by dots>.<value name>``.
    """
    value_name = key_value.name()
    if not re.match(r"\d{1,}$", value_name):
        return
    value_data = key_value.data_raw()
    dotted_path = ".".join(out_stack)
    out_file = os.path.join(out_path, prefix + ".".join([dotted_path, value_name]))
    print("Writing out: {}".format(out_file))
    with open(out_file, 'wb') as fh:
        fh.write(value_data)
def key_extraction(key, out_path, out_stack, prefix):
    """Depth-first walk of a registry key, dumping every numeric value.

    *out_stack* tracks the dotted key path; it is mutated during descent
    and restored on the way back up.
    """
    for value in key.values():
        extract_value(value, out_path, out_stack, prefix)
    for child in key.subkeys():
        out_stack.append(child.name())
        key_extraction(child, out_path, out_stack, prefix)
        out_stack.pop()
def handle_usrclass_reg(options):
    """Open the hive named by options.source and dump its BagMRU tree
    into options.output, prefixing files with options.prefix."""
    handler = UsrClassHandler(options.source)
    bagmru_key = handler.get_bagmru_key()
    key_extraction(bagmru_key, options.output, ["BagMRU"], options.prefix)
def main():
    """Entry point: parse arguments, prepare the output folder, extract."""
    options = get_arguments().parse_args()
    print("Source: {}".format(options.source))
    if not os.path.exists(options.output):
        os.makedirs(options.output)
    # Guard clause: refuse to proceed unless the source is a real file.
    if not os.path.isfile(options.source):
        raise Exception("File needed.")
    handle_usrclass_reg(options)
# Script entry point: parse args and extract shellbags when run directly.
if __name__ == "__main__":
    main()
|
"""
Work with files: copy, move, check file exist
"""
import os
import sys
import shutil
from .makedir import makedir
# http://stackoverflow.com/questions/123198/how-do-i-copy-a-file-in-python
def copyfile(src, dst, override=False, verbosity=False):
    """Copy *src* to *dst*, creating the destination directory if needed.

    Keyword Arguments:
    src -- source file name
    dst -- destination file name
    override -- overwrite *dst* if it already exists
    verbosity -- print the operation being performed
    Return: True if the copy succeeded, False otherwise (including when
    *dst* exists and override is False, or on any OS error -- errors are
    printed, never raised).
    """
    done = False
    if verbosity:
        print('Copy file: %s -> %s' % (src, dst))
    try:
        dst_exists = os.path.exists(dst)
        if dst_exists and not override:
            print("Can't copy file: %s to %s, file already exist" % (src, dst))
        else:
            if not dst_exists:
                parent = os.path.dirname(dst)
                if parent != '':
                    makedir(parent)
            shutil.copy(src, dst)
            done = True
    except Exception as e:
        print(e)
    return done
def file_exist(fname, verbosity=False, abort_and_exit=False):
    """Return True when *fname* exists and is a regular file.

    Keyword Arguments:
    fname -- file name to check
    verbosity -- print a message when the file is missing
    abort_and_exit -- (only honoured together with verbosity) terminate
                      the program via sys.exit when the file is missing
    Return: True if the file exists, False otherwise
    """
    found = False
    try:
        found = os.path.exists(fname) and os.path.isfile(fname)
    except Exception as e:
        print(e)
    if verbosity and not found:
        print("Can't open file: '%s'" % fname)
        if abort_and_exit:
            sys.exit("ERROR! Open file: '%s'\nExit from program!" % fname)
    return found
def movefile_vrb(src, dst, verbosity=False):
    """Move (or rename) *src* to *dst*, announcing the operation when
    *verbosity* is set.  Thin wrapper over shutil.move."""
    if verbosity:
        print('Move file: %s -> %s' % (src, dst))
    shutil.move(src, dst)
def movefile(src, dst, override=False, add_index=False, verbosity=False):
    """Move or rename file

    Keyword Arguments:
    src -- source file name
    dst -- destination file name
    override -- override file, if exist
    add_index -- append a letter index ('a', 'b', ...) to the file name
                 when the destination exists (only if not overriding)
    verbosity -- verbosity operation
    Return: True if operation successful
    """
    result = False
    try:
        if os.path.exists(dst):
            if override:
                os.remove(dst)
                movefile_vrb(src, dst, verbosity)
                result = True
            else:
                if add_index:
                    if not file_exist(dst):
                        # dst exists but is not a regular file (e.g. a dir)
                        movefile_vrb(src, dst, verbosity)
                        result = True
                    else:
                        path = os.path.dirname(dst)
                        extension = os.path.splitext(dst)[1]
                        fname = os.path.basename(dst)
                        fname_without_ext = os.path.splitext(fname)[0]
                        index = 'a'
                        while True:
                            new_fname = fname_without_ext + index + extension
                            new_path = os.path.join(path, new_fname)
                            if not file_exist(new_path):
                                movefile_vrb(src, new_path, verbosity)
                                result = True
                                break
                            # try the next letter suffix
                            index = chr(ord(index) + 1)
                else:
                    # typo fixed: 'requpment' -> 'requirement'
                    print("Can't move without override requirement: %s" % dst)
        else:
            movefile_vrb(src, dst, verbosity)
            # bug fix: a plain successful move previously returned False
            # because result was never set on this path
            result = True
    except Exception as e:
        print(e)
    return result
if __name__ == '__main__':
    # Ad-hoc smoke test: copies/moves this module's own file around a
    # tmp/ subfolder and prints the outcome of each operation.
    src_file = 'files.py'
    dst_file0 = 'files0.py'
    dst_file1 = 'tmp/files1.py'
    dst_file2 = 'tmp/files2.py'
    print('\nCopy file:')
    copyfile(src_file, dst_file1, True, True)
    print('\nCopy file without override:')
    copyfile(src_file, dst_file1, False, True)
    print('\nCopy file, override:')
    copyfile(src_file, dst_file1, True, True)
    print('')
    print('\nMove file:')
    copyfile(src_file, dst_file0, True, True)
    movefile(dst_file0, dst_file2, True, False, True)
    print('\nMove file without overide:')
    # NOTE(review): the first pair below still passes override=True, so it
    # does not actually demonstrate the no-override case -- confirm intent.
    copyfile(src_file, dst_file0, True, True)
    movefile(dst_file0, dst_file2, True, False, True)
    copyfile(src_file, dst_file0, True, True)
    movefile(dst_file0, dst_file2, False, False, True)
    print('\nMove file with overide:')
    copyfile(src_file, dst_file0, True, True)
    movefile(dst_file0, dst_file2, True, False, True)
    copyfile(src_file, dst_file0, True, True)
    movefile(dst_file0, dst_file2, True, False, True)
    print('\nMove file with index adding:')
    copyfile(src_file, dst_file0, True, True)
    movefile(dst_file0, dst_file2, True, False, True)
    for i in range(3):
        copyfile(src_file, dst_file0, True, True)
        movefile(dst_file0, dst_file2, False, True, True)
|
# Fine-tune InceptionV3 (ImageNet weights, frozen base) as a 2-class
# image classifier reading from directory-structured train/valid data.

IMAGE_SIZE = (299, 299)  # The dimensions to which all images found will be resized.
BATCH_SIZE = 16
NUMBER_EPOCHS = 5
TENSORBOARD_DIRECTORY = "../logs/simple_model/tensorboard"  # NOTE: unused below
TRAIN_DIRECTORY = "../data/train/"
VALID_DIRECTORY = "../data/valid/"
NUMBER_TRAIN_SAMPLES = 17500
NUMBER_VALIDATION_SAMPLES = 5000
WEIGHTS_DIRECTORY = "../weights/"

###########
# base model
###########
from keras.applications.inception_v3 import InceptionV3

# create the base pre-trained model; include_top=False drops the ImageNet head
base_model = InceptionV3(weights='imagenet', include_top=False)

###########
# FCN layer
###########
from keras.layers import Dense, Dropout, GlobalAveragePooling2D

# classification head: global average pooling -> dense -> dropout -> softmax
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.3)(x)
predictions = Dense(2, activation='softmax')(x)

###########
# complete model
###########
from keras.models import Model

# this is the model we will train: pre-trained base + new head
model = Model(inputs=base_model.input, outputs=predictions)

############
# load weights
############
import os.path

model_save_path = WEIGHTS_DIRECTORY + 'inceptionv3_pretrained_weights.h5'
# Resume from a previous run when a checkpoint exists
# (fixed: no '== True' comparison).
if os.path.exists(model_save_path):
    print("Loading weights from: {}".format(model_save_path))
    model.load_weights(model_save_path)

#############
# Set the non trainable layers
#############
# Freeze the convolutional base: only the new head is trained.
for layer in base_model.layers:
    layer.trainable = False
print(len(base_model.layers))

#############
# Keras callbacks
#############
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint

# Early stop in case of getting worse
early_stop = EarlyStopping(monitor='val_loss', patience=3, verbose=0)
# Checkpoint the model weights (best validation loss only)
checkpoint_file_path = WEIGHTS_DIRECTORY + 'inceptionv3_pretrained_weights.h5'
checkpointer = ModelCheckpoint(filepath=checkpoint_file_path, verbose=1, save_best_only=True)
print('Setting {} as folder for checkpoints.'.format(checkpoint_file_path))
callbacks = [checkpointer, early_stop]

#############
# model optimizer
#############
OPTIMIZER_LEARNING_RATE = 1e-2
OPTIMIZER_DECAY = 1e-4
OPTIMIZER_MOMENTUM = 0.89
OPTIMIZER_NESTEROV_ENABLED = False

from keras.optimizers import SGD

optimizer = SGD(lr=OPTIMIZER_LEARNING_RATE,
                decay=OPTIMIZER_DECAY,
                momentum=OPTIMIZER_MOMENTUM,
                nesterov=OPTIMIZER_NESTEROV_ENABLED)

##############
# compile
##############
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=["accuracy"])

##############
# train data generator
##############
from keras.preprocessing.image import ImageDataGenerator

# train generator: rescaling plus light augmentation (shear/zoom/flip)
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
train_batch_generator = train_datagen.flow_from_directory(TRAIN_DIRECTORY,
                                                          target_size=IMAGE_SIZE,
                                                          class_mode='categorical',
                                                          batch_size=BATCH_SIZE)

##############
# validation data generator
##############
# validation generator: rescaling only, no augmentation
# (removed the duplicated ImageDataGenerator import that used to sit here)
validation_datagen = ImageDataGenerator(rescale=1. / 255)
valid_batch_generator = validation_datagen.flow_from_directory(VALID_DIRECTORY,
                                                               target_size=IMAGE_SIZE,
                                                               class_mode='categorical',
                                                               batch_size=BATCH_SIZE)

##############
# Training
##############
# fine-tune the model
hist = model.fit_generator(
    train_batch_generator,
    steps_per_epoch=NUMBER_TRAIN_SAMPLES / BATCH_SIZE,
    epochs=NUMBER_EPOCHS,  # epochs: Integer, total number of iterations on the data.
    validation_data=valid_batch_generator,
    validation_steps=NUMBER_VALIDATION_SAMPLES / BATCH_SIZE,
    callbacks=callbacks,
    verbose=1)

##############
# save weights
##############
print('Saving InceptionV3 training weigths to ', model_save_path)
model.save(model_save_path)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 6 13:55:32 2021
@author: ccamargo
"""
import numpy as np
import xarray as xr
import sys
sys.path.append("/Users/ccamargo/Documents/py_scripts/")
import utils_SL as sl
import utils_SLE_v2 as sle
# import utils_hec as hec
# import os
# import cmocean as cmo
# from numba import jit
import datetime as dt
import matplotlib.pyplot as plt
#%%
# Propagate per-dataset sea-level uncertainties through an OLS trend fit
# and a sea-level-equation fingerprint, for several analysis periods.
path='/Volumes/LaCie_NIOZ/data/barystatic/intrinsic_unc/use/'+'comb/'
# ds=xr.open_dataset(path+'SL_unc_JPL_IMBIE.nc')
# print(ds)
# print(ds.name)
# select time
# tdec=np.array(ds.tdec)
# (start_year, end_year_exclusive) pairs to analyse
periods =[
          (2005,2016),
          (1993,2018),
          (1993,2017),
          (2003,2017)
          ]
#% %
for period in periods:
    #% %
    # period=periods[-1]
    ds=xr.open_dataset(path+'SL_unc_JPL_IMBIE.nc')
    ds_sl=xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/use/comb/ALL_datasets_1993-2020_180x360_v3_update.nc')
    t0=period[0]
    t1=period[1]-1
    ds['tyear']=(('time'),ds.tdec)
    # Select time
    to=str(t0)+'-01-01';ti=str(t1)+'-01-01'
    ds= ds.sel(time=slice(to,ti))
    ds_sl= ds_sl.sel(time=slice(to,ti))
    tdec=np.array(ds.tyear)
    lat=np.array(ds.lat)
    lon=np.array(ds.lon)
    names=np.array(ds.name)
    # Drop the TCWS_JPL dataset always; drop all JPL (GRACE) datasets for
    # periods starting before the GRACE era.
    names = [name for name in names if not name=='TCWS_JPL']
    if t0< 2002: # beofore grace
        names = [name for name in names if not name.endswith('JPL')]
    names_sl = [name for name in np.array(ds_sl.name) ]
    # NOTE(review): reassigning the loop variable below does NOT modify
    # names_sl -- this rename loop is a no-op as written; confirm whether
    # names_sl[i] = ... was intended.
    for i,name in enumerate(names_sl):
        if '300' in name:
            name=name.split('_')[0]+'_'+name.split('_')[-1]
    # for name in names:
    #     if name in names_sl:
    #         print(name)
    ds_sl['name']=names_sl
    # Per-dataset output grids: (name, lat, lon)
    std_trend= np.zeros((len(names),len(ds.lat),len(ds.lon)))
    trend = np.zeros((len(names),len(ds.lat),len(ds.lon)))
    trend2 = np.zeros((len(names),len(ds.lat),len(ds.lon)))
    slf=np.zeros((len(names),len(ds.lat),len(ds.lon)))
    for iname,name in enumerate(names):
        #% %
        # iname=3;name=names[iname]
        da=ds.sel(name=name)
        da_sl=ds_sl.sel(name=name)
        print(name)
        # select dataset
        unc=np.array(da.SL_mm[:,:,:])
        y=np.array(da_sl.SL_mm[:,:,:])
        y_up = np.array(y+unc)
        y_low = np.array(y-unc)
        # compute trend (weighted OLS, sigma = per-point uncertainty)
        trend[iname,:,:], _,_,trend2[iname,:,:],std_trend[iname,:,:]=sl.get_OLS_trend(tdec, y, sigma=unc,lat=lat, lon=lon)
        # trend_up, _=sl.get_reg_trend_OLS(tdec, y_up, lat, lon)
        # trend_low, _=sl.get_reg_trend_OLS(tdec, y_low, lat, lon)
        # trend_bound[iname,:,:] = np.max([trend_up-trend, trend-trend_low],axis=0)
        # slf[iname,:,:] = sle.run_SLE(sle.height_to_EWH(std_trend[iname,:,:]).reshape(180,360),name)
        # ensure that the uncertainties are combined in quadrature for the fingerprint
        slf[iname,:,:] = sle.run_SLE(sle.height_to_EWH(std_trend[iname,:,:]*std_trend[iname,:,:]).reshape(180,360),name)
        slf[iname,:,:]=np.sqrt(np.abs(slf[iname,:,:]))
    #% % make data array
    da=xr.Dataset(data_vars={'intrinsic_unc_source':(('name','lat','lon'),std_trend),
                             'intrinsic_unc_SLF':(('name','lat','lon'),slf),
                             },
                  coords={'lat':lat,
                          'lon':lon,
                          'name':names})
    da.attrs['units']='mm/year'
    da['lat'].attrs['standard_name']='latitude'
    da['lat'].attrs['long_name']='Latitude'
    da['lat'].attrs['units']='degrees_north'
    da['lat'].attrs['axis']='Y'
    da['lon'].attrs['standard_name']='longitude'
    da['lon'].attrs['long_name']='Longitude'
    da['lon'].attrs['units']='degrees_east'
    da['lon'].attrs['axis']='X'
    da.attrs['metadata']='Intrinsic uncertainty obtained by computing the standard deviation of the trend, when propagating the unc in the OLS'
    da.attrs['method']=" y-> observations.\n Qyy-> variances on the diagonal.\n A=ones(length(t),2); A(:,2)=t-t0; \n Qxx=(A'*Qyy^-1*A)^-1;\nstd_trend=sqrt(Qxx(2,2));"
    # % % save (once under results/<period>/, once next to the inputs)
    pwd='/Volumes/LaCie_NIOZ/data/barystatic/results/'
    da.to_netcdf(pwd+'{}-{}/intrinsic_unc_prop_{}-{}.nc'.format(t0,t1,t0,t1))
    da.to_netcdf(path+'source_prop_SLF_{}-{}.nc'.format(t0,t1))
#%%
#% % plot
# from cartopy import crs as ccrs , feature as cfeature
# landcolor='darkgrey'
# dpi=300
# clim=0.1
# cmap='Blues'
# cmin=0
# cmax=clim
# interval = 0.01
# X=np.array(da.lon)
# Y=np.array(-da.lat)
# fontsize=25
# ticksize=20
# fig = plt.figure(figsize=(15,10), facecolor='w',dpi=dpi)
# for iname,name in enumerate(names):
# da2=da.sel(name=name)
# # print(name)
# ax1 = plt.subplot(3,2,iname+1, projection=ccrs.Robinson())
# # SLF monthly then trend:
# data=np.array(da2.intrinsic_unc_SLF [:,:])
# # print(np.nanmin(data))
# # print(np.nanmax(data))
# ax1.coastlines(resolution='110m', zorder=3,color=landcolor) # zorder=3 makes sure that no other plots overlay the coastlines
# ax1.add_feature(cfeature.LAND,color=landcolor,# alpha=0.5,
# zorder=3)
# ax1.set_global() # make sure the projection is maximised inside the plot. Makes a circle
# # make it discrete
# lv=np.arange(cmin,cmax+interval,interval)
# csf=plt.contourf(X,Y,np.abs(data),levels=lv,
# transform = ccrs.PlateCarree(),cmap=cmap)
# mu,glb,mask=sle.reg_to_glb(data,Y,X)
# glb=np.round(glb,3)
# cs=ax1.contour(X,Y,data,
# levels=[glb],
# # levels=[0.15, 0.30, 0.45, 0.6, 0.75],
# # vmin=-0.6,
# # vmax=0.6,
# transform = ccrs.PlateCarree(),
# #cmap='coolwarm',#extend='both'
# colors=('black',),linestyles=('--',),linewidths=(2,)
# )
# ax1.clabel(cs,cs.levels,fmt='%5.2f',colors='k',fontsize=12)
# cp=plt.pcolormesh(X,Y,np.abs(data),
# vmin=cmin,vmax=cmax,
# zorder=0,
# transform = ccrs.PlateCarree(),cmap=cmap)
# ax1.set_title('({}). '.format(str(name),size=fontsize))
# cbar_ax2 = fig.add_axes([0.152, 0.05, 0.72, 0.033])
# cbar2 = plt.colorbar(csf, cax=cbar_ax2, orientation='horizontal')
# cbar2.set_label(label='Intrinsic Uncertainty \n{}-{} (mm/yr)'.format(t0,t1),size=fontsize, family='serif')
# cbar2.ax.tick_params(labelsize=ticksize)
# plt.show()
|
from zencad import *
from api import Size, SimpleZenObj, CompoundZenObj
from config import EPS, EPS2, LEVER_ANGLE
# Fix the incorrectly named color: zencad spells it 'cian'; expose the
# conventional 'cyan' spelling as an alias for the classes below.
color.cyan = color.cian
class Pcb(SimpleZenObj):
    """Printed circuit board: a flat box with four mounting holes."""

    colour = color.yellow
    size = Size(72.5, 60.1, 1.3)

    hole_r = 3.5 / 2.0
    hole_vector_nw = vector3(hole_r + 1.1, size.y - hole_r - 1.5, 0.0)
    hole_vector_sw = vector3(hole_r + 1.1, size.y - hole_r - 39.0, 0.0)
    hole_vector_ne = vector3(size.x - hole_r - 1.1, size.y - hole_r - 1.5, 0.0)
    hole_vector_se = vector3(size.x - hole_r - 1.1, size.y - hole_r - 39.0, 0.0)
    hole_vectors = [hole_vector_nw, hole_vector_sw, hole_vector_ne, hole_vector_se]

    def __init__(self):
        board = box(size=self.size)
        # The drill is slightly taller than the board (EPS margins) so the
        # boolean subtraction cuts cleanly through both faces.
        drill = cylinder(r=self.hole_r, h=self.size.z + EPS2).moveZ(-EPS)
        for at in self.hole_vectors:
            board = board - drill.move(at)
        super().__init__(board)
class Lcd(SimpleZenObj):
    """LCD module body sitting on the PCB, flush with its north edge."""
    colour = color(0.0, 0.4, 0.0)

    size = Size(58.6, 38.5, 6.6 - Pcb.size.z)
    offset = vector3(6.5, Pcb.size.y - size.y, Pcb.size.z)

    def __init__(self):
        body = box(size=self.size)
        super().__init__(body.move(self.offset))
class LcdScreen(SimpleZenObj):
    """Visible screen area, centred horizontally on the LCD body."""
    colour = color.green

    size = Size(54.0, 30.0, 0.4)
    offset = vector3(
        Lcd.offset.x + (Lcd.size.x - size.x) / 2.0,
        Lcd.offset.y + 1.0,
        Lcd.offset.z + Lcd.size.z
    )

    def __init__(self):
        screen = box(size=self.size)
        super().__init__(screen.move(self.offset))
class LcdLight(SimpleZenObj):
    """Backlight prism extruded from a 2D outline east of the LCD body."""
    colour = color.white

    # Outline of the light guide in the XY plane (local coordinates).
    points = points([
        (0.0, 0.0, 0.0),
        (0.0, 34.0, 0.0),
        (7.2, 22.0, 0.0),
        (7.2, 12.0, 0.0)
    ])
    width = 3.7 - Pcb.size.z
    offset = vector3(
        Lcd.offset.x + Lcd.size.x,
        Pcb.size.y - Lcd.size.y,
        Pcb.size.z
    )

    def __init__(self):
        outline = polysegment(self.points, closed=True).fill()
        prism = extrude(proto=outline, vec=self.width)
        super().__init__(prism.move(self.offset))
class LcdWires(SimpleZenObj):
    """Wire bundle leaving the LCD over the north edge of the PCB."""
    colour = color.mech

    size = Size(9.5, 3.5, 7.0)
    offset = vector3(30.5, Pcb.size.y, 0.0)

    def __init__(self):
        bundle = box(size=self.size)
        super().__init__(bundle.move(self.offset))
class LcdMount(SimpleZenObj):
    """Mounting block that supports the LCD from below the board."""
    colour = color.mech

    size = Size(6.0, 8.0, 4.5)
    offset = vector3(67.0, 35.0, -2.0)

    def __init__(self):
        block = box(size=self.size)
        super().__init__(block.move(self.offset))
class LcdLock1(SimpleZenObj):
    """Cylindrical locking pin under the east side of the board."""
    colour = color.mech

    radius = 2.5
    height = 3.0
    offset = vector3(63.0 + radius, 24.0 + radius, -height)

    def __init__(self):
        pin = cylinder(r=self.radius, h=self.height)
        super().__init__(pin.move(self.offset))
class LcdLock2(SimpleZenObj):
    """Low cylindrical locking pad under the west side of the board."""
    colour = color.mech

    radius = 3.0
    height = 1.0
    offset = vector3(8.0, 51.0, -height)

    def __init__(self):
        pad = cylinder(r=self.radius, h=self.height)
        super().__init__(pad.move(self.offset))
class Socket(SimpleZenObj):
    """Connector socket in the PCB's south-west corner with a lever recess."""
    colour = color.cyan

    size = Size(33.0, 15.0, 12.7 - Pcb.size.z)
    offset = vector3(0.0, 0.0, Pcb.size.z)
    room_size = Size(5.7, 2.0, 6.3)

    def __init__(self):
        # Carve the lever recess out of the top face of the socket body.
        recess = box(size=self.room_size).moveZ(self.size.z - self.room_size.z)
        body = box(size=self.size) - recess
        super().__init__(body.move(self.offset))
class SocketLever(SimpleZenObj):
    """Thin cylindrical lever protruding from the socket recess."""
    colour = color.mech

    radius = 1.7 / 2.0
    length = 11.0 - radius
    offset = vector3(
        Socket.room_size.x - radius,
        radius + 0.1,
        Pcb.size.z + Socket.size.z - Socket.room_size.z + radius
    )
    # Tilt from vertical by (LEVER_ANGLE - 90) degrees around the Y axis.
    angle = deg(LEVER_ANGLE - 90)

    def __init__(self):
        rod = cylinder(r=self.radius, h=self.length).rotateY(self.angle)
        super().__init__(rod.move(self.offset))
class SocketLevelCap(SimpleZenObj):
    """Cap on the free end of the socket lever.

    NOTE(review): the class name reads like a typo for ``SocketLeverCap``;
    it is kept as-is because Device instantiates it by this name.
    """
    colour = color.cyan

    radius = 2.5
    length = 7.5

    def __init__(self):
        # Same transform chain as the lever itself, so the cap tracks it.
        cap = cylinder(r=self.radius, h=self.length).moveZ(SocketLever.length)
        cap = cap.rotateY(SocketLever.angle)
        super().__init__(cap.move(SocketLever.offset))
class SocketTerminals(SimpleZenObj):
    """Terminal strip of the socket on the underside of the PCB."""
    colour = color.mech

    size = Size(17.5, 11.0, 2.5)
    offset = vector3(9.0, 2.0, -size.z)

    def __init__(self):
        strip = box(self.size)
        super().__init__(strip.move(self.offset))
class Button(SimpleZenObj):
    """Tactile push-button base in the south-east corner of the PCB."""
    colour = color(0.2, 0.2, 0.2)

    size = Size(12.0, 12.0, 4.6 - Pcb.size.z)
    offset = vector3(
        Pcb.size.x - size.x - 1.1,
        1.8,
        Pcb.size.z
    )

    def __init__(self):
        base = box(size=self.size)
        super().__init__(base.move(self.offset))
class ButtonCap(SimpleZenObj):
    """Three-tier button cap: leg, trim ring and round top, unified."""
    colour = color.blue

    radius = 11.5 / 2.0
    height = 4.0
    trim_radius = 13.0 / 2.0
    trim_height = 1.7
    leg_radius = 4.0
    leg_height = 9.1 - Button.size.z - Pcb.size.z - trim_height
    offset = vector3(
        Pcb.size.x - 7.1,
        7.8,
        Pcb.size.z + Button.size.z
    )

    def __init__(self):
        top = cylinder(r=self.radius, h=self.height).moveZ(self.trim_height + self.leg_height)
        trim = cylinder(r=self.trim_radius, h=self.trim_height).moveZ(self.leg_height)
        leg = cylinder(r=self.leg_radius, h=self.leg_height)
        shape = unify(top + trim + leg)
        super().__init__(shape.move(self.offset))
class ButtonMount(SimpleZenObj):
    """Button mounting pad on the underside of the PCB."""
    colour = color.mech

    size = Size(9.0, 15.5, 2.5)
    offset = vector3(Pcb.size.x - 11.5, 0.0, -size.z)

    def __init__(self):
        pad = box(self.size)
        super().__init__(pad.move(self.offset))
class ContactPads(SimpleZenObj):
    """Thin strip of exposed contact pads on top of the PCB."""
    colour = color.mech

    size = Size(8.0, 15.5, 0.1)
    offset = vector3(41.3, 1.6, Pcb.size.z)

    def __init__(self):
        strip = box(self.size)
        super().__init__(strip.move(self.offset))
class Quartz(SimpleZenObj):
    """Quartz oscillator can on the underside of the PCB."""
    colour = color.mech

    size = Size(4.0, 10.0, 4.0)
    offset = vector3(48.0, 28.3, -size.z)

    def __init__(self):
        can = box(size=self.size)
        super().__init__(can.move(self.offset))
class PowerTerminals(SimpleZenObj):
    """Power terminal block under the PCB plus its round wire stub."""
    colour = color.mech

    size = Size(5.0, 9.0, 1.5)
    offset = vector3(0.5, 26.5, -size.z)
    wires_radius = 3.5 / 2.0
    wires_height = 3.5
    # Wire stub is centred on the block's X extent, at its south edge.
    wires_offset = vector3(
        offset.x + size.x / 2.0,
        offset.y + wires_radius,
        -wires_height
    )

    def __init__(self):
        block = box(self.size).move(self.offset)
        stub = cylinder(r=self.wires_radius, h=self.wires_height).move(self.wires_offset)
        super().__init__(block + stub)
class SurfaceMount(SimpleZenObj):
    """Flat polygonal plate of surface-mounted parts under the board."""
    colour = color.mech

    # Outline of the component area in the XY plane.
    points = points([
        (9.5, 19.0, 0.0), (9.5, 38.0, 0.0), (22.0, 38.0, 0.0),
        (27.0, 51.0, 0.0), (43.0, 51.0, 0.0), (55.0, 42.0, 0.0),
        (55.0, 25.0, 0.0), (46.0, 19.0, 0.0)
    ])
    width = 3

    def __init__(self):
        outline = polysegment(self.points, closed=True).fill()
        plate = extrude(proto=outline, vec=self.width)
        super().__init__(plate.moveZ(-self.width))
class Battery(SimpleZenObj):
    """Battery pack modelled as a plain box at the local origin."""
    colour = color.mech

    size = Size(26.0, 51.0, 22.0)

    def __init__(self):
        super().__init__(box(self.size))
class Device(CompoundZenObj):
    """Full device assembly: the PCB with every attached part.

    The Battery and the screw classes defined after this one are modelled
    separately and are not part of this compound.
    """
    def __init__(self):
        super().__init__(
            pcb=Pcb(),
            lcd=Lcd(),
            lcd_screen=LcdScreen(),
            lcd_light=LcdLight(),
            lcd_wires=LcdWires(),
            lcd_mount=LcdMount(),
            lcd_lock1=LcdLock1(),
            lcd_lock2=LcdLock2(),
            socket=Socket(),
            socket_lever=SocketLever(),
            socket_lever_cap=SocketLevelCap(),
            socket_terminals=SocketTerminals(),
            button=Button(),
            button_cap=ButtonCap(),
            button_mount=ButtonMount(),
            contact_pads=ContactPads(),
            # NOTE(review): 'quarts' looks like a typo for 'quartz', but the
            # keyword names the attribute stored on the compound object, so
            # renaming it could break external references — left unchanged.
            quarts=Quartz(),
            power_terminals=PowerTerminals(),
            surface_mount=SurfaceMount()
        )
class ScrewBase(SimpleZenObj):
    """Abstract screw: cylindrical shaft plus cap, flipped to point down.

    Subclasses must define ``radius``/``length`` (shaft) and
    ``cap_r``/``cap_h`` (cap) before instantiation.
    """
    radius = None  # type: float
    length = None  # type: float
    cap_r = None  # type: float
    cap_h = None  # type: float

    def __init__(self):
        shaft = cylinder(r=self.radius, h=self.length)
        cap = cylinder(r=self.cap_r, h=self.cap_h).moveZ(self.length)
        # Build pointing up, then flip around X and lift back so the cap
        # ends up on top at z = length with the tip at z = 0.
        screw = (shaft + cap).rotateX(deg(180)).moveZ(self.length)
        super().__init__(screw)
class ScrewSilver(ScrewBase):
    """Silver screw: 1.1 mm shaft radius, 8 mm long, 2 mm cap radius."""
    colour = color.mech
    radius = 1.1
    length = 8.0
    cap_r = 2.0
    cap_h = 2.0
class ScrewBlack(ScrewBase):
    """Black screw: 1.0 mm shaft radius, 7.8 mm long, 2.2 mm cap radius."""
    colour = color(0.2, 0.2, 0.2)
    radius = 1.0
    length = 7.8
    cap_r = 2.2
    cap_h = 1.8
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Core classes.
"""
# System import
import os
import pickle
from copy import deepcopy
import subprocess
# Third party import
import torch
import torch.nn.functional as func
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import torch.distributed as dist
# Package import
from pynet.datasets.core import AbstractDataManager
from pynet.utils import checkpoint, freeze_until
from pynet.history import History
from pynet.visualization import Visualizer
from pynet.observable import Observable
import pynet.metrics as mmetrics
import logging
class Base(Observable):
""" Class to perform classification.
"""
    def __init__(self, optimizer_name="Adam", learning_rate=1e-3,
                 loss_name="NLLLoss", metrics=None, use_cuda=False,
                 pretrained=None, freeze_until_layer=None, load_optimizer=True, use_multi_gpu=True,
                 **kwargs):
        """ Class instantiation.

        Observers will be notified, allowed signals are:
        - 'before_epoch'
        - 'after_epoch'

        Parameters
        ----------
        optimizer_name: str, default 'Adam'
            the name of the optimizer: see 'torch.optim' for a description
            of available optimizer.
        learning_rate: float, default 1e-3
            the optimizer learning rate.
        loss_name: str, default 'NLLLoss'
            the name of the loss: see 'torch.nn' for a description
            of available loss.
        metrics: list of str
            a list of extra metrics that will be computed.
        use_cuda: bool, default False
            wether to use GPU or CPU.
        pretrained: path, default None
            path to the pretrained model or weights.
        freeze_until_layer: str, default None
            if set, freeze the model parameters up to this layer
            (forwarded to 'pynet.utils.freeze_until').
        load_optimizer: boolean, default True
            if pretrained is set, whether to also load the optimizer's weights or not
        use_multi_gpu: boolean, default True
            if several GPUs are available, use them during forward/backward pass
        kwargs: dict
            specify directly a custom 'model', 'optimizer' or 'loss'. Can also
            be used to set specific optimizer parameters.
        """
        super().__init__(
            signals=["before_epoch", "after_epoch", "after_iteration"])
        # A custom optimizer/loss may be injected directly through kwargs.
        self.optimizer = kwargs.get("optimizer")
        self.logger = logging.getLogger("pynet")
        self.loss = kwargs.get("loss")
        self.device = torch.device("cuda" if use_cuda else "cpu")
        # The remaining kwargs are forwarded to the optimizer constructor
        # below, so strip the entries consumed here first.
        for name in ("optimizer", "loss"):
            if name in kwargs:
                kwargs.pop(name)
        if "model" in kwargs:
            self.model = kwargs.pop("model")
        if self.optimizer is None:
            if optimizer_name in dir(torch.optim):
                self.optimizer = getattr(torch.optim, optimizer_name)(
                    self.model.parameters(),
                    lr=learning_rate,
                    **kwargs)
            else:
                raise ValueError("Optimizer '{0}' uknown: check available "
                                 "optimizer in 'pytorch.optim'.")
        if self.loss is None:
            if loss_name not in dir(torch.nn):
                raise ValueError("Loss '{0}' uknown: check available loss in "
                                 "'pytorch.nn'.")
            self.loss = getattr(torch.nn, loss_name)()
        self.metrics = {}
        for name in (metrics or []):
            if name not in mmetrics.METRICS:
                raise ValueError("Metric '{0}' not yet supported: you can try "
                                 "to fill the 'METRICS' factory, or ask for "
                                 "some help!".format(name))
            self.metrics[name] = mmetrics.METRICS[name]
        if use_cuda and not torch.cuda.is_available():
            raise ValueError("No GPU found: unset 'use_cuda' parameter.")
        if pretrained is not None:
            checkpoint = None
            try:
                # Map storages to CPU so a GPU checkpoint loads on any host.
                checkpoint = torch.load(pretrained, map_location=lambda storage, loc: storage)
            except BaseException as e:
                self.logger.error('Impossible to load the checkpoint: %s' % str(e))
            if checkpoint is not None:
                if hasattr(checkpoint, "state_dict"):
                    # A whole serialized model object was saved.
                    self.model.load_state_dict(checkpoint.state_dict())
                elif isinstance(checkpoint, dict):
                    if "model" in checkpoint:
                        try:
                            ## TODO: Quick fix to modify
                            # Strip the 'module.' prefix that DataParallel
                            # adds, so the weights load into a bare model.
                            for key in list(checkpoint['model'].keys()):
                                if key.replace('module.', '') != key:
                                    checkpoint['model'][key.replace('module.', '')] = checkpoint['model'][key]
                                    del(checkpoint['model'][key])
                            #####
                            # strict=False tolerates missing/unexpected keys.
                            unexpected= self.model.load_state_dict(checkpoint["model"], strict=False)
                            self.logger.info('Model loading info: {}'.format(unexpected))
                            self.logger.info('Model loaded')
                        except BaseException as e:
                            self.logger.error('Error while loading the model\'s weights: %s' % str(e))
                            raise ValueError("")
                    if "optimizer" in checkpoint:
                        if load_optimizer:
                            try:
                                self.optimizer.load_state_dict(checkpoint["optimizer"])
                                # Move the restored optimizer state (e.g.
                                # momentum buffers) onto the target device.
                                for state in self.optimizer.state.values():
                                    for k, v in state.items():
                                        if torch.is_tensor(v):
                                            state[k] = v.to(self.device)
                            except BaseException as e:
                                self.logger.error('Error while loading the optimizer\'s weights: %s' % str(e))
                        else:
                            self.logger.warning("The optimizer's weights are not restored ! ")
                else:
                    # The checkpoint is a plain state dict.
                    self.model.load_state_dict(checkpoint)
        if freeze_until_layer is not None:
            freeze_until(self.model, freeze_until_layer)
        if use_multi_gpu and torch.cuda.device_count() > 1:
            self.model = DataParallel(self.model)
        self.model = self.model.to(self.device)
def training(self, manager: AbstractDataManager, nb_epochs: int, checkpointdir=None,
fold_index=None, epoch_index=None,
scheduler=None, with_validation=True, with_visualization=False,
nb_epochs_per_saving=1, exp_name=None, standard_optim=True,
gpu_time_profiling=False, **kwargs_train):
""" Train the model.
Parameters
----------
manager: a pynet DataManager
a manager containing the train and validation data.
nb_epochs: int, default 100
the number of epochs.
checkpointdir: str, default None
a destination folder where intermediate models/historues will be
saved.
fold_index: int or [int] default None
the index(es) of the fold(s) to use for the training, default use all the
available folds.
epoch_index: int, default None
the iteration where to start the counting from
scheduler: torch.optim.lr_scheduler, default None
a scheduler used to reduce the learning rate.
with_validation: bool, default True
if set use the validation dataset.
with_visualization: bool, default False,
whether it uses a visualizer that will plot the losses/metrics/images in a WebApp framework
during the training process
nb_epochs_per_saving: int, default 1,
the number of epochs after which the model+optimizer's parameters are saved
exp_name: str, default None
the experience name that will be launched
Returns
-------
train_history, valid_history: History
the train/validation history.
"""
train_history = History(name="Train_%s"%(exp_name or ""))
if with_validation is not None:
valid_history = History(name="Validation_%s"%(exp_name or ""))
else:
valid_history = None
train_visualizer, valid_visualizer = None, None
if with_visualization:
train_visualizer = Visualizer(train_history)
if with_validation:
valid_visualizer = Visualizer(valid_history, offset_win=10)
print(self.loss)
print(self.optimizer)
folds = range(manager.get_nb_folds())
if fold_index is not None:
if isinstance(fold_index, int):
folds = [fold_index]
elif isinstance(fold_index, list):
folds = fold_index
if epoch_index is None:
epoch_index = 0
init_optim_state = deepcopy(self.optimizer.state_dict())
init_model_state = deepcopy(self.model.state_dict())
if scheduler is not None:
init_scheduler_state = deepcopy(scheduler.state_dict())
for fold in folds:
# Initialize everything before optimizing on a new fold
self.optimizer.load_state_dict(init_optim_state)
self.model.load_state_dict(init_model_state)
if scheduler is not None:
scheduler.load_state_dict(init_scheduler_state)
loader = manager.get_dataloader(
train=True,
validation=True,
fold_index=fold)
for epoch in range(nb_epochs):
self.notify_observers("before_epoch", epoch=epoch, fold=fold)
loss, values = self.train(loader.train, train_visualizer, fold, epoch,
standard_optim=standard_optim,
gpu_time_profiling=gpu_time_profiling, **kwargs_train)
train_history.log((fold, epoch+epoch_index), loss=loss, **values)
train_history.summary()
if scheduler is not None:
scheduler.step()
print('Scheduler lr: {}'.format(scheduler.get_lr()), flush=True)
print('Optimizer lr: %f'%self.optimizer.param_groups[0]['lr'], flush=True)
if checkpointdir is not None and (epoch % nb_epochs_per_saving == 0 or epoch == nb_epochs-1) \
and epoch > 0:
if not os.path.isdir(checkpointdir):
subprocess.check_call(['mkdir', '-p', checkpointdir])
self.logger.info("Directory %s created."%checkpointdir)
checkpoint(
model=self.model,
epoch=epoch+epoch_index,
fold=fold,
outdir=checkpointdir,
name=exp_name,
optimizer=self.optimizer)
train_history.save(
outdir=checkpointdir,
epoch=epoch+epoch_index,
fold=fold)
if with_validation:
_, _, _, loss, values = self.test(loader.validation,
standard_optim=standard_optim, **kwargs_train)
valid_history.log((fold, epoch+epoch_index), validation_loss=loss, **values)
valid_history.summary()
if valid_visualizer is not None:
valid_visualizer.refresh_current_metrics()
if checkpointdir is not None and (epoch % nb_epochs_per_saving == 0 or epoch == nb_epochs-1) \
and epoch > 0:
valid_history.save(
outdir=checkpointdir,
epoch=epoch+epoch_index,
fold=fold)
self.notify_observers("after_epoch", epoch=epoch, fold=fold)
return train_history, valid_history
    def train(self, loader, visualizer=None, fold=None, epoch=None, standard_optim=True,
              gpu_time_profiling=False, **kwargs):
        """ Train the model on the trained data.

        Parameters
        ----------
        loader: a pytorch Dataloader
            yields items exposing 'inputs', 'outputs' and 'labels'.
        visualizer: Visualizer, default None
            if set, refreshed every 10 iterations.
        fold, epoch: int, default None
            kept for signature compatibility; not used in the loop itself.
        standard_optim: bool, default True
            if False, the model runs its own optimization loop.
        gpu_time_profiling: bool, default False
            if set, log CUDA-event timing of each forward pass.

        Returns
        -------
        loss: float
            the value of the loss function.
        values: dict
            the values of the metrics.
        """
        self.model.train()
        nb_batch = len(loader)
        pbar = tqdm(total=nb_batch, desc="Mini-Batch")
        values = {}
        iteration = 0
        if gpu_time_profiling:
            gpu_time_per_batch = []
        if not standard_optim:
            # The model implements its own optimization over the iterator.
            loss, values = self.model(iter(loader), pbar=pbar, visualizer=visualizer)
        else:
            losses = []
            y_pred = []
            y_true = []
            for dataitem in loader:
                pbar.update()
                inputs = dataitem.inputs
                if isinstance(inputs, torch.Tensor):
                    inputs = inputs.to(self.device)
                # Collect whichever of outputs/labels is present; a single
                # target is unwrapped so the loss gets a bare tensor.
                list_targets = []
                _targets = []
                for item in (dataitem.outputs, dataitem.labels):
                    if item is not None:
                        _targets.append(item.to(self.device))
                if len(_targets) == 1:
                    _targets = _targets[0]
                list_targets.append(_targets)
                self.optimizer.zero_grad()
                if gpu_time_profiling:
                    start_event = torch.cuda.Event(enable_timing=True)
                    end_event = torch.cuda.Event(enable_timing=True)
                    start_event.record()
                # TODO: Quick fix
                # Replace NaNs in the input so they do not poison the loss.
                inputs[torch.isnan(inputs)] = 0.0
                outputs = self.model(inputs)
                if gpu_time_profiling:
                    end_event.record()
                    torch.cuda.synchronize()
                    elapsed_time_ms = start_event.elapsed_time(end_event)
                    gpu_time_per_batch.append(elapsed_time_ms)
                batch_loss = self.loss(outputs, *list_targets)
                batch_loss.backward()
                self.optimizer.step()
                losses.append(float(batch_loss))
                y_pred.extend(outputs.detach().cpu().numpy())
                y_true.extend(list_targets[0].detach().cpu().numpy())
                # Optional auxiliary losses exposed by the model/loss are
                # averaged over the epoch.
                aux_losses = (self.model.get_aux_losses() if hasattr(self.model, 'get_aux_losses') else dict())
                aux_losses.update(self.loss.get_aux_losses() if hasattr(self.loss, 'get_aux_losses') else dict())
                for name, aux_loss in aux_losses.items():
                    if name not in values:
                        values[name] = 0
                    values[name] += float(aux_loss) / nb_batch
                if iteration % 10 == 0:
                    if visualizer is not None:
                        visualizer.refresh_current_metrics()
                        if hasattr(self.model, "get_current_visuals"):
                            visuals = self.model.get_current_visuals()
                            visualizer.display_images(visuals, ncols=3)
                iteration += 1
            loss = np.mean(losses)
            # Metrics are computed once on the whole epoch's predictions.
            for name, metric in self.metrics.items():
                if name not in values:
                    values[name] = 0
                values[name] = float(metric(torch.tensor(y_pred), torch.tensor(y_true)))
        if gpu_time_profiling:
            self.logger.info("GPU Time Statistics over 1 epoch:\n\t- {:.2f} +/- {:.2f} ms calling model(data) per batch"
                             "\n\t- {:.2f} ms total time over 1 epoch ({} batches)".format(
                np.mean(gpu_time_per_batch), np.std(gpu_time_per_batch), np.sum(gpu_time_per_batch), nb_batch))
        pbar.close()
        return loss, values
def testing(self, loader: DataLoader, with_logit=False, predict=False, with_visuals=False,
saving_dir=None, exp_name=None, standard_optim=True, **kwargs):
""" Evaluate the model.
Parameters
----------
loader: a pytorch DataLoader
with_logit: bool, default False
apply a softmax to the result.
predict: bool, default False
take the argmax over the channels.
with_visuals: bool, default False
returns the visuals got from the model
Returns
-------
y: array-like
the predicted data.
X: array-like
the input data.
y_true: array-like
the true data if available.
loss: float
the value of the loss function if true data availble.
values: dict
the values of the metrics if true data availble.
"""
if with_visuals:
y, y_true, X, loss, values, visuals = self.test(
loader, with_logit=with_logit, predict=predict, with_visuals=with_visuals,
standard_optim=standard_optim)
else:
y, y_true, X, loss, values = self.test(
loader, with_logit=with_logit, predict=predict, with_visuals=with_visuals,
standard_optim=standard_optim)
if saving_dir is not None:
if not os.path.isdir(saving_dir):
subprocess.check_call(['mkdir', '-p', saving_dir])
self.logger.info("Directory %s created."%saving_dir)
with open(os.path.join(saving_dir, (exp_name or 'test')+'.pkl'), 'wb') as f:
pickle.dump({'y_pred': y, 'y_true': y_true, 'loss': loss, 'metrics': values}, f)
if with_visuals:
return y, X, y_true, loss, values, visuals
return y, X, y_true, loss, values
    def test(self, loader, with_logit=False, predict=False, with_visuals=False, standard_optim=True):
        """ Evaluate the model on the test or validation data.

        Parameters
        ---------
        loader: a pytorch Dataset
            the data loader.
        with_logit: bool, default False
            apply a softmax to the result.
        predict: bool, default False
            take the argmax over the channels.
        with_visuals: bool, default False
            also return the visuals collected from the model.
        standard_optim: bool, default True
            if False, the model runs its own evaluation loop.

        Returns
        -------
        y: array-like
            the predicted data.
        y_true: array-like
            the true data
        X: array_like
            the input data
        loss: float
            the value of the loss function.
        values: dict
            the values of the metrics.
        """
        self.model.eval()
        nb_batch = len(loader)
        pbar = tqdm(total=nb_batch, desc="Mini-Batch")
        loss = 0
        values = {}
        visuals = []
        with torch.no_grad():
            y, y_true, X = [], [], []
            if not standard_optim:
                # The model implements its own evaluation over the iterator.
                loss, values, y, y_true, X = self.model(iter(loader), pbar=pbar)
            else:
                for dataitem in loader:
                    pbar.update()
                    inputs = dataitem.inputs
                    if isinstance(inputs, torch.Tensor):
                        inputs = inputs.to(self.device)
                    # Collect whichever of outputs/labels is present; a
                    # single target is unwrapped to a bare tensor.
                    list_targets = []
                    targets = []
                    for item in (dataitem.outputs, dataitem.labels):
                        if item is not None:
                            targets.append(item.to(self.device))
                            y_true.extend(item.cpu().detach().numpy())
                    if len(targets) == 1:
                        targets = targets[0]
                    elif len(targets) == 0:
                        targets = None
                    if targets is not None:
                        list_targets.append(targets)
                    # TODO: Quick fix
                    # Replace NaNs so they do not poison the forward pass.
                    inputs[torch.isnan(inputs)] = 0.0
                    outputs = self.model(inputs)
                    if with_visuals:
                        visuals.append(self.model.get_current_visuals())
                    if len(list_targets) > 0:
                        batch_loss = self.loss(outputs, *list_targets)
                        loss += float(batch_loss) / nb_batch
                    y.extend(outputs.cpu().detach().numpy())
                    if isinstance(inputs, torch.Tensor):
                        X.extend(inputs.cpu().detach().numpy())
                    # Auxiliary losses are averaged over the batches.
                    aux_losses = (self.model.get_aux_losses() if hasattr(self.model, 'get_aux_losses') else dict())
                    aux_losses.update(self.loss.get_aux_losses() if hasattr(self.loss, 'get_aux_losses') else dict())
                    for name, aux_loss in aux_losses.items():
                        name += " on validation set"
                        if name not in values:
                            values[name] = 0
                        values[name] += aux_loss / nb_batch
                # Now computes the metrics with (y, y_true)
                for name, metric in self.metrics.items():
                    name += " on validation set"
                    values[name] = metric(torch.tensor(y), torch.tensor(y_true))
        pbar.close()
        if len(visuals) > 0:
            visuals = np.concatenate(visuals, axis=0)
        # Post-processing may fail for exotic output shapes; report and keep
        # the raw predictions in that case.
        try:
            if with_logit:
                y = func.softmax(torch.tensor(y), dim=1).detach().cpu().numpy()
            if predict:
                y = np.argmax(y, axis=1)
        except Exception as e:
            print(e)
        if with_visuals:
            return y, y_true, X, loss, values, visuals
        return y, y_true, X, loss, values
    def MC_test(self, loader, MC=50):
        """ Evaluate the model on the test or validation data by using a Monte-Carlo sampling.

        Parameter
        ---------
        loader: a pytorch Dataset
            the data loader.
        MC: int, default 50
            nb of times to perform a feed-forward per input

        Returns
        -------
        y: array-like dims (n_samples, MC, ...) where ... is the dims of the network's output
            the predicted data.
        y_true: array-like dims (n_samples, MC, ...) where ... is the dims of the network's output
            the true data
        """
        self.model.eval()
        nb_batch = len(loader)
        pbar = tqdm(total=nb_batch, desc="Mini-Batch")
        with torch.no_grad():
            y, y_true = [], []
            for dataitem in loader:
                pbar.update()
                inputs = dataitem.inputs
                if isinstance(inputs, torch.Tensor):
                    inputs = inputs.to(self.device)
                current_y, current_y_true = [], []
                for _ in range(MC):
                    # Labels are re-appended on every MC pass so that y_true
                    # ends up with the same (n_samples, MC, ...) layout as y.
                    for item in (dataitem.outputs, dataitem.labels):
                        if item is not None:
                            current_y_true.append(item.cpu().detach().numpy())
                    # TODO: Quick fix
                    # Replace NaNs so they do not poison the forward pass.
                    inputs[torch.isnan(inputs)] = 0.0
                    outputs = self.model(inputs)
                    current_y.append(outputs.cpu().detach().numpy())
                # Swap (MC, batch, ...) -> (batch, MC, ...) before stacking.
                y.extend(np.array(current_y).swapaxes(0, 1))
                y_true.extend(np.array(current_y_true).swapaxes(0, 1))
        pbar.close()
        return np.array(y), np.array(y_true)
|
<reponame>heatherwan/Automatic-Validation-of-Simulation-Results
import importlib
import os
import socket
import sys
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from utils.Dataset_hdf5 import DatasetHDF5
from utils.Dataset_hdf5_cv import DatasetHDF5_Kfold
from Parameters import Parameters
# ===============get basic folder=====================
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
para = Parameters()
# log
MODEL = importlib.import_module(para.model)  # import network module
LOG_DIR = para.logDir
LOG_MODEL = para.logmodelDir
# NOTE(review): this handle stays open for the whole run and is shared by
# log_string(); it is only closed implicitly at interpreter exit.
LOG_FOUT = open(os.path.join(LOG_DIR, f'{para.expName}.txt'), 'w')
LOG_FOUT.write(str(para.__dict__) + '\n')
# set parameters
if para.gpu:
    # Allocate GPU memory on demand instead of grabbing it all upfront.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
else:
    # Hide all GPUs from TensorFlow to force CPU execution.
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# Batch-norm momentum decay schedule constants (see get_bn_decay).
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(para.decay_step)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
def log_string(out_str):
    """Append *out_str* to the experiment log file and echo it to stdout.

    numpy arrays (confusion matrices) are serialised with ``np.savetxt``;
    everything else is written as a plain line of text.
    """
    if isinstance(out_str, np.ndarray):
        # for confusion matrix
        np.savetxt(LOG_FOUT, out_str, fmt='%3d')
    else:
        LOG_FOUT.write(out_str + '\n')
    LOG_FOUT.flush()
    print(out_str)
def get_learning_rate(batch):
    """Exponentially decayed learning rate for the given global step.

    The base rate decays by ``para.decay_rate`` every ``para.decay_step``
    samples (staircase), and is clipped from below at 1e-5.
    """
    sample_index = batch * para.batchSize  # current index into the dataset
    decayed = tf.compat.v1.train.exponential_decay(
        para.learningRate,  # Base learning rate.
        sample_index,
        para.decay_step,    # Decay step.
        para.decay_rate,    # Decay rate.
        staircase=True)
    # CLIP THE LEARNING RATE! Never let it collapse to zero.
    return tf.maximum(decayed, 0.00001)
def get_bn_decay(batch):
    """Batch-norm decay schedule: grows towards BN_DECAY_CLIP with the step."""
    momentum = tf.compat.v1.train.exponential_decay(
        BN_INIT_DECAY,
        batch * para.batchSize,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    # The decay is the complement of the momentum, capped at the clip value.
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
class Training:
def __init__(self, trainset, testset):
self.trainDataset = trainset
self.testDataset = testset
    def train(self):
        """Build the TF1 graph, then run the train/eval loop for
        para.max_epoch epochs, checkpointing the model with the best test
        accuracy (ties broken by lower test loss).
        """
        with tf.Graph().as_default():
            with tf.device(''):
                pointclouds_pl, labels_pl = MODEL.placeholder_inputs_other(para.batchSize, para.pointNumber)
                is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
                print(is_training_pl)
                # Note the global_step=batch parameter to minimize.
                # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
                batch = tf.Variable(0)
                bn_decay = get_bn_decay(batch)
                tf.compat.v1.summary.scalar('bn_decay', bn_decay)
                # Get model and loss
                pred, end_points = MODEL.get_model_other(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
                para_num = MODEL.get_para_num()
                print(f'Total parameters number is {para_num}')
                LOG_FOUT.write(str(para_num) + '\n')
                loss = MODEL.get_loss(pred, labels_pl, end_points)
                tf.compat.v1.summary.scalar('loss', loss)
                correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
                accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(para.batchSize)
                tf.compat.v1.summary.scalar('accuracy', accuracy)
                # Get training operator
                learning_rate = get_learning_rate(batch)
                tf.compat.v1.summary.scalar('learning_rate', learning_rate)
                # NOTE(review): if para.optimizer is neither 'momentum' nor
                # 'adam', 'optimizer' is unbound and the minimize() call below
                # raises NameError — presumably Parameters validates this.
                if para.optimizer == 'momentum':
                    optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=para.momentum)
                elif para.optimizer == 'adam':
                    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
                train_op = optimizer.minimize(loss, global_step=batch)
                # Add ops to save and restore all the variables.
                saver = tf.compat.v1.train.Saver()
            # Create a session
            config = tf.compat.v1.ConfigProto()
            config.gpu_options.allow_growth = True
            config.allow_soft_placement = True
            config.log_device_placement = False
            sess = tf.compat.v1.Session(config=config)
            # Add summary writers
            merged = tf.compat.v1.summary.merge_all()
            train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, para.expName[:6] + 'train'),
                                                           sess.graph)
            test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, para.expName[:6] + 'test'),
                                                          sess.graph)
            # Init variables
            # load pre-train model
            if para.continue_train:
                saver.restore(sess, f"{LOG_MODEL}/{para.expName[:6]}.ckpt")
                log_string("Model restored.")
            else:
                init = tf.compat.v1.global_variables_initializer()
                sess.run(init, {is_training_pl: True})
            ops = {'pointclouds_pl': pointclouds_pl,
                   'labels_pl': labels_pl,
                   'is_training_pl': is_training_pl,
                   'pred': pred,
                   'loss': loss,
                   'train_op': train_op,
                   'merged': merged,
                   'step': batch,
                   'knn': end_points}
            # min_loss = np.inf
            min_loss = np.inf
            max_acc = 0
            for epoch in range(para.max_epoch):
                log_string('**** EPOCH %03d ****' % epoch)
                sys.stdout.flush()
                self.train_one_epoch(sess, ops, train_writer)
                self.trainDataset.reset()
                loss, acc = self.eval_one_epoch(sess, ops, test_writer)
                self.testDataset.reset()
                if acc > max_acc:  # save the min loss model
                    save_path = saver.save(sess, os.path.join(LOG_MODEL, f"{para.expName[:6]}.ckpt"))
                    log_string(f"Model saved on {epoch} in file: {save_path}")
                    max_acc = acc
                    min_loss = loss
                elif acc == max_acc:
                    # Same accuracy: keep the checkpoint with the lower loss.
                    if loss < min_loss:
                        save_path = saver.save(sess, os.path.join(LOG_MODEL, f"{para.expName[:6]}.ckpt"))
                        log_string(f"Model saved on {epoch} in file: {save_path}")
                        min_loss = loss
def train_one_epoch(self, sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
log_string(str(datetime.now()))
# Make sure batch data is of same size
cur_batch_data = np.zeros((para.batchSize, para.pointNumber, para.dim))
cur_batch_label = np.zeros(para.batchSize, dtype=np.int32)
# set variable for statistics
total_correct = 0
total_seen = 0
total_pred = []
loss_sum = 0
batch_idx = 0
total_seen_class = [0 for _ in range(para.outputClassN)]
total_correct_class = [0 for _ in range(para.outputClassN)]
while self.trainDataset.has_next_batch():
batch_data, batch_label = self.trainDataset.next_batch(augment=True)
# batch_data = provider.random_point_dropout(batch_data)
bsize = batch_data.shape[0]
cur_batch_data[0:bsize, ...] = batch_data[:, :, :para.dim]
cur_batch_label[0:bsize] = batch_label
feed_dict = {ops['pointclouds_pl']: cur_batch_data,
ops['labels_pl']: cur_batch_label,
ops['is_training_pl']: is_training}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']],
feed_dict=feed_dict)
train_writer.add_summary(summary, step) # tensorboard
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
loss_sum += loss_val * bsize
total_pred.extend(pred_val[0:bsize])
batch_idx += 1
for i in range(0, bsize):
l = batch_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i] == l)
log_string('Train result:')
log_string(f'mean loss: {loss_sum / float(total_seen):.3f}')
log_string(f'accuracy: {total_correct / float(total_seen):.3f}')
class_accuracies = np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float)
avg_class_acc = np.mean(class_accuracies)
log_string(f'avg class acc: {avg_class_acc:.3f}')
for i, name in para.classes.items():
log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
log_string(confusion_matrix(self.trainDataset.current_label[:len(total_pred)], total_pred))
def eval_one_epoch(self, sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
is_training = False
log_string(str(datetime.now()))
# Make sure batch data is of same size
cur_batch_data = np.zeros((para.testBatchSize, para.pointNumber, para.dim))
cur_batch_label = np.zeros(para.testBatchSize, dtype=np.int32)
# set variable for statistics
total_correct = 0
total_seen = 0
pred_label = []
loss_sum = 0
batch_idx = 0
total_seen_class = [0 for _ in range(para.outputClassN)]
total_correct_class = [0 for _ in range(para.outputClassN)]
while self.testDataset.has_next_batch():
batch_data, batch_label = self.testDataset.next_batch(augment=False)
bsize = batch_data.shape[0]
cur_batch_data[0:bsize, ...] = batch_data[:, :, :para.dim]
cur_batch_label[0:bsize] = batch_label
feed_dict = {ops['pointclouds_pl']: cur_batch_data,
ops['labels_pl']: cur_batch_label,
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']],
feed_dict=feed_dict)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
loss_sum += loss_val * bsize
batch_idx += 1
for i in range(0, bsize):
l = batch_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i] == l)
pred_label.extend(pred_val[0:bsize])
log_string('Test result:')
log_string(f'mean loss: {(loss_sum / float(total_seen)):.3f}')
log_string(f'acc: {(total_correct / float(total_seen)):.3f}')
class_accuracies = np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float)
avg_class_acc = np.mean(class_accuracies)
log_string(f'avg class acc: {avg_class_acc:.3f}')
for i, name in para.classes.items():
log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
log_string(confusion_matrix(self.testDataset.current_label[:len(pred_label)], pred_label))
return loss_sum / float(total_seen), total_correct / float(total_seen)
class Training_cv:
    """One fold of k-fold cross-validation training.

    Builds the TF graph, trains for para.max_epoch epochs on the fold's
    training split, evaluates on its validation split each epoch, and keeps
    the predictions of the epoch with the lowest validation loss.
    """

    def __init__(self, trainset, split_no=0):
        self.dataset = trainset        # DatasetHDF5_Kfold positioned on this fold
        self.split_no = split_no       # index of the validation split
        self.min_loss = np.inf         # best (lowest) validation loss so far
        self.test_loss = None          # validation loss of the best epoch
        self.test_acc = None           # validation accuracy of the best epoch
        self.prediction = None         # predicted labels of the best epoch
        self.label = None              # ground-truth labels matching self.prediction
        self.result_avgacc = None      # per-class accuracy of the best epoch

    def train(self):
        """Build the graph and run the full train/validate loop for this fold."""
        with tf.Graph().as_default():
            with tf.device(''):
                pointclouds_pl, labels_pl = MODEL.placeholder_inputs_other(para.batchSize, para.pointNumber)
                is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
                print(is_training_pl)
                # Note the global_step=batch parameter to minimize.
                # That tells the optimizer to helpfully increment the 'batch'
                # parameter for you every time it trains.
                batch = tf.Variable(0)
                bn_decay = get_bn_decay(batch)
                tf.compat.v1.summary.scalar('bn_decay', bn_decay)
                # Get model and loss
                pred, end_points = MODEL.get_model_other(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
                para_num = MODEL.get_para_num()
                print(f'Total parameters number is {para_num}')
                LOG_FOUT.write(str(para_num) + '\n')
                loss = MODEL.get_loss(pred, labels_pl, end_points)
                tf.compat.v1.summary.scalar('loss', loss)
                correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
                accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(para.batchSize)
                tf.compat.v1.summary.scalar('accuracy', accuracy)
                # Get training operator
                learning_rate = get_learning_rate(batch)
                tf.compat.v1.summary.scalar('learning_rate', learning_rate)
                if para.optimizer == 'momentum':
                    optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=para.momentum)
                elif para.optimizer == 'adam':
                    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
                else:
                    # BUGFIX: an unknown optimizer previously fell through and
                    # crashed with a NameError on the next line.
                    raise ValueError(f'unknown optimizer: {para.optimizer}')
                train_op = optimizer.minimize(loss, global_step=batch)
                # Add ops to save and restore all the variables.
                saver = tf.compat.v1.train.Saver()
            # Create a session
            config = tf.compat.v1.ConfigProto()
            config.gpu_options.allow_growth = True
            config.allow_soft_placement = True
            config.log_device_placement = False
            sess = tf.compat.v1.Session(config=config)
            # Add summary writers.
            # BUGFIX: writer / checkpoint / log names previously read the
            # *global* loop variable 'i'; use the fold index this instance
            # was constructed with (identical value at the current call site).
            split = self.split_no
            merged = tf.compat.v1.summary.merge_all()
            train_writer = tf.compat.v1.summary.FileWriter(
                os.path.join(LOG_DIR, para.expName[:6] + f'train_{split}'), sess.graph)
            test_writer = tf.compat.v1.summary.FileWriter(
                os.path.join(LOG_DIR, para.expName[:6] + f'test_{split}'), sess.graph)
            # Init variables
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init, {is_training_pl: True})
            ops = {'pointclouds_pl': pointclouds_pl,
                   'labels_pl': labels_pl,
                   'is_training_pl': is_training_pl,
                   'pred': pred,
                   'loss': loss,
                   'train_op': train_op,
                   'merged': merged,
                   'step': batch,
                   'knn': end_points}
            log_string(f'cross_validation_{split} result')
            for epoch in range(para.max_epoch):
                log_string('**** EPOCH %03d ****' % epoch)
                sys.stdout.flush()
                self.train_one_epoch(sess, ops, train_writer)
                self.dataset.reset()
                better = self.eval_one_epoch(sess, ops, test_writer)
                self.dataset.reset(train=False)
                if better:  # save the min valid loss model
                    save_path = saver.save(sess, os.path.join(LOG_MODEL, f"{para.expName[:6]}_{split}.ckpt"))
                    log_string("Model saved in file: %s" % save_path)
            # print out the final result for this validation split
            log_string('Final Result')
            log_string(f'Loss {self.min_loss}\n')
            log_string(f'Accuracy {self.test_acc}\n')
            matrix = confusion_matrix(self.label, self.prediction)
            # Per-class accuracy = diagonal over row sums of the confusion matrix.
            self.result_avgacc = matrix.diagonal() / matrix.sum(axis=1)
            log_string(classification_report(self.label, self.prediction,
                                             target_names=para.classes.values(), digits=3))
            log_string(matrix)

    def train_one_epoch(self, sess, ops, train_writer):
        """Run one training epoch over this fold's training split.

        Args:
            sess: active tf.compat.v1.Session.
            ops: dict mapping from string to tf ops.
            train_writer: tf summary FileWriter for the training split.
        """
        is_training = True
        log_string(str(datetime.now()))
        # Fixed-size buffers so a short final batch can still be fed.
        cur_batch_data = np.zeros((para.batchSize, para.pointNumber, para.dim))
        cur_batch_label = np.zeros(para.batchSize, dtype=np.int32)
        # set variable for statistics
        total_correct = 0
        total_seen = 0
        total_pred = []
        loss_sum = 0
        batch_idx = 0
        total_seen_class = [0 for _ in range(para.outputClassN)]
        total_correct_class = [0 for _ in range(para.outputClassN)]
        while self.dataset.has_next_batch():
            batch_data, batch_label = self.dataset.next_batch(augment=True)
            # batch_data = provider.random_point_dropout(batch_data)
            bsize = batch_data.shape[0]
            cur_batch_data[0:bsize, ...] = batch_data[:, :, :para.dim]
            cur_batch_label[0:bsize] = batch_label
            feed_dict = {ops['pointclouds_pl']: cur_batch_data,
                         ops['labels_pl']: cur_batch_label,
                         ops['is_training_pl']: is_training}
            summary, step, _, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)  # tensorboard
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
            total_correct += correct
            total_seen += bsize
            loss_sum += loss_val * bsize
            total_pred.extend(pred_val[0:bsize])
            batch_idx += 1
            for i in range(0, bsize):
                lbl = batch_label[i]
                total_seen_class[lbl] += 1
                total_correct_class[lbl] += (pred_val[i] == lbl)
        log_string('Train result:')
        log_string(f'mean loss: {loss_sum / float(total_seen):.3f}')
        log_string(f'accuracy: {total_correct / float(total_seen):.3f}')
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
        class_accuracies = np.array(total_correct_class) / np.array(total_seen_class, dtype=float)
        avg_class_acc = np.mean(class_accuracies)
        log_string(f'avg class acc: {avg_class_acc:.3f}')
        for i, name in para.classes.items():
            log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
        log_string(confusion_matrix(self.dataset.train_label[:len(total_pred)], total_pred))

    def eval_one_epoch(self, sess, ops, test_writer):
        """Evaluate on this fold's validation split.

        Returns:
            True when this epoch achieved a new minimum validation loss
            (its predictions/loss/accuracy are stored on the instance),
            False otherwise.
        """
        is_training = False
        log_string(str(datetime.now()))
        # Fixed-size buffers so a short final batch can still be fed.
        cur_batch_data = np.zeros((para.testBatchSize, para.pointNumber, para.dim))
        cur_batch_label = np.zeros(para.testBatchSize, dtype=np.int32)
        # set variable for statistics
        total_correct = 0
        total_seen = 0
        pred_label = []
        loss_sum = 0
        batch_idx = 0
        total_seen_class = [0 for _ in range(para.outputClassN)]
        total_correct_class = [0 for _ in range(para.outputClassN)]
        while self.dataset.has_next_batch(train=False):
            batch_data, batch_label = self.dataset.next_batch(augment=False, train=False)
            bsize = batch_data.shape[0]
            cur_batch_data[0:bsize, ...] = batch_data[:, :, :para.dim]
            cur_batch_label[0:bsize] = batch_label
            feed_dict = {ops['pointclouds_pl']: cur_batch_data,
                         ops['labels_pl']: cur_batch_label,
                         ops['is_training_pl']: is_training}
            summary, step, loss_val, pred_val = sess.run(
                [ops['merged'], ops['step'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            test_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
            total_correct += correct
            total_seen += bsize
            loss_sum += loss_val * bsize
            batch_idx += 1
            for i in range(0, bsize):
                lbl = batch_label[i]
                total_seen_class[lbl] += 1
                total_correct_class[lbl] += (pred_val[i] == lbl)
            pred_label.extend(pred_val[0:bsize])
        log_string('Test result:')
        log_string(f'mean loss: {(loss_sum / float(total_seen)):.3f}')
        log_string(f'acc: {(total_correct / float(total_seen)):.3f}')
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
        class_accuracies = np.array(total_correct_class) / np.array(total_seen_class, dtype=float)
        avg_class_acc = np.mean(class_accuracies)
        log_string(f'avg class acc: {avg_class_acc:.3f}')
        for i, name in para.classes.items():
            log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
        log_string(confusion_matrix(self.dataset.valid_label[:len(pred_label)], pred_label))
        # if the validation loss is lower, then store in globals for final presentation
        if (loss_sum / float(total_seen)) < self.min_loss:
            self.prediction = pred_label
            self.label = self.dataset.valid_label[:len(pred_label)]
            self.test_loss = loss_sum / float(total_seen)
            self.test_acc = total_correct / float(total_seen)
            self.min_loss = self.test_loss
            return True
        else:
            return False
if __name__ == "__main__":
    if para.validation:
        # K-fold cross-validation: train one Training_cv per split and
        # aggregate loss / accuracy / per-class accuracy across folds.
        trainDataset = DatasetHDF5_Kfold(para.TRAIN_FILES, batch_size=para.batchSize,
                                         npoints=para.pointNumber, dim=para.dim, shuffle=True)
        all_loss = []
        all_acc = []
        all_avgacc = []
        # NOTE(review): Training_cv.train() reads this loop variable 'i' as a
        # global (summary-writer and checkpoint names), so the name must stay
        # 'i' unless that class is updated to use its split_no attribute.
        for i in range(para.split_num):
            log_string(f'cross validation split {i} result: \n')
            trainDataset.set_data(i)  # position the dataset on fold i
            tr = Training_cv(trainDataset, i)
            start_time = time.time()
            tr.train()
            log_string(f'test loss: {tr.test_loss}')
            all_loss.append(tr.test_loss)
            log_string(f'test acc: {tr.test_acc}')
            all_acc.append(tr.test_acc)
            log_string(f'test acgacc: {tr.result_avgacc}')
            all_avgacc.append(tr.result_avgacc)
            end_time = time.time()
            log_string(f'running time:\t{(end_time - start_time) / 60} mins')
        log_string('cross validation overall result: \n')
        log_string(f'loss: {np.mean(all_loss)}')
        log_string(f'acc: {np.mean(all_acc)}')
        # axis=0 averages the per-class accuracy vectors element-wise.
        log_string(f'avgacc: {np.mean(all_avgacc, axis=0)}')
    else:
        # Single train/test run without cross-validation.
        trainDataset = DatasetHDF5(para.TRAIN_FILES, batch_size=para.batchSize,
                                   npoints=para.pointNumber, dim=para.dim, shuffle=True)
        testDataset = DatasetHDF5(para.TEST_FILES, batch_size=para.testBatchSize,
                                  npoints=para.pointNumber, dim=para.dim, shuffle=False, train=False)
        tr = Training(trainDataset, testDataset)
        start_time = time.time()
        tr.train()
        end_time = time.time()
        log_string(f'running time:\t{(end_time - start_time) / 60} mins')
    LOG_FOUT.close()
|
<filename>jupyter_home/sources/gloaders/dancer_loader.py
import re
from logging import warning
from typing import List, Dict, Tuple, Set
from os import listdir
from os.path import isfile, join, exists
import igraph
from sources.gloaders.loader_interface import LoaderInterface
class DancerLoader(LoaderInterface):
    """Load dynamic-graph snapshots produced by the DANCer generator.

    A dataset directory holds a 'parameters' file (declaring the number of
    snapshots) plus one 't<N>.graph' file per snapshot, each listing the
    vertices (with their ground-truth community) followed by the edges.
    """

    # Parser states while scanning a .graph file.
    VERTEX_STATE = 0
    EDGES_STATE = 1
    ERROR = 2

    @classmethod
    def load_from_dict(cls, d: Dict):
        """Re-create a loader from a serialized info dictionary."""
        kwargs = {
            'dataset_directory': d["dataset_directory"]
        }
        return DancerLoader(**kwargs)

    @classmethod
    def _process_vertex(cls, line: str, snapshot: igraph.Graph, members: List[int], last_id: int,
                        name_id_table: Dict[str, int], communities_ids: Set[int]):
        """Parse one vertex line ('<name>;...;<community>') into the snapshot.

        Registers the vertex, its name->id mapping and its ground-truth
        community. Returns the next free vertex id.

        Raises:
            RuntimeError: when the line contains no ';' separator.
        """
        first_index = line.find(";")
        last_index = line.rfind(";")
        if first_index != -1 and last_index != -1:
            node_name = line[0:first_index]
            # BUGFIX: strip() instead of slicing off the last character, which
            # silently dropped the community id's final digit when the file's
            # last line had no trailing newline.
            community_id = int(line[last_index + 1:].strip())
            snapshot.add_vertex(name=node_name)
            name_id_table[node_name] = last_id
            members.append(community_id)
            communities_ids.add(community_id)
            last_id += 1
            return last_id
        else:
            raise RuntimeError("format error on line '{0}'".format(line))

    @classmethod
    def _process_edge(cls, line: str, name_id_table: Dict[str, int], tuplelist: List[Tuple[int, int]]):
        """Parse one edge line ('<source>;<target>') and buffer it as an id pair."""
        first_index = line.find(";")
        if first_index != -1:
            source_name = line[0:first_index]
            # BUGFIX: rstrip('\n') instead of [:-1]; the old slice ate the last
            # character of the target name when the line had no trailing newline.
            target_name = line[first_index + 1:].rstrip("\n")
            tuplelist.append((name_id_table[source_name], name_id_table[target_name]))
        else:
            raise RuntimeError("format error")

    @classmethod
    def _remove_isolated_nodes(cls, g: igraph.Graph) -> igraph.Graph:
        """Return the subgraph induced by the largest weakly-connected component.

        Despite the name, this drops every smaller component, not just
        isolated vertices (name kept for interface compatibility).
        """
        components = g.components(mode=igraph.WEAK)
        largest = None
        largest_size = 0
        for actual_component in components:
            size = len(actual_component)
            if size > largest_size:
                largest_size = size
                largest = actual_component
        if largest is None:
            # BUGFIX: an empty graph used to crash on subgraph(None).
            return g
        return g.subgraph(largest)

    @classmethod
    def _process_graph_file(cls, file_path: str) -> Tuple[igraph.Graph, int, int, List[int], int]:
        """Parse one snapshot file into (graph, n_nodes, n_edges, members, n_comms).

        The graph is reduced to its largest connected component; community
        memberships are remapped onto the surviving vertex ids.
        """
        snapshot = igraph.Graph(directed=False)
        members = []
        comms_ids = set()
        last_id = 0
        name_id_table = {}
        tuplelist = []
        actual_state = -1
        with open(file_path, 'r') as f:
            for line in f:
                # '# Vertices' / '# Edges' headers switch the parser state.
                if line.startswith("# Vertices"):
                    actual_state = cls.VERTEX_STATE
                elif line.startswith("# Edges"):
                    actual_state = cls.EDGES_STATE
                elif not line.startswith("#"):
                    if actual_state == cls.VERTEX_STATE:
                        try:
                            last_id = cls._process_vertex(line, snapshot, members, last_id, name_id_table, comms_ids)
                        except Exception as e:
                            raise RuntimeError("Exception: {0}, reach at line '{1}', on file {2}'"
                                               .format(e, line, file_path))
                    elif actual_state == cls.EDGES_STATE:
                        try:
                            cls._process_edge(line, name_id_table, tuplelist)
                        except Exception as e:
                            raise RuntimeError("Exception: {0}, reach at line '{1}', on file {2}'"
                                               .format(e, line, file_path))
                    else:
                        raise RuntimeError("Impossible state: {0}, reach at line '{1}', on file {2}'"
                                           .format(actual_state, line, file_path))
        snapshot.add_edges(tuplelist)
        snapshot.simplify(multiple=True)
        snapshot = cls._remove_isolated_nodes(snapshot)
        if snapshot.vcount() != len(members):
            warning("dataset with more than one component, beware of ground truth!")
        members = cls._update_members(snapshot, members, name_id_table)
        n_comms = len(comms_ids)
        return snapshot, snapshot.vcount(), snapshot.ecount(), members, n_comms

    @classmethod
    def _read_n_snapshots(cls, dir_path: str) -> int:
        """Read the declared snapshot count ('nbTimestamps: N') from 'parameters'."""
        whole_path = join(dir_path, "parameters")
        if exists(whole_path):
            with open(whole_path, 'r') as f:
                for line in f:
                    if line.startswith("nbTimestamps"):
                        index = line.find(":")
                        return int(line[index + 1:-1])
            raise RuntimeError("'nbTimestamps' not found on file '{0}'".format(whole_path))
        else:
            raise FileNotFoundError("parameters file not found in directory '{0}'".format(dir_path))

    @classmethod
    def _process_directory(cls, dir_path: str) -> Tuple[List[igraph.Graph], List[int], List[int], List[List[int]], List[int]]:
        """Load every 't<N>.graph' snapshot in dir_path, ordered by N."""
        n_snapshots = cls._read_n_snapshots(dir_path)
        # Independent placeholders per slot: the former `[x] * n` literals
        # shared a single object across all slots (harmless here only because
        # every slot gets reassigned, but an aliasing landmine).
        snapshots = [igraph.Graph() for _ in range(n_snapshots)]
        n_nodes = [0] * n_snapshots
        n_edges = [0] * n_snapshots
        comms = [[] for _ in range(n_snapshots)]
        n_comms = [0] * n_snapshots
        # BUGFIX: raw string ('\d' in a normal string is a deprecated escape),
        # escaped dot, and '\d+' so a malformed 't.graph' name is rejected
        # instead of crashing int('') below.
        re_snp_file = re.compile(r"t(\d+)\.graph")
        for f in listdir(dir_path):
            match = re_snp_file.match(f)
            whole_path = join(dir_path, f)
            if isfile(whole_path) and match is not None:
                g, nodes, edges, members, snp_comms = cls._process_graph_file(whole_path)
                n_snp = int(match.group(1))
                snapshots[n_snp] = g
                n_nodes[n_snp] = nodes
                n_edges[n_snp] = edges
                comms[n_snp] = members
                n_comms[n_snp] = snp_comms
        return snapshots, n_nodes, n_edges, comms, n_comms

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def load_datatset(self, **kwargs):
        # NOTE(review): method name keeps the 'datatset' typo because it is
        # the interface callers (and LoaderInterface) use.
        """Load all snapshots from kwargs['dataset_directory'].

        Returns:
            (snapshots, n_ts, n_nodes, n_edges, info, communities, n_comms)
        """
        snapshots, n_nodes, n_edges, communities, n_comms = DancerLoader._process_directory(kwargs["dataset_directory"])
        n_ts = len(snapshots)
        info = {
            "dataset_directory": kwargs["dataset_directory"],
            "snapshot_count": n_ts,
            "n_nodes": n_nodes,
            "n_edges": n_edges,
            "ground_truth": True,
            "members": communities,
            "n_communites": n_comms
        }
        return snapshots, n_ts, n_nodes, n_edges, info, communities, n_comms

    @classmethod
    def _update_members(cls, new_snapshot: igraph.Graph, old_members: List[int], name_id_table: Dict[str, int]) -> List[int]:
        """Remap community memberships onto the vertex ids of the reduced graph."""
        new_members = [-1] * new_snapshot.vcount()
        for v in new_snapshot.vs:
            node_old_id = name_id_table[v['name']]
            new_members[v.index] = old_members[node_old_id]
        return new_members
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#Importing Necessary Libraries
from bs4 import BeautifulSoup as soup
import requests
import lxml
import re
import pandas as pd
from threading import Thread
from elasticsearch import Elasticsearch
if __name__ == "__main__":
    # Scrape the Wikipedia table of English universities, enrich each row by
    # scraping the university's own page concurrently, then index the merged
    # result into a local Elasticsearch instance.
    main_url = 'https://en.wikipedia.org/wiki/List_of_universities_in_England'
    # BUGFIX: the original fetched 'my_url', which was never defined here
    # (NameError); the list page is main_url.
    source = requests.get(main_url).text
    page_soup = soup(source, 'lxml')
    containers = page_soup.find('div', {'class': "mw-parser-output"})
    rows = containers.table.find_all('tr')
    data = []
    header_name = []
    x = 0
    type_name = re.split('https://en.wikipedia.org/wiki/', main_url)[1]
    # csv_file=open(csv_name[1],'w')
    for row in rows:
        x += 1
        if x == 1:
            # First row holds the table headers; [0:-1] drops the newline.
            header = row.find_all('th')
            for hedr in header:
                header_name.append(hedr.text[0:-1])
            header_name.append('Url')
            data.append(header_name)
        else:
            cols = row.find_all('td')
            link = 'https://en.wikipedia.org' + cols[0].a.get('href')
            cols = [ele.text.strip() for ele in cols]
            cols.append(link)
            data.append([ele for ele in cols if ele])
    df = pd.DataFrame.from_records(data[1:], columns=data[0])
    urls = list(df['Url'])
    sub_agg_df = pd.DataFrame(columns=['University', 'Former names', 'Detailed_Location', 'Students',
                                       'Undergraduates', 'Postgraduates', 'Url'])
    print(sub_agg_df)
    # Worker threads append their one-row DataFrames here. list.append is
    # atomic under the GIL, unlike the former unsynchronized read-modify-write
    # of a global DataFrame, which could silently drop rows when threads raced.
    sub_frames = []

    # defining the function for scraping individual website
    def sub_scrapping(url, indx, U_name):
        """Scrape one university's infobox and stash a one-row DataFrame."""
        print(url, indx)
        my_url = url
        try:
            print('Executing for ' + my_url)
            source = requests.get(my_url).text
            page_soup = soup(source, 'lxml')
            containers = page_soup.find('table', {'class': "infobox vcard"})
            indivual_data = {}
            sub_headers = ['Former names', 'Location', 'Students', 'Undergraduates', 'Postgraduates']
            for sh in range(1, 6):
                sub_container = containers.find_all('tr')
                for ele in sub_container:
                    try:
                        if ele.th.text == sub_headers[sh - 1]:
                            indivual_data.update({sub_headers[sh - 1]: ele.td.text})
                    except Exception:
                        # Rows without a <th>/<td> pair are simply skipped.
                        pass
                if len(indivual_data) < sh:
                    # Header not found on this page: record it as missing.
                    indivual_data.update({sub_headers[sh - 1]: None})
            df_sub = pd.DataFrame([[U_name, indivual_data['Former names'], str(indivual_data['Location']),
                                    indivual_data['Students'], indivual_data['Undergraduates'],
                                    indivual_data['Postgraduates'], my_url]],
                                  columns=['University', 'Former names', 'Detailed_Location', 'Students',
                                           'Undergraduates', 'Postgraduates', 'Url'],
                                  index=[indx])
            sub_frames.append(df_sub)
        except Exception:
            print('Error in fetching url ' + my_url)

    threadlist = []
    for x in df.itertuples():
        td = Thread(target=sub_scrapping, args=(x.Url, x.Index, x.University))
        td.start()
        threadlist.append(td)
    for b in threadlist:
        b.join()
    # Assemble per-thread results once all workers have finished.
    # (pd.concat also replaces DataFrame.append, removed in pandas 2.0.)
    if sub_frames:
        sub_agg_df = pd.concat(sub_frames)
    df.drop('Url', axis=1, inplace=True)
    final_df = pd.merge(sub_agg_df, df, on='University')
    # Optional: Saving the table in CSV format for analysis purpose in excel:
    final_df.to_csv(type_name + '.csv')

    es = Elasticsearch('http://localhost:9200')
    # ignore 404 so a missing index does not abort the run on first use.
    es.indices.delete(index='universities', ignore=[400, 404])
    # {
    # "settings":{
    # "analysis":{
    # "analyzer":{
    # "my_analyzer":{
    # "type":"keyword",
    # }
    # }
    # }
    # }
    # "mappings":{
    # "doc":{
    # "dynamic": "strict",
    # "properties":{
    # "University":{
    # "type":"text",
    # "fields":{
    # "keyword":{
    # "type":"keyword"
    # }
    # },
    # "analyzer":"my_analyzer"
    # },
    # "Former names":{
    # "type":"text",
    # "fields":{
    # "keyword":{
    # "type":"keyword"
    # }
    # },
    # "analyzer":"my_analyzer"
    # },
    # "Detailed_Location":{
    # "type":"text",
    # "fields":{
    # "keyword":{
    # "type":"keyword"
    # }
    # },
    # "analyzer":"my_analyzer"
    # },
    # "Students":{
    # "type":"integer"
    # },
    # "Undergraduates":{
    # "type":"integer"
    # },
    # "Postgraduates":{
    # "type":"integer"
    # },
    # "Url":{
    # "type":"text",
    # "fields":{
    # "keyword":{
    # "type":"keyword"
    # }
    # },
    # "analyzer":"my_analyzer"
    # },
    # "Location":{
    # "type":"text",
    # "fields":{
    # "keyword":{
    # "type":"keyword"
    # }
    # },
    # "analyzer":"my_analyzer"
    # },
    # "Established":{
    # "type":"integer"
    # },
    # "Number of students":{
    # "type":"integer"
    # },
    # "Tuition fee":{
    # "type":"integer"
    # },
    # "Degree powers":{
    # "type":"text",
    # "fields":{
    # "keyword":{
    # "type":"keyword"
    # }
    # },
    # "analyzer":"my_analyzer"
    # }
    # }
    # }
    # }
    # }
    es.indices.create(index='universities', ignore=400)
    for x in final_df.iterrows():
        es.index(index='universities', doc_type=type_name, id=x[0], body=dict(x[1]))
    print('!!!!!!!!----Data Transfer Completed-----!!!!!!!')
    es = Elasticsearch('http://localhost:9200')
    res = es.search(index='universities',
                    body={"from": 0, "size": 1,
                          "query": {"match": {"University": "Harper Adams University"}}})
    res
    # Executing queries
    # BUGFIX: the dict literal had two "must" keys, so the first condition was
    # silently discarded; a bool query combines clauses via a "must" *list*.
    res = es.search(index="universities",
                    body={"from": 0, "size": 2,
                          "query": {"bool": {"must": [{"match": {"University": "Harper Adams University"}},
                                                      {"match": {"Former names": "None"}}]}}})
    res
|
<reponame>sahandv/science_science
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 16:10:03 2020
@author: github.com/sahandv
"""
import sys
import time
import gc
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
from random import randint
from sklearn.decomposition import PCA
from sklearn.cluster import AgglomerativeClustering, KMeans, DBSCAN
from sklearn import metrics
from sklearn.metrics.cluster import silhouette_score,homogeneity_score,adjusted_rand_score
from sklearn.metrics.cluster import normalized_mutual_info_score,adjusted_mutual_info_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import TfidfTransformer , TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import preprocessing
from sciosci.assets import text_assets as ta
from DEC.DEC_keras import DEC_simple_run
# =============================================================================
# Load data and init
# =============================================================================
datapath = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/' #Ryzen
# datapath = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/' #C1314
# data_address = datapath+"Corpus/cora-classify/cora/embeddings/single_component_small_18k/n2v 300-70-20 p1q05"#node2vec super-d2v-node 128-70-20 p1q025"
# label_address = datapath+"Corpus/cora-classify/cora/clean/single_component_small_18k/labels"
data_address = datapath+"Corpus/KPRIS/embeddings/deflemm/Doc2Vec patent_wos corpus"
label_address = datapath+"Corpus/KPRIS/labels"
vectors = pd.read_csv(data_address)#,header=None)
labels = pd.read_csv(label_address, names=['label'])
labels.columns = ['label']
# Drop a stray pandas index column if the embeddings were saved with one.
try:
    vectors = vectors.drop('Unnamed: 0', axis=1)
    print('\nDroped index column. Now '+data_address+' has the shape of: ', vectors.shape)
except KeyError:
    # BUGFIX(narrowed): a bare 'except' also hid genuine errors; only a
    # missing 'Unnamed: 0' column is expected here.
    print('\nVector shapes seem to be good:', vectors.shape)
# Encode string labels as integer ids for the metric functions.
labels_f = pd.factorize(labels.label)
X = vectors.values
Y = labels_f[0]
n_clusters = len(list(labels.groupby('label').groups.keys()))
# Accumulator for one metrics row per clustering run.
results = pd.DataFrame([], columns=['Method', 'parameter', 'Silhouette', 'Homogeneity', 'Completeness', 'NMI', 'AMI', 'ARI'])
# =============================================================================
# Evaluation method
# =============================================================================
def evaluate(X, Y, predicted_labels):
    """Score a clustering against the ground-truth labels Y.

    Args:
        X: feature matrix (may be None when only label-based metrics are
           wanted; silhouette then falls back to 0 via the except branch).
        Y: integer ground-truth labels.
        predicted_labels: integer cluster assignments.

    Returns:
        [silhouette, homogeneity, completeness, NMI, AMI, ARI].
        A degenerate clustering (fewer than two clusters) scores all zeros.
    """
    df = pd.DataFrame(predicted_labels, columns=['label'])
    if len(df.groupby('label').groups) < 2:
        return [0, 0, 0, 0, 0, 0]
    try:
        sil = silhouette_score(X, predicted_labels, metric='euclidean')
    except Exception:
        # Narrowed from a bare 'except'; lands here e.g. when X is None.
        sil = 0
    return [sil,
            homogeneity_score(Y, predicted_labels),
            # completeness(Y, pred) is homogeneity with the arguments swapped.
            homogeneity_score(predicted_labels, Y),
            normalized_mutual_info_score(Y, predicted_labels),
            adjusted_mutual_info_score(Y, predicted_labels),
            adjusted_rand_score(Y, predicted_labels)]
# =============================================================================
# Evaluate if you already have results and skip clustering (i.e. LDA)
# =============================================================================
prediction_results_address = datapath+"Corpus/KPRIS/LDA Results/_5/dataset_topic_scores.csv"
predictions = pd.read_csv(prediction_results_address)['class'].values # If you don't want to cluster and already have resutls
tmp_results = ['LDA unigram', 'max_df 0.8']+evaluate(None, Y, predictions)
tmp_results = pd.Series(tmp_results, index=results.columns)
# BUGFIX: DataFrame.append was removed in pandas 2.0; pd.concat is the
# supported replacement (identical resulting frame).
results = pd.concat([results, tmp_results.to_frame().T], ignore_index=True)
# =============================================================================
# K-means
# =============================================================================
print('\n- k-means random -----------------------')
# Five random-init runs with fresh seeds; each adds one metrics row.
for fold in tqdm(range(5)):
    seed = randint(0, 10**5)
    model = KMeans(n_clusters=n_clusters, n_init=20, init='random', random_state=seed).fit(X)
    predicted_labels = model.labels_
    tmp_results = ['k-means random', 'seed '+str(seed)]+evaluate(X, Y, predicted_labels)
    tmp_results = pd.Series(tmp_results, index=results.columns)
    # BUGFIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
    results = pd.concat([results, tmp_results.to_frame().T], ignore_index=True)
mean = results.mean(axis=0)
maxx = results.max(axis=0)
print(mean)
print(maxx)
# =============================================================================
# K-means with init='k-means++'
# =============================================================================
# print('\n- k-means++ -----------------------')
# for fold in tqdm(range(20)):
# seed = randint(0,10**5)
# model = KMeans(n_clusters=n_clusters,n_init=20,init='k-means++', random_state=seed).fit(X)
# predicted_labels = model.labels_
# tmp_results = ['k-means++','seed '+str(seed)]+evaluate(X,Y,predicted_labels)
# tmp_results = pd.Series(tmp_results, index = results.columns)
# results = results.append(tmp_results, ignore_index=True)
# mean = results.mean(axis=0)
# maxx = results.max(axis=0)
# print(mean)
# print(maxx)
# =============================================================================
# Agglomerative
# =============================================================================
print('\n- Agglomerative -----------------------')
# Ward linkage is deterministic, so a single run suffices.
for fold in tqdm(range(1)):
    model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(X)
    predicted_labels = model.labels_
    tmp_results = ['Agglomerative', 'ward']+evaluate(X, Y, predicted_labels)
    tmp_results = pd.Series(tmp_results, index=results.columns)
    # BUGFIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
    results = pd.concat([results, tmp_results.to_frame().T], ignore_index=True)
mean = results.mean(axis=0)
maxx = results.max(axis=0)
print(mean)
print(maxx)
# =============================================================================
# DBSCAN
# =============================================================================
# eps=0.000001
# print('\n- DBSCAN -----------------------')
# for fold in tqdm(range(19)):
# eps = eps+0.05
# model = DBSCAN(eps=eps, min_samples=10,n_jobs=15).fit(X)
# predicted_labels = model.labels_
# tmp_results = ['DBSCAN','eps '+str(eps)]+evaluate(X,Y,predicted_labels)
# tmp_results = pd.Series(tmp_results, index = results.columns)
# results = results.append(tmp_results, ignore_index=True)
# mean = results.mean(axis=0)
# maxx = results.max(axis=0)
# print(mean)
# print(maxx)
# =============================================================================
# Deep no min_max_scaling
# =============================================================================
# NOTE(review): this first architecture list is immediately overridden by the
# assignment below; kept only as a catalogue of previously-tried layouts.
archs = [[500, 500, 2000, 10],[500, 1000, 2000, 10],[500, 1000, 1000, 10],
         # [500, 500, 2000, 100],[500, 1000, 2000, 100],[500, 1000, 1000, 100],
         # [100, 300, 600, 10],[300, 500, 2000, 10],[700, 1000, 2000, 10],
         [200, 500, 10],[500, 1000, 10],[1000, 2000, 10],
         [200, 500, 100],[500, 1000, 100],[1000, 2000, 100],
         # [1000, 500, 10],[500, 200, 10],[200, 100, 10],
         # [1000, 1000, 2000, 10],[1000, 1500, 2000, 10],[1000, 1500, 1000, 10],
         # [1000, 1000, 2000,500, 10],[1000, 1500, 2000,500, 10],[1000, 1500, 1000, 500, 10],
         [500, 500, 2000, 500, 10],[500, 1000, 2000, 500, 10],[500, 1000, 1000, 500, 10]]
# Effective setting: five repeats of the same small architecture.
archs = [[200,500,10],[200,500,10],[200,500,10],[200,500,10],[200,500,10]]
print('\n- DEC -----------------------')
for fold in tqdm(archs):
    seed = randint(0, 10**4)
    np.random.seed(seed)
    predicted_labels = DEC_simple_run(X, minmax_scale_custom_data=False, n_clusters=n_clusters, architecture=fold, pretrain_epochs=300)
    tmp_results = ['DEC', str(seed)+' '+str(fold)]+evaluate(X, Y, predicted_labels)
    tmp_results = pd.Series(tmp_results, index=results.columns)
    # BUGFIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
    results = pd.concat([results, tmp_results.to_frame().T], ignore_index=True)
mean = results.mean(axis=0)
maxx = results.max(axis=0)
print(mean)
print(maxx)
# =============================================================================
# Deep with min_max_scaling
# =============================================================================
# archs = [[500, 500, 2000, 10],[500, 1000, 2000, 10],[500, 1000, 1000, 10],
# [500, 500, 2000, 100],[500, 1000, 2000, 100],[500, 1000, 1000, 100],
# [100, 300, 600, 10],[300, 500, 2000, 10],[700, 1000, 2000, 10],
# [200, 500, 10],[500, 1000, 10],[1000, 2000, 10],
# [200, 500, 100],[500, 1000, 100],[1000, 2000, 100],
# [1000, 500, 10],[500, 200, 10],[200, 100, 10],
# [1000, 1000, 2000, 10],[1000, 1500, 2000, 10],[1000, 1500, 1000, 10],
# [1000, 1000, 2000,500, 10],[1000, 1500, 2000,500, 10],[1000, 1500, 1000, 500, 10],
# [500, 500, 2000, 500, 10],[500, 1000, 2000, 500, 10],[500, 1000, 1000, 500, 10]]
# print('\n- DEC -----------------------')
# for fold in tqdm(archs):
# seed = randint(0,10**4)
# np.random.seed(seed)
# predicted_labels = DEC_simple_run(X,minmax_scale_custom_data=False,n_clusters=5,architecture=fold,pretrain_epochs=300)
# tmp_results = ['DEC',str(seed)+' '+str(fold)]+evaluate(X,Y,predicted_labels)
# tmp_results = pd.Series(tmp_results, index = results.columns)
# results = results.append(tmp_results, ignore_index=True)
# mean = results.mean(axis=0)
# maxx = results.max(axis=0)
# print(mean)
# print(maxx)
# =============================================================================
# Just cluster
# =============================================================================
print('\n- k-means -----------------------')
seed = 11822
# NOTE(review): the k-means++ labels computed first are immediately
# overwritten by the 'random'-init fit below; only the second result is kept.
model = KMeans(n_clusters=n_clusters, n_init=20, init='k-means++', random_state=seed).fit(X)
predicted_labels = model.labels_
model = KMeans(n_clusters=n_clusters, n_init=20, init='random', random_state=seed).fit(X)
predicted_labels = model.labels_
results_df = pd.DataFrame(predicted_labels, columns=['label'])
results_df.to_csv(data_address+' Kmeans labels', index=False)
results_df.groupby('label').groups.keys()
archs = [[200,500,10],[200,500,10]]
print('\n- DEC -----------------------')
for i, fold in tqdm(enumerate(archs)):
    seed = randint(0, 10**5)
    np.random.seed(seed)
    predicted_labels = DEC_simple_run(X, minmax_scale_custom_data=False, n_clusters=10, architecture=fold, pretrain_epochs=1000)
    tmp_results = ['DEC', str(seed)+' '+str(fold)]+evaluate(X, Y, predicted_labels)
    tmp_results = pd.Series(tmp_results, index=results.columns)
    # BUGFIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
    results = pd.concat([results, tmp_results.to_frame().T], ignore_index=True)
    results_df = pd.DataFrame(predicted_labels, columns=['label'])
    results_df.to_csv(data_address+' DEC 500, 1000, 1000, 500, 10 k10 labels - '+str(i), index=False)
    results_df.groupby('label').groups.keys()
# =============================================================================
# Save to disk
# =============================================================================
# Persist the accumulated metrics table next to the embedding file.
results_df = pd.DataFrame(results)
results_df.to_csv(data_address+' clustering results - 06 2021', index=False)
# =============================================================================
# find centroid of each cluster for a model
# =============================================================================
# Mean embedding per cluster, built with a comprehension over cluster ids
# instead of an explicit append loop; X is converted to an array once.
X_arr = np.array(X)
cluster_centers = [X_arr[predicted_labels == c].mean(axis=0)
                   for c in tqdm(range(n_clusters), total=n_clusters)]
# =============================================================================
# Save clusters
# =============================================================================
# Store the final hard assignments alongside the embeddings.
predicted_labels = pd.DataFrame(predicted_labels, columns=['labels'])
predicted_labels.to_csv(data_address+' clustering predictions', index=False)
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from omb import OMB_VERSION_01, OAUTH_REQUEST, OAUTH_ACCESS, OMB_POST_NOTICE, OMB_UPDATE_PROFILE
from oauth.oauth import OAuthConsumer, OAuthRequest, OAuthSignatureMethod_HMAC_SHA1, OAuthToken
import urlparse, urllib
def requestToken(omb):
    """Fetch an unauthorized OAuth request token from the remote OMB service."""
    current_site = Site.objects.get_current()
    url = urlparse.urlparse(omb[OAUTH_REQUEST].uris[0].uri)
    params = {}
    query = url[4]
    if query != '':
        # We need to copy over the query string params for sites like laconica
        for part in query.split('='.join([]) or '&'):
            key, value = part.split('=')
            params[key] = value
    params['omb_version'] = OMB_VERSION_01
    params['omb_listener'] = omb[OAUTH_REQUEST].localid.text
    # Sign the token request as this site (no token yet at this stage).
    consumer = OAuthConsumer(current_site.domain, "")
    signed_request = OAuthRequest().from_consumer_and_token(
        consumer, http_url=url.geturl(), parameters=params, http_method="POST")
    signed_request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, None)
    # POST the signed parameters and parse the token from the response body.
    response = urllib.urlopen(url.geturl(), signed_request.to_postdata())
    return OAuthToken.from_string(response.read())
def requestAuthorization(token, url, listener, user):
    """Build the signed OAuth request that sends the user to the remote
    service's authorize page, advertising who is being followed (listenee)."""
    current_site = Site.objects.get_current()
    user_profile_url = "%s%s" % (current_site.domain, reverse('profile_detail', args=[user.username]))
    profile = user.get_profile()
    parsed = urlparse.urlparse(url)
    params = {}
    if parsed[4] != '':
        # We need to copy over the query string params for sites like laconica
        params.update(dict([part.split('=') for part in parsed[4].split('&')]))
    # OMB listenee descriptors: profile data of the local user being followed.
    params.update({
        'omb_version': OMB_VERSION_01,
        'omb_listener': listener,
        'omb_listenee': "http://%s" % user_profile_url,
        'omb_listenee_profile': "http://%s" % user_profile_url,
        'omb_listenee_nickname': user.username,
        'omb_listenee_license': 'http://%s/license/' % current_site.domain,  # TODO link to the real license
        'omb_listenee_fullname': "%s %s" % (user.first_name, user.last_name),
        'omb_listenee_homepage': "",  # TOOD Pinax doesn't have this
        'omb_listenee_bio': profile.about,
        'omb_listenee_location': profile.location,
        'omb_listenee_avatar': '',  # TODO get the avatar url
        'oauth_callback': 'http://%s/omb/finish_follow/' % current_site.domain,
    })
    consumer = OAuthConsumer(current_site.domain, "")
    oauth_request = OAuthRequest().from_consumer_and_token(
        consumer, http_url=parsed.geturl(), parameters=params, http_method="GET", token=token)
    oauth_request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
    return oauth_request
def requestAccessToken(omb_session, oauth_request):
    """Exchange the request token stored in *omb_session* for an access token.

    POSTs a signed request to the session's access_token_url and parses the
    response body as an OAuthToken.
    NOTE(review): the *oauth_request* parameter is never used — confirm
    whether callers still pass meaningful data here.
    """
    current_site = Site.objects.get_current()
    token = OAuthToken(omb_session["token"], omb_session["secret"])
    url = urlparse.urlparse(omb_session["access_token_url"])
    params = {}
    if url[4] != '':
        # We need to copy over the query string params for sites like laconica
        params.update(dict([part.split('=') for part in url[4].split('&')]))
    params['omb_version'] = OMB_VERSION_01
    consumer = OAuthConsumer(current_site.domain, "")
    req = OAuthRequest().from_consumer_and_token(consumer, token=token, http_url=url.geturl(), parameters=params, http_method="POST")
    req.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
    # Python 2 urllib: blocking HTTP POST, no timeout.
    f = urllib.urlopen(url.geturl(), req.to_postdata())
    data = f.read()
    accessToken = OAuthToken.from_string(data)
    return accessToken
def postNotice(token, secret, post_notice_url, notice_content, notice_url, user):
    """POST a signed OMB notice (status update) on behalf of *user*.

    Returns nothing; the HTTP response body is read but not inspected.
    """
    current_site = Site.objects.get_current()
    user_profile_url = "%s%s" % (current_site.domain, reverse('profile_detail', args=[user.username]))
    oauthToken = OAuthToken(token, secret)
    url = urlparse.urlparse(post_notice_url)
    params = {}
    if url[4] != '':
        # We need to copy over the query string params for sites like laconica
        params.update(dict([part.split('=') for part in url[4].split('&')]))
    params['omb_version'] = OMB_VERSION_01
    params['omb_listenee'] = user_profile_url
    params['omb_notice'] = "%s%s" % (current_site.domain, notice_url)
    params['omb_notice_content'] = notice_content
    params['omb_notice_url'] = "%s%s" % (current_site.domain, notice_url)
    params['omb_notice_license'] = '%s/license/' % current_site.domain # TODO link to the real license
    consumer = OAuthConsumer(current_site.domain, "")
    req = OAuthRequest().from_consumer_and_token(consumer, token=oauthToken, http_url=url.geturl(), parameters=params, http_method="POST")
    req.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, oauthToken)
    f = urllib.urlopen(url.geturl(), req.to_postdata())
    # NOTE(review): response is read but never checked — errors are silent.
    data = f.read()
    # TODO log failures
def updateProfile(token, secret, update_profile_url, profile):
    """POST a signed OMB profile-update for *profile* to a remote listener.

    Mirrors the omb_listenee_* fields sent during authorization so the
    remote side can refresh its cached copy of the user's profile.
    """
    current_site = Site.objects.get_current()
    user_profile_url = "%s%s" % (current_site.domain, reverse('profile_detail', args=[profile.user.username]))
    oauthToken = OAuthToken(token, secret)
    url = urlparse.urlparse(update_profile_url)
    params = {}
    if url[4] != '':
        # We need to copy over the query string params for sites like laconica
        params.update(dict([part.split('=') for part in url[4].split('&')]))
    params['omb_version'] = OMB_VERSION_01
    params['omb_listenee'] = user_profile_url
    params['omb_listenee_profile'] = user_profile_url
    params['omb_listenee_nickname'] = profile.username
    params['omb_listenee_license'] = '%s/license/' % current_site.domain # TODO link to the real license
    params['omb_listenee_fullname'] = profile.name
    params['omb_listenee_homepage'] = profile.website
    params['omb_listenee_bio'] = profile.about
    params['omb_listenee_location'] = profile.location
    #params['omb_listenee_avatar'] = TODO get the gravatar of the user
    consumer = OAuthConsumer(current_site.domain, "")
    req = OAuthRequest().from_consumer_and_token(consumer, token=oauthToken, http_url=url.geturl(), parameters=params, http_method="POST")
    req.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, oauthToken)
    f = urllib.urlopen(url.geturl(), req.to_postdata())
    # NOTE(review): response body is read but never checked.
    data = f.read()
    # TODO log failures |
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base.base import BaseExtensionTests
class BaseGroupbyTests(BaseExtensionTests):
    """Groupby-specific tests.

    All tests take the ``data_for_grouping`` extension-array fixture
    (presumably 8 elements with two NA values at positions 2-3 — confirm
    against the fixture definition).
    """
    def test_grouping_grouper(self, data_for_grouping):
        # Grouping by an object column yields a numpy grouping vector,
        # grouping by the EA column keeps the extension array itself.
        df = pd.DataFrame(
            {"A": ["B", "B", None, None, "A", "A", "B", "C"], "B": data_for_grouping}
        )
        gr1 = df.groupby("A").grouper.groupings[0]
        gr2 = df.groupby("B").grouper.groupings[0]
        tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
        tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        # Aggregating a numeric column grouped BY the extension column;
        # NA group keys are dropped, hence three expected means.
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        result = df.groupby("B", as_index=as_index).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=True)
        index = pd.Index(index, name="B")
        expected = pd.Series([3.0, 1.0, 4.0], index=index, name="A")
        if as_index:
            self.assert_series_equal(result, expected)
        else:
            expected = expected.reset_index()
            self.assert_frame_equal(result, expected)
    def test_groupby_agg_extension(self, data_for_grouping):
        # GH#38980 groupby agg on extension type fails for non-numeric types
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        expected = df.iloc[[0, 2, 4, 7]]
        expected = expected.set_index("A")
        # All three spellings of "first" must agree.
        result = df.groupby("A").agg({"B": "first"})
        self.assert_frame_equal(result, expected)
        result = df.groupby("A").agg("first")
        self.assert_frame_equal(result, expected)
        result = df.groupby("A").first()
        self.assert_frame_equal(result, expected)
    def test_groupby_extension_no_sort(self, data_for_grouping):
        # sort=False must preserve first-appearance order of the group keys.
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        result = df.groupby("B", sort=False).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=False)
        index = pd.Index(index, name="B")
        expected = pd.Series([1.0, 3.0, 4.0], index=index, name="A")
        self.assert_series_equal(result, expected)
    def test_groupby_extension_transform(self, data_for_grouping):
        # transform broadcasts the per-group length back to the rows.
        valid = data_for_grouping[~data_for_grouping.isna()]
        df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
        result = df.groupby("B").A.transform(len)
        expected = pd.Series([3, 3, 2, 2, 3, 1], name="A")
        self.assert_series_equal(result, expected)
    def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
        # Smoke test: apply must not raise for either grouping direction.
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        df.groupby("B").apply(groupby_apply_op)
        df.groupby("B").A.apply(groupby_apply_op)
        df.groupby("A").apply(groupby_apply_op)
        df.groupby("A").B.apply(groupby_apply_op)
    def test_groupby_apply_identity(self, data_for_grouping):
        # apply returning the group's array yields a Series of arrays.
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        result = df.groupby("A").B.apply(lambda x: x.array)
        expected = pd.Series(
            [
                df.B.iloc[[0, 1, 6]].array,
                df.B.iloc[[2, 3]].array,
                df.B.iloc[[4, 5]].array,
                df.B.iloc[[7]].array,
            ],
            index=pd.Index([1, 2, 3, 4], name="A"),
            name="B",
        )
        self.assert_series_equal(result, expected)
    def test_in_numeric_groupby(self, data_for_grouping):
        # Non-numeric EA columns must be dropped from a numeric-only sum.
        df = pd.DataFrame(
            {
                "A": [1, 1, 2, 2, 3, 3, 1, 4],
                "B": data_for_grouping,
                "C": [1, 1, 1, 1, 1, 1, 1, 1],
            }
        )
        result = df.groupby("A").sum().columns
        if data_for_grouping.dtype._is_numeric:
            expected = pd.Index(["B", "C"])
        else:
            expected = pd.Index(["C"])
        tm.assert_index_equal(result, expected)
|
from enum import Enum
from typing import Optional, Callable, Iterable, Iterator, Union
JOIN_TYPES = ('left', 'right', 'full', 'inner', 'outer') # deprecated
class JoinType(Enum):
    """Join semantics accepted by the join helpers in this module.

    Values match the legacy JOIN_TYPES strings, so ``JoinType('left')``
    converts the old string form.
    """
    Left = 'left'
    Right = 'right'
    Full = 'full'
    Inner = 'inner'
    Outer = 'outer'
def topologically_sorted(  # Kahn's algorithm
        nodes: Union[list, tuple],
        edges: dict,
        ignore_cycles: bool = False,
        logger=None,
):
    """Return *nodes* in dependency order using Kahn's algorithm.

    ``edges`` maps a node to the collection of nodes it depends on
    (nodes missing from ``edges`` are treated as dependency-free).
    Raises OverflowError on a cyclic graph unless ``ignore_cycles`` is set,
    in which case a warning is logged (when *logger* supports it) and the
    nodes ordered so far are returned.  The caller's ``edges`` mapping and
    its inner collections are never modified.
    """
    if len(nodes) < 2:
        return nodes  # nothing to order
    unordered_nodes = list(nodes)
    # Copy each dependency collection too: the original shallow edges.copy()
    # shared the inner collections and mutated the caller's data in place.
    unresolved_dependencies = {node: set(edges.get(node, ())) for node in unordered_nodes}
    ordered_nodes = list()
    while unordered_nodes:
        # Snapshot the currently-free nodes before mutating the list:
        # removing items from a list while iterating it skips elements.
        ready = [node for node in unordered_nodes if not unresolved_dependencies[node]]
        for node in ready:
            unordered_nodes.remove(node)
            ordered_nodes.append(node)
            for follower in unordered_nodes:
                unresolved_dependencies[follower].discard(node)
        if not ready:  # no progress this pass: cycle or missing node
            message = "Kahn's algorithm is not converging. "
            message += 'Probably given graph has cyclic dependencies or missing nodes. '
            message += 'Unordered nodes: {} '.format(unordered_nodes)
            if ignore_cycles:
                if hasattr(logger, 'warning'):
                    logger.warning(message + 'skipped.')
                break
            else:
                raise OverflowError(message)
    return ordered_nodes
def merge_iter(
        iterables: Union[list, tuple],
        key_function: Callable,
        reverse: bool = False,
        post_action: Optional[Callable] = None,
):
    """K-way merge of pre-sorted iterables, yielding items in key order.

    Each input must already be sorted by ``key_function`` (descending when
    ``reverse``).  Items from different inputs that share the chosen key are
    all yielded in one round, in input order.  ``post_action`` (if given)
    runs once after all inputs are exhausted.
    """
    # Generalize: accept any iterables (lists, generators, ...), not only
    # ready-made iterators as the original next() calls required.
    iterators = [iter(it) for it in iterables]
    iterators_count = len(iterators)
    finished = [False] * iterators_count
    take_next = [True] * iterators_count
    item_from = [None] * iterators_count
    key_from = [None] * iterators_count
    choice_function = max if reverse else min
    # all() instead of min(): min([]) raised ValueError for an empty input list.
    while not all(finished):
        for n in range(iterators_count):
            if take_next[n] and not finished[n]:
                try:
                    item_from[n] = next(iterators[n])
                    key_from[n] = key_function(item_from[n])
                    take_next[n] = False
                except StopIteration:
                    finished[n] = True
        if not all(finished):
            chosen_key = choice_function(k for f, k in zip(finished, key_from) if not f)
            for n in range(iterators_count):
                if key_from[n] == chosen_key and not finished[n]:
                    yield item_from[n]
                    take_next[n] = True
    if post_action:
        post_action()
def map_side_join(
        iter_left: Iterable,
        iter_right: Iterable,
        key_function: Callable,
        merge_function: Callable,  # it.merge_two_items
        dict_function: Callable,  # it.items_to_dict
        how: JoinType = JoinType.Left,
        uniq_right: bool = False,
):
    """Join two iterables by loading the right side into a dict (map-side join).

    The right side is materialized via ``dict_function``; the left side is
    streamed.  With ``uniq_right`` the dict holds one item per key, otherwise
    a list of items sharing the key.  Yields ``merge_function(left, right)``
    records; for Right/Full joins, unmatched right records are emitted last
    merged with ``None``.
    """
    if not isinstance(how, JoinType):
        how = JoinType(how)  # accept legacy plain strings like 'left'
    dict_right = dict_function(iter_right, key_function=key_function, of_lists=not uniq_right)
    keys_used = set()
    for left_part in iter_left:
        cur_key = key_function(left_part)
        # .get() returns None for a missing key, so test identity rather than
        # truthiness: a present-but-falsy right record must still count as a match.
        right_part = dict_right.get(cur_key)
        if how in (JoinType.Right, JoinType.Full):
            keys_used.add(cur_key)
        if right_part is not None:
            if uniq_right:
                out_items = [merge_function(left_part, right_part)]
            elif isinstance(right_part, (list, tuple)):
                out_items = [merge_function(left_part, i) for i in right_part]
            else:
                # fixed: message previously said 'while using uniq_right',
                # but this branch is only reachable when uniq_right is False
                message = 'right part must be list or tuple when not using uniq_right option (got {})'
                raise ValueError(message.format(type(right_part)))
        else:
            if how in (JoinType.Right, JoinType.Inner):
                out_items = []  # unmatched left records are dropped
            else:
                out_items = [left_part]
        if right_part is not None or how != JoinType.Inner:
            yield from out_items
    if how in (JoinType.Right, JoinType.Full):
        # emit right records whose keys never appeared on the left
        for k in dict_right:
            if k not in keys_used:
                if uniq_right:
                    yield merge_function(None, dict_right[k])
                else:
                    yield from [merge_function(None, i) for i in dict_right[k]]
def sorted_join(
        iter_left: Iterator,
        iter_right: Iterator,
        key_function: Callable,
        merge_function: Callable,  # fs.merge_two_items()
        order_function: Callable,  # fs.is_ordered(reverse=sorting_is_reversed, including=True)
        how: JoinType = JoinType.Left,
):
    """Streaming merge-join of two iterators pre-sorted by ``key_function``.

    Buffers one group (run of equal keys) per side at a time; when both
    sides advance past a key, the buffered groups are cross-merged with
    ``merge_function``.  ``order_function(a, b)`` must report whether the
    keys a, b are in sorted order (inclusive); it is also used to assert
    that the inputs really are sorted.  Yields merged records lazily.
    """
    if not isinstance(how, JoinType):
        how = JoinType(how)  # accept legacy plain strings like 'left'
    # Per-side cursor state: exhaustion flag, advance flag, current record.
    left_finished, right_finished = False, False
    take_next_left, take_next_right = True, True
    cur_left, cur_right = None, None
    # Buffered runs of records sharing the current key on each side.
    group_left, group_right = list(), list()
    left_key, right_key = None, None
    prev_left_key, prev_right_key = None, None
    while not (left_finished and right_finished):
        if take_next_left and not left_finished:
            try:
                cur_left = next(iter_left)
                left_key = key_function(cur_left)
            except StopIteration:
                left_finished = True
        if take_next_right and not right_finished:
            try:
                cur_right = next(iter_right)
                right_key = key_function(cur_right)
            except StopIteration:
                right_finished = True
        left_key_changed = left_finished or left_key != prev_left_key
        right_key_changed = right_finished or right_key != prev_right_key
        if left_key_changed and right_key_changed:
            # Both groups are complete: flush them before buffering anew.
            if prev_left_key == prev_right_key:
                if how != JoinType.Outer:  # Outer keeps only unmatched rows
                    for out_left in group_left:
                        for out_right in group_right:
                            yield merge_function(out_left, out_right)
            else:
                # Keys differ: emit one-sided records per join semantics.
                if how in (JoinType.Left, JoinType.Full, JoinType.Outer):
                    for out_left in group_left:
                        yield merge_function(out_left, None)
                if how in (JoinType.Right, JoinType.Full, JoinType.Outer):
                    for out_right in group_right:
                        yield merge_function(None, out_right)
            group_left, group_right = list(), list()
        # Decide which side(s) to advance next, keeping keys aligned.
        if left_key == right_key:
            take_next_left, take_next_right = True, True
            prev_left_key, prev_right_key = left_key, right_key
            if take_next_left and not left_finished:
                group_left.append(cur_left)
            if take_next_right and not right_finished:
                group_right.append(cur_right)
        elif order_function(left_key, right_key) or right_finished:
            take_next_left, take_next_right = True, False
            assert order_function(prev_left_key, left_key) or left_finished, 'left stream must be sorted'
            prev_left_key = left_key
            if take_next_left and not left_finished:
                group_left.append(cur_left)
        else:  # next is right
            take_next_left, take_next_right = False, True
            assert order_function(prev_right_key, right_key) or right_finished, 'right stream must be sorted'
            prev_right_key = right_key
            if take_next_right and not right_finished:
                group_right.append(cur_right)
        # NOTE(review): this exit appears to drop the still-buffered groups
        # without a final flush — confirm against callers/tests.
        if (left_finished and not take_next_right) or (right_finished and not take_next_left):
            break
|
<filename>tests/golog.py<gh_stars>1-10
#!/usr/bin/env python3
from collections import defaultdict
from strips import *
from golog_program import *
from domains.bag import S as S
from domains.math1 import S as S1
def assert_pn(s, incl, excl):
    # Assert state s contains every object in incl and none of excl.
    assert set(incl) <= s.exists
    assert not s.exists.intersection(set(excl))
def test01(s):
    # Single primitive action: removing a leaves {b, c}.
    p = Exec(S.remove(a))
    pn, sn, an = next(trans_star(p, s, []))
    assert an == [S.remove(a)]
    assert_pn(sn, [b, c], [a])
def test02(s):
    # While loop with nondeterministic Pick empties the bag in 3 steps.
    p = While(lambda s: s.exists, Pick(Object, lambda x: Exec(S.remove(x))))
    pn, sn, an = next(trans_star(p, s, []))
    assert len(an) == 3
    assert_pn(sn, [], [a, b, c])
def test03(s):
    # Choose must back off the already-impossible remove(a) branch.
    p = Sequence(Exec(S.remove(a)), Choose(Exec(S.remove(a)), Exec(S.remove(b))))
    pn, sn, an = next(trans_star(p, s, []))
    assert_pn(sn, [c], [a, b])
def test04(s):
    # Conditional takes the then-branch while a is still present.
    p = If(lambda s: a in s.exists, Exec(S.remove(a)), Empty())
    pn, sn, an = next(trans_star(p, s, []))
    assert_pn(sn, [b, c], [a])
def test05(s):
    # Star iterates Choose until some expansion empties the bag.
    passed = False
    p = Star(Choose(Exec(S.remove(a)), Exec(S.remove(b)), Exec(S.remove(c))))
    for pn, sn, an in trans_star(p, s, []):
        if len(an) == 3 and not sn.exists:
            passed = True
            break
    assert passed
def test06(s):
    # A satisfied Test succeeds without consuming actions.
    p = Test(lambda s: s.exists)
    pn, sn, an = next(trans_star(p, s, [])) # test passed if no StopIteration exception
def test07(s):
    # Removing everything then testing non-emptiness must be unsolvable.
    p = Sequence(Exec(S.remove(a)), Exec(S.remove(b)), Exec(S.remove(c)), Test(lambda s: s.exists))
    assert not any(trans_star(p, s, [])) # should not have solution
def test08(s):
    # First Choose branch is blocked by its Test, so branch two runs.
    p = Sequence(Exec(S.remove(a)), Choose(Sequence(Test(lambda s: a in s.exists), Exec(S.remove(b))), Exec(S.remove(c))))
    pn, sn, an = next(trans_star(p, s, []))
    assert_pn(sn, [b], [a, c])
def test09(s):
    # Every surviving branch must keep a while dropping b or c.
    p = Sequence(Choose(Exec(S.remove(a)), Exec(S.remove(b)), Exec(S.remove(c))), Test(lambda s: a in s.exists))
    for pn, sn, an in trans_star(p, s, []):
        assert a in sn.exists and (b not in sn.exists or c not in sn.exists)
def test10(s):
    # Star+Pick with a final emptiness Test: all solutions empty the bag.
    p = Sequence(Star(Pick(Object, lambda x: Exec(S.remove(x)))), Test(lambda s: not s.exists))
    for pn, sn, an in trans_star(p, s, []):
        assert_pn(sn, [], [a, b, c])
    pn, sn, an = next(trans_star(p, s, []))
def test11(s):
    # Unbounded Star over incr: each resumption adds exactly one step.
    p = Star(Exec(S1.incr()))
    g = trans_star(p, s1, [])
    # first step will do nothing:
    pn, sn, an = next(g)
    last = sn.n
    # this has to increase in a sequence
    for k in range(10):
        pn, sn, an = next(g)
        assert last + 1 == sn.n
        last = sn.n
def test12(s):
    # like previous test, but forcing backtracking with test for even numbers:
    p = Sequence(Star(Exec(S1.incr())), Test(lambda s: s.n % 2 == 0))
    g = trans_star(p, s1, [])
    # first step will do nothing:
    pn, sn, an = next(g)
    last = sn.n
    # this has to increase in a sequence of step 2
    for k in range(10):
        pn, sn, an = next(g)
        assert last + 2 == sn.n
        last = sn.n
def test13(s):
    # use nondeterminism to find a solution:
    goal = lambda s: s.n == 123
    p = Sequence(Star(Choose(Exec(S1.incr()), Exec(S1.double()))), Test(goal))
    pn, sn, an = next(trans_star(p, s, []))
    assert goal(sn)
def run_test(n, s):
    """Look up and run the module-level function test<n> against state s."""
    # Fixed: the test name was built from the global loop variable `i`
    # instead of the parameter `n`, so run_test only worked by accident
    # when called from the module-level loops below.
    testname = 'test%02d' % n
    print('running %s...' % testname)
    globals()[testname](s)
# Shared fixtures: three bag objects, a bag start state, a numeric start state.
a = Object('a')
b = Object('b')
c = Object('c')
s = S(a, b, c)
s1 = S1(0)
# Tests 1-10 exercise the bag domain, 11-13 the math domain.
for i in range(1,11): run_test(i,s)
for i in range(11,14): run_test(i,s1)
print('All tests passed successfully')
|
'''SMART API Verifier main controller'''
# Developed by: <NAME>
#
# CONFIG: Change the consumer_secret in _ENDPOINT!
#
# Revision history:
# 2012-02-24 Initial release
# 2013-03-27 Upgraded to SMART v0.6 OAuth - <NAME>
import os
import sys
abspath = os.path.dirname(__file__)
sys.path.append(abspath)
import logging
import json
import tempfile
import time
import urllib
import web
from smart_client.client import SMARTClient
from smart_client.common.rdf_tools import rdf_ontology
from settings import APP_PATH, ONTOLOGY_PATH, ENDPOINT
from tests import runTest, getMessages, describeQueries
from threading import Lock
# Configuration
###########################################################################
logging.basicConfig(level=logging.DEBUG)
# SMART Container OAuth Endpoint
_ENDPOINT = ENDPOINT
# webpy file based sessions
###########################################################################
_session = web.session.DiskStore(tempfile.mkdtemp())
# SMARTClient and OAuth Helper Functions
###########################################################################
_smart = None # A global flag to check is the SMARTClient is configured
def _smart_client(api_base, record_id=None):
""" Returns the SMART client, configured accordingly. """
global _smart
if _smart is None:
try:
_smart = SMARTClient(_ENDPOINT.get('app_id'), api_base, _ENDPOINT)
except Exception, e:
logging.critical("Could not init SMARTClient. " + str(e))
_smart.record_id = record_id
return _smart
def _request_token_for_record(api_base, record_id):
    """ Requests a request token for a given record_id.

    Stores the token in the module session and returns a
    ``(success, error_message)`` tuple.
    """
    global _session
    _session['req_token'] = None
    logging.debug("Requesting token for %s on %s" % (record_id, api_base))
    smart = _smart_client(api_base, record_id)
    # Drop any stale token before requesting a fresh one.
    smart.token = None
    try:
        _session['req_token'] = smart.fetch_request_token()
    except Exception, e:
        return False, str(e)
    return True, None
def _exchange_token(verifier):
    """ Exchanges verifier for an acc_token and stores it in the session.

    Returns True on success; returns None (falsy) when the record id is
    missing or the exchange fails, after logging the reason.
    """
    global _session
    record_id = _session['record_id']
    req_token = _session['req_token']
    api_base = _session['api_base']
    if record_id is None:
        logging.error("No record_id, cannot exchange %s" % req_token)
        return
    logging.debug("Exchanging token: %s" % req_token)
    smart = _smart_client(api_base, record_id)
    smart.update_token(req_token)
    try:
        acc_token = smart.exchange_token(verifier)
    except Exception, e:
        logging.error("Token exchange failed: %s" % e)
        return
    # success! store it
    logging.debug("Exchanged req_token for acc_token: %s" % acc_token)
    _session['acc_token'] = acc_token
    smart.update_token(acc_token)
    # store in cookies too, so api_call can rebuild the token per request
    web.setcookie('oauth_token_secret', acc_token['oauth_token_secret'])
    web.setcookie('oauth_token', acc_token['oauth_token'])
    web.setcookie('user_id', acc_token['user_id'])
    web.setcookie('api_base', api_base)
    web.setcookie('record_id', record_id)
    return True
def _test_token():
    """ Tries to fetch demographics with session acc_token and returns a
    bool whether that was successful. """
    smart = _smart_client(_session['api_base'],
                          _session['record_id'])
    if smart is None:
        return False
    smart.update_token(_session['acc_token'])
    try:
        demo = smart.get_demographics()
        if '200' == demo.response.get('status'):
            return True
    except Exception, e:
        # Any failure (network, auth, parse) just means the token is stale.
        pass
    return False
# App URLs
###########################################################################
# URL mappings for web.py
# URL-to-handler-class routing table consumed by web.application below.
urls = ('/smartapp/index.html', 'index',
        '/smartapp/authorize', 'authorize',
        '/smartapp/getcalls', 'get_calls',
        '/smartapp/apicall', 'api_call',
        '/smartapp/runtests', 'run_tests',
        '/smartapp/describe', 'describe_queries')
class index:
    '''Disseminator for the SMART tester index page.

    Runs the OAuth dance when no valid access token is in the session,
    otherwise renders the tester UI template.
    '''
    def GET(self):
        # We should have the api_base and record_id in the query string
        # e.g we're not going to redirect to the record selection UI
        global _session
        _session['api_base'] = api_base = _ENDPOINT.get('url')
        _session['record_id'] = record_id = web.input().get('record_id')
        logging.debug('api_base: ' + str(api_base) +
                      ' record_id: ' + str(record_id))
        # Init the SMARTClient
        smart = _smart_client(api_base, record_id)
        # Do we have a valid access token?
        if 'acc_token' not in _session or not _test_token():
            # Nope, clear the acc and req tokens (cookie TTL -1 deletes them)
            web.setcookie('oauth_token_secret', '', -1)
            web.setcookie('oauth_token', '', -1)
            web.setcookie('record_id', '', -1)
            web.setcookie('user_id', '', -1)
            _session['acc_token'] = None
            _session['req_token'] = None
            fetched, error_msg = _request_token_for_record(api_base, record_id)
            if fetched:
                logging.debug("Redirecting to authorize url: " +
                              smart.auth_redirect_url)
                raise web.seeother(smart.auth_redirect_url)
            if error_msg:
                logging.debug('_request_token_for_record failed')
                web.internalerror()
        # We are good to go
        template_html = web.template.frender(APP_PATH +
                                             '/templates/index.html')
        html = template_html("1.0")
        web.header('Content-Type', 'text/html')
        return html
class authorize:
    '''OAuth callback target: completes the token exchange.'''
    def GET(self):
        """ Extract the oauth_verifier and exchange it for an access token. """
        global _session
        new_oauth_token = web.input().oauth_token
        req_token = _session['req_token']
        api_base = _session['api_base']
        record_id = _session['record_id']
        # The callback's token must match the request token we issued.
        if new_oauth_token != req_token.get('oauth_token', None):
            logging.critical('Token mismatch in /authorize! Aborting.')
            web.internalerror()
            return
        if _exchange_token(web.input().oauth_verifier):
            # Back to the index page, now with a valid access token.
            return web.seeother(
                '/smartapp/index.html?api_base=%s&record_id=%s' %
                (api_base, record_id))
        else:
            logging.critical('Could not exchange token.')
            web.internalerror()
class get_calls:
    '''Returns the available python client calls based on the ontology,
    as a JSON object mapping ontology target to client method name.'''
    def GET(self):
        '''Returns the available python client calls based on the ontology'''
        out = {}
        for t in rdf_ontology.api_calls:
            path = str(t.path)
            method = str(t.http_method)
            target = str(t.target)
            category = str(t.category)
            cardinality = str(t.cardinality)
            # Process only GET calls of "record_items" category plus a few
            # specific exceptions by adding them to the dictionary
            if method == "GET" and \
               ((category == "record" and cardinality == "multiple") or
                t.client_method_name in (
                    "get_demographics",
                    "get_ontology",
                    "get_container_manifest",
                    #"get_user_preferences", # disabled
                    "get_app_manifests")):
                # Build the generic python client call name and use it
                # in the dictionary
                out[target] = str(t.client_method_name)
        return json.dumps(out, sort_keys=True, indent=4)
class api_call:
    '''Executes a SMART python client API call named in the POST body and
    returns its result plus test messages as JSON.'''
    def POST(self):
        '''Executes a python client API call identified by its generic name'''
        global _smart
        # make sure the SMARTClient is init'd
        cookies = web.cookies()
        api_base = cookies.api_base
        record_id = cookies.record_id
        # reconstruct acc_token from cookies
        acc_token = {
            'oauth_token_secret': cookies.oauth_token_secret,
            'oauth_token': cookies.oauth_token,
            'record_id': record_id,
            'user_id': cookies.user_id
        }
        logging.debug('Cookies are: api_base: ' + api_base +
                      ' record_id: ' + record_id +
                      ' acc_token: ' + str(acc_token))
        smart = _smart_client(api_base, record_id)
        if smart is None:
            return False
        smart.update_token(acc_token)
        call_name = web.input().call_name
        # Figure out the SMART model corresponding to the API call
        model = get_model(call_name)
        logging.debug('Calling ' + call_name)
        # NOTE(review): looks up the unbound method on the class and calls it
        # with the module-global _smart rather than the local `smart` —
        # the same object in practice; confirm before refactoring.
        method_to_call = getattr(SMARTClient, call_name)
        r = method_to_call(_smart)
        # Run the API tests on the result of the call
        contentType = r.response.get('content-type', None)
        messages = getMessages(runTest(model, r.body, contentType))
        # Encode and return the call and tests result as JSON
        return json.dumps({
            'body': r.body,
            'contentType': contentType,
            'messages': messages
        }, sort_keys=True, indent=4)
class run_tests:
    '''Runs the data-model test suite on data supplied in the POST body.'''
    def POST(self):
        '''Executes the appropriate series of tests for a given data model'''
        # Get the input data from the HTTP header
        # NOTE(review): web.input() re-parses the request each call; a single
        # `params = web.input()` would avoid the triple parse — confirm.
        model = web.input().model
        data = web.input().data
        contentType = web.input().content_type
        # Run the tests and obtain the failure messages
        messages = getMessages(runTest(model, data, contentType))
        # Return the failure messages encoded as JSON
        return json.dumps(messages, sort_keys=True, indent=4)
class describe_queries:
    '''Describes the test queries for a data model (plain-text response).'''
    def GET(self):
        '''Returns a string describing the queries used in testing a DM'''
        model = web.input().model
        return describeQueries(model)
def get_api_calls():
    """Map each SMART target to the first matching GET path in the ontology.

    Considers record-item queries plus the container-level endpoints for
    the ontology, app manifests and container manifest.
    """
    special_paths = ("/ontology", "/apps/manifests/", "/manifest")
    calls = {}
    for call in rdf_ontology.api_calls:
        if str(call.http_method) != "GET":
            continue
        path = str(call.path)
        if str(call.category) != "record_items" and path not in special_paths:
            continue
        # Keep only the first path seen for each target.
        calls.setdefault(str(call.target), path)
    return calls
def get_model(call):
    '''Returns the name of the target SMART data model
    corresponding to the SMART python client convenience method.

    Expects a valid SMART python client convenience method name; returns
    None when no API call matches.
    '''
    # Lazily load the ontology on first use.
    if not rdf_ontology.api_types:
        rdf_ontology.parse_ontology(open(ONTOLOGY_PATH).read())
    prefix = "http://smartplatforms.org/terms#"
    # Scan the api calls for a matching convenience method name and strip
    # the terms-namespace prefix from its target.
    for api_call in rdf_ontology.api_calls:
        if api_call.client_method_name == call:
            return api_call.target.replace(prefix, "")
    return None
# httplib2 (used by oauth2) is not thread-safe, so all request handling is
# serialized through one shared lock, simulating single-threaded execution.
def mutex_processor():
    lock = Lock()
    def processor_func(handle):
        # `with` acquires and always releases, even when handle() raises.
        with lock:
            return handle()
    return processor_func
# Debug mode off so web.py does not auto-reload around the mutex processor.
web.config.debug = False
app = web.application(urls, globals())
app.add_processor(mutex_processor())
if __name__ == "__main__":
    # Standalone development server.
    app.run()
else:
    # WSGI entry point when hosted (e.g. under Apache/mod_wsgi).
    application = app.wsgifunc()
|
<gh_stars>1-10
#!/usr/bin/env python
from HTMLParser import HTMLParser
import re
import os
import sys
import string
class Html2MarkdownParser(HTMLParser):
    """Best-effort HTML-to-Markdown converter (Python 2 HTMLParser).

    Accumulates output in ``_markdown`` while tracking the open-tag stack;
    end-tag handlers convert the buffered tag body to Markdown.  Handlers
    are dispatched by name (handle_start_<tag> / handle_end_<tag>); tags
    without a handler pass through as plain text.
    """
    def __init__(self):
        self._markdown = ''                 # accumulated Markdown output
        self._tag_stack = []                # currently open tags, innermost last
        self._tag_attr_data = {}            # attrs of the tag being handled
        self._handled_tag_body_data = ''    # text buffered inside the open tag
        self._convertible_tags = ['a',
                                  'b', 'blockquote',
                                  'em',
                                  'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr',
                                  'ol',
                                  'p', 'pre',
                                  'strong',
                                  'ul']
        # FIXME: special characters
        HTMLParser.__init__(self)
    def _append_to_markdown(self, new_markdown):
        # Insert a separating space unless output already ends in whitespace.
        if len(self._markdown) > 1:
            if re.match('\s', self._markdown[-1:]):
                self._markdown += new_markdown
            else:
                self._markdown += ' ' + new_markdown
        else:
            self._markdown += new_markdown
    # <a />
    def handle_start_a(self, attrs):
        self._tag_attr_data = dict(attrs)
    def handle_end_a(self):
        # [text](href "title") — title only when present.
        a_tag = ''
        a_tag += '[' + self._handled_tag_body_data + ']'
        a_tag += '(' + self._tag_attr_data.get('href')
        title = self._tag_attr_data.get('title')
        if title:
            a_tag += ' "' + title + '") '
        else:
            a_tag += ') '
        self._append_to_markdown(a_tag)
    # <b />
    def handle_end_b(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('*' + self._handled_tag_body_data + '*')
    # <blockquote />
    def handle_end_blockquote(self):
        # Prefix every line of the quoted body with '> '.
        blockquote_body = self._handled_tag_body_data.split(os.linesep)
        for blockquote_line in blockquote_body:
            blockquote_line = blockquote_line.strip()
            self._append_to_markdown('> ' + blockquote_line + os.linesep)
    # <em />
    def handle_end_em(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('*' + self._handled_tag_body_data + '*')
    # <h1 />
    def handle_end_h1(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('# ' + self._handled_tag_body_data + ' #' + os.linesep)
    # <h2 />
    def handle_end_h2(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('## ' + self._handled_tag_body_data + ' ##' + os.linesep)
    # <h3 />
    def handle_end_h3(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('### ' + self._handled_tag_body_data + ' ###' + os.linesep)
    # <h4 />
    def handle_end_h4(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('#### ' + self._handled_tag_body_data + ' ####' + os.linesep)
    # <h5 />
    def handle_end_h5(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('##### ' + self._handled_tag_body_data + ' #####' + os.linesep)
    # <h6 />
    def handle_end_h6(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('###### ' + self._handled_tag_body_data + ' ######' + os.linesep)
    # <hr />
    def handle_start_hr(self, attrs):
        self._append_to_markdown('* * *' + os.linesep)
    # <li />
    def handle_end_li(self):
        # List marker depends on the enclosing list type (ol vs ul).
        if len(self._tag_stack):
            if self._tag_stack[-1] == 'ol':
                self._append_to_markdown('1. ' + self._handled_tag_body_data + os.linesep)
            elif self._tag_stack[-1] == 'ul':
                self._append_to_markdown('* ' + self._handled_tag_body_data + os.linesep)
    # <p />
    def handle_start_p(self, attrs):
        # Ensure exactly one blank line before a new paragraph.
        if len(self._markdown) > 1:
            if self._markdown[-2:] == '%s%s' % (os.linesep, os.linesep):
                pass
            elif self._markdown[-1:] == os.linesep:
                self._markdown += os.linesep
            else:
                self._markdown += os.linesep + os.linesep
    def handle_end_p(self):
        self._markdown += '%s%s' % (os.linesep, os.linesep)
    # <pre />
    def handle_end_pre(self):
        # Four-space indent marks a Markdown code block.
        code_lines = self._handled_tag_body_data.split(os.linesep)
        for code_line in code_lines:
            code_line = code_line.strip()
            self._append_to_markdown('    ' + code_line + os.linesep)
    # <strong />
    def handle_end_strong(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('**' + self._handled_tag_body_data + '**')
    ## ###
    def handle_starttag(self, tag, attrs):
        self._tag_stack.append(tag)
        try:
            # NOTE(review): eval-based dispatch on the tag name; getattr()
            # would be safer and faster — confirm before changing.
            eval('self.handle_start_' + tag + '(attrs)')
        except AttributeError, e:
            pass
    def handle_endtag(self, tag):
        self._tag_stack.pop()
        try:
            eval('self.handle_end_' + tag + '()')
            # Collapse three successive CRs into two before moving on
            while len(self._markdown) > 2 and \
                 self._markdown[-3:] == '%s%s%s' % (os.linesep, os.linesep, os.linesep):
                self._markdown = self._markdown[:-3] + '%s%s' % (os.linesep, os.linesep)
        except AttributeError, e:
            pass
        self._tag_attr_data = {}
        self._handled_tag_body_data = ''
    def handle_data(self, data):
        # Normalize whitespace line-by-line, then either buffer the text for
        # the innermost tag or emit it directly (top level / inside <p>).
        data = os.linesep.join(map(string.strip, data.strip().split(os.linesep)))
        if len(self._tag_stack) and self._tag_stack[-1] not in ['p']:
            self._handled_tag_body_data += data
        else:
            self._append_to_markdown(data)
    def get_markdown(self):
        # Final output always ends with exactly one newline.
        return self._markdown.rstrip() + '\n'
def main():
    # Filter mode: HTML on stdin, Markdown on stdout (Python 2 print).
    p = Html2MarkdownParser()
    buf = sys.stdin.read()
    p.feed(buf)
    p.close()
    print p.get_markdown()
if __name__ == "__main__":
sys.exit(main())
|
import math
import os
import unittest
import torch
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
import pytest
import common_utils
from common_utils import AudioBackendScope, BACKENDS
class TestFunctional(unittest.TestCase):
    """Tests for torchaudio.functional: compute_deltas, istft round-trips,
    sox-backed effects (gain/dither), pitch detection and DB conversions."""

    # (channels, samples) shapes used for random istft round-trip signals
    data_sizes = [(2, 20), (3, 15), (4, 10)]
    number_of_trials = 100
    specgram = torch.tensor([1., 2., 3., 4.])

    # Shared sample asset; loaded once at class-definition time.
    test_dirpath, test_dir = common_utils.create_temp_assets_dir()
    test_filepath = os.path.join(test_dirpath, 'assets',
                                 'steam-train-whistle-daniel_simon.wav')
    waveform_train, sr_train = torchaudio.load(test_filepath)

    def _test_compute_deltas(self, specgram, expected, win_length=3, atol=1e-6, rtol=1e-8):
        """Assert F.compute_deltas matches `expected` in shape and values."""
        computed = F.compute_deltas(specgram, win_length=win_length)
        self.assertTrue(computed.shape == expected.shape, (computed.shape, expected.shape))
        torch.testing.assert_allclose(computed, expected, atol=atol, rtol=rtol)

    def test_compute_deltas_onechannel(self):
        """Deltas of a single (1, 1, T) channel."""
        specgram = self.specgram.unsqueeze(0).unsqueeze(0)
        expected = torch.tensor([[[0.5, 1.0, 1.0, 0.5]]])
        self._test_compute_deltas(specgram, expected)

    def test_compute_deltas_twochannel(self):
        """Deltas of two identical channels must be identical per channel."""
        specgram = self.specgram.repeat(1, 2, 1)
        expected = torch.tensor([[[0.5, 1.0, 1.0, 0.5],
                                  [0.5, 1.0, 1.0, 0.5]]])
        self._test_compute_deltas(specgram, expected)

    def _compare_estimate(self, sound, estimate, atol=1e-6, rtol=1e-8):
        """Compare a reconstructed signal against the original."""
        # trim sound for case when constructed signal is shorter than original
        sound = sound[..., :estimate.size(-1)]
        self.assertTrue(sound.shape == estimate.shape, (sound.shape, estimate.shape))
        self.assertTrue(torch.allclose(sound, estimate, atol=atol, rtol=rtol))

    def _test_istft_is_inverse_of_stft(self, kwargs):
        """Round-trip random signals through stft/istft with the given kwargs."""
        # generates a random sound signal for each tril and then does the stft/istft
        # operation to check whether we can reconstruct signal
        for data_size in self.data_sizes:
            for i in range(self.number_of_trials):
                sound = common_utils.random_float_tensor(i, data_size)
                stft = torch.stft(sound, **kwargs)
                estimate = torchaudio.functional.istft(stft, length=sound.size(1), **kwargs)
                self._compare_estimate(sound, estimate)

    def test_istft_is_inverse_of_stft1(self):
        # hann_window, centered, normalized, onesided
        kwargs1 = {
            'n_fft': 12,
            'hop_length': 4,
            'win_length': 12,
            'window': torch.hann_window(12),
            'center': True,
            'pad_mode': 'reflect',
            'normalized': True,
            'onesided': True,
        }
        self._test_istft_is_inverse_of_stft(kwargs1)

    def test_istft_is_inverse_of_stft2(self):
        # hann_window, centered, not normalized, not onesided
        kwargs2 = {
            'n_fft': 12,
            'hop_length': 2,
            'win_length': 8,
            'window': torch.hann_window(8),
            'center': True,
            'pad_mode': 'reflect',
            'normalized': False,
            'onesided': False,
        }
        self._test_istft_is_inverse_of_stft(kwargs2)

    def test_istft_is_inverse_of_stft3(self):
        # hamming_window, centered, normalized, not onesided
        kwargs3 = {
            'n_fft': 15,
            'hop_length': 3,
            'win_length': 11,
            'window': torch.hamming_window(11),
            'center': True,
            'pad_mode': 'constant',
            'normalized': True,
            'onesided': False,
        }
        self._test_istft_is_inverse_of_stft(kwargs3)

    def test_istft_is_inverse_of_stft4(self):
        # hamming_window, not centered, not normalized, onesided
        # window same size as n_fft
        kwargs4 = {
            'n_fft': 5,
            'hop_length': 2,
            'win_length': 5,
            'window': torch.hamming_window(5),
            'center': False,
            'pad_mode': 'constant',
            'normalized': False,
            'onesided': True,
        }
        self._test_istft_is_inverse_of_stft(kwargs4)

    def test_istft_is_inverse_of_stft5(self):
        # hamming_window, not centered, not normalized, not onesided
        # window same size as n_fft
        kwargs5 = {
            'n_fft': 3,
            'hop_length': 2,
            'win_length': 3,
            'window': torch.hamming_window(3),
            'center': False,
            'pad_mode': 'reflect',
            'normalized': False,
            'onesided': False,
        }
        self._test_istft_is_inverse_of_stft(kwargs5)

    def test_istft_of_ones(self):
        """istft of the (hand-written) stft of an all-ones signal recovers the ones."""
        # stft = torch.stft(torch.ones(4), 4)
        stft = torch.tensor([
            [[4., 0.], [4., 0.], [4., 0.], [4., 0.], [4., 0.]],
            [[0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.]],
            [[0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.]]
        ])
        estimate = torchaudio.functional.istft(stft, n_fft=4, length=4)
        self._compare_estimate(torch.ones(4), estimate)

    def test_istft_of_zeros(self):
        """istft of an all-zero spectrogram is an all-zero signal."""
        # stft = torch.stft(torch.zeros(4), 4)
        stft = torch.zeros((3, 5, 2))
        estimate = torchaudio.functional.istft(stft, n_fft=4, length=4)
        self._compare_estimate(torch.zeros(4), estimate)

    def test_istft_requires_overlap_windows(self):
        """Non-overlapping windows (gap between frames) must be rejected."""
        # the window is size 1 but it hops 20 so there is a gap which throw an error
        stft = torch.zeros((3, 5, 2))
        self.assertRaises(AssertionError, torchaudio.functional.istft, stft, n_fft=4,
                          hop_length=20, win_length=1, window=torch.ones(1))

    def test_istft_requires_nola(self):
        """istft must enforce the nonzero-overlap-add (NOLA) condition."""
        stft = torch.zeros((3, 5, 2))
        kwargs_ok = {
            'n_fft': 4,
            'win_length': 4,
            'window': torch.ones(4),
        }
        kwargs_not_ok = {
            'n_fft': 4,
            'win_length': 4,
            'window': torch.zeros(4),
        }
        # A window of ones meets NOLA but a window of zeros does not. This should
        # throw an error.
        torchaudio.functional.istft(stft, **kwargs_ok)
        self.assertRaises(AssertionError, torchaudio.functional.istft, stft, **kwargs_not_ok)

    def test_istft_requires_non_empty(self):
        """Empty spectrograms must be rejected."""
        self.assertRaises(AssertionError, torchaudio.functional.istft, torch.zeros((3, 0, 2)), 2)
        self.assertRaises(AssertionError, torchaudio.functional.istft, torch.zeros((0, 3, 2)), 2)

    def _test_istft_of_sine(self, amplitude, L, n):
        """Reconstruct amplitude*sin(2*pi/L*n*x) from a hand-built stft."""
        # stft of amplitude*sin(2*pi/L*n*x) with the hop length and window size equaling L
        x = torch.arange(2 * L + 1, dtype=torch.get_default_dtype())
        sound = amplitude * torch.sin(2 * math.pi / L * x * n)
        # stft = torch.stft(sound, L, hop_length=L, win_length=L,
        #                   window=torch.ones(L), center=False, normalized=False)
        stft = torch.zeros((L // 2 + 1, 2, 2))
        stft_largest_val = (amplitude * L) / 2.0
        if n < stft.size(0):
            stft[n, :, 1] = -stft_largest_val
        if 0 <= L - n < stft.size(0):
            # symmetric about L // 2
            stft[L - n, :, 1] = stft_largest_val
        estimate = torchaudio.functional.istft(stft, L, hop_length=L, win_length=L,
                                               window=torch.ones(L), center=False, normalized=False)
        # There is a larger error due to the scaling of amplitude
        self._compare_estimate(sound, estimate, atol=1e-3)

    def test_istft_of_sine(self):
        self._test_istft_of_sine(amplitude=123, L=5, n=1)
        self._test_istft_of_sine(amplitude=150, L=5, n=2)
        self._test_istft_of_sine(amplitude=111, L=5, n=3)
        self._test_istft_of_sine(amplitude=160, L=7, n=4)
        self._test_istft_of_sine(amplitude=145, L=8, n=5)
        self._test_istft_of_sine(amplitude=80, L=9, n=6)
        self._test_istft_of_sine(amplitude=99, L=10, n=7)

    def _test_linearity_of_istft(self, data_size, kwargs, atol=1e-6, rtol=1e-8):
        """istft(a*x + b*y) must equal a*istft(x) + b*istft(y)."""
        for i in range(self.number_of_trials):
            tensor1 = common_utils.random_float_tensor(i, data_size)
            tensor2 = common_utils.random_float_tensor(i * 2, data_size)
            a, b = torch.rand(2)
            istft1 = torchaudio.functional.istft(tensor1, **kwargs)
            istft2 = torchaudio.functional.istft(tensor2, **kwargs)
            istft = a * istft1 + b * istft2
            estimate = torchaudio.functional.istft(a * tensor1 + b * tensor2, **kwargs)
            self._compare_estimate(istft, estimate, atol, rtol)

    def test_linearity_of_istft1(self):
        # hann_window, centered, normalized, onesided
        kwargs1 = {
            'n_fft': 12,
            'window': torch.hann_window(12),
            'center': True,
            'pad_mode': 'reflect',
            'normalized': True,
            'onesided': True,
        }
        data_size = (2, 7, 7, 2)
        self._test_linearity_of_istft(data_size, kwargs1)

    def test_linearity_of_istft2(self):
        # hann_window, centered, not normalized, not onesided
        kwargs2 = {
            'n_fft': 12,
            'window': torch.hann_window(12),
            'center': True,
            'pad_mode': 'reflect',
            'normalized': False,
            'onesided': False,
        }
        data_size = (2, 12, 7, 2)
        self._test_linearity_of_istft(data_size, kwargs2)

    def test_linearity_of_istft3(self):
        # hamming_window, centered, normalized, not onesided
        kwargs3 = {
            'n_fft': 12,
            'window': torch.hamming_window(12),
            'center': True,
            'pad_mode': 'constant',
            'normalized': True,
            'onesided': False,
        }
        data_size = (2, 12, 7, 2)
        self._test_linearity_of_istft(data_size, kwargs3)

    def test_linearity_of_istft4(self):
        # hamming_window, not centered, not normalized, onesided
        kwargs4 = {
            'n_fft': 12,
            'window': torch.hamming_window(12),
            'center': False,
            'pad_mode': 'constant',
            'normalized': False,
            'onesided': True,
        }
        data_size = (2, 7, 3, 2)
        self._test_linearity_of_istft(data_size, kwargs4, atol=1e-5, rtol=1e-8)

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_gain(self):
        """F.gain should match the sox 'gain' effect."""
        waveform_gain = F.gain(self.waveform_train, 3)
        # NOTE(review): assertTrue ignores its second argument (it is the failure
        # message); this looks like it was meant to be a comparison — confirm.
        self.assertTrue(waveform_gain.abs().max().item(), 1.)

        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(self.test_filepath)
        E.append_effect_to_chain("gain", [3])
        sox_gain_waveform = E.sox_build_flow_effects()[0]
        self.assertTrue(torch.allclose(waveform_gain, sox_gain_waveform, atol=1e-04))

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_dither(self):
        """F.dither (with and without noise shaping) should match the sox 'dither' effect."""
        waveform_dithered = F.dither(self.waveform_train)
        waveform_dithered_noiseshaped = F.dither(self.waveform_train, noise_shaping=True)

        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(self.test_filepath)
        E.append_effect_to_chain("dither", [])
        sox_dither_waveform = E.sox_build_flow_effects()[0]
        self.assertTrue(torch.allclose(waveform_dithered, sox_dither_waveform, atol=1e-04))
        E.clear_chain()

        E.append_effect_to_chain("dither", ["-s"])
        sox_dither_waveform_ns = E.sox_build_flow_effects()[0]
        self.assertTrue(torch.allclose(waveform_dithered_noiseshaped, sox_dither_waveform_ns, atol=1e-02))

    @unittest.skipIf("sox" not in BACKENDS, "sox not available")
    @AudioBackendScope("sox")
    def test_vctk_transform_pipeline(self):
        """Resample + dither pipeline should match the equivalent sox effect chain."""
        test_filepath_vctk = os.path.join(self.test_dirpath, "assets/VCTK-Corpus/wav48/p224/", "p224_002.wav")
        wf_vctk, sr_vctk = torchaudio.load(test_filepath_vctk)
        # rate
        sample = T.Resample(sr_vctk, 16000, resampling_method='sinc_interpolation')
        wf_vctk = sample(wf_vctk)
        # dither
        wf_vctk = F.dither(wf_vctk, noise_shaping=True)

        E = torchaudio.sox_effects.SoxEffectsChain()
        E.set_input_file(test_filepath_vctk)
        E.append_effect_to_chain("gain", ["-h"])
        E.append_effect_to_chain("channels", [1])
        E.append_effect_to_chain("rate", [16000])
        E.append_effect_to_chain("gain", ["-rh"])
        E.append_effect_to_chain("dither", ["-s"])
        wf_vctk_sox = E.sox_build_flow_effects()[0]

        self.assertTrue(torch.allclose(wf_vctk, wf_vctk_sox, rtol=1e-03, atol=1e-03))

    def test_pitch(self):
        """detect_pitch_frequency should stay within 1 Hz of the reference tones."""
        test_dirpath, test_dir = common_utils.create_temp_assets_dir()
        test_filepath_100 = os.path.join(test_dirpath, 'assets', "100Hz_44100Hz_16bit_05sec.wav")
        test_filepath_440 = os.path.join(test_dirpath, 'assets', "440Hz_44100Hz_16bit_05sec.wav")

        # Files from https://www.mediacollege.com/audio/tone/download/
        tests = [
            (test_filepath_100, 100),
            (test_filepath_440, 440),
        ]

        for filename, freq_ref in tests:
            waveform, sample_rate = torchaudio.load(filename)
            freq = torchaudio.functional.detect_pitch_frequency(waveform, sample_rate)
            threshold = 1
            s = ((freq - freq_ref).abs() > threshold).sum()
            self.assertFalse(s)

    def test_DB_to_amplitude(self):
        """amplitude_to_DB followed by DB_to_amplitude must round-trip."""
        # Make some noise
        x = torch.rand(1000)
        spectrogram = torchaudio.transforms.Spectrogram()
        spec = spectrogram(x)

        amin = 1e-10
        ref = 1.0
        db_multiplier = math.log10(max(amin, ref))

        # Waveform amplitude -> DB -> amplitude
        multiplier = 20.
        power = 0.5

        db = F.amplitude_to_DB(torch.abs(x), multiplier, amin, db_multiplier, top_db=None)
        x2 = F.DB_to_amplitude(db, ref, power)

        self.assertTrue(torch.allclose(torch.abs(x), x2, atol=5e-5))

        # Spectrogram amplitude -> DB -> amplitude
        db = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db=None)
        x2 = F.DB_to_amplitude(db, ref, power)

        self.assertTrue(torch.allclose(spec, x2, atol=5e-5))

        # Waveform power -> DB -> power
        multiplier = 10.
        power = 1.

        db = F.amplitude_to_DB(x, multiplier, amin, db_multiplier, top_db=None)
        x2 = F.DB_to_amplitude(db, ref, power)

        self.assertTrue(torch.allclose(torch.abs(x), x2, atol=5e-5))

        # Spectrogram power -> DB -> power
        db = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db=None)
        x2 = F.DB_to_amplitude(db, ref, power)

        self.assertTrue(torch.allclose(spec, x2, atol=5e-5))
@pytest.mark.parametrize('complex_tensor', [
    torch.randn(1, 2, 1025, 400, 2),
    torch.randn(1025, 400, 2)
])
@pytest.mark.parametrize('power', [1, 2, 0.7])
def test_complex_norm(complex_tensor, power):
    """F.complex_norm must equal the manual (re^2 + im^2)^(power/2)."""
    reference = complex_tensor.pow(2).sum(-1).pow(power / 2)
    computed = F.complex_norm(complex_tensor, power)
    assert torch.allclose(reference, computed, atol=1e-5)
@pytest.mark.parametrize('specgram', [
    torch.randn(2, 1025, 400),
    torch.randn(1, 201, 100)
])
@pytest.mark.parametrize('mask_param', [100])
@pytest.mark.parametrize('mask_value', [0., 30.])
@pytest.mark.parametrize('axis', [1, 2])
def test_mask_along_axis(specgram, mask_param, mask_value, axis):
    """Masking keeps the input shape and masks fewer than mask_param full columns."""
    masked = F.mask_along_axis(specgram, mask_param, mask_value, axis)

    complementary_axis = 1 if axis == 2 else 2
    columns_at_mask_value = (masked == mask_value).sum(complementary_axis)
    fully_masked = (columns_at_mask_value == masked.size(complementary_axis)).sum()
    fully_masked /= masked.size(0)

    assert masked.size() == specgram.size()
    assert fully_masked < mask_param
@pytest.mark.parametrize('specgrams', [
    torch.randn(4, 2, 1025, 400),
])
@pytest.mark.parametrize('mask_param', [100])
@pytest.mark.parametrize('mask_value', [0., 30.])
@pytest.mark.parametrize('axis', [2, 3])
def test_mask_along_axis_iid(specgrams, mask_param, mask_value, axis):
    """Per-example masking keeps the shape; each example masks < mask_param columns."""
    masked = F.mask_along_axis_iid(specgrams, mask_param, mask_value, axis)

    complementary_axis = 2 if axis == 3 else 3
    columns_at_mask_value = (masked == mask_value).sum(complementary_axis)
    fully_masked = (columns_at_mask_value == masked.size(complementary_axis)).sum(-1)

    assert masked.size() == specgrams.size()
    assert (fully_masked < mask_param).sum() == fully_masked.numel()
# Run the unittest class directly; pytest also collects the module-level
# parametrized tests above.
if __name__ == '__main__':
    unittest.main()
|
# loopchain/peer/candidate_blocks.py
# Copyright 2017 theloop, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage Candidate Blocks and its vote"""
import collections
import logging
import loopchain.utils as util
from loopchain import configure as conf
from loopchain.baseservice import ObjectManager
from loopchain.peer import Vote
class NoExistBlock(Exception):
    """Raised when the requested block does not exist in the candidate set."""
    pass
class NotCompleteValidation(Exception):
    """Raised when validation of the given block has not completed yet."""

    def __init__(self, message, block=None):
        # Chain into Exception so str(exc) and exc.args carry the message
        # (the original left them empty).
        super().__init__(message)
        self.message = message
        self.block = block
class InvalidatedBlock(Exception):
    """Raised for a block that failed validation."""

    def __init__(self, message, block=None):
        # Chain into Exception so str(exc) and exc.args carry the message
        # (the original left them empty).
        super().__init__(message)
        self.message = message
        self.block = block
class CandidateBlocks:
    """Manages blocks created by BlockManager before they are registered to the BlockChain.

    Stores unconfirmed blocks and applies the validation votes received from each peer.
    """

    def __init__(self, peer_id, channel_name):
        """
        :param peer_id: id of the peer that owns these candidate blocks
        :param channel_name: name of the channel the blocks belong to
        """
        self.__peer_id = peer_id
        self.__channel_name = channel_name
        # OrderedDict of block_hash -> [vote, block]
        self.__unconfirmed_blocks = collections.OrderedDict()
        self.__candidate_last_block = None

    def add_unconfirmed_block(self, block):
        """Register a block periodically produced by the Block Manager.

        The block is broadcast to each peer to collect validation votes.

        :param block: block built from collected txs; not yet a BlockChain member
        :return: block_hash (str) identifying the unconfirmed block
        """
        logging.debug(f"CandidateBlocks:add_unconfirmed_block ({self.__channel_name})")

        # Record the creator's peer_id on the block (a peer newly joining the
        # network treats the last block's peer_id as the leader).
        block.peer_id = self.__peer_id

        # The leader counts its own vote at creation time.
        vote = Vote(block.block_hash,
                    ObjectManager().peer_service.channel_manager.get_peer_manager(self.__channel_name))
        vote.add_vote(ObjectManager().peer_service.group_id, ObjectManager().peer_service.peer_id, None)
        self.__unconfirmed_blocks[block.block_hash] = [vote, block]
        self.__candidate_last_block = block

        return block.block_hash

    def reset_voter_count(self, block_hash):
        """Rebuild the vote for `block_hash` against the current peer list, keeping prior votes."""
        logging.debug(f"({self.__channel_name}) Reset voter count in candidate blocks")
        vote = Vote(block_hash, ObjectManager().peer_service.channel_manager.get_peer_manager(self.__channel_name))
        prev_vote, block = self.__unconfirmed_blocks[block_hash]
        vote.set_vote_with_prev_vote(prev_vote)
        self.__unconfirmed_blocks[block_hash] = [vote, block]

    def get_last_block(self, blockchain=None):
        """Return the newest candidate block, falling back to `blockchain.last_block`."""
        if self.__candidate_last_block is not None:
            return self.__candidate_last_block
        if blockchain is not None:
            return blockchain.last_block
        return None

    def set_last_block(self, block):
        """Overwrite the candidate last block."""
        self.__candidate_last_block = block

    def vote_to_block(self, block_hash, is_validate, peer_id, group_id):
        """Apply a peer's vote to the block identified by `block_hash`.

        :param is_validate: validation result (True | False)
        """
        if block_hash in self.__unconfirmed_blocks.keys():
            # tuple indexed by bool: True -> None (success), False -> failure sign
            self.__unconfirmed_blocks[block_hash][0].add_vote(group_id, peer_id,
                                                              (conf.TEST_FAIL_VOTE_SIGN, None)[is_validate])

    def remove_broken_block(self, block_hash):
        """Drop a failed block from the candidate set.

        :return: the failed block object
        """
        return self.__unconfirmed_blocks.pop(block_hash)[1]

    def get_confirmed_block(self, block_hash=None):
        """Return a block whose validation succeeded and remove it from the candidate set.

        :return: the confirmed block (to be appended to the BlockChain), or None when
                 there is no candidate; raises NoExistBlock / InvalidatedBlock /
                 NotCompleteValidation otherwise.
        """
        if block_hash is None:
            candidate_block = self.get_candidate_block()
            if candidate_block is None:
                return None
            block_hash = candidate_block.block_hash

        if block_hash not in self.__unconfirmed_blocks.keys():
            util.apm_event(self.__peer_id, {
                'event_type': 'NoExistBlock',
                'peer_id': self.__peer_id,
                'data': {
                    'message': 'No Exist block in candidate blocks by hash',
                    'block_hash': block_hash}})
            raise NoExistBlock("No Exist block in candidate blocks by hash: " + block_hash)

        vote, block = self.__unconfirmed_blocks[block_hash]
        if vote.get_result(block_hash, conf.VOTING_RATIO):
            logging.info("Confirmed block pop from candidate blocks hash: " + block_hash)
            return self.__unconfirmed_blocks.pop(block_hash)[1]

        if vote.is_failed_vote(block_hash, conf.VOTING_RATIO):
            logging.warning("This block fail to validate!!")
            self.remove_broken_block(block_hash)
            util.apm_event(self.__peer_id, {
                'event_type': 'InvalidatedBlock',
                'peer_id': self.__peer_id,
                'data': {
                    'message': 'This block fail to validate',
                    'block_hash': block_hash}})
            # Bug fix: the original raised with `candidate_block`, which is unbound
            # when the caller passed block_hash directly; use the stored block.
            raise InvalidatedBlock("This block fail to validate", block)

        logging.warning("There is Not Complete Validation.")
        util.apm_event(self.__peer_id, {
            'event_type': 'NotCompleteValidation',
            'peer_id': self.__peer_id,
            'data': {
                'message': 'There is Not Complete Validation.',
                'block_hash': block_hash}})
        raise NotCompleteValidation("Not Complete Validation", block)

    def get_candidate_block(self):
        """Return the earliest registered candidate block (to be broadcast for validation)."""
        if len(self.__unconfirmed_blocks) > 0:
            return list(self.__unconfirmed_blocks.items())[0][1][1]
        return None

    def is_remain_blocks(self):
        """Return True while any unconfirmed block remains."""
        return len(self.__unconfirmed_blocks) > 0
|
# actor_services/src/random_create.py
#!/usr/bin/env python
# coding=utf-8
'''
Author:<NAME>
Date:
Info:
'''
import random
import numpy as np
import rospkg
from lxml import etree
from lxml.etree import Element
from copy import deepcopy
import yaml
rospack = rospkg.RosPack()

# Load the pedestrian force-model factors.
# yaml.safe_load replaces the deprecated yaml.load-without-Loader call: it
# cannot instantiate arbitrary Python objects from the YAML stream.
with open(rospack.get_path("actor_services") + "/src/multiforceFactors.yaml", 'r') as stream:
    try:
        factorData = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)

SocialForce = factorData["SocialForceFactor"]
DesiredForce = factorData["DesiredForceFactor"]
ObstacleForce = factorData["ObstacleForceFactor"]
# NOTE(review): AnimationFactor is read and printed but never written into the
# generated world below (animation_factor gets a random speed instead) — confirm.
AnimationFactor = factorData["AnimationFactor"]
print(SocialForce)
print(DesiredForce)
print(ObstacleForce)
print(AnimationFactor)

plugin_pkg_path = rospack.get_path("actor_plugin")
plugin_path = plugin_pkg_path + "/lib/libactorplugin_ros.so"
actor_pkg_path = rospack.get_path("actor_services")

# Start from the empty world and append generated <actor> elements to <world>.
tree_ = etree.parse(actor_pkg_path + '/worlds/empty.world')
world_ = tree_.getroot().getchildren()[0]

skin_list = ["moonwalk.dae",
             "run.dae",
             "sit_down.dae",
             "sitting.dae",
             "stand_up.dae",
             "stand.dae",
             "talk_a.dae",
             "talk_b.dae",
             "walk.dae"]

# add speed and dodging direction
actor_list = []
for item in range(6):
    actor = Element("actor", name="actor" + str(item))

    # randomly generate position to pose text (4x4 m square around the origin)
    pose = Element("pose")
    x = str((np.random.rand() - 0.5) * 4)
    y = str((np.random.rand() - 0.5) * 4)
    pose.text = x + " " + y + " " + "1.02 0 0 0"
    actor.append(pose)

    skin = Element("skin")
    skin_fn = Element("filename")
    skin_fn.text = random.choice(skin_list)
    skin_scale = Element("scale")
    skin_scale.text = "1"
    skin.append(skin_fn)
    skin.append(skin_scale)
    actor.append(skin)

    animation = Element("animation", name="walking")
    animate_fn = Element("filename")
    animate_fn.text = "walk.dae"
    interpolate_x = Element("interpolate_x")
    interpolate_x.text = "true"
    animate_scale = Element("scale")
    animate_scale.text = "1"
    animation.append(animate_fn)
    animation.append(animate_scale)
    animation.append(interpolate_x)
    actor.append(animation)

    # Per-actor plugin configuration (forces, target, obstacles to ignore).
    plugin = Element("plugin", name="None", filename=plugin_path)
    speed = Element("speed")
    speed.text = str(1.3)
    socialForce = Element("socialForce")
    socialForce.text = str(SocialForce)
    desiredForce = Element("desiredForce")
    desiredForce.text = str(DesiredForce)
    obstacleForce = Element("obstacleForce")
    obstacleForce.text = str(ObstacleForce)
    dodgingRight = Element("dodgingRight")
    dodgingRight.text = str(np.random.rand() < 0.5).lower()
    target = Element("target")
    x = str((np.random.rand() - 0.5) * 4)
    y = str((np.random.rand() - 0.5) * 4)
    target.text = x + " " + y + " " + "1.02"
    target_weight = Element("target_weight")
    target_weight.text = "1.5"
    obstacle_weight = Element("obstacle_weight")
    obstacle_weight.text = "1.5"
    animation_factor = Element("animation_factor")
    speed_ = str((np.random.rand() * 3) + 5)
    animation_factor.text = speed_
    ignore_obstacle = Element("ignore_obstacles")
    model_cafe = Element("model")
    model_cafe.text = "caffe"
    model_ground_plane = Element("model")
    model_ground_plane.text = "ground_plane"
    ignore_obstacle.append(model_cafe)
    ignore_obstacle.append(model_ground_plane)
    plugin.append(speed)
    plugin.append(socialForce)
    plugin.append(desiredForce)
    plugin.append(obstacleForce)
    plugin.append(dodgingRight)
    plugin.append(target)
    plugin.append(target_weight)
    plugin.append(obstacle_weight)
    plugin.append(animation_factor)
    plugin.append(ignore_obstacle)
    actor.append(plugin)

    world_.append(actor)

tree_.write(actor_pkg_path + '/worlds/ped_world.world', pretty_print=True, xml_declaration=True, encoding="utf-8")
|
import cdms2,cdutil,sys,MV2,numpy,os,cdat_info
f = cdms2.open(os.path.join(cdat_info.get_sampledata_path(), 'clt.nc'))
s = f("clt")
cdutil.setTimeBoundsMonthly(s)

print('Getting JJA, which should be inexistant in data')
# Bug fix: the original raised RuntimeError *inside* the try and swallowed it
# with a bare except, so the check could never fail.  try/except/else below
# actually verifies that extracting JJA fails on data without JJA months.
try:
    cdutil.JJA(s[:5])
except Exception:
    pass
else:
    raise RuntimeError("data w/o season did not fail")

# Create a year worth of data w/o JJA
s1 = s[:5]
s2 = s[8:12]
s3 = MV2.concatenate((s1, s2))
t = MV2.concatenate((s1.getTime()[:], s2.getTime()[:]))
t = cdms2.createAxis(t, id='time')
t.units = s.getTime().units
t.designateTime()
s3.setAxis(0, t)
cdutil.setTimeBoundsMonthly(s3)

try:
    cdutil.JJA(s3)
except Exception:
    pass
else:
    raise RuntimeError("data w/o season did not return None")

try:
    cdutil.JJA.departures(s3)
except Exception:
    pass
else:
    raise RuntimeError("data w/o season did not return None for dep")

try:
    cdutil.JJA.climatology(s3)
except Exception:
    pass
else:
    raise RuntimeError("data w/o season did not return None for clim")

# Now gets seasonal cycle, should have JJA all missing
print('Testing seasonal cycle on 1 year data w/o JJA should work')
a = cdutil.SEASONALCYCLE(s3)
assert(a.shape == (4, 46, 72))
assert(numpy.allclose(a.getTime(), [0., 3., 9., 12.]))
assert(numpy.allclose(a.getTime().getBounds(), numpy.array([[-1., 2.],
                                                            [2., 5.],
                                                            [8., 11.],
                                                            [11., 14.]])))
# NOTE: the original used `raise "..."` (string exceptions), which has been
# illegal since Python 2.6; RuntimeError is raised instead throughout.
if a.shape != (4, 46, 72):
    raise RuntimeError("Error returned data with wrong shape")
if not numpy.equal(a.getTime()[:], [0., 3., 9., 12.]).all():
    raise RuntimeError("Error time are not valid")
if not numpy.equal(a.getTime().getBounds()[:], [[-1., 2.], [2., 5.], [8., 11.], [11., 14.]]).all():
    raise RuntimeError("Error bound time are not valid")
d = cdutil.SEASONALCYCLE.departures(s3)
c = cdutil.SEASONALCYCLE.climatology(s3)

# Create 2 year worth of data w/o JJA
s1 = s[:5]
s2 = s[8:17]
s3 = s[20:24]
s4 = MV2.concatenate((s1, s2))
s5 = MV2.concatenate((s4, s3))
t = MV2.concatenate((s1.getTime()[:], s2.getTime()[:]))
t2 = MV2.concatenate((t, s3.getTime()[:]))
t = cdms2.createAxis(t2, id='time')
t.units = s.getTime().units
t.designateTime()
s5.setAxis(0, t)
cdutil.setTimeBoundsMonthly(s5)
d = cdutil.SEASONALCYCLE.departures(s5)
c = cdutil.SEASONALCYCLE.climatology(s5)

try:
    cdutil.JJA(s5)
except Exception:
    pass
else:
    raise RuntimeError("data w/o season did not return None")

# Now gets seasonal cycle, should have JJA all missing
print('Testing seasonal cycle on 2 years data w/o JJA should work')
a = cdutil.SEASONALCYCLE(s5)
if a.shape != (7, 46, 72):
    raise RuntimeError("Error returned data with wrong shape")

# Create 2 years worth of data w/o 1st JJA
s1 = s[:5]
s2 = s[8:24]
s3 = MV2.concatenate((s1, s2))
t = MV2.concatenate((s1.getTime()[:], s2.getTime()[:]))
t = cdms2.createAxis(t, id='time')
t.units = s.getTime().units
t.designateTime()
s3.setAxis(0, t)
cdutil.setTimeBoundsMonthly(s3)
a = cdutil.JJA(s3)
if a is None:
    raise RuntimeError("data w/o 1st season returned None")

# Now gets seasonal cycle, should have JJA all missing
print('Testing seasonal cycle on 2 years data w/o 1st JJA should work')
a = cdutil.SEASONALCYCLE(s3)
d = cdutil.SEASONALCYCLE.departures(s3)
c = cdutil.SEASONALCYCLE.climatology(s3)
if a.shape != (8, 46, 72):
    raise RuntimeError("Error returned data with wrong shape")
if not numpy.equal(a.getTime()[:], [0., 3., 9., 12., 15., 18., 21, 24]).all():
    raise RuntimeError("Error time are not valid")
if not numpy.equal(a.getTime().getBounds()[:], [[-1., 2.], [2., 5.], [8., 11.], [11., 14.], [14., 17.], [17., 20.], [20., 23.], [23., 26.]]).all():
    raise RuntimeError("Error bound time are not valid")

print(" Ok we test the filling part")
print(" this should add month '6' as all missing")
b = cdutil.times.insert_monthly_seasons(a, ['JJA', ])
if b.shape != (9, 46, 72):
    raise RuntimeError("Error returned data with wrong shape")
if not numpy.equal(b.getTime()[:], [0., 3., 6, 9., 12., 15., 18., 21, 24]).all():
    raise RuntimeError("Error time are not valid")
if not numpy.equal(b.getTime().getBounds()[:], [[-1., 2.], [2., 5.], [5, 8], [8., 11.], [11., 14.], [14., 17.], [17., 20.], [20., 23.], [23., 26.]]).all():
    raise RuntimeError("Error bound time are not valid")
if not b[2].count() == 0:
    raise RuntimeError("Error not all times missing in added spot")

# Now gets seasonal cycle, should have JJA all missing
print('Testing seasonal cycle on 2 years data w/o JJA should work')
a = cdutil.SEASONALCYCLE(s5)
if a.shape != (7, 46, 72):
    raise RuntimeError("Error returned data with wrong shape")

# Creates data with big gap in years
s1 = s[:15]
s2 = s[68:]
s3 = MV2.concatenate((s1, s2))
t = MV2.concatenate((s1.getTime()[:], s2.getTime()[:]))
t = cdms2.createAxis(t, id='time')
t.units = s.getTime().units
t.designateTime()
s3.setAxis(0, t)
cdutil.setTimeBoundsMonthly(s3)
a = cdutil.JJA(s3)
if a is None:
    raise RuntimeError("data with gap returned None")

# Now gets seasonal cycle, should have JJA all missing
print('Testing seasonal cycle on data with years of gap should work')
a = cdutil.SEASONALCYCLE(s3)
d = cdutil.SEASONALCYCLE.departures(s3)
c = cdutil.SEASONALCYCLE.climatology(s3)
assert(s3.shape == (67, 46, 72))
assert(a.shape == (24, 46, 72))
assert(numpy.equal(a.getTime(), [0., 3., 6., 9., 12., 15., 69., 72., 75., 78., 81., 84.,
                                 87., 90., 93., 96., 99., 102., 105., 108., 111., 114., 117., 120.]).all())

print(" Ok we test the filling part")
print(" this should add month '6' as all missing")
b = cdutil.times.insert_monthly_seasons(a, cdutil.times.SEASONALCYCLE.seasons)
assert(b.shape == (41, 46, 72))
assert(numpy.equal(b.getTime()[:], [0., 3., 6., 9., 12., 15., 18., 21., 24., 27., 30., 33.,
                                    36., 39., 42., 45., 48., 51., 54., 57., 60., 63., 66., 69.,
                                    72., 75., 78., 81., 84., 87., 90., 93., 96., 99., 102., 105.,
                                    108., 111., 114., 117., 120.]).all())
assert(cdutil.SEASONALCYCLE.departures(s3).shape == (24, 46, 72))
assert(a.shape == (24, 46, 72))
assert(cdutil.SEASONALCYCLE.climatology(s3).shape == (4, 46, 72))
# c = cdutil.SEASONALCYCLE.climatology(s5)
# print(d.shape)
|
import hashlib
import os
import requests
import time
import warnings
import six
warnings.filterwarnings("ignore", message=".*InsecurePlatformWarning.*")
"""
OneSky's simple python wrapper
Known WTF?:
- If you manualy create project file (e.g. django.po) inside SkyOne app, API will return 400 error "This file is not downloadable through API"
- Always upload at least your default django.po language file for each project.
"""
class OneSkyApiClient(object):
    """Thin client for the OneSky platform API v1.

    Every request is authenticated with the api_key, a timestamp and an
    md5(timestamp + secret) "dev hash".
    """

    def __init__(self, api_key, api_secret, locale_path='.'):
        """
        :param api_key: OneSky public API key
        :param api_secret: OneSky API secret (only used to build the dev hash)
        :param locale_path: directory where downloaded translation files are written
        """
        self.api_key = api_key
        self.api_secret = api_secret
        self.locale_path = locale_path

    def json_request(self, method="get", api_path=None, api_params=None, file_stream=None):
        """Perform an authenticated request against `api_path`.

        :return: (status_code, payload) where payload is the decoded JSON body,
                 {'filename': dest_path} for attachment downloads, or {} when the
                 body is not valid JSON.
        :raises ValueError: for HTTP methods other than "get"/"post".
        """
        method = method.lower()
        if method not in ("get", "post"):
            # Bug fix: the original fell through both branches and then
            # referenced an unbound `response`; fail fast instead.
            raise ValueError("Unsupported HTTP method: %r" % method)

        url = 'https://platform.api.onesky.io/1/' + api_path

        # Drop parameters explicitly set to None so they are not sent.
        url_params = {}
        if isinstance(api_params, dict):
            url_params = {k: v for k, v in api_params.items() if v is not None}

        # Authentication: md5(timestamp + secret) dev hash.
        timestamp = str(int(time.time()))
        auth_hash = hashlib.md5()
        auth_hash.update(six.ensure_binary(timestamp))
        auth_hash.update(six.ensure_binary(self.api_secret))
        url_params["dev_hash"] = auth_hash.hexdigest()
        url_params["timestamp"] = timestamp
        url_params["api_key"] = self.api_key

        if method == "get":
            response = requests.get(url, params=url_params)
        else:  # post
            file_name = url_params.pop("file_name")
            response = requests.post(url, params=url_params,
                                     files={"file": (file_name, file_stream)} if file_stream else None)

        if response.headers.get('content-disposition', '').startswith('attachment;'):
            # Attachment response: stream the file into locale_path.
            filename = response.headers['content-disposition'].split('=')[1]
            dest_filename = os.path.join(self.locale_path, filename)
            try:
                os.makedirs(os.path.dirname(dest_filename))
            except OSError:
                # Ok if path exists
                pass
            with open(dest_filename, 'wb') as f:
                for chunk in response.iter_content():
                    f.write(chunk)
            response_output = {'filename': dest_filename}
        else:
            try:
                response_output = response.json()
            except ValueError:
                response_output = {}
        return response.status_code, response_output

    def json_get_request(self, *args, **kwargs):
        """GET variant of json_request."""
        return self.json_request(method="get", *args, **kwargs)

    def json_post_request(self, *args, **kwargs):
        """POST variant of json_request."""
        return self.json_request(method="post", *args, **kwargs)

    def project_languages(self, project_id):
        """List the languages enabled for a project."""
        return self.json_get_request(api_path="projects/%s/languages" % project_id)

    def file_list(self, project_id, page=1):
        """List files uploaded to a project (paginated)."""
        return self.json_get_request(api_path="projects/%s/files" % project_id, api_params={"page": page})

    def file_upload(self, project_id, file_name, file_format="GNU_PO", locale=None, is_keeping_all_strings=None):
        """Upload a translation source file to a project."""
        with open(file_name, 'rb') as file_stream:
            return self.json_post_request(
                api_path="projects/%s/files" % project_id,
                file_stream=file_stream,
                api_params={"file_name": os.path.basename(file_name),
                            "file_format": file_format,
                            "locale": locale,
                            "is_keeping_all_strings": is_keeping_all_strings})

    def translation_export(self, project_id, locale, source_file_name, export_file_name):
        """Download the exported translation of `source_file_name` for `locale`."""
        return self.json_get_request(
            api_path="projects/%s/translations" % project_id,
            api_params={"locale": locale,
                        "source_file_name": source_file_name,
                        "export_file_name": export_file_name})
class OneSkyApiClientException(Exception):
    """Base exception type for OneSky API client errors."""
    pass
|
# repo: Xchkoo/student_system_web — SQLite database helpers
from flask import g
import sqlite3
from app_mask import config, app
def get_db():
    """Return the per-app-context SQLite connection, opening it on first access."""
    connection = getattr(g, '_database', None)
    if connection is None:
        connection = g._database = sqlite3.connect(config.DATABASE)
    return connection
@app.teardown_appcontext
def close_connection(exception):
    """App-context teardown hook: close the request's SQLite connection, if any."""
    connection = getattr(g, '_database', None)
    if connection is not None:
        connection.close()
def class_add_photo(name, path, lesson, time, annotation):
    """Insert a 'CLASS' record (class-photo event) for the given student.

    Values are bound with `?` placeholders instead of string concatenation,
    closing the SQL-injection hole of the original implementation; the
    connection is always closed.
    """
    t_id = name_to_trans_id(name)
    conn = sqlite3.connect(config.DATABASE)
    try:
        c = conn.cursor()
        c.execute(
            "INSERT INTO SYSTEM (TYPE,STUDENT_ID,PATH,LESSON,annotation,TIME) "
            "VALUES ('CLASS', ?, ?, ?, ?, ?);",
            (str(t_id), str(path), str(lesson), str(annotation), str(time)))
        conn.commit()
    finally:
        conn.close()
def enter(student_id, path, is_mask, position, time):
    """Insert a 'MASK' record (mask-detection event at an entrance).

    Parameterized query prevents SQL injection; `is_mask` is bound as-is
    (it was inserted unquoted/numeric in the original statement).
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        c = conn.cursor()
        c.execute(
            "INSERT INTO SYSTEM (TYPE, STUDENT_ID, PATH, is_mask, POSITION, TIME) "
            "VALUES ('MASK', ?, ?, ?, ?, ?);",
            (str(student_id), str(path), is_mask, str(position), str(time)))
        conn.commit()
    finally:
        conn.close()
def register_trans(name, path):
    """Register a (name, photo path) pair in TRANS and return its trans_id as a string.

    Uses parameterized SQL (the original concatenated `name`/`path` into the
    statement) and guarantees the connection is closed.
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        c = conn.cursor()
        c.execute("INSERT INTO TRANS (trans_id, NAME, PATH) VALUES (NULL, ?, ?)", (name, path))
        # Keep the original lookup semantics: scan TRANS and return the id of
        # the last row whose name matches (0 when none is found).
        return_data = 0
        for row in c.execute("SELECT * from TRANS"):
            if row[1] == name:
                return_data = row[0]
        conn.commit()
        return str(return_data)
    finally:
        conn.close()
def add_homework(name, path, subject, homework, is_commit, time):
    """Insert a HOMEWORK row for student *name*.

    Returns {'msg': 'SUCCESS'} on insert, {'msg': 'FAIL'} when the name is unknown.
    SQL injection fixed via bound parameters; connection closed on all paths.
    """
    t_id = name_to_trans_id(name)
    if t_id == -1:
        return {'msg': 'FAIL'}
    conn = sqlite3.connect(config.DATABASE)
    try:
        conn.execute(
            "INSERT INTO SYSTEM (TYPE, STUDENT_ID, PATH, LESSON, HOMEWORK, is_commit, TIME) "
            "VALUES ('HOMEWORK', ?, ?, ?, ?, ?, ?)",
            (str(t_id), str(path), str(subject), str(homework), str(is_commit), str(time)),
        )
        conn.commit()
        return {'msg': 'SUCCESS'}
    finally:
        conn.close()
# ---------------- 以下是统计模块 ---------------------- #
def get_class():
    """Return all CLASS events as [{'name', 'img', 'time', 'lesson', 'reason'}, ...].

    Fix: the original never closed the connection; it is now closed on all paths.
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        res = []
        rows = conn.execute(
            "SELECT ID,TYPE,STUDENT_ID,PATH,TIME,LESSON,annotation FROM SYSTEM WHERE TYPE IS 'CLASS'")
        for col in rows:
            res.append({"name": trans_id_to_name(int(col[2])), "img": col[3],
                        "time": col[4], "lesson": col[5], "reason": col[6]})
        return res
    finally:
        conn.close()
def get_class_num():
    """Return the total number of CLASS events.

    Fix: fetch the count before closing, and actually close the connection
    (the original leaked it).
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        cur = conn.execute("SELECT count(*) FROM SYSTEM WHERE TYPE IS 'CLASS'")
        return cur.fetchone()[0]
    finally:
        conn.close()
def get_homework_data(subject):
    """Aggregate hand-in status for every homework assignment of *subject*.

    Returns a list of dicts with homework_name/img/time plus hand-in and
    not-handed-in student names and counts. Subject and homework names are
    bound as SQL parameters (the original concatenated them into the query);
    the connection is closed on all paths.
    """
    sequence = get_homework_sequence(subject)
    conn = sqlite3.connect(config.DATABASE)
    try:
        c = conn.cursor()
        res = []
        for hw in sequence:
            rows = c.execute(
                "SELECT ID, TYPE, STUDENT_ID, PATH, TIME, HOMEWORK, is_commit FROM SYSTEM "
                "WHERE TYPE IS 'HOMEWORK' AND LESSON IS ? AND HOMEWORK IS ?",
                (subject, hw)).fetchall()
            data = dict(homework_name="", homework_img="", hand_in_num=0, hand_in_name=[],
                        un_hand_in_num=0, un_hand_in_name=[])
            first = True
            for col in rows:
                if first:
                    # Metadata comes from the first row of this assignment.
                    data["homework_name"] = col[5]
                    data["homework_img"] = col[3]
                    data["homework_time"] = col[4]
                    first = False
                name = trans_id_to_name(int(col[2]))
                if col[6] == 1:
                    data["hand_in_name"].append(name)
                    data["hand_in_num"] += 1
                elif col[6] == 0:
                    data["un_hand_in_name"].append(name)
                    data["un_hand_in_num"] += 1
            res.append(data)
        return res
    finally:
        conn.close()
def get_homework_num(subject):
    """Return the number of distinct homework assignments recorded for *subject*."""
    return len(get_homework_sequence(subject))
def get_homework_sequence(subject):
    """Return distinct, non-empty homework names for *subject*, in first-seen order.

    Subject is bound as a SQL parameter (injection fix); connection closed on
    all paths, not just the happy path.
    """
    res = []
    conn = sqlite3.connect(config.DATABASE)
    try:
        rows = conn.execute(
            "SELECT ID, TYPE, STUDENT_ID, PATH, TIME, HOMEWORK, is_commit FROM SYSTEM "
            "WHERE TYPE IS 'HOMEWORK' AND LESSON IS ?", (subject,))
        for row in rows:
            if row[5] not in res and row[5] != '':
                res.append(row[5])
        return res
    finally:
        conn.close()
def sum_mask():
    """Return overall mask-event counts: {'wear', 'unwear', 'all'}.

    Fixes: the original assigned the is_mask=0 count to 'wear' and the
    is_mask=1 count to 'unwear', contradicting its own cursor names (uwc/wc);
    assuming is_mask=1 means "wearing" (TODO: confirm against the writers of
    this column), the assignments are corrected here. The connection is also
    closed (the original leaked it).
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        c = conn.cursor()
        unwear = c.execute(
            "SELECT count(*) FROM SYSTEM WHERE TYPE IS 'MASK' AND is_mask IS 0").fetchone()[0]
        wear = c.execute(
            "SELECT count(*) FROM SYSTEM WHERE TYPE IS 'MASK' AND is_mask IS 1").fetchone()[0]
        return {'wear': wear, 'unwear': unwear, 'all': wear + unwear}
    finally:
        conn.close()
def get_date():
    """Return the distinct, non-empty TIME values of MASK events, in first-seen order.

    Fix: the original never closed the connection.
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        cursor = conn.execute(
            "SELECT TIME FROM SYSTEM WHERE TIME IS NOT NULL AND TYPE IS 'MASK'")
        sequence = []
        for (stamp,) in cursor:
            if stamp not in sequence and stamp != '':
                sequence.append(stamp)
        return sequence
    finally:
        conn.close()
def count_mask():
    """Per-date mask statistics: [{'date', 'wear', 'unwear', 'all'}, ...].

    Fixes vs the original: the date is bound as a SQL parameter, the
    connection is closed, and the wear/unwear assignments are un-swapped
    (the original stored the is_mask=0 count in 'wear', contradicting its
    cursor names uwc/wc; assumes is_mask=1 means "wearing", matching the
    corrected sum_mask convention — TODO confirm).
    """
    res = []
    conn = sqlite3.connect(config.DATABASE)
    try:
        c = conn.cursor()
        dates = []
        for (stamp,) in c.execute(
                "SELECT TIME FROM SYSTEM WHERE TIME IS NOT NULL AND TYPE IS 'MASK'").fetchall():
            if stamp not in dates:
                dates.append(stamp)
        for date in dates:
            if date == '':
                continue
            unwear = c.execute(
                "SELECT count(*) FROM SYSTEM WHERE TIME IS NOT NULL AND TYPE IS 'MASK' "
                "AND is_mask IS 0 AND TIME IS ?", (date,)).fetchone()[0]
            wear = c.execute(
                "SELECT count(*) FROM SYSTEM WHERE TIME IS NOT NULL AND TYPE IS 'MASK' "
                "AND is_mask IS 1 AND TIME IS ?", (date,)).fetchone()[0]
            res.append({'date': date, 'wear': wear, 'unwear': unwear, 'all': wear + unwear})
        return res
    finally:
        conn.close()
def count_pass(date):
    """Count MASK events on *date* per checkpoint position.

    Returns {'clnum': classroom, 'rnum': canteen, 'snum': campus gate,
    'cnum': computer room}. Fixes: the date is bound as a SQL parameter
    (injection fix) and the connection itself is closed — the original
    closed only the cursor.
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        rows = conn.execute(
            "SELECT * FROM SYSTEM WHERE TYPE IS 'MASK' AND POSITION IS NOT NULL AND TIME IS ?",
            (date,))
        res = {"clnum": 0, "rnum": 0, "snum": 0, "cnum": 0}
        # Map checkpoint position label -> result key.
        position_keys = {"校园门口": 'snum', "食堂门口": 'rnum',
                         "机房门口": 'cnum', "教室门口": 'clnum'}
        for row in rows:
            key = position_keys.get(row[4])
            if key is not None:
                res[key] += 1
        return res
    finally:
        conn.close()
def count_commuting():
    # Commuting statistics are currently identical to the per-date mask counts.
    return count_mask()
def get_students_info():
    """Return [{'name', 'path'}, ...] for every named row in TRANS."""
    conn = sqlite3.connect(config.DATABASE)
    cursor = conn.cursor()
    rows = cursor.execute("SELECT * FROM TRANS WHERE NAME IS NOT NULL")
    students = [{"name": row[1], "path": row[2]} for row in rows]
    conn.close()
    return students
def count_person():
    """Return the total number of rows in TRANS."""
    conn = sqlite3.connect(config.DATABASE)
    total = conn.execute('SELECT count(*) FROM TRANS').fetchone()[0]
    conn.close()
    return total
# ---------------- 以下是通用模块 ------------------ #
def delete_trans(trans_id):
    """Delete the TRANS row with the given *trans_id*.

    The id is bound as a SQL parameter instead of concatenated into the
    statement; the connection is closed even on error.
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        conn.execute("DELETE FROM TRANS WHERE trans_id = ?", (trans_id,))
        conn.commit()
    finally:
        conn.close()
def path_search_by_id(trans_id):
    """Return the PATH stored for *trans_id*.

    Bug fix: the original called conn.close() BEFORE fetchone(), which raises
    sqlite3.ProgrammingError ("Cannot operate on a closed database"); the row
    is now fetched first. The id is also bound as a parameter.
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        row = conn.execute(
            "SELECT PATH FROM TRANS WHERE trans_id IS ?", (trans_id,)).fetchone()
        return row[0]
    finally:
        conn.close()
def trans_id_to_name(trans_id):
    """
    TIPS: trans_id must be int

    Return the NAME for *trans_id*, or 'NaN' when no named row matches.
    Fixes: the original leaked the connection on the early-return (found)
    path and scanned the whole table in Python; a direct parameterized
    lookup replaces the scan.
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        row = conn.execute(
            "SELECT NAME FROM TRANS WHERE trans_id = ?", (trans_id,)).fetchone()
        # The original only matched rows whose NAME was not NULL.
        if row is not None and row[0] is not None:
            return row[0]
        return 'NaN'
    finally:
        conn.close()
def name_to_trans_id(name):
    """Return the trans_id of the first row named *name*, or -1 if absent.

    A direct parameterized lookup replaces the original's full-table scan;
    ORDER BY trans_id preserves the original's "first match in rowid order"
    behavior (trans_id is auto-assigned on insert, see register_trans).
    """
    conn = sqlite3.connect(config.DATABASE)
    try:
        row = conn.execute(
            "SELECT trans_id FROM TRANS WHERE NAME = ? ORDER BY trans_id LIMIT 1",
            (name,)).fetchone()
        return row[0] if row is not None else -1
    finally:
        conn.close()
def homework_update(name, homework, subject):
    """Mark all matching un-committed HOMEWORK rows for student *name* as committed.

    Fixes: values are bound as SQL parameters (the original concatenated them,
    and even lacked a space before one AND); the SELECT results are fetched
    with fetchall() before issuing UPDATEs — the original re-executed on the
    same cursor it was iterating, which resets the pending result set so only
    the first row was ever updated. The connection is closed on all paths.
    """
    tid = name_to_trans_id(name)
    conn = sqlite3.connect(config.DATABASE)
    try:
        c = conn.cursor()
        rows = c.execute(
            "SELECT ID, HOMEWORK, STUDENT_ID FROM SYSTEM WHERE TYPE = 'HOMEWORK' "
            "AND LESSON IS ? AND HOMEWORK IS ? AND STUDENT_ID IS ? AND is_commit IS 0",
            (str(subject), str(homework), str(tid))).fetchall()
        for col in rows:
            c.execute("UPDATE SYSTEM SET is_commit = 1 WHERE ID IS ?", (col[0],))
        conn.commit()
    finally:
        conn.close()
if __name__ == '__main__':
    # Manual smoke test; homework_update() returns None, so this prints "None".
    print(homework_update('张威', '第三周的纠错','政治'))
|
# Online Bayesian linear regression using Kalman Filter
# Based on: https://github.com/probml/pmtk3/blob/master/demos/linregOnlineDemoKalman.m
# Author: <NAME> (@gerdm), <NAME>(@karalleyna)
import superimport
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from numpy.linalg import inv
from lds_lib import KalmanFilter
from jax.lax import scan
import jax.numpy as jnp
def kf_linreg(X, y, R, mu0, Sigma0, F, Q):
    """
    Online estimation of a linear regression
    using Kalman Filters

    Parameters
    ----------
    X: array(n_obs, dimension)
        Matrix of features
    y: array(n_obs,)
        Array of observations
    R: float
        Known observation-noise variance
        (the original docstring listed Q here by mistake)
    mu0: array(dimension)
        Prior mean
    Sigma0: array(dimension, dimension)
        Prior covariance matrix
    F: array(dimension, dimension)
        State-transition matrix (identity for static regression weights)
    Q: float or array
        System (process) noise covariance

    Returns
    -------
    * array(n_obs, dimension)
        Online estimation of parameters
    * array(n_obs, dimension, dimension)
        Online estimation of uncertainty
    """
    n_obs, dim = X.shape
    # Emission matrix at step t is the t-th feature row, reshaped to (1, dim).
    C = lambda t: X[t][None, ...]
    kf = KalmanFilter(F, C, Q, R, mu0.copy(), Sigma0.copy(), timesteps=n_obs)
    # scan threads the (mean, covariance, step) state through all observations
    # and stacks the per-step posterior means/covariances.
    _, (mu_hist, Sigma_hist, _, _) = scan(kf.kalman_step, (mu0.copy(), Sigma0.copy(), 0), y)
    return mu_hist, Sigma_hist
def posterior_lreg(X, y, R, mu0, Sigma0):
    """
    Compute mean and covariance matrix of a
    Bayesian Linear regression

    Parameters
    ----------
    X: array(n_obs, dimension)
        Matrix of features
    y: array(n_obs,)
        Array of observations
    R: float
        Known variance
    mu0: array(dimension)
        Prior mean
    Sigma0: array(dimension, dimension)
        Prior covariance matrix

    Returns
    -------
    * array(dimension)
        Posterior mean
    * array(dimension, dimension)
        Posterior covariance matrix
    """
    prior_precision = inv(Sigma0)
    # Posterior precision = prior precision + scaled Gram matrix.
    posterior_cov = inv(prior_precision + X.T @ X / R)
    posterior_mean = posterior_cov @ (prior_precision @ mu0 + X.T @ y / R)
    return posterior_mean, posterior_cov
if __name__ == "__main__":
    plt.rcParams["axes.spines.right"] = False
    plt.rcParams["axes.spines.top"] = False
    n_obs = 21
    timesteps = jnp.arange(n_obs)
    x = jnp.linspace(0, 20, n_obs)
    # Design matrix with an intercept column: weights are (w0, w1).
    X = jnp.c_[jnp.ones(n_obs), x]
    F = jnp.eye(2)
    mu0 = jnp.zeros(2)
    Sigma0 = jnp.eye(2) * 10.
    # Q=0: static weights; R=1: unit observation noise.
    Q, R = 0, 1
    # Data from original matlab example
    y = jnp.array([2.4865, -0.3033, -4.0531, -4.3359, -6.1742, -5.604, -3.5069, -2.3257, -4.6377,
                   -0.2327, -1.9858, 1.0284, -2.264, -0.4508, 1.1672, 6.6524, 4.1452, 5.2677, 6.3403, 9.6264, 14.7842])

    # Online estimation
    mu_hist, Sigma_hist = kf_linreg(X, y, R, mu0, Sigma0, F, Q)
    kf_var = Sigma_hist[-1, [0, 1], [0, 1]]  # final marginal variances (unused below)
    w0_hist, w1_hist = mu_hist.T
    w0_err, w1_err = jnp.sqrt(Sigma_hist[:, [0, 1], [0, 1]].T)

    # Offline estimation
    (w0_post, w1_post), Sigma_post = posterior_lreg(X, y, R, mu0, Sigma0)
    w0_std, w1_std = jnp.sqrt(Sigma_post[[0, 1], [0, 1]])

    # Asserting values for means and variance: the final online estimate must
    # agree with the closed-form batch posterior.
    assert jnp.allclose(w0_hist[-1], w0_post)
    assert jnp.allclose(w1_hist[-1], w1_post)
    assert jnp.allclose(w0_err[-1], w0_std)
    assert jnp.allclose(w1_err[-1], w1_std)

    fig, ax = plt.subplots()
    ax.errorbar(timesteps, w0_hist, w0_err, fmt="-o", label="$w_0$", color="black", fillstyle="none")
    ax.errorbar(timesteps, w1_hist, w1_err, fmt="-o", label="$w_1$", color="tab:red")
    ax.axhline(y=w0_post, c="black", label="$w_0$ batch")
    ax.axhline(y=w1_post, c="tab:red", linestyle="--", label="$w_1$ batch")
    ax.fill_between(timesteps, w0_post - w0_std, w0_post + w0_std, color="black", alpha=0.4)
    ax.fill_between(timesteps, w1_post - w1_std, w1_post + w1_std, color="tab:red", alpha=0.4)
    plt.legend()
    ax.set_xlabel("time")
    ax.set_ylabel("weights")
    ax.set_ylim(-8, 4)
    ax.set_xlim(-0.5, n_obs)
    pml.savefig("linreg-online-kalman.pdf")
    plt.show()
|
import requests;
from bs4 import BeautifulSoup;
import jieba;
import os;
import re;
import time;
from gensim import corpora,models,similarities;
import random;
import sys;
def get_review_tag(src):
    """Fetch the review page at *src* and return its main content <div> tag."""
    response = requests.get(src)
    soup = BeautifulSoup(response.text)
    content_tag = soup.find('div', {'class': 'review-content clearfix'})
    time.sleep(1)  # throttle between page fetches
    return content_tag
def get_review_list(book_num):
    """Crawl every long-form review of Douban book *book_num* and return their texts."""
    review_list = [];  # list of reviews, one element per article
    start=0;
    while(True):
        # Page through the review index, 20 reviews per page.
        book_reviews_response = requests.get("https://book.douban.com/subject/%d/reviews?start=%d"%(book_num,start));
        book_reviews_soup = BeautifulSoup(book_reviews_response.text);
        src_tags = book_reviews_soup.find_all('a', {
            'href': re.compile('^https://book.douban.com/review/\d*/$')});  # collect links to the full-review pages
        time.sleep(2);  # throttle: be polite to the server
        if(len(src_tags)==0):
            # No more review links: past the last page.
            break;
        for tag in src_tags:
            # print(tag.attrs['href']);
            text_tag = get_review_tag(tag.attrs['href']);
            p_tags = text_tag.find_all('p');
            text = "";
            if(len(p_tags)==0):
                # Review body has no <p> tags: take the raw container text.
                text=text_tag.text;
                text = re.sub('[a-z]|[A-Z]', '', text);  # clean the data (strip latin letters)
            else:
                for p_tag in p_tags:
                    text = text + p_tag.text;
                    text = re.sub('[a-z]|[A-Z]', '', text);  # clean the data (strip latin letters)
            print(text);
            review_list.append(text);
        start=start+20;
    return review_list;
def train_model(texts_list, src):
    """Build a TF-IDF model from *texts_list* and save it to path *src*.

    Consistency fix: reuses get_word_list() for jieba tokenization instead of
    duplicating the same loop inline.
    """
    word_list = get_word_list(texts_list)
    dictionary = corpora.Dictionary(word_list)
    corpus = [dictionary.doc2bow(doc) for doc in word_list]  # build the corpus
    tfidf = models.TfidfModel(corpus)
    tfidf.save(src)
def get_word_list(texts_list):
    """Tokenize every document with jieba; returns one token list per document."""
    return [list(jieba.cut(document)) for document in texts_list]
def tfidf_model(corpus, text, dictionary):
    """Rank corpus documents by TF-IDF similarity to *text* (most similar first)."""
    tfidf = models.TfidfModel(corpus)
    index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=len(dictionary.keys()))
    query_bow = dictionary.doc2bow(list(jieba.cut(text)))
    scores = index[tfidf[query_bow]]
    return sorted(enumerate(scores), key=lambda pair: -pair[1])
def lsi_model(corpus, text, dictionary):
    """Rank corpus documents by 2-topic LSI similarity to *text* (most similar first)."""
    model = models.LsiModel(corpus, id2word=dictionary, num_topics=2)
    documents = model[corpus]
    query_vec = model[dictionary.doc2bow(list(jieba.cut(text)))]
    index = similarities.MatrixSimilarity(documents)
    scores = index[query_vec]
    return sorted(enumerate(scores), key=lambda pair: -pair[1])
def main():
    """Crawl reviews, merge with the local corpus, and print the 5 documents
    most similar (TF-IDF) to the query text in test.txt."""
    with open('test.txt', 'r') as query_file:
        text = query_file.read()
    # language_data.txt must be read as UTF-8; the original leaked this handle
    # (it was never closed) — 'with' closes both files deterministically.
    with open('language_data.txt', 'r', encoding='utf-8') as corpus_file:
        original_text_list = corpus_file.read().split('*$*')
    texts_list = get_review_list(26118072)
    texts_list.extend(original_text_list)
    word_list = get_word_list(texts_list)
    dictionary = corpora.Dictionary(word_list)
    corpus = [dictionary.doc2bow(doc) for doc in word_list]  # build the corpus
    sim = tfidf_model(corpus, text, dictionary)
    # sim=tfidf_model(corpus,text,dictionary);
    for i in range(5):
        print(sim[i][1], ': ', texts_list[sim[i][0]])


if __name__ == '__main__':
    # Guarded so importing this module no longer triggers the crawl.
    main()
|
<reponame>stactools-packages/soilgrids
# flake8: noqa
from datetime import datetime
from pystac import Link, Provider, ProviderRole
COLLECTION_ID = "soilgrids250m"
EPSG = 152160
CRS_WKT = """PROJCS["Homolosine",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Interrupted_Goode_Homolosine"],
UNIT["Meter",1]]"""
TITLE = "ISRIC SoilGrids Global Soil Property Maps"
LICENSE = "CC-BY-4.0"
LICENSE_LINK = Link(
rel="license",
target="https://creativecommons.org/licenses/by/4.0/",
title="Creative Commons Attribution 4.0 International",
)
DATASET_URL = "https://files.isric.org/soilgrids/latest/data/"
DATASET_ACCESS_URL = f"/vsicurl?max_retry=3&retry_delay=1&list_dir=no&url={DATASET_URL}"
DESCRIPTION = """SoilGridsTM (hereafter SoilGrids) is a system for global digital soil mapping that makes use of global soil profile information and covariate data to model the spatial distribution of soil properties across the globe. SoilGrids is a collections of soil property maps for the world produced using machine learning at 250 m resolution."""
PROVIDER = Provider(
name="ISRIC — World Soil Information",
roles=[
ProviderRole.HOST,
ProviderRole.PROCESSOR,
ProviderRole.PRODUCER,
],
url="https://www.isric.org/explore/soilgrids",
)
BOUNDING_BOX = [96.00, -44.00, 168.00, -9.00]
RELEASE_DATE = datetime(2020, 5, 1)
DOI = "10.5194/soil-7-217-2021"
CITATION = "<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>.: SoilGrids 2.0: producing soil information for the globe with quantified spatial uncertainty, SOIL, 7, 217–240, 2021."
SOIL_PROPERTIES = {
"bdod": "Bulk density of the fine earth fraction (cg/cm³)",
"cec": "Cation Exchange Capacity of the soil (mmol(c)/kg)",
"cfvo":
"Volumetric fraction of coarse fragments (> 2 mm) (cm3/dm3 (vol‰))",
"clay":
"Proportion of clay particles (< 0.002 mm) in the fine earth fraction (g/kg)",
"nitrogen": "Total nitrogen (cg/kg)",
"ocd": "Organic carbon density (hg/m³)",
"ocs": "Organic carbon stocks (t/ha)", # only 0-30cm
"phh2o": "Soil pH (pHx10)",
"sand":
"Proportion of sand particles (> 0.05 mm) in the fine earth fraction (g/kg)",
"silt":
"Proportion of silt particles (≥ 0.002 mm and ≤ 0.05 mm) in the fine earth fraction (g/kg)",
"soc": "Soil organic carbon content in the fine earth fraction (dg/kg)",
}
UNITS = {
"bdod": "cg/cm³",
"cec": "mmol(c)/kg",
"cfvo": "cm3/dm3 (vol‰)",
"clay": "g/kg",
"nitrogen": "cg/kg",
"ocd": "hg/m³",
"ocs": "t/ha",
"phh2o": "pH",
"sand": "g/kg",
"silt": "g/kg",
"soc": "dg/kg",
}
SCALES = {
"phh2o": 0.1,
}
DEPTHS = {
"0-5cm": "Zero to 5cm Depth",
"5-15cm": "5cm to 15cm Depth",
"15-30cm": "15cm to 30cm Depth",
"30-60cm": "30cm to 60cm Depth",
"60-100cm": "60cm to 100cm Depth",
"100-200cm": "100cm to 200cm Depth",
}
OCS_DEPTHS = {
"0-30cm": "Zero to 30cm Depth",
}
PROBS = {
"Q0.05": "5% quantile",
"Q0.5": "median of the distribution",
"Q0.95": "95% quantile",
"mean": "mean of the distribution",
"uncertainty": "10x(Q0.95-Q0.05)/Q0.50",
}
TILING_PIXEL_SIZE = (10000, 10000)
NO_DATA = -32768
|
from unittest.mock import patch
from bs4 import BeautifulSoup
from django.urls import reverse
from web.grant_applications.forms import CompanyDetailsForm
from web.grant_applications.services import BackofficeService
from web.grant_applications.views import CompanyDetailsView
from web.tests.factories.grant_application_link import GrantApplicationLinkFactory
from web.tests.helpers.backoffice_objects import (
FAKE_GRANT_APPLICATION, FAKE_COMPANY
)
from web.tests.helpers.testcases import BaseTestCase
@patch.object(BackofficeService, 'create_company', return_value=FAKE_COMPANY)
@patch.object(BackofficeService, 'get_grant_application', return_value=FAKE_GRANT_APPLICATION)
@patch.object(BackofficeService, 'update_grant_application', return_value=FAKE_GRANT_APPLICATION)
class TestCompanyDetailsView(BaseTestCase):
    """Tests for CompanyDetailsView (GET rendering, POST handling, redirects).

    Decorators apply bottom-up, so the mocks tuple each test receives is:
    mocks[0] = update_grant_application, mocks[1] = get_grant_application,
    mocks[2] = create_company.
    """

    def setUp(self):
        super().setUp()
        # Each test gets a fresh application link and its company-details URL.
        self.gal = GrantApplicationLinkFactory()
        self.url = reverse('grant-applications:company-details', args=(self.gal.pk,))

    def test_get_template(self, *mocks):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, CompanyDetailsView.template_name)

    def test_back_url_select_company(self, *mocks):
        # With a company on the application, "back" goes to select-company.
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        back_html = BeautifulSoup(response.content, 'html.parser').find(id='id_back_link')
        self.assertEqual(
            back_html.attrs['href'],
            reverse('grant-applications:select-company', args=(self.gal.pk,))
        )

    def test_back_url_manual_company_details(self, *mocks):
        # Without a company, "back" goes to manual-company-details instead.
        fake_grant_application = FAKE_GRANT_APPLICATION.copy()
        fake_grant_application['company'] = None
        mocks[1].return_value = fake_grant_application
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        back_html = BeautifulSoup(response.content, 'html.parser').find(id='id_back_link')
        self.assertEqual(
            back_html.attrs['href'],
            reverse('grant-applications:manual-company-details', args=(self.gal.pk,))
        )

    def test_post(self, *mocks):
        response = self.client.post(
            self.url,
            data={
                'number_of_employees': CompanyDetailsForm.NumberOfEmployees.HAS_FEWER_THAN_10,
                'is_turnover_greater_than': True
            }
        )
        self.assertEqual(response.status_code, 302)
        # update_grant_application must be called with exactly the form fields.
        mocks[0].assert_called_once_with(
            grant_application_id=str(self.gal.backoffice_grant_application_id),
            number_of_employees=CompanyDetailsForm.NumberOfEmployees.HAS_FEWER_THAN_10.value,
            is_turnover_greater_than=True
        )

    def test_post_form_redirect_path(self, *mocks):
        response = self.client.post(
            self.url,
            data={
                'number_of_employees': CompanyDetailsForm.NumberOfEmployees.HAS_FEWER_THAN_10,
                'is_turnover_greater_than': True
            }
        )
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response=response,
            expected_url=reverse('grant-applications:contact-details', args=(self.gal.pk,))
        )

    def test_post_cannot_set_random_field(self, *mocks):
        # An extra, non-form field in the POST body must not reach the service.
        response = self.client.post(
            self.url,
            data={
                'number_of_employees': CompanyDetailsForm.NumberOfEmployees.HAS_FEWER_THAN_10,
                'is_turnover_greater_than': True,
                'is_already_committed_to_event': True
            }
        )
        self.assertEqual(response.status_code, 302)
        mocks[0].assert_called_once_with(
            grant_application_id=str(self.gal.backoffice_grant_application_id),
            number_of_employees=CompanyDetailsForm.NumberOfEmployees.HAS_FEWER_THAN_10.value,
            is_turnover_greater_than=True
        )

    def test_required_fields(self, *mocks):
        response = self.client.post(self.url)
        self.assertFormError(response, 'form', 'number_of_employees', self.form_msgs['required'])
        self.assertFormError(
            response, 'form', 'is_turnover_greater_than', self.form_msgs['required']
        )

    def test_get_redirects_to_confirmation_if_application_already_sent_for_review(self, *mocks):
        fake_grant_application = FAKE_GRANT_APPLICATION.copy()
        fake_grant_application['sent_for_review'] = True
        mocks[1].return_value = fake_grant_application
        response = self.client.get(self.url)
        self.assertRedirects(
            response, reverse('grant-applications:confirmation', args=(self.gal.pk,))
        )

    def test_get_redirects_to_ineligible_if_application_is_not_active(self, *mocks):
        fake_grant_application = FAKE_GRANT_APPLICATION.copy()
        fake_grant_application['is_eligible'] = False
        mocks[1].return_value = fake_grant_application
        response = self.client.get(self.url)
        self.assertRedirects(response, reverse('grant-applications:ineligible'))

    def test_post_redirects_to_ineligible_if_application_is_not_active(self, *mocks):
        fake_grant_application = FAKE_GRANT_APPLICATION.copy()
        fake_grant_application['is_eligible'] = False
        mocks[1].return_value = fake_grant_application
        response = self.client.post(self.url)
        self.assertRedirects(response, reverse('grant-applications:ineligible'))

    def test_get_does_not_redirect_to_ineligible_if_review_page_has_been_viewed(self, *mocks):
        # Once the review page has been viewed, eligibility no longer blocks GET.
        fake_grant_application = FAKE_GRANT_APPLICATION.copy()
        fake_grant_application['is_eligible'] = False
        mocks[1].return_value = fake_grant_application
        self.gal.has_viewed_review_page = True
        self.gal.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, CompanyDetailsView.template_name)

    def test_post_redirects_to_review_page_if_application_review_page_has_been_viewed(self, *mocks):
        self.gal.has_viewed_review_page = True
        self.gal.save()
        response = self.client.post(
            self.url,
            data={
                'number_of_employees': CompanyDetailsForm.NumberOfEmployees.HAS_FEWER_THAN_10,
                'is_turnover_greater_than': True,
                'is_already_committed_to_event': True
            }
        )
        self.assertRedirects(
            response,
            reverse('grant-applications:application-review', args=(self.gal.pk,)),
            fetch_redirect_response=False
        )
|
<reponame>draftable/compare-api-python-client<filename>draftable/commands/dr_compare.py
#!/usr/bin/env python
import argparse
import configparser
import datetime
import os
import sys
from draftable import Client as DraftableClient
from draftable.endpoints.comparisons.sides import make_side
from draftable.endpoints.exceptions import InvalidArgument, InvalidPath, NotFound
DESCRIPTION = "Create and manage Draftable.com comparisons on the command line"
USAGE_TEMPLATE = """%(prog)s <command> [<command-args>]
COMMANDS:
{COMMANDS}
CONFIGURATION:
Both account_id and auth_token must be set. They can be provided several ways:
1. Environment variables (DR_ACCOUNT, DR_TOKEN)
2. A named configuration group in file `~/.draftable`, for example:
[cloud-testing] # replace the following with your own credentials
account: F91n2k-test
token: <PASSWORD>
To use the above, either specify `-e cloud-testing` as flags to this script,
or `DR_ENV=cloud-testing` as an environment variable.
3. As command-line flags, for example:
"-a <account> -t <token>"
or
"--account <account> --token <token>".
The `base_url` is needed only for self-hosted or development purposes. It can be
set from environment variable (DR_BASE_URL), preset in the configuration file,
or a command-line flag (`-b <url>`).
The `base_url` must always end with '/api' with no trailing slash.
EXAMPLES:
The following assume credentials are in the environment.
Create a comparison:
$ dr-compare create ./left.pdf ./right.pdf
Create a public comparison that expires in 15 minutes:
$ dr-compare create -p -m 15 ./left.pdf ./right.pdf
Provide the file types when not clear from the file extension:
$ dr-compare create --left-type=pdf --right-type=rtf ./left-file ./right-file
List all comparisons:
$ dr-compare list
$ dr-compare all
Get a specific comparison:
$ dr-compare get PCiIEXzW
Get public URL:
$ dr-compare url PCiIEXzW
Get signed URL with 30 minute expiry:
$ dr-compare signed PCiIEXzW
Get signed URL with 75 minute expiry:
$ dr-compare signed PCiIEXzW -m 75
$ dr-compare signed PCiIEXzW --expiry-mins 75
"""
class SetupError(Exception):
    """Raised for configuration/credential problems detected before calling the API."""
    pass
def get_connection_args(name):
    """Return (account, token, base_url) for config group *name* in ~/.draftable.

    base_url may be None (cloud default). Raises SetupError when the file or
    the named section is missing.
    """
    ini_path = os.path.join(os.path.expanduser("~"), ".draftable")
    if not os.path.isfile(ini_path):
        raise SetupError(
            f"Requested environment '{name}' but missing config file: {ini_path}"
        )
    config = configparser.ConfigParser()
    config.read(ini_path)
    if name not in config:  # idiom fix: was 'not name in config'
        raise SetupError(
            f"Requested environment '{name}' but config file ({ini_path}) does not define that name."
        )
    base_url = config.get(name, "base_url", fallback=None)
    return (
        config.get(name, "account"),
        config.get(name, "token"),
        base_url,
    )
def create_client(args):
    """Build a DraftableClient from env vars, ~/.draftable, and CLI flags.

    Precedence (lowest to highest): environment variables, the named config
    group, explicit command-line flags.
    """
    # First, use any environment variables
    account = os.getenv("DR_ACCOUNT")
    token = os.getenv("DR_TOKEN")
    base_url = os.getenv("DR_BASE_URL")

    # Secondly, if provided, look up this name in the ~/.draftable file (if it exists)
    env_name = getattr(args, "env_name") or os.getenv(
        "DR_ENV"
    )  # getattr default value doesn't work with args object
    if env_name:
        account, token, base_url = get_connection_args(env_name)

    # Thirdly, allow the user to override both environment vars and predefined settings with command-line flags.
    account = args.account if args.account else account
    token = args.token if args.token else token
    base_url = args.base_url if args.base_url else base_url

    # Some sanity checks
    if not account or not token:
        raise SetupError("Both account and token must be set.")
    client = DraftableClient(account, token, base_url)
    if args.unverified_ssl:
        # NOTE(review): assumes the client exposes a writable `verify_ssl`
        # attribute — confirm against the draftable client API.
        client.verify_ssl = False
    return client
def with_std_options(arg_parser):
    """Embellish the provided `arg_parser` with standard options."""
    # (flags, add_argument kwargs) for every standard connection option.
    standard_options = [
        (("-a", "--account"),
         dict(metavar="<ACCOUNT-ID>", action="store",
              help="For cloud, see https://api.draftable.com/account/credentials")),
        (("-b", "--base-url"),
         dict(metavar="<BASE-URL>", action="store",
              help="Only for Enterprise self-hosted")),
        (("-e", "--env-name"),
         dict(metavar="<ENV-NAME>", action="store",
              help="Name from file $HOME/.draftable")),
        (("-t", "--token"),
         dict(metavar="<TOKEN>", action="store",
              help="For cloud, see https://api.draftable.com/account/credentials")),
        (("-S", "--unverified-ssl"),
         dict(action="store_true",
              help="Don't verify SSL validity; useful for self-hosted self-signed SSL")),
    ]
    for flags, kwargs in standard_options:
        arg_parser.add_argument(*flags, **kwargs)
    return arg_parser
def default_comparison_display(comp, out=sys.stdout, position=None):
    """Print a comparison's fields to *out*, optionally prefixed with its position."""
    out.write("Comparison")
    if position is not None:
        # Bug fix: the original wrote the literal text " f{position}" because
        # the f-prefix was inside the quotes; format the position for real.
        out.write(f" {position}")
    out.write(f" identifier: {comp.identifier}\n")
    out.write(f" ready: {comp.ready}\n")
    out.write(f" failed: {comp.failed}\n")
    out.write(f" error: {comp.error_message}\n")
    out.write(f" public: {comp.public}\n")
    out.write(f" created: {comp.creation_time}\n")
    out.write(f" expires: {comp.expiry_time}\n")
    out.write(f" left: {comp.left}\n")
    out.write(f" right: {comp.right}\n")
def create_comparison(system_args, prog, cmd_name):
    """Create a new comparison."""
    arg_parser = with_std_options(
        argparse.ArgumentParser(description=create_comparison.__doc__)
    )
    arg_parser.add_argument(
        "-p",
        "--public",
        action="store_true",
        default=False,
        help="Marks this comparison public",
    )
    arg_parser.add_argument(
        "-i", "--identifier", default=None, help="Provide your own comparison ID"
    )
    arg_parser.add_argument(
        "-m",
        "--expiry-mins",
        metavar="<MINS>",
        type=int,
        default=None,
        help="number of minutes this URL should be valid",
    )
    arg_parser.add_argument(
        "--left-type",
        default="guess",
        metavar="<EXT>",
        action="store",
        help="e.g. 'pdf', 'doc', 'docx', 'ppt', 'pptx'.",
    )
    arg_parser.add_argument(
        "--right-type", default="guess", metavar="<EXT>", action="store"
    )
    arg_parser.add_argument("left", metavar="left-file-path-or-url")
    arg_parser.add_argument("right", metavar="right-file-path-or-url")
    # arg_parser.add_argument('--amend', action='store_true')
    args = arg_parser.parse_args(system_args)
    # print('Running create, args:', args)
    client = create_client(args)
    # print("Client:", client)
    # Build the two comparison sides; argument errors are re-raised as
    # SetupError so the top-level handler prints them cleanly.
    try:
        left = make_side(args.left, args.left_type)
        right = make_side(args.right, args.right_type)
    except InvalidArgument as ex:
        raise SetupError(
            f"{ex}. You may need to specify file type with --left-type or --right-type"
        )
    except InvalidPath as ex:
        raise SetupError(str(ex))
    identifier = args.identifier
    public = args.public
    expires = datetime.timedelta(minutes=args.expiry_mins) if args.expiry_mins else None
    print("Create:")
    print(f" identifier: {identifier}")
    print(f" public: {public}")
    print(f" expires: {expires}")
    print(f" left: {left}")
    print(f" right: {right}")
    print(" ...")
    comp = client.comparisons.create(left, right, identifier, public, expires)
    display = default_comparison_display
    display(comp)
    print_basic_urls(client, comp)
def print_basic_urls(client, comp):
    """Print the public and 30-minute signed viewer URLs for *comp*."""
    url_expiry = datetime.timedelta(minutes=30)
    print("\nURLs:")
    print(f" public URL: {client.comparisons.public_viewer_url(comp.identifier)}")
    print(
        f" signed URL: {client.comparisons.signed_viewer_url(comp.identifier, url_expiry)}"
    )
    print(f" expires: {url_expiry}")
def list_all_comparisons(system_args, prog, cmd_name):
    """Retrieve and display all comparisons."""
    arg_parser = with_std_options(
        argparse.ArgumentParser(description=list_all_comparisons.__doc__)
    )
    args = arg_parser.parse_args(system_args)
    # print('Running list, args:', args)
    client = create_client(args)
    display = default_comparison_display
    # print("Client:", client)
    comparisons = client.comparisons.all()
    num_comparisons = len(comparisons)
    print(f"Account {client.account_id} has {num_comparisons:d} comparison(s):")
    for i, comp in enumerate(comparisons, 1):
        display(comp, position=f"{i:d} of {num_comparisons:d}")
def list_one_comparison(system_args, prog, cmd_name):
    """Retrieve and display specific comparison or comparisons."""
    arg_parser = with_std_options(
        argparse.ArgumentParser(
            prog=f"{prog} {cmd_name}",  # so "-h / --help" shows "dr-compare <cmd>"
            description=list_one_comparison.__doc__,
        )
    )
    arg_parser.add_argument(
        "identifiers", metavar="<ID>", nargs="+", help="a comparison identifier"
    )
    args = arg_parser.parse_args(system_args)
    client = create_client(args)
    display = default_comparison_display
    num_comparisons = len(args.identifiers)
    for i, identifier in enumerate(args.identifiers, 1):
        # Unknown identifiers are reported but don't abort the remaining lookups.
        try:
            comp = client.comparisons.get(identifier)
            display(comp, position=f"{i:d} of {num_comparisons:d}")
            print_basic_urls(client, comp)
        except NotFound:
            print(f"Comparison not found with identifier: {identifier}")
def delete_comparison(system_args, prog, cmd_name):
    """Delete a specific comparison."""
    arg_parser = with_std_options(
        argparse.ArgumentParser(
            prog=f"{prog} {cmd_name}",  # so "-h / --help" shows "dr-compare <cmd>"
            # Bug fix: the description referenced list_one_comparison.__doc__
            # (copy-paste), so --help described the wrong command.
            description=delete_comparison.__doc__,
        )
    )
    arg_parser.add_argument(
        "identifier", metavar="<ID>", help="a comparison identifier"
    )
    args = arg_parser.parse_args(system_args)
    client = create_client(args)
    identifier = args.identifier
    try:
        client.comparisons.delete(identifier)
    except NotFound:
        print(f"Comparison not found with identifier: {identifier}")
def show_public_url(system_args, prog, cmd_name):
    """Generate a public (unsigned) URL to view a specific comparison."""
    arg_parser = with_std_options(
        argparse.ArgumentParser(
            prog=f"{prog} {cmd_name}",  # so "-h / --help" shows "dr-compare <cmd>"
            # Bug fix: the description referenced list_one_comparison.__doc__
            # (copy-paste), so --help described the wrong command.
            description=show_public_url.__doc__,
        )
    )
    arg_parser.add_argument(
        "identifiers", metavar="<ID>", nargs="+", help="a comparison identifier"
    )
    args = arg_parser.parse_args(system_args)
    client = create_client(args)
    # Plain iteration: the enumerate() index in the original was never used.
    for identifier in args.identifiers:
        print(client.comparisons.public_viewer_url(identifier))
def show_signed_url(system_args, prog, cmd_name):
    """Generate a signed URL to view a specific comparison."""
    arg_parser = with_std_options(
        argparse.ArgumentParser(
            prog=f"{prog} {cmd_name}",  # so "-h / --help" shows "dr-compare <cmd>"
            # Bug fix: the description referenced list_one_comparison.__doc__
            # (copy-paste), so --help described the wrong command.
            description=show_signed_url.__doc__,
        )
    )
    arg_parser.add_argument(
        "-m",
        "--expiry-mins",
        metavar="<MINS>",
        type=int,
        default=30,
        help="number of minutes this URL should be valid",
    )
    arg_parser.add_argument(
        "identifiers", metavar="<ID>", nargs="+", help="a comparison identifier"
    )
    args = arg_parser.parse_args(system_args)
    client = create_client(args)
    url_expiry = datetime.timedelta(minutes=args.expiry_mins)
    # Plain iteration: the enumerate() index in the original was never used.
    for identifier in args.identifiers:
        print(client.comparisons.signed_viewer_url(identifier, url_expiry))
# Canonical command name -> handler function.
COMMANDS = dict(
    create=create_comparison,
    all=list_all_comparisons,
    get=list_one_comparison,
    delete=delete_comparison,
    url=show_public_url,
    signed=show_signed_url,
)
# Alias -> canonical command name (resolved before COMMANDS lookup).
ALIASES = {
    "new": "create",
    "post": "create",
    "add": "create",
    "list": "all",
    "getall": "all",
    "del": "delete",
    "rm": "delete",
    "public": "url",
    "public-url": "url",
    "public_url": "url",
    "signed-url": "signed",
    "signed_url": "signed",
}
def make_usage(template, command_map, alias_map):
    """Generate the usage doc based on configured commands and aliases"""
    def format_command_info(command_name):
        """One usage entry: command name, its docstring and any aliases."""
        func = command_map[command_name]
        # Some commands (but not all) have aliases. Bug fix: the
        # "Aliases:" label used to be appended unconditionally, leaving a
        # dangling empty label on alias-less commands.
        aliases = sorted(k for k in alias_map if alias_map[k] == command_name)
        alias_note = f"\n    Aliases: {' '.join(aliases)}" if aliases else ""
        return f"  {command_name:8s} {func.__doc__}{alias_note}\n"
    command_info_parts = map(format_command_info, sorted(command_map))
    return template.format(COMMANDS="\n".join(command_info_parts))
def dr_compare_main(system_args=None):
    """CLI entry point: resolve the sub-command named in argv[1] and run it."""
    if system_args is None:
        system_args = sys.argv
    parser = argparse.ArgumentParser(
        description=DESCRIPTION, usage=make_usage(USAGE_TEMPLATE, COMMANDS, ALIASES)
    )
    parser.add_argument("command", help="Command to run")
    # Parse only argv[1] here; the selected command parses the rest itself.
    args = parser.parse_args(system_args[1:2])
    if args.command == "help":
        parser.print_usage()
        sys.exit(0)
    command_name = ALIASES.get(args.command, args.command)
    command = COMMANDS.get(command_name)
    if command is None:
        parser.error(
            f"Invalid command '{command_name}'. For list of commands: {parser.prog} -h\n"
        )
    command(system_args[2:], parser.prog, command_name)
if __name__ == "__main__":
    try:
        dr_compare_main(sys.argv)
    except SetupError as ex:
        # Setup/configuration problems get a clean one-line error, not a traceback.
        sys.stderr.write(f"Error: {ex}\n")
        sys.exit(1)
|
"""
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import traceback
import cgi
from flask import Blueprint, render_template, request, Response, url_for, redirect
from app import get_api_spark, get_api_tropo, get_default_room_id, get_notification_sms_phone_number
from app.database import db_session
from app.mod_user.models import RegisteredUser
from app.models import Floor, EngagementTrigger, Zone
try:
from html import unescape # python 3.4+
except ImportError:
try:
from html.parser import HTMLParser # python 3.x (<3.4)
except ImportError:
from HTMLParser import HTMLParser # python 2.x
unescape = HTMLParser().unescape
mod_engagement = Blueprint('mod_engagement', __name__, url_prefix='/engagement')
@mod_engagement.route('/', methods=['GET'])
def home():
    """Render the engagement landing page."""
    return render_template("engagement/engagement_home.html")
@mod_engagement.route('/screen/select', methods=['GET', 'POST'])
def engagement_screen_select():
    """GET: show the floor/zone picker; POST: redirect to the chosen screen."""
    if request.method == 'GET':
        all_floors = db_session.query(Floor).all()
        return render_template("engagement/screen/engagement_select.html", floors=all_floors)
    return redirect(url_for('.engagement_screen_show', hierarchy=request.form['hierarchy']))
@mod_engagement.route('/screen/<hierarchy>', methods=['GET'])
def engagement_screen_show(hierarchy):
    """Render the engagement screen for the zone named last in *hierarchy*."""
    # The zone is the last '>'-separated segment of the hierarchy string.
    zone_name = hierarchy.split('>')[-1]
    zone = db_session.query(Zone).filter(Zone.name == zone_name).first()
    triggers = [t.serialize() for t in get_engagement_triggers_per_zone(zone.id)]
    vertical_hierarchy = zone.get_vertical_hierarchy() if zone.vertical_name else None
    return render_template(
        "engagement/screen/engagement_show.html",
        hierarchy=hierarchy,
        triggers=triggers,
        vertical_hierarchy=vertical_hierarchy,
    )
@mod_engagement.route('/screen_dwell/select', methods=['GET', 'POST'])
def engagement_screen_dwell_select():
    """GET: show the floor/zone picker; POST: redirect to the dwell screen."""
    if request.method == 'GET':
        all_floors = db_session.query(Floor).all()
        return render_template("engagement/screen/engagement_select.html", floors=all_floors)
    return redirect(url_for('.engagement_screen_dwell_show', hierarchy=request.form['hierarchy']))
@mod_engagement.route('/screen_dwell/<hierarchy>', methods=['GET'])
def engagement_screen_dwell_show(hierarchy):
    """Render the dwell-time screen for the zone named last in *hierarchy*."""
    zone_name = hierarchy.split('>')[-1]
    zone = db_session.query(Zone).filter(Zone.name == zone_name).first()
    vertical_hierarchy = zone.get_vertical_hierarchy() if zone.vertical_name else None
    return render_template(
        "engagement/screen/engagement_show_dwell.html",
        hierarchy=hierarchy,
        vertical_hierarchy=vertical_hierarchy,
    )
@mod_engagement.route('/trigger/', methods=['GET'])
def engagement_trigger_list():
    """Render the trigger overview page."""
    return render_template("engagement/trigger/trigger_home.html")
@mod_engagement.route('/trigger/add', methods=['GET'])
def engagement_trigger_add():
    """Render the form to create a new trigger (needs floors + users)."""
    all_floors = db_session.query(Floor).all()
    all_users = db_session.query(RegisteredUser).all()
    return render_template(
        "engagement/trigger/trigger_add.html",
        users=all_users,
        floors=all_floors,
        default_room_id=get_default_room_id(),
    )
@mod_engagement.route('/trigger/user/add', methods=['POST'])
def engagement_trigger_user_add():
    """Create Spark and/or Tropo triggers for a user+zone from posted JSON.

    Returns a JSON envelope {'error', 'error_message', 'message'}.
    """
    output = {
        'error': True,
        'error_message': 'Unknown error',
        'message': None,
    }
    if request.json:
        request_json = request.json
        registered_user_id = request_json['registered_user_id']
        zone_id = request_json['zone']
        event = request_json['event']
        triggers_created = 0
        # A trigger is only created when its checkbox key is present AND
        # both of its fields are non-empty.
        post_on_spark = 'spark_checkbox' in request_json
        if post_on_spark:
            spark_target = request_json['spark_target']
            spark_value = request_json['spark_value']
            if spark_target and spark_value:
                spark_trigger = EngagementTrigger('spark', spark_target, spark_value, event, zone_id, registered_user_id, extras=None)
                db_session.add(spark_trigger)
                triggers_created += 1
        post_on_tropo = 'tropo_checkbox' in request_json
        if post_on_tropo:
            tropo_target = request_json['tropo_target']
            tropo_platform = request_json['tropo_platform']
            tropo_value = request_json['tropo_value']
            if tropo_target and tropo_platform and tropo_value:
                tropo_trigger = EngagementTrigger('tropo', tropo_target, tropo_value, event, zone_id, registered_user_id, extras=tropo_platform)
                db_session.add(tropo_trigger)
                triggers_created += 1
        try:
            db_session.commit()
            output = {
                'error': False,
                'error_message': None,
                'message': "{} trigger(s) created".format(triggers_created)
            }
        except Exception:
            # Bug fix: was a bare 'except:' (also swallowed SystemExit /
            # KeyboardInterrupt). Roll back so the session is usable again
            # after a failed commit.
            db_session.rollback()
            output = {
                'error': True,
                'error_message': 'Error on trigger creation.',
                'message': None,
            }
            traceback.print_exc()
    else:
        output = {
            'error': True,
            'error_message': 'JSON data not provided on request',
            'message': None,
        }
    return Response(json.dumps(output), mimetype='application/json')
@mod_engagement.route('/trigger/user/<registered_user_id>/view', methods=['GET'])
def engagement_trigger_user_list(registered_user_id):
    """Placeholder page for listing a user's triggers (not implemented yet)."""
    return 'Under construction'
@mod_engagement.route('/trigger/user/fire', methods=['POST'])
def fire_user_zone_trigger():
    """Fire a stored trigger: post its templated message via Spark or Tropo.

    Expects JSON {"trigger_id": ...}; returns a JSON status envelope.
    """
    try:
        trigger = None
        if request.json:
            trigger = db_session.query(EngagementTrigger).filter(EngagementTrigger.id == request.json['trigger_id']).first()
        if trigger:
            user = db_session.query(RegisteredUser).filter(RegisteredUser.id == trigger.registered_user_id).first()
            zone = db_session.query(Zone).filter(Zone.id == trigger.zone_id).first()
            if user and zone:
                platform = trigger.platform
                # Expand {user.*} / {zone.*} placeholders in the message text.
                text = trigger.value
                text = replace_user_info_on_trigger_text(text, user)
                text = replace_zone_information(text, zone)
                response = None
                # Bug fix: 'ok' was unbound when trigger.platform was neither
                # 'spark' nor 'tropo', raising NameError (masked by the outer
                # except). Default to failure instead.
                ok = False
                if trigger.platform == 'spark':
                    # do action
                    room_id = trigger.target
                    response = get_api_spark().messages.create(roomId=room_id, text=text)
                    ok = response
                elif trigger.platform == 'tropo':
                    number = trigger.target
                    number = replace_user_info_on_trigger_text(number, user)
                    tropo_platform = trigger.extras
                    response = get_api_tropo().triggerTropoWithMessageAndNumber(text, number, voice="dave", type=tropo_platform)
                    ok = response.status_code == 200
                if ok:
                    output = {
                        'error': False,
                        'error_message': None,
                        'message': 'Successfully posted on {}'.format(platform),
                    }
                else:
                    output = {
                        'error': True,
                        'error_message': 'Error when trying to post to on {}'.format(platform),
                        'message': None,
                    }
            else:
                output = {
                    'error': True,
                    'error_message': 'User or Zone not found ids = {} / {}'.format(trigger.registered_user_id, trigger.zone_id),
                    'message': None,
                }
        else:
            output = {
                'error': True,
                'error_message': 'Trigger id not provided as json.',
                'message': None,
            }
    except Exception as e:
        output = {
            'error': True,
            'error_message': 'Unknown error\n{}'.format(str(e)),
            'message': None,
        }
        traceback.print_exc()
    return Response(json.dumps(output), mimetype='application/json')
@mod_engagement.route('/trigger_dwell', methods=['POST'])
def fire_exceeded_dwell_time():
    """Notify a user (Spark room + optional SMS) of an exceeded dwell time."""
    error = True
    error_message = 'Unknown error'
    message = None
    try:
        if request.json:
            user = db_session.query(RegisteredUser).filter(RegisteredUser.id == request.json['user_id']).first()
            hierarchy = request.json['hierarchy']
            hierarchy_vertical_name = request.json['hierarchy_vertical_name']
            if user:
                # Prefer the vertical (display) name when one was supplied.
                h = hierarchy
                if hierarchy_vertical_name:
                    h = hierarchy_vertical_name
                h = unescape(h)
                spark_message = 'Employee {} has stayed for too long at {}'.format(user.name, h)
                get_api_spark().messages.create(get_default_room_id(), text=spark_message)
                if user.phone:
                    tropo_message = 'Please leave {}'.format(h)
                    get_api_tropo().triggerTropoWithMessageAndNumber(tropo_message, user.phone, type='text')
                error = False
                error_message = None
                message = 'Triggers for {}. OK'.format(user.name)
            else:
                error = True
                error_message = 'User not found'
    except Exception as e:
        # Bug fix: the caught exception was discarded, so clients only ever
        # saw 'Unknown error'. Surface the reason, matching
        # fire_user_zone_trigger's format.
        error_message = 'Unknown error\n{}'.format(str(e))
        traceback.print_exc()
    finally:
        output = {
            'error': error,
            'error_message': error_message,
            'message': message,
        }
    return Response(json.dumps(output), mimetype='application/json')
def replace_user_info_on_trigger_text(text, user):
    """Substitute the {user.*} placeholders in *text* with the user's fields."""
    substitutions = {
        '{user.name}': str(user.name),
        '{user.phone}': str(user.phone),
        '{user.id}': str(user.id),
    }
    for placeholder, value in substitutions.items():
        text = text.replace(placeholder, value)
    return text
def replace_zone_information(text, zone):
    """Substitute the {zone.*} placeholders in *text* with the zone's fields.

    Vertical (display) names take precedence over raw names when set.
    """
    zone_label = zone.vertical_name if zone.vertical_name else zone.name
    floor_label = zone.floor.vertical_name if zone.floor.vertical_name else zone.floor.name
    for placeholder, value in (
        ('{zone.name}', zone_label),
        ('{zone.id}', str(zone.id)),
        ('{zone.floor}', floor_label),
    ):
        text = text.replace(placeholder, value)
    return text
def get_engagement_triggers_per_zone(zone_id):
    """Return all EngagementTrigger rows attached to the given zone id.

    Idiom fix: Query.all() already returns a list, so the element-by-element
    copy loop was redundant.
    """
    return db_session.query(EngagementTrigger).filter(
        EngagementTrigger.zone_id == zone_id).all()
|
<reponame>dankilman/pysource<filename>pysource/transport.py
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fcntl
import sys
import time
import uuid
import select
import os
import json
import socket
import errno
from SocketServer import (ThreadingUnixStreamServer,
StreamRequestHandler)
from io import BytesIO
from StringIO import StringIO
import traceback
import pysource
from pysource import config
from pysource import remote_call_handlers
from pysource import request_context
# Wire-level status strings returned in every response body.
RESPONSE_STATUS_OK = "ok"
RESPONSE_STATUS_ERROR = "error"
# Codes sent over the control socket: 1 signals a daemon-side error,
# 2 prefixes the byte-length of the piped payload.
CONTROL_SOCKET_ERROR_CODE = 1
CONTROL_SOCKET_LENGTH_CODE = 2
def unix_socket_path():
    """Path of the daemon's unix domain socket (was a lambda; PEP 8 E731)."""
    return os.path.join(config.pysource_dir, 'socket')
DEBUG = False
DEBUG_CLIENT = True
DEBUG_SERVER = False
def _handle(req_type, payload, **kwargs):
    """Look up and invoke the remote-call handler registered for *req_type*.

    Raises RuntimeError when no handler is registered for the request type.
    """
    target = remote_call_handlers.get(req_type)
    if target is not None:
        return target(**payload)
    raise RuntimeError('Unknown request type: {0}'.format(req_type))
class RequestHandler(StreamRequestHandler):
    """Handles one daemon request: plain calls and piped (streaming) calls."""
    def __init__(self, request, client_address, server):
        # Created lazily in handle() only for piped requests.
        self.pipe_control_handler = None
        StreamRequestHandler.__init__(self, request, client_address, server)
    def handle(self):
        """Read the request body, dispatch it, and write back a status body."""
        try:
            res_status = RESPONSE_STATUS_OK
            body = _read_body(self.rfile)
            if body['piped'] is True:
                # Piped mode: a secondary control socket (named after the
                # request uid) coordinates streaming over the data socket.
                request_context.piped = True
                self.pipe_control_handler = \
                    PipeControlSocketHandler(body['uid'])
                self._handle_piped(body, self.pipe_control_handler)
                res_payload = {}
            else:
                res_payload = _handle(**body)
        except Exception:
            # Any handler failure is serialized back to the client with a
            # 'daemon: '-prefixed traceback.
            res_status = RESPONSE_STATUS_ERROR
            error = StringIO()
            error.write('daemon: ')
            traceback.print_exc(file=error)
            res_payload = {'error': error.getvalue()}
        if self.pipe_control_handler:
            # Piped responses travel over the control socket, not the data one.
            response_file = self.pipe_control_handler.wfile
            if res_status == RESPONSE_STATUS_ERROR:
                try:
                    self.wfile.flush()
                except socket.error:
                    pass
                # Tell the client's control loop that an error occurred.
                response_file.write('{}\r\n'.format(CONTROL_SOCKET_ERROR_CODE))
                response_file.flush()
        else:
            response_file = self.wfile
        _write_body(response_file, {
            'payload': res_payload,
            'status': res_status
        })
    def finish(self):
        """Standard teardown plus closing the control socket, if any."""
        StreamRequestHandler.finish(self)
        if self.pipe_control_handler:
            self.pipe_control_handler.close()
    def _handle_piped(self, body, pipe_control_handler):
        """Run the handler with stdin/stdout redirected to piped sockets."""
        # Non-blocking mode is required by the select()-based piped sockets.
        self.connection.setblocking(0)
        try:
            pipe_control_handler.accept()
            piped_input = PipeControlledInputSocket(
                self.rfile,
                self.connection,
                pipe_control_handler.rfile,
                pipe_control_handler.conn)
            piped_output = PipeControlledOutputSocket(
                self.wfile,
                self.connection,
                pipe_control_handler.wfile,
                pipe_control_handler.conn)
            request_context.stdin = piped_input
            request_context.stdout = piped_output
            result = _handle(**body)
            if result is not None:
                piped_output.write(result)
            piped_input.close()
            piped_output.close()
        finally:
            # Restore blocking mode before the framework writes the response.
            self.connection.setblocking(1)
            pipe_control_handler.conn.setblocking(1)
class PipeControlSocketHandler(object):
    """Per-request unix control socket, named after the request uid.

    The daemon side accept()s, the client side connect()s; both ends then
    exchange control codes over rfile/wfile.
    """
    def __init__(self, uid):
        self.uid = uid
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # Socket path lives next to the daemon's main socket, keyed by uid.
        self.unix_socket_path = os.path.join(config.pysource_dir, self.uid)
        self.conn = None
        self.wfile = None
        self.rfile = None
        # True until connect() is called; decides teardown behavior in close().
        self.server = True
    def accept(self):
        """Daemon side: bind the uid socket and wait for the client."""
        self.sock.bind(self.unix_socket_path)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        self._setup(conn)
    def connect(self):
        """Client side: connect to the daemon's uid socket, retrying while
        the daemon has not created it yet (ENOENT), ~2s max."""
        self.server = False
        retries = 20
        current_tries = 0
        while True:
            try:
                current_tries += 1
                self.sock.connect(self.unix_socket_path)
                break
            except socket.error, e:
                if e.errno == errno.ENOENT and current_tries < retries:
                    time.sleep(0.1)
                else:
                    raise
        self._setup(self.sock)
    def _setup(self, conn):
        """Switch to non-blocking mode and create buffered file wrappers."""
        self.conn = conn
        self.conn.setblocking(0)
        self.rfile = self.conn.makefile('rb', -1)
        self.wfile = self.conn.makefile('wb', 0)
    def close(self):
        """Flush and tear down both file wrappers, the connection and, on the
        server side, the listener socket and its filesystem entry."""
        if not self.wfile.closed:
            try:
                self.conn.setblocking(1)
                self.wfile.flush()
            except socket.error:
                pass
            self.wfile.close()
        self.rfile.close()
        if self.server:
            try:
                self.conn.shutdown(socket.SHUT_WR)
            except socket.error:
                pass
            try:
                self.sock.close()
            except socket.error:
                pass
        self.conn.close()
        if self.server and os.path.exists(self.unix_socket_path):
            os.remove(self.unix_socket_path)
class PipeControlledBaseSocket(object):
    """Shared state for the piped data/control socket pair."""
    def __init__(self, data_file, data_socket, control_file, control_socket):
        self.closed = False
        (self.data_file, self.data_socket,
         self.control_file, self.control_socket) = (
            data_file, data_socket, control_file, control_socket)
        # Both raw sockets, in the order select() callers expect them.
        self.sockets = [data_socket, control_socket]
class PipeControlledInputSocket(PipeControlledBaseSocket):
    """Read side of a piped transfer.

    Bytes arrive on the non-blocking data socket; the peer announces the
    total byte count (or an error) on the control socket, which is how we
    know when the stream is complete.
    """
    def __init__(self, data_file, data_socket, control_file, control_socket):
        super(PipeControlledInputSocket, self).__init__(data_file,
                                                        data_socket,
                                                        control_file,
                                                        control_socket)
        self.bytes_read = 0
        # Unknown until the peer sends a length message on the control socket.
        self.total_bytes = None
        self.done = False
        self.read_sockets = self.sockets
        self.control_consumed = False
    def p(self, message):
        """Debug print, tagged with the side (server/client) of this socket."""
        if DEBUG and ((DEBUG_CLIENT and not request_context.piped) or
                      (DEBUG_SERVER and request_context.piped)):
            prefix = '(SERVER_IN)' if request_context.piped else '(CLIENT_IN)'
            print '{} {} (total_bytes={}, bytes_read={}, done={})'.format(
                prefix, message, self.total_bytes, self.bytes_read, self.done)
    def read(self, length=0, blocking=True):
        """Read up to *length* bytes (0 = unbounded).

        Returns '' when the stream has completed, None when non-blocking and
        nothing is available, otherwise the bytes read so far.
        """
        self.p('read length={}, blocking={}'.format(
            length, blocking))
        if self.done:
            return ''
        result = BytesIO()
        while True:
            self.p('loop')
            if self.bytes_read == self.total_bytes:
                break
            timeout = 30 if blocking else 0
            readable, _, _ = select.select(self.read_sockets, [], [], timeout)
            if self.control_socket in readable:
                # Length/error message arrived; stop watching the control socket.
                self.read_sockets = [self.data_socket]
                total_bytes = self.consume_control()
                if total_bytes == -1:  # error
                    self.total_bytes = self.bytes_read
                    break
                else:
                    self.total_bytes = total_bytes
                    self.p('total_bytes={}'.format(self.total_bytes))
            if self.data_socket in readable:
                # Cap the chunk at the announced total, when it is known.
                to_read = 1024 if length <= 0 else length - result.tell()
                if self.total_bytes and self.total_bytes < to_read:
                    to_read = self.total_bytes
                try:
                    data = self.data_socket.recv(to_read)
                    self.p('data={}, len={}'.format(data, len(data)))
                    self.bytes_read += len(data)
                    result.write(data)
                except socket.error, e:
                    if e.errno == errno.EAGAIN:
                        # Nothing to read right now; retry when blocking,
                        # give up otherwise.
                        self.p('errno.EAGAIN blocking={}'.format(
                            blocking))
                        if blocking:
                            pass
                        else:
                            break
                    else:
                        raise
                if 0 < length == result.tell():
                    break
            if len(readable) == 0 and not blocking:
                break
        if self.bytes_read == self.total_bytes:
            self.done = True
        self.p('out done={}, bytes_read={}, total_bytes={}'.format(
            self.done, self.bytes_read, self.total_bytes))
        value = result.getvalue()
        if len(value) == 0:
            if self.done:
                return ''
            else:
                return None
        else:
            return result.getvalue()
    def consume_control(self):
        """Read one control message; returns the announced byte total,
        -1 on a peer error code, or -2 when already consumed."""
        if self.control_consumed:
            return -2
        self.control_consumed = True
        # Control messages are read synchronously (line-based protocol).
        self.control_socket.setblocking(1)
        try:
            control_code = int(self.control_file.readline())
            self.p('control_code={}'.format(control_code))
            if control_code == CONTROL_SOCKET_ERROR_CODE:
                return -1
            else:
                return int(self.control_file.readline())
        finally:
            self.control_socket.setblocking(0)
    def close(self):
        # Nothing to release here; the sockets are owned by the caller.
        self.p('read_close done={}, bytes_read={}, total_bytes={}'.format(
            self.done, self.bytes_read, self.total_bytes))
class PipeControlledOutputSocket(PipeControlledBaseSocket):
    """Write side of a piped transfer.

    Bytes go out on the non-blocking data socket; close() announces the total
    byte count on the control socket so the reader knows when to stop.
    """
    def __init__(self, data_file, data_socket, control_file, control_socket):
        super(PipeControlledOutputSocket, self).__init__(data_file,
                                                         data_socket,
                                                         control_file,
                                                         control_socket)
        self.bytes_written = 0
    def p(self, message):
        """Debug print, tagged with the side (server/client) of this socket."""
        if DEBUG and ((DEBUG_CLIENT and not request_context.piped) or
                      (DEBUG_SERVER and request_context.piped)):
            prefix = '(SERVER_OUT)' if request_context.piped else \
                '(CLIENT_OUT)'
            print '{} {}'.format(prefix, message)
    def write(self, data):
        """Send all of *data*, waiting (up to 30s per select) for writability."""
        self.p('write data={}'.format(data))
        # memoryview lets us slice the unsent remainder without copying.
        view = memoryview(data)
        total_bytes_to_write = len(view)
        total_sent = 0
        while True:
            if total_sent == total_bytes_to_write:
                break
            _, writable, _ = select.select([], [self.data_socket], [], 30)
            if self.data_socket in writable:
                sent = self.data_socket.send(view[total_sent:])
                total_sent += sent
        self.bytes_written += total_bytes_to_write
        self.p('write out bytes_written={}'.format(self.bytes_written))
    def close(self):
        """Announce the final byte count on the control socket."""
        self.p('write close bytes_written={}'.format(self.bytes_written))
        self.control_socket.setblocking(1)
        try:
            length_control_message = '{}\r\n{}\r\n'.format(
                CONTROL_SOCKET_LENGTH_CODE, self.bytes_written)
            self.control_file.write(length_control_message)
            self.control_file.flush()
        finally:
            self.control_socket.setblocking(0)
def do_regular_client_request(req_type, payload):
    """Issue a plain (non-piped) request to the daemon; return its payload."""
    return _do_client_request(req_type, payload, pipe_handler=None)
def do_piped_client_request(req_type, payload):
def p(message):
if DEBUG:
print '(CLIENT) {}'.format(message)
def pipe_handler(sock, req, res, uid):
sock.setblocking(0)
try:
pipe_control_handler = PipeControlSocketHandler(uid)
pipe_control_handler.connect()
piped_output = PipeControlledOutputSocket(
req,
sock,
pipe_control_handler.wfile,
pipe_control_handler.conn)
piped_input = PipeControlledInputSocket(
res,
sock,
pipe_control_handler.rfile,
pipe_control_handler.conn)
stdin_fd = sys.stdin.fileno()
stdin_buf = 16 * 1024
fl = fcntl.fcntl(stdin_fd, fcntl.F_GETFL)
fcntl.fcntl(stdin_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
data_sock = sock
data_sock_buf = 1024
control_soc = pipe_control_handler.conn
control_soc_written = False
read_sockets = [stdin_fd, data_sock, control_soc]
while True:
readable = select.select(read_sockets, [], [], 30)[0]
if stdin_fd in readable:
stdin_read = os.read(stdin_fd, stdin_buf)
if stdin_read == '':
piped_output.close()
read_sockets = [data_sock, control_soc]
elif stdin_read is not None:
piped_output.write(stdin_read)
if control_soc in readable:
control_soc_written = True
if data_sock in readable:
if control_soc_written:
buf = piped_input.read()
else:
buf = piped_input.read(data_sock_buf,
blocking=False)
if buf is '':
break
elif buf is not None:
sys.stdout.write(buf)
try:
sys.stdout.flush()
except IOError, e:
if e.errno == errno.EPIPE:
raise pysource.error('Flushing stdout failed.'
' It seems the process'
' being piped to, '
'terminated.')
else:
raise
if control_soc_written:
break
piped_input.consume_control()
piped_input.close()
pipe_control_handler.conn.setblocking(1)
return pipe_control_handler
finally:
sock.setblocking(1)
return _do_client_request(req_type, payload, pipe_handler)
def _do_client_request(req_type, payload, pipe_handler=None):
    """Send one request to the daemon and return the response payload.

    When *pipe_handler* is given the request is piped: the handler runs the
    streaming loop and the final response body arrives on the control socket.
    Raises pysource.error when the daemon reports an error status.
    """
    sock = _client_connect()
    req = sock.makefile('wb', 0)
    res = sock.makefile('rb', -1)
    piped = pipe_handler is not None
    # Unique id names the per-request control socket on the daemon side.
    uid = str(uuid.uuid4())
    try:
        _write_body(req, {
            'req_type': req_type,
            'payload': payload,
            'piped': piped,
            'uid': uid
        })
        if piped:
            pipe_control_handler = pipe_handler(sock, req, res, uid)
            res_body = _read_body(pipe_control_handler.rfile)
            pipe_control_handler.close()
        else:
            res_body = _read_body(res)
        res_body_payload = res_body['payload']
        if res_body['status'] == RESPONSE_STATUS_ERROR:
            error = res_body_payload['error']
            raise pysource.error('{0}'.format(error))
        return res_body_payload
    finally:
        req.close()
        res.close()
        sock.close()
def _client_connect():
    """Connect to the daemon's unix socket; return the connected socket.

    A missing or refusing socket is translated into a friendly "is the
    daemon running?" pysource.error.
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(unix_socket_path())
    except socket.error, e:
        if e.errno in [errno.ENOENT, errno.ECONNREFUSED]:
            raise pysource.error('Is the pysource daemon running? '
                                 'Run "pysource daemon start" to start '
                                 'it.')
        else:
            raise
    return sock
def _read_body(sock):
    """Read one length-prefixed JSON body from *sock* and return it decoded.

    Wire format (see _write_body): ASCII length, newline, then the JSON text.
    """
    body_length = int(sock.readline())
    raw_body = sock.read(body_length)
    return json.loads(raw_body)
def _write_body(sock, body):
    """Write *body* as a length-prefixed JSON message to *sock*.

    Bug fix: the length was passed to write() as an int; file-like write()
    requires a string, so this raised TypeError. Stringify it first.
    """
    json_body = json.dumps(body)
    json_body_len = len(json_body)
    sock.write(str(json_body_len))
    sock.write('\r\n')
    sock.write(json_body)
def start_server():
    """Bind the daemon's unix socket and serve pysource requests forever."""
    address = unix_socket_path()
    server = ThreadingUnixStreamServer(address, RequestHandler)
    server.serve_forever()
def cleanup():
    """Used for forced cleanup.

    Removes the daemon's unix socket file if present; a concurrent removal
    (ENOENT, seen in tests) is tolerated, anything else is re-raised.
    """
    if os.path.exists(unix_socket_path()):
        try:
            os.remove(unix_socket_path())
        except (OSError, IOError), e:
            # could happen in tests
            if e.errno == errno.ENOENT:
                pass
            else:
                raise
|
<gh_stars>1-10
from django.db import models
from django.contrib.auth.models import User
import uuid
class Company(models.Model):
    """A customer company with basic contact details."""
    name = models.CharField(max_length=128)
    telephone = models.CharField(max_length=128)
    mail = models.CharField(max_length=128)
    def __str__(self):
        return self.name
#class Staff(models.Model):
#    name = models.CharField(max_length=128)
#    telephone = models.CharField(max_length=128)
#    mail = models.CharField(max_length=128)
    #main_sales_staff = models.TextField()
    #sub_sales_staff = models.TextField()
    #main_technical_staff = models.TextField()
    #sub_technical_staff = models.TextField()
    #telephone = models.CharField(max_length=128)
    #mail = models.CharField(max_length=128)
# NOTE(review): orphaned method left behind when the Staff class above was
# commented out — it is now a module-level function and is never called;
# consider deleting it together with the commented class.
def __str__(self):
    return self.name
class Database(models.Model):
    """Connection settings for a monitored (Zabbix) database host."""
    # Defaults applied to new rows via the field 'default=' arguments below.
    STATUS_USER = "zabbix"
    STATUS_PASSWD = "<PASSWORD>"
    STATUS_DB = "zabbix"
    STATUS_PORT = 3306
    STATUS_CHARSET = "utf8"
    # Bug fix: 'hostname' was declared twice with identical definitions;
    # the duplicate has been removed.
    hostname = models.CharField(max_length=128)
    host = models.CharField(max_length=128)
    user = models.CharField(default=STATUS_USER, max_length=128)
    passwd = models.CharField(default=STATUS_PASSWD, max_length=128)
    db = models.CharField(default=STATUS_DB, max_length=128)
    port = models.IntegerField(default=STATUS_PORT)
    charset = models.CharField(default=STATUS_CHARSET, max_length=128)
    def __str__(self):
        return self.hostname
#class Templetes(models.Model):
# cace_id = models.IntegerField()
# filename = models.CharField(max_length=128)
# memo = models.TextField(blank=True)
# created_at = models.DateTimeField(auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True)
# def __str__(self):
# return self.cace
class Cace(models.Model):
    """A case record linking a company, assigned staff and a monitored database."""
    #cace_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    #cace_id = models.IntegerField()
    cace = models.CharField(max_length=128)
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0;
    # on_delete becomes mandatory from 2.0 onward — confirm target version.
    company = models.ForeignKey(Company)
    service = models.TextField(blank=True)
    # Main/sub staff assignments, split by role (technical vs sales).
    technical_main_staff = models.ForeignKey(User, related_name='technical_main_staff')
    technical_sub_staff = models.ForeignKey(User, related_name='technical_sub_staff')
    sales_main_staff = models.ForeignKey(User, related_name='sales_main_staff')
    sales_sub_staff = models.ForeignKey(User, related_name='sales_sub_staff')
    monitoring_server = models.ForeignKey(Database)
    #Templetes = models.ForeignKey(Templetes)
    memo = models.TextField(blank=True)
    #created_at = models.DateTimeField(auto_now_add=True)
    #updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.cace
#sub_sales_staff = models.TextField()
#main_technical_staff = models.TextField()
#sub_technical_staff = models.TextField()
#telephone = models.CharField(max_length=128)
#mail = models.CharField(max_length=128)
#def __unicode__(self):
# return '{}'.format(self.your_field)
#def __repr__(self):
# return "{}: {}".format(self.pk, self.name)
#__str__ = __repr__
#def __unicode__(self):
# return '{}'.format(self.your_field)
|
<reponame>tliakos/default-toolchain-1490724000329
from __future__ import with_statement
import doctest
import socket
import unittest2
try:
import urllib.error as urllib_error
except ImportError:
import urllib2 as urllib_error
from klout import *
class TestKlout(unittest2.TestCase):
    """Constructor contract: Klout requires an API key."""
    def test_klout(self):
        with self.assertRaises(TypeError):
            Klout()
class TestKloutIdentity(unittest2.TestCase):
    """Identity endpoints: mapping Twitter/Google+ ids to klout identities."""
    def setUp(self):
        # Fix: use a context manager so the key file handle is always
        # released, even if readline() raises (the original leaked it).
        with open('key') as f:
            self.key = f.readline().strip()
    def test_identityByTwitterId(self):
        k = Klout(self.key)
        result = k.identity.klout(tw=11158872)
        self.assertIn('id', result)
        self.assertIn('network', result)
        result = k.identity.klout(tw='11158872')
        self.assertIn('id', result)
        self.assertIn('network', result)
    def test_identityByGooglePlusId(self):
        k = Klout(self.key)
        result = k.identity.klout(gp=112975106809988327760)
        self.assertIn('id', result)
        self.assertIn('network', result)
        result = k.identity.klout(gp='112975106809988327760')
        self.assertIn('id', result)
        self.assertIn('network', result)
    def test_identityByTwitterScreenName(self):
        k = Klout(self.key)
        result = k.identity.klout(screenName='erfaan')
        self.assertIn('id', result)
        self.assertIn('network', result)
    def test_googlePlusIdByIdentity(self):
        k = Klout(self.key)
        result = k.identity.gp(klout=11747)
        self.assertIn('id', result)
        self.assertIn('network', result)
        result = k.identity.gp(klout='11747')
        self.assertIn('id', result)
        self.assertIn('network', result)
    def test_twitterIdByIdentity(self):
        k = Klout(self.key)
        result = k.identity.tw(klout=11747)
        self.assertIn('id', result)
        self.assertIn('network', result)
        result = k.identity.tw(klout='11747')
        self.assertIn('id', result)
        self.assertIn('network', result)
class TestKloutUser(unittest2.TestCase):
    """User endpoints: score, influence and topics for a klout id."""
    def setUp(self):
        # Fix: use a context manager so the key file handle is always
        # released, even if readline() raises (the original leaked it).
        with open('key') as f:
            self.key = f.readline().strip()
    def test_userScore(self):
        k = Klout(self.key)
        result = k.user.score(kloutId=11747)
        self.assertIn('score', result)
        self.assertLess(result['score'], 100.0)
        self.assertGreater(result['score'], 0.0)
        result = k.user.score(kloutId='11747')
        self.assertIn('score', result)
        self.assertLess(result['score'], 100.0)
        self.assertGreater(result['score'], 0.0)
    def test_userInfluence(self):
        k = Klout(self.key)
        result = k.user.influence(kloutId=11747)
        self.assertIn('myInfluencers', result)
        self.assertIn('myInfluencees', result)
        result = k.user.influence(kloutId='11747')
        self.assertIn('myInfluencers', result)
        self.assertIn('myInfluencees', result)
    def test_userTopics(self):
        klout = Klout(self.key)
        result = klout.user.topics(kloutId=11747)
        self.assertIsInstance(result, list)
        for topic in result:
            for k, v in topic.items():
                self.assertIn(k, ['imageUrl', 'slug', 'displayName', 'id', 'name', 'topicType'])
        result = klout.user.topics(kloutId='11747')
        self.assertIsInstance(result, list)
        for topic in result:
            for k, v in topic.items():
                self.assertIn(k, ['displayName', 'imageUrl', 'slug', 'id', 'name', 'topicType'])
class TestTimeout(unittest2.TestCase):
    """Timeout handling: a tiny timeout must surface as URLError(socket.timeout)."""
    def setUp(self):
        # Fix: use a context manager so the key file handle is always
        # released, even if readline() raises (the original leaked it).
        with open('key') as f:
            self.key = f.readline().strip()
    def test_timeout(self):
        k = Klout(self.key)
        result = k.user.score(kloutId=11747, timeout=60)
        self.assertIn('score', result)
        with self.assertRaises(urllib_error.URLError) as er:
            result = k.user.score(kloutId=11747, timeout=0.001)
            self.assertIsInstance(er.exception.reason, socket.timeout)
class TestSecure(unittest2.TestCase):
    """secure=True (HTTPS) must still return a usable score."""
    def setUp(self):
        # Fix: use a context manager so the key file handle is always
        # released, even if readline() raises (the original leaked it).
        with open('key') as f:
            self.key = f.readline().strip()
    def test_secure(self):
        k = Klout(self.key, secure=True)
        result = k.user.score(kloutId=11747)
        self.assertIn('score', result)
if __name__ == '__main__':
unittest2.main() |
import os
import numbers
import base64
from django.contrib.auth.models import User
from django.db import transaction
import io
import pandas as pd
import numpy as np
from openfacstrack.apps.track.models import (
PanelMetadata,
Parameter,
ProcessedSample,
Result,
DataProcessing,
Patient,
PatientMetadataDict,
PatientMetadata,
Panel,
NumericValue,
TextValue,
DateValue,
UploadedFile,
ValidationEntry,
GatingStrategy,
)
class ClinicalSampleFile:
"""
Validates and uploads a file with results from clinical samples.
"""
def __init__(
self,
file_name=None,
file_contents=None,
uploaded_file: UploadedFile = None,
user: User = None,
gating_strategy: GatingStrategy = None,
):
"""load contents of file into a data frame and set other attribs.
Parameters
----------
file_name : string
name of file
file_contents : InMemoryUploadedFile
Django object with binary contents of uploaded file
uploaded_file : UploadedFile
custom object to store details of uploaded file
user : User
Django object representing user making upload
gating_strategy : GatingStrategy
Custom object representing the GatingStrategy for this upload
Returns
-------
None
"""
if uploaded_file:
self.upload_file = uploaded_file
file_name = uploaded_file.name
file_contents = uploaded_file.content
# print(file_contents)
self.content = file_contents
self.file_name = file_name
self.gating_strategy = gating_strategy
self.df = pd.read_csv(self.content, parse_dates=["Date"])
# List of columns always expected
# ToDo: Find out if any of these columns are 'required' - if so
# cannot continue without them.
# Use variables to store static_column names in case they change
# in future
self.sc_panel = "Panel"
self.sc_clinical_sample = "Clinical_sample"
self.sc_filename = "filename"
self.sc_operator1 = "Operator name"
self.sc_comments = "Comments"
self.sc_batch = "batch"
self.sc_date = "Date"
self.required_columns = [
self.sc_filename,
self.sc_panel,
self.sc_clinical_sample,
]
self.static_columns = [
self.sc_batch,
self.sc_operator1,
self.sc_comments,
self.sc_date,
]
# Store the unique panels in the data
# ToDo: I think there should be only one unique panel - check.
self.panels = self.df["Panel"].unique().tolist()
self.panel_name = self.panels[0].upper()
# Compute names of parameters present. These are all the other
# columns in the file that are not in the static_columns list
# and are not unregistered_derived_parameters
parameter_columns = set(self.df.columns) - set(self.static_columns)
parameter_columns -= set(self.required_columns)
self.parameter_columns = list(parameter_columns)
# Store unregistered parameters. Derived ones will be dynamically
# added to the Parameter table before upload
self.unregistered_derived_parameters = []
self.unregistered_parameters = []
for parameter_column in self.parameter_columns:
try:
parameter_object = Parameter.objects.get(
gating_hierarchy=parameter_column
)
except Parameter.DoesNotExist:
if parameter_column.endswith("Count_back") or parameter_column.endswith(
"freq"
):
self.unregistered_derived_parameters.append(parameter_column)
else:
self.unregistered_parameters.append(parameter_column)
self.parameter_columns = [
column
for column in self.parameter_columns
if column not in self.unregistered_parameters
and column not in self.unregistered_derived_parameters
]
# Names for pseudo parameters (parameters computed from data)
self.pseudo_parameters_numeric = []
if self.sc_batch in self.df.columns:
self.pseudo_parameters_numeric.append(
(self.sc_batch, f"{self.panel_name}_batch")
)
if self.sc_operator1 in self.df.columns:
self.pseudo_parameters_numeric.append(
(self.sc_operator1, f"{self.panel_name}_operator_1")
)
self.pseudo_parameters_date = []
if self.sc_date in self.df.columns:
self.pseudo_parameters_date.append(
(self.sc_date, f"{self.panel_name}_date_processed")
)
self.pseudo_parameters_text = []
if self.sc_comments in self.df.columns:
self.pseudo_parameters_text.append(
(self.sc_comments, f"{self.panel_name}_comments")
)
# Number of rows to process
self.nrows = len(self.df)
# Default uploaded file
if not uploaded_file:
self.upload_file = UploadedFile(
name=self.file_name,
user=user,
description="Panel results",
row_number=self.nrows,
content=self.content,
notes="",
content_type="PANEL_RESULTS",
)
self.upload_file.save()
    def validate(self):
        """Validate file for completeness of reference data.

        Runs syntax-level checks on the parsed dataframe. Every problem
        found is persisted as a ValidationEntry row and appended to the
        returned list:

        * required columns missing (FATAL)
        * static columns missing (ERROR)
        * more than one unique panel value in the file (FATAL)
        * panels not present in the Panel table (WARN)
        * unregistered parameter columns (WARN)
        * unregistered *derived* parameter columns (INFO - these are
          added dynamically during upload)

        FATAL and ERROR findings also mark the uploaded file as having
        invalid syntax (``valid_syntax = False``).

        Parameters
        ----------
        None

        Returns
        -------
        validation_errors : list
            List of saved ValidationEntry objects - basically dicts
            whose keys are types of errors and values are descriptions.
            An empty list is returned if there are no errors.
        """
        # Collected ValidationEntry objects; empty list means the file passed.
        validation_errors = []
        # Check we have the required columns needed for upload to proceed.
        required_columns_missing = []
        for required_column in self.required_columns:
            if required_column not in self.df.columns:
                required_columns_missing.append(required_column)
        if len(required_columns_missing) > 0:
            # Upload cannot proceed at all without these -> FATAL.
            error = ValidationEntry(
                subject_file=self.upload_file,
                key="required_columns_missing",
                value=required_columns_missing,
                entry_type="FATAL",
                validation_type="SYNTAX",
            )
            error.save()
            validation_errors.append(error)
            self.upload_file.valid_syntax = False
            self.upload_file.save()
        # Check we have the expected static (non-parameter) columns.
        static_columns_missing = []
        for static_column in self.static_columns:
            if static_column not in self.df.columns:
                static_columns_missing.append(static_column)
        if len(static_columns_missing) > 0:
            error = ValidationEntry(
                subject_file=self.upload_file,
                key="static_columns_missing",
                value=static_columns_missing,
                entry_type="ERROR",
                validation_type="SYNTAX",
            )
            error.save()
            validation_errors.append(error)
            self.upload_file.valid_syntax = False
            self.upload_file.save()
        # Check that all the rows belong to the same panel. It is
        # dangerous to proceed otherwise, mainly because several
        # parameter names are dynamically composed from the panel name.
        if "Panel" in self.df.columns:
            panels_in_data = self.df["Panel"].unique().tolist()
            n_unique_panels_in_data = len(panels_in_data)
            if n_unique_panels_in_data != 1:
                error = ValidationEntry(
                    subject_file=self.upload_file,
                    key="unique_panel_error",
                    value=f"Expected 1 unique value for panels in each record"
                    + f". Got {n_unique_panels_in_data}: {panels_in_data}",
                    entry_type="FATAL",
                    validation_type="SYNTAX",
                )
                error.save()
                validation_errors.append(error)
                self.upload_file.valid_syntax = False
                self.upload_file.save()
            # Check if the panel(s) are present in the Panel table.
            # Panel names are stored upper-case in the Panel table.
            panels_in_data_pk = []
            unknown_panels = []
            for panel in panels_in_data:
                try:
                    panels_in_data_pk.append(Panel.objects.get(name=panel.upper()).id)
                except Panel.DoesNotExist as e:
                    unknown_panels.append(panel)
            if len(unknown_panels) > 0:
                error = ValidationEntry(
                    subject_file=self.upload_file,
                    key="unknown_panel_error",
                    value=f"The following panels are not in Panel table: {unknown_panels}",
                    entry_type="WARN",
                    validation_type="SYNTAX",
                )
                error.save()
                validation_errors.append(error)
        else:
            # ToDo: Can we continue without unique panels?
            panels_in_data = []
            panels_in_data_pk = []
        # Parameter columns with no Parameter table entry: plain ones are
        # only warned about; derived ones are created during upload (INFO).
        if len(self.unregistered_parameters) > 0:
            error = ValidationEntry(
                subject_file=self.upload_file,
                key="unregistered_parameters",
                value=self.unregistered_parameters,
                entry_type="WARN",
                validation_type="SYNTAX",
            )
            error.save()
            validation_errors.append(error)
        if len(self.unregistered_derived_parameters) > 0:
            error = ValidationEntry(
                subject_file=self.upload_file,
                key="unregistered_derived_parameters - will be added during upload",
                value=self.unregistered_derived_parameters,
                entry_type="INFO",
                validation_type="SYNTAX",
            )
            error.save()
            validation_errors.append(error)
        # Check all fields needed for processed_sample table present
        # Check all clinical samples present in processed_sample table
        # Enter values into processed_sample, processed_sample,
        # numeric_value and text_parameter
        # Print out list of validation errors
        # print("Validation errors:")
        return validation_errors
    def upload(self, dry_run=False):
        """Upload file to respective tables.

        Upload data in clinical sample results for panel into the database.
        We assume that all the results here are based on one panel (ToDo:
        need to confirm whether to throw error during validation if more
        than one panel). The upload is carried out in an atomic transaction
        and if there are any errors nothing is written to the database. If
        the dry_run parameter is True nothing is written to the database.
        This is useful to get details of any records that have issues that
        would otherwise be missed when writing to the database.

        Workflow:
            1 - Details of the file being uploaded are written to the
                UploadedFile table - the ID of this file is saved so that
                it can be stored with each record in the Result table
            2 - covid patient IDs loaded into Patient table
                create if they do not exist
            3 - For each row create unique record in Result table if it
                does not already exist. Uniqueness is by
                (panel, fcs_file_name, gating_strategy) then store:
                (a) patient_id in Patient table
                (b) sample_id (and any other sample metadata in
                    ProcessedSample table
                (c) FCS file metadata into DataProcessing table
                (d) Parameters and values for each sample into
                    NumericValue, DateValue and TextValue tables

        Parameters
        ----------
        dry_run : boolean
            Indicates it's going to attempt to do the upload without committing the changes.

        Returns
        -------
        upload_report : dict
            Details of how upload proceeded. Keys are:
            rows_processed : int - No. of rows from csv file
            rows_with_issues : int - No. of rows that had issues
            validation : list - ValidationEntry objects, one per issue,
                         each naming the row/field concerned. Empty list
                         is returned if there are no issues.
        """
        # Assume all checks done - will stop and terminate upload if
        # any errors encountered
        upload_issues = []
        # Row indices that had at least one issue (set: counted once per row).
        rows_with_issues = set()
        with transaction.atomic():
            # Ensure all sample numbers are in processed_sample table
            # and respective records for patients exist.
            # Sample ids look like "pxxxnxx"; the patient id is the part
            # before the first "n".
            sample_ids = self.df[self.sc_clinical_sample].unique().tolist()
            patient_ids = [str(s_id).split("n")[0] for s_id in sample_ids]
            processed_sample_pks = {}
            for patient_id, sample_id in zip(patient_ids, sample_ids):
                patient = Patient.objects.get_or_create(patient_id=patient_id)[0]
                processed_sample = ProcessedSample.objects.get_or_create(
                    clinical_sample_id=sample_id, patient=patient
                )[0]
                processed_sample_pks[sample_id] = processed_sample.pk
            # Get the panel(s) pks
            panels_pk = {}
            for panel in self.panels:
                panels_pk[panel] = Panel.objects.get(name=panel.upper()).id
            # Store first panel primary key for use later
            panel_pk = panels_pk[self.panels[0]]
            # Append any unregistered derived parameters to parameter table
            for parameter_to_add in self.unregistered_derived_parameters:
                parameter, created = Parameter.objects.get_or_create(
                    gating_hierarchy=parameter_to_add, panel_id=panel_pk
                )
                parameter.internal_name = parameter_to_add
                parameter.public_name = parameter_to_add
                parameter.is_reference_parameter = False
                # Columns ending in "freq" are derived frequencies; the
                # others ("Count_back") are derived counts.
                if parameter_to_add.endswith("freq"):
                    parameter.unit = "Derived frequency"
                else:
                    parameter.unit = "Derived count"
                parameter.data_type = "PanelNumeric"
                parameter.description = parameter.unit
                parameter.save()
                self.parameter_columns.append(parameter_to_add)
            # Get parameter_ids for NumericParameters
            parameters_pk = {}
            for parameter in self.parameter_columns:
                parameters_pk[parameter] = Parameter.objects.get(
                    gating_hierarchy=parameter
                ).id
            # Ditto for pseudo parameters (date, text, numeric)
            pseudo_parameters_pk = {}
            for column, parameter in self.pseudo_parameters_numeric:
                pseudo_parameters_pk[parameter] = Parameter.objects.get(
                    gating_hierarchy=parameter
                ).id
            for column, parameter in self.pseudo_parameters_date:
                pseudo_parameters_pk[parameter] = Parameter.objects.get(
                    gating_hierarchy=parameter
                ).id
            for column, parameter in self.pseudo_parameters_text:
                pseudo_parameters_pk[parameter] = Parameter.objects.get(
                    gating_hierarchy=parameter
                ).id
            # Store details in relevant tables
            for index, row in self.df.iterrows():
                # Only proceed if sample_id is valid (pxxxnxx style).
                sample_id = str(row[self.sc_clinical_sample])
                if not sample_id.upper().startswith("P") or len(sample_id) < 4:
                    validation_entry = ValidationEntry(
                        subject_file=self.upload_file,
                        key=f"row:{index} field:Clinical_sample",
                        value=f"Value ({sample_id}) not a valid "
                        + "clinical sample id. Expected pxxxnxx. "
                        + "All entries for this row not loaded.",
                        entry_type="WARN",
                        validation_type="MODEL",
                    )
                    upload_issues.append(validation_entry)
                    rows_with_issues.add(index)
                    continue
                # Data processing details. The FCS file name must contain
                # the sample id, otherwise the row is skipped.
                fcs_file_name = row[self.sc_filename]
                if type(fcs_file_name) == str and fcs_file_name.find(sample_id) >= 0:
                    data_processing, created = DataProcessing.objects.get_or_create(
                        fcs_file_name=fcs_file_name, panel_id=panels_pk[row["Panel"]]
                    )
                else:
                    validation_entry = ValidationEntry(
                        subject_file=self.upload_file,
                        key=f"row:{index} field:{self.sc_filename}",
                        value=f"Value {fcs_file_name} does not contain the"
                        + f" sample ID ({sample_id}) - row not loaded",
                        entry_type="WARN",
                        validation_type="MODEL",
                    )
                    upload_issues.append(validation_entry)
                    rows_with_issues.add(index)
                    continue
                # Create an entry in the results table
                result = Result.objects.get_or_create(
                    processed_sample_id=processed_sample_pks[sample_id],
                    gating_strategy=self.gating_strategy,
                    panel_id=panel_pk,
                    data_processing=data_processing,
                )[0]
                result.uploaded_file = self.upload_file
                result.save()
                # Store data for parameters (numeric values only).
                for parameter, parameter_pk in parameters_pk.items():
                    if isinstance(row[parameter], numbers.Number) and not np.isnan(
                        row[parameter]
                    ):
                        numeric_value, created = NumericValue.objects.get_or_create(
                            result_id=result.id, parameter_id=parameters_pk[parameter]
                        )
                        numeric_value.value = row[parameter]
                        numeric_value.save()
                    else:
                        validation_entry = ValidationEntry(
                            subject_file=self.upload_file,
                            key=f"row:{index} parameter:{parameter}",
                            value=f"Value ({row[parameter]}) not a "
                            + "number - not uploaded to NumericValue"
                            + " table",
                            entry_type="WARN",
                            validation_type="MODEL",
                        )
                        upload_issues.append(validation_entry)
                        rows_with_issues.add(index)
                # Store numeric pseudo parameters
                for column, parameter in self.pseudo_parameters_numeric:
                    value = row[column]
                    if isinstance(value, numbers.Number) and not np.isnan(value):
                        numeric_value, created = NumericValue.objects.get_or_create(
                            result_id=result.id,
                            parameter_id=pseudo_parameters_pk[parameter],
                        )
                        numeric_value.value = value
                        numeric_value.save()
                    else:
                        validation_entry = ValidationEntry(
                            subject_file=self.upload_file,
                            key=f"row:{index} parameter:{parameter}",
                            value=f"Value ({value}) not a "
                            + "number - not uploaded to NumericValue"
                            + " table",
                            entry_type="WARN",
                            validation_type="MODEL",
                        )
                        upload_issues.append(validation_entry)
                        rows_with_issues.add(index)
                # Store date pseudo parameters
                for column, parameter in self.pseudo_parameters_date:
                    value = row[column]
                    if isinstance(value, pd.Timestamp) and not pd.isnull(value):
                        date_value, created = DateValue.objects.get_or_create(
                            result_id=result.id,
                            parameter_id=pseudo_parameters_pk[parameter],
                        )
                        date_value.value = value
                        date_value.save()
                    else:
                        validation_entry = ValidationEntry(
                            subject_file=self.upload_file,
                            key=f"row:{index} parameter:{parameter}",
                            value=f"Value ({value}) not a "
                            + "Date - not uploaded to DateValue"
                            + " table",
                            entry_type="WARN",
                            validation_type="MODEL",
                        )
                        upload_issues.append(validation_entry)
                        rows_with_issues.add(index)
                # Store text pseudo parameters. Empty/NaN comments are
                # silently skipped (not treated as an issue).
                for column, parameter in self.pseudo_parameters_text:
                    value = str(row[column]).strip()
                    if len(value) > 0 and value != "nan":
                        text_value, created = TextValue.objects.get_or_create(
                            result_id=result.id,
                            parameter_id=pseudo_parameters_pk[parameter],
                        )
                        text_value.value = value
                        text_value.save()
            upload_report = {
                "rows_processed": self.nrows,
                "rows_with_issues": len(rows_with_issues),
                "validation": upload_issues,
            }
            # Dry run: mark the whole atomic block for rollback on exit.
            if dry_run:
                transaction.set_rollback(True)
            if upload_issues:
                for issue in upload_issues:
                    issue.save()
            else:
                self.upload_file.valid_model = True
                self.upload_file.save()
        return upload_report
class PatientFile:
    """Uploads a file with anonymised patient details.

    The CSV must contain a ``patient`` column; every other column is
    treated as patient metadata and is dynamically registered in the
    PatientMetadataDict table during upload.
    """
    def __init__(
        self,
        file_name=None,
        file_contents=None,
        uploaded_file: UploadedFile = None,
        user: User = None,
    ):
        # When re-processing an existing upload, reuse its name/content.
        if uploaded_file:
            self.upload_file = uploaded_file
            file_name = uploaded_file.name
            file_contents = uploaded_file.content
        self.content = file_contents
        self.file_name = file_name
        # read_csv accepts a path or a file-like object here.
        self.df = pd.read_csv(self.content)
        self.nrows = len(self.df)
        # Default uploaded file: create and persist a record when this is
        # a fresh upload rather than a re-run of an existing one.
        if not uploaded_file:
            self.upload_file = UploadedFile(
                name=self.file_name,
                user=user,
                description="Patient data",
                row_number=self.nrows,
                content=self.content,
                notes="",
                content_type="PATIENT_DATA",
            )
            self.upload_file.save()
        self.patient_ids = self.df["patient"].unique().tolist()
    def validate(self):
        """No syntax validation is performed for patient files."""
        return []
    def upload(self, dry_run=False):
        """Upload data to relevant tables.

        Creates Patient rows for each ``patient`` id and stores every
        other column as a PatientMetadata value, registering the column
        name in PatientMetadataDict if needed. Runs inside an atomic
        transaction; when ``dry_run`` is True the transaction is rolled
        back. Returns a dict with rows_processed, rows_with_issues and
        upload_issues (list of ValidationEntry objects).
        """
        upload_issues = []
        rows_with_issues = []
        with transaction.atomic():
            # Create metadata dict entries if necessary - every non-patient
            # column becomes a (lower-cased) metadata key.
            columns = self.df.columns.tolist()
            columns.remove("patient")
            metadata_dicts = {}
            for column in columns:
                column_lc = column.lower()
                metadata_dict, created = PatientMetadataDict.objects.get_or_create(
                    name=column_lc
                )
                if created:
                    metadata_dict.description = f"{column}"
                    metadata_dict.notes = "Dynamically added"
                    metadata_dict.save()
                metadata_dicts[column] = metadata_dict
            # Enter details for all patients
            for index, row in self.df.iterrows():
                patient_id = str(row["patient"])
                # Skip ids that do not look like "pxxx"; record an issue.
                if not patient_id.upper().startswith("P"):
                    validation_entry = ValidationEntry(
                        subject_file=self.upload_file,
                        key=f"row:{index} field:patient",
                        value=f"Value ({patient_id}) not valid. "
                        + "Expected pxxx. Entries for this id not loaded.",
                        entry_type="WARN",
                        validation_type="MODEL",
                    )
                    upload_issues.append(validation_entry)
                    rows_with_issues.append(index)
                    continue
                patient = Patient.objects.get_or_create(patient_id=patient_id)[0]
                # Store metadata associated with patient
                for column, metadata_dict in metadata_dicts.items():
                    value = row[column]
                    patient_metadata = PatientMetadata.objects.get_or_create(
                        patient=patient, metadata_key=metadata_dict
                    )[0]
                    patient_metadata.metadata_value = value
                    patient_metadata.save()
            if upload_issues:
                for issue in upload_issues:
                    issue.save()
            else:
                self.upload_file.valid_model = True
            if dry_run:
                transaction.set_rollback(True)
            else:
                # Put this here as I think uploaded file is also saved to disk. Can this be rolled back?
                self.upload_file.save()
        upload_report = {
            "rows_processed": self.nrows,
            "rows_with_issues": len(rows_with_issues),
            "upload_issues": upload_issues,
        }
        return upload_report
|
#!/usr/bin/env python
# ALTA data transfer: Uses the iROD client to transfer data from ALTA
# Example usage: >> python getdata_alta.py 180316 004-010 00-36
# <NAME> (<EMAIL>)
###################################################################################################
from __future__ import print_function
import os
import sys
import time
import logging
import subprocess
FNULL = open(os.devnull, 'w')
###################################################################################################
def parse_list(spec):
    """Convert a string specification like 00-04,07,09-12 into a list [0,1,2,3,4,7,9,10,11,12]

    Args:
        spec (str): string specification

    Returns:
        List[int]

    Raises:
        ValueError: if a range's end is smaller than its begin, or a part
            is not an integer.

    Example:
        >>> parse_list("00-04,07,09-12")
        [0, 1, 2, 3, 4, 7, 9, 10, 11, 12]

        >>> parse_list("05-04")
        Traceback (most recent call last):
            ...
        ValueError: In specification 05-04, end should not be smaller than begin
    """
    ret_list = []
    for spec_part in spec.split(","):
        if "-" in spec_part:
            begin, end = spec_part.split("-")
            # Compare numerically: the original compared the raw strings,
            # which wrongly rejected ranges such as "9-10" ("10" < "9"
            # lexicographically).
            if int(end) < int(begin):
                raise ValueError(
                    "In specification %s, end should not be smaller than begin" % spec_part)
            ret_list += range(int(begin), int(end) + 1)
        else:
            ret_list += [int(spec_part)]
    return ret_list
###################################################################################################
def get_alta_dir(date, task_id, beam_nr, alta_exception):
    """Return the full ALTA path for one beam of one observation.

    The ALTA archive layout changed several times over the years, so the
    location is chosen from the observation date, one special-cased
    observation, and a probe of the cold-storage staging area.

    Args:
        date (str): date for which location is requested
        task_id (int): task id
        beam_nr (int): beam id
        alta_exception (bool): force 3 digits task id, old directory

    Returns:
        str: location in ALTA, including the date itself

    Examples:
        >>> get_alta_dir(180201, 5, 35, False)
        '/altaZone/home/apertif_main/wcudata/WSRTA18020105/WSRTA18020105_B035.MS'
        >>> get_alta_dir(180321, 5, 35, False)
        '/altaZone/home/apertif_main/wcudata/WSRTA180321005/WSRTA180321005_B035.MS'
        >>> get_alta_dir(181205, 5, 35, False)
        '/altaZone/archive/apertif_main/visibilities_default/181205005/WSRTA181205005_B035.MS'
    """
    obs = f"{date}{task_id:03d}"
    ms = f"WSRTA{obs}_B{beam_nr:03d}.MS"
    # Probe the cold-storage staging area; `ils` exits with 0 when the
    # tarred measurement set has been staged there.
    staged_tar = f"/altaZone/stage/apertif_main/visibilities_default/{obs}/{ms}.tar"
    stage_probe = subprocess.call(f"ils {staged_tar}".split(), stdout=FNULL, stderr=FNULL)
    if int(date) < 180216:
        # Oldest layout: 2-digit task ids under wcudata.
        old_obs = f"{date}{task_id:02d}"
        return f"/altaZone/home/apertif_main/wcudata/WSRTA{old_obs}/WSRTA{old_obs}_B{beam_nr:03d}.MS"
    elif int(date) < 181003 or alta_exception:
        # 3-digit task ids, still under wcudata.
        return f"/altaZone/home/apertif_main/wcudata/WSRTA{obs}/{ms}"
    elif int(obs) == 190326001:
        # Single observation that only ever lived in the ingest area.
        return f"/altaZone/ingest/apertif_main/visibilities_default/{obs}/{ms}"
    elif stage_probe == 0:
        # Data has been retrieved from cold storage: use the staged tar.
        return staged_tar
    else:
        # Default modern location in the archive.
        return f"/altaZone/archive/apertif_main/visibilities_default/{obs}/{ms}"
###################################################################################################
def getstatus_alta(date, task_id, beam):
    """Check whether one beam of an observation is present on ALTA.

    Args:
        date (int or str): date of the observation. Format: YYMMDD
        task_id (int or str): ID number of the observation. Format: NNN
        beam (int or str): beam number to copy. Format: NN

    Returns:
        bool: True if the file is available, False if not
    """
    location = get_alta_dir(date, int(task_id), int(beam), False)
    # `ils` exits with status 0 when the path exists in iRODS.
    exit_code = subprocess.call("ils {}".format(location).split(), stdout=FNULL, stderr=FNULL)
    return exit_code == 0
###################################################################################################
def getdata_alta(date, task_ids, beams, targetdir=".", tmpdir=".", alta_exception=False, check_with_rsync=True):
    """Download data from ALTA using low-level IRODS commands.

    For every (beam, task_id) combination the measurement set (or its
    tar, when the data sits in cold storage) is fetched with ``iget``,
    untarred/renamed if necessary, and optionally verified with
    ``irsync`` afterwards.

    Args:
        date (str): date of the observation
        task_ids (List[int] or int): list of task_ids, or a single task_id (int)
        beams (List[int] or int): list of beam numbers, or a single beam number (int)
        targetdir (str): directory to put the downloaded files
        tmpdir (str): directory for temporary files
        alta_exception (bool): force 3 digits task id, old directory
        check_with_rsync (bool): run rsync on the result of iget to verify the data got in
    """
    # Time the transfer
    start = time.time()
    logger = logging.getLogger("GET_ALTA")
    logger.setLevel(logging.DEBUG)
    # Accept scalars as well as lists for task ids and beams.
    if isinstance(task_ids, int):
        task_ids = [task_ids]
    if isinstance(beams, int):
        beams = [beams]
    # Normalise directories: empty -> current dir, always ends with "/".
    if tmpdir == "":
        tmpdir = "."
    if targetdir == "":
        targetdir = "."
    if tmpdir[-1] != "/":
        tmpdir += "/"
    if targetdir[-1] != "/":
        targetdir += "/"
    logger.debug('Start getting data from ALTA')
    # Fixed: was logging.debug (root logger); use the module logger.
    logger.debug('Beams: %s' % beams)
    for beam_nr in beams:
        logger.debug('Processing beam %.3d' % beam_nr)
        for task_id in task_ids:
            logger.debug('Processing task ID %.3d' % task_id)
            alta_dir = get_alta_dir(date, task_id, beam_nr, alta_exception)
            if alta_dir[-2:] == 'MS':
                cmd = "iget -rfPIT -X {tmpdir}WSRTA{date}{task_id:03d}_B{beam_nr:03d}-icat.irods-status --lfrestart " \
                      "{tmpdir}WSRTA{date}{task_id:03d}_B{beam_nr:03d}-icat.lf-irods-status --retries 5 {alta_dir} " \
                      "{targetdir}".format(**locals())
                logger.debug(cmd)
                subprocess.check_call(cmd, shell=True, stdout=FNULL, stderr=FNULL)
            #check for tar file and untar if needed:
            elif alta_dir[-3:] == 'tar':
                # Strip the trailing slash; rstrip is idempotent, so repeated
                # tar downloads no longer eat one character of the path per
                # iteration (the old `targetdir[:-1]` did exactly that).
                targetdir = targetdir.rstrip("/")
                cmd = "iget -rfPIT -X {tmpdir}WSRTA{date}{task_id:03d}_B{beam_nr:03d}-icat.irods-status --lfrestart " \
                      "{tmpdir}WSRTA{date}{task_id:03d}_B{beam_nr:03d}-icat.lf-irods-status --retries 5 {alta_dir} " \
                      "{targetdir}.tar".format(**locals())
                logger.debug(cmd)
                subprocess.check_call(cmd, shell=True, stdout=FNULL, stderr=FNULL)
                head, tail = os.path.split(targetdir)
                tarcmd = "tar -xf {targetdir}.tar -C {head}".format(**locals())
                logger.debug(tarcmd)
                #subprocess.check_call(tarcmd, shell=True, stdout=FNULL, stderr=FNULL)
                #force untarring
                os.system(tarcmd)
                #have to rename the untarred MS to the requested target name
                head, tail = os.path.split(targetdir)
                print(head)
                print(os.path.join(head, 'WSRTA{date}{task_id:03d}_B{beam_nr:03d}.MS'.format(**locals())))
                logger.debug("Rename untarred file to target name")
                os.rename(os.path.join(head, 'WSRTA{date}{task_id:03d}_B{beam_nr:03d}.MS'.format(**locals())), targetdir)
                #remove tar file
                logger.debug("Removing tar file")
                os.remove("{targetdir}.tar".format(**locals()))
    # Clean up iget restart/status files.
    os.system('rm -rf {tmpdir}*irods-status'.format(**locals()))
    # Add verification at the end of the transfer
    if check_with_rsync:
        for beam_nr in beams:
            logger.info('Verifying beam %.3d... ######' % beam_nr)
            for task_id in task_ids:
                logger.info('Verifying task ID %.3d...' % task_id)
                # Toggle for when we started using more digits:
                alta_dir = get_alta_dir(date, task_id, beam_nr, alta_exception)
                if targetdir == '.':
                    # Fixed: this template was never passed through .format(),
                    # so local_dir was the literal "{targetdir}..." string.
                    # NOTE(review): after normalisation targetdir ends in "/"
                    # (or had its slash stripped in the tar branch), so this
                    # branch only triggers for tar downloads into "./".
                    local_dir = "{targetdir}WSRTA{date}{task_id:03d}_B{beam_nr:03d}.MS".format(**locals())
                else:
                    local_dir = targetdir
                cmd = "irsync -srl i:{alta_dir} {local_dir} >> " \
                      "{tmpdir}transfer_WSRTA{date}{task_id:03d}_to_alta_verify.log".format(
                          **locals())
                subprocess.check_call(
                    cmd, shell=True, stdout=FNULL, stderr=FNULL)
        # Identify server details
        hostname = os.popen('hostname').read().strip()
        # Check for failed files: irsync flags them with 'N' in the log.
        for task_id in task_ids:
            logger.debug('Checking failed files for task ID %.3d' % task_id)
            cmd = 'grep N {tmpdir}transfer_WSRTA{date}{task_id:03d}_to_alta_verify.log | wc -l'.format(
                **locals())
            output = os.popen(cmd)
            n_failed_files = output.read().split()[0]
            logger.warning('Number of failed files: %s', n_failed_files)
    # Time the transfer
    end = time.time()
    # Print the results
    diff = (end - start) / 60.  # in min
    logger.debug("Total time to transfer data: %.2f min" % diff)
    logger.debug("Done getting data from ALTA")
###################################################################################################
if __name__ == "__main__":
    # Run the doctests in this module first (e.g. the parse_list examples).
    import doctest
    doctest.testmod()
    logging.basicConfig()
    args = sys.argv
    # Get date (positional argument 1)
    try:
        date = args[1]
    except Exception:
        raise Exception("Date required! Format: YYMMDD e.g. 180309")
    # Get ID range (positional argument 2)
    try:
        irange = args[2]
    except Exception:
        raise Exception("ID range required! Format: NNN-NNN e.g. 002-010")
    # Get beam range (positional argument 3)
    try:
        brange = args[3]
    except Exception:
        raise Exception("Beam range required! Format: NN-NN e.g. 00-37")
    # Get beams
    # Optional 4th argument: 'Y' forces the old ALTA directory layout
    # (see get_alta_dir's alta_exception flag); anything else means False.
    try:
        alta_exception = args[4]
        if alta_exception == 'Y':
            alta_exception = True
        else:
            alta_exception = False
    except Exception:
        alta_exception = False
    # Now with all the information required, loop through beams
    beams = parse_list(brange)
    # Now with all the information required, loop through task_ids
    task_ids = parse_list(irange)
    getdata_alta(date, task_ids, beams, ".", ".", alta_exception)
|
import glob
import os
import sys
import copy
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyabf
from ipfx import feature_extractor
from ipfx import subthresh_features as subt
print("feature extractor loaded")
from .abf_ipfx_dataframes import _build_full_df, _build_sweepwise_dataframe, save_data_frames
from .loadABF import loadABF
from .patch_utils import plotabf, load_protocols, find_non_zero_range
from .QC import run_qc
default_dict = {'start': 0, 'end': 0, 'filter': 0}
def folder_feature_extract(files, param_dict, plot_sweeps=-1, protocol_name='IC1', para=1):
    """Recursively scan a folder for .abf files and extract spike features.

    Each matching file is processed by preprocess_abf, in parallel across
    `para` joblib workers. param_dict is deep-copied per file because the
    downstream analysis mutates it (pop of 'stim_find', start/end updates).

    Parameters
    ----------
    files : str
        Root folder to search (recursively) for .abf files.
    param_dict : dict
        SpikeFeatureExtractor keyword arguments plus a 'stim_find' flag.
    plot_sweeps : int
        Sweep-plotting flag forwarded to preprocess_abf (default -1).
    protocol_name : str
        Only files whose ABF protocol contains this string are analysed.
    para : int
        Number of parallel joblib workers.

    Returns
    -------
    (dfs, df_spike_count, df_running_avg_count) : tuple of DataFrames
        dfs and df_running_avg_count are currently always empty; kept for
        interface compatibility with callers expecting three frames.
    """
    # Removed unused locals from the original (debugplot, running_lab).
    dfs = pd.DataFrame()
    df_running_avg_count = pd.DataFrame()
    filelist = glob.glob(files + "/**/*.abf", recursive=True)
    spike_count_frames = Parallel(n_jobs=para)(
        delayed(preprocess_abf)(f, copy.deepcopy(param_dict), plot_sweeps, protocol_name)
        for f in filelist)
    df_spike_count = pd.concat(spike_count_frames, sort=True)
    return dfs, df_spike_count, df_running_avg_count
def preprocess_abf(file_path, param_dict, plot_sweeps, protocol_name):
    """Load one ABF file and run spike analysis if it matches the protocol.

    Returns the per-file spike-count DataFrame, or an empty DataFrame when
    the file uses the wrong protocol / clamp mode or fails to process
    (best-effort batch semantics for folder_feature_extract).
    """
    try:
        abf = pyabf.ABF(file_path)
        # Skip current-clamp label mismatches and non-matching protocols.
        if abf.sweepLabelY != 'Clamp Current (pA)' and protocol_name in abf.protocol:
            print(file_path + ' import')
            temp_spike_df, df, temp_running_bin = analyze_abf(abf, sweeplist=None, plot=plot_sweeps, param_dict=param_dict)
            return temp_spike_df
        else:
            print('Not correct protocol: ' + abf.protocol)
            return pd.DataFrame()
    except Exception as e:
        # The original bare `except:` silently swallowed everything,
        # including KeyboardInterrupt. Keep the best-effort contract but
        # report the failure and let system-exiting exceptions propagate.
        print('Failed to process ' + str(file_path) + ': ' + repr(e))
        return pd.DataFrame()
def analyze_spike_sweep(abf, sweepNumber, param_dict):
    """Run IPFX spike detection on a single sweep of an ABF recording.

    Returns (spike_in_sweep, spike_train): the per-spike feature frame and
    the sweep-level spike-train features for the selected sweep.
    """
    abf.setSweep(sweepNumber)
    spike_ext = feature_extractor.SpikeFeatureExtractor(**param_dict)
    train_ext = feature_extractor.SpikeTrainFeatureExtractor(start=param_dict['start'], end=param_dict['end'])
    dataT, dataV, dataI = abf.sweepX, abf.sweepY, abf.sweepC
    # Pad the command waveform with zeros when it is shorter than the
    # recorded trace so the extractor sees equal-length arrays.
    shortfall = dataV.shape[0] - dataI.shape[0]
    if shortfall > 0:
        dataI = np.hstack((dataI, np.full(shortfall, 0)))
    spike_in_sweep = spike_ext.process(dataT, dataV, dataI)
    spike_train = train_ext.process(dataT, dataV, dataI, spike_in_sweep)
    return spike_in_sweep, spike_train
def analyze_abf(abf, sweeplist=None, plot=-1, param_dict=None):
    """Analyze the (requested) sweeps of an ABF file for action potentials.

    Parameters
    ----------
    abf : pyabf.ABF
        Open recording; abf.data is NaN-sanitised in place.
    sweeplist : list or None
        Explicit sweep numbers to analyse; None means all sweeps.
    plot : int
        Sweep-plotting flag forwarded to plotabf.
    param_dict : dict
        SpikeFeatureExtractor kwargs plus a 'stim_find' flag.
        NOTE: mutated here ('stim_find' is popped, start/end updated) -
        callers pass deep copies.

    Returns
    -------
    (temp_spike_df, df, temp_running_bin) : tuple of DataFrames
    """
    np.nan_to_num(abf.data, nan=-9999, copy=False)
    #If there is more than one sweep, we need to ensure we dont iterate out of range
    if sweeplist is None:
        if abf.sweepCount > 1:
            sweepcount = abf.sweepList
        else:
            sweepcount = [0]
    else:
        # Bug fix: a caller-supplied sweeplist previously left `sweepcount`
        # undefined and raised NameError; honour it directly.
        sweepcount = sweeplist
    df = pd.DataFrame()
    #Now we walk through the sweeps looking for action potentials
    temp_spike_df = pd.DataFrame()
    temp_spike_df['filename'] = [abf.abfID]
    temp_spike_df['foldername'] = [os.path.dirname(abf.abfFilePath)]
    temp_running_bin = pd.DataFrame()
    stim_find = param_dict.pop('stim_find')
    #for now if user wants to filter by stim time we will just use the first sweep
    if stim_find:
        abf.setSweep(abf.sweepList[-1])
        start, end = find_non_zero_range(abf.sweepX, abf.sweepC)
        param_dict['end'] = end
        param_dict['start'] = start
        print('Stimulation time found: ' + str(start) + ' to ' + str(end))
    for sweepNumber in sweepcount:
        real_sweep_length = abf.sweepLengthSec - 0.0001
        # 1-based, zero-padded sweep label (001, 010, ...). zfill also
        # covers sweeps >= 99, which the old if/elif chain left undefined
        # (NameError); padding behaviour is unchanged for sweeps < 99.
        real_sweep_number = str(sweepNumber + 1).zfill(3)
        # Clamp the analysis window to the actual sweep length.
        if param_dict['start'] == 0 and param_dict['end'] == 0:
            param_dict['end'] = real_sweep_length
        elif param_dict['end'] > real_sweep_length:
            param_dict['end'] = real_sweep_length
        spike_in_sweep, spike_train = analyze_spike_sweep(abf, sweepNumber, param_dict) ### Returns the default Dataframe Returned by
        temp_spike_df, df, temp_running_bin = _build_sweepwise_dataframe(abf, real_sweep_number, spike_in_sweep, spike_train, temp_spike_df, df, temp_running_bin, param_dict)
    temp_spike_df, df, temp_running_bin = _build_full_df(abf, temp_spike_df, df, temp_running_bin, sweepcount)
    x, y, c = loadABF(abf.abfFilePath)
    _qc_data = run_qc(y, c)
    temp_spike_df['QC Mean RMS'] = _qc_data[0]
    temp_spike_df['QC Mean Sweep Drift'] = _qc_data[2]
    try:
        spiketimes = np.transpose(np.vstack((np.ravel(df['peak_index'].to_numpy()), np.ravel(df['sweep Number'].to_numpy()))))
        plotabf(abf, spiketimes, param_dict['start'], param_dict['end'], plot)
    except Exception:
        # Plotting is best-effort; e.g. no spikes -> missing columns.
        pass
    return temp_spike_df, df, temp_running_bin
class abfFeatExtractor(object):
    """Pairs a pyabf recording with IPFX spike/spike-train feature
    extractors configured from one set of detection parameters."""
    def __init__(self, abf, start=None, end=None, filter=10.,
                 dv_cutoff=20., max_interval=0.005, min_height=2., min_peak=-30.,
                 thresh_frac=0.05, reject_at_stim_start_interval=0):
        """Initialize SweepFeatures object.

        Parameters
        ----------
        abf : pyabf.ABF or str
            An open ABF recording, or a path to an .abf file to open.
        start : start of time window for feature analysis (optional)
        end : end of time window for feature analysis (optional)
        filter : cutoff frequency for 4-pole low-pass Bessel filter in kHz (optional, default 10)
        dv_cutoff : minimum dV/dt to qualify as a spike in V/s (optional, default 20)
        max_interval : maximum acceptable time between start of spike and time of peak in sec (optional, default 0.005)
        min_height : minimum acceptable height from threshold to peak in mV (optional, default 2)
        min_peak : minimum acceptable absolute peak level in mV (optional, default -30)
        thresh_frac : fraction of average upstroke for threshold calculation (optional, default 0.05)
        reject_at_stim_start_interval : duration of window after start to reject potential spikes (optional, default 0)
        """
        if isinstance(abf, pyabf.ABF):
            self.abf = abf
        elif isinstance(abf, str):
            # Bug fix: the original tested isinstance(abf, os.path.abspath)
            # (a function, so isinstance itself raised TypeError) and then
            # assigned the pyabf.ABF *class* instead of opening the file.
            self.abf = pyabf.ABF(abf)
        else:
            # Preserve the original failure mode (TypeError) for bad input,
            # but with an explicit, readable message.
            raise TypeError("abf must be a pyabf.ABF instance or a file path")
        self.start = start
        self.end = end
        self.filter = filter
        self.dv_cutoff = dv_cutoff
        self.max_interval = max_interval
        self.min_height = min_height
        self.min_peak = min_peak
        self.thresh_frac = thresh_frac
        self.reject_at_stim_start_interval = reject_at_stim_start_interval
        # Pre-built IPFX extractors sharing the parameters above.
        self.spikefeatureextractor = feature_extractor.SpikeFeatureExtractor(start=start, end=end, filter=filter, dv_cutoff=dv_cutoff, max_interval=max_interval, min_height=min_height, min_peak=min_peak, thresh_frac=thresh_frac, reject_at_stim_start_interval=reject_at_stim_start_interval)
        self.spiketrainextractor = feature_extractor.SpikeTrainFeatureExtractor(start=start, end=end)
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
# Yearly STS exports; each CSV holds two consecutive surgery years.
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
# Missing 'prcab' values are coded as 2 - presumably the "no/unknown"
# category; confirm against the STS data dictionary.
df_2010_2011['prcab'].fillna(2, inplace=True)
df_2012_2013['prcab'].fillna(2, inplace=True)
df_2014_2015['prcab'].fillna(2, inplace=True)
df_2016_2017['prcab'].fillna(2, inplace=True)
df_2018_2019['prcab'].fillna(2, inplace=True)
# Split each two-year frame into single-year frames using 'surgyear':
# ~mask keeps the first year of the pair, mask keeps the second.
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
# Accumulator frames filled by the groupby_* helpers below.
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
def groupby_siteid():
    """Tally total operations per siteid for every year 2010-2019.

    Builds a wide table with one ``<year>_total`` column per year, derives
    per-site distinct-year counts, totals and yearly averages, writes two
    CSV summaries, and seeds the module-level ``avg_siteid`` frame that the
    *_prcab aggregations rely on.
    """
    year_frames = [df_2010, df_2011, df_2012, df_2013, df_2014,
                   df_2015, df_2016, df_2017, df_2018, df_2019]
    yearly_counts = [
        frame.groupby('siteid')['siteid'].count().reset_index(name='%d_total' % year)
        for year, frame in enumerate(year_frames, start=2010)
    ]

    # Outer-merge the ten yearly tallies so every site appears exactly once.
    df_sum_all_Years = yearly_counts[0]
    for counts in yearly_counts[1:]:
        df_sum_all_Years = pd.merge(df_sum_all_Years, counts,
                                    on='siteid', how='outer')
    df_sum_all_Years.fillna(0, inplace=True)

    # Number of years in which the site reported at least one operation.
    cols = df_sum_all_Years.columns.difference(['siteid'])
    df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)

    # Grand total and per-active-year average.
    cols_sum = df_sum_all_Years.columns.difference(['siteid', 'Distinct_years'])
    df_sum_all_Years['Year_sum'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
    df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum'] / df_sum_all_Years['Distinct_years']
    df_sum_all_Years.to_csv("total op sum all years siteid.csv")

    print("details on site id dist:")
    print("num of all sites: ", len(df_sum_all_Years))

    # Sites that did not report in all ten years.
    less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years'] != 10]
    less_8.to_csv("total op less 10 years siteid.csv")
    print("num of sites with less years: ", len(less_8))
    print(np.unique(np.array(less_8['Distinct_years'])))

    # Publish the per-site summary for the *_prcab functions.
    avg_siteid['siteid'] = df_sum_all_Years['siteid']
    avg_siteid['total_year_sum'] = df_sum_all_Years['Year_sum']
    avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
    avg_siteid['num_of_years'] = df_sum_all_Years['Distinct_years']
def groupby_siteid_prcab():
    """Aggregate per-siteid re-operation (prcab == 1) and first-operation
    (prcab == 2) counts for 2010-2019, write CSV summaries, and combine
    them with the module-level ``avg_siteid`` frame into
    ``total_avg_site_id.csv``.

    NOTE(review): must run AFTER groupby_siteid(), which populates
    ``avg_siteid``; otherwise the divisions below operate on an empty frame.
    """
    # --- yearly re-operation counts per site (prcab == 1 marks a re-op) ---
    df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
    df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
    df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
    df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
    df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
    df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
    df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
    df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
    df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
    df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
    # Outer-merge the ten yearly re-op tallies into one wide frame.
    df1 =pd.merge(df2010, df2011, on='siteid', how='outer')
    df2 =pd.merge(df1, df2012, on='siteid', how='outer')
    df3 =pd.merge(df2, df2013, on='siteid', how='outer')
    df4 =pd.merge(df3, df2014, on='siteid', how='outer')
    df5 =pd.merge(df4, df2015, on='siteid', how='outer')
    df6 =pd.merge(df5, df2016, on='siteid', how='outer')
    df7 =pd.merge(df6, df2017, on='siteid', how='outer')
    df8 =pd.merge(df7, df2018, on='siteid', how='outer')
    df_sum_all_Years =pd.merge(df8, df2019, on='siteid', how='outer')
    df_sum_all_Years.fillna(0,inplace=True)
    cols = df_sum_all_Years.columns.difference(['siteid'])
    df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
    cols_sum = df_sum_all_Years.columns.difference(['siteid', 'Distinct_years_reop'])
    df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
    # NOTE(review): this division pairs rows by positional index, not by
    # siteid — it is only correct if this frame and avg_siteid list the
    # same sites in the same order; verify against groupby_siteid().
    df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_siteid['num_of_years']
    df_sum_all_Years.to_csv("sum all years siteid reop.csv")
    less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
    less_8.to_csv("less 10 years reop siteid.csv")
    print("num of sites with less years reop : ", len(less_8))
    x = np.array(less_8['Distinct_years_reop'])
    print(np.unique(x))
    # --- yearly first-operation counts per site (prcab == 2) ---
    df_10 = df_2010.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2010_Firstop')
    df_11 = df_2011.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2011_Firstop')
    df_12 = df_2012.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2012_Firstop')
    df_13 = df_2013.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2013_Firstop')
    df_14 = df_2014.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2014_Firstop')
    df_15 = df_2015.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2015_Firstop')
    df_16 = df_2016.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2016_Firstop')
    df_17 = df_2017.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2017_Firstop')
    df_18 = df_2018.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2018_Firstop')
    df_19 = df_2019.groupby('siteid')['prcab'].apply(lambda x:(x==2).sum()).reset_index(name='2019_Firstop')
    d1 = pd.merge(df_10, df_11, on='siteid', how='outer')
    d2 = pd.merge(d1, df_12, on='siteid', how='outer')
    d3 = pd.merge(d2, df_13, on='siteid', how='outer')
    d4 = pd.merge(d3, df_14, on='siteid', how='outer')
    d5 = pd.merge(d4, df_15, on='siteid', how='outer')
    d6 = pd.merge(d5, df_16, on='siteid', how='outer')
    d7 = pd.merge(d6, df_17, on='siteid', how='outer')
    d8 = pd.merge(d7, df_18, on='siteid', how='outer')
    df_sum_all_Years_total = pd.merge(d8, df_19, on='siteid', how='outer')
    df_sum_all_Years_total.fillna(0, inplace=True)
    cols = df_sum_all_Years_total.columns.difference(['siteid'])
    df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
    cols_sum = df_sum_all_Years_total.columns.difference(['siteid', 'Distinct_years'])
    df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
    # NOTE(review): same positional-alignment hazard as Year_avg_reop above.
    df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / avg_siteid['num_of_years']
    df_sum_all_Years_total.to_csv("First op sum all years siteid.csv")
    less = df_sum_all_Years_total[df_sum_all_Years_total['Distinct_years'] != 10]
    less.to_csv("First op less 10 years siteid.csv")
    print("First op num of sites with less years: ", len(less))
    x = np.array(less['Distinct_years'])
    print(np.unique(x))
    # --- combine totals, first-op and re-op summaries into one CSV ---
    temp_first = pd.DataFrame()
    temp_first['siteid'] = df_sum_all_Years_total['siteid']
    temp_first['Year_sum_Firstop'] = df_sum_all_Years_total['Year_sum']
    temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
    temp_reop = pd.DataFrame()
    temp_reop['siteid'] = df_sum_all_Years['siteid']
    temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
    temp_reop['Year_sum_reop'] = df_sum_all_Years['Year_sum_reop']
    df20 = pd.merge(avg_siteid, temp_first, on='siteid', how='outer')
    total_avg_site_id = pd.merge(df20, temp_reop, on='siteid', how='outer')
    # NOTE(review): ratios divide a yearly average by a year COUNT, then
    # scale by 100 — confirm this is the intended "percent of total" metric.
    total_avg_site_id['firstop/total'] = (total_avg_site_id['Year_avg_Firstop'] / total_avg_site_id['num_of_years']) * 100
    total_avg_site_id['reop/total'] = (total_avg_site_id['Year_avg_reop'] / total_avg_site_id['num_of_years']) * 100
    total_avg_site_id.fillna(0,inplace=True)
    total_avg_site_id.to_csv('total_avg_site_id.csv')
def groupby_surgid():
    """Tally total operations per surgid for every year 2010-2019.

    Mirrors groupby_siteid(): builds a wide per-year table, derives
    distinct-year counts, totals and yearly averages, writes two CSV
    summaries, and seeds the module-level ``avg_surgid`` frame.
    """
    year_frames = [df_2010, df_2011, df_2012, df_2013, df_2014,
                   df_2015, df_2016, df_2017, df_2018, df_2019]
    yearly_counts = [
        frame.groupby('surgid')['surgid'].count().reset_index(name='%d_total' % year)
        for year, frame in enumerate(year_frames, start=2010)
    ]

    # Outer-merge the ten yearly tallies so every surgeon appears once.
    df_sum_all_Years = yearly_counts[0]
    for counts in yearly_counts[1:]:
        df_sum_all_Years = pd.merge(df_sum_all_Years, counts,
                                    on='surgid', how='outer')
    df_sum_all_Years.fillna(0, inplace=True)

    # Number of years in which the surgeon reported at least one operation.
    cols = df_sum_all_Years.columns.difference(['surgid'])
    df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)

    # Grand total and per-active-year average.
    cols_sum = df_sum_all_Years.columns.difference(['surgid', 'Distinct_years'])
    df_sum_all_Years['Year_sum'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
    df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum'] / df_sum_all_Years['Distinct_years']
    df_sum_all_Years.to_csv("total op sum all years surgid.csv")

    print("details on surg id dist:")
    print("num of all surgid: ", len(df_sum_all_Years))

    # Surgeons that did not report in all ten years.
    less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years'] != 10]
    less_8.to_csv("total op less 10 years surgid.csv")
    print("num of surgid with less years: ", len(less_8))
    print(np.unique(np.array(less_8['Distinct_years'])))

    # Publish the per-surgeon summary for groupby_surgid_prcab().
    avg_surgid['surgid'] = df_sum_all_Years['surgid']
    avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
    avg_surgid['total_year_count'] = df_sum_all_Years['Year_sum']
    avg_surgid['num_of_years'] = df_sum_all_Years['Distinct_years']
def groupby_surgid_prcab():
    """Aggregate per-surgid re-operation (prcab == 1) and first-operation
    (prcab == 2) counts for 2010-2019, write CSV summaries, and combine
    them with the module-level ``avg_surgid`` frame into
    ``total_avg_surgid.csv``.

    NOTE(review): must run AFTER groupby_surgid(), which populates
    ``avg_surgid``; otherwise the divisions below operate on an empty frame.
    """
    # --- yearly re-operation counts per surgeon (prcab == 1) ---
    df2010 = df_2010.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
    df2011 = df_2011.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
    df2012 = df_2012.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
    df2013 = df_2013.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
    df2014 = df_2014.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
    df2015 = df_2015.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
    df2016 = df_2016.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
    df2017 = df_2017.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
    df2018 = df_2018.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
    df2019 = df_2019.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
    # Outer-merge the ten yearly re-op tallies into one wide frame.
    df1 = pd.merge(df2010, df2011, on='surgid', how='outer')
    df2 = pd.merge(df1, df2012, on='surgid', how='outer')
    df3 = pd.merge(df2, df2013, on='surgid', how='outer')
    df4 = pd.merge(df3, df2014, on='surgid', how='outer')
    df5 = pd.merge(df4, df2015, on='surgid', how='outer')
    df6 = pd.merge(df5, df2016, on='surgid', how='outer')
    df7 = pd.merge(df6, df2017, on='surgid', how='outer')
    df8 = pd.merge(df7, df2018, on='surgid', how='outer')
    df_sum_all_Years = pd.merge(df8, df2019, on='surgid', how='outer')
    df_sum_all_Years.fillna(0, inplace=True)
    cols = df_sum_all_Years.columns.difference(['surgid'])
    df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
    cols_sum = df_sum_all_Years.columns.difference(['surgid', 'Distinct_years_reop'])
    df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
    # NOTE(review): this division pairs rows by positional index, not by
    # surgid — correct only if this frame and avg_surgid list the same
    # surgeons in the same order; verify against groupby_surgid().
    df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_surgid['num_of_years']
    df_sum_all_Years.to_csv("sum all years surgid reop.csv")
    less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
    less_8.to_csv("less 10 years reop surgid.csv")
    print("num of surgid with less years reop : ", len(less_8))
    x = np.array(less_8['Distinct_years_reop'])
    print(np.unique(x))
    # --- yearly first-operation counts per surgeon (prcab == 2) ---
    df_10 = df_2010.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2010_Firstop')
    df_11 = df_2011.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2011_Firstop')
    df_12 = df_2012.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2012_Firstop')
    df_13 = df_2013.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2013_Firstop')
    df_14 = df_2014.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2014_Firstop')
    df_15 = df_2015.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2015_Firstop')
    df_16 = df_2016.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2016_Firstop')
    df_17 = df_2017.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2017_Firstop')
    df_18 = df_2018.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2018_Firstop')
    df_19 = df_2019.groupby('surgid')['prcab'].apply(lambda x: (x == 2).sum()).reset_index(name='2019_Firstop')
    d1 = pd.merge(df_10, df_11, on='surgid', how='outer')
    d2 = pd.merge(d1, df_12, on='surgid', how='outer')
    d3 = pd.merge(d2, df_13, on='surgid', how='outer')
    d4 = pd.merge(d3, df_14, on='surgid', how='outer')
    d5 = pd.merge(d4, df_15, on='surgid', how='outer')
    d6 = pd.merge(d5, df_16, on='surgid', how='outer')
    d7 = pd.merge(d6, df_17, on='surgid', how='outer')
    d8 = pd.merge(d7, df_18, on='surgid', how='outer')
    df_sum_all_Years_total = pd.merge(d8, df_19, on='surgid', how='outer')
    df_sum_all_Years_total.fillna(0, inplace=True)
    cols = df_sum_all_Years_total.columns.difference(['surgid'])
    df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
    cols_sum = df_sum_all_Years_total.columns.difference(['surgid', 'Distinct_years'])
    df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
    # NOTE(review): same positional-alignment hazard as Year_avg_reop above.
    df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / avg_surgid['num_of_years']
    df_sum_all_Years_total.to_csv("First op sum all years surgid.csv")
    less = df_sum_all_Years_total[df_sum_all_Years_total['Distinct_years'] != 10]
    less.to_csv("First op less 10 years surgid.csv")
    print("First op num of sites with less years: ", len(less))
    x = np.array(less['Distinct_years'])
    print(np.unique(x))
    # Earlier avg-based variant kept for reference:
    # temp_first = pd.DataFrame()
    # temp_first['surgid'] = df_sum_all_Years_total['surgid']
    # temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
    # temp_reop = pd.DataFrame()
    # temp_reop['surgid'] = df_sum_all_Years['surgid']
    # temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
    #
    # df20 = pd.merge(avg_surgid, temp_first, on='surgid', how='outer')
    # total_avg_surgid = pd.merge(df20, temp_reop, on='surgid', how='outer')
    #
    # total_avg_surgid['firstop/total'] = (total_avg_surgid['Year_avg_Firstop'] / total_avg_surgid['total_year_avg']) * 100
    # total_avg_surgid['reop/total'] = (total_avg_surgid['Year_avg_reop'] / total_avg_surgid['total_year_avg']) * 100
    # total_avg_surgid.fillna(0, inplace=True)
    # total_avg_surgid.to_csv('total_avg_surgid.csv')
    # --- combine totals, first-op and re-op summaries into one CSV ---
    temp_first = pd.DataFrame()
    temp_first['surgid'] = df_sum_all_Years_total['surgid']
    temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
    temp_first['Year_sum_Firstop'] = df_sum_all_Years_total['Year_sum']
    temp_reop = pd.DataFrame()
    temp_reop['surgid'] = df_sum_all_Years['surgid']
    temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
    temp_reop['Year_sum_reop'] = df_sum_all_Years['Year_sum_reop']
    df20 = pd.merge(avg_surgid, temp_first, on='surgid', how='outer')
    total_avg_surgid = pd.merge(df20, temp_reop, on='surgid', how='outer')
    # Share of first-ops / re-ops out of the surgeon's total operation count.
    total_avg_surgid['firstop/total'] = (total_avg_surgid['Year_sum_Firstop'] / total_avg_surgid['total_year_count']) * 100
    total_avg_surgid['reop/total'] = (total_avg_surgid['Year_sum_reop'] / total_avg_surgid['total_year_count']) * 100
    total_avg_surgid.fillna(0, inplace=True)
    total_avg_surgid.to_csv('total_avg_surgid.csv')
def draw_hist(data, num_of_bins, title, x_title, y_title, color):
    """Render *data* as a histogram with black bar edges and show it.

    Args:
        data: sequence of values to bin.
        num_of_bins: number of histogram bins.
        title / x_title / y_title: figure title and axis labels.
        color: fill color for the bars.
    """
    plt.hist(data, bins=num_of_bins, color=color, ec="black")
    axes = plt.gca()
    axes.set_title(title)
    axes.set_xlabel(x_title)
    axes.set_ylabel(y_title)
    plt.show()
# Run all aggregations.  Order matters: the *_prcab functions read the
# avg_siteid / avg_surgid frames populated by the totals functions.
groupby_siteid()
groupby_siteid_prcab()
groupby_surgid()
groupby_surgid_prcab()
# Reload the combined summaries written above for plotting.
df_avg_siteid = pd.read_csv("total_avg_site_id.csv")
df_avg_surgid = pd.read_csv("total_avg_surgid.csv")
# Historic plotting calls kept for reference (disabled):
# df_avg2_surgid = pd.read_csv("total_avg_surgid_sum avg count.csv")
# # # df_sum_hospid= pd.read_csv(path+"sum all years hospid.csv")
# #
# #
# draw_hist(df_avg_siteid['total_year_avg'],40,"siteid Histogram of yearly avg operation",'avg of Operation',"count of siteid",'skyblue')
# draw_hist(df_avg_siteid['Year_avg_Firstop'],40,"siteid Histogram of yearly avg First operation",'avg of First Operation',"count of siteid",'skyblue')
# draw_hist(df_avg_siteid['Year_avg_reop'],40,"siteid Histogram of yearly avg reOperation",'avg of reOperation',"count of siteid",'skyblue')
#
# draw_hist(df_avg_siteid['firstop/total'],40,"siteid Histogram of yearly avg First operation/Total operation",'% of First Operation',"count of siteid",'palegreen')
# draw_hist(df_avg_siteid['reop/total'],40,"siteid Histogram of yearly avg reOperation/Total operation",'% of reOperation',"count of siteid",'palegreen')
#
# # draw_hist(df_sum_surgid['Year_avg'],20,"surgid Histogram of yearly avg operation",'avg of Operation',"count of surgid")
# draw_hist(df_avg_surgid['total_year_avg'],40,"surgid Histogram of yearly avg operation",'avg of Operation',"count of surgid",'plum')
# draw_hist(df_avg_surgid['Year_avg_Firstop'],40,"surgid Histogram of yearly avg First operation",'avg of First Operation',"count of surgid",'plum')
# draw_hist(df_avg_surgid['Year_avg_reop'],40,"surgid Histogram of yearly avg reOperation",'avg of reOperation',"count of surgid",'plum')
#
# draw_hist(df_avg_surgid['firstop/total'],40,"surgid Histogram of yearly avg First operation/Total operation",'% of First Operation',"count of surgid",'bisque')
# draw_hist(df_avg_surgid['reop/total'],40,"surgid Histogram of yearly avg reOperation/Total operation",'% of reOperation',"count of surgid",'bisque')
#
# draw_hist(df_avg2_surgid['total_year_avg'],40,"surgid Histogram of yearly avg operation",'avg of Operation',"count of surgid",'plum')
# draw_hist(df_avg2_surgid['Year_avg_Firstop'],40,"surgid Histogram of yearly avg First operation",'avg of First Operation',"count of surgid",'plum')
# draw_hist(df_avg2_surgid['Year_avg_reop'],40,"surgid Histogram of yearly avg reOperation",'avg of reOperation',"count of surgid",'plum')
#
# draw_hist(df_avg2_surgid['firstop/total'],40,"surgid Histogram of yearly avg First operation/Total operation",'% of First Operation',"count of surgid",'bisque')
# draw_hist(df_avg2_surgid['reop/total'],40,"surgid Histogram of yearly avg reOperation/Total operation",'% of reOperation',"count of surgid",'bisque')
import numpy as np
import os, argparse, pickle, sys
from os.path import exists, join, isfile, dirname, abspath, split
import logging
from sklearn.neighbors import KDTree
import yaml
from .base_dataset import BaseDataset, BaseDatasetSplit
from .utils import DataProcessing
from ..utils import make_dir, DATASET
# Module-wide logging: INFO level with timestamped, module-tagged records.
logging.basicConfig(
    level=logging.INFO,
    format='%(levelname)s - %(asctime)s - %(module)s - %(message)s',
)
log = logging.getLogger(__name__)
class Rellis3D(BaseDataset):
    """
    This class is used to create a dataset based on the Rellis3D dataset, and
    used in visualizer, training, or testing. The dataset is best for semantic
    scene understanding.
    """

    def __init__(self,
                 dataset_path,
                 name='rellis3d',
                 cache_dir='./logs/cache',
                 use_cache=False,
                 class_weights=[
                     762620209,
                     0,
                     374236754,
                     207826491,
                     43383,
                     226059,
                     0,
                     319261,
                     15455,
                     752,
                     10,
                     876764,
                     17692783,
                     1760936,
                     365615949,
                     25059355,
                     4787808,
                     2159838,
                     7695463,
                     5874762,
                 ],
                 ignored_label_inds=[0],
                 test_result_folder='./test',
                 test_split=['02'],
                 training_split=['00', '01', '03', '04'],
                 validation_split=['02'],
                 all_split=['00', '01', '02', '03', '04'],
                 **kwargs):
        """
        Initialize the function by passing the dataset and other details.

        Args:
            dataset_path: The path to the dataset to use.
            name: The name of the dataset (rellis3d in this case).
            cache_dir: The directory where the cache is stored.
            use_cache: Indicates if the dataset should be cached.
            class_weights: The class weights to use in the dataset.
            ignored_label_inds: A list of labels that should be ignored in the dataset.
            test_result_folder: The folder where the test results should be stored.
            test_split / training_split / validation_split / all_split:
                Sequence ids belonging to each dataset split.

        Returns:
            class: The corresponding class.
        """
        super().__init__(dataset_path=dataset_path,
                         name=name,
                         cache_dir=cache_dir,
                         use_cache=use_cache,
                         class_weights=class_weights,
                         ignored_label_inds=ignored_label_inds,
                         test_result_folder=test_result_folder,
                         test_split=test_split,
                         training_split=training_split,
                         validation_split=validation_split,
                         all_split=all_split,
                         **kwargs)

        self.label_to_names = self.get_label_to_names()
        self.num_classes = len(self.label_to_names)

        data_config = join(dirname(abspath(__file__)), '_resources/',
                           'rellis3d.yaml')
        # Close the config file deterministically instead of leaking the
        # handle (original used a bare open() inside yaml.safe_load()).
        with open(data_config, 'r') as f:
            DATA = yaml.safe_load(f)

        # learning_map_inv: train label id -> original dataset label id.
        remap_dict = DATA["learning_map_inv"]
        self.colour_map = DATA["color_map"]

        # Build a lookup table for the mapping, padded with extra zero
        # entries so slightly out-of-range ids map to 0 ('void').
        max_key = max(remap_dict.keys())
        remap_lut = np.zeros((max_key + 100), dtype=np.int32)
        remap_lut[list(remap_dict.keys())] = list(remap_dict.values())

        # learning_map: original dataset label id -> train label id.
        remap_dict_val = DATA["learning_map"]
        self.label_indices = list(remap_dict_val)
        max_key = max(remap_dict_val.keys())
        remap_lut_val = np.zeros((max_key + 100), dtype=np.int32)
        remap_lut_val[list(remap_dict_val.keys())] = list(
            remap_dict_val.values())

        self.remap_lut_val = remap_lut_val
        self.remap_lut = remap_lut

    @staticmethod
    def get_label_to_names():
        """
        Returns a label to names dictonary object.

        Returns:
            A dict where keys are label numbers and
            values are the corresponding names.
        """
        label_to_names = {
            0: 'void',
            1: 'dirt',
            3: 'grass',
            4: 'tree',
            5: 'pole',
            6: 'water',
            7: 'sky',
            8: 'vehicle',
            9: 'object',
            10: 'asphalt',
            12: 'building',
            15: 'log',
            17: 'person',
            18: 'fence',
            19: 'bush',
            23: 'concrete',
            27: 'barrier',
            31: 'puddle',
            33: 'mud',
            34: 'rubble'
        }
        return label_to_names

    def get_split(self, split):
        """Returns a dataset split.

        Args:
            split: A string identifying the dataset split that is usually one of
            'training', 'test', 'validation', or 'all'.

        Returns:
            A dataset split object providing the requested subset of the data.
        """
        return Rellis3DSplit(self, split=split)

    def is_tested(self, attr):
        """Checks if a datum in the dataset has been tested.

        Args:
            attr: The attribute that needs to be checked.

        Returns:
            True if a prediction file for the datum already exists on disk,
            False otherwise.
        """
        cfg = self.cfg
        name_seq, name_points = attr['name'].split("_")
        store_path = join(cfg.test_result_folder, 'sequences', name_seq,
                          'predictions', name_points + '.label')
        if exists(store_path):
            print("{} already exists.".format(store_path))
            return True
        return False

    def save_test_result(self, results, attr):
        """Saves the output of a model.

        Args:
            results: The output of a model for the datum associated with the attribute passed.
            attr: The attributes that correspond to the outputs passed in results.
        """
        cfg = self.cfg
        name_seq, name_points = attr['name'].split("_")

        test_path = join(cfg.test_result_folder, 'sequences')
        make_dir(test_path)
        save_path = join(test_path, name_seq, 'predictions')
        make_dir(save_path)

        # FIX: `pred` was used below without ever being assigned (NameError
        # on every call).  Predictions are carried under this key, matching
        # sibling dataset implementations — TODO(review): confirm the key
        # used by the inference pipeline.
        pred = results['predict_labels']

        # Shift predictions past each ignored label so saved ids line up
        # with the original (un-ignored) label space.
        for ign in cfg.ignored_label_inds:
            pred[pred >= ign] += 1

        store_path = join(save_path, name_points + '.label')
        pred = self.remap_lut[pred].astype(np.uint32)
        pred.tofile(store_path)

    def save_test_result_kpconv(self, results, inputs):
        """Saves KPConv test output.

        Re-projects the subsampled per-point probabilities back onto the
        full point cloud before writing the .label file.

        Args:
            results: per-point class probabilities from the model.
            inputs: batch dict carrying 'attr' (datum name) and 'data'
                (with reprojection indices).
        """
        cfg = self.cfg
        name_seq, name_points = inputs['attr']['name'].split("_")

        test_path = join(cfg.test_result_folder, 'sequences')
        make_dir(test_path)
        save_path = join(test_path, name_seq, 'predictions')
        make_dir(save_path)

        proj_inds = inputs['data'].reproj_inds[0]
        probs = results[proj_inds, :]
        pred = np.argmax(probs, 1)

        store_path = join(save_path, name_points + '.label')
        # +1 undoes the ignored-label shift before remapping.
        pred = pred + 1
        # FIX: was the bare name `remap_lut` (NameError); use the instance
        # lookup table built in __init__.
        pred = self.remap_lut[pred].astype(np.uint32)
        pred.tofile(store_path)

    def get_split_list(self, split):
        """Returns the list of point-cloud files belonging to a split.

        Args:
            split: A string identifying the dataset split that is usually one of
            'training', 'test', 'validation', or 'all'.

        Returns:
            An array of file paths providing the requested subset of the data.

        Raises:
            ValueError: Indicates that the split name passed is incorrect. The
            split name should be one of 'training', 'test', 'validation', or 'all'.
        """
        cfg = self.cfg
        dataset_path = cfg.dataset_path

        if split in ['train', 'training']:
            seq_list = cfg.training_split
        elif split in ['test', 'testing']:
            seq_list = cfg.test_split
        elif split in ['val', 'validation']:
            seq_list = cfg.validation_split
        elif split in ['all']:
            seq_list = cfg.all_split
        else:
            raise ValueError("Invalid split {}".format(split))

        file_list = []
        for seq_id in seq_list:
            pc_path = join(dataset_path, 'dataset', 'sequences', seq_id,
                           'velodyne')
            file_list.append(
                [join(pc_path, f) for f in np.sort(os.listdir(pc_path))])

        return np.concatenate(file_list, axis=0)
class Rellis3DSplit(BaseDatasetSplit):
    """One split (training/validation/test/all) of the Rellis-3D dataset."""

    def __init__(self, dataset, split='training'):
        super().__init__(dataset, split=split)
        log.info("Found {} pointclouds for {}".format(len(self.path_list),
                                                      split))
        # Original-id -> train-id lookup table built by the parent dataset.
        self.remap_lut_val = dataset.remap_lut_val

    def __len__(self):
        return len(self.path_list)

    def get_data(self, idx):
        """Load one point cloud and its remapped semantic labels."""
        cloud_path = self.path_list[idx]
        points = DataProcessing.load_pc_kitti(cloud_path)

        folder, fname = split(cloud_path)
        label_path = join(folder, '../labels', fname[:-4] + '.label')
        if exists(label_path):
            labels = DataProcessing.load_label_kitti(
                label_path, self.remap_lut_val).astype(np.int32)
        else:
            # No ground truth: zero labels are acceptable only for
            # test/all splits.
            labels = np.zeros(np.shape(points)[0], dtype=np.int32)
            if self.split not in ['test', 'all']:
                raise FileNotFoundError(f' Label file {label_path} not found')

        return {
            'point': points[:, 0:3],
            'feat': None,
            'label': labels,
        }

    def get_attr(self, idx):
        """Return identifying attributes (index, name, path, split)."""
        cloud_path = self.path_list[idx]
        folder, fname = split(cloud_path)
        _, seq = split(split(folder)[0])
        # Datum name is '<sequence>_<frame-stem>'.
        stem = '{}_{}'.format(seq, fname[:-4])
        return {
            'idx': idx,
            'name': stem,
            'path': str(cloud_path),
            'split': self.split,
        }
# Register the dataset in the framework registry so it can be looked up by name.
DATASET._register_module(Rellis3D)
|
#!/usr/bin/python3
import argparse
import base64
import time
import logging
# Name the logger after the file when run as a script so records stay identifiable.
_logger = logging.getLogger(__name__ if __name__ != '__main__' else __file__)
class Stream():
    """Replays a stream of HID reports captured with ``hid-dump``.

    The capture file holds hex-byte report lines; lines containing ':' are
    hid-dump metadata and are skipped, and a blank line terminates the
    current report.

    TODO: support streams other than filenames.
    """

    def __init__(self, stream, host=None, debug=False):
        """Parse the capture file into ``self.reports``.

        Args:
            stream: path of the hid-dump capture file.
            host: HID gadget device to replay to (ex: '/dev/hidg0').
            debug: when True, print each report while replaying.
        """
        self.stream = stream
        self.host = host  # ex: '/dev/hidg0'
        self.debug = debug
        with open(self.stream) as s:
            d = s.read()
        self.reports = []
        report = ''
        for l in d.split('\n'):
            r = l
            if ':' in r:
                # hid-dump metadata/timestamp line, not report payload
                continue
            if not r.strip():
                # Blank line ends the current report.
                # FIX: skip duplicate/stray blank lines — previously they
                # queued empty reports that were later replayed as
                # zero-length writes to the device.
                if report.strip():
                    self.reports.append(report.strip())
                report = ''
                continue
            report += r

    def read(self, delay=0):
        ''' read from stream (with an optional delay (for debugging) '''
        for r in self.reports:
            if delay:
                time.sleep(delay)
            # Reports are stored as uppercase hex pairs separated by spaces.
            p = base64.b16decode(r.replace(' ', ''))
            if self.debug:
                print('%s %s' % (len(p), r), end='\r')
            yield p

    def send_echo(self, fstream):
        ''' echo reports to file (can be ran as script to send reports) '''
        # FIX: this method previously lacked `self` and referenced the
        # undefined globals `reports` and `args` (NameError on any call);
        # it now uses self.reports / self.host and closes the output file.
        with open(fstream, 'w') as f:
            for r in self.reports:
                hidr = ''.join(['\\x%s' % c.lower() for c in r.split()])
                hidr = hidr.replace('\\x00', '\\0').replace('\\x0', '\\x')
                cmd = 'echo -ne "%s" > %s' % (hidr, self.host)
                # os.system(cmd) doesn't work; run the generated file with
                # bash instead.
                f.write(cmd + '\n')

    def send_to_host(self, delay=0):
        """Replay every report to the host HID device.

        NOTE: each report is written in its own open/close cycle —
        otherwise the gadget device never flushes the report.
        TODO: find out which method is faster; see if there's a no-close
        variant (\\n, \\r, something).
        """
        for p in self.read(delay):
            with open(self.host, 'wb') as f:
                f.write(p)
class ArgsParser(argparse.ArgumentParser):
    """ArgumentParser subclass kept as a hook for customized error output."""

    def error(self, message):
        # Mirrors argparse's default behavior (usage, then exit code 2);
        # exists only so it can be customized later.
        self.print_usage()
        text = '%s: error: %s\n' % (self.prog, message)
        self.exit(2, text)
if __name__ == '__main__':
    # CLI entry point: replay a captured HID report stream to a gadget device.
    default_hid = '/dev/hidg0'
    parser = ArgsParser(description='Sends a stream of HID reports captured by hid-dump')
    parser.add_argument('file', type=str, help='File containing HID stream')
    parser.add_argument(
        '-d', '--device', default=default_hid,
        help='Device to stream to; default: %s' % default_hid
    )
    parser.add_argument(
        '--delay', type=float, default=0,
        help='Playback delay (in seconds; ex: .01)')
    parser.add_argument('--debug', default=False, action='store_true')
    args = parser.parse_args()

    # Parse the capture file, then replay it report-by-report.
    stream = Stream(args.file, args.device, args.debug)
    stream.send_to_host(args.delay)
|
import random
def gen_test():
    """Generate, print, and exit with a random 1000-interval test case.

    Uses a fixed seed so the output is reproducible; the printed dict is
    meant to be pasted into the module-level `tests` list.  The trailing
    `return` is unreachable because of exit(0) (kept from the original).
    """
    count, k, max_end = 1000, 100, 1000
    rng = random.Random(0)
    intervals = []
    for _ in range(count):
        lo = rng.randint(0, max_end)
        hi = rng.randint(0, max_end)
        if lo > hi:
            lo, hi = hi, lo
        if lo == hi:
            # degenerate draw: widen to a unit-length interval
            hi = lo + 1
        intervals.append((lo, hi))
    case = {
        'A': intervals,
        'k': k,
        'res': 668,
        'sol': [10, 18, 27, 44, 45, 48, 74, 75, 77, 85]
    }
    print(case)
    exit(0)
    return case
tests = [
{
'A': [(0, 4), (1, 10), (6, 7), (2, 8)],
'k': 3,
'res': 2,
'sol': [0, 1, 3]
}, {
'A': [(x, x + 2) for x in range(50)],
'k': 2,
'res': 1,
'sol': [0, 1]
}, {
'A': [(x, x + 10) for x in range(150)],
'k': 10,
'res': 1,
'sol': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
}, {
'A': [(100 - 5 * x, 100 + 5 * x) for x in range(15)],
'k': 14,
'res': 10,
'sol': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
},
{
# All start at point 0
'A': [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)],
'k': 3,
'res': 3,
'sol': [2, 3, 4]
},
{
# k = 1
'A': [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)],
'k': 1,
'res': 5,
'sol': [4]
},
{
# Smaller intervals are fully contained into longer ones
'A': [(10, 11), (9, 12), (8, 13), (7, 14), (6, 15)],
'k': 3,
'res': 5,
'sol': [2, 3, 4]
},
{'A': [(394, 864), (776, 911), (41, 430), (265, 988), (497, 523), (414, 940), (802, 849), (310, 991), (366, 488),
(597, 913), (223, 929), (142, 516),
(143, 288), (97, 773), (633, 818), (256, 931), (545, 722), (616, 829), (150, 923), (101, 317), (75, 747),
(870, 920), (338, 700), (483, 573),
(103, 362), (323, 444), (625, 655), (209, 934), (565, 989), (453, 488), (533, 886), (63, 266), (824, 940),
(561, 937), (14, 95), (736, 860),
(408, 727), (803, 844), (640, 684), (1, 626), (505, 847), (341, 888), (249, 747), (333, 720), (64, 891),
(195, 939), (227, 581), (244, 822),
(145, 990), (556, 822), (93, 458), (82, 327), (520, 896), (501, 955), (111, 308), (298, 564), (127, 723),
(340, 560), (834, 944), (208, 553),
(818, 986), (560, 617), (294, 601), (93, 455), (610, 817), (324, 394), (247, 589), (188, 297), (193, 841),
(33, 191), (627, 672), (266, 487),
(70, 91), (695, 775), (133, 897), (153, 945), (39, 862), (82, 919), (716, 945), (553, 849), (400, 699),
(722, 857), (282, 537), (534, 831),
(241, 869), (220, 916), (603, 695), (845, 972), (429, 593), (281, 461), (504, 676), (656, 717), (812, 938),
(84, 365), (332, 627), (118, 498),
(601, 645), (343, 865), (194, 248), (16, 749)],
'k': 10,
'res': 668,
'sol': [10, 18, 27, 44, 45, 48, 74, 75, 77, 85]
},
{'A': [(394, 864), (776, 911), (41, 430), (265, 988), (497, 523), (414, 940), (802, 849), (310, 991), (366, 488),
(597, 913), (223, 929), (142, 516),
(143, 288), (97, 773), (633, 818), (256, 931), (545, 722), (616, 829), (150, 923), (101, 317), (75, 747),
(870, 920), (338, 700), (483, 573),
(103, 362), (323, 444), (625, 655), (209, 934), (565, 989), (453, 488), (533, 886), (63, 266), (824, 940),
(561, 937), (14, 95), (736, 860),
(408, 727), (803, 844), (640, 684), (1, 626), (505, 847), (341, 888), (249, 747), (333, 720), (64, 891),
(195, 939), (227, 581), (244, 822),
(145, 990), (556, 822), (93, 458), (82, 327), (520, 896), (501, 955), (111, 308), (298, 564), (127, 723),
(340, 560), (834, 944), (208, 553),
(818, 986), (560, 617), (294, 601), (93, 455), (610, 817), (324, 394), (247, 589), (188, 297), (193, 841),
(33, 191), (627, 672), (266, 487),
(70, 91), (695, 775), (133, 897), (153, 945), (39, 862), (82, 919), (716, 945), (553, 849), (400, 699),
(722, 857), (282, 537), (534, 831),
(241, 869), (220, 916), (603, 695), (845, 972), (429, 593), (281, 461), (504, 676), (656, 717), (812, 938),
(84, 365), (332, 627), (118, 498),
(601, 645), (343, 865), (194, 248), (16, 749), (119, 277), (225, 722), (380, 813), (174, 340), (436, 835),
(63, 103), (149, 801), (714, 875),
(46, 224), (587, 836), (649, 931), (547, 958), (616, 696), (27, 75), (127, 650), (193, 620), (589, 850),
(122, 400), (93, 379), (118, 853),
(37, 620), (22, 199), (984, 993), (189, 735), (126, 490), (215, 744), (62, 819), (695, 959), (23, 557),
(435, 635), (103, 855), (71, 266), (73, 226),
(308, 662), (358, 446), (62, 184), (478, 515), (40, 610), (103, 716), (204, 400), (266, 367), (749, 926),
(481, 858), (923, 940), (173, 583),
(688, 714), (208, 989), (59, 785), (692, 807), (162, 865), (165, 350), (256, 542), (120, 611), (452, 943),
(179, 681), (13, 482), (419, 697),
(582, 921), (520, 895), (318, 939), (365, 664), (397, 857), (256, 673), (157, 574), (12, 707), (468, 759),
(80, 343), (46, 756), (287, 557),
(138, 245), (780, 976), (360, 493), (294, 624), (367, 689), (604, 969), (648, 913), (635, 874), (135, 732),
(317, 397), (424, 766), (666, 848),
(1, 82), (196, 608), (342, 715), (163, 245), (228, 652), (387, 458), (727, 896), (581, 689), (424, 895),
(32, 411), (718, 892), (428, 581),
(678, 790), (47, 726), (169, 456), (65, 265), (161, 718), (457, 540), (498, 906), (574, 929), (618, 773),
(0, 905), (39, 506), (319, 333),
(478, 857), (51, 828), (842, 896), (831, 997), (192, 425), (561, 986), (85, 648), (742, 857), (15, 133),
(411, 972), (427, 694), (3, 323), (14, 218),
(734, 772), (2, 842), (541, 691), (100, 626), (121, 195), (622, 664), (203, 894), (286, 309), (186, 705),
(102, 487), (874, 944), (406, 642),
(22, 83), (281, 935), (463, 819), (118, 811), (262, 882), (136, 669), (533, 836), (660, 666), (117, 355),
(158, 892), (285, 871), (19, 43),
(41, 210), (265, 697), (322, 571), (375, 969), (581, 960), (869, 931), (43, 866), (767, 984), (622, 718),
(506, 671), (659, 729), (469, 924),
(445, 655), (381, 892), (182, 550), (212, 384), (298, 601), (9, 141), (154, 277), (341, 345), (376, 808),
(95, 735), (346, 798), (36, 635),
(42, 276), (153, 167), (296, 597), (369, 404), (132, 561), (117, 300), (489, 748), (245, 956), (49, 315),
(183, 877), (535, 746), (72, 309),
(412, 855), (306, 336), (111, 424), (101, 574), (492, 930), (345, 485), (817, 861), (831, 999), (127, 351),
(118, 490), (509, 716), (38, 436),
(309, 343), (703, 752), (159, 915), (170, 941), (578, 641), (384, 825), (654, 997), (67, 89), (86, 827),
(202, 767), (62, 226), (8, 394), (100, 403),
(531, 569), (296, 459), (500, 942), (598, 807), (695, 731), (222, 433), (85, 377), (225, 267), (599, 795),
(170, 441), (196, 367), (65, 117),
(841, 884), (718, 873), (28, 924), (462, 538), (693, 770), (121, 206), (407, 509), (212, 262), (43, 656),
(816, 970), (221, 638), (107, 149),
(202, 469), (370, 387), (559, 846), (107, 154), (499, 610), (151, 577), (415, 653), (433, 696), (533, 898),
(507, 695), (909, 939), (330, 853),
(510, 511), (650, 686), (206, 895), (555, 624), (224, 953), (9, 348), (722, 985), (764, 920), (325, 837),
(36, 329), (151, 537), (263, 895),
(617, 802), (159, 862), (388, 596), (301, 735), (723, 826), (67, 481), (86, 819), (528, 889), (40, 937),
(67, 230), (41, 133), (15, 307), (777, 864),
(338, 459), (164, 882), (152, 819), (671, 889), (471, 991), (380, 517), (391, 922), (514, 542), (34, 587),
(92, 694), (813, 824), (530, 776),
(78, 614), (436, 764), (772, 927), (211, 296), (548, 922), (427, 612), (845, 995), (493, 865), (810, 995),
(397, 622), (239, 600), (871, 885),
(20, 817), (672, 906), (0, 758), (186, 309), (519, 583), (260, 340), (67, 505), (268, 880), (844, 965),
(310, 791), (393, 417), (392, 829),
(63, 167), (656, 957), (130, 244), (293, 746), (342, 849), (56, 964), (36, 492), (144, 427), (503, 911),
(616, 884), (83, 734), (689, 715),
(155, 829), (361, 421), (36, 626), (395, 477), (48, 469), (103, 482), (155, 796), (20, 33), (612, 632),
(135, 645), (107, 331), (562, 716),
(354, 664), (199, 392), (795, 802), (502, 796), (113, 902), (61, 624), (478, 717), (629, 647), (345, 956),
(127, 666), (698, 992), (636, 730),
(303, 807), (130, 869), (933, 981), (396, 818), (300, 938), (763, 893), (697, 980), (124, 829), (531, 881),
(193, 804), (39, 800), (401, 455),
(380, 774), (195, 466), (365, 808), (77, 647), (45, 979), (923, 956), (40, 497), (261, 922), (27, 967),
(532, 682), (582, 585), (221, 896),
(95, 235), (794, 839), (905, 910), (642, 798), (514, 715), (430, 536), (312, 519), (116, 968), (149, 436),
(579, 913), (432, 945), (86, 958),
(107, 425), (64, 101), (425, 792), (159, 751), (31, 977), (457, 810), (441, 702), (30, 427), (508, 941),
(884, 985), (332, 739), (80, 258),
(72, 360), (124, 367), (30, 708), (353, 356), (10, 182), (850, 997), (236, 838), (72, 374), (610, 914),
(146, 212), (3, 209), (674, 689), (749, 960),
(126, 922), (7, 765), (300, 377), (25, 706), (619, 955), (238, 879), (145, 191), (115, 464), (352, 488),
(724, 982), (133, 264), (28, 989),
(213, 370), (343, 484), (299, 983), (303, 959), (899, 981), (566, 651), (188, 334), (82, 607), (105, 546),
(315, 594), (160, 385), (150, 919),
(128, 968), (228, 823), (323, 520), (242, 248), (188, 772), (298, 381), (429, 679), (47, 881), (135, 615),
(21, 403), (79, 719), (74, 135),
(306, 430), (426, 563), (758, 948), (145, 605), (305, 432), (363, 652), (86, 254), (455, 647), (378, 652),
(541, 971), (59, 385), (8, 418),
(427, 985), (745, 922), (328, 451), (208, 380), (300, 975), (93, 482), (189, 973), (111, 815), (114, 283),
(571, 620), (157, 704), (719, 814),
(456, 950), (189, 408), (431, 786), (178, 442), (253, 982), (348, 464), (535, 959), (145, 363), (473, 646),
(88, 652), (494, 773), (208, 301),
(1, 850), (459, 715), (473, 633), (7, 223), (117, 305), (644, 787), (308, 558), (159, 623), (434, 723),
(482, 769), (94, 694), (509, 778),
(237, 983), (556, 780), (286, 415), (22, 647), (123, 276), (684, 904), (0, 41), (262, 407), (538, 911),
(595, 727), (405, 455), (104, 764),
(258, 362), (290, 892), (688, 773), (200, 930), (87, 609), (36, 72), (268, 811), (312, 546), (121, 348),
(542, 880), (255, 912), (780, 942),
(69, 167), (424, 881), (289, 296), (137, 532), (535, 587), (215, 642), (107, 544), (420, 978), (556, 649),
(413, 759), (797, 925), (285, 807),
(299, 452), (380, 581), (141, 643), (126, 160), (123, 713), (390, 410), (479, 605), (142, 573), (306, 684),
(362, 647), (484, 760), (223, 425),
(488, 500), (513, 711), (325, 504), (667, 981), (61, 454), (146, 307), (507, 763), (53, 908), (220, 636),
(26, 363), (400, 482), (10, 909),
(539, 866), (68, 703), (83, 887), (702, 972), (759, 946), (404, 685), (6, 369), (42, 118), (3, 635),
(276, 894), (655, 716), (299, 744), (232, 922),
(144, 769), (294, 586), (107, 195), (444, 471), (338, 733), (172, 393), (338, 431), (663, 918), (445, 703),
(151, 458), (725, 955), (151, 536),
(132, 323), (213, 932), (191, 454), (357, 808), (398, 437), (503, 826), (398, 747), (225, 814), (200, 449),
(209, 962), (600, 727), (50, 927),
(34, 397), (239, 648), (86, 891), (191, 372), (58, 760), (653, 693), (177, 238), (304, 625), (88, 627),
(721, 889), (524, 769), (291, 789),
(898, 904), (361, 421), (55, 469), (647, 713), (528, 681), (664, 979), (560, 977), (752, 952), (440, 956),
(465, 594), (260, 501), (487, 721),
(220, 345), (43, 272), (44, 53), (166, 358), (3, 296), (7, 670), (65, 143), (438, 805), (227, 696),
(623, 993), (406, 571), (226, 943), (197, 464),
(347, 622), (104, 620), (87, 904), (326, 813), (330, 548), (466, 914), (261, 332), (29, 534), (45, 194),
(82, 377), (214, 890), (354, 537),
(192, 857), (206, 257), (688, 746), (308, 753), (319, 529), (393, 880), (260, 493), (352, 892), (245, 729),
(45, 313), (565, 956), (9, 74),
(471, 507), (448, 741), (48, 939), (422, 828), (471, 505), (120, 450), (83, 87), (101, 246), (783, 846),
(157, 423), (905, 941), (218, 451),
(78, 627), (437, 837), (572, 772), (849, 907), (40, 403), (184, 981), (255, 501), (131, 225), (860, 891),
(285, 956), (327, 360), (109, 445),
(570, 921), (292, 624), (554, 807), (206, 728), (303, 796), (452, 526), (473, 619), (549, 649), (267, 279),
(16, 237), (121, 629), (728, 802),
(101, 176), (424, 750), (223, 254), (291, 900), (675, 753), (6, 759), (527, 548), (438, 879), (50, 124),
(393, 660), (121, 279), (754, 994),
(367, 578), (235, 691), (720, 733), (559, 676), (226, 288), (757, 851), (245, 923), (66, 530), (314, 690),
(239, 335), (382, 643), (293, 491),
(175, 596), (140, 829), (15, 566), (335, 516), (375, 599), (25, 650), (132, 831), (405, 897), (158, 999),
(181, 522), (78, 138), (211, 783),
(800, 937), (508, 793), (583, 785), (712, 992), (218, 240), (135, 750), (239, 835), (393, 778), (361, 623),
(135, 605), (510, 644), (922, 939),
(110, 630), (26, 853), (539, 610), (367, 500), (316, 466), (12, 981), (225, 568), (166, 668), (676, 900),
(506, 828), (755, 976), (492, 559),
(321, 721), (80, 868), (140, 265), (411, 618), (195, 720), (324, 841), (298, 804), (393, 966), (60, 213),
(39, 322), (743, 765), (255, 984),
(351, 883), (451, 684), (675, 743), (231, 673), (266, 352), (166, 687), (17, 312), (364, 587), (59, 552),
(645, 749), (154, 361), (22, 502),
(62, 642), (25, 247), (12, 46), (231, 968), (334, 669), (68, 840), (63, 353), (679, 995), (139, 432),
(913, 944), (221, 459), (145, 445), (319, 366),
(181, 664), (336, 745), (765, 804), (391, 418), (9, 419), (270, 912), (544, 546), (753, 823), (703, 721),
(473, 777), (42, 578), (125, 943),
(418, 976), (175, 399), (3, 512), (141, 636), (677, 889), (526, 860), (716, 750), (81, 151), (243, 337),
(848, 860), (180, 840), (252, 968),
(22, 979), (825, 917), (172, 760), (806, 858), (574, 700), (172, 735), (80, 437), (885, 949), (106, 612),
(635, 643), (468, 727), (153, 629),
(41, 616), (258, 348), (755, 825), (385, 750), (27, 640), (910, 954), (37, 508), (91, 366), (299, 687),
(154, 469), (241, 519), (166, 364),
(753, 771), (345, 414), (276, 822), (504, 967), (15, 402), (318, 543), (295, 922), (480, 563), (35, 1000),
(544, 791), (565, 584), (268, 956),
(39, 703), (404, 466), (122, 738), (354, 413), (52, 507), (21, 279), (35, 757), (260, 697), (595, 696),
(719, 794), (296, 951), (702, 780),
(212, 780), (528, 541), (348, 395), (854, 995), (213, 256), (118, 579), (337, 824), (965, 991), (248, 946),
(600, 690), (545, 737), (697, 896),
(166, 361), (888, 938), (156, 921), (338, 866), (765, 839), (9, 598), (52, 835), (159, 578), (352, 975),
(298, 371), (300, 640), (330, 506),
(413, 801), (441, 615), (1, 174), (144, 807), (44, 582), (128, 452), (349, 951), (9, 971), (491, 738),
(931, 969), (680, 838), (678, 797),
(263, 765), (192, 626), (71, 562), (434, 958), (285, 972), (178, 850), (173, 542), (64, 674), (161, 654),
(112, 593), (516, 964), (647, 815),
(556, 619), (393, 772), (272, 445), (291, 318), (13, 438), (797, 839), (287, 735), (264, 968)],
'k': 10,
'res': 889,
'sol': [359, 452, 456, 476, 508, 741, 828, 896, 928, 975]
},
{'A': [(394, 864), (776, 911), (41, 430), (265, 988), (497, 523), (414, 940), (802, 849), (310, 991), (366, 488),
(597, 913), (223, 929), (142, 516),
(143, 288), (97, 773), (633, 818), (256, 931), (545, 722), (616, 829), (150, 923), (101, 317), (75, 747),
(870, 920), (338, 700), (483, 573),
(103, 362), (323, 444), (625, 655), (209, 934), (565, 989), (453, 488), (533, 886), (63, 266), (824, 940),
(561, 937), (14, 95), (736, 860),
(408, 727), (803, 844), (640, 684), (1, 626), (505, 847), (341, 888), (249, 747), (333, 720), (64, 891),
(195, 939), (227, 581), (244, 822),
(145, 990), (556, 822), (93, 458), (82, 327), (520, 896), (501, 955), (111, 308), (298, 564), (127, 723),
(340, 560), (834, 944), (208, 553),
(818, 986), (560, 617), (294, 601), (93, 455), (610, 817), (324, 394), (247, 589), (188, 297), (193, 841),
(33, 191), (627, 672), (266, 487),
(70, 91), (695, 775), (133, 897), (153, 945), (39, 862), (82, 919), (716, 945), (553, 849), (400, 699),
(722, 857), (282, 537), (534, 831),
(241, 869), (220, 916), (603, 695), (845, 972), (429, 593), (281, 461), (504, 676), (656, 717), (812, 938),
(84, 365), (332, 627), (118, 498),
(601, 645), (343, 865), (194, 248), (16, 749), (119, 277), (225, 722), (380, 813), (174, 340), (436, 835),
(63, 103), (149, 801), (714, 875),
(46, 224), (587, 836), (649, 931), (547, 958), (616, 696), (27, 75), (127, 650), (193, 620), (589, 850),
(122, 400), (93, 379), (118, 853),
(37, 620), (22, 199), (984, 993), (189, 735), (126, 490), (215, 744), (62, 819), (695, 959), (23, 557),
(435, 635), (103, 855), (71, 266), (73, 226),
(308, 662), (358, 446), (62, 184), (478, 515), (40, 610), (103, 716), (204, 400), (266, 367), (749, 926),
(481, 858), (923, 940), (173, 583),
(688, 714), (208, 989), (59, 785), (692, 807), (162, 865), (165, 350), (256, 542), (120, 611), (452, 943),
(179, 681), (13, 482), (419, 697),
(582, 921), (520, 895), (318, 939), (365, 664), (397, 857), (256, 673), (157, 574), (12, 707), (468, 759),
(80, 343), (46, 756), (287, 557),
(138, 245), (780, 976), (360, 493), (294, 624), (367, 689), (604, 969), (648, 913), (635, 874), (135, 732),
(317, 397), (424, 766), (666, 848),
(1, 82), (196, 608), (342, 715), (163, 245), (228, 652), (387, 458), (727, 896), (581, 689), (424, 895),
(32, 411), (718, 892), (428, 581),
(678, 790), (47, 726), (169, 456), (65, 265), (161, 718), (457, 540), (498, 906), (574, 929), (618, 773),
(0, 905), (39, 506), (319, 333),
(478, 857), (51, 828), (842, 896), (831, 997), (192, 425), (561, 986), (85, 648), (742, 857), (15, 133),
(411, 972), (427, 694), (3, 323), (14, 218),
(734, 772), (2, 842), (541, 691), (100, 626), (121, 195), (622, 664), (203, 894), (286, 309), (186, 705),
(102, 487), (874, 944), (406, 642),
(22, 83), (281, 935), (463, 819), (118, 811), (262, 882), (136, 669), (533, 836), (660, 666), (117, 355),
(158, 892), (285, 871), (19, 43),
(41, 210), (265, 697), (322, 571), (375, 969), (581, 960), (869, 931), (43, 866), (767, 984), (622, 718),
(506, 671), (659, 729), (469, 924),
(445, 655), (381, 892), (182, 550), (212, 384), (298, 601), (9, 141), (154, 277), (341, 345), (376, 808),
(95, 735), (346, 798), (36, 635),
(42, 276), (153, 167), (296, 597), (369, 404), (132, 561), (117, 300), (489, 748), (245, 956), (49, 315),
(183, 877), (535, 746), (72, 309),
(412, 855), (306, 336), (111, 424), (101, 574), (492, 930), (345, 485), (817, 861), (831, 999), (127, 351),
(118, 490), (509, 716), (38, 436),
(309, 343), (703, 752), (159, 915), (170, 941), (578, 641), (384, 825), (654, 997), (67, 89), (86, 827),
(202, 767), (62, 226), (8, 394), (100, 403),
(531, 569), (296, 459), (500, 942), (598, 807), (695, 731), (222, 433), (85, 377), (225, 267), (599, 795),
(170, 441), (196, 367), (65, 117),
(841, 884), (718, 873), (28, 924), (462, 538), (693, 770), (121, 206), (407, 509), (212, 262), (43, 656),
(816, 970), (221, 638), (107, 149),
(202, 469), (370, 387), (559, 846), (107, 154), (499, 610), (151, 577), (415, 653), (433, 696), (533, 898),
(507, 695), (909, 939), (330, 853),
(510, 511), (650, 686), (206, 895), (555, 624), (224, 953), (9, 348), (722, 985), (764, 920), (325, 837),
(36, 329), (151, 537), (263, 895),
(617, 802), (159, 862), (388, 596), (301, 735), (723, 826), (67, 481), (86, 819), (528, 889), (40, 937),
(67, 230), (41, 133), (15, 307), (777, 864),
(338, 459), (164, 882), (152, 819), (671, 889), (471, 991), (380, 517), (391, 922), (514, 542), (34, 587),
(92, 694), (813, 824), (530, 776),
(78, 614), (436, 764), (772, 927), (211, 296), (548, 922), (427, 612), (845, 995), (493, 865), (810, 995),
(397, 622), (239, 600), (871, 885),
(20, 817), (672, 906), (0, 758), (186, 309), (519, 583), (260, 340), (67, 505), (268, 880), (844, 965),
(310, 791), (393, 417), (392, 829),
(63, 167), (656, 957), (130, 244), (293, 746), (342, 849), (56, 964), (36, 492), (144, 427), (503, 911),
(616, 884), (83, 734), (689, 715),
(155, 829), (361, 421), (36, 626), (395, 477), (48, 469), (103, 482), (155, 796), (20, 33), (612, 632),
(135, 645), (107, 331), (562, 716),
(354, 664), (199, 392), (795, 802), (502, 796), (113, 902), (61, 624), (478, 717), (629, 647), (345, 956),
(127, 666), (698, 992), (636, 730),
(303, 807), (130, 869), (933, 981), (396, 818), (300, 938), (763, 893), (697, 980), (124, 829), (531, 881),
(193, 804), (39, 800), (401, 455),
(380, 774), (195, 466), (365, 808), (77, 647), (45, 979), (923, 956), (40, 497), (261, 922), (27, 967),
(532, 682), (582, 585), (221, 896),
(95, 235), (794, 839), (905, 910), (642, 798), (514, 715), (430, 536), (312, 519), (116, 968), (149, 436),
(579, 913), (432, 945), (86, 958),
(107, 425), (64, 101), (425, 792), (159, 751), (31, 977), (457, 810), (441, 702), (30, 427), (508, 941),
(884, 985), (332, 739), (80, 258),
(72, 360), (124, 367), (30, 708), (353, 356), (10, 182), (850, 997), (236, 838), (72, 374), (610, 914),
(146, 212), (3, 209), (674, 689), (749, 960),
(126, 922), (7, 765), (300, 377), (25, 706), (619, 955), (238, 879), (145, 191), (115, 464), (352, 488),
(724, 982), (133, 264), (28, 989),
(213, 370), (343, 484), (299, 983), (303, 959), (899, 981), (566, 651), (188, 334), (82, 607), (105, 546),
(315, 594), (160, 385), (150, 919),
(128, 968), (228, 823), (323, 520), (242, 248), (188, 772), (298, 381), (429, 679), (47, 881), (135, 615),
(21, 403), (79, 719), (74, 135),
(306, 430), (426, 563), (758, 948), (145, 605), (305, 432), (363, 652), (86, 254), (455, 647), (378, 652),
(541, 971), (59, 385), (8, 418),
(427, 985), (745, 922), (328, 451), (208, 380), (300, 975), (93, 482), (189, 973), (111, 815), (114, 283),
(571, 620), (157, 704), (719, 814),
(456, 950), (189, 408), (431, 786), (178, 442), (253, 982), (348, 464), (535, 959), (145, 363), (473, 646),
(88, 652), (494, 773), (208, 301),
(1, 850), (459, 715), (473, 633), (7, 223), (117, 305), (644, 787), (308, 558), (159, 623), (434, 723),
(482, 769), (94, 694), (509, 778),
(237, 983), (556, 780), (286, 415), (22, 647), (123, 276), (684, 904), (0, 41), (262, 407), (538, 911),
(595, 727), (405, 455), (104, 764),
(258, 362), (290, 892), (688, 773), (200, 930), (87, 609), (36, 72), (268, 811), (312, 546), (121, 348),
(542, 880), (255, 912), (780, 942),
(69, 167), (424, 881), (289, 296), (137, 532), (535, 587), (215, 642), (107, 544), (420, 978), (556, 649),
(413, 759), (797, 925), (285, 807),
(299, 452), (380, 581), (141, 643), (126, 160), (123, 713), (390, 410), (479, 605), (142, 573), (306, 684),
(362, 647), (484, 760), (223, 425),
(488, 500), (513, 711), (325, 504), (667, 981), (61, 454), (146, 307), (507, 763), (53, 908), (220, 636),
(26, 363), (400, 482), (10, 909),
(539, 866), (68, 703), (83, 887), (702, 972), (759, 946), (404, 685), (6, 369), (42, 118), (3, 635),
(276, 894), (655, 716), (299, 744), (232, 922),
(144, 769), (294, 586), (107, 195), (444, 471), (338, 733), (172, 393), (338, 431), (663, 918), (445, 703),
(151, 458), (725, 955), (151, 536),
(132, 323), (213, 932), (191, 454), (357, 808), (398, 437), (503, 826), (398, 747), (225, 814), (200, 449),
(209, 962), (600, 727), (50, 927),
(34, 397), (239, 648), (86, 891), (191, 372), (58, 760), (653, 693), (177, 238), (304, 625), (88, 627),
(721, 889), (524, 769), (291, 789),
(898, 904), (361, 421), (55, 469), (647, 713), (528, 681), (664, 979), (560, 977), (752, 952), (440, 956),
(465, 594), (260, 501), (487, 721),
(220, 345), (43, 272), (44, 53), (166, 358), (3, 296), (7, 670), (65, 143), (438, 805), (227, 696),
(623, 993), (406, 571), (226, 943), (197, 464),
(347, 622), (104, 620), (87, 904), (326, 813), (330, 548), (466, 914), (261, 332), (29, 534), (45, 194),
(82, 377), (214, 890), (354, 537),
(192, 857), (206, 257), (688, 746), (308, 753), (319, 529), (393, 880), (260, 493), (352, 892), (245, 729),
(45, 313), (565, 956), (9, 74),
(471, 507), (448, 741), (48, 939), (422, 828), (471, 505), (120, 450), (83, 87), (101, 246), (783, 846),
(157, 423), (905, 941), (218, 451),
(78, 627), (437, 837), (572, 772), (849, 907), (40, 403), (184, 981), (255, 501), (131, 225), (860, 891),
(285, 956), (327, 360), (109, 445),
(570, 921), (292, 624), (554, 807), (206, 728), (303, 796), (452, 526), (473, 619), (549, 649), (267, 279),
(16, 237), (121, 629), (728, 802),
(101, 176), (424, 750), (223, 254), (291, 900), (675, 753), (6, 759), (527, 548), (438, 879), (50, 124),
(393, 660), (121, 279), (754, 994),
(367, 578), (235, 691), (720, 733), (559, 676), (226, 288), (757, 851), (245, 923), (66, 530), (314, 690),
(239, 335), (382, 643), (293, 491),
(175, 596), (140, 829), (15, 566), (335, 516), (375, 599), (25, 650), (132, 831), (405, 897), (158, 999),
(181, 522), (78, 138), (211, 783),
(800, 937), (508, 793), (583, 785), (712, 992), (218, 240), (135, 750), (239, 835), (393, 778), (361, 623),
(135, 605), (510, 644), (922, 939),
(110, 630), (26, 853), (539, 610), (367, 500), (316, 466), (12, 981), (225, 568), (166, 668), (676, 900),
(506, 828), (755, 976), (492, 559),
(321, 721), (80, 868), (140, 265), (411, 618), (195, 720), (324, 841), (298, 804), (393, 966), (60, 213),
(39, 322), (743, 765), (255, 984),
(351, 883), (451, 684), (675, 743), (231, 673), (266, 352), (166, 687), (17, 312), (364, 587), (59, 552),
(645, 749), (154, 361), (22, 502),
(62, 642), (25, 247), (12, 46), (231, 968), (334, 669), (68, 840), (63, 353), (679, 995), (139, 432),
(913, 944), (221, 459), (145, 445), (319, 366),
(181, 664), (336, 745), (765, 804), (391, 418), (9, 419), (270, 912), (544, 546), (753, 823), (703, 721),
(473, 777), (42, 578), (125, 943),
(418, 976), (175, 399), (3, 512), (141, 636), (677, 889), (526, 860), (716, 750), (81, 151), (243, 337),
(848, 860), (180, 840), (252, 968),
(22, 979), (825, 917), (172, 760), (806, 858), (574, 700), (172, 735), (80, 437), (885, 949), (106, 612),
(635, 643), (468, 727), (153, 629),
(41, 616), (258, 348), (755, 825), (385, 750), (27, 640), (910, 954), (37, 508), (91, 366), (299, 687),
(154, 469), (241, 519), (166, 364),
(753, 771), (345, 414), (276, 822), (504, 967), (15, 402), (318, 543), (295, 922), (480, 563), (35, 1000),
(544, 791), (565, 584), (268, 956),
(39, 703), (404, 466), (122, 738), (354, 413), (52, 507), (21, 279), (35, 757), (260, 697), (595, 696),
(719, 794), (296, 951), (702, 780),
(212, 780), (528, 541), (348, 395), (854, 995), (213, 256), (118, 579), (337, 824), (965, 991), (248, 946),
(600, 690), (545, 737), (697, 896),
(166, 361), (888, 938), (156, 921), (338, 866), (765, 839), (9, 598), (52, 835), (159, 578), (352, 975),
(298, 371), (300, 640), (330, 506),
(413, 801), (441, 615), (1, 174), (144, 807), (44, 582), (128, 452), (349, 951), (9, 971), (491, 738),
(931, 969), (680, 838), (678, 797),
(263, 765), (192, 626), (71, 562), (434, 958), (285, 972), (178, 850), (173, 542), (64, 674), (161, 654),
(112, 593), (516, 964), (647, 815),
(556, 619), (393, 772), (272, 445), (291, 318), (13, 438), (797, 839), (287, 735), (264, 968)],
'k': 100,
'res': 561,
'sol': [10, 15, 18, 27, 44, 45, 47, 48, 68, 74, 75, 76, 77, 84, 85, 119, 126, 130, 146, 149, 202, 206, 219, 224,
239, 248, 273, 275, 292, 293, 298, 317,
341, 343, 352, 357, 359, 365, 366, 388, 405, 412, 428, 437, 443, 452, 456, 459, 467, 471, 476, 490, 497,
502, 508, 520, 521, 522, 528, 551, 561,
569, 581, 596, 603, 636, 640, 643, 653, 667, 675, 677, 680, 713, 717, 725, 727, 741, 756, 793, 800, 805,
807, 817, 824, 828, 836, 846, 862, 864,
883, 894, 895, 896, 928, 952, 958, 962, 975, 985]
}
# gen_test()
]
def runtests(f):
    """Run every case in the module-level `tests` list against solver `f`.

    `f` receives (A, k) -- a list of (start, end) intervals and the number
    of intervals to select -- and must return a list of k indices into A.
    A result passes when the selected intervals' common intersection has
    the expected length t['res'] (any index set achieving it is accepted;
    t['sol'] is shown only as an example).  Progress is printed in Polish.
    """
    ok = True
    problems_count = 0
    # iterate a shallow copy so a misbehaving f cannot disturb the test list
    for t in tests[:]:
        A = t['A']
        k = t['k']
        r = t['res']  # expected intersection length
        s = t['sol']  # one known-good index set (display only)
        print("-------------------")
        if len(A) < 20:
            # small case: show the full input and expected solution
            print("len(A) =", len(A))
            print("A :", A)
            print("k :", k)
            print("oczekiwana dlugosc przeciecia :", r)
            print("przykladowe rozwiazanie :", s)
        else:
            # large case: show only prefixes to keep the output readable
            print("len(A) =", len(A))
            print("A : <<prefiks>>: ", A[:10], "...")
            print("k :", k)
            print("oczekiwana dlugosc przeciecia :", r)
            print("przykladowe rozwiazanie : <<prefiks>>", s[:5], "...")
        # pass a copy so f may mutate its argument freely
        SOL = f(A.copy(), k)
        if len(A) < 20:
            print("uzyskane rozwiaznie :", SOL)
        else:
            print("uzyskane rozwiaznie : <<prefiks>>", SOL[:5], "...")
        if len(SOL) != k:
            # wrong solution size: count it and move on to the next case
            print("Problem! Niezgodna dlugosc rozwiazania")
            ok = False
            problems_count += 1
            continue
        # compute the intersection of the chosen intervals:
        # a = max of starts, b = min of ends
        (a, b) = A[SOL[0]]
        for i in range(1, k):
            a = max(a, A[SOL[i]][0])
            b = min(b, A[SOL[i]][1])
        RES = b - a
        print("uzyskana dlugosc przeciecia :", RES)
        if RES != r:
            print("Problem! Bledny wynik")
            ok = False
            problems_count += 1
            continue
        print("OK")
    print("===============================")
    if ok:
        print('Wszystko OK!')
    else:
        print(f'PROBLEMY! Jest ich {problems_count}!')
|
<reponame>lutzkuen/statarb
#!/usr/bin/env python
import numpy as np
import pandas as pd
import gc
from scipy import stats
from pandas.stats.api import ols
from pandas.stats import moments
from lmfit import minimize, Parameters, Parameter, report_errors
from collections import defaultdict
from util import *
# Industry factor codes; used as dummy-exposure columns in the regressions below.
INDUSTRIES = ['CONTAINR', 'HLTHSVCS', 'SPLTYRET', 'SPTYSTOR', 'DIVFIN', 'GASUTIL', 'BIOLIFE', 'SPTYCHEM', 'ALUMSTEL', 'AERODEF', 'COMMEQP', 'HOUSEDUR', 'CHEM', 'LEISPROD', 'AUTO', 'CONGLOM', 'HOMEBLDG', 'CNSTENG', 'LEISSVCS', 'OILGSCON', 'MEDIA', 'FOODPROD', 'PSNLPROD', 'OILGSDRL', 'SOFTWARE', 'BANKS', 'RESTAUR', 'FOODRET', 'ROADRAIL', 'APPAREL', 'INTERNET', 'NETRET', 'PAPER', 'WIRELESS', 'PHARMA', 'MGDHLTH', 'CNSTMACH', 'OILGSEQP', 'REALEST', 'COMPELEC', 'BLDGPROD', 'TRADECO', 'MULTUTIL', 'CNSTMATL', 'HLTHEQP', 'PRECMTLS', 'INDMACH', 'TRANSPRT', 'SEMIEQP', 'TELECOM', 'OILGSEXP', 'INSURNCE', 'AIRLINES', 'SEMICOND', 'ELECEQP', 'ELECUTIL', 'LIFEINS', 'COMSVCS', 'DISTRIB']
# Barra style/country risk-model factors.
BARRA_FACTORS = ['country', 'growth', 'size', 'sizenl', 'divyild', 'btop', 'earnyild', 'beta', 'resvol', 'betanl', 'momentum', 'leverage', 'liquidty']
# Proprietary factors; the *_z columns are created by create_z_score() in calc_factors().
PROP_FACTORS = ['srisk_pct_z', 'rating_mean_z']
ALL_FACTORS = BARRA_FACTORS + INDUSTRIES + PROP_FACTORS
def calc_vol_profiles(full_df):
    """Fill trailing 21-day dollar-volume median/std per 15-min timeslice.

    Adds 'dpvolume' (dollar volume = dvolume * dvwap) plus the trailing
    statistics 'dpvolume_med_21' / 'dpvolume_std_21', computed per sid for
    each intraday timeslice independently.  Mutates and returns full_df.
    NOTE(review): Python 2 / legacy pandas code (print statements,
    pd.rolling_*, DataFrame.ix) -- it will not run on modern stacks.
    """
    full_df['dpvolume_med_21'] = np.nan
    full_df['dpvolume_std_21'] = np.nan
    full_df['dpvolume'] = full_df['dvolume'] * full_df['dvwap']
    print "Calculating trailing volume profile..."
    for timeslice in ['09:45', '10:00', '10:15', '10:30', '10:45', '11:00', '11:15', '11:30', '11:45', '12:00', '12:15', '12:30', '12:45', '13:00', '13:15', '13:30', '13:45', '14:00', '14:15', '14:30', '14:45', '15:00', '15:15', '15:30', '15:45', '16:00' ]:
        timeslice_df = full_df[ ['dpvolume', 'tradable_med_volume_21', 'close'] ]
        # unstack so the time index is flat, slice out this timeslice only
        timeslice_df = timeslice_df.unstack().between_time(timeslice, timeslice).stack()
        timeslice_df = timeslice_df.dropna()
        if len(timeslice_df) == 0: continue
        # x.shift(1) excludes the current observation from its own trailing window
        timeslice_df['dpvolume_med_21'] = timeslice_df['dpvolume'].groupby(level='sid').apply(lambda x: pd.rolling_median(x.shift(1), 21))
        timeslice_df['dpvolume_std_21'] = timeslice_df['dpvolume'].groupby(level='sid').apply(lambda x: pd.rolling_std(x.shift(1), 21))
        m_df = timeslice_df.dropna()
        print m_df.head()
        print "Average dvol frac at {}: {}".format(timeslice, (m_df['dpvolume_med_21'] / (m_df['tradable_med_volume_21'] * m_df['close'])).mean())
        # write the slice's stats back into the full frame at the same rows
        full_df.ix[ timeslice_df.index, 'dpvolume_med_21'] = timeslice_df['dpvolume_med_21']
        full_df.ix[ timeslice_df.index, 'dpvolume_std_21'] = timeslice_df['dpvolume_std_21']
    return full_df
def calc_price_extras(daily_df):
    """Derive volatility/volume ratio columns on daily_df (mutates in place).

    Adds:
      volat_ratio  -- 21-day / 60-day volatility
      volume_ratio -- tradable volume relative to composite volume
      volat_move   -- day-over-day change in 21-day volatility
    Returns the mutated frame.
    """
    daily_df['volat_ratio'] = daily_df['volat_21'] / daily_df['volat_60']
    # NOTE(review): the original assigned 'volume_ratio' twice back to back
    # (first tradable_volume/shares_out, then tradable_volume/comp_volume),
    # so the first write was dead and has been removed.  It was probably
    # meant to be a separate column (e.g. turnover) -- confirm intent.
    daily_df['volume_ratio'] = daily_df['tradable_volume'] / daily_df['comp_volume']
    daily_df['volat_move'] = daily_df['volat_21'].diff()
    return daily_df
def calc_forward_returns(daily_df, horizon):
    """Forward cumulative log-returns per sid for 1..horizon days ahead.

    For each ii, 'cum_ret<ii>' holds the sum of the next ii daily log
    returns, aligned to the current date via the -ii shift.  Returns a new
    DataFrame on daily_df's index.  NOTE(review): Python 2 / legacy pandas
    (print statement, pd.rolling_sum).
    """
    print "Calculating forward returns..."
    results_df = pd.DataFrame( index=daily_df.index )
    for ii in range(1, horizon+1):
        retname = 'cum_ret'+str(ii)
        # trailing ii-day sum of log returns, computed independently per sid
        cum_rets = daily_df['log_ret'].groupby(level='sid').apply(lambda x: pd.rolling_sum(x, ii))
        # shift backwards ii rows in date space to turn trailing into forward
        shift_df = cum_rets.unstack().shift(-ii).stack()
        results_df[retname] = shift_df
    return results_df
def winsorize(data, std_level=5):
    """Clip `data` to within `std_level` standard deviations of its mean.

    Returns a clipped copy; the input is left untouched.
    """
    clipped = data.copy()
    half_width = clipped.std() * std_level
    center = clipped.mean()
    upper = center + half_width
    lower = center - half_width
    clipped[clipped > upper] = upper
    clipped[clipped < lower] = lower
    return clipped
def winsorize_by_date(data):
    """Winsorize `data` cross-sectionally within each 'date' index level."""
    print "Winsorizing by day..."
    return data.groupby(level='date', sort=False).transform(winsorize)
def winsorize_by_ts(data):
    """Winsorize `data` within each 'iclose_ts' (intraday timestamp) group.

    Fix: the progress message said "by day" (copy/paste from
    winsorize_by_date) although the grouping is per timestamp.  The
    single-argument parenthesized print prints identically under Python 2.
    """
    print("Winsorizing by timestamp...")
    return data.groupby(level='iclose_ts', sort=False).transform(winsorize)
def winsorize_by_group(data, group):
    """Winsorize `data` within each group defined by the `group` key.

    Fix: the progress message said "by day" (copy/paste from
    winsorize_by_date) although the grouping is by `group`.  The
    single-argument parenthesized print prints identically under Python 2.
    """
    print("Winsorizing by group...")
    return data.groupby([group], sort=False).transform(winsorize)
def rolling_ew_corr_pairwise(df, halflife):
    """Exponentially-weighted rolling correlation for every column pair of df.

    Builds a dict-of-dicts {left_col: {right_col: ewm corr series}} and wraps
    it in a Panel with axes swapped so item/major ordering matches callers.
    NOTE(review): legacy pandas only -- iteritems(), pd.stats.moments and
    pd.Panel have all been removed from modern pandas.
    """
    all_results = {}
    for left_col, left in df.iteritems():
        all_results[left_col] = col_results = {}
        for right_col, right in df.iteritems():
            # span derived from halflife; same conversion used elsewhere in this file
            col_results[right_col] = moments.ewmcorr(left, right, span=(halflife-1)/2.0)
    ret = pd.Panel(all_results)
    ret = ret.swapaxes(0,1, copy=False)
    return ret
def push_data(df, col):
    """Attach each (date, sid)'s next-period value of `col` as '<col>_n'.

    Careful: this pulls *future* rows back onto the current row (shift of
    -1 in date space), so it can leak look-ahead information if misused.
    """
    pushed = df[[col]].unstack(level='sid').shift(-1).stack()
    return pd.merge(df, pushed, left_index=True, right_index=True,
                    sort=True, suffixes=['', '_n'])
def lag_data(daily_df):
    """Join each row with the PREVIOUS date's values (suffix '_y').

    Rows on the first date have no lag and are dropped by the default
    inner join.
    """
    lagged = daily_df.unstack(level=-1).shift(1).stack()
    return pd.merge(daily_df, lagged, left_index=True, right_index=True,
                    sort=True, suffixes=['', '_y'])
def calc_med_price_corr(daily_df):
    """Placeholder: median price correlation is not implemented.

    Currently returns None; callers must not rely on this until filled in.
    """
    pass
def calc_resid_vol(daily_df):
    """Rolling 20-day residual volatility from Barra residual returns.

    Stores sqrt of the rolling variance of 'barraResidRet' in
    daily_df['barraResidVol'] and returns that column.
    """
    lookback = 20
    # pd.rolling_var was removed in pandas 1.0; Series.rolling(...).var()
    # computes the same (ddof=1) rolling variance.
    daily_df['barraResidVol'] = np.sqrt(
        daily_df['barraResidRet'].rolling(lookback).var())
    return daily_df['barraResidVol']
def calc_factor_vol(factor_df):
    """Exponentially-weighted covariance series for every factor pair.

    Returns a dict mapping (factor1, factor2) -> ewm covariance of the two
    factors' 'ret' series, for all ordered pairs drawn from ALL_FACTORS.
    """
    halflife = 20.0
    span = (halflife - 1) / 2.0
    covs = dict()
    for fac_a in ALL_FACTORS:
        for fac_b in ALL_FACTORS:
            key = (fac_a, fac_b)
            if key in covs:
                continue
            covs[key] = moments.ewmcov(factor_df.xs(fac_a, level=1)['ret'],
                                       factor_df.xs(fac_b, level=1)['ret'],
                                       span=span)
    return covs
weights_df = None
def create_z_score(daily_df, name):
    """Add a cross-sectional z-score column '<name>_z', grouped by 'gdate'.

    Mutates and returns daily_df.
    """
    standardized = daily_df[[name, 'gdate']].groupby(['gdate'], sort=True).transform(
        lambda grp: (grp - grp.mean()) / grp.std())
    daily_df[name + "_z"] = standardized[name]
    return daily_df
def calc_factors(daily_df, barraOnly=False):
    """Run a daily cross-sectional factor regression.

    For each date, regresses log returns on the factor loadings
    (Barra + industry factors, plus z-scored extras unless barraOnly),
    weighted by log market cap via the module-level global weights_df.
    Stores residual returns in daily_df['barraResidRet'] and returns
    (daily_df, factorRets_df), the latter indexed by (date, factor).

    NOTE(review): uses the legacy .ix indexer (removed in pandas 1.0) and
    Python-2 print statements; runs only on the old stack this file targets.
    """
    print "Calculating factors..."
    allreturns_df = pd.DataFrame(columns=['barraResidRet'], index=daily_df.index)
    if barraOnly:
        factors = BARRA_FACTORS + INDUSTRIES
    else:
        # Cross-sectionally z-score the non-Barra inputs before regressing.
        daily_df = create_z_score(daily_df, 'srisk_pct')
        daily_df = create_z_score(daily_df, 'rating_mean')
        factors = ALL_FACTORS
    print "Total len: {}".format(len(daily_df))
    cnt = 0
    cnt1 = 0
    factorrets = list()
    for name, group in daily_df.groupby(level='date'):
        print "Regressing {}".format(name)
        cnt1 += len(group)
        print "Size: {} {}".format(len(group), cnt1)
        loadings_df = group[ factors ]
        loadings_df = loadings_df.reset_index().fillna(0)
        del loadings_df['sid']
        del loadings_df['date']
        returns_df = group['log_ret'].fillna(0)
        # fcn2min reads weights_df as a module-level global: diagonal matrix
        # of log market caps for this date's cross-section.
        global weights_df
        weights_df = np.log(group['capitalization']).fillna(0)
        weights_df = pd.DataFrame( np.diag(weights_df) )
        # Cap-weight of each industry; factorize uses these to constrain
        # the country factor.
        indwgt = dict()
        capsum = (group['capitalization'] / 1e6).sum()
        for ind in INDUSTRIES:
            indwgt[ind] = (group[ group['indname1'] == ind]['capitalization'] / 1e6).sum() / capsum
        fRets, residRets = factorize(loadings_df, returns_df, weights_df, indwgt)
        print "Factor Returns:"
        cnt += len(residRets)
        print "Running tally: {}".format(cnt)
        fdf = pd.DataFrame([ [i,v] for i, v in fRets.items() ], columns=['factor', 'ret'])
        fdf['date'] = name
        factorrets.append( fdf )
        allreturns_df.ix[ group.index, 'barraResidRet'] = residRets
        # Drop references and collect: the per-date frames are large.
        fRets = residRets = None
        gc.collect()
    factorRets_df = pd.concat(factorrets).set_index(['date', 'factor']).fillna(0)
    print "Final len {}".format(len(allreturns_df))
    daily_df['barraResidRet'] = allreturns_df['barraResidRet']
    return daily_df, factorRets_df
def calc_intra_factors(intra_df, barraOnly=False):
    """Run the cross-sectional factor regression at intraday timestamps.

    Same procedure as calc_factors, but grouped by 'iclose_ts' and using
    the overnight return plus the intraday return from the open to iclose
    as the dependent variable. Stores residuals in
    intra_df['barraResidRetI'] and returns (intra_df, factorRets_df),
    the latter indexed by (iclose_ts, factor).

    NOTE(review): uses the legacy .ix indexer (removed in pandas 1.0) and
    Python-2 print statements; runs only on the old stack this file targets.
    """
    print "Calculating intra factors..."
    allreturns_df = pd.DataFrame(columns=['barraResidRetI'], index=intra_df.index)
    if barraOnly:
        factors = BARRA_FACTORS + INDUSTRIES
    else:
        factors = ALL_FACTORS
    print "Total len: {}".format(len(intra_df))
    cnt = 0
    cnt1 = 0
    factorrets = list()
    for name, group in intra_df.groupby(level='iclose_ts'):
        print "Regressing {}".format(name)
        cnt1 += len(group)
        print "Size: {} {}".format(len(group), cnt1)
        loadings_df = group[ factors ]
        loadings_df = loadings_df.reset_index().fillna(0)
        del loadings_df['sid']
        del loadings_df['iclose_ts']
        # Overnight log return plus the intraday move from the open.
        returns_df = (group['overnight_log_ret'] + np.log(group['iclose'] / group['dopen'])).fillna(0)
        # fcn2min reads weights_df as a module-level global: diagonal matrix
        # of log market caps for this timestamp's cross-section.
        global weights_df
        weights_df = np.log(group['capitalization']).fillna(0)
        weights_df = pd.DataFrame( np.diag(weights_df) )
        # Cap-weight of each industry; factorize uses these to constrain
        # the country factor.
        indwgt = dict()
        capsum = (group['capitalization'] / 1e6).sum()
        for ind in INDUSTRIES:
            indwgt[ind] = (group[ group['indname1'] == ind]['capitalization'] / 1e6).sum() / capsum
        fRets, residRets = factorize(loadings_df, returns_df, weights_df, indwgt)
        print "Factor Returns:"
        print fRets
        cnt += len(residRets)
        print "Running tally: {}".format(cnt)
        fdf = pd.DataFrame([ [i,v] for i, v in fRets.items() ], columns=['factor', 'ret'])
        fdf['iclose_ts'] = name
        factorrets.append( fdf )
        allreturns_df.ix[ group.index, 'barraResidRetI'] = residRets
        # Drop references and collect: the per-timestamp frames are large.
        fRets = residRets = None
        gc.collect()
    factorRets_df = pd.concat(factorrets).set_index(['iclose_ts', 'factor']).fillna(0)
    print "Final len {}".format(len(allreturns_df))
    intra_df['barraResidRetI'] = allreturns_df['barraResidRetI']
    return intra_df, factorRets_df
def factorize(loadings_df, returns_df, weights_df, indwgt):
    """Solve one cross-sectional weighted regression via lmfit.minimize.

    Each loading column gets one parameter (its factor return); the
    'country' parameter is constrained to the cap-weighted sum of the
    industry factor returns. Returns (factor_returns_dict,
    residual_returns_array).

    NOTE(review): exits the whole process (exit(1)) when the fit fails;
    relies on lmfit's Parameters/minimize being imported at module level.
    """
    print "Factorizing..."
    params = Parameters()
    for colname in loadings_df.columns:
        expr = None
        if colname == 'country':
            # Constraint expression: country = sum_i indwgt[i] * industry_i.
            expr = "0"
            for ind in INDUSTRIES:
                expr += "+" + ind + "*" + str(indwgt[ind])
            print expr
        params.add(colname, value=0.0, expr=expr)
    print "Minimizing..."
    result = minimize(fcn2min, params, args=(loadings_df, returns_df))
    print "Result: "
    if not result.success:
        print "ERROR: failed fit"
        exit(1)
    fRets_d = dict()
    for param in params:
        val = params[param].value
        error = params[param].stderr
        fRets_d[param] = val
        # Flag parameters whose 2-sigma interval straddles zero.
        upper = val + error * 2
        lower = val - error * 2
        if upper * lower < 0:
            print "{} not significant: {}, {}".format(param, val, error)
    print "SEAN"
    print result
    print result.residual
    print result.message
    print result.lmdif_message
    print result.nfev
    print result.ndata
    residRets_na = result.residual
    return fRets_d, residRets_na
def fcn2min(params, x, data):
    """Residual function for lmfit.minimize used by factorize().

    params: lmfit Parameters, one entry per loading column (factor return).
    x: factor loadings matrix, shape (n_assets, n_factors).
    data: realized returns with a .values array of length n_assets.

    Returns the cap-weighted residual vector (model - data) as float64,
    using the module-level diagonal weight matrix `weights_df`.
    """
    # Stack the current parameter values into an (n_factors, 1) column.
    vals = [params[name].value for name in params]
    f = np.array(vals)
    f.shape = (len(params), 1)
    # Predicted returns under the current factor-return estimates.
    model = np.dot(x, f)
    global weights_df
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0;
    # .to_numpy() is the drop-in replacement.
    cap_sq = weights_df.to_numpy()
    # (model - data.values) broadcasts to (n, n); multiplying by the
    # diagonal weight matrix and taking the diagonal yields the weighted
    # per-asset residuals, as in the original.
    ret = np.multiply((model - data.values), cap_sq)
    ret = ret.diagonal()
    ret.shape = (ret.shape[0], )
    # Ensure float64 for the optimizer; upstream dtypes are not guaranteed.
    ret = ret.astype(np.float64, copy=False)
    return ret
def mkt_ret(group):
    """Cap-weighted average of 'cum_ret1' over the group (weights in $MM)."""
    weights = group['mkt_cap'] / 1e6
    return (group['cum_ret1'] * weights).sum() / weights.sum()
|
<reponame>kmeister/ML_Benchmark
from multiprocessing import Pool
from multiprocessing import Manager
import subprocess
class AsyncTask:
    """A shell command to be run in a worker, reporting output via a queue."""

    def __init__(self, name, command, queue):
        self.command = command
        self.queue = queue
        self.name = name

    def execute(self):
        """Run the command and put its combined stdout/stderr on the queue.

        BUG FIX: the original caught all exceptions and queued str(e), which
        discarded the output captured from a failing command even though
        stderr was redirected specifically to capture it. A non-zero exit
        now queues the captured output; other failures queue the exception
        text as before.
        """
        print(f"Starting Task: {self.name}")
        try:
            result = subprocess.check_output(self.command.split(), stderr=subprocess.STDOUT)
            self.queue.put(result.decode())
        except subprocess.CalledProcessError as e:
            # The command ran but exited non-zero; report what it printed.
            self.queue.put(e.output.decode())
        except Exception as e:
            # The command could not be started at all (e.g. not found).
            self.queue.put(str(e))
        print(f"Finished Task: {self.name}")
class BenchmarkCommandBuilder:
    """Fluent builder for a gem5 SE-mode benchmark command line.

    Cache sizes (_l1i_size, _l1d_size, _l2_size) are in kB; build() appends
    the 'kB' suffix itself. All setters return self for chaining.
    """

    def __init__(self):
        self._stats_filename = "RISCV.txt"
        self._config_filename = "CONFIG.ini"
        self._l1i_size = 32
        self._l1i_assoc = 4
        self._l1d_size = 32
        self._l1d_assoc = 4
        self._cacheline_size = 64
        self._l2cache = False
        self._l2_size = 1024
        self._l2_assoc = 8
        self._cpu_type = "DerivO3CPU"
        self._maxinsts = 100000000
        self._benchmark_path = "../ML_Benchmark/Benchmarks/mlbench"

    # Fluent setters: each stores its value and returns self for chaining.
    def set_l2cache(self, value: bool):
        self._l2cache = value
        return self

    def set_stats_filename(self, value):
        self._stats_filename = value
        return self

    def set_config_filename(self, value):
        self._config_filename = value
        return self

    def set_l1i_size(self, value):
        self._l1i_size = value
        return self

    def set_l1i_assoc(self, value):
        self._l1i_assoc = value
        return self

    def set_l1d_size(self, value):
        self._l1d_size = value
        return self

    def set_l1d_assoc(self, value):
        self._l1d_assoc = value
        return self

    def set_cacheline_size(self, value):
        self._cacheline_size = value
        return self

    def set_l2_size(self, value):
        self._l2_size = value
        return self

    def set_l2_assoc(self, value):
        self._l2_assoc = value
        return self

    def set_cpu_type(self, value):
        self._cpu_type = value
        return self

    def set_maxinsts(self, value):
        self._maxinsts = value
        return self

    def set_benchmark_path(self, value):
        self._benchmark_path = value
        return self

    def build(self):
        """Assemble and return the gem5 command string.

        Local renamed from `str` to avoid shadowing the builtin.
        """
        cmd = "build/RISCV/gem5.opt "
        cmd += f"--stats-file={self._stats_filename:s} "
        cmd += f"--dump-config={self._config_filename:s} "
        cmd += "configs/example/se.py "
        cmd += f"-c {self._benchmark_path} "
        cmd += "--caches "
        cmd += f"--l1i_size={self._l1i_size}kB "
        # BUG FIX: the original interpolated self._l1d_assoc here, so
        # set_l1i_assoc() never affected the generated command.
        cmd += f"--l1i_assoc={self._l1i_assoc} "
        cmd += f"--l1d_size={self._l1d_size}kB "
        cmd += f"--l1d_assoc={self._l1d_assoc} "
        cmd += f"--cacheline_size={self._cacheline_size} "
        if self._l2cache:
            cmd += "--l2cache "
            cmd += f"--l2_size={self._l2_size}kB "
            cmd += f"--l2_assoc={self._l2_assoc} "
        cmd += "--cpu-clock=1.6GHz "
        cmd += f"--cpu-type={self._cpu_type} "
        cmd += " -n 1"
        cmd += f" --maxinsts={self._maxinsts} "
        return cmd
def run_commands_async(commands, max_processes=2):
    """Run shell commands in a process pool, then print each one's output.

    Each command string doubles as the task's display name. Output strings
    are collected through a Manager queue and drained after all tasks finish.
    """
    manager = Manager()
    result_queue = manager.Queue()
    tasks = [AsyncTask(command, command, result_queue) for command in commands]
    # BUG FIX: the original never called close()/join(), leaking the pool's
    # worker processes; the context manager terminates them on exit.
    with Pool(max_processes) as pool:
        pool.map(AsyncTask.execute, tasks)
    while not result_queue.empty():
        print(result_queue.get())
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from tm import TuringMachine
from tmbuilder import TuringMachineBuilder
import re
import sys
import logging
class TuringMachineParser:
    """
    Parses Turing Machine descriptions into a TuringMachineBuilder.

    The allowed expressions are:
        - empty line
        - comment line: '% any text that comes after it is ignored'
        - initial state: 'INITIAL <state>'
        - blank symbol: 'BLANK <symbol>'
        - final state: 'FINAL <state>'
        - halt state: 'HALT <state>'
        - transition: '<state>, <symbol> -> <new_state>, <new_symbol>, <movement>'

    It is not possible to add comments at the end of any line; comments
    must be on a standalone line.
    """

    # Movement symbols accepted in transition expressions.
    MOVE_RIGHT = '>'
    MOVE_LEFT = '<'
    NON_MOVEMENT = '_'

    def __init__(self):
        self._builder = TuringMachineBuilder()
        # Regular expressions (raw strings avoid invalid-escape warnings).
        self._comment_line_re = re.compile(r'[ ]*%\s*')
        self._blank_symbol_re = re.compile(r'[\s]*BLANK[\s]+(?P<symbol>.)\s*$')
        self._halt_state_re = re.compile(r'[ ]*HALT[ ]+(?P<state>\w+)\s*$')
        self._final_state_re = re.compile(r'[ ]*FINAL[ ]+(?P<state>\w+)\s*$')
        # BUG FIX: the original pattern used (?P<state>\w), which matched
        # only single-character initial states; \w+ accepts names like 'q0'
        # consistently with the other state patterns. (Attribute name typo
        # '_inital_state_re' also fixed; it is private.)
        self._initial_state_re = re.compile(r'[ ]*INITIAL[ ]+(?P<state>\w+)\s*$')
        self._transition_re = re.compile(r'\s*(?P<state>\w+)\s*,\s*'
                                         r'(?P<symbol>.)\s*->\s*'
                                         r'(?P<nstate>\w+)\s*,\s*'
                                         r'(?P<nsymbol>.)\s*,\s*'
                                         r'(?P<movement>[%s%s%s])\s*$' %
                                         (TuringMachineParser.MOVE_LEFT,
                                          TuringMachineParser.MOVE_RIGHT,
                                          TuringMachineParser.NON_MOVEMENT))

    def clean(self):
        """Discard all previously parsed data."""
        self._builder.clean()

    def parseString(self, string_data):
        """Parse a whole description string, adding it to the builder.

        Raises an Exception if the given data is not a string.
        """
        if not isinstance(string_data, str):
            raise Exception('Expected an string')
        self._parse(string_data.splitlines())

    def parseLine(self, data):
        """Parse one line of data.

        Parsers are tried in rough order of expected frequency: transition,
        comment, final state, initial state, blank symbol, halt state.
        Raises an Exception on unrecognized input.
        """
        parsers = (self._parseTransition, self._parseComment,
                   self._parseFinalState, self._parseInitialState,
                   self._parseBlankSymbol, self._parseHaltState)
        for parser in parsers:
            if parser(data):
                return
        raise Exception('Unrecognized pattern: %s' % data)

    def create(self):
        """Attempt to build a Turing Machine from the parsed data.

        Can raise any of the TuringMachineBuilder and TuringMachine
        exceptions.
        """
        return self._builder.create()

    def _parseComment(self, data):
        """Return True if the given data is a comment expression."""
        return self._comment_line_re.match(data) is not None

    def _parseBlankSymbol(self, data):
        """Return True if the data is a blank-symbol declaration.

        Raises an Exception if the blank symbol was already defined.
        """
        mbs = self._blank_symbol_re.match(data)
        if mbs:
            if self._builder.hasBlankSymbol():
                raise Exception('Blank symbol can only be defined once')
            self._builder.setBlankSymbol(mbs.group('symbol'))
            return True
        return False

    def _parseHaltState(self, data):
        """Return True if the data is a halt-state declaration.

        Raises an Exception if the halt state was already defined or if
        the builder fails when setting the halt state.
        """
        mhs = self._halt_state_re.match(data)
        if mhs:
            if self._builder.hasHaltState():
                raise Exception('Halt state can only be defined once')
            self._builder.setHaltState(mhs.group('state'))
            return True
        return False

    def _parseFinalState(self, data):
        """Return True if the data is a final-state declaration."""
        mfs = self._final_state_re.match(data)
        if mfs:
            self._builder.addFinalState(mfs.group('state'))
            return True
        return False

    def _parseInitialState(self, data):
        """Return True if the data is an initial-state declaration.

        Raises an Exception if the initial state was already defined.
        """
        mis = self._initial_state_re.match(data)
        if mis:
            if self._builder.hasInitialState():
                raise Exception('Initial state can only be defined once')
            self._builder.setInitialState(mis.group('state'))
            return True
        return False

    def _parseTransition(self, data):
        """Return True if the data is a transition declaration."""
        mt = self._transition_re.match(data)
        if mt:
            # Map the parser's movement symbol to the TuringMachine's.
            move_sym = mt.group('movement')
            move = TuringMachine.NON_MOVEMENT
            if move_sym == TuringMachineParser.MOVE_LEFT:
                move = TuringMachine.MOVE_LEFT
            elif move_sym == TuringMachineParser.MOVE_RIGHT:
                move = TuringMachine.MOVE_RIGHT
            self._builder.addTransition(mt.group('state'),
                                        mt.group('symbol'),
                                        mt.group('nstate'),
                                        mt.group('nsymbol'),
                                        move)
            return True
        return False

    def _parse(self, parse_data):
        """Parse an iterable of lines, re-raising errors with line numbers.

        NOTE(review): configuring root logging on every parse is kept from
        the original, although nothing in this class currently logs.
        """
        logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
        for line, data in enumerate(parse_data):
            if not data:
                continue
            try:
                self.parseLine(data)
            except Exception as e:
                # BUG FIX: Exception.message does not exist in Python 3;
                # format the exception itself instead.
                raise Exception('Line %d, %s' % (line + 1, e))
#
# Test
# Smoke test: parse a small machine description and print the result.
if __name__ == '__main__':
    parser = TuringMachineParser()
    test_str = '% Start with a comment line\n' \
                ' % Another comment line\n' \
                'HALT HALT\n' \
                'BLANK #\n' \
                'INITIAL 1\n' \
                'FINAL 2\n' \
                '1, 0 -> 2, 1, >\n' \
                '1, 1 -> 2, 0, > \n' \
                '2, 0 -> 1, 0, _\n' \
                ' 2,1->3,1,>\n '\
                '3, 0 -> HALT, 0, _\n' \
                '3, 1 -> HALT, 1, _\n' \
                '3, # -> HALT, #, _\n'
    parser.parseString(test_str)
    tm = parser.create()
    # BUG FIX: print() function call (was a Python-2-only print statement).
    print(tm)
|
<reponame>Wentaobi/OpenCDA<gh_stars>0
# -*- coding: utf-8 -*-
"""
Use Extended Kalman Filter on GPS + IMU for better localization.
"""
# Author: <NAME> <<EMAIL>>, credit to <NAME> <<EMAIL>>
# License: MIT
import math
import numpy as np
class ExtentedKalmanFilter(object):
"""
Kalman Filter implementation for gps + imu
"""
def __init__(self, dt):
"""
Construct class
Args:
dt(float): unit time step for simulation.
"""
self.Q = np.diag([
0.2, # variance of location on x-axis
0.2, # variance of location on y-axis
np.deg2rad(0.1), # variance of yaw angle
0.001 # variance of velocity
]) ** 2 # predict state covariance
self.R = np.diag([0.5, 0.5, 0.2]) ** 2 # Observation x,y position covariance
self.time_step = dt
self.xEst = np.zeros((4, 1))
self.PEst = np.eye(4)
def motion_model(self, x, u):
"""
Predict current position and yaw based on previous result.
X = F * X_prev + B * u
Args:
x (np.array): [x_prev, y_prev, yaw_prev, v_prev], shape: (4, 1).
u (np.array): [v_current, imu_yaw_rate], shape:(2, 1).
Returns:
np.array: predicted state.
"""
F = np.array([[1.0, 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, 0]])
B = np.array([[self.time_step * math.cos(x[2, 0]), 0],
[self.time_step * math.sin(x[2, 0]), 0],
[0.0, self.time_step],
[1.0, 0.0]])
x = F @ x + B @ u
return x
def jacob_f(self, x, u):
"""
Jacobian of Motion Model motion model
x_{t+1} = x_t+v*dt*cos(yaw)
y_{t+1} = y_t+v*dt*sin(yaw)
yaw_{t+1} = yaw_t+omega*dt
v_{t+1} = v{t}
so
dx/dyaw = -v*dt*sin(yaw)
dx/dv = dt*cos(yaw)
dy/dyaw = v*dt*cos(yaw)
dy/dv = dt*sin(yaw)
"""
yaw = x[2, 0]
v = u[0, 0]
jF = np.array([
[1.0, 0.0, -self.time_step * v * math.sin(yaw), self.time_step * math.cos(yaw)],
[0.0, 1.0, self.time_step * v * math.cos(yaw), self.time_step * math.sin(yaw)],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
return jF
def observation_model(self, x):
"""
Project the state matrix to sensor measurement matrix.
Args:
x (np.array): [x, y, yaw, v], shape: (4. 1).
Returns:
np.array: predicted measurement.
"""
H = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]
])
z = H @ x
return z
def run_step_init(self, x, y, heading, velocity):
"""
Initalization for states
:param x:
:param y:
:param heading:
:param velocity:
:return:
"""
self.xEst[0] = x
self.xEst[1] = y
self.xEst[2] = heading
self.xEst[3] = velocity
def run_step(self, x, y, heading, velocity, yaw_rate_imu):
"""
Apply EKF on current measurement and previous prediction
:param x: x(esu) coordinate from gnss sensor at current timestamp
:param y: y(esu) coordinate from gnss sensor at current timestamp
:param heading: heading direction at current timestamp
:param velocity: current speed
:param yaw_rate_imu: yaw rate rad/s from IMU sensor
:return: corrected x, y, heading, velocity
"""
# gps observation
z = np.array([x, y, heading]).reshape(3, 1)
# velocity and imu yaw rate
u = np.array([velocity, yaw_rate_imu]).reshape(2, 1)
# EKF starts
xPred = self.motion_model(self.xEst, u)
jF = self.jacob_f(self.xEst, u)
PPred = jF @ self.PEst @ jF.T + self.Q
# Jacobian of Observation Model
jH = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]])
zPred = self.observation_model(xPred)
y = z - zPred
S = jH @ PPred @ jH.T + self.R
K = PPred @ jH.T @ np.linalg.inv(S)
self.xEst = xPred + K @ y
self.PEst = (np.eye(len(self.xEst)) - K @ jH) @ PPred
return self.xEst[0][0], self.xEst[1][0], self.xEst[2][0], self.xEst[3][0]
|
import numpy as np
from vis.fields import DomainType, VisualizationField, ScalarField, VectorField
from vis.pythreejs_viewer import *
try:
from vis.offscreen_viewer import *
HAS_OFFSCREEN = True
except Exception as e:
print("WARNING: failed to load offscreen viewer:", e)
HAS_OFFSCREEN = False
import mesh_operations
class RawMesh():
    """Plain vertex/face/normal container usable as a visualization mesh."""

    def __init__(self, vertices, faces, normals=None, omitNormals=False):
        # Compute per-vertex normals unless the caller supplied them or
        # explicitly opted out.
        if (normals is None) and not omitNormals:
            normals = mesh_operations.getVertexNormalsRaw(vertices, faces)
        self.updateGeometry(vertices, faces, normals)

    def visualizationGeometry(self):
        """Return the (V, F, N) arrays expected by the viewers."""
        return self.vertices, self.faces, self.normals

    def updateGeometry(self, vertices, faces, normals):
        """Store geometry, converting to the dtypes the viewers expect."""
        self.vertices = np.array(vertices, dtype=np.float32)
        self.faces = np.array(faces, dtype=np.uint32)
        self.normals = None if normals is None else np.array(normals, dtype=np.float32)

    def visualizationField(self, data):
        """No decoding needed for per-entity fields on raw meshes."""
        return data
class TriMeshViewer(PythreejsViewerBase):
    """Interactive pythreejs viewer for triangle meshes.

    Accepts any mesh object providing visualizationGeometry(), or a plain
    (V, F) tuple which is wrapped in a RawMesh.
    """
    def __init__(self, trimesh, width=512, height=512, textureMap=None, scalarField=None, vectorField=None, superView=None, transparent=False, wireframe=False):
        if isinstance(trimesh, tuple): # accept (V, F) tuples as meshes, wrapping in a RawMesh
            trimesh = RawMesh(*trimesh)
        self.MeshConstructor = pythreejs.Mesh
        super().__init__(trimesh, width, height, textureMap, scalarField, vectorField, superView, transparent)
        if wireframe: self.showWireframe(True)
class LineMeshViewer(PythreejsViewerBase):
    """Pythreejs viewer rendering a mesh as line segments.

    Accepts a (V, E) tuple, wrapped in a normal-free RawMesh.
    """
    def __init__(self, linemesh, width=512, height=512, textureMap=None, scalarField=None, vectorField=None, superView=None):
        if (isinstance(linemesh, tuple)):
            linemesh = RawMesh(*linemesh, omitNormals=True)
        self.isLineMesh = True
        self.MeshConstructor = pythreejs.LineSegments
        super().__init__(linemesh, width, height, textureMap, scalarField, vectorField, superView)
def PointCloudMesh(points):
    """Wrap a point array in a RawMesh whose "faces" are the point indices."""
    indices = np.arange(points.shape[0], dtype=np.uint32)
    return RawMesh(points, indices, None, omitNormals=True)
class PointCloudViewer(PythreejsViewerBase):
    """Pythreejs viewer rendering a point set with pythreejs.Points."""
    def __init__(self, points, width=512, height=512, textureMap=None, scalarField=None, vectorField=None, superView=None):
        pcmesh = PointCloudMesh(points)
        self.isPointCloud = True
        self.MeshConstructor = pythreejs.Points
        super().__init__(pcmesh, width, height, textureMap, scalarField, vectorField, superView)
# Visualize a parametrization by animating the flattening and unflattening of the mesh to the plane.
class FlatteningAnimation:
    """Animate flattening a mesh to its UV parametrization and back.

    Uses pythreejs morph targets to blend between the 3D positions and the
    (zero-padded) UV positions, ping-ponging over the given duration.
    """
    # Duration in seconds
    def __init__(self, trimesh, uvs, width=512, height=512, duration=5, textureMap = None):
        self.viewer = TriMeshViewer(trimesh, width, height, textureMap)

        flatPosArray = None
        # Pad 2D UVs with a zero z-coordinate so they form 3D positions.
        if (uvs.shape[1] == 2): flatPosArray = np.array(np.pad(uvs, [(0, 0), (0, 1)], 'constant'), dtype=np.float32)
        else: flatPosArray = np.array(uvs, dtype=np.float32)
        flatPos = pythreejs.BufferAttribute(array=flatPosArray, normalized=False)
        # Flat configuration uses a constant +z normal for every vertex.
        flatNormals = pythreejs.BufferAttribute(array=np.repeat(np.array([[0, 0, 1]], dtype=np.float32), uvs.shape[0], axis=0), normalized=False)

        geom = self.viewer.currMesh.geometry
        mat = self.viewer.currMesh.material
        geom.morphAttributes = {'position': [flatPos,], 'normal': [flatNormals,]}

        # Both of these material settings are needed or else our target positions/normals are ignored!
        mat.morphTargets, mat.morphNormals = True, True

        flatteningMesh = pythreejs.Mesh(geometry=geom, material=mat)

        # Ease in/out: arcsin-spaced keyframe times give smooth endpoints.
        amplitude = np.linspace(-1, 1, 20, dtype=np.float32)
        times = (np.arcsin(amplitude) / np.pi + 0.5) * duration
        blendWeights = 0.5 * (amplitude + 1)
        track = pythreejs.NumberKeyframeTrack('name=.morphTargetInfluences[0]', times = times, values = blendWeights, interpolation='InterpolateSmooth')

        self.action = pythreejs.AnimationAction(pythreejs.AnimationMixer(flatteningMesh),
                                                pythreejs.AnimationClip(tracks=[track]),
                                                flatteningMesh, loop='LoopPingPong')

        self.viewer.meshes.children = [flatteningMesh]

        self.layout = ipywidgets.VBox()
        self.layout.children = [self.viewer.renderer, self.action]

    def show(self):
        """Return the composed widget layout (renderer + animation control)."""
        return self.layout

    def exportHTML(self, path):
        """Embed the widget layout in a standalone HTML file at `path`."""
        import ipywidget_embedder
        ipywidget_embedder.embed(path, self.layout)
# Render a quad/hex mesh
class QuadHexMeshWrapper:
    """Triangulate a quad or hex mesh for visualization.

    Quads are split into 2 triangles; hexahedra into 12 (2 per cube face,
    outward oriented). Averaged per-vertex normals are computed, and lookup
    tables are kept so per-vertex and per-element fields can be mapped onto
    the triangulated geometry.
    """

    def __init__(self, V, F):
        V = np.array(V, dtype=np.float32)
        F = np.array(F, dtype=np.uint32)
        if F.shape[1] == 4:
            # One outward face per quad -> 2 triangles per element.
            outwardFaces = [[0, 1, 2, 3]]
        elif F.shape[1] == 8:
            # 6 outward-oriented cube faces -> 12 triangles per element.
            outwardFaces = [[0, 3, 2, 1],
                            [0, 4, 7, 3],
                            [0, 1, 5, 4],
                            [4, 5, 6, 7],
                            [1, 2, 6, 5],
                            [3, 7, 6, 2]]
        else:
            raise Exception('Only quads and hexahedra are supported')
        trisPerElem = 2 * len(outwardFaces)
        # Triangulated faces: quad face [a, b, c, d] -> (a, b, c), (c, d, a).
        FT = np.empty((trisPerElem * F.shape[0], 3), dtype=F.dtype)
        outwardFaces = np.array(outwardFaces)
        for i, q in enumerate(outwardFaces):
            FT[2 * i    ::trisPerElem] = F[:, q[[0, 1, 2]]]
            FT[2 * i + 1::trisPerElem] = F[:, q[[2, 3, 0]]]
        # Per-triangle face normals.
        FN = np.cross(V[FT[:, 1]] - V[FT[:, 0]], V[FT[:, 2]] - V[FT[:, 0]])
        FN /= np.linalg.norm(FN, axis=1)[:, np.newaxis]
        # Average onto the vertices with uniform weights for now...
        N = np.zeros_like(V)
        np.add.at(N, FT, FN[:, np.newaxis, :])  # todo: incorporate weights?
        # Normalize, guarding for zero-vector normals which occur for
        # interior hex mesh vertices (vertices are not replicated per-face).
        norms = np.linalg.norm(N, axis=1)
        norms = np.where(norms > 1e-5, norms, 1.0)
        N /= norms[:, np.newaxis]
        self.numElems = F.shape[0]
        self.numVerts = V.shape[0]
        # Lookup tables mapping visualization vertices/triangles back to
        # their originating vertex/element.
        self.origVertForVert = np.arange(V.shape[0])
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        self.elementForTri = np.empty(FT.shape[0], dtype=int)
        eft = np.reshape(self.elementForTri, (F.shape[0], -1), order='C')
        eft[:, :] = np.arange(F.shape[0])[:, np.newaxis]
        self.V, self.F, self.N = V, FT, N

    def visualizationGeometry(self):
        """Return the triangulated (V, F, N)."""
        return self.V, self.F, self.N

    def visualizationField(self, data):
        """Map a per-vertex or per-element field onto the triangulation."""
        domainSize = data.shape[0]
        if domainSize == self.numVerts: return data[self.origVertForVert]
        if domainSize == self.numElems: return data[self.elementForTri]
        raise Exception('Unrecognized data size')
class QuadHexViewer(TriMeshViewer):
    """TriMeshViewer accepting quad/hex (V, F), triangulated via QuadHexMeshWrapper."""
    def __init__(self, V, F, *args, **kwargs):
        super().__init__(QuadHexMeshWrapper(V, F), *args, **kwargs)
# Offscreen versions of the viewers (where supported)
if HAS_OFFSCREEN:
    class OffscreenTriMeshViewer(OffscreenViewerBase):
        """Headless (offscreen) counterpart of TriMeshViewer."""
        def __init__(self, trimesh, width=512, height=512, textureMap=None, scalarField=None, vectorField=None, transparent=False, wireframe=False):
            if isinstance(trimesh, tuple): # accept (V, F) tuples as meshes, wrapping in a RawMesh
                trimesh = RawMesh(*trimesh)
            super().__init__(trimesh, width, height, textureMap, scalarField, vectorField, transparent)
            if wireframe: self.showWireframe(True)
    class OffscreenQuadHexViewer(OffscreenTriMeshViewer):
        """Headless counterpart of QuadHexViewer."""
        def __init__(self, V, F, *args, **kwargs):
            super().__init__(QuadHexMeshWrapper(V, F), *args, **kwargs)
def concatVisGeometries(A, B):
    """Concatenate two (V, F, N) triples into one.

    B's face indices are offset by A's vertex count so they keep pointing
    at the right vertices in the stacked arrays.
    """
    stackedV = np.vstack([A[0], B[0]])
    stackedF = np.vstack([A[1], B[1] + len(A[0])])
    stackedN = np.vstack([A[2], B[2]])
    return (stackedV, stackedF, stackedN)
def concatWithColors(A, colorA, B, colorB):
    """Concatenate two geometries and build a matching per-vertex color array."""
    colors = np.vstack([np.tile(colorA, [len(A[0]), 1]),
                        np.tile(colorB, [len(B[0]), 1])])
    return concatVisGeometries(A, B), colors
|
<filename>matting/alpha_matting.py
from .util import make_system, solve_cg
from .closed_form_laplacian import closed_form_laplacian
from .knn_laplacian import knn_laplacian
from .ichol import ichol, ichol_solve
from .lkm import make_lkm_operators
from .ifm_matting import ifm_system
from .vcycle import vcycle
import numpy as np
import scipy.sparse.linalg
# Supported matting methods (closed-form, KNN, large-kernel, information flow).
METHODS = ["cf", "knn", "lkm", "ifm"]
# Preconditioners that are valid for each method; lkm supports only
# None/jacobi (enforced again inside alpha_matting).
PRECONDITIONERS = {
    "cf": [None, "jacobi", "vcycle", "ichol"],
    "knn": [None, "jacobi", "vcycle", "ichol"],
    "ifm": [None, "jacobi", "vcycle"],
    "lkm": [None, "jacobi"],
}
def alpha_matting(
image,
trimap,
method="cf",
preconditioner="vcycle",
ichol_regularization=0.0,
ichol_threshold=1e-4,
lkm_radius=10,
lambd=100.0,
epsilon=1e-7,
max_iterations=2000,
relative_tolerance=None,
absolute_tolerance=None,
callback=None,
x0=None,
print_info=False,
):
# Propagates approximate alpha values of trimap into unknown regions
# based on image color.
# A system of linear equations is assembled and then solved with
# conjugate gradient descent.
# To accelerate convergence, an incomplete Cholesky preconditioner is
# used.
# The effectiveness of this preconditioner can be controlled with the
# "ichol_*" parameters.
#
# The information flow alpha matting method is provided for academic use only.
# If you use the information flow alpha matting method for an academic
# publication, please cite corresponding publications referenced in the
# description of each function, as well as this toolbox itself:
#
# @INPROCEEDINGS{ifm,
# author={<NAME>} Ozan and Pollefeys, Marc},
# booktitle={Proc. CVPR},
# title={Designing Effective Inter-Pixel Information Flow for Natural Image Matting},
# year={2017},
# }
#
# Closed form (cf) matting based on:
# <NAME>, <NAME>, and <NAME>.
# "A closed-form solution to natural image matting."
# IEEE transactions on pattern analysis and machine intelligence
# 30.2 (2008): 228-242.
#
# K-nearest neighbors (knn) matting based on:
# <NAME>, <NAME>, <NAME>.
# "KNN Matting."
# Conference on Computer Vision and Pattern Recognition (CVPR), June 2012.
#
# Large kernel matting (lkm) based on:
# <NAME>, <NAME>, and <NAME>.
# "Fast matting using large kernel matting laplacian matrices."
# Computer Vision and Pattern Recognition (CVPR),
# 2010 IEEE Conference on. IEEE, 2010.
#
# Information flow matting (ifm) based on:
# <NAME>, <NAME>, and <NAME>.
# "Designing effective inter-pixel information flow for natural image matting."
# Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2017.
#
# Vcycle matting based on:
# Lee, <NAME>., and <NAME>.
# "Scalable matting: A sub-linear approach."
# arXiv preprint arXiv:1404.3933 (2014).
#
# Parameters
# ----------
# image: ndarray, dtype float64, shape [height, width, 3]
# Values should be in range [0 - 1]
# trimap: ndarray, dtype float64, shape [height, width]
# Values should be
# 0 for background,
# 1 for foreground, and
# other for unknown.
# method: string
# Possible methods are:
# "cf"
# "knn"
# "lkm"
# "ifm"
# "vcycle"
# preconditioner: string
# Possible preconditioners are:
# None
# "jacobi"
# "ichol"
# "vcycle"
# ichol_regularization: float64
# Increase to increase probability that incomplete
# Cholesky decomposition can be built successfully.
# Increasing regularization decreases convergence rate.
# ichol_threshold: float64
# Increase to discard more values of incomplete Cholesky
# decomposition.
# Leads to faster build times and lower memory use,
# but decreases convergence rate and decomposition might fail.
# lkm_radius: int
# Radius for matting kernel used in lkm matting.
# Converges faster with larger radius, but result is more blurry.
# lambd: float64
# Weighting factor to constrain known trimap values.
# epsilon: float64
# Regularization factor for closed-form matting.
# Larger values lead to faster convergence but more blurry alpha.
# max_iterations: int
# Maximum number of iterations of conjugate gradient descent.
# relative_tolerance: float64
# Conjugate gradient descent will stop when
# norm(A x - b) < relative_tolerance norm(b).
# If either relative_tolerance or absolute_tolerance is None,
# the other is set to 0.0.
# absolute_tolerance: float64
# Conjugate gradient descent will stop when
# norm(A x - b) < absolute_tolerance.
# The default value is
# 0.1/(width * height).
# callback: func(A, x, b)
# callback to inspect temporary result after each iteration.
# x0: np.ndarray of dtype np.float64
# Initial guess for alpha matte.
# print_info: bool
# If to print convergence information.
#
# Returns
# -------
# alpha: ndarray, dtype float64, shape [height, width]
assert(image.dtype == np.float64)
assert(trimap.dtype == np.float64)
assert(len(image.shape) == 3)
assert(image.shape[2] == 3)
assert(len(trimap.shape) == 2)
assert(image.shape[:2] == trimap.shape)
assert(0 <= image.min() and image.max() <= 1)
assert(0 <= trimap.min() and trimap.max() <= 1)
if relative_tolerance is None:
if absolute_tolerance is None:
relative_tolerance = 0.0
absolute_tolerance = 0.1 / (image.shape[0] * image.shape[1])
else:
relative_tolerance = 0.0
else:
if absolute_tolerance is None:
absolute_tolerance = 0.0
if method == "lkm" and preconditioner not in [None, "jacobi"]:
raise ValueError('Only None or "jacobi" preconditioner are supported for lkm matting')
if print_info:
print("Alpha matting with method %s and preconditioner %s" % (
method, preconditioner))
print("Building Laplacian matrix")
if method == "cf":
L = closed_form_laplacian(image, epsilon)
A, b = make_system(L, trimap, lambd)
elif method == "knn":
L = knn_laplacian(image)
A, b = make_system(L, trimap, lambd)
elif method == "lkm":
L, L_diag = make_lkm_operators(
image,
radius=lkm_radius,
eps=epsilon)
from .util import trimap_split
is_fg, is_bg, is_known, is_unknown = trimap_split(trimap)
d = lambd * is_known.astype(np.float64)
inv_A_diag = 1 / (L_diag + d)
def A_dot(x):
return L @ x + d * x
n = len(d)
A = scipy.sparse.linalg.LinearOperator(matvec=A_dot, shape=(n, n))
b = lambd * is_fg.astype(np.float64)
def lkm_jacobi_precondition(r):
return r * inv_A_diag
elif method == "ifm":
A, b = ifm_system(image, trimap)
else:
raise ValueError("Invalid matting method: %s\nValid methods are:\n%s" % (
method, METHODS))
if print_info:
print("Building preconditioner")
if preconditioner is None:
def precondition(r):
return r
elif preconditioner == "ichol":
params = [
(ichol_regularization, ichol_threshold),
(1e-4, 1e-4),
(0.0, 1e-5),
(1e-4, 1e-5),
(0.0, 0.0),
]
for ichol_regularization, ichol_threshold in params:
try:
A_regularized = A if ichol_regularization == 0.0 else \
A + ichol_regularization * scipy.sparse.identity(A.shape[0])
L = ichol(A_regularized.tocsc(), ichol_threshold)
break
except ValueError as e:
print("""WARNING:
Incomplete Cholesky decomposition failed (%s) with:
ichol_regularization = %f
ichol_threshold = %f
A smaller value for ichol_threshold might help if sufficient memory is available.
A larger value for ichol_threshold might help if more time is available.
See help of matting_closed_form for more info.
""" % (e, ichol_regularization, ichol_threshold))
def precondition(r):
return ichol_solve(L, r)
elif preconditioner == "jacobi":
if method == "lkm":
precondition = lkm_jacobi_precondition
else:
inv_diag = 1 / A.diagonal()
def precondition(r):
return r * inv_diag
elif preconditioner == "vcycle":
cache = {}
def precondition(r):
return vcycle(A, r, trimap.shape, cache)
else:
raise ValueError('Invalid preconditioner %s\nValid preconditioners are: %s' % (
preconditioner, PRECONDITIONERS))
x = solve_cg(
A,
b,
x0=x0,
max_iter=max_iterations,
rtol=relative_tolerance,
atol=absolute_tolerance,
precondition=precondition,
print_info=print_info,
callback=callback)
alpha = np.clip(x, 0, 1).reshape(trimap.shape)
return alpha
|
import pdb
import time
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.model_zoo as model_zoo
from torch.optim import lr_scheduler
from torchvision.models.resnet import Bottleneck, BasicBlock
class DeepLearningModel(nn.Module):
    """Generic training/evaluation harness built on nn.Module.

    Subclasses implement the abstract methods below (forward pass, data
    unpacking, loss, metric computation); this base class provides the
    optimizer/scheduler setup and the train/val/test loops in fit().

    Expected cfg keys: verbose, num_epochs, optimizer_name,
    optimizer_kwargs, scheduler_kwargs, prefixes_of_vars_to_freeze.
    NOTE(review): subclasses are expected to also set
    self.metric_to_use_as_stopping_criterion before fit() is called —
    it is read but never assigned in this class.
    """

    def __init__(self, cfg):
        super().__init__()
        self.verbose = cfg['verbose']  # dict of verbosity flags
        self.num_epochs = cfg['num_epochs']
        self.optimizer_name = cfg['optimizer_name']
        self.optimizer_kwargs = cfg['optimizer_kwargs']
        self.scheduler_kwargs = cfg['scheduler_kwargs']
        self.prefixes_of_vars_to_freeze = cfg['prefixes_of_vars_to_freeze']
        # Parameter name -> L2 norm, maintained by print_layer_magnitudes().
        self.layer_magnitudes = {}

    # ----------------- Abstract class methods to be implemented per model -----------------
    def forward(self, X):
        raise NotImplementedError()

    def forward_with_intervention(self, X, labels):
        raise NotImplementedError()

    def get_data_dict_from_dataloader(self, data):
        raise NotImplementedError()

    def loss(self, outputs, data_dict):
        raise NotImplementedError()

    def analyse_predictions(self, y_true, y_pred, info={}):
        raise NotImplementedError()

    # ----------------- Standard deep learning boilerplate train + val code -----------------
    def train_or_eval_dataset(self, dataloaders, dataset_sizes, phase, intervention=False):
        """
        Given a model, data, and a phase (train/val/test), it runs the model on the data and,
        if phase = train, we will train the model.

        Returns the metrics dict produced by analyse_predictions().
        """
        print('Train / Eval pass on %s dataset' % phase)
        assert phase in ['train', 'val', 'test']
        # Training vs evaluate mode (affects dropout/batchnorm).
        self.train(phase == 'train')
        running_loss = 0.0
        n_batches_loaded = 0
        start_time_for_100_images = time.time()
        time_data_loading = 0
        time_forward_prop = 0
        time_backward_prop = 0
        time_update_step = 0
        # Keep track of all labels + outputs to compute the final metrics.
        concatenated_labels = {}
        concatenated_outputs = {}
        loss_details = []
        # Iterate over data.
        for data in dataloaders[phase]:
            t = time.time()
            n_batches_loaded += 1
            # Periodic timing report so slow data loading is visible.
            if n_batches_loaded % 100 == 0:
                time_100batches = time.time() - start_time_for_100_images
                if self.verbose['time_100batches']:
                    print('Time taken to process 100 batches %2.3f seconds (total batches %i)' % (
                        time_100batches, len(dataloaders[phase])))
                if self.verbose['time_breakdown']:
                    # Whatever is not accounted for below was spent in the loop itself.
                    time_data_loop = time_100batches - time_data_loading - time_forward_prop - \
                        time_backward_prop - time_update_step
                    print('  Data Loop   : %2.2f seconds' % time_data_loop)
                    print('  Data Loading: %2.2f seconds' % time_data_loading)
                    print('  Forward     : %2.2f seconds' % time_forward_prop)
                    print('  Backward    : %2.2f seconds' % time_backward_prop)
                    print('  Update      : %2.2f seconds' % time_update_step)
                time_data_loading = 0
                time_forward_prop = 0
                time_backward_prop = 0
                time_update_step = 0
                start_time_for_100_images = time.time()
            # Get the inputs.
            data_dict = self.get_data_dict_from_dataloader(data)
            inputs = data_dict['inputs']
            labels = data_dict['labels']
            time_data_loading += time.time() - t
            t = time.time()
            # Zero the parameter gradients.
            self.optimizer.zero_grad()
            # Forward
            if intervention:
                # Under intervention, we assume some limited form of access to ground truth
                # during test-time; usually for evaluation purposes and not training.
                assert phase in ['val', 'test']
                outputs = self.forward_with_intervention(inputs, labels)
            else:
                outputs = self.forward(inputs)
            # Compute loss.
            loss, loss_detail = self.loss(outputs, data_dict)
            loss_details.append(loss_detail)
            # Keep track of everything for correlations.
            extend_dicts(concatenated_labels, labels)
            extend_dicts(concatenated_outputs, outputs)
            time_forward_prop += time.time() - t
            t = time.time()
            # Backward + optimize only if in training phase.
            if phase == 'train':
                loss.backward()
                time_backward_prop += time.time() - t
                t = time.time()
                self.optimizer.step()
                time_update_step += time.time() - t
            # Loss statistics, weighted by batch size (taken from any label tensor).
            running_loss += loss.data.item() * labels[list(labels.keys())[0]].size(0)
        epoch_loss = running_loss / dataset_sizes[phase]
        info = {
            'phase': phase,
            'dataset_size': dataset_sizes[phase],
            'epoch_loss': epoch_loss,
            'loss_details': loss_details,
        }
        metrics_for_epoch = self.analyse_predictions(concatenated_labels, concatenated_outputs, info)
        return metrics_for_epoch

    def fit(self, dataloaders, dataset_sizes):
        """
        Trains the model. dataloaders + dataset_sizes should have keys train, val, and test.

        Returns a dict of metrics: one entry per epoch plus 'final_results',
        'total_seconds_to_train' and 'test_set_results'. After fitting, the
        model holds the best (by validation metric) weights, in eval mode.
        """
        since = time.time()
        best_model_wts = copy.deepcopy(self.state_dict())
        best_metric_val = -np.inf
        all_metrics = {}
        for epoch in range(self.num_epochs):
            epoch_t0 = time.time()
            print('\nEpoch {}/{}'.format(epoch, self.num_epochs - 1))
            print('-' * 60)
            metrics_for_epoch = {}
            # Each epoch has a training and validation phase.
            for phase in ['train', 'val']:
                metrics_for_phase = self.train_or_eval_dataset(dataloaders, dataset_sizes, phase)
                # Change the learning rate after the validation pass.
                if phase == 'val':
                    if self.lr_scheduler_type == 'step':
                        self.scheduler.step()
                    elif self.lr_scheduler_type == 'plateau':
                        self.scheduler.step(
                            metrics_for_phase[self.metric_to_use_as_stopping_criterion])
                    else:
                        raise Exception('Not a valid scheduler type')
                    print('Current learning rate after epoch %i is' % epoch)
                    # https://github.com/pytorch/pytorch/issues/2829 get learning rate.
                    for param_group in self.optimizer.param_groups:
                        print(param_group['lr'])
                metrics_for_epoch.update(metrics_for_phase)
                # Deep copy the model if the validation performance is better than what we've seen so far.
                if phase == 'val' and metrics_for_phase[self.metric_to_use_as_stopping_criterion] > best_metric_val:
                    best_metric_val = metrics_for_phase[self.metric_to_use_as_stopping_criterion]
                    best_model_wts = copy.deepcopy(self.state_dict())
            all_metrics[epoch] = metrics_for_epoch
            print('Total seconds taken for epoch: %2.3f' % (time.time() - epoch_t0))
            if self.verbose['layer_magnitudes']:
                print('\n\n***\nPrinting layer magnitudes')
                self.print_layer_magnitudes(epoch)
        all_metrics['final_results'] = metrics_for_epoch
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
        # Load best model weights and switch to evaluation mode.
        self.load_state_dict(best_model_wts)
        self.train(False)
        # BUG FIX: the original code did `self.state_dict = best_model_wts`
        # here, which shadowed nn.Module.state_dict with a plain dict and
        # broke any subsequent self.state_dict() call (e.g. checkpointing).
        # load_state_dict() above already restored the best weights.
        all_metrics['total_seconds_to_train'] = time_elapsed
        all_metrics['test_set_results'] = self.train_or_eval_dataset(dataloaders, dataset_sizes, 'test')
        return all_metrics

    def setup_optimizers(self, optimizer_name, optimizer_kwargs, scheduler_kwargs):
        """Create self.optimizer and self.scheduler over the trainable params.

        Only parameters with requires_grad=True are optimized
        (https://github.com/pytorch/pytorch/issues/679).
        Raises Exception on an unknown optimizer or scheduler type.
        """
        if optimizer_name == 'sgd':
            self.optimizer = optim.SGD(filter(lambda p: p.requires_grad, self.parameters()),
                                       **optimizer_kwargs)
        elif optimizer_name == 'adam':
            self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.parameters()),
                                        **optimizer_kwargs)
        else:
            raise Exception("Not a valid optimizer")
        self.lr_scheduler_type = scheduler_kwargs['lr_scheduler_type']
        if self.lr_scheduler_type == 'step':
            self.scheduler = lr_scheduler.StepLR(self.optimizer,
                                                 **scheduler_kwargs['additional_kwargs'])
        elif self.lr_scheduler_type == 'plateau':
            self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                            **scheduler_kwargs['additional_kwargs'])
        else:
            raise Exception("invalid scheduler")

    def print_layer_magnitudes(self, epoch):
        """Print the L2 norm of every parameter (and its change since the
        previous call) so we can verify the intended layers are training.
        """
        for name, param in self.named_parameters():
            magnitude = np.linalg.norm(param.data.cpu())
            # BUG FIX: key the cache by the parameter *name*. The original
            # keyed by the tensor object, which relies on object identity
            # and keeps every parameter tensor alive in the dict.
            if name not in self.layer_magnitudes:
                self.layer_magnitudes[name] = magnitude
                print("The magnitude of layer %s at epoch %i is %2.5f" % (name, epoch, magnitude))
            else:
                old_magnitude = self.layer_magnitudes[name]
                delta_magnitude = magnitude - old_magnitude
                print("The magnitude of layer %s at epoch %i is %2.5f (delta %2.5f from last epoch)" % (
                    name, epoch, magnitude, delta_magnitude))
                self.layer_magnitudes[name] = magnitude
def extend_dicts(dict1, dict2):
    """Accumulate the tensors of dict2 into dict1 as numpy arrays.

    On the first call (dict1 empty) the values are simply copied over;
    afterwards both dicts must share the same keys and each new array is
    concatenated onto the existing one. dict1 is mutated in place.
    """
    new_arrays = {key: val.data.cpu().numpy() for key, val in dict2.items()}
    if not dict1:
        dict1.update(new_arrays)
        return
    assert set(dict1.keys()) == set(new_arrays.keys())
    for key, arr in new_arrays.items():
        dict1[key] = np.concatenate([dict1[key], arr])
    return
# Torchvision pretrained checkpoint URLs, keyed by architecture name
# (downloaded via torch.utils.model_zoo when loading pretrained weights).
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class PretrainedResNetModel(DeepLearningModel):
    """ResNet backbone (optionally loaded from pretrained weights) with a
    configurable stack of fully-connected head layers.

    The ResNet architecture is rebuilt locally (conv1..layer4) so that the
    fc head can be replaced, and weights are then loaded either from a
    local checkpoint (cfg['pretrained_path']) or from the torchvision
    model zoo.
    """

    def __init__(self, cfg, build=True):
        # Running count of input planes used by _make_layer.
        self.inplanes = 64
        super().__init__(cfg)
        self.dropout = cfg['dropout']  # dropout probability (replaced by the module below)
        self.fc_layers = cfg['fc_layers']  # output dims of the fc head, in order
        self.pretrained_path = cfg['pretrained_path']
        self.pretrained_model_name = cfg['pretrained_model_name']
        self.pretrained_exclude_vars = cfg['pretrained_exclude_vars']
        self.conv_layers_before_end_to_unfreeze = cfg['conv_layers_before_end_to_unfreeze']
        # ---- Architecture based on selected model ----
        # Shallow resnets use BasicBlock; deeper ones use Bottleneck.
        block = BasicBlock if self.pretrained_model_name in ['resnet18', 'resnet34'] else Bottleneck
        # Number of residual blocks per stage for each architecture.
        layers = {
            'resnet18': [2, 2, 2, 2],
            'resnet34': [3, 4, 6, 3],
            'resnet50': [3, 4, 6, 3],
            'resnet101': [3, 4, 23, 3],
            'resnet152': [3, 8, 36, 3],
        }[self.pretrained_model_name]
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)  # nn.AvgPool2d(7, stride=1)
        # NOTE: self.dropout is rebound from the float probability to the module.
        self.dropout = nn.Dropout(self.dropout, inplace=False)
        self.conv_layer_dims = { 'conv1': 64,
                                 'conv2': 128,
                                 'conv3': 256,
                                 'conv4': 512 }
        # Build the fc head: fc1, fc2, ... with the configured widths.
        previous_layer_dims = 512 * block.expansion
        for i, layer in enumerate(self.fc_layers):
            setattr(self, 'fc' + str(i + 1), nn.Linear(previous_layer_dims, layer))
            previous_layer_dims = layer
        # He-style initialization for conv layers; identity-ish for batchnorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        if build:
            self.build()

    # ----------------- Abstract class methods to be implemented per model -----------------
    def get_data_dict_from_dataloader(self, data):
        raise NotImplementedError()

    def loss(self, outputs, data_dict):
        raise NotImplementedError()

    def analyse_predictions(self, y_true, y_pred, info={}):
        raise NotImplementedError()

    # ----------------- Loading pretrained ResNet and adding fc layers -----------------
    def build(self):
        """Load weights, set trainability, move to GPU, create optimizers."""
        # Load pretrained resnet
        self.load_pretrained()
        # Unfreeze the pretrained weights
        self.unfreeze_conv_layers(self.conv_layers_before_end_to_unfreeze)
        # Move model to GPU
        self.cuda()
        # Setup optimizers in the DeepLearningModel class
        self.setup_optimizers(self.optimizer_name, self.optimizer_kwargs, self.scheduler_kwargs)

    def compute_cnn_features(self, x):
        """Run the convolutional backbone and return flattened pooled features."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return x

    def forward(self, x):
        """Backbone features followed by the fc head (ReLU between layers,
        none after the last). Intermediate fc outputs are cached as
        self.fcN_out attributes."""
        x = self.compute_cnn_features(x)
        if self.fc_layers:
            N_layers = len(self.fc_layers)
            for i, layer in enumerate(self.fc_layers):
                fn = getattr(self, 'fc' + str(i + 1))
                x = fn(x)
                # No ReLu for last layer
                if i != N_layers - 1:
                    x = self.relu(x)
                # Cache results to get intermediate outputs
                setattr(self, 'fc%s_out' % str(i + 1), x)
        else:
            # NOTE(review): this fallback expects a self.fc attribute that is
            # never created in __init__ — presumably fc_layers is always
            # non-empty in practice; confirm before relying on this branch.
            x = self.fc(x)
        return x

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage of `blocks` residual blocks (torchvision-style)."""
        downsample = None
        # Downsample the identity path when shape/stride changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def load_pretrained(self):
        """Load weights, preferring (in order): a local checkpoint with some
        variables excluded, a full local checkpoint, or the public
        torchvision checkpoint (with fc-head mismatches tolerated)."""
        # Our own trained model
        if self.pretrained_path and len(self.pretrained_exclude_vars) > 0:
            print('[A] Loading our own pretrained model')
            own_state = self.state_dict()
            pretrained_state = torch.load(self.pretrained_path)
            for name, param in pretrained_state.items():
                if any([name.startswith(var) for var in self.pretrained_exclude_vars]):
                    print('  Skipping %s' % name)
                    continue
                if isinstance(param, torch.nn.Parameter):
                    # backwards compatibility for serialized parameters
                    param = param.data
                print('  Loading %s' % name)
                own_state[name].copy_(param)
            return
        elif self.pretrained_path:
            print('[B] Loading our own pretrained model')
            self.load_state_dict(torch.load(self.pretrained_path))
            return
        # Public pretrained ResNet model
        N_layers = len(self.fc_layers)
        if N_layers > 1 or self.fc_layers[0] != 1000:  # Check if it is default model
            print('Loading pretrained ResNet')
            # strict=False: our fcN head does not match torchvision's single fc.
            incompatible, unexpected = self.load_state_dict(
                model_zoo.load_url(model_urls[self.pretrained_model_name]), strict=False)
            # Only the fc head may mismatch; anything else is a real error.
            expected_incompatible = ['fc%d.weight' % (l + 1) for l in range(N_layers)] + \
                ['fc%d.bias' % (l + 1) for l in range(N_layers)]
            assert all([x in expected_incompatible for x in incompatible])
            assert all([x in ['fc.weight', 'fc.bias'] for x in unexpected])
        else:
            print('Loading pretrained ResNet')
            self.load_state_dict(model_zoo.load_url(model_urls[self.pretrained_model_name]))

    def unfreeze_conv_layers(self, conv_layers_before_end_to_unfreeze):
        """Freeze all conv layers except the last N
        (conv_layers_before_end_to_unfreeze); non-conv layers are frozen
        only if their name matches prefixes_of_vars_to_freeze."""
        param_idx = 0
        all_conv_layers = []
        # First pass: collect the distinct conv-layer prefixes in order.
        for name, param in self.named_parameters():
            print("Param %i: %s" % (param_idx, name), param.data.shape)
            param_idx += 1
            conv_layer_substring = get_conv_layer_substring(name)
            if conv_layer_substring is not None and conv_layer_substring not in all_conv_layers:
                all_conv_layers.append(conv_layer_substring)
        print("All conv layers", all_conv_layers)
        # Now look conv_layers_before_end_to_unfreeze conv layers before the end, and unfreeze all layers after that.
        assert conv_layers_before_end_to_unfreeze <= len(all_conv_layers)
        if conv_layers_before_end_to_unfreeze > 0:
            conv_layers_to_unfreeze = all_conv_layers[-conv_layers_before_end_to_unfreeze:]
        else:
            conv_layers_to_unfreeze = []
        to_unfreeze = False
        # Second pass: actually set requires_grad per parameter.
        for name, param in self.named_parameters():
            if not name.startswith('fc'):
                # Conv layers
                conv_layer_substring = get_conv_layer_substring(name)
                if conv_layer_substring in conv_layers_to_unfreeze:
                    to_unfreeze = True
            else:
                # Non-conv layers
                if self.prefixes_of_vars_to_freeze:
                    to_freeze = any([name.startswith(var) for var in self.prefixes_of_vars_to_freeze])
                    to_unfreeze = not to_freeze
                else:
                    to_unfreeze = True
            if to_unfreeze:
                print("Param %s is UNFROZEN" % name, param.data.shape)
            else:
                print("Param %s is FROZEN" % name, param.data.shape)
                param.requires_grad = False
# Loop over layers from beginning and freeze a couple. First we need to get the layers.
def get_conv_layer_substring(name):
    """Return the 'layerX.Y.convZ' prefix of a ResNet parameter name,
    or None when the parameter is not a conv weight inside a residual stage.
    """
    if not name.startswith('layer'):
        return None
    prefix = '.'.join(name.split('.')[:3])
    return prefix if 'conv' in prefix else None
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG-generated bootstrap: locate and load the native _enum_TimingExprOp
# extension module. On Python >= 2.6 the compiled module is searched next
# to this wrapper first via imp; otherwise a plain import is used.
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            # Prefer the extension that sits alongside this wrapper file.
            fp, pathname, description = imp.find_module('_enum_TimingExprOp', [dirname(__file__)])
        except ImportError:
            # Fall back to a regular import from sys.path.
            import _enum_TimingExprOp
            return _enum_TimingExprOp
        if fp is not None:
            try:
                _mod = imp.load_module('_enum_TimingExprOp', fp, pathname, description)
            finally:
                # find_module opened a file handle; always close it.
                fp.close()
            return _mod
    _enum_TimingExprOp = swig_import_helper()
    del swig_import_helper
else:
    import _enum_TimingExprOp
del version_info
# Alias used by generated property definitions on very old Pythons.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG-generated attribute setter: writes go through the generated
    # __swig_setmethods__ table; with static=1 unknown attributes are
    # rejected instead of being added to the instance dict.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: unknown attributes fall through to the instance dict.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # SWIG-generated attribute getter: dispatch through __swig_getmethods__.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # repr that survives a missing or broken underlying C object
    # (generated code; hence the intentional bare except).
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Compatibility shim for ancient Pythons without new-style classes
# (SWIG-generated; on any modern Python _object is simply `object`).
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    # Wrap a setter so only pre-existing attributes (or 'this'/'thisown')
    # may be assigned; anything else raises AttributeError.
    def set_attr(self,name,value):
        if (name == "thisown"): return self.this.own(value)
        if hasattr(self,name) or (name == "this"):
            set(self,name,value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
# Re-export the TimingExprOp enum values (and the cvar namespace) from the
# native extension module at Python-module level (SWIG-generated constants).
timingExprAdd = _enum_TimingExprOp.timingExprAdd
timingExprSub = _enum_TimingExprOp.timingExprSub
timingExprUMul = _enum_TimingExprOp.timingExprUMul
timingExprUDiv = _enum_TimingExprOp.timingExprUDiv
timingExprSMul = _enum_TimingExprOp.timingExprSMul
timingExprSDiv = _enum_TimingExprOp.timingExprSDiv
timingExprUCeilDiv = _enum_TimingExprOp.timingExprUCeilDiv
timingExprEqual = _enum_TimingExprOp.timingExprEqual
timingExprNotEqual = _enum_TimingExprOp.timingExprNotEqual
timingExprULessThan = _enum_TimingExprOp.timingExprULessThan
timingExprUGreaterThan = _enum_TimingExprOp.timingExprUGreaterThan
timingExprSLessThan = _enum_TimingExprOp.timingExprSLessThan
timingExprSGreaterThan = _enum_TimingExprOp.timingExprSGreaterThan
timingExprInvert = _enum_TimingExprOp.timingExprInvert
timingExprNot = _enum_TimingExprOp.timingExprNot
timingExprAnd = _enum_TimingExprOp.timingExprAnd
timingExprOr = _enum_TimingExprOp.timingExprOr
timingExprSizeInBits = _enum_TimingExprOp.timingExprSizeInBits
timingExprSignExtend32To64 = _enum_TimingExprOp.timingExprSignExtend32To64
timingExprAbs = _enum_TimingExprOp.timingExprAbs
Num_TimingExprOp = _enum_TimingExprOp.Num_TimingExprOp
cvar = _enum_TimingExprOp.cvar
|
import time
from uuid import uuid4
import hypothesis.strategies as hst
from hypothesis import HealthCheck, assume, given, settings
import numpy as np
import pytest
import qcodes as qc
from qcodes.dataset.guids import (filter_guids_by_parts, generate_guid,
parse_guid, set_guid_location_code,
set_guid_work_station_code,
validate_guid_format)
from qcodes.tests.common import default_config
@settings(max_examples=50, deadline=1000)
@given(loc=hst.integers(0, 255), stat=hst.integers(0, 65535),
       smpl=hst.integers(0, 4294967295))
def test_generate_guid(loc, stat, smpl):
    """Write GUID components into the config, generate a guid and verify
    that parsing it returns the same components."""
    with default_config():
        components = qc.config['GUID_components']
        components['location'] = loc
        components['work_station'] = stat
        components['sample'] = smpl
        guid = generate_guid()
        generation_time_ms = int(np.round(time.time() * 1000))
        parsed = parse_guid(guid)
        # A sample id of 0 is stored as the sentinel 0xaaaaaaaa.
        expected_smpl = smpl if smpl != 0 else int('a' * 8, base=16)
        assert parsed['location'] == loc
        assert parsed['work_station'] == stat
        assert parsed['sample'] == expected_smpl
        assert parsed['time'] - generation_time_ms < 2
@settings(max_examples=50, deadline=None,
          suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(loc=hst.integers(-10, 350))
def test_set_guid_location_code(loc, monkeypatch):
    """Only location codes in 1..256 are accepted; anything else must
    leave the configured value untouched."""
    monkeypatch.setattr('builtins.input', lambda prompt: str(loc))
    with default_config():
        previous_loc = qc.config['GUID_components']['location']
        set_guid_location_code()
        expected = loc if 0 < loc < 257 else previous_loc
        assert qc.config['GUID_components']['location'] == expected
@settings(max_examples=50, deadline=1000,
          suppress_health_check=(HealthCheck.function_scoped_fixture,))
@given(ws=hst.integers(-10, 17000000))
def test_set_guid_workstation_code(ws, monkeypatch):
    """Only work-station codes in 1..16777215 are accepted; anything else
    must leave the configured value untouched."""
    monkeypatch.setattr('builtins.input', lambda prompt: str(ws))
    with default_config():
        previous_ws = qc.config['GUID_components']['work_station']
        set_guid_work_station_code()
        expected = ws if 0 < ws < 16777216 else previous_ws
        assert qc.config['GUID_components']['work_station'] == expected
@settings(max_examples=50, deadline=1000)
@given(locs=hst.lists(hst.integers(0, 255), min_size=2, max_size=2,
                      unique=True),
       stats=hst.lists(hst.integers(0, 65535), min_size=2, max_size=2,
                       unique=True),
       smpls=hst.lists(hst.integers(0, 4294967295), min_size=2, max_size=2,
                       unique=True),
       )
def test_filter_guid(locs, stats, smpls):
    """Generate four guids that differ from a reference guid in exactly one
    component each, then check filter_guids_by_parts returns exactly the
    expected subset for every combination of filter arguments."""
    def make_test_guid(cfg, loc: int, smpl: int, stat: int):
        # Write the components into the config, generate a guid and
        # sanity-check that parsing round-trips the components.
        cfg['GUID_components']['location'] = loc
        cfg['GUID_components']['work_station'] = stat
        cfg['GUID_components']['sample'] = smpl
        guid = generate_guid()
        gen_time = int(np.round(time.time() * 1000))
        comps = parse_guid(guid)
        assert comps['location'] == loc
        assert comps['work_station'] == stat
        assert comps['sample'] == smpl
        assert comps['time'] - gen_time < 2
        return guid
    with default_config():
        guids = []
        cfg = qc.config
        # A sample id of 0 is stored as the sentinel int('a'*8, 16).
        corrected_smpls = [smpl if smpl != 0 else int('a' * 8, base=16)
                           for smpl in smpls]
        # there is a possibility that we could generate 0 and 2863311530, which
        # are considered equivalent since int('a' * 8, base=16) == 2863311530.
        # We want unique samples, so we exclude this case.
        assume(corrected_smpls[0] != corrected_smpls[1])
        # first we generate a guid that we are going to match against
        guids.append(make_test_guid(cfg, locs[0], corrected_smpls[0], stats[0]))
        # now generate some guids that will not match because one of the
        # components changed
        guids.append(make_test_guid(cfg, locs[1], corrected_smpls[0], stats[0]))
        guids.append(make_test_guid(cfg, locs[0], corrected_smpls[1], stats[0]))
        guids.append(make_test_guid(cfg, locs[0], corrected_smpls[0], stats[1]))
        assert len(guids) == 4
        # first filter on all parts. This should give exactly one matching guid
        filtered_guids = filter_guids_by_parts(guids,
                                               location=locs[0],
                                               sample_id=corrected_smpls[0],
                                               work_station=stats[0]
                                               )
        assert len(filtered_guids) == 1
        assert filtered_guids[0] == guids[0]
        # now filter on 2 components
        filtered_guids = filter_guids_by_parts(guids,
                                               location=locs[0],
                                               sample_id=corrected_smpls[0])
        assert len(filtered_guids) == 2
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[3]
        filtered_guids = filter_guids_by_parts(guids,
                                               location=locs[0],
                                               work_station=stats[0])
        assert len(filtered_guids) == 2
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[2]
        filtered_guids = filter_guids_by_parts(guids,
                                               sample_id=corrected_smpls[0],
                                               work_station=stats[0]
                                               )
        assert len(filtered_guids) == 2
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[1]
        # now filter on 1 component
        filtered_guids = filter_guids_by_parts(guids,
                                               location=locs[0])
        assert len(filtered_guids) == 3
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[2]
        assert filtered_guids[2] == guids[3]
        filtered_guids = filter_guids_by_parts(guids,
                                               work_station=stats[0])
        assert len(filtered_guids) == 3
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[1]
        assert filtered_guids[2] == guids[2]
        filtered_guids = filter_guids_by_parts(guids,
                                               sample_id=corrected_smpls[0],
                                               )
        assert len(filtered_guids) == 3
        assert filtered_guids[0] == guids[0]
        assert filtered_guids[1] == guids[1]
        assert filtered_guids[2] == guids[3]
def test_validation():
    """A fresh uuid4 string passes validation; a truncated one must raise."""
    guid = str(uuid4())
    # Must not raise for a well-formed guid.
    validate_guid_format(guid)
    truncated = guid[1:]
    with pytest.raises(ValueError):
        validate_guid_format(truncated)
|
#!/usr/bin/env python3
"""Crawl DB for started bcl2fastq runs and resp. output folders for
flag files indicating completion, upon which DB needs update
"""
#--- standard library imports
#
import sys
import os
import argparse
import logging
import subprocess
from datetime import datetime
#--- third-party imports
#
import yaml
## only dump() and following do not automatically create aliases
yaml.Dumper.ignore_aliases = lambda *args: True
#--- project specific imports
#
# add lib dir for this pipeline installation to PYTHONPATH
LIB_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "lib"))
if LIB_PATH not in sys.path:
sys.path.insert(0, LIB_PATH)
from mongodb import mongodb_conn
from pipelines import generate_window
from pipelines import PipelineHandler
from utils import timestamp_from_string
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "2016 Genome Institute of Singapore"
__license__ = "The MIT License (MIT)"
# Trigger files dropped into an output directory request a DB update.
DBUPDATE_TRIGGER_FILE_FMT = "TRIGGER.DBUPDATE.{num}"
# up to DBUPDATE_TRIGGER_FILE_MAXNUM trigger files allowed
DBUPDATE_TRIGGER_FILE_MAXNUM = 9
# Directory containing this script (used to locate sibling helper scripts).
BASEDIR = os.path.dirname(sys.argv[0])
# global logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    '[{asctime}] {levelname:8s} (unknown) {message}', style='{'))
logger.addHandler(handler)
class MongoUpdate(object):
    """Helper class for mongodb updates for one run/analysis pair.

    Wraps the external mongo_status.py and mongo_status_per_mux.py
    helper scripts (expected next to this script), forwarding the
    testing and dry-run flags to them.
    """

    def __init__(self, run_num, analysis_id, testing=False, dryrun=False):
        """Locate the helper scripts; raises AssertionError if missing."""
        self.run_num = run_num
        self.analysis_id = analysis_id
        self.testing = testing  # use the MongoDB test server
        self.dryrun = dryrun
        mongo_status_script = os.path.abspath(os.path.join(
            os.path.dirname(sys.argv[0]), "mongo_status.py"))
        assert os.path.exists(mongo_status_script), (
            "Missing {}".format(mongo_status_script))
        self.mongo_status_script = mongo_status_script
        mongo_status_per_mux_script = os.path.abspath(os.path.join(
            os.path.dirname(sys.argv[0]), "mongo_status_per_mux.py"))
        assert os.path.exists(mongo_status_per_mux_script), (
            "Missing {}".format(mongo_status_per_mux_script))
        self.mongo_status_per_mux_script = mongo_status_per_mux_script

    def _run_status_cmd(self, cmd):
        """Append testing/dry-run flags to cmd and execute it.

        Returns True on success, False (after logging the failure) if the
        command exits non-zero. Shared by update_run() and update_mux().
        """
        if self.testing:
            cmd.append("-t")
        if self.dryrun:
            cmd.append("--dry-run")
        try:
            _ = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            logger.critical("The following command failed with return code %s: %s",
                            e.returncode, ' '.join(cmd))
            logger.critical("Output: %s", e.output.decode())
            return False
        else:
            return True

    def update_run(self, status, outdir):
        """Update status for the whole run. Returns True on success."""
        # BUG FIX: the original passed (analysis_id, run_num) here, i.e.
        # swapped relative to the "run %s analysis %s" message.
        logger.info("Updating status for run %s analysis %s to %s",
                    self.run_num, self.analysis_id, status)
        cmd = [self.mongo_status_script, '-r', self.run_num,
               '-a', self.analysis_id, '-s', status, '-o', outdir]
        return self._run_status_cmd(cmd)

    def update_mux(self, status, mux_id, mux_dir):
        """Update status for one mux of this analysis. Returns True on success."""
        logger.info("Updating status for mux %s of analysis %s in run %s to %s",
                    mux_id, self.analysis_id, self.run_num, status)
        cmd = [self.mongo_status_per_mux_script, '-r', self.run_num,
               '-a', self.analysis_id, '-s', status,
               '-i', mux_id, '-d', mux_dir]
        return self._run_status_cmd(cmd)
def get_started_outdirs_from_db(testing=True, win=None):
    """Yield the output directory of every analysis whose DB status is STARTED.

    testing selects the MongoDB test server; win optionally restricts the
    query to runs recorded within the last `win` days. Exits the process
    if no DB connection can be established.
    """
    connection = mongodb_conn(testing)
    if connection is None:
        sys.exit(1)
    db = connection.gisds.runcomplete
    query = {"analysis.Status": "STARTED"}
    if win:
        epoch_present, epoch_back = generate_window(win)
        query["timestamp"] = {"$gt": epoch_back, "$lt": epoch_present}
    results = db.find(query)
    # results is a pymongo cursor, i.e. an iterator: don't use len() on it.
    logger.info("Found %d runs", results.count())
    for record in results:
        logger.debug("record: %s", record)
        # one run record may carry several analysis runs
        for analysis in record['analysis']:
            yield analysis["out_dir"]
def mux_dir_complete(muxdir, completed_after=None):
    """Check whether the completion flag files for muxdir exist.

    Returns False if muxdir or any flag file is missing, or — when
    completed_after is given — if none of the flag files is newer than
    completed_after. Returns True otherwise.
    """
    if not os.path.exists(muxdir):
        logger.info("Directory %s doesn't exist", muxdir)
        return False
    newer_flag_seen = False
    for flag_name in ('bcl2fastq.SUCCESS', 'fastqc.SUCCESS'):
        flag_path = os.path.join(muxdir, flag_name)
        if not os.path.exists(flag_path):
            logger.debug("mux dir %s incomplete: %s is missing", muxdir, flag_path)
            return False
        if completed_after:
            mtime = datetime.fromtimestamp(os.path.getmtime(flag_path))
            if mtime > completed_after:
                newer_flag_seen = True
    if completed_after and not newer_flag_seen:
        return False
    return True
def main():
    """main function: find STARTED analyses (from the DB or --outdirs),
    read their trigger files and push run/mux status updates to MongoDB.
    """
    # FIXME ugly and duplicated in bcl2fastq.py
    mongo_status_per_mux_script = os.path.abspath(os.path.join(
        os.path.dirname(sys.argv[0]), "mongo_status_per_mux.py"))
    # sanity check only: MongoUpdate resolves the script path itself
    # (was asserted twice; the duplicate assert has been removed)
    assert os.path.exists(mongo_status_per_mux_script)

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-t', "--testing", action='store_true',
                        help="Use MongoDB test server")
    parser.add_argument('-w', '--win', type=int,
                        help="Restrict to runs within last x days)")
    parser.add_argument('--outdirs', nargs="*",
                        help="Ignore DB entries and go through this list"
                        " of directories (DEBUGGING)")
    parser.add_argument('-n', '--dry-run', action='store_true')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Increase verbosity")
    parser.add_argument('-q', '--quiet', action='count', default=0,
                        help="Decrease verbosity")
    args = parser.parse_args()

    # Repeateable -v and -q for setting logging level.
    # See https://www.reddit.com/r/Python/comments/3nctlm/what_python_tools_should_i_be_using_on_every/
    # and https://gist.github.com/andreas-wilm/b6031a84a33e652680d4
    # script -vv -> DEBUG
    # script -v -> INFO
    # script -> WARNING
    # script -q -> ERROR
    # script -qq -> CRITICAL
    # script -qqq -> no logging at all
    logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)

    if args.outdirs:
        logger.warning("Using manually defined outdirs")
        outdirs = args.outdirs
    else:
        # generator!
        outdirs = get_started_outdirs_from_db(args.testing, args.win)

    num_triggers = 0
    for outdir in outdirs:
        # load mux info from config instead of relying on filesystem
        #
        logger.debug("Loading config for %s", outdir)
        config_file = os.path.join(outdir, PipelineHandler.PIPELINE_CFGFILE)
        if not os.path.exists(config_file):
            logger.critical("Missing config file %s. Skipping this directory", config_file)
            continue
        with open(config_file) as fh:
            cfg = yaml.safe_load(fh)
        muxes = dict([(x['mux_id'], x['mux_dir']) for x in cfg['units'].values()])

        # look for trigger files. use their info for update and delete
        #
        for i in range(DBUPDATE_TRIGGER_FILE_MAXNUM+1):
            # multiple trigger files per directory allowed (but rare)
            trigger_file = os.path.join(outdir, DBUPDATE_TRIGGER_FILE_FMT.format(num=i))
            if not os.path.exists(trigger_file):
                continue
            logger.debug("Processing trigger file %s", trigger_file)
            num_triggers += 1
            with open(trigger_file) as fh:
                update_info = yaml.safe_load(fh)
            mongo_updater = MongoUpdate(update_info['run_num'],
                                        update_info['analysis_id'],
                                        args.testing, args.dry_run)
            res = mongo_updater.update_run(update_info['status'], outdir)
            if not res:
                # don't delete trigger. don't processe muxes. try again later
                logger.critical("Skipping this analysis (%s) for run %s",
                                update_info['analysis_id'], update_info['run_num'])
                continue

            # update per MUX
            #
            keep_trigger = False
            for mux_id, mux_dir_base in muxes.items():
                mux_dir = os.path.join(outdir, "out", mux_dir_base)# ugly
                if mux_dir_complete(mux_dir):
                    # skip the ones completed before this analysis started
                    completed_after = timestamp_from_string(update_info['analysis_id'])
                    if not mux_dir_complete(mux_dir, completed_after=completed_after):
                        continue
                    no_archive = cfg.get('no_archive', None)
                    if no_archive:
                        status = 'NOARCHIVE'
                    else:
                        status = 'SUCCESS'
                else:
                    status = 'FAILED'
                res = mongo_updater.update_mux(status, mux_id, mux_dir_base)
                if not res:
                    # don't delete trigger. try again later
                    logger.critical("Skipping rest of analysis %s for run %s",
                                    update_info['analysis_id'], update_info['run_num'])
                    keep_trigger = True
                    break
            if not args.dry_run and not keep_trigger:
                os.unlink(trigger_file)
    logger.info("%s dirs with triggers", num_triggers)
# Script entry point.
if __name__ == "__main__":
    main()
|
<reponame>seangeggie/tourney
import os
import json
from .constants import DATA_PATH
class State:
    """Singleton holding the bot's runtime state, persisted to a JSON file.

    Access via ``State.get()``; every persisted field has a ``<name>()``
    getter and a ``set_<name>()`` setter with matching JSON key.
    """

    __instance = None

    # JSON keys; each maps 1:1 onto a getter name and a "set_<key>" setter.
    # Kept as a per-method tuple (not a class attribute) to leave the class
    # dict unchanged.

    def __init__(self):
        if not State.__instance:
            self.reset()
            try:
                self.load()
            except Exception as ex:
                # Best effort: a missing/corrupt state file keeps defaults.
                print("State file could not load: {}".format(self.file_path()))
                print(ex)
            State.__instance = self

    @staticmethod
    def get():
        """Return the singleton instance, creating it on first use."""
        return State.__instance if State.__instance else State()

    def set_bot_id(self, bot_id):
        self.__bot_id = bot_id

    def bot_id(self):
        return self.__bot_id

    def set_channel_id(self, channel_id):
        self.__channel_id = channel_id

    def channel_id(self):
        return self.__channel_id

    def set_participants(self, participants):
        self.__participants = participants

    def add_participant(self, participant):
        self.__participants.append(participant)

    def remove_participant(self, participant):
        self.__participants.remove(participant)

    def participants(self):
        return self.__participants

    def set_morning_announce(self, ts):
        self.__morning_announce = ts

    def morning_announce(self):
        return self.__morning_announce

    def set_reminder_announce(self, ts):
        self.__reminder_announce = ts

    def reminder_announce(self):
        return self.__reminder_announce

    def set_midday_announce(self, midday_announce):
        self.__midday_announce = midday_announce

    def midday_announce(self):
        return self.__midday_announce

    def set_teams(self, teams):
        self.__teams = teams

    def teams(self):
        return self.__teams

    def set_team_names(self, team_names):
        self.__team_names = team_names

    def team_names(self):
        return self.__team_names

    def set_unrecorded_matches(self, unrecorded_matches):
        self.__unrecorded_matches = unrecorded_matches

    def unrecorded_matches(self):
        return self.__unrecorded_matches

    def file_path(self):
        """Location of the persisted state file."""
        return os.path.expanduser("{}/state.json".format(DATA_PATH))

    def reset(self):
        """Restore every field to its default value."""
        self.__bot_id = None
        self.__channel_id = None
        self.__participants = []
        self.__morning_announce = None
        self.__reminder_announce = None
        self.__midday_announce = False
        self.__teams = []
        self.__team_names = []
        self.__unrecorded_matches = []

    def save(self):
        """Write the current state to disk as pretty-printed JSON."""
        keys = ("bot_id", "channel_id", "participants", "morning_announce",
                "reminder_announce", "midday_announce", "teams", "team_names",
                "unrecorded_matches")
        data = {key: getattr(self, key)() for key in keys}
        path = self.file_path()
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w+") as fp:
            json.dump(data, fp, indent=2)

    def load(self):
        """Read state from disk; only keys present in the file are applied."""
        with open(self.file_path(), "r") as fp:
            data = json.load(fp)
        keys = ("bot_id", "channel_id", "participants", "morning_announce",
                "reminder_announce", "midday_announce", "teams", "team_names",
                "unrecorded_matches")
        for key in keys:
            if key in data:
                getattr(self, "set_" + key)(data[key])
|
# import os
# import sys
import shelve
import random
import textwrap
from time import sleep, time
from collections import namedtuple
from bearlibterminal import terminal as term
# sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/../')
import spaceship.strings as strings
from .screen_functions import *
from .scene import Scene
# some helper objects used in character creation
equipment = namedtuple("Equipment", "hd nk bd ar hn lh rh lr rr wa lg ft")
inventory = namedtuple("Inventory", "inventory")
genders = namedtuple("Gender", "gender bonus")
races = namedtuple("Race", "race location stats bonus gold skills eq")
classes = namedtuple("Class", "classes bonuses equipment")
# return type
player=namedtuple("Player",
"home gold stats gender gbonus race rbonus \
job jbonus skills equipment inventory")
class Create(Scene):
    """Character creation scene.

    Walks the player through gender, race and class selection and hands a
    ``player`` namedtuple to the next scene through ``self.ret``.

    Fix: the skill/spell toggle used the misspelled attribute
    ``self.abilitites`` (two occurrences) while ``reset()`` defines
    ``self.abilities`` — pressing 'S' raised AttributeError. Both uses
    now reference ``self.abilities``.
    """

    def __init__(self, sid='create_menu'):
        super().__init__(sid)

    def reset(self):
        """Reset selection state; layout constants depend on terminal height."""
        self.shorten = self.height <= 25
        self.delim, self.row, self.row_bonus = "\n", 11, 11
        if self.shorten:
            self.delim, self.row, self.row_bonus = "", 5, 6
        self.col1, self.col2, self.col3 = 3, 26, 49
        self.inv_screen = -1  # negative => show equipment, positive => inventory
        self.abilities = -1   # negative => show skills, positive => spells
        self.race_index = 0
        self.class_index = 0
        self.gender_index = 0
        self.character_index = 0  # 0=gender, 1=race, 2=class, 3=confirm
        self.grid = [[3, 26, 48], 5]
        self.length = self.width // 2

    def setup(self):
        """Build all selectable options (genders, races, classes, blurbs)."""
        self.reset()
        self.title = "Character Creation"
        self.help = "Press (?) for info on a switch race, subrace or class"
        self.gender_options = [
            genders("Male", strings.MALE),
            genders("Female", strings.FEMALE),
        ]
        # race objects and options
        self.race_options = [
            # Tiphmore -- Largest Free City in Calabaston
            races("Beast", "Tiphmore", strings.HUMAN, strings.BEAST_BONUS, 300,
                  ("thick fur", "animal senses"),
                  equipment("", "", "", "", "", ("long spear", "ring of ice"),
                            "", "", "", "", "", "")),
            # Capital of Yugahdahrum
            races("Dwarf", "Dun Badur", strings.HUMAN, strings.DWARF_BONUS, 250,
                  ("dark vision", "dwarven fortitude"),
                  equipment("horned helmet", "gold necklace", "", "", "",
                            ("battle axe", "copper pick"), "", "", "",
                            "", "", "")),
            # Aurundel -- Capital of Auriel in the Emerald Forest
            races("Elf", "Aurundel", strings.HUMAN, strings.ELVEN_BONUS, 250,
                  ("forest spirit", "nimble"),
                  equipment("", "", "elven chainmail", "", "",
                            "mithril dagger", "mithril dagger",
                            "", "", "", "", "")),
            # Renmar -- Capital of Rane Empire
            races("Human", "Renmar", strings.HUMAN, strings.HUMAN_BONUS, 200,
                  ("", ""),
                  equipment("", "", "", "", "", "broadsword", "medium shield",
                            "", "", "", "", "")),
            # Lok Gurrah, Capital of Oggrahgar
            races("Orc", "Lok Gurrah", strings.HUMAN, strings.ORCEN_BONUS, 150,
                  ("thick skin", "bloodrage"),
                  equipment("metal cap", "", "metal armor", "", "",
                            ("mace", "warhammer"), "", "", "", "", "", "")),
        ]
        # class objects and options
        self.class_options = [
            classes("Druid", strings.DRUIDS,
                    equipment("", "", "thick fur coat", "thick fur bracers",
                              "", "wooden staff", "", "ring of nature",
                              "ring of earth", "leather belt", "",
                              "leather boots")),
            classes("Cleric", strings.CLERIC,
                    equipment("cloth hood", "holy symbol", "light robe", "", "",
                              "mace", "small shield", "ring of power",
                              "ring of light", "rope belt", "",
                              "leather sandals")),
            classes("Archer", strings.ARCHER,
                    equipment("cloth hood", "whistle", "heavy cloak",
                              "leather bracers", "cloth gloves", "short bow",
                              "", "", "", "leather belt", "common pants",
                              "leather boots")),
            classes("Wizard", strings.WIZARD,
                    equipment("hood", "amulet of power", "light robe", "", "",
                              "quarterstaff", "spellbook", "ring of water",
                              "ring of fire", "rope belt", "",
                              "leather sandals")),
            classes("Squire", strings.SQUIRE,
                    equipment("leather cap", "", "leather armor",
                              "leather bracers", "cloth gloves", "long sword",
                              "medium shield", "", "", "leather belt",
                              "common pants", "leather boots")),
        ]
        # description options, indexed [selection stage][option index]
        self.descriptions = [
            [strings.start],
            [strings.race_beast, strings.race_dwarf, strings.race_elven,
             strings.race_human, strings.race_orcen,],
            [strings.class_druid, strings.class_cleric, strings.class_wizard,
             strings.class_archer, strings.class_squire,]]

    def draw(self):
        """Render the full creation screen, then handle one input event."""
        term.clear()
        self.cc_border()
        self.draw_title()
        self.draw_subtitle()
        # Gender, Race and Class variables
        gender, gbonus = self.gender_row()
        race, location, stats, rbonus, gold, skills, req = self.race_row()
        occu, cbonus, ceq = self.class_row()
        # BONUS -- only stages already passed contribute their bonuses
        if self.character_index == 0:
            total = strings.stats(*(sum(stats) for stats in zip(strings.HUMAN,
                                                                gbonus)))
        elif self.character_index == 1:
            total = strings.stats(*(sum(stats) for stats in zip(stats,
                                                                gbonus,
                                                                rbonus)))
        else:
            total = strings.stats(*(sum(stats) for stats in zip(stats,
                                                                gbonus,
                                                                rbonus,
                                                                cbonus)))
        # STATUS :- ATTRIBUTES derived from the combined stat block
        hp = total.str + total.con * 2
        mp = total.int + total.wis * 2
        sp = total.dex // 5
        # STATUS :- BACKGROUND
        term.puts(self.col1, self.row + 1, strings._col1.format(
            gender,
            race if self.character_index > 0 else "",
            location if self.character_index > 0 else "",
            occu if self.character_index > 1 else "",
            gold if self.character_index > 0 else 0,
            1, 80, hp, mp, sp,
            delim=self.delim))
        # STATUS :- SKILLS
        term.puts(self.col2, self.row + 1, strings._col2.format(
            *("" for _ in range(2)) if self.character_index < 1 else skills,
            *(total),
            delim=self.delim))
        # STATUS :- GENDER BONUSES
        term.puts(
            self.col2 + 9,
            self.row + (12 if not self.shorten else 7),
            strings._bon.format(
                *self.transform_values(gbonus),
                delim=self.delim))
        # STATUS :- RACE BONUSES (zeros until a race has been picked)
        term.puts(
            self.col2 + 12,
            self.row + (12 if not self.shorten else 7),
            strings._bon.format(
                *(self.transform_values(rbonus) if self.character_index > 0
                  else (0 for _ in range(6))),
                delim=self.delim))
        # STATUS :- CLASS BONUSES (zeros until a class has been picked)
        term.puts(
            self.col2 + 15,
            self.row + (12 if not self.shorten else 7),
            strings._bon.format(
                *self.transform_values(cbonus) if self.character_index > 1
                else (0 for _ in range(6)),
                delim=self.delim))
        # STATUS :- Item bonuses (not implemented; always zeros)
        term.puts(
            self.col2 + 18,
            self.row + (12 if not self.shorten else 7),
            strings._bon.format(*(0 for _ in range(6)), delim=self.delim))
        # EQUIPMENT and INVENTORY -- eq/inv reused by the ENTER handler below
        eq, inv = None, None
        if self.character_index > 0:
            if self.character_index >= 2:
                eq, inv = self.form_equipment(req, ceq)
            else:
                eq, inv = self.form_equipment(req, ["" for _ in range(12)])
            # check if flag is set to show inventory or equipment
            if self.inv_screen < 0:
                x, y = self.col3, self.row + 1
                term.puts(x, y,
                          strings._col3.format(
                              *(e if len(e) > 0 else "" for e in eq),
                              delim=self.delim))
            else:
                for item, i in zip(inv, range(len(inv))):
                    x, y = self.col3, self.row + i + 1
                    string = "{}.{}".format(chr(ord('a') + i), item)
                    term.puts(x, y, string)
        else:
            term.puts(
                self.col3,
                self.row + 1,
                strings._col3.format(
                    *("" for _ in range(12)),
                    delim=self.delim))
        # FOOTER
        self.description_row()
        term.refresh()
        code = term.read()
        while code in (term.TK_SHIFT, term.TK_ALT, term.TK_CONTROL,):
            # ignores key modifiers -- keyboard think of it as a single key
            code = term.read()
        if code == term.TK_LEFT or code == term.TK_KP_4:
            if self.character_index == 0:
                self.gender_index = modify(
                    increment=-1,
                    index=self.gender_index,
                    options=len(self.gender_options))
            elif self.character_index == 1:
                self.race_index = modify(
                    increment=-1,
                    index=self.race_index,
                    options=len(self.race_options))
            elif self.character_index == 2:
                self.class_index = modify(
                    increment=-1,
                    index=self.class_index,
                    options=len(self.class_options))
        elif code == term.TK_RIGHT or code == term.TK_KP_6:
            if self.character_index == 0:
                self.gender_index = modify(
                    increment=1,
                    index=self.gender_index,
                    options=len(self.gender_options))
            elif self.character_index == 1:
                self.race_index = modify(
                    increment=1,
                    index=self.race_index,
                    options=len(self.race_options))
            elif self.character_index == 2:
                self.class_index = modify(
                    increment=1,
                    index=self.class_index,
                    options=len(self.class_options))
        elif code == term.TK_UP or code == term.TK_KP_8:
            self.character_index = modify(-1, self.character_index, 4)
            if self.character_index <= 2:
                self.inv_screen = -1
        elif code == term.TK_DOWN or code == term.TK_KP_2:
            self.character_index = modify(
                increment=1,
                index=self.character_index,
                options=4)
        # Toggles Skills <-> Spells
        elif code == term.TK_S and self.character_index > 1:
            self.abilities *= -1  # was misspelled 'abilitites' (AttributeError)
        # Toggles Inventory <-> Equipment
        elif code == term.TK_V and self.character_index > 1:
            self.inv_screen *= -1
        # Randomize selection -- maybe remove since its more of a debugging usage
        elif code == term.TK_8:
            # only randomizes if shift-8 is pressed -- else it's just pressing 8
            if term.state(term.TK_SHIFT) and self.character_index <= 1:
                # lets not randomize if you've already switch a gender
                # its not much more effort to finish creating your character
                self.gender_index = random.randint(0, 1)
                gender = self.gender_row(draw=False)
                self.race_index = random.randint(0, 4)
                race = self.race_row(draw=False)
                self.class_index = random.randint(0, 4)
                job = self.class_row(draw=False)
                eq, inv = self.form_equipment(race.eq, job.equipment)
                self.ret['kwargs'] = {
                    'player': player(
                        race.location,
                        race.gold,
                        race.stats,
                        gender.gender,
                        gender.bonus,
                        race.race,
                        race.bonus,
                        job.classes,
                        job.bonuses,
                        race.skills,
                        eq,
                        inv)
                }
                self.ret['scene'] = 'name_menu'
                self.proceed = False
                self.reset()
        # ENTER AFTER CHOOSING ALL 3 BACKGROUND CHOICES
        elif code == term.TK_ENTER:
            # check to see if we are at the final index
            if self.character_index == 3:
                gender = self.gender_row(draw=False)
                race = self.race_row(draw=False)
                job = self.class_row(draw=False)
                # eq/inv were computed during rendering above
                self.ret['kwargs'] = {
                    'player': player(
                        race.location,
                        race.gold,
                        race.stats,
                        gender.gender,
                        gender.bonus,
                        race.race,
                        race.bonus,
                        job.classes,
                        job.bonuses,
                        race.skills,
                        eq,
                        inv)
                }
                self.ret['scene'] = 'name_menu'
                self.proceed = False
                self.reset()
            else:
                self.character_index += 1
        elif code in (term.TK_ESCAPE,):
            if term.state(term.TK_SHIFT):
                # shift-escape exits to desktop
                self.proceed = False
                self.ret['scene'] = ''
            elif self.character_index == 0:
                self.proceed = False
                self.ret['scene'] = 'main_menu'
            else:
                self.character_index -= 1
                if self.character_index <= 1:
                    self.inv_screen = -1
                    self.abilities = -1  # was misspelled 'abilitites'

    def cc_border(self):
        '''Border for Create Character Screen'''
        term.bkcolor('darkest grey')
        # top/bot lines horizontal border
        for i in range(self.width):
            term.puts(i, 1 if not self.shorten else 0, ' ')
            term.puts(i, 35 if not self.shorten else 18, ' ')
        # left/right lines vertical border
        for i in range(35 if not self.shorten else 18):
            term.puts(0, i + 1, ' ')
            term.puts(self.width - 1, i + 1, ' ')

    def draw_title(self):
        '''Adds the title to top of screen'''
        title = " " + self.title + " "
        term.bkcolor('brown')
        term.puts(
            center(title, self.width),
            1 if not self.shorten else 0,
            "[c=black]" + self.title + "[/c]")
        term.bkcolor('black')

    def subtitle_text(self, i):
        '''Subtitle prompt for selection stage ``i``.'''
        text = "Choose your {}"
        if i == 0:
            return text.format("gender")
        elif i == 1:
            return text.format("race")
        elif i == 2:
            return text.format('class | Press "v" to view your inventory')
        else:
            return "Press (ENTER) to finish"

    def draw_subtitle(self):
        '''Adds text underneath the title'''
        # subtitle -- clears subtitle area to make space for new subtitle text
        subtitle = self.subtitle_text(self.character_index)
        x = center(subtitle, self.width)
        y = 3 if not self.shorten else 1
        term.puts(x, y, subtitle)
        term.bkcolor('black')

    def gender_row(self, draw=True):
        '''Returns a tuple "gender" according to the gender_index'''
        if draw:
            for index, option in enumerate(self.gender_options):
                x, y = 24 + 22 * index, 5 if not self.shorten else 2
                gender = pad(option.gender, length=8)
                if index == self.gender_index:
                    if self.character_index == 0:
                        switch(x, y, gender, bg_before='white', color='black')
                    else:
                        switch(x, y, gender, bg_before='grey')
                else:
                    switch(x, y, gender)
        return self.gender_options[self.gender_index]

    def race_row(self, draw=True):
        '''Returns a tuple "race" according to the race_index'''
        # RACE OPTIONS
        if draw:
            for index, option in enumerate(self.race_options):
                x, y = 13 + 11 * index, 7 if not self.shorten else 3
                race = pad(option.race, length=8)
                if index == self.race_index:
                    if self.character_index == 1:
                        switch(x, y, race, bg_before='white', color='black')
                    elif self.character_index > 1:
                        switch(x, y, race, bg_before='grey')
                    else:
                        switch(x, y, race)
                else:
                    switch(x, y, race)
        return self.race_options[self.race_index]

    def class_row(self, draw=True):
        '''Returns a tuple "class" according to class_index'''
        if draw:
            for index, option in enumerate(self.class_options):
                x, y = 13 + 11 * index, 9 if not self.shorten else 4
                option = pad(option.classes, length=8)
                if index == self.class_index:
                    if self.character_index == 2:
                        switch(x, y, option, bg_before='white', color='black')
                    elif self.character_index > 2:
                        switch(x, y, option, bg_before='grey')
                    else:
                        switch(x, y, option)
                else:
                    switch(x, y, option)
        return self.class_options[self.class_index]

    def description_row(self):
        '''Returns the descriptions according to character_index'''
        primary = min(self.character_index, 2)
        secondary = self.class_index if self.character_index >= 2 \
            else self.race_index if self.character_index == 1 \
            else self.character_index
        description = self.descriptions[primary][secondary]
        term.puts(1, 37 if not self.shorten else (self.row + 14),
                  join(description.replace('\n',''), self.width - 2, self.delim))

    def transform_values(self, values):
        """Format stat bonuses as colored +N/-N markup (0 passes through)."""
        return ("+[c=#00ff00]" + str(v) + "[/c]" if v > 0
                else "-[c=#ff0000]" + str(abs(v)) + "[/c]" if v < 0
                else v
                for v in values)

    def form_equipment(self, race_eq, class_eq):
        """Merge class and racial gear per slot.

        The first item in each slot (class gear wins) is equipped; the
        remainder plus starting consumables become the inventory.
        """
        def get_eq(x):
            # normalize a slot entry ("" | item | tuple of items) to a list
            eq = []
            if x != "":
                if isinstance(x, tuple):
                    for xx in x:
                        eq.append(xx)
                else:
                    eq.append(x)
            return eq
        def flatten(container):
            return [ item for items in container for item in items ]
        inv = []
        # note: r iterates class_eq, c iterates race_eq -- class items first
        for r, c in zip(class_eq, race_eq):
            inv.append(get_eq(r) + get_eq(c))
        eqp = [ i.pop(0) if len(i) > 0 else [] for i in inv ]
        return eqp, flatten(inv) + ['small health potion', 'berry']
def test_hero():
    """Build a throwaway hero dict for debugging, bypassing the UI loop."""
    scene = Create()
    sex = scene.gender_row(draw=False)
    lineage = scene.race_row(draw=False)
    career = scene.class_row(draw=False)
    gear, backpack = scene.form_equipment(lineage.eq, career.equipment)
    hero = player(
        lineage.location,
        lineage.gold,
        lineage.stats,
        sex.gender,
        sex.bonus,
        lineage.race,
        lineage.bonus,
        career.classes,
        career.bonuses,
        lineage.skills,
        gear,
        backpack)
    return {'player': hero, 'name': 'Grey'}
# Manual smoke test: run the creation scene standalone in a terminal window.
if __name__ == "__main__":
    term.open()
    c = Create()
    ret = c.run()
    print(ret)
|
from __future__ import print_function
from future.utils import iteritems
from builtins import range, str, object
import os
import sys
import time
import inspect
import itertools
import numpy as np
from contextlib import contextmanager
from peri import initializers
from peri.logger import log
log = log.getChild('util')
# ============================================================================
# Tiling utilities
# ============================================================================
def oddify(num):
    """
    Round ``num`` up to the next odd number; odd inputs pass through.
    Examples
    --------
    >>> oddify(1)
    1
    >>> oddify(4)
    5
    """
    return num if num % 2 else num + 1
def listify(a):
    """
    Coerce ``a`` into a plain list: scalars are wrapped, iterables copied,
    and ``None`` becomes the empty list.
    Examples
    --------
    >>> listify(0)
    [0]
    >>> listify([1,2,3])
    [1, 2, 3]
    >>> listify('a')
    ['a']
    >>> listify(np.array([1,2,3]))
    [1, 2, 3]
    >>> listify('string')
    ['string']
    """
    if a is None:
        return []
    if isinstance(a, (tuple, list, np.ndarray)):
        return list(a)
    return [a]
def delistify(a, b=None):
    """
    If a single element list, extract the element as an object, otherwise
    leave as it is. When ``b`` is a container, coerce ``a`` into ``b``'s
    container type instead.

    (An unreachable trailing ``return a`` after the exhaustive if/else
    was removed.)
    Examples
    --------
    >>> delistify('string')
    'string'
    >>> delistify(['string'])
    'string'
    >>> delistify(['string', 'other'])
    ['string', 'other']
    >>> delistify(np.array([1.0]))
    1.0
    >>> delistify([1, 2, 3])
    [1, 2, 3]
    """
    if isinstance(b, (tuple, list, np.ndarray)):
        # mirror b's container type; wrap scalars first
        if isinstance(a, (tuple, list, np.ndarray)):
            return type(b)(a)
        return type(b)([a])
    # no template container: unwrap singleton sequences only
    if isinstance(a, (tuple, list, np.ndarray)) and len(a) == 1:
        return a[0]
    return a
def amin(a, b):
    """Element-wise minimum via row-stacking (keeps array output for scalars)."""
    stacked = np.vstack([a, b])
    return stacked.min(axis=0)
def amax(a, b):
    """Element-wise maximum via row-stacking (keeps array output for scalars)."""
    stacked = np.vstack([a, b])
    return stacked.max(axis=0)
def aN(a, dim=3, dtype='int'):
    """
    Convert an integer or iterable list to numpy array of length dim. This func
    is used to allow other methods to take both scalars non-numpy arrays with
    flexibility.
    Parameters
    ----------
    a : number, iterable, array-like
        The object to convert to numpy array
    dim : integer
        The length of the resulting array (ignored when ``a`` is iterable)
    dtype : string or np.dtype
        Type which the resulting array should be, e.g. 'float', np.int8
    Returns
    -------
    arr : numpy array
        Resulting numpy array of length ``dim`` and type ``dtype``
    Examples
    --------
    >>> aN(1, dim=2, dtype='float')
    array([1., 1.])
    >>> aN(1, dtype='int')
    array([1, 1, 1])
    >>> aN(np.array([1,2,3]), dtype='float')
    array([1., 2., 3.])
    """
    if hasattr(a, '__iter__'):
        return np.array(a).astype(dtype)
    return np.array([a] * dim, dtype=dtype)
def getdtype(types):
    """Return the numpy dtype that results from combining all ``types``."""
    combined = np.sum([np.array([1], dtype=t) for t in types])
    return combined.dtype
def getdim(a):
    """Return ``len(a)`` for iterables, ``None`` for scalars."""
    return len(a) if hasattr(a, '__iter__') else None
def isint(dtype):
    """True when ``dtype`` names a signed or unsigned integer type."""
    kind = np.array([0.0], dtype=dtype).dtype.name[0]
    return kind in ['i', 'u']
class CompatibilityPatch(object):
    """Mixin that back-fills missing instance attributes with defaults."""

    def patch(self, var):
        """For each name/default in ``var``, set it on self unless present."""
        for name, default in zip(list(var.keys()), list(var.values())):
            self.__dict__.setdefault(name, default)
class Tile(CompatibilityPatch):
def __init__(self, left, right=None, mins=None, maxs=None,
size=None, centered=False, dim=None, dtype='int'):
"""
Creates a tile element which represents a hyperrectangle in D
dimensions. These hyperrectangles may be operated upon to find
intersections, bounding tiles, calculate interior coordinates
and other common operations.
Parameters
----------
left : number or array-like
Left side of the tile
right : (optional) number or array-like
If provided along with left, gives the right side of the tile
mins : (optional) number or array-like
Can be provided to clip the sides of the Tile to certain minimum
maxs : (optional) number or array-like
Can be provided to clip the sides of the Tile to certain maximum
size : (optional) number or array-like
If provided along with left gives the size of the tile
centered : boolean
* If true: ``[left] - [size]/2 -> [left] + [size]/2``
* If false: ``[left] -> [left] + [size]``
dim : integer
Number of dimensions for the Tile
dtype : string, np.dtype
Resulting type of number for the Tile coordinates
Notes
-----
These parameters can be combined into many different combinations
(where [] indicates an array created from either a single number
or any iterable):
* left : ``[0,0,0] -> [left]``
* left, right : ``[left] -> [right]``
* left, size (not centered) : ``[left] -> [left] + [size]``
* left, size (yes centered) : ``[left] - [size]/2 -> [left] + [size]/2``
Each of these can be limited by using (mins, maxs) which are applied
after calculating left, right for each element:
* ``left = max(left, [mins])``
* ``right = min(right, [maxs])``
Since tiles are used for array slicing, they only allow integer
values, which can truncated without warning from float.
Notes on dimension. The dimensionality is determined first by the
shape of left, right, size if provided not as an integer. If it
not provided there then it is assumed to be 3D. This can be
overridden by setting dim in the arguments. For example:
* Tile(3) : ``[0,0,0] -> [3,3,3]``
* Tile(3, dim=2) : ``[0,0] -> [3,3]``
* Tile([3]) : ``[0] -> [3]``
Examples
--------
>>> Tile(10)
Tile [0, 0, 0] -> [10, 10, 10] ([10, 10, 10])
>>> Tile([1,2])
Tile [0, 0] -> [1, 2] ([1, 2])
>>> Tile(0, size=4, centered=True)
Tile [-2, -2, -2] -> [2, 2, 2] ([4, 4, 4])
>>> Tile([-1, 0, 1], right=10, mins=0)
Tile [0, 0, 1] -> [10, 10, 10] ([10, 10, 9])
>>> Tile(10, dtype='float')
Tile [0.0, 0.0, 0.0] -> [10.0, 10.0, 10.0] ([10.0, 10.0, 10.0])
"""
self.dtype = dtype
# first determine the dimensionality of the tile
dims = set([getdim(i) for i in [left, right, size]] + [dim])
dims = dims.difference(set([None]))
if len(dims) == 0:
dim = 3
elif len(dims) == 1:
dim = dims.pop()
elif len(dims) > 1:
raise AttributeError("Dimension mismatch between left, right, size, dim")
nkw = {'dim': dim, 'dtype': self.dtype}
if right is None:
if size is None:
right = left
left = 0
else:
if not centered:
right = aN(left, **nkw) + aN(size, **nkw)
else:
l = aN(left, **nkw)
s = aN(size, **nkw)
if isint(self.dtype):
left = l - s//2
right = left + s
else:
left, right = l - s/2.0, l + s/2.0
assert np.all((right - left) == size)
left = aN(left, **nkw)
right = aN(right, **nkw)
if dim is not None:
self.dim = dim
assert(left.shape[0] == dim)
assert(right.shape[0] == dim)
else:
self.dim = left.shape[0]
if mins is not None:
left = amax(left, aN(mins, **nkw))
if maxs is not None:
right = amin(right, aN(maxs, **nkw))
self.l = np.array(left)
self.r = np.array(right)
self._build_caches()
def _build_caches(self):
self._coord_slicers = []
for i in range(self.dim):
self._coord_slicers.append(
tuple(None if j != i else np.s_[:] for j in range(self.dim))
)
@property
def slicer(self):
"""
Array slicer object for this tile
>>> Tile((2,3)).slicer
(slice(0, 2, None), slice(0, 3, None))
>>> np.arange(10)[Tile((4,)).slicer]
array([0, 1, 2, 3])
"""
return tuple(np.s_[l:r] for l,r in zip(*self.bounds))
def oslicer(self, tile):
""" Opposite slicer, the outer part wrt to a field """
mask = None
vecs = tile.coords(form='meshed')
for v in vecs:
v[self.slicer] = -1
mask = mask & (v > 0) if mask is not None else (v>0)
return tuple(np.array(i).astype('int') for i in zip(*[v[mask] for v in vecs]))
@property
def shape(self):
return self.r - self.l
@property
def bounds(self):
return (self.l, self.r)
@property
def center(self):
"""
Return the center of the tile
>>> Tile(5).center
array([2.5, 2.5, 2.5])
"""
return (self.r + self.l)/2.0
@property
def volume(self):
"""
Volume of the tile
>>> Tile(10).volume
1000
>>> Tile(np.sqrt(2), dim=2, dtype='float').volume #doctest: +ELLIPSIS
2.0000000000...
"""
return np.prod(self.shape)
@property
def kcenter(self):
""" Return the frequency center of the tile (says fftshift) """
return np.array([
np.abs(np.fft.fftshift(np.fft.fftfreq(q))).argmin()
for q in self.shape
]).astype('float')
@property
def corners(self):
"""
Iterate the vector of all corners of the hyperrectangles
>>> Tile(3, dim=2).corners
array([[0, 0],
[0, 3],
[3, 0],
[3, 3]])
"""
corners = []
for ind in itertools.product(*((0,1),)*self.dim):
ind = np.array(ind)
corners.append(self.l + ind*self.r)
return np.array(corners)
def _format_vector(self, vecs, form='broadcast'):
"""
Format a 3d vector field in certain ways, see `coords` for a
description of each formatting method.
"""
if form == 'meshed':
return np.meshgrid(*vecs, indexing='ij')
elif form == 'vector':
vecs = np.meshgrid(*vecs, indexing='ij')
return np.rollaxis(np.array(np.broadcast_arrays(*vecs)),0,self.dim+1)
elif form == 'flat':
return vecs
else:
return [v[self._coord_slicers[i]] for i,v in enumerate(vecs)]
def coords(self, norm=False, form='broadcast'):
"""
Returns the coordinate vectors associated with the tile.
Parameters
-----------
norm : boolean
can rescale the coordinates for you. False is no rescaling,
True is rescaling so that all coordinates are from 0 -> 1.
If a scalar, the same norm is applied uniformally while if
an iterable, each scale is applied to each dimension.
form : string
In what form to return the vector array. Can be one of:
'broadcast' -- return 1D arrays that are broadcasted to be 3D
'flat' -- return array without broadcasting so each component
is 1D and the appropriate length as the tile
'meshed' -- arrays are explicitly broadcasted and so all have
a 3D shape, each the size of the tile.
'vector' -- array is meshed and combined into one array with
the vector components along last dimension [Nz, Ny, Nx, 3]
Examples
--------
>>> Tile(3, dim=2).coords(form='meshed')[0]
array([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.]])
>>> Tile(3, dim=2).coords(form='meshed')[1]
array([[0., 1., 2.],
[0., 1., 2.],
[0., 1., 2.]])
>>> Tile([4,5]).coords(form='vector').shape
(4, 5, 2)
>>> [i.shape for i in Tile((4,5), dim=2).coords(form='broadcast')]
[(4, 1), (1, 5)]
"""
if norm is False:
norm = 1
if norm is True:
norm = np.array(self.shape)
norm = aN(norm, self.dim, dtype='float')
v = list(np.arange(self.l[i], self.r[i]) / norm[i] for i in range(self.dim))
return self._format_vector(v, form=form)
def kvectors(self, norm=False, form='broadcast', real=False, shift=False):
"""
Return the kvectors associated with this tile, given the standard form
of -0.5 to 0.5. `norm` and `form` arguments arethe same as that passed to
`Tile.coords`.
Parameters
-----------
real : boolean
whether to return kvectors associated with the real fft instead
"""
if norm is False:
norm = 1
if norm is True:
norm = np.array(self.shape)
norm = aN(norm, self.dim, dtype='float')
v = list(np.fft.fftfreq(self.shape[i])/norm[i] for i in range(self.dim))
if shift:
v = list(np.fft.fftshift(t) for t in v)
if real:
v[-1] = v[-1][:(self.shape[-1]+1)//2]
return self._format_vector(v, form=form)
def __str__(self):
return self.__repr__()
def __repr__(self):
    # e.g. "Tile [0, 0] -> [5, 5] ([5, 5])"
    cls_name = str(self.__class__.__name__)
    return "{} {} -> {} ({})".format(
        cls_name, list(self.l), list(self.r), list(self.shape))
def contains(self, items, pad=0):
    """
    Test whether coordinates are contained within this tile.

    Parameters
    ----------
    items : ndarray [3] or [N, 3]
        N coordinates to check are within the bounds of the tile

    pad : integer or ndarray [3]
        anisotropic padding to apply in the contain test

    Examples
    --------
    >>> Tile(5, dim=2).contains([[-1, 0], [2, 3], [2, 6]])
    array([False,  True, False])
    """
    inside = (items >= self.l - pad) & (items < self.r + pad)
    ndim = len(inside.shape)
    if ndim == 2:
        # one row per coordinate: reduce over the coordinate axis
        return inside.all(axis=-1)
    if ndim == 1:
        return inside.all()
    return inside
@staticmethod
def intersection(tiles, *args):
    """
    Intersection of tiles, returned as a tile

    >>> Tile.intersection(Tile([0, 1], [5, 4]), Tile([1, 0], [4, 5]))
    Tile [1, 1] -> [4, 4] ([3, 3])
    """
    all_tiles = listify(tiles) + listify(args)

    if len(all_tiles) < 2:
        return all_tiles[0]

    first = all_tiles[0]
    left, right = first.l.copy(), first.r.copy()
    for other in all_tiles[1:]:
        # shrink: max of left corners, min of right corners
        left = amax(left, other.l)
        right = amin(right, other.r)
    return Tile(left, right, dtype=left.dtype)
@staticmethod
def boundingtile(tiles, *args):
    """
    Convex bounding box of a group of tiles

    >>> Tile.boundingtile(Tile([0, 1], [5, 4]), Tile([1, 0], [4, 5]))
    Tile [0, 0] -> [5, 5] ([5, 5])
    """
    all_tiles = listify(tiles) + listify(args)

    if len(all_tiles) < 2:
        return all_tiles[0]

    first = all_tiles[0]
    left, right = first.l.copy(), first.r.copy()
    for other in all_tiles[1:]:
        # grow: min of left corners, max of right corners
        left = amin(left, other.l)
        right = amax(right, other.r)
    return Tile(left, right, dtype=left.dtype)
def __eq__(self, other):
    """Tiles are equal when both corners match element-wise."""
    if other is None:
        return False
    return (self.l == other.l).all() and (self.r == other.r).all()
def __ne__(self, other):
    """Inverse of ``__eq__``.

    Bug fix: the original returned ``~self.__eq__(other)``. ``~`` is
    *bitwise* inversion, so on a plain Python bool ``~True == -2`` and
    ``~False == -1`` -- both truthy. Logical negation with ``not`` is
    correct whether ``__eq__`` yields a numpy bool or a Python bool.
    """
    if other is None:
        return True
    return not self.__eq__(other)
def __and__(self, other):
    # `a & b` is shorthand for the overlapping region of two tiles.
    return Tile.intersection(self, other)
def __or__(self, other):
    # `a | b` is shorthand for the convex bounding box of two tiles.
    return Tile.boundingtile(self, other)
def copy(self):
    """Return an independent tile with copied corner arrays."""
    left = self.l.copy()
    right = self.r.copy()
    return Tile(left, right, dtype=self.dtype)
def translate(self, dr):
    """
    Translate a tile by an amount dr

    >>> Tile(5).translate(1)
    Tile [1, 1, 1] -> [6, 6, 6] ([5, 5, 5])
    """
    moved = self.copy()
    # in-place shift keeps the corner arrays' dtype unchanged
    moved.l += dr
    moved.r += dr
    return moved
def pad(self, pad):
    """
    Pad this tile by an equal amount on each side as specified by pad

    >>> Tile(10).pad(2)
    Tile [-2, -2, -2] -> [12, 12, 12] ([14, 14, 14])

    >>> Tile(10).pad([1,2,3])
    Tile [-1, -2, -3] -> [11, 12, 13] ([12, 14, 16])
    """
    grown = self.copy()
    # in-place ops keep the corner arrays' dtype unchanged
    grown.l -= pad
    grown.r += pad
    return grown
def overhang(self, tile):
    """
    Get the left and right absolute overflow -- the amount of box
    overhanging `tile`, can be viewed as self \\ tile (set theory relative
    complement, but in a bounding sense)
    """
    zero = aN(0, dim=self.dim)
    left_hang = np.abs(amin(self.l - tile.l, zero))
    right_hang = np.abs(amax(self.r - tile.r, zero))
    return left_hang, right_hang
def reflect_overhang(self, clip):
    """
    Compute the overhang and reflect it internally so respect periodic
    padding rules (see states._tile_from_particle_change). Returns both
    the inner tile and the inner tile with necessary pad.
    """
    original = self.copy()
    padded = self.copy()

    # grow by the overhang on each side, then clip both back to `clip`
    hang_left, hang_right = padded.overhang(clip)
    padded = padded.pad(hang_left)
    padded = padded.pad(hang_right)

    inner = Tile.intersection([clip, original])
    outer = Tile.intersection([clip, padded])
    return inner, outer
def astype(self, dtype):
    """Return a new tile with both corners cast to `dtype`."""
    left = self.l.astype(dtype)
    right = self.r.astype(dtype)
    return Tile(left, right)
def __getstate__(self):
    # Pickle a shallow copy of the instance dict.
    return dict(self.__dict__)
def __setstate__(self, idct):
    # Restore pickled attributes, backfill defaults for fields added
    # after older pickles were written (CompatibilityPatch.patch), then
    # rebuild derived caches that were not serialized.
    self.__dict__.update(idct)
    self.patch({'dim': 3, 'dtype': 'int'})
    self._build_caches()
# ============================================================================
# Image classes
# ============================================================================
class Image(object):
    def __init__(self, image, tile=None, filters=None):
        """
        Wrap a raw ``np.ndarray`` so that a sub-region can be selected and
        Fourier-space filters applied on read.

        Parameters
        -----------
        image : ndarray
            The image in float format with dimensions arranged as [z,y,x]

        tile : `peri.util.Tile`
            The region of the image to crop out to use for the actual
            featuring, etc. Coordinates are in pixel-space.

        filters : list of tuples
            A list of (slice, value) pairs which are Fourier-space domain
            filters that are to be applied to an image. In Fourier-space,
            each filter is a numpy slice object and the Fourier values
            to be subtracted from those slices.
        """
        self.image = image
        self.filters = filters or []
        self.tile = tile or Tile(image.shape)

    def get_image(self):
        """Return the tile crop, filtered if any filters are set."""
        cropped = self.image[self.tile.slicer]
        return self.filtered_image(cropped) if self.filters else cropped

    def get_padded_image(self, pad, padval=0):
        """Return the tile crop padded by `pad` with constant `padval`."""
        if hasattr(pad, '__iter__'):
            # per-axis amount, applied symmetrically on each side
            pad = [[p, p] for p in pad]
        return np.pad(self.get_image(), pad, mode='constant',
                      constant_values=padval)

    def filtered_image(self, im):
        """Returns a filtered image after applying the Fourier-space filters"""
        kspace = np.fft.fftn(im)
        for slicer, vals in self.filters:
            kspace[slicer] -= vals
        return np.real(np.fft.ifftn(kspace))

    def set_tile(self, tile):
        """Sets the current tile of the image to a `peri.util.Tile`"""
        self.tile = tile

    def set_filter(self, slices, values):
        """
        Sets Fourier-space filters for the image. The image is filtered by
        subtracting values from the image at slices.

        Parameters
        ----------
        slices : List of indices or slice objects.
            The q-values in Fourier space to filter.

        values : np.ndarray
            The complete array of Fourier space peaks to subtract off. values
            should be the same size as the FFT of the image; only the portions
            of values at slices will be removed.

        Examples
        --------
        To remove a two Fourier peaks in the data at q=(10, 10, 10) &
        (245, 245, 245), where im is the residuals of a model:
            * slices = [(10,10,10), (245, 245, 245)]
            * values = np.fft.fftn(im)
            * im.set_filter(slices, values)
        """
        self.filters = [[sl, values[sl]] for sl in slices]

    def __repr__(self):
        return "{} : {}".format(self.__class__.__name__, str(self.tile))

    def __str__(self):
        return repr(self)
class NullImage(Image):
    def __init__(self, image=None, shape=None):
        """
        An image object that doesn't actually store any pixel information so
        that small save states can be created for pure model states.

        Parameters
        -----------
        image : ndarray, optional
            If given, only its shape is recorded.

        shape : tuple, optional
            Size of the image which will be mocked (used when `image` is
            not supplied).
        """
        if image is None and shape is None:
            raise AttributeError("Must provide either image or shape")

        if image is not None:
            self.shape = image.shape
            super(NullImage, self).__init__(image)
        else:
            self.shape = shape
            super(NullImage, self).__init__(np.zeros(self.shape))

    def __getstate__(self):
        # Drop the mock pixel data before pickling; recreated on load.
        state = self.__dict__.copy()
        cdd(state, ['image'])
        return state

    def __setstate__(self, idct):
        self.__dict__.update(idct)
        super(NullImage, self).__init__(np.zeros(self.shape))

    def __repr__(self):
        return "{} : {}".format(self.__class__.__name__, self.shape)

    def __str__(self):
        return repr(self)
class RawImage(Image, CompatibilityPatch):
    def __init__(self, filename, tile=None, invert=False, exposure=None,
                 float_precision=np.float64):
        """
        An image object which stores information about desired region, exposure
        compensation, color inversion, and filters to remove certain fourier
        peaks.

        Parameters
        ----------
        filename : str
            Path of the image file. Recommended that you supply a relative path
            so that transfer between computers is possible, i.e. if the file is located
            at ``/home/user/data/1.tif`` then work in the directory ``/home/user/data``
            and supply the filename ``1.tif``.

        tile : :class:`peri.util.Tile`
            the region of the image to crop out to use for the actual featuring, etc

        invert : boolean
            Whether to invert the image.

        exposure : tuple of numbers (min, max) | None
            If set, it is the values used to normalize the image. It is the
            values which map to 0 and 1 in the loaded version of the image, the
            default being for 8-bit images, mapping raw values (0, 255) to
            loaded values (0, 1). This functionality is provided since the
            noise and exposure may change between images where a common scaling
            is desired for proper comparison. Setting this values allows a
            series of images to be initialized with the same ILM, PSF etc.
            Should be the bit value of the camera.

        float_precision : numpy float datatype
            One of numpy.float16, numpy.float32, numpy.float64; precision
            for precomputed arrays. Default is np.float64; make it 16 or 32
            to save memory.
        """
        self.filename = filename
        self.invert = invert
        # NOTE(review): immediately overwritten to [] by Image.__init__
        # below -- presumably kept for unpickling older saves; confirm
        # before removing.
        self.filters = None
        self.exposure = exposure

        if float_precision not in (np.float64, np.float32, np.float16):
            raise ValueError('float_precision must be one of np.float64, ' +
                             'np.float32, np.float16')
        self.float_precision = float_precision

        image = self.load_image()
        super(RawImage, self).__init__(image, tile=tile)

    def load_image(self):
        """ Read the file and perform any transforms to get a loaded image """
        try:
            image = initializers.load_tiff(self.filename)
            image = initializers.normalize(
                image, invert=self.invert, scale=self.exposure,
                dtype=self.float_precision
            )
        except IOError as e:
            log.error("Could not find image '%s'" % self.filename)
            raise e
        return image

    def set_scale(self, exposure):
        """
        Set the exposure parameter for this image, which determines the
        values which get mapped to (0,1) in the output image.

        See also
        --------
        :class:`peri.util.RawImage`
        """
        self.exposure = exposure

    def get_scale(self):
        """
        If exposure was not set in the __init__, get the exposure associated
        with this RawImage so that it may be used in other
        :class:`~peri.util.RawImage`. This is useful for transferring exposure
        parameters to a series of images.

        Returns
        -------
        exposure : tuple of floats
            The (emin, emax) which get mapped to (0, 1)
        """
        if self.exposure is not None:
            return self.exposure

        # fall back to the raw file's own data range
        raw = initializers.load_tiff(self.filename)
        return raw.min(), raw.max()

    @staticmethod
    def get_scale_from_raw(raw, scaled):
        """
        When given a raw image and the scaled version of the same image, it
        extracts the ``exposure`` parameters associated with those images.
        This is useful when

        Parameters
        ----------
        raw : array_like
            The image loaded fresh from a file

        scaled : array_like
            Image scaled using :func:`peri.initializers.normalize`

        Returns
        -------
        exposure : tuple of numbers
            Returns the exposure parameters (emin, emax) which get mapped to
            (0, 1) in the scaled image. Can be passed to
            :func:`~peri.util.RawImage.__init__`
        """
        t0, t1 = scaled.min(), scaled.max()
        r0, r1 = float(raw.min()), float(raw.max())

        # invert the linear map scaled = (raw - rmin) / (rmax - rmin)
        rmin = (t1*r0 - t0*r1) / (t1 - t0)
        rmax = (r1 - r0) / (t1 - t0) + rmin
        return (rmin, rmax)

    def __getstate__(self):
        # Drop the (large) pixel data; it is reloaded from file on unpickle.
        d = self.__dict__.copy()
        cdd(d, ['image'])
        return d

    def __setstate__(self, idct):
        self.__dict__.update(idct)
        # backfill attributes added after older pickles were written
        self.patch({'float_precision': np.float64})
        self.image = self.load_image()

    def __repr__(self):
        return "{} <{}: {}>".format(
            self.__class__.__name__, self.filename, str(self.tile)
        )

    def __str__(self):
        return self.__repr__()
def cdd(d, k):
    """ Conditionally delete key (or list of keys) 'k' from dict 'd' """
    keys = k if isinstance(k, list) else [k]
    for key in keys:
        # pop with default: silently skip keys that are absent
        d.pop(key, None)
# ============================================================================
# Progress bar
# ============================================================================
class ProgressBar(object):
    def __init__(self, num, label='Progress', value=0, screen=79,
                 time_remaining=True, bar=True, bar_symbol='=', bar_caps='[]',
                 bar_decimals=2, display=True):
        """
        ProgressBar class which creates a dynamic ASCII progress bar of two
        different varieties:

            1) A bar chart that looks like the following:
                ``Progress [================ ] 63.00%``

            2) A simple number completed look:
                ``Progress : 17 / 289``

        Parameters
        -----------
        num : integer
            The number of tasks that need to be completed

        label : string [default: 'Progress']
            The label for this particular progress indicator,

        value : integer [default: 0]
            Starting value

        screen : integer [default: 79]
            Size the screen to use for the progress bar

        time_remaining : boolean [default: True]
            Display estimated time remaining

        bar : boolean [default: True]
            Whether or not to display the bar chart

        bar_symbol : char [default: '=']
            The character to use to fill in the bar chart

        bar_caps : string [default: '[]']
            Characters to use as the end caps of the. The string will be split in
            half and each half put on a side of the chart

        bar_decimals : integer [default: 2]
            Number of decimal places to include in the percentage

        display : boolean [default: True]
            a crutch so that we don't have a lot of ``if``s later. display
            or don't display the progress bar
        """
        # TODO -- add estimated time remaining
        self.num = num
        self.value = value
        self._percent = 0
        self.time_remaining = time_remaining
        self._deltas = []  # wall-clock timestamps of every update() call
        self.display = display
        self.label = label
        self.bar = bar
        self._bar_symbol = bar_symbol
        self._bar_caps = bar_caps
        self._decimals = bar_decimals
        self.screen = screen

        if len(self._bar_caps) % 2 != 0:
            raise AttributeError("End caps must be even number of symbols")

        if self.bar:
            # 3 digit _percent + decimal places + '.'
            self._numsize = 3 + self._decimals + 1

            # end caps size calculation
            self._cap_len = len(self._bar_caps)//2
            self._capl = self._bar_caps[:self._cap_len]
            self._capr = self._bar_caps[self._cap_len:]

            # time remaining calculation for space
            self._time_space = 11 if self.time_remaining else 0

            # the space available for the progress bar is
            # 79 (screen) - (label) - (number) - 2 ([]) - 2 (space) - 1 (%)
            self._barsize = (
                self.screen - len(self.label) - self._numsize -
                len(self._bar_caps) - 2 - 1 - self._time_space
            )

            # format fields are filled from self.__dict__ in _draw()
            self._formatstr = '\r{label} {_capl}{_bars:<{_barsize}}{_capr} {_percent:>{_numsize}.{_decimals}f}%'

            self._percent = 0
            self._dt = '--:--:--'
            self._bars = ''

            if self.time_remaining:
                self._formatstr += " ({_dt})"
        else:
            # plain "label : value / num" counter; width from digit count
            self._digits = str(int(np.ceil(np.log10(self.num))))
            self._formatstr = '\r{label} : {value:>{_digits}} / {num:>{_digits}}'

            self._dt = '--:--:--'
            if self.time_remaining:
                self._formatstr += " ({_dt})"

        self.update()

    def _estimate_time(self):
        # need a few samples before the moving average is meaningful
        if len(self._deltas) < 3:
            self._dt = '--:--:--'
        else:
            # mean inter-update interval (last 25 samples) * remaining steps
            dt = np.diff(self._deltas[-25:]).mean() * (self.num - self.value)
            self._dt = time.strftime('%H:%M:%S', time.gmtime(dt))

    def _draw(self):
        """ Interal draw method, simply prints to screen """
        if self.display:
            print(self._formatstr.format(**self.__dict__), end='')
            sys.stdout.flush()

    def increment(self):
        # advance progress by one step
        self.update(self.value + 1)

    def update(self, value=0):
        """
        Update the value of the progress and update progress bar.

        Parameters
        -----------
        value : integer
            The current iteration of the progress
        """
        self._deltas.append(time.time())

        self.value = value
        # NOTE(review): raises ZeroDivisionError when num == 0 -- confirm
        # callers always construct with num >= 1.
        self._percent = 100.0 * self.value / self.num

        if self.bar:
            self._bars = self._bar_symbol*int(np.round(self._percent / 100. * self._barsize))

        # throttle redraws to at most ~10 per second
        if (len(self._deltas) < 2) or (self._deltas[-1] - self._deltas[-2]) > 1e-1:
            self._estimate_time()
            self._draw()

        if self.value == self.num:
            self.end()

    def end(self):
        # overwrite the bar line with blanks and move to a fresh line
        if self.display:
            print('\r{lett:>{screen}}'.format(**{'lett':'', 'screen': self.screen}))
# ============================================================================
# useful decorators
# ============================================================================
import functools
import types
def newcache():
    """Return a fresh statistics dict for one memoize cache."""
    return {'hits': 0, 'misses': 0, 'size': 0}
def memoize(cache_max_size=1e9):
    """
    Decorator factory that memoizes a method's return values on its instance.

    Each decorated method gets its own cache in ``self._memoize_caches``;
    ``self._memoize_clear()`` resets all of them. ndarray arguments are
    hashed by their raw bytes. The cache stops accepting new entries once
    the ndarray results stored exceed ``cache_max_size`` bytes.

    Fix: ``ndarray.tostring()`` was deprecated in NumPy 1.19 and removed in
    NumPy 2.0; ``tobytes()`` returns the identical bytes.
    """
    def memoize_inner(obj):
        cache_name = str(obj)

        @functools.wraps(obj)
        def wrapper(self, *args, **kwargs):
            # add the memoize cache to the object first, if not present
            # provide a method to the object to clear the cache too
            if not hasattr(self, '_memoize_caches'):
                def clear_cache(self):
                    for k, v in iteritems(self._memoize_caches):
                        self._memoize_caches[k] = newcache()

                self._memoize_caches = {}
                self._memoize_clear = types.MethodType(clear_cache, self)

            # next, add the particular cache for this method if it does
            # not already exist in the parent 'self'
            cache = self._memoize_caches.get(cache_name)
            if not cache:
                cache = newcache()
                self._memoize_caches[cache_name] = cache

            size = 0
            hashed = []

            # let's hash the arguments (both args, kwargs) and be mindful of
            # numpy arrays -- that is, only take care of its data, not the obj
            # itself
            for arg in args:
                if isinstance(arg, np.ndarray):
                    hashed.append(arg.tobytes())
                else:
                    hashed.append(arg)
            for k, v in iteritems(kwargs):
                if isinstance(v, np.ndarray):
                    hashed.append(v.tobytes())
                else:
                    hashed.append(v)

            hashed = tuple(hashed)
            if hashed not in cache:
                ans = obj(self, *args, **kwargs)

                # if it is not too much to ask, place the answer in the cache
                if isinstance(ans, np.ndarray):
                    size = ans.nbytes

                newsize = size + cache['size']
                if newsize < cache_max_size:
                    cache[hashed] = ans
                    cache['misses'] += 1
                    cache['size'] = newsize
                return ans

            cache['hits'] += 1
            return cache[hashed]
        return wrapper
    return memoize_inner
# ============================================================================
# patching docstrings of sub-classes
# ============================================================================
def patch_docs(subclass, superclass):
    """
    Apply the documentation from ``superclass`` to ``subclass`` by filling
    in all overridden member function docstrings with those from the
    parent class.

    Fixes for Python 3: plain methods on a class are function objects, so
    ``inspect.ismethod`` matched nothing -- ``inspect.isroutine`` matches
    functions, builtins and bound methods alike. Likewise, functions have
    no ``__func__`` attribute, so fall back to the object itself when
    assigning ``__doc__``.
    """
    funcs0 = inspect.getmembers(subclass, predicate=inspect.isroutine)
    funcs1 = inspect.getmembers(superclass, predicate=inspect.isroutine)
    funcs1 = [f[0] for f in funcs1]

    for name, func in funcs0:
        if name.startswith('_'):
            continue
        if name not in funcs1:
            continue
        if func.__doc__ is None:
            target = getattr(subclass, name)
            # classmethods arrive as bound methods: write through __func__
            target = getattr(target, '__func__', target)
            target.__doc__ = getattr(superclass, name).__doc__
# ============================================================================
# misc helper functions
# ============================================================================
@contextmanager
def indir(path):
    """
    Context manager for switching the current path of the process. Can be used:

        with indir('/tmp'):
            <do something in tmp>
    """
    cwd = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        # Always restore the original directory. The original code also had
        # an `except Exception: raise` clause, which was redundant dead code.
        os.chdir(cwd)
|
<filename>src/peltak/extra/gitflow/logic/task.py<gh_stars>1-10
# Copyright 2017-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" git flow feature commands implementation. """
import sys
import click
from peltak.core import context
from peltak.core import git
from peltak.core import hooks
from peltak.core import log
from peltak.core import shell
from . import common
def start(name: str):
    """ Start working on a new task by branching off the current branch.

    Creates a new branch called task/<name> off the currently checked-out
    feature or hotfix branch.

    Args:
        name (str):
            The name of the new task.
    """
    branch = git.current_branch(refresh=True)
    task_branch = 'task/' + common.to_branch_name(name)

    # tasks are sub-branches of a feature/hotfix; refuse anywhere else
    if branch.type not in ('feature', 'hotfix'):
        log.err("Task branches can only branch off <33>feature<32> or "
                "<33>hotfix<32> branches")
        sys.exit(1)

    hooks.register.call('pre-task-start', name)
    common.git_checkout(task_branch, create=True)
    hooks.register.call('post-task-start', name)
def update():
    """ Update the current task branch with changes from its base branch.

    Pulls the base (feature/hotfix) branch and merges it into the current
    task branch.
    """
    branch = git.current_branch(refresh=True)
    base_branch = common.get_base_branch()

    common.assert_branch_type('task')
    # refresh the base branch, then come back and merge it in
    common.git_checkout(base_branch)
    common.git_pull(base_branch)
    common.git_checkout(branch.name)
    # NOTE(review): argument order presumably (checked-out branch,
    # branch to merge in) -- confirm against common.git_merge.
    common.git_merge(branch.name, base_branch)
def rename(name: str):
    """ Give the currently developed task a new name.

    Args:
        name (str):
            The new name of the current task. The current branch will be
            renamed to 'task/<new_name>' (spaces become underscores).
    """
    common.assert_branch_type('task')
    new_branch = 'task/' + name.strip().replace(' ', '_')
    common.git_branch_rename(new_branch)
def finish(fast_forward: bool):
    """ Merge the current task branch into its base branch and clean up.

    Args:
        fast_forward (bool):
            Allow a fast-forward merge; otherwise force a merge commit.
    """
    pretend = context.get('pretend', False)

    # refuse to merge over uncommitted work (skipped in --pretend runs)
    if not pretend and (git.staged() or git.unstaged()):
        log.err(
            "You have uncommitted changes in your repo!\n"
            "You need to stash them before you merge the hotfix branch"
        )
        sys.exit(1)

    branch = git.current_branch(refresh=True)
    base = common.get_base_branch()

    prompt = "<32>Merge <33>{}<32> into <33>{}<0>?".format(branch.name, base)
    if not click.confirm(shell.fmt(prompt)):
        log.info("Cancelled")
        return

    common.assert_branch_type('task')

    hooks.register.call('pre-task-finish', branch, base)

    # Merge task into it's base feature branch
    common.git_checkout(base)
    common.git_pull(base)
    common.git_merge(base, branch.name, no_ff=not fast_forward)

    # Cleanup
    common.git_branch_delete(branch.name)
    common.git_prune()
    common.git_checkout(base)

    hooks.register.call('post-task-finish', branch, base)
def merged():
    """ Cleanup a remotely merged branch.

    Pulls the base branch (which now contains the merged task) and deletes
    the local task branch.
    """
    base_branch = common.get_base_branch()
    branch = git.current_branch(refresh=True)

    common.assert_branch_type('task')

    hooks.register.call('pre-task-merged', branch, base_branch)

    # Pull feature branch with the merged task
    common.git_checkout(base_branch)
    common.git_pull(base_branch)

    # Cleanup
    common.git_branch_delete(branch.name)
    common.git_prune()
    common.git_checkout(base_branch)

    hooks.register.call('post-task-merged', branch, base_branch)
|
<filename>backend/app.py<gh_stars>0
from flask import Flask, redirect, sessions, request, jsonify, session, abort
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
import requests
import os
from models import db, User, Points, Event
from dotenv import load_dotenv
from flask_jwt_extended import (
create_access_token,
get_jwt_identity,
jwt_required,
JWTManager,
)
from flask_cors import CORS
# Pull configuration from a local .env file into the environment.
load_dotenv()

# Discord OAuth2 application credentials and callback target.
DISCORD_CLIENT_ID = os.getenv("DISCORD_CLIENT_ID")
DISCORD_CLIENT_SECRET = os.getenv("DISCORD_CLIENT_SECRET")
REDIRECT_URI = os.getenv("REDIRECT_URI")

# Guild (server) users must belong to; BOT_TOKEN authorizes privileged
# member/role lookups against the Discord API.
FELLOWSHIP_GUILD_ID = "818888976458973224"
BOT_TOKEN = os.getenv("BOT_TOKEN")
# Single digit matched against character 4 of role names like "Pod 0.x".
CURRENT_FELLOWSHIP = "0"

# Postgres connection pieces, assembled into db_uri below.
DB_USER = os.getenv("DB_USER")
DB_PW = os.getenv("DB_PW")
DB_HOST = os.getenv("DB_HOST")
DB_PORT = os.getenv("DB_PORT")
DB_NAME = os.getenv("DB_NAME")

# Required settings: os.environ[...] raises KeyError when unset, failing fast.
FRONTEND_URL = os.environ["FRONTEND_URL"]
SECRET_KEY = os.environ["SECRET_KEY"]

db_uri = "postgresql://{dbuser}:{dbpw}@{dbhost}:{dbport}/{dbname}".format(
    dbuser=DB_USER, dbpw=DB_PW, dbhost=DB_HOST, dbport=DB_PORT, dbname=DB_NAME
)

app = Flask(__name__)
app.config.update(
    SQLALCHEMY_DATABASE_URI=db_uri,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    SECRET_KEY=SECRET_KEY,
    # the same secret signs both Flask sessions and JWTs
    JWT_SECRET_KEY=SECRET_KEY,
)

db.init_app(app)
jwt = JWTManager(app)
CORS(app)
@app.route("/")
def index():
return f"Hello {session.get('username')}#{session.get('discriminator')} @ {session.get('role')}"
@app.route("/discord")
def discord():
full_redirect_url = "https://discord.com/api/oauth2/authorize?client_id={client_id}&redirect_uri={redirect_uri}&response_type=code&scope={scope}".format(
client_id=DISCORD_CLIENT_ID,
redirect_uri=REDIRECT_URI,
scope="identify email guilds",
)
return redirect(full_redirect_url)
@app.route("/discord/callback")
def discord_callback():
"""
Discord Callback.
Discord Access Token is stored in session.get('discord_access_token')
"""
# Get the discord access token
data = requests.post(
"https://discord.com/api/oauth2/token",
data={
"client_id": DISCORD_CLIENT_ID,
"client_secret": DISCORD_CLIENT_SECRET,
"grant_type": "authorization_code",
"code": request.args.get("code"),
"redirect_uri": REDIRECT_URI,
"scope": "identify email guilds",
},
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
# Store access token in session
session["discord_access_token"] = data.json()["access_token"]
# Get user's information
data = requests.get(
"https://discord.com/api/v8/users/@me",
headers={"Authorization": f"Bearer {session.get('discord_access_token')}"},
)
email = data.json()["email"]
discriminator = data.json()["discriminator"]
discord_id = data.json()["id"]
username = data.json()["username"]
screen_name = str(username) + "#" + str(discriminator)
avatar = data.json()["avatar"]
session["discord_id"] = discord_id
session["username"] = username
session["email"] = email
session["discriminator"] = discriminator
session["screen_name"] = screen_name
session["avatar"] = avatar
# get all the guilds that user's in
guilds = requests.get(
"https://discord.com/api/v8/users/@me/guilds",
headers={"Authorization": f"Bearer {session.get('discord_access_token')}"},
)
# check if the user is in the fellowship guide
in_fellowship = False
for guild in guilds.json():
if guild["id"] == FELLOWSHIP_GUILD_ID:
in_fellowship = True
if not in_fellowship:
message = "Error: User is not a current MLH fellow!"
return redirect(f"{FRONTEND_URL}?error=true&msg={message}")
else:
role = requests.get(
f"https://discord.com/api/v8/guilds/{FELLOWSHIP_GUILD_ID}/members/{session.get('discord_id')}",
headers={"Authorization": f"Bot {BOT_TOKEN}"},
)
# assume user only have one role
user_roles = role.json()["roles"]
roles = requests.get(
f"https://discord.com/api/v8/guilds/{FELLOWSHIP_GUILD_ID}/roles",
headers={"Authorization": f"Bot {BOT_TOKEN}"},
)
role = None
for r in roles.json():
if r["name"] == "admin" and r["id"] in user_roles:
role = "admin"
elif 'Pod' in r["name"] and r["name"][4] == CURRENT_FELLOWSHIP and r["id"] in user_roles:
role = r["name"]
session["role"] = role
# create and add a new user if doesn't exist
if User.query.filter_by(id=discord_id).first():
message = "Success: Logged in!"
else:
new_user = User(
id=discord_id, name=screen_name, email=email, role=role, avatar=avatar
)
db.session.add(new_user)
db.session.commit()
message = "Success: User registered!"
jwt_token = create_access_token(identity=discord_id, expires_delta=False)
return redirect(f"{FRONTEND_URL}?token={jwt_token}&msg={message}")
@app.route("/admin/add_points", methods=["POST"])
def add_points():
"""
Add points
"""
data = request.get_json(silent=True)["data"]
amount = data.get("amount")
assignee = data["assignee"]
description = data["description"]
event_id = None
# if user's discord id is given, change assignee to discord username
if "#" in assignee:
user = User.query.filter_by(name=assignee).first()
else:
user = User.query.filter_by(id=assignee).first()
assignee = user.name
discord_id = user.id
if description == "Event":
event_id = data.get("event_id")
secret_input = data.get("secret_input")
if event_id is None:
return jsonify({"success": False, "message": "Please specify the event id"})
if secret_input is None:
return jsonify(
{"success": False, "message": "Please input the secret code"}
)
# Check if points are already claimed for event
if Points.query.filter_by(event_id=event_id, assignee=discord_id).first():
return jsonify(
{"success": False, "message": "Event points already claimed"}
)
else:
# Check if input matches event secret code
event = Event.query.filter_by(id=event_id).first()
if event.secret_code == secret_input:
amount = event.points_amount
message = f"{amount} points added to {assignee} for Event {event.name}"
success = True
else:
return jsonify(
{
"success": False,
"message": f"The code {secret_input} is incorrect for Event {event.name}",
}
)
elif description == "Discord":
# Check daily limit of 5 messages is exceeded
discord_points_today = (
Points.query.filter_by(description="Discord", assignee=discord_id)
.filter(func.date(Points.timestamp) == func.date(func.now()))
.all()
)
if len(discord_points_today) >= 5:
return jsonify(
{
"success": False,
"message": "Daily limit for Discord activity points reached",
}
)
else:
message = f"{amount } points added to {assignee} for Discord activity"
success = True
else:
message = f"{amount} points added to {assignee} for {description}"
success = True
# Create a Points in the points table
new_point = Points(
amount=amount, assignee=discord_id, description=description, event_id=event_id
)
db.session.add(new_point)
# Add to user's total points
user.points_total += int(amount)
db.session.commit()
return jsonify(
{
"success": success,
"message": message,
"data": {
"id": new_point.id,
"amount": new_point.amount,
"assignee": new_point.assignee,
"description": new_point.description,
"event_id": new_point.event_id,
"timestamp": new_point.timestamp,
},
}
)
@app.route("/admin/create_event", methods=["POST"])
def create_event():
"""Create an event.
Returns:
Status request: The id of the object created.
"""
data = request.get_json(silent=True)["data"]
event_name = data["name"]
start_time_f = data["start_time"]
end_time_f = data["end_time"]
link = data["event_link"]
secret_code_f = data["secret_code"]
points = data["points_amount"]
event_id = None
new_event = Event(
name=event_name,
start_time=start_time_f,
end_time=end_time_f,
points_amount=points,
secret_code=secret_code_f,
event_link=link,
)
try:
db.session.add(new_event)
db.session.commit()
message = "Event successfully created."
success = True
event_id = new_event.id
return jsonify({"success": success, "message": message, "id": event_id})
except:
message = "Server Error. Could not commit to database"
success = False
return jsonify({"success": success, "message": message})
@app.route("/get_all_pod_points")
@jwt_required()
def get_all_pod_points():
"""Return points for all the pods in the fellowship.
Returns:
json: payload with all the pods and their points.
"""
try:
data = {}
all_pods = User.query.distinct(User.role)
for pod in all_pods:
fellows_in_pod = User.query.filter_by(role=str(pod.role))
if fellows_in_pod is not None:
points = 0
for fellow in fellows_in_pod:
points = points + fellow.points_total
data[str(pod.role)] = points
return jsonify(
{
"success": True,
"message": "Successfully retrieved points for all pods",
"data": data,
}
)
except Exception as e:
return jsonify({"success": False, "message": f"Error: {e}"})
@app.route("/get_events")
@jwt_required()
def get_events():
"""Get all events data
Returns:
json: payload describing conditions of query, success/failure and events data.
"""
try:
discord_id = get_jwt_identity()
user = User.query.filter_by(id=discord_id).first()
events = Event.query.all()
events_data = []
for event in events:
event_data = {
"id": event.id,
"name": event.name,
"start_time": event.start_time,
"end_time": event.end_time,
"points_amount": event.points_amount,
"event_link": event.event_link,
"vid_link": event.vid_link,
}
# Check if points are already claimed for event
if Points.query.filter_by(event_id=event.id, assignee=discord_id).first():
event_data["points_claimed"] = True
else:
event_data["points_claimed"] = False
if user.role == "admin":
event_data["secret_code"] = event.secret_code
events_data.append(event_data)
return jsonify(
{
"success": True,
"message": "Events fetched successfully",
"data": events_data,
}
)
except Exception as e:
return jsonify({"success": False, "message": f"Error: {e}"})
@app.route("/get_pod_points")
@jwt_required()
def get_pod_points():
"""Return all the points for a single pod.
Returns:
json: Payload containing the pod name and the points.
"""
pod = request.args["pod"]
# Ideally this would be something like:
# SELECT SUM(points_total)
# FROM users
# WHERE role=pod;
#
# But I honestly have NO clue how to do this with SQL alchemy syntax.
fellows_in_pod = User.query.filter_by(role=pod)
if fellows_in_pod is not None:
points = 0
for fellow in fellows_in_pod:
points = points + fellow.points_total
return jsonify({"success": True, "message": "Pod found.", str(pod): points})
return jsonify({"success": False, "message": "Pod not found."})
def serialize_user(status, message, user=None):
    """Build the standard JSON response for a user lookup.

    When `user` is given, includes id/name/email/role/points_total and a
    resolved avatar URL; otherwise only the status and message.
    """
    response = {"success": status, "message": message}
    if user is None:
        return jsonify(response)

    if user.avatar is None:
        # Discord default avatars are selected by (discriminator digit % 5).
        last_digit = int(user.name[-1])
        avatar_url = "https://cdn.discordapp.com/embed/avatars/{}.png".format(
            last_digit % 5
        )
    else:
        avatar_url = (
            "https://cdn.discordapp.com/avatars/{user_id}/{avatar_hash}.png?size=128"
        ).format(user_id=user.id, avatar_hash=user.avatar)

    response["data"] = {
        "avatar_url": avatar_url,
        "id": user.id,
        "name": user.name,
        "email": user.email,
        "role": user.role,
        "points_total": user.points_total,
    }
    return jsonify(response)
@app.route("/get_user")
@jwt_required()
def get_user():
"""Obtain user information. If a user is an admin, they can provide the optional "name" parameter
to their GET request to obtain details about any user.
If the user is a pod fellow, they can inquire about themselves only.
Returns:
json: payload describing conditions of query, success/failure and potentially user data.
"""
discord_id = get_jwt_identity()
user = User.query.filter_by(id=discord_id).first()
if user is None:
return serialize_user(False, "User not found.")
else:
# check if this is a fellow inquiring about their point total,
# or if this is an admin inquiring about a fellow's total.
if user.role == "admin":
# get the specified info for admin
r_discord_display_name = request.args.get("name")
r_user = User.query.filter_by(name=r_discord_display_name).first()
if r_user is None:
return serialize_user(False, "The requested fellow was not found.")
else:
return serialize_user(True, "Fellow found.", r_user)
else:
return serialize_user(True, "Found your user.", user)
@app.route("/recent_points")
@jwt_required()
def recent_points():
num_entries = int(request.args["num_entries"])
# Ideally should be something like this:
# SELECT name, timestamp, amount FROM
# points LEFT JOIN user ON user.id = points.id
# DESC LIMIT num_entries
recent_points = reversed(Points.query.order_by(Points.timestamp)[1:num_entries])
# FIXME: This is a hack. Join with Users table to get the *Discord* name, not the unique ID.
data = []
for point in recent_points:
data.append(
{
"timestamp": point.timestamp,
"amount": point.amount,
"user": User.query.filter_by(id=point.assignee).first().name,
}
)
return jsonify(
{
"success": True,
"message": "{} out of {} entries found.".format(len(data), num_entries),
"data": data,
}
)
@app.route("/get_points_history")
@jwt_required()
def get_points_history():
try:
n = request.args.get("n") or 20 # Default 20
points_history = (
Points.query.join(User).order_by(Points.timestamp.desc()).limit(n).all()
)
points_history_data = []
for points in points_history:
points_data = {
"amount": points.amount,
"assignee": points.user.name,
"description": points.description,
"event_id": points.event_id,
"timestamp": points.timestamp,
}
points_history_data.append(points_data)
# If user is not admin
discord_id = get_jwt_identity()
user = User.query.filter_by(id=discord_id).first()
if user.role != "admin":
points_history_data = list(
filter(lambda p: p["assignee"] == user.name, points_history_data)
)
return jsonify(
{
"success": True,
"message": "Points history fetched successfully",
"data": points_history_data,
}
)
except Exception as e:
return jsonify({"success": False, "message": f"Error: {e}"})
@app.route("/get_top_fellows")
def get_top_fellows():
try:
n = request.args.get("n") or 10 # Default 10
top_fellows = (
User.query.filter(User.role != "admin")
.order_by(User.points_total.desc())
.limit(n)
.all()
)
top_fellows_data = []
for fellow in top_fellows:
fellow_data = {"name": fellow.name, "points_total": fellow.points_total}
top_fellows_data.append(fellow_data)
return jsonify(
{
"success": True,
"message": "Top fellows fetched successfully",
"data": top_fellows_data,
}
)
except Exception as e:
return jsonify({"success": False, "message": f"Error: {e}"})
@app.route("/get_total_registered_fellows")
@jwt_required()
def get_total_fellows():
"""Retrieve the number of fellows in the app.
Returns:
json: json payload containing requested information
"""
try:
num_fellows = User.query.filter(User.role != 'admin').count()
return jsonify({
"success": True,
"message": "Number of fellows retrieved successfully",
"data": num_fellows
})
except Exception as e:
return jsonify({
"success": False,
"message": f"Error: {e}"
})
if __name__ == '__main__':
    # Create any missing tables before serving; db is bound to the Flask app,
    # so create_all() needs an active app context.
    with app.app_context():
        db.create_all()
    app.run()
|
<reponame>socialmediaie/EDNIL2020<filename>utils.py
import xml.etree.ElementTree as ET
from pathlib import Path
from collections import Counter
from collections import defaultdict
import json
import pandas as pd
# Maps the fine-grained event tags used in the XML annotations onto the
# coarse disaster classes. NOTE(review): not referenced by the functions
# visible in this module — presumably consumed by downstream code; confirm.
CLASS_MAP={
    "MAN_MADE_EVENT": "MANMADE_DISASTER",
    "NATURAL_EVENT": "NATURAL_DISASTER"
}
def transform_label(y):
    """Reduce each dotted label to its parent class (e.g. "A.b" -> "A")."""
    labels = pd.Series(y)
    return labels.str.split(".", expand=True)[0]
def transform_probs(probs, clf):
    """Collapse child-class probabilities into their parent classes and
    return the argmax parent label for each row of *probs*."""
    # Group the classifier's dotted classes by their parent prefix.
    parent_to_children = defaultdict(list)
    for child in clf.classes_:
        parent_to_children[child.split(".")[0]].append(child)
    frame = pd.DataFrame(probs, columns=clf.classes_)
    # A parent's probability is the sum over its children.
    for parent, children in parent_to_children.items():
        frame[parent] = frame[children].sum(axis=1)
    parent_cols = [parent for parent in parent_to_children]
    return frame[parent_cols].idxmax(axis=1)
def get_w_children(node, base_node=None, split_paras=False):
    """Recursively yield (token, BIO-label) pairs for every <W> under *node*.

    *base_node* is the nearest enclosing annotation element; its tag (plus an
    optional TYPE attribute) becomes the label. <W> tokens sitting directly
    inside a plain <P> paragraph are labelled "O".

    Args:
        node: element whose children are walked.
        base_node: enclosing annotation element; defaults to a dummy <P>.
        split_paras: when True, yield a ("<P>", "P") marker after each <P>.
    """
    idx = 0  # position of the next <W> within base_node; 0 gets the B- prefix
    if base_node is None:
        base_node = ET.Element('P')
    for child in node:
        if child.tag == "W":
            label = base_node.tag
            if base_node.attrib.get("TYPE"):
                label = f"{label}.{base_node.attrib['TYPE']}"
            # First token of an annotation opens the entity (B-),
            # subsequent tokens continue it (I-).
            label = f"B-{label}" if idx == 0 else f"I-{label}"
            if base_node.tag == "P":
                # Directly inside a paragraph: no annotation, so label is O.
                label = "O"
            yield child.text.strip(), label
            idx+=1
        else:
            # Non-<W> child: recurse with the child as the new annotation scope.
            yield from get_w_children(child, base_node=child)
    if split_paras and node.tag == "P":
        yield "<P>", "P"
def get_w_children_test(node, base_node=None, split_paras=False):
    """Yield (token, "O") pairs for every paragraph under *node*.

    Test documents carry no <W> annotations: each <P> holds raw text, so each
    whitespace-separated token is labelled "O".

    Args:
        node: parent element whose <P> children are tokenized.
        base_node: unused; kept for signature parity with get_w_children.
        split_paras: when True, yield a ("<P>", "P") marker after each paragraph.
    """
    # (Removed an unused local counter ``idx`` left over from get_w_children.)
    for child in node:
        if child.tag == "P":
            yield from [(t.strip(), "O") for t in child.text.split(" ")]
            if split_paras:
                yield "<P>", "P"
def split_tag(tag):
    """Split a BIO tag into (boundary, label); the "O" tag has no label."""
    if tag == "O":
        return (tag, None)
    return tuple(tag.split("-", 1))
def extract_entities(tags):
    """Convert a BIO tag sequence into (label, start, end) spans.

    ``end`` is exclusive. A trailing sentinel "O" guarantees that an entity
    still open at the end of the sequence is flushed.
    """
    tags = list(tags)
    entities = []
    open_run = []  # (boundary, label) pairs of the entity currently open
    for pos, raw in enumerate(tags + ["O"]):
        # Inlined split_tag: "B-LOC" -> ("B", "LOC"), "O" -> ("O", None).
        boundary, label = tuple(raw.split("-", 1)) if raw != "O" else (raw, None)
        if open_run:
            if boundary in {"B", "O"} or label != open_run[-1][1]:
                # Close the open entity; it spans the previous len(open_run) tokens.
                span = len(open_run)
                entities.append((open_run[-1][1], pos - span, pos))
                open_run = []
            elif boundary == "I":
                open_run.append((boundary, label))
        if boundary == "B":
            # A "B" always opens a fresh entity (any previous one was closed above).
            assert not open_run, f"Entity should be empty. Found: {open_run}"
            open_run.append((boundary, label))
    return entities
def get_entity_info(bio_labels, tokens, text=None, spans=None):
    """Resolve BIO labels into entity dicts with tokens and surface phrase.

    Args:
        bio_labels: sequence of BIO tags aligned with ``tokens``.
        tokens: token strings.
        text: original document text (optional).
        spans: per-token (start_char, end_char) offsets into ``text`` (optional).

    Returns:
        list of dicts with tokens, label, start, end (token indices, end
        exclusive) and entity_phrase (None unless text and spans are given).
    """
    entities_info = extract_entities(bio_labels)
    entities = []
    for label, start, end in entities_info:
        entity_phrase = None
        if text and spans:
            start_char_idx = spans[start][0]
            end_char_idx = spans[end-1][1]
            # BUG FIX: the char offsets above were computed but never used,
            # and the phrase was built as " ".join(f" {t} " ...), yielding
            # doubled interior spaces and padded ends. Slice the original
            # text with the span offsets instead.
            entity_phrase = text[start_char_idx:end_char_idx]
        entities.append(dict(
            tokens=tokens[start:end],
            label=label,
            start=start,
            end=end,
            entity_phrase=entity_phrase))
    return entities
def xml_to_conll(xml_path, test=False):
    """Parse one XML document into a list of (token, BIO-tag) pairs.

    Args:
        xml_path: path to the XML file.
        test: when True, use the unannotated (test-set) parser.
    """
    root = ET.parse(xml_path).getroot()
    parser = get_w_children_test if test else get_w_children
    return list(parser(root))
def xml_to_json(xml_path, test=False):
    """Convert one XML document into a JSON-serializable record.

    The record carries the doc id (file stem), token/tag sequences, and a
    Counter of document-level event labels taken from entity-opening tags.
    """
    seq = xml_to_conll(xml_path, test=test)
    tokens, tags = zip(*seq)
    event_prefixes = ("B-MAN_MADE_EVENT", "B-NATURAL_EVENT")
    # Strip the "B-" prefix and count each opened event annotation.
    event_labels = [t[2:] for t in tags if t.startswith(event_prefixes)]
    return {
        "docid": xml_path.stem,
        "tokens": tokens,
        "tags": tags,
        "labels": Counter(event_labels)
    }
def get_all_json(folder, test=False):
    """Parse every .xml file directly under *folder* into JSON records."""
    return [
        xml_to_json(xml_path, test=test)
        for xml_path in Path(folder).glob("./*.xml")
    ]
def process_lang(lang):
    """Convert the raw Train/Test XML folders for *lang* into JSONL files
    under ./data/processed/<lang>/."""
    lang_folder = Path("./data/raw/") / lang
    out_folder = Path("./data/processed/") / lang
    # parents=True so a fresh checkout without ./data/processed/ does not
    # crash with FileNotFoundError.
    out_folder.mkdir(parents=True, exist_ok=True)
    for folder in ["Train", "Test"]:
        in_folder = lang_folder / folder
        out_file = out_folder / f"{folder.lower()}.json"
        print(f"Processing {in_folder} to {out_file}")
        data = get_all_json(in_folder, test=folder=="Test")
        # to_json() writes to disk and returns None; the old code pointlessly
        # bound that None to a ``df`` variable.
        pd.DataFrame(data).to_json(out_file, orient="records", lines=True)
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import pandas as pd
import os
import numpy
import MySQLdb
import omdtfn as odt
#conn= MySQLdb.connect("localhost","root","admin","omdb")
#df_mysql = pd.read_sql("select * from sitedb",conn)
# Input/output files resolved relative to the current working directory.
# NOTE(review): the hard-coded "\\" separator makes these paths Windows-only;
# os.path.join (or pathlib) would be portable.
omdb = os.getcwd() + "\\" + "OMDB.csv"
pntxt = os.getcwd() + "\\" + "Periodic_Notification.txt"
pth = os.getcwd() + "\\" + "WRT1.csv"
pth2 = os.getcwd() + "\\" + "WRT2.csv"
# Map an alarm summary string to a short category code. Rules are checked in
# order and the first matching substring wins (so "2G SITE DOWN" is tested
# before "MAIN", "GEN" before "THEFT", etc.); unmatched summaries map to "NA".
_TS_RULES = [
    ("2G SITE DOWN", "2G"),
    ("3G SITE DOWN", "3G"),
    ("4G SITE DOWN", "4G"),
    ("MAIN", "MF"),
    ("VOLTAGE", "DC"),
    ("TEMPERATURE", "TM"),
    ("SMOKE", "SM"),
    ("GEN", "GN"),  # also covers "GENSET", exactly as the original chain did
    ("THEFT", "TH"),
    ("2G CELL DOWN", "2_CELL"),
    ("3G CELL DOWN", "3_CELL"),
    ("4G CELL DOWN", "4_CELL"),
]


def TS(x):
    """Return the category code for alarm summary *x*, or "NA" if no rule matches.

    Replaces an unreadable 13-level nested conditional-expression lambda with
    an ordered rule table; matching order and results are unchanged.
    """
    for needle, code in _TS_RULES:
        if needle in x:
            return code
    return "NA"
def write2txt(flname, txt):
    """Overwrite file *flname* with the string *txt* (creating it if absent)."""
    # BUG FIX: the old version never closed the handle on error (no with-
    # block) and pointlessly rebound ``txt`` to write()'s return value.
    with open(flname, "w+") as fo:
        fo.write(txt)
class omdf:
    """Small convenience wrapper around a DataFrame built from a dict of columns."""

    def __init__(self, dic):
        # Keep both the frame and a raw numpy snapshot of it.
        self.df = pd.DataFrame(dic)
        self.arr = self.df.to_numpy()

    def df_addcol_lamda(self):
        """Categorize each row's Summary via TS() into a new 'cat' column."""
        self.df['cat'] = self.df.apply(lambda row: TS(row.Summary), axis=1)
        return self.df.to_dict()

    def df_addcol_fdic(self, d, newcolname):
        """Add *newcolname* by looking each 'scode' value up in dict *d*."""
        mapped = self.df['scode'].map(d)
        self.df[newcolname] = mapped
        return self.df.to_dict()

    def df_apply_on_col(self, newcolname):
        """Store the first five characters of CustomAttr15 in *newcolname*."""
        self.df[newcolname] = self.df.apply(lambda rec: rec.CustomAttr15[0:5], axis=1)
        return self.df.to_dict()

    def df_remove_col_by_list(self, lis):
        """Return the frame restricted to the columns in *lis*, as a dict."""
        return self.df[lis].to_dict()
cols = ["SERIAL","EQUIPMENTKEY","CUSTOMATTR15","SUMMARY","LASTOCCURRENCE","CLEARTIMESTAMP","ALARMDETAILS","CUSTOMATTR15"]
single = os.getcwd() + "\\" + "DWRRU.csv"
df = pd.read_csv(single)
df2 = df[cols]
print(df2['CUSTOMATTR15'].value_counts())
print(df2)
#df3 = df2.replace(np.nan,0)
#print(df2)
#codelist = [df['CUSTOMATTR15'].to_list()]
#print(codelist)
#Codelist = df2['CUSTOMATTR15']
#df2['cnt'] = df2['CUSTOMATTR15'].value_counts()
#print(df2)
#df2['cnt'] = lambda x : x.df2['CUSTOMATTR15'].value_counts()
#df['count'] = df['CUSTOMATTR15'].value_counts()
#print(df)
#print(df2)
#print(fdf['CUSTOMATTR15'].value_counts())
#df3 = df2.apply(lambda s: s['CUSTOMATTR15'], axis=1)
#df4 = df['CUSTOMATTR15'].value_counts().loc[lambda x : ]
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
# Copyright The IETF Trust 2016-2019, All Rights Reserved
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
# various utilities for working with the mailarch mail archive at
# mailarchive.ietf.org
import contextlib
import datetime
import tarfile
import mailbox
import tempfile
import hashlib
import base64
import email.utils
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
import debug # pyflakes:ignore
from pyquery import PyQuery
from django.conf import settings
from django.utils.encoding import force_bytes
def list_name_from_email(list_email):
    """Return the bare list name for an @ietf.org address, else None."""
    suffix = "@ietf.org"
    if list_email.endswith(suffix):
        return list_email[:-len(suffix)]
    return None
def hash_list_message_id(list_name, msgid):
    """Compute mailarch's URL hash for (list_name, msgid).

    Mirrors https://www.mail-archive.com/faq.html#listserver, except that the
    bare list name (without "@ietf.org") is hashed instead of the full
    address, and trailing "=" padding is stripped from the base64 output.
    """
    digest = hashlib.sha1(force_bytes(msgid))
    digest.update(force_bytes(list_name))
    return base64.urlsafe_b64encode(digest.digest()).rstrip(b"=")
def construct_query_urls(review_req, query=None):
    """Build mailarch search/export URLs for a review request's team list.

    Returns None when the team's list address is not an @ietf.org list;
    *query* defaults to the document name.
    """
    list_name = list_name_from_email(review_req.team.list_email)
    if not list_name:
        return None
    query = query or review_req.doc.name
    # Advanced search over the last 180 days, restricted to the team's list.
    start = datetime.date.today() - datetime.timedelta(days=180)
    params = {
        "qdr": "c",  # custom time frame
        "start_date": start.isoformat(),
        "email_list": list_name,
        "q": "subject:({})".format(query),
        "as": "1",  # this is an advanced search
    }
    encoded_query = "?" + urlencode(params)
    base = settings.MAILING_LIST_ARCHIVE_URL
    return {
        "query": query,
        "query_url": base + "/arch/search/" + encoded_query,
        "query_data_url": base + "/arch/export/mbox/" + encoded_query,
    }
def construct_message_url(list_name, msgid):
    """Return the mailarch permalink for a message on the given list."""
    msg_hash = hash_list_message_id(list_name, msgid)
    return "{}/arch/msg/{}/{}".format(settings.MAILING_LIST_ARCHIVE_URL, list_name, msg_hash)
def retrieve_messages_from_mbox(mbox_fileobj):
    """Return selected content in message from mbox from mailarch.

    Each returned dict carries: "from", "splitfrom" (name, addr), "subject",
    "content" (all text/plain parts concatenated, LF-normalized), the
    unquoted "message_id" and "url" (Archived-At header), the raw "date",
    and "utcdate" as an (ISO date, ISO time) pair — or ("", "") when the
    Date header cannot be parsed.
    """
    res = []
    with tempfile.NamedTemporaryFile(suffix=".mbox") as mbox_file:
        # mailbox.mbox needs a path, so we need to put the contents
        # into a file
        mbox_data = mbox_fileobj.read()
        mbox_file.write(mbox_data)
        mbox_file.flush()  # make the bytes visible before mbox reopens the path
        mbox = mailbox.mbox(mbox_file.name, create=False)
        for msg in mbox:
            # Concatenate every text/plain MIME part; other types are ignored.
            content = ""
            for part in msg.walk():
                if part.get_content_type() == "text/plain":
                    charset = part.get_content_charset() or "utf-8"
                    content += part.get_payload(decode=True).decode(charset, "ignore")
            # parse a couple of things for the front end
            utcdate = None
            d = email.utils.parsedate_tz(msg["Date"])
            if d:
                utcdate = datetime.datetime.fromtimestamp(email.utils.mktime_tz(d))
            res.append({
                "from": msg["From"],
                "splitfrom": email.utils.parseaddr(msg["From"]),
                "subject": msg["Subject"],
                "content": content.replace("\r\n", "\n").replace("\r", "\n").strip("\n"),
                "message_id": email.utils.unquote(msg["Message-ID"]),
                "url": email.utils.unquote(msg["Archived-At"]),
                "date": msg["Date"],
                "utcdate": (utcdate.date().isoformat(), utcdate.time().isoformat()) if utcdate else ("", ""),
            })
    return res
def retrieve_messages(query_data_url):
    """Retrieve and return selected content from mailarch.

    Downloads the mbox export tarball at *query_data_url* and flattens every
    mbox member through retrieve_messages_from_mbox.

    Raises:
        KeyError: when mailarch's HTML reply contains a "no-results" marker.
        Exception: when the response is neither a tar export nor a
            recognizable no-results page.
    """
    res = []
    with contextlib.closing(urlopen(query_data_url, timeout=15)) as fileobj:
        content_type = fileobj.info()["Content-type"]
        if not content_type.startswith("application/x-tar"):
            # An HTML reply usually means the search matched nothing; sniff
            # the first chunk of the page for mailarch's no-results marker.
            if content_type.startswith("text/html"):
                r = fileobj.read(20000)
                q = PyQuery(r)
                div = q('div[class~="no-results"]')
                if div:
                    raise KeyError("No results: %s -> %s" % (query_data_url, div.text(), ))
            raise Exception("Export failed - this usually means no matches were found")
        # Stream mode ('r|*') because the HTTP response is not seekable.
        with tarfile.open(fileobj=fileobj, mode='r|*') as tar:
            for entry in tar:
                if entry.isfile():
                    mbox_fileobj = tar.extractfile(entry)
                    res.extend(retrieve_messages_from_mbox(mbox_fileobj))
    return res
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.