113393
|
class Config(object):
env = 'default'
backbone = 'resnet18'
classify = 'softmax'
num_classes = 5000
metric = 'arc_margin'
easy_margin = False
use_se = False
loss = 'focal_loss'
display = False
finetune = False
meta_train = '/preprocessed/train_meta.csv'
train_root = '/preprocessed'
train_list = 'full_data_train.txt'
val_list = 'full_data_val.txt'
checkpoints_path = 'checkpoints'
save_interval = 1
train_batch_size = 32 # batch size
input_shape = (630, 80)
mp3aug_ratio = 1.0
npy_aug = True
optimizer = 'sgd'
use_gpu = True # use GPU or not
gpu_id = '0, 1'
num_workers = 0 # how many workers for loading data
print_freq = 100 # print info every N batch
debug_file = '/tmp/debug' # if os.path.exists(debug_file): enter ipdb
result_file = '/result/submission.csv'
max_epoch = 100
lr = 1e-2 # initial learning rate
lr_step = 10
    lr_decay = 0.5 # when val_loss increases, lr = lr * lr_decay
weight_decay = 1e-1
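# Usage sketch (hypothetical import path; the fields are defined above):
#
#     opt = Config()
#     print(opt.backbone, opt.lr)  # resnet18 0.01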
|
113407
|
import pytest
import xia2.Experts.LatticeExpert
def test_lattice_expert():
cell, dist = xia2.Experts.LatticeExpert.ApplyLattice(
"oP", (23.0, 24.0, 25.0, 88.9, 90.0, 90.1)
)
assert cell == (23.0, 24.0, 25.0, 90.0, 90.0, 90.0)
assert dist == pytest.approx(1.2)
def test_SortLattices():
lattices_cells = [
("aP", (57.70, 57.70, 149.80, 90.00, 90.00, 90.00)),
("tP", (57.70, 57.70, 149.80, 90.00, 90.00, 90.00)),
("mC", (81.60, 81.60, 149.80, 90.00, 90.00, 90.00)),
("mP", (57.70, 57.70, 149.80, 90.00, 90.00, 90.00)),
("oC", (81.60, 81.60, 149.80, 90.00, 90.00, 90.00)),
("oP", (57.70, 57.70, 149.80, 90.00, 90.00, 90.00)),
]
result = xia2.Experts.LatticeExpert.SortLattices(lattices_cells)
r0 = [r[0] for r in result]
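    # The expected order runs from highest to lowest lattice symmetry:
    # tetragonal, orthorhombic, monoclinic, then triclinic.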
assert r0 == ["tP", "oC", "oP", "mC", "mP", "aP"]
|
113421
|
import copy
import torch
import numpy as np
from torch.utils.data import DataLoader
from src.cli import get_args
from src.utils import capitalize_first_letter, load
from src.data import get_data, get_glove_emotion_embs
from src.trainers.sentiment import SentiTrainer
from src.trainers.emotion import MoseiEmoTrainer, IemocapTrainer
from src.models import baselines # EF_LSTM, LF_LSTM, EF_LF_LSTM
from src.models.transformers import EF_Transformer
from src.models.mult import MULTModel
from src.models.eea import EmotionEmbAttnModel
from src.config import NUM_CLASSES, MULT_PARAMS, EMOTIONS
if __name__ == "__main__":
args = get_args()
# Fix seed for reproducibility
seed = args['seed']
torch.manual_seed(seed)
np.random.seed(seed)
# Set device
# os.environ["CUDA_VISIBLE_DEVICES"] = args['cuda']
device = torch.device(f"cuda:{args['cuda']}" if torch.cuda.is_available() else 'cpu')
print("Start loading the data....")
train_data = get_data(args, 'train')
valid_data = get_data(args, 'valid')
test_data = get_data(args, 'test')
train_loader = DataLoader(train_data, batch_size=args['batch_size'], shuffle=True)
valid_loader = DataLoader(valid_data, batch_size=args['batch_size'], shuffle=False)
test_loader = DataLoader(test_data, batch_size=args['batch_size'], shuffle=False)
print(f'Train samples = {len(train_loader.dataset)}')
print(f'Valid samples = {len(valid_loader.dataset)}')
print(f'Test samples = {len(test_loader.dataset)}')
dataloaders = {
'train': train_loader,
'valid': valid_loader,
'test': test_loader
}
modal_dims = list(train_data.get_dim())
model_type = args['model'].lower()
fusion_type = args['fusion'].lower()
if model_type == 'mult':
mult_params = MULT_PARAMS[args['dataset']]
mult_params['orig_d_l'] = modal_dims[0]
mult_params['orig_d_a'] = modal_dims[1]
mult_params['orig_d_v'] = modal_dims[2]
mult_params['hidden_dim'] = args['hidden_dim']
if args['zsl'] != -1:
mult_params['output_dim'] = mult_params['output_dim'] + 1
model = MULTModel(mult_params)
elif model_type == 'rnn':
if fusion_type == 'lf':
MODEL = baselines.LF_RNN
elif fusion_type == 'ef':
MODEL = baselines.EF_RNN
elif fusion_type == 'eflf':
MODEL = baselines.EF_LF_RNN
elif fusion_type == 'ts':
MODEL = baselines.TextSelectiveRNN
else:
raise ValueError('Wrong fusion!')
num_classes = NUM_CLASSES[args['dataset']]
if args['zsl'] != -1:
if args['dataset'] == 'iemocap':
num_classes += 1
else:
num_classes -= 1
model = MODEL(
num_classes=num_classes,
input_sizes=modal_dims,
hidden_size=args['hidden_size'],
hidden_sizes=args['hidden_sizes'],
num_layers=args['num_layers'],
dropout=args['dropout'],
bidirectional=args['bidirectional'],
gru=args['gru']
)
elif model_type == 'transformer':
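        # NOTE: every fusion type currently maps to EF_Transformer.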
if fusion_type == 'lf':
MODEL = EF_Transformer
elif fusion_type == 'ef':
MODEL = EF_Transformer
elif fusion_type == 'eflf':
MODEL = EF_Transformer
else:
raise ValueError('Wrong fusion!')
model = MODEL()
elif model_type == 'eea':
zsl = args['zsl']
emo_list = EMOTIONS[args['dataset']]
if zsl != -1:
if args['dataset'] == 'iemocap':
emo_list.append(EMOTIONS['iemocap9'][zsl])
else:
emo_list = emo_list[:zsl] + emo_list[zsl + 1:]
if args['cap']:
emo_list = capitalize_first_letter(emo_list)
emo_weights = get_glove_emotion_embs(args['glove_emo_path'])
emo_weight = []
for emo in emo_list:
emo_weight.append(emo_weights[emo])
MODEL = EmotionEmbAttnModel
model = MODEL(
num_classes=len(emo_list),
input_sizes=modal_dims,
hidden_size=args['hidden_size'],
hidden_sizes=args['hidden_sizes'],
num_layers=args['num_layers'],
dropout=args['dropout'],
bidirectional=args['bidirectional'],
modalities=args['modalities'],
device=device,
emo_weight=emo_weight,
gru=args['gru']
)
else:
raise ValueError('Wrong model!')
model = model.to(device=device)
# Load model checkpoint
if args['ckpt'] != '':
state_dict = load(args['ckpt'])
if args['model'] == 'eea':
state_dict.pop('textEmoEmbs.weight')
if state_dict['modality_weights.weight'].size(0) != len(args['modalities']):
state_dict.pop('modality_weights.weight')
if args['model'] == 'rnn':
if args['zsl_test'] != -1:
out_weight = copy.deepcopy(model.out.weight)
out_bias = copy.deepcopy(model.out.bias)
pretrained_out_weight = state_dict['out.weight']
pretrained_out_bias = state_dict['out.bias']
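                # Copy the pretrained output rows into the enlarged output
                # layer, skipping the zero-shot class index so that row keeps
                # its fresh initialization.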
indicator = 0
for i in range(len(model.out.weight)):
if i == args['zsl_test']:
indicator = 1
continue
out_weight[i] = pretrained_out_weight[i - indicator]
out_bias[i] = pretrained_out_bias[i - indicator]
model.out.weight = torch.nn.Parameter(out_weight)
model.out.bias = torch.nn.Parameter(out_bias)
state_dict.pop('out.weight')
state_dict.pop('out.bias')
if args['model'] == 'mult':
if args['zsl_test'] != -1:
out_weight = copy.deepcopy(model.out_layer.weight)
out_bias = copy.deepcopy(model.out_layer.bias)
pretrained_out_weight = state_dict['out_layer.weight']
pretrained_out_bias = state_dict['out_layer.bias']
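                # Same row-copying surgery as above, for MULT's output layer.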
indicator = 0
for i in range(len(model.out_layer.weight)):
if i == args['zsl_test']:
indicator = 1
continue
out_weight[i] = pretrained_out_weight[i - indicator]
out_bias[i] = pretrained_out_bias[i - indicator]
model.out_layer.weight = torch.nn.Parameter(out_weight)
model.out_layer.bias = torch.nn.Parameter(out_bias)
state_dict.pop('out_layer.weight')
state_dict.pop('out_layer.bias')
model.load_state_dict(state_dict, strict=False)
if args['optim'] == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
    elif args['optim'] == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
    else:
        raise ValueError('Wrong optimizer!')
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=args['patience'], verbose=True)
if args['loss'] == 'l1':
criterion = torch.nn.L1Loss()
elif args['loss'] == 'mse':
criterion = torch.nn.MSELoss()
elif args['loss'] == 'ce':
criterion = torch.nn.CrossEntropyLoss()
elif args['loss'] == 'bce':
pos_weight = train_data.get_pos_weight()
pos_weight = pos_weight.to(device)
criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
        # criterion = torch.nn.BCEWithLogitsLoss()
    else:
        raise ValueError('Wrong loss!')
if args['dataset'] == 'mosi' or args['dataset'] == 'mosei_senti':
TRAINER = SentiTrainer
elif args['dataset'] == 'mosei_emo':
TRAINER = MoseiEmoTrainer
    elif args['dataset'] == 'iemocap':
        TRAINER = IemocapTrainer
    else:
        raise ValueError('Wrong dataset!')
trainer = TRAINER(args, model, criterion, optimizer, scheduler, device, dataloaders)
if args['test']:
trainer.test()
elif args['valid']:
trainer.valid()
else:
trainer.train()
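# Typical invocation sketch (hypothetical flag spellings; see src/cli.py for
# the real argument definitions):
#
#     python main.py --model eea --dataset mosei_emo --loss bce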
|
113471
|
import unittest
from taxonomy import Taxonomy, TaxonomyError
class NewickTestCase(unittest.TestCase):
def _create_tax(self):
# https://en.wikipedia.org/wiki/Newick_format#Examples
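        # The tree encoded below:
        #
        #           F
        #         / | \
        #        A  B  E
        #             / \
        #            C   D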
return Taxonomy.from_newick("(A:0.1,B:0.2,(C:0.3,D:0.4)E:0.5)F;")
def setUp(self) -> None:
self.tax = self._create_tax()
def test_root(self):
root = self.tax.root
self.assertEqual(root.id, "F")
self.assertIsNone(root.parent)
def test_find_node_by_id(self):
node = self.tax.node("A")
self.assertEqual(node.id, "A")
self.assertEqual(node.parent, "F")
node = self.tax.node("D")
self.assertEqual(node.id, "D")
self.assertEqual(node.parent, "E")
node = self.tax.node("unknown")
self.assertIsNone(node)
def test_index(self):
node = self.tax["A"]
self.assertEqual(node.id, "A")
self.assertEqual(node.parent, "F")
with self.assertRaises(TaxonomyError):
_ = self.tax["unknown"]
def test_find_by_name(self):
# They are not named so we can't find anything by name
node = self.tax.find_by_name("A")
self.assertIsNone(node)
def test_parent(self):
parent = self.tax.parent("D")
self.assertEqual(parent.id, "E")
def test_parent_with_distance(self):
parent, distance = self.tax.parent_with_distance("D")
self.assertEqual(parent.id, "E")
# Float precision issue
# 0.4 becomes 0.4000000059604645
self.assertAlmostEqual(distance, 0.4)
def test_children(self):
children = self.tax.children("E")
self.assertEqual(len(children), 2)
self.assertEqual(children[0].id, "C")
self.assertEqual(children[1].id, "D")
def test_lineage(self):
lineage = self.tax.lineage("D")
self.assertEqual(len(lineage), 3)
self.assertEqual(lineage[0].id, "D")
self.assertEqual(lineage[1].id, "E")
self.assertEqual(lineage[2].id, "F")
def test_parents(self):
lineage = self.tax.parents("D")
self.assertEqual(len(lineage), 2)
self.assertEqual(lineage[0].id, "E")
self.assertEqual(lineage[1].id, "F")
def test_lca(self):
lca = self.tax.lca("A", "D")
self.assertEqual(lca.id, "F")
def test_prune(self):
new_tax = self.tax.prune(remove=["E"])
self.assertIsNone(new_tax.node("D"))
self.assertIsNone(new_tax.node("E"))
        # Removing "E" also removes its subtree (C and D), leaving A, B and F
self.assertEqual(len(new_tax), 3)
new_tax = self.tax.prune(keep=["E", "D"])
self.assertEqual(len(new_tax), 3)
self.assertIsNotNone(new_tax.node("F"))
def test_remove(self):
tax = self._create_tax()
tax.remove_node("E")
self.assertIsNotNone(tax.node("D"))
self.assertIsNone(tax.node("E"))
self.assertEqual(len(tax), 5)
def test_add(self):
tax = self._create_tax()
tax.add_node("D", "G")
node = tax["G"]
self.assertEqual(node.parent, "D")
def test_edit_node(self):
tax = self._create_tax()
tax.edit_node("D", parent_distance=3)
node, distance = tax.parent_with_distance("D")
self.assertEqual(distance, 3)
class NCBITestCase(unittest.TestCase):
def _create_tax(self):
return Taxonomy.from_ncbi("tests/data/ncbi_subset_tax.nodes.dmp", "tests/data/ncbi_subset_tax.names.dmp")
def setUp(self) -> None:
self.tax = self._create_tax()
def test_root(self):
root = self.tax.root
self.assertEqual(root.id, "1")
self.assertIsNone(root.parent)
def test_find_node_by_id(self):
node = self.tax.node("1236")
self.assertEqual(node.id, "1236")
self.assertEqual(node.name, "Gammaproteobacteria")
self.assertEqual(node.parent, "1224")
node = self.tax.node("unknown")
self.assertIsNone(node)
def test_index(self):
node = self.tax["1236"]
self.assertEqual(node.id, "1236")
self.assertEqual(node.name, "Gammaproteobacteria")
self.assertEqual(node.parent, "1224")
with self.assertRaises(TaxonomyError):
_ = self.tax["unknown"]
def test_find_by_name(self):
node = self.tax.find_by_name("<NAME>")
self.assertEqual(node.id, "562")
self.assertEqual(node.name, "<NAME>")
self.assertEqual(node.parent, "561")
def test_parent(self):
parent = self.tax.parent("562")
self.assertEqual(parent.id, "561")
def test_parent_with_distance(self):
parent, distance = self.tax.parent_with_distance("562")
self.assertEqual(parent.id, "561")
        # assertAlmostEqual guards against float32 precision loss
self.assertAlmostEqual(distance, 1.0)
def test_children(self):
children = self.tax.children("561")
self.assertEqual(len(children), 1)
self.assertEqual(children[0].id, "562")
def test_lineage(self):
lineage = self.tax.lineage("562")
self.assertEqual(len(lineage), 9)
self.assertEqual(lineage[0].id, "562")
self.assertEqual(lineage[1].id, "561")
self.assertEqual(lineage[-1].id, "1")
def test_parents(self):
lineage = self.tax.parents("562")
self.assertEqual(len(lineage), 8)
self.assertEqual(lineage[0].id, "561")
self.assertEqual(lineage[-1].id, "1")
def test_lca(self):
lca = self.tax.lca("562", "91347")
self.assertEqual(lca.id, "91347")
def test_prune(self):
new_tax = self.tax.prune(remove=["561"])
self.assertIsNone(new_tax.node("561"))
self.assertIsNone(new_tax.node("562"))
self.assertEqual(len(new_tax), 7)
new_tax = self.tax.prune(keep=["561"])
self.assertEqual(len(new_tax), 8)
self.assertIsNotNone(new_tax.node("561"))
@unittest.skip("tax.remove doesn't work on truncated taxonomies?")
def test_remove(self):
tax = self._create_tax()
tax.remove_node("561")
self.assertIsNotNone(tax.node("562"))
self.assertIsNone(tax.node("561"))
self.assertEqual(len(tax), 8)
def test_add(self):
tax = self._create_tax()
tax.add_node("561", "563")
node = tax["563"]
self.assertEqual(node.parent, "561")
def test_edit_node(self):
tax = self._create_tax()
tax.edit_node("562", parent_distance=3)
node, distance = tax.parent_with_distance("562")
self.assertEqual(distance, 3)
if __name__ == '__main__':
unittest.main()
|
113486
|
from django import template
register = template.Library()
def get(value, key):
return value.get(key)
register.filter('get', get)
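# Template usage sketch (assumes this module is registered as a template
# library; the load name below is whatever this file is called):
#
#     {% load get %}
#     {{ mydict|get:"some_key" }}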
|
113539
|
import torch.nn as nn
import torch
import cv2
import numpy as np
def eval_net(
net, dataset, gpu=True, vis=None, vis_im=None, vis_gt=None, loss=nn.MSELoss()
):
criterion = loss
net.eval()
losses = 0
torch.cuda.empty_cache()
for iteration, data in enumerate(dataset):
img = data["image"]
target = data["gt"]
if gpu:
img = img.cuda()
target = target.cuda()
        pred_img = net(img)
        loss = criterion(pred_img, target)
        losses += loss.item()
        pred_img = pred_img.detach().cpu().numpy()
        cv2.imwrite("conf_eval.tif", (pred_img * 255).astype(np.uint8)[0, 0])
    # enumerate starts at 0, so the number of batches is iteration + 1
    return losses / (iteration + 1)
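# Usage sketch (hypothetical model and validation loader):
#
#     val_loss = eval_net(model, val_loader, gpu=torch.cuda.is_available())
#     print(f"validation MSE: {val_loss:.4f}")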
|
113549
|
from types import TracebackType
from typing import Dict, Optional, Type, Union
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from aiohttp.client import ClientSession
from warnings import warn
from neispy.error import ExceptionsMapping
class NeispyRequest:
BASE = "https://open.neis.go.kr/hub"
def __init__(
self,
KEY: Optional[str],
Type: Literal["json", "xml"],
pIndex: int,
pSize: int,
session: Optional[ClientSession],
only_rows: bool = True,
) -> None:
self.KEY = KEY
if not KEY:
warn("API키가 없습니다, 샘플키로 요청합니다", UserWarning)
self.pIndex = pIndex
self.pSize = pSize
self.Type = Type
self.session = session
self.only_rows = only_rows
def _default_params(self) -> Dict[str, Union[str, int]]:
default_params = {
"pIndex": self.pIndex,
"pSize": self.pSize,
"type": self.Type,
}
if self.KEY:
default_params["KEY"] = self.KEY
return default_params
async def request(
self,
method: str,
endpoint: str,
params: Dict[str, Union[str, int]],
):
URL = self.BASE + endpoint
if not self.session:
self.session = ClientSession()
default_params = self._default_params()
default_params.update(params)
async with self.session.request(method, URL, params=default_params) as response:
data = await response.json(content_type=None)
if data.get("RESULT"):
result = data["RESULT"]
code = result["CODE"]
if code != "INFO-000":
msg = result["MESSAGE"]
raise ExceptionsMapping[result["CODE"]](code, msg)
if self.only_rows:
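                # NEIS payloads nest data as {service: [{'head': ...}, {'row': [...]}]},
                # so take the 'row' list from the second element of the first value.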
return list(data.values())[0][1]["row"]
return data
async def get_schoolInfo(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/schoolInfo", params)
async def get_mealServiceDietInfo(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/mealServiceDietInfo", params)
async def get_SchoolSchedule(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/SchoolSchedule", params)
async def get_acaInsTiInfo(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/acaInsTiInfo", params)
async def get_elsTimetable(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/elsTimetable", params)
async def get_misTimetable(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/misTimetable", params)
async def get_hisTimetable(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/hisTimetable", params)
async def get_spsTimetable(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/spsTimetable", params)
async def get_classInfo(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/classInfo", params)
async def get_schoolMajorinfo(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/schoolMajorinfo", params)
async def get_schulAflcoinfo(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/schulAflcoinfo", params)
async def get_tiClrminfo(self, params: Dict[str, Union[str, int]]):
return await self.request("GET", "/tiClrminfo", params)
async def __aenter__(self):
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
):
if self.session:
await self.session.close()
|
113567
|
import datetime
from nose.tools import (
assert_raises,
eq_,
set_trace,
)
from . import DatabaseTest
from bot import Bot
from model import (
InvalidPost,
Post,
_now,
)
class TestBot(DatabaseTest):
def test_publishable_posts(self):
bot = self._bot(config=dict(
state_update_schedule=1,
schedule=1
))
eq_(False, bot.state_updated)
# Since this bot has never posted, publishable_posts returns a
# list containing a single new post.
[new_post] = bot.publishable_posts
assert isinstance(new_post, Post)
eq_(new_post.content, bot.new_posts[0])
# Since this bot has no state update schedule,
# Bot.update_state() was not called.
eq_(False, bot.state_updated)
# Calling publishable_posts returns an empty list, since it's not yet
# time for another post.
eq_([], bot.publishable_posts)
def test_publishable_posts_may_update_state(self):
bot = self._bot(config=dict(state_update_schedule=1, schedule=1))
eq_(True, bot.state_needs_update)
bot.publishable_posts
eq_(True, bot.state_updated)
eq_(False, bot.state_needs_update)
def test_backlog(self):
bot = self._bot()
eq_([], bot.backlog)
item = {"k": "v"}
# By default, items are stored in the backlog as is.
eq_(item, bot.backlog_item(item))
# Backlog items are transparently serialized and deserialized
# to JSON.
bot.extend_backlog([item])
eq_([item], bot.backlog)
bot.clear_backlog()
eq_([], bot.backlog)
def test_publishable_posts_pops_backlog(self):
bot = self._bot()
bot.extend_backlog(["backlog_1", "backlog_2"])
[post1] = bot.publishable_posts
eq_("backlog_1", post1.content)
# There's still a backlog item, but it's not time for another post,
# so publishable_posts doesn't pop it.
eq_(["backlog_2"], bot.backlog)
eq_([], bot.publishable_posts)
def test_publishable_posts_returns_all_scheduled_posts(self):
bot = self._bot()
now = _now()
yesterday = now - datetime.timedelta(days=1)
day_before = now - datetime.timedelta(days=2)
tomorrow = now + datetime.timedelta(days=1)
publish_yesterday = self._post(
bot.model, "yesterday", publish_at=yesterday
)
publish_earlier = self._post(
bot.model, "day before", publish_at=day_before
)
publish_later = self._post(
bot.model, "tomorrow", publish_at=tomorrow
)
# publishable_posts returns all posts that should have been
# published by now.
eq_([publish_earlier, publish_yesterday], bot.publishable_posts)
# Since the scheduled posts weren't _created_ by the
# publishable_posts, they don't go away when you call
# publishable_posts once. They will stick around until they're
# published.
eq_([publish_earlier, publish_yesterday], bot.publishable_posts)
def test_to_post_list(self):
"""Test the method that handles the output of new_post."""
class ModifierBot(Bot):
def object_to_post(self, obj):
return obj + "!"
bot = self._bot(ModifierBot)
m = bot._to_post_list
post = self._post()
# A bot can turn an object (such as a backlog object) into a post
# by creating the Post object, or a list of posts.
eq_([post], m(post))
eq_([post], m([post]))
# A bot can also create a Post by defining object_to_post to
# return a string. publishable_posts takes care of actually
# converting it into a post.
[modified_post] = m("A string")
assert isinstance(modified_post, Post)
eq_("A string!", modified_post.content)
# It's also okay for object_to_post to return the actual Post object.
class PostBot(Bot):
def object_to_post(self, obj):
post, is_new = Post.from_content(self.model, obj)
return post
bot = self._bot(PostBot)
[post] = bot._to_post_list("A string")
        assert isinstance(post, Post)
eq_("A string", post.content)
# Or a list of Post objects.
class PostBot(Bot):
def object_to_post(self, obj):
post, is_new = Post.from_content(self.model, obj)
return [post]
[post] = self._bot(PostBot)._to_post_list("A string")
        assert isinstance(post, Post)
eq_("A string", post.content)
# No other type of value is acceptable.
class PostBot(Bot):
            def object_to_post(self, obj):
return dict(value=obj)
assert_raises(
InvalidPost, self._bot(PostBot)._to_post_list, ["A complicated value"]
)
def test_post_can_only_be_scheduled_for_the_future(self):
# You can schedule a post for the future.
class FutureBot(Bot):
def _schedule_posts(self):
tomorrow = _now() + datetime.timedelta(days=1)
post, is_new = Post.from_content(
self.model, "the future!", publish_at=tomorrow
)
return post
bot = self._bot(FutureBot)
eq_(["the future!"], [x.content for x in bot.schedule_posts()])
# But not for the past.
class PastBot(Bot):
def _schedule_posts(self):
yesterday = _now() - datetime.timedelta(days=1)
post, is_new = Post.from_content(
self.model, "the past!", publish_at=yesterday
)
return [post]
bot = self._bot(PastBot)
assert_raises(InvalidPost, bot.schedule_posts)
def test_next_scheduled_post(self):
bot = self._bot()
# If there is no schedule, a Bot will either create a new post
# every time it's invoked (not a good idea), or posts must be
# scheduled in advance using some other mechanism.
bot.schedule = None
eq_(None, bot._next_scheduled_post([]))
# If the schedule is a number, a Bot will create a new post
# every [that number] of minutes.
bot.schedule = 5
delta = bot._next_scheduled_post([])
assert isinstance(delta, datetime.timedelta)
eq_(5*60, delta.seconds)
# If the schedule is a dictionary with values 'mean' and 'stdev',
# a Bot will create posts in a Gaussian distribution with those numbers
# as mean and standard deviation.
bot.schedule = dict(mean=6, stdev=0)
delta = bot._next_scheduled_post([])
assert isinstance(delta, datetime.timedelta)
eq_(6*60, delta.seconds)
|
113571
|
import dataclasses
from typing import Any, ClassVar, Dict, Iterable, Optional
from dbdaora.data_sources.fallback import FallbackDataSource
@dataclasses.dataclass
class DictFallbackDataSource(FallbackDataSource[str]):
db: Dict[Optional[str], Dict[str, Any]] = dataclasses.field(
default_factory=dict
)
key_separator: ClassVar[str] = ':'
def make_key(self, *key_parts: str) -> str:
return self.key_separator.join([p for p in key_parts if p])
async def get(self, key: str) -> Optional[Dict[str, Any]]:
return self.db.get(key)
async def put(self, key: str, data: Dict[str, Any], **kwargs: Any) -> None:
self.db[key] = data
async def delete(self, key: str) -> None:
self.db.pop(key, None)
async def query(self, key: str, **kwargs: Any) -> Iterable[Dict[str, Any]]:
return self.db.values()
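# A minimal usage sketch (assumes an asyncio entry point):
#
#     import asyncio
#
#     async def demo():
#         ds = DictFallbackDataSource()
#         key = ds.make_key('account', 'user', '1')  # -> 'account:user:1'
#         await ds.put(key, {'name': 'alice'})
#         print(await ds.get(key))  # {'name': 'alice'}
#
#     asyncio.run(demo())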
|
113627
|
class Solution:
def generateMatrix(self, n):
"""
:type n: int
:rtype: List[List[int]]
"""
def dirToIndex(x, y, d):
if d == "r": return (x, y + 1, d) if y + 1 < n and matrix[x][y + 1] == 0 else (x + 1, y, "d")
elif d == "d": return (x + 1, y, d) if x + 1 < n and matrix[x + 1][y] == 0 else (x, y - 1, "l")
elif d == "l": return (x, y - 1, d) if y > 0 and matrix[x][y - 1] == 0 else (x - 1, y, "u")
            else: return (x - 1, y, d) if x > 0 and matrix[x - 1][y] == 0 else (x, y + 1, "r")
        matrix = [[0 for i in range(n)] for j in range(n)]
num, dir, i, j = 1, "r", 0, 0
while 0 <= i < n and 0 <= j < n and matrix[i][j] == 0:
matrix[i][j] = num
num += 1
i, j, dir = dirToIndex(i, j, dir)
return matrix
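# Quick sanity check: the 3x3 spiral should read 1..9 clockwise from the
# top-left corner.
if __name__ == "__main__":
    print(Solution().generateMatrix(3))  # [[1, 2, 3], [8, 9, 4], [7, 6, 5]]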
|
113628
|
import sys
import types
import _dk_core as core
# def enum(*sequential, **named):
# enums = dict(zip(sequential, range(len(sequential))), **named)
# return type('Enum', (), enums)
def enum(*seq, begin=0, prefix='Enum_'):
from collections import namedtuple
t = namedtuple(prefix + core.uuidgen().replace('-','_'), seq)
return t(*tuple(range(begin, begin+len(seq))))
FreeRectChoiceHeuristic = enum('RectBestAreaFit',
'RectBestShortSideFit',
'RectBestLongSideFit',
'RectWorstAreaFit',
'RectWorstShortSideFit',
'RectWorstLongSideFit')
GuillotineSplitHeuristic = enum('SplitShorterLeftoverAxis',
'SplitLongerLeftoverAxis',
'SplitMinimizeArea',
'SplitMaximizeArea',
'SplitShorterAxis',
'SplitLongerAxis')
class GuillotineBinPack():
def __init__(self, width=0, height=0):
GuillotineBinPack._freeRectChoiceHeuristicMap = [ GuillotineBinPack._scoreBestAreaFit,
GuillotineBinPack._scoreBestShortSideFit,
GuillotineBinPack._scoreBestLongSideFit,
GuillotineBinPack._scoreWorstAreaFit,
GuillotineBinPack._scoreWorstShortSideFit,
GuillotineBinPack._scoreWorstLongSideFit ]
self.initialize(width, height)
def initialize(self, width, height):
self._binWidth = width
self._binHeight = height
self._usedRectangles = []
self._freeRectangles = []
self._freeRectangles.append(core.Rect(0, 0, width, height))
    # Find a free node where the texture fits, split the remaining space,
    # add the placed rect to the used list, and remove the node from the free list.
def insert(self, width, height, rectChoice, splitMethod):
if width <= 0 or height <= 0:
print("GuillotineBinPack::_binWidth and _binHeight must not be 0")
return None
newRect, freeNodeIndex = self._findPositionForNewNode(width, height, rectChoice)
if newRect.height == 0:
return newRect
self._splitFreeRectByHeuristic(self._freeRectangles[freeNodeIndex], newRect, splitMethod)
self._freeRectangles.pop(freeNodeIndex)
self._usedRectangles.append(newRect)
return newRect
    # Compute the fraction of the bin area currently in use.
def occupancy(self):
usedSurfaceArea = 0
for rectItem in self._usedRectangles:
usedSurfaceArea = usedSurfaceArea + (rectItem.width * rectItem.height)
return usedSurfaceArea / (self._binWidth * self._binHeight)
def mergeFreeList(self):
majorIndex = 0
while majorIndex < len(self._freeRectangles):
minorIndex = majorIndex + 1
while minorIndex < len(self._freeRectangles):
if self._freeRectangles[majorIndex].width == self._freeRectangles[minorIndex].width and self._freeRectangles[majorIndex].x == self._freeRectangles[minorIndex].x:
if self._freeRectangles[majorIndex].y == self._freeRectangles[minorIndex].y + self._freeRectangles[minorIndex].height:
self._freeRectangles[majorIndex].y -= self._freeRectangles[minorIndex].height
self._freeRectangles[majorIndex].height += self._freeRectangles[minorIndex].height
self._freeRectangles.pop(minorIndex)
minorIndex = minorIndex - 1
elif self._freeRectangles[majorIndex].y + self._freeRectangles[majorIndex].height == self._freeRectangles[minorIndex].y:
self._freeRectangles[majorIndex].height += self._freeRectangles[minorIndex].height
self._freeRectangles.pop(minorIndex)
minorIndex = minorIndex - 1
elif self._freeRectangles[majorIndex].height == self._freeRectangles[minorIndex].height and self._freeRectangles[majorIndex].y == self._freeRectangles[minorIndex].y:
if self._freeRectangles[majorIndex].x == self._freeRectangles[minorIndex].x + self._freeRectangles[minorIndex].width:
self._freeRectangles[majorIndex].x -= self._freeRectangles[minorIndex].width
self._freeRectangles[majorIndex].width += self._freeRectangles[minorIndex].width
self._freeRectangles.pop(minorIndex)
minorIndex = minorIndex - 1
elif self._freeRectangles[majorIndex].x + self._freeRectangles[majorIndex].width == self._freeRectangles[minorIndex].x:
self._freeRectangles[majorIndex].width += self._freeRectangles[minorIndex].width
self._freeRectangles.pop(minorIndex)
                        minorIndex = minorIndex - 1
                minorIndex += 1
            majorIndex += 1
def _findPositionForNewNode(self, width, height, rectChoice):
nodeIndex = 0
bestNode = core.Rect()
bestScore = sys.maxsize
for itemIndex in range(len(self._freeRectangles)):
if width == self._freeRectangles[itemIndex].width and height == self._freeRectangles[itemIndex].height:
bestNode.x = self._freeRectangles[itemIndex].x
bestNode.y = self._freeRectangles[itemIndex].y
bestNode.width = width
bestNode.height = height
bestScore = sys.maxsize
nodeIndex = itemIndex
break
elif height == self._freeRectangles[itemIndex].width and width == self._freeRectangles[itemIndex].height:
bestNode.x = self._freeRectangles[itemIndex].x
bestNode.y = self._freeRectangles[itemIndex].y
bestNode.width = height
bestNode.height = width
bestScore = sys.maxsize
nodeIndex = itemIndex
break
elif width <= self._freeRectangles[itemIndex].width and height <= self._freeRectangles[itemIndex].height:
score = GuillotineBinPack._scoreByHeuristic(width, height, self._freeRectangles[itemIndex], rectChoice)
if score < bestScore:
bestNode.x = self._freeRectangles[itemIndex].x
bestNode.y = self._freeRectangles[itemIndex].y
bestNode.width = width
bestNode.height = height
bestScore = score
nodeIndex = itemIndex
elif height <= self._freeRectangles[itemIndex].width and width <= self._freeRectangles[itemIndex].height:
score = GuillotineBinPack._scoreByHeuristic(height, width, self._freeRectangles[itemIndex], rectChoice)
if score < bestScore:
bestNode.x = self._freeRectangles[itemIndex].x
bestNode.y = self._freeRectangles[itemIndex].y
bestNode.width = height
bestNode.height = width
bestScore = score
nodeIndex = itemIndex
return bestNode, nodeIndex
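    # The score helpers below take no `self`; they are used as plain functions
    # through the heuristic map assigned in __init__.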
def _scoreByHeuristic(width, height, freeRect, rectChoice):
return (GuillotineBinPack._freeRectChoiceHeuristicMap[rectChoice])(width, height, freeRect)
def _scoreBestAreaFit(width, height, freeRect):
return freeRect.width * freeRect.height - width * height
def _scoreBestShortSideFit(width, height, freeRect):
leftoverHoriz = abs(freeRect.width - width)
leftoverVert = abs(freeRect.height - height)
return min(leftoverHoriz, leftoverVert)
def _scoreBestLongSideFit(width, height, freeRect):
leftoverHoriz = abs(freeRect.width - width)
leftoverVert = abs(freeRect.height - height)
return max(leftoverHoriz, leftoverVert)
def _scoreWorstAreaFit(width, height, freeRect):
return -GuillotineBinPack._scoreBestAreaFit(width, height, freeRect)
def _scoreWorstShortSideFit(width, height, freeRect):
return -GuillotineBinPack._scoreBestShortSideFit(width, height, freeRect)
def _scoreWorstLongSideFit(width, height, freeRect):
return -GuillotineBinPack._scoreBestLongSideFit(width, height, freeRect)
def _splitFreeRectByHeuristic(self, freeRect, placedRect, method):
splitHorizontal = False
fixedWidth = freeRect.width - placedRect.width
fixedHeight = freeRect.height - placedRect.height
if method == GuillotineSplitHeuristic.SplitShorterLeftoverAxis:
splitHorizontal = (fixedWidth <= fixedHeight)
elif method == GuillotineSplitHeuristic.SplitLongerLeftoverAxis:
splitHorizontal = (fixedWidth > fixedHeight)
elif method == GuillotineSplitHeuristic.SplitMinimizeArea:
splitHorizontal = (placedRect.width * fixedHeight > fixedWidth * placedRect.height)
elif method == GuillotineSplitHeuristic.SplitMaximizeArea:
splitHorizontal = (placedRect.width * fixedHeight <= fixedWidth * placedRect.height)
elif method == GuillotineSplitHeuristic.SplitShorterAxis:
splitHorizontal = (freeRect.width <= freeRect.height)
elif method == GuillotineSplitHeuristic.SplitLongerAxis:
splitHorizontal = (freeRect.width > freeRect.height)
else:
splitHorizontal = True
self._splitFreeRectAlongAxis(freeRect, placedRect, splitHorizontal)
def _splitFreeRectAlongAxis(self, freeRect, placedRect, splitHorizontal):
bottom = core.Rect()
bottom.x = freeRect.x
bottom.y = freeRect.y + placedRect.height
bottom.height = freeRect.height - placedRect.height
right = core.Rect()
right.x = freeRect.x + placedRect.width
right.y = freeRect.y
right.width = freeRect.width - placedRect.width
if splitHorizontal:
bottom.width = freeRect.width
right.height = placedRect.height
else:
bottom.width = placedRect.width
right.height = freeRect.height
if bottom.width > 0 and bottom.height > 0:
self._freeRectangles.append(bottom)
if right.width > 0 and right.height > 0:
self._freeRectangles.append(right)
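# A minimal usage sketch (assumes the _dk_core Rect type used above):
#
#     pack = GuillotineBinPack(256, 256)
#     rect = pack.insert(64, 32,
#                        FreeRectChoiceHeuristic.RectBestAreaFit,
#                        GuillotineSplitHeuristic.SplitMinimizeArea)
#     print(pack.occupancy())  # fraction of the bin area now occupied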
|
113633
|
import types
import logging
import time
import numpy as np
from jbopt.de import de
from jbopt.classic import classical
from starkit.fitkit.priors import PriorCollection
logger = logging.getLogger(__name__)
def fit_evaluate(self, model_param):
    # returns the log-likelihood of observing the data given the model parameters
parameters = self.parameters.copy()
parameters[~self.fixed_mask()] = model_param
loglikelihood = self.evaluate(*parameters)
return float(loglikelihood)
def fixed_mask(self):
return np.array([getattr(self, param_name).fixed
for param_name in self.param_names])
class JBOptPriorCollection(PriorCollection):
def prior_transform(self, cube):
cube = np.asarray(cube)
super(JBOptPriorCollection, self).prior_transform(cube,
None, len(cube))
return cube
class JBOpt(object):
def __init__(self, likelihood, priors, output_basename='test_all'):
self.likelihood = likelihood
self.likelihood.fit_evaluate = types.MethodType(
fit_evaluate, self.likelihood)
self.likelihood.fixed_mask = types.MethodType(fixed_mask,
self.likelihood)
if not hasattr(priors, 'prior_transform'):
self.priors = JBOptPriorCollection(priors)
else:
self.priors = priors
self.fit_parameter_names = [
item for i, item in enumerate(self.likelihood.param_names)
if not self.likelihood.fixed_mask()[i]]
self.args = dict(loglikelihood=self.likelihood.fit_evaluate,
transform=self.priors.prior_transform,
prior=lambda x: 0,
parameter_names=self.fit_parameter_names,
)
def run(self, output_basename, method='de', start=None, nsteps=2000, verbose=0):
if start is None:
start = [0.5] * len(self.fit_parameter_names)
self.args['start'] = start
self.args['nsteps'] = nsteps
self.args['disp'] = verbose
self.args['output_basename'] = output_basename
start_time = time.time()
if method == 'de':
self.result = self._run_de()
elif method in ('cobyla', 'ralg', 'mma', 'auglag', 'minuit',
'neldermead'):
            self.result = self._run_classical(method)
        else:
            raise ValueError('Unknown method: {0}'.format(method))
logger.info('Fit took {0:.2f}s'.format(time.time() - start_time))
self.result['best_values'] = self.priors.prior_transform(
self.result['start'])
self.likelihood.parameters[~self.likelihood.fixed_mask()] = (
self.result['best_values'])
return self.result
def _run_de(self):
return de(**self.args)
def _run_classical(self, method):
return classical(method=method, **self.args)
|
113635
|
from typing import Union
from .textstyle import TextStyle, compose_style
class StyleContainer:
def __init__(self, styles):
self._styles = styles
def has_style(self, style_name: str) -> bool:
"""Returns True if the box has a style with the given name."""
return style_name in self._styles
def update_style(self, style_name: str, style: TextStyle) -> TextStyle:
"""Updates the style associated with the given name and returns it."""
assert isinstance(style_name, str)
old_style = self.get_style(style_name, full_style=False)
old_style.update(style)
self._styles = self._styles.copy()
self._styles[style_name] = old_style
return old_style
def set_style(self, style_name: str, style: TextStyle, base="default"):
"""
Assigns the style to the given name.
If `base` is specified, the style will be first composed with `base`.
Parameters
----------
style_name: str
Name of the style.
style: TextStyle
Definition of the style.
base: str
Name of a style that will be composed with the given style.
"""
assert isinstance(style_name, str)
assert isinstance(style, TextStyle)
if base != "default":
base_style = self.get_style(base)
base_style.update(style)
style = base_style
self._styles = self._styles.copy()
self._styles[style_name] = style
return style
def get_style(self, style: Union[str, TextStyle], full_style=False) -> TextStyle:
"""
Returns a style associated with the given name.
If `full_style=True`, you can also pass a text style to this function to compose it with
the default style.
"""
return compose_style(self._styles, style, full_style)
|
113637
|
import xbos_services_getter as xsg
import numpy as np
"""Thermostat class to model temperature change.
Note, set STANDARD fields to specify error for actions which do not have enough data for valid predictions. """
class Tstat:
STANDARD_MEAN = 0
STANDARD_VAR = 0
STANDARD_UNIT = "F"
def __init__(self, building, zone, temperature, last_temperature=None, suppress_not_enough_data_error=False):
self.temperature = temperature
self.last_temperature = last_temperature
self.indoor_temperature_prediction_stub = xsg.get_indoor_temperature_prediction_stub()
self.error = {}
for action in [xsg.NO_ACTION, xsg.HEATING_ACTION, xsg.COOLING_ACTION]:
try:
raise Exception("ERROR: Hack. Whoever sees this, yell at Daniel to get back to fixing the thermal model.")
mean, var, unit = xsg.get_indoor_temperature_prediction_error(self.indoor_temperature_prediction_stub,
building,
zone,
action)
            except Exception:
                if not suppress_not_enough_data_error:
                    raise Exception("ERROR: Tstat for building: '{0}' and zone: '{1}' did not receive error data from "
                                    "indoor_temperature_prediction microservice for action: '{2}'.".format(building, zone, action))
                print("WARNING: Tstat for building: '{0}' and zone: '{1}' did not receive error data from "
                      "indoor_temperature_prediction microservice for action: '{2}' and is now using STANDARD error.".format(building, zone, action))
mean, var, unit = Tstat.STANDARD_MEAN, Tstat.STANDARD_VAR, Tstat.STANDARD_UNIT
self.error[action] = {"mean": mean, "var": var}
def next_temperature(self, action):
self.last_temperature = self.temperature
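        # NOTE: assumes xsg.HEATING_ACTION == 1 and xsg.COOLING_ACTION == 2.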
self.temperature += 1 * (action == 1) - 1 * (action == 2) + np.random.normal(self.error[action]["mean"],
self.error[action]["var"])
return self.temperature
def reset(self, temperature, last_temperature=None):
self.temperature = temperature
self.last_temperature = last_temperature
class OutdoorThermostats:
pass
|
113647
|
import pytest
from invoke.context import Context
from test.test_utils.benchmark import execute_single_node_benchmark, get_py_version
@pytest.mark.skip(reason="Temp skip due to timeout")
@pytest.mark.model("resnet18_v2")
@pytest.mark.integration("cifar10 dataset")
def test_performance_mxnet_cpu(mxnet_training, cpu_only):
ctx = Context()
python_version = get_py_version(mxnet_training)
task_name = f"mx_train_single_node_cpu_{python_version}_resnet18v2_cifar10"
script_url = " https://github.com/awslabs/deeplearning-benchmark.git"
execute_single_node_benchmark(ctx, mxnet_training, "mxnet", task_name, python_version, script_url)
|
113649
|
# Shift every station ID in the CSV by 5,000,000, keeping the header intact.
with open("HaltestellenVVS_simplified_utf8.csv", "r") as f, \
        open("HaltestellenVVS_simplified_utf8_stationID.csv", "w") as r:
    r.write(f.readline())  # copy the header line unchanged
    for line in f:
        stationID = line.split(",")[0]
        newStationID = int(stationID) + 5000000
        outLine = str(newStationID) + line[len(stationID):]
        r.write(outLine)
|
113659
|
from geneal.genetic_algorithms._binary import BinaryGenAlgSolver
from geneal.genetic_algorithms._continuous import ContinuousGenAlgSolver
|
113720
|
import torch
from torch import nn
from maskrcnn_benchmark.modeling.poolers import LevelMapper
from maskrcnn_benchmark.modeling.utils import cat
from maskrcnn_benchmark.layers import ROIAlign
class SRPooler(nn.Module):
"""
SRPooler for Detection with or without FPN.
Also, the requirement of passing the scales is not strictly necessary, as they
can be inferred from the size of the feature map / size of original image,
which is available thanks to the BoxList.
"""
def __init__(self, output_size, scales, sampling_ratio):
"""
Arguments:
output_size (list[tuple[int]] or list[int]): output size for the pooled region
scales (list[float]): scales for each Pooler
sampling_ratio (int): sampling ratio for ROIAlign
"""
super(SRPooler, self).__init__()
poolers = []
for scale in scales:
poolers.append(
ROIAlign(
output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
)
)
self.poolers = nn.ModuleList(poolers)
self.output_size = output_size
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
self.map_levels = LevelMapper(lvl_min, lvl_max)
def convert_to_roi_format(self, boxes):
concat_boxes = cat([b.bbox for b in boxes], dim=0)
device, dtype = concat_boxes.device, concat_boxes.dtype
ids = cat(
[
torch.full((len(b), 1), i, dtype=dtype, device=device)
for i, b in enumerate(boxes)
],
dim=0,
)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def forward(self, x, boxes, sr=None):
"""
Arguments:
x (list[Tensor]): feature maps for each level
boxes (list[BoxList]): boxes to be used to perform the pooling operation.
sr(list([BoxList])): search region boxes.
Returns:
result (Tensor)
"""
num_levels = len(self.poolers)
if sr is None:
rois = self.convert_to_roi_format(boxes)
        else:
            # sr is provided: extract features for the search region instead
            rois = self.convert_to_roi_format(sr)
if num_levels == 1:
return self.poolers[0](x[0], rois)
# Always use the template box to get the feature level
levels = self.map_levels(boxes)
num_rois = len(rois)
num_channels = x[0].shape[1]
output_size = self.output_size[0]
dtype, device = x[0].dtype, x[0].device
result = torch.zeros(
(num_rois, num_channels, output_size, output_size),
dtype=dtype,
device=device,
)
for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
idx_in_level = torch.nonzero(levels == level).squeeze(1)
rois_per_level = rois[idx_in_level]
result[idx_in_level] = pooler(per_level_feature, rois_per_level).to(dtype)
return result
|
113724
|
import logging
from datetime import timedelta
from src.alerter.factory.alerting_factory import AlertingFactory
from src.alerter.grouped_alerts_metric_code.node.evm_node_metric_code \
import GroupedEVMNodeAlertsMetricCode as AlertsMetricCode
from src.configs.alerts.node.evm import EVMNodeAlertsConfig
from src.utils.configs import parse_alert_time_thresholds
from src.utils.timing import (TimedTaskTracker, TimedTaskLimiter)
class EVMNodeAlertingFactory(AlertingFactory):
"""
This class is in charge of alerting and managing the alerting state for the
EVM node alerter. The alerting_state dict is to be structured as
follows:
{
<parent_id>: {
<node_id>: {
Optional[warning_sent]: {
GroupedEVMNodeAlertsMetricCode.value: bool
},
Optional[critical_sent]: {
GroupedEVMNodeAlertsMetricCode.value: bool
},
Optional[error_sent]: {
GroupedEVMNodeAlertsMetricCode.value: bool
},
Optional[warning_window_timer]: {
GroupedEVMNodeAlertsMetricCode.value: TimedTaskTracker
},
Optional[critical_window_timer]: {
GroupedEVMNodeAlertsMetricCode.value: TimedTaskTracker
},
Optional[critical_repeat_timer]: {
GroupedEVMNodeAlertsMetricCode.value: TimedTaskLimiter
},
Optional[current_height]: Int
}
}
}
"""
def __init__(self, component_logger: logging.Logger) -> None:
super().__init__(component_logger)
def create_alerting_state(
self, parent_id: str, node_id: str,
evm_node_alerts_config: EVMNodeAlertsConfig) -> None:
"""
If no state is already stored, this function will create a new alerting
state for a node based on the passed alerts config.
:param parent_id: The id of the chain
:param node_id: The id of the node
:param evm_node_alerts_config: The alerts configuration
:return: None
"""
if parent_id not in self.alerting_state:
self.alerting_state[parent_id] = {}
if node_id not in self.alerting_state[parent_id]:
warning_sent = {
AlertsMetricCode.NoChangeInBlockHeight.value: False,
AlertsMetricCode.BlockHeightDifference.value: False,
AlertsMetricCode.NodeIsDown.value: False
}
critical_sent = {
AlertsMetricCode.NoChangeInBlockHeight.value: False,
AlertsMetricCode.BlockHeightDifference.value: False,
AlertsMetricCode.NodeIsDown.value: False
}
error_sent = {
AlertsMetricCode.InvalidUrl.value: False,
}
evm_node_is_down_thresholds = parse_alert_time_thresholds(
['warning_threshold', 'critical_threshold', 'critical_repeat'],
evm_node_alerts_config.evm_node_is_down)
block_height_difference_thresholds = parse_alert_time_thresholds(
['critical_repeat'],
evm_node_alerts_config.
evm_block_syncing_block_height_difference)
no_change_in_block_height_thresholds = parse_alert_time_thresholds(
['warning_threshold', 'critical_threshold', 'critical_repeat'],
evm_node_alerts_config.
evm_block_syncing_no_change_in_block_height)
warning_window_timer = {
AlertsMetricCode.NoChangeInBlockHeight.value:
TimedTaskTracker(timedelta(
seconds=no_change_in_block_height_thresholds[
'warning_threshold'])),
AlertsMetricCode.NodeIsDown.value:
TimedTaskTracker(timedelta(
seconds=evm_node_is_down_thresholds[
'warning_threshold'])),
}
critical_window_timer = {
AlertsMetricCode.NoChangeInBlockHeight.value:
TimedTaskTracker(timedelta(
seconds=no_change_in_block_height_thresholds[
'critical_threshold'])),
AlertsMetricCode.NodeIsDown.value:
TimedTaskTracker(timedelta(
seconds=evm_node_is_down_thresholds[
'critical_threshold'])),
}
critical_repeat_timer = {
AlertsMetricCode.NoChangeInBlockHeight.value: TimedTaskLimiter(
timedelta(seconds=no_change_in_block_height_thresholds[
'critical_repeat'])),
AlertsMetricCode.NodeIsDown.value:
TimedTaskLimiter(timedelta(
seconds=evm_node_is_down_thresholds[
'critical_repeat'])),
AlertsMetricCode.BlockHeightDifference.value:
TimedTaskLimiter(timedelta(
seconds=block_height_difference_thresholds[
'critical_repeat']))
}
self.alerting_state[parent_id][node_id] = {
'warning_sent': warning_sent,
'critical_sent': critical_sent,
'error_sent': error_sent,
'warning_window_timer': warning_window_timer,
'critical_window_timer': critical_window_timer,
'critical_repeat_timer': critical_repeat_timer,
'current_height': None,
}
def remove_chain_alerting_state(self, parent_id: str) -> None:
"""
This function deletes an entire alerting state for a chain.
:param parent_id: The id of the chain to be deleted
:return: None
"""
if parent_id in self.alerting_state:
del self.alerting_state[parent_id]
|
113739
|
class Solution:
# @return a string
def convert(self, s, nRows):
if nRows == 1 or len(s) <= 2:
return s
# compute the length of the zigzag
        zigzagLen = 2 * nRows - 2
lens = len(s)
res = ''
for i in range(nRows):
idx = i
while idx < lens:
res = res + s[idx]
if i != 0 and i != nRows - 1:
x = idx + (zigzagLen - 2*i)
if (x < lens):
res = res + s[x]
idx = idx + zigzagLen
return res
s = Solution()
assert s.convert('0123456789', 5) == '0817926354'
assert s.convert('0123456789', 3) == '0481357926'
assert s.convert('0123456789', 2) == '0246813579'
assert s.convert('012', 1) == '012'
|
113759
|
from os.path import abspath, dirname, join, normpath
from setuptools import setup
setup(
# Basic package information:
name = 'django-clear-cache',
version = '0.3',
packages = (
'clear_cache',
'clear_cache.management',
'clear_cache.management.commands'
),
# Packaging options:
zip_safe = False,
include_package_data = True,
# Package dependencies:
install_requires = ['Django>=1.0'],
# Metadata for PyPI:
author = '<NAME>',
author_email = '<EMAIL>',
license = 'UNLICENSE',
url = 'https://github.com/rdegges/django-clear-cache',
keywords = 'django cache management memcached clear',
description = 'A simple Django management command which clears your cache.',
long_description = open(normpath(join(dirname(abspath(__file__)),
'README.md'))).read()
)
|
113776
|
import sys
from BaseHTTPServer import HTTPServer
from unittest import TestCase
from samsungtv.httpd.subscribe_handler import SubscribeHttpRequestHandler
class TestSubscribeHttpRequestHandler(TestCase):
def test__log(self):
self.skipTest("Todo")
# def test_real(self):
# host = ""
# port = 8007
#
# try:
# # SubscribeRequestHandler.protocol_version = "HTTP/1.0"
#
# httpd = HTTPServer((host, port), SubscribeHttpRequestHandler)
#
# except Exception as e:
# sys.stderr.write(str(e))
# sys.exit(-1)
#
# print "Serving on " + host + ":" + str(port) + " ... "
#
# while True:
# httpd.handle_request()
|
113799
|
expected_output = {
'power-usage-information': {
'power-usage-item': [
{
'name': '<NAME>',
'state': 'Online',
'dc-input-detail2': {
'dc-input-status':
'OK (INP0 feed expected, INP0 feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '489.25',
'str-zone': 'Lower',
'str-dc-current': '9.50',
'str-dc-voltage': '51.50',
'str-dc-load': '23.30'
}
}, {
'name': '<NAME>',
'state': 'Online',
'dc-input-detail2': {
'dc-input-status':
'OK (INP0 feed expected, INP0 feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '489.25',
'str-zone': 'Lower',
'str-dc-current': '9.50',
'str-dc-voltage': '51.50',
'str-dc-load': '23.30'
}
}, {
'name': '<NAME>',
'state': 'Online',
'dc-input-detail2': {
'dc-input-status':
'OK (INP0 feed expected, INP0 feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '504.56',
'str-zone': 'Lower',
'str-dc-current': '9.75',
'str-dc-voltage': '51.75',
'str-dc-load': '24.03'
}
}, {
'name': '<NAME>',
'state': 'Online',
'dc-input-detail2': {
'dc-input-status':
'OK (INP0 feed expected, INP0 feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '491.62',
'str-zone': 'Lower',
'str-dc-current': '9.50',
'str-dc-voltage': '51.75',
'str-dc-load': '23.41'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Lower',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Lower',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Lower',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Lower',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Lower',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': 'PSM 9',
'state': 'Online',
'dc-input-detail2': {
'dc-input-status':
'OK (INP0 feed expected, INP0 feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '309.00',
'str-zone': 'Upper',
'str-dc-current': '6.00',
'str-dc-voltage': '51.50',
'str-dc-load': '14.71'
}
}, {
'name': '<NAME>',
'state': 'Online',
'dc-input-detail2': {
'dc-input-status':
'OK (INP0 feed expected, INP0 feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '307.50',
'str-zone': 'Upper',
'str-dc-current': '6.00',
'str-dc-voltage': '51.25',
'str-dc-load': '14.64'
}
}, {
'name': '<NAME>',
'state': 'Online',
'dc-input-detail2': {
'dc-input-status':
'OK (INP0 feed expected, INP0 feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '309.00',
'str-zone': 'Upper',
'str-dc-current': '6.00',
'str-dc-voltage': '51.50',
'str-dc-load': '14.71'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Upper',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Upper',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Upper',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Unknown',
'dc-input-detail2': {
'dc-input-status': 'Not ready'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Upper',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Upper',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}, {
'name': '<NAME>',
'state': 'Present',
'dc-input-detail2': {
'dc-input-status':
'Check (No feed expected, No feed connected)'
},
'pem-capacity-detail': {
'capacity-actual': '2100',
'capacity-max': '2500'
},
'dc-output-detail2': {
'str-dc-power': '0.00',
'str-zone': 'Upper',
'str-dc-current': '0.00',
'str-dc-voltage': '0.00',
'str-dc-load': '0.00'
}
}
],
'power-usage-system': {
'power-usage-zone-information': [{
'str-zone': 'Upper',
'capacity-actual': '6300',
'capacity-max': '7500',
'capacity-allocated': '3332',
'capacity-remaining': '2968',
'capacity-actual-usage': '925.50'
}, {
'str-zone': 'Lower',
'capacity-actual': '8400',
'capacity-max': '10000',
'capacity-allocated': '6294',
'capacity-remaining': '2106',
'capacity-actual-usage': '1974.69'
}],
'capacity-sys-actual':
'14700',
'capacity-sys-max':
'17500',
'capacity-sys-remaining':
'5074'
}
}
}
|
113826
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import sys
sys.path.append("..")
from option import args
OPS = {
'none': lambda C, stride, affine: Zero(stride),
'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
'skip_connect': lambda C, stride, affine: Identity(),
'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
'conv_7x1_1x7': lambda C, stride, affine: nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C, C, (1, 7), stride=(1, stride), padding=(0, 3), bias=False),
nn.Conv2d(C, C, (7, 1), stride=(stride, 1), padding=(3, 0), bias=False),
nn.BatchNorm2d(C, affine=affine)
),
'conv_3x3': lambda C, stride, affine: ReLUConvBN(C, C, 3, stride, 1, affine=affine),
'conv_3x3_no_relu': lambda C, stride, affine: ConvBN(C, C, 3, stride, 1, affine=affine),
'conv_3x3_no_relu_no_bn': lambda C, stride, affine: Conv(C, C, 3, stride, 1, affine=affine),
'conv_3x3_no_bn': lambda C, stride, affine: ReLUConv(C, C, 3, stride, 1, affine=affine),
    'rcab': lambda C, stride, affine: RCAB(C, 3, 3, bias=True, bn=True, act=nn.ReLU(True), res_scale=1),
    # in RCAB(n_feat, kernel_size, reduction): the first 3 is the kernel size,
    # the second 3 is the channel-attention reduction factor
    'up_and_down': lambda C, stride, affine: UPANDDOWN(C, scale_factor=args.scale[0]),
    # upsampling cells:
    'sub_pixel': lambda C, stride, affine: SUBPIXEL(C, scale_factor=stride),
    'manual_sub_pixel': lambda C, stride, affine: SUBPIXEL(C, scale_factor=2),
    'deconvolution': lambda C, stride, affine: Deconvolution(C, stride),
    'bilinear': lambda C, stride, affine: Bilinear(stride),
    'nearest': lambda C, stride, affine: Nearest(stride),
    'linear': lambda C, stride, affine: Linear(stride),
    'area': lambda C, stride, affine: Area(stride),
    'upproject': lambda C, stride, affine: Upproject(C, scale_factor=2),
    'downproject': lambda C, stride, affine: Downproject(C, scale_factor=2),
    'upprojectnone': lambda C, stride, affine: UpprojectNone(C, scale_factor=2),
}
def default_conv(in_channels, out_channels, kernel_size, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
padding=(kernel_size//2), bias=bias)
class ReLUConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
super(ReLUConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
# nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x):
return self.op(x)
class Downproject(nn.Module):
def __init__(
self, base_filter, scale_factor):
super(Downproject, self).__init__()
if scale_factor == 2:
kernel = 6
stride = 2
padding = 2
elif scale_factor == 4:
kernel = 8
stride = 4
padding = 2
elif scale_factor == 8:
kernel = 12
stride = 8
padding = 2
# self.up1 = UpBlock(base_filter, kernel, stride, padding)
self.down1 = DownBlock(base_filter, kernel, stride, padding)
def forward(self, x):
x = self.down1(x)
return x
class UpprojectNone(nn.Module):
def __init__(
self, base_filter, scale_factor):
super(UpprojectNone, self).__init__()
if scale_factor == 2:
kernel = 6
stride = 2
padding = 2
elif scale_factor == 4:
kernel = 8
stride = 4
padding = 2
elif scale_factor == 8:
kernel = 12
stride = 8
padding = 2
self.up1 = UpBlock(base_filter, kernel, stride, padding)
# self.down1 = DownBlock(base_filter, kernel, stride, padding)
def forward(self, x):
x = self.up1(x)
return x.mul(0.)
class Upproject(nn.Module):
def __init__(
self, base_filter, scale_factor):
super(Upproject, self).__init__()
if scale_factor == 2:
kernel = 6
stride = 2
padding = 2
elif scale_factor == 4:
kernel = 8
stride = 4
padding = 2
elif scale_factor == 8:
kernel = 12
stride = 8
padding = 2
self.up1 = UpBlock(base_filter, kernel, stride, padding)
# self.down1 = DownBlock(base_filter, kernel, stride, padding)
def forward(self, x):
x = self.up1(x)
return x
class Conv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
super(Conv, self).__init__()
self.op = nn.Sequential(
# nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
# nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x):
return self.op(x)
class ConvBN(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
super(ConvBN, self).__init__()
self.op = nn.Sequential(
# nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x):
return self.op(x)
class Bilinear(nn.Module):
def __init__(self, stride):
super(Bilinear, self).__init__()
self.scale=stride
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale, mode='bilinear')
class Linear(nn.Module):
def __init__(self, stride):
super(Linear, self).__init__()
self.scale=stride
def forward(self, x):
        # note: mode='linear' only supports 3-D (N, C, L) inputs;
        # 4-D feature maps need mode='bilinear' instead
        return F.interpolate(x, scale_factor=self.scale, mode='linear')
class Area(nn.Module):
def __init__(self, stride):
super(Area, self).__init__()
self.scale=stride
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale, mode='area')
class Nearest(nn.Module):
def __init__(self, stride):
super(Nearest, self).__init__()
self.scale=stride
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale, mode='nearest')
class Deconvolution(nn.Module):
def __init__(self, C, stride):
super(Deconvolution, self).__init__()
if stride==2:
kernel_size=3
output_padding=1
elif stride==4:
kernel_size=5
output_padding = 1
else:
kernel_size=3
output_padding = 0
self.deconv=nn.ConvTranspose2d(C, C,kernel_size=kernel_size,stride=stride, padding=1,output_padding=output_padding)
def forward(self, x):
return self.deconv(x)
class SUBPIXEL(nn.Module):
def __init__(self, C, scale_factor,conv=default_conv):
super(SUBPIXEL, self).__init__()
self.upsample=Upsampler(conv, scale_factor, C, act=False)
def forward(self, x):
return self.upsample(x)
class Upsampler(nn.Sequential):
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
m = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
m.append(conv(n_feats, 4 * n_feats, 3, bias))
m.append(nn.PixelShuffle(2))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
elif scale == 3:
m.append(conv(n_feats, 9 * n_feats, 3, bias))
m.append(nn.PixelShuffle(3))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if act == 'relu':
m.append(nn.ReLU(True))
elif act == 'prelu':
m.append(nn.PReLU(n_feats))
else:
raise NotImplementedError
super(Upsampler, self).__init__(*m)
class ReLUConvBN(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x):
return self.op(x)
class DilConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
super(DilConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False),
nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_in, affine=affine),
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding, groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x):
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Zero(nn.Module):
def __init__(self, stride):
super(Zero, self).__init__()
self.stride = stride
def forward(self, x):
        if self.stride == 1:
            return x.mul(0.)
        # match the spatial size a strided op would produce
        return x[:, :, ::self.stride, ::self.stride].mul(0.)
class FactorizedReduce(nn.Module):
def __init__(self, C_in, C_out, affine=True):
super(FactorizedReduce, self).__init__()
assert C_out % 2 == 0
self.relu = nn.ReLU(inplace=False)
self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
def forward(self, x):
x = self.relu(x)
if x.size(2)%2!=0:
x = F.pad(x, (1,0,1,0), "constant", 0)
out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
out = self.bn(out)
return out
class ThinConv2d(nn.Conv2d):
"""
custom convolutional layers for thin convolution
"""
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1, bias=True):
super(ThinConv2d, self).__init__(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
def _thin_weight(self, input, index=None):
n, c, h, w = input.size()
# print(index.size())
# print(index)
num_nodes = index.size(0)
index = index.view(1, num_nodes, 1, 1)
final_index = index.expand(n, num_nodes, h, w)
thin_data = torch.gather(input, 1, final_index)
return thin_data
def forward(self, input, index=None):
if index is not None:
thin_weight = self._thin_weight(self.weight, index)
else:
thin_weight = self.weight
return F.conv2d(input, thin_weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
class FinalConv(nn.Module):
def __init__(self, C_in, C_out, affine=True):
super(FinalConv, self).__init__()
assert C_out % 2 == 0
self.relu = nn.ReLU(inplace=False)
self.thin_conv = ThinConv2d(C_in, C_out, 1, stride=1, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
def forward(self, x, index):
x = self.relu(x)
out = self.thin_conv(x, index)
out = self.bn(out)
return out
## Channel Attention (CA) Layer
class CALayer(nn.Module):
def __init__(self, channel, reduction=16):
super(CALayer, self).__init__()
# global average pooling: feature --> point
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# feature channel downscale and upscale --> channel weight
self.conv_du = nn.Sequential(
nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
nn.Sigmoid()
)
def forward(self, x):
y = self.avg_pool(x)
y = self.conv_du(y)
return x * y
## Residual Channel Attention Block (RCAB)
class RCAB(nn.Module):
def __init__(
self, n_feat, kernel_size, reduction,conv=default_conv,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(RCAB, self).__init__()
modules_body = []
for i in range(2):
modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))
if bn: modules_body.append(nn.BatchNorm2d(n_feat))
if i == 0: modules_body.append(act)
modules_body.append(CALayer(n_feat, reduction))
self.body = nn.Sequential(*modules_body)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x)
#res = self.body(x).mul(self.res_scale)
res += x
return res
class UPANDDOWN(nn.Module):
def __init__(
self, base_filter, scale_factor):
super(UPANDDOWN, self).__init__()
if scale_factor == 2:
kernel = 6
stride = 2
padding = 2
elif scale_factor == 4:
kernel = 8
stride = 4
padding = 2
elif scale_factor == 8:
kernel = 12
stride = 8
padding = 2
self.up1 = UpBlock(base_filter, kernel, stride, padding)
self.down1 = DownBlock(base_filter, kernel, stride, padding)
def forward(self, x):
x = self.up1(x)
x = self.down1(x)
return x
class UpBlock(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias=True, activation='prelu', norm=None):
super(UpBlock, self).__init__()
        # pass `activation` by keyword; positionally it would land on `bias`
        self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation=activation, norm=None)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation=activation, norm=None)
        self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation=activation, norm=None)
def forward(self, x):
h0 = self.up_conv1(x)
l0 = self.up_conv2(h0)
h1 = self.up_conv3(l0 - x)
return h1 + h0
class DownBlock(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias=True, activation='prelu', norm=None):
super(DownBlock, self).__init__()
        # pass `activation` by keyword; positionally it would land on `bias`
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation=activation, norm=None)
        self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation=activation, norm=None)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation=activation, norm=None)
def forward(self, x):
l0 = self.down_conv1(x)
h0 = self.down_conv2(l0)
l1 = self.down_conv3(h0 - x)
return l1 + l0
class DeconvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None):
super(DeconvBlock, self).__init__()
self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.deconv(x))
else:
out = self.deconv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class ConvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
super(ConvBlock, self).__init__()
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm =='batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.conv(x))
else:
out = self.conv(x)
if self.activation is not None:
return self.act(out)
else:
return out
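# --- Usage sketch (added; not part of the original module) -----------------
# Instantiates one candidate operation from OPS and runs a dummy tensor
# through it. The channel count and input size are made up, and the module's
# own imports (e.g. option.args) must resolve for the file to load at all.
if __name__ == '__main__':
    C = 16
    op = OPS['sep_conv_3x3'](C, 1, True)  # (channels, stride, affine)
    x = torch.randn(2, C, 32, 32)
    print(op(x).shape)  # -> torch.Size([2, 16, 32, 32])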
|
113858
|
from fastapi import APIRouter, Depends, Body
from core import deps
from scheams import User_Pydantic, UserIn_Pydantic, Response200, Response400
from models import User
user = APIRouter(tags=["User related"], dependencies=[Depends(deps.get_current_user)])
@user.get("/user", summary="Current user")
async def user_info(user_obj: User = Depends(deps.get_current_user)):
"""
    - username: str (required)
    - password: str (required)
"""
return Response200(data=await User_Pydantic.from_tortoise_orm(user_obj))
@user.put("/user", summary="修改信息")
async def user_update(user_form: UserIn_Pydantic, user_obj: User = Depends(deps.get_current_user)):
"""
    Update the current user's info.
"""
user_form.username = user_obj.username
user_form.password = <PASSWORD>
if await User.filter(username=user_obj.username).update(**user_form.dict()):
return Response200(data=await User_Pydantic.from_tortoise_orm(user_obj))
    return Response400(msg="Update failed")
|
113870
|
from django.conf.urls import url
from django.contrib import messages
from jet.dashboard import dashboard
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpRequest
from django.http import JsonResponse
from django.contrib.auth import get_user_model, authenticate, login
from dashboard_modules import JobModule
from django.db.models import Q
from .models import Advanced_Job
from datetime import datetime
from threading import Thread
import yfinance as yf
import pandas as pd
import requests
import telegram
import json
import uuid
import time
# module-level credentials (redacted) used by the alert helpers below;
# the original 'global' statement here was a no-op and has been dropped
tele_token = '<PASSWORD>:AA<PASSWORD>'
token = 'pk_<PASSWORD>'
# Create your views here.
def home(request):
return render(request, 'admin/base.html')
def historical_data(symbol, interval='1h', period='5y'):
columns = ['Symbol', 'Datetime', 'Open', 'High', 'Low', 'Close']
try:
ticker = yf.Ticker(symbol)
if interval=='1m':
df = ticker.history(interval=interval, period='5d')
elif 'mo' in interval or 'wk' in interval:
df = ticker.history(interval=interval, period='5y')
df = df.dropna()
else:
df = ticker.history(interval=interval)
df = df.reset_index(col_level=0)
df['Symbol'] = symbol
columns = df.columns.tolist()
columns[0] = 'Datetime'
df.columns = columns
columns = ['Symbol', 'Datetime', 'Open', 'High', 'Low', 'Close']
df = df[columns]
print(df.iloc[0])
    except Exception:
        # yfinance lookup failed: fall back to Binance klines
root_url = 'https://api.binance.com/api/v1/klines'
url = root_url + '?symbol=' + symbol + '&interval=' + interval
data = json.loads(requests.get(url).text)
df = pd.DataFrame(data)
df.columns = ['open_time',
'Open', 'High', 'Low', 'Close', 'v',
'Datetime', 'qav', 'num_trades',
'taker_base_vol', 'taker_quote_vol', 'ignore']
df['Symbol'] = symbol
df = df[columns]
df['Datetime'] = [datetime.fromtimestamp(x/1000.0) for x in df['Datetime']]
df['Datetime'] = pd.to_datetime(df['Datetime'])
df = df.tail(50).reset_index(drop=True)
df['Close'] = df['Close'].fillna(method='ffill')
df['Close'] = df['Close'].astype(float)
return df
def calculate_rsi_old(df, n=14):
delta = df['Close'].diff()
dUp, dDown = delta.copy(), delta.copy()
dUp[dUp < 0] = 0
dDown[dDown > 0] = 0
RolUp = dUp.rolling(window=n).mean()
RolDown = dDown.rolling(window=n).mean().abs()
RS = RolUp / RolDown
df['RSI']= 100.0 - (100.0 / (1.0 + RS))
return df
def calculate_rsi(df, time_window=14):
data = df['Close']
diff = data.diff(1).dropna()
up_chg = 0 * diff
down_chg = 0 * diff
up_chg[diff > 0] = diff[ diff>0 ]
down_chg[diff < 0] = diff[ diff < 0 ]
up_chg_avg = up_chg.ewm(com=time_window-1 , min_periods=time_window).mean()
down_chg_avg = down_chg.ewm(com=time_window-1 , min_periods=time_window).mean()
rs = abs(up_chg_avg/down_chg_avg)
rsi = 100 - 100/(1+rs)
df['RSI'] = rsi
return df
def bollingerbands(df, n=20, bb_std=2):
print(n)
print(bb_std)
df['MA20'] = df['Close'].rolling(window=n).mean()
df['20dSTD'] = df['Close'].rolling(window=n).std()
df['Upper'] = df['MA20'] + (df['20dSTD'] * bb_std)
df['Lower'] = df['MA20'] - (df['20dSTD'] * bb_std)
return df
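# Illustrative demo (added): the indicator helpers above only need a 'Close'
# column, so they can be exercised on synthetic prices without touching
# yfinance or Binance. The function name is ours, not the original module's.
def _indicator_demo():
    import numpy as np  # local import; the original module doesn't use numpy
    demo = pd.DataFrame({'Close': 100 + np.cumsum(np.random.randn(60))})
    demo = calculate_rsi(demo, time_window=14)
    demo = bollingerbands(demo, n=20, bb_std=2)
    print(demo[['Close', 'RSI', 'Upper', 'Lower']].tail())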
def sendAlertToChannel(msg):
chat_id = '@jackie_stock_channel'
url = 'https://api.telegram.org/bot{}/'.format(tele_token)
params = {
'chat_id': chat_id,
'text': msg
}
res = requests.post(url+'sendMessage', data=params)
print(res.json())
def sendAlertToGroupOrUser(msg):
tele_token = '<KEY>'
bot = telegram.Bot(tele_token)
chat_id = '-446515297'
msg = msg
print(bot.send_message(chat_id=chat_id, text=msg))
def send_alert(alerts):
for alert in alerts:
msg = json.dumps(alert, indent=4)
msg = ""
for key in alert.keys():
msg += "{}: {}\n".format(key, alert[key])
sendAlertToChannel(msg)
# sendAlertToGroupOrUser(msg)
def run_monitoring(job, user_id):
current_time = datetime.now().time()
prev_hour = current_time.hour
prev_minute = current_time.minute
isCheck = True
while True:
if isCheck:
symbol = job.symbol
interval = job.interval
rsi_period = job.rsi_period
rsi_value = job.rsi_value
bb_period = job.bb_period
bb_option = job.bb_option
# bb_upperband = job.bb_upperband
# bb_lowerband = job.bb_lowerband
bb_std = job.bb_std_num
interval = interval.replace(' min', 'm')
interval = interval.replace(' hour', 'h')
interval = interval.replace(' day', 'd')
interval = interval.replace(' week', 'wk')
interval = interval.replace(' month', 'mo')
print(symbol, interval, rsi_period)
df = historical_data(symbol, interval=interval)
df = calculate_rsi(df, time_window=rsi_period)
df = bollingerbands(df, n=bb_period, bb_std=bb_std)
print(df)
lastvalue = df.iloc[-1]
print(lastvalue)
rsi = lastvalue['RSI']
upper = lastvalue['Upper']
lower = lastvalue['Lower']
close = lastvalue['Close']
bb_value = {'Upperband': upper, 'Lowerband': lower}
alerts = []
if rsi >= rsi_value:
alert = {
'Job name': job.name,
'Symbol': symbol,
'Time': datetime.now().time().strftime("%H:%M:%S"),
'Alert': 'Crossed over defined RSI value!',
'RSI value': rsi,
}
alerts.append(alert)
if bb_option=='Upperband':
if close >= upper:
alert = {
'Job_name': job.name,
'Symbol': symbol,
'Time': datetime.now().time().strftime("%H:%M:%S"),
'Alert': 'Reached bollingerbands upperband!',
'Current close': close,
}
alerts.append(alert)
if bb_option=='Lowerband':
if close <= lower:
alert = {
'Job_name': job.name,
'Symbol': symbol,
'Time': datetime.now().time().strftime("%H:%M:%S"),
'Alert': 'Reached bollingerbands lowerband!',
'Current close': close,
}
alerts.append(alert)
if len(alerts) == 0:
alert = {
'Job_name': job.name,
'Symbol': job.symbol,
'Time': datetime.now().time().strftime("%H:%M:%S"),
'Alert': 'No alert!',
'RSI value': rsi,
'Current close': close,
}
alerts.append(alert)
# sending alert to telegram
send_alert(alerts)
print(json.dumps(alerts, indent=4))
with open('data.json', 'w') as f:
json.dump(alerts, f, indent=4)
time.sleep(1)
# else:
# alerts = []
# alert = {
# 'Job_name': job.name,
# 'Symbol': job.symbol,
# 'Time': datetime.now().time().strftime("%H:%M:%S"),
# 'Alert': 'No alert!'
# }
# alerts.append(alert)
# send_alert(alerts)
# print(json.dumps(alerts, indent=4))
# with open('data.json', 'w') as f:
# json.dump(alerts, f, indent=4)
# time.sleep(1)
# determine if we need to check again the historical data
my_jobs = Advanced_Job.objects.filter(Q(user_id=user_id))
if not my_jobs:
data = {
'job_name': job.name,
'time': datetime.now().time().strftime("%H:%M:%S"),
'status': 'Stopped'
}
print(data)
with open('data.json', 'w') as f:
json.dump(data, f, indent=4)
print('Job stopped or db not found. Exiting now...')
break
else:
current_time = datetime.now().time()
print(current_time)
current_hour = current_time.hour
current_minute = current_time.minute
if 'h' in job.interval:
print('hourly')
time_interval = int(job.interval.split('h')[0].strip(' '))
if current_hour - prev_hour >= time_interval:
prev_hour = current_hour
prev_minute = current_minute
isCheck = True
else:
isCheck = False
elif 'min' in job.interval:
print('minutely')
time_interval = int(job.interval.split('m')[0].strip(' '))
if current_minute - prev_minute >= time_interval or (current_minute < prev_minute and current_minute+60 > prev_minute):
prev_hour = current_hour
prev_minute = current_minute
isCheck = True
else:
isCheck = False
print(isCheck)
time.sleep(2)
@csrf_exempt
def start_monitor(request):
base_url = request.build_absolute_uri('/').strip("/")
try:
titles = request.POST.getlist("title")
symbols = request.POST.getlist("symbol")
intervals = request.POST.getlist("interval")
rsi_periods = request.POST.getlist("rsi_period")
rsi_values = request.POST.getlist("rsi_value")
bb_periods = request.POST.getlist("bb_period")
bb_options = request.POST.getlist("bb_option")
# bb_upperbands = request.POST.getlist("bb_upperband")
# bb_lowerbands = request.POST.getlist("bb_lowerband")
bb_stds = request.POST.getlist("bb_std")
user_id = request.user.id
    except Exception:
print('No jobs defined')
return redirect('/admin/')
print(titles)
for i in range(len(titles)):
title = titles[i]
symbol = symbols[i]
interval = intervals[i]
rsi_period = int(rsi_periods[i])
rsi_value = float(rsi_values[i])
bb_period = int(bb_periods[i])
bb_option = bb_options[i]
# bb_upperband = float(bb_upperbands[i])
# bb_lowerband = float(bb_lowerbands[i])
bb_std = int(bb_stds[i])
print(title, interval)
try:
result = Advanced_Job.objects.filter(Q(user_id=user_id) & Q(name=title))
if result:
print('There is a record', result)
pass
else:
print('There was no record')
print("{} is being inserted...".format(title))
job_instance = Advanced_Job.objects.create(
user_id = user_id,
name = title,
symbol = symbol,
interval = interval,
rsi_period = rsi_period,
rsi_value = rsi_value,
bb_period = bb_period,
bb_option = bb_option,
# bb_upperband = bb_upperband,
# bb_lowerband = bb_lowerband,
bb_std_num=bb_std
)
        except Exception:
print("{} is being inserted...".format(title))
job_instance = Advanced_Job.objects.create(
user_id = user_id,
name = title,
symbol = symbol,
interval = interval,
rsi_period = rsi_period,
rsi_value = rsi_value,
bb_period = bb_period,
bb_option = bb_option,
# bb_upperband = bb_upperband,
# bb_lowerband = bb_lowerband,
bb_std_num=bb_std
)
my_jobs = Advanced_Job.objects.filter(Q(user_id=user_id))
for job in my_jobs:
print(job.name)
th = Thread(target=run_monitoring, args=(job, user_id))
th.start()
return redirect('/admin/')
def view_monitor(request):
try:
with open('data.json') as f:
data = json.load(f)
    except Exception:
data = 'Alert not ready!'
params = {'data': data}
return render(request, 'admin/monitor.html', params)
def stop_monitor(request):
user_id = request.user.id
Advanced_Job.objects.filter(Q(user_id=user_id)).delete()
my_jobs = Advanced_Job.objects.filter(Q(user_id=user_id))
print(my_jobs)
return redirect('/admin/')
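# Note (added): run_monitoring loops forever inside a plain Thread, so a job
# thread only exits once the user's Advanced_Job rows are deleted, which is
# exactly what stop_monitor does above.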
|
113877
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import spi, sensor
from esphome.const import (
CONF_CURRENT,
CONF_ID,
CONF_POWER,
CONF_VOLTAGE,
UNIT_VOLT,
UNIT_AMPERE,
UNIT_WATT,
DEVICE_CLASS_POWER,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_VOLTAGE,
)
from esphome import automation
from esphome.automation import maybe_simple_id
CODEOWNERS = ["@balrog-kun"]
DEPENDENCIES = ["spi"]
cs5460a_ns = cg.esphome_ns.namespace("cs5460a")
CS5460APGAGain = cs5460a_ns.enum("CS5460APGAGain")
PGA_GAIN_OPTIONS = {
"10X": CS5460APGAGain.CS5460A_PGA_GAIN_10X,
"50X": CS5460APGAGain.CS5460A_PGA_GAIN_50X,
}
CS5460AComponent = cs5460a_ns.class_("CS5460AComponent", spi.SPIDevice, cg.Component)
CS5460ARestartAction = cs5460a_ns.class_("CS5460ARestartAction", automation.Action)
CONF_SAMPLES = "samples"
CONF_PHASE_OFFSET = "phase_offset"
CONF_PGA_GAIN = "pga_gain"
CONF_CURRENT_GAIN = "current_gain"
CONF_VOLTAGE_GAIN = "voltage_gain"
CONF_CURRENT_HPF = "current_hpf"
CONF_VOLTAGE_HPF = "voltage_hpf"
CONF_PULSE_ENERGY = "pulse_energy"
def validate_config(config):
current_gain = abs(config[CONF_CURRENT_GAIN]) * (
1.0 if config[CONF_PGA_GAIN] == "10X" else 5.0
)
voltage_gain = config[CONF_VOLTAGE_GAIN]
pulse_energy = config[CONF_PULSE_ENERGY]
if current_gain == 0.0 or voltage_gain == 0.0:
raise cv.Invalid("The gains can't be zero")
max_energy = (0.25 * 0.25 / 3600 / (2**-4)) / (voltage_gain * current_gain)
min_energy = (0.25 * 0.25 / 3600 / (2**18)) / (voltage_gain * current_gain)
mech_min_energy = (0.25 * 0.25 / 3600 / 7.8) / (voltage_gain * current_gain)
if pulse_energy < min_energy or pulse_energy > max_energy:
raise cv.Invalid(
"For given current&voltage gains, the pulse energy must be between "
f"{min_energy} Wh and {max_energy} Wh and in mechanical counter mode "
f"between {mech_min_energy} Wh and {max_energy} Wh"
)
return config
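# Worked example (added for illustration): with the schema defaults below
# (pga_gain "10X", current_gain 0.001, voltage_gain 0.001) the adjusted
# current gain stays 0.001, so
#   max_energy = (0.25 * 0.25 / 3600 / 2**-4) / (0.001 * 0.001) ~= 277.8 Wh
#   min_energy = (0.25 * 0.25 / 3600 / 2**18) / (0.001 * 0.001) ~= 6.6e-5 Wh
# and the default pulse_energy of 10.0 Wh passes validation.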
validate_energy = cv.float_with_unit("energy", "(Wh|WH|wh)?", optional_unit=True)
CONFIG_SCHEMA = cv.All(
cv.Schema(
{
cv.GenerateID(): cv.declare_id(CS5460AComponent),
cv.Optional(CONF_SAMPLES, default=4000): cv.int_range(min=1, max=0xFFFFFF),
cv.Optional(CONF_PHASE_OFFSET, default=0): cv.int_range(min=-64, max=63),
cv.Optional(CONF_PGA_GAIN, default="10X"): cv.enum(
PGA_GAIN_OPTIONS, upper=True
),
cv.Optional(CONF_CURRENT_GAIN, default=0.001): cv.negative_one_to_one_float,
cv.Optional(CONF_VOLTAGE_GAIN, default=0.001): cv.zero_to_one_float,
cv.Optional(CONF_CURRENT_HPF, default=True): cv.boolean,
cv.Optional(CONF_VOLTAGE_HPF, default=True): cv.boolean,
cv.Optional(CONF_PULSE_ENERGY, default=10.0): validate_energy,
cv.Optional(CONF_VOLTAGE): sensor.sensor_schema(
unit_of_measurement=UNIT_VOLT,
accuracy_decimals=0,
device_class=DEVICE_CLASS_VOLTAGE,
),
cv.Optional(CONF_CURRENT): sensor.sensor_schema(
unit_of_measurement=UNIT_AMPERE,
accuracy_decimals=1,
device_class=DEVICE_CLASS_CURRENT,
),
cv.Optional(CONF_POWER): sensor.sensor_schema(
unit_of_measurement=UNIT_WATT,
accuracy_decimals=0,
device_class=DEVICE_CLASS_POWER,
),
}
)
.extend(cv.COMPONENT_SCHEMA)
.extend(spi.spi_device_schema(cs_pin_required=False)),
validate_config,
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await spi.register_spi_device(var, config)
cg.add(var.set_samples(config[CONF_SAMPLES]))
cg.add(var.set_phase_offset(config[CONF_PHASE_OFFSET]))
cg.add(var.set_pga_gain(config[CONF_PGA_GAIN]))
cg.add(var.set_gains(config[CONF_CURRENT_GAIN], config[CONF_VOLTAGE_GAIN]))
cg.add(var.set_hpf_enable(config[CONF_CURRENT_HPF], config[CONF_VOLTAGE_HPF]))
cg.add(var.set_pulse_energy_wh(config[CONF_PULSE_ENERGY]))
if CONF_VOLTAGE in config:
conf = config[CONF_VOLTAGE]
sens = await sensor.new_sensor(conf)
cg.add(var.set_voltage_sensor(sens))
if CONF_CURRENT in config:
conf = config[CONF_CURRENT]
sens = await sensor.new_sensor(conf)
cg.add(var.set_current_sensor(sens))
if CONF_POWER in config:
conf = config[CONF_POWER]
sens = await sensor.new_sensor(conf)
cg.add(var.set_power_sensor(sens))
@automation.register_action(
"cs5460a.restart",
CS5460ARestartAction,
maybe_simple_id(
{
cv.Required(CONF_ID): cv.use_id(CS5460AComponent),
}
),
)
async def restart_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
return cg.new_Pvariable(action_id, template_arg, paren)
|
113882
|
from itertools import groupby
# If you have problems with 'rufv' show=True,
# enable the console compatibility mode to use
# more common characters.
compat = False
if compat:
vbar = '|'
hbar = '-'
intersection = '+'
else:
vbar = '│'
hbar = '─'
intersection = '┼'
def ruf(pol, x):
"""For the polynomial 'pol' (a list of its coefficients),
and a value to test with 'x', returns True if 'x' is
    a root for the polynomial, using Ruffini's rule.
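    Example: ruf([1, -3, 2], 1) -> True, since 1 is a root of x² - 3x + 2.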
"""
c = pol[0]
for i in range(1, len(pol)):
c = pol[i] + c*x
return c == 0
def rufv(pol, x, show=False, show_top=True):
"""Same as 'ruf', but verbose. Returns the factorized
polynomial iff 'x' is a root for the polynomial.
If 'show' and not 'skip_top', the first line won't be
shown (useful to chain multiple calls to this method).
"""
r = [pol[0]]
for i in range(1, len(pol)):
r.append(pol[i] + r[-1] * x)
if show:
# |pol pol pol
# x| r*x r*x
# -------------
# | r r r
lines = ['' for _ in range(4)]
lines[1] += str(x)
lines[0] += ' ' * len(lines[1]) + vbar
lines[2] += hbar * len(lines[1]) + intersection
lines[1] += vbar
lines[3] = lines[0]
for i in range(len(pol)):
pi = str(pol[i])
rx = str(r[i-1] * x) if i != 0 else ''
ri = str(r[i])
pad = max(len(pi), len(rx), len(ri))
lines[0] += pi.rjust(pad) + ' '
lines[1] += rx.rjust(pad) + ' '
lines[3] += ri.rjust(pad) + ' '
lines[2] += hbar * (pad + 1)
if show_top:
print('\n'.join(lines))
else:
print('\n'.join(lines[1:]))
if r[-1] == 0:
return r
def findruf(pol, limit=100, show=False):
"""Finds a possible integer root for the given polynomial
and returns which root this should be. Returns None if
no root is found within [-limit, limit].
If 'limit' is None, then the algorithm will never stop.
"""
# If there is no constant term, then we can divide everything
# by 'x' to get one grade less, thus, 0 is valid for Ruffini.
if pol[-1] == 0:
if show:
rufv(pol, 0, show=True)
return 0
if limit is None:
limit = -1
i = 0
while i != limit:
i += 1
if ruf(pol, i):
if show:
rufv(pol, i, show=True)
return i
if ruf(pol, -i):
if show:
rufv(pol, -i, show=True)
return -i
def intfactorize(pol, limit=100, show=False):
"""Tries to factorize the given polynomial using Ruffini's rule,
and returns a list of lists containing the coefficients for
each x's grade (in decreasing, e.g., [2, 4, 1] for 2x² + x⁴ + 1)
"""
result = []
show_top = True
while True:
r = findruf(pol, limit)
if r is None:
break
# Resulting polynomial, without the trailing 0
pol = rufv(pol, r, show=show, show_top=show_top)[:-1]
# Factor is (x - r)
result.append([1, -r])
# We're done if the polynomial is grade 2 or less
if len(pol) <= 2:
break
# Not showing the top next time
show_top = False
result.append(pol)
if show:
line = []
for p, items in groupby(sorted(result)):
line.append('(')
line.append(strpol(p))
line.append(')')
count = sum(1 for _ in items)
if count != 1:
line.append(strpower(count))
print(''.join(line))
return result
def strpower(i):
"""Stringifies the i'th power"""
powers = '⁰¹²³⁴⁵⁶⁷⁸⁹'
if i < 10:
return powers[i]
result = []
while i >= 10:
result.append(powers[i % 10])
i //= 10
result.append(powers[i])
return ''.join(reversed(result))
def strpol(pol, add_spaces=True):
"""Stringifies the given polynomial"""
result = []
i = len(pol)
for v in pol:
i -= 1
if v == 0:
continue
        if v == 1 and i > 0:
            result.append('+')  # a coefficient of 1 is implicit before 'x'
        else:
            if v > 0:
                result.append('+')
            result.append(str(v))  # str() carries the '-' for negatives
if i > 0:
result.append('x')
if i > 1:
result.append(strpower(i))
if result[0] == '+':
result = ''.join(result[1:])
else:
result = ''.join(result)
if add_spaces:
result = result.replace('+', ' + ').replace('-', ' - ')
return result
if __name__ == '__main__':
pol = (1, 4, 6, 1, -24, 2)
print('Stringifying', pol, 'as', strpol(pol))
pol = (1, 3, 7, 21)
print('Found root', findruf(pol), 'for', strpol(pol))
print('About to factorize', strpol(pol), 'as follows:')
intfactorize(pol, show=True)
pol = (1, -5, 9, -7, 2)
print('About to factorize', strpol(pol), 'as follows:')
intfactorize(pol, show=True)
|
113912
|
from .evolution_strategy import EvolutionStrategy
from .genetic_algorithm import GeneticAlgorithm
from .local_search import LocalSearch
from .simulated_annealing import SimulatedAnnealing
|
113914
|
import sqlite3
class Sqlite(object):
def __init__(self, db_path):
self.db_path = db_path
def __enter__(self):
self.connect = sqlite3.connect(self.db_path)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connect.close()
def execute(self, *args, **kwargs):
with self.connect:
return self.connect.execute(*args, **kwargs)
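# Usage sketch (added; not part of the original class):
#
#     with Sqlite(':memory:') as db:
#         db.execute('CREATE TABLE t (x INTEGER)')
#         db.execute('INSERT INTO t VALUES (?)', (1,))
#
# __enter__ opens the connection and __exit__ closes it; each execute()
# runs inside a transaction because of the inner `with self.connect:` block.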
|
113946
|
from __future__ import annotations
import typing
import toolstr
from . import cpmm_spec
from . import cpmm_trade
def print_pool_summary(
x_reserves: int | float,
y_reserves: int | float,
lp_total_supply: int | float | None = None,
x_name: str | None = None,
y_name: str | None = None,
fee_rate: float | None = None,
indent: int | str | None = None,
depths: typing.Sequence[float] | None = None,
) -> None:
# add in +/- 2% depth
if x_name is None:
x_name = 'X'
if y_name is None:
y_name = 'Y'
indent = toolstr.indent_to_str(indent)
print(
indent + '- ' + x_name + ' reserves:',
toolstr.format(x_reserves, order_of_magnitude=True),
)
print(
indent + '- ' + y_name + ' reserves:',
toolstr.format(y_reserves, order_of_magnitude=True),
)
if lp_total_supply is not None:
print(
indent + '- total lp tokens:',
toolstr.format(lp_total_supply, order_of_magnitude=True),
)
print(
indent + '-',
x_name,
'/',
y_name + ' price:',
# '%.6f' % (x_reserves / y_reserves),
toolstr.format(x_reserves / y_reserves),
)
print(
indent + '-',
y_name,
'/',
x_name + ' price:',
# '%.6f' % (y_reserves / x_reserves),
toolstr.format(y_reserves / x_reserves),
)
print(indent + '-', x_name + ' / ' + y_name, 'liquidity depth:')
print()
print_liquidity_depth(
x_reserves=x_reserves,
y_reserves=y_reserves,
x_name=x_name,
y_name=y_name,
fee_rate=fee_rate,
indent=indent,
depths=depths,
)
print()
print(indent + '-', y_name + ' / ' + x_name, 'liquidity depth:')
print()
print_liquidity_depth(
x_reserves=y_reserves,
y_reserves=x_reserves,
x_name=y_name,
y_name=x_name,
fee_rate=fee_rate,
indent=indent,
depths=depths,
)
print()
def print_liquidity_depth(
x_reserves: int | float,
y_reserves: int | float,
depths: typing.Sequence[float] | None = None,
x_name: str | None = None,
y_name: str | None = None,
fee_rate: float | None = None,
indent: int | str | None = None,
) -> None:
if x_name is None:
x_name = 'X'
if y_name is None:
y_name = 'Y'
if depths is None:
depths = [-0.10, -0.05, -0.02, 0, 0.02, 0.05, 0.10]
format_str = '{:,.2f}'
current_x_per_y = x_reserves / y_reserves
labels = ['depth', 'new price', x_name, y_name]
trades = []
for depth in depths:
trade = []
# x per y
if depth == 0:
trade.append(' 0%')
else:
trade.append('%+.0f' % (depth * 100) + '%')
# new price
            # cast through the reserve type so Decimal pools stay Decimal
            new_x_per_y = type(current_x_per_y)(1 + depth) * current_x_per_y
trade.append(toolstr.format(new_x_per_y))
trade[-1] = trade[-1] + ' ' + x_name + ' / ' + y_name
# buys and sells
result = cpmm_trade.trade_to_price(
x_reserves=x_reserves,
y_reserves=y_reserves,
new_x_per_y=new_x_per_y,
fee_rate=fee_rate,
)
if depth != 0 and result['x_sold'] > 0:
trade.append(
'sell '
+ toolstr.format(result['x_sold'], order_of_magnitude=True)
)
trade.append(
' buy '
+ toolstr.format(result['y_bought'], order_of_magnitude=True)
)
elif depth != 0 and result['x_sold'] < 0:
trade.append(
' buy '
+ toolstr.format(result['x_bought'], order_of_magnitude=True)
)
trade.append(
'sell '
+ toolstr.format(result['y_sold'], order_of_magnitude=True)
)
else:
trade.append(' 0.00')
trade.append(' 0.00')
trades.append(trade)
indent = ' ' * 4 + toolstr.indent_to_str(indent)
toolstr.print_table(rows=trades, labels=labels, indent=indent)
def print_trade_summary(
x_name: str | None = None,
y_name: str | None = None,
x_holdings_before: int | float | None = None,
y_holdings_before: int | float | None = None,
indent: int | str | None = None,
**trade_kwargs: typing.Any,
) -> None:
if x_name is None:
x_name = 'X'
if y_name is None:
y_name = 'Y'
trade_summary = summarize_trade(**trade_kwargs)
x_sold = trade_summary['trade_results']['x_sold']
y_sold = trade_summary['trade_results']['y_sold']
indent = toolstr.indent_to_str(indent=indent)
if x_sold == 0:
print(indent + 'trade size of 0')
elif x_sold > 0:
print(indent + '-', x_name, 'sold:', toolstr.format(x_sold))
print(indent + '-', y_name, 'bought:', toolstr.format(-y_sold))
fees = trade_summary['x_fees']
print(indent + '- fees:', toolstr.format(fees), x_name)
else:
print(indent + '-', x_name, 'bought:', toolstr.format(-x_sold))
print(indent + '-', y_name, 'sold:', toolstr.format(y_sold))
fees = trade_summary['y_fees']
print(indent + '- fees:', toolstr.format(fees), y_name)
print(indent + '- prices:')
labels = [
'',
'P_mean',
'P_start',
'P_end',
'mean slippage',
'end slippage',
]
rows = []
row = [
x_name + ' / ' + y_name,
toolstr.format(trade_summary['mean_x_per_y']),
toolstr.format(trade_summary['x_per_y_start']),
toolstr.format(trade_summary['x_per_y_end']),
toolstr.format(trade_summary['mean_slippage_x_per_y']),
toolstr.format(trade_summary['end_slippage_x_per_y']),
]
rows.append(row)
row = [
y_name + ' / ' + x_name,
toolstr.format(trade_summary['mean_y_per_x']),
toolstr.format(trade_summary['y_per_x_start']),
toolstr.format(trade_summary['y_per_x_end']),
toolstr.format(trade_summary['mean_slippage_y_per_x']),
toolstr.format(trade_summary['end_slippage_y_per_x']),
]
rows.append(row)
print()
toolstr.print_table(
rows=rows,
labels=labels,
indent=' ' + indent,
column_gap=1,
)
print()
print(indent + '- pool reserve sizes:')
print()
labels = ['', 'before', 'after', 'change']
new_x_reserves = trade_summary['trade_results']['new_pool']['x_reserves']
new_y_reserves = trade_summary['trade_results']['new_pool']['y_reserves']
x_change = new_x_reserves / trade_kwargs['x_reserves'] - 1
y_change = new_y_reserves / trade_kwargs['y_reserves'] - 1
rows = [
[
x_name,
toolstr.format(trade_kwargs['x_reserves']),
toolstr.format(new_x_reserves),
toolstr.format(x_change),
],
[
y_name,
toolstr.format(trade_kwargs['y_reserves']),
toolstr.format(new_y_reserves),
toolstr.format(y_change),
],
]
toolstr.print_table(
rows=rows,
labels=labels,
indent=' ' + indent,
column_gap=1,
format={'decimals': 2, 'trailing_zeros': True},
)
def summarize_trade(**trade_kwargs: typing.Any) -> cpmm_spec.TradeSummary:
"""compute y_bought and new pool values when trading x_sold"""
# compute trade
results = cpmm_trade.trade(**trade_kwargs)
# compute mean price
mean_x_per_y = results['x_sold'] / results['y_bought']
mean_y_per_x = 1 / mean_x_per_y
# compute slippage
x_per_y_start = trade_kwargs['x_reserves'] / trade_kwargs['y_reserves']
y_per_x_start = 1 / x_per_y_start
x_per_y_end = (
results['new_pool']['x_reserves'] / results['new_pool']['y_reserves']
)
y_per_x_end = 1 / x_per_y_end
end_slippage_x_per_y = x_per_y_end / x_per_y_start - 1
end_slippage_y_per_x = y_per_x_end / y_per_x_start - 1
mean_slippage_x_per_y = mean_x_per_y / x_per_y_start - 1
mean_slippage_y_per_x = mean_y_per_x / y_per_x_start - 1
# compute fees
if trade_kwargs.get('fee_rate') is None:
trade_kwargs['fee_rate'] = 0.003
if results['x_sold'] > 0:
x_fees = results['x_sold'] * trade_kwargs['fee_rate']
y_fees = 0
else:
y_fees = results['y_sold'] * trade_kwargs['fee_rate']
x_fees = 0
return {
'end_slippage_x_per_y': end_slippage_x_per_y,
'end_slippage_y_per_x': end_slippage_y_per_x,
'mean_slippage_x_per_y': mean_slippage_x_per_y,
'mean_slippage_y_per_x': mean_slippage_y_per_x,
'mean_x_per_y': mean_x_per_y,
'mean_y_per_x': mean_y_per_x,
'x_per_y_start': x_per_y_start,
'y_per_x_start': y_per_x_start,
'x_per_y_end': x_per_y_end,
'y_per_x_end': y_per_x_end,
'x_fees': x_fees,
'y_fees': y_fees,
'trade_results': results,
}
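# Example call (added; the reserve numbers are hypothetical and the toolstr /
# cpmm_trade modules imported above must be available):
#
#     print_pool_summary(
#         x_reserves=10_000_000, y_reserves=5_000,
#         x_name='USDC', y_name='ETH', fee_rate=0.003,
#     )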
|
113954
|
from datetime import datetime, timedelta
from django.http import JsonResponse
from django.views.generic.base import TemplateView
from deploy.models import DeployPool
from envx.models import Env
from django.db.models import Count
def get_deploy_count(request):
return_list = []
now = datetime.now()
    a_month = now - timedelta(days=60)  # note: despite the name, a 60-day window
select = {'day': 'date(add_date)'}
env = request.GET.get('env', 'All')
if env != 'All':
env_id = Env.objects.get(name=env).id
a_month_deploy_qs = DeployPool.objects. \
filter(env_name_id=env_id). \
filter(add_date__range=(a_month, now)). \
extra(select=select).\
values('day').\
distinct().\
order_by("day").\
annotate(number=Count('add_date'))
else:
a_month_deploy_qs = DeployPool.objects. \
filter(add_date__range=(a_month, now)). \
extra(select=select). \
values('day'). \
distinct(). \
order_by("day"). \
annotate(number=Count('add_date'))
for item in a_month_deploy_qs:
item_dict = {}
item_key = item['day'].strftime('%m-%d')
item_dict[item_key] = item['number']
return_list.append(item_dict)
return JsonResponse(return_list, safe=False)
def get_app_deploy_count(request):
return_list = []
app_deploy_qs = DeployPool.objects.\
values('app_name__name'). \
distinct(). \
annotate(number=Count('app_name')).order_by('-number')[:10]
for item in app_deploy_qs:
item_dict = {}
item_key = item['app_name__name']
item_dict[item_key] = item['number']
return_list.append(item_dict)
return JsonResponse(return_list, safe=False)
class DeployCountView(TemplateView):
template_name = "deploy/deploy_count.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['current_page_name'] = "发布数据"
return context
class AppDeployCountView(TemplateView):
template_name = "deploy/app_deploy_count.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['current_page_name'] = "应用统计"
return context
|
113976
|
from recon.core.module import BaseModule
from datetime import datetime
from urlparse import parse_qs
class Module(BaseModule):
meta = {
'name': 'Twitter Geolocation Search',
'author': '<NAME> (@LaNMaSteR53)',
'description': 'Searches Twitter for media in the specified proximity to a location.',
'required_keys': ['twitter_api', 'twitter_secret'],
'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL',
'options': (
('radius', 1, True, 'radius in kilometers'),
),
}
def module_run(self, points):
rad = self.options['radius']
url = 'https://api.twitter.com/1.1/search/tweets.json'
for point in points:
self.heading(point, level=0)
self.output('Collecting data for an unknown number of tweets...')
results = self.search_twitter_api({'q':'', 'geocode': '%s,%fkm' % (point, rad)})
for tweet in results:
if not tweet['geo']:
continue
tweet_id = tweet['id_str']
source = 'Twitter'
screen_name = tweet['user']['screen_name']
profile_name = tweet['user']['name']
profile_url = 'https://twitter.com/%s' % screen_name
media_url = 'https://twitter.com/%s/statuses/%s' % (screen_name, tweet_id)
thumb_url = tweet['user']['profile_image_url_https']
message = tweet['text']
latitude = tweet['geo']['coordinates'][0]
longitude = tweet['geo']['coordinates'][1]
time = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
self.add_pushpins(source, screen_name, profile_name, profile_url, media_url, thumb_url, message, latitude, longitude, time)
self.verbose('%s tweets processed.' % (len(results)))
|
113988
|
from django.apps import AppConfig
class DatamartEndpointsConfig(AppConfig):
name = 'datamart_endpoints'
|
114012
|
from django.contrib import admin
from product.modules.downloadable.models import DownloadableProduct, DownloadLink
admin.site.register(DownloadableProduct)
admin.site.register(DownloadLink)
|
114123
|
import argparse
import ast
import codecs
import encodings
import io
import sys
import tokenize
import warnings
from typing import Match
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
import tokenize_rt
def _ast_parse(contents_text: str) -> ast.Module:
# intentionally ignore warnings, we might be fixing warning-ridden syntax
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return ast.parse(contents_text.encode())
def _ast_to_offset(node: ast.expr) -> tokenize_rt.Offset:
return tokenize_rt.Offset(node.lineno, node.col_offset)
class Visitor(ast.NodeVisitor):
def __init__(self) -> None:
self.offsets: Set[tokenize_rt.Offset] = set()
def visit_AnnAssign(self, node: ast.AnnAssign) -> None:
self.offsets.add(_ast_to_offset(node.annotation))
self.generic_visit(node)
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
args = []
if hasattr(node.args, 'posonlyargs'): # pragma: no cover (py38+)
args.extend(node.args.posonlyargs)
args.extend(node.args.args)
if node.args.vararg is not None:
args.append(node.args.vararg)
args.extend(node.args.kwonlyargs)
if node.args.kwarg is not None:
args.append(node.args.kwarg)
for arg in args:
if arg.annotation is not None:
self.offsets.add(_ast_to_offset(arg.annotation))
if node.returns is not None:
self.offsets.add(_ast_to_offset(node.returns))
self.generic_visit(node)
utf_8 = encodings.search_function('utf8')
def _new_coding_cookie(match: Match[str]) -> str:
s = match[0]
i = 0
while s[i].isspace():
i += 1
ret = f'{s[:i]}# {"*" * (len(s) - 2 - i)}'
assert len(ret) == len(s), (len(ret), len(s))
return ret
def decode(b: bytes, errors: str = 'strict') -> Tuple[str, int]:
u, length = utf_8.decode(b, errors)
# replace encoding cookie so there isn't a recursion problem
lines = u.splitlines(True)
for idx in (0, 1):
if idx >= len(lines):
break
lines[idx] = tokenize.cookie_re.sub(_new_coding_cookie, lines[idx])
u = ''.join(lines)
visitor = Visitor()
visitor.visit(_ast_parse(u))
tokens = tokenize_rt.src_to_tokens(u)
for i, token in tokenize_rt.reversed_enumerate(tokens):
if token.offset in visitor.offsets:
# look forward for a `:`, `,`, `=`, ')'
depth = 0
j = i + 1
while depth or tokens[j].src not in {':', ',', '=', ')', '\n'}:
if tokens[j].src in {'(', '{', '['}:
depth += 1
elif tokens[j].src in {')', '}', ']'}:
depth -= 1
j += 1
j -= 1
# look backward to delete whitespace / comments / etc.
while tokens[j].name in tokenize_rt.NON_CODING_TOKENS:
j -= 1
quoted = repr(tokenize_rt.tokens_to_src(tokens[i:j + 1]))
tokens[i:j + 1] = [tokenize_rt.Token('STRING', quoted)]
return tokenize_rt.tokens_to_src(tokens), length
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, input, errors, final): # pragma: no cover
if final:
return decode(input, errors)
else:
return '', 0
class StreamReader(utf_8.streamreader):
"""decode is deferred to support better error messages"""
_stream = None
_decoded = False
@property
def stream(self):
if not self._decoded:
text, _ = decode(self._stream.read())
self._stream = io.BytesIO(text.encode('UTF-8'))
self._decoded = True
return self._stream
@stream.setter
def stream(self, stream):
self._stream = stream
self._decoded = False
# codec api
codec_map = {
name: codecs.CodecInfo(
name=name,
encode=utf_8.encode,
decode=decode,
incrementalencoder=utf_8.incrementalencoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=utf_8.streamwriter,
)
for name in ('future-annotations', 'future_annotations')
}
def register() -> None: # pragma: no cover
codecs.register(codec_map.get)
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(description='Prints transformed source.')
parser.add_argument('filename')
args = parser.parse_args(argv)
with open(args.filename, 'rb') as f:
text, _ = decode(f.read())
getattr(sys.stdout, 'buffer', sys.stdout).write(text.encode('UTF-8'))
return 0
if __name__ == '__main__':
raise SystemExit(main())
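# Example (illustrative, added): a source file that begins with the cookie
#
#     # -*- coding: future-annotations -*-
#
# is rewritten at decode time so every annotation found by Visitor becomes a
# string literal, approximating PEP 563 semantics on older Pythons.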
|
114143
|
from keras.layers import Input, Dense, Flatten, Concatenate, Conv2D, Dropout
from keras.losses import mean_squared_error
from keras.models import Model, clone_model, load_model
from keras.optimizers import SGD, Adam, RMSprop
import numpy as np
class RandomAgent(object):
def __init__(self, color=1):
self.color = color
def predict(self, board_layer):
return np.random.randint(-5, 5) / 5
def select_move(self, board):
moves = [x for x in board.generate_legal_moves()]
return np.random.choice(moves)
class GreedyAgent(object):
def __init__(self, color=-1):
self.color = color
def predict(self, layer_board, noise=True):
layer_board1 = layer_board[0, :, :, :]
pawns = 1 * np.sum(layer_board1[0, :, :])
rooks = 5 * np.sum(layer_board1[1, :, :])
minor = 3 * np.sum(layer_board1[2:4, :, :])
queen = 9 * np.sum(layer_board1[4, :, :])
maxscore = 40
material = pawns + rooks + minor + queen
board_value = self.color * material / maxscore
        if noise:
            # tiny noise breaks ties between equal-material positions
            return board_value + np.random.randn() / 1e3
        return board_value
class Agent(object):
def __init__(self, lr=0.003, network='big'):
self.optimizer = RMSprop(lr=lr)
self.model = Model()
self.proportional_error = False
if network == 'simple':
self.init_simple_network()
elif network == 'super_simple':
self.init_super_simple_network()
elif network == 'alt':
self.init_altnet()
elif network == 'big':
self.init_bignet()
else:
self.init_network()
def fix_model(self):
"""
The fixed model is the model used for bootstrapping
Returns:
"""
self.fixed_model = clone_model(self.model)
self.fixed_model.compile(optimizer=self.optimizer, loss='mse', metrics=['mae'])
self.fixed_model.set_weights(self.model.get_weights())
def init_network(self):
layer_state = Input(shape=(8, 8, 8), name='state')
openfile = Conv2D(3, (8, 1), padding='valid', activation='relu', name='fileconv')(layer_state) # 3,8,1
openrank = Conv2D(3, (1, 8), padding='valid', activation='relu', name='rankconv')(layer_state) # 3,1,8
quarters = Conv2D(3, (4, 4), padding='valid', activation='relu', name='quarterconv', strides=(4, 4))(
layer_state) # 3,2,2
large = Conv2D(8, (6, 6), padding='valid', activation='relu', name='largeconv')(layer_state) # 8,2,2
board1 = Conv2D(16, (3, 3), padding='valid', activation='relu', name='board1')(layer_state) # 16,6,6
board2 = Conv2D(20, (3, 3), padding='valid', activation='relu', name='board2')(board1) # 20,4,4
board3 = Conv2D(24, (3, 3), padding='valid', activation='relu', name='board3')(board2) # 24,2,2
flat_file = Flatten()(openfile)
flat_rank = Flatten()(openrank)
flat_quarters = Flatten()(quarters)
flat_large = Flatten()(large)
flat_board = Flatten()(board1)
flat_board3 = Flatten()(board3)
dense1 = Concatenate(name='dense_bass')(
[flat_file, flat_rank, flat_quarters, flat_large, flat_board, flat_board3])
dropout1 = Dropout(rate=0.1)(dense1)
dense2 = Dense(128, activation='sigmoid')(dropout1)
dense3 = Dense(64, activation='sigmoid')(dense2)
dropout3 = Dropout(rate=0.1)(dense3, training=True)
dense4 = Dense(32, activation='sigmoid')(dropout3)
dropout4 = Dropout(rate=0.1)(dense4, training=True)
value_head = Dense(1)(dropout4)
self.model = Model(inputs=layer_state,
outputs=[value_head])
self.model.compile(optimizer=self.optimizer,
loss=[mean_squared_error]
)
def init_simple_network(self):
layer_state = Input(shape=(8, 8, 8), name='state')
conv1 = Conv2D(8, (3, 3), activation='sigmoid')(layer_state)
conv2 = Conv2D(6, (3, 3), activation='sigmoid')(conv1)
conv3 = Conv2D(4, (3, 3), activation='sigmoid')(conv2)
flat4 = Flatten()(conv3)
dense5 = Dense(24, activation='sigmoid')(flat4)
dense6 = Dense(8, activation='sigmoid')(dense5)
value_head = Dense(1)(dense6)
self.model = Model(inputs=layer_state,
outputs=value_head)
self.model.compile(optimizer=self.optimizer,
loss=mean_squared_error
)
def init_super_simple_network(self):
layer_state = Input(shape=(8, 8, 8), name='state')
conv1 = Conv2D(8, (3, 3), activation='sigmoid')(layer_state)
flat4 = Flatten()(conv1)
dense5 = Dense(10, activation='sigmoid')(flat4)
value_head = Dense(1)(dense5)
self.model = Model(inputs=layer_state,
outputs=value_head)
self.model.compile(optimizer=self.optimizer,
loss=mean_squared_error
)
def init_altnet(self):
layer_state = Input(shape=(8, 8, 8), name='state')
conv1 = Conv2D(6, (1, 1), activation='sigmoid')(layer_state)
flat2 = Flatten()(conv1)
dense3 = Dense(128, activation='sigmoid')(flat2)
value_head = Dense(1)(dense3)
self.model = Model(inputs=layer_state,
outputs=value_head)
self.model.compile(optimizer=self.optimizer,
loss=mean_squared_error
)
def init_bignet(self):
layer_state = Input(shape=(8, 8, 8), name='state')
conv_xs = Conv2D(4, (1, 1), activation='relu')(layer_state)
conv_s = Conv2D(8, (2, 2), strides=(1, 1), activation='relu')(layer_state)
conv_m = Conv2D(12, (3, 3), strides=(2, 2), activation='relu')(layer_state)
conv_l = Conv2D(16, (4, 4), strides=(2, 2), activation='relu')(layer_state)
conv_xl = Conv2D(20, (8, 8), activation='relu')(layer_state)
conv_rank = Conv2D(3, (1, 8), activation='relu')(layer_state)
conv_file = Conv2D(3, (8, 1), activation='relu')(layer_state)
f_xs = Flatten()(conv_xs)
f_s = Flatten()(conv_s)
f_m = Flatten()(conv_m)
f_l = Flatten()(conv_l)
f_xl = Flatten()(conv_xl)
f_r = Flatten()(conv_rank)
f_f = Flatten()(conv_file)
dense1 = Concatenate(name='dense_bass')([f_xs, f_s, f_m, f_l, f_xl, f_r, f_f])
dense2 = Dense(256, activation='sigmoid')(dense1)
dense3 = Dense(128, activation='sigmoid')(dense2)
dense4 = Dense(56, activation='sigmoid')(dense3)
dense5 = Dense(64, activation='sigmoid')(dense4)
dense6 = Dense(32, activation='sigmoid')(dense5)
value_head = Dense(1)(dense6)
self.model = Model(inputs=layer_state,
outputs=value_head)
self.model.compile(optimizer=self.optimizer,
loss=mean_squared_error
)
def predict_distribution(self, states, batch_size=256):
"""
:param states: list of distinct states
:param n: each state is predicted n times
:return:
"""
predictions_per_state = int(batch_size / len(states))
state_batch = []
for state in states:
state_batch = state_batch + [state for x in range(predictions_per_state)]
state_batch = np.stack(state_batch, axis=0)
predictions = self.model.predict(state_batch)
predictions = predictions.reshape(len(states), predictions_per_state)
mean_pred = np.mean(predictions, axis=1)
std_pred = np.std(predictions, axis=1)
upper_bound = mean_pred + 2 * std_pred
return mean_pred, std_pred, upper_bound
def predict(self, board_layer):
return self.model.predict(board_layer)
def TD_update(self, states, rewards, sucstates, episode_active, gamma=0.9):
"""
Update the SARSA-network using samples from the minibatch
Args:
minibatch: list
The minibatch contains the states, moves, rewards and new states.
Returns:
td_errors: np.array
array of temporal difference errors
"""
suc_state_values = self.fixed_model.predict(sucstates)
V_target = np.array(rewards) + np.array(episode_active) * gamma * np.squeeze(suc_state_values)
# Perform a step of minibatch Gradient Descent.
self.model.fit(x=states, y=V_target, epochs=1, verbose=0)
V_state = self.model.predict(states) # the expected future returns
td_errors = V_target - np.squeeze(V_state)
return td_errors
def MC_update(self, states, returns):
"""
Update network using a monte carlo playout
Args:
states: starting states
returns: discounted future rewards
Returns:
td_errors: np.array
array of temporal difference errors
"""
        self.model.fit(x=states, y=returns, epochs=1, verbose=0)  # epochs=0 would skip the update entirely
V_state = np.squeeze(self.model.predict(states))
td_errors = returns - V_state
return td_errors
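# Usage sketch (added; assumes the python-chess package, whose Board provides
# the generate_legal_moves() used by select_move above):
#
#     import chess
#     agent = RandomAgent()
#     move = agent.select_move(chess.Board())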
|
114154
|
from setuptools import setup
import re
from io import open
# Get the current version
version = None
with open('plexiglas/__init__.py') as handle:
for line in handle.readlines():
if line.startswith('__version__'):
version = re.findall("'([^']+?)'", line)[0]
break
if version is None:
print('Unable to find package version')
exit(1)
# Get README.md contents
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
readme = f.read()
# Get requirements
requirements = []
dependency_links = []
with open('requirements.txt') as handle:
for line in handle.readlines():
if not line.startswith('#') and not line.startswith('-i '):
if '://' in line:
link = line.strip()
dependency_links.append(link)
                requirements.append(re.findall(r"#egg=(.*?)-[\d.]+", line)[0])
else:
requirements.append(line.strip())
setup(
name='plexiglas',
version=version,
packages=['plexiglas'],
package_dir={'plexiglas': 'plexiglas'},
url='https://github.com/andrey-yantsen/plexiglass',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Tool for downloading videos from your Plex server to an external HDD',
include_package_data=True,
long_description=readme,
long_description_content_type='text/markdown',
install_requires=requirements,
dependency_links=dependency_links,
entry_points={
'console_scripts': ['plexiglas = plexiglas.cli:main'],
'plexiglas.plugins': [
'mobile_sync = plexiglas.mobile_sync',
'simple_sync = plexiglas.simple_sync',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
)
|
114170
|
class Solution:
def nextGreaterElement(self, n: int) -> int:
num = list(str(n))
i = len(num) - 2
while i >= 0:
if num[i] < num[i + 1]:
break
i -= 1
if i == -1:
return -1
j = len(num) - 1
while num[j] <= num[i]:
j -= 1
num[i], num[j] = num[j], num[i]
result = int(''.join(num[:i + 1] + num[i + 1:][::-1]))
return result if result < (1 << 31) else -1
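# Added worked examples (illustration only):
if __name__ == "__main__":
    s = Solution()
    assert s.nextGreaterElement(12) == 21  # swap the last two digits
    assert s.nextGreaterElement(21) == -1  # digits already in descending order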
|
114236
|
import os
import pytest
from jina import Document
from jina.optimizers import FlowOptimizer, EvaluationCallback
from jina.optimizers.flow_runner import SingleFlowRunner
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def config(tmpdir):
os.environ['JINA_OPTIMIZER_WORKSPACE_DIR'] = str(tmpdir)
os.environ['JINA_OPTIMIZER_PARAMETER_FILE'] = os.path.join(cur_dir, 'parameter.yml')
os.environ['JINA_OPTIMIZER_DATA_FILE'] = os.path.join(cur_dir, 'data.jsonlines')
yield
del os.environ['JINA_OPTIMIZER_WORKSPACE_DIR']
del os.environ['JINA_OPTIMIZER_PARAMETER_FILE']
del os.environ['JINA_OPTIMIZER_DATA_FILE']
def document_generator_option1(num_doc):
for _ in range(num_doc):
doc = Document(content='DummyCrafterOption1')
groundtruth_doc = Document(content='hello')
yield doc, groundtruth_doc
def document_generator_option2(num_doc):
for _ in range(num_doc):
doc = Document(content='DummyCrafterOption2')
groundtruth_doc = Document(content='hello')
yield doc, groundtruth_doc
def test_optimizer_single_flow_option1(tmpdir, config):
eval_flow_runner = SingleFlowRunner(
flow_yaml=os.path.join(cur_dir, 'flow_pod_choice.yml'),
documents=document_generator_option1(10),
request_size=1,
execution_endpoint='search',
)
opt = FlowOptimizer(
flow_runner=eval_flow_runner,
parameter_yaml=os.path.join(cur_dir, 'parameter_pod_choice.yml'),
evaluation_callback=EvaluationCallback(),
workspace_base_dir=str(tmpdir),
n_trials=10,
)
result = opt.optimize_flow()
assert (
result.best_parameters['JINA_DUMMYCRAFTER_CHOICE'] == 'pods/craft_option1.yml'
)
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM1'] == 0
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM2'] == 1
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM3'] == 1
def test_optimizer_single_flow_option2(tmpdir, config):
eval_flow_runner = SingleFlowRunner(
flow_yaml=os.path.join(cur_dir, 'flow_pod_choice.yml'),
documents=document_generator_option2(10),
request_size=1,
execution_endpoint='search',
)
opt = FlowOptimizer(
flow_runner=eval_flow_runner,
parameter_yaml=os.path.join(cur_dir, 'parameter_pod_choice.yml'),
evaluation_callback=EvaluationCallback(),
workspace_base_dir=str(tmpdir),
n_trials=20,
)
result = opt.optimize_flow()
assert (
result.best_parameters['JINA_DUMMYCRAFTER_CHOICE'] == 'pods/craft_option2.yml'
)
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM4'] == 0
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM5'] == 1
assert result.best_parameters['JINA_DUMMYCRAFTER_PARAM6'] == 1
|
114240
|
import glob
from queue import Queue
from threading import Thread
import pandas as pd
import torch
from tqdm import tqdm
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
from training.model import BaselineModel
class ParallelExperimentRunner:
def __init__(self, root, file_parse, meter, num_workers, out, model_class=BaselineModel, devices=None, random=False):
if devices is None:
devices = ['cpu', 'cuda:0']
model_list = glob.glob(os.path.join(root, '*.pt'))
self.model_queue = Queue()
for model in model_list:
self.model_queue.put((model, file_parse(model)))
self.len = len(model_list)
self.meter = meter
self.num_workers = num_workers
self.out = out
self.devices = devices
self.random = random
self.model_class = model_class
def make_worker(self, progress_bar, sink, device):
def worker():
while True:
if self.model_queue.empty():
break
model_file, metadata = self.model_queue.get()
model = self.model_class(metadata['n_bn'], metadata['d_vvs'], metadata['n_ch']).to(device)
if not self.random:
state_dict = torch.load(model_file, map_location=device)
try:
model.load_conv_dict(state_dict)
                    except Exception:
                        # fall back to loading the full state dict
                        model.load_state_dict(state_dict)
# torch.save(model.conv_dict(), model_file)
for param in model.parameters():
param.requires_grad = False
res = self.meter(model, metadata, device)
sink.put(res)
self.model_queue.task_done()
progress_bar.update()
return worker
def make_aggregator(self, sink):
def worker():
while True:
result = sink.get()
worker.frames.append(result)
sink.task_done()
worker.frames = []
return worker
def run(self):
bar = tqdm(total=self.len)
sink = Queue()
for i in range(self.num_workers):
t = Thread(target=self.make_worker(bar, sink, self.devices[i % len(self.devices)]))
t.daemon = True
t.start()
if self.num_workers == 0:
self.make_worker(bar, sink, self.devices[0 % len(self.devices)])()
aggregator = self.make_aggregator(sink)
t = Thread(target=aggregator)
t.daemon = True
t.start()
self.model_queue.join()
sink.join()
frame = pd.concat(aggregator.frames, ignore_index=True)
frame.to_pickle(self.out)
if __name__ == "__main__":
# from rfdeviation import RFDeviation
from statistics.devalois import DeValois
from statistics.spatial_opponency import SpatialOpponency
# from orientation import RFOrientation
from training import BaselineModel
from training.model_imagenet import ImageNetModel
def file_parse(file):
v = file.split('.')[0].split('_')
return {'n_bn': int(v[1]), 'd_vvs': int(v[2]), 'rep': int(v[3]), 'n_ch': 3}
runner = ParallelExperimentRunner('../models/colour-mos', file_parse, SpatialOpponency(), 0, 'spatial-mos.pd', model_class=BaselineModel, devices=['cuda']) #0 to debug
runner.run()
|
114261
|
from os.path import join, abspath, dirname
from nose.tools import raises
from indra.statements import Complex, Phosphorylation
from indra.sources import hprd
test_dir = join(abspath(dirname(__file__)), 'hprd_tests_data')
id_file = join(test_dir, 'HPRD_ID_MAPPINGS.txt')
def test_process_complexes():
cplx_file = join(test_dir, 'PROTEIN_COMPLEXES.txt')
hp = hprd.process_flat_files(id_file, complexes_file=cplx_file)
assert isinstance(hp, hprd.HprdProcessor)
assert isinstance(hp.statements, list)
assert len(hp.statements) == 3
s0 = hp.statements[0]
assert isinstance(s0, Complex)
assert len(s0.members) == 3
assert set([ag.name for ag in s0.members]) == \
set(['ASCL1', 'TCF3', 'MEF2C'])
assert s0.members[0].db_refs == \
{'HGNC': '738', 'UP': 'P50553', 'EGID': '429',
'REFSEQ_PROT': 'NP_004307.2'}
assert s0.members[1].db_refs == \
{'HGNC': '11633', 'UP': 'P15923', 'EGID': '6929',
'REFSEQ_PROT': 'NP_003191.1'}
assert s0.members[2].db_refs == \
{'HGNC': '6996', 'UP': 'Q06413', 'EGID': '4208',
'REFSEQ_PROT': 'NP_002388.2'}
assert len(s0.evidence) == 2
assert s0.evidence[0].pmid == '8900141'
assert s0.evidence[0].source_api == 'hprd'
assert s0.evidence[0].annotations['evidence'] == ['in vivo']
assert s0.evidence[0].source_id == ('http://hprd.org/interactions?'
'hprd_id=00011&isoform_id=00011_1'
'&isoform_name=Isoform_1')
assert s0.evidence[1].pmid == '8948587'
def test_process_ptms():
ptm_file = join(test_dir, 'POST_TRANSLATIONAL_MODIFICATIONS.txt')
seq_file = join(test_dir, 'PROTEIN_SEQUENCES.txt')
hp = hprd.process_flat_files(id_file, ptm_file=ptm_file, seq_file=seq_file)
assert isinstance(hp, hprd.HprdProcessor)
assert isinstance(hp.statements, list)
assert len(hp.statements) == 13
s0 = hp.statements[0]
assert isinstance(s0, Phosphorylation)
assert s0.enz.name == 'MAPK1'
assert s0.enz.db_refs == {'UP': 'P28482', 'HGNC': '6871', 'EGID': '5594',
'REFSEQ_PROT': 'NP_002736.3'}
assert s0.sub.name == 'TCF3'
assert s0.sub.db_refs == {'UP': 'P15923', 'HGNC': '11633', 'EGID': '6929',
'REFSEQ_PROT': 'NP_003191.1'}
assert s0.residue == 'T'
assert s0.position == '355'
assert len(s0.evidence) == 1
assert s0.evidence[0].pmid == '14592976'
assert s0.evidence[0].source_api == 'hprd'
assert s0.evidence[0].annotations['evidence'] == ['in vivo']
assert s0.evidence[0].annotations['site_motif'] == \
{'motif': 'NFSSSPSTPVGSPQG', 'respos': 8,
'off_by_one': False}
def test_process_ppis():
ppi_file = join(test_dir, 'BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt')
hp = hprd.process_flat_files(id_file, ppi_file=ppi_file)
assert isinstance(hp, hprd.HprdProcessor)
assert isinstance(hp.statements, list)
assert len(hp.statements) == 5
s0 = hp.statements[0]
assert isinstance(s0, Complex)
assert len(s0.members) == 2
assert set([ag.name for ag in s0.members]) == set(['ITGA7', 'CHRNA1'])
assert s0.members[0].db_refs == \
{'HGNC': '6143', 'UP': 'Q13683', 'EGID': '3679',
'REFSEQ_PROT': 'NP_001138468.1'}
assert s0.members[1].db_refs == \
{'HGNC': '1955', 'UP': 'P02708', 'EGID': '1134',
'REFSEQ_PROT': 'NP_001034612.1'}
assert len(s0.evidence) == 1
assert s0.evidence[0].pmid == '10910772'
assert s0.evidence[0].source_api == 'hprd'
assert s0.evidence[0].annotations['evidence'] == ['in vivo']
assert s0.evidence[0].source_id == ('http://hprd.org/interactions?'
'hprd_id=02761&isoform_id=02761_1'
'&isoform_name=Isoform_1')
@raises(ValueError)
def test_process_ptms_no_seq():
ptm_file = join(test_dir, 'POST_TRANSLATIONAL_MODIFICATIONS.txt')
hp = hprd.process_flat_files(id_file, ptm_file=ptm_file)
|
114324
|
word="I'm a boby, I'm a girl. When it is true, it is ture. that are cats, the red is red."
word = word.replace('.','').replace(',','')
li = word.split()
print(li)
# Method 1: seed a dict with zeros, then tally by scanning the list
key = set(li)
value = [0 for i in range(len(key))]
dic = {k:v for k,v in zip(key,value)}
print(dic)
for i in dic:
    for j in li:
        if i == j:
            dic[i] += 1
print(dic)
# Method 2: count whole-word occurrences in the list (word.count(i) would
# count substring occurrences in the raw string, which over-counts)
s = set(li)
for i in s:
    count = li.count(i)
    print(i, 'occurrences:', count)
# Method 3: dict.get with a default value (avoid shadowing the dict builtin)
counts = {}
for key in li:
    counts[key] = counts.get(key, 0) + 1
print(counts)
|
114334
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import utils as tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.layers.normalization import BatchNormalization
from tensorflow.python.ops import gen_control_flow_ops
class MovingFreeBatchNormalization(BatchNormalization):
def build(self, input_shape):
        # run the parent BatchNormalization.build first: it creates gamma, beta
        # and the moving statistics and validates the input shape
        super(MovingFreeBatchNormalization, self).build(input_shape)
        self.built = False
input_shape = tensor_shape.TensorShape(input_shape)
ndims = len(input_shape)
# Raise parameters of fp16 batch norm to fp32
if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16:
param_dtype = dtypes.float32
else:
param_dtype = self.dtype or dtypes.float32
axis_to_dim = {x: input_shape[x].value for x in self.axis}
if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
# Single axis batch norm (most common/default use-case)
param_shape = (list(axis_to_dim.values())[0],)
else:
# Parameter shape is the original shape but with 1 in all non-axis dims
param_shape = [axis_to_dim[i] if i in axis_to_dim
else 1 for i in range(ndims)]
if self.virtual_batch_size is not None:
# When using virtual batches, add an extra dim at index 1
param_shape.insert(1, 1)
for idx, x in enumerate(self.axis):
self.axis[idx] = x + 1 # Account for added dimension
try:
# Disable variable partitioning when creating the moving mean and variance
if hasattr(self, '_scope') and self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
# internal statistics fitted during a pre-inference step
self.mean = self.add_variable(
name='mean',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_mean_initializer,
trainable=False)
self.variance = self.add_variable(
name='variance',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_variance_initializer,
trainable=False)
self.n_updates = self.add_variable(
name='n_updates',
shape=[],
dtype=param_dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
self.built = True
def _assign_moving_average(self, variable, value, momentum):
with ops.name_scope(None, 'AssignMovingAvg',
[variable, value, momentum]) as scope:
decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
update_delta = (variable - value) * decay
return state_ops.assign_sub(variable, update_delta, name=scope)
def _update_statistics(self, variable, value, n_updates):
with ops.name_scope(None, 'UpdateStatistics',
[variable, value, n_updates]) as scope:
with ops.colocate_with(variable):
stat = variable * n_updates + value
stat /= n_updates + 1
return state_ops.assign(variable, stat, name=scope)
def _fused_batch_norm(self, inputs, training, use_moving_statistics):
"""Returns the output of fused batch norm."""
beta = self.beta if self.center else self._beta_const
gamma = self.gamma if self.scale else self._gamma_const
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
epsilon=self.epsilon,
data_format=self._data_format)
# use_moving_statistics==True use moving_mean and moving_variance, else mean and variance
mean = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_mean, lambda: self.mean)
variance = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_variance, lambda: self.variance)
# these variables will be used in _fused_batch_norm_inference(), thanks to python closure
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=mean,
variance=variance,
epsilon=self.epsilon,
is_training=False,
data_format=self._data_format)
output, mean, variance = tf_utils.smart_cond(training, _fused_batch_norm_training, _fused_batch_norm_inference)
        # if training == True: mean and variance returned are those of the current batch
        # elif training == False: mean and variance returned are (self.mean, self.variance) or
        # (self.moving_mean, self.moving_variance) depending on the value of use_moving_statistics
if not self._bessels_correction_test_only:
# Remove Bessel's correction to be consistent with non-fused batch norm.
# Note that the variance computed by fused batch norm is
# with Bessel's correction.
sample_size = math_ops.cast(
array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
variance *= factor
training_value = tf_utils.constant_value(training)
if training_value is None:
momentum = tf_utils.smart_cond(training,
lambda: self.momentum,
lambda: 1.0)
else:
momentum = ops.convert_to_tensor(self.momentum)
if training_value or training_value is None:
# if training, first create operations which update self.mean and self.variance
mean_update = self._update_statistics(self.mean, mean, self.n_updates)
variance_update = self._update_statistics(self.variance, variance, self.n_updates)
with ops.control_dependencies([mean_update, variance_update]):
update_n_updates = state_ops.assign_add(self.n_updates, 1., )
# add this combination of operations to a specific collection 'UPDATE_BN_OPS'
ops.add_to_collection('UPDATE_BN_OPS', update_n_updates)
# operations to reset bn statistics
reset_mean = state_ops.assign(self.mean, array_ops.zeros_like(self.mean))
reset_variance = state_ops.assign(self.variance, array_ops.zeros_like(self.variance))
reset_n_updates = state_ops.assign(self.n_updates, 0.)
with ops.control_dependencies([reset_mean, reset_variance, reset_n_updates]):
reset_bn = gen_control_flow_ops.no_op("ResetBatchNormStats")
ops.add_to_collection('RESET_BN_OPS', reset_bn)
# to keep the classical behavior of the Batch Norm !
# update moving averages and add operations to tf.GraphKeys.UPDATE_OPS
# these operation must be run when optimizing the network
moving_mean_update = self._assign_moving_average(self.moving_mean, mean, momentum)
moving_variance_update = self._assign_moving_average(self.moving_variance, variance, momentum)
self.add_update(moving_mean_update, inputs=True)
self.add_update(moving_variance_update, inputs=True)
return output
def call(self, inputs, training=None, use_moving_statistics=True):
"""
:param inputs: input features
:param training: boolean or boolean Tensor (with shape []) which determines the current training phase
        :param use_moving_statistics: boolean or boolean Tensor (with shape []) which selects the statistics to use.
            When training==True (or the Tensor value), statistics (mean and variance) come from the inputs.
            When training==False, if use_moving_statistics==True -> feed forward with moving statistics (updated
            with operations defined in GraphKeys.UPDATE_OPS),
            else (use_moving_statistics==False) -> feed forward with raw statistics (updated
            with operations from the collection 'UPDATE_BN_OPS';
            'RESET_BN_OPS' contains operations to reset these variables between inferences).
        """
in_eager_mode = context.executing_eagerly()
if self.virtual_batch_size is not None:
# Virtual batches (aka ghost batches) can be simulated by reshaping the
# Tensor and reusing the existing batch norm implementation
original_shape = [-1] + inputs.shape.as_list()[1:]
expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]
# Will cause errors if virtual_batch_size does not divide the batch size
inputs = array_ops.reshape(inputs, expanded_shape)
def undo_virtual_batching(outputs):
outputs = array_ops.reshape(outputs, original_shape)
return outputs
if self.fused:
outputs = self._fused_batch_norm(inputs, training=training, use_moving_statistics=use_moving_statistics)
if self.virtual_batch_size is not None:
# Currently never reaches here since fused_batch_norm does not support
# virtual batching
outputs = undo_virtual_batching(outputs)
return outputs
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.get_shape()
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.axis]
if self.virtual_batch_size is not None:
del reduction_axes[1] # Do not reduce along virtual batch dim
# Broadcasting only necessary for single-axis batch norm where the axis is
# not the last dimension
broadcast_shape = [1] * ndims
broadcast_shape[self.axis[0]] = input_shape[self.axis[0]].value
def _broadcast(v):
if (v is not None and
len(v.get_shape()) != ndims and
reduction_axes != list(range(ndims - 1))):
return array_ops.reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
def _compose_transforms(scale, offset, then_scale, then_offset):
if then_scale is not None:
scale *= then_scale
offset *= then_scale
if then_offset is not None:
offset += then_offset
return (scale, offset)
# Determine a boolean value for `training`: could be True, False, or None.
training_value = tf_utils.constant_value(training)
if training_value is not False:
if self.adjustment:
adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
# Adjust only during training.
adj_scale = tf_utils.smart_cond(training,
lambda: adj_scale,
lambda: array_ops.ones_like(adj_scale))
adj_bias = tf_utils.smart_cond(training,
lambda: adj_bias,
lambda: array_ops.zeros_like(adj_bias))
scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)
# Some of the computations here are not necessary when training==False
# but not a constant. However, this makes the code simpler.
keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
# mean and variance of the current batch
mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)
mean = tf_utils.smart_cond(training,
lambda: mean,
lambda: tf_utils.smart_cond(use_moving_statistics,
lambda: self.moving_mean,
lambda: self.mean))
variance = tf_utils.smart_cond(training,
lambda: variance,
lambda: tf_utils.smart_cond(use_moving_statistics,
lambda: self.moving_variance,
lambda: self.variance))
if self.renorm:
r, d, new_mean, new_variance = self._renorm_correction_and_moments(
mean, variance, training)
# When training, the normalized values (say, x) will be transformed as
# x * gamma + beta without renorm, and (x * r + d) * gamma + beta
# = x * (r * gamma) + (d * gamma + beta) with renorm.
r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
scale, offset = _compose_transforms(r, d, scale, offset)
else:
new_mean, new_variance = mean, variance
if self.virtual_batch_size is not None:
# This isn't strictly correct since in ghost batch norm, you are
# supposed to sequentially update the moving_mean and moving_variance
# with each sub-batch. However, since the moving statistics are only
# used during evaluation, it is more efficient to just update in one
# step and should not make a significant difference in the result.
new_mean = math_ops.reduce_mean(mean, axis=1, keepdims=True)
new_variance = math_ops.reduce_mean(variance, axis=1, keepdims=True)
def _do_update(var, value):
if in_eager_mode and not self.trainable:
return
return self._assign_moving_average(var, value, self.momentum)
moving_mean_update = tf_utils.smart_cond(
training,
lambda: _do_update(self.moving_mean, new_mean),
lambda: self.moving_mean)
moving_variance_update = tf_utils.smart_cond(
training,
lambda: _do_update(self.moving_variance, new_variance),
lambda: self.moving_variance)
if not context.executing_eagerly():
self.add_update(moving_mean_update, inputs=True)
self.add_update(moving_variance_update, inputs=True)
mean_update = self._update_statistics(self.mean, mean, self.n_updates)
variance_update = self._update_statistics(self.variance, variance, self.n_updates)
with ops.control_dependencies([mean_update, variance_update]):
# update n_updates only after updating self.mean and self.variance
update_n_updates = state_ops.assign_add(self.n_updates, 1.)
ops.add_to_collection('UPDATE_BN_OPS', update_n_updates)
reset_mean = state_ops.assign(self.mean, array_ops.zeros_like(self.mean))
reset_variance = state_ops.assign(self.variance, array_ops.zeros_like(self.variance))
reset_n_updates = state_ops.assign(self.n_updates, 0.)
with ops.control_dependencies([reset_mean, reset_variance, reset_n_updates]):
reset_bn = gen_control_flow_ops.no_op("ResetBatchNormStats")
            ops.add_to_collection('RESET_BN_OPS', reset_bn)
else:
# training == False
mean = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_mean, lambda: self.mean)
variance = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_variance, lambda: self.variance)
mean = math_ops.cast(mean, inputs.dtype)
variance = math_ops.cast(variance, inputs.dtype)
if offset is not None:
offset = math_ops.cast(offset, inputs.dtype)
outputs = nn.batch_normalization(inputs,
_broadcast(mean),
_broadcast(variance),
offset,
scale,
self.epsilon)
# If some components of the shape got lost due to adjustments, fix that.
outputs.set_shape(input_shape)
if self.virtual_batch_size is not None:
outputs = undo_virtual_batching(outputs)
return outputs
def moving_free_batch_normalization(inputs,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
training=False,
trainable=True,
use_moving_statistics=True,
name=None,
reuse=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
virtual_batch_size=None,
adjustment=None):
"""
:param inputs: input tensor
:param axis: An `int`, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
:param momentum: Momentum for the moving average.
:param epsilon: Small float added to variance to avoid dividing by zero.
:param center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
:param scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
:param beta_initializer: Initializer for the beta weight.
:param gamma_initializer: Initializer for the gamma weight.
:param moving_mean_initializer: Initializer for the moving mean and the raw mean (when not using the moving
statistics).
:param moving_variance_initializer: Initializer for the moving variance and the raw variance (when not using the
moving statistics).
:param beta_regularizer: Optional regularizer for the beta weight.
:param gamma_regularizer: Optional regularizer for the gamma weight.
:param beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
:param gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
:param training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(normalized with statistics of the current batch) or in inference mode
(normalized with moving statistics). **NOTE**: make sure to set this
parameter correctly, or else your training/inference will not work
properly.
:param trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
:param use_moving_statistics: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder).
        Whether to use moving statistics or computed statistics in inference mode (training==False).
:param name: String, the name of the layer.
:param reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
:param renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
:param renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
:param renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
:param fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.
:param virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
:param adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
:return: Output tensor, corresponding to the normalized neural activation
"""
layer = MovingFreeBatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
virtual_batch_size=virtual_batch_size,
adjustment=adjustment,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs, training=training, use_moving_statistics=use_moving_statistics)
# Aliases
MovingFreeBatchNorm = MovingFreeBatchNormalization
moving_free_batch_norm = moving_free_batch_normalization
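# Added usage sketch (hedged; the tensor and placeholder names below are
# hypothetical, not part of this module). Per the docstrings above, the
# intended protocol is:
#   1. train normally, running tf.GraphKeys.UPDATE_OPS to keep moving stats;
#   2. before evaluation, reset the raw statistics and re-estimate them over
#      a few batches via the 'UPDATE_BN_OPS' collection (training=True so
#      batch statistics flow into self.mean / self.variance);
#   3. run inference with training=False and use_moving_statistics=False.
# update_bn_ops = tf.get_collection('UPDATE_BN_OPS')
# reset_bn_ops = tf.get_collection('RESET_BN_OPS')
# sess.run(reset_bn_ops)
# for batch in estimation_batches:
#     sess.run(update_bn_ops, feed_dict={x: batch, training: True})
# preds = sess.run(outputs, feed_dict={x: test_batch, training: False,
#                                      use_moving_statistics: False})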
|
114337
|
from flask import Blueprint, render_template, redirect, request, g, session, make_response, flash
import libmfa
import libuser
import libsession
mod_user = Blueprint('mod_user', __name__, template_folder='templates')
@mod_user.route('/login', methods=['GET', 'POST'])
def do_login():
session.pop('username', None)
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
otp = request.form.get('otp')
username = libuser.login(username, password)
if not username:
flash("Invalid user or password");
return render_template('user.login.mfa.html')
if libmfa.mfa_is_enabled(username):
if not libmfa.mfa_validate(username, otp):
flash("Invalid OTP");
return render_template('user.login.mfa.html')
response = make_response(redirect('/'))
response = libsession.create(response=response, username=username)
return response
return render_template('user.login.mfa.html')
@mod_user.route('/create', methods=['GET', 'POST'])
def do_create():
session.pop('username', None)
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
        #email = request.form.get('email')
if not username or not password:
flash("Please, complete username and password")
return render_template('user.create.html')
libuser.create(username, password)
flash("User created. Please login.")
return redirect('/user/login')
#session['username'] = libuser.login(username, password)
#if session['username']:
# return redirect('/')
return render_template('user.create.html')
@mod_user.route('/chpasswd', methods=['GET', 'POST'])
def do_chpasswd():
if request.method == 'POST':
password = request.form.get('password')
password_again = request.form.get('password_again')
if password != password_again:
flash("The passwords don't match")
return render_template('user.chpasswd.html')
if not libuser.password_complexity(password):
flash("The password don't comply our complexity requirements")
return render_template('user.chpasswd.html')
libuser.password_change(g.session['username'], password) # = libuser.login(username, password)
flash("Password changed")
return render_template('user.chpasswd.html')
|
114339
|
from controller.ControllerError import *
class PlayUi:
def __init__(self, controller):
self.__controller = controller
def print_score(self):
print('current score: {}\nhighest score: {}'.format(self.__controller.get_current_score(), self.__controller.get_high_score()))
def run_play(self):
print('\nYou are in play mode')
try:
while True:
self.print_score()
word = self.__controller.get_word()
print('\nscrambled word: {}'.format(word.get_scrambled()))
result = input('\ngive me the correct word: ')
self.__controller.modify_points(word, result)
if not self.__controller.can_continue():
break
if self.__controller.new_high_score():
response = input('\nType <<exit>> to exit play mode: ')
if response == 'exit':
break
except Exception as e:
print(e)
|
114372
|
import torch
from torch.optim.optimizer import Optimizer, required
from torch.optim import SGD
class rSGD(SGD):
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
super(rSGD, self).__init__(params, lr=lr, momentum=momentum,
dampening=dampening, weight_decay=weight_decay,
nesterov=nesterov)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Also added case where parameter is constrained to a manifold.
Current implementation just supports normal SGD update without
momentum.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
if not hasattr(p, 'manifold') or p.manifold is None:
d_p = p.grad
if weight_decay != 0:
d_p = d_p.add(p, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group['lr'])
else:
p.data.add_(p.manifold.retr(p.data,
-group['lr'] * p.rgrad.data) - p.data)
return loss
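# Added toy example (hedged): the manifold branch of step() expects user code
# to attach two extra attributes to a constrained parameter -- `manifold`, an
# object exposing a retraction retr(x, u), and `rgrad`, the Riemannian
# gradient. Everything below (the Sphere class and the values) is illustrative.
if __name__ == "__main__":
    class Sphere:
        def retr(self, x, u):
            y = x + u            # move along the tangent direction
            return y / y.norm()  # project back onto the unit sphere
    p = torch.nn.Parameter(torch.tensor([1.0, 0.0]))
    p.grad = torch.tensor([0.0, 1.0])   # any non-None grad so step() visits p
    p.rgrad = torch.tensor([0.0, 1.0])  # stand-in Riemannian gradient
    p.manifold = Sphere()
    opt = rSGD([p], lr=0.1)
    opt.step()
    print(p.data.norm())  # ~1.0: the update stays on the sphere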
|
114380
|
import sqlite3
import pprint
db = sqlite3.connect('jobs.sqlite')
cur = db.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
tables = cur.fetchall()
table = tables[0][0]
print(tables)
print(table)
cur.execute("PRAGMA table_info({})".format(table))
pprint.pprint(cur.fetchall())
cur.execute("SELECT * FROM {}".format(table))
result = cur.fetchall()
s = result[-1][-1]
print(s.decode("utf-8", errors="ignore"))
print(type(s))
|
114422
|
import subprocess
import argparse
from prompt_toolkit import print_formatted_text as print
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.history import FileHistory
from .bash.history import BashHistoryIndexError, expand_history
from .completion import BashCompleter
from .config import settings, get_aliases
from .history import get_history_file
from .prompt import PtreplSession, get_prompt_tokens
def main(command, prompt=None):
local_shada_path = settings.LOCAL_SHADA_PATH if settings.LOCAL_SHADA else None
history = FileHistory(get_history_file(command, local_shada_path=local_shada_path))
aliases = get_aliases(command)
completer = BashCompleter(command, aliases)
complete_style = (
CompleteStyle.READLINE_LIKE
if settings.READLINE_COMPLETION
else CompleteStyle.COLUMN
)
vi_mode = settings.EDITING_MODE == "vi"
session = PtreplSession(
command,
aliases,
message="",
completer=completer,
complete_style=complete_style,
history=history,
enable_system_prompt=True,
enable_suspend=True,
vi_mode=vi_mode,
auto_suggest=AutoSuggestFromHistory(),
enable_history_search=True,
)
prompt = prompt if prompt is not None else command
while True:
try:
_get_prompt_tokens = get_prompt_tokens(
prompt,
settings.PARSE_PS1,
settings.SHOW_MODE_IN_PROMPT,
settings.EMACS_MODE_STRING,
settings.VI_INS_MODE_STRING,
settings.VI_CMD_MODE_STRING,
)
subcommand = session.prompt(_get_prompt_tokens)
try:
expanded_subcommand, execute = expand_history(
subcommand, session.default_buffer.history.get_strings()
)
except BashHistoryIndexError as e:
print(f"{command}: {e}: event not found")
continue
if not execute:
print(expanded_subcommand)
continue
if subcommand != expanded_subcommand:
session.default_buffer.history.get_strings()[-1] = expanded_subcommand
subcommand = completer.get_real_subcommand(expanded_subcommand)
if subcommand is None:
break
call_command = f"{command} {subcommand}"
for alias, alias_command in aliases.items():
if call_command.startswith(alias):
if call_command != alias:
alias = f"{alias} "
alias_command = f"{alias_command} "
call_command = call_command.replace(alias, alias_command)
subprocess.call(call_command, shell=True)
except EOFError:
break # Control-D pressed.
except KeyboardInterrupt:
pass
print("GoodBye!")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("command")
parser.add_argument("--prompt")
args = parser.parse_args()
main(args.command, prompt=args.prompt)
|
114446
|
import itertools
import torch
class Optimizer(object):
_ARG_MAX_GRAD_NORM = 'max_grad_norm'
def __init__(self, optim, max_grad_norm=0):
self.optimizer = optim
self.max_grad_norm = max_grad_norm
def step(self):
        if self.max_grad_norm > 0:
            params = itertools.chain.from_iterable(
                [group['params'] for group in self.optimizer.param_groups])
            torch.nn.utils.clip_grad_norm_(params, self.max_grad_norm)
self.optimizer.step()
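# Added usage example (hedged, illustration only): wrap a torch optimizer to
# get gradient clipping before each update.
if __name__ == "__main__":
    import torch.nn as nn
    model = nn.Linear(4, 2)
    optim = Optimizer(torch.optim.SGD(model.parameters(), lr=0.1),
                      max_grad_norm=1.0)
    model(torch.randn(8, 4)).sum().backward()
    optim.step()  # gradients are clipped to norm <= 1.0, then applied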
|
114474
|
import pytest
from bevel.utils import *
from pandas.testing import assert_frame_equal
@pytest.fixture
def sample_response_data():
a, b, c = 'a', 'b', 'c'
x, y, z = 'x', 'y', 'z'
return pd.DataFrame.from_dict({
'groups_a': [a, a, b, b, b, b, b, c, c, c, c, c],
'groups_x': [x, x, x, x, x, y, y, y, y, y, z, z],
'response': [1, 2, 1, 2, 3, 4, 4, 2, 3, 3, 4, 4],
'weights_': [1, 4, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
})
def test_pivot_proportions(sample_response_data):
actual = pivot_proportions(sample_response_data, 'groups_a', 'response')
expected = pd.DataFrame([
{'response': 1, 'a': 0.5, 'b': 0.2, 'c': 0.0},
{'response': 2, 'a': 0.5, 'b': 0.2, 'c': 0.2},
{'response': 3, 'a': 0.0, 'b': 0.2, 'c': 0.4},
{'response': 4, 'a': 0.0, 'b': 0.4, 'c': 0.4},
])
expected = expected.set_index('response', drop=True).rename_axis('groups_a', axis='columns')
assert_frame_equal(actual, expected)
def test_pivot_proportions_with_weights(sample_response_data):
actual = pivot_proportions(
sample_response_data,
'groups_a',
'response',
weights=sample_response_data['weights_']
)
expected = pd.DataFrame([
{'response': 1, 'a': 0.2, 'b': 0.2, 'c': 0.0},
{'response': 2, 'a': 0.8, 'b': 0.2, 'c': 0.2},
{'response': 3, 'a': 0.0, 'b': 0.2, 'c': 0.4},
{'response': 4, 'a': 0.0, 'b': 0.4, 'c': 0.4},
])
expected = expected.set_index('response', drop=True).rename_axis('groups_a', axis='columns')
assert_frame_equal(actual, expected)
|
114530
|
YT_API_SERVICE_NAME = 'youtube'
DEVELOPER_KEY = "<KEY>"
MAX_RESULTS = 50
YT_API_VERSION = 'v3'
LINK = 'https://www.youtube.com/watch?v='
|
114549
|
import unittest
from stensorflow.engine.start_server import start_local_server
import tensorflow as tf
import stensorflow as stf
from stensorflow.random.random import random_init
from stensorflow.basic.basic_class.private import PrivateTensor
from stensorflow.ml.logistic_regression import LogisticRegression
from stensorflow.ml.logistic_regression2 import LogisticRegression2
import os
stf_home = os.environ.get("stf_home", "..")
start_local_server(os.path.join(stf_home, "conf", "config.json"))
model_file_path = "/dev/null"
class MyTestCase(unittest.TestCase):
def setUp(self):
tf.compat.v1.disable_eager_execution()
self.sess = tf.compat.v1.Session("grpc://0.0.0.0:8887")
def tearDown(self):
self.sess.close()
def test_lr(self):
featureNumX = 5
featureNumY = 0
record_num = 6
epoch = 1
batch_size = 3
learning_rate = 0.01
train_batch_num = epoch * record_num // batch_size + 1
x_train = stf.PrivateTensor(owner='L')
y_train = stf.PrivateTensor(owner='R')
x_train.load_from_tf_tensor(tf.random.normal(shape=[batch_size, featureNumX]))
y_train.load_from_tf_tensor(tf.random.normal(shape=[batch_size, featureNumY + 1]))
model = LogisticRegression(num_features=featureNumX + featureNumY, learning_rate=learning_rate)
model.fit(self.sess, x_train, y_train, num_batches=train_batch_num)
model.save(model_file_path=model_file_path)
def test_lr2(self):
featureNumX = 5
featureNumY = 5
record_num = 6
epoch = 1
batch_size = 3
learning_rate = 0.01
train_batch_num = epoch * record_num // batch_size + 1
xL_train = PrivateTensor(owner='L')
xy_train = PrivateTensor(owner='R')
xL_train.load_from_tf_tensor(tf.random.normal(shape=[batch_size, featureNumX]))
xy_train.load_from_tf_tensor(tf.random.normal(shape=[batch_size, featureNumY+1]))
xR_train, y_train = xy_train.split(size_splits=[featureNumY, 1], axis=1)
model = LogisticRegression2(num_features_L=featureNumX, num_features_R=featureNumY, learning_rate=learning_rate,
l2_regularzation=0.01)
model.fit(sess=self.sess, x_L=xL_train, x_R=xR_train, y=y_train, num_batches=train_batch_num)
model.save(model_file_path=os.path.join(stf_home, "output", "model"))
if __name__ == '__main__':
unittest.main()
|
114555
|
from django import forms
from .models import Placement_Company_Detail,Profile,StudentBlogModel,ResorcesModel
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django.utils.translation import gettext,gettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm,UserChangeForm,PasswordChangeForm
from allauth.account.forms import LoginForm
from django.contrib.auth.forms import ReadOnlyPasswordHashField
class Job_Post_Form(forms.ModelForm):
class Meta:
model = Placement_Company_Detail
fields = ('title','snippet','author','Company_image','Job_Description','apply_link','job_type')
widgets = {
'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Job Post'}),
'apply_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of apply button'}),
'author' : forms.TextInput(attrs={'class':'form-control','value':'','id':'elder','type':'hidden'}),
# 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
'job_type' : forms.Select(attrs={'class':'form-control','placeholder':"Job Type"}),
'Job_Description' : forms.Textarea(attrs={'class':'form-control','placeholder':'Body of the Blog'}),
'snippet' : forms.Textarea(attrs={'class':'form-control','placeholder':'Add short detail of job'}),
}
class Edit_Post_Form(forms.ModelForm):
class Meta:
model = Placement_Company_Detail
fields = ('title','snippet','Company_image','Job_Description','apply_link','job_type')
widgets = {
'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Job Post'}),
'apply_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of apply button'}),
# 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
'job_type' : forms.Select(attrs={'class':'form-control','placeholder':"Job Type"}),
'Job_Description' : forms.Textarea(attrs={'class':'form-control','placeholder':'Body of the Blog'}),
'snippet' : forms.Textarea(attrs={'class':'form-control','placeholder':'Add short detail of job'}),
}
class Blog_Post_Form(forms.ModelForm):
class Meta:
model = StudentBlogModel
fields = ('title','author','body','snippet')
widgets = {
'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Blog Post'}),
'author' : forms.TextInput(attrs={'class':'form-control','value':'','id':'elder','type':'hidden'}),
# 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
'body' : forms.Textarea(attrs={'class':'form-control','placeholder':'Body of the Blog'}),
'snippet' : forms.Textarea(attrs={'class':'form-control','placeholder':'Add snippet of Blog'}),
}
class ResorcesModelForm(forms.ModelForm):
class Meta:
model = ResorcesModel
fields = ('title','docs','author','course1_title','course1_Img','course1_link','course2_title','course2_Img','course2_link','course3_title','course3_Img','course3_link','course4_title','course4_Img','course4_link','course5_title','course5_Img','course5_link',
'channel1_title','channel1_Img','channel1_link','channel2_title','channel2_Img','channel2_link','channel3_title','channel3_Img','channel3_link','channel4_title','channel4_Img','channel4_link','channel5_title','channel5_Img','channel5_link',
'Website1_title','Website1_Img','Website1_link','Website2_title','Website2_Img','Website2_link','Website3_title','Website3_Img','Website3_link','Website4_title','Website4_Img','Website4_link','Website5_title','Website5_Img','Website5_link',)
widgets = {
'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Blog Post'}),
'author' : forms.TextInput(attrs={'class':'form-control','value':'','id':'elder','type':'hidden'}),
# 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
'docs' : forms.TextInput(attrs={'class':'form-control','placeholder':'Link of documentation'}),
'course1_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 1'}),
'course1_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 1'}),
'course2_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 2'}),
'course2_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 2'}),
'course3_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 3'}),
'course3_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 3'}),
            'course4_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 4'}),
            'course4_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 4'}),
            'course5_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of course 5'}),
            'course5_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of course 5'}),
'channel1_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 1'}),
'channel1_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 1'}),
'channel2_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 2'}),
'channel2_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 2'}),
'channel3_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 3'}),
'channel3_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 3'}),
            'channel4_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 4'}),
            'channel4_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 4'}),
            'channel5_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of channel 5'}),
            'channel5_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of channel 5'}),
'Website1_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 1'}),
'Website1_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 1'}),
'Website2_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 2'}),
'Website2_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 2'}),
'Website3_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 3'}),
'Website3_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 3'}),
            'Website4_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 4'}),
            'Website4_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 4'}),
            'Website5_title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of Website 5'}),
            'Website5_link' : forms.TextInput(attrs={'class':'form-control','placeholder':'link of Website 5'}),
}
class Edit_Blog_Post_Form(forms.ModelForm):
class Meta:
model = StudentBlogModel
fields = ('title','snippet','body')
widgets = {
'title' : forms.TextInput(attrs={'class':'form-control','placeholder':'Title of the Blog Post'}),
# 'author' : forms.TextInput(attrs={'class':'form-control','value':'','id':'elder','type':'hidden'}),
# 'author' : forms.Select(attrs={'class':'form-control','placeholder':"author's name"}),
'body' : forms.Textarea(attrs={'class':'form-control','placeholder':'Body of the Blog'}),
'snippet' : forms.Textarea(attrs={'class':'form-control','placeholder':'Add snippet of Blog'}),
}
class UserLoginForm(LoginForm):
username=forms.CharField(widget=forms.TextInput(attrs={'autofocus':True,'class':'form-control'}))
password=forms.CharField(label=_('Password'),strip=False,widget=forms.PasswordInput(attrs={'autocomplete':'current-password','autofocus':True,'class':'form-control'}))
class ProfilePageView(forms.ModelForm):
class Meta:
model = Profile
fields = ('bio','Gender','Mobile_Number','city','state','profile_pic','twitter_url','instagram_url','linkdin_url','github_url')
widgets = {
'bio': forms.Textarea(attrs={'class':'form-control','placeholder':'Write a summary about you...'}),
# 'profile_pic': forms.ImageField(),
'Gender': forms.Select(attrs={'class':'form-control'}),
'Mobile_Number': forms.TextInput(attrs={'class':'form-control','placeholder':'Enter your Mobile number'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.Select(attrs={'class':'form-control'}),
'twitter_url': forms.TextInput(attrs={'class':'form-control'}),
'instagram_url': forms.TextInput(attrs={'class':'form-control'}),
'linkdin_url': forms.TextInput(attrs={'class':'form-control'}),
'github_url': forms.TextInput(attrs={'class':'form-control'}),
}
class EditProfileFormPage(forms.ModelForm):
class Meta:
model = Profile
fields = ('bio','Gender','Mobile_Number','city','state','profile_pic','twitter_url','instagram_url','linkdin_url','github_url')
widgets = {
'bio': forms.Textarea(attrs={'class':'form-control','placeholder':'Write a summary about you...'}),
# 'profile_pic': forms.ImageField(),
'Gender': forms.Select(attrs={'class':'form-control'}),
'Mobile_Number': forms.TextInput(attrs={'class':'form-control'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.Select(attrs={'class':'form-control'}),
'twitter_url': forms.TextInput(attrs={'class':'form-control'}),
'instagram_url': forms.TextInput(attrs={'class':'form-control'}),
'linkdin_url': forms.TextInput(attrs={'class':'form-control'}),
'github_url': forms.TextInput(attrs={'class':'form-control'}),
}
class EditProfileForm(UserChangeForm):
date_joined = forms.CharField(max_length=100,disabled=True)
    password = ReadOnlyPasswordHashField(label=("Password"),
help_text=("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"../accounts/password/change/\">this form</a>."))
class Meta:
model =User
fields = ['username','first_name','last_name','email','date_joined']
labels={
'first_name' : 'First Name',
'last_name':'Last Name',
'email': 'Email',
}
widgets = {
'username': forms.TextInput(attrs={'class':'form-control'}),
'first_name': forms.TextInput(attrs={'class':'form-control'}),
'last_name': forms.TextInput(attrs={'class':'form-control'}),
'email': forms.EmailInput(attrs={'class':'form-control'}),
'date_joined': forms.TextInput(attrs={'class':'form-control'}),
}
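# Added usage note (hedged): these ModelForms are consumed in the usual Django
# way, e.g. in a view (names below are illustrative):
#   form = Job_Post_Form(request.POST or None)
#   if form.is_valid():
#       form.save()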
|
114564
|
import os
import sys
d = "localedata"
d = os.path.join(sys._MEIPASS, d)
import babel.localedata
babel.localedata._dirname = d
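# Added note: this is a PyInstaller runtime hook -- sys._MEIPASS only exists
# inside a frozen bundle, so babel's 'localedata' directory must be collected
# into the bundle (e.g. listed under `datas` in the .spec file) for the
# override above to point at real files.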
|
114566
|
import unittest
import hail as hl
from .flags import (
get_expr_for_consequence_lc_lof_flag,
get_expr_for_variant_lc_lof_flag,
get_expr_for_genes_with_lc_lof_flag,
get_expr_for_consequence_loftee_flag_flag,
get_expr_for_variant_loftee_flag_flag,
get_expr_for_genes_with_loftee_flag_flag,
)
class TestFlags(unittest.TestCase):
def setUp(self):
self.all_lc_lof = hl.literal(
[
hl.struct(gene_id="foo", lof="LC", lof_flags="", lof_info=""),
hl.struct(gene_id="foo", lof="NC", lof_flags="", lof_info=""),
hl.struct(gene_id="bar", lof="LC", lof_flags="", lof_info=""),
hl.struct(gene_id="baz", lof="", lof_flags="", lof_info=""),
hl.struct(gene_id="baz", lof="LC", lof_flags="", lof_info=""),
]
)
self.some_lc_lof = hl.literal(
[
hl.struct(gene_id="foo", lof="LC", lof_flags="", lof_info=""),
hl.struct(gene_id="foo", lof="", lof_flags="", lof_info=""),
hl.struct(gene_id="bar", lof="LC", lof_flags="", lof_info=""),
hl.struct(gene_id="baz", lof="HC", lof_flags="", lof_info=""),
hl.struct(gene_id="baz", lof="LC", lof_flags="", lof_info=""),
]
)
self.all_loftee_flags = hl.literal(
[
hl.struct(gene_id="foo", lof="HC", lof_flags="flag1", lof_info=""),
hl.struct(gene_id="foo", lof="HC", lof_flags="flag2", lof_info=""),
hl.struct(gene_id="bar", lof="LC", lof_flags="flag1", lof_info=""),
hl.struct(gene_id="baz", lof="HC", lof_flags="flag2", lof_info=""),
hl.struct(gene_id="baz", lof="", lof_flags="", lof_info=""),
]
)
self.some_loftee_flags = hl.literal(
[
hl.struct(gene_id="foo", lof="HC", lof_flags="flag1", lof_info=""),
hl.struct(gene_id="foo", lof="HC", lof_flags="", lof_info=""),
hl.struct(gene_id="bar", lof="", lof_flags="flag1", lof_info=""),
hl.struct(gene_id="bar", lof="LC", lof_flags="flag1", lof_info=""),
hl.struct(gene_id="baz", lof="HC", lof_flags="flag2", lof_info=""),
hl.struct(gene_id="baz", lof="HC", lof_flags="flag3", lof_info=""),
]
)
def test_consequence_lc_lof_flag(self):
self.assertTrue(hl.eval(get_expr_for_consequence_lc_lof_flag(hl.struct(lof="LC"))))
self.assertFalse(hl.eval(get_expr_for_consequence_lc_lof_flag(hl.struct(lof="HC"))))
self.assertFalse(hl.eval(get_expr_for_consequence_lc_lof_flag(hl.struct(lof=""))))
def test_variant_lc_lof_flag(self):
self.assertTrue(hl.eval(get_expr_for_variant_lc_lof_flag(self.all_lc_lof)))
self.assertFalse(hl.eval(get_expr_for_variant_lc_lof_flag(self.some_lc_lof)))
def test_genes_with_lc_lof_flag(self):
self.assertSetEqual(hl.eval(get_expr_for_genes_with_lc_lof_flag(self.all_lc_lof)), set(["foo", "bar", "baz"]))
self.assertSetEqual(hl.eval(get_expr_for_genes_with_lc_lof_flag(self.some_lc_lof)), set(["foo", "bar"]))
def test_consequence_loftee_flag_flag(self):
self.assertTrue(hl.eval(get_expr_for_consequence_loftee_flag_flag(hl.struct(lof="HC", lof_flags="foo"))))
self.assertFalse(hl.eval(get_expr_for_consequence_loftee_flag_flag(hl.struct(lof="", lof_flags=""))))
self.assertFalse(hl.eval(get_expr_for_consequence_loftee_flag_flag(hl.struct(lof="", lof_flags="bar"))))
def test_variant_loftee_flag_flag(self):
self.assertTrue(hl.eval(get_expr_for_variant_loftee_flag_flag(self.all_loftee_flags)))
self.assertFalse(hl.eval(get_expr_for_variant_loftee_flag_flag(self.some_loftee_flags)))
def test_genes_with_loftee_flag_flag(self):
self.assertSetEqual(
hl.eval(get_expr_for_genes_with_loftee_flag_flag(self.all_loftee_flags)), set(["foo", "bar", "baz"])
)
self.assertSetEqual(
hl.eval(get_expr_for_genes_with_loftee_flag_flag(self.some_loftee_flags)), set(["bar", "baz"])
)
if __name__ == "__main__":
unittest.main()
|
114579
|
import sqlite3, random, time
cnx = sqlite3.connect('taqueria.db')
cursor = cnx.cursor()
def queryTacos():
consultarTacos = cursor.execute("SELECT ID, Nombre_Taco FROM tacos")
return consultarTacos.fetchall()
def queryClientes():
consultarClientes = cursor.execute("SELECT ID, Nombre FROM clientes")
return consultarClientes.fetchall()
def queryOrdenes():
consultarOrdenes = cursor.execute("SELECT ordenes.ID_Orden,ordenes.ID_Taco,ordenes.ID_Cliente,tacos.Nombre_Taco FROM ordenes INNER JOIN tacos ON ordenes.ID_Taco = tacos.ID")
return consultarOrdenes.fetchall()
def unCliente(idcliente):
consultaUncliente = cursor.execute("SELECT * FROM clientes WHERE ID=?",(idcliente,))
return consultaUncliente.fetchall()
def unTaco(idtaco):
    consultaUntaco = cursor.execute("SELECT * FROM tacos WHERE ID=?", (idtaco,))
    return consultaUntaco.fetchall()
def unaOrden(idorden):
    consultaUnaorden = cursor.execute("SELECT * FROM ordenes WHERE ID_Orden=?", (idorden,))
    return consultaUnaorden.fetchall()
cnx.commit()
def menu():
    print("Simulating a taqueria \n Choose: ")
    print("\t1 - View the taco menu")
    print("\t2 - Order a taco")
    print("\t3 - View all customers")
    print("\t4 - View all orders")
    print("\t5 - Look up a specific customer")
    print("\t6 - Look up a specific taco")
    print("\t7 - Look up a specific order")
    print("\t0 - Exit")
def main():
    while True:
        menu()
        opcion = input("Select the option you want: ")
        if opcion == "1":
            print("\nYou typed option 1\n")
            todosTacos = queryTacos()
            for taco in todosTacos:
                print(taco, "\n")
            continue
        if opcion == "2":
            print("\nYou typed option 2\n")
            idorden = random.randint(0, 100)
            idtaco = input("Enter the ID of the taco you want to order: ")
            idcliente = input("Enter your ID to register your order: ")
            cursor.execute("INSERT INTO ordenes (ID_Orden,ID_Taco,ID_Cliente) VALUES(?,?,?)",
                           (idorden, idtaco, idcliente))
            cnx.commit()
            continue
        if opcion == "3":
            print("\nYou typed option 3\n")
            todosclientes = queryClientes()
            for cliente in todosclientes:
                print(cliente, "\n")
            continue
        if opcion == "4":
            print("\nYou typed option 4\n")
            todasordenes = queryOrdenes()
            for orden in todasordenes:
                print(orden, "\n")
            continue
        if opcion == "5":
            print("\nYou typed option 5\n")
            uncliente = input("Enter the ID of the customer you want to look up: ")
            mostrar = unCliente(uncliente)
            print(mostrar, "\n")
            continue
        if opcion == "6":
            print("\nYou typed option 6\n")
            untaco = input("Enter the ID of the taco you want to look up: ")
            mostrar = unTaco(untaco)
            print(mostrar, "\n")
            continue
        if opcion == "7":
            print("\nYou typed option 7\n")
            unaorden = input("Enter the ID of the order you want to look up: ")
            mostrar = unaOrden(unaorden)
            print(mostrar, "\n")
            continue
        if opcion == "0":
            print("\nYou typed option 0, leaving the menu\n")
            time.sleep(1)
            break
    cnx.close()
if __name__ == '__main__':
    main()
|
114624
|
import demistomock as demisto
from CommonServerPython import *
SCRIPT_NAME = 'ListCreator'
def configure_list(list_name: str, list_data: str) -> bool:
"""Create system lists using the createList built-in method.
"""
demisto.debug(f'{SCRIPT_NAME} - Setting "{list_name}" list.')
res = demisto.executeCommand('createList', {'listName': list_name, 'listData': list_data})
if is_error(res):
error_message = f'{SCRIPT_NAME} - {get_error(res)}'
demisto.debug(error_message)
return False
return True
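# configure_list deliberately returns False instead of raising, so that main()
# below can still emit a CommandResults entry carrying a failure status.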
def main():
args = demisto.args()
list_name = args.get('list_name')
list_data = args.get('list_data')
try:
configuration_status = configure_list(list_name, list_data)
return_results(
CommandResults(
outputs_prefix='ConfigurationSetup.Lists',
outputs_key_field='listname',
outputs={
'listname': list_name,
'creationstatus': 'Success.' if configuration_status else 'Failure.',
},
)
)
except Exception as e:
return_error(f'{SCRIPT_NAME} - Error occurred while setting up machine.\n{e}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
114636
|
import numpy as np
from .muscle_simulation_stepupdate import step_update_state
class MuscleTendonComplex:
def __init__(self, nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi, eref=0.04, act=0.01,
tau=0.01, w=0.56, c=0.05, N=1.5, K=5.0, stim=0.0,
vce=0.0, frcmtc=0.0, lmtc=0.0):
self.init_nameMuscle = nameMuscle
self.init_frcmax = float(frcmax)
self.init_vmax = float(vmax)
self.init_eref = float(eref)
self.init_lslack = float(lslack)
self.init_lopt = float(lopt)
self.init_tau = float(tau)
self.init_w = float(w)
self.init_c = float(c)
self.init_N = float(N)
self.init_K = float(K)
self.init_stim = float(stim)
self.init_act = float(act)
self.init_lmtc = float(lmtc)
self.init_lce = float(lce)
self.init_vce = float(vce)
self.init_frcmtc = float(frcmtc)
self.init_r = r.astype('float')
self.init_phiref = phiref.astype('float')
self.init_phimaxref = phimaxref.astype('float')
self.init_rho = rho.astype('float')
self.init_dirAng = dirAng.astype('float')
self.init_phiScale = phiScale.astype('float')
self.init_offsetCorr = offsetCorr.astype('int')
self.init_timestep = float(timestep)
self.init_angJoi = angJoi.astype('float')
self.reset_state()
self.MR = float(0.01)
self.typeMuscle = int(self.angJoi.size)
self.levelArm = np.zeros(self.typeMuscle).astype('float')
tmpL = np.zeros(self.typeMuscle)
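        # Two lever-arm models are supported per joint:
        #   offsetCorr == 0: constant lever arm r, so the MTC length change is
        #       linear in the joint angle.
        #   offsetCorr == 1: angle-dependent lever arm r * cos((phi - phimax) * scale),
        #       so the length change is the integral of that cosine (a sine
        #       difference) between the reference and current angles.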
for i in range(0, self.typeMuscle):
if self.offsetCorr[i] == 0:
tmpL[i] = self.dirAng[i] * (self.angJoi[i] - self.phiref[i]) * self.r[i] * self.rho[i]
self.levelArm[i] = self.r[i]
elif self.offsetCorr[i] == 1:
tmp1 = np.sin((self.phiref[i] - self.phimaxref[i]) * self.phiScale[i])
tmp2 = np.sin((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i])
tmpL[i] = self.dirAng[i] * (tmp2 - tmp1) * self.r[i] * self.rho[i] / self.phiScale[i]
self.levelArm[i] = np.cos((self.angJoi[i] - self.phimaxref[i]) * self.phiScale[i]) * self.r[i]
else:
                raise ValueError('Invalid muscle lever arm offset correction type.')
self.lmtc = self.lslack + self.lopt + np.sum(tmpL)
self.lce = self.lmtc - self.lslack
self.lse = float(self.lmtc - self.lce)
# unitless parameters
self.Lse = float(self.lse / self.lslack)
self.Lce = float(self.lce / self.lopt)
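        # Half-step (timestep/2) explicit Euler predictors: actsubstep for the
        # activation ODE tau * d(act)/dt = stim - act, lcesubstep for the CE
        # length integrated from its velocity; both appear to be consumed as
        # midpoint states by step_update_state.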
self.actsubstep = float((self.stim - self.act) * self.timestep / 2.0 / self.tau + self.act)
self.lcesubstep = float(self.vce * self.timestep / 2.0 + self.lce)
# test
self.lce_avg = float(self.lce)
self.vce_avg = float(self.vce)
self.frcmtc_avg = float(0)
self.act_avg = float(self.act)
self.frame = 0
def stepUpdateState(self, angJoi):
"""
        Muscle Tendon Complex Dynamics
        Update the muscle states based on the muscle dynamics.
        The muscle state stim has to be updated externally before this function is called.
"""
self.frcmax, self.vmax, self.eref, self.lslack, self.lopt, self.tau, \
self.w, self.c, self.N, self.K, self.stim, self.act, self.lmtc, self.lce, \
self.vce, self.frcmtc, \
self.r, self.phiref, \
self.phimaxref, self.rho, \
self.dirAng, self.phiScale, \
self.angJoi, self.levelArm, self.offsetCorr, \
self.timestep, self.MR, self.typeMuscle, \
self.lse, self.Lse, self.Lce, self.actsubstep, \
self.lcesubstep, self.lce_avg, self.vce_avg, self.frcmtc_avg, self.act_avg, self.frame = \
step_update_state(
self.frcmax, self.vmax, self.eref, self.lslack, self.lopt, self.tau,
self.w, self.c, self.N, self.K, self.stim, self.act, self.lmtc, self.lce,
self.vce, self.frcmtc,
self.r, self.phiref,
self.phimaxref, self.rho,
self.dirAng, self.phiScale,
angJoi, self.levelArm, self.offsetCorr,
self.timestep, self.MR, self.typeMuscle,
self.lse, self.Lse, self.Lce, self.actsubstep,
self.lcesubstep, self.lce_avg, self.vce_avg, self.frcmtc_avg, self.act_avg, self.frame)
def reset_state(self):
self.frame = int(0)
self.lce_avg = float(0)
self.frcmtc_avg = float(0)
self.act_avg = float(0)
self.vce_avg = float(0)
self.nameMuscle = self.init_nameMuscle
self.frcmax = self.init_frcmax
self.vmax = self.init_vmax
self.eref = self.init_eref
self.lslack = self.init_lslack
self.lopt = self.init_lopt
self.tau = self.init_tau
self.w = self.init_w
self.c = self.init_c
self.N = self.init_N
self.K = self.init_K
self.stim = self.init_stim
self.act = self.init_act
self.lmtc = self.init_lmtc
self.lce = self.init_lce
self.vce = self.init_vce
self.frcmtc = self.init_frcmtc
self.r = self.init_r
self.phiref = self.init_phiref
self.phimaxref = self.init_phimaxref
self.rho = self.init_rho
self.dirAng = self.init_dirAng
self.phiScale = self.init_phiScale
self.offsetCorr = self.init_offsetCorr
self.timestep = self.init_timestep
self.angJoi = self.init_angJoi
class TIA(MuscleTendonComplex):
"""
    Tibialis Anterior (TIA): the tibialis anterior (tibialis anticus) is situated on the lateral
    side of the tibia. In the human body it serves multiple functions: dorsiflexion
    of the ankle, inversion of the foot, adduction of the foot, and a contribution to
    maintaining the medial arch of the foot. Here TIA is modelled as a muscle actuating
    ankle dorsiflexion in the sagittal plane.
"""
def __init__(self, angAnk, timestep):
frcmax = 800 # maximum isometric force [N]
lopt = 0.06 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.24 # tendon slack length [m]
# Tibialis Anterior attachment
rTIAmax = 0.04 # [m] maximum lever contribution
rTIAmin = 0.01 # [m] minimum lever contribution
phimaxTIA = 80 * np.pi / 180 # [rad] angle of maximum lever contribution
phiminTIA = 180 * np.pi / 180 # [rad] angle of minimum lever contribution
        phirefTIA = 110 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
phiScaleTIA = np.arccos(rTIAmin / rTIAmax) / (phiminTIA - phimaxTIA)
rhoTIA = 0.7
r = np.array((rTIAmax,))
phiref = np.array((phirefTIA,))
phimaxref = np.array((phimaxTIA,))
rho = np.array((rhoTIA,))
dirAng = np.array((1.0,))
offsetCorr = np.array((1,))
phiScale = np.array((phiScaleTIA,))
lce = lopt
angJoi = np.array((angAnk,))
nameMuscle = "TIA"
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class SOL(MuscleTendonComplex):
"""
    Soleus (SOL): the soleus muscle is located in the superficial posterior compartment of the
    leg; along with GAS it helps plantarflex the ankle joint. Here SOL is
    modelled as a muscle actuating ankle plantarflexion in the sagittal plane.
"""
def __init__(self, angAnk, timestep):
frcmax = 4000 # maximum isometric force [N]
lopt = 0.04 # optimum fiber length CE [m]
vmax = 6.0 # maximum contraction velocity [lopt/s]
lslack = 0.26 # tendon slack length [m]
# SOLeus attachment
rSOLmax = 0.06 # [m] maximum lever contribution
rSOLmin = 0.02 # [m] minimum lever contribution
phimaxSOL = 100 * np.pi / 180 # [rad] angle of maximum lever contribution
phiminSOL = 180 * np.pi / 180 # [rad] angle of minimum lever contribution
        phirefSOL = 90 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoSOL = 0.5
phiScaleSOL = np.arccos(rSOLmin / rSOLmax) / (phiminSOL - phimaxSOL)
r = np.array((rSOLmax,))
phiref = np.array((phirefSOL,))
phimaxref = np.array((phimaxSOL,))
rho = np.array((rhoSOL,))
dirAng = np.array((-1.0,))
offsetCorr = np.array((1,))
phiScale = np.array((phiScaleSOL,))
lce = lopt
angJoi = np.array((angAnk,))
nameMuscle = "SOL"
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class GAS(MuscleTendonComplex):
"""
    Gastrocnemius (GAS): the gastrocnemius, the major bulk at the back of the
    lower leg, is a bi-articular muscle with two heads, running from the back of the knee to the
    heel. It assists plantarflexion of the ankle joint and flexion of the knee
    joint. Here GAS is modelled as a bi-articular MTU contributing to the knee flexion
    and ankle plantarflexion actuations in the sagittal plane.
"""
def __init__(self, angKne, angAnk, timestep):
frcmax = 1500 # maximum isometric force [N]
lopt = 0.05 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.40 # tendon slack length [m]
# GAStrocnemius attachment (knee joint)
rGASkmax = 0.05 # [m] maximum lever contribution
rGASkmin = 0.02 # [m] minimum lever contribution
phimaxGASk = 140 * np.pi / 180 # [rad] angle of maximum lever contribution
phiminGASk = 45 * np.pi / 180 # [rad] angle of minimum lever contribution
        phirefGASk = 165 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoGASk = 0.7
        # rhoGASk = 0.045
phiScaleGASk = np.arccos(rGASkmin / rGASkmax) / (phiminGASk - phimaxGASk)
# GAStrocnemius attachment (ankle joint)
rGASamax = 0.06 # [m] maximum lever contribution
rGASamin = 0.02 # [m] minimum lever contribution
phimaxGASa = 100 * np.pi / 180 # [rad] angle of maximum lever contribution
phiminGASa = 180 * np.pi / 180 # [rad] angle of minimum lever contribution
        phirefGASa = 80 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoGASa = 0.7
        # rhoGASa = 0.045
phiScaleGASa = np.arccos(rGASamin / rGASamax) / (phiminGASa - phimaxGASa)
r = np.array((rGASkmax, rGASamax))
phiref = np.array((phirefGASk, phirefGASa))
phimaxref = np.array((phimaxGASk, phimaxGASa))
rho = np.array((rhoGASk, rhoGASa))
dirAng = np.array((1.0, -1.0))
offsetCorr = np.array((1, 1))
phiScale = np.array((phiScaleGASk, phiScaleGASa))
lce = lopt
nameMuscle = "GAS"
angJoi = np.array((angKne, angAnk))
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class BFSH(MuscleTendonComplex):
"""
    Biceps Femoris Short Head (BFSH): part of the hamstring group in the human
    body, responsible for knee flexion. Here BFSH is modelled as a muscle contributing
    to the knee flexion actuation in the sagittal plane.
"""
def __init__(self, angKne, timestep):
frcmax = 350 # maximum isometric force [N]
lopt = 0.12 # optimum fiber length CE [m]
vmax = 12.0 # 6 # maximum contraction velocity [lopt/s]
lslack = 0.10 # tendon slack length [m]
# BFSH group attachment
rBFSH = 0.04 # [m] constant lever contribution
        phirefBFSH = 160 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoBFSH = 0.7
r = np.array((rBFSH,))
phiref = np.array((phirefBFSH,))
phimaxref = np.array((0.0,))
rho = np.array((rhoBFSH,))
dirAng = np.array((1.0,))
offsetCorr = np.array((0,))
phiScale = np.array((0.0,))
lce = lopt
nameMuscle = "BFSH",
angJoi = np.array((angKne,))
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class VAS(MuscleTendonComplex):
"""
    Vasti (VAS): the vasti are a group of three muscles located in the thigh, responsible for
    knee extension. Here VAS is modelled as a muscle actuating knee extension in the
    sagittal plane.
"""
def __init__(self, angKne, timestep):
frcmax = 6000 # maximum isometric force [N]
lopt = 0.08 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.23 # tendon slack length [m]
# VAS group attachment
rVASmax = 0.06 # [m] maximum lever contribution
rVASmin = 0.04 # [m] minimum lever contribution
phimaxVAS = 165 * np.pi / 180 # [rad] angle of maximum lever contribution
phiminVAS = 45 * np.pi / 180 # [rad] angle of minimum lever contribution
        phirefVAS = 120 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoVAS = 0.6
phiScaleVAS = np.arccos(rVASmin / rVASmax) / (phiminVAS - phimaxVAS)
r = np.array((rVASmax,))
phiref = np.array((phirefVAS,))
phimaxref = np.array((phimaxVAS,))
rho = np.array((rhoVAS,))
dirAng = np.array((-1.0,))
offsetCorr = np.array((1,))
phiScale = np.array((phiScaleVAS,))
lce = lopt
nameMuscle = "VAS"
angJoi = np.array((angKne,))
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class REF(MuscleTendonComplex):
"""
    Rectus Femoris (REF): the rectus femoris is one of the four quadriceps muscles.
    It is located in the middle of the front of the thigh and is responsible for knee
    extension and hip flexion. Here REF is modelled as a bi-articular MTU contributing to
    the hip flexion and knee extension actuations in the sagittal plane.
"""
def __init__(self, angHip, angKne, timestep):
frcmax = 1200 # maximum isometric force [N]
lopt = 0.08 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.35 # tendon slack length [m]
        # REF group attachment (hip)
rREFh = 0.08 # [m] constant lever contribution
        phirefREFh = 170 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoREFh = 0.3
# REF group attachment (knee)
rREFkmax = 0.06 # [m] maximum lever contribution
rREFkmin = 0.04 # [m] minimum lever contribution
phimaxREFk = 165 * np.pi / 180 # [rad] angle of maximum lever contribution
phiminREFk = 45 * np.pi / 180 # [rad] angle of minimum lever contribution
        phirefREFk = 125 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoREFk = 0.5
phiScaleREFk = np.arccos(rREFkmin / rREFkmax) / (phiminREFk - phimaxREFk)
r = np.array((rREFh, rREFkmax))
phiref = np.array((phirefREFh, phirefREFk))
phimaxref = np.array((0.0, phimaxREFk))
rho = np.array((rhoREFh, rhoREFk))
dirAng = np.array((1.0, -1.0))
offsetCorr = np.array((0, 1))
phiScale = np.array((0.0, phiScaleREFk))
lce = lopt
nameMuscle = "REF"
angJoi = np.array((angHip, angKne))
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class HAM(MuscleTendonComplex):
"""
Hamstrings (HAM): The hamstring muscles are a group of four muscles located in the
back of the thigh. They are bi-articular muscles crossing the hip and knee joints, so
they can help in both knee flexion and hip extension at the same time. Here HAM
is modelled as a bi-articular MTU contributing to the hip extension and knee flexion
actuations in the sagittal plane.
"""
def __init__(self, angHip, angKne, timestep):
frcmax = 3000 # maximum isometric force [N]
lopt = 0.10 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.31 # tendon slack length [m]
        # hamstring hip lever arm and reference angle
rHAMh = 0.08 # [m] constant lever contribution
        phirefHAMh = 150 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoHAMh = 0.5
        # hamstring knee lever arm and reference angle
rHAMk = 0.05 # [m] constant lever contribution
        phirefHAMk = 180 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoHAMk = 0.5
r = np.array((rHAMh, rHAMk))
phiref = np.array((phirefHAMh, phirefHAMk))
phimaxref = np.array((0.0, 0.0))
rho = np.array((rhoHAMh, rhoHAMk))
dirAng = np.array((-1.0, 1.0))
offsetCorr = np.array((0, 0))
phiScale = np.array((0.0, 0.0))
lce = lopt
nameMuscle = "HAM"
angJoi = np.array((angHip, angKne))
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class HFL(MuscleTendonComplex):
"""
    Hip Flexor (HFL): the hip flexors are a group of muscles that bring the legs
    and trunk together in a flexion movement. They allow the leg or knee to move up
    towards the torso, and the torso to bend forward at the hip. The HFL modelled
    here is one of the actuators for hip flexion in the sagittal plane.
"""
def __init__(self, angHip, timestep):
frcmax = 2000 # maximum isometric force [N]
lopt = 0.11 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.10 # tendon slack length [m]
        # lever arm and reference angle
        r = np.array((0.08,)) # [m] constant lever contribution
        phiref = np.array((160 * np.pi / 180,)) # [rad] reference angle at which MTU length equals lopt + lslack
        phimaxref = np.array((0.0,))
        rho = np.array((0.5,))
        dirAng = np.array((1.0,)) # angle increase leads to MTC length increase
        offsetCorr = np.array((0,)) # no lever arm correction
phiScale = np.array((0.0,))
lce = lopt
angJoi = np.array((angHip,))
nameMuscle = "HFL"
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class GLU(MuscleTendonComplex):
"""
    Glutei (GLU): the glutei are a group of muscles in the gluteal region; in real-life
    locomotion their functions include extension, abduction, external rotation and internal
    rotation of the hip joint. In this model GLU is modelled antagonistically to HFL
    as a hip extensor, acting as one of the hip joint actuators in the sagittal plane.
"""
def __init__(self, angHip, timestep):
frcmax = 1500.0 # maximum isometric force [N]
lopt = 0.11 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.13 # tendon slack length [m]
        # lever arm and reference angle
        r = np.array((0.08,)) # [m] constant lever contribution
        phiref = np.array((120 * np.pi / 180,)) # [rad] reference angle at which MTU length equals lopt + lslack
        phimaxref = np.array((0.0,))
        rho = np.array((0.5,))
        dirAng = np.array((-1.0,)) # angle increase leads to MTC length decrease
        offsetCorr = np.array((0,)) # no lever arm correction
phiScale = np.array((0.0,))
lce = lopt # will be computed in the initialization
nameMuscle = "GLU"
angJoi = np.array((angHip,))
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class HAD(MuscleTendonComplex):
"""
    Hip Adductor (HAD): the hip adductors in the thigh are a group of muscles near the groin
    that move the leg towards the midline of the body in the coronal
    plane. They are antagonistic to the hip abductors and also help
    stabilize the hip joint in real-life locomotion. The HAD modelled here acts as the
    second actuator for hip adduction in the coronal plane.
"""
def __init__(self, angHipFront, timestep):
frcmax = 4500.0 # maximum isometric force [N]
lopt = 0.10 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.18 # tendon slack length [m]
rHAD = 0.03 # [m] constant lever contribution
        phirefHAD = 15 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoHAD = 1.0
r = np.array((rHAD,))
phiref = np.array((phirefHAD,))
phimaxref = np.array((0.0,))
rho = np.array((rhoHAD,))
dirAng = np.array((1.0,))
offsetCorr = np.array((0,))
phiScale = np.array((0.0,))
lce = lopt
nameMuscle = "HAD"
angJoi = np.array((angHipFront,))
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
class HAB(MuscleTendonComplex):
"""
    Hip Abductor (HAB): the hip abductor muscles in the thigh are a group of muscles
    that move the leg away from the midline of the body in the coronal plane.
    They also help rotate the thigh in the hip socket and stabilize the hip joint. The
    HAB modelled here acts as an actuator for hip abduction in the coronal plane.
"""
def __init__(self, angHipFront, timestep):
frcmax = 3000.0 # maximum isometric force [N]
lopt = 0.09 # optimum fiber length CE [m]
vmax = 12.0 # maximum contraction velocity [lopt/s]
lslack = 0.07 # tendon slack length [m]
rHAB = 0.06 # [m] constant lever contribution
        phirefHAB = 10 * np.pi / 180 # [rad] reference angle at which MTU length equals lopt + lslack
        rhoHAB = 0.7
r = np.array((rHAB,))
phiref = np.array((phirefHAB,))
phimaxref = np.array((0.0,))
rho = np.array((rhoHAB,))
dirAng = np.array((-1.0,))
offsetCorr = np.array((0,))
phiScale = np.array((0.0,))
lce = lopt
nameMuscle = "HAB"
angJoi = np.array((angHipFront,))
super().__init__(nameMuscle, frcmax, vmax, lslack, lopt,
lce, r, phiref, phimaxref, rho, dirAng, phiScale,
offsetCorr, timestep, angJoi)
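if __name__ == "__main__":
    # Minimal usage sketch (assumes this module is importable as part of its
    # package, since step_update_state comes from a relative import).
    # Construct a tibialis anterior MTU at a 90-degree ankle angle and inspect
    # the geometry computed in __init__.
    tia = TIA(angAnk=90 * np.pi / 180, timestep=1e-3)
    print(tia.nameMuscle, tia.lmtc, tia.lce, tia.levelArm)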
|
114656
|
import numpy as np
import sys
pp = "/Users/andres.perez/source/parametric_spatial_audio_processing"
sys.path.append(pp)
import parametric_spatial_audio_processing as psa
import matplotlib.pyplot as plt
import scipy.stats
from utils import *
from file_utils import build_result_dict_from_metadata_array, build_metadata_result_array_from_event_dict
from seld_dcase2019_master.metrics.evaluation_metrics import distance_between_spherical_coordinates_rad
def preprocess(data, sr, params):
"""
Assert first order ambisonics and dimensionality order.
Compute Stft.
:param data: np.array (num_frames, num_channels)
:param sr: sampling rate
:param params: params dict
:return: psa.Stft instance
"""
num_frames = np.shape(data)[0]
num_channels = np.shape(data)[1]
assert num_channels == 4
start_frame = 0
if params['quick_test']:
end_frame = int(np.ceil(sr * params['quick_test_file_duration']))
else:
end_frame = num_frames
window_size = params['window_size']
window_overlap = params['window_overlap']
nfft = params['nfft']
x = psa.Signal(data[start_frame:end_frame].T, sr, 'acn', 'n3d')
X = psa.Stft.fromSignal(x,
window_size=window_size,
window_overlap=window_overlap,
nfft=nfft
).limit_bands(params['fmin'], params['fmax'])
if params['plot']:
psa.plot_magnitude_spectrogram(X)
return X
def estimate_doa(data, sr, params):
"""
Given an input audio, get the most significant tf bins per frame
:param data: np.array (num_frames, num_channels)
:param sr: sampling rate
:param params: params dict
:return: an array in the form :
[frame, [class_id, azi, ele],[class_id, azi, ele]... ]
without repeated frame instances, quantized at hop_size,
containing all valid tf bins doas.
"""
### Preprocess data
X = preprocess(data, sr, params)
N = X.get_num_time_bins()
K = X.get_num_frequency_bins()
r = params['r']
### Diffuseness mask
doa = psa.compute_DOA(X)
directivity = X.compute_ita_re(r=r)
directivity_mask = directivity.compute_mask(th=params['directivity_th'])
### Energy density mask
e = psa.compute_energy_density(X)
block_size = params['energy_density_local_th_size']
tl = e.compute_threshold_local(block_size=block_size)
e_mask = e.compute_mask(tl)
### DOA Variance mask (computed on azimuth variance)
vicinity_radius = params['doa_std_vicinity_radius']
if np.size(vicinity_radius) == 1:
# Square!
r_k = vicinity_radius
r_n = vicinity_radius
elif np.size(vicinity_radius) == 2:
# Rectangle! [k, n]
r_k = vicinity_radius[0]
r_n = vicinity_radius[1]
else:
        raise ValueError('doa_std_vicinity_radius must have size 1 or 2')
# TODO: optimize the for loop
std = np.zeros((K, N))
doa0_k_array = []
    for shift in range(-r_n, r_n + 1):
        doa0_k_array.append(np.roll(doa.data[0, :, :], shift))
doa0_k = np.stack(doa0_k_array, axis=0)
for k in range(r_k, K - r_k):
std[k, :] = scipy.stats.circstd(doa0_k[:, k - r_k:k + r_k + 1, :], high=np.pi, low=-np.pi, axis=(0, 1))
# not optimized version...
# for k in range(r_k, K-r_k):
# for n in range(r_n, N-r_n):
# # azi
# std[k, n] = scipy.stats.circstd(doa.data[0, k-r_k:k+r_k+1, n-r_n:n+r_n+1], high=np.pi, low=-np.pi)
# # ele
# # std[k, n] = np.std(doa.data[1, k-r_k:k+r_k+1, n-r_n:n+r_n+1])
# Edges: largest value
std_max = np.max(std)
std[0:r_k, :] = std_max
std[K-r_k:K, :] = std_max
std[:, 0:r_n] = std_max
std[:, N - r_n:N] = std_max
# Scale values to min/max
std_scaled = std / std_max
# Invert values
std_scaled_inv = 1 - std_scaled
# Compute mask
doa_std = psa.Stft(doa.t, doa.f, std_scaled_inv, doa.sample_rate)
doa_std_mask = doa_std.compute_mask(th=params['doa_std_th'])
mask_all = doa_std_mask.apply_mask(directivity_mask).apply_mask(e_mask)
doa_th = doa.apply_mask(mask_all)
## Median average
median_averaged_doa = np.empty(doa.data.shape)
median_averaged_doa.fill(np.nan)
vicinity_size = (2*r_k-1) + (2*r_n-1)
doa_median_average_nan_th = params['doa_median_average_nan_th']
vicinity_radius = params['median_filter_vicinity_radius']
if np.size(vicinity_radius) == 1:
# Square!
r_k = vicinity_radius
r_n = vicinity_radius
elif np.size(vicinity_radius) == 2:
# Rectangle! [k, n]
r_k = vicinity_radius[0]
r_n = vicinity_radius[1]
else:
        raise ValueError('median_filter_vicinity_radius must have size 1 or 2')
# TODO: optimize the for loop
for k in range(r_k, K - r_k):
for n in range(r_n, N - r_n):
azis = discard_nans(doa_th.data[0, k - r_k:k + r_k + 1, n - r_n:n + r_n + 1].flatten())
if azis.size > vicinity_size * doa_median_average_nan_th:
median_averaged_doa[0, k, n] = circmedian(azis, 'rad')
eles = discard_nans(doa_th.data[1, k - r_k:k + r_k + 1, n - r_n:n + r_n + 1].flatten())
if eles.size > vicinity_size * doa_median_average_nan_th:
median_averaged_doa[1, k, n] = np.median(eles)
doa_th_median = psa.Stft(doa.t, doa.f, median_averaged_doa, doa.sample_rate)
## Plot stuff
if params['plot']:
psa.plot_doa(doa, title='doa')
psa.plot_doa(doa.apply_mask(e_mask), title='e mask')
psa.plot_doa(doa.apply_mask(directivity_mask), title='directivity mask')
psa.plot_doa(doa.apply_mask(doa_std_mask), title='doa std mask')
psa.plot_doa(doa_th, title='doa mask all')
psa.plot_doa(doa_th_median, title='doa circmedian')
plt.show()
## Fold values into a vector
# Get a list of bins with the position estimation according to the selected doa_method
# TODO: OPTIMIZE
active_windows = []
position = []
for n in range(N):
azi = discard_nans(doa_th_median.data[0, :, n])
ele = discard_nans(doa_th_median.data[1, :, n])
if np.size(azi) < params['num_min_valid_bins']:
# Empty! not enough suitable doa values in this analysis window
pass
else:
active_windows.append(n)
position.append([rad2deg(azi), rad2deg(ele)])
# result = [bin, class_id, azi, ele] with likely repeated bin instances
result = []
label = params['default_class_id']
for window_idx, window in enumerate(active_windows):
num_bins = np.shape(position[window_idx])[1]
for b in range(num_bins):
azi = position[window_idx][0][b]
ele = position[window_idx][1][b]
result.append([window, label, azi, ele])
# Perform the window transformation by averaging within frame
## TODO: assert our bins are smaller than required ones
current_window_hop = (params['window_size'] - params['window_overlap']) / float(sr)
window_factor = params['required_window_hop'] / current_window_hop
    # Since frames are ordered (at least they should be), we can optimise a little
last_frame = -1
# result_quantized = [frame, [class_id, azi, ele],[class_id, azi, ele]... ] without repeated bin instances
result_quantized = []
for row in result:
frame = row[0]
new_frame = int(np.floor(frame / window_factor))
if new_frame == last_frame:
result_quantized[-1].append([row[1], row[2], row[3]])
else:
result_quantized.append([new_frame, [row[1], row[2], row[3]]])
last_frame = new_frame
return result_quantized
# Assumes overlapping, compute (1,2)-Kmeans on each segment
def group_events(result_quantized, params):
"""
Segmentate an array of doas into events
:param result_quantized: an array containing frames and doas
in the form [frame, [class_id, azi, ele],[class_id, azi, ele]... ]
without repeated frame instances, with ordered frames
:param params: params dict
:return: metadata_result_array, result_dict
metadata_result_array: array with one event per row, in the form
[sound_event_recording,start_time,end_time,ele,azi,dist]
result_dict: dict with one frame per key, in the form:
{frame: [class_id, azi, ele] or [[class_id1, azi1, ele1], [class_id2, azi2, ele2]]}
"""
## Generate result_averaged_dict: grouping doas per frame into 1 or 2 clusters
## result_averaged_dict = {frame: [label, azi, ele] or [[label, azi1, ele1],label, azi2, ele2]]}
result_averaged_dict = {}
frames = []
for row in result_quantized:
frames.append(row[0])
std_azis = []
std_eles = []
std_all = []
std_th = params['min_std_overlapping']
label = params['default_class_id']
for r_idx, row in enumerate(result_quantized):
# Get all doas
frame = row[0]
azis = []
eles = []
for v in row[1:]:
azis.append(v[1])
eles.append(v[2])
# Compute std of doas
std_azis.append(scipy.stats.circstd(azis, high=180, low=-180))
std_eles.append(np.std(eles))
std_all.append(std_azis[-1]/2 + std_eles[-1])
# If big std, we assume 2-overlap
if std_all[-1] >= std_th:
# 2 clusters:
x = deg2rad(np.asarray([azis, eles]).T)
try:
kmeans2 = HybridKMeans(n_init=params['num_init_kmeans']).fit(x)
except RuntimeWarning:
# All points in x are equal...
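                # (catching RuntimeWarning as an exception only works if
                # warnings have been promoted to errors, e.g. via
                # warnings.filterwarnings('error'))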
result_averaged_dict[frame] = [label, rad2deg(x[0,0]), rad2deg(x[0,1])]
continue
# Keep the centroids of this frame
result_averaged_dict[frame] = []
for c in kmeans2.cluster_centers_:
azi = rad2deg(c[0])
ele = rad2deg(c[1])
result_averaged_dict[frame].append([label, azi, ele])
else:
# 1 cluster: directly compute the median and keep it
azi = circmedian(np.asarray(azis), unit='deg')
ele = np.median(eles)
result_averaged_dict[frame] = [label, azi, ele]
if params['plot']:
plt.figure()
plt.suptitle('kmeans stds')
plt.scatter(frames,std_all,label='all')
plt.axhline(y=std_th)
plt.legend()
plt.grid()
plt.show()
## Group doas by distance and time proximity
# Generate event_dict = { event_id: [ [label, azi_frame, ele_frame] ...}
# each individual event is a key, and values is a list of [frame, azi, ele]
d_th = params['max_angular_distance_within_event']
frame_th = params['max_frame_distance_within_event']
event_idx = 0
event_dict = {}
# Ensure ascending order
    frames = sorted(result_averaged_dict.keys())
# TODO: write in a more modular way
for frame in frames:
value = result_averaged_dict[frame]
if len(value) == 3:
# One source
azi = value[1]
ele = value[2]
if not bool(event_dict):
# Empty: append
event_dict[event_idx] = [[frame, azi, ele]]
event_idx += 1
else:
# Compute distance with all previous frames
new_event = True # default
for idx in range(event_idx):
# Compute distance with median of all previous
azis = np.asarray(event_dict[idx])[:, 1]
eles = np.asarray(event_dict[idx])[:, 2]
median_azi = circmedian(azis, unit='deg')
median_ele = np.median(eles)
d = distance_between_spherical_coordinates_rad(deg2rad(median_azi),
deg2rad(median_ele),
deg2rad(azi),
deg2rad(ele))
last_frame, last_azi, last_ele = event_dict[idx][-1]
if d < d_th and abs(frame - last_frame) < frame_th:
# Same event
new_event = False
event_dict[idx].append([frame, azi, ele])
break
if new_event:
event_dict[event_idx] = [[frame, azi, ele]]
event_idx += 1
elif len(value) == 2:
# Two sources
for v in value:
azi = v[1]
ele = v[2]
if not bool(event_dict):
# Empty: append
event_dict[event_idx] = [[frame, azi, ele]]
event_idx += 1
# print(event_dict)
else:
# Compute distance with previous frame
new_event = True
for idx in range(event_idx):
# Compute distance with median of all previous frames
azis = np.asarray(event_dict[idx])[:, 1]
eles = np.asarray(event_dict[idx])[:, 2]
median_azi = circmedian(azis, unit='deg')
median_ele = np.median(eles)
d = distance_between_spherical_coordinates_rad(deg2rad(median_azi),
deg2rad(median_ele),
deg2rad(azi),
deg2rad(ele))
last_frame, last_azi, last_ele = event_dict[idx][-1]
if d < d_th and abs(frame - last_frame) < frame_th:
# Same event
new_event = False
event_dict[idx].append([frame, azi, ele])
break
if new_event:
event_dict[event_idx] = [[frame, azi, ele]]
event_idx += 1
## Explicitly avoid overlapping > 2
# Generate event_dict_no_overlap: pop doas (in ascending order) if more than 2 overlapping events
# TODO: more sophisticated algorithm based on event confidence or similar
# Get max frame (it might be over 3000)
max_frame = 0
    for event_idx, event_values in event_dict.items():
end_frame = event_values[-1][0]
if end_frame >= max_frame:
max_frame = end_frame
# Compute the number of events per frame
events_per_frame = []
for i in range(max_frame+1):
events_per_frame.append([])
    for event_idx, event_values in event_dict.items():
start_frame = event_values[0][0]
end_frame = event_values[-1][0]
for frame in range(start_frame, end_frame + 1):
events_per_frame[frame].append(event_idx)
# Pop exceeding events
for i, e in enumerate(events_per_frame):
while len(e) > 2:
e.pop()
# Build event_dict_no_overlap from events_per_frame
event_dict_no_overlap = {}
    for event_idx, event_values in event_dict.items():
event_dict_no_overlap[event_idx] = []
for e in event_values:
frame = e[0]
if event_idx in events_per_frame[frame]:
event_dict_no_overlap[event_idx].append(e)
## Filter events to eliminate the spureous ones
event_dict_filtered = {}
filtered_event_idx = 0
min_frames = params['min_num_frames_per_event']
    for event_idx, v in event_dict_no_overlap.items():
        if len(v) >= min_frames:
            event_dict_filtered[filtered_event_idx] = v
            filtered_event_idx += 1
## Build metadata result array
offset = params['frame_offset']
if np.size(offset) == 1:
pre_offset = post_offset = offset
    elif np.size(offset) == 2:
        # Separate pre and post offsets
        pre_offset = offset[0]
        post_offset = offset[1]
    else:
        raise ValueError('frame_offset must have size 1 or 2')
hop_size = params['required_window_hop'] # s
metadata_result_array = build_metadata_result_array_from_event_dict(event_dict_filtered,
label,
hop_size,
pre_offset,
post_offset)
## Build result dictionary
result_dict = build_result_dict_from_metadata_array(metadata_result_array,
hop_size)
return metadata_result_array, result_dict
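if __name__ == "__main__":
    # Minimal sketch of the frame quantization performed at the end of
    # estimate_doa(): STFT frames are mapped onto the coarser hop required by
    # the evaluation. The numbers below are illustrative assumptions, not
    # values taken from a real params dict.
    sr = 48000
    window_size, window_overlap = 512, 256
    required_window_hop = 0.02  # [s], assumed
    current_window_hop = (window_size - window_overlap) / float(sr)
    window_factor = required_window_hop / current_window_hop
    for stft_frame in (0, 10, 100):
        print(stft_frame, '->', int(np.floor(stft_frame / window_factor)))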
|
114661
|
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from esmvalcore.experimental import Recipe
from esmvalcore.experimental.recipe_output import DataFile
from ewatercycle.forcing import generate, load
from ewatercycle.forcing._lisflood import LisfloodForcing
def test_plot():
f = LisfloodForcing(
directory=".",
start_time="1989-01-02T00:00:00Z",
end_time="1999-01-02T00:00:00Z",
)
with pytest.raises(NotImplementedError):
f.plot()
def create_netcdf(var_name, filename):
var = 15 + 8 * np.random.randn(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
ds = xr.Dataset(
{var_name: (["longitude", "latitude", "time"], var)},
coords={
"lon": (["longitude", "latitude"], lon),
"lat": (["longitude", "latitude"], lat),
"time": pd.date_range("2014-09-06", periods=3),
},
)
ds.to_netcdf(filename)
return DataFile(filename)
@pytest.fixture
def mock_recipe_run(monkeypatch, tmp_path):
"""Overload the `run` method on esmvalcore Recipe's."""
data = {}
# TODO add lisvap input files once implemented, see issue #96
class MockTaskOutput:
data_files = (
create_netcdf("pr", tmp_path / "lisflood_pr.nc"),
create_netcdf("tas", tmp_path / "lisflood_tas.nc"),
)
def mock_run(self):
"""Store recipe for inspection and return dummy output."""
nonlocal data
data["data_during_run"] = self.data
return {"diagnostic_daily/script": MockTaskOutput()}
monkeypatch.setattr(Recipe, "run", mock_run)
return data
class TestGenerateRegionFromShapeFile:
@pytest.fixture
def forcing(self, mock_recipe_run, sample_shape):
return generate(
target_model="lisflood",
dataset="ERA5",
start_time="1989-01-02T00:00:00Z",
end_time="1999-01-02T00:00:00Z",
shape=sample_shape,
)
@pytest.fixture
def reference_recipe(self):
return {
"datasets": [
{
"dataset": "ERA5",
"project": "OBS6",
"tier": 3,
"type": "reanaly",
"version": 1,
}
],
"diagnostics": {
"diagnostic_daily": {
"description": "LISFLOOD input "
"preprocessor for "
"ERA-Interim and ERA5 "
"data",
"scripts": {
"script": {
"catchment": "Rhine",
"script": "hydrology/lisflood.py",
}
},
"variables": {
"pr": {
"end_year": 1999,
"mip": "day",
"preprocessor": "daily_water",
"start_year": 1989,
},
"rsds": {
"end_year": 1999,
"mip": "day",
"preprocessor": "daily_radiation",
"start_year": 1989,
},
"tas": {
"end_year": 1999,
"mip": "day",
"preprocessor": "daily_temperature",
"start_year": 1989,
},
"tasmax": {
"end_year": 1999,
"mip": "day",
"preprocessor": "daily_temperature",
"start_year": 1989,
},
"tasmin": {
"end_year": 1999,
"mip": "day",
"preprocessor": "daily_temperature",
"start_year": 1989,
},
"tdps": {
"end_year": 1999,
"mip": "Eday",
"preprocessor": "daily_temperature",
"start_year": 1989,
},
"uas": {
"end_year": 1999,
"mip": "day",
"preprocessor": "daily_windspeed",
"start_year": 1989,
},
"vas": {
"end_year": 1999,
"mip": "day",
"preprocessor": "daily_windspeed",
"start_year": 1989,
},
},
}
},
"documentation": {
"authors": ["verhoeven_stefan", "kalverla_peter", "andela_bouwe"],
"projects": ["ewatercycle"],
"references": ["acknow_project"],
},
"preprocessors": {
"daily_radiation": {
"convert_units": {"units": "J m-2 " "day-1"},
"custom_order": True,
"extract_region": {
"end_latitude": 52.2,
"end_longitude": 11.9,
"start_latitude": 46.3,
"start_longitude": 4.1,
},
"extract_shape": {"crop": True, "method": "contains"},
"regrid": {
"lat_offset": True,
"lon_offset": True,
"scheme": "linear",
"target_grid": "0.1x0.1",
},
},
"daily_temperature": {
"convert_units": {"units": "degC"},
"custom_order": True,
"extract_region": {
"end_latitude": 52.2,
"end_longitude": 11.9,
"start_latitude": 46.3,
"start_longitude": 4.1,
},
"extract_shape": {"crop": True, "method": "contains"},
"regrid": {
"lat_offset": True,
"lon_offset": True,
"scheme": "linear",
"target_grid": "0.1x0.1",
},
},
"daily_water": {
"convert_units": {"units": "kg m-2 d-1"},
"custom_order": True,
"extract_region": {
"end_latitude": 52.2,
"end_longitude": 11.9,
"start_latitude": 46.3,
"start_longitude": 4.1,
},
"extract_shape": {"crop": True, "method": "contains"},
"regrid": {
"lat_offset": True,
"lon_offset": True,
"scheme": "linear",
"target_grid": "0.1x0.1",
},
},
"daily_windspeed": {
"custom_order": True,
"extract_region": {
"end_latitude": 52.2,
"end_longitude": 11.9,
"start_latitude": 46.3,
"start_longitude": 4.1,
},
"extract_shape": {"crop": True, "method": "contains"},
"regrid": {
"lat_offset": True,
"lon_offset": True,
"scheme": "linear",
"target_grid": "0.1x0.1",
},
},
"general": {
"custom_order": True,
"extract_region": {
"end_latitude": 52.2,
"end_longitude": 11.9,
"start_latitude": 46.3,
"start_longitude": 4.1,
},
"extract_shape": {"crop": True, "method": "contains"},
"regrid": {
"lat_offset": True,
"lon_offset": True,
"scheme": "linear",
"target_grid": "0.1x0.1",
},
},
},
}
def test_result(self, forcing, tmp_path, sample_shape):
expected = LisfloodForcing(
directory=str(tmp_path),
start_time="1989-01-02T00:00:00Z",
end_time="1999-01-02T00:00:00Z",
shape=str(sample_shape),
PrefixPrecipitation="lisflood_pr.nc",
PrefixTavg="lisflood_tas.nc",
)
assert forcing == expected
def test_recipe_configured(
self, forcing, mock_recipe_run, reference_recipe, sample_shape
):
actual = mock_recipe_run["data_during_run"]
# Remove long description and absolute path so assert is easier
actual_desc = actual["documentation"]["description"]
del actual["documentation"]["description"]
actual_shapefile = actual["preprocessors"]["general"]["extract_shape"][
"shapefile"
]
# Will also del other occurrences of shapefile due to extract shape object
# being shared between preprocessors
del actual["preprocessors"]["general"]["extract_shape"]["shapefile"]
assert actual == reference_recipe
assert actual_shapefile == sample_shape
assert "LISFLOOD" in actual_desc
def test_saved_yaml(self, forcing, tmp_path):
saved_forcing = load(tmp_path)
        # shape is not included in the yaml file
forcing.shape = None
assert forcing == saved_forcing
|
114675
|
def test_modulemap(snapshot):
snapshot.assert_match([1, 2, 4])
def test_runlist():
assert 1 == 1
|
114676
|
from stellar.models import get_unique_hash, Table, Snapshot
def test_get_unique_hash():
assert get_unique_hash()
assert get_unique_hash() != get_unique_hash()
assert len(get_unique_hash()) == 32
def test_table():
table = Table(
table_name='hapsu',
snapshot=Snapshot(
snapshot_name='snapshot',
project_name='myproject',
hash='3330484d0a70eecab84554b5576b4553'
)
)
assert len(table.get_table_name('master')) == 24
|
114678
|
from base_component import *
import os
import xml.etree.ElementTree as ET
class ParseV4L2Headers(Component):
"""
Component which parses the v4l2 headers to get ioctl function pointer structure members.
"""
def __init__(self, value_dict):
c2xml_path = None
kernel_src_dir = None
makeout_file = None
separate_out = None
v4l2_func_list_file = None
v4l2_id_cmd_out = None
opt_bin_path = None
llvm_bc_out = ""
v4l2_config_processor_so = None
if 'c2xml_bin' in value_dict:
c2xml_path = value_dict['c2xml_bin']
if 'kernel_src_dir' in value_dict:
kernel_src_dir = value_dict['kernel_src_dir']
if 'makeout' in value_dict:
makeout_file = value_dict['makeout']
if 'out' in value_dict:
separate_out = value_dict['out']
if 'v4l2_func_list' in value_dict:
v4l2_func_list_file = value_dict['v4l2_func_list']
if 'llvm_bc_out' in value_dict:
llvm_bc_out = value_dict["llvm_bc_out"]
if 'v4l2_id_cmd_out' in value_dict:
v4l2_id_cmd_out = value_dict['v4l2_id_cmd_out']
if 'opt_bin_path' in value_dict:
opt_bin_path = value_dict['opt_bin_path']
if 'v4l2_config_processor_so' in value_dict:
v4l2_config_processor_so = value_dict['v4l2_config_processor_so']
self.kernel_src_dir = kernel_src_dir
self.c2xml_path = c2xml_path
self.makeout_file = makeout_file
self.separate_out = separate_out
self.v4l2_func_list_file = v4l2_func_list_file
self.v4l2_id_cmd_out = v4l2_id_cmd_out
self.opt_bin_path = opt_bin_path
self.llvm_bc_out = llvm_bc_out
self.v4l2_config_processor_so = v4l2_config_processor_so
def setup(self):
"""
Perform setup.
:return: Error msg or none
"""
if not os.path.exists(self.v4l2_config_processor_so):
return "Provided v4l2 config processor so path:" + str(self.v4l2_config_processor_so) + " does not exist."
if not os.path.exists(self.c2xml_path):
return "Provided c2xml path:" + str(self.c2xml_path) + " does not exist."
if not os.path.isdir(self.kernel_src_dir) or not os.path.isdir(os.path.join(self.kernel_src_dir, 'include')):
return "Provided kernel src directory is invalid. " \
"The base directory is not present or it does not contain include folder"
if self.v4l2_func_list_file is None:
return "No file specified to output v4l2 func list."
if not os.path.exists(self.opt_bin_path):
return "Provided opt bin path:" + str(self.opt_bin_path) + " does not exist."
if self.v4l2_id_cmd_out is None:
return "No file specified to output v4l2 id -> cmdid list."
return None
def perform(self):
"""
Parse the headers
:return: True or False
"""
v4l2_hdr_file = os.path.join(self.kernel_src_dir, "include/media/v4l2-ioctl.h")
if os.path.exists(v4l2_hdr_file):
log_success("Grep ran successfully to find ops and operations structures.")
log_info("Running c2xml to find entry point configurations.")
target_bc_file = os.path.join(self.llvm_bc_out, "drivers/media/v4l2-core/v4l2-ioctl.llvm.bc")
if not os.path.exists(target_bc_file):
log_error("Unable to find v4l2 base bitcode file:" + str(target_bc_file))
return False
# second, run c2xml on all the header files.
if self.separate_out is None:
self.separate_out = self.kernel_src_dir
ret_val = _run_c2xml(self.c2xml_path, self.makeout_file, v4l2_hdr_file, self.v4l2_func_list_file,
dst_work_dir=self.separate_out)
if ret_val:
ret_val = os.system(self.opt_bin_path + " -analyze -debug -load " + self.v4l2_config_processor_so +
' -v4l2-config-processor -v4l2config=\"' +
self.v4l2_func_list_file + '\" -v4l2idconfig=\"' + self.v4l2_id_cmd_out + '\" ' +
target_bc_file)
return ret_val == 0
            return ret_val
        else:
            log_error("Unable to find v4l2 hdr file:" + str(v4l2_hdr_file))
            return False
def get_name(self):
"""
get component name.
:return: Str
"""
return "ParseV4L2Headers"
def is_critical(self):
"""
This component is not critical.
:return: False
"""
return False
gcc_bins = ['aarch64-linux-android-gcc', 'arm-eabi-gcc']
def _is_comp_binary(arg_zero):
global gcc_bins
for curr_c in gcc_bins:
if str(arg_zero).endswith(curr_c):
return True
return False
def _handle_compile_command(comp_str, dst_includes):
comp_args = comp_str.split()
i = 0
while i < len(comp_args):
curr_arg = comp_args[i].strip()
if curr_arg == "-isystem":
curr_arg1 = "-I" + comp_args[i+1].strip()
if curr_arg1 not in dst_includes:
dst_includes.append(curr_arg1)
if curr_arg == "-include":
curr_arg1 = comp_args[i+1].strip()
if "dhd_sec_feature.h" not in curr_arg1:
final_arg = curr_arg + " " + curr_arg1
if final_arg not in dst_includes:
dst_includes.append(final_arg)
if curr_arg[0:2] == "-I":
if curr_arg not in dst_includes:
if 'drivers' not in curr_arg and 'sound' not in curr_arg:
dst_includes.append(curr_arg)
i += 1
def _run_c2xml(c2xml_bin, makeout_file, target_v4l2_hdr, output_file, dst_work_dir=None):
fp = open(makeout_file, "r")
all_comp_lines = fp.readlines()
fp.close()
all_hdr_options = list()
all_hdr_options.append("-Iinclude")
for comp_line in all_comp_lines:
comp_line = comp_line.strip()
comp_args = comp_line.split()
if len(comp_args) > 2:
if _is_comp_binary(comp_args[0]) or _is_comp_binary(comp_args[1]):
_handle_compile_command(comp_line, all_hdr_options)
all_hdr_options.append("-D__KERNEL__")
all_hdr_files = [target_v4l2_hdr]
dummy_out_file = "/tmp/dummy_out.xml"
output_fp = open(output_file, "w")
for curr_hdr_file in all_hdr_files:
curr_hdr_file = curr_hdr_file.strip()
cmd_line = " ".join(all_hdr_options)
cmd_line = c2xml_bin + " " + cmd_line + " " + curr_hdr_file + " > " + dummy_out_file + " 2>/dev/null"
back_wd = os.getcwd()
if dst_work_dir is not None:
os.chdir(dst_work_dir)
# print "Running Command:" + cmd_line
os.system(cmd_line)
if os.path.exists(dummy_out_file) and os.stat(dummy_out_file).st_size > 0:
root = ET.parse(dummy_out_file).getroot()
for curr_s in root:
if curr_s.get("type") == "struct" and curr_s.get("file") == curr_hdr_file and \
curr_s.get("ident") == "v4l2_ioctl_ops":
child_no = 0
for child_s in curr_s:
target_fun_name = child_s.get("ident")
output_fp.write(target_fun_name + "," + str(child_no) + "\n")
child_no += 1
if dst_work_dir is not None:
os.chdir(back_wd)
output_fp.close()
return True
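if __name__ == "__main__":
    # Quick illustration of _handle_compile_command on a made-up compile line
    # (the flags below are hypothetical, not taken from a real makeout file;
    # running this also requires base_component on the path, since the module
    # star-imports it).
    includes = ["-Iinclude"]
    _handle_compile_command(
        "aarch64-linux-android-gcc -Iarch/arm64/include"
        " -isystem /usr/gcc/include -include linux/kconfig.h -c foo.c",
        includes)
    print(includes)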
|
114699
|
import os
import re
from typing import (
List,
Optional,
)
from kb_notes.config import (
Config,
WIKILINK_PATTERN,
)
from kb_notes.helpers import execute_command
from kb_notes.types import WikiLinkRegexMatch
class NoteFinder:
def __init__(self, config: Config):
self.config = config
def get_full_path_for_note(
self,
note_name,
):
return os.path.join(self.config.note_folder, note_name) + ".md"
def get_note_name(self, path):
return os.path.splitext(os.path.basename(path))[0]
def find_backlinks(self, note_name: str) -> List[str]:
backlinks_files = []
backlinks = execute_command(
[
"rg",
"-l",
"-e",
f"\\[\\[{note_name}(#([-a-zA-Z0-9\\.\\s]*))?(\\|([-a-zA-Z0-9\\.\\s]*))?(\\^([-a-zA-Z0-9\\.\\s]*))?\\]\\]",
self.config.note_folder,
],
)
for line in backlinks.split("\n"):
if line:
backlinks_files.append(self.get_note_name(line))
return backlinks_files
def find_children(self, note_name: str) -> List[str]:
        pattern = re.compile(f"^{re.escape(note_name)}[.](?!md)")
notes = self.find_notes()
def is_child(parent, child):
return parent in child and pattern.match(child)
return [note for note in notes if is_child(note_name, note)]
def find_notes(self) -> List[str]:
res = execute_command(
["ls", "-t", self.config.note_folder],
)
return [
self.get_note_name(line)
for line in res.split("\n")
if line and line.endswith(".md")
]
def find_parent(self, note_name: str) -> Optional[str]:
hierarchy = self.get_parent_notes_hierarchy(note_name)
if not hierarchy:
return
return hierarchy[-1]
def get_not_existing_parents(self, note_name: str) -> List[str]:
not_existing_parents = []
for parent in self.get_parent_notes_hierarchy(note_name):
if not os.path.isfile(self.get_full_path_for_note(parent)):
not_existing_parents.append(parent)
return not_existing_parents
@staticmethod
def get_parent_notes_hierarchy(note_name: str) -> List[str]:
hierarchy = note_name.split(".")[:-1]
res = []
for i, _ in enumerate(hierarchy, start=1):
res.append(".".join(hierarchy[:i]))
return res
@staticmethod
def find_links_in_lines(lines: List[str]) -> List[WikiLinkRegexMatch]:
current_buffer_links = []
for line in lines:
for link in WIKILINK_PATTERN.finditer(line):
current_buffer_links.append(
WikiLinkRegexMatch(
name=link["note"],
reference=link["reference"],
alias=link["alias"],
block_reference=link["block_reference"],
original=link.string[link.start() : link.end()],
)
)
return current_buffer_links
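if __name__ == "__main__":
    # get_parent_notes_hierarchy is a pure static method, so it can be
    # sanity-checked without a Config: a dot-separated note name yields every
    # ancestor prefix, shortest first.
    print(NoteFinder.get_parent_notes_hierarchy("python.web.flask"))
    # -> ['python', 'python.web']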
|
114707
|
import os
import argparse
import numpy as np
import processors as pe
from paz.backend.camera import VideoPlayer
from paz.backend.camera import Camera
from demo_pipeline import DetectEigenFaces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Real-time face classifier')
parser.add_argument('-c', '--camera_id', type=int, default=0,
help='Camera device ID')
parser.add_argument('-o', '--offset', type=float, default=0.1,
help='Scaled offset to be added to bounding boxes')
parser.add_argument('-e', '--experiments_path', type=str,
default='experiments',
help='Directory for writing and loading experiments')
parser.add_argument('-d', '--database_path', type=str,
default='database',
help='Directory for the database')
args = parser.parse_args()
if not os.path.exists(args.experiments_path):
os.makedirs(args.experiments_path)
if not os.path.exists(args.database_path):
os.makedirs(args.database_path)
# check if eigenfaces and mean face are already computed
needed_files = ['eigenvalues.npy', 'eigenfaces.npy', 'mean_face.npy']
if set(os.listdir(args.experiments_path)) != set(needed_files):
        raise FileNotFoundError('Need necessary files to run the demo. Please '
                                'run eigenface.py first and then try running '
                                'the demo.')
# check if database is available
needed_files = ['images', 'database.npy']
if set(os.listdir(args.database_path)) != set(needed_files):
        raise FileNotFoundError('Need database to run the demo. Please update '
                                'the database with database.py first and then '
                                'try running the demo.')
eigenfaces = np.load(os.path.join(args.experiments_path, 'eigenfaces.npy'))
mean_face = np.load(os.path.join(args.experiments_path, 'mean_face.npy'))
database_path = os.path.join(args.database_path, 'database.npy')
weights = np.load(database_path, allow_pickle=True).item()
# user defined parameters
thresh = 1e4
norm_order = 2
# measure = pe.CalculateNorm(norm_order)
measure = pe.CalculateCosineSimilarity()
pipeline = DetectEigenFaces(weights, measure, thresh, eigenfaces,
mean_face, [args.offset, args.offset])
camera = Camera(args.camera_id)
player = VideoPlayer((640, 480), pipeline, camera)
player.run()
|
114730
|
from __future__ import annotations
import vtk
from custom_types import *
from ui import ui_utils
import constants
import vtk.util.numpy_support as numpy_support
class GaussianData:
def make_symmetric(self, other: GaussianData):
# reflection = np.eye(3)
# reflection[0, 0] = -1
# self.total_translate = np.einsum('ad,d->a', reflection, other.total_translate)
self.total_translate = other.total_translate.copy()
self.total_translate[0] *= -1
self.total_rotate = other.total_rotate.copy()
# self.total_rotate = np.einsum('ab,bc->ac', reflection, other.total_rotate)
@staticmethod
def to_positive(p):
up_dir = 1
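        # Determine which row of p is most aligned with the world up axis
        # (index 1 == y here), and whether that row points downwards
        # (negative dot product), in which case it must be reflected.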
eye = np.eye(3)
all_dots = (p[:, :] * eye[up_dir, None, :]).sum(-1)
up_axis = all_dots.__abs__().argmax()
return up_axis, all_dots[up_axis] < 0
def permute_p(self, p):
up_dir = 1
p_new = np.eye(3)
p_new[up_dir] = p[self.up_axis]
if self.reflect_up:
p_new[up_dir] = -p_new[up_dir]
p_new[(up_dir + 1) % 3] = p[(self.up_axis + 1) % 3]
p_new[(up_dir + 2) % 3, :] = np.cross(p_new[up_dir, :], p_new[(up_dir + 1) % 3, :])
return p_new
def rotate(self, transition: ui_utils.Transition):
mu = self.mu_baked - transition.transition_origin
mu = np.einsum('ab,b->a', transition.rotation, mu) + transition.transition_origin
self.total_translate = mu - self.mu
self.total_rotate = np.einsum('ab,bc->ac', transition.rotation, self.total_rotate)
def stretch(self, amount):
scale = 0.9 if amount < 0 else 1 / .9
self.eigen = self.eigen * scale
def translate(self, transition: ui_utils.Transition):
self.total_translate = self.total_translate + transition.translation
def get_view_eigen(self):
scale = (self.mu_baked ** 2).sum() / (self.mu ** 2).sum()
return self.eigen * scale
def get_raw_data(self):
# p = np.einsum('da,db->ba', self.p, self.total_rotate)
p = np.einsum('ab,bc->ac', self.total_rotate, self.p.transpose()).transpose()
return self.phi, self.mu_baked, self.eigen, p
def copy_data(self):
return [item.copy() if type(item) is ARRAY else item for item in self.get_raw_data()]
def get_view_data(self):
phi, mu, eigen, p = self.get_raw_data()
p = self.permute_p(p)
return phi, mu, eigen, p
@property
def mu_baked(self) -> ARRAY:
return self.mu + self.total_translate
@property
def phi(self) -> float:
return self.data[0]
@property
def mu(self) -> ARRAY:
return self.data[1]
@property
def eigen(self) -> ARRAY:
return self.data[2]
@property
def p(self) -> ARRAY:
return self.data[3]
@mu.setter
def mu(self, new_mu: ARRAY):
self.data[1] = new_mu
@p.setter
def p(self, new_p: ARRAY):
self.data[3] = new_p
@eigen.setter
def eigen(self, new_eigen: ARRAY):
self.data[2] = new_eigen
def reset(self):
self.total_translate = np.zeros(3)
self.total_rotate = np.eye(3)
def __getitem__(self, item):
return self.data[item]
def __init__(self, gaussian):
self.recover_data = [item.copy() if type(item) is ARRAY else item for item in gaussian]
self.data = list(gaussian)
self.up_axis, self.reflect_up = self.to_positive(self.p)
self.total_translate = np.zeros(3)
self.total_rotate = np.eye(3)
class GaussianStatus(GaussianData):
# copy_constructor
def copy(self: GaussianStatus, render: vtk.vtkRenderer, view_style: ui_utils.ViewStyle,
gaussian_id: Optional[Tuple[int, int]] = None, is_selected: Optional[bool] = None) -> GaussianStatus:
if self.disabled:
return self
gaussian_id = self.gaussian_id if gaussian_id is None else gaussian_id
return GaussianStatus(self.copy_data(), gaussian_id, is_selected or self.is_selected, view_style, render, 1)
@staticmethod
def get_new_gaussian() -> vtk.vtkSphereSource:
return ui_utils.load_vtk_obj(f"{constants.DATA_ROOT}/ui_resources/simple_brick.obj")
def update_gaussian_transform(self, source):
phi, mu, eigen, p = self.get_view_data()
# def replace_mesh(self, mesh: T_Mesh):
# vs, faces = mesh
# vs, faces = vs.detach().cpu(), faces.detach().cpu()
# # vs, faces = mesh_utils.scale_from_ref(mesh, *self.scale)
# source = vtk.vtkPolyData()
# new_vs_vtk = numpy_support.numpy_to_vtk(vs.numpy())
# cells_npy = np.column_stack(
# [np.full(faces.shape[0], 3, dtype=np.int64), faces.numpy().astype(np.int64)]).ravel()
# vs_vtk, faces_vtk = vtk.vtkPoints(), vtk.vtkCellArray()
# vs_vtk.SetData(new_vs_vtk)
# faces_vtk.SetCells(faces.shape[0], numpy_support.numpy_to_vtkIdTypeArray(cells_npy))
#
# source.SetPolys(faces_vtk)
# self.mapper.SetInputData(source)
# self.is_changed = True
# if not self.to_init:
# self.to_init = True
# self.render.AddActor(self.actor)
transform = vtk.vtkTransform()
mat = vtk.vtkMatrix4x4()
p = p * .005
# p = p * eigen[:, None]
for i in range(4):
for j in range(4):
if i > 2:
mat.SetElement(i, j, 0)
elif j > 2:
mat.SetElement(i, j, float(mu[i]))
# mat.SetElement(i, j, 0)
else:
mat.SetElement(i, j, p[j, i])
# mat_t[i, j] = mat.GetElement(i,j)
mat.SetElement(3, 3, 1)
transform.SetMatrix(mat)
transformFilter = vtk.vtkTransformPolyDataFilter()
transformFilter.SetInputData(source)
transformFilter.SetTransform(transform)
transformFilter.Update()
return transformFilter
def update_gaussian(self):
if self.disabled:
return
# source = self.mapper.GetInput()
# source.SetPoints(self.init_points)
# source = self.update_gaussian_transform(source)
# self.mapper.SetInputConnection(source.GetOutputPort())
def end_transition(self, transition: ui_utils.Transition) -> bool:
if self.init_points is None:
return False
if transition.transition_type is ui_utils.EditType.Translating:
self.translate(transition)
return True
elif transition.transition_type is ui_utils.EditType.Rotating:
self.rotate(transition)
return True
elif transition.transition_type is ui_utils.EditType.Scaling:
self.rotate(transition)
return True
return False
def temporary_transition(self, transition: ui_utils.Transition) -> bool:
if self.init_points is None:
return False
source = self.mapper.GetInput()
vs = self.init_points
if transition.transition_type is ui_utils.EditType.Translating:
vs = vs + transition.translation[None, :]
elif transition.transition_type is ui_utils.EditType.Rotating:
vs = vs - transition.transition_origin[None, :]
vs = np.einsum('ad,nd->na', transition.rotation, vs)
vs = vs + transition.transition_origin[None, :]
source.GetPoints().SetData(numpy_support.numpy_to_vtk(vs))
return True
def get_address(self):
if self.disabled:
return f"disabled"
return self.actor.GetAddressAsString('')
@staticmethod
def add_gaussian(render, actor: Optional[vtk.vtkActor]) -> vtk.vtkActor:
if actor is None:
actor = vtk.vtkActor()
mapper = vtk.vtkPolyDataMapper()
actor.GetProperty().SetOpacity(0.3)
actor.SetMapper(mapper)
# source = self.get_new_gaussian()
# init_points = source.GetPoints()
# source = self.update_gaussian_transform(source)
# actor, _ = ui_utils.wrap_mesh(source.GetOutput(), color)
render.AddActor(actor)
ui_utils.set_default_properties(actor, (1., 1., .1))
return actor
def replace_part(self, part_mesh: Optional[vtk.vtkPolyData]):
if part_mesh is not None and not self.disabled:
self.init_points = numpy_support.vtk_to_numpy(part_mesh.GetPoints().GetData())
points = vtk.vtkPoints()
points.SetData(numpy_support.numpy_to_vtk(self.init_points))
part_mesh.SetPoints(points)
self.mapper.SetInputData(part_mesh)
def set_color(self):
if self.disabled:
return
properties = self.actor.GetProperty()
properties.SetOpacity(self.opacity)
properties.SetColor(*self.color)
def turn_off(self):
if self.disabled:
return
self.actor.GetProperty().SetOpacity(0)
self.actor.PickableOff()
def turn_on(self):
if self.disabled:
return
self.actor.GetProperty().SetOpacity(self.opacity)
self.actor.PickableOn()
@property
def is_not_selected(self):
return not self.is_selected
@property
def disabled(self):
return self.mapper is None #or self.mapper.GetInput() is None
def make_symmetric(self, force_include: bool):
if self.disabled or self.twin is None or (not force_include and self.included != self.twin.included):
return
super(GaussianStatus, self).make_symmetric(self.twin)
if force_include:
self.included = self.twin.included
self.update_gaussian()
def apply_affine(self, button: ui_utils.Buttons, key: str):
axis = {"left": 0, "right": 0, "up": 2, "down": 2, "a": 1, "z": 1}[key]
sign = {"left": 1, "right": -1, "up": 1, "down": -1, "a": 1, "z": -1}[key]
if self.disabled or button not in (ui_utils.Buttons.translate, ui_utils.Buttons.stretch, ui_utils.Buttons.rotate):
return
elif button is ui_utils.Buttons.translate:
vec = np.zeros(3)
vec[axis] = .01 * sign
self.translate(vec)
elif button is ui_utils.Buttons.rotate:
self.rotate(sign * .1, axis)
else:
self.stretch(.01 * sign)
self.update_gaussian()
@property
def opacity(self) -> float:
if self.is_selected:
if self.included:
opacity = self.view_style.opacity + 0.4
else:
opacity = self.view_style.opacity
else:
if self.included:
opacity = self.view_style.opacity + 0.2
else:
opacity = self.view_style.opacity
return max(0., min(1., opacity))
@property
def color(self) -> Tuple[float, float, float]:
if self.is_selected:
return self.view_style.selected_color
if self.included:
return self.view_style.included_color
else:
return self.view_style.base_color
def toggle_inclusion(self, included: Optional[bool] = None):
if self.disabled:
return
if included is None:
included = not self.included
if included != self.included:
self.included = not self.included
self.set_color()
def toggle_selection(self):
if self.disabled:
return
self.is_selected = not self.is_selected
self.set_color()
def reset(self):
if self.disabled:
return
super(GaussianStatus, self).reset()
self.included = False
self.is_selected = False
self.set_color()
# self.update_gaussian()
def delete(self, render):
if not self.disabled:
render.RemoveActor(self.actor)
if self.twin is not None:
self.twin.twin = None
@property
def parent_id(self):
return self.gaussian_id[0]
@property
def child_id(self):
return self.gaussian_id[1]
@property
def mapper(self):
if self.actor is None:
return None
return self.actor.GetMapper()
def __init__(self, gaussian, gaussian_id: Tuple[int, int], is_selected: bool, view_style: ui_utils.ViewStyle,
render: vtk.vtkRenderer, normalized_phi: float, actor: Optional[vtk.vtkActor] = None):
self.view_style = view_style
super(GaussianStatus, self).__init__(gaussian)
self.gaussian_id = gaussian_id
self.twin: Optional[GaussianStatus] = None
self.init_points = None
if normalized_phi > 0.001 or actor is not None:
self.actor = self.add_gaussian(render, actor)
self.is_selected = is_selected
self.included = True
self.set_color()
else:
self.actor = None
self.is_selected = False
self.included = False
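# --- Illustrative sketch (not from the original file) ---
# permute_p() above reorders the rows of a 3x3 basis so the detected "up"
# axis lands on row 1 (Y), optionally reflected, then rebuilds the third
# row with a cross product so the frame stays right-handed. A minimal
# standalone version of that idea (p is assumed to be a 3x3 rotation):
def _permute_up_sketch(p, up_axis, reflect_up=False):
    up = 1
    p_new = np.eye(3)
    p_new[up] = -p[up_axis] if reflect_up else p[up_axis]
    p_new[(up + 1) % 3] = p[(up_axis + 1) % 3]
    p_new[(up + 2) % 3] = np.cross(p_new[up], p_new[(up + 1) % 3])
    return p_new
# e.g. _permute_up_sketch(np.eye(3), up_axis=2) maps the original Z row onto Y.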
|
114743
|
class OperationStaResult(object):
def __init__(self):
self.total = None
self.wait = None
self.processing = None
self.success = None
self.fail = None
self.stop = None
self.timeout = None
def getTotal(self):
return self.total
def setTotal(self, total):
self.total = total
def getWait(self):
return self.wait
def setWait(self, wait):
self.wait = wait
def getProcessing(self):
return self.processing
def setProcessing(self, processing):
self.processing = processing
def getSuccess(self):
return self.success
def setSuccess(self, success):
self.success = success
def getFail(self):
return self.fail
def setFail(self, fail):
self.fail = fail
def getStop(self):
return self.stop
def setStop(self, stop):
self.stop = stop
def getTimeout(self):
return self.timeout
def setTimeout(self, timeout):
self.timeout = timeout
|
114745
|
import json
from mock import ANY
from lib.response import OkResponse
from tests.commands.commands_test_base import CommandsTestBase
class GetAudioTest(CommandsTestBase):
def test_get_audio(self):
self.pipeline_mock.amix.getAudioVolumes.return_value = [1.0, 0.0, 0.25]
response = self.commands.get_audio()
self.assertIsInstance(response, OkResponse)
self.assertEqual(response.args, ('audio_status', ANY))
self.assertEqual(json.loads(response.args[1]), {
"cam1": 1.0,
"cam2": 0.0,
"grabber": 0.25
})
|
114763
|
import numpy
# NOTE: entropy2() below relies on range2() and histo() helpers from the
# same toolbox; they are assumed to be in scope (not defined in this file).
def entropy2(*args):
''' E = ENTROPY2(MTX,BINSIZE)
Compute the first-order sample entropy of MTX. Samples of VEC are
first discretized. Optional argument BINSIZE controls the
discretization, and defaults to 256/(max(VEC)-min(VEC)).
NOTE: This is a heavily biased estimate of entropy when you
don't have much data.
<NAME>, 6/96. Ported to Python by <NAME>, 10/15. '''
vec = numpy.array(args[0])
# if 2D flatten to a vector
if len(vec.shape) != 1 and (vec.shape[0] != 1 or vec.shape[1] != 1):
vec = vec.flatten()
(mn, mx) = range2(vec)
if len(args) > 1:
binsize = args[1]
# FIX: why is this max in the Matlab code; it's just a float?
# we insure that vec isn't 2D above, so this shouldn't be needed
#nbins = max( float(mx-mn)/float(binsize) )
nbins = float(mx-mn) / float(binsize)
else:
nbins = 256
[bincount, bins] = histo(vec, nbins)
## Collect non-zero bins:
H = bincount[ numpy.where(bincount > 0) ]
H = H / float(sum(H))
return -sum(H * numpy.log2(H))
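# --- Illustrative sketch (not from the original file) ---
# A self-contained equivalent of entropy2() using numpy.histogram instead
# of the toolbox's range2()/histo() helpers; handy as a sanity check.
def entropy2_np(vec, nbins=256):
    vec = numpy.asarray(vec).flatten()
    bincount, _ = numpy.histogram(vec, bins=int(nbins))
    H = bincount[bincount > 0].astype(float)
    H = H / H.sum()
    return -numpy.sum(H * numpy.log2(H))
# For uniform data the result approaches log2(nbins) as the sample count grows.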
|
114779
|
import os
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import numpy as np
import torch
import torchvision.datasets as datasets
class CIFAR10NoisyLabels(datasets.CIFAR10):
"""CIFAR10 Dataset with noisy labels.
Args:
noise_type (string): Noise type (default: 'symmetric').
The value is either 'symmetric' or 'asymmetric'.
noise_rate (float): Probability of label corruption (default: 0.0).
seed (int): Random seed (default: 12345).
This is a subclass of the `CIFAR10` Dataset.
"""
def __init__(self,
noise_type='symmetric',
noise_rate=0.0,
seed=12345,
**kwargs):
super(CIFAR10NoisyLabels, self).__init__(**kwargs)
self.seed = seed
self.num_classes = 10
self.flip_pairs = np.asarray([[9, 1], [2, 0], [4, 7], [3, 5], [5, 3]])
if noise_rate > 0:
if noise_type == 'symmetric':
self.symmetric_noise(noise_rate)
elif noise_type == 'asymmetric':
self.asymmetric_noise(noise_rate)
else:
raise ValueError(
'expected noise_type is either symmetric or asymmetric '
'(got {})'.format(noise_type))
def symmetric_noise(self, noise_rate):
"""Insert symmetric noise.
For all classes, ground truth labels are replaced with uniform random
classes.
"""
np.random.seed(self.seed)
targets = np.array(self.targets)
mask = np.random.rand(len(targets)) <= noise_rate
rnd_targets = np.random.choice(self.num_classes, mask.sum())
targets[mask] = rnd_targets
targets = [int(target) for target in targets]
self.targets = targets
def asymmetric_noise(self, noise_rate):
"""Insert asymmetric noise.
Ground truth labels are flipped by mimicking real mistakes between
similar classes. Following `Making Deep Neural Networks Robust to Label Noise: a Loss Correction Approach`_,
ground truth labels are replaced with
* truck -> automobile,
* bird -> airplane,
* deer -> horse
* cat -> dog
* dog -> cat
.. _Making Deep Neural Networks Robust to Label Noise: a Loss Correction Approach
https://arxiv.org/abs/1609.03683
"""
np.random.seed(self.seed)
targets = np.array(self.targets)
for i, target in enumerate(targets):
if target in self.flip_pairs[:, 0]:
if np.random.uniform(0, 1) <= noise_rate:
idx = int(np.where(self.flip_pairs[:, 0] == target)[0])
targets[i] = self.flip_pairs[idx, 1]
targets = [int(x) for x in targets]
self.targets = targets
def T(self, noise_type, noise_rate):
if noise_type == 'symmetric':
T = (torch.eye(self.num_classes) * (1 - noise_rate) +
(torch.ones([self.num_classes, self.num_classes]) /
self.num_classes * noise_rate))
elif noise_type == 'asymmetric':
T = torch.eye(self.num_classes)
for i, j in self.flip_pairs:
T[i, i] = 1 - noise_rate
T[i, j] = noise_rate
return T
class CIFAR100NoisyLabels(datasets.CIFAR100):
"""CIFAR100 Dataset with noisy labels.
Args:
noise_type (string): Noise type (default: 'symmetric').
The value is either 'symmetric' or 'asymmetric'.
noise_rate (float): Probability of label corruption (default: 0.0).
seed (int): Random seed (default: 12345).
This is a subclass of the `CIFAR100` Dataset.
"""
def __init__(self,
                 noise_type='symmetric',
noise_rate=0.0,
seed=12345,
**kwargs):
super(CIFAR100NoisyLabels, self).__init__(**kwargs)
self.seed = seed
self.num_classes = 100
self.num_superclasses = 20
if noise_rate > 0:
if noise_type == 'symmetric':
self.symmetric_noise(noise_rate)
elif noise_type == 'asymmetric':
self.asymmetric_noise(noise_rate)
else:
raise ValueError(
'expected noise_type is either symmetric or asymmetric '
'(got {})'.format(noise_type))
def symmetric_noise(self, noise_rate):
"""Symmetric noise in CIFAR100.
For all classes, ground truth labels are replaced with uniform random
classes.
"""
np.random.seed(self.seed)
targets = np.array(self.targets)
mask = np.random.rand(len(targets)) <= noise_rate
rnd_targets = np.random.choice(self.num_classes, mask.sum())
targets[mask] = rnd_targets
targets = [int(x) for x in targets]
self.targets = targets
def asymmetric_noise(self, noise_rate):
"""Insert asymmetric noise.
Ground truth labels are flipped by mimicking real mistakes between
similar classes. Following `Making Deep Neural Networks Robust to Label Noise: a Loss Correction Approach`_,
ground truth labels are flipped into the next class circularly within
the same superclasses
.. _Making Deep Neural Networks Robust to Label Noise: a Loss Correction Approach
https://arxiv.org/abs/1609.03683
"""
np.random.seed(self.seed)
targets = np.array(self.targets)
Tdata = self.T('asymmetric', noise_rate).numpy().astype(np.float64)
Tdata = Tdata / np.sum(Tdata, axis=1)[:, None]
for i, target in enumerate(targets):
one_hot = np.random.multinomial(1, Tdata[target, :], 1)[0]
targets[i] = np.where(one_hot == 1)[0]
targets = [int(x) for x in targets]
self.targets = targets
def _load_coarse_targets(self):
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
coarse_targets = []
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, 'rb') as f:
if sys.version_info[0] == 2:
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
coarse_targets.extend(entry['coarse_labels'])
return coarse_targets
def T(self, noise_type, noise_rate):
if noise_type == 'symmetric':
T = (torch.eye(self.num_classes) * (1 - noise_rate) +
(torch.ones([self.num_classes, self.num_classes]) /
self.num_classes * noise_rate))
elif noise_type == 'asymmetric':
num_classes = self.num_classes
num_superclasses = self.num_superclasses
num_subclasses = num_classes // num_superclasses
targets = np.array(self.targets)
coarse_targets = np.asarray(self._load_coarse_targets())
T = torch.eye(num_classes) * (1 - noise_rate)
for i in range(num_superclasses):
subclass_targets = np.unique(targets[coarse_targets == i])
clean = subclass_targets
noisy = np.concatenate([clean[1:], clean[:1]])
for j in range(num_subclasses):
T[clean[j], noisy[j]] = noise_rate
return T
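# --- Illustrative sketch (not from the original file) ---
# Sanity check for the symmetric transition matrix built by T() above:
# every row sums to 1 and the diagonal equals (1 - r) + r / num_classes.
if __name__ == '__main__':
    r, C = 0.2, 10
    T = torch.eye(C) * (1 - r) + torch.ones(C, C) / C * r
    assert torch.allclose(T.sum(dim=1), torch.ones(C))
    assert abs(float(T[0, 0]) - ((1 - r) + r / C)) < 1e-6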
|
114780
|
from django.contrib import admin
from corehq.apps.toggle_ui.models import ToggleAudit
@admin.register(ToggleAudit)
class ToggleAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
list_display = ('username', 'slug', 'action', 'namespace', 'item', 'randomness')
list_filter = ('slug', 'namespace')
ordering = ('created',)
|
114826
|
import os
import csv
import glob
import logging
logger = logging.getLogger(__name__)
def extract(func):
"""
Decorator function. Open and extract data from CSV files. Return list of dictionaries.
:param func: Wrapped function with *args and **kwargs arguments.
"""
def _wrapper(*args):
out = []
instance, prefix = args
for fname in glob.glob(os.path.join(getattr(instance, 'directory'), *prefix)):
with open(fname) as g:
out.extend(func(instance, data=csv.DictReader(g)))
return out
return _wrapper
class BaseCSV(object):
def __init__(self, directory):
self.directory = directory
@staticmethod
def column(field, **kwargs):
try:
value = kwargs[field].strip()
return value if value != "" else None
except (AttributeError, KeyError, TypeError) as ex:
return None
def column_unicode(self, field, **kwargs):
try:
return self.column(field, **kwargs).decode('utf-8')
except (KeyError, AttributeError):
return None
def column_int(self, field, **kwargs):
try:
return int(self.column(field, **kwargs))
except (KeyError, TypeError):
return None
def column_bool(self, field, **kwargs):
try:
return bool(self.column_int(field, **kwargs))
except (KeyError, TypeError):
return None
def column_float(self, field, **kwargs):
try:
return float(self.column(field, **kwargs))
except (KeyError, TypeError):
return None
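# --- Illustrative sketch (not from the original file) ---
# A hypothetical subclass showing how @extract is meant to be used: the
# wrapped method receives each matched file as a csv.DictReader through the
# `data` keyword and returns a list of row dicts.
class ExampleCSV(BaseCSV):
    @extract
    def rows(self, data=None):
        return [{'name': self.column('name', **row)} for row in data]
# ExampleCSV('/some/dir').rows(('*.csv',)) reads every *.csv under /some/dir
# and collects the 'name' column.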
|
114829
|
import os, sys, logging
from solarpv.training.S2_training_data import *
# conf
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
generator = MakeS2TrainingData(
tilespath=os.path.join(os.getcwd(),'data','cv_all_tiles.geojson'),
polyspath=os.path.join(os.getcwd(),'data','cv_all_polys.geojson'),
outpath=os.path.join(os.getcwd(),'data','crossvalidation','S2_unet'))
generator.download_all_samples(multi=True,n_cpus=4)
generator.make_records(os.path.join(os.getcwd(),'data','crossvalidation','S2_unet'))
|
114843
|
from __future__ import print_function
import sys,inspect
import numpy as np
from flask import json
from collections import OrderedDict
class Evaluator:
def __init__(self,functionList):
self.generatedApp=[]
self.hasPlot=False
self.itemList=[]
self.evalGlobals={}
self.functionList = functionList
self.evalGlobals = functionList.copy()
self.evalGlobals['print_']=self.printer
self.evalGlobals['print']=self.print
self.evalGlobals['button']=self.button
self.evalGlobals['label']=self.label
self.evalGlobals['plot']=self.plot
def toUnique(self,identifier,suffix=0): #Make any ID string unique. returns new string.
newId = identifier+str(suffix) if suffix else identifier
if newId in self.itemList:
return self.toUnique(identifier,suffix+1)
return newId
def print(self,*args):
'''
For now, the print function is being overloaded in order to capture the console output.
        Future plans will store each line of execution as a JSON object. That
        approach will increase flexibility, allowing outputs beyond plain text,
        such as images and widgets.
'''
name=self.toUnique("print")
self.generatedApp.append({"type":"text","name":name,"value":[str(a) for a in args]})
self.itemList.append(name)
return name
def printer(self,txt,name="print"):
name=self.toUnique(name)
self.generatedApp.append({"type":"span","name":name,"class":"row well","value":str(txt)})
self.itemList.append(name)
return name
def label(self,txt,name="print",html_class=""):
name=self.toUnique(name)
self.generatedApp.append({"type":"label","name":name,"class":html_class,"value":str(txt)})
self.itemList.append(name)
return name
def button(self,label,endpoint,displayType="display_number",**kwargs):
name = kwargs.get("name","button-id")
name=self.toUnique(name)
self.itemList.append(name)
targetName = kwargs.get('target',name+'-label')
if 'target' not in kwargs: #If a target was not specified, make up a name
targetName = self.toUnique(name+'-label')
successOpts={"datapoint":'result',"type":displayType,"target":targetName}
if displayType=='update-plot': # specify the stacking of data
successOpts['stacking']=kwargs.get('stacking','xy')
self.generatedApp.append({"type":"button", "name":name,"label":label,"fetched_value":"","action":{"type":"POST","endpoint":endpoint,"success":successOpts}})
if 'target' not in kwargs: #If a target was not specified, make a label.
if displayType in ["display_number","display"]:
self.label('',targetName)
return name
#Plots
def plot(self,x,y,**kwargs):
name = kwargs.get('name',self.toUnique('myPlot'))
self.generatedApp.append({"type":"plot","name":name,"data":[np.array([x,y]).T.tolist()]}) #jqplot requires [x,y] pairs . not separate datasets.
self.itemList.append(name)
return name
def runCode(self,code):
self.generatedApp=[]
self.itemList=[]
submitted = compile(code.encode(), '<string>', mode='exec')
self.exec_scope = self.evalGlobals.copy()
try:
exec(submitted, self.exec_scope)
except Exception as e:
print(str(e))
return self.getApp()
def getApp(self):
return self.generatedApp
#### Extract Doc Strings ####
def getDocs(self):
flist = []
for a in self.functionList.keys():
if a[:2]=='__':continue
doc = ''
try:
doc = inspect.getdoc(self.functionList[a])
arglist = inspect.getargspec(self.functionList[a]).args
except Exception as e:
print(a,e)
continue
            if 'self' in arglist:
                arglist.remove('self')
flist.append({'doc_string':str(doc),'name':a,'args':arglist})
return flist
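# --- Illustrative sketch (not from the original file) ---
# Minimal usage, assuming this module and its dependency (flask) are
# importable: run a snippet and inspect the widget list it produces.
if __name__ == '__main__':
    ev = Evaluator({})
    app = ev.runCode("print('hello')\nlabel('greeting')")
    # app is a list of widget descriptions, e.g.
    # [{'type': 'text', 'name': 'print', 'value': ['hello']},
    #  {'type': 'label', 'name': 'print1', 'class': '', 'value': 'greeting'}]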
|
114867
|
from urlparse import urlparse
def is_url_in_domain(url, domains):
parsed = urlparse(url)
for domain in domains:
if domain.match(parsed.netloc):
return True
return False
def is_absolute(url):
return bool(urlparse(url).netloc)
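# --- Illustrative sketch (not from the original file) ---
# `domains` is assumed to hold compiled regexes matched against the netloc.
if __name__ == '__main__':
    import re
    domains = [re.compile(r'(.*\.)?example\.com$')]
    assert is_url_in_domain('http://sub.example.com/page', domains)
    assert not is_url_in_domain('http://evil.com/', domains)
    assert is_absolute('http://example.com') and not is_absolute('/path')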
|
114898
|
from django.urls import include, path, re_path
from django.views.generic import RedirectView
from django.urls import reverse_lazy
from django.http import QueryDict
from rest_framework.urlpatterns import format_suffix_patterns
from django_cas_ng.views import login as cas_login, logout as cas_logout, callback as cas_callback
from imageledger.views import search_views, api_views, list_views, favorite_views, tag_views, site_views
from imageledger.forms import FIELD_DEFAULT
class MetRedirectView(RedirectView):
permanent = True
query_string = True
pattern_name = 'search-met'
def get_redirect_url(self, *args, **kwargs):
url = reverse_lazy('index') + '?'
qd = QueryDict('', mutable=True, )
qd.update({'providers': 'met'})
qd.setlistdefault('search_fields', FIELD_DEFAULT)
url += qd.urlencode()
return url
urlpatterns = [
# Custom search URLs
path('themet', MetRedirectView.as_view(), name='search-met'),
path('', search_views.index, name='index'),
path('image/detail', search_views.by_image, name="by-image"),
re_path('image/detail/(?P<identifier>.*)', search_views.detail, name="detail"),
# CAS
path('accounts/login', cas_login, name='cas_ng_login'),
path('accounts/logout', cas_logout, name='cas_ng_logout'),
path('accounts/callback', cas_callback, name='cas_ng_proxy_callback'),
# Other auth-related pages
path('accounts/profile', site_views.profile, name="profile"),
path('accounts/delete', site_views.delete_account, name="delete-account"),
# Lists (public)
path('list/<slug:slug>/', list_views.OLListDetail.as_view(), name='list-detail'),
# Lists (user admin)
path('list/add/', list_views.OLListCreate.as_view(), name='my-list-add'),
path('list/mine/<slug:slug>/', list_views.OLListUpdate.as_view(), name='my-list-update'),
path('list/mine/<slug:slug>/delete', list_views.OLListDelete.as_view(), name='my-list-delete'),
path('lists/mine', list_views.OLOwnedListList.as_view(), name="my-lists"),
# Favorites
path('favorites/mine', favorite_views.FavoriteList.as_view(), name='my-favorites'),
# User tags
path('tags/mine', tag_views.UserTagsList.as_view(), name='my-tags'),
path('tags/mine/<slug:slug>/', tag_views.UserTagsDetail.as_view(), name='my-tags-detail'),
# About and other static pages
path('about', site_views.about, name='about'),
path('health', site_views.health, name='health'),
path('robots.txt', site_views.robots, name='robots'),
]
apipatterns = [
# List API
path('api/v1/lists', api_views.ListList.as_view()),
path('api/v1/autocomplete/lists', api_views.ListAutocomplete.as_view()),
path('api/v1/lists/<slug:slug>', api_views.ListDetail.as_view()),
# Favorite API
path('api/v1/images/favorite/<str:identifier>', api_views.FavoriteDetail.as_view()),
path('api/v1/images/favorites', api_views.FavoriteList.as_view()),
# User Tags API
path('api/v1/images/tags', api_views.UserTagDetail.as_view()),
path('api/v1/images/tags/<str:identifier>/<str:tag>', api_views.UserTagDetail.as_view()),
path('api/v1/images/tags/<str:identifier>', api_views.UserTagsList.as_view()),
path('api/v1/autocomplete/tags', api_views.UserTagsAutocomplete.as_view()),
]
apipatterns = format_suffix_patterns(apipatterns)
urlpatterns += apipatterns
|
114982
|
import maya.mel as mm
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
import glTools.utils.component
import glTools.utils.constraint
import glTools.utils.mathUtils
import glTools.utils.mesh
import glTools.utils.reference
import glTools.utils.stringUtils
import glTools.utils.transform
import types
def bake( constraint,
start = None,
end = None,
sampleBy = 1,
simulation = True ):
'''
Bake specified constraint
@param constraint: Constraint to bake animation for.
@type constraint: str
@param start: Start frame of bake animation range
@type start: float or None
@param end: End frame of bake animation range
@type end: float or None
@param sampleBy: Sample every Nth frame
@type sampleBy: int
@param simulation: Simulation option for bakeResults
@type simulation: bool
'''
# ==========
# - Checks -
# ==========
# Check Constraint
if not glTools.utils.constraint.isConstraint(constraint):
raise Exception('Object "'+constraint+'" is not a valid constraint node!')
# Check Start/End Frames
if start == None: start = mc.playbackOptions(q=True,min=True)
if end == None: end = mc.playbackOptions(q=True,max=True)
# ====================================
# - Get Slave Transform and Channels -
# ====================================
# Get Slave Transform
slave = glTools.utils.constraint.slave(constraint)
# Get Slave Channels
attrList = mc.listConnections(constraint,s=False,d=True,p=True) or []
slaveAttrs = [i.split('.')[-1] for i in attrList if i.startswith(slave+'.')] or []
if not slaveAttrs: raise Exception('No slave channels to bake!')
# ===================
# - Bake Constraint -
# ===================
mc.refresh(suspend=True)
mc.bakeResults( slave,
at = slaveAttrs,
time = (start,end),
disableImplicitControl = True,
simulation = simulation,
sampleBy = sampleBy )
mc.refresh(suspend=False)
# =================
# - Return Result -
# =================
return [slave+'.'+i for i in slaveAttrs]
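# Example (inside a Maya session; node names are hypothetical):
#   bake('arm_ik_parentConstraint1', start=1, end=120, sampleBy=2)
# returns the baked slave channels, e.g. ['arm_ik.translateX', ...]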
def aimConstraint( target,
slave,
aim='z',
up='y',
worldUpType='scene',
worldUpObject=None,
worldUpVector='y',
offset=(0,0,0),
mo=False ):
'''
	Create an aim constraint between the specified target and slave transforms.
	Only constrains open, settable channels.
	@param target: Constraint target transform.
	@type target: str
	@param slave: Constraint slave transform.
	@type slave: str
	@param aim: Aim axis.
	@type aim: str
	@param up: Up axis.
	@type up: str
@param worldUpType: World Up type. Options - "scene", "object", "objectrotation", "vector", or "none".
@type worldUpType: str
@param worldUpObject: World Up object.
@type worldUpObject: str
@param worldUpVector: World Up vector.
@type worldUpVector: str
	@param offset: Constraint rotation offset.
	@type offset: (float, float, float)
	@param mo: Maintain constraint offset
	@type mo: bool
'''
# Build Axis Dict
axis = {'x':(1,0,0),'y':(0,1,0),'z':(0,0,1),'-x':(-1,0,0),'-y':(0,-1,0),'-z':(0,0,-1)}
# ==========
# - Checks -
# ==========
# Check Master
if not mc.objExists(target):
raise Exception('Constraint target "'+target+'" does not exist!')
if not glTools.utils.transform.isTransform(target):
raise Exception('Constraint target "'+target+'" is not a valid transform!')
# Check Slave
if not mc.objExists(slave):
raise Exception('Constraint slave "'+slave+'" does not exist!')
if not glTools.utils.transform.isTransform(slave):
raise Exception('Constraint slave "'+slave+'" is not a valid transform!')
# Check Settable Channels
sk = []
if not mc.getAttr(slave+'.rx',se=True): sk.append('x')
if not mc.getAttr(slave+'.ry',se=True): sk.append('y')
if not mc.getAttr(slave+'.rz',se=True): sk.append('z')
if not sk: sk = 'none'
# =====================
# - Create Constraint -
# =====================
constraint = ''
try:
if worldUpObject:
constraint = mc.aimConstraint( target,
slave,
aim=axis[aim],
										u=axis[up],
worldUpType=worldUpType,
worldUpObject=worldUpObject,
worldUpVector=axis[worldUpVector],
sk=sk,
offset=offset,
mo=mo)[0]
else:
constraint = mc.aimConstraint( target,
slave,
aim=axis[aim],
										u=axis[up],
worldUpType=worldUpType,
worldUpVector=axis[worldUpVector],
sk=sk,
offset=offset,
mo=mo)[0]
except Exception, e:
raise Exception('Error creating constraint from "'+target+'" to "'+slave+'"! Exception msg: '+str(e))
# =================
# - Return Result -
# =================
return constraint
def pointConstraint(master,slave,mo=False,attrList=['tx','ty','tz']):
'''
	Create a point constraint between the specified master and slave transforms.
Only constrains open, settable channels.
@param master: Constraint master transform.
@type master: str
@param slave: Constraint slave transform.
@type slave: str
@param mo: Maintain constraint offset
@type mo: bool
@param attrList: List of transform attributes to constrain.
@type attrList: list
'''
# ==========
# - Checks -
# ==========
# Check Target (Master)
if isinstance(master,types.StringTypes):
if not mc.objExists(master):
raise Exception('Constraint target "'+master+'" does not exist!')
if not glTools.utils.transform.isTransform(master):
raise Exception('Constraint target "'+master+'" is not a valid transform!')
elif isinstance(master,types.ListType):
for target in master:
if not mc.objExists(target):
raise Exception('Constraint target "'+target+'" does not exist!')
if not glTools.utils.transform.isTransform(target):
raise Exception('Constraint target "'+target+'" is not a valid transform!')
# Check Slave
if not mc.objExists(slave):
raise Exception('Constraint slave "'+slave+'" does not exist!')
if not glTools.utils.transform.isTransform(slave):
raise Exception('Constraint slave "'+slave+'" is not a valid transform!')
# Check Settable Channels
st = []
if not 'tx' in attrList or not mc.getAttr(slave+'.tx',se=True): st.append('x')
if not 'ty' in attrList or not mc.getAttr(slave+'.ty',se=True): st.append('y')
if not 'tz' in attrList or not mc.getAttr(slave+'.tz',se=True): st.append('z')
if not st: st = 'none'
# Skip All Check
if len(st) == 3:
print('No axis to constrain! Unable to create constraint...')
return None
# =====================
# - Create Constraint -
# =====================
constraint = ''
try: constraint = mc.pointConstraint(master,slave,sk=st,mo=mo)[0]
except Exception, e:
raise Exception('Error creating constraint from "'+master+'" to "'+slave+'"! Exception msg: '+str(e))
# =================
# - Return Result -
# =================
return constraint
def orientConstraint(master,slave,mo=False,attrList=['rx','ry','rz']):
'''
	Create an orient constraint between the specified master and slave transforms.
Only constrains open, settable channels.
@param master: Constraint master transform.
@type master: str
@param slave: Constraint slave transform.
@type slave: str
@param mo: Maintain constraint offset
@type mo: bool
@param attrList: List of transform attributes to constrain.
@type attrList: list
'''
# ==========
# - Checks -
# ==========
# Check Target (Master)
if isinstance(master,types.StringTypes):
if not mc.objExists(master):
raise Exception('Constraint target "'+master+'" does not exist!')
if not glTools.utils.transform.isTransform(master):
raise Exception('Constraint target "'+master+'" is not a valid transform!')
elif isinstance(master,types.ListType):
for target in master:
if not mc.objExists(target):
raise Exception('Constraint target "'+target+'" does not exist!')
if not glTools.utils.transform.isTransform(target):
raise Exception('Constraint target "'+target+'" is not a valid transform!')
# Check Slave
if not mc.objExists(slave):
raise Exception('Constraint slave "'+slave+'" does not exist!')
if not glTools.utils.transform.isTransform(slave):
raise Exception('Constraint slave "'+slave+'" is not a valid transform!')
# Check Settable Channels
sr = []
if not 'rx' in attrList or not mc.getAttr(slave+'.rx',se=True): sr.append('x')
if not 'ry' in attrList or not mc.getAttr(slave+'.ry',se=True): sr.append('y')
if not 'rz' in attrList or not mc.getAttr(slave+'.rz',se=True): sr.append('z')
	if not sr: sr = 'none'
# Skip All Check
if len(sr) == 3:
print('No axis to constrain! Unable to create constraint...')
return None
# =====================
# - Create Constraint -
# =====================
constraint = ''
try: constraint = mc.orientConstraint(master,slave,sk=sr,mo=mo)[0]
except Exception, e:
raise Exception('Error creating constraint from "'+master+'" to "'+slave+'"! Exception msg: '+str(e))
# =================
# - Return Result -
# =================
return constraint
def parentConstraint(master,slave,mo=False,attrList=['tx','ty','tz','rx','ry','rz']):
'''
	Create a parent constraint between the specified master and slave transforms.
Only constrains open, settable channels.
@param master: Constraint master transform.
@type master: str or list
@param slave: Constraint slave transform.
@type slave: str
@param mo: Maintain constraint offset
@type mo: bool
@param attrList: List of transform attributes to constrain.
@type attrList: list
'''
# ==========
# - Checks -
# ==========
# Check Target (Master)
if isinstance(master,types.StringTypes):
if not mc.objExists(master):
raise Exception('Constraint target "'+master+'" does not exist!')
if not glTools.utils.transform.isTransform(master):
raise Exception('Constraint target "'+master+'" is not a valid transform!')
elif isinstance(master,types.ListType):
for target in master:
if not mc.objExists(target):
raise Exception('Constraint target "'+target+'" does not exist!')
if not glTools.utils.transform.isTransform(target):
raise Exception('Constraint target "'+target+'" is not a valid transform!')
# Check Slave
if not mc.objExists(slave):
raise Exception('Constraint slave "'+slave+'" does not exist!')
if not glTools.utils.transform.isTransform(slave):
raise Exception('Constraint slave "'+slave+'" is not a valid transform!')
# Check Settable Channels
st = []
sr = []
if not 'tx' in attrList or not mc.getAttr(slave+'.tx',se=True): st.append('x')
if not 'ty' in attrList or not mc.getAttr(slave+'.ty',se=True): st.append('y')
if not 'tz' in attrList or not mc.getAttr(slave+'.tz',se=True): st.append('z')
if not 'rx' in attrList or not mc.getAttr(slave+'.rx',se=True): sr.append('x')
if not 'ry' in attrList or not mc.getAttr(slave+'.ry',se=True): sr.append('y')
if not 'rz' in attrList or not mc.getAttr(slave+'.rz',se=True): sr.append('z')
if not st: st = 'none'
if not sr: sr = 'none'
# =====================
# - Create Constraint -
# =====================
constraint = ''
try: constraint = mc.parentConstraint(master,slave,st=st,sr=sr,mo=mo)[0]
except Exception, e:
raise Exception('Error creating constraint from "'+master+'" to "'+slave+'"! Exception msg: '+str(e))
# =================
# - Return Result -
# =================
return constraint
def scaleConstraint(master,slave,mo=False,force=False,attrList=['sx','sy','sz']):
'''
Create a scale constraint between the specified master and slave transforms.
Only constrains open, settable channels.
@param master: Constraint master transform.
@type master: str
@param slave: Constraint slave transform.
@type slave: str
@param mo: Maintain constraint offset
@type mo: bool
	@param force: Force constraint by deleting scale channel keys. Use with caution!
@type force: bool
@param attrList: List of transform attributes to constrain.
@type attrList: list
'''
# ==========
# - Checks -
# ==========
# Check Master
if not mc.objExists(master):
raise Exception('Constraint master "'+master+'" does not exist!')
if not glTools.utils.transform.isTransform(master):
raise Exception('Constraint master "'+master+'" is not a valid transform!')
# Check Slave
if not mc.objExists(slave):
raise Exception('Constraint slave "'+slave+'" does not exist!')
if not glTools.utils.transform.isTransform(slave):
raise Exception('Constraint slave "'+slave+'" is not a valid transform!')
# Check Settable Channels
sk = []
if not 'sx' in attrList or not mc.getAttr(slave+'.sx',se=True): sk.append('x')
if not 'sy' in attrList or not mc.getAttr(slave+'.sy',se=True): sk.append('y')
if not 'sz' in attrList or not mc.getAttr(slave+'.sz',se=True): sk.append('z')
	if not sk: sk = 'none'
# Check All
if len(sk) == 3:
print('All scale channels locked! Unable to add constraint')
return None
# =====================
# - Create Constraint -
# =====================
if force: mc.cutKey(slave,at=attrList)
constraint = ''
try: constraint = mc.scaleConstraint(master,slave,sk=sk,mo=mo)[0]
except Exception, e:
#raise Exception('Error creating constraint from "'+master+'" to "'+slave+'"! Exception msg: '+str(e))
print('Error creating constraint from "'+master+'" to "'+slave+'"! Exception msg: '+str(e))
constraint = None
# =================
# - Return Result -
# =================
return constraint
def nonReferencedConstraints(slaveNSfilter=None,targetNSfilter=None):
'''
Return a list of non referenced constraint nodes in the current scene.
Optionally, filter results by slave and/or target namespace.
@param slaveNSfilter: Constraint slave transform namespace filter list.
@type slaveNSfilter: list
@param targetNSfilter: Constraint target transform namespace filter list.
@type targetNSfilter: list
'''
# =========================
# - Get Scene Constraints -
# =========================
sceneConstraints = mc.ls(type='constraint')
if not sceneConstraints: return []
# Filter Nonreferenced Constraints
nonRefConstraints = []
for constraint in sceneConstraints:
if not glTools.utils.reference.isReferenced(constraint):
if not constraint in nonRefConstraints:
nonRefConstraints.append(constraint)
# =================
# - Filter Result -
# =================
# Slave Namespace Filter
if slaveNSfilter:
for constraint in nonRefConstraints:
filterOut = True
constraintSlave = glTools.utils.constraint.slaveList(constraint)
for slave in constraintSlave:
if not ':' in slave: slave = ':'+slave
slaveNS = slave.split(':')[0]
if slaveNS in slaveNSfilter: filterOut = False
if filterOut:
nonRefConstraints.remove(constraint)
# Master Namespace Filter
if targetNSfilter:
for constraint in nonRefConstraints:
filterOut = True
constraintTarget = glTools.utils.constraint.targetList(constraint)
for target in constraintTarget:
if not ':' in target: target = ':'+target
targetNS = target.split(':')[0]
if targetNS in targetNSfilter: filterOut = False
if filterOut:
nonRefConstraints.remove(constraint)
# =================
# - Return Result -
# =================
return nonRefConstraints
def listReferenceDependents():
'''
'''
pass
def listReferenceDependencies():
'''
'''
pass
def translateOffsetTarget(target,offset,slave,prefix=None):
'''
Create a translate offset target constraint (parentConstraint).
The slave will follow the target in rotation only and the offset in translation.
Used mainly for specific IK pole vector target setup.
@param target: Constraint target.
@type target: str
@param offset: Offset target to follow in translation.
@type offset: str
@param slave: Slave transform to create constraint for.
@type slave: str
@param prefix: Naming prefix.
@type prefix: str or None
'''
# ==========
# - Checks -
# ==========
# Target
if not mc.objExists(target):
raise Exception('Target transform "'+target+'" does not exist!')
if not glTools.utils.transform.isTransform(target):
		raise Exception('Target object "'+target+'" is not a valid transform!')
# Offset
if not mc.objExists(offset):
raise Exception('Offset transform "'+offset+'" does not exist!')
if not glTools.utils.transform.isTransform(offset):
		raise Exception('Offset object "'+offset+'" is not a valid transform!')
# Slave
if not mc.objExists(slave):
raise Exception('Slave transform "'+slave+'" does not exist!')
if not glTools.utils.transform.isTransform(slave):
		raise Exception('Slave object "'+slave+'" is not a valid transform!')
# Prefix
if not prefix: prefix = glTools.utils.stringUtils.stripSuffix(slave)
# ====================
# - Build Constraint -
# ====================
# Parent Slave to Target
mc.delete(parentConstraint(target,slave))
mc.parent(slave,target)
# Create Offset Constraint
offsetConstraint = mc.pointConstraint(offset,slave,mo=True)[0]
offsetConstraint = mc.rename(offsetConstraint,prefix+'_offset_pointConstraint')
# =================
# - Return Result -
# =================
return offsetConstraint
def pointOnPolyConstraintCmd(pt):
'''
Generate a pointOnPolyConstraint setup command string.
@param pt: Mesh point to generate pointOnPolyConstraint command for.
@type pt: str
'''
# ==================
# - Initialize Cmd -
# ==================
cmd = ''
# ===============================
# - Get Mesh from Point on Poly -
# ===============================
fullname = mc.ls(pt,o=True)[0]
mesh = fullname.split(':')[-1]
meshSN = mesh.split('|')[-1]
# Get Mesh Component ID
meshID = glTools.utils.component.index(pt)
prevID = OpenMaya.MScriptUtil()
prevID.createFromInt(0)
prevIDPtr = prevID.asIntPtr()
# =======================
# - Constrain to Vertex -
# =======================
if '.vtx[' in pt:
# Initialize MItMeshVertex
meshIt = glTools.utils.mesh.getMeshVertexIter(mesh)
meshIt.setIndex(meshID,prevIDPtr)
# Get Vertex UV
uv = OpenMaya.MScriptUtil()
uv.createFromDouble(0.0)
uvPtr = uv.asFloat2Ptr()
meshIt.getUV(uvPtr)
uv = [ OpenMaya.MScriptUtil.getFloat2ArrayItem(uvPtr,0,j) for j in [0,1] ]
cmd += '; setAttr ($constraint[0]+".%sU%d") %f; setAttr ($constraint[0]+".%sV%d") %f' % ( meshSN, 0, uv[0], meshSN, 0, uv[1] )
# =====================
# - Constrain to Edge -
# =====================
elif '.e[' in pt:
# Initialize MItMeshEdge
meshIt = glTools.utils.mesh.getMeshEdgeIter(mesh)
meshIt.setIndex(meshID,prevIDPtr)
# Get Edge/Vertices UV
vtx = [ meshIt.index( j ) for j in [0,1] ]
vtxIt = glTools.utils.mesh.getMeshVertexIter(mesh)
uvs = []
for v in vtx:
vtxIt.setIndex(v,prevIDPtr)
uv = OpenMaya.MScriptUtil()
uv.createFromDouble( 0.0 )
uvPtr = uv.asFloat2Ptr()
vtxIt.getUV(uvPtr)
uvs.append( [ OpenMaya.MScriptUtil.getFloat2ArrayItem(uvPtr,0,j) for j in [0,1] ] )
uv = [ 0.5*(uvs[0][j]+uvs[1][j]) for j in [0,1] ]
cmd += '; setAttr ($constraint[0]+".%sU%d") %f; setAttr ($constraint[0]+".%sV%d") %f' % ( meshSN, 0, uv[0], meshSN, 0, uv[1] )
# =====================
# - Constrain to Face -
# =====================
elif '.f[' in pt:
# Initialize MItMeshface
meshIt = glTools.utils.mesh.getMeshFaceIter(mesh)
meshIt.setIndex(meshID,prevIDPtr)
# Get Face UV
u, v = OpenMaya.MFloatArray(), OpenMaya.MFloatArray()
meshIt.getUVs( u, v )
uv = ( sum(u)/len(u), sum(v)/len(v) )
cmd += '; setAttr ($constraint[0]+".%sU%d") %f; setAttr ($constraint[0]+".%sV%d") %f' % ( meshSN, 0, uv[0], meshSN, 0, uv[1] )
# =================
# - Return Result -
# =================
return cmd
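# Example (inside a Maya session; the component name is hypothetical):
#   cmd = pointOnPolyConstraintCmd('pSphere1.vtx[12]')
# The returned ';'-separated MEL fragment sets the constraint's U/V
# attributes for the picked component and is meant to be appended after a
# pointOnPolyConstraint creation command that stores its result in
# $constraint[0].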
|
114994
|
from functools import partial, update_wrapper
from math import exp
import numpy as np
from scipy.sparse import lil_matrix
from scipy.stats import rankdata
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.pairwise import pairwise_distances, euclidean_distances
from sklearn.neighbors import NearestNeighbors
from ...utils.information_theory import conditional_entropy
from ...utils.information_theory import entropy
from ...utils.qpfs_body import qpfs_body
from ...utils.functions import knn_from_class
def _wrapped_partial(func, *args, **kwargs):
partial_func = partial(func, *args, **kwargs)
update_wrapper(partial_func, func)
return partial_func
def fit_criterion_measure(x, y):
"""Calculate the FitCriterion score for features. Bigger values mean more
important features.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
https://core.ac.uk/download/pdf/191234514.pdf
Examples
--------
>>> from ITMO_FS.filters.univariate import fit_criterion_measure
>>> import numpy as np
>>> x = np.array([[1, 2, 4, 1, 1], [2, 2, 2, 1, 2], [3, 5, 1, 1, 4],
... [1, 1, 1, 1, 4], [2, 2, 2, 1, 5]])
>>> y = np.array([1, 2, 3, 1, 2])
>>> fit_criterion_measure(x, y)
array([1. , 0.8, 0.8, 0.4, 0.6])
"""
def count_hits(feature):
splits = {cl: feature[y == cl] for cl in classes}
means = {cl: np.mean(splits[cl]) for cl in classes}
devs = {cl: np.var(splits[cl]) for cl in classes}
distances = np.vectorize(
lambda x_val: {cl: (
abs(x_val - means[cl])
/ (devs[cl] + 1e-10)) for cl in classes})(feature)
return np.sum(np.vectorize(lambda d: min(d, key=d.get))(distances) == y)
classes = np.unique(y)
return np.apply_along_axis(count_hits, 0, x) / x.shape[0]
def f_ratio_measure(x, y):
"""Calculate Fisher score for features. Bigger values mean more important
features.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
https://papers.nips.cc/paper/2909-laplacian-score-for-feature-selection.pdf
Examples
--------
>>> from ITMO_FS.filters.univariate import f_ratio_measure
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> f_ratio_measure(x, y)
array([0.6 , 0.2 , 1. , 0.12, 5.4 ])
"""
def __F_ratio(feature):
splits = {cl: feature[y == cl] for cl in classes}
mean_feature = np.mean(feature)
inter_class = np.sum(
np.vectorize(lambda cl: (
counts_d[cl]
* np.power(mean_feature - np.mean(splits[cl]), 2)))(classes))
intra_class = np.sum(
np.vectorize(lambda cl: (
counts_d[cl]
* np.var(splits[cl])))(classes))
return inter_class / (intra_class + 1e-10)
classes, counts = np.unique(y, return_counts=True)
counts_d = {cl: counts[idx] for idx, cl in enumerate(classes)}
return np.apply_along_axis(__F_ratio, 0, x)
def gini_index(x, y):
"""Calculate Gini index for features. Bigger values mean more important
features. This measure works best with discrete features due to being based
on information theory.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
http://lkm.fri.uni-lj.si/xaigor/slo/clanki/ijcai95z.pdf
Examples
--------
>>> from ITMO_FS.filters.univariate import gini_index
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
>>> x = est.fit_transform(x)
>>> gini_index(x, y)
array([0.14 , 0.04 , 0.64 , 0.24 , 0.37333333])
"""
def __gini(feature):
values, counts = np.unique(feature, return_counts=True)
counts_d = {val: counts[idx] for idx, val in enumerate(values)}
total_sum = np.sum(
np.vectorize(
lambda val: (
np.sum(
np.square(
np.unique(
y[feature == val], return_counts=True)[1]))
/ counts_d[val]))(values))
return total_sum / x.shape[0] - prior_prob_squared_sum
classes, counts = np.unique(y, return_counts=True)
prior_prob_squared_sum = np.sum(np.square(counts / x.shape[0]))
return np.apply_along_axis(__gini, 0, x)
def su_measure(x, y):
"""SU is a correlation measure between the features and the class
    calculated via formula SU(X,Y) = 2 * I(X;Y) / (H(X) + H(Y)). Bigger values
mean more important features. This measure works best with discrete
features due to being based on information theory.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
https://pdfs.semanticscholar.org/9964/c7b42e6ab311f88e493b3fc552515e0c764a.pdf
Examples
--------
>>> from ITMO_FS.filters.univariate import su_measure
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
>>> x = est.fit_transform(x)
>>> su_measure(x, y)
array([0.28694182, 0.13715115, 0.79187567, 0.47435099, 0.67126949])
"""
def __SU(feature):
entropy_x = entropy(feature)
return (2 * (entropy_x - conditional_entropy(y, feature))
/ (entropy_x + entropy_y))
entropy_y = entropy(y)
return np.apply_along_axis(__SU, 0, x)
# TODO CONCORDATION COEF
def kendall_corr(x, y):
"""Calculate Sample sign correlation (Kendall correlation) for each
feature. Bigger absolute values mean more important features.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient
Examples
--------
>>> from ITMO_FS.filters.univariate import kendall_corr
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> kendall_corr(x, y)
array([-0.1, 0.2, -0.4, -0.2, 0.2])
"""
def __kendall_corr(feature):
k_corr = 0.0
for i in range(len(feature)):
k_corr += np.sum(np.sign(feature[i] - feature[i + 1:])
* np.sign(y[i] - y[i + 1:]))
return 2 * k_corr / (feature.shape[0] * (feature.shape[0] - 1))
return np.apply_along_axis(__kendall_corr, 0, x)
def fechner_corr(x, y):
"""Calculate Sample sign correlation (Fechner correlation) for each
feature. Bigger absolute values mean more important features.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
Examples
--------
>>> from ITMO_FS.filters.univariate import fechner_corr
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> fechner_corr(x, y)
array([-0.2, 0.2, -0.4, -0.2, -0.2])
"""
y_dev = y - np.mean(y)
x_dev = x - np.mean(x, axis=0)
return np.sum(np.sign(x_dev.T * y_dev), axis=1) / x.shape[0]
def reliefF_measure(x, y, k_neighbors=1):
"""Calculate ReliefF measure for each feature. Bigger values mean more
important features.
    Note:
    Only for complete x.
    Rather than repeating the algorithm m times, this implementation runs it
    exhaustively (i.e. n times, once for each instance), which is practical
    for relatively small n (up to one thousand).
Parameters
----------
x : array-like, shape (n_samples, n_features)
The input samples.
y : array-like, shape (n_samples,)
The classes for the samples.
k_neighbors : int, optional
The number of neighbors to consider when assigning feature importance
scores. More neighbors results in more accurate scores but takes
longer. Selection of k hits and misses is the basic difference to
Relief and ensures greater robustness of the algorithm concerning noise.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
<NAME> al. Relief-based feature selection: Introduction and
review. Journal of Biomedical Informatics 85 (2018) 189–203
Examples
--------
>>> from ITMO_FS.filters.univariate import reliefF_measure
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1], [1, 2, 1, 4, 2], [4, 3, 2, 3, 1]])
>>> y = np.array([1, 2, 2, 1, 2, 1, 2])
>>> reliefF_measure(x, y)
array([-0.14285714, -0.57142857, 0.10714286, -0.14285714, 0.07142857])
>>> reliefF_measure(x, y, k_neighbors=2)
array([-0.07142857, -0.17857143, -0.07142857, -0.0952381 , -0.17857143])
"""
def __calc_misses(index):
misses_diffs_classes = np.abs(
np.vectorize(
lambda cl: (
x[index]
- x[knn_from_class(dm, y, index, k_neighbors, cl)])
* prior_prob[cl],
signature='()->(n,m)')(classes[classes != y[index]]))
return (np.sum(np.sum(misses_diffs_classes, axis=1), axis=0)
/ (1 - prior_prob[y[index]]))
classes, counts = np.unique(y, return_counts=True)
if np.any(counts <= k_neighbors):
raise ValueError(
"Cannot calculate relieff measure because one of theclasses has "
"less than %d samples" % (k_neighbors + 1))
prior_prob = dict(zip(classes, np.array(counts) / len(y)))
n_samples = x.shape[0]
n_features = x.shape[1]
# use manhattan distance instead of euclidean
dm = pairwise_distances(x, x, 'manhattan')
indices = np.arange(n_samples)
# use abs instead of square because of manhattan distance
hits_diffs = np.abs(
np.vectorize(
lambda index: (
x[index]
- x[knn_from_class(dm, y, index, k_neighbors, y[index])]),
signature='()->(n,m)')(indices))
H = np.sum(hits_diffs, axis=(0,1))
misses_sum_diffs = np.vectorize(
lambda index: __calc_misses(index),
signature='()->(n)')(indices)
M = np.sum(misses_sum_diffs, axis=0)
weights = M - H
# dividing by m * k guarantees that all final weights
    # will be normalized within the interval [-1, 1].
weights /= n_samples * k_neighbors
# The maximum and minimum values of A are determined over the entire
# set of instances.
# This normalization ensures that weight updates fall
# between 0 and 1 for both discrete and continuous features.
with np.errstate(divide='ignore', invalid="ignore"): # todo
return weights / (np.amax(x, axis=0) - np.amin(x, axis=0))
def relief_measure(x, y, m=None, random_state=42):
"""Calculate Relief measure for each feature. This measure is supposed to
work only with binary classification datasets; for multi-class problems use
the ReliefF measure. Bigger values mean more important features.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The input samples.
y : array-like, shape (n_samples,)
The classes for the samples.
m : int, optional
Amount of iterations to do. If not specified, n_samples iterations
would be performed.
random_state : int, optional
Random state for numpy random.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
<NAME> al. Relief-based feature selection: Introduction and
review. Journal of Biomedical Informatics 85 (2018) 189–203
Examples
--------
>>> from ITMO_FS.filters.univariate import relief_measure
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 2, 2, 1, 2])
>>> relief_measure(x, y)
array([ 0. , -0.6 , -0.1875, -0.15 , -0.4 ])
"""
weights = np.zeros(x.shape[1])
classes, counts = np.unique(y, return_counts=True)
if len(classes) == 1:
raise ValueError("Cannot calculate relief measure with 1 class")
if 1 in counts:
raise ValueError(
"Cannot calculate relief measure because one of the classes has "
"only 1 sample")
n_samples = x.shape[0]
n_features = x.shape[1]
if m is None:
m = n_samples
x_normalized = MinMaxScaler().fit_transform(x)
dm = euclidean_distances(x_normalized, x_normalized)
indices = np.random.default_rng(random_state).integers(
low=0, high=n_samples, size=m)
objects = x_normalized[indices]
hits_diffs = np.square(
np.vectorize(
lambda index: (
x_normalized[index]
- x_normalized[knn_from_class(dm, y, index, 1, y[index])]),
signature='()->(n,m)')(indices))
misses_diffs = np.square(
np.vectorize(
lambda index: (
x_normalized[index]
- x_normalized[knn_from_class(
dm, y, index, 1, y[index], anyOtherClass=True)]),
signature='()->(n,m)')(indices))
H = np.sum(hits_diffs, axis=(0,1))
M = np.sum(misses_diffs, axis=(0,1))
weights = M - H
return weights / m
def chi2_measure(x, y):
"""Calculate the Chi-squared measure for each feature. Bigger values mean
more important features. This measure works best with discrete features due
to being based on statistics.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
http://lkm.fri.uni-lj.si/xaigor/slo/clanki/ijcai95z.pdf
Example
-------
>>> from ITMO_FS.filters.univariate import chi2_measure
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
>>> x = est.fit_transform(x)
>>> chi2_measure(x, y)
array([ 1.875 , 0.83333333, 10. , 3.75 , 6.66666667])
"""
def __chi2(feature):
values, counts = np.unique(feature, return_counts=True)
values_map = {val: idx for idx, val in enumerate(values)}
splits = {cl: np.array([values_map[val] for val in feature[y == cl]])
for cl in classes}
e = np.vectorize(
lambda cl: prior_probs[cl] * counts,
            signature='()->(n)')(classes)
n = np.vectorize(
lambda cl: np.bincount(splits[cl], minlength=values.shape[0]),
            signature='()->(n)')(classes)
return np.sum(np.square(e - n) / e)
classes, counts = np.unique(y, return_counts=True)
prior_probs = {cl: counts[idx] / x.shape[0] for idx, cl
in enumerate(classes)}
return np.apply_along_axis(__chi2, 0, x)
#
# def __contingency_matrix(labels_true, labels_pred):
# """Build a contingency matrix describing the relationship between labels.
# Parameters
# ----------
# labels_true : int array, shape = [n_samples]
# Ground truth class labels to be used as a reference
# labels_pred : array, shape = [n_samples]
# Cluster labels to evaluate
# Returns
# -------
# contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
# Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
# true class :math:`i` and in predicted class :math:`j`. If
# ``eps is None``, the dtype of this array will be integer. If ``eps`` is
# given, the dtype will be float.
# """
# classes, class_idx = np.unique(labels_true, return_inverse=True)
# clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
# n_classes = classes.shape[0]
# n_clusters = clusters.shape[0]
# # Using coo_matrix to accelerate simple histogram calculation,
# # i.e. bins are consecutive integers
# # Currently, coo_matrix is faster than histogram2d for simple cases
# # TODO redo it with numpy
# contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
# (class_idx, cluster_idx)),
# shape=(n_classes, n_clusters),
# dtype=np.int)
# contingency = contingency.tocsr()
# contingency.sum_duplicates()
# return contingency
#
#
# def __mi(U, V):
# contingency = __contingency_matrix(U, V)
# nzx, nzy, nz_val = sp.find(contingency)
# contingency_sum = contingency.sum()
# pi = np.ravel(contingency.sum(axis=1))
# pj = np.ravel(contingency.sum(axis=0))
# log_contingency_nm = np.log(nz_val)
# contingency_nm = nz_val / contingency_sum
# # Don't need to calculate the full outer product, just for non-zeroes
# outer = (pi.take(nzx).astype(np.int64, copy=False)
# * pj.take(nzy).astype(np.int64, copy=False))
# log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
# mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
# contingency_nm * log_outer)
# return mi.sum()
#
def spearman_corr(x, y):
"""Calculate Spearman's correlation for each feature. Bigger absolute
values mean more important features. This measure works best with discrete
features due to being based on statistics.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
https://en.wikipedia.org/wiki/Spearman's_rank_correlation_coefficient
Examples
--------
>>> from ITMO_FS.filters.univariate import spearman_corr
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
>>> x = est.fit_transform(x)
>>> spearman_corr(x, y)
array([-0.186339 , 0.30429031, -0.52704628, -0.30555556, 0.35355339])
"""
n = x.shape[0]
if n < 2:
raise ValueError("The input should contain more than 1 sample")
x_ranks = np.apply_along_axis(rankdata, 0, x)
y_ranks = rankdata(y)
return pearson_corr(x_ranks, y_ranks)
def pearson_corr(x, y):
"""Calculate Pearson's correlation for each feature. Bigger absolute
values mean more important features. This measure works best with discrete
features due to being based on statistics.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
Examples
--------
>>> from ITMO_FS.filters.univariate import pearson_corr
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
>>> x = est.fit_transform(x)
>>> pearson_corr(x, y)
array([-0.13363062, 0.32732684, -0.60631301, -0.26244533, 0.53452248])
"""
x_dev = x - np.mean(x, axis=0)
y_dev = y - np.mean(y)
sq_dev_x = x_dev * x_dev
sq_dev_y = y_dev * y_dev
sum_dev = y_dev.T.dot(x_dev).reshape((x.shape[1],))
denominators = np.sqrt(np.sum(sq_dev_y) * np.sum(sq_dev_x, axis=0))
results = np.array(
[(sum_dev[i] / denominators[i]) if denominators[i] > 0.0 else 0 for i
in range(len(denominators))])
return results
# TODO need to implement unsupervised way
def laplacian_score(x, y, k_neighbors=5, t=1, metric='euclidean', **kwargs):
"""Calculate Laplacian Score for each feature. Smaller values mean more
important features.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The input samples.
y : array-like, shape (n_samples,)
The classes for the samples.
k_neighbors : int, optional
The number of neighbors to construct a nearest neighbor graph.
t : float, optional
Suitable constant for weight matrix S
where Sij = exp(-(|xi - xj| ^ 2) / t).
metric : str or callable, optional
Norm function to compute distance between two points or one of the
commonly used strings ('euclidean', 'manhattan' etc.) The default
metric is euclidean.
weights : array-like, shape (n_samples, n_samples)
The weight matrix of the graph that models the local structure of
the data space. By default it is constructed using KNN algorithm.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
https://papers.nips.cc/paper/2909-laplacian-score-for-feature-selection.pdf
Examples
--------
>>> from ITMO_FS.filters.univariate import laplacian_score
>>> import numpy as np
>>> x = np.array([[1, 2, 3, 3, 1], [2, 2, 3, 3, 2], [1, 3, 3, 1, 3],
... [3, 1, 3, 1, 4], [4, 4, 3, 1, 5]])
>>> y = np.array([1, 2, 3, 4, 5])
>>> laplacian_score(x, y)
array([1.98983619, 1.22248371, nan, 0.79710221, 1.90648048])
"""
n, m = x.shape
k_neighbors = min(k_neighbors, n - 1)
if 'weights' in kwargs.keys():
S = kwargs['weights']
else:
if n > 100000:
S = lil_matrix((n, n))
else:
S = np.zeros((n, n))
graph = NearestNeighbors(n_neighbors=k_neighbors, metric=metric)
graph.fit(x)
distances, neighbors = graph.kneighbors()
for i in range(n):
for j in range(k_neighbors):
S[i, neighbors[i][j]] = S[neighbors[i][j], i] = exp(
-distances[i][j] * distances[i][j] / t)
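    # Degree matrix D and unnormalized graph Laplacian L = D - S.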
ONE = np.ones((n,))
D = np.diag(S.dot(ONE))
L = D - S
    d = D.dot(ONE)
    F = x - x.T.dot(d) / ONE.dot(d)
F = F.T.dot(L.dot(F)) / F.T.dot(D.dot(F))
return np.diag(F)
def information_gain(x, y):
"""Calculate mutual information for each feature by formula
I(X,Y) = H(Y) - H(Y|X). Bigger values mean more important features. This
measure works best with discrete features due to being based on information
theory.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
Examples
--------
>>> from ITMO_FS.filters.univariate import information_gain
>>> import numpy as np
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> x = np.array([[1, 2, 3, 3, 1], [2, 2, 3, 3, 2], [1, 3, 3, 1, 3],
... [3, 1, 3, 1, 4], [4, 4, 3, 1, 5]])
>>> y = np.array([1, 2, 3, 4, 5])
>>> est = KBinsDiscretizer(n_bins=10, encode='ordinal')
>>> x = est.fit_transform(x)
>>> information_gain(x, y)
array([1.33217904, 1.33217904, 0. , 0.67301167, 1.60943791])
"""
    entropy_y = entropy(y)
    cond_entropy = np.apply_along_axis(conditional_entropy, 0, x, y)
    return entropy_y - cond_entropy
def anova(x, y):
"""Calculate anova measure for each feature. Bigger values mean more
important features.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
<NAME>. "Concepts and Applications of Inferential Statistics".
Chapter 14. http://vassarstats.net/textbook/
Note:
The Anova score is counted for checking hypothesis if variances of two
samples are similar, this measure only returns you counted F-score.
For understanding whether samples' variances are similar you should
compare recieved result with value of F-distribution function, for
example use:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.fdtrc.html#scipy.special.fdtrc
Examples
--------
>>> from ITMO_FS.filters.univariate import anova
>>> import numpy as np
>>> x = np.array([[1, 2, 3, 3, 1], [2, 2, 3, 3, 2], [1, 3, 3, 1, 3],
... [3, 1, 3, 1, 4], [4, 4, 3, 1, 5]])
>>> y = np.array([1, 2, 1, 3, 3])
>>> anova(x, y)
array([12.6 , 0.04, nan, 1.4 , 3. ])
"""
split_by_class = [x[y == k] for k in np.unique(y)]
num_classes = len(np.unique(y))
num_samples = x.shape[0]
num_samples_by_class = [s.shape[0] for s in split_by_class]
sq_sum_all = sum((s ** 2).sum(axis=0) for s in split_by_class)
sum_group = [np.asarray(s.sum(axis=0)) for s in split_by_class]
sq_sum_combined = sum(sum_group) ** 2
sum_sq_group = [np.asarray((s ** 2).sum(axis=0)) for s in split_by_class]
sq_sum_group = [s ** 2 for s in sum_group]
sq_sum_total = sq_sum_all - sq_sum_combined / float(num_samples)
sq_sum_within = sum(
[sum_sq_group[i] - sq_sum_group[i] / num_samples_by_class[i] for i in
range(num_classes)])
sq_sum_between = sq_sum_total - sq_sum_within
deg_free_between = num_classes - 1
deg_free_within = num_samples - num_classes
ms_between = sq_sum_between / float(deg_free_between)
ms_within = sq_sum_within / float(deg_free_within)
f = ms_between / ms_within
return np.array(f)
def modified_t_score(x, y):
"""Calculate the Modified T-score for each feature. Bigger values mean
more important features.
Parameters
----------
x : array-like, shape (n_samples, n_features)
The input samples.
y : array-like, shape (n_samples,)
The classes for the samples. There can be only 2 classes.
Returns
-------
array-like, shape (n_features,) : feature scores
See Also
--------
For more details see paper <https://dergipark.org.tr/en/download/article-file/261247>.
Examples
--------
>>> from ITMO_FS.filters.univariate import modified_t_score
>>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 1, 2, 1, 2])
>>> modified_t_score(x, y)
array([1.68968099, 0.12148022, 0.39653932, 0.17682997, 2.04387142])
"""
classes = np.unique(y)
size_class0 = y[y == classes[0]].size
size_class1 = y[y == classes[1]].size
mean_class0 = np.mean(x[y == classes[0]], axis=0)
mean_class0 = np.nan_to_num(mean_class0)
mean_class1 = np.mean(x[y == classes[1]], axis=0)
mean_class1 = np.nan_to_num(mean_class1)
std_class0 = np.std(x[y == classes[0]], axis=0)
std_class0 = np.nan_to_num(std_class0)
std_class1 = np.std(x[y == classes[1]], axis=0)
std_class1 = np.nan_to_num(std_class1)
corr_with_y = np.apply_along_axis(
lambda feature: abs(np.corrcoef(feature, y)[0][1]), 0, x)
corr_with_y = np.nan_to_num(corr_with_y)
corr_with_others = abs(np.corrcoef(x, rowvar=False))
corr_with_others = np.nan_to_num(corr_with_others)
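    # Mean absolute correlation of each feature with every other feature
    # (the self-correlation on the diagonal is excluded from the average).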
mean_of_corr_with_others = (
corr_with_others.sum(axis=1)
- corr_with_others.diagonal()) / (len(corr_with_others) - 1)
t_score_numerator = abs(mean_class0 - mean_class1)
t_score_denominator = np.sqrt(
(size_class0 * np.square(std_class0) + size_class1 * np.square(
std_class1)) / (size_class0 + size_class1))
modificator = corr_with_y / mean_of_corr_with_others
    scores = t_score_numerator / t_score_denominator * modificator
    scores = np.nan_to_num(scores)
    return scores
MEASURE_NAMES = {"FitCriterion": fit_criterion_measure,
"FRatio": f_ratio_measure,
"GiniIndex": gini_index,
"SymmetricUncertainty": su_measure,
"SpearmanCorr": spearman_corr,
"PearsonCorr": pearson_corr,
"FechnerCorr": fechner_corr,
"KendallCorr": kendall_corr,
"ReliefF": reliefF_measure,
"Chi2": chi2_measure,
"Anova": anova,
"LaplacianScore": laplacian_score,
"InformationGain": information_gain,
"ModifiedTScore": modified_t_score,
"Relief": relief_measure}
def select_best_by_value(value):
return _wrapped_partial(__select_by_value, value=value, more=True)
def select_worst_by_value(value):
return _wrapped_partial(__select_by_value, value=value, more=False)
def __select_by_value(scores, value, more=True):
if more:
return np.flatnonzero(scores >= value)
else:
return np.flatnonzero(scores <= value)
def select_k_best(k):
return _wrapped_partial(__select_k, k=k, reverse=True)
def select_k_worst(k):
return _wrapped_partial(__select_k, k=k)
def __select_k(scores, k, reverse=False):
if not isinstance(k, int):
raise TypeError("Number of features should be integer")
if k > scores.shape[0]:
raise ValueError(
"Cannot select %d features with n_features = %d" % (k, len(scores)))
order = np.argsort(scores)
if reverse:
order = order[::-1]
return order[:k]
def __select_percentage_best(scores, percent):
return __select_k(
        scores, k=int(scores.shape[0] * percent), reverse=True)
def select_best_percentage(percent):
return _wrapped_partial(__select_percentage_best, percent=percent)
def __select_percentage_worst(scores, percent):
return __select_k(
        scores, k=int(scores.shape[0] * percent), reverse=False)
def select_worst_percentage(percent):
return _wrapped_partial(__select_percentage_worst, percent=percent)
CR_NAMES = {"Best by value": select_best_by_value,
"Worst by value": select_worst_by_value,
"K best": select_k_best,
"K worst": select_k_worst,
"Worst by percentage": select_worst_percentage,
"Best by percentage": select_best_percentage}
def qpfs_filter(X, y, r=None, sigma=None, solv='quadprog', fn=pearson_corr):
"""Performs Quadratic Programming Feature Selection algorithm.
Note: this realization requires labels to start from 1 and be numerical.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
y : array-like, shape (n_samples,)
The classes for the samples.
r : int
The number of samples to be used in Nystrom optimization.
sigma : double
The threshold for eigenvalues to be used in solving QP optimization.
    solv : string, default
        The name of the QP solver, following the qpsolvers
        (https://pypi.org/project/qpsolvers/) naming. Note that quadprog is
        used by default.
    fn : function(array, array), default
        The function used to measure correlation, for example Pearson
        correlation or mutual information. Note that Pearson correlation is
        used by default.
Returns
-------
    array-like, shape (n_features,) : the ranks of the features in the
        dataset; as the rank increases, feature relevance increases and
        redundancy decreases.
See Also
--------
http://www.jmlr.org/papers/volume11/rodriguez-lujan10a/rodriguez-lujan10a.pdf
Examples
--------
>>> from ITMO_FS.filters.univariate import qpfs_filter
    >>> import numpy as np
>>> x = np.array([[3, 3, 3, 2, 2], [3, 3, 1, 2, 3], [1, 3, 5, 1, 1],
... [3, 1, 4, 3, 1], [3, 1, 2, 3, 1]])
>>> y = np.array([1, 3, 2, 1, 2])
>>> ranks = qpfs_filter(x, y)
>>> print(ranks)
"""
return qpfs_body(X, y, fn, r=r, sigma=sigma, solv=solv)
|
115063
|
import sys
sys.path.append('../configs')
sys.path.append('../utils')
sys.path.append('../tfops')
import numpy as np
# ../utils
from reader import read_npy
# ../config
from path import CIFARPROCESSED
from info import CIFARNCLASS
# TripletDatamanager and NpairDatamanager are expected to come from a module
# on the paths appended above; its name is not shown in this file.
def test1():
val_embed = read_npy(CIFARPROCESSED+'val_image.npy')
val_label = read_npy(CIFARPROCESSED+'val_label.npy')
cifar = TripletDatamanager(val_embed, val_label, CIFARNCLASS, nsclass=10)
count = np.zeros(cifar.nclass)
nbatch = cifar.ndata//50+1
for i in range(nbatch):
_, label = cifar.next_batch(50)
for index in range(len(label)):
count[label[index]]+=1
print(count)
def test2():
val_embed = read_npy(CIFARPROCESSED+'val_image.npy')
val_label = read_npy(CIFARPROCESSED+'val_label.npy')
cifar = NpairDatamanager(val_embed, val_label, CIFARNCLASS, nsclass=4)
_, _, anc_l, pos_l = cifar.next_batch(32)
print(anc_l)
print(pos_l)
if __name__=='__main__':
test1()
test2()
|
115072
|
import torch
import gpytorch
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.models.gp_regression import SingleTaskGP
from gpytorch.kernels import RBFKernel, ScaleKernel
from online_gp.utils import regression
class OnlineExactRegression(torch.nn.Module):
def __init__(self, stem, init_x, init_y, lr, **kwargs):
super().__init__()
self.stem = stem.to(init_x.device)
if init_y.t().shape[0] != 1:
_batch_shape = init_y.t().shape[:-1]
else:
_batch_shape = torch.Size()
features = self.stem(init_x)
self.gp = SingleTaskGP(
features,
init_y,
covar_module=ScaleKernel(RBFKernel(batch_shape=_batch_shape, ard_num_dims=stem.output_dim),
batch_shape=_batch_shape)
)
self.mll = ExactMarginalLogLikelihood(self.gp.likelihood, self.gp)
self.optimizer = torch.optim.Adam(self.parameters(), lr=lr)
self._raw_inputs = [init_x]
self._target_batch_shape = _batch_shape
self.target_dim = init_y.size(-1)
def update(self, inputs, targets, update_stem=True, update_gp=True):
inputs = inputs.view(-1, self.stem.input_dim)
targets = targets.view(-1, self.target_dim)
# add observation
self.train()
self._raw_inputs = [torch.cat([*self._raw_inputs, inputs])]
self.gp.train_targets = torch.cat([
self.gp.train_targets,
self._reshape_targets(targets)
], dim=-1)
if update_stem:
self._refresh_features(*self._raw_inputs, strict=False)
else:
with torch.no_grad():
self._refresh_features(*self._raw_inputs, strict=False)
self.mll = ExactMarginalLogLikelihood(self.gp.likelihood, self.gp)
# update stem and GP
if update_gp:
self.optimizer.zero_grad()
with gpytorch.settings.skip_logdet_forward(True):
train_dist = self.gp(*self.gp.train_inputs)
loss = -self.mll(train_dist, self.gp.train_targets).sum()
loss.backward()
self.optimizer.step()
self.gp.zero_grad()
# update GP training data again
if update_stem:
with torch.no_grad():
self._refresh_features(*self._raw_inputs)
self.eval()
stem_loss = gp_loss = loss.item() if update_gp else 0.
return stem_loss, gp_loss
def fit(self, inputs, targets, num_epochs, test_dataset=None):
records = []
self.gp.train_targets = self._reshape_targets(targets)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, num_epochs, 1e-4)
for epoch in range(num_epochs):
self.train()
self.mll.train()
self.optimizer.zero_grad()
self._refresh_features(inputs)
train_dist = self.gp(*self.gp.train_inputs)
with gpytorch.settings.skip_logdet_forward(False):
loss = -self.mll(train_dist, self.gp.train_targets).sum()
loss.backward()
self.optimizer.step()
lr_scheduler.step()
self.gp.zero_grad()
rmse = nll = float('NaN')
if test_dataset is not None:
test_x, test_y = test_dataset[:]
rmse, nll = self.evaluate(test_x, test_y)
records.append({'train_loss': loss.item(), 'test_rmse': rmse,
'test_nll': nll, 'noise': self.gp.likelihood.noise.mean().item(),
'epoch': epoch + 1})
with torch.no_grad():
self._refresh_features(inputs)
self.eval()
return records
def forward(self, inputs):
inputs = inputs.view(-1, self.stem.input_dim)
features = self.stem(inputs)
return self.gp(features)
def predict(self, inputs):
self.eval()
pred_dist = self(inputs)
pred_dist = self.gp.likelihood(pred_dist)
return pred_dist.mean, pred_dist.variance
def evaluate(self, inputs, targets):
inputs = inputs.view(-1, self.stem.input_dim)
targets = targets.view(-1, self.target_dim)
with torch.no_grad():
return regression.evaluate(self, inputs, targets)
def set_train_data(self, inputs, targets, strict):
inputs = inputs.expand(*self._target_batch_shape, -1, -1)
if self.target_dim == 1:
targets = targets.squeeze(0)
self.gp.set_train_data(inputs, targets, strict)
def _reshape_targets(self, targets):
targets = targets.view(-1, self.target_dim)
if targets.size(-1) == 1:
targets = targets.squeeze(-1)
else:
targets = targets.t()
return targets
def _refresh_features(self, inputs, strict=True):
features = self.stem(inputs)
self.set_train_data(features, self.gp.train_targets, strict)
return features
def set_lr(self, gp_lr, stem_lr=None, bn_mom=None):
stem_lr = gp_lr if stem_lr is None else stem_lr
self.optimizer = torch.optim.Adam([
dict(params=self.gp.parameters(), lr=gp_lr),
dict(params=self.stem.parameters(), lr=stem_lr)
])
if bn_mom is not None:
for m in self.stem.modules():
if isinstance(m, torch.nn.BatchNorm1d):
m.momentum = bn_mom
@property
def noise(self):
return self.gp.likelihood.noise
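# Usage sketch (illustrative names; assumes `stem` is a feature extractor with
# `input_dim` and `output_dim` attributes, as this class requires):
#
#   model = OnlineExactRegression(stem, init_x, init_y, lr=1e-2)
#   model.fit(init_x, init_y, num_epochs=100)
#   for x_t, y_t in stream:          # incremental updates from new observations
#       stem_loss, gp_loss = model.update(x_t, y_t)
#   mean, var = model.predict(test_x)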
|
115150
|
from hashlib import sha256
from hmac import HMAC
import os
class Encrypt(object):
def encrypt(self, password, salt=None):
if salt is None:
salt = os.urandom(8)
result = password.encode('utf-8')
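        # Key stretching: feed the digest back through salted HMAC-SHA256 ten
        # times, so each password guess costs ten HMAC evaluations.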
for i in range(10):
result = HMAC(result, salt, sha256).digest()
return salt + result
    def validate(self, password, hashed):
        return hashed == self.encrypt(password, salt=hashed[:8])
if __name__ == '__main__':
obj = Encrypt()
hashed = obj.encrypt('wh5622')
# print(bytes.decode(hashed))
    ans = obj.validate('wh5622', hashed)
print(ans)
|
115194
|
import os
import subprocess
import itertools
import pytest
from click.testing import CliRunner
from hobbit import main as hobbit
from hobbit.bootstrap import templates
from . import BaseTest, rmdir, chdir
class TestHobbit(BaseTest):
wkdir = os.path.abspath('hobbit-tox-test')
def setup_method(self, method):
rmdir(self.wkdir)
def teardown_method(self, method):
os.chdir(self.root_path)
rmdir(self.wkdir)
@pytest.fixture
def runner(self):
yield CliRunner()
def test_not_exist_cmd(self, runner):
result = runner.invoke(hobbit)
assert result.exit_code == 0
result = runner.invoke(hobbit, ['doesnotexistcmd'], obj={})
assert 'Error: cmd not exist: doesnotexistcmd' in result.output
@pytest.mark.parametrize(
'name,template,celery_,dist',
itertools.product(
['haha'], templates, ['--celery', '--no-celery'],
[None, '.', wkdir]))
@chdir(wkdir)
def test_new_cmd(self, runner, name, template, celery_, dist):
options = [
'--echo', 'new', '-p 1024', '-n', name, '-t', template, celery_]
if dist:
assert os.getcwd() == os.path.abspath(dist)
options.extend(['-d', dist])
result = runner.invoke(hobbit, options, obj={})
assert result.exit_code == 0, result.output
assert 'mkdir\t{}'.format(self.wkdir) in result.output
assert 'render\t{}'.format(self.wkdir) in result.output
file_nums = {
            # start + 29 files + 11 dirs + 1 end + empty
'shire | --no-celery': 1 + 29 + 11 + 1 + 1 - 1,
# start + files + mkdir + tail
'shire | --celery': 1 + 30 + 12 + 1,
'rivendell | --no-celery': 1 + 31 + 11 + 1,
'rivendell | --celery': 1 + 32 + 12 + 1,
}
assert len(result.output.split('\n')) == file_nums[
f'{template} | {celery_}']
assert subprocess.call(['flake8', '.']) == 0
assert subprocess.call(
'pip install -r requirements.txt '
'--upgrade-strategy=only-if-needed',
shell=True, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) == 0
assert subprocess.call(['pytest'], stdout=subprocess.DEVNULL) == 0
# test --force option
result = runner.invoke(hobbit, options, obj={})
assert all([i in result.output for i in ['exists ', 'ignore ...']])
options.extend(['-f'])
result = runner.invoke(hobbit, options, obj={})
assert any([i in result.output for i in ['exists ', 'ignore ...']])
@chdir(wkdir)
def test_dev_init_cmd(self, runner):
# new project use rivendell template
cmd = ['--echo', 'new', '-n', 'haha', '-p', '1024', '-t', 'rivendell']
result = runner.invoke(hobbit, cmd, obj={})
assert result.exit_code == 0
result = runner.invoke(hobbit, ['dev', 'init', '--all'], obj={})
assert result.exit_code == 0, result.output
|
115236
|
import SimpleITK as sitk
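# Convert each .mha volume listed in data_mha.txt to NIfTI, writing it to the
# corresponding path in data_nii.txt (the two files are assumed to list paths
# line by line in matching order).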
data_mha = open('data_mha.txt', 'r')
mha_dir = data_mha.readlines()
data_nii = open('data_nii.txt', 'r')
nii_dir = data_nii.readlines()
for i in range(len(mha_dir)):
print(i)
path, _ = mha_dir[i].split("\n")
savepath, _ = nii_dir[i].split("\n")
img = sitk.ReadImage(path)
sitk.WriteImage(img, savepath)
|
115251
|
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from scipy.stats import norm
import time
import os
from demoire.epll.epll import EPLLhalfQuadraticSplit
from demoire.epll.utils import get_gs_matrix
def process(noiseI, GS, matpath, DC):
patchSize = 8
noiseSD = 25/255
# same to matlab seed
# np.random.seed(1)
# rand = np.array(norm.ppf(np.random.rand(I.shape[1], I.shape[0]))).T
# noiseI = I + noiseSD * rand
excludeList = []
LogLFunc = []
cleanI, psnr, cost = EPLLhalfQuadraticSplit(
noiseI = noiseI,
rambda = patchSize**2/noiseSD**2,
patchSize = patchSize,
betas = (1/(noiseSD**2))*np.array([1,4,8,16,32]),
T = 1,
I = None,
LogLFunc = LogLFunc,
GS = GS,
excludeList = None,
SigmaNoise = None,
matpath = matpath,
DC = DC
)
return cleanI
def denoise( target,
matpath,
DC,
convert_type = 'RGB'
):
convert_type = convert_type.upper()
GS = get_gs_matrix(path=matpath, DC=DC)
if convert_type == 'L':
targetI = np.array(Image.open(target).convert(convert_type))/255
print('grayscale')
cleanI = process(targetI, GS, matpath, DC)
elif convert_type == 'RGB':
targetI = np.array(Image.open(target).convert(convert_type))/255
cleanI = np.empty(targetI.shape)
for i in range(3):
print()
if i == 0:
print('R channel')
elif i == 1:
print('G channel')
else :
print('B channel')
cleanI[:,:,i] = process(targetI[:,:,i], GS, matpath, DC)
else:
        print('ValueError: convert type should be grayscale (L) or RGB')
exit(-1)
return cleanI
def save_result(cleanI, resultpath):
    assert os.path.exists(os.path.dirname(resultpath)), 'result directory does not exist'
if cleanI.ndim == 2:
cmap='gray'
elif cleanI.ndim == 3:
cmap=None
else:
        print('image dimension should be 2 or 3')
exit(-1)
plt.imsave(resultpath, cleanI, cmap=cmap)
def main(target, matfile, DC, resultdir):
if DC:
print('background')
else:
print('moire')
matdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
matpath = os.path.join(matdir, matfile)
# cleanI = np.array(Image.open(target).convert('RGB'))/255
cleanI = denoise(target=target, matpath=matpath, DC=DC, convert_type='L')
img_type = os.path.basename(target).split('.')[-1]
filename = ''.join(os.path.basename(target).split('.')[:-1]) + '_' + ('background' if DC else 'moire') + '.' + img_type
resultpath = os.path.join(resultdir, filename)
save_result(cleanI, resultpath)
|
115267
|
from collections import OrderedDict
from urllib import urlencode
from admino.serializers import ModelAdminSerializer
from django.core.urlresolvers import reverse_lazy
from django.http import JsonResponse
from django.views.generic import View
class APIView(View):
def json_response(self, data, *args, **kwargs):
return JsonResponse(data, safe=False, *args, **kwargs)
class ChangeListRetrieveAPIView(APIView):
def get_api_next_url(self, request, cl):
page_num = cl.page_num
        if not isinstance(page_num, int) or not cl.multi_page:
return None
info = self.model._meta.app_label, self.model._meta.model_name
url = reverse_lazy("admin:%s_%s_api_list" % info)
host = request.get_host()
params = cl.params
params["p"] = page_num + 1
return "%s://%s%s?%s" % (request.scheme, host, url, urlencode(params))
def get_api_previous_url(self, request, cl):
page_num = cl.page_num
if page_num == 0 or not cl.multi_page:
return None
info = self.model._meta.app_label, self.model._meta.model_name
url = reverse_lazy("admin:%s_%s_api_list" % info)
host = request.get_host()
params = cl.params
params["p"] = page_num - 1
return "%s://%s%s?%s" % (request.scheme, host, url, urlencode(params))
def get(self, request, model_admin, admin_cl, *args, **kwargs):
self.model = admin_cl.model
results = []
for obj in admin_cl.result_list:
results.append(model_admin.obj_as_dict(request, obj))
data = OrderedDict()
data["count"] = admin_cl.result_count
data["next"] = self.get_api_next_url(request, admin_cl)
data["previous"] = self.get_api_previous_url(request, admin_cl)
data["results"] = results
return self.json_response(data)
class APIMetaView(APIView):
def get(self, request, model_admin, *args, **kwargs):
form = model_admin.get_form(request)
data = ModelAdminSerializer(model_admin=model_admin, admin_form=form).data
return self.json_response(data)
class AdminDetailRetrieveAPIView(APIView):
def get(self, request, model_admin, admin_cl, *args, **kwargs):
return self.json_response("ok")
|
115269
|
from .base_capsule_options import BaseCapsuleOptionsWidget
from brainframe_qt.api_utils import api
class StreamCapsuleOptionsWidget(BaseCapsuleOptionsWidget):
def __init__(self, stream_id, parent=None):
super().__init__(parent=parent)
assert stream_id is not None
self.window().setWindowTitle(self.tr("Stream Capsule Options"))
self.stream_id = stream_id
def change_capsule(self, capsule_name):
# Add all of the global options
super().change_capsule(capsule_name)
# Get stream-specific option items
stream_options = api.get_capsule_option_vals(capsule_name,
self.stream_id)
enabled_option = api.is_capsule_active(capsule_name, self.stream_id)
# Lock all options that are not overwritten by stream specific options
for option_item in self.option_items:
            is_locked = option_item.option_name not in stream_options
option_item.show_lock(True)
option_item.set_locked(is_locked)
# Set the state for other generic options
self.enabled_option.show_lock(True)
self.enabled_option.set_locked(enabled_option is None)
# Set the stream-specific setting, if there is one
if enabled_option is not None:
self.enabled_option.set_val(enabled_option)
for option_name, option_patch in stream_options.items():
option_item = next(o for o in self.option_items
if o.option_name == option_name)
option_item.set_val(option_patch)
def apply_changes(self, stream_id=None):
super().apply_changes(stream_id=self.stream_id)
|
115317
|
import json
import os
import shutil
import stat
import tarfile
import urllib
import subprocess
import docker
from requirementslib import Requirement
from tqdm import tqdm
from .release import get_release
class NoReleaseCandidate(Exception):
def __init__(self, requirement):
super(NoReleaseCandidate, self).__init__()
self.requirement = requirement
class NoReleaseAsset(Exception):
def __init__(self, package_build):
super(NoReleaseAsset, self).__init__()
self.package_build = package_build
class ReleaseRequirementsMissmatched(Exception):
def __init__(self, requirement, potential_candidates):
super(ReleaseRequirementsMissmatched, self).__init__()
self.requirement = requirement
self.potential_candidates = potential_candidates
def get_requirements_from_pipenv(dev):
if dev:
command = "{ pipenv lock --dev -r & pipenv lock -r; }"
else:
command = "pipenv lock -r"
with os.popen(command) as pipenv_subprocess:
return pipenv_subprocess.read()
def _parse_requirement_line(line):
if len(line) == 0:
return None
if line[0] == '#':
return None
if line[:2] == '-i':
return None
return {
"line": line,
"requirement": Requirement.from_line(line)
}
def parse_requirements(requirements_string):
return list(filter(lambda x: x is not None, map(_parse_requirement_line, requirements_string.split('\n'))))
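# Example (sketch): parse_requirements('numpy==1.16.4\n# a comment\n-i https://pypi.org/simple')
# returns a single entry for numpy; comment lines and index URLs are skipped.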
def resolve_requirements(requirements, package_builds):
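    # First pass: for each direct requirement pick the newest prebuilt package
    # build that is compatible with all pinned requirements; store None when no
    # prebuilt build exists so the package is installed via pip later.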
resolved_requirements = {}
for requirement in requirements:
name = requirement['requirement'].name
        if name not in package_builds:
resolved_requirements[name] = None
else:
candidates = list(reversed(sorted(package_builds[name], key=lambda build: build.package_version)))
predicate = lambda build: build.is_compatiple(requirement['requirement'], requirements)
selected_candidate = next(filter(predicate, candidates), None)
resolved_requirements[name] = selected_candidate
if not selected_candidate:
predicate = lambda build: build.version_matches(requirement['requirement'])
potential_candidates = list(filter(predicate, candidates))
if len(potential_candidates) > 0:
raise ReleaseRequirementsMissmatched(requirement['requirement'], potential_candidates)
else:
raise NoReleaseCandidate(requirement['requirement'])
for name, requirement in resolved_requirements.copy().items():
if requirement is None:
continue
for pypi_dep_name, pypi_dep_specifier in requirement.pypi_dependencies():
            if pypi_dep_name not in resolved_requirements:
candidates = list(reversed(sorted(package_builds[pypi_dep_name], key=lambda build: build.package_version)))
requirement = _parse_requirement_line(''.join([pypi_dep_name, pypi_dep_specifier]))
predicate = lambda build: build.is_compatiple(requirement['requirement'], requirements)
selected_candidate = next(filter(predicate, candidates), None)
                if selected_candidate is None:
raise NoReleaseCandidate(requirement['requirement'])
resolved_requirements[pypi_dep_name] = selected_candidate
return resolved_requirements
def prepare_tarfile(url, download_filename, package_directory):
urllib.request.urlretrieve(url, download_filename)
tar = tarfile.open(download_filename, "r:gz")
tar.extractall(package_directory)
tar.close()
def download_and_prepare_asset(asset, package_release, package_build):
url = asset.browser_download_url
download_directory = os.environ['HOME'] + '/.lambdipy/packages/'
os.makedirs(download_directory, exist_ok=True)
print(f'Downloading {package_build.package_name} from GitHub release {package_release.tag_name}')
download_filename = download_directory + os.path.basename(url)
package_directory = download_directory + '/' + package_build.git_tag()
prepare_tarfile(url, download_filename, package_directory)
return download_directory + '/' + package_build.git_tag()
def build_and_prepare_package(package_build):
print(f'Building {package_build.package_name} build version {package_build.git_tag()}')
package_build.build_docker()
package_build.copy_from_docker()
return package_build.build_directory()
def find_package_in_cache(package_build):
download_directory = os.environ['HOME'] + '/.lambdipy/packages/'
package_directory = download_directory + '/' + package_build.git_tag()
if os.path.isdir(package_directory):
return package_directory
def prepare_resolved_requirements(resolved_requirements):
package_paths = {}
for package_name, package_build in resolved_requirements.items():
if not package_build:
continue
cached_path = find_package_in_cache(package_build)
if cached_path:
package_paths[package_name] = cached_path
print(f'Found {package_build.package_name} {package_build.git_tag()} in cache')
continue
use_token = os.environ.get('GITHUB_TOKEN') is not None
package_release = get_release(package_build, use_token)
if package_release:
assets = package_release.get_assets()
if assets.totalCount == 0:
raise NoReleaseAsset(package_build)
package_paths[package_name] = download_and_prepare_asset(assets[0], package_release, package_build)
else:
package_paths[package_name] = build_and_prepare_package(package_build)
return package_paths
# https://stackoverflow.com/a/12514470/6871665
def _copytree(src, dest):
os.makedirs(dest, exist_ok=True)
if not os.path.isdir(src):
shutil.copy2(src, dest)
else:
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dest, item)
if os.path.isdir(s):
if not os.path.isdir(d):
shutil.copytree(s, d)
else:
shutil.copy2(s, d)
def copy_prepared_releases_to_build_directory(package_paths, build_directory='./build'):
shutil.rmtree(build_directory, ignore_errors=True)
os.makedirs(build_directory, exist_ok=True)
for _, directory in package_paths.items():
for item in os.listdir(directory):
_copytree(directory + '/' + item, build_directory + '/' + os.path.basename(item))
def _run_command_in_docker(command, build_directory, python_version):
volumes = {
f'{os.path.abspath(build_directory)}/': {
'bind': '/tmp/export/',
'mode': 'rw'
}
}
environment = {
'HOME': '/home'
}
cli = docker.APIClient()
image_tag = f'build-python{python_version}'
image = f'lambci/lambda:{image_tag}'
progress_bars = {}
pull_generator = cli.pull(image, stream=True)
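    # Stream the docker pull progress and render one tqdm bar per image layer.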
for line in (line for output in pull_generator for line in output.decode().split('\n') if len(line) > 0):
progress_dict = json.loads(line)
if 'id' not in progress_dict or progress_dict['id'] == image_tag:
print(progress_dict)
elif progress_dict['id'] in progress_bars:
progress_bar = progress_bars[progress_dict['id']]
progress_detail = progress_dict['progressDetail']
if 'current' in progress_detail:
progress_bar.update(progress_detail['current'] - progress_bar.n)
if 'total' in progress_detail and progress_detail['total'] != progress_bar.total:
progress_bar.reset(progress_detail['total'])
progress_bar.set_description(progress_dict['id'] + ' | ' + progress_dict['status'])
else:
progress_bars[progress_dict['id']] = tqdm(desc=progress_dict['id'] + ' | ' + progress_dict['status'])
container = cli.create_container(
image,
volumes=list(map(lambda x: x['bind'], volumes.values())),
host_config=cli.create_host_config(binds=volumes),
command='sleep infinity',
environment=environment,
user=f'{os.getuid()}:{os.getgid()}'
)
cli.start(container=container.get('Id'))
try:
command_exec = cli.exec_create(container=container.get('Id'), cmd=command)
command_runtime = cli.exec_start(exec_id=command_exec.get('Id'), stream=True)
for line in command_runtime:
print(line.decode('utf-8'), end='')
finally:
cli.kill(container.get('Id'))
cli.remove_container(container.get('Id'))
def install_non_resolved_requirements(resolved_requirements, requirements, python_version, keep_tests=None, no_docker=False,
build_directory='./build'):
install_dir = build_directory if no_docker else '/tmp/export'
packages_to_install = ''
for requirement in requirements:
if resolved_requirements[requirement['requirement'].name] is not None:
continue
requirement_line = requirement['line']
packages_to_install += f' "{requirement_line}"'
# GIT_SSH_COMMAND="/usr/bin/ssh -o StrictHostKeyChecking=no"
install_command = f'pip install {packages_to_install} -t {install_dir}' if len(packages_to_install) > 0 else ''
if len(packages_to_install) > 0:
print(f'Installing remaining packages via pip')
    exclude_tests_pattern = '\\|'.join(keep_tests) if keep_tests else '*'
with open(build_directory + '/build', "w") as f:
f.writelines([
'#!/bin/bash\n',
'set -ex\n',
install_command + '\n',
f'rm -rf {install_dir}/*.egg-info\n',
f'rm -rf {install_dir}/*.dist-info\n',
f'find {install_dir}/ -name __pycache__ | xargs rm -rf\n',
f'find {install_dir}/ -name tests | grep -v "{exclude_tests_pattern}" | xargs rm -rf\n',
f'find {install_dir}/ -name "*.so" | xargs strip\n'
])
st = os.stat(build_directory + '/build')
os.chmod(build_directory + '/build', st.st_mode | stat.S_IEXEC)
print(open(build_directory + '/build').read())
if no_docker:
print("Installing without docker...")
return_code = subprocess.Popen([build_directory + '/build']).wait()
if return_code != 0:
print("Error in building lambdipy build.")
exit(return_code)
else:
print("Installing in a docker container...")
_run_command_in_docker(f'{install_dir}/build', build_directory=build_directory, python_version=python_version)
print('Finalizing the build')
os.remove(build_directory + '/build')
def copy_include_paths(include_paths, build_directory='./build'):
for path in include_paths:
basename = os.path.basename(path)
if len(basename) == 0:
basename = path
if os.path.isdir(path):
shutil.copytree(path, build_directory + '/' + basename)
else:
shutil.copy2(path, build_directory + '/' + basename)
|
115344
|
import pickle
import os
import numpy as np
import torch
import warnings
from tqdm import tqdm
from Metrics import evaluateTracking
from dataset.dataLoader import Data_Loader_MOT
from network.tubetk import TubeTK
from post_processing.tube_nms import multiclass_nms
from apex import amp
import argparse
import multiprocessing
from configs.default import __C, cfg_from_file
from post_processing.tube_iou_matching import matching
warnings.filterwarnings('ignore')
import shutil
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not torch.distributed.is_available():
return
if not torch.distributed.is_initialized():
return
world_size = torch.distributed.get_world_size()
if world_size == 1:
return
torch.distributed.barrier()
def match_video(video_name, tmp_dir, output_dir, model_arg):
tubes_path = os.path.join(tmp_dir, video_name)
tubes = []
frames = sorted([int(x) for x in os.listdir(tubes_path)])
for f in frames:
tube = pickle.load(open(os.path.join(tubes_path, str(f)), 'rb'))
tubes.append(tube)
tubes = np.concatenate(tubes)
matching(tubes, save_path=os.path.join(output_dir, video_name + '.txt'), verbose=True, arg=model_arg)
def evaluate(model, loader, test_arg, model_arg, output_dir='output'):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
tmp_dir = os.path.join(output_dir, 'tmp')
    shutil.rmtree(tmp_dir, ignore_errors=True)
os.makedirs(tmp_dir, exist_ok=True)
if test_arg.rank == 0:
loader = tqdm(loader, ncols=20)
for i, data in enumerate(loader):
imgs, img_metas = data[:2]
imgs = imgs.cuda()
with torch.no_grad():
tubes, _, _ = zip(*model(imgs, img_metas, return_loss=False))
for img, tube, img_meta in zip(imgs, tubes, img_metas):
# ===========================================VIS OUTPUT====================================================
# if img is not None:
# vis_output(img.cpu(), img_meta, bbox.cpu(), stride=model_arg.frame_stride, out_folder='/home/pb/results/')
# =========================================================================================================
tube[:, [0, 5, 10]] += img_meta['start_frame']
os.makedirs(os.path.join(tmp_dir, img_meta['video_name']), exist_ok=True)
tube = tube.cpu().data.numpy()
pickle.dump(tube, open(os.path.join(tmp_dir, img_meta['video_name'], str(img_meta['start_frame'])), 'wb'))
synchronize()
if test_arg.rank == 0:
print('Finish prediction, Start matching')
video_names = os.listdir(tmp_dir)
pool = multiprocessing.Pool(processes=20)
pool_list = []
for vid in video_names:
pool_list.append(pool.apply_async(match_video, (vid, tmp_dir, os.path.join(output_dir, 'res'), model_arg,)))
for p in tqdm(pool_list, ncols=20):
p.get()
pool.close()
pool.join()
shutil.rmtree(tmp_dir)
if test_arg.trainOrTest == 'train' and test_arg.dataset == 'MOT17':
print("FINISH MATCHING, START EVALUATE")
seq_map = 'MOT17_train.txt'
evaluateTracking(seq_map, os.path.join(output_dir, 'res'),
os.path.join(test_arg.data_url, 'train'), 'MOT17')
# elif test_arg.trainOrTest == 'train' and test_arg.dataset == 'MOT15':
# print("FINISH MATCHING, START EVALUATE")
# seq_map = 'MOT15_train.txt'
# evaluateTracking(seq_map, os.path.join(output_dir, 'res'),
# os.path.join(test_arg.data_url[3], 'train'), 'MOT15')
def main(test_arg, model_arg):
torch.distributed.init_process_group(backend="nccl", init_method='env://')
local_rank = int(os.environ["LOCAL_RANK"])
print('Rank: ' + str(test_arg.rank) + " Start!")
torch.cuda.set_device(local_rank)
if local_rank == 0:
print("Building TubeTK Model")
model = TubeTK(num_classes=1, arg=model_arg, pretrained=False)
data_loader = Data_Loader_MOT(
batch_size=test_arg.batch_size,
num_workers=8,
input_path=test_arg.data_url,
train_epoch=1,
test_epoch=1,
model_arg=model_arg,
dataset=test_arg.dataset,
test_seq=None,
test_type=test_arg.trainOrTest,
)
model = model.cuda(local_rank)
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if test_arg.apex:
model = amp.initialize(model, opt_level='O1')
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
output_device=local_rank,
find_unused_parameters=True)
if test_arg.local_rank == 0:
print("Loading Model")
checkpoint = torch.load(test_arg.model_path + '/' + test_arg.model_name, map_location=
{'cuda:0': 'cuda:' + str(test_arg.local_rank),
'cuda:1': 'cuda:' + str(test_arg.local_rank),
'cuda:2': 'cuda:' + str(test_arg.local_rank),
'cuda:3': 'cuda:' + str(test_arg.local_rank),
'cuda:4': 'cuda:' + str(test_arg.local_rank),
'cuda:5': 'cuda:' + str(test_arg.local_rank),
'cuda:6': 'cuda:' + str(test_arg.local_rank),
'cuda:7': 'cuda:' + str(test_arg.local_rank)})
model.load_state_dict(checkpoint['state'], strict=False)
if test_arg.local_rank == 0:
print("Finish Loading")
del checkpoint
model.eval()
loader = data_loader.test_loader
evaluate(model, loader, test_arg, model_arg, output_dir=test_arg.output_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--model_path', default='./models', type=str, help='model path')
parser.add_argument('--model_name', default='TubeTK', type=str, help='model name')
parser.add_argument('--data_url', default='./data/', type=str, help='model path')
parser.add_argument('--output_dir', default='./link_res', type=str, help='output path')
parser.add_argument('--apex', action='store_true', help='whether use apex')
parser.add_argument('--config', default='./configs/TubeTK_resnet_50_FPN_8frame_1stride.yaml', type=str, help='config file')
parser.add_argument('--dataset', default='MOT17', type=str, help='test which dataset: MOT17, MOT15')
parser.add_argument('--trainOrTest', default='test', type=str, help='evaluate train or test set')
parser.add_argument('--local_rank', type=int, help='gpus')
test_arg, unparsed = parser.parse_known_args()
model_arg = __C
if test_arg.config is not None:
cfg_from_file(test_arg.config)
test_arg.rank = int(os.environ["RANK"])
main(test_arg, model_arg)
|
115372
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
data = np.loadtxt("sample.dat")
mean = data.mean()
sigma = data.std()
x = np.linspace(data.min(), data.max(), 100)
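# Gaussian PDF with the sample mean and standard deviation:
# f(x) = exp(-(x - mean)^2 / (2 * sigma^2)) / sqrt(2 * pi * sigma^2)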
y = np.exp(-0.5 * ((x - mean) / sigma)**2)
y = y / (np.sqrt(2.0 * np.pi * sigma**2))
plt.hist(data, alpha=0.5, bins=20, density=True, label="Data. N={}".format(len(data)))
plt.plot(x,y, label="Estimate. mean={:.1f} sigma={:.1f}".format(mean, sigma))
plt.xlabel("x")
plt.ylabel("PDF (x)")
plt.legend(loc=2)
plt.savefig("sample.pdf")
|
115407
|
import unittest
import xr
class TestBool(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_bool(self):
self.assertTrue(True)
self.assertFalse(False)
self.assertTrue(xr.TRUE)
self.assertFalse(xr.FALSE)
self.assertTrue(xr.Bool32(True))
self.assertFalse(xr.Bool32(False))
if __name__ == '__main__':
unittest.main()
|
115410
|
import copy
import collections
import itertools
import core.attributes as attributes
import core.properties as properties
from core.exceptions import WrongArityError
from core.TOS import _TOS
class BaseExpression( object ):
def __init__( self, children = [], line = -1 ):
self.head = self.__class__
# For a simple use of patterns after matching
        # assuming only two levels of nesting, that is,
        # children may contain iterables (sequences or lists) but
        # not iterables that contain further iterables
self.children = []
for ch in children:
#if isinstance( ch, collections.Iterable ):
if isinstance( ch, Sequence ):
self.children.extend( ch )
else:
self.children.append( ch )
self.properties = set()
self.size = None
def get_head( self ):
return self.head
def get_children( self ):
return self.children
def set_children( self, i, expr ):
self.children[i] = expr
def get_size( self ):
return self.size
def num_nodes( self ):
return len(list(self.iterate_preorder()))
def _cleanup( self ):
raise NotImplementedError
def match( self, ctx ):
raise NotImplementedError
def iterate_preorder( self ):
yield self
for child in self.get_children():
yield from child.iterate_preorder()
def _preorder_position( self, parent=(None, None) ):
yield (id(self), parent)
for i, child in enumerate(self.get_children()):
yield from child._preorder_position( (self, i) )
def _postorder_stack( self, parent=(None, None) ):
if len( self.get_children() ) == 0:
return [ self, parent, [] ]
else:
return [ self, parent, [ch._postorder_stack((self, i)) for i, ch in enumerate(self.get_children())] ]
def __eq__( self, other ):
return self.get_head() == other.get_head() and \
len(self.get_children()) == len(other.get_children()) and \
all( [ x == y for x,y in zip(self.get_children(), other.get_children()) ] )
#
# Property handling
#
def set_property( self, prop ):
self.properties.add( prop )
def get_properties( self ):
return self.properties
def isInput( self ):
return properties.INPUT in self.properties
def isOutput( self ):
return properties.OUTPUT in self.properties
def isTemporary( self ):
return properties.TEMPORARY in self.properties
def isScalar( self ):
size = self.get_size()
return len( [dim for dim in size if dim != sONE] ) == 0
#return properties.SCALAR in self.properties
def isVector( self ):
size = self.get_size()
return len( [dim for dim in size if dim != sONE] ) == 1
#return properties.VECTOR in self.properties
def isMatrix( self ):
size = self.get_size()
return len( [dim for dim in size if dim != sONE] ) == 2
#return properties.MATRIX in self.properties
def isSquare( self ):
return properties.SQUARE in self.properties
def isZero( self ):
return properties.ZERO in self.properties
def isIdentity( self ):
return properties.IDENTITY in self.properties
def isDiagonal( self ):
return properties.DIAGONAL in self.properties
def isTriangular( self ):
return properties.TRIANGULAR in self.properties
def isLowerTriangular( self ):
return properties.LOWER_TRIANGULAR in self.properties
def isUpperTriangular( self ):
return properties.UPPER_TRIANGULAR in self.properties
def isUnitDiagonal( self ):
return properties.UNIT_DIAGONAL in self.properties
def isImplicitUnitDiagonal( self ):
return properties.IMPLICIT_UNIT_DIAGONAL in self.properties
def isSymmetric( self ):
return properties.SYMMETRIC in self.properties
def isSPD( self ):
return properties.SPD in self.properties
def isNonSingular( self ):
return properties.NON_SINGULAR in self.properties
def isOrthogonal( self ):
return properties.ORTHOGONAL in self.properties
def isFullRank( self ):
return properties.FULL_RANK in self.properties
#
# Printing
#
def __repr__( self ):
raise NotImplementedError
class Atom( BaseExpression ):
def __init__( self ):
BaseExpression.__init__( self, [] )
def match( self, ctx ):
# nothing to match, failure
if len( ctx.stack_expr ) == 0:
return None
# pop the expression to match
expr = ctx.stack_expr.pop()
if self == expr:
patt = ctx.stack_patt.pop()
for m in patt.match( ctx ):
yield m
def _cleanup( self ):
return self
def __eq__( self, other ):
raise NotImplementedError
class Expression( BaseExpression ):
pass
class Sequence( Expression ):
def __init__( self, children ):
Expression.__init__( self, children )
def __iter__( self ):
yield from self.get_children()
def _cleanup( self ):
self.children = [ch._cleanup() for ch in self.get_children()]
return self
def match( self, ctx ):
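        # Generator-based backtracking: patterns and expressions advance on two
        # parallel stacks in `ctx`; every complete binding is yielded so the
        # caller can resume the search for alternative matches.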
# Both are Sequences
patt_ch = self.get_children()
expr = ctx.stack_expr.pop()
expr_ch = expr.get_children()
while len( patt_ch ) == 0 and len( expr_ch ) == 0 and \
len( ctx.stack_patt ) > 0:
patt = ctx.stack_patt.pop()
patt_ch = patt.get_children()
expr = ctx.stack_expr.pop()
expr_ch = expr.get_children()
# Exited loop because no more patterns in the stack
# and current sequence is also complete: yield match and done
if len( patt_ch ) == 0 and len( expr_ch ) == 0:
yield ctx.match
return None
# if both empty, match complete, yield
#if len( patt_ch ) == 0 and len( expr_ch ) == 0:
#yield ctx.match
# No pattern to match the expression, failure
if len( patt_ch ) == 0 and len( expr_ch ) > 0:
return None
patt_leaf, patt_next = patt_ch[0], Sequence(patt_ch[1:])
if isinstance( patt_leaf, Pattern ):
ctx.stack_patt.append( patt_next )
# Push full expr back
# The pattern itself will decide what it may match what is left for next
ctx.stack_expr.append( expr )
for m in patt_leaf.match( ctx ):
yield m
else: # Atom or Operator
# nothing to match, failure
if len( expr_ch ) == 0:
return None
else:
expr_leaf, expr_next = expr_ch[0], Sequence(expr_ch[1:])
ctx.stack_patt.append( patt_next )
ctx.stack_expr.append( expr_next )
ctx.stack_expr.append( expr_leaf )
for m in patt_leaf.match( ctx ):
yield m
def __repr__( self ):
return "[ " + ", ".join( [ str(ch) for ch in self.get_children() ] ) + " ]"
class NList( Expression ):
def __init__( self, children ):
Expression.__init__( self, children )
def __iter__( self ):
yield from self.get_children()
def _cleanup( self ):
self.children = [ch._cleanup() for ch in self.get_children()]
return self
def match( self, ctx ):
# nothing to match, failure
if len( ctx.stack_expr ) == 0:
return None
# pop the expression to match
expr = ctx.stack_expr.pop()
if self.get_head() == expr.get_head():
patt_seq = Sequence(self.get_children())
expr_seq = Sequence(expr.get_children())
ctx.stack_expr.append( expr_seq )
for m in patt_seq.match( ctx ):
yield m
def __repr__( self ):
return "NL[ " + ", ".join( [ str(ch) for ch in self.get_children() ] ) + " ]"
#
# Operators
#
class Operator( Expression ):
def __init__( self, children, attr, arity ):
Expression.__init__( self, children )
self.attributes = attr
self.flatten_associative()
# Apply identity if so (e.g., Plus(a) -> a) (not here, at most in __new__)
self.arity = arity
self.check_arity()
def flatten_associative( self ):
if attributes.ASSOCIATIVE in self.attributes:
children = []
for ch in self.get_children():
if isinstance( ch, self.__class__ ):
children.extend( ch.get_children() )
else:
children.append( ch )
self.children = children
def check_arity( self ):
if self.arity == attributes.UNARY and \
len( self.children ) != 1:
raise WrongArityError
if self.arity == attributes.BINARY and \
len( self.children ) != 2:
raise WrongArityError
def set_children( self, i, expr ):
Expression.set_children( self, i, expr )
#self.flatten_associative() # [TODO] DOUBLE-CHECK if it can stay commented!!!!
# Also identity
def _cleanup( self ):
self.children = [ch._cleanup() for ch in self.get_children()]
self.flatten_associative()
if attributes.IDENTITY in self.attributes and \
len(self.children) == 1:
return self.children[0]
#return self.__class__( self.children, self.attributes, self.arity )
#return self.__class__( self.children )
return self
def match( self, ctx ):
# nothing to match, failure
if len( ctx.stack_expr ) == 0:
return None
# pop the expression to match
expr = ctx.stack_expr.pop()
if self.get_head() == expr.get_head():
patt_seq = Sequence(self.get_children())
if attributes.COMMUTATIVE not in self.attributes:
expr_seq = Sequence(expr.get_children())
ctx.stack_expr.append( expr_seq )
for m in patt_seq.match( ctx ):
yield m
else:
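                # Commutative operator: try every permutation of the
                # expression's children so argument order cannot prevent a match.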
#_ctx = copy.deepcopy( ctx )
_ctx = copy.copy( ctx )
expr_ch = expr.get_children()
for ch_permutation in itertools.permutations( expr_ch ):
#ctx = copy.deepcopy( _ctx )
ctx = copy.copy( _ctx )
ctx.stack_expr.append( Sequence( list(ch_permutation) ) )
for m in patt_seq.match( ctx ):
yield m
def __eq__( self, other ):
if self.get_head() == other.get_head() and \
len(self.get_children()) == len(other.get_children()):
if attributes.COMMUTATIVE in self.attributes:
return sorted( [ str(ch) for ch in self.get_children() ] ) == \
sorted( [ str(ch) for ch in other.get_children() ] )
else:
return all( [ x == y for x,y in zip(self.get_children(), other.get_children()) ] )
return False
class Equal( Operator ):
def __init__( self, children ):
Operator.__init__( self, children, [], attributes.BINARY )
def lhs( self ):
return self.get_children()[0]
def rhs( self ):
return self.get_children()[1]
def __repr__(self ):
return "Equal( " + ", ".join( [ str(ch) for ch in self.get_children() ] ) + " )"
def to_math( self ):
lhs, rhs = self.children
return "%s = %s" % (lhs.to_math(), rhs.to_math())
class Plus( Operator ):
def __init__( self, children ):
Operator.__init__( self, children, \
[attributes.COMMUTATIVE, attributes.ASSOCIATIVE, attributes.IDENTITY], \
attributes.NARY )
def get_size( self ):
        if self.size is None:
self.size = self.get_children()[0].get_size()
return self.size
def __repr__(self ):
return "Plus( " + ", ".join( [ str(ch) for ch in self.get_children() ] ) + " )"
def to_math( self ):
return "(%s)" % " + ".join([ch.to_math() for ch in self.children])
class Minus( Operator ):
def __init__( self, children ):
Operator.__init__( self, children, [], attributes.UNARY )
def get_size( self ):
        if self.size is None:
self.size = self.get_children()[0].get_size()
return self.size
def __repr__(self ):
return "Minus( " + ", ".join( [ str(ch) for ch in self.get_children() ] ) + " )"
def to_math( self ):
return "-%s" % self.children[0].to_math()
class Times( Operator ):
def __init__( self, children ):
Operator.__init__( self, children, \
[attributes.ASSOCIATIVE, attributes.IDENTITY], \
attributes.NARY )
def get_size( self ):
        if self.size is None:
self.size = self._calc_size()
return self.size
def _calc_size( self ):
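        # The product's size is (rows of the first non-scalar factor, cols of
        # the last one); sONE dimensions are skipped so factors that collapse a
        # dimension still resolve to the correct shape.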
non_scalars = list(filter( lambda x: x.get_size() != (sONE, sONE), self.get_children() ))
if len(non_scalars) == 0:
return (1, 1)
rows = non_scalars[0].get_size()[0]
i = 0
while rows == sONE and i < len(non_scalars) - 1:
if non_scalars[i].size[1] == sONE:
rows = non_scalars[i+1].size[0]
i += 1
cols = non_scalars[-1].size[1]
i = len(non_scalars) - 1
while cols == sONE and i > 0:
#if non_scalars[i].size[0] == 1: # [CHECK]
if non_scalars[i].size[0] == sONE:
cols = non_scalars[i-1].size[1]
i -= 1
return (rows, cols)
def __repr__(self ):
return "Times( " + ", ".join( [ str(ch) for ch in self.get_children() ] ) + " )"
def to_math( self ):
return " * ".join([ch.to_math() for ch in self.children])
class Transpose( Operator ):
def __init__(self, children ):
Operator.__init__( self, children, [], attributes.UNARY )
def get_size( self ):
        if self.size is None:
self.size = list(reversed(self.get_children()[0].get_size()))
return self.size
def __repr__(self ):
return "Transpose( " + ", ".join( [ str(ch) for ch in self.get_children() ] ) + " )"
def to_math( self ):
return "trans(%s)" % self.children[0].to_math()
class Inverse( Operator ):
def __init__(self, children ):
Operator.__init__( self, children, [], attributes.UNARY )
def get_size( self ):
        if self.size is None:
self.size = self.get_children()[0].get_size()
return self.size
def __repr__(self ):
return "Inverse( " + ", ".join( [ str(ch) for ch in self.get_children() ] ) + " )"
def to_math( self ):
return "inverse(%s)" % self.children[0].to_math()
class BlockedExpression( Expression ):
def __init__( self, nd_array, size, shape ):
Expression.__init__( self, nd_array )
self.size = size
self.shape = tuple(shape) # stored as a tuple so it compares cleanly against other shapes
def set_children( self, i, expr ):
# flat index i maps onto the 2D grid of children
row, col = i // len( self.children[0] ), i % len( self.children[0] )
self.children[row][col] = expr
# only works for flattening matrices
def flatten_children( self ):
return list(itertools.chain.from_iterable( self.get_children() ))
def transpose( self ):
self.children = [list(row) for row in zip(*self.children)]
self.size = list(reversed(self.size))
self.shape = tuple(reversed(self.shape))
def _cleanup( self ):
# TODO: should reassign back to children
for ch in self.flatten_children():
ch._cleanup()
return self
def __iter__( self ):
yield from self.get_children()
def match( self, ctx ):
# nothing to match, failure
if len( ctx.stack_expr ) == 0:
return None
# pop the expression to match
expr = ctx.stack_expr.pop()
if self.get_head() == expr.get_head() and \
self.shape == expr.shape:
# This allows the use of PatternPlus and PatternStar for rows or full blocked expressions
# In principle, no BlockedExpression would appear in a pattern, would it?
patt_seq = Sequence(self.flatten_children())
expr_seq = Sequence(expr.flatten_children())
ctx.stack_expr.append( expr_seq )
for m in patt_seq.match( ctx ):
yield m
def iterate_preorder( self ):
yield self
for child in self.flatten_children():
yield from child.iterate_preorder()
# only for matrices (2D blocked expressions)
def _preorder_position( self, parent=(None, None) ):
yield (id(self), parent)
for i in range(len(self.children)):
for j in range(len(self.children[0])):
yield from self.children[i][j]._preorder_position( (self, (i,j)) )
def _postorder_stack( self, parent=(None,None) ):
return [ self, parent, [ch._postorder_stack((self, i)) for i, ch in enumerate(self.flatten_children())] ]
def __getitem__( self, i ):
if i >= len(self.get_children()):
raise IndexError
return self.get_children()[i]
def __eq__( self, other ):
return self.get_head() == other.get_head() and \
self.shape == other.shape and \
self.get_children() == other.get_children()
def __repr__( self ):
#return "[ %s ]" % ( "; ".join([ ", ".join([ cell for cell in row ]) for row in self.get_children() ]) )
return str(self.get_children())
class Predicate( Expression ):
def __init__( self, name, args, size ):
Expression.__init__( self, args )
self.name = name
self.size = []
for s in size:
this_s = []
for dim in s:
if isinstance(dim, str):
this_s.append( Symbol(dim) )
else:
this_s.append( dim )
self.size.append( tuple(this_s) )
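# e.g. Predicate('f', [x], [('m', 'n')]) stores size [(Symbol('m'), Symbol('n'))]:
# string dimensions are promoted to Symbols, anything else is kept as-is.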
def get_name( self ):
return self.name
def set_children( self, i, expr ):
self.children[i] = expr
def _cleanup( self ):
self.children = [ ch._cleanup() for ch in self.get_children()]
return self
def match( self, ctx ):
# nothing to match, failure
if len( ctx.stack_expr ) == 0:
return None
# pop the expression to match
expr = ctx.stack_expr.pop()
if self.get_head() == expr.get_head() and \
self.name == expr.name:
patt_seq = Sequence(self.get_children())
expr_seq = Sequence(expr.get_children())
ctx.stack_expr.append( expr_seq )
for m in patt_seq.match( ctx ):
yield m
def get_size( self ):
return self.size[0]
def __eq__( self, other ):
return self.get_head() == other.get_head() and \
self.name == other.name and \
self.get_children() == other.get_children()
def __repr__( self ):
return "%s( %s )" % (self.name, ", ".join([str(ch) for ch in self.get_children()]))
class Function( Predicate ):
pass
#
# Symbols
#
class Symbol( Atom ):
def __init__( self, name, size=() ):
Atom.__init__( self )
self.name = name
self.size = size
_TOS.register( self )
def get_name( self ):
return self.name
def __eq__( self, other ):
return self.get_head() == other.get_head() and \
self.get_name() == other.get_name()
def __lt__( self, other ):
return self.name < other.name
def set_property( self, prop ):
# TODO improve
if prop == properties.INPUT:
try:
_TOS.unset_property( self.get_name(), properties.OUTPUT )
except KeyError:
pass
elif prop == properties.OUTPUT:
try:
_TOS.unset_property( self.get_name(), properties.INPUT )
except KeyError:
pass
_TOS.set_property( self.get_name(), prop )
def get_properties( self ):
return _TOS.get_properties( self.get_name() )
def isInput( self ):
return properties.INPUT in _TOS.get_properties(self.get_name())
def isOutput( self ):
return properties.OUTPUT in _TOS.get_properties(self.get_name())
def isTemporary( self ):
return properties.TEMPORARY in _TOS.get_properties(self.get_name())
def isScalar( self ):
return properties.SCALAR in _TOS.get_properties(self.get_name())
def isVector( self ):
return properties.VECTOR in _TOS.get_properties(self.get_name())
def isMatrix( self ):
return properties.MATRIX in _TOS.get_properties(self.get_name())
def isSquare( self ):
return properties.SQUARE in _TOS.get_properties(self.get_name())
def isZero( self ):
return properties.ZERO in _TOS.get_properties(self.get_name())
def isIdentity( self ):
return properties.IDENTITY in _TOS.get_properties(self.get_name())
def isDiagonal( self ):
return properties.DIAGONAL in _TOS.get_properties(self.get_name())
def isTriangular( self ):
return properties.TRIANGULAR in _TOS.get_properties(self.get_name())
def isLowerTriangular( self ):
return properties.LOWER_TRIANGULAR in _TOS.get_properties(self.get_name())
def isUpperTriangular( self ):
return properties.UPPER_TRIANGULAR in _TOS.get_properties(self.get_name())
def isUnitDiagonal( self ):
return properties.UNIT_DIAGONAL in _TOS.get_properties(self.get_name())
def isImplicitUnitDiagonal( self ):
return properties.IMPLICIT_UNIT_DIAGONAL in _TOS.get_properties(self.get_name())
def isSymmetric( self ):
return properties.SYMMETRIC in _TOS.get_properties(self.get_name())
def isSPD( self ):
return properties.SPD in _TOS.get_properties(self.get_name())
def isNonSingular( self ):
return properties.NON_SINGULAR in _TOS.get_properties(self.get_name())
def isOrthogonal( self ):
return properties.ORTHOGONAL in _TOS.get_properties(self.get_name())
def isFullRank( self ):
return properties.FULL_RANK in _TOS.get_properties(self.get_name())
def __repr__( self ):
return self.name
def to_math( self ):
return self.name
sONE = Symbol('1')
sZERO = Symbol('0')
class Scalar( Symbol ):
def __init__( self, name, size=None ):
Symbol.__init__( self, name, (sONE, sONE) )
class Vector( Symbol ):
def __init__( self, name, size ):
Symbol.__init__( self, name, (size[0], sONE) ) # ColumnVector
class Matrix( Symbol ):
def __init__( self, name, size ):
Symbol.__init__( self, name, size )
class Tensor( Symbol ): # will need indices as well
def __init__( self, name, size ):
Symbol.__init__( self, name, size )
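# Construction sketch (illustrative; Symbol.__init__ registers each name in the
# module-level _TOS symbol table, which must already exist):
#
# m, n = Symbol('m'), Symbol('n')
# A = Matrix('A', (m, n))
# x = Vector('x', (n,)) # stored with size (n, sONE), i.e. a column vector
# b = Vector('b', (m,))
# eq = Equal([ b, Times([ A, x ]) ])
# eq.to_math() # -> 'b = A * x'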
# This inherits from atom (not a symbol)
class NumericConstant( Atom ):
def __init__( self, value ):
Atom.__init__( self )
self.value = value
self.size = (sONE, sONE) # symbolic scalar size, consistent with Scalar
def get_value( self ):
return self.value
def __eq__( self, other ):
return self.get_head() == other.get_head() and \
self.get_value() == other.get_value()
def __repr__( self ):
return str(self.value)
#
# Patterns
#
# Check validity of a pattern by asserting
# that do not exist two patterns with same name and different length (underscores)
#
# Add the __eq__ to everyone
# If needed, I could have a "same" function for pointer equality
class Pattern( Atom ):
def __init__( self, name ):
Atom.__init__( self )
self.name = name
def get_name( self ):
return self.name
def match( self, ctx ):
# nothing to match, failure
if len( ctx.stack_expr ) == 0:
return None
# pop the expression to match
expr = ctx.stack_expr.pop()
# expr is a Sequence
expr_ch = expr.get_children()
e,o = self.range_in_seq( expr_ch )
_ctx = copy.copy( ctx )
for i in range(e,o+1):
expr_leaf, expr_next = Sequence(expr_ch[:i]), Sequence(expr_ch[i:])
ctx = copy.copy( _ctx )
ctx.stack_expr.append( expr_next )
patt_name = self.get_name()
#### if PatternDot, then the match is not a sequence but a single element
if isinstance( self, PatternDot ):
expr_leaf = expr_leaf.get_children()[0]
####
if patt_name not in ctx.match or \
patt_name in ctx.match and expr_leaf == ctx.match[patt_name]:
ctx.match[patt_name] = expr_leaf
# unstack and keep going
next_patt = ctx.stack_patt.pop()
for m in next_patt.match( ctx ):
yield m
def __eq__( self, other ):
return self.get_head() == other.get_head() and \
self.get_name() == other.get_name()
def __repr__( self ):
raise NotImplementedError( "Pattern.__repr__ not overloaded!" )
class PatternDot( Pattern ):
def __init__( self, name ):
Pattern.__init__( self, name )
# can match one and only one element
def range_in_seq( self, seq ):
return (1, min(1, len(seq)))
def __repr__( self ):
return self.name + "_"
class PatternPlus( Pattern ):
def __init__( self, name ):
Pattern.__init__( self, name )
# can match one or more elements
def range_in_seq( self, seq ):
return (1, len(seq))
def __repr__( self ):
return self.name + "__"
class PatternStar( Pattern ):
def __init__( self, name ):
Pattern.__init__( self, name )
# can match zero, one or more elements
def range_in_seq( self, seq ):
return (0, len(seq))
def __repr__( self ):
return self.name + "___"
class PatternOr( Expression ):
def __init__( self, children ):
Expression.__init__( self, children )
def match( self, ctx ):
# try each alternative in turn against a copy of the current context
_ctx = copy.copy( ctx )
for ch in self.get_children():
ctx = copy.copy( _ctx )
for m in ch.match( ctx ):
yield m
def __eq__( self, other ):
return self.get_head() == other.get_head() and \
len(self.get_children()) == len(other.get_children())
def __repr__( self ):
return "( " + " | ".join( [ str(ch) for ch in self.get_children() ] ) + " )"
|
115415
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class PayloadProtocolType(Base):
__slots__ = ()
_SDM_NAME = 'payloadProtocolType'
_SDM_ATT_MAP = {
'HeaderPayloadProtocolId': 'payloadProtocolType.header.payloadProtocolId-1',
}
def __init__(self, parent, list_op=False):
super(PayloadProtocolType, self).__init__(parent, list_op)
@property
def HeaderPayloadProtocolId(self):
"""
Display Name: Payload Protocol Id (EtherType)
Default Value: 0xffff
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderPayloadProtocolId']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
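# Hypothetical usage sketch (assumes a connected ixnetwork_restpy session and a
# suitable parent traffic stack; the Multivalue setter shown is illustrative):
#
# payload = PayloadProtocolType(parent_stack).add()
# payload.HeaderPayloadProtocolId.Single('0x88b5')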
|
115479
|
from distutils.core import setup
setup(
name='gitric',
version='0.4',
description='simple git-based deployment for fabric',
author='<NAME>',
author_email='<EMAIL>',
url='http://dan.bravender.us',
download_url='http://github.com/dbravender/gitric/tarball/0.4',
packages=['gitric'])
|
115514
|
import requests
def is_campus_up():
response_threshold = 3
timeout = 5
url = "https://campus.exactas.uba.ar"
try:
response = requests.get(url, timeout=timeout)
response_time = response.elapsed.total_seconds()
response.raise_for_status()
msg = ""
if response_time > response_threshold:
msg = "El campus pareciera estar andando medio lenteja :/"
else:
msg = "El campus pareciera estar andando :)"
except requests.exceptions.Timeout:
msg = "Tardó bocha y no recibí respuesta. Debe estar caído o andando lento :("
except requests.exceptions.ConnectionError:
msg = "Hubo un error de conexión, debe estar caído. Espero no sea época de parciales..."
except requests.exceptions.HTTPError:
msg = "Recibí una respuesta del campus con error. " + response.status_code + ": " + response.reason
except:
msg = "Hubo un error y no tengo idea de qué fue. Rezale a Shannon."
return msg
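# Example (illustrative): print(is_campus_up())
# A response slower than response_threshold seconds is reported as sluggish
# even when the request itself succeeds.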
|
115567
|
from .BasicSearcher import BasicSearcher
from .AStar import AStar
from .BFS_BEAM import BFS_BEAM
from .Prob import Prob
from .Searcher import Searcher
__all__ = ["AStar","BFS_BEAM", "Prob", "Searcher"]
|
115677
|
from .api import AptlyApi, get_aptly_connection, get_snapshot_name # noqa: F401
from .taskstate import TaskState # noqa: F401
|
115705
|
from datetime import date
from django.forms import CharField, DateInput, Form
from django.utils import translation
from .base import WidgetTest
class DateInputTest(WidgetTest):
widget = DateInput()
def test_render_none(self):
self.check_html(
self.widget, "date", None, html='<input type="text" name="date">'
)
def test_render_value(self):
d = date(2007, 9, 17)
self.assertEqual(str(d), "2007-09-17")
self.check_html(
self.widget,
"date",
d,
html='<input type="text" name="date" value="2007-09-17">',
)
self.check_html(
self.widget,
"date",
date(2007, 9, 17),
html=('<input type="text" name="date" value="2007-09-17">'),
)
def test_string(self):
"""
Should be able to initialize from a string value.
"""
self.check_html(
self.widget,
"date",
"2007-09-17",
html=('<input type="text" name="date" value="2007-09-17">'),
)
def test_format(self):
"""
Use 'format' to change the way a value is displayed.
"""
d = date(2007, 9, 17)
widget = DateInput(format="%d/%m/%Y", attrs={"type": "date"})
self.check_html(
widget, "date", d, html='<input type="date" name="date" value="17/09/2007">'
)
@translation.override("de-at")
def test_l10n(self):
self.check_html(
self.widget,
"date",
date(2007, 9, 17),
html='<input type="text" name="date" value="17.09.2007">',
)
def test_fieldset(self):
class TestForm(Form):
template_name = "forms_tests/use_fieldset.html"
field = CharField(widget=self.widget)
form = TestForm()
self.assertIs(self.widget.use_fieldset, False)
self.assertHTMLEqual(
form.render(),
'<div><label for="id_field">Field:</label>'
'<input id="id_field" name="field" required type="text"></div>',
)
|
115712
|
import itertools
import logging
import os
import geopandas as gpd
import numpy as np
import pandas as pd
import tqdm
from scipy.spatial import KDTree
from shapely.geometry import LineString, Point, Polygon
from delft3dfmpy.converters import hydamo_to_dflowrr
from delft3dfmpy.core import checks, geometry
from delft3dfmpy.datamodels.common import ExtendedGeoDataFrame
from delft3dfmpy.datamodels.cstructures import meshgeom, meshgeomdim
from delft3dfmpy.io import drrreader
logger = logging.getLogger(__name__)
class DFlowRRModel:
"""Main data structure for RR-model in DflowFM. Contains subclasses
for unpaved, paved, greenhouse and open water nodes and external forcings (seepage, precipitation, evaporation)
"""
def __init__(self):
self.d3b_parameters = {}
self.unpaved = Unpaved(self)
self.paved = Paved(self)
self.greenhouse = Greenhouse(self)
self.openwater = Openwater(self)
self.external_forcings = ExternalForcings(self)
self.dimr_path = ''
class ExternalForcings:
"""
Class for external forcings, which contains the boundary
conditions and the initial conditions.
"""
def __init__(self, dflowrrmodel):
# Point to relevant attributes from parent
self.dflowrrmodel = dflowrrmodel
self.io = drrreader.ExternalForcingsIO(self)
self.boundary_nodes = {}
self.seepage = {}
self.precip = {}
self.evap = {}
def add_precip(self, id, series):
self.precip[id] = {
'precip' : series
}
def add_evap(self, id, series):
self.evap[id] = {
'evap' : series
}
def add_seepage(self, id, series):
self.seepage[id] = {
'seepage' : series
}
def add_boundary_node(self, id, px, py):
self.boundary_nodes[id] = {
'id' : id,
'px' : px,
'py' : py
}
class Unpaved:
"""
Class for unpaved nodes
"""
def __init__(self, dflowrrmodel):
# Point to relevant attributes from parent
self.dflowrrmodel = dflowrrmodel
# initialize a dataframe for every type of nodes related to 'unpaved'
self.unp_nodes = {}
self.ernst_defs = {}
self.io = drrreader.UnpavedIO(self)
def add_unpaved(self,id, total_area, lu_areas, surface_level, soiltype, surface_storage, infiltration_capacity, initial_gwd, meteo_area, px, py, boundary_node):
self.unp_nodes[id] = {
'id' : 'unp_'+id,
'na' : '16',
'ar' : lu_areas,
'ga' : total_area,
'lv' : surface_level,
'co' : '3',
'su' : '0',
'sd' : surface_storage,
'sp' : 'sep_'+id,
'ic' : infiltration_capacity,
'ed' : 'ernst_'+id,
'bt' : soiltype,
'ig' : initial_gwd,
'mg' : surface_level,
'gl' : '1.5',
'is' : '0',
'ms' : 'ms_'+meteo_area,
'px': px,
'py': py,
'boundary_node': boundary_node
}
def add_ernst_def(self,id, cvo, lv, cvi, cvs):
self.ernst_defs[id] = {
'id' : 'ernst_'+id,
'cvi' : cvi,
'cvs' : cvs,
'cvo' : cvo,
'lv' : lv
}
class Paved:
"""
Class for paved nodes.
"""
def __init__(self, dflowrrmodel):
# Point to relevant attributes from parent
self.dflowrrmodel = dflowrrmodel
self.pav_nodes = {}
self.io = drrreader.PavedIO(self)
self.node_geom = {}
self.link_geom = {}
#PAVE id 'pav_Nde_n003' ar 16200 lv 1 sd '1' ss 0 qc 0 1.94E-05 0 qo 2 2 ms 'Station1' aaf 1 is 0 np 0 dw '1' ro 0 ru 0 qh '' pave#
def add_paved(self,id, area, surface_level, street_storage, sewer_storage, pump_capacity, meteo_area, px, py, boundary_node):
self.pav_nodes[id] = {
'id' : 'pav_'+id,
'ar' : area,
'lv' : surface_level,
'qc' : pump_capacity,
'strs' : street_storage,
'sews' : sewer_storage,
'ms' : 'ms_'+meteo_area,
'is' : '0',
'np' : '0',
'ro' : '0',
'ru' : '0',
'px': px,
'py': py,
'boundary_node': boundary_node
}
class Greenhouse:
"""
Class for greenhouse nodes
"""
def __init__(self, dflowrrmodel):
self.dflowrrmodel = dflowrrmodel
self.gh_nodes = {}
# Create the io class
self.io = drrreader.GreenhouseIO(self)
# GRHS id '1' na 10 ar 1000. 0. 0. 3000. 0. 0. 0. 0. 0. 0. sl 1.0 as 0. sd 'roofstor 1mm' si
# 'silo typ1' ms 'meteostat1' is 50.0 grhs
def add_greenhouse(self, id, area, surface_level, roof_storage, meteo_area, px, py, boundary_node):
self.gh_nodes[id] = {
'id': 'gh_'+id,
'ar' : area,
'sl': surface_level,
'sd': roof_storage,
'ms' : 'ms_'+meteo_area,
'is' : '0',
'px': px,
'py': py,
'boundary_node': boundary_node
}
class Openwater:
"""
Class for open water nodes
"""
def __init__(self, dflowrrmodel):
self.dflowrrmodel = dflowrrmodel
self.ow_nodes = {}
# Create the io class
self.io = drrreader.OpenwaterIO(self)
def add_openwater(self, id, area, meteo_area, px, py, boundary_node):
self.ow_nodes[id] = {
'id': 'ow_'+id,
'ar' : area,
'ms' : 'ms_'+meteo_area,
'px': px,
'py': py,
'boundary_node': boundary_node
}
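# Construction sketch (illustrative values; in practice the io readers populate
# these dictionaries from HyDAMO source data):
#
# drrmodel = DFlowRRModel()
# drrmodel.external_forcings.add_boundary_node('bnd1', px=125000.0, py=450000.0)
# drrmodel.paved.add_paved('cat1', area='16200', surface_level='1.0',
# street_storage='1', sewer_storage='1', pump_capacity='1.94E-05',
# meteo_area='Station1', px=125010.0, py=450010.0, boundary_node='bnd1')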
|
115717
|
import argparse
import numpy as np
import util.io
import experiment_descriptor as ed
import misc
def parse_args():
"""
Returns an object describing the command line.
"""
parser = argparse.ArgumentParser(description='Likelihood-free inference experiments.')
subparsers = parser.add_subparsers()
parser_run = subparsers.add_parser('run', help='run experiments')
parser_run.add_argument('files', nargs='+', type=str, help='file(s) describing experiments')
parser_run.set_defaults(func=run_experiment)
parser_trials = subparsers.add_parser('trials', help='run multiple experiment trials')
parser_trials.add_argument('start', type=int, help='# of first trial')
parser_trials.add_argument('end', type=int, help='# of last trial')
parser_trials.add_argument('files', nargs='+', type=str, help='file(s) describing experiments')
parser_trials.set_defaults(func=run_trials)
parser_view = subparsers.add_parser('view', help='view results')
parser_view.add_argument('files', nargs='+', type=str, help='file(s) describing experiments')
parser_view.add_argument('-b', '--block', action='store_true', help='block execution after viewing each experiment')
parser_view.add_argument('-t', '--trial', type=int, default=0, help='trial to view (default is 0)')
parser_view.set_defaults(func=view_results)
parser_log = subparsers.add_parser('log', help='print experiment logs')
parser_log.add_argument('files', nargs='+', type=str, help='file(s) describing experiments')
parser_log.set_defaults(func=print_log)
return parser.parse_args()
def run_experiment(args):
"""
Runs experiments.
"""
from experiment_runner import ExperimentRunner
exp_descs = sum([ed.parse(util.io.load_txt(f)) for f in args.files], [])
for exp_desc in exp_descs:
try:
ExperimentRunner(exp_desc).run(trial=0, sample_gt=False, rng=np.random.RandomState(42))
except misc.AlreadyExistingExperiment:
print('EXPERIMENT ALREADY EXISTS')
print('ALL DONE')
def run_trials(args):
"""
Runs experiments for multiple trials with random ground truth.
"""
from experiment_runner import ExperimentRunner
if args.start < 1:
raise ValueError('trial # must be a positive integer')
if args.end < args.start:
raise ValueError("end trial can't be less than start trial")
exp_descs = sum([ed.parse(util.io.load_txt(f)) for f in args.files], [])
for exp_desc in exp_descs:
runner = ExperimentRunner(exp_desc)
for trial in range(args.start, args.end + 1):
try:
runner.run(trial=trial, sample_gt=True, rng=np.random)
except misc.AlreadyExistingExperiment:
print('EXPERIMENT ALREADY EXISTS')
print('ALL DONE')
def view_results(args):
"""
Views experiments.
"""
from experiment_viewer import ExperimentViewer, plt
exp_descs = sum([ed.parse(util.io.load_txt(f)) for f in args.files], [])
for exp_desc in exp_descs:
try:
ExperimentViewer(exp_desc).view_results(trial=args.trial, block=args.block)
except misc.NonExistentExperiment:
print('EXPERIMENT DOES NOT EXIST')
plt.show()
def print_log(args):
"""
Prints experiment logs.
"""
from experiment_viewer import ExperimentViewer
exp_descs = sum([ed.parse(util.io.load_txt(f)) for f in args.files], [])
for exp_desc in exp_descs:
try:
ExperimentViewer(exp_desc).print_log()
except misc.NonExistentExperiment:
print('EXPERIMENT DOES NOT EXIST')
def main():
args = parse_args()
args.func(args)
if __name__ == '__main__':
main()
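# Usage sketch (script and file names are illustrative):
#
# python experiments.py run exps/mog.txt # run each experiment once
# python experiments.py trials 1 10 exps/mog.txt # trials 1..10, random ground truth
# python experiments.py view -t 3 exps/mog.txt # plot results of trial 3
# python experiments.py log exps/mog.txt # print experiment logs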
|