| text stringlengths 65 to 6.05M | lang stringclasses 8 values | type stringclasses 2 values | id stringlengths 64 to 64 |
|---|---|---|---|
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.base import KGEModel
from utils.math_utils import householder_reflection, householder_rotation
class EKGEModel(KGEModel):
"""
Euclidean knowledge graph embedding model
"""
def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma, p_norm,
dropout, entity_embedding_multiple, relation_embedding_multiple):
super(EKGEModel, self).__init__(model_name, nentity, nrelation, hidden_dim, gamma, p_norm, dropout)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),
requires_grad=False
)
self.entity_dim = hidden_dim * entity_embedding_multiple
self.relation_dim = hidden_dim * relation_embedding_multiple
self.entity_embedding = nn.Parameter(torch.zeros(nentity, self.entity_dim))
# TODO: try xavier
nn.init.uniform_(
tensor=self.entity_embedding,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))
nn.init.uniform_(
tensor=self.relation_embedding,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
if model_name == 'pRotatE':
self.modulus = nn.Parameter(torch.Tensor([[0.5 * self.embedding_range.item()]]))
if model_name == 'RotationE' and relation_embedding_multiple - 3 * entity_embedding_multiple != 0:
raise ValueError('RotationE should triple relationship embeddings (center and two reflections)')
if model_name == 'ReflectionE' and relation_embedding_multiple - 2 * entity_embedding_multiple != 0:
raise ValueError('ReflectionE should double relationship embeddings (center and one reflection)')
if model_name == 'RotatE' and (entity_embedding_multiple - 2 * relation_embedding_multiple != 0):
raise ValueError(
'RotatE should use even hidden dimensions for entity embeddings (twice relationship embeddings)')
if model_name == 'ComplEx' and (
not entity_embedding_multiple == relation_embedding_multiple or relation_embedding_multiple % 2 != 0):
raise ValueError('ComplEx should use even hidden dimensions for entity and relation embeddings')
def forward(self, sample, mode='single'):
'''
Forward function that calculates the score for a batch of triples.
In 'single' mode, sample is a batch of triples.
In 'head-batch' or 'tail-batch' mode, sample consists of two parts:
the first part is usually the positive sample,
and the second part holds the entities used as negative samples.
This works because negative and positive samples usually share two elements
of their triple ((head, relation) or (relation, tail)).
'''
if mode == 'single':
batch_size, negative_sample_size = sample.size(0), 1
head = torch.index_select(
self.entity_embedding,
dim=0,
index=sample[:, 0]
).unsqueeze(1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=sample[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=sample[:, 2]
).unsqueeze(1)
elif mode == 'head-batch':
tail_part, head_part = sample
batch_size, negative_sample_size = head_part.size(0), head_part.size(1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part.view(-1)
).view(batch_size, negative_sample_size, -1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=tail_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=tail_part[:, 2]
).unsqueeze(1)
elif mode == 'tail-batch':
head_part, tail_part = sample
batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part[:, 0]
).unsqueeze(1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=head_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=tail_part.view(-1)
).view(batch_size, negative_sample_size, -1)
else:
raise ValueError('mode %s not supported' % mode)
model_func = {
'TransE': self.TransE,
'DistMult': self.DistMult,
'ComplEx': self.ComplEx,
'RotatE': self.RotatE,
'pRotatE': self.pRotatE,
'ReflectionE': self.ReflectionE,
'RotationE': self.RotationE,
}
if self.model_name in model_func:
head = F.dropout(head, self.dropout, training=self.training)
relation = F.dropout(relation, self.dropout, training=self.training)
tail = F.dropout(tail, self.dropout, training=self.training)
score = model_func[self.model_name](head, relation, tail, mode)
else:
raise ValueError('model %s not supported' % self.model_name)
return score
def TransE(self, head, relation, tail, mode):
if mode == 'head-batch':
score = head + (relation - tail)
else:
score = (head + relation) - tail
score = self.gamma.item() - torch.norm(score, p=self.p_norm, dim=2)
return score
def RotationE(self, head, relation, tail, mode):
'''
Euclidean rotation model with real numbers using two Householder reflections
'''
center, v1, v2 = torch.chunk(relation, 3, dim=2)
if mode == 'head-batch':
# inverse rotation
head_pred = householder_rotation(tail - center, v2, v1) + center
score = head - head_pred
else:
tail_pred = householder_rotation(head - center, v1, v2) + center
score = tail_pred - tail
return self.gamma.item() - torch.norm(score, p=self.p_norm, dim=2)
def ReflectionE(self, head, relation, tail, mode):
'''
Euclidean reflection model using one Householder reflection
'''
center, v = torch.chunk(relation, 2, dim=2)
if mode == 'head-batch':
head_pred = householder_reflection(tail - center, v) + center
score = head - head_pred
else:
tail_pred = householder_reflection(head - center, v) + center
score = tail_pred - tail
return self.gamma.item() - torch.norm(score, p=self.p_norm, dim=2)
def DistMult(self, head, relation, tail, mode):
if mode == 'head-batch':
score = head * (relation * tail)
else:
score = (head * relation) * tail
score = score.sum(dim=2)
return score
def ComplEx(self, head, relation, tail, mode):
re_head, im_head = torch.chunk(head, 2, dim=2)
re_relation, im_relation = torch.chunk(relation, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
score = re_head * re_score + im_head * im_score
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
score = re_score * re_tail + im_score * im_tail
score = score.sum(dim=2)
return score
def RotatE(self, head, relation, tail, mode):
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
# Make phases of relations uniformly distributed in [-pi, pi]
phase_relation = relation / (self.embedding_range.item() / pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim=0)
score = score.norm(dim=0)
score = self.gamma.item() - score.sum(dim=2)
return score
def pRotatE(self, head, relation, tail, mode):
pi = 3.14159265358979323846
# Make phases of entities and relations uniformly distributed in [-pi, pi]
phase_head = head / (self.embedding_range.item() / pi)
phase_relation = relation / (self.embedding_range.item() / pi)
phase_tail = tail / (self.embedding_range.item() / pi)
if mode == 'head-batch':
score = phase_head + (phase_relation - phase_tail)
else:
score = (phase_head + phase_relation) - phase_tail
score = torch.sin(score)
score = torch.abs(score)
score = self.gamma.item() - score.sum(dim=2) * self.modulus
return score
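# householder_reflection / householder_rotation are imported from utils.math_utils and
# not shown in this file. The sketches below illustrate the standard Householder
# formulas they are assumed to follow (hypothetical helpers, not the project's actual
# implementation): a reflection is x - 2 * <x, v> / <v, v> * v, and two reflections
# compose into a rotation, so applying (v2, v1) undoes applying (v1, v2).
def householder_reflection_sketch(x, v, eps=1e-9):
    # Reflect x across the hyperplane orthogonal to v, along the last dimension.
    coeff = (x * v).sum(dim=-1, keepdim=True) / ((v * v).sum(dim=-1, keepdim=True) + eps)
    return x - 2.0 * coeff * v

def householder_rotation_sketch(x, v1, v2, eps=1e-9):
    # Two reflections make a rotation; swapping v1 and v2 gives the inverse rotation
    # used in the 'head-batch' branch of RotationE above.
    return householder_reflection_sketch(householder_reflection_sketch(x, v1, eps), v2, eps)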
| Python | CL | 4b8f30cbc7a11e42aeac35655d977ecf35e2ce976b4adf1b02fb34f02ad0ad47 |
#!/usr/bin/python
#
# Copyright (c) 2016-2017, Cray Inc.
# All rights reserved.
## @package set_gattr
# Exercise the PWR_GrpAttrSetValue() function.
import collections
import json
import os
import re
import sys
# Import common test code
from common import init, error_message, skip_test, skip_test_if_not_root
from pwrcmd import EXPECT_SUCCESS, EXPECT_FAILURE, list_name, \
set_attr_and_check
#
# Initialize this test
#
init(os.path.basename(__file__))
skip_test_if_not_root()
# See if we can perform this test
cores = list_name(filter="^core")
hts = list_name(filter="^ht")
if len(cores) < 2:
skip_test("Need 2+ cores, saw {}".format(len(cores)))
if len(hts) < 2:
skip_test("Need 2+ hts, saw {}".format(len(hts)))
if not set_attr_and_check("PWR_ATTR_CSTATE_LIMIT",
hts[0:2],
1,
"PWR_ROLE_RM",
desc="#1 PWR_GrpAttrSetValue set cstate limit 2 pass"):
sys.exit(112)
sys.exit(0)
| Python | CL | b7969fdb45375fdae83da5b5f8c49c1ca7d281a14c50a161a9f0e0c62f858722 |
import torch
torch.backends.cudnn.benchmark = True
import torch.nn.functional as F
import os
from core.network import Network
from core.optimizer import Optimizer
from core.buffer import ReplayBuffer
from .base import BaseAgent
from .utils import OU_Noise
class DDPG(BaseAgent):
"""Deep deterministic policy gradient (DDPG) agent.
Args:
state_size (int): dimension of state.
action_size (int): dimension of action.
hidden_size (int): dimension of hidden unit.
actor (str): key of actor network class in _network_dict.txt.
critic (str): key of critic network class in _network_dict.txt.
head (str): key of head in _head_dict.txt.
optim_config (dict): dictionary of the optimizer info.
gamma (float): discount factor.
buffer_size (int): the size of the memory buffer.
batch_size (int): the number of samples in the one batch.
start_train_step (int): steps to start learning.
tau (float): the soft update coefficient.
mu (float): the drift coefficient of the Ornstein-Uhlenbeck process for action exploration.
theta (float): reversion of the time constant of the Ornstein-Uhlenbeck process.
sigma (float): diffusion coefficient of the Ornstein-Uhlenbeck process.
device (str): device to use.
(e.g. 'cpu' or 'gpu'. None can also be used, and in this case, the cpu is used.)
"""
def __init__(
self,
state_size,
action_size,
hidden_size=512,
actor="ddpg_actor",
critic="ddpg_critic",
head="mlp",
optim_config={
"actor": "adam",
"critic": "adam",
"actor_lr": 5e-4,
"critic_lr": 1e-3,
},
gamma=0.99,
buffer_size=50000,
batch_size=128,
start_train_step=2000,
tau=1e-3,
# OU noise
mu=0,
theta=1e-3,
sigma=2e-3,
device=None,
**kwargs,
):
self.device = (
torch.device(device)
if device
else torch.device("cuda" if torch.cuda.is_available() else "cpu")
)
self.actor = Network(
actor, state_size, action_size, D_hidden=hidden_size, head=head
).to(self.device)
self.critic = Network(
critic, state_size, action_size, D_hidden=hidden_size, head=head
).to(self.device)
self.target_actor = Network(
actor, state_size, action_size, D_hidden=hidden_size, head=head
).to(self.device)
self.target_actor.load_state_dict(self.actor.state_dict())
self.target_critic = Network(
critic, state_size, action_size, D_hidden=hidden_size, head=head
).to(self.device)
self.target_critic.load_state_dict(self.critic.state_dict())
self.actor_optimizer = Optimizer(
optim_config["actor"], self.actor.parameters(), lr=optim_config["actor_lr"]
)
self.critic_optimizer = Optimizer(
optim_config["critic"], self.critic.parameters(), lr=optim_config["critic_lr"]
)
self.OU = OU_Noise(action_size, mu, theta, sigma)
self.gamma = gamma
self.tau = tau
self.memory = ReplayBuffer(buffer_size)
self.batch_size = batch_size
self.start_train_step = start_train_step
self.num_learn = 0
@torch.no_grad()
def act(self, state, training=True):
self.actor.train(training)
mu = self.actor(self.as_tensor(state))
mu = mu.cpu().numpy()
action = mu + self.OU.sample() if training else mu
return {"action": action}
def learn(self):
transitions = self.memory.sample(self.batch_size)
for key in transitions.keys():
transitions[key] = self.as_tensor(transitions[key])
state = transitions["state"]
action = transitions["action"]
reward = transitions["reward"]
next_state = transitions["next_state"]
done = transitions["done"]
# Critic Update
with torch.no_grad():
next_actions = self.target_actor(next_state)
next_q = self.target_critic(next_state, next_actions)
target_q = reward + (1 - done) * self.gamma * next_q
q = self.critic(state, action)
critic_loss = F.mse_loss(target_q, q)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
max_Q = torch.max(target_q, axis=0).values.cpu().numpy()[0]
# Actor Update
action_pred = self.actor(state)
actor_loss = -self.critic(state, action_pred).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.num_learn += 1
result = {
"critic_loss": critic_loss.item(),
"actor_loss": actor_loss.item(),
"max_Q": max_Q,
}
return result
def update_target_soft(self):
# soft-update both target networks toward their online counterparts
for t_p, p in zip(self.target_critic.parameters(), self.critic.parameters()):
t_p.data.copy_(self.tau * p.data + (1 - self.tau) * t_p.data)
for t_p, p in zip(self.target_actor.parameters(), self.actor.parameters()):
t_p.data.copy_(self.tau * p.data + (1 - self.tau) * t_p.data)
def process(self, transitions, step):
result = {}
# Process per step
self.memory.store(transitions)
if self.memory.size >= self.batch_size and step >= self.start_train_step:
result = self.learn()
if self.num_learn > 0:
self.update_target_soft()
return result
def save(self, path):
print(f"...Save model to {path}...")
save_dict = {
"actor": self.actor.state_dict(),
"actor_optimizer": self.actor_optimizer.state_dict(),
"critic": self.critic.state_dict(),
"critic_optimizer": self.critic_optimizer.state_dict(),
}
torch.save(save_dict, os.path.join(path, "ckpt"))
def load(self, path):
print(f"...Load model from {path}...")
checkpoint = torch.load(os.path.join(path, "ckpt"), map_location=self.device)
self.actor.load_state_dict(checkpoint["actor"])
self.actor_optimizer.load_state_dict(checkpoint["actor_optimizer"])
self.critic.load_state_dict(checkpoint["critic"])
self.target_critic.load_state_dict(self.critic.state_dict())
self.critic_optimizer.load_state_dict(checkpoint["critic_optimizer"])
def sync_in(self, weights):
self.actor.load_state_dict(weights)
def sync_out(self, device="cpu"):
weights = self.actor.state_dict()
for k, v in weights.items():
weights[k] = v.to(device)
sync_item = {
"weights": weights,
}
return sync_item
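# OU_Noise is imported from .utils and not shown here. A minimal sketch of the usual
# Ornstein-Uhlenbeck discretisation it is assumed to follow (illustrative only, not
# the actual implementation): x_{t+1} = x_t + theta * (mu - x_t) + sigma * N(0, 1).
import numpy as np

class OUNoiseSketch:
    def __init__(self, action_size, mu=0.0, theta=1e-3, sigma=2e-3):
        self.mu, self.theta, self.sigma = mu, theta, sigma
        self.x = np.full(action_size, mu, dtype=np.float64)

    def sample(self):
        # Mean-reverting drift toward mu plus Gaussian diffusion.
        self.x = self.x + self.theta * (self.mu - self.x) + self.sigma * np.random.randn(*self.x.shape)
        return self.x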
| Python | CL | 8cd64a5dd4671c33050d27742457f30646d57308187a2f40cdcad466caeee90c |
"""Aiounifi errors."""
class AiounifiException(Exception):
"""Base error for aiounifi."""
class RequestError(AiounifiException):
"""Unable to fulfill request.
Raised when host or API cannot be reached.
"""
class ResponseError(AiounifiException):
"""Invalid response."""
class Unauthorized(AiounifiException):
"""Username is not authorized."""
class LoginRequired(AiounifiException):
"""User is logged out."""
class Forbidden(AiounifiException):
"""Forbidden request."""
class NoPermission(AiounifiException):
"""Users permissions are read only."""
class ServiceUnavailable(RequestError):
"""Service is unavailable.
Common error if controller is restarting and behind a proxy.
"""
class BadGateway(RequestError):
"""Invalid response from the upstream server."""
class TwoFaTokenRequired(AiounifiException):
"""2 factor authentication token required."""
ERRORS = {
"api.err.LoginRequired": LoginRequired,
"api.err.Invalid": Unauthorized,
"api.err.NoPermission": NoPermission,
"api.err.Ubic2faTokenRequired": TwoFaTokenRequired,
}
def raise_error(error: str) -> None:
"""Raise error."""
cls = ERRORS.get(error, AiounifiException)
raise cls(error)
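# Illustrative usage: map a controller error code to a typed exception. Unknown codes
# fall back to the base AiounifiException.
#
#     try:
#         raise_error("api.err.LoginRequired")
#     except LoginRequired:
#         ...  # re-authenticate and retry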
| Python | CL | 89e8e516de546e3ee5d80124f40dd54e3dc47493f4b990e81898a0657744a167 |
#!/usr/bin/env python
"""
@package ion.agents.instrument.protocol_param_dict
@file ion/agents.instrument/protocol_param_dict.py
@author Edward Hunter
@brief A dictionary class that manages, matches and formats device parameters.
"""
__author__ = 'Edward Hunter'
__license__ = 'Apache 2.0'
import re
import logging
mi_logger = logging.getLogger('mi_logger')
class ParameterDictVal(object):
"""
A parameter dictionary value.
"""
def __init__(self, name, pattern, f_getval, f_format, value=None):
"""
Parameter value constructor.
@param name The parameter name.
@param pattern The regex that matches the parameter in line output.
@param f_getval The function that extracts the value from a regex match.
@param f_format The function that formats the parameter value for a set command.
@param value The parameter value (initializes to None).
"""
self.name = name
self.pattern = pattern
self.regex = re.compile(pattern)
self.f_getval = f_getval
self.f_format = f_format
self.value = value
def update(self, input):
"""
Attempt to update a parameter value. If the input string matches the
value regex, extract and update the dictionary value.
@param input A string possibly containing the parameter value.
@retval True if an update was successful, False otherwise.
"""
match = self.regex.match(input)
if match:
self.value = self.f_getval(match)
mi_logger.debug('Updated parameter %s=%s', self.name, str(self.value))
return True
else:
return False
class ProtocolParameterDict(object):
"""
Protocol parameter dictionary. Manages, matches and formats device
parameters.
"""
def __init__(self):
"""
Constructor.
"""
self._param_dict= {}
def add(self, name, pattern, f_getval, f_format, value=None):
"""
Add a parameter object to the dictionary.
@param name The parameter name.
@param pattern The regex that matches the parameter in line output.
@param f_getval The function that extracts the value from a regex match.
@param f_format The function that formats the parameter value for a set command.
@param value The parameter value (initializes to None).
"""
val = ParameterDictVal(name, pattern, f_getval, f_format, value)
self._param_dict[name] = val
def get(self, name):
"""
Get a parameter value from the dictionary.
@param name Name of the value to be retrieved.
@raises KeyError if the name is invalid.
"""
return self._param_dict[name].value
def set(self, name, value):
"""
Set a parameter value in the dictionary.
@param name The parameter name.
@param value The parameter value.
@raises KeyError if the name is invalid.
"""
self._param_dict[name].value = value
def update(self, input):
"""
Update the dictionary with a line input. Iterate through all objects
and attempt to match and update a parameter.
@param input A string to match to a dictionary object.
@retval The name that was successfully updated, None if not updated
"""
for (name, val) in self._param_dict.items():
if val.update(input):
return name
return None
def get_config(self):
"""
Retrieve the configuration (all key values).
@retval name : value configuration dict.
"""
config = {}
for (key, val) in self._param_dict.items():
config[key] = val.value
return config
def format(self, name, val):
"""
Format a parameter for a set command.
@param name The name of the parameter.
@param val The parameter value.
@retval The value formatted as a string for writing to the device.
@raises InstrumentProtocolException if the value could not be formatted.
@raises KeyError if the parameter name is invalid.
"""
return self._param_dict[name].f_format(val)
def get_keys(self):
"""
Return list of all parameter names in the dictionary.
"""
return self._param_dict.keys()
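# Illustrative usage (the parameter name, pattern and callables below are hypothetical,
# not taken from any driver):
#
#     param_dict = ProtocolParameterDict()
#     param_dict.add('baudrate',
#                    r'baudrate=(\d+)',
#                    lambda match: int(match.group(1)),
#                    lambda value: 'baudrate=%d' % value)
#     param_dict.update('baudrate=9600')    # returns 'baudrate'
#     param_dict.get('baudrate')            # 9600
#     param_dict.format('baudrate', 19200)  # 'baudrate=19200'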
| Python | CL | 6b1d7f16edb4e667c2dbaad7175d9f70fe31e227a1c9470e1ab9ac63b75e2da4 |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2022 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import Any, Dict
from django.test import TestCase
from apps.core.gray.handlers import GrayHandler
from apps.exceptions import ValidationError
from apps.node_man.management.commands.create_ap_for_gse2 import Command
from apps.node_man.models import AccessPoint
from env.constants import GseVersion
class TestCreateApForGse2(TestCase):
def test_create_ap_for_gse2(self):
kwargs: Dict[str, Any] = {"reference_ap_id": -100, "clean_old_map_id": False}
# Test the case where the reference access point id does not exist
try:
Command().handle(**kwargs)
except Exception as e:
self.assertEqual(e.__class__, ValidationError)
reference_ap_id: int = 1
# Test with a reference id that points to a V1 access point
kwargs.update(reference_ap_id=reference_ap_id)
Command().handle(**kwargs)
gray_ap_map: Dict[int, int] = GrayHandler.get_gray_ap_map()
ap_id_obj_map: Dict[int, AccessPoint] = AccessPoint.ap_id_obj_map()
gse_v2_ap: AccessPoint = ap_id_obj_map[gray_ap_map[reference_ap_id]]
# Assert that the generated access point's GSE version is V2
self.assertEqual(gse_v2_ap.gse_version, GseVersion.V2.value)
# Test cleaning up the original mapping via clean_old_map_id
kwargs.update(clean_old_map_id=True)
Command().handle(**kwargs)
self.assertEqual(list(AccessPoint.objects.filter(id=gse_v2_ap.id)), [])
| Python | CL | f5e1a4d14c662958a826d7eaf15c2c8e8c3ee46deaf875ea98df3e39d48da353 |
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cuegui.Constants"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import importlib
import os
import mock
import pyfakefs.fake_filesystem_unittest
from qtpy import QtGui
import opencue
import cuegui.Constants
CONFIG_YAML = '''
unused_setting: some value
version: 98.707.68
refresh.job_update_delay: 30000
logger.level: INFO
'''
# pylint: disable=import-outside-toplevel,redefined-outer-name,reimported
class ConstantsTests(pyfakefs.fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self.fs.add_real_file(
os.path.join(os.path.dirname(cuegui.__file__), 'config', 'cuegui.yaml'), read_only=True)
if 'CUEGUI_CONFIG_FILE' in os.environ:
del os.environ['CUEGUI_CONFIG_FILE']
def test__should_load_user_config_from_env_var(self):
config_file_path = '/path/to/config.yaml'
self.fs.create_file(config_file_path, contents=CONFIG_YAML)
os.environ['CUEGUI_CONFIG_FILE'] = config_file_path
import cuegui.Constants
result = importlib.reload(cuegui.Constants)
self.assertEqual('98.707.68', result.VERSION)
self.assertEqual(30000, result.JOB_UPDATE_DELAY)
self.assertEqual(10000, result.LAYER_UPDATE_DELAY)
@mock.patch('platform.system', new=mock.Mock(return_value='Linux'))
@mock.patch('os.path.expanduser', new=mock.Mock(return_value='/home/username'))
def test__should_load_user_config_from_user_profile(self):
config_file_path = '/home/username/.config/opencue/cuegui.yaml'
self.fs.create_file(config_file_path, contents=CONFIG_YAML)
import cuegui.Constants
result = importlib.reload(cuegui.Constants)
self.assertEqual('98.707.68', result.VERSION)
self.assertEqual(30000, result.JOB_UPDATE_DELAY)
self.assertEqual(10000, result.LAYER_UPDATE_DELAY)
@mock.patch('platform.system', new=mock.Mock(return_value='Linux'))
def test__should_use_default_values(self):
import cuegui.Constants
result = importlib.reload(cuegui.Constants)
self.assertNotEqual('98.707.68', result.VERSION)
self.assertEqual(0, result.STARTUP_NOTICE_DATE)
self.assertEqual('', result.STARTUP_NOTICE_MSG)
self.assertEqual(10000, result.JOB_UPDATE_DELAY)
self.assertEqual(10000, result.LAYER_UPDATE_DELAY)
self.assertEqual(10000, result.FRAME_UPDATE_DELAY)
self.assertEqual(20000, result.HOST_UPDATE_DELAY)
self.assertEqual(1000, result.AFTER_ACTION_UPDATE_DELAY)
self.assertEqual(5, result.MINIMUM_UPDATE_INTERVAL)
self.assertEqual('Luxi Sans', result.FONT_FAMILY)
self.assertEqual(10, result.FONT_SIZE)
self.assertEqual(
os.path.join(os.path.dirname(cuegui.__file__), 'images'), result.RESOURCE_PATH)
self.assertEqual(
os.path.join(os.path.dirname(cuegui.__file__), 'config'), result.CONFIG_PATH)
self.assertEqual(
os.path.join(os.path.dirname(cuegui.__file__), 'config'), result.DEFAULT_INI_PATH)
self.assertEqual(
[os.path.join(os.path.dirname(cuegui.__file__), 'plugins')],
result.DEFAULT_PLUGIN_PATHS)
self.assertEqual('%(levelname)-9s %(module)-10s %(message)s', result.LOGGER_FORMAT)
self.assertEqual('WARNING', result.LOGGER_LEVEL)
self.assertEqual('cuemail: please check ', result.EMAIL_SUBJECT_PREFIX)
self.assertEqual('Your PSTs request that you check ', result.EMAIL_BODY_PREFIX)
self.assertEqual('\n\n', result.EMAIL_BODY_SUFFIX)
self.assertEqual('', result.EMAIL_DOMAIN)
self.assertEqual(
'https://github.com/AcademySoftwareFoundation/OpenCue/issues/new',
result.GITHUB_CREATE_ISSUE_URL)
self.assertEqual('https://www.opencue.io/docs/', result.URL_USERGUIDE)
self.assertEqual(
'https://github.com/AcademySoftwareFoundation/OpenCue/issues/new'
'?labels=enhancement&template=enhancement.md', result.URL_SUGGESTION)
self.assertEqual(
'https://github.com/AcademySoftwareFoundation/OpenCue/issues/new'
'?labels=bug&template=bug_report.md', result.URL_BUG)
self.assertEqual(
'gview -R -m -M -U %s +' % os.path.join(
os.path.dirname(cuegui.__file__), 'config', 'gvimrc'),
result.DEFAULT_EDITOR)
self.assertEqual({
'rhel7': '/shots',
'linux': '/shots',
'windows': 'S:',
'mac': '/Users/shots',
'darwin': '/Users/shots',
}, result.LOG_ROOT_OS)
self.assertEqual((
'general', 'desktop', 'playblast', 'util', 'preprocess', 'wan', 'cuda', 'splathw',
'naiad', 'massive'), result.ALLOWED_TAGS)
self.assertEqual(
os.path.join(os.path.dirname(cuegui.__file__), 'config', 'darkpalette.qss'),
result.DARK_STYLE_SHEET)
self.assertEqual('plastique', result.COLOR_THEME)
self.assertEqual(QtGui.QColor(50, 50, 100), result.COLOR_USER_1)
self.assertEqual(QtGui.QColor(100, 100, 50), result.COLOR_USER_2)
self.assertEqual(QtGui.QColor(0, 50, 0), result.COLOR_USER_3)
self.assertEqual(QtGui.QColor(50, 30, 00), result.COLOR_USER_4)
self.assertEqual({
opencue.api.job_pb2.DEAD: QtGui.QColor(255, 0, 0),
opencue.api.job_pb2.DEPEND: QtGui.QColor(160, 32, 240),
opencue.api.job_pb2.EATEN: QtGui.QColor(150, 0, 0),
opencue.api.job_pb2.RUNNING: QtGui.QColor(200, 200, 55),
opencue.api.job_pb2.SETUP: QtGui.QColor(160, 32, 240),
opencue.api.job_pb2.SUCCEEDED: QtGui.QColor(55, 200, 55),
opencue.api.job_pb2.WAITING: QtGui.QColor(135, 207, 235),
opencue.api.job_pb2.CHECKPOINT: QtGui.QColor(61, 98, 247),
}, result.RGB_FRAME_STATE)
self.assertEqual(5242880, result.MEMORY_WARNING_LEVEL)
self.assertEqual(
['error', 'aborted', 'fatal', 'failed', 'killed', 'command not found',
'no licenses could be found', 'killMessage'], result.LOG_HIGHLIGHT_ERROR)
self.assertEqual(['warning', 'not found'], result.LOG_HIGHLIGHT_WARN)
self.assertEqual(['info:', 'rqd cmd:'], result.LOG_HIGHLIGHT_INFO)
self.assertEqual(2147483647, result.QT_MAX_INT)
self.assertEqual({
'max_cores': 32,
'max_gpu_memory': 128,
'max_gpus': 8,
'max_memory': 128,
'max_proc_hour_cutoff': 30,
'redirect_wasted_cores_threshold': 100,
}, result.RESOURCE_LIMITS)
@mock.patch('platform.system', new=mock.Mock(return_value='Darwin'))
def test__should_use_mac_editor(self):
import cuegui.Constants
result = importlib.reload(cuegui.Constants)
self.assertEqual('open -t', result.DEFAULT_EDITOR)
| Python | CL | 186e5ade2d669e796d3644d3901eda04a840929852dcc394e543275b218cd1b2 |
"""
Base algorithms actions module.
Created on 21.04.2020
@author: Ruslan Dolovanyuk
"""
import abc
class BaseAction(abc.ABC):
"""Base class for all actions."""
def __init__(self, index):
"""Initialization action.
index: id current item for order;
"""
self.index = index
@abc.abstractmethod
def run(self, tree, notes):
"""Run action need overload."""
pass
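# A concrete action only needs to override run(). Hypothetical example (not part of
# this module):
#
#     class PrintAction(BaseAction):
#         def run(self, tree, notes):
#             print(self.index, tree, notes)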
| Python | CL | 37c778052893079c803f2366a4fe81d85f06cb059c67310edd16d52cd342f760 |
#
# PySNMP MIB module NETSCREEN-USER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NETSCREEN-USER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:10:44 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
netscreenVpn, = mibBuilder.importSymbols("NETSCREEN-SMI", "netscreenVpn")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, TimeTicks, ObjectIdentity, IpAddress, Gauge32, Unsigned32, Bits, MibIdentifier, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Integer32, iso, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "TimeTicks", "ObjectIdentity", "IpAddress", "Gauge32", "Unsigned32", "Bits", "MibIdentifier", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Integer32", "iso", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
nsVpnUser = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 4, 10))
nsVpnUsrDialupGrpTable = MibTable((1, 3, 6, 1, 4, 1, 3224, 4, 10, 1), )
if mibBuilder.loadTexts: nsVpnUsrDialupGrpTable.setStatus('mandatory')
nsVpnUsrDialupGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3224, 4, 10, 1, 1), ).setIndexNames((0, "NETSCREEN-USER-MIB", "nsVpnUsrDialupGrpIndex"))
if mibBuilder.loadTexts: nsVpnUsrDialupGrpEntry.setStatus('mandatory')
nsVpnUsrDialupGrpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnUsrDialupGrpIndex.setStatus('mandatory')
nsVpnUsrDialupGrpName = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnUsrDialupGrpName.setStatus('mandatory')
nsVpnUsrDialupGrpType = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("undefined", 0), ("manual", 1), ("ike", 2), ("l2tp", 3), ("xauth", 4), ("auth", 5), ("external", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnUsrDialupGrpType.setStatus('mandatory')
nsVpnUsrDialupGrpVsys = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnUsrDialupGrpVsys.setStatus('mandatory')
nsVpnManualKeyUsrTable = MibTable((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2), )
if mibBuilder.loadTexts: nsVpnManualKeyUsrTable.setStatus('mandatory')
nsVpnManualKeyUsrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1), ).setIndexNames((0, "NETSCREEN-USER-MIB", "nsVpnManualKeyUsrIndex"))
if mibBuilder.loadTexts: nsVpnManualKeyUsrEntry.setStatus('mandatory')
nsVpnManualKeyUsrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrIndex.setStatus('mandatory')
nsVpnManualKeyUsrName = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrName.setStatus('mandatory')
nsVpnManualKeyUsrGrp = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrGrp.setStatus('mandatory')
nsVpnManualKeyUsrSILocal = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrSILocal.setStatus('mandatory')
nsVpnManualKeyUsrSIRemote = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrSIRemote.setStatus('mandatory')
nsVpnManualKeyUsrTunnelType = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("esp", 0), ("ah", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrTunnelType.setStatus('mandatory')
nsVpnManualKeyUsrEspEncAlg = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("null", 0), ("des-cbc", 1), ("triple-des-cbc", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrEspEncAlg.setStatus('mandatory')
nsVpnManualKeyUsrEspAuthAlg = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("null", 0), ("md5", 1), ("sha", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrEspAuthAlg.setStatus('mandatory')
nsVpnManualKeyUsrAhHash = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("null", 0), ("md5", 1), ("sha", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrAhHash.setStatus('mandatory')
nsVpnManualKeyUsrVsys = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnManualKeyUsrVsys.setStatus('mandatory')
nsVpnAILUsrTable = MibTable((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3), )
if mibBuilder.loadTexts: nsVpnAILUsrTable.setStatus('mandatory')
nsVpnAILUsrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1), ).setIndexNames((0, "NETSCREEN-USER-MIB", "nsVpnAILUsrIndex"))
if mibBuilder.loadTexts: nsVpnAILUsrEntry.setStatus('mandatory')
nsVpnAILUsrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrIndex.setStatus('mandatory')
nsVpnAILUsrName = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrName.setStatus('mandatory')
nsVpnAILUsrGrp = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrGrp.setStatus('mandatory')
nsVpnAILUsrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrStatus.setStatus('mandatory')
nsVpnAILUsrIKE = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrIKE.setStatus('mandatory')
nsVpnAILUsrIKEIdType = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("not-set", 0), ("ipv4-addr", 1), ("fqdn", 2), ("usr-fqdn", 3), ("ipv4-addr-subnet", 4), ("ipv6-addr", 5), ("ipv6-addr-subnet", 6), ("ipv4-addr-addr-range", 7), ("ipv6-addr-addr-range", 8), ("der-asn1-dn", 9), ("der-asn1-gn", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrIKEIdType.setStatus('mandatory')
nsVpnAILUsrIKEId = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrIKEId.setStatus('mandatory')
nsVpnAILUsrAuth = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrAuth.setStatus('mandatory')
nsVpnAILUsrL2TP = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrL2TP.setStatus('mandatory')
nsVpnAILUsrL2tpRemoteIp = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 10), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrL2tpRemoteIp.setStatus('mandatory')
nsVpnAILUsrL2tpIpPool = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrL2tpIpPool.setStatus('mandatory')
nsVpnAILUsrL2tpIp = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 12), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrL2tpIp.setStatus('mandatory')
nsVpnAILUsrL2tpPriDnsIp = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 13), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrL2tpPriDnsIp.setStatus('mandatory')
nsVpnAILUsrL2tpSecDnsIp = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 14), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrL2tpSecDnsIp.setStatus('mandatory')
nsVpnAILUsrL2tpPriWinsIp = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 15), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrL2tpPriWinsIp.setStatus('mandatory')
nsVpnAILUsrL2tpSecWinsIp = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 16), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrL2tpSecWinsIp.setStatus('mandatory')
nsVpnAILUsrVsys = MibTableColumn((1, 3, 6, 1, 4, 1, 3224, 4, 10, 3, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nsVpnAILUsrVsys.setStatus('mandatory')
mibBuilder.exportSymbols("NETSCREEN-USER-MIB", nsVpnAILUsrGrp=nsVpnAILUsrGrp, nsVpnManualKeyUsrGrp=nsVpnManualKeyUsrGrp, nsVpnAILUsrIndex=nsVpnAILUsrIndex, nsVpnAILUsrL2tpSecDnsIp=nsVpnAILUsrL2tpSecDnsIp, nsVpnAILUsrL2tpIpPool=nsVpnAILUsrL2tpIpPool, nsVpnManualKeyUsrTable=nsVpnManualKeyUsrTable, nsVpnUsrDialupGrpIndex=nsVpnUsrDialupGrpIndex, nsVpnAILUsrName=nsVpnAILUsrName, nsVpnAILUsrL2tpIp=nsVpnAILUsrL2tpIp, nsVpnAILUsrL2tpSecWinsIp=nsVpnAILUsrL2tpSecWinsIp, nsVpnManualKeyUsrIndex=nsVpnManualKeyUsrIndex, nsVpnAILUsrL2tpRemoteIp=nsVpnAILUsrL2tpRemoteIp, nsVpnManualKeyUsrName=nsVpnManualKeyUsrName, nsVpnManualKeyUsrAhHash=nsVpnManualKeyUsrAhHash, nsVpnUsrDialupGrpTable=nsVpnUsrDialupGrpTable, nsVpnAILUsrEntry=nsVpnAILUsrEntry, nsVpnManualKeyUsrSIRemote=nsVpnManualKeyUsrSIRemote, nsVpnUser=nsVpnUser, nsVpnAILUsrVsys=nsVpnAILUsrVsys, nsVpnAILUsrTable=nsVpnAILUsrTable, nsVpnManualKeyUsrEspEncAlg=nsVpnManualKeyUsrEspEncAlg, nsVpnAILUsrL2tpPriWinsIp=nsVpnAILUsrL2tpPriWinsIp, nsVpnAILUsrIKE=nsVpnAILUsrIKE, nsVpnAILUsrIKEId=nsVpnAILUsrIKEId, nsVpnUsrDialupGrpVsys=nsVpnUsrDialupGrpVsys, nsVpnManualKeyUsrTunnelType=nsVpnManualKeyUsrTunnelType, nsVpnManualKeyUsrSILocal=nsVpnManualKeyUsrSILocal, nsVpnAILUsrStatus=nsVpnAILUsrStatus, nsVpnManualKeyUsrEspAuthAlg=nsVpnManualKeyUsrEspAuthAlg, nsVpnAILUsrAuth=nsVpnAILUsrAuth, nsVpnManualKeyUsrVsys=nsVpnManualKeyUsrVsys, nsVpnUsrDialupGrpEntry=nsVpnUsrDialupGrpEntry, nsVpnUsrDialupGrpName=nsVpnUsrDialupGrpName, nsVpnAILUsrL2tpPriDnsIp=nsVpnAILUsrL2tpPriDnsIp, nsVpnAILUsrL2TP=nsVpnAILUsrL2TP, nsVpnAILUsrIKEIdType=nsVpnAILUsrIKEIdType, nsVpnManualKeyUsrEntry=nsVpnManualKeyUsrEntry, nsVpnUsrDialupGrpType=nsVpnUsrDialupGrpType)
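# Generated modules like this one are normally executed by pysnmp's MibBuilder rather
# than imported directly, which is why mibBuilder is referenced without an explicit
# import above. Illustrative loading, assuming the compiled module is on the MIB
# search path:
#
#     from pysnmp.smi import builder
#     mibBuilder = builder.MibBuilder()
#     mibBuilder.loadModules('NETSCREEN-USER-MIB')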
| Python | CL | be6e6737ddf281aa2128422ffc75d3dcf604769d9b155570ac46b0523db02cae |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Set
from setuptools import find_namespace_packages, setup
def get_version():
root = os.path.dirname(__file__)
changelog = os.path.join(root, "CHANGELOG")
with open(changelog) as f:
return f.readline().strip()
def get_long_description():
root = os.path.dirname(__file__)
with open(os.path.join(root, "README.md")) as f:
description = f.read()
description += "\n\nChangelog\n=========\n\n"
with open(os.path.join(root, "CHANGELOG")) as f:
description += f.read()
return description
base_requirements = {
"commonregex",
"idna<3,>=2.5",
"click<7.2.0,>=7.1.1",
"expandvars>=0.6.5"
"dataclasses>=0.8"
"typing_extensions>=3.7.4"
"mypy_extensions>=0.4.3",
"typing-inspect",
"pydantic>=1.7.4",
"pydantic[email]>=1.7.2",
"google>=3.0.0",
"google-auth>=1.33.0",
"python-dateutil>=2.8.1",
"email-validator>=1.0.3",
"wheel~=0.36.2",
"python-jose==3.3.0",
"sqlalchemy>=1.3.24",
"sql-metadata~=2.0.0",
"requests~=2.26",
"PyYAML",
}
pii_requirements = {
"en_core_web_sm@https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0.tar.gz#egg=en_core_web",
"spacy==3.0.5",
}
report_requirements = {
"asgiref==3.4.1",
"Django==3.2.7",
"pytz==2021.1",
"sqlparse==0.4.2",
}
base_plugins = {
"query-parser",
"metadata-usage",
"file-stage",
"sql-metadata~=2.0.0",
}
plugins: Dict[str, Set[str]] = {
"athena": {"PyAthena[SQLAlchemy]"},
"bigquery": {"openmetadata-sqlalchemy-bigquery==0.2.0"},
"bigquery-usage": {"google-cloud-logging", "cachetools"},
"elasticsearch": {"elasticsearch~=7.13.1"},
"hive": {
"openmetadata-sqlalchemy-hive==0.2.0",
"thrift~=0.13.0",
"sasl==0.3.1",
"thrift-sasl==0.4.3",
},
"kafka": {"confluent_kafka>=1.5.0", "fastavro>=1.2.0"},
"ldap-users": {"ldap3==2.9.1"},
"looker": {"looker-sdk==21.12.2"},
"mssql": {"sqlalchemy-pytds>=0.3"},
"mssql-odbc": {"pyodbc"},
"mysql": {"pymysql>=1.0.2"},
"oracle": {"cx_Oracle"},
"pii-processor": pii_requirements,
"presto": {"pyhive~=0.6.3"},
"trino": {"sqlalchemy-trino"},
"postgres": {"pymysql>=1.0.2", "psycopg2-binary", "GeoAlchemy2"},
"redash": {"redash-toolbelt==0.1.4"},
"redshift": {
"openmetadata-sqlalchemy-redshift==0.2.1",
"psycopg2-binary",
"GeoAlchemy2",
},
"redshift-usage": {
"openmetadata-sqlalchemy-redshift==0.2.1",
"psycopg2-binary",
"GeoAlchemy2",
},
"data-profiler": {"openmetadata-data-profiler"},
"snowflake": {"snowflake-sqlalchemy<=1.2.4"},
"snowflake-usage": {"snowflake-sqlalchemy<=1.2.4"},
"sample-data": {"faker~=8.1.1"},
"superset": {},
"tableau": {"tableau-api-lib==0.1.22"},
"vertica": {"sqlalchemy-vertica[vertica-python]>=0.0.5"},
"report-server": report_requirements,
"airflow": {"apache-airflow >= 1.10.2"},
}
build_options = {"includes": ["_cffi_backend"]}
setup(
name="openmetadata-ingestion",
version="0.4.1",
url="https://open-metadata.org/",
author="OpenMetadata Committers",
license="Apache License 2.0",
description="Ingestion Framework for OpenMetadata",
long_description=get_long_description(),
long_description_content_type="text/markdown",
python_requires=">=3.8",
options={"build_exe": build_options},
package_dir={"": "src"},
zip_safe=False,
dependency_links=[],
project_urls={
"Documentation": "https://docs.open-metadata.org/",
"Source": "https://github.com/open-metadata/OpenMetadata",
},
packages=find_namespace_packages(where="./src", exclude=["tests*"]),
entry_points={
"console_scripts": ["metadata = metadata.cmd:metadata"],
"apache_airflow_provider": [
"provider_info = airflow_provider_openmetadata:get_provider_config"
],
},
install_requires=list(base_requirements),
extras_require={
"base": list(base_requirements),
**{plugin: list(dependencies) for (plugin, dependencies) in plugins.items()},
"all": list(
base_requirements.union(
*[requirements for plugin, requirements in plugins.items()]
)
),
},
)
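# The extras_require mapping above lets connector dependencies be installed on demand,
# e.g. (illustrative): pip install "openmetadata-ingestion[mysql]" additionally pulls
# in pymysql, and the "all" extra installs every plugin's requirements.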
| Python | CL | 51dcc24a4ae98b99fcd71f7179093d96115794c71dbf087e3cfdca717f473742 |
from .peaks import read_binary as read_peak
from .calibration import read_csv as read_calibration
from .housekeeping import read_csv as read_housekeeping
from .raw import read as read_raw
from .mie import makeMie_diameter as simulate_scattering_intensity
| Python | CL | 79c25cd011e097285c8c6628bf7130a3d0789b2b892fb3477b2c0e5486060645 |
import json
import logging
import os
import base64
import hashlib
from slugify import slugify
from search import settings
from search.indexing.strip_html import strip_html
from search.extraction.code_du_travail.cleaned_tags.data import CODE_DU_TRAVAIL_DICT
from search.extraction.fiches_ministere_travail.data import FICHES_MINISTERE_TRAVAIL
from search.extraction.themes_front.data import THEMES
logger = settings.get_logger(__name__)
logger.setLevel(logging.INFO)
CDTN_DOCUMENTS = []
def flatten(item):
if isinstance(item, list):
return ", ".join(item)
return item
def parse_hash_tags(tags):
newTags = []
for key, value in tags.items():
if isinstance(value, list):
for entry in value:
newTags.append(key + ":" + (str(entry) or ""))
else:
newTags.append(key + ":" + (str(value) or ""))
return newTags
def hasher(text):
return ("-" + base64.urlsafe_b64encode(
hashlib.sha1(text).digest()[:10]).decode()
) if text else ""
# make a slug from given text and add short hashed suffix from given seed if any
def make_slug(text, seed):
return slugify(text + str(
hasher(
((text + seed).encode('utf-8'))
)
), to_lower=True)
def populate_cdtn_documents():
with open(os.path.join(settings.BASE_DIR, 'dataset/kali/kali.json')) as json_data:
data = json.load(json_data)
logger.info("Load %s documents from kali", len(data))
for val in data:
CDTN_DOCUMENTS.append({
'source': 'kali',
'slug': val['slug'],
'title': val['titre'],
'text': f"IDCC {val['num']} {val['titre']}",
'url': val['url'],
'idcc': val['num'],
})
logger.info("Load %s documents from code-du-travail", len(CODE_DU_TRAVAIL_DICT))
for val in CODE_DU_TRAVAIL_DICT.values():
CDTN_DOCUMENTS.append({
'source': 'code_du_travail',
'text': val['bloc_textuel'],
'slug': val['num'].lower(),
'title': val['titre'],
'html': val['html'],
'path': val['path'],
'themes': val['themes'],
'date_debut': val['date_debut'],
'date_fin': val['date_fin'],
'url': val['url'],
})
with open(os.path.join(settings.BASE_DIR, 'dataset/fiches_service_public/fiches-sp-travail.json')) as json_data:
data = json.load(json_data)
logger.info("Load %s documents from fiches-service-public", len(data))
for val in data:
CDTN_DOCUMENTS.append({
'date': val['date'],
'raw': val['raw'],
'slug': slugify(val['title'], to_lower=True),
'source': 'fiches_service_public',
'tags': val['tags'],
'text': val['text'],
'references_juridiques': val['references_juridiques'],
'title': val['title'],
'url': val['url'],
})
logger.info("Load %s documents from fiches-ministere-travail", len(FICHES_MINISTERE_TRAVAIL))
for val in FICHES_MINISTERE_TRAVAIL:
CDTN_DOCUMENTS.append({
'source': 'fiches_ministere_travail',
'slug': slugify(val['title'], to_lower=True),
'text': val['text'],
'anchor': val['anchor'],
'html': val["html"],
'title': val['title'],
'url': val['url'],
'date': val.get('date'),
})
for val in THEMES:
CDTN_DOCUMENTS.append({
'source': 'themes',
'slug': val['slug'],
'text': val['text'],
'title': val['title'],
})
with open(os.path.join(settings.BASE_DIR, 'dataset/faq.json')) as json_data:
data = json.load(json_data)
logger.info("Load %s documents from faq", len(data))
for val in data:
faq_text = strip_html(val['reponse'])
tags = parse_hash_tags(val.get("tags"))
CDTN_DOCUMENTS.append({
'source': 'faq',
'slug': make_slug(val['question'], '-'.join(tags)),
'text': faq_text,
'html': val["reponse"],
'title': val['question'],
'tags': tags,
'date': val.get('date'),
'author': val['source'] if 'source' in val else 'DIRRECTE',
})
with open(os.path.join(settings.BASE_DIR, 'dataset/faq-contributions.json')) as json_data:
data = json.load(json_data)
logger.info("Load %s documents from contributions", len(data))
for val in data:
faq_text = strip_html(val['reponse'])
tags = parse_hash_tags(val.get("tags"))
CDTN_DOCUMENTS.append({
'source': 'faq',
'slug': make_slug(val['question'], '-'.join(tags)),
'text': faq_text,
'html': val["reponse"],
'title': val['question'],
'tags': tags,
'date': val.get('date_redaction'),
'date_expiration': val.get('date_expiration'),
'author': 'DIRRECTE',
})
with open(os.path.join(settings.BASE_DIR, 'dataset/faq-snippets.json')) as json_data:
data = json.load(json_data)
logger.info("Load %s documents from snippets", len(data))
for val in data:
faq_text = strip_html(val['reponse'])
tags = parse_hash_tags(val.get("tags"))
CDTN_DOCUMENTS.append({
'source': 'snippet',
'slug': slugify(val['question'], to_lower=True),
'text': faq_text,
'html': val["reponse"],
'title': val['question'],
'tags': tags,
'date': val.get('date_redaction'),
'references': val.get('references'),
'date_expiration': val.get('date_expiration'),
'author': val['redacteur'],
})
with open(os.path.join(settings.BASE_DIR, 'dataset/export-courriers.json')) as json_data:
data = json.load(json_data)
logger.info("Load %s documents from export-courriers.json", len(data))
for val in data:
tags = parse_hash_tags(val.get("tags"))
CDTN_DOCUMENTS.append({
'source': 'modeles_de_courriers',
'title': val['titre'],
'filename': val['filename'],
'slug': slugify(val['titre'], to_lower=True),
'text': ''.join(val['questions']),
'html': val["html"],
'tags': tags,
'description': val.get('description'),
'date': val.get('date_redaction'),
'author': val.get('redacteur'),
'editor': val.get('source'),
})
with open(os.path.join(settings.BASE_DIR, 'dataset/outils.json')) as json_data:
data = json.load(json_data)
logger.info("Load %s documents from outils.json", len(data))
for val in data:
CDTN_DOCUMENTS.append({
'source': 'outils',
'title': val['titre'],
'slug': slugify(val['code'], to_lower=True),
'text': ' '.join(val['questions']),
'themes': val['themes'],
'date': val.get('date'),
'branche': val['branche'],
})
populate_cdtn_documents()
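# make_slug appends a short url-safe hash derived from text + seed, so two entries with
# the same title but different tags still get distinct slugs. Illustrative only:
#
#     make_slug("Congés payés", "tag-a") != make_slug("Congés payés", "tag-b")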
| Python | CL | dd60363a37e810cd77c92ebd8003a1bc7d61aee4764e62e7c9090be3cabe2ae6 |
from Constants import constant
from WatchYourBack.Piece import Piece
from Evaluation.Features import Features
from ErrorHandling.Errors import *
from copy import copy
import traceback, sys
'''
THIS CLASS IMPLEMENTS THE BOARD GAME AND ITS MECHANISMS
THIS USES AN OBJECT ORIENTED APPROACH TO REPRESENTING THE BOARD AND THE PIECE
WE TRY TO ACHIEVE EFFICIENCY IN THIS CLASS BY MAKING USE OF DICTIONARIES, SETS AND ONLY UPDATING THE ATTRIBUTES OF
PIECES ON THE BOARD THAT HAVE BEEN AFFECTED BY APPLYING A MOVE TO THE BOARD.
WE TRY TO MINIMISE THE USE OF LISTS TO REDUCE THE NUMBER OF O(N) CALLS WE MAKE TO THE BOARD, INSTEAD
REMOVING/TESTING FOR MEMBERSHIP IN DICTIONARIES AND SETS.
THIS BOARD ALSO SUPPORTS AN UNDO-MOVE FUNCTIONALITY -- THIS WORKS BY CALLING IT DIRECTLY AFTER AN UPDATE_BOARD CALL IS
MADE. THIS ASSUMES THAT WHEN UPDATE_BOARD IS CALLED, THE ELIMINATED PIECES FOR THAT UPDATE IS STORED THEN PASSED
STRAIGHT BACK INTO THIS UNDO FUNCTION. THIS MEANS THAT WE CAN ONLY UNDO THE BOARD ONCE AN UPDATE TO THE BOARD IS MADE,
THEREFORE THIS DOES NOT SUPPORT MULTIPLE UNDO CALLS IN A SEQUENTIAL FASHION AS WE REQUIRE THE MOVE APPLIED AND THE
ELIMINATED PIECES TO BE STORED.
NOTE: BOARD/BOARD.PY ALSO HAS UNDO-FUNCTIONALITY BUT IT STORES ALL ELIMINATED PIECES FOR THE ENTIRE GAME IN A STACK
AND THE ACTIONS APPLIED TO THE GAME IN A STACK, SUCH THAT WHEN WE UNDO A MOVE, WE JUST POP THE MOST RECENT ITEM OFF
THE STACK AND REVERT THOSE CHANGES. DUE TO MORE OVERHEAD FROM CREATING A STACK, PUSHING/POPPING TO/FROM THE STACK AND
ADDITIONAL CHECKS FOR RECENTLY AVAILABLE MOVES WE BELIEVE THAT THIS MORE LOCAL IMPLEMENTATION OF UNDO-MOVE IS MORE
EFFICIENT.
'''
class Board(object):
# a class to represent the board state of the game
def __init__(self):
# key -- the position of the pieces
# value -- the piece object
# when we place a piece on the board we update pieces_remaining and we look at any neighbours in a 2 block radius
# we need to update these pieces' neighbour positions to False
self.white_pieces = {}
self.white_eliminate_pieces = []
self.black_pieces = {}
self.black_eliminate_pieces = []
self.places_remaining = {constant.WHITE_PIECE: 12, constant.BLACK_PIECE: 12}
# initialise the board representation
# LT RT LB RB
self.corner_pos = [(0, 0), (7, 0), (0, 7), (7, 7)]
# how many moves have been applied to the board so far
self.move_counter = 0
self.phase = constant.PLACEMENT_PHASE
self.num_shrink = 0
# initially no one wins
self.winner = None
self.terminal = False
# who is moving first
self.player_to_move = None
# current size of the board
self.min_dim = 0
self.max_dim = constant.BOARD_SIZE-1
self.free_squares = {}
self.init_free_squares()
# initialise the board with the corner pieces
# this dictionary keeps track of the board and the pieces that are currently on the board
# define the board parameters and constants
self.board_state = self.init_board_rep()
# the number of pieces at the current state -- this is not updated in minimax search
self.root_num_black = 0
self.root_num_white = 0
@staticmethod
def init_board_rep():
# store the board representation as a byte_array length 64 (64bytes)
# create a temp string of length 64
# set the corner locations on the board representation
return bytearray(constant.START_BOARD_STR,"utf-8")
# initialise the free squares of the game
def init_free_squares(self):
for col in range(constant.BOARD_SIZE):
for row in range(constant.BOARD_SIZE):
if (col,row) not in self.corner_pos:
entry = {(col,row): True}
else:
entry = {(col,row): False}
self.free_squares.update(entry)
# check if a square is free or not
def check_free_square(self,pos):
if pos in self.free_squares:
return self.free_squares[pos]
# if the position is not in the free_squares dictionary -- then it is not free
return False
# string_array helper methods
@staticmethod
def get_array_element(byte_array,row,col):
# we assume that the string array is n x n in dimension
# get the dimension
dimension = constant.BOARD_SIZE
# check if row and col are valid
if row > dimension - 1 or col > dimension - 1:
return None
elif row < 0 or col < 0:
return None
# get the index to access in the string
index = row*dimension + col
# return the char at position index
return chr(byte_array[index])
@staticmethod
def set_array_char(byte_array, row, col, new_char):
dimension = constant.BOARD_SIZE
if row > dimension - 1 or col > dimension - 1:
return
elif row < 0 or col < 0:
return
# set the new char in the string
# need to turn char into utf-8 encoding first
byte_array[row * dimension + col] = ord(new_char)
# return the current size of the board
def get_min_dim(self):
return self.min_dim
def get_max_dim(self):
return self.max_dim
# board representation setters and getter methods
def get_board_piece(self, row, col):
return self.get_array_element(self.board_state, row, col)
# change the piece type in the board representation
def set_board(self, row, col, piece_type):
piece_types = (constant.CORNER_PIECE, constant.WHITE_PIECE, constant.BLACK_PIECE,
constant.FREE_SPACE, constant.INVALID_SPACE)
# check if the piece_type is valid
if piece_type not in piece_types:
return
# if valid we can set the board position
self.set_array_char(self.board_state, row, col, piece_type)
# print board method
def print_board(self):
for row in range(constant.BOARD_SIZE):
for col in range(constant.BOARD_SIZE):
# get the char to print
char_index = row * constant.BOARD_SIZE + col
char = chr(self.board_state[char_index])
print('{} '.format(char), end='')
print()
    # when we add a piece back to the board this will automatically take care of updating the
    # neighbours, so move generation still happens in constant time -- we don't need to test
    # whether squares are free, as this method handles it
def add_piece(self,pos,colour):
try:
new_piece = Piece(pos,colour,self)
col, row = pos
new_piece_entry = {pos: new_piece}
if colour == constant.WHITE_PIECE:
self.white_pieces.update(new_piece_entry)
# update the board representation string
self.set_board(row,col,constant.WHITE_PIECE)
else:
self.black_pieces.update(new_piece_entry)
self.set_board(row,col,constant.BLACK_PIECE)
            # once we add this piece to the board, we need to update its neighbours
for direction in range(constant.MAX_MOVETYPE):
# check if there are any pieces that are occupying nearby squares
new_pos = self.convert_direction_to_coord(pos,direction)
# get the direction for the new_position from the old position
opp_direction = self.get_opposite_direction(direction)
if new_pos in self.black_pieces:
                    # update this piece's neighbour list
piece = self.black_pieces[new_pos]
# this space is now occupied therefore set the neighbour to false -- can no longer move here
piece.set_neighbour(opp_direction, False)
elif new_pos in self.white_pieces:
piece = self.white_pieces[new_pos]
piece.set_neighbour(opp_direction, False)
except IllegalPlacement:
print("Illegal Piece Placement... at ({}, {})".format(pos[1],pos[0]))
return
# when we want to apply a move to the board -- update the board and the dict associated
# return the eliminated pieces when we apply a move to the board
def apply_move(self, pos, direction, colour):
if colour == constant.WHITE_PIECE:
my_pieces = self.white_pieces
else:
my_pieces = self.black_pieces
# get the piece we are trying to move
try:
piece = self.get_piece(pos)
except PieceNotExist:
piece = None
print("No piece at this location... " + str(pos))
# print(my_pieces)
traceback.print_exc(file=sys.stdout)
exit(1)
# check if the move is legal first
if piece.is_legal_move(direction) is False:
return []
# we know we can make the move now
new_pos = self.convert_direction_to_coord(pos, direction)
new_col, new_row = new_pos
old_col, old_row = pos
# then we can update the dictionaries
piece = my_pieces.pop(pos)
# map it to the new position of the board
new_loc = {new_pos: piece}
my_pieces.update(new_loc)
# update the board representation
self.set_board(old_row, old_col, constant.FREE_SPACE)
self.set_board(new_row, new_col, colour)
# now we can test for elimination at the new position on the board
eliminated_pieces = self.perform_elimination(new_pos, colour)
# update the pieces position
piece.set_position(new_pos)
# update the pieces neighbours
piece.set_valid_neighbours()
# update the neighbours of that piece to False as these pieces
# can no longer move into this square that the piece is now occupying
self.update_neighbouring_squares(new_pos, False)
# neighbouring pieces are able to move into the old location of the moved piece as this
# square is now free
self.update_neighbouring_squares(pos, True)
if len(eliminated_pieces) > 0:
# then there are pieces that have been eliminated, therefore we must update the
# neighbouring pieces free neighbour list
for piece in eliminated_pieces:
elim_pos = piece.get_position()
self.update_neighbouring_squares(elim_pos, True)
return eliminated_pieces
# place a piece on the board and return the eliminated piece if there are any
def apply_placement(self, pos, colour):
col, row = pos
if colour == constant.WHITE_PIECE:
my_pieces = self.white_pieces
else:
my_pieces = self.black_pieces
# add the piece to the board
try:
new_piece = {pos: Piece(pos,colour,self)}
my_pieces.update(new_piece)
# update the free squares of the game
entry = {pos: False}
self.free_squares.update(entry)
except IllegalPlacement:
print("Piece created at illegal position on board : " + str(pos))
traceback.print_exc(file=sys.stdout)
exit(0)
return
# first we update the board representation
self.set_board(row, col, colour)
# perform the elimination around the piece that has been placed
eliminated_pieces = self.perform_elimination(pos, colour)
# for this position we must check if there are any pieces that are neighbouring this piece
# if there are, we must update those pieces free neighbours
# since we have placed a piece on the board, this location is no longer free
self.update_neighbouring_squares(pos, False)
# for each eliminated piece we must update the neighbouring squares of that piece
if len(eliminated_pieces) > 0:
for piece in eliminated_pieces:
elim_pos = piece.get_position()
self.update_neighbouring_squares(elim_pos,True)
# update the free squares of the game
entry = {elim_pos: True}
self.free_squares.update(entry)
if eliminated_pieces is None:
print("THIS SHOULD NOT BE HAPPENING ----------------------------------------")
return eliminated_pieces
    # update the free-neighbour lists of the pieces around pos --
    # pass bool_value = False to indicate that the square at pos is no longer free,
    # or bool_value = True to indicate that the square at pos is free again and is
    # therefore a square that the neighbouring pieces can move into
def update_neighbouring_squares(self,pos, bool_value):
# print(pos)
        # update the free-neighbour entries of any pieces adjacent to pos
for direction in range(constant.MAX_MOVETYPE):
# get the position of any possible neighbouring squares
neighbour_pos = self.convert_direction_to_coord(pos, direction)
            # get the direction that points from the neighbour back to the reference square
opp_dir = self.get_opposite_direction(direction)
# if a neighbouring square is occupied by this piece, we must update its free-neighbour list
if neighbour_pos in self.white_pieces:
# print(neighbour_pos)
# print(opp_dir)
neighbour = self.white_pieces[neighbour_pos]
neighbour.set_neighbour(opp_dir, bool_value)
# print(neighbour)
# if a neighbour is occupied by the opponent piece, we need to update its free-neighbour list
elif neighbour_pos in self.black_pieces:
# print(neighbour_pos)
# print(opp_dir)
neighbour = self.black_pieces[neighbour_pos]
neighbour.set_neighbour(opp_dir, bool_value)
# print(neighbour)
    # when we want to update the board we call this function
    # in the moving phase, move has to be in the form ((col, row), direction);
    # in the placement phase it is just the (col, row) position
def update_board(self, move, colour):
# check if the move passed in was a forfeit move
if move is None:
self.move_counter += 1
return []
eliminated_pieces = []
# make the action
if self.phase == constant.PLACEMENT_PHASE:
# make the placement -- this should take care of the update to the piece position list
# as well as the move counter
eliminated_pieces = self.apply_placement(move, colour)
# after an action is applied we can increment the move counter of the board
self.move_counter += 1
# test if we need to switch from placement to moving
if self.move_counter == 24 and self.phase == constant.PLACEMENT_PHASE:
# change the phase from placement to moving
self.phase = constant.MOVING_PHASE
self.move_counter = 0
# update each piece
# we need to re-evaluate the piece neighbours
for pos in self.white_pieces:
piece = self.white_pieces[pos]
piece.set_valid_neighbours()
for pos in self.black_pieces:
piece = self.black_pieces[pos]
piece.set_valid_neighbours()
elif self.phase == constant.MOVING_PHASE:
if self.move_counter >= 0:
# move is in the form (pos, direction)
pos = move[0]
direction = move[1]
# print(pos)
# make the move
eliminated = self.apply_move(pos, direction, colour)
# print(eliminated)
for p in eliminated:
eliminated_pieces.append(p)
self.move_counter += 1
                # the board shrinks after the 128th and 192nd moves of the moving phase,
                # so once the move counter reaches one of those values we perform the shrink
if self.move_counter == 128 or self.move_counter == 192:
# add the eliminated pieces from the shrink board to this
shrink_elim = self.shrink_board()
if len(shrink_elim) > 0:
for shrink_piece in shrink_elim:
eliminated_pieces.append(shrink_piece)
# also update any neighbouring pieces to the corners -- they are not able to move into
# this position anymore
for corner in self.corner_pos:
self.update_neighbouring_squares(corner, False)
# we need to re-evaluate the piece neighbours
for pos in self.white_pieces:
piece = self.white_pieces[pos]
piece.set_valid_neighbours()
for pos in self.black_pieces:
piece = self.black_pieces[pos]
piece.set_valid_neighbours()
# all 24 pieces have been placed on the board
# print(eliminated_pieces)
if eliminated_pieces is None:
print("hdsalkjsdhfalkjfhalskdjfhalksjdfhalkjsdhf")
return eliminated_pieces
# check if there is a winner terminal states can only occur in the final phase
def is_terminal(self):
# use the referee code for this
white_num = len(self.white_pieces)
black_num = len(self.black_pieces)
if self.phase == constant.MOVING_PHASE:
if black_num >= 2 and white_num >= 2:
return False
elif black_num >= 2 and white_num < 2:
self.winner = constant.BLACK_PIECE
# self.phase = constant.TERMINAL
self.terminal = True
return True
elif black_num < 2 and white_num >= 2:
self.winner = constant.WHITE_PIECE
# self.phase = constant.TERMINAL
self.terminal = True
return True
elif black_num < 2 and white_num < 2:
self.winner = None
# self.phase = constant.TERMINAL
self.terminal = True
return True
else:
# we have not reached a terminal state
return False
# elimination checkers -- TODO: need to change this to work with this board representation
# perform elimination only eliminates the pieces from the board -- it changes the dictionary
# it does not update the neighbours of the pieces -- NEED TO DO THIS AFTER YOU CALL IT
def perform_elimination(self, my_piece_pos, colour):
eliminated_pieces = []
if colour == constant.WHITE_PIECE:
my_pieces = self.white_pieces
my_elim_pieces = self.white_eliminate_pieces
opponent_pieces = self.black_pieces
opp_elim_pieces = self.black_eliminate_pieces
elif colour == constant.BLACK_PIECE:
my_pieces = self.black_pieces
my_elim_pieces = self.black_eliminate_pieces
opponent_pieces = self.white_pieces
opp_elim_pieces = self.white_eliminate_pieces
while self.check_one_piece_elimination(my_piece_pos, colour) is not None:
# check if this piece has eliminated an opponent
elim_pos = self.check_one_piece_elimination(my_piece_pos, colour)
# want to eliminate about the opposition's piece
if elim_pos in opponent_pieces:
# get the eliminated piece
elim_piece = opponent_pieces.pop(elim_pos)
# eliminate that piece from the board
elim_piece.eliminate()
# add to the opponent eliminated pieces
opp_elim_pieces.append(elim_piece)
# update the string board representation
remove_col, remove_row = elim_pos
self.set_board(remove_row, remove_col, constant.FREE_SPACE)
# update the eliminated piece list
eliminated_pieces.append(elim_piece)
        # once no opponent piece can be eliminated, check for self elimination
elim_pos = self.check_self_elimination(my_piece_pos, colour)
if elim_pos is not None:
# removes item from the board and list
elim_piece = my_pieces.pop(elim_pos)
elim_piece.eliminate()
# add to this players eliminated piece list
my_elim_pieces.append(elim_piece)
remove_col, remove_row = elim_pos
self.set_board(remove_row, remove_col, constant.FREE_SPACE)
# update the eliminated piece list
eliminated_pieces.append(elim_piece)
if eliminated_pieces is None:
print("*"*100)
return eliminated_pieces
# elimination helper function
def check_one_piece_elimination(self, my_piece_pos, colour):
pos_col, pos_row = my_piece_pos
if colour == constant.WHITE_PIECE:
my_pieces = copy(self.white_pieces)
opponent_pieces = self.black_pieces
else:
my_pieces = copy(self.black_pieces)
opponent_pieces = self.white_pieces
# append the corner pieces to the list as these act as your own piece
for corner in self.corner_pos:
my_pieces.update({corner: None})
# test all the 4 cases for this type of elimination
# don't need to test for negative indices and positions outside the boundary of the board because there should
# be no pieces that are placed in these positions and therefore do not exist in these lists
# check left
if (pos_col - 1, pos_row) in opponent_pieces and (pos_col - 2, pos_row) in my_pieces:
if opponent_pieces[(pos_col - 1, pos_row)].is_alive():
return pos_col - 1, pos_row
# check right
if (pos_col + 1, pos_row) in opponent_pieces and (pos_col + 2, pos_row) in my_pieces:
if opponent_pieces[(pos_col + 1, pos_row)].is_alive():
return pos_col + 1, pos_row
# check up
if (pos_col, pos_row - 1) in opponent_pieces and (pos_col, pos_row - 2) in my_pieces:
if opponent_pieces[(pos_col, pos_row - 1)].is_alive():
return pos_col, pos_row - 1
# check down
if (pos_col, pos_row + 1) in opponent_pieces and (pos_col, pos_row + 2) in my_pieces:
if opponent_pieces[(pos_col, pos_row + 1)].is_alive():
return pos_col, pos_row + 1
        # no elimination pattern was found, so there is no piece to be eliminated
return None
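    # Worked example (illustrative positions): if white calls this with its piece at
    # (3, 3), a black piece at (4, 3) and a white piece (or a corner) at (5, 3), the
    # horizontal W-B-W pattern matches the "check right" case and (4, 3) is returned
    # as the square to eliminate.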
def check_self_elimination(self, my_piece_pos, colour, action_eval=False):
# update piecePos from tuple to pos_row and pos_col
pos_col, pos_row = my_piece_pos
if colour == constant.WHITE_PIECE:
opponent_pieces = copy(self.black_pieces)
my_pieces = self.white_pieces
else:
opponent_pieces = copy(self.white_pieces)
my_pieces = self.black_pieces
# append the corner pieces to the list as these act as your own piece
for corner in self.corner_pos:
opponent_pieces.update({corner: None})
# now just need to check horizontal and vertical positions to see if they are in the piecePos list
# horizontal check
if ((pos_col + 1, pos_row) in opponent_pieces) and ((pos_col - 1, pos_row) in opponent_pieces):
if action_eval is False:
if my_pieces[(pos_col, pos_row)].is_alive():
return pos_col, pos_row
else:
return pos_col, pos_row
# vertical piece position check for self elimination
elif ((pos_col, pos_row + 1) in opponent_pieces) and ((pos_col, pos_row - 1) in opponent_pieces):
if action_eval is False:
if my_pieces[(pos_col, pos_row)].is_alive():
return pos_col, pos_row
else:
return pos_col, pos_row
else:
return None
# helper method for the moving phase of the game
@staticmethod
def convert_direction_to_coord(my_piece_pos, direction):
# piece pos is in the form of a tuple (col,row)
# moves types
# 0 - right 1 space
# 1 - down 1 space
# 2 - left 1 space
# 3 - up 1 spaces
# 4 - right 2 spaces
# 5 - down 2 spaces
# 6 - left 2 spaces
# 7 - up 2 spaces
# convert the tuple to row, col variable
pos_col, pos_row = my_piece_pos
        # do the conversion -- this function does not check that the resulting square is on the board
if direction == constant.RIGHT_1:
return pos_col + 1, pos_row
elif direction == constant.DOWN_1:
return pos_col, pos_row + 1
elif direction == constant.LEFT_1:
return pos_col - 1, pos_row
elif direction == constant.UP_1:
return pos_col, pos_row - 1
elif direction == constant.RIGHT_2:
return pos_col + 2, pos_row
elif direction == constant.DOWN_2:
return pos_col, pos_row + 2
elif direction == constant.LEFT_2:
return pos_col - 2, pos_row
elif direction == constant.UP_2:
return pos_col, pos_row - 2
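    # Example (illustrative): from a piece at (col=3, row=4), constant.RIGHT_1 maps to
    # (4, 4) and constant.UP_2 maps to (3, 2); the caller is still responsible for
    # checking that the resulting square is on the board.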
@staticmethod
def convert_coord_to_direction(coord_1, coord_2):
        # coord_1 is the stationary piece, coord_2 is the square we want to move to
# the move type returned is the move type to get from coord_1 to coord_2
coord_1_col, coord_1_row = coord_1
# print(coord_1_col, coord_1_row)
coord_2_col, coord_2_row = coord_2
        # check the four single-square moves first, then the two-square jumps
if coord_1_col + 1 == coord_2_col and coord_1_row == coord_2_row:
return constant.RIGHT_1
elif coord_1_col == coord_2_col and coord_1_row + 1 == coord_2_row:
return constant.DOWN_1
elif coord_1_col - 1 == coord_2_col and coord_1_row == coord_2_row:
return constant.LEFT_1
elif coord_1_col == coord_2_col and coord_1_row - 1 == coord_2_row:
return constant.UP_1
elif coord_1_col + 2 == coord_2_col and coord_1_row == coord_2_row:
return constant.RIGHT_2
elif coord_1_col == coord_2_col and coord_1_row + 2 == coord_2_row:
return constant.DOWN_2
elif coord_1_col - 2 == coord_2_col and coord_1_row == coord_2_row:
return constant.LEFT_2
elif coord_1_col == coord_2_col and coord_1_row - 2 == coord_2_row:
return constant.UP_2
@staticmethod
def get_opposite_direction(direction):
if direction == constant.UP_1:
return constant.DOWN_1
elif direction == constant.LEFT_1:
return constant.RIGHT_1
elif direction == constant.DOWN_1:
return constant.UP_1
elif direction == constant.RIGHT_1:
return constant.LEFT_1
elif direction == constant.UP_2:
return constant.DOWN_2
elif direction == constant.LEFT_2:
return constant.RIGHT_2
elif direction == constant.DOWN_2:
return constant.UP_2
elif direction == constant.RIGHT_2:
return constant.LEFT_2
# shrink the board -- this does not update any neighbouring squares apart from squares that are
# neighbouring the corner positions
def shrink_board(self):
eliminated_pieces = []
if self.num_shrink > 2:
return
# use the referee code for the shrinking of the board
offset = self.num_shrink
for i in range(offset, constant.BOARD_SIZE - offset):
# list storing the row and column we need to shrink
shrink = [(i, offset), (offset, i), (i, 7 - offset), (7 - offset, i)]
for (col, row) in shrink:
# update the board representation
# set the row to invalid spaces
self.set_board(row, col, constant.INVALID_SPACE)
# remove any piece that is eliminated from the position lists
if (col, row) in self.black_pieces:
piece = self.black_pieces.pop((col, row))
piece.eliminate()
self.black_eliminate_pieces.append(piece)
eliminated_pieces.append(piece)
elif (col, row) in self.white_pieces:
piece = self.white_pieces.pop((col, row))
piece.eliminate()
self.white_eliminate_pieces.append(piece)
eliminated_pieces.append(piece)
# set the column to invalid spaces
self.set_board(col, row, constant.INVALID_SPACE)
self.num_shrink += 1
offset += 1
# set the new corner
self.corner_pos[0] = (offset, offset)
self.corner_pos[1] = (7 - offset, offset)
self.corner_pos[2] = (offset, 7 - offset)
self.corner_pos[3] = (7 - offset, 7 - offset)
        # if a corner is on top of a piece, eliminate that piece
for corner in self.corner_pos:
# eliminate the white piece
if corner in self.white_pieces:
piece = self.white_pieces.pop(corner)
piece.eliminate()
self.white_eliminate_pieces.append(piece)
eliminated_pieces.append(piece)
elif corner in self.black_pieces:
# eliminate the black piece
piece = self.black_pieces.pop(corner)
piece.eliminate()
self.black_eliminate_pieces.append(piece)
eliminated_pieces.append(piece)
# set the board
corner_col, corner_row = corner
self.set_board(corner_row, corner_col, constant.CORNER_PIECE)
# check for one space eliminations about the corner pieces in the specific order
# according to the rule sheet -- [U.L -> L.L -> L.R -> U.R]
for i in (0, 2, 3, 1):
corner = self.corner_pos[i]
pieces = self.corner_elimination(corner)
# print("CORNER PIECES ")
            # print(pieces)
# add the eliminated pieces from corner elimination to the list of eliminated pieces
eliminated_pieces += pieces
# set the min and max dimensions of the board
self.min_dim += 1
self.max_dim -= 1
return eliminated_pieces
    # un-shrink the board representation -- this method does not replace any eliminated pieces
# this will be taken care of in the undo function
def unshrink_board(self):
if self.min_dim < 1 or self.max_dim > constant.BOARD_SIZE - 1:
return
# reset the corners
for col,row in self.corner_pos:
self.set_board(row,col,constant.FREE_SPACE)
# reset the new min and max dimensions of the board
self.min_dim -= 1
self.max_dim += 1
# reset the corner positions
new_corners = [(self.min_dim,self.min_dim), (self.max_dim,self.min_dim), (self.min_dim, self.max_dim)\
,(self.max_dim,self.max_dim)]
# replace all the invalid spaces with free spaces
# use the referee code for the shrinking of the board
for i in range(self.min_dim, self.max_dim + 1):
# list storing the row and column we need to shrink
shrink = [(i, self.min_dim), (self.min_dim, i), (i, self.max_dim), (self.max_dim, i)]
for (col, row) in shrink:
# update the board representation
# set the row to free spaces
self.set_board(row, col, constant.FREE_SPACE)
# set the column to free spaces
self.set_board(col, row, constant.FREE_SPACE)
# replace the corners on the board
for col, row in new_corners:
self.set_board(row,col, constant.CORNER_PIECE)
self.corner_pos = new_corners
self.num_shrink-=1
# helper function for elimination of pieces at a corner -- for board shrinks
# this does not update any neighbouring squares -- need to do this after the call
def corner_elimination(self, corner):
eliminated_pieces = []
players = (constant.WHITE_PIECE, constant.BLACK_PIECE)
        # the corner square acts as a piece of either colour -- treat the corner as a
        # white piece first (eliminating black pieces around it), then as a black piece
        # (eliminating white pieces)
for player in players:
if player == constant.WHITE_PIECE:
opponent_pieces = self.black_pieces
opp_elim_pieces = self.black_eliminate_pieces
else:
opponent_pieces = self.white_pieces
opp_elim_pieces = self.white_eliminate_pieces
# there can be more than one elimination or there can be None
while self.check_one_piece_elimination(corner, player) is not None:
eliminated_pos = self.check_one_piece_elimination(corner, player)
elim_piece = opponent_pieces.pop(eliminated_pos)
elim_piece.eliminate()
opp_elim_pieces.append(elim_piece)
col, row = eliminated_pos
# update the board representation
self.set_board(row, col, constant.FREE_SPACE)
eliminated_pieces.append(elim_piece)
return eliminated_pieces
# get the piece at a given position on the board
# returns -- the piece object or None if there is no piece corresponding to that position
def get_piece(self, pos):
if pos in self.white_pieces:
return self.white_pieces[pos]
elif pos in self.black_pieces:
return self.black_pieces[pos]
else:
raise PieceNotExist
# get the opposite piece type
@staticmethod
def get_opp_piece_type(piece_type):
if piece_type == constant.WHITE_PIECE:
return constant.BLACK_PIECE
else:
return constant.WHITE_PIECE
def reverse_eliminated_pieces(self,eliminated_pieces):
if eliminated_pieces is None:
raise InvalidEliminatedList
for elim_piece in eliminated_pieces:
# for each eliminated piece we must:
# put them back on the board
elim_pos = elim_piece.get_position()
col, row = elim_pos
# get the relevant piece dictionary
if elim_piece.get_colour() == constant.WHITE_PIECE:
my_pieces = self.white_pieces
else:
my_pieces = self.black_pieces
self.set_board(row, col, elim_piece.get_colour())
# put them back in the piece dictionaries
elim_piece.revert()
entry = {elim_piece.get_position(): elim_piece}
my_pieces.update(entry)
def undo_action(self, action_applied, colour, eliminated_pieces):
        # get the relevant piece dictionary
if colour == constant.WHITE_PIECE:
my_pieces = self.white_pieces
else:
my_pieces = self.black_pieces
# then we need to update the board to being in the state where it originally was in
# first we need to see if the board has just recently been shrunk
if self.move_counter == 128 or self.move_counter == 192:
# the board has just been shrunk once
# restore the invalid piece positions to being free squares
for corner in self.corner_pos:
# print(corner)
                # the squares the current corners occupy become free again once the board
                # is un-shrunk, so neighbouring pieces may move into them
self.update_neighbouring_squares(corner, True)
self.unshrink_board()
# also update any neighbouring pieces to the corners -- they are not able to move into
# this position anymore
for corner in self.corner_pos:
# print(corner)
self.update_neighbouring_squares(corner, False)
# place all the eliminated pieces back on the board
self.reverse_eliminated_pieces(eliminated_pieces)
# need to re-evaluate all pieces neighbours on the board
for pos in self.white_pieces:
piece = self.white_pieces[pos]
piece.set_valid_neighbours()
for pos in self.black_pieces:
piece = self.black_pieces[pos]
piece.set_valid_neighbours()
if action_applied is None:
self.move_counter -= 1
return
# if there was an action applied, need to determine if it was in placement or moving
# phase
if self.phase == constant.PLACEMENT_PHASE:
# put the eliminated pieces back on the board, then move the piece
self.reverse_eliminated_pieces(eliminated_pieces)
# reverse the move that was placed on the board
col,row = action_applied
# print(pos)
if action_applied in my_pieces:
# print("POP----------------------------------------------")
piece = my_pieces.pop(action_applied)
# print("POPPED PIECE")
self.set_board(row, col, constant.FREE_SPACE)
# update the free squares of the game
entry = {action_applied: True}
self.free_squares.update(entry)
# reset the valid neighbours of this piece
piece.set_valid_neighbours()
            # update any neighbouring pieces -- we have removed the piece, therefore the neighbouring
# square values should be set to True since they can now move into this square
self.update_neighbouring_squares(action_applied, True)
            # for each eliminated piece we must update its valid neighbours and also the neighbouring squares
for elim_piece in eliminated_pieces:
elim_piece.set_valid_neighbours()
elim_pos = elim_piece.get_position()
# update the free squares of the game
entry = {elim_pos: False}
self.free_squares.update(entry)
self.update_neighbouring_squares(elim_pos, False)
# decrease the moving counter
self.move_counter -= 1
return
elif self.phase == constant.MOVING_PHASE:
# need to check if it is the first move of moving phase
# if we are at move_counter = 0, we have placed a piece on the
# board to get to this stage, therefore we just need to revert this
# placement on the board
if self.move_counter == 0:
col, row = action_applied
# this is the first move, then the action applied to the
# board is a placement
self.reverse_eliminated_pieces(eliminated_pieces)
# reverse the move that was placed on the board
if action_applied in my_pieces:
piece = my_pieces.pop(action_applied)
self.set_board(row, col, constant.FREE_SPACE)
# update the free squares of the game
entry = {action_applied: True}
self.free_squares.update(entry)
piece.set_valid_neighbours()
# we have removed the pieces, therefore the neighbours can move into this space
self.update_neighbouring_squares(action_applied, True)
for elim_piece in eliminated_pieces:
elim_piece.set_valid_neighbours()
elim_pos = elim_piece.get_position()
# eliminated pieces are now put back onto the board, therefore set to False
# neighbouring squares can't move into this space anymore
# update the free squares of the game
entry = {elim_pos: False}
self.free_squares.update(entry)
self.update_neighbouring_squares(elim_pos, False)
# decrease the move counter
self.move_counter = 23
self.phase = constant.PLACEMENT_PHASE
return
else:
                # if the move counter is not 0, 128 or 192, the eliminated pieces have not
                # been placed back on the board yet, so do that now
# print("started here")
# check if the eliminated pieces have not been placed back on the board
if self.move_counter not in (0, 128, 192):
self.reverse_eliminated_pieces(eliminated_pieces)
# we just need to undo the move that was made -- the action applied to the board moves a piece
# from one square to another given a direction, therefore
# to get the piece we want to move back we need to apply the move to get the to and from squares
to_pos = action_applied[0]
direction = action_applied[1]
            # get the square the piece currently occupies (where the applied action moved it to)
from_pos = self.convert_direction_to_coord(to_pos, direction)
# reset the new location to being free
self.set_board(from_pos[1], from_pos[0], constant.FREE_SPACE)
# put the piece back to its old location
self.set_board(to_pos[1], to_pos[0], colour)
# get the old piece
if from_pos in my_pieces:
piece = my_pieces.pop(from_pos)
# change the location of this piece to its old location
piece.set_position(to_pos)
# add this piece back to the piece dictionary
entry = {to_pos: piece}
my_pieces.update(entry)
# reset the valid neighbours of this piece
piece.set_valid_neighbours()
                # to_pos is now occupied by this piece -- therefore we set
                # its entry in the neighbouring pieces' free lists to False
self.update_neighbouring_squares(to_pos,False)
# the position before the undo is now FREE
self.update_neighbouring_squares(from_pos,True)
            # reset the valid neighbours of each eliminated piece and mark its square as occupied again
for elim_piece in eliminated_pieces:
elim_piece.set_valid_neighbours()
elim_pos = elim_piece.get_position()
self.update_neighbouring_squares(elim_pos, False)
# decrease the move counter
self.move_counter -= 1
return
# METHODS TO RETURN ALL AVAILABLE ACTIONS
def get_placement_list(self,colour):
actions = []
for action in self.free_squares.keys():
if self.free_squares[action] is True and self.within_starting_area(action,colour):
actions.append(action)
return actions
def get_move_list(self,colour):
actions = []
if colour == constant.WHITE_PIECE:
my_pieces = self.white_pieces
else:
my_pieces = self.black_pieces
# iterate through all the pieces
for pos in my_pieces.keys():
piece = my_pieces[pos]
actions += piece.get_legal_actions()
return actions
def update_actions(self, colour):
if self.phase == constant.PLACEMENT_PHASE:
return self.get_placement_list(colour)
else:
return self.get_move_list(colour)
@staticmethod
def within_starting_area(move, colour):
# update the starting rows based off the player colour
if colour == constant.WHITE_PIECE:
min_row = 0
max_row = 5
else:
min_row = 2
max_row = 7
col, row = move
if min_row <= row <= max_row:
return True
else:
return False
# return a sorted list of actions based on an evaluation function for those actions
def sort_actions(self,actions,colour):
# lets sort the list using another list of weights
# iterating + sorting + reconstructing -- nlog(n) + 2n : this is not good enough
weights = [0]*len(actions)
MAX_DIST = 14
'''
ACTION- EVALUATION FUNCTION
'''
for i, action in enumerate(actions):
# get the min manhattan distance of a piece -- if the distance is large we want to append a small value --
# max distance will be 14
# get the distance of each piece to the centre of the board
if self.phase == constant.PLACEMENT_PHASE:
                # we subtract the distance because being further from the centre is less
                # desirable than being close to it
weights[i] -= Features.dist_to_center(action) * 50
                # at the start of the game we want to place our pieces close to the opponent,
                # but the weighting is lower than in the moving phase as we don't necessarily
                # want to favour actions that are closer to the centre of the board
weights[i] += (MAX_DIST - Features.min_manhattan_dist(self, action, colour)) * 10
else:
pos = self.convert_direction_to_coord(action[0],action[1])
weights[i] -= Features.dist_to_center(pos) * 50
weights[i] += (MAX_DIST - Features.min_manhattan_dist(self, action, colour)) * 25
# if an action is able to capture a piece then we need to increase the weight of this action
if Features.can_action_capture(self,action,colour) is True:
weights[i] += 10500
# if an action is going to self eliminate itself then we need to decrease the weight of this action
if Features.check_self_elimination(self,action,colour) is True:
weights[i] -= 4000
# if a piece is able to surround an enemy piece increase the weight
if Features.can_action_surround(self,action,colour) is True:
weights[i] += 300
# if a piece is able to form a cluster then this is a good move to make
if Features.can_form_cluster(self,action,colour) is True:
weights[i] += 750
# is a middle square free -- if it is this should be one of the first moves we should try
if Features.occupy_middle(self,action,colour) is True:
# if we are the second player, it may not be the best to go for the middle therefore we need to
# decrease the weight if we are the black player
if colour == constant.WHITE_PIECE:
weights[i] += 1000
else:
weights[i] += 500
# if we are already in a middle square we don't really want to move this piece
if self.phase == constant.MOVING_PHASE:
if Features.in_middle(self, action) is True:
weights[i] -= 700
# if we can form a pattern that guarantees us a capture, then this move has more importance than
# other moves
if Features.form_elim_pattern(self, action, colour) is True:
weights[i] += 800
# if the colour is black we should check if we are placing in a vulnerable position -- these actions
# are not desired as it means we can easily be taken -- we care about this more when we are
# the black piece as we have to play more defensively
if Features.check_vulnerable_action(self, action, colour) is True:
if colour == constant.WHITE_PIECE:
weights[i] -= 350
else:
weights[i] -= 450
return [action for _, action in sorted(zip(weights,actions), reverse=True)]
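    # Note on the return above: sorted(zip(weights, actions), reverse=True) orders the
    # actions by weight, highest first; when two weights tie, the comparison falls back
    # to the actions themselves, which works here because actions are tuples of ints.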
'''
# STANDARD METHODS OVERRIDDEN FOR THIS CLASS
'''
def __str__(self):
board = ""
for row in range(constant.BOARD_SIZE):
for col in range(constant.BOARD_SIZE):
# get the char to print
char_index = row * constant.BOARD_SIZE + col
char = chr(self.board_state[char_index])
board+=char + " "
board+="\n"
return board
|
Python
|
CL
|
be15e93031edd6f74124f8a03bc82d4944caf629e22681bc433b9982a3d68d4a
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import seaborn as sns
def set_plot_details(axs, xticks=None, yticks=None, xlabel=None, xlabel_fontsize=None, ylabel=None,
ylabel_fontsize=None, title=None, title_fontsize=None, grid=False, legend=None):
"""
Function to modularize a few plot details.
"""
if xticks is not None:
axs.set_xticks(xticks)
if yticks is not None:
axs.set_yticks(yticks)
if xlabel is not None:
axs.set_xlabel(xlabel, fontsize=xlabel_fontsize)
if ylabel is not None:
axs.set_ylabel(ylabel, fontsize=ylabel_fontsize)
if title is not None:
axs.set_title(title, fontsize=title_fontsize)
if grid:
axs.grid()
if legend is not None:
axs.legend(legend)
# Class designed to implement an analyzer with a few statistical tests.
class Stat_Analizer_TLC():
def __init__(self, alpha, sample_size, n_sample):
self.alpha = alpha
self.sample_size = sample_size
self.n_sample = n_sample
def sampling(self, data):
sample_size = self.sample_size
index = np.random.choice(range(0, data.shape[0]), size=sample_size)
sample_df = data.iloc[index]
return sample_df
    # Function designed to collect sample means from a dataset under the Central Limit Theorem hypotheses
def sample_means(self, data):
n_sample = self.n_sample
sample_size = self.sample_size
samp_means = []
        for _ in range(n_sample):
            sample = self.sampling(data)
samp_means.append(sample.mean())
return samp_means
def kurtosis_check(self, dist, title):
kurt = st.kurtosis(dist, fisher=False)
print('Distribuição de ' + title + ': Curtose = {0:.3f}'.format(kurt))
# Performs t-test to compare one sample with the population it was drawn from.
def perform_t_test_1_sample(self, dist, pop_dist, title):
alpha = self.alpha
sample_size = self.sample_size
t_test = st.ttest_1samp(dist, np.array(pop_dist).mean())
if t_test[1] < 1-alpha:
result = ' A amostra é estatisticamente diferente da população! (rejeitamos a hipótese nula).'
else:
            result = 'A amostra não é estatisticamente diferente da população! (não rejeitamos a hipótese nula).'
interval = st.t.interval(alpha=alpha, df=st.kurtosis(pop_dist, fisher=False), loc=np.array(
pop_dist).mean(), scale=np.array(pop_dist).std()/np.sqrt(sample_size))
print('>>> ' + title+':')
print('Média: {0:.3f}'.format(np.array(dist).mean()))
print(
'Intervalo de confiança: {0:.3f} <------------> {1:.3f}'.format(interval[0], interval[1]))
print('p-valor = {0:.3f}'.format(t_test[1]))
print('Resultado: '+result)
# Performs t-test comparing two independent samples from the same population.
def perform_t_test_2_sample(self, dist1, dist2, title1, title2):
alpha = self.alpha
sample_size = self.sample_size
t_test = st.ttest_ind(dist1, dist2)
if t_test[1] < 1-alpha:
            result = 'As amostras são estatisticamente diferentes entre si! (rejeitamos a hipótese nula).'
else:
result = 'As amostras não são estatisticamente diferentes entre si! (não rejeitamos a hipótese nula).'
interval1 = st.t.interval(alpha=alpha, df=st.kurtosis(dist1, fisher=False), loc=np.array(
dist1).mean(), scale=np.array(dist1).std()/np.sqrt(sample_size))
interval2 = st.t.interval(alpha=alpha, df=st.kurtosis(dist2, fisher=False), loc=np.array(
dist2).mean(), scale=np.array(dist2).std()/np.sqrt(sample_size))
print('Analisando ' + title1 + ' contra '+title2+':')
print('>>> ' + title1+':')
print('Média: {0:.3f}'.format(np.array(dist1).mean()))
print(
'Intervalo de confiança: {0:.3f} <------------> {1:.3f}'.format(interval1[0], interval1[1]))
print('>>> ' + title2+':')
print('Média: {0:.3f}'.format(np.array(dist2).mean()))
print(
'Intervalo de confiança: {0:.3f} <------------> {1:.3f}'.format(interval2[0], interval2[1]))
print('>>> Resultado:')
print('p-valor = {0:.3f}'.format(t_test[1]))
print('Conclusão: '+result)
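# A minimal usage sketch (assumptions: the synthetic exponential data and the titles are
# illustrative, alpha is treated as a confidence level as the class above does, and
# st.t.interval is called with the alpha= keyword, i.e. an older SciPy):
if __name__ == '__main__':
    population = pd.Series(np.random.exponential(scale=2.0, size=10000))
    analyzer = Stat_Analizer_TLC(alpha=0.95, sample_size=100, n_sample=500)
    # Collect the sampling distribution of the mean and check how normal it looks.
    means = analyzer.sample_means(population)
    analyzer.kurtosis_check(means, 'medias amostrais')
    # Compare the sample means against the population they were drawn from.
    analyzer.perform_t_test_1_sample(means, population, 'Medias amostrais')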
|
Python
|
CL
|
38d11a88b2665cd88924e691f6992d71db56c89b94216174b44b12368886d45d
|
from serial import Serial
from packets import ArduinoToPiPacket, PiToArduinoPacket, PiToArduinoCmd, ArduinoToPiRsp
class ArduinoInterface: # NOTE: CALLBACKS WILL FAIL AFTER 2^32 PACKETS ARE SENT
def __init__(self, port, baudrate, timeout=3.0): # NOTE: REQUIRES TWO SECONDS TO START
self.serial_port = Serial(port, baudrate, timeout=timeout)
self.serial_port.flushInput()
self.seq_num = -1
self.callbacks = {}
# self.packet_queue = []
# self.serial_ready = False # TODO: WAIT FOR IT TO BE READY. ASYNC?
# Reads the buffer and processes packets.
# Set debug=True to print off strings.
def process_buffer(self, debug=False):
if debug:
while self.serial_port.in_waiting:
print(self.serial_port.readline())
else:
while self.serial_port.in_waiting >= PiToArduinoPacket.PACKET_SIZE:
bytes_read = self.serial_port.read(PiToArduinoPacket.PACKET_SIZE)
#print('Got {} bytes {}'.format(len(bytes_read), bytes_read))
read_packet = ArduinoToPiPacket(bytes_read)
# Look up whether there is a registered callback function
if read_packet.seq_num in self.callbacks:
#print('Found a callback seqnum={}'.format(read_packet.seq_num))
self.callbacks[read_packet.seq_num] \
(read_packet.arg1, read_packet.arg2, read_packet.arg3)
# Creates and sends the packet. Also registers the callback function.
def send_packet(self, command_id, arg1=0.0, arg2=0.0, arg3=0.0, callback_fcn=None):
#print('Sending packet')
# Increment and save sequence number
self.seq_num += 1
seq_num = self.seq_num
# Construct the packet
send_packet = PiToArduinoPacket(command_id, seq_num, arg1, arg2, arg3)
# Register the callback, if any
if callback_fcn:
self.callbacks[seq_num] = callback_fcn
#print('Registering callback with seq_num {}'.format(seq_num))
# Send the packet
bytes_written = self.serial_port.write(bytes(send_packet.to_byte_string()))
def echo(self, arg1, arg2, arg3, callback=None):
self.send_packet(PiToArduinoCmd.ECHO, arg1, arg2, arg3, callback)
def set_speed_limit(self, cm_per_sec):
self.send_packet(PiToArduinoCmd.SET_SPEEDLIMIT, cm_per_sec)
def command_motor_pwms(self, left, right, callback=None):
self.send_packet(PiToArduinoCmd.SET_MOTORS, arg1=left, arg2=right, \
callback_fcn=callback)
def get_odometry(self, callback):
self.send_packet(PiToArduinoCmd.GET_ODOMETRY, callback_fcn=callback)
# def get_ticks(self, callback):
# self.send_packet(PiToArduinoCmd.CMD_GET_TICKS, callback_fcn=callback)
def command_openloop_straight(self, speed, distance, callback=None):
self.send_packet(PiToArduinoCmd.OPENLOOP_STRAIGHT, arg1=speed, \
arg2=distance, callback_fcn=callback)
def command_openloop_rcurve(self, speed, turn_radius, theta, callback=None):
self.send_packet(PiToArduinoCmd.OPENLOOP_R_CURVE, arg1=speed, \
arg2=turn_radius, arg3=theta, callback_fcn=callback)
def command_openloop_lcurve(self, speed, turn_radius, theta, callback=None):
self.send_packet(PiToArduinoCmd.OPENLOOP_L_CURVE, arg1=speed, \
arg2=turn_radius, arg3=theta, callback_fcn=callback)
#self.serial_port.write(bytes(self.send_packet.to_byte_string()))
    # Sends a closed-loop command with the given speed and heading error (theta_error)
def command_closedloop(self, speed, theta_error, callback=None):
self.send_packet(PiToArduinoCmd.CLOSEDLOOP, arg1=speed, arg2=theta_error, \
callback_fcn=callback)
def reset_odometry(self):
self.send_packet(PiToArduinoCmd.RESET_ODOMETRY)
def turn_statistics_on(self, period_sec, callback=None):
self.send_packet(PiToArduinoCmd.TURN_STATISTICS_ON, arg1=period_sec, callback_fcn=callback)
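# A minimal usage sketch; the port name, baud rate and delays below are assumptions
# for illustration, not values taken from this project:
if __name__ == '__main__':
    import time

    def print_odometry(arg1, arg2, arg3):
        # The callback simply receives the three float arguments of the response packet.
        print('odometry response:', arg1, arg2, arg3)

    arduino = ArduinoInterface('/dev/ttyACM0', 115200)
    time.sleep(2.0)                       # the interface needs about two seconds to start
    arduino.get_odometry(print_odometry)  # registers the callback under a new seq_num
    time.sleep(0.5)
    arduino.process_buffer()              # drain the serial buffer and fire any callbacks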
|
Python
|
CL
|
32115387390a4f8f8bb3a9d2b18685024f9d4bd4719fbf9822e0d5c8548c9787
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'AppServiceCertificateResponse',
'CertificateDetailsResponse',
'CertificateOrderCertificateResponse',
]
@pulumi.output_type
class AppServiceCertificateResponse(dict):
"""
Key Vault container for a certificate that is purchased through Azure.
"""
def __init__(__self__, *,
provisioning_state: str,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None):
"""
Key Vault container for a certificate that is purchased through Azure.
:param str provisioning_state: Status of the Key Vault secret.
:param str key_vault_id: Key Vault resource Id.
:param str key_vault_secret_name: Key Vault secret name.
"""
pulumi.set(__self__, "provisioning_state", provisioning_state)
if key_vault_id is not None:
pulumi.set(__self__, "key_vault_id", key_vault_id)
if key_vault_secret_name is not None:
pulumi.set(__self__, "key_vault_secret_name", key_vault_secret_name)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Status of the Key Vault secret.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="keyVaultId")
def key_vault_id(self) -> Optional[str]:
"""
Key Vault resource Id.
"""
return pulumi.get(self, "key_vault_id")
@property
@pulumi.getter(name="keyVaultSecretName")
def key_vault_secret_name(self) -> Optional[str]:
"""
Key Vault secret name.
"""
return pulumi.get(self, "key_vault_secret_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CertificateDetailsResponse(dict):
"""
Certificate Details
"""
def __init__(__self__, *,
location: str,
id: Optional[str] = None,
issuer: Optional[str] = None,
kind: Optional[str] = None,
name: Optional[str] = None,
not_after: Optional[str] = None,
not_before: Optional[str] = None,
raw_data: Optional[str] = None,
serial_number: Optional[str] = None,
signature_algorithm: Optional[str] = None,
subject: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
thumbprint: Optional[str] = None,
type: Optional[str] = None,
version: Optional[int] = None):
"""
Certificate Details
:param str location: Resource Location
:param str id: Resource Id
:param str issuer: Issuer
:param str kind: Kind of resource
:param str name: Resource Name
:param str not_after: Valid to
:param str not_before: Valid from
:param str raw_data: Raw certificate data
:param str serial_number: Serial Number
:param str signature_algorithm: Signature Algorithm
:param str subject: Subject
:param Mapping[str, str] tags: Resource tags
:param str thumbprint: Thumbprint
:param str type: Resource type
:param int version: Version
"""
pulumi.set(__self__, "location", location)
if id is not None:
pulumi.set(__self__, "id", id)
if issuer is not None:
pulumi.set(__self__, "issuer", issuer)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if not_after is not None:
pulumi.set(__self__, "not_after", not_after)
if not_before is not None:
pulumi.set(__self__, "not_before", not_before)
if raw_data is not None:
pulumi.set(__self__, "raw_data", raw_data)
if serial_number is not None:
pulumi.set(__self__, "serial_number", serial_number)
if signature_algorithm is not None:
pulumi.set(__self__, "signature_algorithm", signature_algorithm)
if subject is not None:
pulumi.set(__self__, "subject", subject)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
if type is not None:
pulumi.set(__self__, "type", type)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def location(self) -> str:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def issuer(self) -> Optional[str]:
"""
Issuer
"""
return pulumi.get(self, "issuer")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Resource Name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notAfter")
def not_after(self) -> Optional[str]:
"""
Valid to
"""
return pulumi.get(self, "not_after")
@property
@pulumi.getter(name="notBefore")
def not_before(self) -> Optional[str]:
"""
Valid from
"""
return pulumi.get(self, "not_before")
@property
@pulumi.getter(name="rawData")
def raw_data(self) -> Optional[str]:
"""
Raw certificate data
"""
return pulumi.get(self, "raw_data")
@property
@pulumi.getter(name="serialNumber")
def serial_number(self) -> Optional[str]:
"""
Serial Number
"""
return pulumi.get(self, "serial_number")
@property
@pulumi.getter(name="signatureAlgorithm")
def signature_algorithm(self) -> Optional[str]:
"""
Signature Algorithm
"""
return pulumi.get(self, "signature_algorithm")
@property
@pulumi.getter
def subject(self) -> Optional[str]:
"""
Subject
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def thumbprint(self) -> Optional[str]:
"""
Thumbprint
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def version(self) -> Optional[int]:
"""
Version
"""
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CertificateOrderCertificateResponse(dict):
"""
Class representing the Key Vault container for certificate purchased through Azure
"""
def __init__(__self__, *,
location: str,
id: Optional[str] = None,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None,
kind: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
type: Optional[str] = None):
"""
Class representing the Key Vault container for certificate purchased through Azure
:param str location: Resource Location
:param str id: Resource Id
:param str key_vault_id: Key Vault Csm resource Id
:param str key_vault_secret_name: Key Vault secret name
:param str kind: Kind of resource
:param str name: Resource Name
:param str provisioning_state: Status of the Key Vault secret
:param Mapping[str, str] tags: Resource tags
:param str type: Resource type
"""
pulumi.set(__self__, "location", location)
if id is not None:
pulumi.set(__self__, "id", id)
if key_vault_id is not None:
pulumi.set(__self__, "key_vault_id", key_vault_id)
if key_vault_secret_name is not None:
pulumi.set(__self__, "key_vault_secret_name", key_vault_secret_name)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> str:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="keyVaultId")
def key_vault_id(self) -> Optional[str]:
"""
Key Vault Csm resource Id
"""
return pulumi.get(self, "key_vault_id")
@property
@pulumi.getter(name="keyVaultSecretName")
def key_vault_secret_name(self) -> Optional[str]:
"""
Key Vault secret name
"""
return pulumi.get(self, "key_vault_secret_name")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Resource Name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Status of the Key Vault secret
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
Python
|
CL
|
f9ce5b12e861c614dc30f8fe9d9353a6eb0b4e5b32841526a2112a738e096fb8
|
import socket
import binascii
class IpPacket(object):
"""
Represents the *required* data to be extracted from an IP packet.
"""
def __init__(self, protocol, ihl, source_address, destination_address, payload):
self.protocol = protocol
self.ihl = ihl
self.source_address = source_address
self.destination_address = destination_address
self.payload = payload
class TcpPacket(object):
"""
Represents the *required* data to be extracted from a TCP packet.
"""
def __init__(self, src_port, dst_port, data_offset, payload):
self.src_port = src_port
self.dst_port = dst_port
# As far as I know, this field doesn't appear in Wireshark for some reason.
self.data_offset = data_offset
self.payload = payload
def parse_raw_ip_addr(raw_ip_addr: bytes) -> str:
# Converts a byte-array IP address to a string
# the input is on the form b'\xaa\xab'... a byte array
addr = ""
i = 0
while i < len(raw_ip_addr):
if i == len(raw_ip_addr) - 1:
addr+= str(raw_ip_addr[i] & 0xFF)
else:
addr+= str(raw_ip_addr[i] & 0xFF) + "."
i+=1
return addr
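# Example (illustrative): parse_raw_ip_addr(b'\xc0\xa8\x01\x01') returns '192.168.1.1'.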
def parse_application_layer_packet(ip_packet_payload: bytes) -> TcpPacket:
# Parses raw bytes of a TCP packet
# That's a byte literal (~byte array) check resources section
srcport = int.from_bytes(ip_packet_payload[0:2], byteorder="big")
destport = int.from_bytes(ip_packet_payload[2:4], byteorder="big")
offset = (ip_packet_payload[12] & 0xF0) >> 4
data = getdata(offset, ip_packet_payload)
decdata = ""
try:
decdata = data.decode("utf-8")
print("decdata:", decdata)
    except UnicodeDecodeError:
        print("payload is not valid UTF-8 text")
return TcpPacket(srcport, destport, offset, data)
def parse_network_layer_packet(ip_packet: bytes) -> IpPacket:
# Parses raw bytes of an IPv4 packet
# That's a byte literal (~byte array) check resources section
ihl = ip_packet[0] & 0x0F
protocol = ip_packet[9] & 0xFF
srcaddr = parse_raw_ip_addr(ip_packet[12:16])
destaddr = parse_raw_ip_addr(ip_packet[16:20])
data = getdata(ihl, ip_packet)
return IpPacket(protocol, ihl, srcaddr, destaddr, data)
def getdata(offset, packet):
start = int(offset*32/8)
return packet[start:]
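# Both the IPv4 IHL field and the TCP data offset count 32-bit words, so getdata
# converts them to a byte index with offset * 32 / 8; for example an IHL of 5 means
# the payload starts at byte 20 of the packet.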
def main():
    # Un-comment these lines if you're getting too much noisy traffic,
    # to bind the sniffer to a specific interface on your PC (or simply disconnect from the internet).
    # iface_name = "lo"
    # sniffer.setsockopt(socket.SOL_SOCKET,
    #                    socket.SO_BINDTODEVICE, bytes(iface_name, "ASCII"))
TCP = 0x0006 # this is the protocol number of TCP in hex
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, TCP)
while True:
# Receive packets and do processing here
bindata, addr = sniffer.recvfrom(5000)
ippacket = parse_network_layer_packet(bindata)
parse_application_layer_packet(ippacket.payload)
if __name__ == "__main__":
main()
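# Note: creating a socket with socket.SOCK_RAW normally requires elevated privileges
# (e.g. root on Linux), so this sniffer is typically run with sudo.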
|
Python
|
CL
|
727a1354dd1f98bd3e003e194d1f903c255a401201716bbc53e929ba327fecb0
|
import os.path as op
from sfepy.base.base import *
import sfepy.base.ioutils as io
from sfepy.fem import ProblemDefinition
from sfepy.solvers.generic import solve_direct
from application import Application
def assign_standard_hooks(obj, get, mod):
"""
Set standard hook function attributes from `mod` to `obj` using the
`get` function.
"""
hook = get('step_hook', None)
if hook is not None:
hook = getattr(mod, hook)
obj.step_hook = hook
hook = get('post_process_hook', None)
if hook is not None:
hook = getattr(mod, hook)
obj.post_process_hook = hook
hook = get('post_process_hook_final', None)
if hook is not None:
hook = getattr(mod, hook)
obj.post_process_hook_final = hook
class SimpleApp( Application ):
def process_options( options ):
"""Application options setup. Sets default values for missing
non-compulsory options."""
get = options.get_default_attr
save_results = get( 'save_results', True )
# Save each variable into a separate file, using the region of its
# definition only.
file_per_var = get( 'file_per_var', False )
output_format = get( 'output_format', 'vtk' )
output_dir = get( 'output_dir', '.' )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
# Called after each time step, can do anything, no return value.
step_hook = get( 'step_hook', None )
# Called after each time step.
post_process_hook = get( 'post_process_hook', None )
# Called after all time steps, or in the stationary case.
post_process_hook_final = get( 'post_process_hook_final', None )
use_equations = get('use_equations', 'equations')
return Struct( **locals() )
process_options = staticmethod( process_options )
def __init__(self, conf, options, output_prefix,
init_equations=True, **kwargs):
"""`kwargs` are passed to ProblemDefinition.from_conf()
Command-line options have precedence over conf.options."""
Application.__init__( self, conf, options, output_prefix )
self.setup_options()
is_eqs = init_equations
if hasattr(options, 'solve_not') and options.solve_not:
is_eqs = False
self.problem = ProblemDefinition.from_conf(conf,
init_equations=is_eqs,
**kwargs)
self.setup_output_info( self.problem, self.options )
def setup_options( self ):
self.app_options = SimpleApp.process_options( self.conf.options )
assign_standard_hooks(self, self.app_options.get_default_attr,
self.conf.funmod)
# Override default equations, if use_equations is set.
if hasattr(self.conf, 'equations'):
self.conf.equations = getattr(self.conf,
self.app_options.use_equations)
def setup_output_info(self, problem, options):
"""Modifies both problem and options!"""
if options.output_filename_trunk is None:
ofn_trunk = io.get_trunk(self.conf.filename_mesh)
options.output_filename_trunk = ofn_trunk
else:
ofn_trunk = options.output_filename_trunk
if hasattr(options, 'output_format') \
and (options.output_format is not None):
output_format = options.output_format
else:
output_format = self.app_options.output_format
problem.setup_output(output_filename_trunk=ofn_trunk,
output_dir=self.app_options.output_dir,
output_format=output_format,
file_per_var=self.app_options.file_per_var)
def call( self ):
out = solve_direct( self.conf, self.options,
problem=self.problem,
step_hook=self.step_hook,
post_process_hook=self.post_process_hook,
post_process_hook_final=self.post_process_hook_final)
return out
|
Python
|
CL
|
7731e22d930ea6706b08335374544b17a6e76410dfa638dfc3cbe67812b70c12
|
""" This file handles anything that comes to applications
including apply for project, accept and deny applications.
"""
import cherrypy
import json
import requests
import psycopg2.extras
from datetime import date
class ApplicationHandler(object):
""" Object for project's applications """
# This is domain for calling notifications' API
# Currently set to dev's domain (http://localhost:8080)
# Will be changed to http://teamseek.io in the future when hosting
domain = 'http://localhost:8080'
# Notification type for application
notification_type = 1
# Only these _ACTIONs are allowed
_ACTION = {
# [GET] actions
'_GET': {
# Getting user's applications
'my_applications': '',
# Checking if a project has been applied
'is_applied': '',
},
# [POST] actions
'_POST': {
# Approving an application (used for project's leader)
'approve': '',
# Denying an application (used for project's leader)
'deny': ''
},
# [PUT] actions
'_PUT': {
# Sending out an application to the project's leader
'new_application': '',
}
}
def __init__(self, db=None):
""" Run these instructions when project is initialized """
# Check if database is passed in
if db:
self.db = db
self.cur = db.connection.cursor()
else:
print "applications.py >> Error: Invalid database connection"
@cherrypy.expose
def index(self, **params):
""" Forwarding HTTP requests to the right request handler """
# Check if user's logged in
if 'user' not in cherrypy.session:
return json.dumps({"error": "You shouldn't be here"})
# Forwarding to the right request handler
http_method = getattr(self, cherrypy.request.method)
return http_method(**params)
@cherrypy.tools.accept(media="text/plain")
def GET(self, **params):
"""
Handle pulling applications
params: i.e. {'action': 'my_applications'}
i.e. {'action': 'is_applied', 'project_id': '5'}
return: a list of applications' details
"""
# Check if everything is provided
if 'action' not in params or params['action'] not in self._ACTION['_GET']:
return json.dumps({'error': 'Not enough data'})
if (not 'my_applications' == params['action']) and \
('ids' not in params and 'project_id' not in params):
return json.dumps({'error': 'Not enough data'})
# Form query for database
query = """
SELECT id, project_id,
(SELECT title FROM project_info WHERE project_info.project_id = applications.project_id),
applicant_id, (SELECT username FROM users WHERE user_id = applicant_id),
status, date_applied
FROM applications
"""
# If user is requesting his/her applications
if 'my_applications' == params['action']:
query_condition = "WHERE applicant_id = (SELECT user_id FROM users WHERE username = %s);"
query_params = (cherrypy.session['user'], )
# If user is requesting to check if she/he has already applied for a particular project
else:
query_condition = """
WHERE applicant_id = (SELECT user_id FROM users WHERE username = %s)
AND project_id = %s;
"""
query_params = (cherrypy.session['user'], params['project_id'], )
# Append the condition to query
query += query_condition
# Send query to database
self.cur.execute(query, query_params )
# Format applications
applications = format_application_details(fetch=self.cur.fetchall())
return json.dumps(applications, indent=4)
@cherrypy.tools.accept(media="text/plain")
def POST(self, **params):
"""
Accept or Deny an application
Also handle project's members
params: i.e. {'action': 'approve', 'application_id': 'application id', 'notification_id': 'notification id'}
i.e. {'action': 'deny', 'application_id': 'application id', 'notification_id': 'notification id'}
return: {} if successful, {'error': 'some error'} if failed
"""
# Check if everything is provided
if 'action' not in params or \
'application_id' not in params or \
'notification_id' not in params:
return json.dumps ({'error': 'Not enough data'})
# Check if action is allowed
if params['action'] not in self._ACTION['_POST']:
return json.dumps({'error': 'Action is not allowed'})
# Edit the application
status = 'denied'
if params['action'] == 'approve':
status = 'approved'
query = """
CREATE OR REPLACE FUNCTION edit_application()
RETURNS INT AS
$BODY$
DECLARE
in_status VARCHAR;
appID INT;
projectID INT;
applicantID INT;
notificationID INT;
BEGIN
in_status = %s;
appID = %s;
UPDATE applications
SET status = in_status
WHERE id = appID
RETURNING applicant_id, project_id, (SELECT notifications.id
FROM notifications
WHERE notifications.sender_id = applications.id)
INTO applicantID, projectID, notificationID;
IF in_status = 'approved' THEN
INSERT INTO project_members (project_id, member)
VALUES (projectID, (SELECT username FROM users WHERE user_id = applicantID));
END IF;
RETURN notificationID;
END;
$BODY$ LANGUAGE plpgsql;
SELECT edit_application();
"""
self.cur.execute(query, (status, params['application_id'], ))
# Apply changes to database
self.db.connection.commit()
# Grab the returned values from database
fetch = self.cur.fetchone()
notification_id = fetch[0]
# Trigger notification
request_params = {'action': 'delete', 'id': notification_id}
response = requests.post('{0}/api/notifications/'.format(self.domain), params=request_params)
return json.dumps(response.text)
@cherrypy.tools.accept(media="text/plain")
def PUT(self, **params):
"""
Insert a new application and trigger notification
params: i.e. {'action': 'new_application', 'project_id': '1'}
return: {} if successful, {'error': 'some error'} if failed
"""
# Check if everything is provided
if 'action' not in params or \
'project_id' not in params:
return json.dumps({'error': 'Not enough data'})
# Check if action is allowed
if params['action'] not in self._ACTION['_PUT']:
return json.dumps({'error': 'Action is not allowed'})
username = cherrypy.session['user']
# Add new applications to database
# If 'accepted' is not provided, it means that
# the application is still pending
query = """
DROP FUNCTION my_function();
CREATE OR REPLACE FUNCTION my_function()
RETURNS TABLE (sender_id INT, recipient_id INT) AS
$BODY$
DECLARE
projectId INT;
usrId INT;
applicationID INT;
ownerID INT;
BEGIN
projectId = %s;
usrId = (SELECT user_id FROM users WHERE username=%s);
PERFORM id FROM applications WHERE project_id = projectId AND applicant_id = usrId;
IF NOT FOUND THEN
RETURN QUERY
INSERT INTO applications (project_id, applicant_id, date_applied)
VALUES (projectId, usrId, %s)
RETURNING id AS sender_id, (SELECT user_id AS recipient_id
FROM users
WHERE username = (SELECT owner
FROM project_info
WHERE project_id = applications.project_id));
END IF;
END;
$BODY$ LANGUAGE plpgsql;
SELECT * FROM my_function();
"""
dCur = self.db.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
dCur.execute(query, (params['project_id'], username, date.today(), ))
# Grab returned values from database
fetch = dCur.fetchone()
# If the application already exists,
# fetch will not have any value,
# so return an error
if not fetch:
return json.dumps({"error": "This user has already applied for this project!"})
# Trigger notification
request_params = {
'action': 'new_notification',
'type_id': self.notification_type,
'recipient_id': fetch['recipient_id'],
'sender_id': fetch['sender_id']
}
response = requests.put('{0}/api/notifications/'.format(self.domain), params=request_params)
# Apply changes to database
self.db.connection.commit()
return json.dumps(response.text)
@cherrypy.tools.accept(media="text/plain")
def DELETE(self, **params):
return json.dumps({"error": "Currently not supported"})
##########################
# Helper functions #
##########################
def format_application_details(fetch=None, notification=False):
""" Formating applications into a list of dictionary """
application_details = []
for application in fetch:
details = {}
details['application_id'] = application[0]
details['project_id'] = application[1]
details['title'] = application[2]
details['applicant_id'] = application[3]
details['applicant_username'] = application[4]
details['status'] = application[5]
details['date_applied'] = application[6].strftime('%m-%d-%Y')
if notification:
details['notification_id'] = application[7]
application_details.append(details)
return application_details
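# Added note (illustrative values, not part of the original file): each element
# of the returned list looks roughly like
# {'application_id': 7, 'project_id': 5, 'title': 'Some project',
#  'applicant_id': 3, 'applicant_username': 'alice', 'status': 'pending',
#  'date_applied': '01-31-2016'},
# with an extra 'notification_id' key when notification=True.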
|
Python
|
CL
|
fb440f4ac580b62a701e5249a3993a89025b233da4eeac93544967a680a94c5a
|
"""
A series of clustering algorithms for use on an adjacency matrix which is
stored in the form of a pandas DataFrame.
"""
import numpy as np
import pandas as pd
import pyximport; pyximport.install(reload_support=True)
import clustering_c
reload(clustering_c)
def _brute_force(adjacency, hessian):
"""
Use brute force to test for every combination of two clusters
using the function specified by `hessian` as the objective function
"""
a = adjacency.adjacency
n = len(a)
names = list(a.index)
binary = [format(x, '#0%ib' % (n + 2))[2:] for x in range(pow(2, n) // 2)]
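# Note (added): only half of the 2**n label assignments are enumerated above,
# since flipping every label (0 <-> 1) describes the same two-cluster split.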
community_lists = [pd.Series(list(x), index=names).astype(np.int64)
for x in binary]
objective = pd.Series(
{str(c.values):hessian(adjacency, c.values) for c in community_lists})
return objective.sort_values(ascending=False)
if __name__ == "__main__":
# A network with two very obvious communities:
# A, B, C, D and E, F, G, H. A single connection
# from E to D joins the two communities
rows = [[0,1,1,0,0,0,0,0],
[1,0,1,1,0,0,0,0],
[0,1,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0],
[0,0,0,1,0,1,1,0],
[0,0,0,0,1,0,0,1],
[0,0,0,0,0,0,0,1],
[0,0,0,0,1,1,0,0],
]
names = ["A", "B", "C", "D", "E", "F", "G", "H"]
adj = pd.DataFrame(rows).astype(float)
adj.index = names
adj.columns = names
a = clustering_c.Network(adj)
test_communities = pd.Series([0,0,0,0,0,0,0,0], index=names)
modularity = _brute_force(a, clustering_c.modularity)
potts = _brute_force(a, clustering_c.reichardt_bornholdt)
clustering_c.test()
clustering_c.cluster(a)
|
Python
|
CL
|
07bcd69c38df1da2a58dc8fee495e13f46d7e9a62828cac3bcd5df547c8f6b27
|
"""
Crear utilidades matemáticas:
a. Escribir una función a la que se le pasa un número y devuelve una tupla con sus divisores.
b. Se define un número primo como aquel que no tiene más divisores que él mismo y la unidad.
Escribir una función que nos devuelva un True en caso de que ser un número primo.
c. Crear una función a la que se le pasa un límite y nos devuelve una lista con todos los
números primos por debajo de ese límite.
d. Seguir el método de la Criba de Eratóstenes.
e. Escribir una función a la que le vamos a pasar como parámetro un número que indicará una potencia de 10.
Imprimirá la cantidad de primos y el porcentaje de números primos hasta el límite introducido.
f. Escribir una función segmentos_primos(limite, ancho) y devuelva una lista de tuplas que cuente el número
de primos dentro de un rango que irá de ancho en ancho hasta limite.
"""
from math import ceil, sqrt
import time
def divisores(numero):
""" Devuelve una tupla con los divisores de numero
"""
ret = ()
for i in range(1, ceil((numero + 1) / 2)):
if numero % i == 0:
ret += i,
ret += numero,
return ret
def es_primo(numero):
""" Comprueba si el numero es primo o no, devuelve un boolean
"""
loop = 2
if numero < 2:
return False
while loop < ceil(sqrt(numero + 1)):
if numero % loop == 0:
return False
loop += 1
return True
def primos_hasta(numero):
""" Devuelve una lista con todos los primos menores o iguales que numero
"""
ret = []
for i in range(2, numero + 1):
if es_primo(i):
ret.append(i)
return ret
def criba_eratostenes(numero):
""" Devuelve una lista con todos los primos menores o iguales que numero
Usando el método de la Criba de Eratóstenes
"""
primos = [x for x in range(2, numero + 1)]
for index in range(0, (numero + 1) // 2):
primos = criba(index, primos)
return [x for x in primos if x]
def criba(index, lista_criba):
salto = lista_criba[index]
if salto:
for mul in range(index + salto, len(lista_criba), salto):
lista_criba[mul] = False
return lista_criba
def cantidad_primos(incluido, excluido):
""" Devuelve la cantidad de primos comprendidos entre dos valores, el primero
incluído y el segundo excluído del intervalo
"""
ret = 0
for num in range(incluido, excluido):
if es_primo(num):
ret += 1
return ret
def estadistica_primos(potencia_diez):
""" Imprime la cantidad de primos desde 0 hasta 10^potencia y el porcentaje
de éstos dentro de ese intervalo
"""
limite = 10 ** potencia_diez
cantidad = cantidad_primos(2, limite)
print('La cantidad de primos menores que', limite, 'es de', cantidad)
print('En total hay un ', round(cantidad * 100 / limite, 2), "% de primos en el intervalo", sep="")
def segmentos_primos(limite, ancho):
""" Devuelve una lista de tuplas que cuenta el número
de primos dentro de un rango que irá de ancho en ancho hasta limite
"""
ret = []
for cont in range(0, limite, ancho):
izquierda = cont
if izquierda == 0:
izquierda = 1
ret.append((izquierda, cont + ancho, cantidad_primos(cont, cont + ancho)))
return ret
"""print(divisores(22))
print(es_primo(13))
t1 = time.time()
print(primos_hasta(3000))
t2 = time.time()
print(t2 - t1)
print(criba_eratostenes(3000))
print(time.time() - t2)"""
estadistica_primos(3)
print(segmentos_primos(1000, 100))
|
Python
|
CL
|
38be744729f6ccb47aa8e831b3a204e85d4b55a54c5e87ba2c75a1b2c32a8796
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 21 16:02:58 2016
@author: cs390mb
Assignment 2 : Activity Recognition
This is the starter script used to train an activity recognition
classifier on accelerometer data.
See the assignment details for instructions. Basically you will train
a decision tree classifier, vary its parameters and evaluate its
performance by computing the average accuracy, precision and recall
metrics over 10-fold cross-validation. You will then train another
classifier for comparison.
Once you get to part 4 of the assignment, where you will collect your
own data, change the filename to reference the file containing the
data you collected. Then retrain the classifier and choose the best
classifier to save to disk. This will be used in your final system.
Make sure to check the assignment details, since the instructions here are
not complete.
"""
from __future__ import division
from sklearn.linear_model import LogisticRegression
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import svm
from features import extract_features # make sure features.py is in the same directory
from util import slidingWindow, reorient, reset_vars
from sklearn import cross_validation
from sklearn.metrics import confusion_matrix
import pickle
from sklearn.neural_network import MLPClassifier
# %%---------------------------------------------------------------------------
#
# Load Data From Disk
#
# -----------------------------------------------------------------------------
print("Loading data...")
sys.stdout.flush()
data_file = os.path.join('data', 'my-activity-data.csv')
data = np.genfromtxt(data_file, delimiter=',')
print("Loaded {} raw labelled activity data samples.".format(len(data)))
sys.stdout.flush()
# %%---------------------------------------------------------------------------
#
# Pre-processing
#
# -----------------------------------------------------------------------------
print("Reorienting accelerometer data...")
sys.stdout.flush()
reset_vars()
reoriented = np.asarray([reorient(data[i,1], data[i,2], data[i,3]) for i in range(len(data))])
reoriented_data_with_timestamps = np.append(data[:,0:1],reoriented,axis=1)
data = np.append(reoriented_data_with_timestamps, data[:,-1:], axis=1)
# %%---------------------------------------------------------------------------
#
# Extract Features & Labels
#
# -----------------------------------------------------------------------------
# you may want to play around with the window and step sizes
window_size = 20
step_size = 20
# sampling rate for the sample data should be about 25 Hz; take a brief window to confirm this
n_samples = 1000
time_elapsed_seconds = (data[n_samples,0] - data[0,0]) / 1000
sampling_rate = n_samples / time_elapsed_seconds
feature_names = ["mean X", "mean Y", "mean Z", "var X", "var Y", "var Z", "zero crossing rate X", "zero crossing rate Y", "zero crossing rate Z", "magnitude mean", "magnitude var", "X entropy", "Y entropy", "Z entropy", "magnitude entropy"]
class_names = ["Sitting", "Walking", "Running", "Jumping"]
print("Extracting features and labels for window size {} and step size {}...".format(window_size, step_size))
sys.stdout.flush()
n_features = len(feature_names)
X = np.zeros((0,n_features))
y = np.zeros(0,)
for i,window_with_timestamp_and_label in slidingWindow(data, window_size, step_size):
# omit timestamp and label from accelerometer window for feature extraction:
window = window_with_timestamp_and_label[:,1:-1]
# extract features over window:
x = extract_features(window)
# append features:
X = np.append(X, np.reshape(x, (1,-1)), axis=0)
# append label:
y = np.append(y, window_with_timestamp_and_label[10, -1])
print("Finished feature extraction over {} windows".format(len(X)))
print("Unique labels found: {}".format(set(y)))
sys.stdout.flush()
# %%---------------------------------------------------------------------------
#
# Train & Evaluate Classifier
#
# -----------------------------------------------------------------------------
n = len(y)
n_classes = len(class_names)
clf = DecisionTreeClassifier(max_depth=3, max_features=5)
cv = cross_validation.KFold(n, n_folds=10, shuffle=True, random_state=None)
#### Student Implemented
accuracyList = []
precisionList = []
recallList = []
####
for i, (train_indexes, test_indexes) in enumerate(cv):
print("Fold {}".format(i))
#### Student Implemented
clf.fit(X[train_indexes], y[train_indexes])
conf = confusion_matrix(clf.predict(X[test_indexes]), y[test_indexes], labels=range(0, n_classes))
accuracy = sum(sum(np.multiply(conf, np.eye(n_classes)))) / sum(sum(conf))
accuracyList += [accuracy]
precision = [conf[i, i] / sum(conf[:, i]) for i in range(0, n_classes)]
precisionList += [precision]
recall = [conf[i, i] / sum(conf[i, :]) for i in range(0, n_classes)]
recallList += [recall]
####
#### Student Implemented
print "average accuracy:"
print np.nanmean(accuracyList)
print "average precision:"
print np.nanmean(precisionList, axis=0)
print "average recall:"
print np.nanmean(recallList, axis=0)
####
with open('classifier.pickle', 'wb') as f:
pickle.dump(clf, f)
|
Python
|
CL
|
e9a11301e63b102774c55e21bf6a296728677789820e9e06c4932f3816a033c9
|
from string import ascii_uppercase
from reflector import Reflector
from validation import Validator, TypeValidator, RangeValidator
class Rotor(Reflector):
"""
Generic Rotor class
This class simulates a rotor with a custom wiring pattern in the Enigma machine. It mainly provides functions
to perform the bidirectional encoding (left-to-right and right-to-left) and the rotation of the rotor.
Two translation tables are built internally in order to encode a character in either direction:
- right-to-left table: mapping the alphabetic characters from A to Z to the wiring characters
- left-to-right table: mapping the wiring characters to the alphabetic characters from A to Z
:param wiring: The internal rotor wiring composed by 26 alphabetic characters in upper case
:type wiring: str
:param notch: The character corresponding to the notch (default value is None)
:type notch: str
:param position: The character corresponding to the rotor's start position (default value is "A")
:type position: str
:param ring_setting: The one-based rotor's ring setting (default value is 1)
:type ring_setting: int
:raises: :class:`TypeError, ValueError`: If one of the provided parameters is invalid
.. note:: All the class members are ring setting agnostic
Example::
rotor = Rotor("FSOKANUERHMBTIYCWLQPZXVGJD")
assert (rotor.encode_left_to_right("A") == "E")
assert (rotor.encode_right_to_left("A") == "F")
"""
def __init__(self, wiring, notch=None, position="A", ring_setting=1):
super().__init__(wiring)
self.notch = notch
self.position = position
self.ring_setting = ring_setting
self.__trans_table_right_to_left = str.maketrans(ascii_uppercase, self.wiring)
self.__trans_table_left_to_right = str.maketrans(self.wiring, ascii_uppercase)
def reset(self):
"""
Reset the rotor position to "A" and the ring setting to 1
"""
self.position = "A"
self.ring_setting = 1
def rotate(self):
"""
Perform one rotation step of the rotor; it advances the position to the next alphabet character (circular update)
"""
self.position = ascii_uppercase[(self.position + 1) % len(ascii_uppercase)]
def encode_right_to_left(self, char):
"""
Encode the provided character accordingly to the right to left translation table
:param char: The character to encode
:type char: str
:return: The encoded character
:rtype: str
:raises: :class:`TypeError, ValueError`: If the provided character is invalid
"""
self._alpha_character_validator.validate(char)
return char.upper().translate(self.__trans_table_right_to_left)
def encode_left_to_right(self, char):
"""
Encode the provided character accordingly to the left to right translation table
:param char: The character to encode
:type char: str
:return: The encoded character
:rtype: str
:raises: :class:`TypeError, ValueError`: If the provided character is invalid
"""
self._alpha_character_validator.validate(char)
return char.upper().translate(self.__trans_table_left_to_right)
@property
def notch(self):
"""
The rotor's notch
:getter: Returns the 0-based index of the notch's character
:setter: Sets the notch's character
:type: int for the getter, str for the setter
:raises: :class:`TypeError, ValueError`: If the provided notch's character is invalid
"""
return self.__notch
@notch.setter
def notch(self, notch):
if notch is not None:
self._alpha_character_validator.validate(notch)
self.__notch = ascii_uppercase.index(notch)
else:
self.__notch = None
@property
def position(self):
"""
The rotor's position
.. note:: Use :func:`char_position <Rotor.char_position>` to get the position's character
:getter: Returns the 0-based index of the rotor's position
:setter: Sets the rotor's position character
:type: int for the getter, str for the setter
"""
return self.__position
@position.setter
def position(self, position):
self._alpha_character_validator.validate(position)
self.__position = ascii_uppercase.index(position.upper())
@property
def char_position(self):
"""
The rotor's position character
.. note:: Use :func:`position <Rotor.position>` to get the 0-based index
:return: The character corresponding to the current rotor's position
:rtype: str
"""
return ascii_uppercase[self.__position]
@property
def ring_setting(self):
"""
The rotor's ring setting
.. note:: The ring setting allowed range of values is 1-26
:getter: Returns the value of the ring setting
:setter: Sets the value of the ring setting
:type: int
:raises: :class:`TypeError, ValueError`: If the provided ring setting is invalid
"""
return self.__ring_setting
@ring_setting.setter
def ring_setting(self, ring_setting):
Validator(TypeValidator(int), RangeValidator(1, 26)).validate(ring_setting)
self.__ring_setting = ring_setting
@property
def on_notch_position(self):
"""
Check if the rotor is on notch position
:return: True if the current rotor's position is on notch, False otherwise
:rtype: bool
"""
return self.position == self.notch
class RotorI(Rotor):
"""
Rotor I class
This class simulates a rotor with wiring pattern I in the Enigma machine
Example::
rotor = RotorI()
"""
def __init__(self, position="A", ring_setting=1):
super().__init__("EKMFLGDQVZNTOWYHXUSPAIBRCJ", "Q", position, ring_setting)
class RotorII(Rotor):
"""
Rotor II class
This class simulates a rotor with wiring pattern II in the Enigma machine
Example::
rotor = RotorII()
"""
def __init__(self, position="A", ring_setting=1):
super().__init__("AJDKSIRUXBLHWTMCQGZNPYFVOE", "E", position, ring_setting)
class RotorIII(Rotor):
"""
Rotor III class
This class simulates a rotor with wiring pattern III in the Enigma machine
Example::
rotor = RotorIII()
"""
def __init__(self, position="A", ring_setting=1):
super().__init__("BDFHJLCPRTXVZNYEIWGAKMUSQO", "V", position, ring_setting)
class RotorIV(Rotor):
"""
Rotor IV class
This class simulates a rotor with wiring pattern IV in the Enigma machine
Example::
rotor = RotorIV()
"""
def __init__(self, position="A", ring_setting=1):
super().__init__("ESOVPZJAYQUIRHXLNFTGKDCMWB", "J", position, ring_setting)
class RotorV(Rotor):
"""
Rotor V class
This class simulates a rotor with wiring pattern V in the Enigma machine
Example::
rotor = RotorV()
"""
def __init__(self, position="A", ring_setting=1):
super().__init__("VZBRGITYUPSDNHLXAWMJQOFECK", "Z", position, ring_setting)
class RotorBeta(Rotor):
"""
Rotor Beta class
This class simulates a rotor with wiring pattern Beta in the Enigma machine
Example::
rotor = RotorBeta()
"""
def __init__(self, position="A", ring_setting=1):
super().__init__("LEYJVCNIXWPBQMDRTAKZGFUHOS", None, position, ring_setting)
class RotorGamma(Rotor):
"""
Rotor Gamma class
This class simulates a rotor with wiring pattern Gamma in the Enigma machine
Example::
rotor = RotorGamma()
"""
def __init__(self, position="A", ring_setting=1):
super().__init__("FSOKANUERHMBTIYCWLQPZXVGJD", None, position, ring_setting)
def rotor_factory(name):
return globals()[f"Rotor{name}"]()
if __name__ == "__main__":
rotor = Rotor("FSOKANUERHMBTIYCWLQPZXVGJD")
assert (rotor.encode_left_to_right("A") == "E")
assert (rotor.encode_right_to_left("A") == "F")
rotor = RotorI()
assert (rotor.encode_right_to_left("A") == "E")
assert (rotor.encode_left_to_right("A") == "U")
|
Python
|
CL
|
fe569d968308aa13ecd9e200f0aadeabee25c42fa75f13766393247d3e1b9c2b
|
#! /usr/bin/env python
import os
import argparse
import numpy as np
import pandas as pd
from scipy.io import mmread, mmwrite
from scipy.sparse import coo_matrix
from schpf import scHPF
from schpf import load_model, save_model, run_trials, max_pairwise_table
from schpf.preprocessing import load_and_filter, load_coo
def _parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='cmd')
### Preprocess command
prep = subparsers.add_parser('prep')
prep.add_argument('-i', '--input', required=True,
help='Input data. Currently accepts either: (1) a whitespace-'
'delimited gene by cell UMI count matrix with 2 leading columns '
'of gene attributes (ENSEMBL_ID and GENE_NAME respectively), or '
'(2) a loom file with at least one of the row attributes '
'`Accession` or `Gene`, where `Accession` is an ENSEMBL id and '
'`Gene` is the name.'
)
prep.add_argument('-o', '--outdir', required=True,
help='Output directory. Does not need to exist.')
prep.add_argument('-p', '--prefix', default='',
help='Prefix for output files. Optional.')
prep.add_argument('-m', '--min-cells', type=float, default=0.01,
help='Minimum number of cells in which we must observe at '
'least one transcript of a gene for the gene to pass '
'filtering. If 0 <`min_cells`< 1, sets threshold to be '
'`min_cells` * ncells, rounded to the nearest integer.'
' [Default 0.01]')
prep.add_argument('-w', '--whitelist', default='',
help='Tab-delimited file where first column contains ENSEMBL gene '
'ids to accept, and second column contains corresponding gene '
'names. If given, genes not on the whitelist are filtered from '
'the input matrix. Superseded by blacklist. Optional.')
prep.add_argument('-b', '--blacklist', default='',
help='Tab-delimited file where first column contains ENSEMBL gene '
'ids to exclude, and second column is the corresponding gene name. '
'Only performed if file given. Genes on the blacklist are '
'excluded even if they are also on the whitelist. Optional.')
prep.add_argument('--filter-by-gene-name', default=False,
action='store_true', help='Use gene name rather than ENSEMBL '
'id to filter (with whitelist or blacklist). Useful for '
'datasets where only gene symbols are given. Applies to both '
'whitelist and blacklist. Used by default when input is a loom '
'file.')
prep.add_argument('--no-split-on-dot', default=False, action='store_true',
help='Don\'t split gene symbol or name on period before '
'filtering white and blacklist. We do this by default for '
'ENSEMBL ids.')
###### Train command
train = subparsers.add_parser('train')
# data and saving
train.add_argument('-i', '--input', required=True,
help="Training data. Expects either the mtx file output by the "
"prep command or a tab-separated tsv file formatted like:"
"`CELL_ID\tGENE_ID\tUMI_COUNT`. In the later case, ids are "
"assumed to be 0 indexed and we assume no duplicates."
)
train.add_argument('-o', '--outdir', required=True,
help='Output directory for scHPF model. Will be created if does '
'not exist.')
train.add_argument('-p', '--prefix', default='',
help='Prefix for output files. Optional.')
# Required model hyperparameter
train.add_argument('-k', '--nfactors', type=int, required=True,
help='Number of factors.')
train.add_argument('-a', '--a-theta-shape', type=float, default=0.3,
help='Shape of theta distribution (a) - close to 0 for sparsity.')
train.add_argument('-c', '--c-beta-shape', type=float, default=0.3,
help='Shape of beta distribution (c) - close to 0 for sparsity.')
# training parameters
train.add_argument('-t', '--ntrials', type=int, default=1,
help='Number of times to run scHPF, selecting the trial with '
'best loss (on training data unless validation is given).'
' [Default 1]')
train.add_argument('-v', '--validation',
help='Validation data of selected nonzero entries. Must have same '
'format (either mtx or tsv) as input. [Default None]' )
train.add_argument('-M', '--max-iter', type=int, default=1000,
help='Maximum iterations. [Default 1000].')
train.add_argument('-m', '--min-iter', type=int, default=30,
help='Minimum iterations. [Default 30]')
train.add_argument('-e', '--epsilon', type=float, default=0.001,
help='Minimum percent decrease in loss between checks to continue '
'inference (convergence criteria). [Default 0.001].')
train.add_argument('-f', '--check_freq', type=int, default=10,
help='Number of iterations to run between convergence checks. '
'[Default 10].')
train.add_argument('--better-than-n-ago', default=5, type=int,
help= 'Stop condition if loss is getting worse. Stops training '
'if loss is worse than `better_than_n_ago`*`check_freq` training '
'steps ago and getting worse. Normally not necessary to change.')
train.add_argument('--quiet', dest='verbose', action='store_false',
default=True, help="Don't print intermediate llh.")
train.add_argument('--float32', action='store_true',
help="Use 32-bit floats instead of default 64-bit floats in "
"variational distrubtions")
### Score command
score = subparsers.add_parser('score')
score.add_argument('-m', '--model', required=True,
help='Saved scHPF model from train command. Should have extension '
'`.joblib`')
score.add_argument('-o', '--outdir', required=True,
help='Output directory for score files')
score.add_argument('-p', '--prefix', default='',
help='Prefix for output files. Optional.')
score.add_argument('-g', '--genefile', default=None,
help='Create an additional file with gene names ranked by score '
'for each factor. Expects the gene.txt file output by the scHPF '
'prep command or a similarly formatted tab-delimited file without '
'headers. Uses the zero-indexed `--name_col`\'th column as gene '
'names. Optional.')
score.add_argument('--name-col', type=int, default=1,
help='The zero-indexed column of `genefile` to use as a gene name '
'when (optionally) ranking genes. If `--name_col` is greater than '
'the index of `genefile`\'s last column, it is automatically reset '
'to the last column\'s index. [Default 1]'
)
return parser
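# Added usage sketch (hypothetical file and script names; the flags are the
# ones defined in _parser() above, assuming the script is invoked directly):
#   python schpf_cli.py prep -i umi_counts.txt -o ./prepped -m 0.01
#   python schpf_cli.py train -i ./prepped/train.mtx -o ./models -k 10 -t 5
#   python schpf_cli.py score -m ./models/scHPF_K10_epsilon0.001_5trials.joblib -o ./scores -g ./prepped/genes.txt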
if __name__=='__main__':
parser = _parser()
args = parser.parse_args()
if not os.path.exists(args.outdir):
print("Creating output directory {} ".format(args.outdir))
os.makedirs(args.outdir)
if args.cmd == 'prep':
filtered, genes = load_and_filter(args.input,
min_cells=args.min_cells,
whitelist=args.whitelist,
blacklist=args.blacklist,
filter_by_gene_name=args.filter_by_gene_name,
no_split_on_dot=args.no_split_on_dot)
print('Writing filtered data to file......')
prefix = args.prefix.rstrip('.') + '.' if len(args.prefix) > 0 else ''
outprefix = '{}/{}'.format(args.outdir, prefix)
mmwrite('{}train.mtx'.format(outprefix), filtered, field='integer')
genes.to_csv('{}genes.txt'.format(outprefix), sep='\t', header=None,
index=None)
elif args.cmd == 'train':
# load data
print( 'Loading data......' )
load_fnc = mmread if args.input.endswith('.mtx') else load_coo
train = load_fnc(args.input)
if args.validation is not None:
vdata = load_fnc(args.validation)
else:
vdata = None
ncells, ngenes = train.shape
msg = '......found {} cells and {} genes'.format(ncells, ngenes)
print(msg)
# create model
print('Running trials......' )
dtype = np.float32 if args.float32 else np.float64
model = run_trials(train,
nfactors=args.nfactors, ntrials=args.ntrials,
min_iter=args.min_iter, max_iter=args.max_iter,
check_freq=args.check_freq, epsilon=args.epsilon,
better_than_n_ago=args.better_than_n_ago,
a=args.a_theta_shape,c=args.c_beta_shape,
dtype=dtype, verbose=args.verbose,
validation_data=vdata)
# save the model
print('Saving best model......')
prefix = args.prefix.rstrip('.') + '.' if len(args.prefix) > 0 else ''
outprefix = '{}/{}'.format(args.outdir, prefix)
outname = '{}scHPF_K{}_epsilon{}_{}trials.joblib'.format(
outprefix, args.nfactors, args.epsilon, args.ntrials)
save_model(model, outname)
# save Poisson log-likelihood
pll = model.pois_llh(X = train)
np.savetxt(outname + '_pll', np.asarray([pll]), delimiter=",")
print('\n')
elif args.cmd == 'score':
print('Loading model......')
model = load_model(args.model)
print('Calculating scores......')
cell_score = model.cell_score()
gene_score = model.gene_score()
print('Saving scores......')
prefix = args.prefix.rstrip('.') + '.' if len(args.prefix) > 0 else ''
outprefix = '{}/{}'.format(args.outdir, prefix)
np.savetxt(outprefix + 'cell_score.txt', cell_score, delimiter='\t')
np.savetxt(outprefix + 'gene_score.txt', gene_score, delimiter='\t')
print('Calculating maximum pairwise overlaps......')
table = max_pairwise_table(gene_score,
ntop_list=[50,100,150,200,250,300])
table.to_csv(outprefix + 'maximum_overlaps.txt', sep='\t', index=False)
if args.genefile is not None:
print('Ranking genes......')
# load and format gene file
genes = np.loadtxt(args.genefile, delimiter='\t', dtype=str)
if len(genes.shape) == 1:
genes = genes[:,None]
# get column to use for gene names
last_col = genes.shape[1] - 1
name_col = last_col if args.name_col > last_col else args.name_col
print('......using {}\'th column of genefile as gene label'.format(
name_col))
# rank the genes by gene_score
ranks = np.argsort(gene_score, axis=0)[::-1]
ranked_genes = []
for i in range(gene_score.shape[1]):
ranked_genes.append(genes[ranks[:,i], name_col])
ranked_genes = np.stack(ranked_genes).T
print('Saving ranked genes......')
np.savetxt(outprefix + 'ranked_genes.txt', ranked_genes,
fmt="%s", delimiter='\t')
|
Python
|
CL
|
c4b72bd330a7cb739a41bee1e84c2eb7fa353c2ae43ee3c7329c8700e106639c
|
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Imprort resource action.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from enthought.pyface.api import ImageResource, OK
from enthought.pyface.action.api import Action
from puddle.resource.wizard.wizard_selection_wizard \
import WizardSelectionWizard
from puddle.resource.resource_plugin import IMPORT_WIZARDS
from common import IMAGE_LOCATION
#------------------------------------------------------------------------------
# "ImportAction" class:
#------------------------------------------------------------------------------
class ImportAction(Action):
""" Defines an action that opens the import wizard selection wizard.
"""
#--------------------------------------------------------------------------
# "Action" interface:
#--------------------------------------------------------------------------
# A longer description of the action:
description = "Import resources to the workspace"
# The action"s name (displayed on menus/tool bar tools etc):
name = "&Import..."
# A short description of the action used for tooltip text etc:
tooltip = "Import resources"
# The action's image (displayed on tool bar tools etc):
image = ImageResource("import", search_path=[IMAGE_LOCATION])
#--------------------------------------------------------------------------
# "Action" interface:
#--------------------------------------------------------------------------
def perform(self, event):
""" Performs the action.
"""
# Get all contributed import wizards
contrib = self.window.application.get_extensions(IMPORT_WIZARDS)
# Instantiate the contributed classes
wizards = [wizard() for wizard in contrib]
# Create the wizard...
wizard = WizardSelectionWizard( parent=self.window.control,
window=self.window, wizards=wizards, title="Import" )
# ...open the wizard.
if wizard.open() == OK:
wizard.next_wizard.finished = True
return
# # Get all contributed new element wizards
# app = self.window.workbench.application
# contrib = app.get_extensions(IMPORT_WIZARDS)
#
# # Ensure they are not instantiated
# wizards = []
# for factory_or_wizard in contrib:
# if not isinstance(factory_or_wizard, WizardContribution):
# wizard = factory_or_wizard()
# else:
# logger.warn(
# "DEPRECATED: contribute wizard classes or "
# "factories - not wizard instances."
# )
#
# wizard = factory_or_wizard
# wizards.append(wizard)
#
# # Create the selection page...
# wswp = WizardSelectionPage(
# wizards=wizards, id="wizard_selection"
# )
# wswp.on_trait_change(self.on_wizard_changed, "wizard")
#
# # ...add it to the a wizard...
# self.wizard = wizard = ChainedWizard(
# parent=self.window.control, title="New",
# pages=[wswp]
# )
#
# # ...open the wizard.
# wizard.open()
#
# return
# def on_wizard_changed(self, new):
#
# if new is not None:
# app = self.window.application
# wizard_klass = app.import_symbol(new.wizard_class)
#
# workspace = self.window.application.get_service(WORKSPACE_SERVICE)
#
# self.wizard.next_wizard = wizard_klass(
# parent=None, workspace=workspace
# )
# EOF -------------------------------------------------------------------------
|
Python
|
CL
|
7a618d73e2be6c72f633be0dd1a93db0d236059a337c563ec2f11b6e1a79134c
|
# tracks taken from geneset comparison
# need to be integrated
from RnaseqReport import *
class AnnotationsAssociated:
pass
##########################################################################
##########################################################################
##########################################################################
# Coverage of transcript models
##########################################################################
class ContaminationCoverage(AnnotationsAssociated):
"""Check for contamination by listing transcript models with reads ."""
pattern = "(.*)_coverage$"
mColumns = "count(*) as total, SUM(CASE WHEN nmatches > 1 THEN 1 ELSE 0 END) AS hico"
mTable = "coverage"
def __call__(self, track, slice=None):
statement = self.getStatement(track, slice)
if not statement:
return []
data = self.getFirstRow(statement)
return odict(list(zip(("nmatches > 1", "nmatches = 1"),
(data[1], data[0] - data[1]))))
##########################################################################
##########################################################################
##########################################################################
# Coverage of transcript models
##########################################################################
class PolyATailCounts(AnnotationsAssociated):
"""Check for contamination by listing transcript models with reads ."""
pattern = "(.*)_polyA$"
mColumns = "COUNT(*) AS total, SUM(nmotifs) AS motifs, SUM(tails) AS tails"
mTable = "polyA"
def __call__(self, track, slice=None):
statement = self.getStatement(track, slice)
if not statement:
return []
data = self.getFirstRow(statement)
return odict(list(zip(("no tail", "with motif", "without motif"),
(data[0] - data[2], data[1], data[2] - data[1]))))
##########################################################################
##########################################################################
##########################################################################
# Contamination and repeats
##########################################################################
class ContaminationRepeats(TrackerSQL):
"""Estimate contamination based on the overlap with repeats.
repeats
number of bases in repeats
genome
number of bases in the genome
prepeats
proportion of bases in repeats
repeat_overlap
number of bases in unknown transcript models overlapping repeats
length
number of bases in unknown transcript models
poverlap
proportion of bases in unknown transcript models overlapping repeats
nspliced_ovl
number of unknown transcript models with introns that overlap repeats
nspliced
number of unknown transcript models with introns
pspliced
proportion of unknown transcript models with introns that overlap
repeats
"""
pattern = "(.*)_repeats$"
def getTracks(self, subset=None):
return [x for x in TrackerSQL.getTracks(self, subset)
if "_vs" not in x]
def __call__(self, track, slice=None):
genome_size = self.getValue("SELECT SUM(length) FROM repeats_table")
repeats_size = self.getValue(
"SELECT SUM(nover_bases) FROM repeats_table")
novl_repeats = self.getValue(
"SELECT SUM(nover) FROM %(track)s_repeats as r, %(track)s_annotation as a where a.gene_id = r.gene_id and is_unknown" % locals())
nlength = self.getValue(
"SELECT SUM(exons_sum) FROM %(track)s_repeats as r, %(track)s_annotation as a where a.gene_id = r.gene_id and is_unknown" % locals())
nspliced = self.getValue(
"SELECT COUNT(*) FROM %(track)s_repeats as r, %(track)s_annotation as a where a.gene_id = r.gene_id and is_unknown AND exons_nval > 1" % locals())
nspliced_ovl_repeats = self.getValue(
"SELECT COUNT(*) FROM %(track)s_repeats as r, %(track)s_annotation as a where a.gene_id = r.gene_id and is_unknown AND exons_nval > 1 AND nover > 0" % locals())
if novl_repeats is None:
novl_repeats, nlength, nspliced, nspliced_ovl_repeats = 0, 0, 0, 0
return odict((("repeats", repeats_size),
("genome", genome_size),
("prepeats", prettyPercent(repeats_size, genome_size)),
("repeat_overlap", novl_repeats),
("length", nlength),
("poverlap", prettyPercent(novl_repeats, nlength)),
("pcont", prettyPercent(
novl_repeats * genome_size, nlength * repeats_size)),
("nspliced_ovl", nspliced_ovl_repeats),
("nspliced", nspliced),
("pspliced_ovl", prettyPercent(
nspliced_ovl_repeats, nspliced)),
))
|
Python
|
CL
|
d0b274fbde00c8b9abce8c7848b8556eacbf4eeeff137aaee8ce97394eeab42b
|
############################
# Setup
############################
import DotsnBoxesGame as env # game environment
from math import log
from random import choice
from os import path
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import load_model
from datetime import date
import matplotlib.pyplot as plt
version_model = 3
model_storage = "models/qlearner"+str(version_model)+".h5"
GRAPHIC_MODE = False
env = env.Game(graphic_mode = GRAPHIC_MODE, delay = 0.01)
IS_PLAYER1 = True
# Configuration paramaters for the whole setup
NB_ROUND_MAX = 500
REWARD_ENOUGH = 75 # max 154 points => train up to 100 to begin with
NB_GAMES_TO_WIN = 20 # number of games over which the playing level is averaged
gamma = 0.7 # Discount factor for past rewards
epsilon = 1.0 # Epsilon greedy parameter
epsilon_min = 0.1 # Minimum epsilon greedy parameter
epsilon_max = 1.0 # Maximum epsilon greedy parameter
epsilon_interval = (epsilon_max - epsilon_min) # Rate at which to reduce chance of random action being taken
batch_size = 64 # Size of batch taken from replay buffer
############################
# Implement the Deep Q-Network
############################
num_actions = 112#len(env.get_id_moves_remaining()) #112
def create_q_model():
inputs = layers.Input(shape=(1,num_actions,), name='inputs')
# x = layers.Conv1D(num_actions, 3, activation='relu', kernel_initializer='glorot_uniform')(inputs)
# x = layers.GlobalAveragePooling1D()(x)
x = layers.LSTM(num_actions*4, activation='relu', kernel_initializer='glorot_uniform', return_sequences=True)(inputs)
x = layers.LSTM(num_actions*3, activation='relu', kernel_initializer='glorot_uniform')(x)
x = layers.Dense(num_actions*2, activation='relu', kernel_initializer='glorot_uniform')(x)
x = layers.Dense(num_actions*2, activation='relu', kernel_initializer='glorot_uniform')(x)
action = layers.Dense(num_actions, activation='softmax', kernel_initializer='glorot_uniform', name='q_values')(x)
return keras.Model(inputs = inputs, outputs = action)
# # Network defined by the Deepmind paper
# inputs = layers.Input(shape=(84, 84, 4,)) # 2D images of 84*84 pixels, encoded over 4 values
# # Convolutions on the frames on the screen
# layer1 = layers.Conv2D(32, 8, strides=4, activation="relu")(inputs)
# layer2 = layers.Conv2D(64, 4, strides=2, activation="relu")(layer1)
# layer3 = layers.Conv2D(64, 3, strides=1, activation="relu")(layer2)
# layer4 = layers.Flatten()(layer3)
# layer5 = layers.Dense(512, activation="relu")(layer4)
# action = layers.Dense(num_actions, activation="linear")(layer5) # num_actions, so 4 outputs in an array => result = model()[0] => result = [a, b, c, d]
# return keras.Model(inputs=inputs, outputs=action)
# The first model makes the predictions for Q-values, which are used to choose an action.
#model = create_q_model()
# Build a target model for the prediction of future rewards.
# The weights of a target model get updated every 10000 steps thus when the
# loss between the Q-values is calculated the target Q-value is stable.
#model_target = create_q_model()
if path.isfile(model_storage):
print("\nload models")
model = load_model(model_storage)
model_target = load_model(model_storage)
else:
print("\ncreate models")
model = create_q_model()
model_target = create_q_model()
print("\n")
############################
# Train
############################
# In the Deepmind paper they use RMSProp; however, the Adam optimizer improves training time
optimizer = keras.optimizers.Adam(learning_rate=0.0025, clipnorm=1.0)
# Experience replay buffers
action_history = [] # action chosen, for (at most) the last 100000 moves
board_history = [] # board before the move, for the last 100000 moves
board_next_history = [] # board after the move, for the last 100000 moves
rewards_history = [] # reward of the move, for the last 100000 moves
end_game_history = [] # whether the move ended the game, for the last 100000 moves
game_reward_history = [] # total reward of (at most) the last 100 games
running_reward = 0 # mean of the total rewards over the last 100 games
move_count = 0 # number of moves played, across all games
# Number of moves to take random action and observe output
epsilon_random_moves = 5000 #50000 # max number of random moves to learn from before letting the AI propose its own moves
# Number of moves for exploration
epsilon_greedy_moves = 100000.0 #1000000.0 # epsilon decay divisor: the higher it is, the longer before the AI gets to try its own moves
# Maximum replay length
# Note: The Deepmind paper suggests 1000000; however, this causes memory issues
max_memory_length = 10000 #100000 # size at which the *_history buffers are trimmed (except game_reward_history)
# Train the model after 4 actions
update_after_actions = 4 # do not update the model on every move
# How often to update the target network
update_target_network = 1000 #10000 # update the second model even less often (a stabilizer, in charge of estimating the rewards at move + 2)
# Using mean squared error loss (Huber loss, suggested for stability, is kept below as an alternative)
loss_function = keras.losses.MeanSquaredError()
#loss_function = keras.losses.Huber()
reward_evolution = []
def compute_ai_reward(reward_move, reward_available):
# penalty when points are scored: 0 if none closable, 1 if 63
# penalty when no points are scored: 1 if none closable, 0 if 63
# return: 1,2,4,6,8,14,16 - penalty
return (2 * reward_move if reward_move > 0 else 1) - log(1 + reward_available / 7)
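# Added worked example of the shaping above (illustrative values): a move that
# closes boxes worth 2 points with 7 boxes still closable gives
# 2*2 - log(1 + 7/7) = 4 - log(2) ~= 3.31, while a move that scores nothing
# with no box left closable gives 1 - log(1 + 0/7) = 1.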
def normalize_lineboard(lineboard):
return np.array([int(x != env.OWNER_NONE) for x in lineboard]).reshape(-1, 112)
for game_count in range(NB_ROUND_MAX): # max number of games
print(f"\n\nGame {game_count} :")
lineboard = env.init_game()
game_reward = 0 # sum of the rewards of this game
if GRAPHIC_MODE:
env.disp_board()
while True: # 112 moves per game
if move_count < epsilon_random_moves or epsilon > np.random.rand(1)[0]: # the first games are random; afterwards random moves become rarer
action = choice(env.get_id_moves_remaining())
else: # prediction by the first model
# Predict action Q-values from environment board
# board_tensor = np.array([int(x != env.OWNER_NONE) for x in lineboard]).reshape(1,-1)
board_tensor = np.array([int(x != env.OWNER_NONE) for x in lineboard]).reshape(1,-1, 112)
# print(board_tensor.shape)
#board_tensor = tf.expand_dims(tf.convert_to_tensor(board), 0) # normalizes the input (image) for the model
action_probs = np.array(model(board_tensor, training=False)[0]) # no .predict?
# discard wrong moves
action_probs[[i for i in range(len(action_probs)) if lineboard[i] != env.OWNER_NONE]] = -99999
# Take best action
action = np.argmax(action_probs) # highest value
# Decay probability of taking random action
epsilon = max(epsilon_min, epsilon - epsilon_interval / epsilon_greedy_moves) # reduce randomness down to the minimum value
# Apply the sampled action in our environment
is_player1, lineboard_next, reward, end_game = env.decide_and_move(action)
if GRAPHIC_MODE:
env.disp_board()
if is_player1 == IS_PLAYER1:
move_count += 1 # increment the number of moves played
#reward = compute_ai_reward(reward, env.nb_boxes_closable())
game_reward += reward # accumulate the rewards of this game's moves
game_reward /= 2 # average of the two previous values
# Save actions and boards in replay buffer
action_history.append(action)
board_history.append(normalize_lineboard(lineboard))
board_next_history.append(normalize_lineboard(lineboard_next))
rewards_history.append(reward)
end_game_history.append(end_game)
# Update every fourth move and once batch size is over 32
if move_count % update_after_actions == 0 and len(end_game_history) > batch_size: # if there is enough data, update model 1 every 4 moves
# Get indices of samples for replay buffers
indices = np.random.choice(range(len(end_game_history)), size=batch_size) # select a random sample
# Using list comprehension to sample from replay buffer
board_sample = np.array([board_history[i] for i in indices])
board_next_sample = np.array([board_next_history[i] for i in indices])
rewards_sample = [rewards_history[i] for i in indices]
action_sample = [action_history[i] for i in indices]
end_game_sample = tf.convert_to_tensor([float(end_game_history[i]) for i in indices])
# Build the updated Q-values for the sampled future boards
# Use the target model for stability
future_rewards = model_target.predict(board_next_sample)#, batch_size=batch_size) # predict with model 2 the rewards at board + 2
# Q value = reward + discount factor * expected future reward
updated_q_values = rewards_sample + gamma * tf.reduce_max(future_rewards, axis=1) # weight the rewards at board + 1 with the highest reward at board + 2
# If final move set the last value to -1
# if end_game_sample: something?
updated_q_values = updated_q_values * (1 - end_game_sample) - end_game_sample # end_game is a boolean, so 0 => updated_q_values and 1 => -1; may need adapting
# Create a mask so we only calculate loss on the updated Q-values
masks = tf.one_hot(action_sample, num_actions) # creates a matrix of action_sample by num_actions... ?
with tf.GradientTape() as tape: # records each gradient like a file?
# Train the model on the boards and updated Q-values
q_values = model(board_sample) # get the q-values for the current board state
print(q_values)
# Apply the masks to the Q-values to get the Q-value for action taken
q_action = tf.reduce_sum(tf.multiply(q_values, masks), axis=1) # matrix sum of a matrix multiplication of... what?
# Calculate loss between new Q-value and old Q-value
loss = loss_function(updated_q_values, q_action) # uses keras.losses.Huber with y_pred = the q-value of model 1 on the current board state and y_true = the q-value of model 2 on the next board state ????
# Backpropagation
grads = tape.gradient(loss, model.trainable_variables) # uses tape outside the previous block??? to compute the gradients of model 1's variables = its neural weights?
optimizer.apply_gradients(zip(grads, model.trainable_variables)) # keras.optimizers.Adam(learning_rate=0.00025, clipnorm=1.0) to apply the previously computed gradients through the optimizer?
if move_count % update_target_network == 0: # every update_target_network moves, update model 2
# update the target network with new weights
model_target.set_weights(model.get_weights()) # update model 2
# Log details
print(f"running reward: {running_reward:.2f} at game {game_count}, move count {move_count}")
# Limit the lineboard and reward history
if len(rewards_history) > max_memory_length:
del rewards_history[0]
del board_history[0]
del board_next_history[0]
del action_history[0]
del end_game_history[0]
if end_game:
results = env.log_results()
print(f"game reward : {game_reward:.2f}")
reward_evolution.append(results[0 if IS_PLAYER1 else 1])
break
lineboard = lineboard_next # refresh the board held in memory
if GRAPHIC_MODE:
env.disp_board()
### check the AI's quality via its rewards: when it is strong enough, end the training
# in our case, replace this with a win percentage?
# Update running reward to check condition for solving
game_reward_history.append(game_reward)
if len(game_reward_history) > NB_GAMES_TO_WIN:
del game_reward_history[0]
running_reward = np.mean(game_reward_history)
game_count += 1
print(f"End of game - mean level : {running_reward:.3f}")
if running_reward > REWARD_ENOUGH:
print("Solved!")
break
# save progression
model.save(model_storage)
#display result graph
plt.plot([x for x in range(len(reward_evolution))], reward_evolution, c = 'orange')
plt.scatter([x for x in range(len(reward_evolution))], reward_evolution, c = 'red', marker = 'x')
plt.title("Evolution du score obtenu")
plt.ylim(0,65)
plt.savefig(f"training{version_model}.png")
|
Python
|
CL
|
b098adb1e1c3d9041de68232ddcb391c99a9b609865b1d04adace45e8f562ebc
|
#!/usr/bin/env python
# coding:utf-8
"""
descriptor
"""
"""
definition(short version):
Descriptors are objects with any of __get__() , __set__() , or __delete__().
These descriptor objects can be used as attributes on other class definitions.
definition:
In general, a descriptor is an object attribute with "binding behavior",
one whose attribute access has been overridden by methods in the descriptor protocol.
Those methods are __get__(), __set__(), and __delete__().
If any of those methods are defined for an object, it is said to be a descriptor.
"""
"""
What Are Descriptors?
A descriptor is an object with any of the following methods (__get__() , __set__() , or __delete__() ),
intended to be used via dotted-lookup as if it were a typical attribute of an instance.
For an owner-object, [obj_instance], with a [descriptor] object:
descriptor.__get__(self, obj_instance, owner_class) (returning a value)
is invoked by
obj_instance.descriptor
descriptor.__set__(self, obj_instance, value) (returning None)
is invoked by
obj_instance.descriptor = value
descriptor.__delete__(self, obj_instance) (returning None)
is invoked by
del obj_instance.descriptor
obj_instance is the instance whose [class] contains the descriptor object's [instance].
self is the [instance] of the descriptor (probably just one for the class of the obj_instance)
"""
"""
Descriptor Protocol
descr.__get__(self, obj, type=None) --> value
descr.__set__(self, obj, value) --> None
descr.__delete__(self, obj) --> None
That is all there is to it. Define any of these methods and an object is considered a descriptor
and can override default behavior upon being looked up as an attribute.
If an object defines both __get__() and __set__(), it is considered a [data descriptor] .
Descriptors that only define __get__() are called [non-data descriptors] .
(they are typically used for methods but other uses are possible).
Data and non-data descriptors differ in how overrides are calculated with respect to entries in an instance’s dictionary.
If an instance’s dictionary has an entry with the same name as a data descriptor, the data descriptor takes precedence.
If an instance’s dictionary has an entry with the same name as a non-data descriptor, the dictionary entry takes precedence.
To make a read-only data descriptor, define both __get__() and __set__() with the __set__() raising an AttributeError when called.
Defining the __set__() method with an exception raising placeholder is enough to make it a data descriptor.
"""
print '-------------------------------------------------------------------------------------------------------'
"""
Definition:
A descriptor is an instance (object) of a class that implements the __get__(), __set__(), or __delete__() methods.
Any object of a class that implements one or more of __get__(), __set__(), __delete__() is a descriptor object.
"""
"""
In one sentence: a descriptor is a reusable property.
property: disguises a function call as attribute access. Drawback: it cannot be reused.
descriptor: an upgraded version of property that lets repeated property logic be handled by a separate class. Advantage: it can be reused.
"""
print '-------------------------------------------------------------------------------------------------------'
class NonNegative(object):
def __init__(self):
"""
Each NonNegative instance maintains a dictionary that maps [owner instances] to [their corresponding data].
When we access instance.attr,
the __get__ method looks up the data associated with instance and returns the result.
When we execute instance.attr = xxx,
the __set__ method works in the same way, except that it performs an extra non-negativity check.
"""
self.dict = dict()
pass
def __get__(self, instance, owner):
print '(descriptor get)(instance = %s)(owner = %s) %s' % (instance, owner, self.dict.get(instance))
return self.dict.get(instance)
def __set__(self, instance, value):
print '(descriptor set)(instance = %s)(value = %s)' % (instance, value)
if value < 0:
raise ValueError('value can not be negative')
self.dict[instance] = value
class Math(object):
"""
    The NonNegative instances
    simulate instance attributes entirely through class attributes,
    so the instance attributes do not actually exist at all.
"""
    pid = NonNegative()    # descriptor object (class attribute of Math)
    score = NonNegative()  # descriptor object (class attribute of Math)
def __init__(self, pid, score):
"""
        By assigning to the class attributes directly inside __init__(), we simulate initializing instance attributes.
"""
"""
        No instance attributes pid and score are created here; instead the class attributes Math.pid and Math.score are invoked.
"""
self.pid = pid
self.score = score
def check(self):
if self.score >= 60:
print 'PASS'
else:
print 'FAIL'
print '-------------------------------------------------------------------------------------------------------'
print Math.score
# (descriptor get)(instance = None)(owner = <class '__main__.Math'>) None
# None
print Math.pid
# (descriptor get)(instance = None)(owner = <class '__main__.Math'>) None
# None
print '-------------------------------------------------------------------------------------------------------'
s1 = Math(1, 90)
# (descriptor set)(instance = <__main__.Math object at 0x10f177c10>)(value = 1)
# (descriptor set)(instance = <__main__.Math object at 0x10f177c10>)(value = 90)
s1.score
# (descriptor get)(instance = <__main__.Math object at 0x10f177c10>)(owner = <class '__main__.Math'>) 90
s1.score = 61
# (descriptor set)(instance = <__main__.Math object at 0x10f177c10>)(value = 61)
s1.check()
# (descriptor get)(instance = <__main__.Math object at 0x10f177c10>)(owner = <class '__main__.Math'>) 61
# PASS
s1.score = 59
# (descriptor set)(instance = <__main__.Math object at 0x10f177c10>)(value = 59)
s1.check()
# (descriptor get)(instance = <__main__.Math object at 0x10f177c10>)(owner = <class '__main__.Math'>) 59
# FAIL
print '-------------------------------------------------------------------------------------------------------'
s2 = Math(2, 59)
# (descriptor set)(instance = <__main__.Math object at 0x10f177c50>)(value = 2)
# (descriptor set)(instance = <__main__.Math object at 0x10f177c50>)(value = 59)
s2.score = 50
# (descriptor set)(instance = <__main__.Math object at 0x10f177c50>)(value = 50)
s2.check()
# (descriptor get)(instance = <__main__.Math object at 0x10f177c50>)(owner = <class '__main__.Math'>) 50
# FAIL
s2.score = 99
# (descriptor set)(instance = <__main__.Math object at 0x10f177c50>)(value = 99)
s2.check()
# (descriptor get)(instance = <__main__.Math object at 0x10f177c50>)(owner = <class '__main__.Math'>) 99
# PASS
print '-------------------------------------------------------------------------------------------------------'
"""
Descriptor tips and pitfalls
[1] Descriptors must live at the class level (the [descriptor instance] is a [class attribute] of the [owner class]).
[2] Make sure each instance's data belongs to that instance only, rather than all instances sharing the same data (this is why NonNegative keeps a data dictionary).
[3] Beware of unhashable descriptor owners (NonNegative uses a dictionary to store per-instance data separately; this is generally fine, unless you use unhashable objects).
[4] Accessing the descriptor's methods: descriptors are just classes, so you can add extra methods to them (for example, a descriptor is a good place to hang callbacks around a property).
"""
|
Python
|
CL
|
539a02e0814b1431fe9aa11cdafc33e0c1f2d8202c2e36b215448cf82dacfea2
|
import os
from gensim.models import Word2Vec
import keras.backend as K
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, LSTM, Input
from numpy.random import normal, randint
import numpy as np
from utils2 import get_timestamp, train_model, test_model
from gradient_reversal_keras_tf.flipGradientTF import GradientReversal
# line_counts = {
# "../data/Apps_for_Android_5.json.gz": 752_937,
# "../data/CDs_and_Vinyl_5.json.gz": 1_097_592,
# "../data/Electronics_5.json.gz": 1_689_188,
# "../data/Kindle_Store_5.json.gz": 982_619,
# "../data/Movies_and_TV_5.json.gz": 1_697_533
# }
line_counts = {
"../data/Apps_for_Android_5.json.gz": 100_000,
"../data/CDs_and_Vinyl_5.json.gz": 100_000,
"../data/Electronics_5.json.gz": 100_000,
"../data/Kindle_Store_5.json.gz": 100_000,
"../data/Movies_and_TV_5.json.gz": 100_000,
}
BIN_DROP = 0.1
epochs = 1
batch_size = 100
test_percent = 0.3
w2v_model = Word2Vec.load("../models/w2v_5dom.model")
train_files = list(line_counts.keys())
source_domain = "../data/Movies_and_TV_5.json.gz"
target_domains = [i for i in train_files if i != source_domain]
data_folder = "../data/"
def domain_name_from_file(fname: str) -> str:
part = fname.split("_5")[0]
return part.split("/")[-1]
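# The *_noise helpers below wrap a (X, y) batch generator and corrupt X, either with
# additive Gaussian noise or by zeroing a random BIN_DROP fraction of its entries;
# when ae=True they yield autoencoder-style (corrupted input, reconstruction target)
# pairs instead of (X, y), for denoising-autoencoder pre-training.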
def gaussian_noise(generator, ae: bool):
def noise_gen():
for X, y in generator:
if ae:
yield X + normal(0, 0.02, X.shape), X
else:
yield X + normal(0, 0.02, X.shape), y
return noise_gen()
def binary_noise(generator, ae: bool):
def noise_gen():
for X, y in generator:
for n, matr in enumerate(X):
res = np.ravel(matr)
indices = randint(0, len(res), size=int(BIN_DROP * len(res)))
res[indices] = 0
X[n] = res.reshape(matr.shape)
if ae:
yield X, X
else:
yield X, y
return noise_gen()
def create_lstm_classifier(model=None):
if model is None:
model = Sequential()
model.add(LSTM(128, return_sequences=True, input_shape=(None, 128)))
model.add(Dense(128, activation='relu'))
model.add(Dense(32, activation='relu'))
    # A single-unit softmax is constant (always 1.0); sigmoid is the right choice for a binary output.
    model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adagrad')
return model
def create_AE_model(latent_space_dim: int, data_dim: int, model=None):
if model is None:
model = Sequential()
model.add(LSTM(latent_space_dim, return_sequences=True,
input_shape=(None, data_dim)))
model.add(LSTM(data_dim, return_sequences=True))
model.compile(loss='binary_crossentropy',
optimizer='adam')
return model
def create_SDAE_model(layers: list, data_dim: int):
model = Sequential()
model.add(LSTM(layers[0], return_sequences=True,
input_shape=(None, data_dim)))
for latent_space_dim in layers[1:]:
model.add(LSTM(latent_space_dim, return_sequences=True))
model.add(LSTM(data_dim, return_sequences=True))
model.compile(loss='binary_crossentropy',
optimizer='adagrad')
return model
def create_DANN():
inputs = Input((None, 128))
# Feature extractor
feature_extractor = LSTM(128, return_sequences=True)(inputs)
feature_extractor = LSTM(64, return_sequences=True)(feature_extractor)
feature_extractor = LSTM(32, return_sequences=True)(feature_extractor)
# Domain adversarial layers
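    # GradientReversal is an identity map on the forward pass and multiplies gradients
    # by -lambda (here 0.5) on the backward pass, so the feature extractor is pushed
    # toward domain-invariant features while the branch below learns to tell domains apart (DANN).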
d_a = GradientReversal(0.5)(feature_extractor)
d_a = LSTM(64, return_sequences=True)(d_a)
d_a = Dense(64, activation='relu')(d_a)
d_a = Dense(32, activation='relu')(d_a)
d_a = Dense(1, activation="softmax", name="domain_adv")(d_a)
# Classification layers
classifier = LSTM(128, return_sequences=True)(feature_extractor)
classifier = Dense(128, activation='relu')(classifier)
classifier = Dense(32, activation='relu')(classifier)
classifier = Dense(1, activation="softmax", name="classifier")(classifier)
comb_model = Model(inputs=inputs, outputs=[classifier, d_a])
comb_model.compile(loss='binary_crossentropy', optimizer='adadelta')
classifier_model = Model(inputs=inputs, outputs=[classifier])
classifier_model.compile(loss='binary_crossentropy', optimizer='adadelta')
domain_adapt_model = Model(inputs=inputs, outputs=d_a)
domain_adapt_model.compile(loss='binary_crossentropy', optimizer='adadelta')
return classifier_model, domain_adapt_model, comb_model
def train_on_source(model, ae: bool, cp_fp: str = None):
    # cp_fp is forwarded to train_model as checkpoint_fpath only when it is given.
    kwargs = {"checkpoint_fpath": cp_fp} if cp_fp is not None else {}
    train_model(model, train_files=[source_domain], batch_size=batch_size,
                epochs=epochs, ae=ae, line_count_hint=line_counts,
                test_percent=test_percent, w2v_model=w2v_model, **kwargs)
def test_on_source(model, report_path: str):
test_model(model, batch_size=batch_size, line_count_hint=line_counts,
test_paths=[source_domain], test_percent=test_percent,
w2v_model=w2v_model, report_path=report_path)
def test_on_target(model, report_path_with_format: str):
for i in target_domains:
path = i
print(f"Testing on {i}")
report_path = report_path_with_format.format(domain_name_from_file(i))
test_model(model, batch_size=batch_size, line_count_hint=line_counts,
test_paths=[path], test_percent=test_percent, w2v_model=w2v_model,
report_path=report_path)
def train_and_test_on_target(ae: bool,
model_name: str,
report_folder: str,
model_folder: str,
clear_model: str):
for i in target_domains:
model = load_model(clear_model)
domain = domain_name_from_file(i)
report_path = report_folder + f"{model_name}_target-target_{domain}.csv"
print(f"Training on {i}")
train_model(model, train_files=[i],
batch_size=batch_size, epochs=epochs, ae=ae,
line_count_hint=line_counts, test_percent=test_percent,
w2v_model=w2v_model)
print(f"Testing on {i}")
test_model(model, batch_size=batch_size, line_count_hint=line_counts,
test_paths=[i], test_percent=test_percent, w2v_model=w2v_model,
report_path=report_path)
model.save(model_folder + f"{model_name}_{domain_name_from_file(i)}.hdf5")
# 1. Train on source without adaptation
# 2. Test on source and target
# 3. Train on source with adaptation
# 4. Test on source and target
# 5. Train on the target
# 6. Test on the target
if __name__ == '__main__':
timestamp = get_timestamp()
data_folder = "../data/"
report_folder = f"../reports/{timestamp}/"
model_folder = f"../models/{timestamp}/"
os.mkdir(report_folder)
os.mkdir(model_folder)
# LSTM
    model = create_lstm_classifier()
    # Save an untrained copy so train_and_test_on_target can reload a clean model for
    # every target domain (the "LSTM_clear" file name is our own choice).
    clear_model_path = model_folder + "LSTM_clear.hdf5"
    model.save(clear_model_path)
cp_fp = model_folder + "LSTM_source_{epoch}.hdf5"
report_path = report_folder + "LSTM_source-source.csv"
print("Training LSTM")
train_on_source(model, ae=False, cp_fp=cp_fp)
print("Testing LSTM on source")
test_on_source(model, report_path)
report_path = report_folder + "LSTM_source-target_{}.csv"
print("Testing LSTM on target")
test_on_target(model, report_path)
print("Training and testing LSTM on target")
    train_and_test_on_target(ae=False, model_name="LSTM",
                             report_folder=report_folder, model_folder=model_folder,
                             clear_model=clear_model_path)
K.clear_session()
exit(0)
# AE + LSTM
model = create_AE_model(64, 128)
cp_fp = model_folder + "AE_layers_{epoch}.hdf5"
report_path = report_folder + "AE_LSTM_test_report.csv"
print("Training AE layers")
train_model(model, train_files=train_files, batch_size=batch_size,
epochs=epochs, ae=True, line_count_hint=line_counts,
test_percent=test_percent, w2v_model=w2v_model, checkpoint_fpath=cp_fp)
model.layers.pop()
model = create_lstm_classifier(model)
cp_fp = model_folder + "AE_LSTM_{epoch}.hdf5"
print("Training LSTM AE")
train_model(model, train_files=train_files, batch_size=batch_size,
epochs=epochs, ae=False, line_count_hint=line_counts,
test_percent=test_percent, w2v_model=w2v_model, checkpoint_fpath=cp_fp)
print("Testing LSTM AE")
test_model(model, batch_size=batch_size, line_count_hint=line_counts,
test_paths=train_files, test_percent=test_percent, w2v_model=w2v_model,
report_path=report_path)
K.clear_session()
report_path = report_folder + "SDAE_LSTM_test_report.csv"
model = None
for latent_space_dim in [128, 72, 64]:
model = create_AE_model(latent_space_dim, 128, model)
cp_fp = model_folder + "SDAE_layers_" + str(latent_space_dim) + "_{epoch}.hdf5"
print(f"Training AE layers {latent_space_dim}")
train_model(model, train_files=train_files, batch_size=batch_size,
epochs=epochs, ae=True, line_count_hint=line_counts,
test_percent=test_percent, w2v_model=w2v_model,
checkpoint_fpath=cp_fp, noise_decorator=gaussian_noise)
model.layers.pop()
model = create_lstm_classifier(model)
cp_fp = model_folder + "SDAE_LSTM_{epoch}.hdf5"
print("Training LSTM SDAE")
train_model(model, train_files=train_files, batch_size=batch_size,
epochs=epochs, ae=False, line_count_hint=line_counts,
test_percent=test_percent, w2v_model=w2v_model, checkpoint_fpath=cp_fp)
print("Testing LSTM SDAE")
test_model(model, batch_size=batch_size, line_count_hint=line_counts,
test_paths=train_files, test_percent=test_percent, w2v_model=w2v_model,
report_path=report_path)
K.clear_session()
|
Python
|
CL
|
b7f3b1f525e757596dd8ebc54e68e99c7deb4fae46e631d15aa7629fd5c2a10a
|
import sqlite3
import json
import time
import location_database_utils
import pendulum
DB_PATH = '.taisteal.db'
def _connect():
db_connection = sqlite3.connect(DB_PATH)
db_connection.row_factory = sqlite3.Row
return db_connection, db_connection.cursor()
def _create_tables():
print('Creating database tables...')
conn, cursor = _connect()
# The location_lookups table acts as a cache for previous responses when
# looking up "query". The "result" column contains arbitrary JSON.
cursor.execute('''CREATE TABLE IF NOT EXISTS location_lookups
(
query text PRIMARY KEY,
result text
)''')
# The directly entered data, not used in prod directly, but processed into
# `legs` table.
cursor.execute('''CREATE TABLE IF NOT EXISTS logged_legs
(
id text PRIMARY KEY,
departure_query text,
departure_datetime text,
arrival_query text,
arrival_datetime text,
mode text,
FOREIGN KEY (departure_query) REFERENCES location_lookups(query),
FOREIGN KEY (arrival_query) REFERENCES location_lookups(query)
)''')
# Maps location queries in the location_lookups table to IDs in the
# locations table. There may be multiple queries associated with the same
# id (eg. "Zurich Hauptbahnhof" and "Zurich Main Station" ideally point to
# the same underlying location).
cursor.execute('''CREATE TABLE IF NOT EXISTS location_query_to_id (
query text PRIMARY KEY,
id text
)''')
# The locations table is derived from the data in location_lookups.
cursor.execute('''CREATE TABLE IF NOT EXISTS locations
(
/* id is an arbitrary string */
id text PRIMARY KEY,
/* human-readable non-ambiguous well-formatted address */
address text,
/* human-readable name */
name text,
/* position in the real world */
latitude real,
longitude real,
/* country name in English */
country_name text,
/* ISO 3166 country codes, with some additions (eg. the UK is split up into component countries) */
country_code text,
/* Type of this place, eg. STATION, AIRPORT. */
type text,
/* Computed region, used to group some locations together. Sometimes, this is some official designation (eg. corresponds to NUTS 2 region names), but there is no guarantee. Prefer names in English. In Switzerland, this is cantons; in Ireland, this is counties; in the US, this is states; in Monaco, there is just one region.*/
region text
)''')
# Derived from logged_legs.
cursor.execute('''CREATE TABLE IF NOT EXISTS legs
(
id text PRIMARY KEY,
departure_location_id text,
departure_datetime integer,
arrival_location_id text,
arrival_datetime integer,
mode text
)''')
cursor.execute('''CREATE TABLE IF NOT EXISTS collections
(
/* id is arbitrary string */
id text PRIMARY KEY,
title text
)''')
cursor.execute('''CREATE TABLE IF NOT EXISTS collection_parts
(
collection_id text,
position integer,
/* Should have exactly one of leg_id or note set */
leg_id text,
note text,
image_url text
)''')
conn.close()
print('Database tables created.')
# This runs at the time this file is first imported.
_create_tables()
# Regenerate derived tables.
def regenerate_tables():
print('Regenerating tables...')
start_time = time.time()
conn, cursor = _connect()
cursor.execute('DELETE FROM location_query_to_id')
cursor.execute('DELETE FROM locations')
cursor.execute('DELETE FROM legs')
conn.commit()
cursor.execute('SELECT * FROM location_lookups')
for lookup in cursor.fetchall():
parsed_lookup_result = json.loads(lookup['result'])
id_ = location_database_utils.get_id_for_location_lookup(lookup['query'], parsed_lookup_result)
args = (lookup['query'], id_)
cursor.execute('INSERT INTO location_query_to_id(query, id) VALUES(?, ?)', args)
conn.commit()
# Regenerate "locations" table ID->data from (query->lookup data) and (query->ID)
cursor.execute('''
SELECT
location_query_to_id.id,
location_lookups.result
FROM
location_query_to_id
LEFT OUTER JOIN
location_lookups
ON
location_query_to_id.query = location_lookups.query
GROUP BY
location_query_to_id.id
''')
for lookup in cursor.fetchall():
parsed_lookup_result = json.loads(lookup['result'])
id_ = lookup['id']
location_data = location_database_utils.create_locations_row_for_lookup(parsed_lookup_result)
args = (id_, location_data['address'], location_data['name'], location_data['latitude'], location_data['longitude'], location_data['country_name'], location_data['country_code'], location_data['type'], location_data['region'])
cursor.execute('INSERT INTO locations(id, address, name, latitude, longitude, country_name, country_code, type, region) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', args)
conn.commit()
# Regenerate "legs" table from "logged_legs" with new location IDs.
cursor.execute('''
SELECT
logged_legs.id,
logged_legs.departure_datetime,
logged_legs.arrival_datetime,
logged_legs.mode,
departure_location.id as departure_location_id,
arrival_location.id as arrival_location_id
FROM logged_legs
LEFT OUTER JOIN
location_query_to_id AS departure_location
ON
departure_location.query = logged_legs.departure_query
LEFT OUTER JOIN
location_query_to_id AS arrival_location
ON
arrival_location.query = logged_legs.arrival_query
''')
for lookup in cursor.fetchall():
args = (lookup['id'], lookup['departure_location_id'], lookup['departure_datetime'], lookup['arrival_location_id'], lookup['arrival_datetime'], lookup['mode'])
cursor.execute('INSERT INTO legs(id, departure_location_id, departure_datetime, arrival_location_id, arrival_datetime, mode) VALUES(?, ?, ?, ?, ?, ?)', args)
conn.commit()
conn.close()
end_time = time.time()
print('Tables regenerated. It took {} seconds.'.format(end_time - start_time))
def save_location_lookup(query, result):
print('Putting {} into location_lookups'.format(query))
conn, cursor = _connect()
args = (query, result)
cursor.execute('INSERT INTO location_lookups(query, result) VALUES(?, ?)', args)
conn.commit()
conn.close()
def get_location_lookup(query):
conn, cursor = _connect()
args = (query,)
cursor.execute("SELECT * FROM location_lookups WHERE query=? LIMIT 1", args)
row = cursor.fetchone()
conn.close()
if not row:
return None
return {col: row[col] for col in row.keys()}
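# A hedged usage sketch (not part of the original module): the intended caching
# pattern around location_lookups -- check the cache first and only call the
# (hypothetical) external geocoder, fetch_fn, on a miss.
def lookup_with_cache(query, fetch_fn):
    cached = get_location_lookup(query)
    if cached is not None:
        return json.loads(cached['result'])
    result = fetch_fn(query)  # fetch_fn is an assumed callable returning a JSON-serialisable dict
    save_location_lookup(query, json.dumps(result))
    return result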
def save_id_for_query(id_, query):
conn, cursor = _connect()
args = (id_, query)
cursor.execute('INSERT INTO location_query_to_id(id, query) VALUES(?, ?)', args)
conn.commit()
conn.close()
def get_id_for_query(query):
conn, cursor = _connect()
args = (query,)
cursor.execute("SELECT id FROM location_query_to_id WHERE query=? LIMIT 1", args)
row = cursor.fetchone()
conn.close()
if not row:
return None
return row['id']
def get_location(id_):
conn, cursor = _connect()
args = (id_,)
cursor.execute("SELECT * FROM locations WHERE id=? LIMIT 1", args)
row = cursor.fetchone()
conn.close()
if not row:
return None
return {col: row[col] for col in row.keys()}
def save_location(id_, location_data):
conn, cursor = _connect()
args = (id_, location_data['address'], location_data['name'], location_data['latitude'], location_data['longitude'], location_data['country_name'], location_data['country_code'], location_data['type'], location_data['region'])
cursor.execute('INSERT INTO locations(id, address, name, latitude, longitude, country_name, country_code, type, region) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', args)
conn.commit()
conn.close()
def save_logged_leg(id_, departure_query, departure_datetime, arrival_query, arrival_datetime, mode):
conn, cursor = _connect()
args = (id_, departure_query, departure_datetime, arrival_query, arrival_datetime, mode)
cursor.execute('INSERT INTO logged_legs(id, departure_query, departure_datetime, arrival_query, arrival_datetime, mode) VALUES(?, ?, ?, ?, ?, ?)', args)
conn.commit()
conn.close()
def save_leg(id_, departure_id, departure_datetime, arrival_id, arrival_datetime, mode):
conn, cursor = _connect()
args = (id_, departure_id, departure_datetime, arrival_id, arrival_datetime, mode)
cursor.execute('INSERT INTO legs(id, departure_location_id, departure_datetime, arrival_location_id, arrival_datetime, mode) VALUES(?, ?, ?, ?, ?, ?)', args)
conn.commit()
conn.close()
def get_legs():
conn, cursor = _connect()
cursor.execute('''
SELECT
id,
mode,
departure_location_id,
departure_datetime,
arrival_location_id,
arrival_datetime
FROM
legs
ORDER BY
arrival_datetime ASC
''')
for lookup in cursor.fetchall():
d = {n: lookup[n] for n in lookup.keys()}
d['departure_datetime'] = pendulum.parse(d['departure_datetime'])
d['arrival_datetime'] = pendulum.parse(d['arrival_datetime'])
yield d
def get_leg(id_):
conn, cursor = _connect()
args = (id_,)
cursor.execute("SELECT * FROM legs WHERE id=? LIMIT 1", args)
row = cursor.fetchone()
conn.close()
if not row:
return None
return {col: row[col] for col in row.keys()}
def get_collections():
conn, cursor = _connect()
cursor.execute('''
SELECT *
FROM
collections
''')
for lookup in cursor.fetchall():
yield {n: lookup[n] for n in lookup.keys()}
def get_collection_parts(id_):
conn, cursor = _connect()
cursor.execute('''
SELECT
*
FROM
collection_parts
WHERE
collection_id = ?
ORDER BY
position
''', (id_,))
for lookup in cursor.fetchall():
yield {n: lookup[n] for n in lookup.keys()}
def save_collection(collection):
print('DATABASE: Saving collection:', collection)
conn, cursor = _connect()
cursor.execute('''
DELETE FROM
collections
WHERE
id = ?''', (collection['id'],))
cursor.execute('''
DELETE FROM
collection_parts
WHERE
collection_id = ?''', (collection['id'],))
conn.commit()
cursor.execute('INSERT INTO collections(id, title) VALUES(?, ?)', (collection['id'], collection['title']))
for position, part in enumerate(collection['parts']):
leg_id = part['leg_id']
note = part['note']
image_url = part['image_url']
# TODO: verify that exactly one of (leg_id, note) is set
cursor.execute('INSERT INTO collection_parts(collection_id, position, leg_id, note, image_url) VALUES(?, ?, ?, ?, ?)', (collection['id'], position, leg_id, note, image_url))
conn.commit()
|
Python
|
CL
|
c1349f5a1b1830a5da2987968256c12eedfb5c43c2bc6f18fd157363e4cf3b4f
|
# follow Google coding standards for imports
import copy
import multiprocessing
import os
import time
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torchvision
from sklearn import preprocessing
from torch.optim import lr_scheduler
from tqdm import tqdm
from custom_dataset_npy import CustomDatasetNPY
from net import ConvNet
import pickle
import matplotlib
multiprocessing.set_start_method("spawn", True)
# try to run resnet34 architecture
# look at first: https://pytorch.org/hub/pytorch_vision_resnet/
# look at second: https://stackoverflow.com/questions/23202132/splitting-an-rgb-image-to-r-g-b-channels-python/23208666
class Model(object):
"""Deep learning model for image recognition.
Loads dataset and neural network hyperparameters.
Typical usage example:
model = Model()
print(model.dataset_train)
"""
def __init__(self):
"""Initialize dataset and neural network.
        Traverses train_path and valid_path to load (.npy) images using CustomDatasetNPY().
        Creates the neural network using ConvNet(). Defines hyperparameters. These hyperparameters may need more work and iteration.
"""
'''
# Defines paths for training and validation datasets.
train_path = "/home/gauravkuppa24/Documents/datasets/MRNet-v1.0/train/coronal"
valid_path = "/home/gauravkuppa24/Documents/datasets/MRNet-v1.0/valid/axial"
'''
# Create Dataset and DataLoader for training and validation dataset
self.dataset_train = CustomDatasetNPY("train")[0:200]
self.train_loader = torch.utils.data.DataLoader(
self.dataset_train, batch_size=30, shuffle=False # , num_workers=4
)
self.dataset_valid = CustomDatasetNPY("valid")[0:25]
self.valid_loader = torch.utils.data.DataLoader(
self.dataset_valid, batch_size=30, shuffle=False # , num_workers=4
)
self.dataset_sizes = {'train':len(self.dataset_train), 'valid':len(self.dataset_valid)}
self.dataloaders = {
'train': self.train_loader,
'valid': self.valid_loader
}
# Create Neural Network with hyperparameters.
self.net = ConvNet(2)
self.optimizer = torch.optim.Adam(
self.net.parameters(), lr=0.01
) # how do you know which optim to use when?
self.criterion = (
torch.nn.CrossEntropyLoss()
) # how do you know which criterion to use? why do we choose cross entropy loss
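        # CrossEntropyLoss combines LogSoftmax and NLLLoss: it expects raw logits of shape
        # (N, C) plus integer class labels, which matches the 2-class ConvNet output and
        # the long labels selected in train().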
self.exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(
self.optimizer, step_size=7, gamma=0.1
)
self.device = torch.device("cpu")
# TODO(G): make a train() method
def train(self, num_epochs=10):
net, criterion, optimizer, scheduler = self.net, self.criterion, self.optimizer, self.exp_lr_scheduler
since = time.time()
train_loss = []
train_accuracy = []
valid_loss = []
valid_accuracy = []
self.stats = {"train":[train_loss, train_accuracy],"valid":[valid_loss, valid_accuracy]}
best_model_wts = copy.deepcopy(net.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print("Epoch {}/{}".format(epoch, num_epochs - 1))
print("-" * 10)
# Each epoch has a training and validation phase
for phase in ["train", "valid"]:
if phase == "train":
net.train() # Set model to training mode
else:
net.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
                for inputs, labels in tqdm(self.dataloaders[phase]):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
labels = labels.long()[:,1]
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == "train"):
outputs = net(inputs)
_, preds = torch.max(outputs, 1)
#print(labels)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == "train":
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == "train":
scheduler.step()
epoch_loss = running_loss / self.dataset_sizes[phase]
epoch_acc = running_corrects.double().item() / self.dataset_sizes[phase]
self.stats[phase][0].append(epoch_loss)
self.stats[phase][1].append(epoch_acc)
print(
"{} Loss: {:.4f} Acc: {:.4f}".format(phase, epoch_loss, epoch_acc)
)
# deep copy the model
if phase == "valid" and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(net.state_dict())
print()
time_elapsed = time.time() - since
print(
"Training complete in {:.0f}m {:.0f}s".format(
time_elapsed // 60, time_elapsed % 60
)
)
print("Best val Acc: {:4f}".format(best_acc))
# load best model weights
net.load_state_dict(best_model_wts)
return net
def plot_images(self):
for imgBunch, groundBunch in self.train_loader:
print(imgBunch.shape)
for img in imgBunch:
print("x", img.shape)
img = img[2,:,:]
print("y", img.shape)
# TODO(g): display img #, ground truth, img index
plt.imshow(img.view(256, -1), cmap="gray")
plt.show()
def plot_results(self, epochs, loss_acc):
train_loss, train_accuracy, valid_loss, valid_accuracy = loss_acc
fig = plt.figure(figsize=(20,4))
ax = fig.add_subplot(1, 2, 1)
plt.title("Train - Validation Loss")
plt.plot(list(np.arange(epochs) + 1) , train_loss, label='train')
plt.plot(list(np.arange(epochs) + 1), valid_loss, label='validation')
plt.xlabel('num_epochs', fontsize=12)
plt.ylabel('loss', fontsize=12)
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
plt.legend(loc='best')
ax = fig.add_subplot(1, 2, 2)
plt.title("Train - Validation Accuracy")
plt.plot(list(np.arange(epochs) + 1) , train_accuracy, label='train')
plt.plot(list(np.arange(epochs) + 1), valid_accuracy, label='validation')
plt.xlabel('num_epochs', fontsize=12)
plt.ylabel('accuracy', fontsize=12)
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
plt.legend(loc='best')
plt.show()
def main():
# set random seed to 0
torch.manual_seed(0)
np.random.seed(0)
model = Model()
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
train_loss = []
train_accuracy = []
valid_loss = []
valid_accuracy = []
epochs = 25
model.train(num_epochs=epochs)
train_loss, train_accuracy = model.stats['train']
valid_loss, valid_accuracy = model.stats['valid']
loss_acc = [train_loss, train_accuracy, valid_loss, valid_accuracy]
model.plot_results(epochs, loss_acc)
if __name__ == "__main__":
main()
|
Python
|
CL
|
4a9790ef476c5393142b00a6a139c44fe748571dea78caba4a939da0f5e0f68b
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 17 13:20:02 2017
@author: Weiyu_Lee
"""
import os
import sys
sys.path.append('./utility')
import tensorflow as tf
from tqdm import tqdm
import numpy as np
import model_zoo
import scipy.misc as misc
import random
from utils import (
read_data,
batch_shuffle_rndc,
batch_shuffle,
log10
)
class MODEL(object):
def __init__(self,
sess,
mode=None,
epoch=10,
batch_size=128,
image_size=32,
label_size=20,
learning_rate=1e-4,
color_dim=1,
scale=4,
train_extract_stride=14,
test_extract_stride=20,
checkpoint_dir=None,
log_dir=None,
output_dir=None,
train_dir=None,
test_dir=None,
h5_dir=None,
train_h5_name=None,
test_h5_name=None,
ckpt_name=None,
is_train=True,
model_ticket=None):
"""
Initial function
Args:
image_size: training or testing input image size.
(if scale=3, image size is [33x33].)
label_size: label image size.
(if scale=3, image size is [21x21].)
batch_size: batch size
color_dim: color dimension number. (only Y channel, color_dim=1)
checkpoint_dir: checkpoint directory
output_dir: output directory
"""
self.sess = sess
self.mode = mode
self.epoch = epoch
self.batch_size = batch_size
self.image_size = image_size
self.label_size = label_size
self.learning_rate = learning_rate
self.color_dim = color_dim
self.is_grayscale = (color_dim == 1)
self.scale = scale
self.train_extract_stride = train_extract_stride
self.test_extract_stride = test_extract_stride
self.checkpoint_dir = checkpoint_dir
self.log_dir = log_dir
self.output_dir = output_dir
self.train_dir = train_dir
self.test_dir = test_dir
self.h5_dir = h5_dir
self.train_h5_name = train_h5_name
self.test_h5_name = test_h5_name
self.ckpt_name = ckpt_name
self.is_train = is_train
self.model_ticket = model_ticket
self.model_list = ["googleLeNet_v1", "resNet_v1", "srcnn_v1", "grr_srcnn_v1", "grr_grid_srcnn_v1", "edsr_v1", "espcn_v1", "edsr_v2", "grr_edsr_v2", "GoogLeNet_edsr_v1"]
self.build_model()
def build_model(self):###
if self.model_ticket not in self.model_list:
print("sorry, wrong ticket!")
return 0
else:
fn = getattr(self, "build_" + self.model_ticket)
model = fn()
return model
def train(self):
if self.model_ticket not in self.model_list:
print("sorry, wrong ticket!")
return 0
else:
fn = getattr(self, "train_" + self.model_ticket)
function = fn()
return function
def build_srcnn_v1(self):###
"""
Build srcnn_v1 model
"""
# Define input and label images
self.images = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')
self.labels = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.color_dim], name='labels')
self.dropout = tf.placeholder(tf.float32, name='dropout')
# Initial model_zoo
mz = model_zoo.model_zoo(self.images, self.dropout, self.is_train, self.model_ticket)
# Build model
self.pred = mz.build_model()
# Define loss function (MSE)
self.loss = tf.reduce_mean(tf.square(self.labels - self.pred))
with tf.name_scope('train_summary'):
tf.summary.scalar("loss", self.loss, collections=['train'])
self.merged_summary_train = tf.summary.merge_all('train')
with tf.name_scope('test_summary'):
tf.summary.scalar("loss", self.loss, collections=['test'])
self.merged_summary_test = tf.summary.merge_all('test')
self.saver = tf.train.Saver()
def build_grr_srcnn_v1(self):###
"""
Build grr_srcnn_v1 model
"""
# Define input and label images
self.images = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')
self.stg1_labels = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='stg1_labels')
self.stg2_labels = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='stg2_labels')
self.stg3_labels = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.color_dim], name='stg3_labels')
self.dropout = tf.placeholder(tf.float32, name='dropout')
# Initial model_zoo
mz = model_zoo.model_zoo(self.images, self.dropout, self.is_train, self.model_ticket)
# Build model
self.stg1_pred, self.stg2_pred, self.stg3_pred = mz.build_model()
padding = 6
# Define loss function (MSE)
## Stage 1 loss:
self.stg1_loss = tf.reduce_mean(tf.square(self.stg1_labels[:, padding:-padding, padding:-padding, :] - self.stg1_pred[:, padding:-padding, padding:-padding, :]))
## Stage 2 loss:
self.stg2_loss = tf.reduce_mean(tf.square(self.stg2_labels[:, padding:-padding, padding:-padding, :] - self.stg2_pred[:, padding:-padding, padding:-padding, :]))
## Stage 3 loss:
self.stg3_loss = tf.reduce_mean(tf.square(self.stg3_labels - self.stg3_pred))
self.all_stg_loss = tf.add(tf.add(self.stg1_loss, self.stg2_loss), self.stg3_loss)
with tf.name_scope('train_summary'):
tf.summary.scalar("Stg1 loss", self.stg1_loss, collections=['train'])
tf.summary.scalar("Stg2 loss", self.stg2_loss, collections=['train'])
tf.summary.scalar("Stg3 loss", self.stg3_loss, collections=['train'])
self.merged_summary_train = tf.summary.merge_all('train')
with tf.name_scope('test_summary'):
tf.summary.scalar("Stg1 loss", self.stg1_loss, collections=['test'])
tf.summary.scalar("Stg2 loss", self.stg2_loss, collections=['test'])
tf.summary.scalar("Stg3 loss", self.stg3_loss, collections=['test'])
self.merged_summary_test = tf.summary.merge_all('test')
self.saver = tf.train.Saver()
def build_grr_grid_srcnn_v1(self):###
"""
Build grr_grid_srcnn_v1 model
"""
# Define input and label images
self.images = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')
self.stg1_labels = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='stg1_labels')
self.stg2_labels = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='stg2_labels')
self.stg3_labels = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.color_dim], name='stg3_labels')
self.dropout = tf.placeholder(tf.float32, name='dropout')
self.inputs = self.images
# Initial model_zoo
mz = model_zoo.model_zoo(self.inputs, self.dropout, self.is_train, self.model_ticket)
# Build model
self.stg_pred, self.HFLF_pred, self.HFLF_idx, self.TV_stg3_output = mz.build_model()
padding = 6
# Define loss function (MSE)
## Stage 1 loss:
self.stg1_loss = tf.reduce_mean(tf.square(self.stg1_labels[:, padding:-padding, padding:-padding, :] - self.stg_pred[0][:, padding:-padding, padding:-padding, :]))
## Stage 2 loss:
self.stg2_loss = tf.reduce_mean(tf.square(self.stg2_labels[:, padding:-padding, padding:-padding, :] - self.stg_pred[1][:, padding:-padding, padding:-padding, :]))
## Stage 3 loss:
self.stg3_loss = tf.reduce_mean(tf.square(self.stg3_labels - self.stg_pred[2]))
self.all_stg_loss = tf.add(tf.add(self.stg1_loss, self.stg2_loss), self.stg3_loss)
## HF loss
self.HF_labels = tf.squeeze(tf.gather(self.stg3_labels, self.HFLF_idx[0]), 1)
self.HF_loss = tf.reduce_mean(tf.square(self.HF_labels - self.HFLF_pred[0]))
self.before_HF_pred = tf.squeeze(tf.gather(self.stg_pred[2], self.HFLF_idx[0]), 1)
self.before_HF_loss = tf.reduce_mean(tf.square(self.HF_labels - self.before_HF_pred))
## LF loss
self.LF_labels = tf.squeeze(tf.gather(self.stg3_labels, self.HFLF_idx[1]), 1)
self.LF_loss = tf.reduce_mean(tf.square(self.LF_labels - self.HFLF_pred[1]))
with tf.name_scope('train_summary'):
tf.summary.scalar("Stg1 loss", self.stg1_loss, collections=['train'])
tf.summary.scalar("Stg2 loss", self.stg2_loss, collections=['train'])
tf.summary.scalar("Stg3 loss", self.stg3_loss, collections=['train'])
tf.summary.scalar("HF loss", self.HF_loss, collections=['train'])
tf.summary.scalar("LF loss", self.LF_loss, collections=['train'])
tf.summary.scalar("Final loss", self.stg3_loss, collections=['train'])
self.merged_summary_train = tf.summary.merge_all('train')
with tf.name_scope('test_summary'):
tf.summary.scalar("Stg1 loss", self.stg1_loss, collections=['test'])
tf.summary.scalar("Stg2 loss", self.stg2_loss, collections=['test'])
tf.summary.scalar("Stg3 loss", self.stg3_loss, collections=['test'])
tf.summary.scalar("HF loss", self.HF_loss, collections=['test'])
tf.summary.scalar("LF loss", self.LF_loss, collections=['test'])
tf.summary.scalar("Final loss", self.stg3_loss, collections=['test'])
self.merged_summary_test = tf.summary.merge_all('test')
self.saver = tf.train.Saver()
def train_srcnn_v1(self):
"""
Training process.
"""
print("Training...")
stage_size = 3
# Define dataset path
self.train_h5_name = self.train_h5_name + "_[{}]_scale_{}_size_{}.h5".format(self.mode, self.scale, self.image_size)
self.test_h5_name = self.test_h5_name + "_[{}]_scale_{}_size_{}.h5".format(self.mode, self.scale, self.image_size)
train_data_dir = os.path.join('./{}'.format(self.h5_dir), self.train_h5_name)
test_data_dir = os.path.join('./{}'.format(self.h5_dir), self.test_h5_name)
# Read data from .h5 file
train_data, train_label = read_data(train_data_dir)
test_data, test_label = read_data(test_data_dir)
# Stochastic gradient descent with the standard backpropagation
## Stage loss
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
summary_writer = tf.summary.FileWriter('log', self.sess.graph)
self.sess.run(tf.global_variables_initializer())
# Define iteration counter, timer and average loss
itera_counter = 0
avg_500_loss = 0
avg_loss = 0
# Load checkpoint
if self.load_ckpt(self.checkpoint_dir, self.ckpt_name):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
batch_labels = [None]*stage_size
train_batch_num = len(train_data) // self.batch_size
padding = (self.image_size - self.label_size) // 2 # 6
        # Prepare validation data
        val_images = test_data
val_labels = test_label[:, padding:-padding, padding:-padding, :]
epoch_pbar = tqdm(range(self.epoch))
for ep in epoch_pbar:
# Run by batch images
train_data, train_label = batch_shuffle(train_data, train_label, self.batch_size)
epoch_pbar.set_description("Epoch: [%2d]" % ((ep+1)))
epoch_pbar.refresh()
batch_pbar = tqdm(range(0, train_batch_num), desc="Batch: [0]")
for idx in batch_pbar:
itera_counter += 1
# Get the training data
batch_images = train_data[idx*self.batch_size : (idx+1)*self.batch_size]
batch_images = np.array(batch_images)
batch_labels = np.array(train_label[idx*self.batch_size : (idx+1)*self.batch_size])
batch_labels = batch_labels[:, padding:-padding, padding:-padding, :]
# Run the model
train_sum, _, train_err = self.sess.run([self.merged_summary_train,
self.train_op,
self.loss],
feed_dict={
self.images: batch_images,
self.labels: batch_labels,
self.dropout: 1.
})
avg_loss += train_err
avg_500_loss += train_err
batch_pbar.set_description("Batch: [%2d]" % (idx+1))
if ep % 5 == 0:
self.save_ckpt(self.checkpoint_dir, self.ckpt_name, itera_counter)
# Validation
## Run the test images
test_sum, val_err = self.sess.run([self.merged_summary_test,
self.loss] ,
feed_dict={
self.images: val_images,
self.labels: val_labels,
self.dropout: 1.
})
avg_500_loss /= (train_batch_num*5)
print("Epoch: [%2d], Average train loss: 5 ep loss: [%.8f], all loss: [%.8f], Test stg loss: [%.8f]\n" \
% ((ep+1), avg_500_loss, avg_loss/itera_counter, val_err))
summary_writer.add_summary(train_sum, ep)
summary_writer.add_summary(test_sum, ep)
avg_500_loss = 0
def train_grr_srcnn_v1(self):
"""
Training process.
"""
print("Training...")
stage_size = 3
# Define dataset path
self.train_h5_name = self.train_h5_name + "_[{}]_scale_{}_size_{}.h5".format(self.mode, self.scale, self.image_size)
self.test_h5_name = self.test_h5_name + "_[{}]_scale_{}_size_{}.h5".format(self.mode, self.scale, self.image_size)
train_data_dir = os.path.join('./{}'.format(self.h5_dir), self.train_h5_name)
test_data_dir = os.path.join('./{}'.format(self.h5_dir), self.test_h5_name)
# Read data from .h5 file
train_data, train_label = read_data(train_data_dir)
test_data, test_label = read_data(test_data_dir)
# Stochastic gradient descent with the standard backpropagation
## Stage loss
self.stg_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.all_stg_loss)
#self.stg1_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.stg1_loss)
#self.stg2_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.stg2_loss)
#self.stg3_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.stg3_loss)
summary_writer = tf.summary.FileWriter('log', self.sess.graph)
self.sess.run(tf.global_variables_initializer())
# Define iteration counter, timer and average loss
itera_counter = 0
avg_500_loss = [0]*(stage_size+1) # 3 stage + 1 total loss
avg_final_loss = 0
# Load checkpoint
if self.load_ckpt(self.checkpoint_dir, self.ckpt_name):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
batch_labels = [None]*stage_size
train_batch_num = len(train_data) // self.batch_size
padding = (self.image_size - self.label_size) // 2 # 6
        # Prepare validation data
        val_label = [None]*stage_size
        val_images = test_data
val_label[0] = test_label
val_label[1] = test_label
val_label[2] = test_label[:, padding:-padding, padding:-padding, :]
epoch_pbar = tqdm(range(self.epoch))
for ep in epoch_pbar:
# Run by batch images
train_data, train_label = batch_shuffle(train_data, train_label, self.batch_size)
epoch_pbar.set_description("Epoch: [%2d]" % ((ep+1)))
epoch_pbar.refresh()
batch_pbar = tqdm(range(0, train_batch_num), desc="Batch: [0]")
for idx in batch_pbar:
itera_counter += 1
# Get the training data
batch_images = train_data[idx*self.batch_size : (idx+1)*self.batch_size]
batch_images = np.array(batch_images)
batch_labels[0] = (train_label[idx*self.batch_size : (idx+1)*self.batch_size])
batch_labels[1] = (train_label[idx*self.batch_size : (idx+1)*self.batch_size])
batch_labels[2] = np.array(train_label[idx*self.batch_size : (idx+1)*self.batch_size])
batch_labels[2] = batch_labels[2][:, padding:-padding, padding:-padding, :]
# Run the model
train_sum, _, stg_err = self.sess.run([ self.merged_summary_train,
self.stg_train_op,
self.stg3_loss,
],
feed_dict={
self.images: batch_images,
self.stg1_labels: batch_labels[0],
self.stg2_labels: batch_labels[1],
self.stg3_labels: batch_labels[2],
self.dropout: 1.
})
avg_500_loss[0] += stg_err
batch_pbar.set_description("Batch: [%2d]" % (idx+1))
#batch_pbar.refresh()
if ep % 5 == 0:
self.save_ckpt(self.checkpoint_dir, self.ckpt_name, itera_counter)
# Validation
## Run the test images
test_sum, val_stg3_err = self.sess.run([ self.merged_summary_test,
self.stg3_loss,
],
feed_dict={
self.images: val_images,
self.stg1_labels: val_label[0],
self.stg2_labels: val_label[1],
self.stg3_labels: val_label[2],
self.dropout: 1.
})
for i in range(len(avg_500_loss)):
avg_500_loss[i] /= (train_batch_num*5)
avg_final_loss /= (train_batch_num*5)
print("Epoch: [%2d], Average train loss of 5 epoches: stg3 loss: [%.8f], Test stg loss: [%.8f]\n" \
% ((ep+1), avg_500_loss[0], val_stg3_err))
summary_writer.add_summary(train_sum, ep)
summary_writer.add_summary(test_sum, ep)
avg_500_loss = [0]*(stage_size+1)
def build_espcn_v1(self):###
"""
        Build espcn_v1 model
"""
# Define input and label images
self.input = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')
self.image_target = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.color_dim], name='labels')
#mean_x = tf.reduce_mean(self.input)
#image_input = self.input - mean_x
#mean_y = tf.reduce_mean(self.image_target)
#taget = self.image_target - mean_y
image_input = self.input
target = self.image_target
self.dropout = tf.placeholder(tf.float32, name='dropout')
# Initial model_zoo
mz = model_zoo.model_zoo(image_input, self.dropout, self.is_train, self.model_ticket)
# Build model
#self.logits = mz.build_model(scale=4,feature_size = 32)
self.logits = mz.build_model()
self.l1_loss = tf.reduce_mean(tf.losses.absolute_difference(target,self.logits ))
mse = tf.reduce_mean(tf.square(target - self.logits))
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(mse)
with tf.name_scope('train_summary'):
tf.summary.scalar("loss", self.l1_loss, collections=['train'])
tf.summary.scalar("MSE", mse, collections=['train'])
tf.summary.image("input_image",self.input , collections=['train'])
tf.summary.image("target_image",target, collections=['train'])
tf.summary.image("output_image",self.logits, collections=['train'])
self.merged_summary_train = tf.summary.merge_all('train')
with tf.name_scope('test_summary'):
tf.summary.scalar("loss", self.l1_loss, collections=['test'])
tf.summary.scalar("MSE", mse, collections=['test'])
tf.summary.image("input_image",self.input, collections=['test'])
tf.summary.image("target_image",target , collections=['test'])
tf.summary.image("output_image",self.logits, collections=['test'])
self.merged_summary_test = tf.summary.merge_all('test')
self.saver = tf.train.Saver()
def train_grr_grid_srcnn_v1(self):
"""
Training process.
"""
print("Training...")
stage_size = 3
# Define dataset path
self.train_h5_name = self.train_h5_name + "_[{}]_scale_{}_size_{}.h5".format(self.mode, self.scale, self.image_size)
self.test_h5_name = self.test_h5_name + "_[{}]_scale_{}_size_{}.h5".format(self.mode, self.scale, self.image_size)
train_data_dir = os.path.join('./{}'.format(self.h5_dir), self.train_h5_name)
test_data_dir = os.path.join('./{}'.format(self.h5_dir), self.test_h5_name)
# Read data from .h5 file
train_data, train_label = read_data(train_data_dir)
test_data, test_label = read_data(test_data_dir)
# Stochastic gradient descent with the standard backpropagation
## Stage loss
self.stg_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.all_stg_loss)
#self.stg1_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.stg1_loss)
#self.stg2_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.stg2_loss)
#self.stg3_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.stg3_loss)
self.HF_train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.HF_loss)
summary_writer = tf.summary.FileWriter('log', self.sess.graph)
self.sess.run(tf.global_variables_initializer())
# Define iteration counter, timer and average loss
itera_counter = 0
avg_500_loss = [0]*5 # 5 temp var. for debuging
# Load checkpoint
if self.load_ckpt(self.checkpoint_dir, self.ckpt_name):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
batch_labels = [None]*stage_size
train_batch_num = len(train_data) // self.batch_size
padding = (self.image_size - self.label_size) // 2 # 6
        # Prepare validation data
val_label = [None]*stage_size
val_images = test_data
val_label[0] = test_label
val_label[1] = test_label
val_label[2] = test_label[:, padding:-padding, padding:-padding, :]
epoch_pbar = tqdm(range(self.epoch))
for ep in epoch_pbar:
# Run by batch images
shuffled_train_data, shuffled_train_label = batch_shuffle(train_data, train_label, self.batch_size)
epoch_pbar.set_description("Epoch: [%2d]" % ((ep+1)))
epoch_pbar.refresh()
batch_pbar = tqdm(range(0, train_batch_num), desc="Batch: [0]")
for idx in batch_pbar:
itera_counter += 1
# Get the training data
batch_images = shuffled_train_data[idx*self.batch_size : (idx+1)*self.batch_size]
batch_images = np.array(batch_images)
batch_labels[0] = (shuffled_train_label[idx*self.batch_size : (idx+1)*self.batch_size])
batch_labels[1] = (shuffled_train_label[idx*self.batch_size : (idx+1)*self.batch_size])
batch_labels[2] = np.array(shuffled_train_label[idx*self.batch_size : (idx+1)*self.batch_size])
batch_labels[2] = batch_labels[2][:, padding:-padding, padding:-padding, :]
# Run the model
train_sum, _, _, stg3_err, bHF_err, HF_err, LF_err, HFLF_idx, TV = self.sess.run([ self.merged_summary_train,
self.stg_train_op,
self.HF_train_op,
self.stg3_loss,
self.before_HF_loss,
self.HF_loss,
self.LF_loss,
self.HFLF_idx,
self.TV_stg3_output
],
feed_dict={
self.images: batch_images,
self.stg1_labels: batch_labels[0],
self.stg2_labels: batch_labels[1],
self.stg3_labels: batch_labels[2],
self.dropout: 1.
})
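                # Final loss is the patch-count-weighted average of the HF- and LF-branch losses.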
final_err = (HF_err * (HFLF_idx[0].size) + LF_err * (HFLF_idx[1].size)) / (HFLF_idx[0].size + HFLF_idx[1].size)
avg_500_loss[0] += stg3_err
avg_500_loss[1] += bHF_err
avg_500_loss[2] += HF_err
avg_500_loss[3] += LF_err
avg_500_loss[4] += final_err
batch_pbar.set_description("Batch: [%2d]" % (idx+1))
#batch_pbar.refresh()
if ep % 5 == 0:
self.save_ckpt(self.checkpoint_dir, self.ckpt_name, itera_counter)
# Validation
## Run the test images
test_sum, val_stg3_err, val_bHF_err, val_HF_err, val_LF_err, val_HFLF_idx = self.sess.run([ self.merged_summary_test,
self.stg3_loss,
self.before_HF_loss,
self.HF_loss,
self.LF_loss,
self.HFLF_idx,
],
feed_dict={
self.images: val_images,
self.stg1_labels: val_label[0],
self.stg2_labels: val_label[1],
self.stg3_labels: val_label[2],
self.dropout: 1.
})
val_final_loss = (val_HF_err * (val_HFLF_idx[0].size) + val_LF_err * (val_HFLF_idx[1].size)) / (val_HFLF_idx[0].size + val_HFLF_idx[1].size)
#val_final_loss = (val_HF_err*test_HF_size + val_LF_err*test_LF_size) / test_data_size
#print("Val. HF num: ", (val_HFLF_idx[0].size), "Val. LF num", (val_HFLF_idx[1].size))
for i in range(len(avg_500_loss)):
avg_500_loss[i] /= (train_batch_num*5)
print("Epoch: [%2d], Average train loss of 5 epoches: stg3 loss: [%.8f], HF loss: [%.8f]->[%.8f], LF loss: [%.8f], Final loss: [%.8f]" \
% ((ep+1), avg_500_loss[0], avg_500_loss[1], avg_500_loss[2], avg_500_loss[3], avg_500_loss[4]))
print("Epoch: [%2d], Test stg loss: [%.8f], HF loss: [%.8f]->[%.8f], LF loss: [%.8f], Final loss: [%.8f]\n"\
% ((ep+1), val_stg3_err, val_bHF_err, val_HF_err, val_LF_err, val_final_loss))
summary_writer.add_summary(train_sum, ep)
summary_writer.add_summary(test_sum, ep)
def build_edsr_v1(self):###
"""
        Build edsr_v1 model
"""
# Define input and label images
self.input = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')
self.image_target = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.color_dim], name='labels')
self.dropout = tf.placeholder(tf.float32, name='dropout')
self.lr = tf.placeholder(tf.float32, name='learning_rate')
mean_x = tf.reduce_mean(self.input)
image_input =self.input - mean_x
mean_y = tf.reduce_mean(self.image_target)
target = self.image_target - mean_y
#image_input = self.input
#target = self.image_target
# Initial model_zoo
mz = model_zoo.model_zoo(image_input, self.dropout, self.is_train, self.model_ticket)
# Build model
self.logits = mz.build_model({"scale":self.scale,"feature_size" :64})
self.l1_loss = tf.reduce_mean(tf.losses.absolute_difference(target,self.logits))
mse = tf.reduce_mean(tf.squared_difference(target,self.logits))
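        # PSNR = 10 * log10(MAX^2 / MSE), with MAX = 255 for 8-bit images.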
PSNR = tf.constant(255**2,dtype=tf.float32)/(mse)
PSNR = tf.constant(10,dtype=tf.float32)*log10(PSNR)
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.l1_loss)
with tf.name_scope('train_summary'):
tf.summary.scalar("loss", self.l1_loss, collections=['train'])
tf.summary.scalar("MSE", mse, collections=['train'])
tf.summary.scalar("PSNR",PSNR, collections=['train'])
tf.summary.image("input_image",self.input , collections=['train'])
tf.summary.image("target_image",target, collections=['train'])
tf.summary.image("output_image",self.logits, collections=['train'])
self.merged_summary_train = tf.summary.merge_all('train')
with tf.name_scope('test_summary'):
tf.summary.scalar("loss", self.l1_loss, collections=['test'])
tf.summary.scalar("PSNR",PSNR, collections=['test'])
tf.summary.scalar("MSE", mse, collections=['test'])
tf.summary.image("input_image",self.input, collections=['test'])
tf.summary.image("target_image",target , collections=['test'])
tf.summary.image("output_image",self.logits, collections=['test'])
self.merged_summary_test = tf.summary.merge_all('test')
self.saver = tf.train.Saver()
def train_edsr_v1(self):
"""
Training process.
"""
print("Training...")
# Define dataset path
#mean of DIV2K
train_mean = np.zeros((1,1,3))
train_mean[0][0][0] = 113.9427
train_mean[0][0][1] = 111.3509
train_mean[0][0][2] = 103.1092
#mean of set5
test_mean = np.zeros((1,1,3))
test_mean[0][0][0] = 140.6670
test_mean[0][0][1] = 112.9228
test_mean[0][0][2] = 85.2956
test_dataset = self.load_divk("/home/ubuntu/dataset/SuperResolution/Set5/preprocessed_scale_2", test_mean,type="test")
dataset = self.load_divk("/home/ubuntu/dataset/SuperResolution/DIV2K/", train_mean)
log_dir = os.path.join(self.log_dir, self.ckpt_name, "log")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
summary_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
if self.load_ckpt(self.checkpoint_dir, self.ckpt_name):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# Define iteration counter, timer and average loss
itera_counter = 0
learning_rate = 1e-4
#train_batch_num = len(train_data) // self.batch_size
epoch_pbar = tqdm(range(400, self.epoch))
for ep in epoch_pbar:
# Run by batch images
random.shuffle(dataset)
train_data, train_label = zip(*dataset)
test_data, test_label = zip(*test_dataset)
epoch_pbar.set_description("Epoch: [%2d]" % ((ep+1)))
epoch_pbar.refresh()
batch_pbar = tqdm(range(0, len(train_data)//self.batch_size), desc="Batch: [0]")
print("learning_rate: ", learning_rate)
if ep%4000 == 0 and ep != 0:learning_rate = learning_rate/2
for idx in batch_pbar:
itera_counter += 1
batch_index = idx*self.batch_size
batch_images, batch_labels = batch_shuffle_rndc(train_data, train_label, self.scale, self.image_size,batch_index, self.batch_size)
# Run the model
_, train_loss = self.sess.run([self.train_op, self.l1_loss],
feed_dict={self.input: batch_images,
self.image_target: batch_labels,
self.dropout: 1.,
self.lr:learning_rate})
batch_pbar.set_description("Batch: [%2d], L1:%.2f" % (idx+1, train_loss))
#batch_pbar.refresh()
if ep % 5 == 0:
self.save_ckpt(self.checkpoint_dir, self.ckpt_name, itera_counter)
train_sum, train_loss = self.sess.run([self.merged_summary_train, self.l1_loss],
feed_dict={
self.input: batch_images,
self.image_target: batch_labels,
self.dropout: 1.
})
batch_test_images, batch_test_labels = batch_shuffle_rndc(test_data, test_label, self.scale, self.image_size, 0, 5)
test_sum, test_loss = self.sess.run([self.merged_summary_test, self.l1_loss],
feed_dict={
self.input: batch_test_images,
self.image_target: batch_test_labels,
self.dropout: 1.})
print("Epoch: [{}], Train_loss: {}".format((ep+1), train_loss))
print("Epoch: [{}], Test_loss: {}".format((ep+1), test_loss))
summary_writer.add_summary(train_sum, ep)
summary_writer.add_summary(test_sum, ep)
def build_edsr_v2(self):###
"""
        Build edsr_v2 model
"""
# Define input and label images
self.input = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')
self.image_target = tf.placeholder(tf.float32, [None, None, None, self.color_dim], name='labels')
self.dropout = tf.placeholder(tf.float32, name='dropout')
self.lr = tf.placeholder(tf.float32, name='learning_rate')
"""
mean_x = tf.reduce_mean(self.input)
image_input = self.input - mean_x
mean_y = tf.reduce_mean(self.image_target)
target = self.image_target - mean_y
"""
self.image_input = self.input/255.
self.target = target = self.image_target/255.
# Initial model_zoo
mz = model_zoo.model_zoo(self.image_input, self.dropout, self.is_train, self.model_ticket)
# Build model
logits2, logits4 = mz.build_model({"scale":self.scale,"feature_size" :64})
self.l1_loss2 = tf.reduce_mean(tf.losses.absolute_difference(target,logits2))
self.l1_loss4 = tf.reduce_mean(tf.losses.absolute_difference(target,logits4))
self.train_op2 = tf.train.AdamOptimizer(self.lr).minimize(self.l1_loss2)
self.train_op4 = tf.train.AdamOptimizer(self.lr).minimize(self.l1_loss4)
if self.scale == 2:
self.logits = logits2
self.l1_loss = self.l1_loss2
self.train_op = self.train_op2
elif self.scale == 4:
self.logits = logits4
self.l1_loss = self.l1_loss4
self.train_op = self.train_op4
mse = tf.reduce_mean(tf.squared_difference(target*255.,self.logits*255.))
PSNR = tf.constant(255**2,dtype=tf.float32)/mse
PSNR = tf.constant(10,dtype=tf.float32)*log10(PSNR)
with tf.name_scope('train_summary'):
tf.summary.scalar("loss", self.l1_loss, collections=['train'])
tf.summary.scalar("MSE", mse, collections=['train'])
tf.summary.scalar("PSNR",PSNR, collections=['train'])
tf.summary.image("input_image",self.input , collections=['train'])
tf.summary.image("target_image",target*255, collections=['train'])
tf.summary.image("output_image",self.logits*255, collections=['train'])
self.merged_summary_train = tf.summary.merge_all('train')
with tf.name_scope('test_summary'):
tf.summary.scalar("loss", self.l1_loss, collections=['test'])
tf.summary.scalar("PSNR",PSNR, collections=['test'])
tf.summary.scalar("MSE", mse, collections=['test'])
tf.summary.image("input_image",self.input, collections=['test'])
tf.summary.image("target_image",target*255 , collections=['test'])
tf.summary.image("output_image",self.logits*255, collections=['test'])
self.merged_summary_test = tf.summary.merge_all('test')
self.saver = tf.train.Saver()
def train_edsr_v2(self):
"""
Training process.
"""
print("Training...")
# Define dataset path
test_dataset = self.load_divk("/home/ubuntu/dataset/SuperResolution/Set5/preprocessed_scale_"+str(self.scale),type="test")
dataset = self.load_divk("/home/ubuntu/dataset/SuperResolution/DIV2K/")
log_dir = os.path.join(self.log_dir, self.ckpt_name, "log")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
summary_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
if self.load_ckpt(self.checkpoint_dir, self.ckpt_name):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# Define iteration counter, timer and average loss
itera_counter = 0
learning_rate = 1e-4
#train_batch_num = len(train_data) // self.batch_size
epoch_pbar = tqdm(range(self.epoch))
for ep in epoch_pbar:
# Run by batch images
random.shuffle(dataset)
train_data, train_label = zip(*dataset)
test_data, test_label = zip(*test_dataset)
epoch_pbar.set_description("Epoch: [%2d], lr:%f" % ((ep+1), learning_rate))
epoch_pbar.refresh()
batch_pbar = tqdm(range(0, len(train_data)//self.batch_size), desc="Batch: [0]")
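            # Step-decay schedule: halve the learning rate every 4000 epochs.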
            if ep % 4000 == 0 and ep != 0:
                learning_rate = learning_rate / 2
for idx in batch_pbar:
itera_counter += 1
batch_index = idx*self.batch_size
batch_images, batch_labels = batch_shuffle_rndc(train_data, train_label, self.scale, self.image_size,batch_index, self.batch_size)
#print(batch_images, batch_labels)
# Run the model
_, train_loss = self.sess.run([self.train_op, self.l1_loss],
feed_dict={self.input: batch_images,
self.image_target: batch_labels,
self.dropout: 1.,
self.lr:learning_rate})
batch_pbar.set_description("Batch: [%2d], L1:%.2f" % (idx+1, train_loss))
#batch_pbar.refresh()
if ep % 50 == 0:
self.save_ckpt(self.checkpoint_dir, self.ckpt_name, itera_counter)
train_sum, train_loss = self.sess.run([self.merged_summary_train, self.l1_loss],
feed_dict={
self.input: batch_images,
self.image_target: batch_labels,
self.dropout: 1.
})
batch_test_images, batch_test_labels = batch_shuffle_rndc(test_data, test_label, self.scale, self.image_size, 0, 5)
test_sum, test_loss = self.sess.run([self.merged_summary_test, self.l1_loss],
feed_dict={
self.input: batch_test_images,
self.image_target: batch_test_labels,
self.dropout: 1.})
print("Epoch: [{}], Train_loss: {}".format((ep+1), train_loss))
print("Epoch: [{}], Test_loss: {}".format((ep+1), test_loss))
summary_writer.add_summary(train_sum, ep)
summary_writer.add_summary(test_sum, ep)
def build_grr_edsr_v2(self):###
"""
        Build the GRR EDSR v2 model.
"""
# Define input and label images
self.input = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')
self.image_target = tf.placeholder(tf.float32, [None, None, None, self.color_dim], name='labels')
self.dropout = tf.placeholder(tf.float32, name='dropout')
self.lr = tf.placeholder(tf.float32, name='learning_rate')
"""
mean_x = tf.reduce_mean(self.input)
image_input = self.input - mean_x
mean_y = tf.reduce_mean(self.image_target)
target = self.image_target - mean_y
"""
self.image_input = self.input/255.
self.target = target = self.image_target/255.
        # Initialize the model zoo
mz = model_zoo.model_zoo(self.image_input, self.dropout, self.is_train, self.model_ticket)
# Build model
#stg1_logits, stg2_logits, stg3_logits = mz.build_model({"scale":self.scale,"feature_size" :64})
stg3_logits = mz.build_model({"scale":self.scale,"feature_size" :64})
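        # Only the final (stage-3) outputs are trained below; the stage-1/2 branches are kept commented out.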
## Stage 1 loss
#self.l1_stg1_loss2 = tf.reduce_mean(tf.losses.absolute_difference(target, stg1_logits[0]))
#self.l1_stg1_loss4 = tf.reduce_mean(tf.losses.absolute_difference(target, stg1_logits[1]))
## Stage 2 loss
#self.l1_stg2_loss2 = tf.reduce_mean(tf.losses.absolute_difference(target, stg2_logits[0]))
#self.l1_stg2_loss4 = tf.reduce_mean(tf.losses.absolute_difference(target, stg2_logits[1]))
## Stage 3 loss
self.l1_stg3_loss2 = tf.reduce_mean(tf.losses.absolute_difference(target, stg3_logits[0]))
self.l1_stg3_loss4 = tf.reduce_mean(tf.losses.absolute_difference(target, stg3_logits[1]))
## All stage loss
#self.l1_all_stg_loss2 = self.l1_stg1_loss2 + self.l1_stg2_loss2 + self.l1_stg3_loss2
#self.l1_all_stg_loss4 = self.l1_stg1_loss4 + self.l1_stg2_loss4 + self.l1_stg3_loss4
## Optimizer
#self.train_op2 = tf.train.AdamOptimizer(self.lr).minimize(self.l1_stg1_loss2)
self.train_op2 = tf.train.AdamOptimizer(self.lr).minimize(self.l1_stg3_loss2)
#self.train_op4 = tf.train.AdamOptimizer(self.lr).minimize(self.l1_all_stg_loss4)
self.train_op4 = tf.train.AdamOptimizer(self.lr).minimize(self.l1_stg3_loss4)
if self.scale == 2:
self.logits = stg3_logits[0]
#self.l1_stg1_loss = self.l1_stg1_loss2
#self.l1_stg2_loss = self.l1_stg2_loss2
self.l1_stg3_loss = self.l1_stg3_loss2
#self.l1_all_stg_loss = self.l1_all_stg_loss2
self.train_op = self.train_op2
elif self.scale == 4:
self.logits = stg3_logits[1]
#self.l1_stg1_loss = self.l1_stg1_loss4
#self.l1_stg2_loss = self.l1_stg2_loss4
self.l1_stg3_loss = self.l1_stg3_loss4
#self.l1_all_stg_loss = self.l1_all_stg_loss4
self.train_op = self.train_op4
mse = tf.reduce_mean(tf.squared_difference(target*255.,self.logits*255.))
PSNR = tf.constant(255**2,dtype=tf.float32)/mse
PSNR = tf.constant(10,dtype=tf.float32)*log10(PSNR)
with tf.name_scope('train_summary'):
#tf.summary.scalar("stg1_loss", self.l1_stg1_loss, collections=['train'])
#tf.summary.scalar("stg2_loss", self.l1_stg2_loss, collections=['train'])
tf.summary.scalar("stg3_loss", self.l1_stg3_loss, collections=['train'])
#tf.summary.scalar("total_loss", self.l1_all_stg_loss, collections=['train'])
tf.summary.scalar("MSE", mse, collections=['train'])
tf.summary.scalar("PSNR",PSNR, collections=['train'])
tf.summary.image("input_image",self.input , collections=['train'])
tf.summary.image("target_image",target*255, collections=['train'])
tf.summary.image("output_image",self.logits*255, collections=['train'])
self.merged_summary_train = tf.summary.merge_all('train')
with tf.name_scope('test_summary'):
#tf.summary.scalar("stg1_loss", self.l1_stg1_loss, collections=['test'])
#tf.summary.scalar("stg2_loss", self.l1_stg2_loss, collections=['test'])
tf.summary.scalar("stg3_loss", self.l1_stg3_loss, collections=['test'])
#tf.summary.scalar("total_loss", self.l1_all_stg_loss, collections=['test'])
tf.summary.scalar("PSNR",PSNR, collections=['test'])
tf.summary.scalar("MSE", mse, collections=['test'])
tf.summary.image("input_image",self.input, collections=['test'])
tf.summary.image("target_image",target*255 , collections=['test'])
tf.summary.image("output_image",self.logits*255, collections=['test'])
self.merged_summary_test = tf.summary.merge_all('test')
self.saver = tf.train.Saver()
self.best_saver = tf.train.Saver()
def train_grr_edsr_v2(self):
"""
Training process.
"""
print("Training...")
# Define dataset path
test_dataset = self.load_divk("/home/wei/ML/dataset/SuperResolution/Set5/preprocessed_scale_"+str(self.scale),type="test")
dataset = self.load_divk("/home/wei/ML/dataset/SuperResolution/DIV2K/")
log_dir = os.path.join(self.log_dir, self.ckpt_name, "log")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
summary_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
if self.load_ckpt(self.checkpoint_dir, self.ckpt_name):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# Define iteration counter, timer and average loss
itera_counter = 0
learning_rate = 1e-4
#train_batch_num = len(train_data) // self.batch_size
best_loss = 100
epoch_pbar = tqdm(range(self.epoch))
for ep in epoch_pbar:
# Run by batch images
random.shuffle(dataset)
train_data, train_label = zip(*dataset)
test_data, test_label = zip(*test_dataset)
epoch_pbar.set_description("Epoch: [%2d], lr:%f" % ((ep+1), learning_rate))
epoch_pbar.refresh()
batch_pbar = tqdm(range(0, len(train_data)//self.batch_size), desc="Batch: [0]")
            if ep % 4000 == 0 and ep != 0:
                learning_rate = learning_rate / 2
for idx in batch_pbar:
itera_counter += 1
batch_index = idx*self.batch_size
batch_images, batch_labels = batch_shuffle_rndc(train_data, train_label, self.scale, self.image_size,batch_index, self.batch_size)
# Run the model
_, train_loss = self.sess.run([
self.train_op,
self.l1_stg3_loss
],
feed_dict={
self.input: batch_images,
self.image_target: batch_labels,
self.dropout: 1.,
self.lr:learning_rate
})
batch_pbar.set_description("Batch: [%2d], L1:%.2f" % (idx+1, train_loss))
if ep % 5 == 0:
self.save_ckpt(self.checkpoint_dir, self.ckpt_name, itera_counter)
#train_sum, train_stg1_loss, train_stg2_loss, train_stg3_loss, train_loss = self.sess.run([
train_sum, train_loss = self.sess.run([
self.merged_summary_train,
#self.l1_stg1_loss,
#self.l1_stg2_loss,
self.l1_stg3_loss,
#self.l1_all_stg_loss
],
feed_dict={
self.input: batch_images,
self.image_target: batch_labels,
self.dropout: 1.
})
batch_test_images, batch_test_labels = batch_shuffle_rndc(test_data, test_label, self.scale, self.image_size, 0, 5)
#test_sum, test_stg1_loss, test_stg2_loss, test_stg3_loss, test_loss = self.sess.run([
test_sum, test_loss = self.sess.run([
self.merged_summary_test,
#self.l1_stg1_loss,
#self.l1_stg2_loss,
self.l1_stg3_loss,
#self.l1_all_stg_loss
],
feed_dict={
self.input: batch_test_images,
self.image_target: batch_test_labels,
self.dropout: 1.
})
print("Epoch: [{}], Train_loss: stg3: [{}], Test_loss: stg3: [{}]\n".format((ep+1), train_loss, test_loss))
#print("Epoch: [{}], Train_loss: stg: [{}, {}, {}], total loss: [{}]".format((ep+1), train_stg1_loss, train_stg2_loss, train_stg3_loss, train_loss))
#print("Epoch: [{}], Test_loss: stg: [{}, {}, {}], total loss: [{}]\n".format((ep+1), test_stg1_loss, test_stg2_loss, test_stg3_loss, test_loss))
if test_loss < best_loss:
best_loss = test_loss
self.save_best_ckpt(self.checkpoint_dir, self.ckpt_name, best_loss, itera_counter)
summary_writer.add_summary(train_sum, ep)
summary_writer.add_summary(test_sum, ep)
def build_GoogLeNet_edsr_v1(self):###
"""
Build GoogLeNet_edsr_v1 model
"""
# Define input and label images
self.input = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')
self.image_target = tf.placeholder(tf.float32, [None, None, None, self.color_dim], name='labels')
self.dropout = tf.placeholder(tf.float32, name='dropout')
self.lr = tf.placeholder(tf.float32, name='learning_rate')
"""
mean_x = tf.reduce_mean(self.input)
image_input = self.input - mean_x
mean_y = tf.reduce_mean(self.image_target)
target = self.image_target - mean_y
"""
self.image_input = self.input/255.
self.target = target = self.image_target/255.
        # Initialize the model zoo
mz = model_zoo.model_zoo(self.image_input, self.dropout, self.is_train, self.model_ticket)
# Build model
logits2, logits4 = mz.build_model({"scale":self.scale,"feature_size" :64})
# Loss
self.l1_loss2 = tf.reduce_mean(tf.losses.absolute_difference(target, logits2))
self.l1_loss4 = tf.reduce_mean(tf.losses.absolute_difference(target, logits4))
# Optimizer
self.train_op2 = tf.train.AdamOptimizer(self.lr).minimize(self.l1_loss2)
self.train_op4 = tf.train.AdamOptimizer(self.lr).minimize(self.l1_loss4)
if self.scale == 2:
self.logits = logits2
self.l1_loss = self.l1_loss2
self.train_op = self.train_op2
elif self.scale == 4:
self.logits = logits4
self.l1_loss = self.l1_loss4
self.train_op = self.train_op4
mse = tf.reduce_mean(tf.squared_difference(target*255.,self.logits*255.))
PSNR = tf.constant(255**2,dtype=tf.float32)/mse
PSNR = tf.constant(10,dtype=tf.float32)*log10(PSNR)
with tf.name_scope('train_summary'):
tf.summary.scalar("Loss", self.l1_loss, collections=['train'])
tf.summary.scalar("MSE", mse, collections=['train'])
tf.summary.scalar("PSNR",PSNR, collections=['train'])
tf.summary.image("input_image",self.input , collections=['train'])
tf.summary.image("target_image",target*255, collections=['train'])
tf.summary.image("output_image",self.logits*255, collections=['train'])
self.merged_summary_train = tf.summary.merge_all('train')
with tf.name_scope('test_summary'):
tf.summary.scalar("Loss", self.l1_loss, collections=['test'])
tf.summary.scalar("PSNR",PSNR, collections=['test'])
tf.summary.scalar("MSE", mse, collections=['test'])
tf.summary.image("input_image",self.input, collections=['test'])
tf.summary.image("target_image",target*255 , collections=['test'])
tf.summary.image("output_image",self.logits*255, collections=['test'])
self.merged_summary_test = tf.summary.merge_all('test')
self.saver = tf.train.Saver()
self.best_saver = tf.train.Saver()
def train_GoogLeNet_edsr_v1(self):
"""
Training process.
"""
print("Training...")
# Define dataset path
test_dataset = self.load_divk("/home/wei/ML/dataset/SuperResolution/Set5/preprocessed_scale_"+str(self.scale),type="test")
dataset = self.load_divk("/home/wei/ML/dataset/SuperResolution/DIV2K/")
log_dir = os.path.join(self.log_dir, self.ckpt_name, "log")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
summary_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
if self.load_ckpt(self.checkpoint_dir, self.ckpt_name):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
# Define iteration counter, timer and average loss
itera_counter = 0
learning_rate = 1e-4
#train_batch_num = len(train_data) // self.batch_size
best_loss = 100
epoch_pbar = tqdm(range(self.epoch))
for ep in epoch_pbar:
# Run by batch images
random.shuffle(dataset)
train_data, train_label = zip(*dataset)
test_data, test_label = zip(*test_dataset)
epoch_pbar.set_description("Epoch: [%2d], lr:%f" % ((ep+1), learning_rate))
epoch_pbar.refresh()
batch_pbar = tqdm(range(0, len(train_data)//self.batch_size), desc="Batch: [0]")
            if ep % 4000 == 0 and ep != 0:
                learning_rate = learning_rate / 2
for idx in batch_pbar:
itera_counter += 1
batch_index = idx*self.batch_size
batch_images, batch_labels = batch_shuffle_rndc(train_data, train_label, self.scale, self.image_size,batch_index, self.batch_size)
# Run the model
_, train_loss = self.sess.run([
self.train_op,
self.l1_loss
],
feed_dict={
self.input: batch_images,
self.image_target: batch_labels,
self.dropout: 1.,
self.lr:learning_rate
})
batch_pbar.set_description("Batch: [%2d], L1:%.2f" % (idx+1, train_loss))
if ep % 5 == 0:
self.save_ckpt(self.checkpoint_dir, self.ckpt_name, itera_counter)
train_sum, train_loss = self.sess.run([
self.merged_summary_train,
self.l1_loss,
],
feed_dict={
self.input: batch_images,
self.image_target: batch_labels,
self.dropout: 1.
})
batch_test_images, batch_test_labels = batch_shuffle_rndc(test_data, test_label, self.scale, self.image_size, 0, 5)
test_sum, test_loss = self.sess.run([
self.merged_summary_test,
self.l1_loss,
],
feed_dict={
self.input: batch_test_images,
self.image_target: batch_test_labels,
self.dropout: 1.
})
print("Epoch: [{}], Train_loss: [{}], Test_loss: [{}]\n".format((ep+1), train_loss, test_loss))
if test_loss < best_loss:
best_loss = test_loss
self.save_best_ckpt(self.checkpoint_dir, self.ckpt_name, best_loss, itera_counter)
summary_writer.add_summary(train_sum, ep)
summary_writer.add_summary(test_sum, ep)
def save_ckpt(self, checkpoint_dir, ckpt_name, step):
"""
Save the checkpoint.
According to the scale, use different folder to save the models.
"""
print(" [*] Saving checkpoints...step: [{}]".format(step))
model_name = ckpt_name
if ckpt_name == "":
model_dir = "%s_%s_%s" % ("srcnn", "scale", self.scale)
else:
model_dir = ckpt_name
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def save_best_ckpt(self, checkpoint_dir, ckpt_name, loss, step):
"""
Save the checkpoint.
According to the scale, use different folder to save the models.
"""
print(" [*] Saving best checkpoints...step: [{}]\n".format(step))
model_name = ckpt_name + "_{}".format(loss)
if ckpt_name == "":
model_dir = "%s_%s_%s" % ("srcnn", "scale", self.scale)
else:
model_dir = ckpt_name
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_dir = os.path.join(checkpoint_dir, "best_performance")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.best_saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load_ckpt(self, checkpoint_dir, ckpt_name=""):
"""
Load the checkpoint.
According to the scale, read different folder to load the models.
"""
print(" [*] Reading checkpoints...")
if ckpt_name == "":
model_dir = "%s_%s_%s" % ("srcnn", "scale", self.scale)
else:
model_dir = ckpt_name
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
def load_divk(self, dataset_path, mean=0, lrtype='bicubic', type='train'):
#dataset_path = "/home/ubuntu/dataset/SuperResolution/DIV2K/"
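        # Expected layout: training data under DIV2K_train_HR and DIV2K_train_LR_bicubic/X{scale}
        # (or the "unkown" LR variant); test data as Set5-style pairs img_00N_SRF_{scale}_LR.png / _HR.png.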
if type == "train":
sub_path = "DIV2K_train"
if lrtype == 'bicubic':
lr_subpath = sub_path + "_LR_bicubic/" + "X" + str(self.scale)
else:
lr_subpath = sub_path + "_LR_unkown/" + "X" + str(self.scale)
LR_path = os.path.join(dataset_path, lr_subpath)
HR_path = os.path.join(dataset_path, sub_path + "_HR")
hr_imgs = os.listdir(HR_path)
lr_imgs = [os.path.join(LR_path,hr_imgs[i].split(".")[0] + 'x' + str(self.scale)+'.' + hr_imgs[i].split(".")[1]) for i in range(len(hr_imgs))]
hr_imgs = [os.path.join(HR_path, hr_imgs[i]) for i in range(len(hr_imgs))]
if type == "test":
lr_imgs = []
hr_imgs = []
images = os.listdir(dataset_path)
for i in range(len(images)//2):
lr_imgs.append(os.path.join(dataset_path, "img_00"+str(i+1)+"_SRF_" + str(self.scale)+"_LR.png"))
hr_imgs.append(os.path.join(dataset_path, "img_00"+str(i+1)+"_SRF_"+ str(self.scale)+"_HR.png"))
hr_list = []
lr_list = []
for i in range(len(hr_imgs)):
sys.stdout.write("Load data:{}/{}".format(i,len(hr_imgs))+'\r')
sys.stdout.flush()
hr_list.append(misc.imread(hr_imgs[i]))
lr_list.append(misc.imread(lr_imgs[i]))
return list(zip(lr_list, hr_list))
|
Python
|
CL
|
0209b58a4498a0941680bcf05798bdbcd98f06b6ad0e9faba8da40f731b59ff2
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.ioff()  # turn off interactive plotting
import matplotlib as matplotlib
import numpy.fft as fft
import corner
import h5py
import sys
import scipy.interpolate
import tools
import map_cosmo
import xs_class
import PS_function
import itertools as itr
from scipy.optimize import curve_fit
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pixel_window_TF
#import matplotlib.colors as colors
#theory spectrum
k_th = np.load('k.npy')
ps_th = np.load('ps.npy')
ps_th_nobeam = np.load('psn.npy')  # theory spectrum without the instrumental beam; the beam (set by the telescope's intrinsic resolution and line broadening) suppresses small scales, so error bars grow at high k
#in 2D
ps_2d_smooth = np.load('ps_2d_smooth.npy')
ps_2d_notsmooth = np.load('ps_2d_notsmooth.npy')
#ps_2d_smooth = np.load('smooth_mean.npy')
#ps_2d_notsmooth = np.load('notsmooth_mean.npy')
#ps_2d_smooth = np.load('ps_smooth_single.npy') #'ps_2dfrom3d.npy'
#ps_2d_notsmooth = np.load('ps_notsmooth_single.npy')
k_smooth = np.load('k_smooth.npy')
#k_notsmooth = np.load('k_notsmooth.npy')
#print (ps_2d_smooth/ps_2d_notsmooth)
k_perp_sim = k_smooth[0]
k_par_sim = k_smooth[1]
transfer_sim_2D = scipy.interpolate.interp2d(k_perp_sim, k_par_sim, ps_2d_smooth/ps_2d_notsmooth)
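# 2D smoothing transfer function: ratio of the beam-smoothed to the unsmoothed simulated 2D power spectrum,
# interpolated over (k_perp, k_par) so it can be evaluated on the data k-grid.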
#values from COPPS
ps_copps = 8.746e3 * ps_th / ps_th_nobeam #shot noise level
ps_copps_nobeam = 8.7e3
transfer = scipy.interpolate.interp1d(k_th, ps_th / ps_th_nobeam)  # beam transfer function, always < 1; dividing by it inflates both the values and the uncertainties at high k
P_theory = scipy.interpolate.interp1d(k_th,ps_th_nobeam)
#Read the transfer function associated with effects of filtering
def filtering_TF(filename, dim):
if dim == 1:
with h5py.File(filename, mode="r") as my_file:
k = np.array(my_file['k'][:])
TF_1D = np.array(my_file['TF'][:])
return k, TF_1D
if dim == 2:
with h5py.File(filename, mode="r") as my_file:
k_perp = np.array(my_file['k'][0])
k_par = np.array(my_file['k'][1])
TF_2D = np.array(my_file['TF'][:])
return k_perp, k_par, TF_2D
k_filtering_1D, TF_filtering_1D = filtering_TF('TF_1d.h5', 1)
transfer_filt = scipy.interpolate.interp1d(k_filtering_1D, TF_filtering_1D)
k_perp_filt, k_par_filt, TF_filtering_2D = filtering_TF('TF_2d.h5', 2)
transfer_filt_2D = scipy.interpolate.interp2d(k_perp_filt, k_par_filt, TF_filtering_2D)
'''
pixel_window = np.load('pixel_window.npy')
pixel_window = np.mean(pixel_window, axis=0)
k_pw = np.load('k_arr.npy') #{10,2,14}
k_pw = k_pw[0]
print ('pw', pixel_window.shape)
pix_wind = scipy.interpolate.interp2d(k_pw[0], k_pw[1], pixel_window)
'''
pix_wind = pixel_window_TF.pw()
def read_h5_arrays(filename, two_dim=False):
with h5py.File(filename, mode="r") as my_file:
k = np.array(my_file['k'][:])
xs_mean = np.array(my_file['xs_mean'][:])
xs_sigma = np.array(my_file['xs_sigma'][:])
if two_dim == True:
k_edges_perp = np.array(my_file['k_edges_perp'][:])
k_edges_par = np.array(my_file['k_edges_par'][:])
return k, xs_mean, xs_sigma, k_edges_perp, k_edges_par
else:
return k, xs_mean, xs_sigma
k2, xs_mean2, xs_sigma2 = read_h5_arrays('co2_map_signal_1D_arrays.h5')
np.save('k_co2_ces.npy',np.array(k2[1]))
np.save('xs_co2_ces.npy',np.array(xs_mean2[1]))
np.save('sigma_co2_ces.npy',np.array(xs_sigma2[1]))
print (k2[1],xs_mean2[1],xs_sigma2[1])
'''
[0.01215163 0.0173808 0.02486021 0.03555821 0.05085983 0.07274615
0.10405072 0.14882648 0.21287041 0.30447412 0.4354973 0.6229032
0.89095476 1.27435592] [-111394.55879619 -35713.77249337 -63660.34236317 -2403.03453446
-10983.39995201 -62925.38996002 -51229.31200701 -26009.15152815
-5208.28158103 7848.84887646 -7672.5316699 -149.65956843
3036.73274278 -303.45343263] [502402.56502822 260465.31564856 191957.69052262 134950.36658772
93414.04731901 63488.46854868 41459.50760101 27083.94180769
17198.45857079 10336.38453305 6368.44938107 3865.63125151
2795.62660359 5222.61565755]
'''
k6, xs_mean6, xs_sigma6 = read_h5_arrays('co6_map_signal_1D_arrays.h5')
k7, xs_mean7, xs_sigma7 = read_h5_arrays('co7_map_signal_1D_arrays.h5')
def coadd_all_ces(k2, xs_mean2, xs_sigma2, k6, xs_mean6, xs_sigma6,k7, xs_mean7, xs_sigma7):
k2, xs2, sigma2 = k2[1], xs_mean2[1], xs_sigma2[1] #take CES
k6, xs6, sigma6 = k6[1], xs_mean6[1], xs_sigma6[1] #take CES
k7, xs7, sigma7 = k7[1], xs_mean7[1], xs_sigma7[1] #take CES
xs_sigma_arr = np.array([sigma2, sigma6, sigma7])
xs_mean_arr = np.array([xs2,xs6,xs7])
k2 = np.array(k2)
no_k = len(k2)
mean_combined = np.zeros(no_k)
w_sum = np.zeros(no_k)
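    # Inverse-variance coaddition of the three fields: weights w = 1/sigma^2, so the
    # combined mean is sum(w * xs) / sum(w) and the combined error is 1/sqrt(sum(w)).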
for i in range(3):
w = 1./ xs_sigma_arr[i]**2.
w_sum += w
mean_combined += w*xs_mean_arr[i]
mean_combined1 = mean_combined/w_sum
sigma_combined1 = w_sum**(-0.5)
mean_combined = mean_combined1/(transfer(k2)*transfer_filt(k2))
sigma_combined = sigma_combined1/(transfer(k2)*transfer_filt(k2))
np.save('k_ces.npy', k2)
np.save('sigma_ces.npy', sigma_combined)
np.save('xs_mean_ces.npy', mean_combined)
return mean_combined1, sigma_combined1
mean_combo, sigma_combo = coadd_all_ces(k2, xs_mean2, xs_sigma2, k6, xs_mean6, xs_sigma6,k7, xs_mean7, xs_sigma7)
print (np.load('co2_map_signal_1D_names.npy'))
'''
['xs_mean_co7_map_elev_cesc0.pdf' 'xs_mean_co7_map_elev_cesc1.pdf'
'xs_mean_co7_map_dayn_cesc0.pdf' 'xs_mean_co7_map_dayn_cesc1.pdf'
'xs_mean_co7_map_sidr_cesc0.pdf' 'xs_mean_co7_map_sidr_cesc1.pdf'
'xs_mean_co7_map_ambt_cesc0.pdf' 'xs_mean_co7_map_ambt_cesc1.pdf'
'xs_mean_co7_map_wind_cesc0.pdf' 'xs_mean_co7_map_wind_cesc1.pdf'
'xs_mean_co7_map_wint_cesc0.pdf' 'xs_mean_co7_map_wint_cesc1.pdf'
'xs_mean_co7_map_rise_cesc0.pdf' 'xs_mean_co7_map_rise_cesc1.pdf']
'''
#def xs_with_model_3fields(figure_name, k, xs_mean2, xs_mean6, xs_mean7, xs_sigma2, xs_sigma6, xs_sigma7, scan_strategy, mean_combo, sigma_combo):
def xs_with_model_3fields(figure_name, k, xs_mean2, xs_mean6, xs_mean7, xs_sigma2, xs_sigma6, xs_sigma7, scan_strategy):
if scan_strategy == 'ces':
titlename = 'CES scans'
if scan_strategy == 'liss':
titlename = 'Lissajous scans'
k_offset = k*0.025
k6 = k - k_offset
k7 = k + k_offset
k_combo = k + k_offset*2
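    # Small horizontal offsets so the three fields' points do not overlap at the same k.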
lim = np.mean(np.abs(xs_mean2[4:-2] * k[4:-2])) * 8
fig = plt.figure()
#fig.set_figwidth(8)
ax1 = fig.add_subplot(211)
ax1.errorbar(k6, k * xs_mean6 / (transfer(k)*transfer_filt(k)), k * xs_sigma6 / (transfer(k)*transfer_filt(k)), fmt='o', label=r'co6', color='teal', zorder=3)
ax1.errorbar(k7, k * xs_mean7 / (transfer(k)*transfer_filt(k)), k * xs_sigma7 / (transfer(k)*transfer_filt(k)), fmt='o', label=r'co7', color='purple', zorder=2)
ax1.errorbar(k, k * xs_mean2 / (transfer(k)*transfer_filt(k)), k * xs_sigma2 / (transfer(k)*transfer_filt(k)), fmt='o', label=r'co2', color='indianred', zorder=4)
#ax1.errorbar(k_combo, k * mean_combo / (transfer(k)*transfer_filt(k)), k * sigma_combo / (transfer(k)*transfer_filt(k)), fmt='o', label=r'combo', color='black', zorder=5)
#ax1.errorbar(k, k * xs_mean, k * xs_sigma, fmt='o', label=r'$k\tilde{C}_{data}(k)$')
ax1.plot(k, 0 * xs_mean2, 'k', alpha=0.4, zorder=1)
#ax1.plot(k, k*PS_function.PS_f(k)/ transfer(k), label='k*PS of the input signal')
#ax1.plot(k, k*PS_function.PS_f(k), label='k*PS of the input signal')
#ax1.plot(k_th, k_th * ps_th_nobeam * 10, '--', label=r'$10\times kP_{Theory}(k)$', color='dodgerblue')
#ax1.plot(k_th, k_th * ps_copps_nobeam * 5, 'g--', label=r'$5 \times kP_{COPPS}$ (shot)')
ax1.set_ylabel(r'$k\tilde{C}(k)$ [$\mu$K${}^2$ Mpc${}^2$]', fontsize=14)
if scan_strategy == 'ces':
ax1.set_ylim(-lim*3, lim*3) # ax1.set_ylim(0, 0.1)
if scan_strategy == 'liss':
ax1.set_ylim(-lim, lim) # ax1.set_ylim(0, 0.1)
ax1.set_xlim(0.04,0.7)
ax1.set_xscale('log')
#ax1.set_title(titlename, fontsize=16)
ax1.grid()
#ax1.set_xlabel(r'$k$ [Mpc${}^{-1}$]', fontsize=14)
labnums = [0.05,0.1, 0.2, 0.5]
ax1.set_xticks(labnums)
ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
#plt.legend(bbox_to_anchor=(0, 0.61))
ax1.legend(ncol=4)
ax2 = fig.add_subplot(212)
#ax2.plot(k, diff_mean / error, fmt='o', label=r'$\tilde{C}_{diff}(k)$', color='black')
ax2.errorbar(k6, xs_mean6 / xs_sigma6, xs_sigma6/xs_sigma6, fmt='o', label=r'co6', color='teal', zorder=3)
ax2.errorbar(k7, xs_mean7 / xs_sigma7, xs_sigma7/xs_sigma7, fmt='o', label=r'co7', color='purple', zorder=2)
ax2.errorbar(k, xs_mean2 / xs_sigma2, xs_sigma2/xs_sigma2, fmt='o', label=r'co2', color='indianred', zorder=4)
#ax2.errorbar(k_combo, mean_combo / sigma_combo, sigma_combo/sigma_combo, fmt='o', label=r'combo', color='black', zorder=5)
#ax2.errorbar(k, sum_mean / error, error /error, fmt='o', label=r'$\tilde{C}_{sum}(k)$', color='mediumorchid')
ax2.plot(k, 0 * xs_mean2, 'k', alpha=0.4, zorder=1)
#ax2.set_ylabel(r'$\tilde{C}(k) / \sigma_\tilde{C}$')
ax2.set_ylabel(r'$\tilde{C}(k) / \sigma_\tilde{C}$', fontsize=14)
ax2.set_xlabel(r'$k$ [Mpc${}^{-1}$]', fontsize=14)
ax2.set_ylim(-5, 5)
ax2.set_xlim(0.04,0.7)
ax2.set_xscale('log')
ax2.grid()
ax2.legend(ncol=4)
ax2.set_xticks(labnums)
ax2.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plt.tight_layout()
#plt.legend()
plt.savefig(figure_name, bbox_inches='tight')
#plt.show()
#elev
#xs_with_model_3fields('liss_all_fields_map_signal.pdf', k2[0],xs_mean2[0], xs_mean6[0], xs_mean7[0], xs_sigma2[0], xs_sigma6[0], xs_sigma7[0], 'liss')
#xs_with_model_3fields('ces_all_fields_map_signal_withcombo.pdf', k2[1],xs_mean2[1], xs_mean6[1], xs_mean7[1], xs_sigma2[1], xs_sigma6[1], xs_sigma7[1], 'ces', mean_combo, sigma_combo)
#xs_with_model_3fields('ces_all_fields_map_signal.pdf', k2[1],xs_mean2[1], xs_mean6[1], xs_mean7[1], xs_sigma2[1], xs_sigma6[1], xs_sigma7[1], 'ces')
#dayn
xs_with_model_3fields('liss_all_fields_map_signal_dayn.pdf', k2[2],xs_mean2[2], xs_mean6[2], xs_mean7[2], xs_sigma2[2], xs_sigma6[2], xs_sigma7[2], 'liss')
xs_with_model_3fields('ces_all_fields_map_signal_dayn.pdf', k2[3],xs_mean2[3], xs_mean6[3], xs_mean7[3], xs_sigma2[3], xs_sigma6[3], xs_sigma7[3], 'ces')
#wint
xs_with_model_3fields('liss_all_fields_map_signal_wint.pdf', k2[10],xs_mean2[10], xs_mean6[10], xs_mean7[10], xs_sigma2[10], xs_sigma6[10], xs_sigma7[10], 'liss')
xs_with_model_3fields('ces_all_fields_map_signal_wint.pdf', k2[11],xs_mean2[11], xs_mean6[11], xs_mean7[11], xs_sigma2[11], xs_sigma6[11], xs_sigma7[11], 'ces')
def log2lin(x, k_edges):
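    # Map log-spaced k values onto [0, 1] so they line up with imshow's extent=[0, 1, 0, 1].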
loglen = np.log10(k_edges[-1]) - np.log10(k_edges[0])
logx = np.log10(x) - np.log10(k_edges[0])
return logx / loglen
def xs_2D_plot(figure_name, k,k_bin_edges_par, k_bin_edges_perp, xs_mean2,xs_mean6,xs_mean7, xs_sigma2,xs_sigma6,xs_sigma7, titlename):
#k,k_bin_edges_par, k_bin_edges_perp, xs_mean, xs_sigma = k[3:],k_bin_edges_par[3:], k_bin_edges_perp[3:], xs_mean[3:], xs_sigma[3:]
fig, ax = plt.subplots(nrows=2,ncols=3,figsize=(15.5,8))
#fig.tight_layout(h_pad=0.005, w_pad=1)
fig.subplots_adjust(hspace=-0.5, wspace=0.0)
#fig.suptitle(titlename, fontsize=16)
#norm = mpl.colors.Normalize(vmin=1.3*np.amin(xs_mean7), vmax=-1.3*np.amin(xs_mean7))
#norm1 = mpl.colors.Normalize(vmin=1.3*np.amin(xs_mean7/xs_sigma7), vmax=-1.3*np.amin(xs_mean7/xs_sigma7))
norm = mpl.colors.Normalize(vmin=-800000, vmax=800000) #here it was 800000
norm1 = mpl.colors.Normalize(vmin=-5, vmax=5)
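    # Fixed, symmetric color scales so the three fields are directly comparable
    # (top row: cross-spectra; bottom row: signal-to-noise).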
img1 = ax[0][0].imshow(xs_mean2/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1])*pix_wind(k[0],k[1])), interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img1, ax=ax[0][0],fraction=0.046, pad=0.04)
img2 = ax[0][1].imshow(xs_mean6/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1])*pix_wind(k[0],k[1])), interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img2, ax=ax[0][1], fraction=0.046, pad=0.04)
img3 = ax[0][2].imshow(xs_mean7/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1])*pix_wind(k[0],k[1])), interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img3, ax=ax[0][2], fraction=0.046, pad=0.04).set_label(r'$\tilde{C}\left(k_{\bot},k_{\parallel}\right)$ [$\mu$K${}^2$ (Mpc)${}^3$]', size=14)
    # note: the colorbar on the previous line once reused img2 by mistake; it is now attached to img3
img4 = ax[1][0].imshow(xs_mean2/xs_sigma2, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm1)
fig.colorbar(img4, ax=ax[1][0],fraction=0.046, pad=0.04)
img5 = ax[1][1].imshow(xs_mean6/xs_sigma6, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm1)
fig.colorbar(img5, ax=ax[1][1], fraction=0.046, pad=0.04)
img6 = ax[1][2].imshow(xs_mean7/xs_sigma7, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm1)
fig.colorbar(img6, ax=ax[1][2], fraction=0.046, pad=0.04).set_label(r'$\tilde{C}\left(k_{\bot},k_{\parallel}\right)/\sigma_{\tilde{C}}$', size=14)
ticks = [0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09,0.1,
0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,1., 1.1, 1.2, 1.3]
majorticks = [ 0.03,0.1, 0.3,1]
majorlabels = [ '0.03','0.1', '0.3','1']
xbins = k_bin_edges_par
ticklist_x = log2lin(ticks[:-3], xbins)
majorlist_x = log2lin(majorticks, xbins)
ybins = k_bin_edges_perp
ticklist_y = log2lin(ticks, ybins)
majorlist_y = log2lin(majorticks, ybins)
ax[0][0].set_title(r'CO2', fontsize=16)
ax[0][1].set_title(r'CO6', fontsize=16)
ax[0][2].set_title(r'CO7', fontsize=16)
for i in range(3):
for j in range(2):
ax[j][i].set_xticks(ticklist_x, minor=True)
ax[j][i].set_xticks(majorlist_x, minor=False)
ax[j][i].set_xticklabels(majorlabels, minor=False, fontsize=12)
ax[j][i].set_yticks(ticklist_y, minor=True)
ax[j][i].set_yticks(majorlist_y, minor=False)
ax[j][i].set_yticklabels(majorlabels, minor=False, fontsize=12)
ax[j][i].tick_params(labelsize=12)
ax[1][0].set_xlabel(r'$k_{\parallel}$ [Mpc${}^{-1}$]',fontsize=14)
ax[0][0].set_ylabel(r'$k_{\bot}$ [Mpc${}^{-1}$]',fontsize=14)
ax[1][0].set_ylabel(r'$k_{\bot}$ [Mpc${}^{-1}$]',fontsize=14)
ax[1][1].set_xlabel(r'$k_{\parallel}$ [Mpc${}^{-1}$]', fontsize=14)
ax[1][2].set_xlabel(r'$k_{\parallel}$ [Mpc${}^{-1}$]', fontsize=14)
plt.tight_layout()
plt.savefig(figure_name)
def xs_2D_plot_pw_diff(figure_name, k,k_bin_edges_par, k_bin_edges_perp, xs_mean2,xs_mean6,xs_mean7, xs_sigma2,xs_sigma6,xs_sigma7, titlename):
#k,k_bin_edges_par, k_bin_edges_perp, xs_mean, xs_sigma = k[3:],k_bin_edges_par[3:], k_bin_edges_perp[3:], xs_mean[3:], xs_sigma[3:]
fig, ax = plt.subplots(nrows=2,ncols=3,figsize=(15.5,8))
#fig.tight_layout(h_pad=0.005, w_pad=1)
fig.subplots_adjust(hspace=-0.5, wspace=0.0)
#fig.suptitle(titlename, fontsize=16)
#norm = mpl.colors.Normalize(vmin=1.3*np.amin(xs_mean7), vmax=-1.3*np.amin(xs_mean7))
#norm1 = mpl.colors.Normalize(vmin=1.3*np.amin(xs_mean7/xs_sigma7), vmax=-1.3*np.amin(xs_mean7/xs_sigma7))
#norm = mpl.colors.SymLogNorm(linthresh=40000, vmin=-1000000, vmax=1000000)
norm = mpl.colors.Normalize(vmin=-800000/2., vmax=800000/2.)
xs2_pw = xs_mean2/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1])*pix_wind(k[0],k[1]))
xs2_npw = xs_mean2/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1]))
xs6_pw = xs_mean6/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1])*pix_wind(k[0],k[1]))
xs6_npw = xs_mean6/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1]))
xs7_pw = xs_mean7/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1])*pix_wind(k[0],k[1]))
xs7_npw = xs_mean7/(transfer_filt_2D(k[0],k[1])*transfer_sim_2D(k[0],k[1]))
#print ('this is weird',-np.mean(abs(xs2_pw-xs2_npw)), np.mean(abs(xs2_pw-xs2_npw)) )
#norm = mpl.colors.Normalize(vmin=-np.mean(abs(xs2_pw-xs2_npw)), vmax=np.mean(abs(xs2_pw-xs2_npw)))
#norm = mpl.colors.Normalize(vmin=0.86, vmax=1.05)
img1 = ax[0][0].imshow(xs2_npw, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img1, ax=ax[0][0],fraction=0.046, pad=0.04)
img2 = ax[0][1].imshow(xs6_npw, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img2, ax=ax[0][1], fraction=0.046, pad=0.04)
img3 = ax[0][2].imshow(xs7_npw, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img3, ax=ax[0][2], fraction=0.046, pad=0.04).set_label(r'$\tilde{C}\left(k_{\bot},k_{\parallel}\right)$ [$\mu$K${}^2$ (Mpc)${}^3$]', size=14)
img4 = ax[1][0].imshow(xs2_pw, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img4, ax=ax[1][0],fraction=0.046, pad=0.04)
img5 = ax[1][1].imshow(xs6_pw, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img5, ax=ax[1][1], fraction=0.046, pad=0.04)
img6 = ax[1][2].imshow(xs7_pw, interpolation='none', origin='lower',extent=[0,1,0,1], cmap='magma', norm=norm)
fig.colorbar(img6, ax=ax[1][2], fraction=0.046, pad=0.04).set_label(r'with pw, $\tilde{C}\left(k_{\bot},k_{\parallel}\right)$ [$\mu$K${}^2$ (Mpc)${}^3$]', size=14)
ticks = [0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09,0.1,
0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,1., 1.1, 1.2, 1.3]
majorticks = [ 0.03,0.1, 0.3,1]
majorlabels = [ '0.03','0.1', '0.3','1']
xbins = k_bin_edges_par
ticklist_x = log2lin(ticks[:-3], xbins)
majorlist_x = log2lin(majorticks, xbins)
ybins = k_bin_edges_perp
ticklist_y = log2lin(ticks, ybins)
majorlist_y = log2lin(majorticks, ybins)
ax[0][0].set_title(r'CO2', fontsize=16)
ax[0][1].set_title(r'CO6', fontsize=16)
ax[0][2].set_title(r'CO7', fontsize=16)
for i in range(3):
for j in range(2):
ax[j][i].set_xticks(ticklist_x, minor=True)
ax[j][i].set_xticks(majorlist_x, minor=False)
ax[j][i].set_xticklabels(majorlabels, minor=False, fontsize=12)
ax[j][i].set_yticks(ticklist_y, minor=True)
ax[j][i].set_yticks(majorlist_y, minor=False)
ax[j][i].set_yticklabels(majorlabels, minor=False, fontsize=12)
ax[j][i].tick_params(labelsize=12)
ax[1][0].set_xlabel(r'$k_{\parallel}$ [Mpc${}^{-1}$]',fontsize=14)
ax[0][0].set_ylabel(r'$k_{\bot}$ [Mpc${}^{-1}$]',fontsize=14)
ax[1][0].set_ylabel(r'$k_{\bot}$ [Mpc${}^{-1}$]',fontsize=14)
ax[1][1].set_xlabel(r'$k_{\parallel}$ [Mpc${}^{-1}$]', fontsize=14)
ax[1][2].set_xlabel(r'$k_{\parallel}$ [Mpc${}^{-1}$]', fontsize=14)
plt.tight_layout()
plt.savefig(figure_name)
'''
def read_h5_arrays(filename, two_dim=False):
with h5py.File(filename, mode="r") as my_file:
k = np.array(my_file['k'][:])
xs_mean = np.array(my_file['xs_mean'][:])
xs_sigma = np.array(my_file['xs_sigma'][:])
if two_dim == True:
k_edges_perp = np.array(my_file['k_edges_perp'][:])
k_edges_par = np.array(my_file['k_edges_par'][:])
return k, xs_mean, xs_sigma, k_edges_perp, k_edges_par
else:
return k, xs_mean, xs_sigma
'''
k2, xs_mean2, xs_sigma2, k_edges_perp2, k_edges_par2 = read_h5_arrays('co2_map_signal_2D_arrays.h5', two_dim=True)
print (xs_mean2[0])
k6, xs_mean6, xs_sigma6, k_edges_perp6, k_edges_par6 = read_h5_arrays('co6_map_signal_2D_arrays.h5', two_dim=True)
k7, xs_mean7, xs_sigma7, k_edges_perp7, k_edges_par7 = read_h5_arrays('co7_map_signal_2D_arrays.h5', two_dim=True)
#k2, xs_mean2, xs_sigma2, k_edges_perp2, k_edges_par2 = read_h5_arrays('co2_map_null_2D_arrays.h5', two_dim=True)
#xs_2D_plot('ces_3fields_2D_null.pdf', k2[0],k_edges_par2[0], k_edges_perp2[0], xs_mean2[0],xs_mean6[0],xs_mean7[0], xs_sigma2[0],xs_sigma6[0],xs_sigma7[0], 'Liss scans')
xs_2D_plot('liss_3fields_2D_pixel_window.pdf', k2[0],k_edges_par2[0], k_edges_perp2[0], xs_mean2[0],xs_mean6[0],xs_mean7[0], xs_sigma2[0],xs_sigma6[0],xs_sigma7[0], 'Liss scans')
#xs_2D_plot('ces_3fields_2D.pdf', k2[1],k_edges_par2[1], k_edges_perp2[1], xs_mean2[1],xs_mean6[1],xs_mean7[1], xs_sigma2[1],xs_sigma6[1],xs_sigma7[1], 'CES cans')
xs_2D_plot('ces_3fields_2D_pixel_window.pdf', k2[1],k_edges_par2[1], k_edges_perp2[1], xs_mean2[1],xs_mean6[1],xs_mean7[1], xs_sigma2[1],xs_sigma6[1],xs_sigma7[1], 'CES scans')
xs_2D_plot_pw_diff('liss_3fields_2D_pixel_window_diff.pdf', k2[0],k_edges_par2[0], k_edges_perp2[0], xs_mean2[0],xs_mean6[0],xs_mean7[0], xs_sigma2[0],xs_sigma6[0],xs_sigma7[0], 'Liss scans')
xs_2D_plot_pw_diff('ces_3fields_2D_pixel_window_diff.pdf', k2[1],k_edges_par2[1], k_edges_perp2[1], xs_mean2[1],xs_mean6[1],xs_mean7[1], xs_sigma2[1],xs_sigma6[1],xs_sigma7[1], 'CES scans')
print (np.load('co6_map_null_1D_names.npy'))
'''
['xs_mean_co6_map_elev_ambtsubtr_cesc0.pdf'
 'xs_mean_co6_map_elev_ambtsubtr_cesc1.pdf'
 'xs_mean_co6_map_elev_windsubtr_cesc0.pdf'
 'xs_mean_co6_map_elev_windsubtr_cesc1.pdf'
 'xs_mean_co6_map_elev_wintsubtr_cesc0.pdf'
 'xs_mean_co6_map_elev_wintsubtr_cesc1.pdf'
 'xs_mean_co6_map_elev_risesubtr_cesc0.pdf'
 'xs_mean_co6_map_elev_risesubtr_cesc1.pdf'
 'xs_mean_co6_map_elev_halfsubtr_cesc0.pdf'
 'xs_mean_co6_map_elev_halfsubtr_cesc1.pdf'
 'xs_mean_co6_map_elev_oddesubtr_cesc0.pdf'
 'xs_mean_co6_map_elev_oddesubtr_cesc1.pdf'
 'xs_mean_co6_map_elev_fpolsubtr_cesc0.pdf'
 'xs_mean_co6_map_elev_fpolsubtr_cesc1.pdf'
 'xs_mean_co6_map_elev_daynsubtr_cesc0.pdf'
 'xs_mean_co6_map_elev_daynsubtr_cesc1.pdf']
'''
def plot_sub_fig(field,jk_we_want,ax_i,lim,cesc,ax):
if field == 'CO2':
k, xs_mean, xs_sigma = read_h5_arrays('co2_map_null_1D_arrays.h5')
if field == 'CO6':
k, xs_mean, xs_sigma = read_h5_arrays('co6_map_null_1D_arrays.h5')
if field == 'CO7':
k, xs_mean, xs_sigma = read_h5_arrays('co7_map_null_1D_arrays.h5')
ax[ax_i].plot(k[0], 0 * xs_mean[0], 'k', alpha=0.4)
for index in jk_we_want:
if index == 4 or index == 5:
kt = -0.015
label_name = 'wint'
color_name = 'teal'
l1 = ax[ax_i].errorbar(k[index]+k[index]*kt, k[index] * xs_mean[index] / (transfer(k[index])*transfer_filt(k[index])), k[index] * xs_sigma[index] / (transfer(k[index])*transfer_filt(k[index])), fmt='o', label=label_name, color=color_name)
if index == 8 or index == 9:
kt = -0.005
label_name = 'half'
color_name = 'indianred'
l2 = ax[ax_i].errorbar(k[index]+k[index]*kt, k[index] * xs_mean[index] / (transfer(k[index])*transfer_filt(k[index])), k[index] * xs_sigma[index] / (transfer(k[index])*transfer_filt(k[index])), fmt='o', label=label_name, color=color_name)
if index == 10 or index == 11:
kt = 0.005
label_name = 'odde'
color_name = 'purple'
l3 = ax[ax_i].errorbar(k[index]+k[index]*kt, k[index] * xs_mean[index] / (transfer(k[index])*transfer_filt(k[index])), k[index] * xs_sigma[index] / (transfer(k[index])*transfer_filt(k[index])), fmt='o', label=label_name, color=color_name)
if index == 14 or index == 15:
kt = 0.015
label_name = 'dayn'
color_name = 'forestgreen'
l4 = ax[ax_i].errorbar(k[index]+k[index]*kt, k[index] * xs_mean[index] / (transfer(k[index])*transfer_filt(k[index])), k[index] * xs_sigma[index] / (transfer(k[index])*transfer_filt(k[index])), fmt='o', label=label_name, color=color_name)
if ax_i == 0:
ax[ax_i].set_ylabel(r'$k\tilde{C}(k)$ [$\mu$K${}^2$ Mpc${}^2$]', fontsize=14)
if cesc == '1':
ax[ax_i].set_ylim(-lim*6, lim*6)
if cesc == '0':
ax[ax_i].set_ylim(-lim*6, lim*6)
if field == 'CO6':
#ax[ax_i].xaxis.set_label_position('top')
#ax[ax_i].xaxis.tick_top()
if cesc == '0':
ax[ax_i].set_title('Lissajous scans', fontsize=16, pad=40)
if cesc == '1':
ax[ax_i].set_title('CES scans', fontsize=16, pad=40)
ax[ax_i].text(.5,.9,field,horizontalalignment='center',transform=ax[ax_i].transAxes, fontsize=16)
ax[ax_i].set_xlim(0.04,0.7)
ax[ax_i].set_xscale('log')
#ax[ax_i].set_title(field, fontsize=16)
ax[ax_i].grid()
ax[ax_i].set_xlabel(r'$k$ [Mpc${}^{-1}$]', fontsize=14)
labnums = [0.05,0.1, 0.2, 0.5]
ax[ax_i].set_xticks(labnums)
ax[ax_i].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
#plt.legend(bbox_to_anchor=(0, 0.61))
#ax[ax_i].legend(ncol=4)
return l1,l2,l3,l4
def plot_nulltest(cesc):
k7, xs_mean7, xs_sigma7 = read_h5_arrays('co7_map_null_1D_arrays.h5')
xs_mean7 = xs_mean7[8]
k7 = k7[8]
lim = np.mean(np.abs(xs_mean7[4:-2] * k7[4:-2])) * 8
if cesc == '0':
jk_we_want = [4,8,10,14] #indices of jk we want to use: wint, half, odde, dayn
if cesc == '1':
jk_we_want = [5,9,11,15]
fig, ax = plt.subplots(nrows=1,ncols=3,figsize=(17,4))
l1,l2,l3,l4 = plot_sub_fig('CO2',jk_we_want,0,lim,cesc,ax)
l1,l2,l3,l4 = plot_sub_fig('CO6',jk_we_want,1,lim,cesc,ax)
l1,l2,l3,l4 = plot_sub_fig('CO7',jk_we_want,2,lim,cesc,ax)
plt.figlegend((l1,l2,l3,l4), ('Winter/Summer split', 'Half-mission split', 'Odd/Even split', 'Day/Night split'),loc='upper center',bbox_to_anchor=(0.52,0.9), ncol=4, fontsize=14)
plt.tight_layout()
if cesc == '0':
#plt.title('Lissajous scans', fontsize=16, loc='right')
plt.savefig('nulltests_3fields_liss.pdf', bbox_inches='tight')
if cesc == '1':
#plt.title('CES scans', fontsize=16, loc='right')
plt.savefig('nulltests_3fields_ces.pdf', bbox_inches='tight')
plot_nulltest('0')
plot_nulltest('1')
def plot_sub_fig2(field,jk_we_want,ax_i,lim,cesc,ax):
if field == 'CO2':
k, xs_mean, xs_sigma = read_h5_arrays('co2_map_null_1D_arrays.h5')
if field == 'CO6':
k, xs_mean, xs_sigma = read_h5_arrays('co6_map_null_1D_arrays.h5')
if field == 'CO7':
k, xs_mean, xs_sigma = read_h5_arrays('co7_map_null_1D_arrays.h5')
ax[ax_i].plot(k[0], 0 * xs_mean[0], 'k', alpha=0.4)
for index in jk_we_want:
if index == 0 or index == 1:
kt = -0.015
label_name = 'ambt'
color_name = 'teal'
l1 = ax[ax_i].errorbar(k[index]+k[index]*kt, k[index] * xs_mean[index] / (transfer(k[index])*transfer_filt(k[index])), k[index] * xs_sigma[index] / (transfer(k[index])*transfer_filt(k[index])), fmt='o', label=label_name, color=color_name)
if index == 2 or index == 3:
kt = -0.005
label_name = 'wind'
color_name = 'indianred'
l2 = ax[ax_i].errorbar(k[index]+k[index]*kt, k[index] * xs_mean[index] / (transfer(k[index])*transfer_filt(k[index])), k[index] * xs_sigma[index] / (transfer(k[index])*transfer_filt(k[index])), fmt='o', label=label_name, color=color_name)
if index == 6 or index == 7:
kt = 0.005
label_name = 'rise'
color_name = 'purple'
l3 = ax[ax_i].errorbar(k[index]+k[index]*kt, k[index] * xs_mean[index] / (transfer(k[index])*transfer_filt(k[index])), k[index] * xs_sigma[index] / (transfer(k[index])*transfer_filt(k[index])), fmt='o', label=label_name, color=color_name)
if index == 12 or index == 13:
kt = 0.015
label_name = 'fpol'
color_name = 'forestgreen'
l4 = ax[ax_i].errorbar(k[index]+k[index]*kt, k[index] * xs_mean[index] / (transfer(k[index])*transfer_filt(k[index])), k[index] * xs_sigma[index] / (transfer(k[index])*transfer_filt(k[index])), fmt='o', label=label_name, color=color_name)
if ax_i == 0:
ax[ax_i].set_ylabel(r'$k\tilde{C}(k)$ [$\mu$K${}^2$ Mpc${}^2$]', fontsize=14)
if cesc == '1':
ax[ax_i].set_ylim(-lim*6, lim*6)
if cesc == '0':
ax[ax_i].set_ylim(-lim*6, lim*6)
if field == 'CO6':
#ax[ax_i].xaxis.set_label_position('top')
#ax[ax_i].xaxis.tick_top()
if cesc == '0':
ax[ax_i].set_title('Lissajous scans', fontsize=16, pad=40)
if cesc == '1':
ax[ax_i].set_title('CES scans', fontsize=16, pad=40)
ax[ax_i].text(.5,.9,field,horizontalalignment='center',transform=ax[ax_i].transAxes, fontsize=16)
ax[ax_i].set_xlim(0.04,0.7)
ax[ax_i].set_xscale('log')
#ax[ax_i].set_title(field, fontsize=16)
ax[ax_i].grid()
ax[ax_i].set_xlabel(r'$k$ [Mpc${}^{-1}$]', fontsize=14)
labnums = [0.05,0.1, 0.2, 0.5]
ax[ax_i].set_xticks(labnums)
ax[ax_i].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
#plt.legend(bbox_to_anchor=(0, 0.61))
#ax[ax_i].legend(ncol=4)
return l1,l2,l3,l4
def plot_nulltest2(cesc):
k7, xs_mean7, xs_sigma7 = read_h5_arrays('co7_map_null_1D_arrays.h5')
xs_mean7 = xs_mean7[8]
k7 = k7[8]
lim = np.mean(np.abs(xs_mean7[4:-2] * k7[4:-2])) * 8
if cesc == '0':
        jk_we_want = [0,2,6,12] #indices of jk we want to use: ambt, wind, rise, fpol
if cesc == '1':
jk_we_want = [1,3,7,13]
fig, ax = plt.subplots(nrows=1,ncols=3,figsize=(17,4))
l1,l2,l3,l4 = plot_sub_fig2('CO2',jk_we_want,0,lim,cesc,ax)
l1,l2,l3,l4 = plot_sub_fig2('CO6',jk_we_want,1,lim,cesc,ax)
l1,l2,l3,l4 = plot_sub_fig2('CO7',jk_we_want,2,lim,cesc,ax)
plt.figlegend((l1,l2,l3,l4), ('Ambient temp', 'Wind speed', 'Rise', 'Fpol'),loc='upper center',bbox_to_anchor=(0.52,0.9), ncol=4, fontsize=14)
plt.tight_layout()
    if cesc == '0':
        #plt.title('Lissajous scans', fontsize=16, loc='right')
        # Save under a distinct (hypothetical) filename so this figure does not overwrite the plot_nulltest() output.
        plt.savefig('nulltests2_3fields_liss.pdf', bbox_inches='tight')
    if cesc == '1':
        #plt.title('CES scans', fontsize=16, loc='right')
        plt.savefig('nulltests2_3fields_ces.pdf', bbox_inches='tight')
plot_nulltest2('0')
plot_nulltest2('1')
|
Python
|
CL
|
003d0dbc599329adf218a29eae88ef53e48023c558e701ef5de68ad7e85404dd
|
from __future__ import absolute_import
"""
init for mapanalysistools
"""
# Use Semantic Versioning, http://semver.org/
version_info = (0, 1, 0, '')
__version__ = '%d.%d.%d%s' % version_info
import mapanalysistools.getTable
import mapanalysistools.analyzeMapData
#import mapanalysistoolsplotMapData # removed - is an old unstructured version for source information
import mapanalysistools.functions
import mapanalysistools.colormaps
import mapanalysistools.digital_filters
|
Python
|
CL
|
69efda68472a0d523f1d226c18a92cc869d071c3eb565e5f813fab35f16d9d9f
|
import scipy
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
def butterlowpass(x, fpass, fstop, gpass, gstop, fs, dt, checkflag, labelname='Signal[-]'):
    '''
    :param x: input signal
    :param fpass: passband edge frequency [Hz]
    :param fstop: stopband edge frequency [Hz]
    :param gpass: maximum passband loss [dB]
    :param gstop: minimum stopband attenuation [dB]
    :param fs: sampling frequency [Hz]
    :param dt: sampling interval [s]
    :param checkflag: if True, plot the raw vs. filtered signal for inspection
    :param labelname: label used for the signal in the plot
    :return: filtered data
    '''
print('Applying filter against: {0}...'.format(labelname))
fn = 1 / (2 * dt)
Wp = fpass / fn
Ws = fstop / fn
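    # buttord picks the minimum Butterworth order meeting the pass/stop-band specs;
    # filtfilt below applies the filter forward and backward for zero phase distortion.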
N, Wn = signal.buttord(Wp, Ws, gpass, gstop)
b1, a1 = signal.butter(N, Wn, "low")
y = signal.filtfilt(b1, a1, x)
print(y)
    if checkflag:
time = np.arange(x.__len__()) * dt
plt.figure(figsize = (12, 5))
plt.title('Comparison between signals')
plt.plot(time, x, color='black', label='Raw signal')
plt.plot(time, y, color='red', label='Filtered signal')
plt.xlabel('Time[s]')
plt.ylabel(labelname)
plt.show()
return y
|
Python
|
CL
|
194f4198802ac992cd89f1b79c6f68963f6db88869f652766f0643a6d3cfd537
|
## Import relevant libraries and dependencies
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import random
# GPU check
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
## Good-Old Vanilla LSTM Model
class VanillaLSTM (nn.Module):
def __init__(self, hidden_dim, output_size, vocab_size, n_layers=1, memory_size=1, memory_dim = 1):
super(VanillaLSTM, self).__init__()
self.vocab_size = vocab_size
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.memory_size = memory_size
self.memory_dim = memory_dim
self.lstm = nn.LSTM(self.vocab_size, self.hidden_dim, self.n_layers)
self.W_y = nn.Linear(self.hidden_dim, output_size)
self.sigmoid = nn.Sigmoid ()
def init_hidden (self):
return (torch.zeros (self.n_layers, 1, self.hidden_dim).to(device),
torch.zeros (self.n_layers, 1, self.hidden_dim).to(device))
def forward(self, input, hidden0, stack=None, temperature=1.):
ht, hidden = self.lstm(input, hidden0)
output = self.sigmoid(self.W_y(ht)).view (-1, self.output_size)
return output, hidden, stack
## Stack-Augmented LSTM with a Softmax Decision Gate
class SLSTM_Softmax (nn.Module):
def __init__(self, hidden_dim, output_size, vocab_size, n_layers=1, memory_size=104, memory_dim = 5):
super(SLSTM_Softmax, self).__init__()
self.vocab_size = vocab_size
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.memory_size = memory_size
self.memory_dim = memory_dim
self.lstm = nn.LSTM(self.vocab_size, self.hidden_dim, self.n_layers)
self.W_y = nn.Linear(self.hidden_dim, output_size)
self.W_n = nn.Linear(self.hidden_dim, self.memory_dim)
self.W_a = nn.Linear(self.hidden_dim, 2)
self.W_sh = nn.Linear (self.memory_dim, self.hidden_dim)
# Actions -- push : 0 and pop: 1
self.softmax = nn.Softmax(dim=2)
self.sigmoid = nn.Sigmoid ()
def init_hidden (self):
return (torch.zeros (self.n_layers, 1, self.hidden_dim).to(device),
torch.zeros (self.n_layers, 1, self.hidden_dim).to(device))
def forward(self, input, hidden0, stack, temperature=1.):
h0, c0 = hidden0
hidden0_bar = self.W_sh (stack[0]).view(1, 1, -1) + h0
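        # Condition the LSTM on the stack top: project it into the hidden space and add it to h0.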
ht, hidden = self.lstm(input, (hidden0_bar, c0))
output = self.sigmoid(self.W_y(ht)).view(-1, self.output_size)
self.action_weights = self.softmax (self.W_a (ht)).view(-1)
self.new_elt = self.sigmoid (self.W_n(ht)).view(1, self.memory_dim)
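        # Differentiable stack update: form both the pushed and popped stacks and blend them
        # with the softmax action weights (index 0 ~ push, index 1 ~ pop).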
push_side = torch.cat ((self.new_elt, stack[:-1]), dim=0)
pop_side = torch.cat ((stack[1:], torch.zeros(1, self.memory_dim).to(device)), dim=0)
stack = self.action_weights [0] * push_side + self.action_weights [1] * pop_side
return output, hidden, stack
## Stack-Augmented LSTM with a Softmax Decision Gate (with Temperature)
class SLSTM_Softmax_Temperature (nn.Module):
def __init__(self, hidden_dim, output_size, vocab_size, n_layers=1, memory_size=104, memory_dim = 5):
super(SLSTM_Softmax_Temperature, self).__init__()
self.vocab_size = vocab_size
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.memory_size = memory_size
self.memory_dim = memory_dim
self.lstm = nn.LSTM(self.vocab_size, self.hidden_dim, self.n_layers)
self.W_y = nn.Linear(self.hidden_dim, output_size)
self.W_n = nn.Linear(self.hidden_dim, self.memory_dim)
self.W_a = nn.Linear(self.hidden_dim, 2)
self.W_sh = nn.Linear (self.memory_dim, self.hidden_dim)
self.sigmoid = nn.Sigmoid ()
def init_hidden (self):
return (torch.zeros (self.n_layers, 1, self.hidden_dim).to(device),
torch.zeros (self.n_layers, 1, self.hidden_dim).to(device))
def softmax_temp (self, arr, temp):
probs = torch.zeros (arr.shape).to(device)
for i in range (2):
probs [i] = torch.exp(arr[i]/temp)
probs = probs / probs.sum(dim=0)
return probs
def forward(self, input, hidden0, stack, temperature):
h0, c0 = hidden0
hidden0_bar = self.W_sh (stack[0]).view(1, 1, -1) + h0
        ht, hidden = self.lstm(input, (hidden0_bar, c0))
output = self.sigmoid(self.W_y(ht)).view(-1, self.output_size)
self.action_weights = self.softmax_temp (self.W_a (ht).view(-1), temperature).view(-1)
self.new_elt = self.sigmoid (self.W_n(ht)).view(1, self.memory_dim)
push_side = torch.cat ((self.new_elt, stack[:-1]), dim=0)
pop_side = torch.cat ((stack[1:], torch.zeros(1, self.memory_dim).to(device)), dim=0)
stack = self.action_weights [0] * push_side + self.action_weights [1] * pop_side
return output, hidden, stack
## Stack-Augmented LSTM Model with the Gumbel-Softmax Decision Gate
class SLSTM_GumbelSoftmax (nn.Module):
def __init__(self, hidden_dim, output_size, vocab_size, n_layers=1, memory_size=104, memory_dim = 5):
super(SLSTM_GumbelSoftmax, self).__init__()
self.vocab_size = vocab_size
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.memory_size = memory_size
self.memory_dim = memory_dim
self.lstm = nn.LSTM(self.vocab_size, self.hidden_dim, self.n_layers)
self.W_y = nn.Linear(self.hidden_dim, output_size)
self.W_n = nn.Linear(self.hidden_dim, self.memory_dim)
self.W_a = nn.Linear(self.hidden_dim, 2)
self.W_sh = nn.Linear (self.memory_dim, self.hidden_dim)
self.sigmoid = nn.Sigmoid ()
def init_hidden (self):
return (torch.zeros (self.n_layers, 1, self.hidden_dim).to(device),
torch.zeros (self.n_layers, 1, self.hidden_dim).to(device))
def forward(self, input, hidden0, stack, temperature):
h0, c0 = hidden0
hidden0_bar = self.W_sh (stack[0]).view(1, 1, -1) + h0
        ht, hidden = self.lstm(input, (hidden0_bar, c0))
output = self.sigmoid(self.W_y(ht)).view(-1, self.output_size)
self.action_weights = torch.nn.functional.gumbel_softmax (self.W_a (ht).view(1, -1), temperature).view(-1)
self.new_elt = self.sigmoid (self.W_n(ht)).view(1, self.memory_dim)
push_side = torch.cat ((self.new_elt, stack[:-1]), dim=0)
pop_side = torch.cat ((stack[1:], torch.zeros(1, self.memory_dim).to(device)), dim=0)
stack = self.action_weights [0] * push_side + self.action_weights [1] * pop_side
return output, hidden, stack
|
Python
|
CL
|
5f4b42540b6fedfb766efd7bc0af9511028d72dfd7abef0dcfdbcf5baacf7ff7
|
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtWidgets
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
import matplotlib
matplotlib.use('QT5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
try:
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QTAgg as NavigationToolbar
except ImportError:
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
try:
# This line was first commented
from mpl_toolkits.mplot3d import axes3d
except:
pass
try:
from mpl_toolkits.mplot3d import Axes3D
except:
pass
import matplotlib.pyplot as plt
matplotlib.rcParams[ 'font.size' ] = 9
matplotlib.rcParams[ 'font.family' ] = 'serif'
matplotlib.rcParams[ 'axes.labelsize' ] = 9
matplotlib.rcParams[ 'axes.titlesize' ] = 9
matplotlib.rcParams[ 'legend.fontsize' ] = 8
class QArkMplPlotWidget(QtWidgets.QWidget):
"""
"""
VIEW_MODE__PLOT = 1
VIEW_MODE__IMAGESHOW = 2
VIEW_MODE__MATRIXSHOW = 4
VIEW_MODE__WIREFRAME = 8
VIEW_MODE__SURFACE = 16
VIEW_MODE__COLORMESH = 32
def __init__( self
, parent=None
):
"""
"""
super(QArkMplPlotWidget, self).__init__(parent = parent)
self.initUi()
self.initConnection()
self.t_axes = {}
def initUi(self):
"""
"""
self.setObjectName(_fromUtf8("qArkMplPlotWidget"))
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
vbox = QtWidgets.QVBoxLayout()
#--------------------------------------------------------------------------------
# Plot zone
#--------------------------------------------------------------------------------
self.dpi = 70
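        # Embed the Matplotlib figure in the Qt widget via FigureCanvasQTAgg and attach the navigation toolbar.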
#self.figure = plt.Figure((10.0, 10.0), dpi=self.dpi)
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
print(self)
self.canvas.setParent(self)
self.mpl_toolbar = NavigationToolbar(self.canvas, self)
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
self.setLayout(vbox)
def initConnection( self ):
"""
"""
pass
def initAxe(self, _u_c, _u_mode ):
        if _u_mode is self.__class__.VIEW_MODE__PLOT:
            self.t_axes[_u_c] = self.figure.add_subplot(_u_c)
        elif _u_mode is self.__class__.VIEW_MODE__IMAGESHOW:
            self.t_axes[_u_c] = self.figure.add_subplot(_u_c)
        elif _u_mode is self.__class__.VIEW_MODE__MATRIXSHOW:
            self.t_axes[_u_c] = self.figure.add_subplot(_u_c)
        elif _u_mode is self.__class__.VIEW_MODE__WIREFRAME:
            self.t_axes[_u_c] = self.figure.add_subplot(_u_c, projection='3d')
        elif _u_mode is self.__class__.VIEW_MODE__SURFACE:
            self.t_axes[_u_c] = self.figure.add_subplot(_u_c, projection='3d')
        elif _u_mode is self.__class__.VIEW_MODE__COLORMESH:
            self.t_axes[_u_c] = self.figure.add_subplot(_u_c)
return self.t_axes[_u_c]
def enableCurrentFigure(self):
plt.figure( self.figure.number )
def displayPlot(self):
self.canvas.draw()
def savePlot(self, _s_filename):
self.canvas.print_figure(_s_filename)
def setPlotter(self, _o_plotter):
self.o_plotter = _o_plotter
self.enableCurrentFigure()
self.o_plotter.setFigure(self.figure)
self.o_plotter.plot()
self.displayPlot()
@QtCore.pyqtSlot()
def updatePlot(self):
self.o_plotter.plot()
self.displayPlot()
|
Python
|
CL
|
eb61f7b29ec92f48bf2ff4afba31b55b4dd23206f517b9bf13c55d7f4f86ab9a
|
# -*- mode: python -*-
import sys
sys.setrecursionlimit(5000)
block_cipher = None
a = Analysis(["begin.py"],
pathex=["C:\\Users\\Administrator\\Desktop\\tieba"],
binaries=[],
datas=[(".\\scrapy","scrapy"),(".\\scrapy.cfg","."),(".\\tieba","tieba"),
(".\\my_tk.py","."),(".\\tieba_log.py","."),(".\\search.py","."),(".\\find_path.py","."),
(".\\wordcloud","wordcloud")],
hiddenimports = ["scrapy.spiderloader",
"scrapy.statscollectors",
"scrapy.logformatter",
"scrapy.dupefilters",
"scrapy.squeues",
"scrapy.extensions.spiderstate",
"scrapy.extensions.corestats",
"scrapy.extensions.telnet",
"scrapy.extensions.logstats",
"scrapy.extensions.memusage",
"scrapy.extensions.memdebug",
"scrapy.extensions.feedexport",
"scrapy.extensions.closespider",
"scrapy.extensions.debug",
"scrapy.extensions.httpcache",
"scrapy.extensions.statsmailer",
"scrapy.extensions.throttle",
"scrapy.core.scheduler",
"scrapy.core.engine",
"scrapy.core.scraper",
"scrapy.core.spidermw",
"scrapy.core.downloader",
"scrapy.downloadermiddlewares.stats",
"scrapy.downloadermiddlewares.httpcache",
"scrapy.downloadermiddlewares.cookies",
"scrapy.downloadermiddlewares.useragent",
"scrapy.downloadermiddlewares.httpproxy",
"scrapy.downloadermiddlewares.ajaxcrawl",
"scrapy.downloadermiddlewares.chunked",
"scrapy.downloadermiddlewares.decompression",
"scrapy.downloadermiddlewares.defaultheaders",
"scrapy.downloadermiddlewares.downloadtimeout",
"scrapy.downloadermiddlewares.httpauth",
"scrapy.downloadermiddlewares.httpcompression",
"scrapy.downloadermiddlewares.redirect",
"scrapy.downloadermiddlewares.retry",
"scrapy.downloadermiddlewares.robotstxt",
"scrapy.spidermiddlewares.depth",
"scrapy.spidermiddlewares.httperror",
"scrapy.spidermiddlewares.offsite",
"scrapy.spidermiddlewares.referer",
"scrapy.spidermiddlewares.urllength",
"scrapy.pipelines",
#"scrapy.pipelines.images",
"scrapy.core.downloader.handlers.http",
"scrapy.core.downloader.contextfactory",
"json", "csv", "re","scrapy",'copy','codecs',
'emoji','webbrowser','subprocess','shutil','urllib',
'numpy','PIL','wordcloud','matplotlib','datetime',
'jieba','traceback','subprocess','shutil','urllib'
],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name="begin",
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=False)
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
name="begin")
|
Python
|
CL
|
47158cc6ad0d6badaec8790d1d370ca0d4797e324c78df706173aac7b7fcb555
|
import itertools
import pickle
import tensorflow as tf
import numpy as np
from PIL import Image
from dependencies import facenet
from dependencies import detect_face
class FaceDetection:
MINSIZE = 20 # Minimum size of face
    THRESHOLD = [0.6, 0.7, 0.7]  # Threshold for each of the three detection steps
FACTOR = 0.709 # Scale factor
def __init__(self, gpu_memory_fraction: float = 0.2):
"""Face detection class initialisation
        :param gpu_memory_fraction: The upper bound of GPU memory that can be used for face detection (default=20%)
"""
self.pnet, self.rnet, self.onet = self.create_network_face_detection(gpu_memory_fraction)
def create_network_face_detection(self, gpu_memory_fraction: float):
"""Create MTCNN face detection network
        :param gpu_memory_fraction: The upper bound of GPU memory that can be used for face detection
:return: MTCNN network instances (Proposal network, Refinement network and Output network)
"""
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
return pnet, rnet, onet
def extract_faces_from_image(self, image: np.ndarray, image_size: int = 160, margin: int = 44):
"""Find and extracts all faces in an image
:param image: The target frame (image) to find faces in
:param image_size: The size of the face images (default=160)
:param margin: The margin around the faces (default=44)
:return: List of found faces on the frame
:raises: ThresholdNotMetException: Raised when a face has a certainty below 95%
:raises: NoFacesFoundExeption: Raised when no faces are found in the frame
"""
faces = []
final_bounding_boxes = []
bounding_boxes, _ = detect_face.detect_face(image, self.MINSIZE,
self.pnet, self.rnet, self.onet, self.THRESHOLD, self.FACTOR)
nrof_faces = len(bounding_boxes)
if nrof_faces > 0:
frame_size = np.asarray(image.shape)[0:2]
for face_index_on_frame in range(nrof_faces):
if bounding_boxes[face_index_on_frame][4] > 0.95:
det = np.squeeze(bounding_boxes[face_index_on_frame, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, frame_size[1])
bb[3] = np.minimum(det[3] + margin / 2, frame_size[0])
cropped = np.array(image)[bb[1]:bb[3], bb[0]:bb[2], :]
aligned = Image.fromarray(cropped).resize((image_size, image_size), Image.ANTIALIAS)
prewhitened = facenet.prewhiten(np.array(aligned))
faces.append(prewhitened)
final_bounding_boxes.append(bounding_boxes[face_index_on_frame])
if len(faces) > 0:
return faces, final_bounding_boxes
return None, None
class FaceRecognition:
def __init__(self, pb_model_location: str, gpu_memory_fraction: float = 0.6):
"""Face recognition class initialisation
:param pb_model_location: Frozen (.pb) facenet model location on disk
        :param gpu_memory_fraction: The upper bound of GPU memory that can be used for face detection (default=60%)
"""
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
with tf.Graph().as_default():
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)) as self.sess:
facenet.load_model(pb_model_location)
# Retrieve the required input and output tensors for classification from the model
self.images_placeholder = self.sess.graph.get_tensor_by_name('input:0')
self.embeddings = self.sess.graph.get_tensor_by_name('embeddings:0')
self.phase_train_placeholder = self.sess.graph.get_tensor_by_name('phase_train:0')
self.embedding_size = self.embeddings.get_shape()[1]
def retrieve_embeddings_for_face_list(self, faces):
"""Retrieves the embeddings for a list of face images
:param faces: List of images containing aligned faces
:return: A list of embeddings corresponding to the faces
"""
feed_dict = {self.images_placeholder: faces, self.phase_train_placeholder: False}
emb_array = self.sess.run(self.embeddings, feed_dict=feed_dict)
return emb_array
def classify_person(self, unknown_embeddings):
try:
know_embeddings = np.load('models/known_persons.npy').item()
except IOError:
return 'Unknown'
lowest_dist = 2
name = ''
for person in know_embeddings:
for i in range(len(unknown_embeddings)):
dist = np.sqrt(
np.sum(np.square(np.subtract(unknown_embeddings[i], know_embeddings.get(person)))))
if dist < lowest_dist:
name = person
lowest_dist = dist
if lowest_dist < 1:
return name
else:
return 'Unknown'
def add_person(self, name, embedding):
try:
know_embeddings = np.load('models/known_persons.npy').item()
except IOError:
know_embeddings = {}
if name in know_embeddings:
know_embeddings.update({name: embedding})
else:
know_embeddings[name] = embedding
np.save('models/known_persons.npy', know_embeddings)
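# --- Usage sketch (added for illustration; not part of the original module) ---
# 'sample.jpg' and 'models/facenet.pb' are placeholder paths, not files shipped
# with this code.
if __name__ == '__main__':
    detector = FaceDetection(gpu_memory_fraction=0.2)
    recognizer = FaceRecognition('models/facenet.pb', gpu_memory_fraction=0.6)
    frame = np.array(Image.open('sample.jpg').convert('RGB'))
    faces, boxes = detector.extract_faces_from_image(frame)
    if faces:
        embeddings = recognizer.retrieve_embeddings_for_face_list(faces)
        print(recognizer.classify_person(embeddings))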
|
Python
|
CL
|
1b5856066c6d2a73a8cdbfad4d05d74bf251c85031d90b425a47b16459056559
|
from .errors import MissingScriptError
# from .registry import Component, get_script
from .top import find_script
from .decorators import Component
from .util import autofill_args
from omnibelt import get_printer
prt = get_printer(__name__)
@Component('run_mode/default')
class Run_Mode:
'''
Run modes are used to actually execute the script specified by the user in the run command.
It is recommended to register all run_modes with a ``run_mode/`` prefix, but not required.
'''
def __init__(self, A):
self.silent = A.pull('silent', True, silent=True)
@staticmethod
def get_script_info(script_name):
'''Given the name of the registered script, this returns the corresponding entry in the registry'''
return find_script(script_name)
def run(self, meta, config):
'''
When called this should actually execute the registered script whose name is specified under meta.script_name.
:param meta: meta config - contains script_name
:param config: config for script
:return: output of script
'''
script_name = meta.pull('script_name', silent=self.silent)
script_info = self.get_script_info(script_name)
if script_info is None:
raise MissingScriptError(script_name)
script_fn = script_info.fn
if script_info.use_config:
return script_fn(config)
return autofill_args(script_fn, config)
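# Sketch (assumption, not part of the original module): a project-specific run mode
# would typically subclass Run_Mode and register itself under its own
# 'run_mode/...' name, overriding run().
@Component('run_mode/dry')
class Dry_Run_Mode(Run_Mode):
    '''Resolves the requested script but does not execute it (illustrative only).'''
    def run(self, meta, config):
        script_name = meta.pull('script_name', silent=self.silent)
        if self.get_script_info(script_name) is None:
            raise MissingScriptError(script_name)
        prt.info('would run script %s', script_name)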
|
Python
|
CL
|
4bc743756b7bbc714cd9150a6df03ba28732f07bf64825ff165d02bf35bca35f
|
# Generated by Django 3.0.4 on 2020-08-18 09:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0145_invitations_date'),
]
operations = [
migrations.CreateModel(
name='Invitation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[(0, 'team'), (1, 'company')], default='team', max_length=15)),
('invitation_group_id', models.IntegerField(default=0)),
('date', models.DateField(null=True)),
('initiator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='main.Profile')),
('profile', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.Profile')),
],
options={
'db_table': 'Invitation',
},
),
migrations.DeleteModel(
name='Invitations',
),
]
|
Python
|
CL
|
bba18ad69aa37bc665ff4441083427dd94f2ead71ce0ad9f2384eccebbe735f2
|
from config import database_config as dbcfg
import pandas as pd
import pymysql
import config.database_config as dbc
import sys
from sqlalchemy import create_engine
import logging
def set_logger():
""" set scraper module logger
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(dbcfg.LOG_FILE)
file_handler.setFormatter(dbcfg.MAIN_FORMATTER)
logger.addHandler(file_handler)
return logger
def get_df(data):
"""Returns a data frame
"""
df = pd.DataFrame.from_dict(data, orient='index')
return df
def assign_types_to_df(df, table):
""" Assign type to data according to table definitions in db
"""
types = dbcfg.TABLES[table]
for col, field_type in types.items():
if field_type == 'date':
df[col] = pd.to_datetime(df[col])
else:
df[col] = df[col].astype(field_type)
return df[types.keys()]
def insert_dict_to_df(data_dict, chunk_size):
""" inserts data into tables in the database.
input: dictionary where keys are table names and values are dictionaries with data to insert to table
"""
engine = create_engine_con()
for table, data in data_dict.items():
if data:
df = get_df(data)
db_logger.info(f'data frame of table {table} has been created successfully')
df = assign_types_to_df(df, table)
db_logger.info(f'data type processing of table {table} - success')
df.to_sql(name=table, con=engine, if_exists='append', chunksize=chunk_size, index=False)
db_logger.info(f'data of {table} has been inserted to db successfully')
else:
db_logger.info(f'no data to insert to table {table} ')
def connect_sql(host=dbc.HOST, user=dbc.USERNAME, password=dbc.PASSWORD):
""" connect pymysql
"""
try:
con = pymysql.connect(host=host, user=user, password=password)
return con
except pymysql.err.OperationalError as err:
print("couldn't connect pymysql. please verify that host, username and \n"
"password are all correct and try again", format(err))
db_logger.error(f' connection to pymysql failed')
sys.exit(1)
def execute_sql(sql_string, con):
""" execute sql statement
"""
cur = con.cursor()
cur.execute(sql_string)
def use_database(database_to_use=dbcfg.DATABASE_NAME):
    """ creates a connection and selects the sql database to use
    """
    con = connect_sql()
    try:
        execute_sql(f"USE {database_to_use}", con)
    except pymysql.err.OperationalError:
        print(f"Database {database_to_use} was not found.\n"
              f"Please make sure you set the correct database name in the database_config file.\n"
              f"If the database does not exist, please create it first. Find more information in the README")
        db_logger.error(f'database {database_to_use} not found')
        sys.exit(1)
    db_logger.info(f'connected to database {database_to_use} successfully')
    return con
def create_engine_con():
""" creating connection to mysql database
"""
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}"
.format(user=dbcfg.USERNAME,
pw=dbcfg.PASSWORD,
db=dbcfg.DATABASE_NAME))
db_logger.info(f'created engine to connect mysql successfully')
return engine
db_logger = set_logger()
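# --- Usage sketch (added for illustration; not part of the original module) ---
# Table name and columns below are placeholders and must match the definitions
# in database_config.TABLES for assign_types_to_df() to work.
if __name__ == '__main__':
    sample_data = {
        'some_table': {
            0: {'id': 1, 'created': '2021-01-01'},
            1: {'id': 2, 'created': '2021-01-02'},
        }
    }
    insert_dict_to_df(sample_data, chunk_size=1000)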
|
Python
|
CL
|
3e4140cdd22fbc2cd9d9cb8755c6337761d96e9cf48de14204bf829346df92db
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
################################################################################
#
# Copyright (c) 2014 All Rights Reserved
#
################################################################################
"""
This module realizes unit test for mini spider.
Author: weileizhe
Date: 2014/11/12 00:00:06
"""
import httpretty
import os
import Queue
import shutil
import unittest
import mini_spider
class TestParseConfiguration(unittest.TestCase):
""" Test parse_configuration(configuration_file_name) function.
"""
def setUp(self):
""" Set up test.
"""
self.configuration_file_path = 'test.conf'
def tearDown(self):
""" Tear down test.
"""
if os.path.exists(self.configuration_file_path):
os.remove(self.configuration_file_path)
def write_configuration_file(self, content):
""" Write content to configuration file.
Args:
content: The content of configuration file.
"""
with open(self.configuration_file_path, 'w') as configuration_file:
configuration_file.write(content)
def test_normal_configuration(self):
""" Test for normal configuration file parse.
"""
self.write_configuration_file(
'[spider]\n'
'url_list_file: ./urls\n'
'output_directory: ./output\n'
'max_depth: 6\n'
'crawl_interval: 1\n'
'crawl_timeout: 5\n'
'target_url: .*\.(gif|png|jpg|bmp)$\n'
'thread_count: 8\n'
)
configuration = mini_spider.parse_configuration(self.configuration_file_path)
self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')
self.assertEqual(configuration.get('spider', 'output_directory'), './output')
self.assertEqual(configuration.getint('spider', 'max_depth'), 6)
self.assertEqual(configuration.getint('spider', 'crawl_interval'), 1)
self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 5)
self.assertEqual(configuration.getint('spider', 'thread_count'), 8)
self.assertEqual(configuration.get('spider', 'target_url'), '.*\.(gif|png|jpg|bmp)$')
def test_partly_default_configuration(self):
""" Test for partly default configuration file parse.
"""
self.write_configuration_file(
'[spider]\n'
'max_depth: 10\n'
'crawl_interval: 2\n'
'crawl_timeout: 10\n'
'target_url: .*\.(com|cn|net)$\n'
)
configuration = mini_spider.parse_configuration(self.configuration_file_path)
self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')
self.assertEqual(configuration.get('spider', 'output_directory'), './output')
self.assertEqual(configuration.getint('spider', 'max_depth'), 10)
self.assertEqual(configuration.getint('spider', 'crawl_interval'), 2)
self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 10)
self.assertEqual(configuration.getint('spider', 'thread_count'), 8)
self.assertEqual(configuration.get('spider', 'target_url'), '.*\.(com|cn|net)$')
def test_fully_default_configuration(self):
""" Test for fully default configuration file parse.
"""
configuration = mini_spider.parse_configuration(self.configuration_file_path)
self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')
self.assertEqual(configuration.get('spider', 'output_directory'), './output')
self.assertEqual(configuration.getint('spider', 'max_depth'), 1)
self.assertEqual(configuration.getint('spider', 'crawl_interval'), 1)
self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 1)
self.assertEqual(configuration.getint('spider', 'thread_count'), 8)
self.assertEqual(configuration.get('spider', 'target_url'), '.*\.(gif|png|jpg|bmp)$')
def test_invalid_max_depth_configuration(self):
""" Test for invalid max_depth configuration parse.
"""
self.write_configuration_file(
'[spider]\n'
'max_depth: -1\n'
)
with self.assertRaises(mini_spider.ConfigurationException):
mini_spider.parse_configuration(self.configuration_file_path)
def test_invalid_crawl_interval_configuration(self):
""" Test for invalid crawl_interval configuration parse.
"""
self.write_configuration_file(
'[spider]\n'
'crawl_interval: 0\n'
)
with self.assertRaises(mini_spider.ConfigurationException):
mini_spider.parse_configuration(self.configuration_file_path)
def test_invalid_crawl_timeout_configuration(self):
""" Test for invalid crawl_timeout configuration parse.
"""
self.write_configuration_file(
'[spider]\n'
'crawl_timeout: 0\n'
)
with self.assertRaises(mini_spider.ConfigurationException):
mini_spider.parse_configuration(self.configuration_file_path)
def test_invalid_thread_count_configuration(self):
""" Test for invalid thread_count configuration parse.
"""
self.write_configuration_file(
'[spider]\n'
'thread_count: 0\n'
)
with self.assertRaises(mini_spider.ConfigurationException):
mini_spider.parse_configuration(self.configuration_file_path)
class TestMiniSpiderThread(unittest.TestCase):
""" Test for MiniSpiderThread.
"""
def setUp(self):
""" Set up test.
"""
self.url_queue = Queue.Queue()
self.crawled_urls = set()
self.mini_spider_thread = mini_spider.MiniSpiderThread(self.url_queue, self.crawled_urls,
6, 2, 5, '.*\.(gif|png|jpg|bmp)$',
'./output_test')
self.url_obj = mini_spider.Url('http://example.com/iterate_next_urls/html_webpage',
0)
httpretty.enable()
httpretty.register_uri(httpretty.GET,
'http://example.com/graburl/success', body = 'Grab url success.')
httpretty.register_uri(httpretty.GET,
'http://example.com/graburl/fail', status = 404)
httpretty.register_uri(httpretty.GET,
'http://example.com/savewebpage/saved.txt',
body = 'Saved webpage content.')
httpretty.register_uri(httpretty.GET,
'http://example.com/iterate_next_urls/html_webpage',
content_type = 'text/html',
body = '<a href="/test/test1.html">Link</a>\
<img src="/test/test3.png" /><script src="/test/test4.js"></script>')
httpretty.register_uri(httpretty.GET,
'http://example.com/iterate_next_urls/not_html_webpage',
content_type = 'text/plain',
body = '/test/not_html.txt')
def tearDown(self):
""" Tear down test.
"""
httpretty.disable()
httpretty.reset()
if os.path.exists('output_test'):
shutil.rmtree('output_test')
def test_grab_url_success(self):
""" Test grabing url success.
"""
self.mini_spider_thread.grab_url('http://example.com/graburl/success')
self.assertTrue(self.mini_spider_thread.grab_url_success)
self.assertEqual(self.mini_spider_thread.url_response.read(), 'Grab url success.')
def test_grab_url_fail(self):
""" Test grabing url fail.
"""
self.mini_spider_thread.grab_url('http://example.com/graburl/fail')
self.assertFalse(self.mini_spider_thread.grab_url_success)
def test_save_specific_webpage(self):
""" Test saving specific webpage.
"""
self.mini_spider_thread.grab_url('http://example.com/savewebpage/saved.txt')
self.mini_spider_thread.grab_url_success = True
self.mini_spider_thread.save_specific_webpage('http://example.com/savewebpage/saved.txt',
self.mini_spider_thread.output_directory)
saved_path = os.path.join(self.mini_spider_thread.output_directory,
'http%3A%2F%2Fexample.com%2Fsavewebpage%2Fsaved.txt')
self.assertTrue(os.path.exists(saved_path))
with open(saved_path, 'r') as saved_file:
self.assertEqual(saved_file.read(), 'Saved webpage content.')
def test_iterate_next_urls_html(self):
""" Test interate next urls for html type webpage.
"""
self.mini_spider_thread.grab_url('http://example.com/iterate_next_urls/html_webpage')
self.assertTrue(self.mini_spider_thread.grab_url_success)
self.assertEqual(list(self.mini_spider_thread.iterate_next_urls(self.url_obj))[0],
'http://example.com/test/test1.html')
def test_iterate_next_urls_not_html(self):
""" Test iterate next urls for not html type webpage.
"""
self.mini_spider_thread.grab_url('http://example.com/iterate_next_urls/not_html_webpage')
self.assertTrue(self.mini_spider_thread.grab_url_success)
self.assertEqual(len(list(self.mini_spider_thread.iterate_next_urls(self.url_obj))), 0)
def main():
""" Main function entrance.
"""
unittest.main()
if __name__ == '__main__':
main()
|
Python
|
CL
|
b8681be4a3d405277aa4e67a810b7306977127ddd809ddea8499b37f8dfdf6e6
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
eventlet.monkey_patch(
os=True, select=True, socket=True, thread=True, time=True)
import gettext
import sys
from eventlet import wsgi
from oslo_config import cfg
from oslo_log import log as logging
gettext.install('blazar')
from blazar.api import app as wsgi_app
from blazar.api.v2 import app as v2_app
from blazar.notification import notifier
from blazar.utils import service as service_utils
opts = [
cfg.IntOpt('port',
default=1234,
min=0,
max=65535,
help='Port that will be used to listen on'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_cli_opts(opts)
CONF.import_opt('host', 'blazar.config')
CONF.import_opt('enable_v1_api', 'blazar.config')
def main():
"""Entry point to start Blazar API wsgi server."""
cfg.CONF(sys.argv[1:], project='blazar', prog='blazar-api')
notifier.init()
service_utils.prepare_service(sys.argv)
if not CONF.enable_v1_api:
app = v2_app.make_app()
else:
app = wsgi_app.VersionSelectorApplication()
wsgi.server(eventlet.listen((CONF.host, CONF.port), backlog=500), app)
if __name__ == '__main__':
main()
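# Launch sketch (assumption, not part of the original module): packaged with a
# console entry point, the server is typically started along the lines of
#   blazar-api --config-file /etc/blazar/blazar.conf --port 1234
# where --port maps to the IntOpt registered above and host comes from blazar.config.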
|
Python
|
CL
|
8b2f48d14b5fe5ff89912fabd5d89d80d55613005bc4eded4271c9db6dd3b21f
|
"""Rocket league item pipeline."""
from scrapy.exceptions import DropItem
from scrapy.spiders import Spider
from rlgpy.scraper.items import (
RlItem,
RlTrade,
RlAchievement
)
# Normal for pipeline class... pylint: disable=too-few-public-methods
class RlItemPipeline:
"""Rocket League item data pipeline."""
def __init__(self):
"""Initialize pipeline with a set containing unique item ids to avoid duplicates."""
self.item_ids = set()
# Required argument for pipeline fn... pylint: disable=unused-argument
def process_item(self, item: RlItem, spider: Spider) -> RlItem:
"""Ensure no duplicate items are exported and set default field values."""
if item['data_id'] in self.item_ids:
raise DropItem('Item already added.')
self.item_ids.add(item['data_id'])
for field in item.fields:
item.setdefault(field, '')
return item
class RlTradePipeline:
"""Rocket League trade pipeline."""
# Required argument for pipeline fn... pylint: disable=unused-argument,no-self-use
def process_item(self, item: RlTrade, spider: Spider) -> RlTrade:
"""Set the default values for tradeable items without certifications.
By default, tradeable items without a certification or paint or count will not have the
        keys 'certification', 'paint', or 'count'. To normalize the data, the keys are added with
a default value.
"""
for tradeable_item in item['have'] + item['want']:
tradeable_item.setdefault('count', 1)
tradeable_item.setdefault('certification', '')
tradeable_item.setdefault('paint', '')
return item
class RlAchievementPipeline:
"""Rocket League achievement pipeline."""
# Required argument for pipeline fn... pylint: disable=unused-argument,no-self-use
def process_item(self, item: RlAchievement, spider: Spider) -> RlAchievement:
"""Set default values for achievement items without gamerscore fields."""
item.setdefault('gamerscore', 0)
return item
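# Wiring sketch (assumption, not part of the original module): these pipelines are
# enabled from the Scrapy project's settings.py via ITEM_PIPELINES; the dotted
# module path below is a guess based on the imports above.
# ITEM_PIPELINES = {
#     'rlgpy.scraper.pipelines.RlItemPipeline': 100,
#     'rlgpy.scraper.pipelines.RlTradePipeline': 200,
#     'rlgpy.scraper.pipelines.RlAchievementPipeline': 300,
# }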
|
Python
|
CL
|
0c67d90d15895c549e74ed416cea10ddae3fc61a044383543b9a565d2e4eb668
|
import collections
import logging
import re
import mongoengine as me
import pymongo
import review
import rating
import section
import term
from rmc.shared import rmclogger
from rmc.shared import util
import user_course as _user_course
# TODO(david): Add usefulness
_SORT_MODES = [
{
'name': 'popular',
'direction': pymongo.DESCENDING,
'field': 'interest.count',
'is_rating': False,
},
{
'name': 'friends_taken',
'direction': pymongo.DESCENDING,
# Default in case no current user, else we do our own in-memory sorting
'field': 'interest.count',
'is_rating': False,
},
{
'name': 'interesting',
'direction': pymongo.DESCENDING,
'field': 'interest',
'is_rating': True,
},
{
'name': 'easy',
'direction': pymongo.DESCENDING,
'field': 'easiness',
'is_rating': True,
},
{
'name': 'hard',
'direction': pymongo.ASCENDING,
'field': 'easiness',
'is_rating': True,
},
{
'name': 'course code',
'direction': pymongo.ASCENDING,
'field': 'id',
'is_rating': False,
},
]
_SORT_MODES_BY_NAME = {sm['name']: sm for sm in _SORT_MODES}
class Course(me.Document):
meta = {
'indexes': [
'_keywords',
'interest.rating',
'interest.count',
'easiness.rating',
'easiness.count',
'usefulness.rating',
'usefulness.count',
'overall.rating',
'overall.count',
],
}
# eg. earth121l
id = me.StringField(primary_key=True)
# eg. earth
department_id = me.StringField(required=True)
# eg. 121l
number = me.StringField(required=True)
# eg. Introductory Earth Sciences Laboratory 1
name = me.StringField(required=True)
# Description about the course
description = me.StringField(required=True)
easiness = me.EmbeddedDocumentField(rating.AggregateRating,
default=rating.AggregateRating())
interest = me.EmbeddedDocumentField(rating.AggregateRating,
default=rating.AggregateRating())
usefulness = me.EmbeddedDocumentField(rating.AggregateRating,
default=rating.AggregateRating())
# TODO(mack): deprecate overall rating
overall = me.EmbeddedDocumentField(rating.AggregateRating,
default=rating.AggregateRating())
professor_ids = me.ListField(me.StringField())
antireqs = me.StringField()
coreqs = me.StringField()
prereqs = me.StringField()
    # NOTE: The word term is overloaded based on where it's used. Here, it means
# which terms of the year is the course being offered?
# NOTE: THIS FIELD IS ***DEPRECATED***, because the data source we get
# info about this is not reliable. There may not exist such reliable
# data at all -- course offerings are decided on an annual basis.
# TODO(david): Remove this field and replace it with info from sections.
# e.g. ['01', '05', '09']
terms_offered = me.ListField(me.StringField())
# eg. ['earth', '121l', 'earth121l', 'Introductory',
# 'Earth' 'Sciences', 'Laboratory', '1']
_keywords = me.ListField(me.StringField(), required=True)
SORT_MODES = _SORT_MODES
@property
def code(self):
matches = re.findall(r'^([a-z]+)(.*)$', self.id)[0]
department = matches[0]
number = matches[1]
return '%s %s' % (department.upper(), number.upper())
def save(self, *args, **kwargs):
if not self.id:
# id should not be set during first save
self.id = self.department_id + self.number
super(Course, self).save(*args, **kwargs)
def get_ratings(self):
# Ordered for consistency with CourseReview.rating_fields; see #109.
return collections.OrderedDict([
('usefulness', self.usefulness.to_dict()),
('easiness', self.easiness.to_dict()),
('interest', self.interest.to_dict()),
])
def get_reviews(self, current_user=None, user_courses=None):
"""Return a list of all user reviews ("tips") about this course.
Does not include professor reviews.
Arguments:
current_user: The current user. Used for revealing more author
information if possible (eg. reviews written by friends who
allow their friends to know that they wrote it).
user_courses: An optional list of all user_courses that's
associated with this course to speed up this function.
"""
if not user_courses:
limit_fields = ['course_id', 'user_id', 'course_review']
user_courses = _user_course.UserCourse.objects(
course_id=self.id).only(*limit_fields)
reviews = []
for uc in user_courses:
if (len(uc.course_review.comment) <
review.CourseReview.MIN_REVIEW_LENGTH):
continue
reviews.append(uc.course_review.to_dict(current_user, uc.user_id,
uc.id))
# Filter out old reviews if we have enough results.
date_getter = lambda review: review['comment_date']
reviews = util.publicly_visible_ratings_and_reviews_filter(
reviews, date_getter, util.MIN_NUM_REVIEWS)
return reviews
    # TODO(mack): this function is way too overloaded; consider separating it into
    # multiple functions based on usage
@classmethod
def get_course_and_user_course_dicts(cls, courses, current_user,
include_friends=False, include_all_users=False,
full_user_courses=False, include_sections=False):
limited_user_course_fields = [
'program_year_id', 'term_id', 'user_id', 'course_id']
course_dicts = [course.to_dict() for course in courses]
course_ids = [c['id'] for c in course_dicts]
if include_sections:
for course_dict in course_dicts:
# By default, we'll send down section info for current and next
# term for each course we return.
sections = section.Section.get_for_course_and_recent_terms(
course_dict['id'])
course_dict['sections'] = [s.to_dict() for s in sections]
ucs = []
if not current_user:
if include_all_users:
ucs = _user_course.UserCourse.objects(
course_id__in=course_ids)
if not full_user_courses:
ucs.only(*limited_user_course_fields)
ucs = list(ucs)
uc_dicts = [uc.to_dict() for uc in ucs]
return course_dicts, uc_dicts, ucs
else:
return course_dicts, [], []
uc_dicts = []
if include_all_users or include_friends:
query = {
'course_id__in': course_ids,
}
# If we're just including friends
if not include_all_users:
query['user_id__in'] = current_user.friend_ids
if full_user_courses:
if not include_all_users:
query.setdefault('user_id__in', []).append(current_user.id)
ucs = list(_user_course.UserCourse.objects(**query))
uc_dicts = [uc.to_dict() for uc in ucs]
else:
ucs = list(_user_course.UserCourse.objects(**query).only(
*limited_user_course_fields))
friend_uc_fields = ['id', 'user_id', 'course_id', 'term_id',
'term_name']
uc_dicts = [uc.to_dict(friend_uc_fields) for uc in ucs]
# TODO(mack): optimize to not always get full user course
# for current_user
current_ucs = list(_user_course.UserCourse.objects(
user_id=current_user.id,
course_id__in=course_ids,
id__nin=[uc_dict['id'] for uc_dict in uc_dicts],
))
ucs += current_ucs
uc_dicts += [uc.to_dict() for uc in current_ucs]
current_user_course_by_course = {}
friend_user_courses_by_course = {}
current_friends_set = set(current_user.friend_ids)
current_user_course_ids = set(current_user.course_history)
for uc_dict in uc_dicts:
if uc_dict['id'] in current_user_course_ids:
current_user_course_by_course[uc_dict['course_id']] = uc_dict
elif include_friends:
if uc_dict['user_id'] in current_friends_set:
friend_user_courses_by_course.setdefault(
uc_dict['course_id'], []).append(uc_dict)
for course_dict in course_dicts:
current_uc = current_user_course_by_course.get(
course_dict['id'])
current_uc_id = current_uc['id'] if current_uc else None
course_dict['user_course_id'] = current_uc_id
if include_friends:
friend_ucs = friend_user_courses_by_course.get(
course_dict['id'], [])
friend_uc_ids = [uc['id'] for uc in friend_ucs]
course_dict['friend_user_course_ids'] = friend_uc_ids
return course_dicts, uc_dicts, ucs
@staticmethod
def code_to_id(course_code):
return "".join(course_code.split()).lower()
@staticmethod
def search(params, current_user=None):
"""Search for courses based on various parameters.
Arguments:
params: Dict of search parameters (all optional):
keywords: Keywords to search on
sort_mode: Name of a sort mode. See Course.SORT_MODES. The
'friends_taken' sort mode defaults to 'popular' if no
current_user.
direction: 1 for ascending, -1 for descending
count: Max items to return (aka. limit)
offset: Index of first search result to return (aka. skip)
exclude_taken_courses: "yes" to exclude courses current_user
has taken.
current_user: The user making the request.
Returns:
A tuple (courses, has_more):
courses: Search results
has_more: Whether there could be more search results
"""
keywords = params.get('keywords')
sort_mode = params.get('sort_mode', 'popular')
default_direction = _SORT_MODES_BY_NAME[sort_mode]['direction']
direction = int(params.get('direction', default_direction))
count = int(params.get('count', 10))
offset = int(params.get('offset', 0))
exclude_taken_courses = (params.get('exclude_taken_courses') == "yes")
# TODO(david): These logging things should be done asynchronously
rmclogger.log_event(
rmclogger.LOG_CATEGORY_COURSE_SEARCH,
rmclogger.LOG_EVENT_SEARCH_PARAMS,
params
)
filters = {}
if keywords:
# Clean keywords to just alphanumeric and space characters
keywords_cleaned = re.sub(r'[^\w ]', ' ', keywords)
def regexify_keywords(keyword):
keyword = keyword.lower()
return re.compile('^%s' % re.escape(keyword))
keyword_regexes = map(regexify_keywords, keywords_cleaned.split())
filters['_keywords__all'] = keyword_regexes
if exclude_taken_courses:
if current_user:
ucs = (current_user.get_user_courses()
.only('course_id', 'term_id'))
filters['id__nin'] = [
uc.course_id for uc in ucs
if not term.Term.is_shortlist_term(uc.term_id)
]
else:
logging.error('Anonymous user tried excluding taken courses')
if sort_mode == 'friends_taken' and current_user:
import user
friends = user.User.objects(id__in=current_user.friend_ids).only(
'course_history')
num_friends_by_course = collections.Counter()
for friend in friends:
num_friends_by_course.update(friend.course_ids)
filters['id__in'] = num_friends_by_course.keys()
existing_courses = Course.objects(**filters).only('id')
existing_course_ids = set(c.id for c in existing_courses)
for course_id in num_friends_by_course.keys():
if course_id not in existing_course_ids:
del num_friends_by_course[course_id]
sorted_course_count_tuples = sorted(
num_friends_by_course.items(),
key=lambda (_, total): total,
reverse=direction < 0,
)[offset:offset + count]
sorted_course_ids = [course_id for (course_id, total)
in sorted_course_count_tuples]
unsorted_courses = Course.objects(id__in=sorted_course_ids)
course_by_id = {course.id: course for course in unsorted_courses}
courses = [course_by_id[cid] for cid in sorted_course_ids]
else:
sort_options = _SORT_MODES_BY_NAME[sort_mode]
if sort_options['is_rating']:
suffix = 'positive' if direction < 0 else 'negative'
order_by = '-%s.sorting_score_%s' % (sort_options['field'],
suffix)
else:
sign = '-' if direction < 0 else ''
order_by = '%s%s' % (sign, sort_options['field'])
unsorted_courses = Course.objects(**filters)
sorted_courses = unsorted_courses.order_by(order_by)
courses = sorted_courses.skip(offset).limit(count)
has_more = len(courses) == count
return courses, has_more
def to_dict(self):
"""Returns information about a course to be sent down an API.
Args:
course: The course object.
"""
return {
'id': self.id,
'code': self.code,
'name': self.name,
'description': self.description,
# TODO(mack): create user models for friends
#'friends': [1647810326, 518430508, 541400376],
'ratings': util.dict_to_list(self.get_ratings()),
'overall': self.overall.to_dict(),
'professor_ids': self.professor_ids,
'prereqs': self.prereqs,
}
def __repr__(self):
return "<Course: %s>" % self.code
|
Python
|
CL
|
40cd672e66df4b44b1e2231743ffe736d51160e49fa0f7b3acf69c95dfe68c4b
|
import imath
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import IECoreScene
class compoundParameters( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
"Op with some compound parameters.",
IECore.ObjectParameter(
name = "result",
description = "Dummy.",
defaultValue = IECoreScene.PointsPrimitive( IECore.V3fVectorData() ),
type = IECoreScene.TypeId.PointsPrimitive
)
)
self.parameters().addParameters( [
IECore.CompoundParameter(
name = "compound_1",
description = "a compound parameter",
userData = { "UI" : { "label" : IECore.StringData( "My Compound 1" ) } },
members = [
IECore.V3dParameter(
name = "j",
description = "a v3d",
defaultValue = IECore.V3dData( imath.V3d( 8, 16, 32 ) ),
userData = { "UI" : { "label" : IECore.StringData( "A Vector" ) } },
),
IECore.Color3fParameter(
name = "k",
description = "an m44f",
defaultValue = imath.Color3f(1,0.5,0),
userData = { "UI" : { "label" : IECore.StringData( "A Colour" ) } },
),
]
),
IECore.CompoundParameter(
name = "compound_2",
description = "a compound parameter",
userData = { "UI" : { "label" : IECore.StringData( "My Compound 2" ) } },
members = [
IECore.V3dParameter(
name = "j",
description = "a v3d",
defaultValue = IECore.V3dData( imath.V3d( 8, 16, 32 ) ),
presets = (
( "one", imath.V3d( 1 ) ),
( "two", imath.V3d( 2 ) )
),
userData = { "UI" : { "label" : IECore.StringData( "Compound->V3d" ) } },
),
IECore.V2fParameter(
name = "k",
description = "an v2f",
defaultValue = imath.V2f(1,1)
),
]
),
IECore.CompoundParameter(
name = "compound_3",
description = "a compound parameter",
userData ={ "UI" : { "label" : IECore.StringData( "My Compound 3" ) } },
members = [
IECore.CompoundParameter(
name = "compound_4",
description = "a compound parameter",
userData = { "UI" : { "label" : IECore.StringData( "My Compound 4" ) } },
members = [
IECore.IntParameter(
name = "some_int",
description = "Int",
defaultValue = 123,
userData = { "UI" : { "label" : IECore.StringData( "Int" ) } },
),
]
)
]
),
IECore.FloatParameter(
name="blah",
description="blah",
defaultValue = 123.0
),
IECore.CompoundParameter(
name = "compound_5",
description = "a compound parameter",
userData = { "UI" : { "label" : IECore.StringData( "Another Compound Parameter" ) } },
members = [
IECore.BoolParameter(
name = "bool_1",
description = "a boolean parameter",
defaultValue = True
)
]
)
] )
def doOperation( self, args ) :
return IECoreScene.PointsPrimitive( IECore.V3fVectorData() )
IECore.registerRunTimeTyped( compoundParameters )
|
Python
|
CL
|
48911736fe1709737551c692d9ccfdeac2ae5be9b5f8247b5c2b5dee19741351
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import Dataset
from deepy.utils import FakeGenerator, StreamPickler
import logging as loggers
logging = loggers.getLogger(__name__)
class OnDiskDataset(Dataset):
"""
On-disk dataset.
The data should be dumped with deepy.utils.StreamPickler.
    You must convert the data to mini-batches before dumping it to a file.
"""
def __init__(self, train_path, valid_path=None, test_path=None, train_size=None, cache_on_memory=False):
self._train_path = train_path
self._valid_path = valid_path
self._test_path = test_path
self._train_size = train_size
self._cache_on_memory = cache_on_memory
self._cached_train_data = None
if self._cache_on_memory:
logging.info("Cache on memory")
self._cached_train_data = list(StreamPickler.load(open(self._train_path)))
def generate_train_data(self):
for data in StreamPickler.load(open(self._train_path)):
yield data
def generate_valid_data(self):
for data in StreamPickler.load(open(self._valid_path)):
yield data
def generate_test_data(self):
for data in StreamPickler.load(open(self._test_path)):
yield data
def train_set(self):
if self._cache_on_memory:
return self._cached_train_data
if not self._train_path:
return None
return FakeGenerator(self, "generate_train_data")
def valid_set(self):
if not self._valid_path:
return None
return FakeGenerator(self, "generate_valid_data")
def test_set(self):
if not self._test_path:
return None
return FakeGenerator(self, "generate_test_data")
def train_size(self):
return self._train_size
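# Usage sketch (added for illustration; not part of the original module).
# StreamPickler is assumed to offer a dump counterpart to the load() used above
# (the dump_one name here is an assumption); batches below are placeholders.
# with open("train.pkl", "wb") as f:
#     for batch in ([[1, 2], [3, 4]], [[5, 6], [7, 8]]):
#         StreamPickler.dump_one(batch, f)
# dataset = OnDiskDataset("train.pkl", train_size=2)
# for batch in dataset.train_set():
#     print(batch)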
|
Python
|
CL
|
194e92c14d0eabc19ee55ae8ced5c3e1f22a09691ca84652d847d41432ec59eb
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_create_composed_model_async.py
DESCRIPTION:
Model compose allows multiple models to be composed and called with a single model ID.
This is useful when you have trained different models and want to aggregate a group of
them into a single model that you (or a user) could use to recognize a form. When doing
so, you can let the service decide which model more accurately represents the form to
recognize, instead of manually trying each trained model against the form and selecting
the most accurate one.
In our case, we will be writing an application that collects the expenses a company is making.
There are 4 main areas where we get purchase orders from (office supplies, office equipment,
furniture, and cleaning supplies). Because each area has its own form with its own structure,
we need to train a model per form. Note that you can substitute your own models or container
SAS URLs for this sample.
USAGE:
python sample_create_composed_model_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Form Recognizer resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) PURCHASE_ORDER_OFFICE_SUPPLIES_SAS_URL_V2 - a container SAS URL to your Azure Storage blob container.
4) PURCHASE_ORDER_OFFICE_EQUIPMENT_SAS_URL_V2 - a container SAS URL to your Azure Storage blob container.
5) PURCHASE_ORDER_OFFICE_FURNITURE_SAS_URL_V2 - a container SAS URL to your Azure Storage blob container.
6) PURCHASE_ORDER_OFFICE_CLEANING_SUPPLIES_SAS_URL_V2 - a container SAS URL to your Azure Storage blob container.
"""
import os
import asyncio
class ComposedModelSampleAsync(object):
async def create_composed_model_async(self):
# [START begin_create_composed_model_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import FormTrainingClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
po_supplies = os.environ['PURCHASE_ORDER_OFFICE_SUPPLIES_SAS_URL_V2']
po_equipment = os.environ['PURCHASE_ORDER_OFFICE_EQUIPMENT_SAS_URL_V2']
po_furniture = os.environ['PURCHASE_ORDER_OFFICE_FURNITURE_SAS_URL_V2']
po_cleaning_supplies = os.environ['PURCHASE_ORDER_OFFICE_CLEANING_SUPPLIES_SAS_URL_V2']
form_training_client = FormTrainingClient(endpoint=endpoint, credential=AzureKeyCredential(key))
async with form_training_client:
supplies_poller = await form_training_client.begin_training(
po_supplies, use_training_labels=True, model_name="Purchase order - Office supplies"
)
equipment_poller = await form_training_client.begin_training(
po_equipment, use_training_labels=True, model_name="Purchase order - Office Equipment"
)
furniture_poller = await form_training_client.begin_training(
po_furniture, use_training_labels=True, model_name="Purchase order - Furniture"
)
cleaning_supplies_poller = await form_training_client.begin_training(
po_cleaning_supplies, use_training_labels=True, model_name="Purchase order - Cleaning Supplies"
)
supplies_model = await supplies_poller.result()
equipment_model = await equipment_poller.result()
furniture_model = await furniture_poller.result()
cleaning_supplies_model = await cleaning_supplies_poller.result()
models_trained_with_labels = [
supplies_model.model_id,
equipment_model.model_id,
furniture_model.model_id,
cleaning_supplies_model.model_id
]
poller = await form_training_client.begin_create_composed_model(
models_trained_with_labels, model_name="Office Supplies Composed Model"
)
model = await poller.result()
print("Office Supplies Composed Model Info:")
print("Model ID: {}".format(model.model_id))
print("Model name: {}".format(model.model_name))
print("Is this a composed model?: {}".format(model.properties.is_composed_model))
print("Status: {}".format(model.status))
print("Composed model creation started on: {}".format(model.training_started_on))
print("Creation completed on: {}".format(model.training_completed_on))
# [END begin_create_composed_model_async]
print("Recognized fields:")
for submodel in model.submodels:
print("The submodel has model ID: {}".format(submodel.model_id))
print("...The submodel with form type {} has an average accuracy '{}'".format(
submodel.form_type, submodel.accuracy
))
for name, field in submodel.fields.items():
print("...The model found the field '{}' with an accuracy of {}".format(
name, field.accuracy
))
# Training result information
for doc in model.training_documents:
print("Document was used to train model with ID: {}".format(doc.model_id))
print("Document name: {}".format(doc.name))
print("Document status: {}".format(doc.status))
print("Document page count: {}".format(doc.page_count))
print("Document errors: {}".format(doc.errors))
async def main():
sample = ComposedModelSampleAsync()
await sample.create_composed_model_async()
if __name__ == '__main__':
asyncio.run(main())
|
Python
|
CL
|
e9add4033826760a29af7fdea6ec00a7649ac8ca3332af0c2aad8078b47b5264
|
import os
import sys
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
from lcdblib.plotting import results_table
from lcdblib.stats import fisher
import pybedtools
import pandas
import argh
from argh import arg
def plot(de_results, regions=None, peaks=None, selected=None, x='baseMean',
y='log2FoldChange', disable_logx=False, logy=False, pval_col='padj',
alpha=0.1, lfc_cutoff=0, plot_filename=None,
disable_raster_points=False, genes_to_label=None, label_column=None,
report=None, gene_lists=None
):
"""
M-A plot showing up- and downregulated genes with optional labeling and
Fishers exact tests.
If --plot-filename is not specified, then the plot will be displayed and
points can be clicked for interactive exploration.
If --peaks and --regions are specified, then results from Fishers exact
tests will be printed to stdout, or to --report if specified.
Parameters
----------
de_results : str or pandas.DataFrame
If str, it's the filename of a TSV of differential expression results,
with first column as gene ID. It will be parsed into a dataframe where
the index is gene ID. When called as a library, an already-created
pandas.DataFrame can optionally be provided instead.
regions : str or pybedtools.BedTool
Gene regions in which to look for intersections with peaks. BED file
where the 4th column contains gene IDs that are also present in first
column of `de_results`. Typically this would be a BED file of promoters
or gene bodies. When called as a library, a pybedtools.BedTool object
can optionally be provided instead.
peaks : str or pybedtools.BedTool
BED file to be intersected with `regions`. When called as a library,
a pybedtools.BedTool object can optionally be provided instead.
selected : str or list-like
        Replaces the `regions` and `peaks` arguments; useful when you already know
which genes you want to select (e.g., upregulated from a different
experiment). If a string, assume it's a filename and use the first
column which will be used as an index into the `de_results` dataframe.
When called as a library, if `selected` is not a string it will be used
as an index into the dataframe.
x : str
Column to use for x-axis. Default of "baseMean" expects DESeq2
results
y : str
Column to use for y-axis. Default of "log2FoldChange" expects DESeq2
results
disable_logx : bool
Disable default behavior of transforming x values using log10
logy : bool
Transform y values using log2
pval-col : str
Column to use for statistical significance. Default "padj" expectes
DESeq2 results.
alpha : float
Threshold for calling significance. Applied to `pval_col`
lfc_cutoff : float
Log2fold change cutoff to be applied to y values. Threshold is applied
post-transformation, if any specified (e.g., `logy` argument).
plot_filename : str
File to save plot. Format auto-detected by extension. Output directory
will be created if needed.
disable_raster_points : bool
Disable the default behavior of rasterizing points in a PDF. Use
sparingly, since drawing 30k+ individual points in a PDF may slow down
your machine.
genes_to_label: str or list-like
Optional file containing genes to label with text. First column must be
a subset of the first column of `de_results`. Lines starting with '#'
and subsequent tab-separated columns will be ignored. When called as
a library, a list-like object of gene IDs can be provided.
label_column : str
Optional column from which to take gene labels found in
`genes_to_label` (e.g., "symbol"). If the value in this column is
missing, fall back to the index. Use this if your gene IDs are long
Ensembl IDs but you want the gene symbols to show up on the plot.
report : str
Where to write out Fisher's exact test results. Default is stdout
gene_lists : str
Prefix to gene lists. If specified, gene lists corresponding to the
cells of the 2x2 Fishers exact test will be written to
{prefix}.up.tsv and {prefix}.dn.tsv. These are subsets of `de_results`
where genes are up and have a peak in region (or are selected), or
downregulated and have a peak in region (or are selected),
respectively.
"""
rasterized = not disable_raster_points
rt = results_table.DESeq2Results(de_results, import_kwargs=dict(index_col=0))
up = rt.upregulated(alpha=alpha, lfc=lfc_cutoff)
dn = rt.downregulated(alpha=alpha, lfc=-lfc_cutoff)
ch = (up | dn)
un = ~ch
sns.set_context('talk')
sns.set_style('white')
general_kwargs=dict(marker='.', alpha=0.4, color='0.5', picker=5,
label="_", linewidth=0, rasterized=rasterized)
genes_to_highlight = [
(
up,
dict(color='#990000', marker='o', s=40, alpha=0.5, label='up (%s)' % sum(up))
),
(
dn,
dict(color='#005c99', marker='o', s=40, alpha=0.5, label='down (%s)' % sum(dn))
),
]
if genes_to_label:
_genes_to_label = []
if isinstance(genes_to_label, str):
for i in open(genes_to_label):
if i.startswith('#'):
continue
_genes_to_label.append(i.split('\t')[0].strip())
else:
_genes_to_label = genes_to_label
ind = rt.data.index.isin(_genes_to_label)
# Don't add labels if a coordinate is null
ind = ind & ~(rt.data[y].isnull() | rt.data[x].isnull())
if label_column:
names = rt.data.loc[ind, label_column]
# Fill in nulls with index to avoid labeling all genes with no
# symbol as "nan"
n = names.isnull()
names[n] = rt.index[n]
else:
names = rt.index[ind]
names = list(names)
genes_to_highlight.append(
(
ind,
dict(rasterized=rasterized, names=names, facecolor='None',
alpha=1.0, s=160, linewidth=1, zorder=100, label='_')
)
)
if not disable_logx:
xfunc=np.log10
else:
xfunc=None
if report is None:
output = sys.stdout
else:
output = open(report, 'w')
if selected and (peaks or regions):
raise ValueError(
"`selected` is mutually exclusive with `peaks` and `regions`")
do_fisher = False
if selected:
do_fisher = True
if isinstance(selected, str):
selected = list(pandas.read_table(selected, index_col=0).index)
selected_genes = rt.index.isin(selected)
row_names = ['selected', 'not selected']
elif peaks is not None and regions is not None:
do_fisher = True
row_names = ['has peak', 'no peak']
regions = pybedtools.BedTool(regions)
peaks = pybedtools.BedTool(peaks)
with_peak = list(set([i.name for i in regions.intersect(peaks, u=True)]))
in_region = peaks.intersect(regions, u=True)
selected_genes = rt.index.isin(with_peak)
npeaks = len(peaks)
nregions = len(regions)
npeaks_in_region = len(in_region)
output.write('Total peaks: {}\n'.format(npeaks))
output.write('Peaks in regions: {0} ({1:.2f}%)\n\n'.format(npeaks_in_region, npeaks_in_region / npeaks * 100))
if do_fisher:
genes_to_highlight.append(
(
selected_genes,
dict(color='#ff9900', alpha=0.8, s=30,
rasterized=rasterized, label='{0} ({1})'.format(row_names[0], sum(selected_genes)))
)
)
output.write(
fisher.fisher_tables(
table=fisher.table_from_bool(selected_genes, up),
row_names=row_names,
col_names=['upregulated', 'not'],
title='Upregulated (lfc>{0}; padj<{1})'.format(lfc_cutoff, alpha)))
output.write('\n\n')
output.write(
fisher.fisher_tables(
table=fisher.table_from_bool(selected_genes, dn),
row_names=row_names,
col_names=['downregulated', 'not'],
title='Downregulated (lfc<-{0}; padj<{1})'.format(lfc_cutoff, alpha)))
output.write('\n\n')
output.write(
fisher.fisher_tables(
table=fisher.table_from_bool(selected_genes, ch),
row_names=row_names,
col_names=['changed', 'not'],
title='Changed (lfc<-{0}; padj<{1})'.format(lfc_cutoff, alpha)))
if gene_lists is not None:
rt.data[selected_genes & up].to_csv(gene_lists + '.up.tsv', sep='\t')
rt.data[selected_genes & dn].to_csv(gene_lists + '.dn.tsv', sep='\t')
if report is not None:
output.close()
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111)
ax = rt.scatter(
ax=ax,
x=x,
y=y,
xfunc=xfunc,
genes_to_highlight=genes_to_highlight,
general_kwargs=general_kwargs,
offset_kwargs=dict(x=-0.1),
label_kwargs=dict(
horizontalalignment='right',
verticalalignment='center',
style='italic',
size=10,
zorder=500,
bbox=dict(facecolor='w', alpha=0.5, edgecolor='none'))
)
ax.legend(loc='best', prop=dict(size=10))
if plot_filename:
dirname = os.path.dirname(plot_filename)
        if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
fig.savefig(plot_filename)
return ax
if __name__ == "__main__":
argh.dispatch_command(plot)
plt.show()
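# Invocation sketch (assumption, not part of the original script): argh exposes
# plot() as a command-line tool, so a typical call would look like
#   python ma_plot.py results.tsv --peaks peaks.bed --regions promoters.bed \
#       --plot-filename plots/ma.pdf --report fisher.txt
# Script and file names are placeholders; flag names follow the parameters above.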
|
Python
|
CL
|
b13462e6a7678cb447c33bda8c1d097b6a14532d9203c45a5c8f4f1f933e9566
|
import argparse
def get_argparse(answer_type="answer_extraction", return_parser=False):
parser = argparse.ArgumentParser()
# setup/logging parameters
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.",
)
parser.add_argument(
"--vocab_file",
default=None,
type=str,
required=True,
help="The vocabulary file that the BERT model was trained on.",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.",
)
parser.add_argument(
"--dataset_option",
default=None,
type=str,
help=(
"dataset-specific option. "
"RACE={'high', 'middle'}, "
"MCTest={'mc160', 'mc500'}"
),
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
required=True,
help="Where to save cache of features."
)
parser.add_argument(
"--corenlp_cache_dir",
default="corenlp_{}",
type=str,
help="directory of corenlp caches",
)
parser.add_argument(
"--data_dir",
default=None,
type=str,
help="Where to get the dataset data."
)
parser.add_argument(
"--log_spec",
default=None,
type=str,
help="specification for logging filename.",
)
parser.add_argument(
"--no_cache",
default=False,
action="store_true",
help="Never use feature cache."
)
parser.add_argument(
"--verbose_logging",
default=False,
action="store_true",
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.",
)
# run parameters
parser.add_argument(
"--do_train",
default=False,
action="store_true",
help="Whether to run training.",
)
parser.add_argument(
"--do_eval",
default=False,
action="store_true",
help="Whether to run eval on the dev set.",
)
parser.add_argument(
"--do_test",
default=False,
action="store_true",
help="Whether to run eval on the dev set.",
)
parser.add_argument(
"--eval_on_train",
default=False,
action="store_true",
help="Evaluate on the training set.",
)
parser.add_argument(
"--data_split",
default="",
type=str,
)
# processing parameters
parser.add_argument(
"--do_lower_case",
default=True,
action="store_true",
help="Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.",
)
parser.add_argument(
"--max_seq_length",
default={
'answer_extraction': 384,
            'multiple_choice': 128
}[answer_type],
type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.",
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument(
"--max_query_length",
default={
'answer_extraction': 64,
'multiple_choice': 23
}[answer_type],
type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.",
)
# model parameters
parser.add_argument(
"--init_checkpoint",
default=None,
type=str,
help="Initial checkpoint (usually from a pre-trained BERT model).",
)
parser.add_argument(
"--train_batch_size",
default=32,
type=int,
help="Total batch size for training.",
)
parser.add_argument(
"--eval_batch_size",
default=64, # 32
type=int,
help="Total batch size for predictions.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% "
"of training.",
)
parser.add_argument(
"--save_checkpoints_steps",
default=200,
type=int,
help="How often to save the model checkpoint.",
)
parser.add_argument(
"--save_model_steps",
default=200,
type=int,
help="How often to save the model checkpoint.",
)
parser.add_argument(
"--loss_report_steps",
default=0,
type=int,
help="How often to report the loss."
)
parser.add_argument(
"--eval_steps",
default=200,
type=int,
help="How often to eval the model."
)
parser.add_argument(
"--iterations_per_loop",
default=1000,
type=int,
help="How many steps to make in each estimator call.",
)
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json "
"output file.",
)
parser.add_argument(
"--no_cuda",
default=False,
action="store_true",
help="Whether not to use CUDA when available",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
parser.add_argument(
"--optimize_on_cpu",
default=False,
action="store_true",
help="Whether to perform optimization and keep the optimizer averages on CPU",
)
parser.add_argument(
"--fp16",
default=False,
action="store_true",
help="Whether to use 16-bit float precision instead of 32-bit",
)
parser.add_argument(
"--loss_scale",
type=float,
default=128,
help="Loss scaling, positive power of 2 values can improve fp16 convergence.",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="random seed for initialization"
)
# method parameters
parser.add_argument(
"--input_ablation",
default=None,
type=str,
help="input ablation: shuffle_sentences",
)
# output options
parser.add_argument("--output_statistics", default=False, action="store_true")
parser.add_argument("--output_mturk", default=False, action="store_true")
parser.add_argument("--output_examples", default=False, action="store_true")
parser.add_argument("--enter_debugger", default=False, action="store_true")
# debug
parser.add_argument("--debug_counter", default=-1, type=int)
parser.add_argument("--debug_start_counter", default=-1, type=int)
parser.add_argument("--small_debug", default=False, action="store_true")
# vocabulary modifications
parser.add_argument(
"--entity_anonymization",
# choices=["open", "close", "close_noun", "close_content", "close_contentverb"],
default=None,
type=str,
help=(
"Entity anonymization. close: use the same id for the same entity"
"across context documents. open: use a different id."
)
)
parser.add_argument(
"--limit_vocab_size",
default=None,
type=int,
)
parser.add_argument(
"--limit_vocab_freq",
default=None,
type=int,
)
if answer_type == "answer_extraction":
parser.add_argument(
"--mix_input_ablation",
default=None,
type=str,
help="example: shuff_document_words=10:shuffle_sentence_words=10",
)
parser.add_argument("--ignore_out_of_span", default=False, action="store_true")
parser.add_argument("--allow_impossible", default=False, action="store_true")
parser.add_argument("--null_score_diff_threshold", default=0.0, type=float)
elif answer_type == "multiple_choice":
parser.add_argument(
"--max_option_length",
default=17,
type=int,
help="17 is used in GPTv1 on RACE."
)
parser.add_argument(
"--convert_from_ans_extr",
default=False,
action="store_true",
help="convert examples from answer extraction",
)
parser.add_argument(
"--train_predictions",
default=None,
type=str,
help="predictions for train examples",
)
parser.add_argument(
"--eval_predictions",
default=None,
type=str,
help="predictions for eval examples",
)
if return_parser:
return parser #.parse_args()
else:
return parser.parse_args()
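# Hedged usage sketch (added, not part of the original module): shows one way this helper
# might be driven. All file paths and the task name below are placeholders, purely for
# illustration; they do not refer to real resources.
if __name__ == "__main__":
    demo_parser = get_argparse(answer_type="multiple_choice", return_parser=True)
    demo_args = demo_parser.parse_args([
        "--bert_config_file", "bert_config.json",  # placeholder path
        "--vocab_file", "vocab.txt",               # placeholder path
        "--output_dir", "out/",                    # placeholder path
        "--task_name", "race",                     # placeholder task name
        "--cache_dir", "cache/",                   # placeholder path
        "--do_train",
    ])
    print(demo_args.task_name, demo_args.max_seq_length, demo_args.max_option_length)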
|
Python
|
CL
|
b87b6158ea922105726be4ad908c291478c24ea8a5207188117f0cc29deafef0
|
#--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Module config...
#
#------------------------------------------------------------------------
"""Job configuration for pyana
This software was developed for the LUSI project. If you use all or
part of it, please give an appropriate acknowledgment.
@see RelatedModule
@version $Id$
@author Andrei Salnikov
"""
#------------------------------
# Module's version from CVS --
#------------------------------
__version__ = "$Revision$"
# $Source$
#--------------------------------
# Imports of standard modules --
#--------------------------------
import sys
import logging
import ConfigParser
#---------------------------------
# Imports of base class module --
#---------------------------------
#-----------------------------
# Imports for other modules --
#-----------------------------
#----------------------------------
# Local non-exported definitions --
#----------------------------------
_log = logging.getLogger("pyana.config")
#------------------------
# Exported definitions --
#------------------------
#---------------------
# Class definition --
#---------------------
class config ( object ) :
#--------------------
# Class variables --
#--------------------
#----------------
# Constructor --
#----------------
def __init__ ( self, configFile, jobName = None ) :
self._config = None
self._sections = ["pyana"]
if jobName :
self._sections.insert(0, "pyana."+jobName)
# read config file
if configFile :
_log.info("reading config file %s", configFile)
self._config = ConfigParser.ConfigParser()
            self._config.read(configFile)
else :
_log.info("reading config file pyana.cfg")
self._config = ConfigParser.ConfigParser()
self._config.read("pyana.cfg")
#-------------------
# Public methods --
#-------------------
def getJobConfig( self, option ):
""" Gets configuration option from one of many sections """
for section in self._sections:
try:
#_log.debug("getJobConfig section=%s option=%s", section, option)
return self._config.get(section, option)
except ConfigParser.NoSectionError:
pass
except ConfigParser.NoOptionError:
pass
#_log.debug("getJobConfig option not found")
def getModuleConfig( self, module ):
""" Gets configuration options for a module as a dictionary """
try :
config = self._config.items(module)
return dict(config)
except ConfigParser.NoSectionError:
return {}
#--------------------------------
# Static/class public methods --
#--------------------------------
#--------------------
# Private methods --
#--------------------
#
# In case someone decides to run this module
#
if __name__ == "__main__" :
# In principle we can try to run test suite for this module,
# have to think about it later. Right now just abort.
sys.exit ( "Module is not supposed to be run as main module" )
|
Python
|
CL
|
953a10cc3feb8ffdfd071e21543cc58add7fb5bd1b6c954131b0bc399a12c896
|
"""
Django settings for trip_1 project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from datetime import timedelta
from pathlib import Path
# from django.contrib.auth.models import User
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# F:\trip_1
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-pa_dvbe7qmcung-l#xlqm3t&x&%d=rq-gt)xh-2hf92ioi@nz#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Custom user model -> replaces Django's default user model
AUTH_USER_MODEL = 'accounts.User'
# Application definition
INSTALLED_APPS = [
    # Admin UI
    'simpleui',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Rich text editor
    'ckeditor',
    'ckeditor_uploader',
    # System module
    'system',
    # Sights (attractions) module
    'sight',
    # User accounts
    'accounts',
    # Orders module
    'order',
    # Admin statistics/reports
    'master',
    'rest_framework',
    'django_filters',
    'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'trip_1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trip_1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'trip',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
# Language configuration
LANGUAGE_CODE = 'zh-hans'
# Asia/Shanghai time zone
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Languages supported by the site; Django's LocaleMiddleware picks one automatically from the request
LANGUAGES = (
    ('en', 'English'),
    ('zh-hans', 'Simplified Chinese')
)
# Directory containing the translation files; it must be created manually
LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# MEDIA_URL = '/media/'
MEDIA_URL = 'http://localhost:8080/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Define a list of directories (STATICFILES_DIRS) that Django will search for static files
# STATICFILES_DIRS = (
# )
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# DRF (Django REST framework) configuration
REST_FRAMEWORK = {
    # Global default renderers
    # REST framework provides a Response class; when a response is built with it, the payload
    # is rendered into whatever format the client asked for via the Accept header. If the
    # request declares no Accept header, the default renderer is used.
    'DEFAULT_RENDERER_CLASSES': (
        # JSON renderer
        'rest_framework.renderers.JSONRenderer',
        # Browsable API renderer
        'rest_framework.renderers.BrowsableAPIRenderer',
    ),
    # Filtering
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.SearchFilter',
'rest_framework.filters.OrderingFilter',
),
    # Global default authentication classes
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ),
    # Global default pagination style
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    # Number of items per page
    'PAGE_SIZE': 10,
    # Global default permission classes
    'DEFAULT_PERMISSION_CLASSES': (
        # Only allow authenticated users
        'rest_framework.permissions.IsAuthenticated',
    ),
}
# Redis cache configuration
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# Simple JWT default settings
SIMPLE_JWT = {
    # Lifetime of the access token
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=15),
    # Lifetime of the refresh token
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': True,
'UPDATE_LAST_LOGIN': False,
'ALGORITHM': 'HS256',
'SIGNING_KEY': SECRET_KEY,
'VERIFYING_KEY': None,
'AUDIENCE': None,
'ISSUER': None,
'JWK_URL': None,
'LEEWAY': 0,
'AUTH_HEADER_TYPES': ('Bearer',),
'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user_id',
'USER_AUTHENTICATION_RULE': 'rest_framework_simplejwt.authentication.default_user_authentication_rule',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
'TOKEN_TYPE_CLAIM': 'token_type',
'JTI_CLAIM': 'jti',
'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
# CORS
# Allow cross-origin requests (configuration for the testing/development stage)
CORS_ALLOW_CREDENTIALS = True
# Allow cross-origin access from any origin
CORS_ORIGIN_ALLOW_ALL = True
# Allow all cross-origin request headers
CORS_ALLOW_HEADERS = ('*',)
# simple-ui
SIMPLEUI_HOME_INFO = False
SIMPLEUI_ICON = {
'景点门票': 'fas fa-briefcase',
'景点': 'fas fa-file-powerpoint',
'景点详情': 'fab fa-artstation',
}
# ckeditor
# Upload location for files from the rich text editor
CKEDITOR_UPLOAD_PATH = "ckeditor/"
# Generate image thumbnails so uploaded images can be browsed in the editor
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_CONFIGS = {
'default': {
'language': 'zh-cn',
'height': 400,
'width': 800,
'toolbar': (
['div', 'Source', '-', 'Save', 'NewPage', 'Preview', '-', 'Templates'],
['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Print', 'SpellChecker', 'Scayt'],
['Undo', 'Redo', '-', 'Find', 'Replace', '-', 'SelectAll', 'RemoveFormat'],
['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea', 'Select', 'Button', 'ImageButton', 'HiddenField'],
['Bold', 'Italic', 'Underline', 'Strike', '-', 'Subscript', 'Superscript'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', 'Blockquote'],
['JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Link', 'Unlink', 'Anchor'],
['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak'],
['Styles', 'Format', 'Font', 'FontSize'],
['TextColor', 'BGColor'],
['Maximize', 'ShowBlocks', '-', 'About', 'pbckcode'],
),
}
}
|
Python
|
CL
|
a13f453e78caa9f97e81a462073c031062662677fbca7e3014bd679405cc537d
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-08 02:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=40)),
('detail', models.TextField()),
('create_time', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now=True)),
('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answer', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='question', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='care.Post'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reply', to=settings.AUTH_USER_MODEL),
),
]
|
Python
|
CL
|
f7fdc26f12fc441425426d6bb784f3fe651c6a1fdc92949be74442521930793e
|
# https://leetcode.com/problems/isomorphic-strings/?tab=Description
# Given two strings s and t, determine if they are isomorphic.
# Two strings are isomorphic if the characters in s can be replaced to get t.
# All occurrences of a character must be replaced with another character while
# preserving the order of characters. No two characters may map to the same
# character but a character may map to itself.
#
# For example,
# Given "egg", "add", return true.
#
# Given "foo", "bar", return false.
#
# Given "paper", "title", return true.
# You may assume both s and t have the same length.
"""
This is very similar to problem 290 - Word Pattern. HOWEVER, it's also a
little different. For example, this test case: (ab, ca) --> True.
The idea here is that once we have 2 strings "egg" and "add", for the result to
be true each character in the first string must have a unique mapping to another
char in the second string. For ex: e --> a, g --> d.
And the number of such mappings MUST be the same as the number of different
letters in the strings. We can use a HashSet (a Python set of pairs).
"""
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t):
return False
mappings = set(zip(s, t))
if len(mappings) == len(set(s)):
if len(mappings) == len(set(t)):
return True
return False
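# Hedged quick check (added, not part of the original solution): runs the examples
# quoted in the problem statement above, plus the (ab, ca) case noted in the docstring.
if __name__ == "__main__":
    sol = Solution()
    assert sol.isIsomorphic("egg", "add")
    assert not sol.isIsomorphic("foo", "bar")
    assert sol.isIsomorphic("paper", "title")
    assert sol.isIsomorphic("ab", "ca")
    print("all isIsomorphic examples pass")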
|
Python
|
CL
|
cea3640d47891266b60ddba4e2ed58852c65d6870143ca9001908afb7146e6d2
|
# -*- coding: utf-8 -*-
"""
rater.views.core
~~~~~~~~~~~~~~~~~
This module implements the core views. These are usually language
independent view functions such as the overall index page.
:copyright: (c) 2009 by Rater Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug import redirect, Response
from werkzeug.exceptions import NotFound, MethodNotAllowed
from rater.application import url_for, json_response
from rater.templating import render_template
def home(request):
"""Shows the home page."""
return render_template('core/home.html')
def about(request):
"""Just shows a simple about page that explains the system."""
return render_template('core/about.html')
def not_found(request):
"""Shows a not found page."""
return Response(render_template('core/not_found.html'), status=404,
mimetype='text/html')
def bad_request(request):
"""Shows a "bad request" page."""
return Response(render_template('core/bad_request.html'),
status=400, mimetype='text/html')
def forbidden(request):
"""Shows a forbidden page."""
return Response(render_template('core/forbidden.html'),
status=401, mimetype='text/html')
|
Python
|
CL
|
7a7bd7fb1ec733da639d167b6b0df419fc80968a9decb3e8e0d116ee13a7ea95
|
# This is the Twisted Client EHA Now! client, version 5.0.
# NOTE: This should not be used as the basis for production code.
# It uses low-level Twisted APIs as a learning exercise.
import datetime
import time
from sources.ebilockorder import Ebilock_order as eb
from sources.hdlc import read_hdlc
from twisted.internet.protocol import Protocol
from twisted.internet.protocol import ClientFactory
from twisted.internet import defer
from twisted.python import log
from twisted.application import service
class EbilockProtocol(Protocol):
def delta_time(self, receive_time):
self.factory.receive_data["time_delta"] = receive_time - self.factory.start_time
self.factory.start_time = time.time()
def dataReceived(self, data):
self.delta_time(time.time())
self.factory.receive_data["hdlc"] = data
self.factory.order_received()
def dataSend(self, data):
self.transport.write(data)
#def connectionMade(self):
# self.transport.write("Hello!")
def connectionLost(self, reason):
print("Connection Lost!!!")
#self.order_finished(reason)
#def order_finished(self, reason):
# self.factory.client_finished(reason)
class EbilockClientFactory(ClientFactory):
task_num = 1
result = ""
protocol = EbilockProtocol
def __init__(self, defered):
self.defered = defered
self.clientprotocol = EbilockProtocol()
self.order = ""
self.start_time = time.time()
self.receive_data = {"hdlc": "", "time_delta": "",}
self.work_data = {
"System_Status": "PASSIVE",
"status_order": "",
"Count_A": 1,
"Count_B": 254,
"Err_Count": 0,
"order": "",
}
def check_count_ok(self):
""" check counters good Telegram """
status = False
order_a = self.work_data["order"]["PACKET_COUNT_A"]
order_b = self.work_data["order"]["PACKET_COUNT_B"]
global_a = self.work_data["Count_A"]
global_b = self.work_data["Count_B"]
if order_a - global_a <= 2 and order_b - global_b <= 2:
status = True
else:
pass
#send status from old counts
self.work_data["Count_A"] = self.work_data["order"]["PACKET_COUNT_A"]
self.work_data["Count_B"] = self.work_data["order"]["PACKET_COUNT_B"]
return status
def switching_to_work(self):
""" system to switch to the operating mode """
self.work_data["Err_Count"] = 0
#timer_error = stop
self.work_data["System_Status"] = "WORK"
#print("System status: Work!!!")
#def buildProtocol(self, address):
# proto = EbilockClientFactory.buildProtocol(self, address)
# return proto
#def buildProtocol(self, address):
# return EbilockProtocol(EbilockClientFactory)
def client_finished(self,reason):
if self.defered is not None:
d = self.defered
self.defered = None
d.errback(reason)
#self.errorback(reason)
#def clientConnectionFailed(self, connector, reason):
# print("Failed to connect to: {}".format(connector.getDestination()))
# if self.defered is not None:
# d = self.defered
# self.defered = None
# d.errback(reason)
def order_received(self):
if self.defered is not None:
d = self.defered
self.defered = None
# d.callback(data)
#self.callback(data, receive_count, delta_time)
source_hdlc = read_hdlc(self.receive_data["hdlc"])
order = eb.from_hdlc(source_hdlc).check_telegramm()
self.work_data["status_order"] = order["status"]
self.work_data["order"] = order["order"]
self.work_order()
def work_order(self):
if self.work_data["status_order"] == "This send status":
print("Send status")
elif self.work_data["status_order"] == "OK":
if self.check_count_ok():
self.switching_to_work()
else:
#wrong telegram
self.work_data["Err_Count"] += 1
#print("order: {}, CountA: {}, CountB: {}, Err_count: {}".format(self.work_data["status_order"],\
#self.work_data["Count_A"], self.work_data["Count_B"], self.work_data["Err_Count"]))
print "status system: {0}, order: {1}, delta time: {2}, CountA: {3}, CountB: {4}".format(self.work_data["System_Status"],\
self.work_data["status_order"], self.receive_data["time_delta"], self.work_data["Count_A"], self.work_data["Count_B"])
#def sendResult(self, data):
# print("Factory send over")
# self.protocol.dataSend(data)
def get_order(host, port):
d = defer.Deferred()
from twisted.internet import reactor
factory = EbilockClientFactory(d)
reactor.connectTCP(host, port, factory)
return d
def client_main():
"""
EHM_STATUS: 'PASS' - silence mode, 'WORK' - work mode
"""
from twisted.internet import reactor
start = datetime.datetime.now()
port = 4016
host = '192.168.101.100'
#port = 10000
#host = "localhost"
main_dict = {
"count_orders": 5,
"EHM_STATUS": "PASS",
"CountA": 0,
"CountB": 0,
"Dog_timer": 0,
}
def got_order(data):
result = eb.from_hdlc(read_hdlc(data["order"])).check_telegramm()
print "status order: {0}, delta time: {1}".format(result, data["time_delta"])
if main_dict["count_orders"] == data["count"]:
main_dict["count_orders"] = data["count"]
order_done()
else:
print("return")
order_done()
def order_filed(err):
main_dict["count_orders"] = 0
print("Order filed: {}".format(err))
#order_done()
def order_done():
print("Reactor stop!!!")
reactor.stop()
def first_init():
""" start initialization """
main_dict["CountA"] = 0
main_dict["CountB"] = 0
main_dict["Dog_timer"] = 0
main_dict["EHM_STATUS"] = "PASS"
first_init()
d = get_order(host, port)
d.addCallbacks(got_order, order_filed)
reactor.run()
elasped = datetime.datetime.now() - start
    print('Got {} orders in {}'.format(main_dict["count_orders"], elasped))
if __name__ == '__main__':
client_main()
|
Python
|
CL
|
e9b4441de81c4b3ffdee0cd87fb8c8e185d84c0a88dcd05acfdd9fd3189e0ea3
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__copyright__ = """ This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
"""
Utility functions for conversions between molecule/atom representations.
"""
from vtk import vtkPeriodicTable, vtkAtom, vtkMolecule
from typing import List, Tuple, cast, Optional
import numpy as np
import scine_utilities as su
def atom_to_tuple(atom: vtkAtom) -> Tuple[str, Tuple[float, float, float]]:
"""
Returns an atomic symbol and a tuple of atom positions.
"""
symbol = vtkPeriodicTable().GetSymbol(atom.GetAtomicNumber())
position = atom.GetPosition()
return cast(
Tuple[str, Tuple[float, float, float]],
(symbol, (position.GetX(), position.GetY(), position.GetZ())),
)
def molecule_to_list_of_atoms(
molecule: vtkMolecule,
) -> List[Tuple[str, Tuple[float, float, float]]]:
"""
Convert vtkMolecule to list of atoms.
"""
atoms = list()
for atom_index in range(molecule.GetNumberOfAtoms()):
atoms.append(atom_to_tuple(molecule.GetAtom(atom_index)))
return atoms
def molecule_to_atom_collection(molecule: vtkMolecule) -> su.AtomCollection:
"""
Convert list of atoms to su.AtomCollection.
The method gets the molecule in Angstrom units and returns the AtomCollection in Bohr units.
"""
atom_list = molecule_to_list_of_atoms(molecule)
if not atom_list:
return su.AtomCollection()
elements = list()
positions = list()
for symbol, position in atom_list:
elements.append(su.ElementInfo.element_from_symbol(symbol))
positions.append(su.BOHR_PER_ANGSTROM * np.array(position))
return su.AtomCollection(elements, positions)
def molecule_to_bond_order_collection(molecule: vtkMolecule) -> su.BondOrderCollection:
bo_collection = su.BondOrderCollection(molecule.GetNumberOfAtoms())
n_bonds = molecule.GetNumberOfBonds()
for n in range(n_bonds):
bond = molecule.GetBond(n)
i = bond.GetBeginAtomId()
j = bond.GetEndAtomId()
order = bond.GetOrder()
bo_collection.set_order(i, j, order)
return bo_collection
def atom_collection_to_molecule(atom_collection: su.AtomCollection) -> vtkMolecule:
molecule = vtkMolecule()
for atom in atom_collection:
p = atom.position * su.ANGSTROM_PER_BOHR
z = su.ElementInfo.Z(atom.element)
molecule.AppendAtom(z, *p)
return molecule
def convert_gradients(
gradients: np.ndarray,
boost_factor: float = 0.1400142601462408,
trust_radius: float = 1.0
) -> None:
"""
Convert gradients from hartree/bohr to hartree/angstrom.
Dampen, if max displacement is > trust_radius (default 1 bohr)
to avoid shooting around nuclei.
"""
gradients *= boost_factor * su.BOHR_PER_ANGSTROM
max_coefficient = np.max(np.abs(gradients))
if max_coefficient > trust_radius:
gradients *= trust_radius / max_coefficient
def apply_gradients(
molecule: vtkMolecule,
gradients: np.ndarray,
mouse_picked_atom_id: Optional[int] = None,
haptic_picked_atom_id: Optional[int] = None,
) -> None:
"""
Apply gradients to molecule.
"""
n_atoms = molecule.GetNumberOfAtoms()
for atom_index in range(n_atoms):
if atom_index == mouse_picked_atom_id or atom_index == haptic_picked_atom_id:
continue
gradient = gradients[atom_index]
atom = molecule.GetAtom(atom_index)
positions = atom.GetPosition()
atom.SetPosition(
positions[0] - gradient[0],
positions[1] - gradient[1],
positions[2] - gradient[2],
)
def times_bohr_per_angstrom(atom_positions: List[float]) -> List[float]:
"""
    Convert atom positions from angstrom to bohr.
"""
return cast(
List[float],
[
atom_positions[0] * su.BOHR_PER_ANGSTROM,
atom_positions[1] * su.BOHR_PER_ANGSTROM,
atom_positions[2] * su.BOHR_PER_ANGSTROM,
],
)
def times_angstrom_per_bohr(atom_positions: List[float]) -> List[float]:
"""
    Convert atom positions from bohr to angstrom.
"""
return cast(
List[float],
[
atom_positions[0] * su.ANGSTROM_PER_BOHR,
atom_positions[1] * su.ANGSTROM_PER_BOHR,
atom_positions[2] * su.ANGSTROM_PER_BOHR,
],
)
def maximum_vdw_radius(molecule: vtkMolecule) -> float:
"""
Returns the maximum VDW radius of all the atoms in the molecule.
"""
table = vtkPeriodicTable()
return max(
(
cast(float, table.GetVDWRadius(molecule.GetAtom(id).GetAtomicNumber()))
for id in range(molecule.GetNumberOfAtoms())
),
default=0.0,
)
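# Hedged sanity sketch (added, not part of the original utilities; assumes scine_utilities
# is importable): su.BOHR_PER_ANGSTROM and su.ANGSTROM_PER_BOHR are reciprocal conversion
# factors, so converting a position to bohr and back should reproduce the input.
if __name__ == "__main__":
    original = [1.0, 2.0, 3.0]  # hypothetical position in angstrom
    round_trip = times_angstrom_per_bohr(times_bohr_per_angstrom(original))
    assert all(abs(a - b) < 1e-6 for a, b in zip(original, round_trip))
    print("bohr/angstrom round trip OK:", round_trip)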
|
Python
|
CL
|
d586b3434d60d55ee150bd205218a72aeb10ff9d1824a15bf87eba7c650fa3e6
|
# Generated by Django 3.0.1 on 2020-01-06 23:59
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
('equipment', '0004_auto_20200106_2359'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('checkout_date_time', models.DateTimeField(default=datetime.datetime(2020, 1, 7, 23, 59, 38, 427622, tzinfo=utc))),
('checkin_date_time', models.DateTimeField(blank=True, null=True)),
('total_cost', models.DecimalField(decimal_places=2, max_digits=6)),
('borrower_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('equipment_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='equipment.Equipment')),
],
),
]
|
Python
|
CL
|
d6a4553722943a7dfced71e67a7a3374988621edca4b752486179a7c74746223
|
# Utility routines for processing reanalysis precipitation data for
# the Snow on Sea Ice project
import numpy as np
import xarray as xr
import pandas as pd
import glob
import os
import datetime as dt
import calendar
from precipitation.constants import filepath, vnamedict, arctic_mask_region
from precipitation.constants import accumulation_period_filepath, annual_total_filepath
def _glob_precip_stats_dirpath(reanalysis):
"""
Generates glob dir path
"""
fmt = os.path.join( os.path.split( os.path.split( filepath[reanalysis]['path'] )[0] )[0], '????', '??')
return fmt.format('PRECTOT')
def _glob_precip_stats_fname(reanalysis):
"""
Generates glob filename
"""
fname = filepath[reanalysis]['ffmt'].format('PRECIP_STATS','????')
fname = fname.replace('.nc','.month.Nh50km.nc')
return fname
def glob_precip_stats(reanalysis):
"""
Returns a list of files for a given reanalysis and variable using glob
NB: I think I might have to hardcode the file formats
reanalysis - ERAI, CFSR, MERRA, MERRA2
variable - name of variable
grid - e.g. Nh50km
"""
globpath = _glob_precip_stats_dirpath(reanalysis)
globfile = _glob_precip_stats_fname(reanalysis)
#fileList = glob.glob( os.path.join( globpath, globfile) )
return os.path.join( globpath, globfile)
def make_filepath(reanalysis, variable, date, grid=None):
'''
Generates a filepath for a given reanalysis variable for a given date
reanalysis - name of reanalysis
variable - my standard variable name
date - date string - can have wildcards
returns - filepath string
'''
fp = os.path.join(filepath[reanalysis]['path'].format(vnamedict[reanalysis][variable]['name'], date.year, date.month),
filepath[reanalysis]['ffmt'].format(vnamedict[reanalysis][variable]['name'], date.strftime('%Y%m')))
if grid:
#if (reanalysis == 'CFSR') | (reanalysis == 'CFSR2'):
# fp = fp.replace('.nc','.{:s}.nc'.format('EASE_NH50km'))
#else:
# fp = fp.replace('.nc', '.{:s}.nc'.format(grid))
fp = fp.replace('.nc', '.{:s}.nc'.format(grid))
return fp
def make_outfile(fili, reanalysis, variable, version=None):
import re
new_varname = {'PRECIP': '.PRECIP_STATS',
'SNOW': '.PRECSNO_STATS',}
filo = fili
filo = re.sub('(?<=MERRA)[_]*\?0+','',filo)
filo = re.sub('(?<=MERRA2)[_]*\?0+','',filo)
filo = filo.replace('.'+vnamedict[reanalysis][variable]['name'],new_varname[variable]).replace('??.','.month.').replace('.day.','.month.')
if version: filo = filo.replace('.nc','.v{:s}.nc'.format(version))
return filo
def make_fileList(reanalysis, variable, date_range, grid=None):
'''
Generates a list of filepaths for a given reanalysis and variable for a date range.
The code deals with CFSR* spanning two products.
reanalysis - name of reanalysis
variable - my standard variable name
date_range - tuple of dates in format (yyyymmdd, yyyymmdd)
returns - filepath string
'''
from pandas import date_range as pd_date_range
import datetime as dt
filelist = []
for date in pd_date_range(date_range[0], date_range[1], freq='M'):
if (reanalysis == 'CFSR') & (date >= dt.datetime(2011,1,1)):
filelist.append(make_filepath('CFSR2', variable, date, grid=grid))
else:
filelist.append(make_filepath(reanalysis, variable, date, grid=grid))
return filelist
def date_from_filename(fname):
'''Extracts the YYYYMM from a filename and returns a datetime object'''
import re
import datetime as dt
m = re.search('\.(\d{6})[\?\.]', fname).groups()[0]
return dt.datetime.strptime(m, '%Y%m')
def make_time_coordinate(fileGlob):
'''
Generates a time coordinate using file times stamps
'''
import calendar
import pandas as pd
date = date_from_filename(fileGlob)
last_day = calendar.monthrange(date.year, date.month)[1]
start_date = '{:4d}{:02d}{:02d}'.format(date.year, date.month, 1)
end_date = '{:4d}{:02d}{:02d}'.format(date.year, date.month, last_day)
return xr.IndexVariable('time',pd.date_range(start_date, end_date, freq='D'))
def read_month(fileGlob, reanalysis, variable):
'''
Gets a xarray DataArray of days in month for a given variable
Need to add time dimension if I want to do something fancy
'''
vname = vnamedict[reanalysis][variable]
fileList = glob.glob(fileGlob)
with xr.open_mfdataset(fileList, concat_dim='time', data_vars='different') as ds:
# To deal with 2016 ERA-Interim data
if (reanalysis == 'ERA-Interim') & (date_from_filename(fileGlob).year > 2015):
ds.rename({'tp': 'PRECTOT', 'latitude': 'lat', 'longitude': 'lon'}, inplace=True)
# To deal with ERA5 variable name - this needs to be fixed in processing 6h files
if (reanalysis == 'ERA5'):
ds.rename({'tp': 'TOTPREC'}, inplace=True)
# A quick fix to deal with EASE grids and NaNs
if 'Nh50km' in fileList[0]:
ds[vname['name']] = ds[vname['name']].where(ds.latitude > -999.) # Set off-grid cells to NaN
if vname['scale'] != 1.:
attrs = ds[vname['name']].attrs
ds[vname['name']] = ds[vname['name']] * vname['scale'] # Scale to mm and change units
attrs['units'] = 'mm'
ds[vname['name']].attrs = attrs
ds.set_coords(['latitude','longitude'], inplace=True)
if 'time' not in ds.coords.keys(): ds.coords['time'] = make_time_coordinate(fileGlob)
#ds.load()
return ds
def apply_threshold(da, threshold=1.):
"""Set values < threshold to 0. NaNs stay as NaNs"""
with np.errstate(all='ignore'):
result = xr.where(da < threshold, np.nan, da)
result.attrs = da.attrs
return result
def wetday_mean(da, threshold=1.):
'''
Returns mean precipitation rate for wet days
wetdays are defined as days with rain rate greater than a threshold (default = 1. mm/day)
da - data array containing precipitation
threshold - threshold to distinguish wet days (default 1 mm/day)
Returns 2D data array with lat and lon dimensions
'''
nday = daysinmonth(da.time.values[0])
mask = da.count(dim='time') == nday # Mask cells with less than nday
result = apply_threshold(da, threshold=threshold).mean(dim='time', keep_attrs=True)
result = xr.where(mask & xr.ufuncs.isnan(result), 0., result)
return result.where(mask)
def wetdays(da, threshold=1.):
'''
Returns frequency of wet days
wetdays are defined as days with rain rate greater than a threshold (default = 1. mm/day)
da - data array containing precipitation
threshold - threshold to distinguish wet days (default 1 mm/day)
Returns 2D data array with lat and lon dimensions
'''
nday = daysinmonth(da.time.values[0])
mask = da.count(dim='time') == nday # Mask cells with less than nday
nwet = da.where(da > threshold).count(dim='time', keep_attrs=True)
fwet = nwet.astype(float)/float(nday)
fwet.attrs = nwet.attrs
fwet.attrs['units'] = 'none'
return fwet.where(mask)
def wetday_max(da, threshold=1.):
'''
Returns maximum precipitation rate for wet days (same as max of dataarray)
wetdays are defined as days with rain rate greater than a threshold (default = 1. mm/day)
da - data array containing precipitation
threshold - threshold to distinguish wet days (default 1 mm/day)
Returns 2D data array with lat and lon dimensions
'''
nday = daysinmonth(da.time.values[0])
mask = da.count(dim='time') == nday # Mask cells with less than nday
result = da.max(dim='time', keep_attrs=True)
return result.where(mask)
def wetday_total(da, threshold=1.):
'''
Returns total precipitation rate for wet days. This is not the same as the sum
all precipitation.
wetdays are defined as days with rain rate greater than a threshold (default = 1. mm/day)
da - data array containing precipitation
threshold - threshold to distinguish wet days (default 1 mm/day)
Returns 2D data array with lat and lon dimensions
'''
nday = daysinmonth(da.time.values[0])
mask = da.count(dim='time') == nday # Mask cells with less than nday
result = apply_threshold(da, threshold=threshold).sum(dim='time', keep_attrs=True)
return result.where(mask)
def all_total(da):
nday = daysinmonth(da.time.values[0])
return da.sum(dim='time', min_count=nday, keep_attrs=True)
def daysinmonth(dt64):
"""Returns numbers of days in a month for a given datetime object
date - numpy datetime64 (from xarray time)
"""
date = to_datetime(dt64)
return calendar.monthrange(date.year, date.month)[1]
def to_datetime(dt64):
"""Convert numpy datetime64 object to datetime"""
ns = 1e-9
return dt.datetime.utcfromtimestamp(dt64.astype(int)*ns)
def arbitSum(ds, dateStart, dateEnd):
sub = ds.sel(time=slice(dateStart,dateEnd))
nt = sub.time.size
it = np.floor(nt/2.).astype(int)
result = sub.sum(dim='time')
result = result.expand_dims('time', axis=0)
result.coords['time'] = sub['time'][it]
return result
def make_test_dataArray():
"""Returns a 3 cell x 31 time DataArray with random uniform and NaN values
wetday_mean = [nan, nan, 2.] with threshold=1.
prectot = [nan, nan, 11.5]
wetdays = [nan, nan, 0.16129]
wetday_max = [nan, nan, 2.]
"""
x = np.zeros(shape=(3,31))
x[0,:] = np.nan
x[1,[1,2,3,4,5,6,15,23,24,25]] = [np.nan,np.nan,0.1,0.5,2.,2.,2.,2.,0.9,2.]
x[2,[3,4,5,6,15,23,24,25]] = [0.1,0.5,2.,2.,2.,2.,0.9,2.]
da = xr.DataArray(x, dims=['x','time'])
da.coords['time'] = pd.date_range('19790101', freq='D', periods=31)
return da
def read_region_mask(grid='Nh50km'):
"""
Reads the Nh50km Arctic region mask and puts it into a xarray DataArray compatable with
the precip_stats Dataset
"""
mask_path = ('/oldhome/apbarret/data/seaice_indices/'
'Arctic_region_mask_Meier_AnnGlaciol2007_Nh50km.dat')
nrow = 360
ncol = 360
result = xr.DataArray(np.fromfile(mask_path, dtype=float).reshape(nrow,ncol),
dims=['x','y'])
return result
def region_stats(ds, mask, region_name):
"""
Extracts stats for a given region from an EASE grid data set
"""
agg = ds.where(mask == arctic_mask_region[region_name]).mean(dim=['x','y'])
if 'latitude' in agg:
agg = agg.drop('latitude')
if 'longitude' in agg:
agg = agg.drop('longitude')
return agg
def read_arctic_regional_stats(filepath):
"""
Reads a summary file of Arctic regional stats into a multi-level pandas data frame
"""
return pd.read_csv(filepath, header=[0,1], index_col=0,
infer_datetime_format=True, parse_dates=True)
def load_annual_accumulation(reanalysis):
"""Loads annual accumulation period fields
Returns: an xarray dataset
"""
ds = xr.open_dataset(accumulation_period_filepath[reanalysis])
# Modify coordinate names to match other files
# This will be fixed in a later version
if reanalysis == 'CFSR':
print (ds)
#ds.rename({'row': 'x', 'col': 'y'}, inplace=True)
return ds
def load_annual_total(reanalysis):
"""Loads annual total precipitation stats"""
ds = xr.open_dataset(annual_total_filepath[reanalysis])
return ds
def read_latlon_region_mask():
mask_path = ('/oldhome/apbarret/data/seaice_indices/'
'Arctic_region_mask_Meier_AnnGlaciol2007_latlon.tif')
mask = xr.open_rasterio(mask_path)[0,:,:]
mask = mask.rename({'x': 'longitude', 'y': 'latitude'})
#mask.values = mask.values[:,::-1]
return mask
def get_central_arctic_mask(grid='Nh50km'):
"""Loads the Arctic region mask and selects central Arctic regions"""
if grid == 'Nh50km':
mask = read_region_mask()
elif grid == 'latlon':
mask = read_latlon_region_mask()
else:
raise ValueError("Unknown grid")
central_arctic = mask.where((mask == arctic_mask_region['CENTRAL_ARCTIC']) | \
(mask == arctic_mask_region['BEAUFORT']) | \
(mask == arctic_mask_region['CHUKCHI']) | \
(mask == arctic_mask_region['LAPTEV']) | \
(mask == arctic_mask_region['EAST_SIBERIAN']))
return central_arctic
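# Hedged self-check sketch (added, not part of the original module): exercises the wet-day
# statistics on the synthetic DataArray above; expected values are the ones listed in the
# make_test_dataArray docstring.
if __name__ == '__main__':
    test_da = make_test_dataArray()
    print('wetday_mean:', wetday_mean(test_da).values)   # expect [nan, nan, 2.]
    print('wetdays    :', wetdays(test_da).values)       # expect [nan, nan, ~0.16129]
    print('wetday_max :', wetday_max(test_da).values)    # expect [nan, nan, 2.]
    print('all_total  :', all_total(test_da).values)     # expect [nan, nan, 11.5] ("prectot" in the docstring)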
|
Python
|
CL
|
7f88980651c57d1312f81dbb0e050ea9f9058a00dcd8f75b17912a541d648934
|
import os
import tensorflow as tf
# Fetch compressed model data from TensorFlow Hub
os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED"
# Import required packages
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams["figure.figsize"] = (12, 12)
mpl.rcParams["axes.grid"] = False
import numpy as np
import PIL.Image
import time
import functools
# Convert a tensor to an image
def tensor_to_image(tensor):
tensor = tensor * 255
tensor = np.array(tensor, dtype=np.uint8)
if np.ndim(tensor) > 3:
assert tensor.shape[0] == 1
tensor = tensor[0]
return PIL.Image.fromarray(tensor)
# Load an image and limit its longest side to 512 px
def load_img(path_to_img):
    # maximum size
    max_dim = 512
img = tf.io.read_file(path_to_img)
img = tf.image.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
shape = tf.cast(tf.shape(img)[:-1], tf.float32)
long_dim = max(shape)
scale = max_dim / long_dim
new_shape = tf.cast(shape * scale, tf.int32)
img = tf.image.resize(img, new_shape)
img = img[tf.newaxis, :]
return img
# Function to display an image
def imshow(image, title=None):
if len(image.shape) > 3:
image = tf.squeeze(image, axis=0)
plt.imshow(image)
if title:
plt.title(title)
# Download the images -----
# Content image: the one the filter is applied to
content_path = tf.keras.utils.get_file('YellowLabradorLooking_new.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg')
# Style image: the one that acts as the filter
style_path = tf.keras.utils.get_file('kandinsky5.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg')
# Load the images -----
content_image = load_img(content_path)
style_image = load_img(style_path)
# Commented-out snippets
{
# Run the image display -----
# plt.subplot(1, 2, 1)
# imshow(content_image, "Content Image")
# plt.subplot(1, 2, 2)
# imshow(style_image, 'Style Image')
# plt.show()
# Quick Look at the images -----
# For when you want to view them right away with the environment's default viewer
# im = PIL.Image.open(content_path)
# im = PIL.Image.open(style_path)
# im.show()
# By the way, let's look at the resulting image -----
# import tensorflow_hub as hub
# hub_model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
# stylized_image = hub_model(tf.constant(content_image), tf.constant(style_image))[0]
# tensor_to_image(stylized_image).show()
}
|
Python
|
CL
|
ab4128494536911015dfc6bac9a8b439c828d2ba82fea73939e078018f41ae20
|
#------------------------------------------
#--- This code creates a client, which subscribes to the topic "test/topic" on server "test.mosquitto.org"
#--- and loops forever, printing each message as it arrives
#--- Author: Minhnt27
#--- Date: 6th May 2020
#--- Version: 1.0
#--- Python Ver: 3.7
#--- ref from: https://pypi.org/project/paho-mqtt/
#--- lib installation cmd: pip install paho-mqtt
#------------------------------------------
import paho.mqtt.client as mqtt
#================================================
# test with topic "test/topic" on server "test.mosquitto.org"
topic="test/topic"
server="test.mosquitto.org"
port=1883
keepalive=60
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(topic)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload))
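# Hedged companion sketch (added, not part of the original script): a minimal publisher for
# the same topic, handy for testing the subscriber below from another script or terminal.
def publish_test_message(payload="hello from paho"):
    pub = mqtt.Client()
    pub.connect(server, port, keepalive)
    pub.publish(topic, payload)
    pub.disconnect()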
#=================================================
# main
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(server, port, keepalive)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
|
Python
|
CL
|
9e2aa5026b1d070796431e3fcb074a330e0978cebb072040598d687bdb30da04
|
# Generated by Django 2.2.7 on 2019-12-06 00:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('task', '0013_auto_20191128_1457'),
]
operations = [
migrations.AlterField(
model_name='whitepaper',
name='acceptor',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='wp_acceptor', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='wpTask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('description', models.TextField(blank=True, max_length=1024)),
('dueTime', models.DateField()),
('uploadTime', models.DateTimeField(auto_now_add=True)),
('updateTime', models.DateTimeField(auto_now=True)),
('accepted', models.BooleanField(default=False)),
('WP', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='task.whitePaper')),
('acceptor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_acceptor', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='taskFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('WPFile', models.FileField(blank=True, null=True, upload_to='')),
('wpt', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='task.wpTask')),
],
),
]
|
Python
|
CL
|
c43e32ffcb70c78580e7007c6cc91fc35ae901bebf0e6a3172213fd59f714547
|
from abc import ABC, abstractmethod
class BooknotStorage(ABC):
@abstractmethod
def exists(self) -> bool:
raise NotImplementedError()
@abstractmethod
def init_store(self) -> None:
raise NotImplementedError()
@abstractmethod
def create_sphinx(self, project, author) -> None:
raise NotImplementedError()
@abstractmethod
def is_sphinx_present(self) -> bool:
raise NotImplementedError()
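# Hedged illustration (added): a minimal in-memory class showing how a concrete backend is
# expected to satisfy the BooknotStorage interface. Names and behavior here are assumptions
# for illustration only, not taken from the project's real storage backend.
class InMemoryBooknotStorage(BooknotStorage):
    def __init__(self) -> None:
        self._initialized = False
        self._sphinx = None
    def exists(self) -> bool:
        return self._initialized
    def init_store(self) -> None:
        self._initialized = True
    def create_sphinx(self, project, author) -> None:
        self._sphinx = (project, author)
    def is_sphinx_present(self) -> bool:
        return self._sphinx is not None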
|
Python
|
CL
|
18d132a8406794cea33439b98d4330279dda36011433c2856bff1aafdc00069e
|
import pickle
from copy import deepcopy
# -----------------------------------------------------------------------------
# Automata classes
# -----------------------------------------------------------------------------
class State:
def __init__(self, abc):
self.next = {symb: set() for symb in abc}
self.is_terminal = False
def get_next(self, letter):
return next(iter(self.next[letter]))
class Automata:
def __init__(self, abc):
self.abc = abc
self.state = dict()
def print(self):
for state_id, state in self.state.items():
not_term = '' if state.is_terminal else 'not '
print('State {} is {}terminal:'.format(state_id, not_term))
for letter, v_to in state.next.items():
print(letter, '->', v_to)
print_delimiter()
def print_delimiter():
print('-' * 80)
# -----------------------------------------------------------------------------
# Input
# -----------------------------------------------------------------------------
def get_id(state_id):
return frozenset([str(state_id)])
def input_automata():
abc = input('Input alphabet: ').split()
state_count = int(input('Input state count: '))
terminal_states = input('Input terminal states: ').split()
automata = Automata(abc)
for i in range(state_count):
automata.state[get_id(i)] = State(abc)
for terminal in terminal_states:
automata.state[get_id(terminal)].is_terminal = True
edge_count = int(input('Input edge count: '))
for i in range(edge_count):
edge = input('Input edge <from> <letter> <to>: ').split()
v_from, letter, v_to = edge
automata.state[get_id(v_from)].next[letter].add(get_id(v_to))
print_delimiter()
return automata
def input_and_save():
with open('automata.pickle', 'wb') as f:
pickle.dump(input_automata(), f)
def load_and_get():
with open('automata.pickle', 'rb') as f:
auto = pickle.load(f)
return auto
# -----------------------------------------------------------------------------
# Determinization
# -----------------------------------------------------------------------------
def merge_states(state_id_list, automata):
new_state = State(automata.abc)
for state_id in state_id_list:
state = automata.state[get_id(state_id)]
new_state.is_terminal |= state.is_terminal
for letter in new_state.next.keys():
new_state.next[letter] |= state.next[letter]
return new_state
def add_steps_to_trash(state):
need_trash = False
for letter in state.next.keys():
if len(state.next[letter]) == 0:
need_trash = True
state.next[letter].add(get_id('trash'))
return need_trash
def determinize(automata):
det_auto = Automata(automata.abc)
queue = {get_id(0)}
processed_states = set()
while len(queue) > 0:
state_id = queue.pop()
processed_states.add(state_id)
# combine the sets of states in which transitions on letters
state = merge_states(state_id, automata)
det_auto.state[state_id] = state
# transform sets into new states
for letter in state.next.keys():
if len(state.next[letter]) > 0:
next_state_id = []
for next_id in state.next[letter]:
next_state_id += list(next_id)
new_next_id = frozenset(next_state_id)
state.next[letter] = {new_next_id}
if new_next_id not in processed_states:
queue.add(new_next_id)
need_trash = False
for state in det_auto.state.values():
need_trash |= add_steps_to_trash(state)
if need_trash:
trash_state = State(det_auto.abc)
add_steps_to_trash(trash_state)
det_auto.state[get_id('trash')] = trash_state
return det_auto
# -----------------------------------------------------------------------------
# Inverse
# -----------------------------------------------------------------------------
def inverse(automata):
automata = determinize(automata)
for state in automata.state.values():
state.is_terminal = not state.is_terminal
return automata
# -----------------------------------------------------------------------------
# Minimization
# -----------------------------------------------------------------------------
def get_identity(state_id, state, classes, abc):
identity = [classes[state_id]]
identity += [classes[state.get_next(letter)] for letter in abc]
return tuple(identity)
def get_min_state_id(classes, i):
for state_id, class_id in classes.items():
if class_id == i:
return state_id
def minimize(automata):
automata = determinize(automata)
previous_classes = dict()
for state_id, state in automata.state.items():
previous_classes[state_id] = int(state.is_terminal)
while True:
mapping = dict()
classes = dict()
class_count = 0
for state_id, state in automata.state.items():
state_identity = get_identity(state_id, state, previous_classes,
automata.abc)
if state_identity not in mapping:
mapping[state_identity] = class_count
class_count += 1
classes[state_id] = mapping[state_identity]
if classes == previous_classes:
break
previous_classes = classes
min_auto = Automata(automata.abc)
for i in range(class_count):
state = deepcopy(automata.state[get_min_state_id(classes, i)])
for letter in state.next.keys():
state.next[letter] = {get_id(classes[state.get_next(letter)])}
min_auto.state[get_id(i)] = state
print(classes)
return min_auto
# -----------------------------------------------------------------------------
# Equivalence
# -----------------------------------------------------------------------------
def dfs(auto1, cur1, visited1, auto2, cur2, visited2, letter):
visited1.add((letter, cur1))
visited2.add((letter, cur2))
if auto1.state[cur1].is_terminal != auto2.state[cur2].is_terminal:
return False
for letter in auto1.state[cur1].next.keys():
next1 = (letter, auto1.state[cur1].get_next(letter))
next2 = (letter, auto2.state[cur2].get_next(letter))
if (next1 in visited1) != (next2 in visited2):
return False
if next1 not in visited1:
res = dfs(auto1, next1[1], visited1, auto2, next2[1], visited2,
letter)
if not res:
return False
return True
def equiv(auto1, auto2):
auto1 = minimize(auto1)
auto2 = minimize(auto2)
if len(auto1.state) != len(auto2.state):
return False
visited1 = set()
visited2 = set()
return dfs(auto1, get_id(0), visited1, auto2, get_id(0), visited2, '0')
if __name__ == '__main__':
# example of backup and restore
# input_and_save()
my_auto = load_and_get()
edge_count = int(input('Input edge count: '))
for i in range(edge_count):
edge = input('Input edge <from> <letter> <to>: ').split()
v_from, letter, v_to = edge
my_auto.state[get_id(v_from)].next[letter].add(get_id(v_to))
my_auto.print()
# my_auto = input_automata()
minimize(my_auto).print()
|
Python
|
CL
|
48511913ee282e59ccd1b11ff3dbdf58bfaf7b4fbf3920076b4c74d8c69f5e36
|
# Django settings for wardenclyffe project.
import os.path
import sys
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
WATCH_DIRECTORY = "/var/www/wardenclyffe/tmp/watch_dir/"
ALLOWED_HOSTS = [".ccnmtl.columbia.edu", "localhost"]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'wardenclyffe',
'HOST': '',
'PORT': 5432,
'USER': '',
'PASSWORD': '',
}
}
if 'test' in sys.argv or 'jenkins' in sys.argv:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'HOST': '',
'PORT': '',
'USER': '',
'PASSWORD': '',
}
}
SURELINK_PROTECTION_KEY = "test-dummy-key"
MEDIATHREAD_SECRET = "test-dummy-secret"
WATCH_DIRECTORY = "/tmp/"
TMP_DIR = "/tmp"
PCP_BASE_URL = ""
CELERY_ALWAYS_EAGER = True
SOUTH_TESTS_MIGRATE = False
SOUTH_AUTO_FREEZE_APP = True
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
JENKINS_TASKS = (
'django_jenkins.tasks.run_pylint',
'django_jenkins.tasks.with_coverage',
'django_jenkins.tasks.run_pep8',
'django_jenkins.tasks.run_pyflakes',
)
PROJECT_APPS = [
'wardenclyffe.main',
'wardenclyffe.mediathread',
'wardenclyffe.util',
'wardenclyffe.youtube',
'wardenclyffe.cuit',
'wardenclyffe.graphite',
]
NOSE_ARGS = [
'--with-coverage',
"--with-doctest",
"--noexe",
"--exclude-dir-file=exclude_tests.txt",
('--cover-package=wardenclyffe.main,wardenclyffe.mediathread,'
'wardenclyffe.youtube,'
'wardenclyffe.util,wardenclyffe.graphite'),
]
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
MEDIA_ROOT = "/var/www/wardenclyffe/uploads/"
MEDIA_URL = '/uploads/'
STATIC_URL = '/media/'
SECRET_KEY = ')ng#)ef_u@_^zvvu@dxm7ql-yb^_!a6%v3v^j3b(mp+)l+5%@h'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.request',
'django.core.context_processors.static',
)
MIDDLEWARE_CLASSES = (
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'waffle.middleware.WaffleMiddleware',
)
ROOT_URLCONF = 'wardenclyffe.urls'
TEMPLATE_DIRS = (
"/var/www/wardenclyffe/templates/",
os.path.join(os.path.dirname(__file__), "templates"),
)
import djcelery
djcelery.setup_loader()
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.messages',
'django.contrib.staticfiles',
'compressor',
'sorl.thumbnail',
'django.contrib.admin',
'tagging',
'smartif',
'template_utils',
'djcelery',
'wardenclyffe.main',
'wardenclyffe.mediathread',
'wardenclyffe.youtube',
'wardenclyffe.util',
'oembed',
'taggit',
'south',
'django_nose',
'wardenclyffe.cuit',
'django_statsd',
'smoketest',
'waffle',
'debug_toolbar',
'django_jenkins',
'wardenclyffe.graphite',
'django_extensions',
'django_markwhat',
]
INTERNAL_IPS = ('127.0.0.1', )
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
)
STATSD_CLIENT = 'statsd.client'
STATSD_PREFIX = 'wardenclyffe'
STATSD_HOST = 'localhost'
STATSD_PORT = 8125
#STATSD_PATCHES = ['django_statsd.patches.db', ]
BROKER_URL = "amqp://localhost:5672//"
CELERYD_CONCURRENCY = 4
class MyRouter(object):
def route_for_task(self, task, args=None, kwargs=None):
if task.startswith('wardenclyffe.graphite.tasks'):
return {
'exchange': 'graphite',
'exchange_type': 'direct',
'routing_key': 'graphite',
}
if task == 'wardenclyffe.main.tasks.check_for_slow_operations':
return {'exchange': 'short',
'exchange_type': 'direct',
'routing_key': 'short'}
if task == 'wardenclyffe.main.tasks.move_file':
return {'exchange': 'batch',
'exchange_type': 'direct',
'routing_key': 'batch'}
return None
CELERY_ROUTES = (MyRouter(),)
THUMBNAIL_SUBDIR = "thumbs"
EMAIL_SUBJECT_PREFIX = "[wardenclyffe] "
EMAIL_HOST = 'localhost'
SERVER_EMAIL = "wardenclyffe@ccnmtl.columbia.edu"
# email addresses of video team members who want to
# be annoyed by lots of status email
ANNOY_EMAILS = ["jhanford@columbia.edu", "anders@columbia.edu"]
VIDEO_TEAM_EMAILS = ["jhanford@columbia.edu"]
# WIND settings
AUTHENTICATION_BACKENDS = ('djangowind.auth.WindAuthBackend',
'django.contrib.auth.backends.ModelBackend', )
WIND_BASE = "https://wind.columbia.edu/"
WIND_SERVICE = "cnmtl_full_np"
WIND_PROFILE_HANDLERS = ['djangowind.auth.CDAPProfileHandler']
WIND_AFFIL_HANDLERS = ['djangowind.auth.AffilGroupMapper',
'djangowind.auth.StaffMapper',
'djangowind.auth.SuperuserMapper']
WIND_STAFF_MAPPER_GROUPS = ['tlc.cunix.local:columbia.edu']
WIND_SUPERUSER_MAPPER_GROUPS = ['anp8', 'jb2410', 'zm4', 'egr2107',
'amm8', 'mar227', 'njn2118', 'jed2161']
H264_SECURE_STREAM_DIRECTORY = "/media/h264/ccnmtl/secure/"
H264_PUBLIC_STREAM_DIRECTORY = "/media/h264/ccnmtl/public/"
H264_SECURE_STREAM_BASE = "http://stream.ccnmtl.columbia.edu/secvideos/"
H264_PUBLIC_STREAM_BASE = "http://stream.ccnmtl.columbia.edu/public/"
CUNIX_BROADCAST_DIRECTORY = "/www/data/ccnmtl/broadcast/"
CUNIX_BROADCAST_URL = "http://ccnmtl.columbia.edu/broadcast/"
CUNIX_SECURE_DIRECTORY = "/www/data/ccnmtl/broadcast/secure/"
CUNIX_H264_DIRECTORY = "/media/h264"
FLV_STREAM_BASE_URL = "http://ccnmtl.columbia.edu/stream/flv/"
MAX_FRAMES = 50
POSTER_BASE_URL = "http://wardenclyffe.ccnmtl.columbia.edu/uploads/"
DEFAULT_POSTER_URL = (
"http://ccnmtl.columbia.edu/"
"broadcast/posters/vidthumb_480x360.jpg")
STAGING = False
STATIC_ROOT = os.path.join(os.path.dirname(__file__), "../media")
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
IONICE_PATH = "/usr/bin/ionice"
MPLAYER_PATH = "/usr/bin/mplayer"
COMPRESS_URL = "/media/"
COMPRESS_ROOT = "media/"
SOUTH_MIGRATION_MODULES = {
'taggit': 'taggit.south_migrations',
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
}
|
Python
|
CL
|
033137e360b4951aad824922071d2fc308c9c052decdd76ae5e034013a3568a2
|
from django.core.management import call_command
from django.test import TestCase
from integration.models import Channel, Category
class ImportCategoriesTestCase(TestCase):
csv_dir = 'integration/tests/'
def test_created_objects(self):
"""
        Asserts that the correct number of objects is created.
        Several kinds of runs are exercised:
        running normally, repeating the same command, using a file with
        repeated rows, and using a file with categories in a random order.
"""
for channel, file in [('channel1', 'importcategories.csv'),
('channel1', 'importcategories.csv'),
('channel2', 'importcategories-repeated.csv'),
('channel3', 'importcategories-unordered.csv')]:
args = [channel, self.csv_dir + file]
call_command('importcategories', *args)
self.assertEquals(Channel.objects.filter(name=channel).count(), 1)
self.assertEquals(
Category.objects.filter(channel__name=channel).count(), 23)
c = Category.objects.filter(name__contains='360').first()
anc = c.get_ancestors(c.reference)
self.assertEquals(anc.count(), 1)
def test_relatives(self):
"""
Asserts the correct retrieval of both ancestors and descendants
:return:
"""
args = ['channel1', self.csv_dir + 'importcategories.csv']
call_command('importcategories', *args)
c = Category.objects.filter(name__contains='360').first()
# Ancestors
self.assertEquals(c.get_ancestors(c.reference).count(), 1)
anc = c.get_ancestors(c.reference, get_current=True)
self.assertEquals(anc.count(), 2)
self.assertTrue(c in anc)
# Descendants
self.assertEquals(c.get_descendants(c.reference).count(), 3)
desc = c.get_descendants(c.reference, get_current=True)
self.assertEquals(desc.count(), 4)
self.assertTrue(c in desc)
|
Python
|
CL
|
da50ea6910a4ba2ee45b7e54d0bf63dc437388d6688389c3ca71749c8486243f
|
#!/usr/bin/env python
# coding=utf-8
# At front-end release time, this script automatically rewrites the static files
# referenced in .html files to: {original file name}_hash.{ext}
# Reference:
# http://www.infoq.com/cn/articles/front-end-engineering-and-performance-optimization-part1
import hashlib
import re
import shutil
import os
import importlib
import sys
from bs4 import BeautifulSoup
import codecs
import logging
import argparse
settings = None  # the Django settings module
replace_re = re.compile('({{\s*STATIC_URL\s*}})')
logger = logging.getLogger()
DEBUG = True
def _copy_hashed_file(origin_file_name):
    '''
    Copy a hashed version of the file.
    @return the path of the new hashed copy
    '''
hash_code = ''
with open(origin_file_name, 'r') as f:
hash_code = hashlib.sha1(f.read()).hexdigest()
dir_name = os.path.dirname(origin_file_name)
file_name, extname = os.path.splitext(origin_file_name)
file_name = origin_file_name[len(dir_name) + 1:len(origin_file_name) - len(extname)]
t_filename = '%s/%s_%s%s' % (dir_name, file_name, hash_code[0:7], extname)
shutil.copyfile(origin_file_name, t_filename)
return t_filename
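# Illustration only (hypothetical file name): static/js/app.js whose SHA-1 starts with
# "1a2b3c4..." is copied to static/js/app_1a2b3c4.js (the first 7 hex characters of the hash).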
def _load_settings(path):
sys.path.append(path)
global settings
settings = importlib.import_module('settings')
def _ref_file(filename):
soup = BeautifulSoup(codecs.open(filename, encoding='UTF-8'), 'html5lib')
def _ref_node(node_name, attr_name, **kwargs):
        '''
        Process the matching tags of the given node type.
        '''
for tag in soup.find_all(node_name, **kwargs):
attr_value = tag[attr_name] if attr_name in tag.attrs else None
if attr_value and not (attr_value.startswith('http') or attr_value.startswith('//')):
tag[attr_name] = _gen_new_ref(attr_value)
def _gen_new_ref(v):
        '''
        Generate the new hashed file and return the new reference.
        '''
if not 'STATIC_URL' in v:
return v
for path in settings.STATICFILES_DIRS:
map_path = r'%s/' % path
old_file = replace_re.sub(map_path, v)
if not os.path.exists(old_file):
continue
new_file = _copy_hashed_file(old_file)
new_file = new_file.replace(map_path, '{{ STATIC_URL }}')
            logger.info(u'source file: %s  target file: %s' % (old_file, new_file))
return new_file
_ref_node('script', 'src')
_ref_node('link', 'href', rel='stylesheet')
    body = ''.join(unicode(l) for l in soup.body.contents)  # strip the unnecessary <html>/<body> tags the parser adds automatically
if not DEBUG:
f = codecs.open(filename, 'r+', encoding='UTF-8')
f.write(body)
f.close()
else:
print body
def scan_ref():
def _scan_ref(file_or_dir):
if os.path.isdir(file_or_dir):
for f in os.listdir(file_or_dir):
f = os.path.join(file_or_dir, f)
                logger.info(u'entering directory: %s' % f)
_scan_ref(f)
logging.info('\r\n\r\n')
else:
            logger.info(u'processing file: %s' % file_or_dir)
_ref_file(file_or_dir)
for d in settings.TEMPLATE_DIRS:
for f in os.listdir(d):
_scan_ref(os.path.join(d, f))
    logger.info(u'\r\n\r\nDone.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--debug', '-d', action='store_true', help=u'Debug mode. Do not write files.')
    parser.add_argument('path_to_settings', type=str, help=u'The Django settings.py file. Full path required.')
args = parser.parse_args()
    DEBUG = args.debug
    _load_settings(args.path_to_settings)
    scan_ref()
|
Python
|
CL
|
ff741bf847232793368c3e6bb8f24fe6b98b437c95debcccb4da6692a985a479
|
# Generated by Django 2.2.19 on 2021-02-25 18:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('course', '0040_auto_20210203_0812'),
]
operations = [
migrations.CreateModel(
name='Module',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
('description', models.TextField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='module_course', to='course.Course')),
],
options={
'verbose_name_plural': 'Modules',
'ordering': ('pk',),
},
),
migrations.RemoveField(
model_name='lesson',
name='course',
),
migrations.CreateModel(
name='ModuleProgress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('module', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='module_progress', to='course.Module')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Module Progress',
},
),
migrations.AddField(
model_name='lesson',
name='module',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lesson_module', to='course.Module'),
),
]
|
Python
|
CL
|
9f5e5b879137e76d83b6d0a4a9116b5b154da7e6a57b8a8fd1b6d557af36aaca
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains a class which implements the FIPA protocol for the TAC."""
import json
import logging
import pprint
from typing import Union, List
from oef.messages import CFP, Decline, Propose, Accept
from oef.uri import Context
from tac.agents.v1.base.game_instance import GameInstance
from tac.agents.v1.base.dialogues import Dialogue
from tac.agents.v1.mail import OutContainer
from tac.agents.v1.base.helpers import generate_transaction_id
from tac.agents.v1.base.stats_manager import EndState
from tac.helpers.crypto import Crypto
from tac.platform.protocol import Transaction
logger = logging.getLogger(__name__)
STARTING_MESSAGE_ID = 1
class FIPABehaviour:
"""Specifies FIPA negotiation behaviours."""
def __init__(self, crypto: Crypto, game_instance: GameInstance, agent_name: str) -> None:
"""
Instantiate the FIPABehaviour.
:param crypto: the crypto module
:param game_instance: the game instance
:param agent_name: the agent_name of the agent
:return: None
"""
self._crypto = crypto
self._game_instance = game_instance
self._agent_name = agent_name
@property
def game_instance(self) -> GameInstance:
"""Get the game instance."""
return self._game_instance
@property
def crypto(self) -> Crypto:
"""Get the crypto."""
return self._crypto
@property
def agent_name(self) -> str:
"""Get the agent name."""
return self._agent_name
def on_cfp(self, cfp: CFP, dialogue: Dialogue) -> Union[Propose, Decline]:
"""
Handle a CFP.
:param cfp: the CFP
:param dialogue: the dialogue
:return: a Propose or a Decline
"""
goods_description = self.game_instance.get_service_description(is_supply=dialogue.is_seller)
new_msg_id = cfp.msg_id + 1
decline = False
cfp_services = json.loads(cfp.query.decode('utf-8'))
if not self.game_instance.is_matching(cfp_services, goods_description):
decline = True
logger.debug("[{}]: Current holdings do not satisfy CFP query.".format(self.agent_name))
else:
proposal = self.game_instance.generate_proposal(cfp_services, dialogue.is_seller)
if proposal is None:
decline = True
logger.debug("[{}]: Current strategy does not generate proposal that satisfies CFP query.".format(self.agent_name))
if decline:
logger.debug("[{}]: sending to {} a Decline{}".format(self.agent_name, cfp.destination,
pprint.pformat({
"msg_id": new_msg_id,
"dialogue_id": cfp.dialogue_id,
"origin": cfp.destination,
"target": cfp.msg_id
})))
response = Decline(new_msg_id, cfp.dialogue_id, cfp.destination, cfp.msg_id, Context())
self.game_instance.stats_manager.add_dialogue_endstate(EndState.DECLINED_CFP, dialogue.is_self_initiated)
else:
transaction_id = generate_transaction_id(self.crypto.public_key, cfp.destination, dialogue.dialogue_label, dialogue.is_seller)
transaction = Transaction.from_proposal(proposal=proposal,
transaction_id=transaction_id,
is_sender_buyer=not dialogue.is_seller,
counterparty=cfp.destination,
sender=self.crypto.public_key,
crypto=self.crypto)
self.game_instance.transaction_manager.add_pending_proposal(dialogue.dialogue_label, new_msg_id, transaction)
logger.debug("[{}]: sending to {} a Propose{}".format(self.agent_name, cfp.destination,
pprint.pformat({
"msg_id": new_msg_id,
"dialogue_id": cfp.dialogue_id,
"origin": cfp.destination,
"target": cfp.msg_id,
"propose": proposal.values
})))
response = Propose(new_msg_id, cfp.dialogue_id, cfp.destination, cfp.msg_id, [proposal], Context())
return response
def on_propose(self, propose: Propose, dialogue: Dialogue) -> Union[Accept, Decline]:
"""
Handle a Propose.
:param propose: the Propose
:param dialogue: the dialogue
:return: an Accept or a Decline
"""
logger.debug("[{}]: on propose as {}.".format(self.agent_name, dialogue.role))
proposal = propose.proposals[0]
transaction_id = generate_transaction_id(self.crypto.public_key, propose.destination, dialogue.dialogue_label, dialogue.is_seller)
transaction = Transaction.from_proposal(proposal=proposal,
transaction_id=transaction_id,
is_sender_buyer=not dialogue.is_seller,
counterparty=propose.destination,
sender=self.crypto.public_key,
crypto=self.crypto)
new_msg_id = propose.msg_id + 1
is_profitable_transaction, message = self.game_instance.is_profitable_transaction(transaction, dialogue)
logger.debug(message)
if is_profitable_transaction:
logger.debug("[{}]: Accepting propose (as {}).".format(self.agent_name, dialogue.role))
self.game_instance.transaction_manager.add_locked_tx(transaction, as_seller=dialogue.is_seller)
self.game_instance.transaction_manager.add_pending_initial_acceptance(dialogue.dialogue_label, new_msg_id, transaction)
result = Accept(new_msg_id, propose.dialogue_id, propose.destination, propose.msg_id, Context())
else:
logger.debug("[{}]: Declining propose (as {})".format(self.agent_name, dialogue.role))
result = Decline(new_msg_id, propose.dialogue_id, propose.destination, propose.msg_id, Context())
self.game_instance.stats_manager.add_dialogue_endstate(EndState.DECLINED_PROPOSE, dialogue.is_self_initiated)
return result
def on_decline(self, decline: Decline, dialogue: Dialogue) -> None:
"""
Handle a Decline.
:param decline: the decline
:param dialogue: the dialogue
:return: None
"""
logger.debug("[{}]: on_decline: msg_id={}, dialogue_id={}, origin={}, target={}"
.format(self.agent_name, decline.msg_id, decline.dialogue_id, decline.destination, decline.target))
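        # The decline's target is the msg_id of our earlier message being declined:
        # 1 -> our CFP, 2 -> our Propose, 3 -> our initial Accept.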
if decline.target == 1:
self.game_instance.stats_manager.add_dialogue_endstate(EndState.DECLINED_CFP, dialogue.is_self_initiated)
elif decline.target == 2:
self.game_instance.stats_manager.add_dialogue_endstate(EndState.DECLINED_PROPOSE, dialogue.is_self_initiated)
transaction = self.game_instance.transaction_manager.pop_pending_proposal(dialogue.dialogue_label, decline.target)
if self.game_instance.strategy.is_world_modeling:
self.game_instance.world_state.update_on_declined_propose(transaction)
elif decline.target == 3:
self.game_instance.stats_manager.add_dialogue_endstate(EndState.DECLINED_ACCEPT, dialogue.is_self_initiated)
transaction = self.game_instance.transaction_manager.pop_pending_initial_acceptance(dialogue.dialogue_label, decline.target)
self.game_instance.transaction_manager.pop_locked_tx(transaction.transaction_id)
return None
def on_accept(self, accept: Accept, dialogue: Dialogue) -> Union[List[Decline], List[Union[OutContainer, Accept]], List[OutContainer]]:
"""
Handle an Accept.
:param accept: the accept
:param dialogue: the dialogue
        :return: a Decline, or an Accept and a Transaction (in an OutContainer), or a Transaction (in an OutContainer)
"""
logger.debug("[{}]: on_accept: msg_id={}, dialogue_id={}, origin={}, target={}"
.format(self.agent_name, accept.msg_id, accept.dialogue_id, accept.destination, accept.target))
if dialogue.dialogue_label in self.game_instance.transaction_manager.pending_initial_acceptances \
and accept.target in self.game_instance.transaction_manager.pending_initial_acceptances[dialogue.dialogue_label]:
results = self._on_match_accept(accept, dialogue)
else:
results = self._on_initial_accept(accept, dialogue)
return results
def _on_initial_accept(self, accept: Accept, dialogue: Dialogue) -> Union[List[Decline], List[Union[OutContainer, Accept]]]:
"""
Handle an initial Accept.
:param accept: the accept
:param dialogue: the dialogue
        :return: a Decline, or an Accept and a Transaction (in an OutContainer)
"""
transaction = self.game_instance.transaction_manager.pop_pending_proposal(dialogue.dialogue_label, accept.target)
new_msg_id = accept.msg_id + 1
results = []
is_profitable_transaction, message = self.game_instance.is_profitable_transaction(transaction, dialogue)
logger.debug(message)
if is_profitable_transaction:
if self.game_instance.strategy.is_world_modeling:
self.game_instance.world_state.update_on_initial_accept(transaction)
logger.debug("[{}]: Locking the current state (as {}).".format(self.agent_name, dialogue.role))
self.game_instance.transaction_manager.add_locked_tx(transaction, as_seller=dialogue.is_seller)
results.append(OutContainer(message=transaction.serialize(), message_id=STARTING_MESSAGE_ID, dialogue_id=accept.dialogue_id, destination=self.game_instance.controller_pbk))
results.append(Accept(new_msg_id, accept.dialogue_id, accept.destination, accept.msg_id, Context()))
else:
logger.debug("[{}]: Decline the accept (as {}).".format(self.agent_name, dialogue.role))
results.append(Decline(new_msg_id, accept.dialogue_id, accept.destination, accept.msg_id, Context()))
self.game_instance.stats_manager.add_dialogue_endstate(EndState.DECLINED_ACCEPT, dialogue.is_self_initiated)
return results
def _on_match_accept(self, accept: Accept, dialogue: Dialogue) -> List[OutContainer]:
"""
Handle a matching Accept.
:param accept: the accept
:param dialogue: the dialogue
:return: a Transaction
"""
logger.debug("[{}]: on match accept".format(self.agent_name))
results = []
transaction = self.game_instance.transaction_manager.pop_pending_initial_acceptance(dialogue.dialogue_label, accept.target)
results.append(OutContainer(message=transaction.serialize(), message_id=STARTING_MESSAGE_ID, dialogue_id=accept.dialogue_id, destination=self.game_instance.controller_pbk))
return results
|
Python
|
CL
|
e3ea46fdbfeaec9e3e52290ec34928f3b9105e4b73a7a621ea91dcecfff5e7c2
|
"""
Character recognition software is widely used to digitise printed texts. Thus the texts can be edited, searched and stored on a computer.
When documents (especially fairly old ones written on a typewriter) are digitised, character recognition software often makes mistakes.
Your task is to correct the errors in the digitised text. You only have to handle the following mistakes:
S is misinterpreted as 5
O is misinterpreted as 0
I is misinterpreted as 1
The test cases contain digits only where the recogniser made a mistake.
"""
# answer
def correct(string):
return string.replace('5','S').replace('0','O').replace('1','I')
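# Example (sketch): correct("L0ND0N 15 THE CAP1TAL 0F GREAT BR1TA1N")
# returns "LONDON IS THE CAPITAL OF GREAT BRITAIN".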
|
Python
|
CL
|
1a62f4bb81fde47e299e5ac0e99499d0713e030d094ea080b26ae37e3cdbc37a
|
from setuptools import setup, find_packages
from os.path import join, dirname
setup(
name="pyinstagram",
version="3.0.0",
author="Oleg Yurchik",
author_email="oleg.yurchik@protonmail.com",
url="https://github.com/OlegYurchik/pyinstagram",
description="",
long_description=open(join(dirname(__file__), "README.md")).read(),
packages=find_packages(),
install_requires=["aiohttp"],
tests_require=["pytest", "pytest-asyncio", "pytest-random-order"],
test_suite="pyinstagram.tests",
)
|
Python
|
CL
|
cced89ec6ff0721b45a6f63703cf826383160cd1c5fa1962681249dedaca2925
|
from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from Basics.test_framework.utilities.logger import InfoLogger
from datetime import datetime
class MyDriverMethods:
log = InfoLogger('debug').info_logger()
def __init__(self, driver):
self.driver = driver
def get_page_title(self):
return self.driver.title
def take_screenshot(self, result_msg):
dir_loc = r'H:\Softwares\Python files\Extras\Selenium_Automation\Basics\test_framework\screenshots and logs'
time = str(datetime.today().replace(microsecond=0))
time_str = ''.join(filter(str.isalnum, time))
try:
self.driver.save_screenshot(f'{dir_loc}\\SS-{result_msg}-{time_str}.png')
self.log.info(f'Screenshot saved "SS-{result_msg}-{time_str}.png"')
except:
self.log.error(f'Exception occurred while taking screenshot - "SS-{result_msg}-{time_str}.png"')
def get_by_type(self, locator_type):
locator_type = locator_type.lower()
if locator_type == "id":
return By.ID
elif locator_type == "class_name":
return By.CLASS_NAME
elif locator_type == "name":
return By.NAME
elif locator_type == "xpath":
return By.XPATH
elif locator_type == "css_selector":
return By.CSS_SELECTOR
elif locator_type == "link_text":
return By.LINK_TEXT
else:
self.log.error(f"Locator type {locator_type} not correct/supported")
return False
def get_element(self, locator, locator_type="id"):
"""
available 'locator' types:
        id = By.ID, class_name = By.CLASS_NAME, name = By.NAME, xpath = By.XPATH, css_selector = By.CSS_SELECTOR
default_value = "id"
"""
element = None
try:
locator_type = locator_type.lower()
by_type = self.get_by_type(locator_type)
element = self.driver.find_element(by_type, locator)
self.log.info(f"Element {locator} found")
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"Element {locator} not found")
return element
def get_element_list(self, locator, locator_type="id"):
"""
available 'locator' types:
        id = By.ID, class_name = By.CLASS_NAME, name = By.NAME, xpath = By.XPATH, css_selector = By.CSS_SELECTOR
default_value = "id"
        :returns: a list of matching elements
"""
element_list = None
try:
locator_type = locator_type.lower()
by_type = self.get_by_type(locator_type)
element_list = self.driver.find_elements(by_type, locator)
self.log.info(f"Element list {locator} found e.g {element_list[2].text}")
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"Element list {locator} not found")
return element_list
def element_click(self, locator=None, locator_type="id", element=None):
# pytest.set_trace()
"""
available 'locator' types:
        id = By.ID, class_name = By.CLASS_NAME, name = By.NAME, xpath = By.XPATH, css_selector = By.CSS_SELECTOR
default_value = "id"
"""
if element is None and locator is not None:
try:
self.get_element(locator, locator_type).click()
self.log.info(f"Clicked on element with locator:{locator} with locator_type: {locator_type}")
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"locator:{locator} with locator_type: {locator_type} is not clickable")
else:
try:
element.click()
self.log.info(f"Clicked on element with locator:{locator} with element: {element}")
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"locator:{locator} with element: {element} is not clickable")
def element_send_keys(self, data, locator, locator_type="id"):
"""
available 'locator' types:
        id = By.ID, class_name = By.CLASS_NAME, name = By.NAME, xpath = By.XPATH, css_selector = By.CSS_SELECTOR
default_value = "id"
"""
try:
if data is None:
raise ElementNotSelectableException
self.get_element(locator, locator_type).send_keys(data)
self.log.info(f"Data: {data} sent to element with locator:{locator} and locator_type: {locator_type}")
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"Cannot send data: {data} to locator:{locator} and locator_type: {locator_type} is not "
f"clickable")
def is_element_enabled(self, locator, locator_type="id"):
"""
:returns bool
"""
try:
if self.get_element(locator, locator_type).is_enabled():
self.log.info(f"Element at locator {locator}-({locator_type}) is enabled!")
return True
else:
return False
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"Element at locator {locator}-({locator_type}) is not enabled!")
def get_value(self, locator, locator_type="id", attr_type="value"):
_value_of_element = None
try:
_element = self.get_element(locator, locator_type)
_value_of_element = _element.get_attribute(attr_type)
self.log.info(f"Value of Element {_value_of_element} at locator {locator}-({locator_type}) is returned")
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"Cannot get value of Element at locator {locator}-({locator_type})")
return _value_of_element
def select_element(self, data, locator, locator_type="id", select_by="index"):
"""
available 'locator' types:
        id = By.ID, class_name = By.CLASS_NAME, name = By.NAME, xpath = By.XPATH, css_selector = By.CSS_SELECTOR
default_value = "id"
-----------------------------------
available 'select_by' options:
value = select_by_value
text = select_by_visible_text
"default" = select_by_index
-----------------------------------
data:
e.g. select_by('data')
"""
try:
element = self.get_element(locator, locator_type)
select_by = select_by.lower()
if select_by == "value":
self.log.info(f'Selecting element at {locator} by value - {data}')
return Select(element).select_by_value(data)
elif select_by == "text":
self.log.info(f'Selecting element at {locator} by text - {data}')
return Select(element).select_by_visible_text(data)
else:
self.log.info(f'Selecting element at {locator} by index - {data}')
return Select(element).select_by_index(data)
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"Not able to Select value {data} at element {locator} by {locator_type}")
def is_element_present(self, locator, locator_type="id"):
element = None
element = self.get_element(locator, locator_type)
if element is not None:
self.log.info(f'Element at location {locator} is present')
return True
else:
self.log.error(f'Element at location {locator} is not present')
return False
def get_text_of_element(self, locator, locator_type="id"):
try:
element = self.get_element(locator, locator_type)
self.log.info(f'Text at Element {locator} is {element.text}')
return element.text
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
            self.log.error(f'Element at {locator} does not exist')
def scroll_page(self, direction='down', pixels='1000'):
if direction.lower() == 'down':
self.driver.execute_script(f"window.scrollBy(0, {pixels});")
self.log.info(f'Page scrolled {direction} {pixels} pixels')
else:
self.driver.execute_script(f"window.scrollBy(0, -{pixels});")
self.log.info(f'Page scrolled {direction} {pixels} pixels')
def hover_element(self, locator=None, locator_type="id", element=None):
try:
self.log.info(f'Starting hover operation with {locator} - {locator_type} - {element}')
if element is None and locator is not None:
hov_element = self.get_element(locator, locator_type)
self.log.info(f'Got element at {locator} to hover')
hover = ActionChains(self.driver).move_to_element(hov_element)
hover.perform()
self.log.info(f'Hovered over element at {locator} by {locator_type}')
else:
self.log.info(f'Got element {element} to hover')
hover = ActionChains(self.driver).move_to_element(element)
hover.perform()
self.log.info(f'Hovered over element {element}')
except (NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException):
self.log.error(f"Not able to hover at element {locator} by {locator_type} or element {element}")
|
Python
|
CL
|
b8586a8c1a056e27a1f5bb1a64cb69c8eebe3e4baaee2dbcb7a088e758ff0235
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (c) 2013 Guan Bo <guanbo2002@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import redis
import threading
import subprocess
from utils import serial
import time
startup = time.time()
def message_handler(message):
if (message):
if message['type'] == 'pmessage':
print message['channel'], ":", message['data']
cmds = ["python", "print.py"]
if message['channel'].endswith("#net"):
cmds[1] = "netprint.py"
lpr = subprocess.Popen(cmds, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
lpr.communicate(message['data'])
else:
now = time.time()
print now, ":startup:", now-startup, ":", message
if __name__ == "__main__":
channel = serial.getserial()
if (channel == "ERROR000000000"):
channel = "000000007985f65b"
channel = channel+"*"
# r = redis.StrictRedis(host='service.fankahui.com')
r = redis.StrictRedis(host='service.fankahui.com',socket_timeout=5)
ps = r.pubsub()
ps.psubscribe(channel)
while True:
# message = ps.get_message()
# message_handler(message)
# time.sleep(0.001) # be nice to the system :)
try:
for message in ps.listen():
message_handler(message)
except KeyboardInterrupt:
break
except:
print 'Retry Connect Redis'
try:
time.sleep(0.001)
# print 'ping', r.ping()
ps.psubscribe(channel)
except:
pass
|
Python
|
CL
|
0d3a1517e32e01cba5651fb172a28f0e3b1d2df25ecafe8f2b39f586603311b9
|
from datetime import timedelta
from city_scrapers_core.constants import CITY_COUNCIL, COMMITTEE, FORUM
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import LegistarSpider
class AlleCountySpider(LegistarSpider):
name = "alle_county"
agency = "Allegheny County Government"
timezone = "America/New_York"
allowed_domains = ["alleghenycounty.legistar.com"]
start_urls = ["https://alleghenycounty.legistar.com"]
def parse_legistar(self, events):
"""
`parse_legistar` should always `yield` Meeting items.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
for event, _ in events:
start = self.legistar_start(event)
meeting = Meeting(
title=event["Name"]["label"],
description="",
classification=self._parse_classification(event),
start=start,
end=self._parse_end(start),
all_day=False,
time_notes="Estimated 3 hour meeting length",
location=self._parse_location(event),
links=self.legistar_links(event),
source=self.legistar_source(event),
)
meeting["status"] = self._get_status(meeting)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_classification(self, item):
"""Parse or generate classification from allowed options."""
meeting_loc_str = item["Meeting Location"].lower()
if "hearing" in meeting_loc_str:
return FORUM
if "committee" in meeting_loc_str:
return COMMITTEE
return CITY_COUNCIL
def _parse_end(self, start):
"""Parse end datetime as a naive datetime object. Added by pipeline if None"""
return start + timedelta(hours=3)
def _parse_location(self, item):
"""Parse or generate location."""
addr_str = ""
room = item.get("Meeting Location")
if room:
addr_str += room + ", "
addr_str += "436 Grant Street, Pittsburgh, PA 15219"
return {
"address": addr_str,
"name": "",
}
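# To try the spider locally (assuming a standard city-scrapers/Scrapy project layout):
#   scrapy crawl alle_county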
|
Python
|
CL
|
ff90e1ad055a026c56373f6f1fd1ca160f65e809de2e3a9bdae6e6231e00c8a5
|
import urlparse, time, functools
import cyclone.web
import oauth2
from oauth2 import generate_verifier, Consumer, Error, MissingSignature
from twisted.python import log
from twisted.internet import defer
from cycloauth.utils import (generate_string, get_normalized_parameters,
NOnceList, oauth_request)
from cycloauth.errors import *
from cycloauth.signatures import HMAC_SHA1, PLAINTEXT
from cycloauth.token import Token
def handlers(settings):
if 'oauth_authorization_handler' in settings:
m = settings['oauth_authorization_handler']
authz_mod = __import__('.'.join(m.split('.')[:-1]), globals(), locals(), [], -1)
for part in m.split('.')[1:]:
            authz_mod = getattr(authz_mod, part)
else:
authz_mod = AuthorizeHandler
ret = [
(settings.get('oauth_request_token_url', '/oauth/request_token'), RequestTokenHandler),
(settings.get('oauth_authorize_url', '/oauth/authorize'), authz_mod),
(settings.get('oauth_access_token_url', '/oauth/access_token'), AccessTokenHandler)]
return ret
def oauth_authenticated(method):
"same as cyclone.web.authenticated but doesn't redirect just raises 403"
"and works with asynchronous authentication methods (that might require a db lookup or something)"
"using this decorator means you do not have to use cyclone.web.asynchronous and return"
"values will be entirely ignored"
@defer.inlineCallbacks
@cyclone.web.asynchronous
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
user = yield self.get_oauth_token()
if not user:
raise cyclone.web.HTTPError(403)
method(self, *args, **kwargs)
return wrapper
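# Usage sketch (hypothetical handler; any handler mixing in OAuthRequestHandlerMixin
# can protect a method this way):
#   class ProtectedHandler(cyclone.web.RequestHandler, OAuthRequestHandlerMixin):
#       @oauth_authenticated
#       def get(self):
#           self.write("hello, authenticated consumer")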
class OAuthApplicationMixin(object):
oauth_signature_methods = {
'HMAC-SHA1': HMAC_SHA1,
'PLAINTEXT': PLAINTEXT
}
@property
def oauth_nonce_list(self):
if getattr(self, '_nonce_list', None) is None:
nonce_cache_size = self.settings.get('nonce_cache_size', 20000)
self._nonce_list = NOnceList(nonce_cache_size)
return self._nonce_list
@property
def oauth_storage(self):
if getattr(self, '_oauth_storage', None) is None:
factory_name = self.settings.get('oauth_storage_factory', 'cycloauth.storage.BaseStorage')
fn = '.'.join(factory_name.split('.')[:-1])
mod = __import__(fn, globals(), locals(), [], -1)
for part in factory_name.split('.')[1:]:
mod = getattr(mod, part)
self._oauth_storage = mod(self.settings)
return self._oauth_storage
class OAuthRequestHandlerMixin(object):
def get_error_html(self, status_code, **kwargs):
e = kwargs.get('exception')
if isinstance(e, Error):
return """
<html>
<head><title>%(title)s</title>
</head>
<body><h1>OAuth Error</h1><p>%(title)s</p></body>
</html>
""" % {'title': e.log_message}
else:
return cyclone.web.RequestHandler.get_error_html(self, status_code, **kwargs)
@defer.inlineCallbacks
def get_oauth_token(self):
consumer_key = self.oauth_params.get('oauth_consumer_key', None)
oauth_token_key = self.oauth_params.get('oauth_token', None)
if len(self.oauth_params) == 0:
raise NotAnOAuthRequest('The request made does not contain one or more OAuth parameters.')
elif not (consumer_key or oauth_token_key):
raise PartialOAuthRequest('A consumer or token was not provided in the request.')
if getattr(self, 'oauth_consumer', None) and getattr(self, 'oauth_token', None):
defer.returnValue(self.oauth_token)
s = self.application.oauth_storage
try:
consumer = yield s.get_consumer(consumer_key)
token = yield s.get_access_token(oauth_token_key)
self._check_signature(consumer, token)
self.oauth_consumer = consumer
self.oauth_token = token
except:
log.err()
self.oauth_token = self.oauth_consumer = None
defer.returnValue(self.oauth_token)
def _got_oauth_info_from_storage(self, ret):
consumer, token = (ret[0], ret[1])
try:
self._check_signature(consumer, token)
self.oauth_consumer = consumer
self.oauth_token = token
except:
log.err()
self.oauth_consumer = None
self.oauth_token = None
return self.oauth_token
def _check_signature(self, consumer, token):
try:
nonce = self.oauth_params['oauth_nonce']
except KeyError:
raise PartialOAuthRequest('Missing oauth_nonce.')
self._check_nonce(nonce)
try:
timestamp = self.oauth_params['oauth_timestamp']
except KeyError:
raise PartialOAuthRequest('Missing oauth_timestamp.')
self._check_timestamp(timestamp)
try:
signature_method = self.application.oauth_signature_methods[self.oauth_params['oauth_signature_method']]()
except KeyError:
raise UnknownSignature('Unknown oauth_signature_method.')
oauth_req = oauth_request(self.request)
try:
signature = self.oauth_params['oauth_signature']
except KeyError:
raise MissingSignature('The oauth_signature is missing')
valid = signature_method.check(oauth_req, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(oauth_req, consumer, token)
raise Error(('Invalid signature. Expected signature base string: ' + str(base)), 'sock')
def _check_timestamp(self, timestamp, threshold=300):
if timestamp is None:
raise Error("The oauth_timestamp parameter is missing.")
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > threshold:
raise Error('Expired timestamp: given %d and now %s has a greater difference than the threshold %d' % (
timestamp, now, threshold
))
def _check_nonce(self, nonce):
if nonce in self.application.oauth_nonce_list:
raise NOnceReplayed('The provided nonce value has been used recently.')
self.application.oauth_nonce_list.append(nonce)
def _checks_positive_for_oauth(self, params_var):
return True in [p.find('oauth_') >= 0 for p in params_var]
@property
def oauth_header(self):
extracted = {}
try:
auth_header = self.request.headers['authorization']
if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]  # drop the "OAuth " prefix; lstrip would strip characters, not the prefix
try:
extracted = oauth2.Request._split_header(auth_header)
except Exception, e:
log.err()
raise Error('Unable to parse OAuth parameters from the Authorization Header.')
except KeyError:
pass
return extracted
@property
def oauth_arguments(self):
extracted = {}
if self._checks_positive_for_oauth(self.request.arguments):
extracted = dict((k, self.get_argument(k)) for k in self.request.arguments.iterkeys() if k.find('oauth_') >= 0)
return extracted
@property
def oauth_params(self):
if getattr(self, '_oauth_params', None) is None:
extracted = {}
extracted.update(self.oauth_header)
extracted.update(self.oauth_arguments)
self._oauth_params = dict((k, extracted[k]) for k in extracted.iterkeys())
return self._oauth_params
@property
def nonoauth_argument(self):
oauth_param_keys = self.oauth_params.keys()
return dict([k, v] for k, v in self.params.iteritems() if k not in oauth_param_keys)
class RequestTokenHandler(cyclone.web.RequestHandler, OAuthRequestHandlerMixin):
@defer.inlineCallbacks
@cyclone.web.asynchronous
def get(self):
consumer = yield self.application.oauth_storage.get_consumer(self.oauth_params['oauth_consumer_key'])
self._check_signature(consumer, None)
token = yield self.application.oauth_storage.add_request_token()
callback = self.oauth_params.get('oauth_callback', None)
if callback:
            if callback == 'oob':
if hasattr(consumer, 'callback'):
token.set_callback(consumer.callback)
else:
raise PartialOAuthRequest("There is no callback set for out-of-band (oob) use")
else:
token.set_callback(callback)
else:
if hasattr(consumer, 'callback'):
token.set_callback(consumer.callback)
else:
raise PartialOAuthRequest("Missing oauth_callback. Required by OAuth 1.0a")
yield self.application.oauth_storage.save_request_token(token)
self.set_header('Content-Type', 'text/plain')
self.write(token.to_string())
self.finish()
class AuthorizeHandler(cyclone.web.RequestHandler, OAuthRequestHandlerMixin):
@defer.inlineCallbacks
@cyclone.web.asynchronous
def get(self):
token = yield self.application.oauth_storage.get_request_token(self.oauth_params['oauth_token'])
token.set_verifier()
yield self.application.oauth_storage.save_request_token(token)
cb = token.get_callback_url()
self.redirect(cb)
class AccessTokenHandler(cyclone.web.RequestHandler, OAuthRequestHandlerMixin):
@defer.inlineCallbacks
@cyclone.web.asynchronous
def get(self):
consumer = yield self.application.oauth_storage.get_consumer(self.oauth_params['oauth_consumer_key'])
request_token = yield self.application.oauth_storage.get_request_token(self.oauth_params['oauth_token'])
try:
verifier = self.oauth_params['oauth_verifier']
except KeyError:
raise InvalidVerifier('Missing oauth_verifier. Required by OAuth 1.0a')
self._check_signature(consumer, request_token)
if verifier != request_token.verifier:
raise InvalidVerifier('Invalid Verifier.')
access_token = yield self.application.oauth_storage.add_access_token()
yield self.application.oauth_storage.save_access_token(access_token)
self.set_header('Content-Type', 'text/plain')
self.write(access_token.to_string())
self.finish()
|
Python
|
CL
|
8b2c8df29597ab565064ffd28ee50542ee4bb9f297b3d22e3fc0072aa624a8dd
|
# mypy: ignore-errors
import railroad
from pip._vendor import pyparsing
import typing
from typing import (
List,
NamedTuple,
Generic,
TypeVar,
Dict,
Callable,
Set,
Iterable,
)
from jinja2 import Template
from io import StringIO
import inspect
jinja2_template_source = """\
{% if not embed %}
<!DOCTYPE html>
<html>
<head>
{% endif %}
{% if not head %}
<style>
.railroad-heading {
font-family: monospace;
}
</style>
{% else %}
{{ head | safe }}
{% endif %}
{% if not embed %}
</head>
<body>
{% endif %}
{{ body | safe }}
{% for diagram in diagrams %}
<div class="railroad-group">
<h1 class="railroad-heading">{{ diagram.title }}</h1>
<div class="railroad-description">{{ diagram.text }}</div>
<div class="railroad-svg">
{{ diagram.svg }}
</div>
</div>
{% endfor %}
{% if not embed %}
</body>
</html>
{% endif %}
"""
template = Template(jinja2_template_source)
# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
NamedDiagram = NamedTuple(
"NamedDiagram",
[("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
)
"""
A simple structure for associating a name with a railroad diagram
"""
T = TypeVar("T")
class EachItem(railroad.Group):
"""
Custom railroad item to compose a:
- Group containing a
- OneOrMore containing a
- Choice of the elements in the Each
with the group label indicating that all must be matched
"""
all_label = "[ALL]"
def __init__(self, *items):
choice_item = railroad.Choice(len(items) - 1, *items)
one_or_more_item = railroad.OneOrMore(item=choice_item)
super().__init__(one_or_more_item, label=self.all_label)
class AnnotatedItem(railroad.Group):
"""
Simple subclass of Group that creates an annotation label
"""
def __init__(self, label: str, item):
super().__init__(item=item, label="[{}]".format(label) if label else label)
class EditablePartial(Generic[T]):
"""
Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
constructed.
"""
# We need this here because the railroad constructors actually transform the data, so can't be called until the
# entire tree is assembled
def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
self.func = func
self.args = args
self.kwargs = kwargs
@classmethod
def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
"""
If you call this function in the same way that you would call the constructor, it will store the arguments
as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
"""
return EditablePartial(func=func, args=list(args), kwargs=kwargs)
@property
def name(self):
return self.kwargs["name"]
def __call__(self) -> T:
"""
Evaluate the partial and return the result
"""
args = self.args.copy()
kwargs = self.kwargs.copy()
# This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
# args=['list', 'of', 'things'])
arg_spec = inspect.getfullargspec(self.func)
if arg_spec.varargs in self.kwargs:
args += kwargs.pop(arg_spec.varargs)
return self.func(*args, **kwargs)
def railroad_to_html(diagrams: List[NamedDiagram], embed=False, **kwargs) -> str:
"""
Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
:params kwargs: kwargs to be passed in to the template
"""
data = []
for diagram in diagrams:
if diagram.diagram is None:
continue
io = StringIO()
try:
css = kwargs.get('css')
diagram.diagram.writeStandalone(io.write, css=css)
except AttributeError:
diagram.diagram.writeSvg(io.write)
title = diagram.name
if diagram.index == 0:
title += " (root)"
data.append({"title": title, "text": "", "svg": io.getvalue()})
return template.render(diagrams=data, embed=embed, **kwargs)
def resolve_partial(partial: "EditablePartial[T]") -> T:
"""
Recursively resolves a collection of Partials into whatever type they are
"""
if isinstance(partial, EditablePartial):
partial.args = resolve_partial(partial.args)
partial.kwargs = resolve_partial(partial.kwargs)
return partial()
elif isinstance(partial, list):
return [resolve_partial(x) for x in partial]
elif isinstance(partial, dict):
return {key: resolve_partial(x) for key, x in partial.items()}
else:
return partial
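# Example (sketch, mirroring the Fraction example in EditablePartial.from_call):
#   part = EditablePartial.from_call(Fraction, 1, 3)
#   resolve_partial(part) == Fraction(1, 3)
# Lists and dicts of partials are resolved element-by-element in the same way.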
def to_railroad(
element: pyparsing.ParserElement,
diagram_kwargs: typing.Optional[dict] = None,
vertical: int = 3,
show_results_names: bool = False,
show_groups: bool = False,
) -> List[NamedDiagram]:
"""
Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
creation if you want to access the Railroad tree before it is converted to HTML
:param element: base element of the parser being diagrammed
:param diagram_kwargs: kwargs to pass to the Diagram() constructor
:param vertical: (optional) - int - limit at which number of alternatives should be
shown vertically instead of horizontally
    :param show_results_names: bool to indicate whether results name annotations should be
       included in the diagram
    :param show_groups: bool to indicate whether groups should be highlighted with an unlabeled
       surrounding box
"""
# Convert the whole tree underneath the root
lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
_to_diagram_element(
element,
lookup=lookup,
parent=None,
vertical=vertical,
show_results_names=show_results_names,
show_groups=show_groups,
)
root_id = id(element)
# Convert the root if it hasn't been already
if root_id in lookup:
if not element.customName:
lookup[root_id].name = ""
lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
# Now that we're finished, we can convert from intermediate structures into Railroad elements
diags = list(lookup.diagrams.values())
if len(diags) > 1:
# collapse out duplicate diags with the same name
seen = set()
deduped_diags = []
for d in diags:
# don't extract SkipTo elements, they are uninformative as subdiagrams
if d.name == "...":
continue
if d.name is not None and d.name not in seen:
seen.add(d.name)
deduped_diags.append(d)
resolved = [resolve_partial(partial) for partial in deduped_diags]
else:
# special case - if just one diagram, always display it, even if
# it has no name
resolved = [resolve_partial(partial) for partial in diags]
return sorted(resolved, key=lambda diag: diag.index)
def _should_vertical(
specification: int, exprs: Iterable[pyparsing.ParserElement]
) -> bool:
"""
Returns true if we should return a vertical list of elements
"""
if specification is None:
return False
else:
return len(_visible_exprs(exprs)) >= specification
class ElementState:
"""
State recorded for an individual pyparsing Element
"""
# Note: this should be a dataclass, but we have to support Python 3.5
def __init__(
self,
element: pyparsing.ParserElement,
converted: EditablePartial,
parent: EditablePartial,
number: int,
name: str = None,
parent_index: typing.Optional[int] = None,
):
#: The pyparsing element that this represents
self.element: pyparsing.ParserElement = element
#: The name of the element
self.name: typing.Optional[str] = name
#: The output Railroad element in an unconverted state
self.converted: EditablePartial = converted
#: The parent Railroad element, which we store so that we can extract this if it's duplicated
self.parent: EditablePartial = parent
#: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
self.number: int = number
#: The index of this inside its parent
self.parent_index: typing.Optional[int] = parent_index
#: If true, we should extract this out into a subdiagram
self.extract: bool = False
#: If true, all of this element's children have been filled out
self.complete: bool = False
def mark_for_extraction(
self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
):
"""
Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
:param el_id: id of the element
:param state: element/diagram state tracker
:param name: name to use for this element's text
:param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
root element when we know we're finished
"""
self.extract = True
# Set the name
if not self.name:
if name:
# Allow forcing a custom name
self.name = name
elif self.element.customName:
self.name = self.element.customName
else:
self.name = ""
# Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
# to be added
# Also, if this is just a string literal etc, don't bother extracting it
if force or (self.complete and _worth_extracting(self.element)):
state.extract_into_diagram(el_id)
class ConverterState:
"""
Stores some state that persists between recursions into the element tree
"""
def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
#: A dictionary mapping ParserElements to state relating to them
self._element_diagram_states: Dict[int, ElementState] = {}
#: A dictionary mapping ParserElement IDs to subdiagrams generated from them
self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
#: The index of the next unnamed element
self.unnamed_index: int = 1
#: The index of the next element. This is used for sorting
self.index: int = 0
#: Shared kwargs that are used to customize the construction of diagrams
self.diagram_kwargs: dict = diagram_kwargs or {}
self.extracted_diagram_names: Set[str] = set()
def __setitem__(self, key: int, value: ElementState):
self._element_diagram_states[key] = value
def __getitem__(self, key: int) -> ElementState:
return self._element_diagram_states[key]
def __delitem__(self, key: int):
del self._element_diagram_states[key]
def __contains__(self, key: int):
return key in self._element_diagram_states
def generate_unnamed(self) -> int:
"""
Generate a number used in the name of an otherwise unnamed diagram
"""
self.unnamed_index += 1
return self.unnamed_index
def generate_index(self) -> int:
"""
Generate a number used to index a diagram
"""
self.index += 1
return self.index
def extract_into_diagram(self, el_id: int):
"""
Used when we encounter the same token twice in the same tree. When this
happens, we replace all instances of that token with a terminal, and
create a new subdiagram for the token
"""
position = self[el_id]
# Replace the original definition of this element with a regular block
if position.parent:
ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
if "item" in position.parent.kwargs:
position.parent.kwargs["item"] = ret
elif "items" in position.parent.kwargs:
position.parent.kwargs["items"][position.parent_index] = ret
# If the element we're extracting is a group, skip to its content but keep the title
if position.converted.func == railroad.Group:
content = position.converted.kwargs["item"]
else:
content = position.converted
self.diagrams[el_id] = EditablePartial.from_call(
NamedDiagram,
name=position.name,
diagram=EditablePartial.from_call(
railroad.Diagram, content, **self.diagram_kwargs
),
index=position.number,
)
del self[el_id]
def _worth_extracting(element: pyparsing.ParserElement) -> bool:
"""
Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
    themselves have children, then it's complex enough to extract
"""
children = element.recurse()
return any(child.recurse() for child in children)
def _apply_diagram_item_enhancements(fn):
"""
decorator to ensure enhancements to a diagram item (such as results name annotations)
get applied on return from _to_diagram_element (we do this since there are several
returns in _to_diagram_element)
"""
def _inner(
element: pyparsing.ParserElement,
parent: typing.Optional[EditablePartial],
lookup: ConverterState = None,
vertical: int = None,
index: int = 0,
name_hint: str = None,
show_results_names: bool = False,
show_groups: bool = False,
) -> typing.Optional[EditablePartial]:
ret = fn(
element,
parent,
lookup,
vertical,
index,
name_hint,
show_results_names,
show_groups,
)
# apply annotation for results name, if present
if show_results_names and ret is not None:
element_results_name = element.resultsName
if element_results_name:
# add "*" to indicate if this is a "list all results" name
element_results_name += "" if element.modalResults else "*"
ret = EditablePartial.from_call(
railroad.Group, item=ret, label=element_results_name
)
return ret
return _inner
def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
non_diagramming_exprs = (
pyparsing.ParseElementEnhance,
pyparsing.PositionToken,
pyparsing.And._ErrorStop,
)
return [
e
for e in exprs
if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
]
@_apply_diagram_item_enhancements
def _to_diagram_element(
element: pyparsing.ParserElement,
parent: typing.Optional[EditablePartial],
lookup: ConverterState = None,
vertical: int = None,
index: int = 0,
name_hint: str = None,
show_results_names: bool = False,
show_groups: bool = False,
) -> typing.Optional[EditablePartial]:
"""
Recursively converts a PyParsing Element to a railroad Element
:param lookup: The shared converter state that keeps track of useful things
:param index: The index of this element within the parent
:param parent: The parent of this element in the output tree
:param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
do so
:param name_hint: If provided, this will override the generated name
:param show_results_names: bool flag indicating whether to add annotations for results names
:returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
:param show_groups: bool flag indicating whether to show groups using bounding box
"""
exprs = element.recurse()
name = name_hint or element.customName or element.__class__.__name__
# Python's id() is used to provide a unique identifier for elements
el_id = id(element)
element_results_name = element.resultsName
# Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
if not element.customName:
if isinstance(
element,
(
# pyparsing.TokenConverter,
# pyparsing.Forward,
pyparsing.Located,
),
):
# However, if this element has a useful custom name, and its child does not, we can pass it on to the child
if exprs:
if not exprs[0].customName:
propagated_name = name
else:
propagated_name = None
return _to_diagram_element(
element.expr,
parent=parent,
lookup=lookup,
vertical=vertical,
index=index,
name_hint=propagated_name,
show_results_names=show_results_names,
show_groups=show_groups,
)
# If the element isn't worth extracting, we always treat it as the first time we see it
if _worth_extracting(element):
if el_id in lookup:
# If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
# so we have to extract it into a new diagram.
looked_up = lookup[el_id]
looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
return ret
elif el_id in lookup.diagrams:
# If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
# just put in a marker element that refers to the sub-diagram
ret = EditablePartial.from_call(
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
)
return ret
# Recursively convert child elements
# Here we find the most relevant Railroad element for matching pyparsing Element
# We use ``items=[]`` here to hold the place for where the child elements will go once created
if isinstance(element, pyparsing.And):
# detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
# (all will have the same name, and resultsName)
if not exprs:
return None
if len(set((e.name, e.resultsName) for e in exprs)) == 1:
ret = EditablePartial.from_call(
railroad.OneOrMore, item="", repeat=str(len(exprs))
)
elif _should_vertical(vertical, exprs):
ret = EditablePartial.from_call(railroad.Stack, items=[])
else:
ret = EditablePartial.from_call(railroad.Sequence, items=[])
elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
if not exprs:
return None
if _should_vertical(vertical, exprs):
ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
else:
ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
elif isinstance(element, pyparsing.Each):
if not exprs:
return None
ret = EditablePartial.from_call(EachItem, items=[])
elif isinstance(element, pyparsing.NotAny):
ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
elif isinstance(element, pyparsing.FollowedBy):
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
elif isinstance(element, pyparsing.PrecededBy):
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
elif isinstance(element, pyparsing.Group):
if show_groups:
ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
else:
ret = EditablePartial.from_call(railroad.Group, label="", item="")
elif isinstance(element, pyparsing.TokenConverter):
label = type(element).__name__.lower()
if label == "tokenconverter":
ret = EditablePartial.from_call(railroad.Sequence, items=[])
else:
ret = EditablePartial.from_call(AnnotatedItem, label=label, item="")
elif isinstance(element, pyparsing.Opt):
ret = EditablePartial.from_call(railroad.Optional, item="")
elif isinstance(element, pyparsing.OneOrMore):
ret = EditablePartial.from_call(railroad.OneOrMore, item="")
elif isinstance(element, pyparsing.ZeroOrMore):
ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
elif isinstance(element, pyparsing.Group):
ret = EditablePartial.from_call(
railroad.Group, item=None, label=element_results_name
)
elif isinstance(element, pyparsing.Empty) and not element.customName:
# Skip unnamed "Empty" elements
ret = None
elif isinstance(element, pyparsing.ParseElementEnhance):
ret = EditablePartial.from_call(railroad.Sequence, items=[])
elif len(exprs) > 0 and not element_results_name:
ret = EditablePartial.from_call(railroad.Group, item="", label=name)
elif len(exprs) > 0:
ret = EditablePartial.from_call(railroad.Sequence, items=[])
else:
terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
ret = terminal
if ret is None:
return
# Indicate this element's position in the tree so we can extract it if necessary
lookup[el_id] = ElementState(
element=element,
converted=ret,
parent=parent,
parent_index=index,
number=lookup.generate_index(),
)
if element.customName:
lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
i = 0
for expr in exprs:
# Add a placeholder index in case we have to extract the child before we even add it to the parent
if "items" in ret.kwargs:
ret.kwargs["items"].insert(i, None)
item = _to_diagram_element(
expr,
parent=ret,
lookup=lookup,
vertical=vertical,
index=i,
show_results_names=show_results_names,
show_groups=show_groups,
)
# Some elements don't need to be shown in the diagram
if item is not None:
if "item" in ret.kwargs:
ret.kwargs["item"] = item
elif "items" in ret.kwargs:
# If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
ret.kwargs["items"][i] = item
i += 1
elif "items" in ret.kwargs:
# If we're supposed to skip this element, remove it from the parent
del ret.kwargs["items"][i]
# If all of this item's children are None, skip this item
if ret and (
("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
or ("item" in ret.kwargs and ret.kwargs["item"] is None)
):
ret = EditablePartial.from_call(railroad.Terminal, name)
# Mark this element as "complete", i.e. it has all of its children
if el_id in lookup:
lookup[el_id].complete = True
if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
lookup.extract_into_diagram(el_id)
if ret is not None:
ret = EditablePartial.from_call(
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
)
return ret
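# Hypothetical usage sketch (not part of the original module; the sample grammar
# is an assumption, and ConverterState is assumed to be constructible with its
# defaults). It shows how the converter above might be driven directly: build a
# pyparsing expression, create a ConverterState to act as the shared lookup, and
# convert the expression into a railroad partial.
#
#     import pyparsing as pp
#     grammar = pp.Word(pp.alphas)("key") + pp.Suppress("=") + pp.Word(pp.nums)("value")
#     state = ConverterState()
#     partial = _to_diagram_element(grammar, parent=None, lookup=state, vertical=3)
#     # ``partial`` is an EditablePartial; ``state.diagrams`` collects any extracted sub-diagrams.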
|
Python
|
CL
|
9f19833a8605f4d5ee2da198cb4d6d2858e4351796265ac616e24d584893a3ce
|
import torch
def my_imfilter(image, filter):
"""
Apply a filter to an image. Return the filtered image.
Args
- image: Torch tensor of shape (m, n, c)
- filter: Torch tensor of shape (k, j)
Returns
- filtered_image: Torch tensor of shape (m, n, c)
HINTS:
- You may not use any libraries that do the work for you. Using torch to work
with matrices is fine and encouraged. Using OpenCV or similar to do the
filtering for you is not allowed.
- I encourage you to try implementing this naively first, just be aware that
it may take a long time to run. You will need to get a function
that takes a reasonable amount of time to run so that the TAs can verify
your code works.
- Useful functions: torch.nn.functional.pad
"""
filtered_image = torch.Tensor()
assert filter.shape[0] % 2 == 1
assert filter.shape[1] % 2 == 1
#############################################################################
# TODO: YOUR CODE HERE
############################################################################
ffilter = filter.float()
K, J = ffilter.size()
M, N, C = image.size()
R1 = int((K - 1)/2)
R2 = int((K + 1)/2)
S1 = int((J - 1)/2)
S2 = int((J + 1)/2)
filtered_image = torch.zeros(M, N, C).float()
padded_signal = torch.zeros(M + K, N + J, C)
padded_signal[R1:M + K - R2, S1:N + J - S2, :] = image
for i in range(int(M)):
for j in range(int(N)):
filtered_image[i, j, :] += torch.einsum('kjc,kj->c', padded_signal[i:i + K, j: j + J, :].float(), ffilter)
#############################################################################
# TODO: YOUR CODE HERE
############################################################################
return filtered_image
def create_hybrid_image(image1, image2, filter):
"""
Takes two images and a low-pass filter and creates a hybrid image. Returns
the low frequency content of image1, the high frequency content of image 2,
and the hybrid image.
Args
- image1: Torch tensor of dim (m, n, c)
- image2: Torch tensor of dim (m, n, c)
- filter: Torch tensor of dim (x, y)
Returns
- low_frequencies: Torch tensor of shape (m, n, c)
- high_frequencies: Torch tensor of shape (m, n, c)
- hybrid_image: Torch tensor of shape (m, n, c)
HINTS:
- You will use your my_imfilter function in this function.
- You can get just the high frequency content of an image by removing its low
frequency content. Think about how to do this in mathematical terms.
- Don't forget to make sure the pixel values of the hybrid image are between
0 and 1. This is known as 'clipping' ('clamping' in torch).
- If you want to use images with different dimensions, you should resize them
in the notebook code.
"""
hybrid_image = torch.Tensor()
low_frequencies = torch.Tensor()
high_frequencies = torch.Tensor()
assert image1.shape[0] == image2.shape[0]
assert image1.shape[1] == image2.shape[1]
assert image1.shape[2] == image2.shape[2]
assert filter.shape[0] <= image1.shape[0]
assert filter.shape[1] <= image1.shape[1]
assert filter.shape[0] % 2 == 1
assert filter.shape[1] % 2 == 1
#############################################################################
# TODO: YOUR CODE HERE
############################################################################
low_frequencies = my_imfilter(image1, filter)
high_frequencies = image2 - my_imfilter(image2, filter)
hybrid_image = torch.clamp(low_frequencies + high_frequencies, 0, 1)
#############################################################################
# TODO: YOUR CODE HERE
############################################################################
return low_frequencies, high_frequencies, hybrid_image
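# Minimal usage sketch (not part of the original assignment code; the image sizes
# and the box filter below are illustrative assumptions). It exercises both
# functions above on small random tensors.
if __name__ == "__main__":
    img1 = torch.rand(9, 9, 3)
    img2 = torch.rand(9, 9, 3)
    box_filter = torch.ones(3, 3) / 9.0  # odd-sized low-pass filter
    low, high, hybrid = create_hybrid_image(img1, img2, box_filter)
    print(low.shape, high.shape, hybrid.shape)  # each is (9, 9, 3)
    print(float(hybrid.min()), float(hybrid.max()))  # values clamped to [0, 1]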
|
Python
|
CL
|
8aef09c1345a4a9d1e6ab4f04cf284529afd54840c1e42e91002d5ffbd7ea418
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: monitor.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="monitor.proto",
package="chec",
syntax="proto2",
serialized_options=None,
serialized_pb=_b(
'\n\rmonitor.proto\x12\x04\x63hec"$\n\x07TimeUTC\x12\x0b\n\x03sec\x18\x01 \x02(\x05\x12\x0c\n\x04nsec\x18\x02 \x02(\x05"\xa4\x01\n\x0bMonitorData\x12\x1b\n\x04time\x18\x01 \x02(\x0b\x32\r.chec.TimeUTC\x12+\n\x07reciver\x18\x10 \x01(\x0b\x32\x1a.chec.MonitorData.Reciever\x1aK\n\x08Reciever\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0b\n\x03pid\x18\x02 \x01(\x05\x12\x11\n\trecv_data\x18\x03 \x01(\x08\x12\x11\n\tdata_rate\x18\x04 \x01(\x02"O\n\x0cMonitorFrame\x12\x1b\n\x04time\x18\x01 \x02(\x0b\x32\r.chec.TimeUTC\x12"\n\x07mondata\x18\x02 \x03(\x0b\x32\x11.chec.MonitorData'
),
)
_TIMEUTC = _descriptor.Descriptor(
name="TimeUTC",
full_name="chec.TimeUTC",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="sec",
full_name="chec.TimeUTC.sec",
index=0,
number=1,
type=5,
cpp_type=1,
label=2,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="nsec",
full_name="chec.TimeUTC.nsec",
index=1,
number=2,
type=5,
cpp_type=1,
label=2,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=23,
serialized_end=59,
)
_MONITORDATA_RECIEVER = _descriptor.Descriptor(
name="Reciever",
full_name="chec.MonitorData.Reciever",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="chec.MonitorData.Reciever.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="pid",
full_name="chec.MonitorData.Reciever.pid",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="recv_data",
full_name="chec.MonitorData.Reciever.recv_data",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="data_rate",
full_name="chec.MonitorData.Reciever.data_rate",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=151,
serialized_end=226,
)
_MONITORDATA = _descriptor.Descriptor(
name="MonitorData",
full_name="chec.MonitorData",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="time",
full_name="chec.MonitorData.time",
index=0,
number=1,
type=11,
cpp_type=10,
label=2,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="reciver",
full_name="chec.MonitorData.reciver",
index=1,
number=16,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_MONITORDATA_RECIEVER],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=62,
serialized_end=226,
)
_MONITORFRAME = _descriptor.Descriptor(
name="MonitorFrame",
full_name="chec.MonitorFrame",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="time",
full_name="chec.MonitorFrame.time",
index=0,
number=1,
type=11,
cpp_type=10,
label=2,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mondata",
full_name="chec.MonitorFrame.mondata",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=228,
serialized_end=307,
)
_MONITORDATA_RECIEVER.containing_type = _MONITORDATA
_MONITORDATA.fields_by_name["time"].message_type = _TIMEUTC
_MONITORDATA.fields_by_name["reciver"].message_type = _MONITORDATA_RECIEVER
_MONITORFRAME.fields_by_name["time"].message_type = _TIMEUTC
_MONITORFRAME.fields_by_name["mondata"].message_type = _MONITORDATA
DESCRIPTOR.message_types_by_name["TimeUTC"] = _TIMEUTC
DESCRIPTOR.message_types_by_name["MonitorData"] = _MONITORDATA
DESCRIPTOR.message_types_by_name["MonitorFrame"] = _MONITORFRAME
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TimeUTC = _reflection.GeneratedProtocolMessageType(
"TimeUTC",
(_message.Message,),
dict(
DESCRIPTOR=_TIMEUTC,
__module__="monitor_pb2"
# @@protoc_insertion_point(class_scope:chec.TimeUTC)
),
)
_sym_db.RegisterMessage(TimeUTC)
MonitorData = _reflection.GeneratedProtocolMessageType(
"MonitorData",
(_message.Message,),
dict(
Reciever=_reflection.GeneratedProtocolMessageType(
"Reciever",
(_message.Message,),
dict(
DESCRIPTOR=_MONITORDATA_RECIEVER,
__module__="monitor_pb2"
# @@protoc_insertion_point(class_scope:chec.MonitorData.Reciever)
),
),
DESCRIPTOR=_MONITORDATA,
__module__="monitor_pb2"
# @@protoc_insertion_point(class_scope:chec.MonitorData)
),
)
_sym_db.RegisterMessage(MonitorData)
_sym_db.RegisterMessage(MonitorData.Reciever)
MonitorFrame = _reflection.GeneratedProtocolMessageType(
"MonitorFrame",
(_message.Message,),
dict(
DESCRIPTOR=_MONITORFRAME,
__module__="monitor_pb2"
# @@protoc_insertion_point(class_scope:chec.MonitorFrame)
),
)
_sym_db.RegisterMessage(MonitorFrame)
# @@protoc_insertion_point(module_scope)
|
Python
|
CL
|
6664eb43f2d1441fefc0fab9f8439b0810741c036229be36510c393c6f57ecea
|
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import matplotlib.pyplot as plt
import os
import pandas as pd
from floris import tools as wfct
from flasc.energy_ratio import energy_ratio
from flasc.dataframe_operations import \
dataframe_manipulations as dfm
from flasc import floris_tools as fsatools
def load_data():
# Load dataframe with scada data
root_dir = os.path.dirname(os.path.abspath(__file__))
ftr_path = os.path.join(root_dir, '..', 'demo_dataset',
'demo_dataset_scada_60s.ftr')
if not os.path.exists(ftr_path):
raise FileNotFoundError('Please run ./examples_artificial_data/demo_dataset/' +
'generate_demo_dataset.py before try' +
'ing any of the other examples.')
df = pd.read_feather(ftr_path)
return df
def load_floris():
# Initialize the FLORIS interface fi
print('Initializing the FLORIS object for our demo wind farm')
file_path = os.path.dirname(os.path.abspath(__file__))
fi_path = os.path.join(file_path, "../demo_dataset/demo_floris_input.yaml")
fi = wfct.floris_interface.FlorisInterface(fi_path)
return fi
if __name__ == '__main__':
# Load data and floris object
df = load_data()
fi = load_floris()
# Visualize layout
fig, ax = plt.subplots()
ax.plot(fi.layout_x, fi.layout_y, 'ko')
for ti in range(len(fi.layout_x)):
ax.text(fi.layout_x[ti], fi.layout_y[ti], "T{:02d}".format(ti))
ax.axis("equal")
ax.grid(True)
ax.set_xlabel("x-direction (m)")
ax.set_ylabel("y-direction (m)")
# We first need to define a wd against which we plot the energy ratios
# In this example, we set the wind direction to be equal to the mean
# wind direction between all turbines
df = dfm.set_wd_by_all_turbines(df)
# We reduce the dataframe to only data where the wind direction
# is between 20 and 90 degrees.
df = dfm.filter_df_by_wd(df=df, wd_range=[20., 90.])
df = df.reset_index(drop=True)
# We also need to define a reference wind speed and a reference power
# production against to normalize the energy ratios with. In this
# example, we set the wind speed equal to the mean wind speed
# of all upstream turbines. The upstream turbines are automatically
# derived from the turbine layout and the wind direction signal in
# the dataframe, df['wd']. The reference power production is set
# as the average power production of turbines 0 and 6, which are
# always upstream for wind directions between 20 and 90 deg.
df_upstream = fsatools.get_upstream_turbs_floris(fi)
df = dfm.set_ws_by_upstream_turbines(df, df_upstream)
df = dfm.set_pow_ref_by_turbines(df, turbine_numbers=[0, 6])
# # Initialize energy ratio object for the dataframe
era = energy_ratio.energy_ratio(df_in=df, verbose=True)
# Get energy ratio without uncertainty quantification
era.get_energy_ratio(
test_turbines=[1],
wd_step=2.0,
ws_step=1.0,
wd_bin_width=2.0,
)
fig, ax = era.plot_energy_ratio()
ax[0].set_title("Energy ratios for turbine 001 without UQ")
plt.tight_layout()
fig, ax = era.plot_energy_ratio(polar_plot=True) # Plot in polar format too
ax[0].set_title("Energy ratios for turbine 001 without UQ")
plt.tight_layout()
# Get energy ratio with uncertainty quantification
# using N=20 bootstrap samples and 5-95 percent conf. bounds.
era.get_energy_ratio(
test_turbines=[1],
wd_step=2.0,
ws_step=1.0,
wd_bin_width=2.0,
N=20,
percentiles=[5.0, 95.0]
)
fig, ax = era.plot_energy_ratio()
ax[0].set_title("Energy ratios for turbine 001 with UQ "
+ "(N=20, 90% confidence interval)")
plt.tight_layout()
# Get energy ratio with uncertainty quantification
# using N=20 bootstrap samples and block bootstrapping
era.get_energy_ratio(
test_turbines=[1],
wd_step=2.0,
ws_step=1.0,
wd_bin_width=2.0,
N=20,
percentiles=[5.0, 95.0],
num_blocks=20 # Resample over 20 blocks
)
fig, ax = era.plot_energy_ratio()
ax[0].set_title("Energy ratios for turbine 001 with UQ "
+ "(N=20, Block Bootstrapping)")
plt.tight_layout()
plt.show()
|
Python
|
CL
|
50b359f4a273417273ef52ff7ffff42906d3ac2830162f77d0637d81c963c4d0
|
# coding: utf-8
# flake8: noqa
"""
Module with configs for the Parse service.
"""
from __future__ import absolute_import
from ..templates.config import create_conf
#
PARSER_DIR = ''
# DC, MPConv
SORT_CMD = 'sort -S 1024M'
# archive extraction utility
ARCHIVE_UTIL_CMD = '7z'
# regular expression describing the archive formats supported for extraction
SUPPORTED_ARCHIVES = r'.*\.(rar|tar\.gz|tar\.Z|tar|tgz|zip|gz)'
# Command to pack an archive
PACK_CMD = '7z a {folder}.7z ./{folder}/*'
# Command to unpack an archive
UNPACK_CMD = '7z x -aoa {folder}.7z -o{folder}'
RAW_FILE = '/Users/vladworld/Documents/IOSS/develop/files/raw/HUA_HR/Sharing_eNodeB_para.csv'
FLUSH_DIR = '/Users/vladworld/Documents/IOSS/develop/files/dat/HUA_HR'
def get_conf():
"""
Factory method for the Parse microservice.
:return:
"""
return create_conf(service_name='parse', type='process', count=20)
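# Illustrative use (not part of the original config module):
#   conf = get_conf()  # builds the 'parse' service config via create_conf(service_name='parse', type='process', count=20)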
|
Python
|
CL
|
be9c03716f1ff8ead9cfd9cc83a4efd951f23592ca8013f4447d39dcc4f3cc9a
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
from tensorflow.python.platform import app
import intel_quantization.graph_converter as converter
def main(_):
print(args.inputs.split(','), args.outputs.split(','), args.output_graph)
if not os.path.exists(args.input_graph):
print("{} doesn't exist!".format(args.input_graph))
sys.exit(-1)
if args.inputs:
inputs = args.inputs.split(',')
else:
inputs = []
if args.outputs:
outputs = args.outputs.split(',')
else:
outputs = []
if args.excluded_ops:
excluded_ops = args.excluded_ops.split(',')
else:
excluded_ops = []
if args.excluded_nodes:
excluded_nodes = args.excluded_nodes.split(',')
else:
excluded_nodes = []
qt = converter.GraphConverter(args.input_graph, args.output_graph, inputs,
outputs, excluded_ops, excluded_nodes,
args.per_channel)
qt.debug = args.debug
if 'input_graph=' in args.callback:
prefix = args.callback.split('input_graph=')[0]
postfix = ' '.join(
args.callback.split('input_graph=')[-1].split(' ')[1:])
callback_cmd = prefix + 'input_graph={} ' + postfix
else:
callback_cmd = args.callback
qt.gen_calib_data_cmds = callback_cmd
qt.convert()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--callback',
type=str,
default=None,
help='The calibration callback command.')
parser.add_argument(
'--inputs',
type=str,
default=None,
help='The input op names of the graph.')
parser.add_argument(
'--outputs',
type=str,
default=None,
help='The output op names of the graph.')
parser.add_argument(
'--input_graph', type=str, default=None, help='The input fp32 graph.')
parser.add_argument(
'--output_graph', type=str, default=None, help='The quantized graph')
parser.add_argument(
'--per_channel',
type=bool,
default=False,
help='Apply the per channel quantization or not.')
parser.add_argument(
'--excluded_ops',
type=str,
default=None,
help='The ops that excluded from quantization.')
parser.add_argument(
'--excluded_nodes',
type=str,
default=None,
help='The nodes that excluded from quantization.')
parser.add_argument(
'--debug', type=bool, default=False, help='Debug mode.')
args = parser.parse_args()
app.run()
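# Illustrative invocation (an assumption, not from the original script; the file
# names and node names below are placeholders). The --callback command appears to
# be reused by GraphConverter as the calibration command, with 'input_graph=...'
# substituted when that token is present:
#
#   python this_script.py \
#       --input_graph=resnet50_fp32.pb \
#       --output_graph=resnet50_int8.pb \
#       --inputs=input --outputs=predict \
#       --excluded_ops=ConcatV2 \
#       --callback='python eval_model.py --input_graph=resnet50_fp32.pb --batch_size=10'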
|
Python
|
CL
|
c30d8a481133542f5c87233e817c96fbdd327a4300a983564a3da72afed97f8d
|
import kivy
from kivy.app import App
from kivy.lang import Builder
from kivy.core.window import Window
# screen
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.scrollview import ScrollView
from kivy.graphics import Color, Rectangle
from kivy.uix.filechooser import FileChooserIconView
# object
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.image import Image
#from kivy.uix.image import Image
#from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.button import Button
# layout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from functools import partial
# prop
from kivy.properties import NumericProperty, StringProperty, ObjectProperty
# python
import json, os
import numpy as np
from decoder.decoder import decode
class MenuWindow(Screen):
def __init__(self,**kwargs):
super(MenuWindow,self).__init__(**kwargs)
# set name
self.name = "menu"
self.path_image = ""
def on_leave(self):
self.clear_widgets()
def on_enter(self):
# add title label
self.title_label = Label(text="Calisthenics Evaluator 3000",
pos_hint={"center_x":0.5,"top":0.98},
size_hint= (0.1, 0.05),
font_size= (self.width**2 + self.height**2) / 5**4)
self.add_widget(self.title_label)
# Image
ratio_hw = Window.height / Window.width
print(ratio_hw)
self.image_plot = Image(pos_hint={'top':0.93,'center_x':0.5}, size_hint=(0.9,0.4), source="", allow_stretch=True)
self.add_widget(self.image_plot)
# file chooser
self.file_chooser = FileChooserIconView(pos_hint={'top':0.45}, size_hint=(1,0.45), sort_func=self.order_by_date)
self.file_chooser.bind(on_submit=partial(self.onFileSelected,self.file_chooser.selection))
self.add_widget(self.file_chooser)
print(self.file_chooser.file_system)
# Evaluate button
button_evaluate = Button(text='Evaluate',
pos_hint={"center_x":0.1,"top":0.5},
size_hint= (0.1, 0.05))
button_evaluate.bind(on_press=self.onPressed_evaluate)
self.add_widget(button_evaluate)
self.label_evaluate = Label(text='',
pos_hint={"center_x":0.4,"top":0.5},
size_hint= (0.3, 0.05))
#font_size= (self.width**2 + self.height**2) / 7**4)
self.add_widget(self.label_evaluate)
def order_by_date(self, files, filesystem):
list_file = sorted(f for f in files if filesystem.is_dir(f)) + sorted((f for f in files if not filesystem.is_dir(f)), key=lambda fi: os.stat(fi).st_mtime, reverse = True)
return list_file
def onFileSelected(self, entry, parent, selection, instance):
try:
self.path_image = selection[0]
self.image_plot.source = selection[0]
except Exception:  # selection may be empty or the file unreadable
pass
def onPressed_evaluate(self, instance):
# run ia on it and return a score, that is showed in self.label_evaluate
dic_res = decode(self.path_image)
if dic_res is not None:
text_to_show = "This looks like a {}".format(dic_res['name'])
for metric_key in dic_res['metrics']:
text_to_show += '\n{}: {:.0%}'.format(metric_key,dic_res['metrics'][metric_key])
self.label_evaluate.text = text_to_show
else:
text_to_show = "This looks like nothing known, please do better."
self.label_evaluate.text = text_to_show
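# Illustrative shape of the dict returned by decode(), inferred from the usage
# above (the exercise name and metric keys here are made-up examples):
#   {'name': 'planche', 'metrics': {'hip_angle': 0.87, 'arm_lock': 0.95}}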
class WindowManager(ScreenManager):
pass
if __name__ == "__main__":
sm = WindowManager()
sm.add_widget(MenuWindow(name='menu'))
sm.current = "menu"
class MyMainApp(App):
def build(self):
return sm
Window.fullscreen = False
Window.size = (1080,2042)
import sys
app = MyMainApp()
sys.exit(app.run())
|
Python
|
CL
|
26eaab66b93c678a6f20728046a139b7bdf20fe7bd47384a6c3c06053773efe1
|
import os
# from .department import Department
from pipe.am.element import Element
from pipe.am.environment import Department, Environment
from pipe.am import pipeline_io
from pipe.am.registry import Registry
'''
body module
'''
class Body(object):
'''
Abstract class describing bodies that make up a project.
'''
# TODO allow users to subscribe to a body and receive emails when changes are made
PIPELINE_FILENAME = '.body'
NAME = 'name'
REFERENCES = 'references'
DESCRIPTION = 'description'
TYPE = 'type'
FRAME_RANGE = 'frame_range'
@staticmethod
def create_new_dict(name):
'''
populate a dictionary with all the fields needed to create a new body
'''
datadict = {}
datadict[Body.NAME] = name
datadict[Body.REFERENCES] = []
datadict[Body.DESCRIPTION] = ''
datadict[Body.TYPE] = AssetType.PROP
datadict[Body.FRAME_RANGE] = 0
return datadict
@staticmethod
def get_parent_dir():
'''
return the parent directory that bodies of this type are stored in
'''
return Environment().get_assets_dir()
def __init__(self, filepath):
'''
creates a Body instance describing the asset or shot stored in the given filepath
'''
self._env = Environment()
self._filepath = filepath
self._pipeline_file = os.path.join(filepath, Body.PIPELINE_FILENAME)
if not os.path.exists(self._pipeline_file):
raise EnvironmentError('not a valid body: ' + self._pipeline_file + ' does not exist')
self._datadict = pipeline_io.readfile(self._pipeline_file)
def __str__(self):
name = self.get_name()
filepath = self.get_filepath()
type = self.get_type()
return "<Body Object of TYPE " + str(type) + " with NAME " + str(name) + " AT " + str(filepath) + ">"
def get_name(self):
return self._datadict[Body.NAME]
def get_filepath(self):
return self._filepath
def is_shot(self):
if self.get_type() == AssetType.SHOT:
return True
else:
return False
def is_set(self):
if self.get_type() == AssetType.SET:
return True
else:
return False
def is_asset(self):
return True
def is_tool(self):
raise NotImplementedError('subclass must implement is_tool')
def is_crowd_cycle(self):
raise NotImplementedError('subclass must implement is_crowd_cycle')
def get_description(self):
return self._datadict[Body.DESCRIPTION]
def get_type(self):
return self._datadict[Body.TYPE]
def update_type(self, new_type):
self._datadict[Body.TYPE] = new_type
pipeline_io.writefile(self._pipeline_file, self._datadict)
def get_frame_range(self):
return self._datadict[Body.FRAME_RANGE]
def set_frame_range(self, frame_range):
self._datadict[Body.FRAME_RANGE] = frame_range
def update_frame_range(self, frame_range):
self._datadict[Body.FRAME_RANGE] = frame_range
pipeline_io.writefile(self._pipeline_file, self._datadict)
def version_prop_json(self, prop, filepath):
files = os.listdir(filepath)
latest_version = -1
for file in files:
filename, ext = os.path.splitext(file)
if not str(ext) == ".json":
continue
if str(prop) not in str(filename):
continue
name_and_version = str(filename).split("_")
version = name_and_version[-1]
if int(version) > latest_version:
latest_version = int(version)
latest_version += 1
return latest_version, str(latest_version)
def get_element(self, department, name=Element.DEFAULT_NAME, force_create=False):
'''
get the element object for this body from the given department. Raises EnvironmentError
if no such element exists.
department -- the department to get the element from
name -- the name of the element to get. Defaults to the name of the
element created by default for each department.
'''
element_dir = os.path.join(self._filepath, department, name)
if not os.path.exists(element_dir):
if force_create:
try:
self.create_element(department, name)
except Exception as e:
print(e)
else:
raise EnvironmentError('no such element: ' + element_dir + ' does not exist')
return Registry().create_element(department, element_dir)
def create_element(self, department, name):
'''
create an element for this body from the given department and return the
resulting element object. Raises EnvironmentError if the element already exists.
department -- the department to create the element for
name -- the name of the element to create
'''
dept_dir = os.path.join(self._filepath, department)
if not os.path.exists(dept_dir):
pipeline_io.mkdir(dept_dir)
name = pipeline_io.alphanumeric(name)
element_dir = os.path.join(dept_dir, name)
if not pipeline_io.mkdir(element_dir):
raise EnvironmentError('element already exists: ' + element_dir)
empty_element = Registry().create_element(department)
datadict = empty_element.create_new_dict(name, department, self.get_name())
pipeline_io.writefile(os.path.join(element_dir, empty_element.PIPELINE_FILENAME), datadict)
return Registry().create_element(department, element_dir)
def list_elements(self, department):
'''
return a list of all elements for the given department in this body
'''
subdir = os.path.join(self._filepath, department)
if not os.path.exists(subdir):
return []
dirlist = os.listdir(subdir)
elementlist = []
for elementdir in dirlist:
abspath = os.path.join(subdir, elementdir)
if os.path.exists(os.path.join(abspath, Element.PIPELINE_FILENAME)):
elementlist.append(elementdir)
elementlist.sort()
return elementlist
def add_reference(self, reference):
'''
Add the given reference to this body. If it already exists, do nothing. If reference is not a valid
body, raise an EnvironmentError.
'''
ref_asset_path = os.path.join(self._env.get_assets_dir(), reference, Body.PIPELINE_FILENAME)
ref_shot_path = os.path.join(self._env.get_shots_dir(), reference, Body.PIPELINE_FILENAME)
ref_crowd_path = os.path.join(self._env.get_crowds_dir(), reference, Body.PIPELINE_FILENAME)
if not os.path.exists(ref_asset_path) and not os.path.exists(ref_shot_path) and not os.path.exists(ref_crowd_path):
raise EnvironmentError(reference + ' is not a valid body')
if reference not in self._datadict[Body.REFERENCES]:
self._datadict[Body.REFERENCES].append(reference)
pipeline_io.writefile(self._pipeline_file, self._datadict)
def remove_reference(self, reference):
'''
Remove the given reference, if it exists, and return True. Otherwise do nothing, and return False.
'''
try:
self._datadict[Body.REFERENCES].remove(reference)
pipeline_io.writefile(self._pipeline_file, self._datadict)
return True
except ValueError:
return False
def update_description(self, description):
self._datadict[Body.DESCRIPTION] = description
pipeline_io.writefile(self._pipeline_file, self._datadict)
def get_references(self):
'''
Return a list of all references for this body.
'''
return self._datadict[Body.REFERENCES]
def has_relation(self, attribute, relate, value):
'''
Return True if this body has the given attribute and the attribute has the given
relationship to the given value. Return False otherwise.
'''
if attribute not in self._datadict:
return False
return relate(self._datadict[attribute],value)
class AssetType:
'''
Class describing types of assets.
'''
ACTOR = 'actor'
SET = 'set'
PROP = 'prop'
TOOL = 'tool'
SHOT = 'shot'
ALL = [ACTOR, PROP, SET, SHOT, TOOL]
MAYA = [ACTOR, PROP, SET, SHOT]
def __init__(self):
pass
def list_asset_types(self):
return self.ALL
def list_maya_types(self):
return self.MAYA
class Asset(Body):
'''
Class describing an asset body.
'''
@staticmethod
def create_new_dict(name):
datadict = Body.create_new_dict(name)
return datadict
def __str__(self):
return super(Asset, self).__str__()
def is_tool(self):
return False
def is_crowd_cycle(self):
return False
class Shot(Body):
'''
Class describing a shot body.
'''
@staticmethod
def create_new_dict(name):
datadict = Body.create_new_dict(name)
return datadict
def __str__(self):
return super(Shot, self).__str__()
def is_tool(self):
return False
def is_crowd_cycle(self):
return False
class Tool(Body):
'''
Class describing a tool body.
'''
@staticmethod
def create_new_dict(name):
datadict = Body.create_new_dict(name)
return datadict
def __str__(self):
return super(Tool, self).__str__()
def is_shot(self):
return False
def is_asset(self):
return False
def is_tool(self):
return True
def is_crowd_cycle(self):
return False
class CrowdCycle(Body):
'''
Class describing a crowd cycle body.
'''
@staticmethod
def create_new_dict(name):
datadict = Body.create_new_dict(name)
return datadict
def __str__(self):
return super(CrowdCycle, self).__str__()
def is_shot(self):
return False
def is_asset(self):
return False
def is_tool(self):
return False
def is_crowd_cycle(self):
return True
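# Hypothetical usage sketch (not part of the original module; the filepath and
# the department value are assumptions). Bodies are loaded from a directory that
# already contains a '.body' pipeline file, and elements are fetched per
# department, optionally creating them on demand:
#
#     body = Asset('/path/to/assets/teapot')
#     print(body)                                    # <Body Object of TYPE prop ...>
#     element = body.get_element('model', force_create=True)
#     body.add_reference('table_set')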
|
Python
|
CL
|
e1fc52f03cc637fdb770080d3d30f75fadb28e049b437030c42459a88f0b7070
|
## @file KeyWording.py
# @brief This module sends the proper keywords to the database
# and works on the number of occurrences of each keyword per article.
import sqlite3
import sys
import re
import time
import PdfDownload
from collections import defaultdict
import math
connection = None
## @fn regexp(expr, item)
# @brief This function implements python regular expression mechanisms for sqlite3 queries
def regexp(expr, item):
reg = re.compile(expr, re.IGNORECASE)
return reg.search(item) is not None
def generateYearTrendData(cursor, keyWord, threshold):
dic, tot = keyWordingDB(cursor=cursor, keyWord=keyWord, threshold=threshold)
for year, idList in dic.iteritems():
print year,len(idList)
## @fn keyWordingDB(cursor, keyWord, threshold)
# @brief The main keywording function that mines the database PDF section to report keywording results
# This function uses @var KeyWord to find the IDs that match the expression
def keyWordingDB(cursor, keyWord, threshold):
data = {}
print keyWord
# Request for the count of the keywords
cursor.execute('SELECT count(*) FROM IEEETable WHERE PDF REGEXP ? and PDF REGEXP ?',
keyWord)
dataKey = str(keyWord).replace(',', ' & ')[1:-1]
data[dataKey] = cursor.fetchall()
# To print the keyword and number of articles found data
for key, values in data.iteritems():
print key, values
# Do the same query but for id, document title, and PDF
cursor.execute('SELECT id,Document_Title,PDF,Publication_Year FROM IEEETable WHERE PDF REGEXP ? and PDF REGEXP ?',
keyWord)
idPDF = cursor.fetchall()
# print "length",len(idPDF)
idYearDic = defaultdict(list)
# Use the queried data to find the count of occurrences of each keyword
for id,title, pdfText, year in idPDF:
countDic = PdfDownload.pdfCount(keyWord,fileText=pdfText)
# print "title:", title
if countDic[keyWord[0]] >= threshold and countDic[keyWord[1]] >= 4:
# print "( id:", id, ")",
# print countDic
idYearDic[year].append(id)
# Get the total number of classified articles
totalNumber = 0
for values in idYearDic.values():
totalNumber += len(values)
print "Total classified articles:", totalNumber
print "\n---------------------------------------------------------------\n"
return idYearDic, totalNumber
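# Illustrative call (mirrors the classify* functions below; the two-element keyWord
# list feeds the two REGEXP placeholders in the query):
#   idYearDic, total = keyWordingDB(cursor=cursor, keyWord=['high level synthesis|synthesis|HLS', ''], threshold=5)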
def checkDuplicates(firstData, secondData):
firstFullIDList = []
secondFullIDList = []
for ids in firstData[0]:
for id in ids:
firstFullIDList.append(id)
for ids in secondData[0]:
for id in ids:
secondFullIDList.append(id)
return len(set(firstFullIDList) & set(secondFullIDList))
## @fn classifyQ1(cursor)
# @brief This functions makes the classifcation for Q1 and reports results
def classifyQ1(cursor):
data = {}
tot = 0
# IEEE Data only
data['hardware'] =keyWordingDB(cursor=cursor, keyWord=['high level synthesis|synthesis|HLS',''],threshold=5)
data['software'] = keyWordingDB(cursor=cursor, keyWord=['virtual prototype|embedded software|virtual platform|driver|software debug',''],threshold=3)
data['system'] = keyWordingDB(cursor=cursor, keyWord=['design exploration|system designer|optimization|behavioral model|partition point',''],threshold=5)
# Check for duplicates to determine intersection between classes
print 'hw/sw duplicates:', checkDuplicates(data['hardware'], data['software'])
print 'hw/system duplicates:', checkDuplicates(data['hardware'], data['system'])
print 'system/sw duplicates:', checkDuplicates(data['system'], data['software'])
total = 0
for key in data.keys():
print data[key][1]
# Set the precision of the decimal representation to 3
print str(int(math.ceil(float('%.3f'%(((data[key][1])/float(695))*100)))))+'/'+key+','
total += data[key][1]/float('%.2f'%(float(695)))
#
print "Classification Coverage:", total
## @fn classifyQ2(cursor)
# @brief This function makes the classification for Q2 and reports results
def classifyQ2(cursor):
data = {}
tot = 0
# TODO: Fix the total calculations without too many local vars
# Check to see the number of tools roughly for coverage % calculation
data['tools']= keyWordingDB(cursor=cursor, keyWord=['tools|Tool|language|framework',''], threshold=5)
data['Matlab']=keyWordingDB(cursor=cursor, keyWord=['Matlab|Simulink',''],threshold=10)
data['systemC'] = keyWordingDB(cursor=cursor, keyWord=['SystemC|IEEE 1666|systemC',''],threshold=10)
data['C/C++'] = keyWordingDB(cursor=cursor, keyWord=['C\+\+|C language|C Language',''],threshold=10)
# data['C'] = keyWordingDB(cursor=cursor, keyWord=['C language|C Language',''],threshold=10)
data['TLM'] = keyWordingDB(cursor=cursor, keyWord=['TLM|tlm 2.0|transaction level modeling',''],threshold=10)
data['HDL'] = keyWordingDB(cursor=cursor, keyWord=['VHDL|verilog|SVA|system verilog|HDVL',''],threshold=10)
data['ptolemy'] = keyWordingDB(cursor=cursor, keyWord=['ptolemy',''],threshold=10)
data['UML'] = keyWordingDB(cursor=cursor, keyWord=['UML',''],threshold=10)
data['SysML'] = keyWordingDB(cursor=cursor, keyWord=['SysML',''],threshold=10)
data['MARTE'] = keyWordingDB(cursor=cursor, keyWord=['MARTE',''],threshold=1)
data['Rosetta'] = keyWordingDB(cursor=cursor, keyWord=['Rosetta',''],threshold=1)
data['IP-XACT'] = keyWordingDB(cursor=cursor, keyWord=['IP-XACT',''],threshold=1)
# # Check for duplicates to determine intersection between classes
print 'C++ & SystemC duplicates:', checkDuplicates(data['C/C++'], data['systemC'])
# print 'C & SystemC duplicates:', checkDuplicates(data['C'], data['systemC'])
print 'SystemC & Matlab duplicates:', checkDuplicates(data['Matlab'], data['systemC'])
print 'SystemC & TLM duplicates:', checkDuplicates(data['systemC'], data['TLM'])
print 'SystemC & UML duplicates:', checkDuplicates(data['systemC'], data['UML'])
print 'SystemC & HDL duplicates:', checkDuplicates(data['systemC'], data['HDL'])
total = 0
for key in data.keys():
# Set the precision of the decimal representation to 3
print str(int(math.ceil(float('%.3f'%(((data[key][1])/float(data['tools'][1]))*100)))))+'/'+key+','
total += data[key][1]/float('%.2f'%(float(data['tools'][1])))
print "Classification Coverage:", total-1
## @fn classifyAgile(cursor)
# @brief This functions makes the classifcation for agile and reports results
def classifyAgile(cursor):
data = {}
tot = 0
# IEEE Data only
data['agile'] =keyWordingDB(cursor=cursor, keyWord=['agile |scrum|TDD|XPI',''],threshold=10)
# data['software'] = keyWordingDB(cursor=cursor, keyWord=['virtual prototype|embedded software|virtual platform|driver|software debug',''],threshold=3)
# data['system'] = keyWordingDB(cursor=cursor, keyWord=['design exploration|system designer|optimization|behavioral model|partition point',''],threshold=5)
#
# # Check for duplicates to determine intersection between classes
# print 'hw/sw duplicates:', checkDuplicates(data['hardware'], data['software'])
# print 'hw/system duplicates:', checkDuplicates(data['hardware'], data['system'])
# print 'system/sw duplicates:', checkDuplicates(data['system'], data['software'])
# total = 0
for key, values in data.iteritems():
print values[0]
# # Set the precision of the decimal representation to 3
# print str(int(math.ceil(float('%.3f'%(((data[key][1])/float(695))*100)))))+'/'+key+','
# total += data[key][1]/float('%.2f'%(float(695)))
##
# print "Classification Coverage:", total
try:
t = time.time()
# Connect to the database
connection = sqlite3.connect('PySysMapDB.db')
# Set the connection to return string (UTF-8)
connection.text_factory = str
# Create the regex function map for sqlite
connection.create_function("REGEXP", 2, regexp)
cursor = connection.cursor()
# classifyQ1(cursor)
# generateYearTrendData(cursor,keyWord=['high level synthesis|synthesis|HLS',''], threshold=6)
# generateYearTrendData(cursor,keyWord=['virtual prototype|embedded software|virtual platform|driver|software debug|software/hardware partition',''], threshold=5)
# generateYearTrendData(cursor,keyWord=['design exploration|system designer|optimization|behavioral model|partition point',''], threshold=6)
classifyAgile(cursor)
# for year, id in dic.iteritems():
# print "year", year, "num", id
print "\n Time Taken: %.3f sec" % (time.time()-t)
except sqlite3.Error, e:
print "Error %s:" % e.args[0]
sys.exit(1)
#finally:
# if connection:
# # Commit the results and close the connection
# connection.commit()
|
Python
|
CL
|
574310d82725bd305c73b6deccd401ad6b9f6298835f296090dc963a794fdc9c
|
import json
import re
from datetime import datetime
from typing import Any, Dict, Optional
from dateutil import parser
from django.conf import settings
from django.http import JsonResponse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status
from sentry_sdk import capture_exception
from statshog.defaults.django import statsd
from posthog.celery import app as celery_app
from posthog.ee import is_ee_enabled
from posthog.exceptions import RequestParsingError, generate_exception_response
from posthog.helpers.session_recording import preprocess_session_recording_events
from posthog.models import Team, User
from posthog.models.feature_flag import get_active_feature_flags
from posthog.models.utils import UUIDT
from posthog.utils import cors_response, get_ip_address, load_data_from_request
if is_ee_enabled():
from ee.kafka_client.client import KafkaProducer
from ee.kafka_client.topics import KAFKA_EVENTS_PLUGIN_INGESTION
def log_event(
distinct_id: str,
ip: Optional[str],
site_url: str,
data: dict,
team_id: int,
now: datetime,
sent_at: Optional[datetime],
event_uuid: UUIDT,
*,
topic: str = KAFKA_EVENTS_PLUGIN_INGESTION,
) -> None:
if settings.DEBUG:
print(f'Logging event {data["event"]} to Kafka topic {topic}')
data = {
"uuid": str(event_uuid),
"distinct_id": distinct_id,
"ip": ip,
"site_url": site_url,
"data": json.dumps(data),
"team_id": team_id,
"now": now.isoformat(),
"sent_at": sent_at.isoformat() if sent_at else "",
}
KafkaProducer().produce(topic=topic, data=data)
def _datetime_from_seconds_or_millis(timestamp: str) -> datetime:
if len(timestamp) > 11: # assuming milliseconds / update "11" to "12" if year > 5138 (set a reminder!)
timestamp_number = float(timestamp) / 1000
else:
timestamp_number = int(timestamp)
return datetime.fromtimestamp(timestamp_number, timezone.utc)
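# Illustrative values (not from the original module): a 10-digit string is treated
# as seconds and a 13-digit string as milliseconds; both below map to 2021-01-01 00:00:00 UTC.
#   _datetime_from_seconds_or_millis("1609459200")     # seconds
#   _datetime_from_seconds_or_millis("1609459200000")  # milliseconds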
def _get_sent_at(data, request) -> Optional[datetime]:
if request.GET.get("_"): # posthog-js
sent_at = request.GET["_"]
elif isinstance(data, dict) and data.get("sent_at"): # posthog-android, posthog-ios
sent_at = data["sent_at"]
elif request.POST.get("sent_at"): # when urlencoded body and not JSON (in some test)
sent_at = request.POST["sent_at"]
else:
return None
if re.match(r"^[0-9]+$", sent_at):
return _datetime_from_seconds_or_millis(sent_at)
return parser.isoparse(sent_at)
def _get_token(data, request) -> Optional[str]:
if request.POST.get("api_key"):
return request.POST["api_key"]
if request.POST.get("token"):
return request.POST["token"]
if data:
if isinstance(data, list):
data = data[0] # Mixpanel Swift SDK
if isinstance(data, dict):
if data.get("$token"):
return data["$token"] # JS identify call
if data.get("token"):
return data["token"] # JS reloadFeatures call
if data.get("api_key"):
return data["api_key"] # server-side libraries like posthog-python and posthog-ruby
if data.get("properties") and data["properties"].get("token"):
return data["properties"]["token"] # JS capture call
return None
def _get_project_id(data, request) -> Optional[int]:
if request.GET.get("project_id"):
return int(request.GET["project_id"])
if request.POST.get("project_id"):
return int(request.POST["project_id"])
if isinstance(data, list):
data = data[0] # Mixpanel Swift SDK
if data.get("project_id"):
return int(data["project_id"])
return None
def _get_distinct_id(data: Dict[str, Any]) -> str:
raw_value: Any = ""
try:
raw_value = data["$distinct_id"]
except KeyError:
try:
raw_value = data["properties"]["distinct_id"]
except KeyError:
raw_value = data["distinct_id"]
if not raw_value:
raise ValueError()
return str(raw_value)[0:200]
def _ensure_web_feature_flags_in_properties(event: Dict[str, Any], team: Team, distinct_id: str):
"""If the event comes from web, ensure that it contains property $active_feature_flags."""
if event["properties"].get("$lib") == "web" and not event["properties"].get("$active_feature_flags"):
event["properties"]["$active_feature_flags"] = get_active_feature_flags(team, distinct_id)
@csrf_exempt
def get_event(request):
timer = statsd.timer("posthog_cloud_event_endpoint").start()
now = timezone.now()
try:
data = load_data_from_request(request)
except RequestParsingError as error:
capture_exception(error) # We still capture this on Sentry to identify actual potential bugs
return cors_response(
request, generate_exception_response("capture", f"Malformed request data: {error}", code="invalid_payload"),
)
if not data:
return cors_response(
request,
generate_exception_response(
"capture",
"No data found. Make sure to use a POST request when sending the payload in the body of the request.",
code="no_data",
),
)
sent_at = _get_sent_at(data, request)
token = _get_token(data, request)
if not token:
return cors_response(
request,
generate_exception_response(
"capture",
"API key not provided. You can find your project API key in PostHog project settings.",
type="authentication_error",
code="missing_api_key",
status_code=status.HTTP_401_UNAUTHORIZED,
),
)
team = Team.objects.get_team_from_token(token)
if team is None:
try:
project_id = _get_project_id(data, request)
except ValueError:
return cors_response(
request,
generate_exception_response(
"capture", "Invalid Project ID.", code="invalid_project", attr="project_id"
),
)
if not project_id:
return cors_response(
request,
generate_exception_response(
"capture",
"Project API key invalid. You can find your project API key in PostHog project settings.",
type="authentication_error",
code="invalid_api_key",
status_code=status.HTTP_401_UNAUTHORIZED,
),
)
user = User.objects.get_from_personal_api_key(token)
if user is None:
return cors_response(
request,
generate_exception_response(
"capture",
"Invalid Personal API key.",
type="authentication_error",
code="invalid_personal_api_key",
status_code=status.HTTP_401_UNAUTHORIZED,
),
)
team = user.teams.get(id=project_id)
if isinstance(data, dict):
if data.get("batch"): # posthog-python and posthog-ruby
data = data["batch"]
assert data is not None
elif "engage" in request.path_info: # JS identify call
data["event"] = "$identify" # make sure it has an event name
if isinstance(data, list):
events = data
else:
events = [data]
try:
events = preprocess_session_recording_events(events)
except ValueError as e:
return cors_response(
request, generate_exception_response("capture", f"Invalid payload: {e}", code="invalid_payload")
)
for event in events:
try:
distinct_id = _get_distinct_id(event)
except KeyError:
return cors_response(
request,
generate_exception_response(
"capture",
"You need to set user distinct ID field `distinct_id`.",
code="required",
attr="distinct_id",
),
)
except ValueError:
return cors_response(
request,
generate_exception_response(
"capture",
"Distinct ID field `distinct_id` must have a non-empty value.",
code="required",
attr="distinct_id",
),
)
if not event.get("event"):
return cors_response(
request,
generate_exception_response(
"capture", "You need to set user event name, field `event`.", code="required", attr="event"
),
)
if not event.get("properties"):
event["properties"] = {}
_ensure_web_feature_flags_in_properties(event, team, distinct_id)
event_uuid = UUIDT()
ip = None if team.anonymize_ips else get_ip_address(request)
if is_ee_enabled():
statsd.incr("posthog_cloud_plugin_server_ingestion")
log_event(
distinct_id=distinct_id,
ip=ip,
site_url=request.build_absolute_uri("/")[:-1],
data=event,
team_id=team.pk,
now=now,
sent_at=sent_at,
event_uuid=event_uuid,
)
else:
task_name = "posthog.tasks.process_event.process_event_with_plugins"
celery_queue = settings.PLUGINS_CELERY_QUEUE
celery_app.send_task(
name=task_name,
queue=celery_queue,
args=[distinct_id, ip, request.build_absolute_uri("/")[:-1], event, team.pk, now.isoformat(), sent_at,],
)
timer.stop()
statsd.incr(f"posthog_cloud_raw_endpoint_success", tags={"endpoint": "capture",})
return cors_response(request, JsonResponse({"status": 1}))
|
Python
|
CL
|
dce8b125ab929f51edceb2df796b47d79dff0557976905037ffe9af9cf50a504
|
from plenum.common.ledger import Ledger
from plenum.common.request import Request
from plenum.common.types import f
from plenum.server.consensus.utils import get_original_viewno
class ThreePcBatch:
def __init__(self, ledger_id, inst_id, view_no, pp_seq_no, pp_time, state_root, txn_root, valid_digests,
pp_digest, primaries=None, node_reg=None, has_audit_txn=True, original_view_no=None) -> None:
self.ledger_id = ledger_id
self.inst_id = inst_id
self.view_no = view_no
self.pp_seq_no = pp_seq_no
self.pp_time = pp_time
self.state_root = state_root
self.txn_root = txn_root
self.primaries = primaries
self.valid_digests = valid_digests
self.pp_digest = pp_digest
self.node_reg = node_reg
self.has_audit_txn = has_audit_txn
self.original_view_no = original_view_no
def __repr__(self) -> str:
return str(self.__dict__)
@staticmethod
def from_pre_prepare(pre_prepare, state_root, txn_root, valid_digests):
return ThreePcBatch(
ledger_id=pre_prepare.ledgerId,
inst_id=pre_prepare.instId,
view_no=pre_prepare.viewNo,
pp_seq_no=pre_prepare.ppSeqNo,
pp_time=pre_prepare.ppTime,
# do not trust PrePrepare's root hashes and use the current replica's ones
state_root=state_root,
txn_root=txn_root,
valid_digests=valid_digests,
pp_digest=pre_prepare.digest,
has_audit_txn=f.AUDIT_TXN_ROOT_HASH.nm in pre_prepare and pre_prepare.auditTxnRootHash is not None,
original_view_no=get_original_viewno(pre_prepare)
)
@staticmethod
def from_ordered(ordered):
return ThreePcBatch(
ledger_id=ordered.ledgerId,
inst_id=ordered.instId,
view_no=ordered.viewNo,
pp_seq_no=ordered.ppSeqNo,
pp_time=ordered.ppTime,
state_root=Ledger.strToHash(ordered.stateRootHash),
txn_root=Ledger.strToHash(ordered.txnRootHash),
primaries=ordered.primaries,
valid_digests=ordered.valid_reqIdr,
pp_digest=ordered.digest,
node_reg=ordered.nodeReg,
has_audit_txn=f.AUDIT_TXN_ROOT_HASH.nm in ordered and ordered.auditTxnRootHash is not None,
original_view_no=ordered.originalViewNo
)
@staticmethod
def from_batch_committed_dict(batch_comitted):
valid_req_keys = [Request(**req_dict).key for req_dict in batch_comitted[f.REQUESTS.nm]]
return ThreePcBatch(
ledger_id=batch_comitted[f.LEDGER_ID.nm],
inst_id=batch_comitted[f.INST_ID.nm],
view_no=batch_comitted[f.VIEW_NO.nm],
pp_seq_no=batch_comitted[f.PP_SEQ_NO.nm],
pp_time=batch_comitted[f.PP_TIME.nm],
state_root=Ledger.strToHash(batch_comitted[f.STATE_ROOT.nm]),
txn_root=Ledger.strToHash(batch_comitted[f.TXN_ROOT.nm]),
primaries=batch_comitted[f.PRIMARIES.nm],
valid_digests=valid_req_keys,
pp_digest=batch_comitted[f.DIGEST.nm],
node_reg=batch_comitted[f.NODE_REG.nm],
has_audit_txn=f.AUDIT_TXN_ROOT_HASH.nm in batch_comitted and batch_comitted[
f.AUDIT_TXN_ROOT_HASH.nm] is not None,
original_view_no=batch_comitted[f.ORIGINAL_VIEW_NO.nm] if f.ORIGINAL_VIEW_NO.nm in batch_comitted else None
)
|
Python
|
CL
|
1fc9f9e7e5eb3b4ee58b19bb7a64d377bda94aa8238801ef96011b86e0b61394
|
#! /usr/bin/env python
#
# This script tests the persistence of a redis database
import redis
import socket # because we might handle a socket timeout exception
import os
import sys
import subprocess
import random
import time
def check_super_user():
"""This function checks that the process has super user (root) privileges.
This software will only run on UNIX. It will not run under Cygwin. I don't
know if this software will run under MS-Windows"""
egid = os.getegid()
euid = os.geteuid()
return egid == 0 and euid == 0
def stop_start_redis(op):
"""This function either starts or stops the redis-server daemon, depending
on the value of op. If op is start, then the daemon is started. If op is stop
then the daemon is stopped. If other values are used, then the results are
unpredictable"""
ret_status = subprocess.call('service redis-server %s' % op, shell=True)
if ret_status != 0 :
print "Failed to %s the redis-server" % op
sys.exit(1)
def test_persistence ( r_server, save=True ):
# Generate a random string, and see if it is preserved across database calls
value = str(random.random())
print "value is %s" % value
r_server.set('foo', value)
if save :
r_server.save() # synchronous save - will block until complete
print "Shutting down the server after saving state"
else :
print "Shutting down the server without saving state first"
stop_start_redis("stop")
stop_start_redis("start")
while True:
print "Waiting for the daemon to start"
time.sleep(5.0)
try:
results = r_server.get('foo')
except redis.exceptions.ConnectionError:
print "The daemon isn't accepting connections yet - wait"
else:
print "results of the get is %s" % results
break
assert results == value, """The value was *not* preserved across daemon
restarts. save is %s""" % str(save)
print "The value was preserved across daemon restart. save is %s" % str(save)
if __name__ == "__main__" :
if not check_super_user() :
print """This program must run with root privileges because it stops and
the restarts the redis server"""
sys.exit(1)
# Open a connection to the database
# r_server = redis.Redis("108.59.89.216", port=6379, db=0)
r_server = redis.Redis("localhost", port=6379, db=1)
test_persistence ( r_server, save=True )
try :
test_persistence ( r_server, save=False )
except AssertionError:
print "The value was not persisted when the state of the database was not saved"
|
Python
|
CL
|
a117148acf1d011a5e9560792e88abf4f5b9bc3fe8d0501eb6db93dcc793f619
|
import nltk
import pandas as pd
# 1st, I will check the available data. This data contains the texts with their respective Author.
texts_with_author = pd.read_csv("data/train.csv")
texts_with_author.head()
# The ids are not important; we need to group and join all the texts by author.
# The stop words inside these texts will help because we are not trying to understand the intention of the author.
# Every person uses these stop words with more or less frequency than others.
texts_groupby_author = texts_with_author.groupby("author")["text"].apply(' '.join).reset_index()
texts_groupby_author
# We transform every text to lowercase so that the same word is not tokenized separately when it appears capitalized.
texts_groupby_author["text"] = texts_groupby_author.text.str.lower()
texts_groupby_author
# We can use a dict to save the word frequencies for each author
word_frequencies_by_author = {}
for _, row in texts_groupby_author.iterrows():
author = row["author"]
text = row["text"]
tokens = nltk.tokenize.word_tokenize(text)
frequency = nltk.FreqDist(tokens)
word_frequencies_by_author[author] = frequency
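# Note (added for clarity): FreqDist.freq(word) returns the relative frequency of `word` in that
# author's corpus (count / total tokens), so the per-author sums computed below stay comparable
# even though the corpora have different sizes.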
# Test
sentence = "Still, as I urged our leaving Ireland with such inquietude and impatience, my father thought it best to yield."
sentence = sentence.lower()
sentence_tokens = nltk.tokenize.word_tokenize(sentence)
for author in word_frequencies_by_author.keys():
total = 0
for word in sentence_tokens:
total += word_frequencies_by_author[author].freq(word)
print(total, author)
# End test
# Create a new dataframe to save the data
dataframe_with_frequencies = pd.DataFrame(columns=('id', 'EAP', 'HPL', 'MWS'))
dataframe_with_frequencies.head()
# Open the test file and iterate on it
test = pd.read_csv("data/test.csv")
for iter_num, row in test.iterrows():
sentence = row["text"] # Get sentence
sentence = sentence.lower() # Str to lower
sentence_tokens = nltk.tokenize.word_tokenize(sentence) # Tokenize test words
# Get the author and probability of authorship attribution
row_results = [row["id"]]
for author in word_frequencies_by_author.keys():
total = 0
for word in sentence_tokens:
total += word_frequencies_by_author[author].freq(word)
#print(total, author)
total = total / len(sentence_tokens)
row_results.append(total)
# Add a new row to the dataframe
dataframe_with_frequencies.loc[iter_num+1] = row_results
# Save the dataframe to a CSV file
dataframe_with_frequencies.to_csv("word_frequency.csv", index=False)
|
Python
|
CL
|
6188039cd520157565fcb6f23cf9d935e809b24b6a6015f5789f8f0d0befaf12
|
#!/usr/bin/env python
import os
import psycopg2
import boto3
import re
from multiprocessing.pool import ThreadPool
# S3 legacy bucket name to use. It should exist and be accessible to your AWS credentials
S3_LEGACY_BUCKET_NAME = os.getenv(
'S3_LEGACY_BUCKET_NAME',
'sketch-legacy-s3'
)
# S3 production bucket name to use. It should exist and be accessible to your AWS credentials
S3_PRODUCTION_BUCKET_NAME = os.getenv(
'S3_PRODUCTION_BUCKET_NAME',
'sketch-production-s3'
)
# S3 connection details
S3_LEGACY_ENDPOINT_URL = os.getenv(
'S3_LEGACY_ENDPOINT_URL',
'https://sketch-legacy-s3.s3.amazonaws.com'
)
S3_PRODUCTION_ENDPOINT_URL = os.getenv(
'S3_PRODUCTION_ENDPOINT_URL',
'https://sketch-production-s3.s3.amazonaws.com'
)
# AWS creds
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', 'minioadmin')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'minioadmin')
AWS_DEFAULT_REGION = os.getenv('AWS_DEFAULT_REGION', 'us-east-1')
# DB connection
try:
client = boto3.client('rds')
instances = client.describe_db_instances()
rds_host_endpoint = instances['DBInstances'][0]['Endpoint']['Address']
DB_CONN_STRING = os.getenv(
'DB_CONN_STRING',
'postgres://sketch_user:YourPwdShouldBeLongAndSecure!@'
+ rds_host_endpoint + '/sketchdb'
)
except Exception as e:
raise e
# Get list of legacy db records
def get_legacy_db_records(connection,src_bucket, dst_bucket,path=None):
try:
cur = connection.cursor()
legacy_avatars = cur.execute("select * from avatars where path like '%{}%'".format(path))
legacy_avatars_ids = [ id for id,_ in cur.fetchall() ]
return legacy_avatars_ids
except Exception as e:
raise e
# Update legacy item paths in S3 production and clean up
def move_legacy_names_prodS3(connection, src_bucket, dst_bucket):
try:
cur = connection.cursor()
s3 = boto3.resource('s3')
pool = ThreadPool(processes=8)
cpy_list = []
prefix_old = "{}/image/".format(src_bucket)
prefix_new = "{}/avatar/".format(dst_bucket)
src_s3 = s3.Bucket(src_bucket)
for s3_file in src_s3.objects.filter(Prefix=prefix_old).all():
cpy_list.append(s3_file.key)
s3 = boto3.client('s3')
def move_mp(file_key):
new_key = re.sub(r'%s' % prefix_old, '%s' % prefix_new, file_key)
copy_source = {
'Bucket': src_bucket,
'Key': file_key
}
try:
s3.copy_object(CopySource=copy_source, Bucket=dst_bucket, Key=new_key)
except Exception as e:
raise e
old_db_path = S3_LEGACY_ENDPOINT_URL + '/' + file_key
new_db_path = S3_PRODUCTION_ENDPOINT_URL + '/' + new_key
print("UPDATE path SET = '{}' where path like '%{}%'".format(new_db_path,old_db_path))
try:
cur.execute("UPDATE avatars SET path = '{}' where path like '%{}%'".format(new_db_path,old_db_path))
except Exception as e:
raise e
try:
connection.commit()
except Exception as e:
raise e
try:
s3.delete_object(Bucket=src_bucket,Key=file_key)
except Exception as e:
raise e
return new_key
pool.map(move_mp,cpy_list)
except Exception as e:
raise e
def main():
# Connect to rds DB
try:
db_conn = psycopg2.connect(DB_CONN_STRING)
except Exception as e:
raise e
    # Move legacy avatars to production and update DB records
try:
legacy_avatar_ids = get_legacy_db_records(db_conn,
S3_LEGACY_BUCKET_NAME,
S3_PRODUCTION_BUCKET_NAME,
S3_LEGACY_ENDPOINT_URL
+ '/' +
S3_LEGACY_BUCKET_NAME
)
legacy_avatars_count = len(legacy_avatar_ids)
if legacy_avatars_count != 0:
print("There are {} legacy avatars left to migrate".format(legacy_avatars_count))
move_legacy_names_prodS3(db_conn,S3_LEGACY_BUCKET_NAME,S3_PRODUCTION_BUCKET_NAME)
else:
print("All legacy avatars have been already migrated!")
except Exception as e:
raise e
if __name__ == "__main__":
main()
|
Python
|
CL
|
c7e64c6257aadcf8e8e0fa9caffa5c85bb06575e81b95553e2d7d895e2c90f25
|
'''
@author Ben DeMott
@file shapely_geom_ops.py
Geometric Operations that can be performed against Shapely Objects
'''
from shapely.geometry import *
point = Point(2, 2)
polygon = Polygon([[1, 1], [1, 3], [3, 3], [3, 1]])
polygon2 = Polygon([[2, 2], [2, 4], [4, 4], [4, 2]])
# increase the boundary of a point by 3 units
# Second argument is the resolution (number of segments used to approximate a quarter circle)
newPoint = point.buffer(3, 16)
# Compute the distance from the boundary of an object to a point
polygon.distance(point)
# Compute the center of an object
polygon.centroid
# Compute a point inside an object
polygon.representative_point()
# Returns a representation of the boundary of a complex type
# The boundary of a Polygon is a multi-line, the boundary of a line is a
# collection of points
polygon.boundary
# Create a shape from the difference between two shapes
polygon.difference(polygon2)
# Return the region shared by both objects
polygon.intersection(polygon2)
# Return the regions of each object that are not shared with the other
polygon.symmetric_difference(polygon2)
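# A few quick sanity checks (a minimal sketch added for illustration; the numbers
# below follow from the two 2x2 squares defined above)
combined = polygon.union(polygon2)          # merge both squares into a single shape
print(polygon.intersection(polygon2).area)  # 1.0 -- the overlapping 1x1 square
print(combined.area)                        # 7.0 -- 4 + 4 minus the 1.0 overlap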
|
Python
|
CL
|
2287f42847288dca146ed614077aad8da5a206b6bf1149503944e59cc62cbad6
|
#!/usr/bin/env python3
import serial, time, struct
import csv
import math
from operator import itemgetter
import statistics
import sys
#Serial communication protocols for Lidar
Start_Scan = b"\xA5\x20" #Begins scanning
Force_Scan = b"\xA5\x21" #Overrides anything preventing a scan
Health = b"\xA5\x52" #Returns the state of the Lidar
Stop_Scan = b"\xA5\x25" #Stops the scan
RESET = b"\xA5\x40" #Resets the device
Motor = b"\xA5\xF0" #Sets motor speed
MotorFast = b'\xa5\xf0\x02\x94\x02\xc1'
Motor0 = b"\xA5\xF0\x02\x00\x00\x57" #A5F0 0200 0057
Rate = b'\xA5\x59'
##--------------Lidar Serial Communication
def chksm(payload):
cksm = 0
for elem in payload:
cksm ^= elem
return struct.pack('=B',cksm)
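# For example, chksm(b'\xa5\xf0\x02\x94\x02') XORs to b'\xc1', matching the final
# checksum byte of the precomputed MotorFast command above.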
def serialComm(cmd, num= 0):
payload = cmd + chksm(cmd)
ser.write(payload)
print(payload)
reply = b''
for i in range(num):
reply += ser.readline()
if num > 0:
print(reply)
return reply
def translate(payload):
quality = payload[0]>>2
S = payload[0] & 0b1
notS = payload[0] & 0b10
C = payload[1] & 0b1
angle = struct.unpack('<H',payload[1:3])[0]/2/64 # divide by 2 gets rid of C bit
dist = struct.unpack('<H',payload[3:5])[0]/4000
return quality,S,notS,C,angle,dist
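# Worked example (illustrative bytes, not captured from real hardware): the payload
# b'\x3e\x81\x2a\xe8\x03' decodes to quality = 0x3e >> 2 = 15,
# angle = (0x2a81 / 2) / 64 ~= 85.0 degrees and dist = 0x03e8 / 4000 = 0.25 m.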
def GrabPts(num): #Request Lidar to collect "num" number of points
run = 0 #Tracker variable
#Preallocation for the 4 variables
x = [0]*num
y = [0]*num
a = [0]*num
d = [0]*num
#Wiping Lidar buffer
ser.flushInput()
ser.flushOutput()
while run < num: #Run Lidar for desired number of points. Generate all angle, distance, and calculated x and y data
reading = ser.read(5)
try:
(quality, S, notS, C, angle, dist) = translate(reading)
except Exception as e:
print(e)
continue
if quality > 10:
if dist == 0: #Eliminating noise calculated to be at distance (d) = 0
continue
else:
#Append polar coordinates to running variables
a[run] = (angle)
d[run] = (dist)
#Converting from polar to cartesian coordinates
                ang = angle*3.14/180 #degrees to radians
x_calc = dist*math.cos(ang)
y_calc = dist*math.sin(ang)
#Append cartesian coordinates to running variables
x[run] = (x_calc)
y[run] = (y_calc)
run += 1 #Update Run variable
return (x,y,a,d) #Return all variables upon completion
def LidarComm(): #Compact method of initiating communication with Lidar
serialComm(Start_Scan)
while ser.read() != b'\xA5':
time.sleep(0.01)
reply = ser.read(6)
if (reply == b'\x5A\x05\x00\x00\x40\x81'):
print('starting...')
else:
print('incorrect reply')
def SaveData():
name = 'currData' #Adjustable name for temporary csv file
#Saving all data to the temporary .csv file
filename = "/home/robot/" + name + ".csv" #Rename directory address on EV3 as desired
with open(filename, 'w', newline='') as f:
writer = csv.writer(f, delimiter = ',')
writer.writerows(zip(x,y,a,d))
time.sleep(.1)
def opt1(num): #Have lidar collect desired number of points
global x,y,a,d
LidarComm() #Initiating Lidar
(x,y,a,d) = GrabPts(num)
serialComm(Stop_Scan)
ser.read(ser.inWaiting()) #Flush Lidar buffer
SaveData() #Save data to currData.csv
##----------------------------SETUPS----------------------------##
port = "/dev/ttyUSB0" #USB Port for EV3. Rename as necessary
ser = serial.Serial(port, 115200, timeout = 1)
ser.setDTR(False)
#Initial Serial commands
serialComm(RESET,3)
serialComm(Health,1)
serialComm(Rate,1)
#Set Motor speed
speed = 800 #Speed between 1 and 1023
motorSpeed = Motor + b'\x02'+struct.pack('<H',speed) #2 byte payload, little endian of the desired speed
#Assigning arguments received from main script to communicate with Lidar
endComm= int(sys.argv[1])
num = int(sys.argv[2])
##------MAIN LOOP------##
while True:
if endComm == 1: #Turn off lidar and end this script
serialComm(Motor0)
break
elif endComm == 0: #Start up lidar and collect data, then end this script (lidar will keep spinning)
serialComm(motorSpeed)
time.sleep(2) #Spin up period
opt1(num)
print(num, "points have been collected")
break
else:
print("endComm argument was not a valid input")
break
|
Python
|
CL
|
8f71f9ea9ebfef7f9652e28cca5bfaafce3bb50284e1903295be29bc72ef109f
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import funcsigs
import yaml
from collections import OrderedDict
from ..prune import *
from ..quantization import *
from ..distillation import *
from .strategy import *
__all__ = ['ConfigFactory']
"""This factory is used to create instances by loading and parsing configure file with yaml format.
"""
class ConfigFactory(object):
def __init__(self, config):
"""Init a factory from configure file."""
self.instances = {}
self.compress_pass = {}
self.version = None
self._parse_config(config)
def instance(self, name):
"""
Get instance from factory.
"""
if name in self.instances:
return self.instances[name]
else:
return None
def _new_instance(self, name, attrs):
if name not in self.instances:
class_ = globals()[attrs['class']]
sig = funcsigs.signature(class_.__init__)
keys = [
param.name for param in sig.parameters.values()
if (param.kind == param.POSITIONAL_OR_KEYWORD)
][1:]
keys = set(attrs.keys()).intersection(set(keys))
args = {}
for key in keys:
value = attrs[key]
if isinstance(value, str) and value.lower() == 'none':
value = None
if isinstance(value, str) and value in self.instances:
value = self.instances[value]
args[key] = value
self.instances[name] = class_(**args)
return self.instances.get(name)
def _parse_config(self, config):
assert config
with open(config, 'r') as config_file:
key_values = self._ordered_load(config_file)
for key in key_values:
# parse version
if key == 'version' and self.version is None:
self.version = int(key_values['version'])
assert self.version == int(key_values['version'])
                # parse pruners, quantizers, distillers and strategies
if key == 'distillers' or key == 'pruners' or key == 'quantizers' or key == 'strategies':
instances = key_values[key]
for name in instances:
self._new_instance(name, instances[name])
if key == 'compress_pass':
self.compress_pass['strategies'] = []
self.compress_pass['epoch'] = key_values[key]['epoch']
self.compress_pass['model_save_dir'] = key_values[key][
'model_save_dir']
self.compress_pass['init_epoch'] = key_values[key][
'init_epoch']
if 'strategies' in key_values[key]:
for name in key_values[key]['strategies']:
strategy = self.instance(name)
self.compress_pass['strategies'].append(strategy)
if key == 'include':
for config_file in key_values[key]:
self._parse_config(config_file.strip())
def _ordered_load(self,
stream,
Loader=yaml.Loader,
object_pairs_hook=OrderedDict):
"""
See: https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
"""
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
return yaml.load(stream, OrderedLoader)
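# A minimal sketch of the YAML layout this factory expects, inferred from the parsing
# logic above (the class names and values are illustrative assumptions, not taken from
# a real config). Pruners must appear before the strategies that reference them by
# name, which is why the ordered loader above matters:
#
#   version: 1
#   pruners:
#     pruner_1:
#       class: 'StructurePruner'
#   strategies:
#     strategy_1:
#       class: 'SensitivePruneStrategy'
#       pruner: 'pruner_1'
#   compress_pass:
#     epoch: 100
#     init_epoch: 0
#     model_save_dir: './checkpoints'
#     strategies:
#       - strategy_1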
|
Python
|
CL
|
9ca797c584875db637f8dcc3dea0fac0d9f836ee4519f01cb4f30159167e696f
|
#example: python3 MLstarter.py -s SIGforPhoAll.dat -b QCDconstpt.dat -c 0,1,2,3,4,5 -d 1,2,3,4,5,6,7,8,9,10,11,12 -p test -f 0.2
#LOAD LIBRARIES
##-------------------------------------------------------------------------------
from __future__ import print_function
import copy, os, sys, time, logging
import numpy as np
import json
import random
np.random.seed(1560)
import keras
print('using keras version:',keras.__version__)
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, Flatten, Merge
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras import regularizers
from keras import backend as K
from keras.utils import np_utils
from keras.utils import multi_gpu_model
import shuffle as shf
import tensorflow as tf
print('using tensorflow version: ',tf.__version__)
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
import tensorflow as tf
from keras.backend import tensorflow_backend as K
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-s', '--signal', metavar='F', type='string', action='store',
default = 'None',
dest = 'signal',
                  help = 'signal dat file location')
parser.add_option('-b', '--background', metavar='F', type='string', action='store',
default = 'None',
dest = 'background',
                  help = 'background dat file location')
parser.add_option('-m', '--mode', metavar='F', type='string', action='store',
default = 'train',
dest = 'mode',
help = 'what is being performed. train or test')
parser.add_option('-l', '--load', metavar='F', type='string', action='store',
default = 'None',
dest = 'load',
help = 'weights file to preload (.h5 format). This lets you continue training or test pre-trained weights -- None starts fresh.')
parser.add_option('-c', '--colors', metavar='F', type='string', action='store',
default = '0,1',
dest = 'colors',
help = 'csv of color indices to process')
parser.add_option('-d', '--dense', metavar='F', type='string', action='store',
default = '1,2,3,4,5,6,7,8,9,10,11,12',
dest = 'dense',
help = 'csv of dense-layer indices to process')
parser.add_option('-p', '--post', metavar='F', type='string', action='store',
default = '',
dest = 'post',
help = 'string to append to created filenames')
parser.add_option('-g', '--gpus', metavar='F', type='string', action='store',
default = '0',
dest = 'gpus',
help = 'csv of gpus to run on -- 0 or 1 or 0,1')
parser.add_option('-D', '--directory', metavar='F', type='string', action='store',
default = '/cms/knash/EOS/JetImages/kevin/',
dest = 'directory',
help = 'Directory to look for signal+background dat files')
parser.add_option('-e', '--epochs', metavar='F', type='int', action='store',
default = 1000,
dest = 'epochs',
help = 'total number of epochs')
parser.add_option('-f', '--fraction', metavar='F', type='float', action='store',
default = 1.0,
dest = 'fraction',
help = 'fraction of images to process')
parser.add_option('--skipgen', metavar='F', action='store_true',
default=False,
dest='skipgen',
help='skip the train,test,validate set generation (ie if it is already created)')
parser.add_option('-B', '--batchsize', metavar='F', type='int', action='store',
default = 128,
dest = 'batchsize',
help = 'batchsize')
parser.add_option('-L', '--learnrate', metavar='F', type='float', action='store',
default = 0.3,
dest = 'learnrate',
help = 'learnrate')
parser.add_option('-P', '--patience', metavar='F', type='int', action='store',
default = 5,
dest = 'patience',
help = 'patience')
parser.add_option('-C', '--nconv', metavar='F', type='int', action='store',
default = 64,
dest = 'nconv',
help = 'nconv')
parser.add_option('-N', '--ndense', metavar='F', type='int', action='store',
default = 256,
dest = 'ndense',
help = 'ndense')
parser.add_option('-X', '--ldense', metavar='F', type='int', action='store',
default = 3,
dest = 'ldense',
help = 'ldense')
(options, args) = parser.parse_args()
print('Options summary')
print('==================')
for opt,value in options.__dict__.items():
print(str(opt) +': '+ str(value))
print('==================')
os.environ['CUDA_VISIBLE_DEVICES'] = options.gpus
config.gpu_options.per_process_gpu_memory_fraction = 1.0
set_session(tf.Session(config=config))
start_time = time.time()
#np.set_printoptions(threshold=np.nan)
##------------------------------------------------------------------------------
# Global variables
##------------------------------------------------------------------------------
image_array_dir_in = options.directory
signalfilename = options.signal
backgroundfilename = options.background
extrastringarray = signalfilename.split('__')
extrastring = extrastringarray[-1].replace("/","-")
post = options.post
print('Input directory',image_array_dir_in)
name_sg=str('__'.join(signalfilename.split('__')[:2]))
name_bg=str('__'.join(backgroundfilename.split('__')[:2]))
print('Name signal ={}'.format(name_sg))
print('Name background ={}'.format(name_bg))
print('-----------'*10)
#This array refers to the color index (which is inside the first element of the ascii event)
colarray = options.colors.split(',')
for i in range(0,len(colarray)):
colarray[i]=int(colarray[i])
#This array refers to the dense layer index (which starts at the third element of the ascii event)
densearray = options.dense.split(',')
for i in range(0,len(densearray)):
densearray[i]=int(densearray[i])+2
gpuarray = options.gpus.split(',')
for i in range(0,len(gpuarray)):
gpuarray[i]=int(gpuarray[i])
npoints = 38
img_rows, img_cols = npoints-1, npoints-1
my_batch_size = options.batchsize
num_classes = 2
epochs = int(options.epochs)
sample_relative_size = float(options.fraction)
mode = options.mode
ncolors = len(colarray)
ndense = len(densearray)
learning_rate = [options.learnrate]
if len(gpuarray)==0:
logging.error('No GPUs specified')
sys.exit()
if ndense==0:
logging.error('No dense layer indices')
sys.exit()
if ncolors==0:
logging.error('No colors')
sys.exit()
if options.fraction<0.0 or options.fraction>1.0:
logging.error('Fraction out of range '+str(options.fraction))
sys.exit()
if options.mode not in ['train','test']:
logging.error('Invalid mode '+options.mode)
sys.exit()
if options.signal=='None' or options.background=='None':
logging.error('Need to specify both a signal and a background input file')
sys.exit()
##------------------------------------------------------------------------------
#FUNCTIONS
##------------------------------------------------------------------------------
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, 'rb') as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def load_array(Array):
print('Loading signal and background arrays ...')
print('-----------'*10)
data=np.load(image_array_dir_in+Array)
print(type(data))
return data
def expand_array(images):
Nimages=len(images)
expandedimages=np.zeros((Nimages,img_rows,img_cols,ncolors))
for i in range(Nimages):
npart = len(images[i])
for j in range(npart):
for nn in range(ncolors):
expandedimages[i,images[i][j][0][0],images[i][j][0][1]][nn] = images[i][j][1][colarray[nn]]
expandedimages=expandedimages.reshape(Nimages,img_rows,img_cols,ncolors)
return expandedimages
def prepare_keras(xlist,ylist):
yforkeras = keras.utils.to_categorical(ylist, num_classes)
xarray = np.array(xlist)
yarray = np.array(yforkeras)
print(xarray.shape)
return xarray,yarray
class DataGenerator(object):
print('Generates data for Keras')
def __init__(self, dim_x = img_rows, dim_y = img_cols, batch_size = my_batch_size):
self.dim_x = dim_x
self.dim_y = dim_y
self.batch_size = batch_size
def generate(self, N_train):
while True:
print('Number of training images:',N_train)
imax=int(N_train/self.batch_size)
print('Number of minibatches =',imax)
print('\n'+'-----------'*10)
print('////////////'*10)
traingenerator=(json.loads(s) for s in open(trainfilename))
for i in range(imax):
x_val=[]
y_val=[]
z_val=[]
for ijet in range(self.batch_size):
xy=next(traingenerator)
x_val.append(xy[0])
y_val.append(xy[1])
z_val.append([])
for iden in densearray:
z_val[-1].append(xy[iden])
y_val=keras.utils.to_categorical(y_val, num_classes)
x_val=np.array(x_val)
y_val=np.array(y_val)
z_val=np.array(z_val)
images=expand_array(x_val)
yield [images,z_val], y_val
def valgenerate(self, N_val):
while True:
print('Number of validation images:',N_val)
imax=int(N_val/self.batch_size)
print('\n'+'-----------'*10)
print('////////////'*10)
valgenerator=(json.loads(s) for s in open(valfilename))
for i in range(imax):
x_val=[]
y_val=[]
z_val=[]
for ijet in range(self.batch_size):
xy=next(valgenerator)
x_val.append(xy[0])
y_val.append(xy[1])
z_val.append([])
for iden in densearray:
z_val[-1].append(xy[iden])
y_val=keras.utils.to_categorical(y_val, num_classes)
x_val=np.array(x_val)
y_val=np.array(y_val)
z_val=np.array(z_val)
images=expand_array(x_val)
yield [images,z_val], y_val
##------------------------------------------------------------------------------
# DEFINE THE MODEL ARCHITECTURE
##------------------------------------------------------------------------------
input_shape_c = Input(shape=(img_rows, img_cols, ncolors))
input_shape_btag = Input(shape=(ndense,))
devstr = '/cpu:0'
if len(gpuarray)==1:
devstr = '/gpu:0'
nconv = options.nconv
with tf.device(devstr):
conv = Conv2D(2*nconv, kernel_size=(4,4),activation='relu',name='Conv1')
layers = conv(input_shape_c)
layers = ZeroPadding2D(padding=(1, 1))(layers)
layers = Conv2D(nconv, kernel_size=(4,4), activation='relu',name='Conv2')(layers)
layers = MaxPooling2D(pool_size=(2, 2))(layers)
layers = ZeroPadding2D(padding=(1, 1))(layers)
layers = Conv2D(nconv, kernel_size=(4,4), activation='relu',name='Conv3')(layers)
layers = ZeroPadding2D(padding=(1, 1))(layers)
layers = Conv2D(nconv, kernel_size=(4,4), activation='relu',name='Conv4')(layers)
layers = MaxPooling2D(pool_size=(2, 2))(layers)
layers = Flatten()(layers)
layers = Dense(nconv,activation='relu',name='Dense00')(layers)
layersbtag = Dense(nconv,activation='relu',name='Dense10')(input_shape_btag)
model_12 = keras.layers.concatenate([layers, layersbtag])
for jjj in range(0,options.ldense):model_12 = Dense(options.ndense, activation='relu',name='Dense2'+str(jjj))(model_12)
finalmodel = Dense(num_classes, activation = 'softmax',name='Final')(model_12)
Adadelta=keras.optimizers.Adadelta(lr=learning_rate[0], rho=0.95, epsilon=1e-08, decay=0.0)
model = Model([input_shape_c, input_shape_btag], finalmodel)
#multi gpu support in testing -- need to do it this way in order to save weights
if len(gpuarray)>1:
modeltr = multi_gpu_model(model, gpus=len(gpuarray))
modeltr.compile(loss=keras.losses.categorical_crossentropy,
optimizer=Adadelta,
metrics=['categorical_accuracy'])
modeltr.summary()
else:
modeltr = model
modeltr.compile(loss=keras.losses.categorical_crossentropy,
optimizer=Adadelta,
metrics=['categorical_accuracy'])
modeltr.summary()
class TimingCallback(keras.callbacks.Callback):
def __init__(self):
self.logs=[]
def on_epoch_begin(self,epoch, logs={}):
self.starttime=time.time()
def on_epoch_end(self,epoch, logs={}):
self.logs.append(time.time()-self.starttime)
class LossHistory(keras.callbacks.Callback):
def __init__(self, model):
self.model_to_save = model
def on_train_begin(self, logs={}):
        self.loss = [1000000.] #Initial value of the training loss
        self.acc = [1000000.] #Initial value of the training accuracy
        self.val_loss = [1000000.] #Initial value of the validation loss
        self.val_acc = [1000000.] #Initial value of the validation accuracy
def on_epoch_end(self, epoch, logs={}):
        self.loss.append(logs.get('loss')) # Append the training loss of the last epoch
        self.acc.append(logs.get('acc')) # Append the training accuracy of the last epoch
        self.val_loss.append(logs.get('val_loss')) # Append the validation loss of the last epoch
        self.val_acc.append(logs.get('val_acc')) # Append the validation accuracy of the last epoch
def step_decay(losses):
if len(history.val_loss)>=2 and float(np.array(history.val_loss[-2])-np.array(history.val_loss[-1]))<0.0005:
lrate=learning_rate[-1]/np.sqrt(2)
learning_rate.append(lrate)
else:
lrate=learning_rate[-1]
if len(history.val_loss)>=2:
print('\n loss[-2] = ',np.array(history.val_loss[-2]))
print('\n loss[-1] = ',np.array(history.val_loss[-1]))
print('\n loss[-2] - loss[-1] = ',float(np.array(history.val_loss[-2])-np.array(history.val_loss[-1])))
print('\n Learning rate =',lrate)
print('------------'*10)
return lrate
history = LossHistory(modeltr)
lrate = keras.callbacks.LearningRateScheduler(step_decay)
# Get new learning rate
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0002, patience=options.patience, verbose=0, mode='auto')
# patience -- training stops if the monitored validation loss does not improve by more than min_delta within the given number of consecutive epochs
checkpoint = keras.callbacks.ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True, mode='auto', period=1)
cb = TimingCallback()
##------------------------------------------------------------------------------
# TRAIN THE MODEL OR LOAD TRAINED WEIGHTS
##------------------------------------------------------------------------------
weights_dir = 'weights/'
os.system('mkdir -p '+weights_dir)
print('Getting the length of the signal and background files')
Nsignal = 0
for s in open(image_array_dir_in+signalfilename):
Nsignal = Nsignal+1
Nbackground = 0
for s in open(image_array_dir_in+backgroundfilename):
Nbackground = Nbackground+1
print('total number of signal jets:',Nsignal)
print('total number of background jets:',Nbackground)
Njets=min([Nsignal,Nbackground])
print('Njets',Njets)
train_frac_rel=0.6
val_frac_rel=0.2
test_frac_rel=0.2
Ntrain=int(train_frac_rel*Njets*sample_relative_size)
Nval=int(val_frac_rel*Njets*sample_relative_size)
Ntest=int(test_frac_rel*Njets*sample_relative_size)
print('Size of training set:',2*Ntrain)
print('Size of validation set:',2*Nval)
print('Size of test set:',2*Ntest)
trainfilename='train_sample_'+str(Ntrain)+'_'+str(Nval)+'_'+str(Ntest)+'.dat'
valfilename='validation_sample_'+str(Ntrain)+'_'+str(Nval)+'_'+str(Ntest)+'.dat'
testfilename='test_sample_'+str(Ntrain)+'_'+str(Nval)+'_'+str(Ntest)+'.dat'
savename = 'epochs_'+str(epochs)+'_Ntrain_'+str(Ntrain)+'_'+name_sg.replace('.dat','_')+name_bg.replace('.dat','_')+post
savename = savename.replace("/","-")
if not options.skipgen:
shufobj = shf.pyshuffle(image_array_dir_in+signalfilename,image_array_dir_in+backgroundfilename,str(Ntrain),str(Nval),str(Ntest))
shufobj.run()
print('------------'*10)
print('running shuffle')
print('------------'*10)
os.system('/localhome/knash/terashuf/terashuf < ' + trainfilename + ' > shuffled_'+trainfilename)
os.system('mv ' + 'shuffled_'+trainfilename+' '+ trainfilename)
Ntrain=2*Ntrain
Nval=2*Nval
Ntest=2*Ntest
if options.load!='None':
my_weights=options.load
WEIGHTS_FNAME=my_weights
if os.path.exists(WEIGHTS_FNAME):
print('------------'*10)
print('Loading existing weights',WEIGHTS_FNAME)
print('------------'*10)
modeltr.load_weights(WEIGHTS_FNAME)
print('done')
savename+="_loadw"
else:
print('Weight file not found')
##------------------------------------------------------------------------------
# TRAIN THE MODEL
##------------------------------------------------------------------------------
saveweightname=weights_dir+'cnn_weights_'+savename+'.hdf'
if mode=='train':
train_x_train_y = DataGenerator().generate(Ntrain)
val_x_val_y = DataGenerator().valgenerate(Nval)
my_steps_per_epoch = int(Ntrain/my_batch_size)
print('my_steps_per_epoch =',my_steps_per_epoch)
valsteps = int(Nval/my_batch_size)
print(valsteps)
modeltr.fit_generator(generator = train_x_train_y,
steps_per_epoch = my_steps_per_epoch,
epochs = epochs,
verbose = 1,
validation_data = val_x_val_y,
validation_steps = valsteps,
callbacks = [history,lrate,early_stop,checkpoint,cb])
print(cb.logs)
print('------------'*10)
print('Weights filename =',saveweightname)
print('------------'*10)
if len(gpuarray)>1:
model.save_weights(saveweightname, overwrite=True)
else:
modeltr.save_weights(saveweightname, overwrite=True)
print('------------'*10)
##------------------------------------------------------------------------------
# ANALYZE RESULTS
##------------------------------------------------------------------------------
#LOAD LIBRARIES
import sklearn
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
import pandas as pd
print('Computing test accuracy and ROC curve...')
##------------------------------------------------------------------------------
# PLOT DATA
##------------------------------------------------------------------------------
ROC_plots_dir = 'analysis/ROC/'
os.system('mkdir -p '+ROC_plots_dir)
##------------------------------------------------------------------------------
# PREDICT OUTPUT PROBABILITIES
##------------------------------------------------------------------------------
tempgenerator=(json.loads(s) for s in open(testfilename))
tempbatchsize=min([10000,Ntest])
nbatches=int(Ntest/tempbatchsize)
Y_Pred_prob= np.empty((0, 2))
y_test=np.empty((0, 2))
Ntesttry=0
for ibatch in range(nbatches+1):
x_test_batch=[]
y_test_batch=[]
z_test_batch=[]
ijetmax=min([tempbatchsize,Ntest-Ntesttry])
if(ijetmax>0):
for ijet in range(ijetmax):
Ntesttry+=1
xy=next(tempgenerator)
x_test_batch.append(xy[0])
y_test_batch.append(xy[1])
z_test_batch.append([])
for iden in densearray:
z_test_batch[-1].append(xy[iden])
x_test_batch,y_test_batch=prepare_keras(x_test_batch,y_test_batch)
testimages=expand_array(x_test_batch)
z_test_batch=np.array(z_test_batch)
Y_Pred_batch=model.predict([testimages,z_test_batch])
Y_Pred_prob=np.concatenate((Y_Pred_prob,Y_Pred_batch))
y_test=np.concatenate((y_test,y_test_batch))
print('begin printing CNN output')
print('------------'*10)
ypredfile=open('ypred.dat','w')
print('------------'*10)
difflist=[(1-int(x[0][0]/0.5))-x[1][0] for x in zip(Y_Pred_prob,y_test)]
print('Test accuracy = ',float(np.count_nonzero(np.array(difflist)))/float(Ntesttry) )
# Predict output probability for each class (signal or background) for the image
y_Pred = np.argmax(Y_Pred_prob, axis=1)
y_Test = np.argmax(y_test, axis=1)
print('Predicted output from the CNN (0 is signal and 1 is background) = \n',y_Pred[0:15])
print('y_Test (True value) =\n ',y_Test[0:15])
print('y_Test length', len(y_Test))
print('------------'*10)
#Print classification report
print(classification_report(y_Test, y_Pred))
print('------------'*10)
# Calculate a single probability of tagging the image as signal
out_prob=[]
for i_prob in range(len(Y_Pred_prob)):
out_prob.append((Y_Pred_prob[i_prob][0]-Y_Pred_prob[i_prob][1]+1)/2)
print('Predicted probability of each output neuron = \n',Y_Pred_prob[0:15])
print('------------'*10)
print('Output of tagging image as signal = \n',np.array(out_prob)[0:15])
print('------------'*10)
np.savetxt('outprob.csv', np.array(out_prob), delimiter=',')
np.savetxt('inprob.csv', np.array(y_test), delimiter=',')
# Make ROC with area under the curve plot
def generate_results(y_test, y_score):
fpr, tpr, thresholds = roc_curve(y_test, y_score,pos_label=0, drop_intermediate=False)
print('Thresholds[0:6] = \n',thresholds[:6])
    print('Thresholds length = \n',len(thresholds))
    print('fpr length',len(fpr))
    print('tpr length',len(tpr))
rocnums=list(zip(fpr,tpr))
rocout=open(ROC_plots_dir+'roc_'+savename+'.csv','wb')
np.savetxt(rocout,rocnums,fmt='%10.5g',delimiter=',')
print('------------'*10)
roc_auc = auc(fpr, tpr)
print('AUC =', np.float128(roc_auc))
print('------------'*10)
generate_results(y_Test, out_prob)
print('FINISHED.')
print('Saving model',weights_dir+'model_'+savename+'.h5')
if len(gpuarray)>1:
model.save(weights_dir+'model_'+savename+'.h5')
else:
modeltr.save(weights_dir+'model_'+savename+'.h5')
print('-----------'*10)
print('Code execution time = %s minutes' % ((time.time() - start_time)/60))
print('-----------'*10)
del tf.Session
|
Python
|
CL
|
2f63b8bc7cfd290e391d655d42c740c6ba1aeb4c886a23054a4c49788f7a3803
|
# -*- coding: utf-8 -*-
from __future__ import division
"""Documentation at https://github.com/oTree-org/otree/wiki"""
from otree.db import models
import otree.models
from otree.common import money_range
from otree import widgets
doc = """
Kaushik Basu's famous traveller's dilemma (<a href="http://www.jstor.org/stable/2117865" target="_blank">AER 1994</a>).
It is a 2-player game.
The game is framed as a traveller's dilemma and intended for classroom/teaching use.
<br />
Source code <a href="https://github.com/oTree-org/oTree/tree/master/traveler_dilemma" target="_blank">here</a>.
"""
class Subsession(otree.models.BaseSubsession):
name_in_url = 'traveler_dilemma'
reward = models.MoneyField(default=0.10,
doc="""Player's reward for the lowest claim""")
penalty = models.MoneyField(default=0.10,
doc="""Player's deduction for the higher claim""")
max_amount = models.MoneyField(default=1.00,
doc="""The maximum claim to be requested""")
min_amount = models.MoneyField(default=0.20,
doc="""The minimum claim to be requested""")
class Group(otree.models.BaseGroup):
# <built-in>
subsession = models.ForeignKey(Subsession)
# </built-in>
players_per_group = 2
class Player(otree.models.BasePlayer):
# <built-in>
group = models.ForeignKey(Group, null=True)
subsession = models.ForeignKey(Subsession)
# </built-in>
# claim by player
claim = models.MoneyField(
default=None,
doc="""
Each player's claim
"""
)
def claim_choices(self):
"""Range of allowed claim values"""
return money_range(self.subsession.min_amount, self.subsession.max_amount, 0.05)
def other_player(self):
return self.other_players_in_group()[0]
def set_payoff(self):
if self.claim < self.other_player().claim:
self.payoff = self.claim + self.subsession.reward
elif self.claim > self.other_player().claim:
self.payoff = self.other_player().claim - self.subsession.penalty
else:
self.payoff = self.claim
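# Worked example of set_payoff with the default reward/penalty of 0.10 (illustrative
# numbers, not part of the original module): if one player claims 0.50 and the other
# claims 0.80, the lower claimant earns 0.50 + 0.10 = 0.60, the higher claimant earns
# 0.50 - 0.10 = 0.40, and if both claim 0.80 each simply earns 0.80.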
|
Python
|
CL
|
fd0b9e7be81b372be3bc27338c94dfe51a0cac0517c110ae440a58ceefcf3005
|
1
/rosout
/rosout_agg
the purpose of topic /rosout is to convey console log messages to the rosout node; it is similar to cout, a standard output.
the purpose of topic /rosout_agg is to convey the aggregated log messages republished by the rosout node
2
three new topics are added
/turtle1/cmd_vel
/turtle1/color_sensor
/turtle1/pose
using rostopic info, we could conclude
/turtle1/cmd_vel carries the Twist message (linear and angular velocity) from geometry_msgs.
/turtle1/color_sensor carries the color info from the turtlesim node
/turtle1/pose carries the pose message from turtlesim
3
using the command rosmsg show we could know
the /turtle1/cmd_vel message type is geometry_msgs/Twist. In detail it contains two vectors
geometry_msgs/Vector3 linear
float64 x
float64 y
float64 z
geometry_msgs/Vector3 angular
float64 x
float64 y
float64 z
that is, the requested linear velocity and angular velocity of the target turtle
/turtle1/pose message type is turtlesim/Pose. in detail it contains
float32 x
float32 y
float32 theta
float32 linear_velocity
float32 angular_velocity
that is, the velocity and location of the current turtle.
4
/turtle1/cmd_vel in the current condition has no publisher; the /turtlesim node subscribes to it.
/turtle1/pose in the current condition has /turtlesim as publisher and no subscriber.
5
by typing rosservice list, we could know these services are running:
/clear
/kill
/reset
/rosout/get_loggers
/rosout/set_logger_level
/spawn
/turtle1/set_pen
/turtle1/teleport_absolute
/turtle1/teleport_relative
/turtlesim/get_loggers
/turtlesim/set_logger_level
By typing rosservice info we could know that /turtle1/teleport_absolute can be used to move the turtle instantly
6
rostopic find geometry_msgs/Twist
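A minimal rospy sketch (my own illustration, not part of the exercise answers above) that publishes a Twist on /turtle1/cmd_vel and prints the pose reported on /turtle1/pose; node name and velocity values are arbitrary:

#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose

def pose_cb(msg):
    # report the turtle's current position and heading
    rospy.loginfo("x=%.2f y=%.2f theta=%.2f", msg.x, msg.y, msg.theta)

rospy.init_node('turtle_driver')
rospy.Subscriber('/turtle1/pose', Pose, pose_cb)
pub = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
    cmd = Twist()
    cmd.linear.x = 1.0   # requested forward velocity
    cmd.angular.z = 0.5  # requested turn rate
    pub.publish(cmd)
    rate.sleep()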
|
Python
|
CL
|
4a632e5fd5059796581570f7e9df2541a38bade0c6f3c68a457036c5241959f3
|
import tensorflow as tf
import numpy as np
from tensorflow.python.tools import inspect_checkpoint as chkp
from tf_tutorial_dataset_cap import IPI_CIFAR_Dataset
from tf_tutorial_basic_module import conv_relu, conv_relu_maxpool
# instead of using randomly generated data, here we will use real data (CIFAR)
# necessary methods are wrapped in the class IPI_CIFAR_Dataset.
dataset = IPI_CIFAR_Dataset()
# start to define the network which is used for classification
#build 3 conv-relu-maxpool layers. The 3rd layer without pooling
"""
-------------------------------------------------------------------------------
define the graph
"""
g1 = tf.Graph()
with g1.as_default():
# placeholder for the network input
input_img = tf.placeholder(tf.float32,shape=(None,32,32,3))
labels = tf.placeholder(tf.float32,shape=(None,10))
with tf.variable_scope("1st-layer"):
output_1 = conv_relu_maxpool(input_img,[5,5,3,10],[10])
with tf.variable_scope("2nd-layer"):
output_2 = conv_relu_maxpool(output_1,[5,5,10,20],[20])
with tf.variable_scope("3rd-layer"):
output_3 = conv_relu(output_2,[5,5,20,50],[50])
#build 2 fully connected layers
output_3 = tf.reshape(tf.squeeze(output_3),[-1,50])
fc_1 = tf.layers.dense(output_3, units=200, activation=tf.nn.relu)
fc_2 = tf.layers.dense(fc_1, units=10, activation=tf.nn.relu)
# predictions and losses of the network
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=fc_2, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(fc_2, name="softmax_tensor")
}
#define loss of the network
loss = tf.losses.softmax_cross_entropy(labels, fc_2)
tf.summary.scalar("loss",loss)
    #define the training optimizer; it automatically does the forward and backward prop.
    # however, you can also compute the gradients and apply them yourself.
# train_op = tf.train.AdagradOptimizer(0.001).minimize(loss)
# the second way of processing the gradients
optimizer = tf.train.AdagradOptimizer(0.0001)
#variables created before were automatically added to trainable_variables...
grad_var_list = optimizer.compute_gradients(loss, tf.trainable_variables())
#do some operation to calculated gradients...
#clip large gradients
new_grads_and_vars = []
for idx, (grad, var) in enumerate(grad_var_list):
grad = tf.clip_by_norm(grad, 50)
new_grads_and_vars.append((grad, var))
grad_var_list = new_grads_and_vars
#summary gradients for tensorboard
for grad, var in grad_var_list:
tf.summary.histogram(var.name + '/gradient', grad)
tf.summary.histogram(var.op.name, var)
optim_selfbuild = optimizer.apply_gradients(grad_var_list)
#add operations to save and restore all variables in graph
# passing a tensor list for selected tensor to save is also possible
saver = tf.train.Saver()
merge_summary_op = tf.summary.merge_all()
decoded_one_hot_label = tf.argmax(labels, axis=1)
correct = tf.nn.in_top_k(fc_2, decoded_one_hot_label, 1)
eval_correct = tf.reduce_sum(tf.cast(correct, tf.int32))
"""
-------------------------------------------------------------------------------
"""
"""
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
run graph in a session with CIFAR Data
"""
with tf.Session(graph=g1) as sess:
writer = tf.summary.FileWriter("/tmp/log/", sess.graph)
    #if there are pretrained weights, restore them; otherwise train from scratch
latest_checkpoint = tf.train.latest_checkpoint('/tmp/log/')
if latest_checkpoint is not None:
saver.restore(sess, latest_checkpoint)
print('loaded weights from pretrained model')
else:
print('train from scratch')
sess.run(tf.global_variables_initializer())
best_validation_prec = 0.0
for i in range(10000):
        data_batch, label_batch = dataset.next_batch(128, mode='train')
        feed_dict = {input_img: data_batch, labels: label_batch}
if i%10 == 0:
#summary every 10 steps
summary, train = sess.run([merge_summary_op, optim_selfbuild], feed_dict)
writer.add_summary(summary,i)
writer.flush()
print("loss in %5d th step: %.4f" % (i,loss.eval(feed_dict)))
# do validation every 50 steps
if i%200 == 0:
valid_batch, valid_label = dataset.next_batch(2000, mode='valid')
feed_dict_val={input_img:valid_batch, labels:valid_label}
fetch_valid = {"loss":loss,
"eval_correct":eval_correct}
res_valid = sess.run(fetch_valid, feed_dict_val)
# add validation loss and precision into summary
summary_valid_loss = [tf.Summary.Value(
tag="validation/loss",
simple_value=res_valid["loss"],
)]
summary_valid_prec = [tf.Summary.Value(
tag="validation/precision",
simple_value=res_valid["eval_correct"]/2000,
)]
writer.add_summary(tf.Summary(value=summary_valid_loss),i)
writer.add_summary(tf.Summary(value=summary_valid_prec),i)
writer.flush()
print("valid precision in %5d th step: %.4f"
% (i, res_valid["eval_correct"]/2000))
# check whether it is better than the best validated trained model
if res_valid["eval_correct"]/2000 > best_validation_prec:
# save best validated training result
save_path = saver.save(sess, "/tmp/log/model.ckpt")
print("best validated model saved in path: %s" % save_path)
best_validation_prec = res_valid["eval_correct"]/2000
else:
sess.run(optim_selfbuild, feed_dict)
"""
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""
|
Python
|
CL
|
ba2fe8f9e924a0798db86c00b554a913e5ec2db1ed9c45bb61ee2f652d31893f
|
__author__ = 'abdul'
import traceback
import multiprocessing
import logging
import time
from mbs import mbs_logging
from mbs import persistence
from mbs.mbs import get_mbs
from mbs.date_utils import date_now, date_minus_seconds
from mbs.schedule import Schedule
from mbs.globals import State, EventType
from mbs.target import CloudBlockStorageSnapshotReference
from robustify.robustify import robustify
from mbs.errors import (
raise_if_not_retriable, raise_exception, BackupSweepError)
from mbs.utils import document_pretty_string
from mbs.notification.handler import NotificationPriority, NotificationType
###############################################################################
# LOGGER
###############################################################################
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
###############################################################################
# BackupSweeper
###############################################################################
DEFAULT_SWEEP_SCHEDULE = Schedule(frequency_in_seconds=12 * 60 * 60)
DEFAULT_DELETE_DELAY_IN_SECONDS = 5 * 24 * 60 * 60 # 5 days
class BackupSweeper(multiprocessing.Process):
"""
    A process that periodically deletes backup targets that
    are due for deletion
"""
###########################################################################
def __init__(self):
multiprocessing.Process.__init__(self)
self._test_mode = False
self._delete_delay_in_seconds = DEFAULT_DELETE_DELAY_IN_SECONDS
self._schedule = DEFAULT_SWEEP_SCHEDULE
self._worker_count = 0
self._sweep_workers = None
self._cycle_total_processed = 0
self._cycle_total_deleted = 0
self._cycle_total_errored = 0
###########################################################################
@property
def schedule(self):
return self._schedule
@schedule.setter
def schedule(self, val):
self._schedule = val
###########################################################################
def run(self):
self._setup_logging()
self._sweep_queue = multiprocessing.JoinableQueue()
# create an inlined schedule runner
while True:
next_occurrence = self.schedule.next_natural_occurrence()
while date_now() < next_occurrence:
time.sleep(1)
self.tick()
###########################################################################
def _setup_logging(self):
logging.getLogger().handlers = []
mbs_logging.setup_logging(False, "sweeper.log")
mbs_logging.redirect_std_to_logger()
###########################################################################
@property
def test_mode(self):
return self._test_mode
@test_mode.setter
def test_mode(self, val):
self._test_mode = val
###########################################################################
@property
def delete_delay_in_seconds(self):
return self._delete_delay_in_seconds
@delete_delay_in_seconds.setter
def delete_delay_in_seconds(self, val):
self._delete_delay_in_seconds = val
###########################################################################
def tick(self):
try:
self._delete_backups_targets_due()
except Exception, ex:
logger.exception("BackupSweeper Error")
subject = "BackupSweeper Error"
message = ("BackupSweeper Error!.\n\nStack Trace:\n%s" %
traceback.format_exc())
get_mbs().notifications.send_notification(subject, message, notification_type=NotificationType.EVENT,
priority=NotificationPriority.CRITICAL)
###########################################################################
def _delete_backups_targets_due(self):
logger.info("BackupSweeper: Starting a sweep cycle...")
# clear stats
self._cycle_total_processed = 0
self._cycle_total_errored = 0
self._cycle_total_deleted = 0
# compute # of workers based on cpu count
self._worker_count = multiprocessing.cpu_count() * 2 + 1
self._sweep_workers = []
self._start_workers()
if self.test_mode:
logger.info("BackupSweeper: Running in TEST MODE. Nothing will"
" be really deleted")
logger.info("BackupSweeper: Finding all backups"
" due for deletion")
q = self._check_to_delete_query()
logger.info("BackupSweeper: Executing query :\n%s" %
document_pretty_string(q))
backups_iter = get_mbs().backup_collection.find_iter(query=q, no_cursor_timeout=True)
backups_iterated = 0
# process all plan backups
for backup in backups_iter:
self._sweep_queue.put(backup)
backups_iterated += 1
# PERFORMANCE OPTIMIZATION
            # process at most 10 * worker_count backups before draining the queue
            # This is needed because making backup objects (from within the backups_iter) takes up a lot of CPU/memory,
            # so we pause here to let the workers catch up
if backups_iterated % (self._worker_count * 10) == 0:
self._wait_for_queue_to_be_empty()
self._finish_cycle()
logger.info("BackupSweeper: Finished sweep cycle. "
"Total Deleted=%s, Total Errored=%s, "
"Total Processed=%s" %
(self._cycle_total_deleted,
self._cycle_total_errored,
self._cycle_total_processed))
###########################################################################
def _start_workers(self):
for i in xrange(self._worker_count):
sweep_worker = SweepWorker(self, self._sweep_queue)
self._sweep_workers.append(sweep_worker)
sweep_worker.start()
###########################################################################
def _finish_cycle(self):
self._wait_for_queue_to_be_empty()
self._stop_and_wait_for_all_workers_to_finish()
###########################################################################
def _wait_for_queue_to_be_empty(self):
self._sweep_queue.join()
###########################################################################
def _stop_and_wait_for_all_workers_to_finish(self):
# request stop
for i in xrange(self._worker_count):
# put a None for each worker to stop
self._sweep_queue.put(None)
# join and gather stats
for worker in self._sweep_workers:
worker.join()
self._cycle_total_processed += worker.total_processed
self._cycle_total_deleted += worker.total_deleted
self._cycle_total_errored += worker.total_errored
###########################################################################
def _check_to_delete_query(self):
"""
        We only delete backups whose expiration date is older than the
        configured delete delay (five days by default). This is just to make
        sure that if the expiration monitor screws up we would still have
        time to see what happened
"""
q = {
"expiredDate": {
"$lt": self.max_expire_date_to_delete()
},
"deletedDate": None
}
return q
###########################################################################
def delete_backup_targets(self, backup):
logger.info("Attempt to delete targets for backup '%s'" % backup.id)
self.validate_backup_target_delete(backup)
try:
if not self.test_mode:
self.robustified_delete_backup(backup)
return True
else:
logger.info("NOOP. Running in test mode. Not deleting "
"targets for backup '%s'" % backup.id)
except Exception, e:
msg = "Error while attempting to delete backup '%s': %s" % (backup.id, e)
logger.exception(msg)
get_mbs().notifications.send_notification("Backup Delete Error",
msg, notification_type=NotificationType.EVENT,
priority=NotificationPriority.CRITICAL)
###########################################################################
def validate_backup_target_delete(self, backup):
logger.info("Validating delete of backup '%s'. startDate='%s',"
" expiredDate='%s' ..." % (backup.id, backup.start_date,
backup.expired_date))
if not backup.expired_date:
raise BackupSweepError(
"Bad target delete attempt for backup '%s'. Backup has "
"not expired yet" % backup.id)
cutoff_date = self.max_expire_date_to_delete()
if backup.expired_date > cutoff_date:
msg = ("Bad target delete attempt for backup '%s'. Backup expired"
" date '%s' is not before max expire date to delete '%s'" %
(backup.id, backup.expired_date, cutoff_date))
raise BackupSweepError(msg)
logger.info("Validation succeeded. Backup '%s' good to be deleted" %
backup.id)
###########################################################################
def max_expire_date_to_delete(self):
return date_minus_seconds(date_now(), self.delete_delay_in_seconds)
###############################################################################
# EXPIRE/DELETE BACKUP HELPERS
###############################################################################
@robustify(max_attempts=3, retry_interval=5,
do_on_exception=raise_if_not_retriable,
do_on_failure=raise_exception)
def robustified_delete_backup(self, backup):
"""
deletes the backup targets
"""
# do some validation,
target_ref = backup.target_reference
if backup.state == State.SUCCEEDED and not target_ref:
raise BackupSweepError("Cannot delete backup '%s'. "
"Backup never uploaded" % backup.id)
logger.info("Deleting target references for backup '%s'." % backup.id)
logger.info("Deleting primary target reference for backup '%s'." %
backup.id)
# target ref can be None for CANCELED backups
if target_ref:
self.do_delete_target_ref(backup, backup.target, target_ref)
# delete log file
if backup.log_target_reference:
logger.info("Deleting log target reference for backup '%s'." %
backup.id)
self.do_delete_target_ref(backup, backup.target, backup.log_target_reference)
if backup.secondary_target_references:
logger.info("Deleting secondary target references for backup '%s'." %
backup.id)
sec_targets = backup.secondary_targets
sec_target_refs = backup.secondary_target_references
for (sec_target, sec_tgt_ref) in zip(sec_targets, sec_target_refs):
logger.info("Deleting secondary target reference %s for backup "
"'%s'." % (sec_tgt_ref, backup.id))
self.do_delete_target_ref(backup, sec_target, sec_tgt_ref)
# set deleted date
backup.deleted_date = date_now()
update_props = ["deletedDate", "targetReference",
"secondaryTargetReferences"]
persistence.update_backup(backup, properties=update_props,
event_name="DELETING",
message="Deleting target references")
logger.info("Backup %s target references deleted successfully!" %
backup.id)
###############################################################################
def do_delete_target_ref(self, backup, target, target_ref):
if target_ref.preserve:
logger.info("Skipping deletion for target ref %s (backup '%s') because"
" it is preserved" % (target_ref, backup.id))
return
try:
target_ref.deleted_date = date_now()
# if the target reference is a cloud storage one then make the cloud
# storage object take care of it
if isinstance(target_ref, CloudBlockStorageSnapshotReference):
logger.info("Deleting backup '%s' snapshot " % backup.id)
return target_ref.cloud_block_storage.delete_snapshot(target_ref)
else:
logger.info("Deleting backup '%s file" % backup.id)
return target.delete_file(target_ref)
except Exception as e:
if self.is_whitelisted_target_delete_error(backup, target, target_ref, e):
msg = ("Caught a whitelisted error while attempting to delete backup %s."
" Marking backup as deleted. Error: %s" % (backup.id, e))
logger.warn(msg)
persistence.update_backup(backup,
event_name="WHITELIST_DELETE_ERROR",
message=msg,
event_type=EventType.WARNING)
return False
else:
# raise error
raise
###############################################################################
def is_whitelisted_target_delete_error(self, backup, target, target_ref, e):
return False
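    # --- Illustrative sketch (editor addition, not part of the original module) ---
    # The hook above is a deliberate no-op; a subclass could override it so that
    # "already gone" errors from the storage backend are treated as benign and the
    # backup still gets marked deleted. The class name and the message check below
    # are assumptions for illustration only, not the project's API:
    #
    # class LenientSweeper(ParentSweeperClass):
    #     def is_whitelisted_target_delete_error(self, backup, target, target_ref, e):
    #         # assume a missing snapshot/file means it was already cleaned up
    #         return "not found" in str(e).lower()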
###############################################################################
class SweepWorker(multiprocessing.Process):
"""
    A worker process that consumes backups from the sweep queue and deletes their targets
"""
###########################################################################
def __init__(self, backup_sweeper, sweep_queue):
multiprocessing.Process.__init__(self)
self._stats = multiprocessing.Manager().dict()
self._backup_sweeper = backup_sweeper
self._sweep_queue = sweep_queue
self.total_processed = 0
self.total_deleted = 0
self.total_errored = 0
###########################################################################
@property
def total_processed(self):
return self._stats["total_processed"]
@total_processed.setter
def total_processed(self, val):
self._stats["total_processed"] = val
###########################################################################
@property
def total_deleted(self):
return self._stats["total_deleted"]
@total_deleted.setter
def total_deleted(self, val):
self._stats["total_deleted"] = val
###########################################################################
@property
def total_errored(self):
return self._stats["total_errored"]
@total_errored.setter
def total_errored(self, val):
self._stats["total_errored"] = val
###########################################################################
def run(self):
while True:
backup = self._sweep_queue.get()
if backup is None: # None in Queue means STOP!!!
logger.info("%s Exiting..." % self.name)
self._sweep_queue.task_done()
# breaking
break
logger.info("%s Processing backup %s" % (self.name, backup.id))
self.total_processed += 1
try:
deleted = self._backup_sweeper.delete_backup_targets(backup)
if deleted:
self.total_deleted += 1
            except Exception as ex:
self.total_errored += 1
msg = ("%s: Error while attempting to "
"delete backup targets for backup '%s'" % (self.name, backup.id))
logger.exception(msg)
finally:
self._sweep_queue.task_done()
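# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A driver would typically start several SweepWorker processes on a shared joinable
# queue, enqueue the backups that are due, then enqueue one None sentinel per worker
# so each run() loop exits. The names sweeper, due_backups and num_workers below are
# hypothetical:
#
# sweep_queue = multiprocessing.JoinableQueue()
# workers = [SweepWorker(sweeper, sweep_queue) for _ in range(num_workers)]
# for w in workers:
#     w.start()
# for backup in due_backups:
#     sweep_queue.put(backup)
# for _ in workers:
#     sweep_queue.put(None)  # sentinel understood by run() as "stop"
# sweep_queue.join()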
|
Python
|
CL
|
d3613af539321fc841df277ab61ef127af39a5421be638a71756e5a304441c96
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Header utilities for the compiler.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
import os.path
from functools import partial
from coconut.root import _indent
from coconut.constants import (
hash_prefix,
tabideal,
default_encoding,
template_ext,
justify_len,
report_this_text,
)
from coconut.util import univ_open
from coconut.terminal import internal_assert
from coconut.compiler.util import (
get_target_info,
split_comment,
get_vers_for_target,
)
# -----------------------------------------------------------------------------------------------------------------------
# UTILITIES:
# -----------------------------------------------------------------------------------------------------------------------
def gethash(compiled):
"""Retrieve a hash from a header."""
lines = compiled.splitlines()
if len(lines) < 3 or not lines[2].startswith(hash_prefix):
return None
else:
return lines[2][len(hash_prefix):]
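# Illustrative example (editor addition): gethash only inspects the third line of the
# compiled source, so for a file laid out as shebang / coding line / hash line it
# returns the text following hash_prefix (the exact prefix value lives in
# coconut.constants and is not assumed here):
#
# compiled = "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n" + hash_prefix + "abc123\n"
# assert gethash(compiled) == "abc123"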
def minify(compiled):
"""Perform basic minification of the header.
Fails on non-tabideal indentation, strings with #s, or multi-line strings.
(So don't do those things in the header.)
"""
compiled = compiled.strip()
if compiled:
out = []
for line in compiled.splitlines():
new_line, comment = split_comment(line)
new_line = new_line.rstrip()
if new_line:
ind = 0
while new_line.startswith(" "):
new_line = new_line[1:]
ind += 1
internal_assert(ind % tabideal == 0, "invalid indentation in", line)
new_line = " " * (ind // tabideal) + new_line
comment = comment.strip()
if comment:
new_line += "#" + comment
if new_line:
out.append(new_line)
compiled = "\n".join(out) + "\n"
return compiled
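# Illustrative example (editor addition), assuming tabideal == 4 (Coconut's standard
# indent unit): each 4-space indentation level is collapsed to a single space, so
#
# minify("def f(x):\n    return x\n")  ->  "def f(x):\n return x\n"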
template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
def get_template(template):
"""Read the given template file."""
with univ_open(os.path.join(template_dir, template) + template_ext, "r") as template_file:
return template_file.read()
def one_num_ver(target):
"""Return the first number of the target version, if it has one."""
return target[:1] # "2", "3", or ""
def section(name, newline_before=True):
"""Generate a section break."""
line = "# " + name + ": "
return (
"\n" * int(newline_before)
+ line
+ "-" * (justify_len - len(line))
+ "\n\n"
)
def base_pycondition(target, ver, if_lt=None, if_ge=None, indent=None, newline=False, fallback=""):
"""Produce code that depends on the Python version for the given target."""
internal_assert(isinstance(ver, tuple), "invalid pycondition version")
internal_assert(if_lt or if_ge, "either if_lt or if_ge must be specified")
if if_lt:
if_lt = if_lt.strip()
if if_ge:
if_ge = if_ge.strip()
target_supported_vers = get_vers_for_target(target)
if all(tar_ver < ver for tar_ver in target_supported_vers):
if not if_lt:
return fallback
out = if_lt
elif all(tar_ver >= ver for tar_ver in target_supported_vers):
if not if_ge:
return fallback
out = if_ge
else:
if if_lt and if_ge:
out = """if _coconut_sys.version_info < {ver}:
{lt_block}
else:
{ge_block}""".format(
ver=repr(ver),
lt_block=_indent(if_lt, by=1),
ge_block=_indent(if_ge, by=1),
)
elif if_lt:
out = """if _coconut_sys.version_info < {ver}:
{lt_block}""".format(
ver=repr(ver),
lt_block=_indent(if_lt, by=1),
)
else:
out = """if _coconut_sys.version_info >= {ver}:
{ge_block}""".format(
ver=repr(ver),
ge_block=_indent(if_ge, by=1),
)
if indent is not None:
out = _indent(out, by=indent)
if newline:
out += "\n"
return out
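# Illustrative example (editor addition): with no fixed target, both Python 2 and
# Python 3 must be supported, so an if_lt/if_ge pair becomes a runtime version check.
# base_pycondition("", (3,), if_lt="x = 1", if_ge="x = 2") yields roughly:
#
#     if _coconut_sys.version_info < (3,):
#         x = 1
#     else:
#         x = 2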
# -----------------------------------------------------------------------------------------------------------------------
# FORMAT DICTIONARY:
# -----------------------------------------------------------------------------------------------------------------------
class Comment(object):
"""When passed to str.format, allows {COMMENT.<>} to serve as a comment."""
def __getattr__(self, attr):
"""Return an empty string for all comment attributes."""
return ""
COMMENT = Comment()
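# Illustrative example (editor addition): any {COMMENT.<name>} placeholder renders as
# the empty string, which lets header templates carry named comments that disappear
# from the generated output:
#
# assert "x = 1{COMMENT.explain_x}".format(COMMENT=COMMENT) == "x = 1"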
def process_header_args(which, target, use_hash, no_tco, strict, no_wrap):
"""Create the dictionary passed to str.format in the header."""
target_startswith = one_num_ver(target)
target_info = get_target_info(target)
pycondition = partial(base_pycondition, target)
format_dict = dict(
COMMENT=COMMENT,
empty_dict="{}",
lbrace="{",
rbrace="}",
target_startswith=target_startswith,
default_encoding=default_encoding,
hash_line=hash_prefix + use_hash + "\n" if use_hash is not None else "",
typing_line="# type: ignore\n" if which == "__coconut__" else "",
VERSION_STR=VERSION_STR,
module_docstring='"""Built-in Coconut utilities."""\n\n' if which == "__coconut__" else "",
object="" if target_startswith == "3" else "(object)",
report_this_text=report_this_text,
import_pickle=pycondition(
(3,),
if_lt=r'''
import cPickle as pickle
''',
if_ge=r'''
import pickle
''',
indent=1,
),
import_OrderedDict=_indent(
r'''OrderedDict = collections.OrderedDict if _coconut_sys.version_info >= (2, 7) else dict'''
if not target
else "OrderedDict = collections.OrderedDict" if target_info >= (2, 7)
else "OrderedDict = dict",
by=1,
),
import_collections_abc=pycondition(
(3, 3),
if_lt=r'''
abc = collections
''',
if_ge=r'''
import collections.abc as abc
''',
indent=1,
),
set_zip_longest=_indent(
r'''zip_longest = itertools.zip_longest if _coconut_sys.version_info >= (3,) else itertools.izip_longest'''
if not target
else "zip_longest = itertools.zip_longest" if target_info >= (3,)
else "zip_longest = itertools.izip_longest",
by=1,
),
comma_bytearray=", bytearray" if target_startswith != "3" else "",
lstatic="staticmethod(" if target_startswith != "3" else "",
rstatic=")" if target_startswith != "3" else "",
zip_iter=_indent(
r'''for items in _coconut.iter(_coconut.zip(*self.iters, strict=self.strict) if _coconut_sys.version_info >= (3, 10) else _coconut.zip_longest(*self.iters, fillvalue=_coconut_sentinel) if self.strict else _coconut.zip(*self.iters)):
if self.strict and _coconut_sys.version_info < (3, 10) and _coconut.any(x is _coconut_sentinel for x in items):
raise _coconut.ValueError("zip(..., strict=True) arguments have mismatched lengths")
yield items'''
if not target else
r'''for items in _coconut.iter(_coconut.zip(*self.iters, strict=self.strict)):
yield items'''
if target_info >= (3, 10) else
r'''for items in _coconut.iter(_coconut.zip_longest(*self.iters, fillvalue=_coconut_sentinel) if self.strict else _coconut.zip(*self.iters)):
if self.strict and _coconut.any(x is _coconut_sentinel for x in items):
raise _coconut.ValueError("zip(..., strict=True) arguments have mismatched lengths")
yield items''',
by=2,
),
# disabled mocks must have different docstrings so the
# interpreter can tell them apart from the real thing
def_prepattern=(
r'''def prepattern(base_func, **kwargs):
"""DEPRECATED: use addpattern instead."""
def pattern_prepender(func):
return addpattern(func, **kwargs)(base_func)
return pattern_prepender'''
if not strict else
r'''def prepattern(*args, **kwargs):
"""Deprecated feature 'prepattern' disabled by --strict compilation; use 'addpattern' instead."""
raise _coconut.NameError("deprecated feature 'prepattern' disabled by --strict compilation; use 'addpattern' instead")'''
),
def_datamaker=(
r'''def datamaker(data_type):
"""DEPRECATED: use makedata instead."""
return _coconut.functools.partial(makedata, data_type)'''
if not strict else
r'''def datamaker(*args, **kwargs):
"""Deprecated feature 'datamaker' disabled by --strict compilation; use 'makedata' instead."""
raise _coconut.NameError("deprecated feature 'datamaker' disabled by --strict compilation; use 'makedata' instead")'''
),
return_method_of_self=pycondition(
(3,),
if_lt=r'''
return _coconut.types.MethodType(self, obj, objtype)
''',
if_ge=r'''
return _coconut.types.MethodType(self, obj)
''',
indent=2,
),
return_method_of_self_func=pycondition(
(3,),
if_lt=r'''
return _coconut.types.MethodType(self.func, obj, objtype)
''',
if_ge=r'''
return _coconut.types.MethodType(self.func, obj)
''',
indent=2,
),
def_call_set_names=(
r'''def _coconut_call_set_names(cls):
for k, v in _coconut.vars(cls).items():
set_name = _coconut.getattr(v, "__set_name__", None)
if set_name is not None:
set_name(cls, k)'''
if target_startswith == "2" else
r'''def _coconut_call_set_names(cls): pass'''
if target_info >= (3, 6) else
r'''def _coconut_call_set_names(cls):
if _coconut_sys.version_info < (3, 6):
for k, v in _coconut.vars(cls).items():
set_name = _coconut.getattr(v, "__set_name__", None)
if set_name is not None:
set_name(cls, k)'''
),
pattern_func_slots=pycondition(
(3, 7),
if_lt=r'''
__slots__ = ("FunctionMatchError", "patterns", "__doc__", "__name__")
''',
if_ge=r'''
__slots__ = ("FunctionMatchError", "patterns", "__doc__", "__name__", "__qualname__")
''',
indent=1,
),
set_qualname_none=pycondition(
(3, 7),
if_ge=r'''
self.__qualname__ = None
''',
indent=2,
),
set_qualname_func=pycondition(
(3, 7),
if_ge=r'''
self.__qualname__ = _coconut.getattr(func, "__qualname__", self.__qualname__)
''',
indent=2,
),
tco_comma="_coconut_tail_call, _coconut_tco, " if not no_tco else "",
call_set_names_comma="_coconut_call_set_names, " if target_info < (3, 6) else "",
handle_cls_args_comma="_coconut_handle_cls_kwargs, _coconut_handle_cls_stargs, " if target_startswith != "3" else "",
)
# second round for format dict elements that use the format dict
format_dict.update(
dict(
# when anything is added to this list it must also be added to *both* __coconut__.pyi stub files
underscore_imports="{tco_comma}{call_set_names_comma}{handle_cls_args_comma}_coconut, _coconut_MatchError, _coconut_igetitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_mark_as_match, _coconut_reiterable, _coconut_self_match_types, _coconut_dict_merge, _coconut_exec".format(**format_dict),
import_typing_NamedTuple=pycondition(
(3, 6),
if_lt='''
class typing{object}:
@staticmethod
def NamedTuple(name, fields):
return _coconut.collections.namedtuple(name, [x for x, t in fields])
'''.format(**format_dict),
if_ge='''
import typing
''',
indent=1,
),
import_asyncio=pycondition(
(3, 4),
if_lt='''
try:
import trollius as asyncio
except ImportError:
class you_need_to_install_trollius{object}: pass
asyncio = you_need_to_install_trollius()
'''.format(**format_dict),
if_ge='''
import asyncio
''',
indent=1,
),
maybe_bind_lru_cache=pycondition(
(3, 2),
if_lt='''
try:
from backports.functools_lru_cache import lru_cache
functools.lru_cache = lru_cache
except ImportError:
class you_need_to_install_backports_functools_lru_cache{object}: pass
functools.lru_cache = you_need_to_install_backports_functools_lru_cache()
'''.format(**format_dict),
if_ge=None,
indent=1,
newline=True,
),
),
)
return format_dict
# -----------------------------------------------------------------------------------------------------------------------
# HEADER GENERATION:
# -----------------------------------------------------------------------------------------------------------------------
def getheader(which, target, use_hash, no_tco, strict, no_wrap):
"""Generate the specified header."""
internal_assert(
which.startswith("package") or which in (
"none", "initial", "__coconut__", "sys", "code", "file",
),
"invalid header type",
which,
)
if which == "none":
return ""
target_startswith = one_num_ver(target)
target_info = get_target_info(target)
# initial, __coconut__, package:n, sys, code, file
format_dict = process_header_args(which, target, use_hash, no_tco, strict, no_wrap)
if which == "initial" or which == "__coconut__":
header = '''#!/usr/bin/env python{target_startswith}
# -*- coding: {default_encoding} -*-
{hash_line}{typing_line}
# Compiled with Coconut version {VERSION_STR}
{module_docstring}'''.format(**format_dict)
elif use_hash is not None:
raise CoconutInternalException("can only add a hash to an initial or __coconut__ header, not", which)
else:
header = ""
if which == "initial":
return header
# __coconut__, package:n, sys, code, file
header += section("Coconut Header", newline_before=False)
if target_startswith != "3":
header += "from __future__ import print_function, absolute_import, unicode_literals, division\n"
elif target_info >= (3, 7):
if no_wrap:
header += "from __future__ import generator_stop\n"
else:
header += "from __future__ import generator_stop, annotations\n"
elif target_info >= (3, 5):
header += "from __future__ import generator_stop\n"
if which.startswith("package"):
levels_up = int(which[len("package:"):])
coconut_file_dir = "_coconut_os.path.dirname(_coconut_os.path.abspath(__file__))"
for _ in range(levels_up):
coconut_file_dir = "_coconut_os.path.dirname(" + coconut_file_dir + ")"
return header + '''import sys as _coconut_sys, os as _coconut_os
_coconut_file_dir = {coconut_file_dir}
_coconut_cached_module = _coconut_sys.modules.get({__coconut__})
if _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir:
del _coconut_sys.modules[{__coconut__}]
_coconut_sys.path.insert(0, _coconut_file_dir)
_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]
if _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and "__init__.py" in _coconut_os.listdir(_coconut_file_dir):
_coconut_full_module_name = str(_coconut_module_name + ".__coconut__")
import __coconut__ as _coconut__coconut__
_coconut__coconut__.__name__ = _coconut_full_module_name
for _coconut_v in vars(_coconut__coconut__).values():
if getattr(_coconut_v, "__module__", None) == {__coconut__}:
try:
_coconut_v.__module__ = _coconut_full_module_name
except AttributeError:
_coconut_v_type = type(_coconut_v)
if getattr(_coconut_v_type, "__module__", None) == {__coconut__}:
_coconut_v_type.__module__ = _coconut_full_module_name
_coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__
from __coconut__ import *
from __coconut__ import {underscore_imports}
_coconut_sys.path.pop(0)
'''.format(
coconut_file_dir=coconut_file_dir,
__coconut__=(
'"__coconut__"' if target_startswith == "3"
else 'b"__coconut__"' if target_startswith == "2"
else 'str("__coconut__")'
),
**format_dict
) + section("Compiled Coconut")
if which == "sys":
return header + '''import sys as _coconut_sys
from coconut.__coconut__ import *
from coconut.__coconut__ import {underscore_imports}
'''.format(**format_dict)
# __coconut__, code, file
header += "import sys as _coconut_sys\n"
if target_info >= (3, 7):
header += PY37_HEADER
elif target_startswith == "3":
header += PY3_HEADER
elif target_info >= (2, 7):
header += PY27_HEADER
elif target_startswith == "2":
header += PY2_HEADER
else:
header += PYCHECK_HEADER
header += get_template("header").format(**format_dict)
if which == "file":
header += section("Compiled Coconut")
return header
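# Illustrative usage sketch (editor addition): callers request a header by kind, e.g.
# the "sys" header for compiled code that imports the installed Coconut runtime. The
# argument values below are plausible defaults, not values taken from this module:
#
# hdr = getheader("sys", target="", use_hash=None, no_tco=False, strict=False, no_wrap=False)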
|
Python
|
CL
|
c2ab4680d4b8de672bab898e8ecb26e132a4bccf537027b587a3851f524b7829
|
from srm_helper import *
import os
import pandas as pd
if __name__ == "__main__":
# params
tol = .5 # MS2 fragment tolerance for QqQ optimized transitions
ppmTol = 10 # m/z tolerance for HRMS data in ppm
numCores = 5 # number of CPU cores to use
# create srm_maker object
srm_maker = SRM_maker(ppm=ppmTol, numCores=numCores)
# set datafiles for learning conversion
trainingData = pd.read_csv("../data/IDX/M3T_transitions_ALTIS_optimized_allCpds.csv")
msFilenames = ["../data/IDX/IDX_MS2_data/M3T_10uM_pos_DDA_10NCEs_25-35_50ms_5e4_DE5s_updatedRT.mzML",
"../data/IDX/IDX_MS2_data/M3T_10uM_neg_DDA_10NCEs_25-35_50ms_5e4_DE5s_updatedRT.mzML",
"../data/IDX/IDX_MS2_data/M3T_10uM_pos_DDA_10NCEs_25-35_80ms_1e4_DE5s_updatedRT_missing.mzML"]
# build conversion
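    # note (editor addition): 2 * 1e6 * .5 / 200 = 5000 ppm, i.e. a +/-0.5 Da window
    # expressed in ppm at m/z 200; the interpretation of these constants is an assumption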
merged = srm_maker.buildConversion(msFilenames, trainingData, tic_cutoff=0, frag_cutoff=0,
frag_ppm_tolerance=2 * 1e6 * .5 / 200)
merged.to_csv("../data/IDX/AX_stab_pool/conversion_results_AX.csv")
# output conversion
print(srm_maker.getConversionEquationString())
# set datafiles to build srms
targets = pd.read_csv("../data/IDX/AX_stab_pool/combined_peak_list.csv")
# filename for HRMS MS/MS of targets
msFilenames = ["../data/IDX/AX_stab_pool/pos/"+x for x in os.listdir("../data/IDX/AX_stab_pool/pos/") if ".mzML" in x] + ["../data/IDX/AX_stab_pool/neg/"+x for x in os.listdir("../data/IDX/AX_stab_pool/neg/") if ".mzML" in x]
print(msFilenames)
srm_table = pd.DataFrame()
breakdownCurves = {}
for msFilename in msFilenames:
# create SRM table
srm_table1, breakdownCurves1 = srm_maker.createSRMsCE(msFilename, targets)
srm_table = pd.concat((srm_table,srm_table1),axis=0,ignore_index=True)
breakdownCurves.update(breakdownCurves1)
# output SRM file
srm_table.to_csv("../data/IDX/AX_stab_pool/generated_SRM_table.csv")
# plot breakdown curves
plotBreakdownCurves(breakdownCurves,"../data/IDX/AX_stab_pool/breakdown_curves.pdf")
|
Python
|
CL
|
79782fffab581adad13c6bbe6a50ea67690eef6684c1a14517a8cf79ada96654
|