import pytest
from ethereum import tester
from functools import (
reduce
)
from fixtures import (
MAX_UINT,
fake_address,
token_events,
owner_index,
owner,
wallet_address,
get_bidders,
fixture_decimals,
contract_params,
get_token_contract,
token_contract,
create_contract,
print_logs,
create_accounts,
txnCost,
test_bytes,
event_handler,
)
from utils_logs import LogFilter
@pytest.fixture()
def proxy_contract(chain, create_contract):
AuctionProxy = chain.provider.get_contract_factory('Proxy')
proxy_contract = create_contract(AuctionProxy, [])
print_logs(proxy_contract, 'Payable', 'Proxy')
return proxy_contract
@pytest.fixture()
def proxy_erc223_contract(chain, create_contract):
AuctionProxy = chain.provider.get_contract_factory('ProxyERC223')
proxy_erc223_contract = create_contract(AuctionProxy, [])
return proxy_erc223_contract
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_init(
chain,
web3,
wallet_address,
get_token_contract,
proxy_contract,
decimals):
(A, B, C, D, E) = web3.eth.accounts[:5]
auction = proxy_contract
multiplier = 10**(decimals)
initial_supply = 5000 * multiplier
# Transaction fails when auction address is invalid
with pytest.raises(TypeError):
token = get_token_contract([
0,
wallet_address,
initial_supply
], {'from': E}, decimals=decimals)
with pytest.raises(TypeError):
token = get_token_contract([
proxy_contract.address,
0,
wallet_address,
initial_supply
], {'from': E}, decimals=decimals)
with pytest.raises(TypeError):
token = get_token_contract([
fake_address,
wallet_address,
initial_supply
], {'from': E}, decimals=decimals)
    # Test a supply of MAX_UINT - 1 (the supply has to be even)
token = get_token_contract([
proxy_contract.address,
wallet_address,
MAX_UINT - 1
], {'from': E}, decimals=decimals)
with pytest.raises(TypeError):
token = get_token_contract([
proxy_contract.address,
wallet_address,
MAX_UINT + 1
], {'from': E}, decimals=decimals)
# Transaction fails if initial_supply == 0
with pytest.raises(tester.TransactionFailed):
token = get_token_contract([
proxy_contract.address,
wallet_address,
0
], {'from': E}, decimals=decimals)
with pytest.raises(TypeError):
token = get_token_contract([
proxy_contract.address,
wallet_address,
-2
], {'from': E}, decimals=decimals)
    # Fails when the supply is an odd number, because the auction and wallet
    # addresses would be assigned different numbers of tokens
with pytest.raises(tester.TransactionFailed):
token = get_token_contract([
proxy_contract.address,
wallet_address,
10000001,
], {'from': E}, decimals=decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
initial_supply
], {'from': E}, decimals=decimals)
assert token.call().decimals() == decimals
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_variable_access(
chain,
web3,
wallet_address,
get_token_contract,
proxy_contract,
decimals):
owner = web3.eth.coinbase
(A, B, C) = web3.eth.accounts[1:4]
multiplier = 10**(decimals)
initial_supply = 3000 * multiplier
token = get_token_contract([
proxy_contract.address,
wallet_address,
initial_supply
], {'from': owner}, decimals=decimals)
assert token.call().name() == 'Yobicash Token'
assert token.call().symbol() == 'YBC'
assert token.call().decimals() == decimals
assert token.call().totalSupply() == initial_supply
def test_token_balanceOf(
chain,
web3,
wallet_address,
token_contract,
proxy_contract,
contract_params):
token = token_contract(proxy_contract.address)
multiplier = 10**(contract_params['decimals'])
supply = contract_params['supply'] * multiplier
half_balance = supply // 2
assert token.call().balanceOf(proxy_contract.address) == half_balance
assert token.call().balanceOf(wallet_address) == half_balance
def transfer_tests(
bidders,
balances,
multiplier,
token,
event_handler):
(A, B, C) = bidders
ev_handler = event_handler(token)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(0, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(fake_address, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(B, MAX_UINT + 1)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(B, -5)
with pytest.raises(tester.TransactionFailed):
balance_A = token.call().balanceOf(A)
token.transact({'from': A}).transfer(B, balance_A + 1)
with pytest.raises(tester.TransactionFailed):
balance_B = token.call().balanceOf(B)
token.transact({'from': A}).transfer(B, MAX_UINT + 1 - balance_B)
txn_hash = token.transact({'from': A}).transfer(B, 0)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == balances[0]
assert token.call().balanceOf(B) == balances[1]
txn_hash = token.transact({'from': A}).transfer(B, 120)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == balances[0] - 120
assert token.call().balanceOf(B) == balances[1] + 120
txn_hash = token.transact({'from': B}).transfer(C, 66)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(B) == balances[1] + 120 - 66
assert token.call().balanceOf(C) == balances[2] + 66
ev_handler.check()
def transfer_erc223_tests(
bidders,
balances,
multiplier,
token,
proxy,
token_erc223,
proxy_erc223,
event_handler):
(A, B, C) = bidders
ev_handler = event_handler(token_erc223)
test_data = test_bytes() # 32 bytes
test_data2 = test_bytes(value=20)
    assert test_data != test_data2
balance_A = token.call().balanceOf(A)
balance_proxy = token.call().balanceOf(proxy.address)
balance_proxy_erc223 = token.call().balanceOf(proxy_erc223.address)
with pytest.raises(TypeError):
token.transact({'from': A}).transfer(B, balance_A, 0)
# Make sure it fails when internal call of transfer(to, value) fails
with pytest.raises(tester.TransactionFailed):
token.transact({'from': A}).transfer(B, balance_A + 1, test_data)
with pytest.raises(tester.TransactionFailed):
token.transact({'from': A}).transfer(proxy_erc223.address, balance_A + 1, test_data)
# Receiver contracts without a tokenFallback
with pytest.raises(tester.TransactionFailed):
token.transact({'from': A}).transfer(proxy.address, balance_A, test_data)
# TODO FIXME erc223 transfer event not handled correctly
txn_hash = token.transact({'from': A}).transfer(proxy_erc223.address, balance_A, test_data)
# ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == 0
assert token.call().balanceOf(proxy_erc223.address) == balance_proxy_erc223 + balance_A
# Arbitrary tests to see if the tokenFallback function from the proxy is called
assert proxy_erc223.call().sender() == A
assert proxy_erc223.call().value() == balance_A
balance_B = token.call().balanceOf(B)
balance_proxy_erc223 = token.call().balanceOf(proxy_erc223.address)
txn_hash = token.transact({'from': B}).transfer(proxy_erc223.address, 0, test_data2)
# ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(B) == balance_B
assert token.call().balanceOf(proxy_erc223.address) == balance_proxy_erc223
assert proxy_erc223.call().sender() == B
assert proxy_erc223.call().value() == 0
txn_hash = token.transact({'from': A}).transfer(proxy_erc223.address, 0)
# ev_handler.add(txn_hash, token_events['transfer'])
txn_hash = token.transact({'from': A}).transfer(proxy.address, 0)
# ev_handler.add(txn_hash, token_events['transfer'])
ev_handler.check()
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_transfer(
chain,
web3,
wallet_address,
get_bidders,
get_token_contract,
token_contract,
proxy_contract,
proxy_erc223_contract,
decimals,
event_handler):
(A, B, C) = get_bidders(3)
multiplier = 10**(decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
5000 * multiplier,
], decimals=decimals)
assert token.call().decimals() == decimals
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
token.transact({'from': wallet_address}).transfer(C, 1000)
transfer_tests(
(A, B, C),
[3000, 2000, 1000],
multiplier,
token,
event_handler)
token_erc223 = token_contract(proxy_erc223_contract.address)
token_erc223.transact({'from': wallet_address}).transfer(A, 3000)
token_erc223.transact({'from': wallet_address}).transfer(B, 2000)
token_erc223.transact({'from': wallet_address}).transfer(C, 1000)
transfer_erc223_tests(
(A, B, C),
[3000, 2000, 1000],
multiplier,
token,
proxy_contract,
token_erc223,
proxy_erc223_contract,
event_handler)
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_approve(
web3,
wallet_address,
get_token_contract,
proxy_contract,
decimals,
event_handler):
(A, B, C) = web3.eth.accounts[1:4]
multiplier = 10**(decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
5000 * multiplier
], decimals=decimals)
assert token.call().decimals() == decimals
ev_handler = event_handler(token)
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
token.transact({'from': wallet_address}).transfer(C, 1000)
with pytest.raises(TypeError):
token.transact({'from': A}).approve(0, B)
with pytest.raises(TypeError):
token.transact({'from': A}).approve(fake_address, B)
with pytest.raises(TypeError):
token.transact({'from': A}).approve(B, -3)
# We can approve more than we have
# with pytest.raises(tester.TransactionFailed):
txn_hash = token.transact({'from': A}).approve(B, 3000 + 1)
ev_handler.add(txn_hash, token_events['approve'])
txn_hash = token.transact({'from': A}).approve(A, 300)
ev_handler.add(txn_hash, token_events['approve'])
assert token.call().allowance(A, A) == 300
with pytest.raises(tester.TransactionFailed):
txn_hash = token.transact({'from': A}).approve(B, 300)
txn_hash = token.transact({'from': A}).approve(B, 0)
txn_hash = token.transact({'from': A}).approve(B, 300)
ev_handler.add(txn_hash, token_events['approve'])
txn_hash = token.transact({'from': B}).approve(C, 650)
ev_handler.add(txn_hash, token_events['approve'])
assert token.call().allowance(A, B) == 300
assert token.call().allowance(B, C) == 650
ev_handler.check()
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_allowance(
web3,
wallet_address,
get_bidders,
get_token_contract,
proxy_contract,
decimals):
(A, B) = get_bidders(2)
multiplier = 10**(decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
5000 * multiplier
], decimals=decimals)
assert token.call().decimals() == decimals
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
with pytest.raises(TypeError):
token.call().allowance(0, B)
with pytest.raises(TypeError):
token.call().allowance(fake_address, B)
with pytest.raises(TypeError):
token.call().allowance(A, 0)
with pytest.raises(TypeError):
token.call().allowance(A, fake_address)
assert token.call().allowance(A, B) == 0
assert token.call().allowance(B, A) == 0
token.transact({'from': A}).approve(B, 300)
assert token.call().allowance(A, B) == 300
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_token_transfer_from(
chain,
web3,
wallet_address,
get_bidders,
get_token_contract,
proxy_contract,
decimals,
event_handler):
(A, B, C) = get_bidders(3)
multiplier = 10**(decimals)
token = get_token_contract([
proxy_contract.address,
wallet_address,
5000 * multiplier
], decimals=decimals)
assert token.call().decimals() == decimals
ev_handler = event_handler(token)
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
token.transact({'from': wallet_address}).transfer(C, 1000)
txn_hash = token.transact({'from': B}).approve(A, 300)
ev_handler.add(txn_hash, token_events['approve'])
assert token.call().allowance(B, A) == 300
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(0, C, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(B, 0, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(fake_address, C, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(B, fake_address, 10)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(B, C, MAX_UINT + 1)
with pytest.raises(TypeError):
token.transact({'from': A}).transferFrom(B, C, -5)
with pytest.raises(tester.TransactionFailed):
allowance_B = token.call().allowance(B, A)
token.transact({'from': A}).transferFrom(B, C, allowance_B + 1)
# We can allow more than the balance, but we cannot transfer more
with pytest.raises(tester.TransactionFailed):
balance_B = token.call().balanceOf(B)
token.transact({'from': B}).approve(A, balance_B + 10)
token.transact({'from': A}).transferFrom(B, C, balance_B + 10)
# Test for overflow
with pytest.raises(tester.TransactionFailed):
balance_B = token.call().balanceOf(B)
overflow = MAX_UINT + 1 - balance_B
token.transact({'from': B}).approve(A, overflow)
token.transact({'from': A}).transferFrom(B, C, overflow)
with pytest.raises(tester.TransactionFailed):
txn_hash = token.transact({'from': B}).approve(A, 300)
txn_hash = token.transact({'from': B}).approve(A, 0)
txn_hash = token.transact({'from': B}).approve(A, 300)
ev_handler.add(txn_hash, token_events['approve'])
assert token.call().allowance(B, A) == 300
balance_A = token.call().balanceOf(A)
balance_B = token.call().balanceOf(B)
balance_C = token.call().balanceOf(C)
txn_hash = token.transact({'from': A}).transferFrom(B, C, 0)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == balance_A
assert token.call().balanceOf(B) == balance_B
assert token.call().balanceOf(C) == balance_C
txn_hash = token.transact({'from': A}).transferFrom(B, C, 150)
ev_handler.add(txn_hash, token_events['transfer'])
assert token.call().balanceOf(A) == balance_A
assert token.call().balanceOf(B) == balance_B - 150
assert token.call().balanceOf(C) == balance_C + 150
ev_handler.check()
@pytest.mark.parametrize('decimals', fixture_decimals)
def test_burn(
chain,
web3,
wallet_address,
get_bidders,
get_token_contract,
proxy_contract,
decimals,
txnCost,
event_handler):
decimals = 18
eth = web3.eth
(A, B) = get_bidders(2)
multiplier = 10**(decimals)
initial_supply = 5000 * multiplier
token = get_token_contract([
proxy_contract.address,
wallet_address,
initial_supply
], decimals=decimals)
assert token.call().decimals() == decimals
ev_handler = event_handler(token)
token.transact({'from': wallet_address}).transfer(A, 3000)
token.transact({'from': wallet_address}).transfer(B, 2000)
with pytest.raises(TypeError):
token.transact({'from': B}).burn(-3)
with pytest.raises(TypeError):
token.transact({'from': B}).burn(MAX_UINT + 1)
with pytest.raises(tester.TransactionFailed):
token.transact({'from': B}).burn(0)
with pytest.raises(tester.TransactionFailed):
token.transact({'from': B}).burn(2000 + 1)
# Balance should not change besides transaction costs
tokens_B = token.call().balanceOf(B)
balance_B = eth.getBalance(B)
burnt = 250
txn_hash = token.transact({'from': B}).burn(burnt)
txn_cost = txnCost(txn_hash)
ev_handler.add(txn_hash, token_events['burn'])
assert token.call().totalSupply() == initial_supply - burnt
assert token.call().balanceOf(B) == tokens_B - burnt
assert balance_B == eth.getBalance(B) + txn_cost
tokens_B = token.call().balanceOf(B)
balance_B = eth.getBalance(B)
total_supply = token.call().totalSupply()
txn_hash = token.transact({'from': B}).burn(tokens_B)
txn_cost = txnCost(txn_hash)
assert token.call().totalSupply() == total_supply - tokens_B
assert token.call().balanceOf(B) == 0
assert balance_B == eth.getBalance(B) + txn_cost
ev_handler.check()
def test_event_handler(token_contract, proxy_contract, event_handler):
token = token_contract(proxy_contract.address)
ev_handler = event_handler(token)
fake_txn = 0x0343
# Add fake events with no transactions
ev_handler.add(fake_txn, token_events['deploy'])
ev_handler.add(fake_txn, token_events['setup'])
ev_handler.add(fake_txn, token_events['transfer'])
ev_handler.add(fake_txn, token_events['approve'])
ev_handler.add(fake_txn, token_events['burn'])
# This should fail
with pytest.raises(Exception):
ev_handler.check(1)
import progressbar
def test_with():
with progressbar.ProgressBar(max_value=10) as p:
for i in range(10):
p.update(i)
def test_with_stdout_redirection():
with progressbar.ProgressBar(max_value=10, redirect_stdout=True) as p:
for i in range(10):
p.update(i)
def test_with_extra_start():
with progressbar.ProgressBar(max_value=10) as p:
p.start()
p.start()
#!/usr/bin/env python
#
# Copyright 2016 Steve Kyle. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Serengeti TAXII Client Script
"""
import sys
import os
from datetime import datetime
import warnings
import glob
from . import args
from . import config
from . import service
def main():
    """Client Entry Point"""
    # Use distinct local names so the imported ``args`` and ``config`` modules
    # are not shadowed inside this function.
    parser = args.get_arg_parser()
    parsed_args = parser.parse_args()
    cfg = config.read_config()
    cfg.merge_args(parsed_args)
    request = cfg.get_request()
    response = service.handler(request)
    print(response)
if __name__ == '__main__':
main()
from .find_maximum_value_binary_tree import find_maximum_value, BST
def test_find_maximum_value_tree_with_one_value():
one_value = BST([5])
assert find_maximum_value(one_value) == 5
def test_find_maximum_value_tree_with_two_values():
    two_values = BST([10, 2])
    assert find_maximum_value(two_values) == 10
def test_find_maximum_value_balanced():
balanced = BST([10, 7, 3, 16, 12, 8, 20])
assert find_maximum_value(balanced) == 20
def test_find_maximum_value_left():
left = BST([10, 8, 6, 4])
assert find_maximum_value(left) == 10
def test_find_maximum_value_right():
right = BST([1, 3, 5, 7, 9])
assert find_maximum_value(right) == 9
import matplotlib.pyplot as plt
import networkx as nx
import os
import random
import warnings
from matplotlib.backends import backend_gtk3
from settings import OUTPUT_DIR
warnings.filterwarnings('ignore', module=backend_gtk3.__name__)
RESTART_PROBABILITY = 0.15
STEPS_MULTIPLIER = 100
def random_walk_sample(graph, n):
selected_nodes = set()
while len(selected_nodes) < n:
last_node = random.choice(list(graph.nodes))
selected_nodes.add(last_node)
for i in range(STEPS_MULTIPLIER * n):
last_node = random.choice(list(graph.neighbors(last_node)))
selected_nodes.add(last_node)
if len(selected_nodes) >= n:
break
subgraph = graph.subgraph(selected_nodes)
return subgraph
def save_graph_figure(graph, name):
plt.title(name, fontsize=16)
nx.draw(graph, node_size=100)
plt.savefig(os.path.join(OUTPUT_DIR, '%s.png' % name))
plt.close('all')
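# Minimal usage sketch (hypothetical; assumes OUTPUT_DIR exists and the graph is connected,
# since random.choice would fail on a node without neighbors):
#   g = nx.karate_club_graph()
#   sample = random_walk_sample(g, 10)
#   save_graph_figure(sample, 'karate_sample')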
from collections.abc import KeysView
from typing import Union
def find(iterable: Union[list, KeysView], key=lambda x: x):
for elem in iterable:
if key(elem):
return elem
return None
def index(iterable: list, key=lambda x: x) -> int:
x = 0
for elem in iterable:
if key(elem):
return x
x += 1
return -1
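# Small worked example for the two helpers above (values are illustrative only):
#   find([1, 3, 5, 8], key=lambda x: x % 2 == 0) returns 8 (the first even element)
#   index([1, 3, 5, 8], key=lambda x: x % 2 == 0) returns 3 (its position)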
from django.apps import AppConfig
class CalToolConfig(AppConfig):
name = 'cal_tool'
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Callable script to start a training on MyhalCollision dataset
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 06/03/2020
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Common libs
import sys
import time
import signal
import os
os.environ.update(OMP_NUM_THREADS='1',
OPENBLAS_NUM_THREADS='1',
NUMEXPR_NUM_THREADS='1',
MKL_NUM_THREADS='1',)
import numpy as np
import torch
# Dataset
from slam.PointMapSLAM import pointmap_slam, detect_short_term_movables, annotation_process
from slam.dev_slam import bundle_slam, pointmap_for_AMCL
from torch.utils.data import DataLoader
from datasets.MyhalCollision import MyhalCollisionDataset, MyhalCollisionSlam, MyhalCollisionSampler, \
MyhalCollisionCollate
from utils.config import Config
from utils.trainer import ModelTrainer
from models.architectures import KPCollider
from os.path import exists, join
from os import makedirs
# ----------------------------------------------------------------------------------------------------------------------
#
# Config Class
# \******************/
#
class MyhalCollisionConfig(Config):
"""
Override the parameters you want to modify for this dataset
"""
####################
# Dataset parameters
####################
# Dataset name
dataset = 'MyhalCollision'
    # Number of classes in the dataset (this value is overwritten by the dataset class when initializing the dataset).
num_classes = None
# Type of task performed on this dataset (also overwritten)
dataset_task = ''
# Number of CPU threads for the input pipeline
input_threads = 16
#########################
# Architecture definition
#########################
# Define layers (only concerning the 3D architecture)
architecture = ['simple',
'resnetb_strided',
'resnetb',
'resnetb_strided',
'resnetb',
'resnetb_strided',
'resnetb',
'resnetb_strided',
'resnetb',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary']
######################
# Collision parameters
######################
# Number of propagating layer
n_2D_layers = 30
# Total time propagated
T_2D = 3.0
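    # (Interpretation, not stated elsewhere in this file: if the 2D time axis is sampled
    #  uniformly, each propagation step spans T_2D / n_2D_layers = 3.0 / 30 = 0.1 s.)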
# Size of 2D convolution grid
dl_2D = 0.12
# Power of the loss for the 2d predictions (use smaller prop loss when shared weights)
power_2D_init_loss = 1.0
power_2D_prop_loss = 50.0
neg_pos_ratio = 1.0
loss2D_version = 2
# Specification of the 2D networks composition
init_2D_levels = 4 # 3
init_2D_resnets = 3 # 2
prop_2D_resnets = 3 # 2
# Path to a pretrained 3D network. if empty, ignore, if 'todo', then only train 3D part of the network.
#pretrained_3D = 'Log_2021-01-27_18-53-05'
pretrained_3D = ''
# Detach the 2D network from the 3D network when backpropagating gradient
detach_2D = False
# Share weights for 2D network TODO: see if not sharing makes a difference
shared_2D = False
# Trainable backend 3D network
apply_3D_loss = True
#frozen_layers = ['encoder_blocks', 'decoder_blocks', 'head_mlp', 'head_softmax']
# Use visibility mask for training
use_visibility = False
###################
# KPConv parameters
###################
# Radius of the input sphere
in_radius = 8.0
val_radius = 8.0
n_frames = 3
in_features_dim = n_frames
max_in_points = -1
max_val_points = -1
# Choice of input features
first_features_dim = 100
# Number of batch
batch_num = 6
val_batch_num = 6
# Number of kernel points
num_kernel_points = 15
# Size of the first subsampling grid in meter
first_subsampling_dl = 0.06
# Radius of convolution in "number grid cell". (2.5 is the standard value)
conv_radius = 2.5
# Radius of deformable convolution in "number grid cell". Larger so that deformed kernel can spread out
deform_radius = 6.0
# Radius of the area of influence of each kernel point in "number grid cell". (1.0 is the standard value)
KP_extent = 1.2
# Behavior of convolutions in ('constant', 'linear', 'gaussian')
KP_influence = 'linear'
# Aggregation function of KPConv in ('closest', 'sum')
aggregation_mode = 'sum'
# Can the network learn modulations
modulated = False
# Batch normalization parameters
use_batch_norm = True
batch_norm_momentum = 0.02
# Deformable offset loss
# 'point2point' fitting geometry by penalizing distance from deform point to input points
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
deform_fitting_mode = 'point2point'
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
#####################
# Training parameters
#####################
# Maximal number of epochs
max_epoch = 1000
# Learning rate management
learning_rate = 1e-2
momentum = 0.98
lr_decays = {i: 0.1 ** (1 / 120) for i in range(1, max_epoch)}
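    # With this schedule the learning rate is multiplied by 0.1 ** (1 / 120) ~= 0.981 after each
    # epoch, i.e. (assuming the trainer applies lr_decays multiplicatively per epoch, as the dict
    # layout suggests) it is divided by 10 every 120 epochs.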
grad_clip_norm = 100.0
# Number of steps per epochs
epoch_steps = 500
# Number of validation examples per epoch
validation_size = 30
# Number of epoch between each checkpoint
checkpoint_gap = 20
# Augmentations
augment_scale_anisotropic = False
augment_symmetries = [False, False, False]
augment_rotation = 'vertical'
augment_scale_min = 0.99
augment_scale_max = 1.01
augment_noise = 0.001
augment_color = 1.0
    # Do we need to save convergence
saving = True
saving_path = None
# ----------------------------------------------------------------------------------------------------------------------
#
# Main Call
# \***************/
#
if __name__ == '__main__':
# NOT_NOW_TODO: Optimize online predictions
# > Try to parallelise the batch preprocessing for a single input frame.
# > Use OMP for neighbors processing
# > Use the polar coordinates to get neighbors???? (avoiding tree building time)
# > cpp extension for conversion into a 2D lidar_range_scan
#
############################
# Initialize the environment
############################
# Set which gpu is going to be used (auto for automatic choice)
GPU_ID = 'auto'
# Automatic choice (need pynvml to be installed)
if GPU_ID == 'auto':
print('\nSearching a free GPU:')
for i in range(torch.cuda.device_count()):
a = torch.cuda.list_gpu_processes(i)
print(torch.cuda.list_gpu_processes(i))
a = a.split()
if a[1] == 'no':
GPU_ID = a[0][-1:]
# Safe check no free GPU
if GPU_ID == 'auto':
print('\nNo free GPU found!\n')
a = 1/0
else:
print('\nUsing GPU:', GPU_ID, '\n')
# Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
chosen_gpu = int(GPU_ID)
###################
# Training sessions
###################
# Day used as map
map_day = '2020-10-02-13-39-05'
train_days_RandBounce = ['2021-05-15-23-15-09',
'2021-05-15-23-33-25',
'2021-05-15-23-54-50',
'2021-05-16-00-44-53',
'2021-05-16-01-09-43',
'2021-05-16-20-37-47',
'2021-05-16-20-59-49',
'2021-05-16-21-22-30',
'2021-05-16-22-26-45',
'2021-05-16-22-51-06',
'2021-05-16-23-34-15',
'2021-05-17-01-21-44',
'2021-05-17-01-37-09',
'2021-05-17-01-58-57',
'2021-05-17-02-34-27',
'2021-05-17-02-56-02',
'2021-05-17-03-54-39',
'2021-05-17-05-26-10',
'2021-05-17-05-41-45']
train_days_RandWand = ['2021-05-17-14-04-52',
'2021-05-17-14-21-56',
'2021-05-17-14-44-46',
'2021-05-17-15-26-04',
'2021-05-17-15-50-45',
'2021-05-17-16-14-26',
'2021-05-17-17-02-17',
'2021-05-17-17-27-02',
'2021-05-17-17-53-42',
'2021-05-17-18-46-44',
'2021-05-17-19-02-37',
'2021-05-17-19-39-19',
'2021-05-17-20-14-57',
'2021-05-17-20-48-53',
'2021-05-17-21-36-22',
'2021-05-17-22-16-13',
'2021-05-17-22-40-46',
'2021-05-17-23-08-01',
'2021-05-17-23-48-22',
'2021-05-18-00-07-26',
'2021-05-18-00-23-15',
'2021-05-18-00-44-33',
'2021-05-18-01-24-07']
train_days_RandFlow = ['2021-06-02-19-55-16',
'2021-06-02-20-33-09',
'2021-06-02-21-09-48',
'2021-06-02-22-05-23',
'2021-06-02-22-31-49',
'2021-06-03-03-51-03',
'2021-06-03-14-30-25',
'2021-06-03-14-59-20',
'2021-06-03-15-43-06',
'2021-06-03-16-48-18',
'2021-06-03-18-00-33',
'2021-06-03-19-07-19',
'2021-06-03-19-52-45',
'2021-06-03-20-28-22',
'2021-06-03-21-32-44',
'2021-06-03-21-57-08']
######################
# Automatic Annotation
######################
# Choose the dataset between train_days_RandBounce, train_days_RandWand, or train_days_RandFlow
train_days = np.array(train_days_RandBounce)
# Validation sessions
val_inds = [0, 1, 2]
train_inds = [i for i in range(len(train_days)) if i not in val_inds]
    # Check if we need to redo annotation (only if there is no collision folder)
redo_annot = False
for day in train_days:
annot_path = join('../Data/Simulation/collisions', day)
if not exists(annot_path):
redo_annot = True
break
# train_days = ['2020-10-20-16-30-49']
# redo_annot = True
if redo_annot:
# Initiate dataset
slam_dataset = MyhalCollisionSlam(day_list=train_days, map_day=map_day)
# Create a refined map from the map_day.
# UNCOMMENT THIS LINE if you are using your own data for the first time
# COMMENT THIS LINE if you already have a nice clean map of the environment as a point cloud
# like this one: Data/Simulation/slam_offline/2020-10-02-13-39-05/map_update_0001.ply
# slam_dataset.refine_map()
# Groundtruth annotation
annotation_process(slam_dataset, on_gt=False)
# TODO: Loop closure for aligning days together when not simulation
# Annotation of preprocessed 2D+T point clouds for SOGM generation
slam_dataset.collision_annotation()
print('annotation finished')
##############
# Prepare Data
##############
print()
print('Data Preparation')
print('****************')
# Initialize configuration class
config = MyhalCollisionConfig()
# Override with configuration from previous 3D network if given
if config.pretrained_3D and config.pretrained_3D != 'todo':
# Check if path exists
previous_path = os.path.join('results', config.pretrained_3D)
if not exists(previous_path):
raise ValueError('Given path for previous 3D network does not exist')
# Load config
prev_config = MyhalCollisionConfig()
prev_config.load(previous_path)
# List of params we should not overwrite:
kept_params = ['n_2D_layers',
'T_2D',
'dl_2D',
'power_2D_init_loss',
'power_2D_prop_loss',
'neg_pos_ratio',
'init_2D_levels',
'init_2D_resnets',
'prop_2D_resnets',
'pretrained_3D',
'detach_2D',
'shared_2D',
'apply_3D_loss',
'frozen_layers',
'max_epoch',
'learning_rate',
'momentum',
'lr_decays',
'grad_clip_norm',
'epoch_steps',
'validation_size',
'checkpoint_gap',
'saving',
'saving_path',
'input_threads']
for attr_name, attr_value in vars(config).items():
if attr_name not in kept_params:
setattr(config, attr_name, getattr(prev_config, attr_name))
# Get path from argument if given
if len(sys.argv) > 1:
config.saving_path = sys.argv[1]
###############
# Previous chkp
###############
# Choose here if you want to start training from a previous snapshot (None for new training)
# Choose index of checkpoint to start from. If None, uses the latest chkp
chkp_idx = None
chosen_chkp = None
if config.pretrained_3D and config.pretrained_3D != 'todo':
# Check if path exists
chkp_path = os.path.join('results', config.pretrained_3D, 'checkpoints')
if not exists(chkp_path):
            raise ValueError('Given path for previous 3D network does not contain any checkpoints')
# Find all snapshot in the chosen training folder
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']
# Find which snapshot to restore
if chkp_idx is None:
chosen_chkp = 'current_chkp.tar'
else:
chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join('results', config.pretrained_3D, 'checkpoints', chosen_chkp)
#####################
# Init input pipeline
#####################
# Initialize datasets (dummy validation)
training_dataset = MyhalCollisionDataset(config, train_days[train_inds], chosen_set='training', balance_classes=True)
test_dataset = MyhalCollisionDataset(config, train_days[val_inds], chosen_set='validation', balance_classes=False)
# Initialize samplers
training_sampler = MyhalCollisionSampler(training_dataset)
test_sampler = MyhalCollisionSampler(test_dataset)
# Initialize the dataloader
training_loader = DataLoader(training_dataset,
batch_size=1,
sampler=training_sampler,
collate_fn=MyhalCollisionCollate,
num_workers=config.input_threads,
pin_memory=True)
test_loader = DataLoader(test_dataset,
batch_size=1,
sampler=test_sampler,
collate_fn=MyhalCollisionCollate,
num_workers=config.input_threads,
pin_memory=True)
# Calibrate max_in_point value
if config.max_in_points < 0:
config.max_in_points = 1e9
training_loader.dataset.max_in_p = 1e9
training_sampler.calib_max_in(config, training_loader, untouched_ratio=0.9, verbose=True)
if config.max_val_points < 0:
config.max_val_points = 1e9
test_loader.dataset.max_in_p = 1e9
test_sampler.calib_max_in(config, test_loader, untouched_ratio=0.95, verbose=True)
# Calibrate samplers
training_sampler.calibration(training_loader, verbose=True)
test_sampler.calibration(test_loader, verbose=True)
# debug_timing(training_dataset, training_loader)
# debug_timing(test_dataset, test_loader)
# debug_class_w(training_dataset, training_loader)
print('\nModel Preparation')
print('*****************')
# Define network model
t1 = time.time()
net = KPCollider(config, training_dataset.label_values, training_dataset.ignored_labels)
debug = False
if debug:
print('\n*************************************\n')
print(net)
print('\n*************************************\n')
for param in net.parameters():
if param.requires_grad:
print(param.shape)
print('\n*************************************\n')
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad))
print('\n*************************************\n')
# Freeze layers if necessary
if config.frozen_layers:
for name, child in net.named_children():
if name in config.frozen_layers:
for param in child.parameters():
if param.requires_grad:
param.requires_grad = False
child.eval()
# Define a trainer class
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp, gpu_id=chosen_gpu)
print('Done in {:.1f}s\n'.format(time.time() - t1))
print('\nStart training')
print('**************')
# Training
trainer.train(net, training_loader, test_loader, config)
print('Forcing exit now')
os.kill(os.getpid(), signal.SIGINT)
#!/usr/bin/env python
"""Split large file into multiple pieces for upload to S3.
S3 only supports 5GB files for uploading directly, so for larger CloudBioLinux
box images we need to use boto's multipart file support.
This parallelizes the task over available cores using multiprocessing.
Usage:
s3_multipart_upload.py <file_to_transfer> <bucket_name> [<s3_key_name>]
if <s3_key_name> is not specified, the filename will be used.
--norr -- Do not use reduced redundancy storage.
--public -- Make uploaded files public.
--cores=n -- Number of cores to use for upload
Files are stored at cheaper reduced redundancy storage by default.
"""
import os
import sys
import glob
import subprocess
import contextlib
import functools
import multiprocessing
from multiprocessing.pool import IMapIterator
from optparse import OptionParser
import boto
def main(transfer_file, bucket_name, s3_key_name=None, use_rr=True,
make_public=True, cores=None):
if s3_key_name is None:
s3_key_name = os.path.basename(transfer_file)
conn = boto.connect_s3()
bucket = conn.lookup(bucket_name)
if bucket is None:
bucket = conn.create_bucket(bucket_name)
mb_size = os.path.getsize(transfer_file) / 1e6
if mb_size < 50:
_standard_transfer(bucket, s3_key_name, transfer_file, use_rr)
else:
_multipart_upload(bucket, s3_key_name, transfer_file, mb_size, use_rr,
cores)
s3_key = bucket.get_key(s3_key_name)
if make_public:
s3_key.set_acl("public-read")
def upload_cb(complete, total):
sys.stdout.write(".")
sys.stdout.flush()
def _standard_transfer(bucket, s3_key_name, transfer_file, use_rr):
print " Upload with standard transfer, not multipart",
new_s3_item = bucket.new_key(s3_key_name)
new_s3_item.set_contents_from_filename(transfer_file, reduced_redundancy=use_rr,
cb=upload_cb, num_cb=10)
    print()
def map_wrap(f):
    @functools.wraps(f)
    def wrapper(args):
        # multiprocessing's imap passes each argument tuple as a single item;
        # unpack it here (replaces the Python 2 ``apply`` call).
        return f(*args)
    return wrapper
def mp_from_ids(mp_id, mp_keyname, mp_bucketname):
"""Get the multipart upload from the bucket and multipart IDs.
This allows us to reconstitute a connection to the upload
from within multiprocessing functions.
"""
conn = boto.connect_s3()
bucket = conn.lookup(mp_bucketname)
mp = boto.s3.multipart.MultiPartUpload(bucket)
mp.key_name = mp_keyname
mp.id = mp_id
return mp
@map_wrap
def transfer_part(mp_id, mp_keyname, mp_bucketname, i, part):
"""Transfer a part of a multipart upload. Designed to be run in parallel.
"""
mp = mp_from_ids(mp_id, mp_keyname, mp_bucketname)
print " Transferring", i, part
with open(part) as t_handle:
mp.upload_part_from_file(t_handle, i+1)
os.remove(part)
def _multipart_upload(bucket, s3_key_name, tarball, mb_size, use_rr=True,
cores=None):
"""Upload large files using Amazon's multipart upload functionality.
"""
def split_file(in_file, mb_size, split_num=5):
prefix = os.path.join(os.path.dirname(in_file),
"%sS3PART" % (os.path.basename(s3_key_name)))
# require a split size between 5Mb (AWS minimum) and 250Mb
split_size = int(max(min(mb_size / (split_num * 2.0), 250), 5))
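        # e.g. (hypothetical numbers) a ~2000 MB file with split_num=5 gives
        # split_size = max(min(2000 / 10.0, 250), 5) = 200, i.e. ~200 MB pieces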
if not os.path.exists("%saa" % prefix):
cl = ["split", "-b%sm" % split_size, in_file, prefix]
subprocess.check_call(cl)
return sorted(glob.glob("%s*" % prefix))
mp = bucket.initiate_multipart_upload(s3_key_name, reduced_redundancy=use_rr)
with multimap(cores) as pmap:
for _ in pmap(transfer_part, ((mp.id, mp.key_name, mp.bucket_name, i, part)
for (i, part) in
enumerate(split_file(tarball, mb_size, cores)))):
pass
mp.complete_upload()
@contextlib.contextmanager
def multimap(cores=None):
"""Provide multiprocessing imap like function.
    The context manager handles setting up the pool, works around interrupt issues,
    and terminates the pool on completion.
"""
if cores is None:
cores = max(multiprocessing.cpu_count() - 1, 1)
def wrapper(func):
def wrap(self, timeout=None):
return func(self, timeout=timeout if timeout is not None else 1e100)
return wrap
    IMapIterator.__next__ = wrapper(IMapIterator.__next__)
pool = multiprocessing.Pool(cores)
yield pool.imap
pool.terminate()
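# Usage sketch for multimap (mirrors the call in _multipart_upload above): the function handed
# to the pool should be decorated with @map_wrap so its argument tuple gets unpacked, e.g.
#   with multimap(4) as pmap:
#       for _ in pmap(transfer_part, ((mp.id, mp.key_name, mp.bucket_name, i, p)
#                                     for i, p in enumerate(parts))):
#           pass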
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-r", "--norr", dest="use_rr",
action="store_false", default=True)
parser.add_option("-p", "--public", dest="make_public",
action="store_true", default=False)
parser.add_option("-c", "--cores", dest="cores",
default=multiprocessing.cpu_count())
(options, args) = parser.parse_args()
if len(args) < 2:
        print(__doc__)
sys.exit()
kwargs = dict(use_rr=options.use_rr, make_public=options.make_public,
cores=int(options.cores))
main(*args, **kwargs)
#!/usr/bin/env python
# coding: utf-8
###################################################################################################
#
# File : frame_study.py
#
# Author : P.Antilogus
#
# Version : 22 Feb 2019
#
# Goal : this python file reads raw data images , and can be used for specific sensor diagnostics like :
# - cte
# - overscan
# - noise
#
# Example : see notebooks using this package
#
# Remark : it is in the process of being cleaned up and simplified ...but this is not my top priority today ;-)
#
try:
import pyfits
except :
import astropy.io.fits as pyfits
import numpy as np
import glob
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import time
import pickle
matplotlib.rcParams['axes.formatter.useoffset'] = False
#
def image_area(image) :
# input : image ==> fits image
# output : image section coordinate to be used in python table: ymin , ymax ,xmin, xmax
# -use pyfits to open the file
# -extract the image area to be used in python table from the DATASEC keyword
#
r=image[1].header['DATASEC'][1:-1].split(',')
x=r[0].split(':')
y=r[1].split(':')
#
return int(y[0])-1,int(y[1]),int(x[0])-1,int(x[1])
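# Worked example (the header value below is hypothetical): with DATASEC = '[11:522,1:2002]'
# the function returns (0, 2002, 10, 522), i.e. the image area is data[0:2002, 10:522]
# in python indexing.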
#
class Ifile :
    # Handle ( selection etc. ) of file lists from the LSST CCD test bench
def __init__(self,dirall=['/Users/antilog/scratch/20160901'],Pickle=False,root_for_pickle='/sps/lsst/DataBE/lpnws5203',fkey={},verbose=False,Slow=True,single_t=False,nskip=0,nkeep=-1):
        # dirall : a list of directories/files to read in; if fkey is set, the file headers are
        #          used to decide whether to keep each file, and if Pickle is True the file list
        #          is read from the pickle file content (fkey is still used for selection)
        # fkey : a triple dictionary { 'selection_name' : { 'fits_header_name' : { key : value } } }
        #        used to select files
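        # Hypothetical fkey example (header and key names are placeholders): keep files whose
        # 'Primary' header has EXPTIME == 15.0, under the selection name 'flat_15s':
        #   fkey = {'flat_15s': {'key': {'Primary': {'EXPTIME': 15.0}}}}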
self.nkept=0
self.all_file=[]
self.clap=[]
self.selection=[]
self.fkey=fkey
self.directory=sorted(dirall)
self.stat={}
self.nseen=0
# loop on header
if Pickle :
self.all_file_from_pickle(dirall=dirall,root_for_pickle=root_for_pickle,fkey=fkey,verbose=verbose,Slow=Slow,single_t=single_t,nskip=nskip,nkeep=nkeep)
else :
self.all_file_from_dir(dirall=dirall,fkey=fkey,verbose=verbose,Slow=Slow,single_t=single_t,nskip=nskip,nkeep=nkeep)
return
def all_file_from_dir(self,dirall,fkey,verbose,Slow,single_t,nskip,nkeep):
        # dirname : can be a directory name, or a file pattern with * as long as it ends in .fz
# ex : /Users/antilog/scratch/REB_DATA/20160513/linearity
# or : /Users/antilog/scratch/REB_DATA/20160513/linearity/reb3*.fz
# fkey : dictionary key word for image selection
# verbose : if True , print messages about each selected file ( default= False )
# single_t : keep only one file per exposure time (default False )
self.all_file=[]
old_time=[0.]
fits_is_open=False
#
# fill all_file from the header of the files in dirall .
for dirname in self.directory :
# have we allready all the needed file ?
if nkeep> 0 and self.nkept==nkeep :
break
# build the list of file for this directory
if (len(os.path.splitext(dirname)[1])>0) :
file_list=glob.glob(dirname)
else :
file_list=glob.glob("%s/*.fz" % (dirname))
file_list.sort()
# loop on files to select them if needed
for filenamed in file_list :
#
keep=True
if len(fkey)==0 :
selection='Main'
else :
fits_is_open=False
dname, fname=os.path.split((filenamed))
# is there any extra selection based on Header , key , value ?
for selection, sel_id in fkey.items() :
local_keep=False
# select of file name if any
if 'first' in sel_id.keys() :
if fname < sel_id['first'] : continue
if 'last' in sel_id.keys() :
if fname > sel_id['last'] : continue
local_keep=True
if 'key' in sel_id.keys() :
if not(fits_is_open) :
fitsfile=pyfits.open(filenamed)
fits_is_open=True
for header, key_cur in sel_id['key'].items() :
if not ( local_keep ) : break
for key, value in key_cur.items() :
if ( key in fitsfile[header].header ) :
if ( fitsfile[header].header[key]!=value ):
local_keep=False
break
else :
local_keep=False
break
                    # this file is ok for the current selection ; remark : a file is assigned to the first selection that it is compatible with
if local_keep : break
keep=local_keep
#
if (keep and single_t ) :
if not(fits_is_open) :
fitsfile=pyfits.open(filenamed)
fits_is_open=True
new_time=fitsfile[0].header['EXPTIME']
if new_time in old_time :
keep=False
else :
old_time.append(new_time)
if (keep) :
self.nseen+=1
if self.nseen>nskip :
if not(fits_is_open) :
fitsfile=pyfits.open(filenamed)
fits_is_open=True
self.all_file.append(datafile(fitsfile,Slow))
self.selection.append(selection)
# to be updated with a call to clap
#self.clap.append(new_time)
if verbose : print ('%d : Selected %s File %s ' % (self.nkept,selection,filenamed) )
self.nkept+=1
if self.nkept==nkeep and nkeep > 0 :
# we selected the number of files requested
fitsfile.close()
break
if fits_is_open :
fitsfile.close()
del fitsfile
fits_is_open=False
return
def all_file_from_pickle(self,dirall,root_for_pickle,fkey,verbose,Slow,single_t,nskip,nkeep):
old_time=[0.]
# fill all_file from the header of the files in dirall .
for dirname in self.directory :
# have we allready all the needed file ?
if nkeep> 0 and self.nkept==nkeep :
break
# build the list of file for this directory
if (len(os.path.splitext(dirname)[1])>0) :
file_list=glob.glob(dirname)
else :
file_list=glob.glob("%s/*.pkl" % (dirname))
file_list.sort()
# loop on files to select them if needed
for pickle_file in file_list :
# open the pickle file
input=open(pickle_file,'rb')
file=pickle.load(input)
#
for i_cur in range(len(file)) :
#filename=file[i_cur].filename
dname=file[i_cur].dir
fname=file[i_cur].filename
clap=file[i_cur].clap
keep=True
if len(fkey)==0 :
selection='Main'
else :
#
# is there any extra selection based on Header , key , value ?
for selection, sel_id in fkey.items() :
local_keep=False
# select of file name if any
if 'first' in sel_id.keys() :
if fname < sel_id['first'] : continue
if 'last' in sel_id.keys() :
if fname > sel_id['last'] : continue
local_keep=True
                        # test key (=) , key+ (>=) , key- (<=)
if 'key' in sel_id.keys() :
for header, key_cur in sel_id['key'].items() :
if not ( local_keep ) : break
for key, value in key_cur.items() :
if ( key in file[i_cur].header[header] ) :
if (file[i_cur].header[header][key] is None) or ( file[i_cur].header[header][key]!=value ):
local_keep=False
break
else :
local_keep=False
break
                        # this file is ok for the current selection ; remark : a file is assigned to the first selection that it is compatible with
if not(local_keep) : continue
#
if 'key+' in sel_id.keys() :
for header, key_cur in sel_id['key+'].items() :
if not ( local_keep ) : break
for key, value in key_cur.items() :
if ( key in file[i_cur].header[header] ) :
if (file[i_cur].header[header][key] is None) or ( file[i_cur].header[header][key]<value ):
local_keep=False
break
else :
local_keep=False
break
                        # this file is ok for the current selection ; remark : a file is assigned to the first selection that it is compatible with
if not(local_keep) : continue
#
if 'key-' in sel_id.keys() :
for header, key_cur in sel_id['key-'].items() :
if not ( local_keep ) : break
for key, value in key_cur.items() :
if ( key in file[i_cur].header[header] ) :
if (file[i_cur].header[header][key] is None) or ( file[i_cur].header[header][key]>value ):
local_keep=False
break
else :
local_keep=False
break
                        # this file is ok for the current selection ; remark : a file is assigned to the first selection that it is compatible with
if local_keep : break
keep=local_keep
#
if (keep and single_t ) :
new_time=file[i_cur].header['Primary']['EXPTIME']
if new_time in old_time :
keep=False
else :
old_time.append(new_time)
if (keep) :
self.nseen+=1
if self.nseen>nskip :
fitsfile=pyfits.open(root_for_pickle+'/'+dname+'/'+fname)
self.all_file.append(datafile(fitsfile,Slow))
fitsfile.close()
#
self.clap.append(clap)
self.selection.append(selection)
if verbose : print ('%d : Selected %s File %s ' % (self.nkept,selection,fname) )
self.nkept+=1
if self.nkept==nkeep and nkeep > 0 :
# we selected the number of files requested
break
return
def plot(self,plt_name='',on_screen=False) :
# define last to plot :
#
fig=plt.figure(figsize=(15,15))
title="Noise estimation from Overscan : %s " % (plt_name)
fig.suptitle(title)
iplt=1
ax=fig.add_subplot(3,3,iplt)
iplt+=1
return
class datafile :
def __init__(self, fitsfile,Slow=True):
'''
Construct all the necessary attributes for the datafile object.
Parameters :
        fitsfile : an opened FITS file (pyfits / astropy.io.fits HDUList) ; all extensions should come from the same raft-sensor
        Slow (bool) : compute extended image properties or not ( default : True )
        remark : for CTE analysis alone , Slow can be set to False , which is faster
'''
#
self.Image=[]
self.Hdu=[]
self.HduMax=0
self.fft=[]
self.w=[]
self.Mean=[]
self.Std=[]
self.Median=[]
self.MedPScan=[]
self.StdPScan=[]
self.MeanSScan=[]
self.MedSScan=[]
self.StdSScan=[]
self.StdSScanOS=[]
self.MeanPScan=[]
self.StdPScanOS=[]
self.Mean_col=[]
self.Std_col=[]
self.Mean_line=[]
self.Median_line=[]
self.Std_line=[]
self.Std_l60=[]
self.Attenuator=0.
self.CCD_COND={}
self.Range=0.
self.PreExp=0.
self.PostExp=0.
# image area
first_line,first_p_over,first_col,first_s_over=image_area(fitsfile)
self.first_col=first_col
self.first_s_over=first_s_over
self.first_line=first_line
self.first_p_over=first_p_over
#
try :
self.exptime=float(fitsfile[0].header['EXPTIME'])
except :
# Paris test bench key value for exposure time is different
self.exptime=float(fitsfile[0].header['EXPOSURE'])
try :
self.ccdslot=(fitsfile[0].header['CCDSLOT']).strip()
self.raftbay=(fitsfile[0].header['RAFTBAY']).strip()
except :
self.ccdslot=''
self.raftbay=''
self.fluxs_last=[]
self.fluxp_last=[]
self.fluxs_last_std=[]
#self.fluxs_last_var=[]
self.fluxp_last_std=[]
self.fluxp_used=[]
#
self.over4_col_std=[]
self.over4_line_std=[]
#
#
# self.Date=JdUtc(fitsfile[0].header['DATE']).Jd for i in range(len(fitsfile)):
for i in range(1,min(17,len(fitsfile))):
if ( fitsfile[i].header['XTENSION'].strip()=='IMAGE' ) :
self.Hdu.append(i)
self.HduMax=i
            # Remark : for the moment we don't know which REB slice we are looking at
# for e2v and BNL data it's [8:]
# self.Channel.append(int(fitsfile[i].header['EXTNAME'][8:]))
# for Paris data it's [5:]
#self.Channel.append(int(fitsfile[i].header['EXTNAME'][5:]))
# self.Image.append(np.copy(fitsfile[i].data))
self.Image.append(fitsfile[i].header['EXTNAME'].strip())
# image
# Mean and noise
self.Median.append(np.median(fitsfile[i].data[first_line:first_p_over,first_col:first_s_over]))
if Slow :
self.Mean.append(fitsfile[i].data[first_line:first_p_over,first_col:first_s_over].mean())
self.Std.append(fitsfile[i].data[first_line:first_p_over,first_col:first_s_over].std())
# line OverScan
self.MedPScan.append(np.median(fitsfile[i].data[first_p_over+5:,first_col:first_s_over]))
self.MeanPScan.append(fitsfile[i].data[first_p_over+5:,first_col:first_s_over].mean())
self.StdPScan.append(fitsfile[i].data[first_p_over+5:,first_col:first_s_over].std())
                # Serial over-scan ( first columns skipped to remove CTE / overshoot )
self.MedSScan.append(np.median(fitsfile[i].data[:,first_s_over+5:]))
self.MeanSScan.append(fitsfile[i].data[:,first_s_over+5:].mean())
self.StdSScan.append(fitsfile[i].data[:,first_s_over+5:].std())
                # information for 2D diagnostic of the overscan : is the overscan flat as a function of the column ? of the line ?
# --- data in the overscan corner ( pixels are overscan in line and column )
self.over4_col_std.append(fitsfile[i].data[first_p_over:,first_s_over:].std(axis=1).mean())
self.over4_line_std.append(fitsfile[i].data[first_p_over:,first_s_over:].std(axis=0).mean())
if Slow :
                    # Same but bias subtracted
mean_line=np.median(fitsfile[i].data[first_p_over:,:],axis=0)
mean_column=np.median(fitsfile[i].data[:,first_s_over:],axis=1)
last_l=len(fitsfile[i].data[:,0])
last_s=len(fitsfile[i].data[0,:])
rawl=np.zeros((last_l-first_p_over,last_s))
raws=np.zeros((last_l,last_s-first_s_over))
for l in range(first_p_over,last_l) :
rawl[l-first_p_over,:]=fitsfile[i].data[l,:]-mean_line
self.StdPScanOS.append((rawl[:,first_col:].std(axis=1)).mean())
#
for c in range(first_s_over,last_s) :
raws[:,c-first_s_over]=fitsfile[i].data[:,c]-mean_column
self.StdSScanOS.append((raws[first_line:,:].std(axis=0)).mean())
# average allong the column and line
#self.Mean_col.append(fitsfile[i].data[first_line:first_p_over,:].mean(axis=0))
#self.Mean_line.append(fitsfile[i].data[:,first_col:first_s_over].mean(axis=1))
#self.Median_line.append(np.median(fitsfile[i].data[:,first_col:first_s_over],axis=1))
#self.Std_col.append(fitsfile[i].data[first_line:first_p_over,:].std(axis=0))
#self.Std_line.append(fitsfile[i].data[:,first_col:first_s_over].std(axis=1))
#self.Std_l60.append(fitsfile[i].data[:60,first_col:first_s_over].std())
# For CTE Serie
#
                # REMARK : The size cuts ( 28 , line +/- 10 ... ) are hard-wired and used in the CTE part of the code to compute statistics !!!!
#
self.fluxs_last.append(fitsfile[i].data[first_line+10:first_p_over-10,first_s_over-1:first_s_over+28].mean(axis=0)-fitsfile[i].data[first_line+10:first_p_over-10,first_s_over+15:].mean())
#
self.fluxs_last_std.append(fitsfile[i].data[first_line+10:first_p_over-10,first_s_over-1:first_s_over+28].std(axis=0)/np.sqrt(float(first_p_over-10-first_line-10)))
#
#self.fluxs_last_var.append(fitsfile[i].data[first_line+100:first_p_over-100,first_s_over-1:first_s_over+28].std(axis=0)**2-fitsfile[i].data[first_line+100:first_p_over-100,first_s_over+15:].std()**2)
# For CTE //
#
# fluxp=np.array([ fitsfile[i].data[first_p_over-1:first_p_over+28,icol] - np.median(fitsfile[i].data[first_p_over+5:,icol ]) for icol in range(first_col+10,first_s_over-10) ])
overscan_offset= np.median(fitsfile[i].data[first_p_over+5:,first_col+10:first_s_over-10],axis=0)
#
# we do a median per slice , to kill outlier , but keep a statistical precision < 1 adu ...still not that precise compared to a mean
#self.fluxp_last.append((np.median(fluxp[0:100],axis=0)+np.median(fluxp[100:200],axis=0)+np.median(fluxp[200:300],axis=0)+np.median(fluxp[300:400],axis=0)+np.median(fluxp[400:500],axis=0))/5.)
# the correct version : kill outlier ( there is outlier in case of blooming column ) : to speed up we just kill based on the last physical column
self.fluxp_last.append(np.zeros((29)))
self.fluxp_last_std.append(np.zeros((29)))
self.fluxp_used.append(np.zeros((29)))
#
last_line=fitsfile[i].data[first_p_over-1,first_col+10:first_s_over-10]-overscan_offset
last_line_median=np.median(last_line)
last_line_std=5*last_line.std()
column_ok=[icol for icol in range(len(last_line)) if np.abs(last_line[icol]-last_line_median) < last_line_std ]
#
for j in range(29):
fluxp_truncated=(fitsfile[i].data[first_p_over-1+j,first_col+10:first_s_over-10]-overscan_offset)[column_ok]
self.fluxp_last[-1][j]=np.mean(fluxp_truncated)
self.fluxp_last_std[-1][j]=np.std(fluxp_truncated)/np.sqrt(len(fluxp_truncated))
self.fluxp_used[-1][j]=len(fluxp_truncated)
#fluxp=np.array([ fitsfile[i].data[first_p_over-1:first_p_over+28,icol] - np.median(fitsfile[i].data[first_p_over+5:,icol ]) for icol in range(first_col+10,first_s_over-10) ])
#self.fluxp_last.append(np.median(fluxp,axis=0))
# self.fluxp_last.append(np.median(fitsfile[i].data[first_p_over-1:first_p_over+28,first_col+10:first_s_over-10],axis=1)-np.median(fitsfile[i].data[first_p_over-1:first_p_over+28,first_s_over+5:first_s_over+15]))
else:
# last image section read
break
return
class cte :
def __init__(self, all_file, gain=[0.704650434205,0.68883578783,0.688459358774,0.696697494642,0.689209827484,0.696579402812,0.698973006751,0.689613072912,0.682880384357,0.696206655845,0.690349506621,0.691506176017,0.690763478766,0.689762341309,0.694801544092,0.850025229184 ],serie=True):
#
nb_f_max=len(all_file)
#
self.cte_flux=np.zeros((16,nb_f_max))
self.cte_time=np.zeros((nb_f_max))
self.cte_ftime=np.zeros((nb_f_max))
self.cte_flux_s=np.zeros((16,nb_f_max))
self.cte_y=np.zeros((16,nb_f_max,28))
self.cte_y_s=np.zeros((16,nb_f_max,28))
self.cte_y_std=np.zeros((16,nb_f_max,28))
self.cte_y_s_std=np.zeros((16,nb_f_max,28))
self.ylev=np.zeros((16,nb_f_max))
self.ylev_std=np.zeros((16,nb_f_max))
self.nb_file=np.zeros((nb_f_max))
self.serie=serie
self.cte_noise_s=np.zeros((16,nb_f_max))
self.cte_noise_s_std=np.zeros((16,nb_f_max))
self.overscan_std=np.zeros((16,nb_f_max))
self.over8_18=np.zeros((16,nb_f_max))
self.over8_18_std=np.zeros((16,nb_f_max))
# pixel number in unit of CCD
self.i_f=0
#
if nb_f_max==0 : return
#
self.first_file=all_file[0]
#
cte_noise_std=np.zeros((16,nb_f_max,28))
#
for f in all_file :
im_flux=np.median(np.array(f.Median))
if self.i_f>0 and f.exptime in self.cte_time[0:self.i_f] :
all_cur=np.flatnonzero(self.cte_time[0:self.i_f] == f.exptime)
# Attention could be that we have the same exposure time but not the same flux (extra filter)
found_cur=-1
for cur_cur in all_cur :
ratio=max(self.cte_ftime[cur_cur]/im_flux,im_flux/self.cte_ftime[cur_cur])
if ratio<1.1 :
found_cur=cur_cur
if found_cur > -1 :
i_cur=found_cur
else:
i_cur=self.i_f
self.cte_ftime[i_cur]=im_flux
self.cte_time[i_cur]=f.exptime
self.i_f+=1
else :
i_cur=self.i_f
self.cte_ftime[i_cur]=im_flux
self.cte_time[i_cur]=f.exptime
self.i_f+=1
for ch in range(f.HduMax) :
if serie :
# CTE serie
if ch==0 :
# print ('%s ,time %f, Channel %d, flux %f (flux last col %f) , image %f , signal dispersion %f , scan serie %f , scan serie dispersion %f ' % (f.filename,f.exptime,ch,f.Mean[ch]-f.MeanSScan[ch],f.fluxs_last[ch][0],f.Mean[ch], f.Std[ch],f.MeanSScan[ch],f.StdSScan[ch]))
self.first=f.first_s_over
                    # what matters in the CTE definition is how many transfers were done for the last column read , which is the size of the pre-scan + the size of the image (in the past we subtracted the prescan , which was an error)
self.nb_pixel=f.first_s_over
self.nb_file[i_cur]+=1
flux_last=f.fluxs_last[ch][0]
#
self.cte_y[ch,i_cur,:]+=f.fluxs_last[ch][1:]
self.cte_y_std[ch,i_cur,:]+=(f.fluxs_last_std[ch][1:])**2
cte_noise_std[ch,i_cur,:]+=(f.fluxs_last_std[ch][1:])**2*(float(f.first_p_over-10-f.first_line-10))
self.overscan_std[ch,i_cur]+=(f.over4_col_std[ch])**2
else :
# CTE //
if ch==0 :
# print ('%s ,time % f, Channel %d, flux %f (flux last line %f) , image %f , signal dispersion %f , scan // %f , scan // dispersion %f ' % (f.filename,f.exptime,ch,f.Mean[ch]-f.MedPScan[ch],f.fluxp_last[ch][0],f.Mean[ch], f.Std[ch],f.MedPScan[ch],f.StdPScan[ch]))
self.first=f.first_p_over
self.nb_pixel=f.first_p_over
self.nb_file[i_cur]+=1
flux_last=f.fluxp_last[ch][0]
#
self.cte_y[ch,i_cur,:]+=f.fluxp_last[ch][1:]
self.cte_y_std[ch,i_cur,:]+=f.fluxp_last_std[ch][1:]**2
cte_noise_std[ch,i_cur,:]+=(f.fluxp_last_std[ch][1:])**2*f.fluxp_used[ch][1:]
self.overscan_std[ch,i_cur]+=(f.over4_line_std[ch])**2
#if flux_last==0. : flux_last=1e-6
self.cte_flux[ch,i_cur]+=flux_last
# self.i_f+=1
#fl=np.argsort(self.cte_flux,axis=1)
ft=np.argsort(self.cte_ftime[0:self.i_f])
l_ft=len(ft)
#print('order in time ',ft)
self.lmax=np.zeros((16),dtype=np.int16)
# we take the number of amplifiers from the last file read
for ch in range(f.HduMax) :
l_k=0
cte_sig=np.zeros((l_ft))
#for l in fl[ch,:] :
for l in ft[:] :
# protection against divide by 0 improbable ?
if self.cte_flux[ch,l]==0 : self.cte_flux[ch,l]=1.0e-6
self.cte_y_s[ch,l_k,:]=self.cte_y[ch,l,:]*gain[ch]/self.nb_file[l]
# note: the 1/n below is because sqrt(1/n)**2 is needed to get the error on the mean, not the dispersion
self.cte_y_s_std[ch,l_k,:]=np.sqrt(self.cte_y_std[ch,l,:])*gain[ch]/self.nb_file[l]
self.cte_noise_s[ch,l_k]=np.sqrt(cte_noise_std[ch,l,2:].mean(axis=0)/(self.nb_file[l]))*gain[ch]
self.cte_flux_s[ch,l_k]=self.cte_flux[ch,l]*gain[ch]/self.nb_file[l]
self.cte_noise_s_std[ch,l_k]=np.sqrt(cte_noise_std[ch,l,2:].std(axis=0)/(self.nb_file[l])/(26))*gain[ch]
l_k+=1
for l in range(1,l_ft) :
if self.cte_flux_s[ch,l]<self.cte_flux_s[ch,self.lmax[ch]] and self.cte_flux_s[ch,self.lmax[ch]] > 100000 :
self.lmax[ch]=l
break
self.lmax[ch]=l
if len(self.cte_flux_s[ch,:])==1 : self.lmax[ch]=1
self.ylev[ch,0:self.lmax[ch]]=(self.cte_y_s[ch,0:self.lmax[ch],0]+self.cte_y_s[ch,0:self.lmax[ch],1])/self.cte_flux_s[ch,0:self.lmax[ch]]/float(self.nb_pixel)
#self.ylev_std[ch,0:self.lmax[ch]]=self.ylev[ch,0:self.lmax[ch]]*np.sqrt((self.cte_y_s_std[ch,0:self.lmax[ch],0]/self.cte_y_s[ch,0:self.lmax[ch],0])**2+(self.cte_y_s_std[ch,0:self.lmax[ch],1]/self.cte_y_s[ch,0:self.lmax[ch],1])**2)
self.ylev_std[ch,0:self.lmax[ch]]=np.sqrt(self.cte_y_s_std[ch,0:self.lmax[ch],0]**2+self.cte_y_s_std[ch,0:self.lmax[ch],1]**2)/self.cte_flux_s[ch,0:self.lmax[ch]]/float(self.nb_pixel)
# re-order and normalize Overscan data
self.overscan_std[ch,0:l_ft]=np.sqrt(self.overscan_std[ch,ft]/self.nb_file[ft])*gain[ch]
# overscan stability
self.over8_18[ch,0:self.lmax[ch]]=(self.cte_y_s[ch,0:self.lmax[ch],8:18]).mean(axis=1)
self.over8_18_std[ch,0:self.lmax[ch]]=np.sqrt(np.sum(self.cte_y_s_std[ch,0:self.lmax[ch],8:18]**2,axis=1))/10.
return
def print_cte(self,ccd_name,nf=0):
if self.serie :
print('Serial CTE for %s ----------------------------------------------------------------' % (ccd_name) )
else :
print(' // CTE for %s ----------------------------------------------------------------' % (ccd_name) )
#
print('Ch | flux | 1-CTE | Signal in Overscan | Overscan Noise Noise in |')
print(' | | | ov 1 ov 2 ov 8 to 18 | overscan corner |')
for n in range(nf,self.i_f) :
print('---------------------------------------------------------------------------------------------------------')
for ch in range(16) :
if n>=self.lmax[ch] :
print('%02d | % 6.0f | saturation (no eval) | % 6.02f % 6.02f % 6.02f+/-%5.02f | % 5.02f+/-%5.02f % 5.02f |' % (
ch,
self.cte_flux_s[ch,n],
self.cte_y_s[ch,n,0],
self.cte_y_s[ch,n,1],
self.over8_18[ch,n],self.over8_18_std[ch,n],
self.cte_noise_s[ch,n],self.cte_noise_s_std[ch,n],
self.overscan_std[ch,n]))
else :
print('%02d | % 6.0f | %9.3g+/-%9.3g | % 6.02f % 6.02f % 6.02f+/-%5.02f | % 5.02f+/-%5.02f % 5.02f |' % (
ch,
self.cte_flux_s[ch,n],
self.ylev[ch,n],self.ylev_std[ch,n],
self.cte_y_s[ch,n,0],
self.cte_y_s[ch,n,1],
self.over8_18[ch,n],self.over8_18_std[ch,n],
self.cte_noise_s[ch,n],self.cte_noise_s_std[ch,n],
self.overscan_std[ch,n])
)
print('---------------------------------------------------------------------------------------------------------')
return
def plot_cte(self,ch,ccd_name,nf=0,on_screen=False,root_dir='.',unit='e-') :
'''
plot_cte(self,ch,ccd_name,nf=0,on_screen=False,root_dir='.')
Plot the CTE results from cte class per channel
Parameters:
ch (int) : channel index ( = hdu number -1 in data file ) to plot
ccd_name (str) : CCD name: extra string used in the caption to identify this plot series
(the CCD name itself, from the file header, is automatically added; this is more to identify
the run or test level in the plot labels)
nf (int) : index of first flux entry to plot (default=0)
on_screen (bool) : do we plot on display (or just save png on disk ) (default=False)
root_dir (str) : top directory to save directory tree with plots
(default = '.' , directory used to save the plots will be ./raft_name/ccd_name/ch/ )
unit (str) : unit of flux used ( e- or ADU )
'''
#
root_plt=os.path.join(root_dir,self.first_file.raftbay,self.first_file.ccdslot,str(self.first_file.Hdu[ch]))
label_header=ccd_name+' '+self.first_file.raftbay+' '+self.first_file.ccdslot+' '+self.first_file.Image[ch]+' (hdu='+str(self.first_file.Hdu[ch])+')'
# create the directory
os.makedirs(root_plt,exist_ok=True)
#
xx=[max(np.min(self.cte_flux_s[:,nf:self.lmax[ch]])*.9,10.),min(2.0e5,np.max(self.cte_flux_s[:,nf:self.lmax[ch]])*1.1)]
#
pix_col=['b','c']
pix_sym=['<','>']
fig=plt.figure(figsize=(10,12))
x=range(self.first,self.first+28)
if self.serie :
title="CTI Serial : "+label_header
yv=5.0e-6
else :
title="CTI // : "+label_header
yv=3.0e-6
yy=[yv,yv]
fig.suptitle(title,y=0.94)
#fig.tight_layout()
iplt=1
ax=fig.add_subplot(3,3,iplt)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
iplt+=1
#ylev=(self.cte_y_s[ch,nf:self.lmax[ch],0]+self.cte_y_s[ch,nf:self.lmax[ch],1])/self.cte_flux_s[ch,nf:self.lmax[ch]]/float(self.nb_pixel)
label='%02d' % self.first_file.Hdu[ch]
#plt.plot(self.cte_flux_s[ch,nf:self.lmax[ch]],self.ylev[ch,nf:self.lmax[ch]],'o',color='r',label=label)
plt.errorbar(self.cte_flux_s[ch,nf:self.lmax[ch]],self.ylev[ch,nf:self.lmax[ch]], yerr=self.ylev_std[ch,nf:self.lmax[ch]],fmt='o', ecolor='r',label=label)
#print(self.ylev[ch,nf:self.lmax[ch]])
#print(self.ylev_std[ch,nf:self.lmax[ch]])
plt.plot(xx,yy,'g')
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
else :
plt.xlabel('<flux> of last line in '+unit)
plt.xlim(xx[0],xx[1])
y_min=min(max(int(np.min(self.ylev[ch,nf:self.lmax[ch]])*500)/100.,1.e-8),1e-7)
y_max=5e-5
plt.ylim(y_min,y_max)
plt.ylabel('1-CTE')
plt.xscale('log')
if abs(y_max/y_min) > 80. : plt.yscale('log')
# plt.locator_params(axis="both", tight=True, nbins=10)
plt.legend()
ax=fig.add_subplot(3,3,iplt)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
iplt+=1
y_min=1.
y_max=0
for pix in range(2) :
ylev=self.cte_y_s[ch,nf:self.lmax[ch],pix]/self.cte_flux_s[ch,nf:self.lmax[ch]]/float(self.nb_pixel)
#
label="pix + %d " % (pix+1)
y_min=min(y_min,np.min(ylev))
y_max=max(0.,np.max(ylev))
plt.plot(self.cte_flux_s[ch,nf:self.lmax[ch]],ylev,pix_sym[pix],color=pix_col[pix],label=label)
plt.plot(xx,yy,'g')
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
else :
plt.xlabel('<flux> of last line in '+unit)
#plt.ylabel('1-CTE')
y_min=min(max(y_min*.5,1.e-8),5e-7)
y_max=max(y_max*1.5,1e-5)
plt.ylim(y_min,y_max)
plt.xscale('log')
if (y_max/y_min>80.) : plt.yscale('log')
plt.xlim(xx[0],xx[1])
plt.legend()
ax=fig.add_subplot(3,3,iplt)
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
iplt+=1
y_min=0.
y_max=0.
for pix in range(2) :
label="pix + %d " % (pix+1)
plt.plot(self.cte_flux_s[ch,nf:self.lmax[ch]],self.cte_y_s[ch,nf:self.lmax[ch],pix],pix_sym[pix],color=pix_col[pix],label=label)
y_min=min(-1.,np.min(self.cte_y_s[ch,nf:self.lmax[ch],0:1]))
y_max=max(1.,np.max(self.cte_y_s[ch,nf:self.lmax[ch],0:1])*1.1)
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
else :
plt.xlabel('<flux> of last line in '+unit)
plt.ylabel('signal in overscan pixel(s) in '+unit)
plt.xscale('log')
if y_max > 10. :
plt.yscale('symlog')
plt.plot([xx[0],xx[1]],[0.,0.],'--',color='black')
#
plt.ylim(y_min,y_max)
plt.xlim(xx[0],xx[1])
plt.legend(loc=2)
#plt.xticks(ticks_flux)
#
ax=fig.add_subplot(3,3,iplt)
iplt+=1
#
xx=[self.cte_flux_s[ch,nf]*0.9,self.cte_flux_s[ch,self.lmax[ch]-1]*1.1]
yy=[0.,0.]
plt.plot(xx,yy,'b--')
plt.errorbar(self.cte_flux_s[ch,nf:self.lmax[ch]],
self.over8_18[ch,nf:self.lmax[ch]],yerr=self.over8_18_std[ch,nf:self.lmax[ch]],fmt='o',color='r', ecolor='r',label='Signal Overscan[8:18]')
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
plt.ylabel('signal in '+unit+' in serial Overscan')
else :
plt.xlabel('<flux> of last line in '+unit)
plt.ylabel('signal in '+unit+' in // Overscan')
plt.xscale('log')
plt.ylim(min(np.min(self.over8_18[ch,nf:self.lmax[ch]])*1.2,-0.5),min(10.,max(0.5,np.max(self.over8_18[ch,nf:max(nf+1,self.lmax[ch]-1)])*1.5)))
plt.legend(loc=2)
ax=fig.add_subplot(3,3,iplt)
ax.yaxis.set_label_position("right")
iplt+=1
plt.plot(self.cte_flux_s[ch,nf:self.lmax[ch]],self.overscan_std[ch,nf:self.lmax[ch]],'<',label='Corner Noise')
plt.errorbar(self.cte_flux_s[ch,nf:self.lmax[ch]],
self.cte_noise_s[ch,nf:self.lmax[ch]],yerr=self.cte_noise_s_std[ch,nf:self.lmax[ch]],fmt='o',color='r', ecolor='r',label='Frame Noise')
try :
mean_noiseV=np.array([self.cte_noise_s[ch,ii] for ii in range(nf,self.lmax[ch]) if self.cte_flux_s[ch,ii] > 1000 and self.cte_flux_s[ch,ii] < 50000])
if len(mean_noiseV)>0 :
mean_noise=mean_noiseV.mean()
xx=[self.cte_flux_s[ch,nf],self.cte_flux_s[ch,self.lmax[ch]-1]]
yy=[mean_noise,mean_noise]
plt.plot(xx,yy,'b--')
except :
pass
if self.serie :
plt.xlabel('<flux> of last column in '+unit)
plt.ylabel('Noise from Serial Overscan in '+unit)
else :
plt.xlabel('<flux> of last line in '+unit)
plt.ylabel('Noise from // Overscan in '+unit)
plt.xscale('log')
ymin_cc=3.
ymax_cc=max(min(30.,np.max(self.cte_noise_s[ch,nf:max(nf+1,self.lmax[ch]-2)])*1.2),10.)
#print(ymax_cc,np.max(self.cte_noise_s[ch,nf:max(nf+1,self.lmax[ch]-5)])*1.5)
plt.ylim(ymin_cc,ymax_cc)
#if ymax_cc > 20 :
plt.legend(loc=2)
#else :
# plt.legend(loc=3)
#
ax=fig.add_subplot(3,3,iplt)
iplt+=1
flux=0.
l_last=nf
#lmax=len(self.cte_flux_s[ch,:])
count=0
im=0
#
y_min=0.
y_max=0.
#max_plt=max(int((self.lmax[ch]-nf)/4)+1,9)
for l in range(nf,self.lmax[ch]) :
if ((self.cte_flux_s[ch,l_last]/self.cte_flux_s[ch,l] < 0.9 ) and ( l_last < l )) :
# first test: only plot results for points that are different enough; second test: make sure something has already been selected; third test (l < lmax[ch]): avoid plotting points that are too saturated
if im>1 :
if self.serie :
label="%5.1f %s in last Col. " % (self.cte_flux_s[ch,l_last:self.lmax[ch]].mean(axis=0),unit)
else :
label="%5.1f %s in last Line" % (self.cte_flux_s[ch,l_last:self.lmax[ch]].mean(axis=0),unit)
else :
label="%5.1f" % (self.cte_flux_s[ch,l_last:l].mean(axis=0))
yplt=self.cte_y_s[ch,l_last:l,:].mean(axis=0)
y_min=min(max(min(np.min(yplt)*1.2,0.),-10.),y_min)
y_max=max(min(np.max(yplt)*1.2,100.),y_max)
plt.plot(x,yplt,label=label)
l_last=l
count+=1
if count == 9 and im<2 and l < self.lmax[ch]-1 :
count = 0
#plt.yscale('log')
if self.serie :
plt.xlabel('column number (serial overscan)')
else :
plt.xlabel('line number (// overscan)')
if im==0 or im==1 :
plt.ylabel('Overscan Signal in '+unit)
y_max=max(y_max,y_min+1.)
plt.ylim(y_min,y_max)
if im==0 : plt.plot([x[0],x[-1]],[0.,0.],'--',color='black')
if y_max>80. :
plt.yscale('symlog')
plt.xlim(self.first,self.first+27)
if im == 0 :
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
# plt.legend(bbox_to_anchor=(1.05, 1),loc=2, borderaxespad=0.)
#else :
plt.legend(loc=1)
ax=fig.add_subplot(3,3,iplt)
iplt+=1
y_min=0
y_max=0
im+=1
if count !=0 or l==nf :
if self.serie :
plt.xlabel('column number (serial overscan)')
else :
plt.xlabel('line number (// overscan)')
if im<2 : plt.ylabel('Overscan Signal in '+unit)
if self.serie :
label="%5.1f %s in last Col. " % (self.cte_flux_s[ch,l_last:self.lmax[ch]].mean(axis=0),unit)
else :
label="%5.1f %s in last Line" % (self.cte_flux_s[ch,l_last:self.lmax[ch]].mean(axis=0),unit)
yplt=self.cte_y_s[ch,l_last:self.lmax[ch],:].mean(axis=0)
y_min=min(max(min(np.min(yplt)*1.2,0.),-10.),y_min)
y_max=max(min(np.max(yplt)*1.2,100.),y_max)
plt.plot(x,yplt,label=label)
plt.xlim(self.first,self.first+27)
plt.ylim(y_min,y_max)
#plt.ylim(-10.,min(np.max(yplt)*1.2,100.))
if y_max>80. :
plt.yscale('symlog')
plt.legend(bbox_to_anchor=(1.05, 1),loc=2, borderaxespad=0.)
#plt.legend()
# Overscan noise
#ax=fig.add_subplot(3,3,iplt)
#iplt+=1
#
if on_screen : plt.show()
if self.serie :
plotfile=root_plt+'/cte_serial.png'
else :
plotfile=root_plt+'/cte_parallel.png'
fig.savefig(plotfile)
if not(on_screen) : plt.close(fig)
return
def cte_example():
get_ipython().magic('matplotlib inline')
print (' file =',sys.argv[1:-1],' TESTTYPE=',sys.argv[-1])
selection=sys.argv[-1]
file=Ifile(dirall=sys.argv[1:-1],fkey={'IMGTYPE':'Acquisition','TESTTYPE':sys.argv[-1]})
#
plt.interactive(1)
#file=Ifile(dirall=['/Users/antilog/scratch/e2v_190/20170314102625/*.fits'],fkey={})
#
cte_data=cte(all_file=file.all_file,gain=[0.704650434205,0.68883578783,0.688459357874,0.696697494642,0.689209827484,0.696579402812,0.698973006751,0.689613072912,0.682880384357,0.696206655845,0.690349506621,0.691506176017,0.690763478766,0.689762341309,0.694801544092,0.850025229184 ])
for ch in range(16) :
cte_data.plot_cte(ch=ch,ccd_name=selection,nf=0,on_screen=True)
def fft_noise(h_all,channel=range(1,17),fplot=True,mean=False,start=1,int_pixel=1.8e-6,int_line=30.e-6,verbose=0,legend=True,xboundary=(20,500),yboundary=(30,2000),label='',color_v=None,two=True,axes=None,index=None) :
cmap=plt.get_cmap('nipy_spectral')
colors=[cmap(j)[:3] for j in np.linspace(0,1,17)]
if color_v != None :
color_val=color_v
else :
color_val=channel
#
nb_l=yboundary[1]-yboundary[0]
nb_c=xboundary[1]-xboundary[0]
nb_file=len(h_all)
nb_channel=len(channel)
freq_x = np.fft.rfftfreq(nb_c, d=int_pixel)[start:]
freqf_x = np.flipud(1./np.fft.rfftfreq(nb_c,1)[start:])
noise=np.zeros((nb_channel))
#
# image area
first_line,first_p_over,first_col,first_s_over=image_area(h_all[0])
first_good_overs=first_s_over+2
first_good_overp=first_p_over+2
#
for ich in range(nb_channel) :
ch=int(channel[ich])
for i_h in range(nb_file) :
h=h_all[i_h]
#
if i_h==0 :
(n_y,n_x)=np.shape(h[1].data)
#delta time between 2 pixels from 2 # lines
delta_line=int_pixel*n_x+int_line
freq_y = np.fft.rfftfreq(nb_l, d=delta_line)[start:]
freqf_y = np.flipud(1./np.fft.rfftfreq(nb_l,d=delta_line/int_pixel)[start:])
freq=np.append(freq_y,freq_x)
freqf=np.append(freqf_x,freqf_y)
#
mean_line=np.median(h[ch].data[yboundary[0]:yboundary[1],:],axis=0)
mean_column=np.median(h[ch].data[:,xboundary[0]:xboundary[1]],axis=1)
if (ich==0 and i_h==0) or ( not(mean) and i_h==0 ) :
ff_x=np.zeros((int(nb_c/2)))
ff_y=np.zeros((int(nb_l/2)))
for l in range(yboundary[0],yboundary[1]) :
raw=h[ch].data[l,:]-mean_line
to_fft=raw[xboundary[0]:xboundary[1]]-raw[first_good_overs:].mean()
ff_x+=np.absolute(np.fft.rfft(to_fft))[start:]
for c in range(xboundary[0],xboundary[1]) :
raw=h[ch].data[:,c]-mean_column
to_fft=raw[yboundary[0]:yboundary[1]]-raw[first_good_overp:].mean()
ff_y+=np.absolute(np.fft.rfft(to_fft))[start:]
noise[ich]+=(h[ch].data[yboundary[0]:yboundary[1],first_good_overs:].std())**2
if verbose>1 : print ('channel %d noise %3.3f Overscan dispersion = %3.3f '%(ch,h[ch].data[yboundary[0]:yboundary[1],first_good_overs:].std(),(h[ch].data[yboundary[0]:yboundary[1],first_good_overs:].mean(axis=1)).std()))
if (i_h==nb_file-1) and ( ich==len(channel)-1 or not(mean) ) :
if mean :
# actually we should divide by the number of FFT bins, not by the number of signal samples ... factor of 2?
ff_xn=ff_x/nb_l/nb_c/nb_file/nb_channel/2.
ff_yn=ff_y/nb_l/nb_c/nb_file/nb_channel
xnorm=np.append(ff_yn,ff_xn)
label_ch=label+'<'+','.join(map(str,channel))+'>'
else :
ff_xn=ff_x/nb_l/nb_c/nb_file/2.
ff_yn=ff_y/nb_l/nb_c/nb_file
xnorm=np.append(ff_yn,ff_xn)
label_ch=label+'%d' % (ch)
if fplot :
if two :
if index!=None :
axes[index[0]].plot(freq_x,ff_xn,label=label_ch,color=colors[color_val[ich]])
axes[index[1]].plot(freq_y,ff_yn,label=label_ch,color=colors[color_val[ich]])
else :
plt.plot(freq_x,ff_xn,label=label_ch,color=colors[color_val[ich]])
plt.plot(freq_y,ff_yn,label=label_ch,color=colors[color_val[ich]])
else :
if index!=None :
axes[index[0]].plot(freq,xnorm,label=label_ch,color=colors[color_val[ich]])
else :
plt.plot(freq,xnorm,label=label_ch,color=colors[color_val[ich]])
else :
if two :
ff_xnf=np.flipud(ff_xn)
ff_ynf=np.flipud(ff_yn)
if index!=None :
axes[index[0]].plot(freqf_x,ff_xnf,label=label_ch,color=colors[color_val[ich]])
axes[index[1]].plot(freqf_y,ff_ynf,label=label_ch,color=colors[color_val[ich]])
else :
plt.plot(freqf_x,ff_xnf,label=label_ch,color=colors[color_val[ich]])
plt.plot(freqf_y,ff_ynf,label=label_ch,color=colors[color_val[ich]])
else :
xnormf=np.flipud(xnorm)
if index!=None :
axes[index[0]].plot(freqf,xnormf,label=label_ch,color=colors[color_val[ich]])
else :
plt.plot(freqf,xnormf,label=label_ch,color=colors[color_val[ich]])
if verbose :
argsort=np.argsort(xnorm)
sort=np.sort(xnorm)
ff_mean=np.mean(xnorm)
ff_sum=np.sum(xnorm)
# do a quick and dirty bias on the sigma ...
ff_sig=xnorm.std()
#for i in range(1,len(argsort)) :
# if xnorm[argsort[-i]] < ff_mean+3*ff_sig : break
#ff_sig=sort[0:-i].std()
#
print(' sum(fft) %g <fft level> %g fft dispersion %g ' % (ff_sum,ff_mean,ff_sig))
if not(fplot) :
xnormf=np.flipud(xnorm)
for i in range(1,len(argsort)) :
if xnorm[argsort[-i]] < ff_mean+2*ff_sig : break
if i>1 and ( ( argsort[-i]+1 in argsort[-i+1:] ) or ( argsort[-i]-1 in argsort[-i+1:] )) : continue
print (' fft bin %d , delta(pixels) %f (%6.1f Hz) : %g ' % (argsort[-i]+1,freqf[-argsort[-i]-1],freq[argsort[-i]],xnorm[argsort[-i]]))
#
if legend :
if fplot :
plt.xlabel('Noise in Hz')
else :
plt.xlabel('Noise Period in Pixel(s)')
plt.legend(bbox_to_anchor=(1.05, 1),loc=2, borderaxespad=0.)
#plt.xscale('log')
#plt.yscale('log')
noise=np.sqrt(noise/nb_file)
return freq,xnorm,noise
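# Illustrative call of fft_noise (a sketch only; the file name and channel range are assumed
# example values, and pyfits is the FITS reader already used by this module in for_ever below):
#   h = pyfits.open('some_frame.fz')
#   freq, spectrum, noise = fft_noise([h], channel=range(1, 3), fplot=True, verbose=1)
#   h.close()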
def for_ever(top_dir='/data/frames',do_fft=False,do_cte=False,xboundary=(20,500),yboundary=(30,2000),gain=[1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.]) :
filename_old=''
old_dir=''
while True :
dir=top_dir+'/'+time.strftime('%Y%m%d')
#dir=top_dir
files = glob.glob(dir+'/*.fz')
if dir != old_dir :
print (' Scan Dir %s ========================================' % (dir) )
old_dir=dir
if files:
_,new_file =os.path.split( max(files, key=os.path.getctime) )
if new_file != filename_old :
filename=dir+'/'+new_file
# so it's a new file, but it may not be fully written yet
time.sleep(5)
all_file=Ifile(dirall=[filename],single_t=False)
filename_old=new_file
print ('%s -------------------------------------------------------------------------' % (new_file) )
print ('Ch | mean median med. - std | <ov //> std std | <ov S.> std std |')
print (' | image image S_over image | ov // Fix//o | ov S. Fix S. |')
for ch in range(16) :
print ('%02d | % 6.0f % 6.0f % 6.0f % 8.02f | % 6.0f % 8.02f % 8.02f | % 6.0f % 8.02f % 8.02f |' % (ch,
all_file.all_file[0].Mean[ch],
all_file.all_file[0].Median[ch],
all_file.all_file[0].Median[ch]-all_file.all_file[0].MeanSScan[ch],
all_file.all_file[0].Std[ch],
all_file.all_file[0].MeanPScan[ch],
all_file.all_file[0].StdPScan[ch],
all_file.all_file[0].StdPScanOS[ch],
all_file.all_file[0].MeanSScan[ch],
all_file.all_file[0].StdSScan[ch],
all_file.all_file[0].StdSScanOS[ch]) )
print ('----------------------------------------------------------------------------------------------------')
if do_cte :
cte_s=cte(all_file=all_file.all_file,gain=gain,serie=True)
ccd=new_file
cte_s.print_cte(ccd_name=ccd,nf=0)
#for ch in range(16) :
# cte_s.plot_cte(ch=ch,ccd_name=ccd,nf=0,on_screen=True)
# plt.show()
cte_p=cte(all_file=all_file.all_file,gain=gain,serie=False)
cte_p.print_cte(ccd_name=ccd,nf=0)
#for ch in range(16) :
# cte_p.plot_cte(ch=ch,ccd_name=ccd,nf=0,on_screen=True)
# plt.show()
if do_fft :
fitsfile=pyfits.open(filename)
fft_it([fitsfile],channel=range(1,9),xboundary=xboundary,yboundary=yboundary)
fft_it([fitsfile],channel=range(9,17),xboundary=xboundary,yboundary=yboundary)
plt.show()
fitsfile.close()
time.sleep(2)
return
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
# Copyright 2012 Sonya Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import csv, re, xlrd
from usa import config, eia, dbsetup
from common import fileutils, sqlhelper
from common.dbconnect import db
# tables by measurement
pricetable = "%s.seds_price" % config.EIA_SCHEMA
usetable = "%s.seds_use_btu" % config.EIA_SCHEMA
# we need to parse three files before we can populate the yearly
# tables
data = {}
def parse_measurement(filename, measurement, tracker):
filepath = fileutils.getcache(filename)
with open(filepath) as f:
csvf = csv.reader(f)
header = next(csvf)
for stryear in header[2:]:
year = int(stryear)
if year not in data:
data[year] = {}
for row in csvf:
if len(row) == len(header):
if row[0] == "US":
msn = row[1][:4]
for i in range(2, len(row)):
year = int(header[i])
value = row[i].strip()
if len(value):
if msn not in data[year]:
data[year][msn] = {measurement: value}
else:
data[year][msn][measurement] = value
source = msn[0:2]
sector = msn[2:4]
insert_values = [year, source, sector, float(value)]
if measurement == "price":
tracker.insert_row(pricetable, insert_values)
elif measurement == "use_btu":
tracker.insert_row(usetable, insert_values)
def create_consolidated_tables():
allsources = eia.fossilfuels + eia.elec_sources + \
eia.nuclear + eia.renewables
allsectors = ["TC", "AC", "CC", "IC", "RC"] + eia.elec_sectors
for year in config.STUDY_YEARS:
strings = {
"renewables": sqlhelper.set_repr(eia.renewables),
"elec_sources": sqlhelper.set_repr(eia.elec_sources),
"elec_sectors": sqlhelper.set_repr(eia.elec_sectors),
"allsources": sqlhelper.set_repr(allsources),
"allsectors": sqlhelper.set_repr(allsectors),
"from_table": "%s.seds_us_%d" % (config.EIA_SCHEMA, year),
"tablename": "%s.seds_short_%d" % (config.EIA_SCHEMA, year),
}
db.execute("DROP TABLE IF EXISTS %(tablename)s CASCADE" % strings)
db.execute("""
SELECT source, sector,
case when sum(use_btu) = 0 then 0
else sum(ex) / sum(use_btu) end as price,
sum(use_btu) as use_btu,
sum(ex) as ex
INTO %(tablename)s
FROM (SELECT case when source in %(renewables)s then 'RE'
when source in %(elec_sources)s then 'ES'
else source end as source,
case when sector in %(elec_sectors)s then 'EI'
else sector end as sector,
price, use_btu, ex
FROM %(from_table)s
WHERE source in %(allsources)s
AND sector in %(allsectors)s) e
GROUP by source, sector order by source, sector""" % strings)
def doparse():
tracker = dbsetup.MultiTableStateTracker()
tracker.create_table(pricetable,
["year", "source", "sector", "price"],
["int", "char(2)", "char(2)", "float"],
cascade=True)
tracker.create_table(usetable,
["year", "source", "sector", "use_btu"],
["int", "char(2)", "char(2)", "float"],
cascade=True)
tracker.warmup()
parse_measurement("eia/pr_all.csv", "price", tracker)
parse_measurement("eia/use_all_btu.csv", "use_btu", tracker)
parse_measurement("eia/ex_all.csv", "ex", tracker)
tracker.flush()
# tables by year
years = sorted(data)
for year in years:
tablename = "eia.seds_us_%d" % year
tracker.create_table(tablename,
["source", "sector", "price", "use_btu", "ex"],
["char(2)", "char(2)", "float", "float", "float"],
cascade=True)
tracker.warmup()
msns = sorted(data[year])
for msn in msns:
values = data[year][msn]
source = msn[0:2]
sector = msn[2:4]
insert_values = [source, sector]
for field in ("price", "use_btu", "ex"):
next_value = 0 # this will turn out to help our calculations
if field in values:
# convert expenditures to the same units as io table
if field == "ex":
next_value = float(values[field]) * 1000
else:
next_value = float(values[field])
insert_values.append(next_value)
tracker.insert_row(tablename, insert_values)
tracker.flush()
create_consolidated_tables()
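# A plausible entry point (an assumption about how this script is driven):
#   if __name__ == "__main__":
#       doparse()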
|
nilq/baby-python
|
python
|
class Solution(object):
def lengthOfLongestSubstringTwoDistinct(self, s):
"""
:type s: str
:rtype: int
"""
if not s:
return 0
c = s[0]
maxlen = 0
chars = set([c])
llen = 0
start = 0
end = 0
for i, cc in enumerate(s):
if cc == c:
end += 1
llen += 1
elif cc in chars:
c = cc
start = i - 1
end = i
llen += 1
elif len(chars) == 1:
c = cc
chars.add(cc)
start = i - 1
end = i
llen += 1
else:
if llen > maxlen:
maxlen = llen
chars = set([c, cc])
c = cc
llen = end - start + 1
start = i - 1
end = i
# print cc, c, start, end, llen, chars
if llen > maxlen:
maxlen = llen
return maxlen
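# Quick illustrative check (the expected value follows the usual statement of this problem,
# which is assumed here): for "eceba" the longest substring with at most two distinct
# characters is "ece", so the call below should print 3.
if __name__ == "__main__":
    print(Solution().lengthOfLongestSubstringTwoDistinct("eceba"))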
|
nilq/baby-python
|
python
|
import sys
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer, WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
import joblib
import nltk
nltk.download(['punkt','stopwords','wordnet'])
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from nltk.stem.porter import *
def load_data(database_filepath):
##
"""
Load Data Function
Arguments:
database_filepath -> path to SQLite db
Output:
X -> feature DataFrame
Y -> label DataFrame
category_names -> used for data visualization (app)
"""
##
engine = create_engine(f'sqlite:///{database_filepath}')
df = pd.read_sql_table('InsertTableName',engine)
X = df['message']
Y = df.iloc[:, 4:]
del Y['child_alone']
return X , Y , Y.columns.values
def tokenize(text):
##
"""
Tokenize function
Arguments:
text -> list of text messages (english)
Output:
clean_tokens -> tokenized text, clean for ML modeling
"""
##
lemmatizer = WordNetLemmatizer()
# Convert to lowercase
text = text.lower()
# Remove punctuation characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
# Split text into words using NLTK
words = word_tokenize(text)
# Remove stop words
words = [w for w in words if w not in stopwords.words("english")]
# Reduce words to their root form
stemmed = [PorterStemmer().stem(w) for w in words]
clean = [lemmatizer.lemmatize(t) for t in stemmed]
return clean
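# Approximate behaviour of tokenize on a sample message (illustrative only; the exact tokens
# depend on the installed NLTK data):
#   tokenize("Floods are blocking the roads!")  ->  ['flood', 'block', 'road']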
def build_model():
##
"""
Build Model function
This function returns a scikit-learn Pipeline that processes text messages
according to NLP best practice and applies a classifier.
"""
##
pipeline = Pipeline([
('vect', TfidfVectorizer(tokenizer=tokenize)),
('clf', MultiOutputClassifier(estimator=LogisticRegression()))
])
parameters = {'clf__estimator__C': [0.1,1,10]}
cv = GridSearchCV(pipeline, parameters,cv=5)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
##
"""
Evaluate Model function
This function applies ML pipeline to a test set and prints out
model performance
Arguments:
model -> Scikit ML Pipeline
X_test -> test features
Y_test -> test labels
category_names -> label names (multi-output)
"""
##
y_pred = pd.DataFrame(model.predict(X_test),columns=Y_test.columns)
print(classification_report(np.hstack(Y_test), np.hstack(y_pred)))
pass
def save_model(model, model_filepath):
##
"""
Save Model function
This function saves trained model as Pickle file, to be loaded later.
Arguments:
model -> GridSearchCV or scikit-learn Pipeline object
model_filepath -> destination path to save .pkl file
"""
##
joblib.dump(model,f'{model_filepath}')
pass
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from k8s.models.autoscaler import HorizontalPodAutoscaler
from mock import create_autospec
from requests import Response
from utils import TypeMatcher
from fiaas_deploy_daemon import ExtensionHookCaller
from fiaas_deploy_daemon.deployer.kubernetes.autoscaler import should_have_autoscaler, AutoscalerDeployer
from fiaas_deploy_daemon.specs.models import AutoscalerSpec, ResourcesSpec, ResourceRequirementSpec, \
LabelAndAnnotationSpec
LABELS = {"autoscaler_deployer": "pass through"}
AUTOSCALER_API = '/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers/'
def test_default_spec_should_create_no_autoscaler(app_spec):
assert should_have_autoscaler(app_spec) is False
def test_autoscaler_enabled_and_1_replica_gives_no_autoscaler(app_spec):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=1, max_replicas=1, cpu_threshold_percentage=50))
assert should_have_autoscaler(app_spec) is False
def test_autoscaler_enabled_and_2_max_replicas_and_no_requested_cpu_gives_no_autoscaler(app_spec):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=1, max_replicas=2, cpu_threshold_percentage=50))
assert should_have_autoscaler(app_spec) is False
def test_autoscaler_enabled_and_2_max_replicas_and__requested_cpu_gives_autoscaler(app_spec):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=1, max_replicas=2, cpu_threshold_percentage=50))
app_spec = app_spec._replace(resources=ResourcesSpec(limits=[], requests=ResourceRequirementSpec(cpu=1, memory=1)))
assert should_have_autoscaler(app_spec)
class TestAutoscalerDeployer(object):
@pytest.fixture
def extension_hook(self):
return create_autospec(ExtensionHookCaller, spec_set=True, instance=True)
@pytest.fixture
def deployer(self, owner_references, extension_hook):
return AutoscalerDeployer(owner_references, extension_hook)
@pytest.mark.usefixtures("get")
def test_new_autoscaler(self, deployer, post, app_spec, owner_references, extension_hook):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=2, max_replicas=4, cpu_threshold_percentage=50))
app_spec = app_spec._replace(
resources=ResourcesSpec(limits=[], requests=ResourceRequirementSpec(cpu=1, memory=1)))
expected_autoscaler = {
'metadata': pytest.helpers.create_metadata('testapp', labels=LABELS),
'spec': {
"scaleTargetRef": {
"kind": "Deployment",
"name": "testapp",
"apiVersion": "apps/v1"
},
"minReplicas": 2,
"maxReplicas": 4,
"targetCPUUtilizationPercentage": 50
},
}
mock_response = create_autospec(Response)
mock_response.json.return_value = expected_autoscaler
post.return_value = mock_response
deployer.deploy(app_spec, LABELS)
pytest.helpers.assert_any_call(post, AUTOSCALER_API, expected_autoscaler)
owner_references.apply.assert_called_once_with(TypeMatcher(HorizontalPodAutoscaler), app_spec)
extension_hook.apply.assert_called_once_with(TypeMatcher(HorizontalPodAutoscaler), app_spec)
@pytest.mark.usefixtures("get")
def test_new_autoscaler_with_custom_labels_and_annotations(self, deployer, post, app_spec, owner_references,
extension_hook):
app_spec = app_spec._replace(
autoscaler=AutoscalerSpec(enabled=True, min_replicas=2, max_replicas=4, cpu_threshold_percentage=50))
app_spec = app_spec._replace(
resources=ResourcesSpec(limits=[], requests=ResourceRequirementSpec(cpu=1, memory=1)))
labels = LabelAndAnnotationSpec(deployment={}, horizontal_pod_autoscaler={"custom": "label"}, ingress={},
service={}, pod={}, status={})
annotations = LabelAndAnnotationSpec(deployment={}, horizontal_pod_autoscaler={"custom": "annotation"},
ingress={}, service={}, pod={}, status={})
app_spec = app_spec._replace(labels=labels, annotations=annotations)
expected_autoscaler = {
'metadata': pytest.helpers.create_metadata('testapp', labels={"autoscaler_deployer": "pass through",
"custom": "label"},
annotations={"custom": "annotation"}),
'spec': {
"scaleTargetRef": {
"kind": "Deployment",
"name": "testapp",
"apiVersion": "apps/v1"
},
"minReplicas": 2,
"maxReplicas": 4,
"targetCPUUtilizationPercentage": 50
}
}
mock_response = create_autospec(Response)
mock_response.json.return_value = expected_autoscaler
post.return_value = mock_response
deployer.deploy(app_spec, LABELS)
pytest.helpers.assert_any_call(post, AUTOSCALER_API, expected_autoscaler)
owner_references.apply.assert_called_once_with(TypeMatcher(HorizontalPodAutoscaler), app_spec)
extension_hook.apply.assert_called_once_with(TypeMatcher(HorizontalPodAutoscaler), app_spec)
def test_no_autoscaler_gives_no_post(self, deployer, delete, post, app_spec):
deployer.deploy(app_spec, LABELS)
delete.assert_called_with(AUTOSCALER_API + app_spec.name)
pytest.helpers.assert_no_calls(post)
def test_no_autoscaler_gives_no_put(self, deployer, delete, put, app_spec):
deployer.deploy(app_spec, LABELS)
delete.assert_called_with(AUTOSCALER_API + app_spec.name)
pytest.helpers.assert_no_calls(put)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2022 all rights reserved
# support
import qed
# custom properties
def selectors(default={}, **kwds):
"""
A map from selector names to their legal values
"""
# build the trait descriptor and return it
return qed.properties.dict(
schema=qed.properties.tuple(schema=qed.properties.str(), default=()),
default=default, **kwds)
# end of file
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import numpy
from numpy.random import RandomState
from sklearn.datasets import make_friedman1
from sklearn.model_selection import train_test_split
from typing import Union
from backprop.network import Network
_demo_problem_num_train_samples: int = 1000
_demo_problem_num_test_samples: int = 100
_demo_num_uninformative_columns: int = 0
_random_state = 0
def demo_backprop(
num_train_samples: int = _demo_problem_num_train_samples,
num_test_samples: int = _demo_problem_num_test_samples,
num_uninformative_columns: int = _demo_num_uninformative_columns,
random_state: Union[int, None, RandomState] =_random_state
):
random_state = random_state if isinstance(random_state, RandomState) \
else RandomState(random_state)
# make training and test data sets for demo
inputs_train, inputs_test, outputs_train, outputs_test = make_test_problem(
n_train_samples=num_train_samples, n_test_samples=num_test_samples,
n_uninformative=num_uninformative_columns, random_state=random_state
)
# build network
num_inputs = inputs_train.shape[1]
num_outputs = outputs_train.shape[1]
num_hidden = 2 * num_inputs * num_outputs
# make a network with a single hidden layer with num_hidden nodes
network = Network(num_inputs, num_hidden, num_outputs,
random_state=random_state)
# to make two hidden layers, could do:
# network = Network(num_inputs, num_hidden, num_hidden, num_outputs)
# train network on training set
network.train_online(inputs=inputs_train, correct_outputs=outputs_train)
# predict results on test set
predict_test = network.predict(inputs_test)
# calculate error
err = ((predict_test - outputs_test)**2).sum(axis=1).mean(axis=0)
print('Cross-validated error: %.3g' % err)
def make_test_problem(
n_train_samples: int = _demo_problem_num_train_samples,
n_test_samples: int = _demo_problem_num_test_samples,
n_uninformative: int = 0,
random_state: Union[int, None, RandomState] = _random_state
) -> (numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray):
n_samples = n_train_samples + n_test_samples
assert n_uninformative >= 0
n_features = 5 + n_uninformative
inputs, outputs = make_friedman1(
n_samples=n_samples, n_features=n_features, random_state=random_state
)
if inputs.ndim == 1:
inputs = numpy.reshape(inputs, inputs.shape + (1,))
if outputs.ndim == 1:
outputs = numpy.reshape(outputs, outputs.shape + (1,))
inputs_train, inputs_test, outputs_train, outputs_test = train_test_split(
inputs, outputs,
train_size=n_train_samples, test_size=n_test_samples,
random_state=random_state
)
return inputs_train, inputs_test, outputs_train, outputs_test
if __name__ == "__main__":
demo_backprop()
|
nilq/baby-python
|
python
|
# Copyright 2020 Board of Trustees of the University of Illinois.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import utils.datasetutils as datasetutils
class Capability():
def __init__(self, injson):
self.name = None
self.description = None
self.isOpenSource = None
self.apiDocUrl = None
self.deploymentDetails = None
self.apiBaseUrl = None
self.version = None
self.healthCheckUrl = None
self.status = None
self.dataDeletionEndpointDetails = None
self.contacts = None
# self.creationDate = None
# self.lastModifiedDate = None
self, restjson = datasetutils.update_capability_dataset_from_json(self, injson)
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def set_is_open_source(self, isOpenSource):
self.isOpenSource = isOpenSource
def get_is_open_source(self):
return self.isOpenSource
def set_api_doc_url(self, apiDocUrl):
self.apiDocUrl = apiDocUrl
def get_api_doc_url(self):
return self.apiDocUrl
def set_deployment_details(self, deploymentDetails):
self.deploymentDetails = deploymentDetails
def get_deployment_details(self):
return self.deploymentDetails
def set_docker_image_name(self, dockerImageName):
self.dockerImageName = dockerImageName
def get_docker_image_name(self):
return self.dockerImageName
def set_environment_variables(self, environmentVariables):
self.environmentVariables = environmentVariables
def get_environment_variables(self):
return self.environmentVariables
def set_database_details(self, databaseDetails):
self.databaseDetails = databaseDetails
def get_database_details(self):
return self.databaseDetails
def set_version(self, version):
self.version = version
def get_version(self):
return self.version
def set_health_check_url(self, healthCheckUrl):
self.healthCheckUrl = healthCheckUrl
def get_health_check_url(self):
return self.healthCheckUrl
def set_auth_method(self, authMethod):
self.authMethod = authMethod
def get_auth_method(self):
return self.authMethod
def set_status(self, status):
self.status = status
def get_status(self):
return self.status
def set_data_deletion_endpoint_details(self, dataDeletionEndpointDetails):
self.dataDeletionEndpointDetails = dataDeletionEndpointDetails
def get_data_deletion_endpoint_details(self):
return self.dataDeletionEndpointDetails
def set_contacts(self, contacts):
self.contacts = contacts
def get_contacts(self):
return self.contacts
# def set_creation_date(self, creationDate):
# self.creationDate = creationDate
#
# def get_creation_date(self):
# return self.creationDate
#
# def set_last_modified_date(self, lastModifiedDate):
# self.lastModifiedDate = lastModifiedDate
#
# def get_last_modified_date(self):
# return self.lastModifiedDate
|
nilq/baby-python
|
python
|
import numpy as np
from mrcnn import utils
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, IMAGE_SOURCES, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, IMAGE_SOURCES, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
n_boxes, n_image_source = gt_boxes.shape[:2]
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, n_image_source, 4))
# master boxes
master_gt_boxes = gt_boxes[:, 0, :]
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
# crowd_ix = np.where(gt_class_ids < 0)[0]
# if crowd_ix.shape[0] > 0:
# # Filter out crowds from ground truth class IDs and boxes
# non_crowd_ix = np.where(gt_class_ids > 0)[0]
# crowd_boxes = master_gt_boxes[crowd_ix]
# gt_class_ids = gt_class_ids[non_crowd_ix]
# master_gt_boxes = master_gt_boxes[non_crowd_ix]
# # Compute overlaps with crowd boxes [anchors, crowds]
# crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
# crowd_iou_max = np.amax(crowd_overlaps, axis=1)
# no_crowd_bool = (crowd_iou_max < 0.001)
# else:
# # All anchors don't intersect a crowd
# no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, master_gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
# rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
rpn_match[anchor_iou_max < 0.3] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
# Rest the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
for idx_image_source in range(n_image_source):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i], idx_image_source]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix, idx_image_source] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix, idx_image_source] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
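# Minimal illustrative call (shapes and config values are assumed example values, with a
# stand-in for the project's config object):
#   class _Cfg:
#       RPN_TRAIN_ANCHORS_PER_IMAGE = 256
#       RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
#   anchors = np.array([[0, 0, 16, 16], [8, 8, 32, 32]], dtype=np.float32)
#   gt_boxes = np.array([[[6, 6, 30, 30]]], dtype=np.float32)   # [1 gt box, 1 image source, 4]
#   rpn_match, rpn_bbox = build_rpn_targets((64, 64), anchors, np.array([1]), gt_boxes, _Cfg())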
|
nilq/baby-python
|
python
|
from django import forms
from django.contrib import admin
from django.utils.safestring import mark_safe
from .models import Episode, ShowDate
from .tasks import generate_peaks
class ShowsCommonModelAdminMixin:
save_on_top = True
list_filter = ("published", "show_code")
list_display_links = ("show_code", "display_name")
@admin.display(description="Name", ordering="name", empty_value=mark_safe("<em>Untitled</em>"))
def display_name(self, obj):
return obj.name or None
def get_fields(self, request, obj=None):
fields = list(super().get_fields(request, obj=obj))
if obj is None:
for field in self.get_readonly_fields(request, obj=obj):
fields.remove(field)
return fields
class EpisodeAdminModelForm(forms.ModelForm):
name_from_ffprobe = forms.BooleanField(
label="Generate name from file's metadata.",
required=False,
help_text=(
'Attempt to extract name from metadata on save. Will attempt to do so in <strong>"artist - title"</strong>'
" format."
),
)
class EpisodeAdmin(ShowsCommonModelAdminMixin, admin.ModelAdmin):
form = EpisodeAdminModelForm
fields = (
"show_code",
"published",
"slug",
"asset_url",
"name",
"name_from_ffprobe",
"description",
"date",
"duration",
"guid",
"has_peaks",
)
readonly_fields = ("guid", "has_peaks")
list_display = ("published", "show_code", "display_name", "date", "date", "duration", "has_peaks")
@admin.display(description="Peaks", boolean=True)
def has_peaks(self, obj):
return bool(obj.peaks)
def save_model(self, request, obj, form, change):
if form.cleaned_data["name_from_ffprobe"]:
obj.name = " - ".join(filter(None, (obj.ffprobe.artist, obj.ffprobe.title)))
super().save_model(request, obj, form, change)
generate_peaks(obj)
class ShowDateAdmin(ShowsCommonModelAdminMixin, admin.ModelAdmin):
fields = ("show_code", "published", "name", "dates", "start_time", "duration", "end_time")
list_display = ("published", "show_code", "display_name", "start_time", "end_time", "duration")
readonly_fields = ("end_time",)
admin.site.register(Episode, EpisodeAdmin)
admin.site.register(ShowDate, ShowDateAdmin)
|
nilq/baby-python
|
python
|
""" Events emitted by the artist model """
from dataclasses import dataclass
from typing import List, Optional
from OpenCast.domain.event.event import Event, ModelId
@dataclass
class ArtistCreated(Event):
name: str
ids: List[ModelId]
thumbnail: Optional[str]
@dataclass
class ArtistThumbnailUpdated(Event):
thumbnail: str
@dataclass
class ArtistDeleted(Event):
ids: List[ModelId]
@dataclass
class ArtistVideosUpdated(Event):
ids: List[ModelId]
|
nilq/baby-python
|
python
|
from unittest import TestCase
from lie2me import Field
class CommonTests(object):
def get_instance(self):
return self.Field()
def test_submitting_empty_value_on_required_field_returns_error(self):
field = self.get_instance()
field.required = True
value, error = field.submit(field.empty_value())
self.assertTrue(error)
def test_submitting_empty_value_on_optional_field_does_not_return_error(self):
field = self.get_instance()
field.required = False
value, error = field.submit(field.empty_value())
self.assertFalse(error)
def test_field_is_required_by_default(self):
field = self.get_instance()
value, error = field.submit(field.empty_value())
self.assertTrue(error)
def test_field_with_default_is_not_required(self):
field = self.get_instance()
field.default = self.valid_default
value, error = field.submit(field.empty_value())
self.assertFalse(error)
def test_field_instance_can_overwrite_specific_messages(self):
field = self.get_instance()
field.messages = {'required': 'Lorem ipsum'}
value, error = field.submit(None)
self.assertIn('Lorem ipsum', str(error))
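# The mixin is meant to be combined with a concrete field class; a hypothetical subclass
# would look like this (the field class and default value below are assumptions):
#   class SomeFieldCommonTests(CommonTests, TestCase):
#       Field = SomeField
#       valid_default = 'some valid value'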
|
nilq/baby-python
|
python
|
import os
import sys
from RLTest import Env
from redisgraph import Graph, Node, Edge
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from base import FlowTestsBase
redis_graph = None
male = ["Roi", "Alon", "Omri"]
female = ["Hila", "Lucy"]
class testGraphMixLabelsFlow(FlowTestsBase):
def __init__(self):
self.env = Env()
global redis_graph
redis_con = self.env.getConnection()
redis_graph = Graph("G", redis_con)
self.populate_graph()
def populate_graph(self):
redis_graph
nodes = {}
# Create entities
for m in male:
node = Node(label="male", properties={"name": m})
redis_graph.add_node(node)
nodes[m] = node
for f in female:
node = Node(label="female", properties={"name": f})
redis_graph.add_node(node)
nodes[f] = node
for n in nodes:
for m in nodes:
if n == m: continue
edge = Edge(nodes[n], "knows", nodes[m])
redis_graph.add_edge(edge)
redis_graph.commit()
# Connect a single node to all other nodes.
def test_male_to_all(self):
query = """MATCH (m:male)-[:knows]->(t) RETURN m,t ORDER BY m.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * (len(male + female)-1)))
def test_male_to_male(self):
query = """MATCH (m:male)-[:knows]->(t:male) RETURN m,t ORDER BY m.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * (len(male)-1)))
def test_male_to_female(self):
query = """MATCH (m:male)-[:knows]->(t:female) RETURN m,t ORDER BY m.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * len(female)))
def test_female_to_all(self):
query = """MATCH (f:female)-[:knows]->(t) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(female) * (len(male + female)-1)))
def test_female_to_male(self):
query = """MATCH (f:female)-[:knows]->(t:male) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(female) * len(male)))
def test_female_to_female(self):
query = """MATCH (f:female)-[:knows]->(t:female) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(female) * (len(female)-1)))
def test_all_to_female(self):
query = """MATCH (f)-[:knows]->(t:female) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * len(female)) + (len(female) * (len(female)-1)))
def test_all_to_male(self):
query = """MATCH (f)-[:knows]->(t:male) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male) * (len(male)-1)) + len(female) * len(male))
def test_all_to_all(self):
query = """MATCH (f)-[:knows]->(t) RETURN f,t ORDER BY f.name"""
actual_result = redis_graph.query(query)
self.env.assertEquals(len(actual_result.result_set), (len(male+female) * (len(male+female)-1)))
|
nilq/baby-python
|
python
|
import io
import logging
from typing import List, Union
LOG = logging.getLogger(__name__)
class FileHelper:
"""Encapsulates file related functions."""
def __init__(self, filepath, line_idx, contents):
self.filepath = filepath
self.line_idx = line_idx
self.contents = contents
@classmethod
def read_file(cls, fpath: str) -> List[str]:
"""Reads a file from FS. Returns a lis of strings from it.
:param fpath: File path
"""
with io.open(fpath, encoding='utf-8') as f:
data = f.read().splitlines()
return data
def write(self):
"""Writes updated contents back to a file."""
LOG.debug(f'Writing `{self.filepath}` ...')
with io.open(self.filepath, 'w', encoding='utf-8') as f:
f.write('\n'.join(self.contents))
def line_replace(self, value: str, offset: int = 0):
"""Replaces a line in file.
:param value: New line.
:param offset: Offset from line_idx
"""
target_idx = self.line_idx + offset
self.contents[target_idx] = value
def insert(self, value: Union[List[str], str], offset: int = 1):
"""Inserts a line (or many) into file.
:param value: New line(s).
:param offset: Offset from line_idx
"""
target_idx = self.line_idx + offset
if not isinstance(value, list):
value = [value]
self.contents[target_idx:target_idx] = value
def iter_after(self, offset: int) -> str:
"""Generator. Yields lines after line_idx
:param offset:
"""
target_idx = self.line_idx + offset
for line in self.contents[target_idx:]:
yield line
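# Hypothetical usage sketch (the file name, index and replacement line are illustrative only):
#   contents = FileHelper.read_file('setup.cfg')
#   helper = FileHelper('setup.cfg', line_idx=2, contents=contents)
#   helper.line_replace('version = 1.1')      # replace the line at line_idx
#   helper.insert(['# regenerated'], offset=1)
#   helper.write()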
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import sys
import tarfile
import json
import gzip
import pandas as pd
import botometer
from pandas.io.json import json_normalize
## VARIABLE INITIALIZATION
# system arguments
# arg 1 = 'rs' or 'sn'
# arg 2 = hour file 6,7 or 8 ?
# arg 3 = start row
# arg 4 = end row
# arg 5 = key selection, 1,2,3,4
# sn 7 : total row 33277
# sn 8 : total row 53310
# rs 7 : 7230
# rs 8 : 10493
mashape_key = "QRraJnMT9KmshkpJ7iu74xKFN1jtp1IyBBijsnS5NGbEuwIX54"
if(int(sys.argv[5])==1):
twitter_app_auth = {
'consumer_key': 'qngvt8PPer3irSHHkx71gqpJg',
'consumer_secret': 'bAH258rRds9uWAi38kSwxgbJ1x0rAspasQACgOruuK4qnKsXld',
'access_token': '218041595-yrk9WyMnTjh4PBidhApb0DwryK83Wzr32IWi6bP4',
'access_token_secret': 'GCmOzFmzrOoAv59lCpKRQrC9e7H1P0449iaBW1rI66saS',
}
elif (int(sys.argv[5])==2):
twitter_app_auth = {
'consumer_key': 'xQkTg8KSU7HlEEvaD8EJA',
'consumer_secret': 'TMFRBmvGdGJtzwFJ3fyluPWszl5qCDuwBUqy0AGj0g',
'access_token': '218041595-JUmLw0xEtnJVrqn03DCirlZpnL1Z7taWwKYZYUPN',
'access_token_secret': 'cIdkjvTghunH6GGLRIjQW06ghyOFkX1w7jnurcJPVyIQw',
}
elif (int(sys.argv[5])==3):
twitter_app_auth = {
'consumer_key': 'sPzHpcj4jMital75nY7dfd4zn',
'consumer_secret': 'rTGm68zdNmLvnTc22cBoFg4eVMf3jLVDSQLOwSqE9lXbVWLweI',
'access_token': '4258226113-4UnHbbbxoRPz10thy70q9MtEk9xXfJGOpAY12KW',
'access_token_secret': '549HdasMEW0q2uV05S5s4Uj5SdCeEWT8dNdLNPiAeeWoX',
}
elif (int(sys.argv[5])==4):
twitter_app_auth = {
'consumer_key': 'wZnIRW0aMRmHuQ3Rh5c2v7al4',
'consumer_secret': 'ugFcKDc0WP7ktDw3Ch1ZddWknckkfFiH9ZvIKFDwg7k8ivDyFB',
'access_token': '218041595-JSRBUY3CJ55km9Jb0QnJA6lQnyRoPfvpq6lNAsak',
'access_token_secret': 'ck1wTLfMP5CeLAfnbkS3U7oKxY6e0xu9C7fosq3fNH8gO',
}
else:
twitter_app_auth = {
'consumer_key': 'kcnlkVFRADdxaWNtWNAy3LquT',
'consumer_secret': 'bAH258rRds9uWAi38kSwxgbJ1x0rAspasQACgOruuK4qnKsXld',
'access_token': '218041595-yrk9WyMnTjh4PBidhApb0DwryK83Wzr32IWi6bP4',
'access_token_secret': 'GCmOzFmzrOoAv59lCpKRQrC9e7H1P0449iaBW1rI66saS',
}
bom = botometer.Botometer(wait_on_ratelimit=True,
mashape_key=mashape_key,
**twitter_app_auth)
if(sys.argv[1]=='rs'):
input_file="data/distinct_userlist_rs_201606230"+sys.argv[2]+".csv"
else:
input_file="data/distinct_userlist_201606230"+sys.argv[2]+".csv"
bot_data = pd.read_csv(input_file, index_col = 0, names =['screen_name'])
print(len(bot_data))
distinct_uname=[]
for i in bot_data.values:
distinct_uname.append((str('@'+i).replace("['","")).replace("']",''))
botoresult = pd.DataFrame()
for screen_name, result in bom.check_accounts_in(distinct_uname[int(sys.argv[3]):int(sys.argv[4])]):
botoresult=botoresult.append(result, ignore_index=True)
output_bot=pd.concat([botoresult.user.apply(pd.Series), botoresult.scores.apply(pd.Series), botoresult.categories.apply(pd.Series)], axis=1)
print("bot result :",len(botoresult))
print("bot output :",len(output_bot))
output_file="data/outputbot_201606230"+sys.argv[2]+"_"+sys.argv[1]+"_"+sys.argv[6]+".csv"
output_bot.to_csv(output_file, sep=',', encoding='utf-8')
|
nilq/baby-python
|
python
|
from analyzers.utility_functions import auth
from parsers.Parser import Parser
class HthParser(Parser):
__url = "https://fantasy.premierleague.com/api/leagues-h2h-matches/league/{}/?page={}&event={}"
__HUGE_HTH_LEAGUE = 19824
def __init__(self, team_id, leagues, current_event):
super().__init__(team_id)
self.__leagues = leagues
self.__current_event = current_event
"""
self.__leagues is a dictionary:
- keys are leagues codes
- values are strings = names of these leagues
result is a dictionary:
- keys are opponent ids
- values are strings = names of the league where the match is going to be played
"""
def get_opponents_ids(self):
result = {}
session = auth()
for key, value in self.__leagues.items():
# Ignoring this league because there's an issue with it
if key == self.__HUGE_HTH_LEAGUE:
continue
(opponent_id, (my_points, opponent_points)) = self.__get_opponent_id(session=session, league_code=key)
# Regular match
if opponent_id is not None:
result[opponent_id] = value
# H2H league with odd number of players:
# In this case, opponent is league's average score
            # opponent_points then holds the league's average score
else:
result["AVERAGE"] = (my_points, opponent_points, value)
return result
# TO-DO: Issue with HUGE H2H leagues
# Example league: 19824
def __get_opponent_id(self, session, league_code, page_cnt=1):
new_url = self.__url.format(league_code, page_cnt, self.__current_event)
response = session.get(new_url).json()
# has_next = response["has_next"]
data = response["results"]
opponent_id = -1
points = -1
for element in data:
match = [element["entry_1_entry"], element["entry_2_entry"]]
points = [element["entry_1_points"], element["entry_2_points"]]
if match[0] == self._id:
opponent_id = match[1]
elif match[1] == self._id:
opponent_id = match[0]
points.reverse()
if opponent_id != -1:
result = (opponent_id, points)
return result
else:
return self.__get_opponent_id(session, league_code, page_cnt + 1)
|
nilq/baby-python
|
python
|
import numpy as np
from math import log2
from scamp_filter.Item import Item as I
from termcolor import colored
def approx(target, depth=5, max_coeff=-1, silent=True):
coeffs = {}
total = 0.0
current = 256
for i in range(-8, depth):
if total == target:
break
        # only use this coefficient if the remaining error is at least half its value;
        # otherwise adding or subtracting it would overshoot and move us further from the target
if abs(total - target) > 1/2*current:
# decide which direction brings us closer to the target
if abs((total-current)-target) > abs(total + current - target):
coeffs[current] = 1
total += current
else:
coeffs[current] = -1
total -= current
current /= 2
if max_coeff > 0 and len(coeffs) >= max_coeff:
break
if not silent:
print("Target: %.5f\n" % target)
print("Error: %.5f\n" % (total-target))
print(coeffs)
return total, coeffs
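# Worked example (not part of the original module; obtained by tracing the loop above):
# approx(0.7, depth=5) accumulates the signed power-of-two terms +1, -0.5, +0.25, -0.0625,
# returning total = 0.6875 and coeffs = {1.0: 1, 0.5: -1, 0.25: 1, 0.0625: -1},
# i.e. an absolute error of 0.0125 against the target.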
def print_filter(filter):
print('----------------------')
for row in filter:
for item in row:
print('%5s'%str(item), end=' ')
print('')
print('----------------------')
def approx_filter(filter, depth=4, max_coeff=-1, verbose=0):
if verbose>1:
print(colored('>> Input filter', 'yellow'))
print_filter(filter)
if verbose>0:
print(colored('>> Approximating Filter', 'magenta'))
pre_goal = []
h, w = filter.shape
approximated_filter = np.zeros(filter.shape)
for (y, x), val in np.ndenumerate(filter):
a, coeffs = approx(val, depth, silent=True, max_coeff=max_coeff)
approximated_filter[y, x] = a
pre_goal = pre_goal + [I(int(-log2(c)), x-w//2, h//2-y) if weight == 1 else -I(int(-log2(c)), x-w//2, h//2-y) for c, weight in coeffs.items()]
if verbose>1:
print(colored('>> Approximated filter', 'yellow'))
print_filter(approximated_filter)
return pre_goal, approximated_filter
|
nilq/baby-python
|
python
|
###########################################################
# Re-bindings for unpickling
#
# We want to ensure class
# sage.modular.congroup_element.CongruenceSubgroupElement still exists, so we
# can unpickle safely.
#
###########################################################
from sage.modular.arithgroup.arithgroup_element import ArithmeticSubgroupElement
CongruenceSubgroupElement = ArithmeticSubgroupElement
|
nilq/baby-python
|
python
|
"""Impementation for print_rel_notes."""
def print_rel_notes(
name,
repo,
version,
outs = None,
setup_file = "",
deps_method = "",
toolchains_method = "",
org = "bazelbuild",
changelog = None,
mirror_host = None):
tarball_name = ":%s-%s.tar.gz" % (repo, version)
# Must use Label to get a path relative to the rules_pkg repository,
# instead of the calling BUILD file.
print_rel_notes_helper = Label("//pkg/releasing:print_rel_notes")
tools = [print_rel_notes_helper]
cmd = [
"LC_ALL=C.UTF-8 $(location %s)" % str(print_rel_notes_helper),
"--org=%s" % org,
"--repo=%s" % repo,
"--version=%s" % version,
"--tarball=$(location %s)" % tarball_name,
]
if setup_file:
cmd.append("--setup_file=%s" % setup_file)
if deps_method:
cmd.append("--deps_method=%s" % deps_method)
if toolchains_method:
cmd.append("--toolchains_method=%s" % toolchains_method)
if changelog:
cmd.append("--changelog=$(location %s)" % changelog)
# We should depend on a changelog as a tool so that it is always built
# for the host configuration. If the changelog is generated on the fly,
# then we would have to run commands against our revision control
# system. That only makes sense locally on the host, because the
# revision history is never exported to a remote build system.
tools.append(changelog)
if mirror_host:
cmd.append("--mirror_host=%s" % mirror_host)
cmd.append(">$@")
native.genrule(
name = name,
srcs = [
tarball_name,
],
outs = outs or [name + ".txt"],
cmd = " ".join(cmd),
tools = tools,
)
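# Minimal usage sketch (hypothetical BUILD file; the load label, target name and
# version below are placeholders, not taken from this repository):
#
#   load("//pkg/releasing:release_tools.bzl", "print_rel_notes")
#
#   print_rel_notes(
#       name = "relnotes",
#       repo = "rules_pkg",
#       version = "0.7.0",
#       changelog = ":CHANGELOG.md",
#   )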
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Run VGG benchmark series.
# Prepares and runs multiple tasks on multiple GPUs: one task per GPU.
# Waits if no GPUs available. For GPU availability check uses "nvidia-smi dmon" command.
# 2018 (C) Peter Bryzgalov @ CHITECH Stair Lab
import multigpuexec
import time
import os
import datetime
# Set GPU range
gpus = range(0, 1)
# Change hostname
host = "p3.2xlarge"
# Set number of runs
runs = 1
# Set mini-batch sizes
batchsizes = [7, 8, 9] + range(10, 200, 10) + range(200, 501, 50)
# Log algos
# batchsizes = [10, 20, 50, 100, 150, 200, 400, 500]
# Set algorithms
backfilterconvalgos = ["cudnn"]
algods = ["cudnn"] # Data gradient algorithm
algofwds = ["cudnn"]
benchmark = "VGG"
template = "VGG.dnntemplate"
datasetsize = 50000
date = datetime.datetime.today().strftime('%Y%m%d')
nvprof = False
with_memory = False
debuginfo = False
debuginfo_option = ""
if debuginfo:
debuginfo_option = " --debug"
tasks = []
command = "./run_dnnmark_template.sh -b test_{} --template {}".format(benchmark, template)
logdir = "logs/{}/dnnmark_{}_microseries_{}/".format(host, benchmark, date)
if not os.path.exists(logdir):
os.makedirs(logdir)
print "Logdir", logdir
logfile_base = "dnnmark_{}_{}".format(host, benchmark)
for batch in batchsizes:
for algod in algods:
for algo in backfilterconvalgos:
for algofwd in algofwds:
algod_opt = " --algod {}".format(algod)
logname = "{}_bs{}_algos{}-{}-{}".format(logfile_base, batch, algofwd, algo, algod)
for run in range(runs):
logfile = os.path.join(logdir, "{}_{:02d}.log".format(logname, run))
if os.path.isfile(logfile):
print "file", logfile, "exists."
else:
command_pars = command + " -n {} --algo {} --algod {} --algofwd {} -d {}{}".format(
batch, algo, algod, algofwd, datasetsize, debuginfo_option)
task = {"comm": command_pars, "logfile": logfile, "batch": batch, "nvsmi": with_memory}
tasks.append(task)
if nvprof:
iterations = 10
# print "BS: {}, Iterations: {}".format(batch,iterations)
nvlogname = "{}_iter{}".format(logname, iterations)
command_pars = command + " -n {} -d {} --algo {} --algod {} --algofwd {} --iter {} --warmup 0".format(
batch, datasetsize, algo, algod, algofwd, iterations)
logfile = os.path.join(logdir, "{}_%p.nvprof".format(nvlogname))
if os.path.isfile(logfile):
print "file", logfile, "exists."
else:
profcommand = "nvprof -u s --profile-api-trace none --unified-memory-profiling off --profile-child-processes --csv --log-file {} {}".format(
logfile, command_pars)
task = {"comm": profcommand, "logfile": logfile, "batch": batch, "nvsmi": False}
tasks.append(task)
print "Have", len(tasks), "tasks"
gpu = -1
for i in range(0, len(tasks)):
gpu = multigpuexec.getNextFreeGPU(gpus, start=gpu + 1, c=1, d=1, nvsmi=tasks[i]["nvsmi"], mode="dmon", debug=False)
gpu_info = multigpuexec.getGPUinfo(gpu)
f = open(tasks[i]["logfile"], "w+")
f.write(tasks[i]["comm"] + "\n")
f.write("b{}\n".format(tasks[i]["batch"]))
f.write("GPU{}: {}\n".format(gpu, gpu_info))
f.close()
print time.strftime("[%d %H:%M:%S]"),
multigpuexec.runTask(tasks[i], gpu, nvsmi=tasks[i]["nvsmi"], delay=0, debug=False)
print tasks[i]["logfile"]
print "{}/{} tasks".format(i + 1, len(tasks))
time.sleep(1)
|
nilq/baby-python
|
python
|
from distutils.core import Extension, setup
import numpy as np
setup(
name="numpy_ctypes_example",
version="1.0",
description="numpy ctypes example",
author="Mateen Ulhaq",
author_email="mulhaq2005@gmail.com",
maintainer="mulhaq2005@gmail.com",
url="https://github.com/YodaEmbedding/experiments",
ext_modules=[
Extension(
name="lib",
sources=["lib.c"],
extra_compile_args=["-Ofast", "-march=native"],
include_dirs=[np.get_include()],
),
],
)
|
nilq/baby-python
|
python
|
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Test cases for L{ofspy.context}.
"""
import unittest
from ..federation import Federation
from ..simulator import Simulator
from ..context import Context
from ..surface import Surface
from ..orbit import Orbit
from ..demand import Demand
from ..valueSchedule import ValueSchedule
class ContextTestCase(unittest.TestCase):
def setUp(self):
self.default = Context(seed=0)
self.locs = []
for s in range(6):
self.locs.append(Surface(s, name='SUR{0}'.format(s+1)))
self.locs.append(Orbit(s, 'LEO', name='LEO{0}'.format(s+1)))
self.locs.append(Orbit(s, 'MEO', name='MEO{0}'.format(s+1)))
self.locs.append(Orbit(s, 'GEO', name='GEO{0}'.format(s+1)))
self.evts = []
for d in range(8):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(1,500),(4,400)], -50),
name='SAR1.{0}'.format(d+1)))
for d in range(12):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(2,450),(5,350)], -100),
name='SAR2.{0}'.format(d+1)))
for d in range(23):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(3,400),(6,300)], -150),
name='SAR3.{0}'.format(d+1)))
for d in range(8):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(1,600),(4,500)], -50),
name='VIS1.{0}'.format(d+1)))
for d in range(17):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(2,500),(5,400)], -100),
name='VIS2.{0}'.format(d+1)))
for d in range(8):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(3,450),(6,350)], -150),
name='VIS3.{0}'.format(d+1)))
self.default = Context(locations=self.locs, events=self.evts,
federations=[Federation()], seed=0)
self.sim = Simulator(entities=[self.default],
initTime=0, timeStep=1, maxTime=3)
def tearDown(self):
self.default = None
self.locs = None
self.evts = None
def test_propagate(self):
self.assertEqual(self.default.propagate(self.locs[0], 0), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[0], 1), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[0], 2), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[1], 0), self.locs[1])
self.assertEqual(self.default.propagate(self.locs[1], 1), self.locs[9])
self.assertEqual(self.default.propagate(self.locs[1], 2), self.locs[17])
self.assertEqual(self.default.propagate(self.locs[1], 3), self.locs[1])
self.assertEqual(self.default.propagate(self.locs[1], 4), self.locs[9])
self.assertEqual(self.default.propagate(self.locs[1], -1), self.locs[17])
self.assertEqual(self.default.propagate(self.locs[2], 0), self.locs[2])
self.assertEqual(self.default.propagate(self.locs[2], 1), self.locs[6])
self.assertEqual(self.default.propagate(self.locs[2], 2), self.locs[10])
self.assertEqual(self.default.propagate(self.locs[3], 0), self.locs[3])
self.assertEqual(self.default.propagate(self.locs[3], 1), self.locs[3])
self.assertEqual(self.default.propagate(self.locs[3], 2), self.locs[3])
def test_init(self):
self.assertEqual(self.default.currentEvents, [])
self.assertEqual(self.default.futureEvents, [])
self.assertEqual(self.default.pastEvents, [])
self.default.init(self.sim)
self.assertEqual(self.default.currentEvents, [])
self.assertNotEqual(self.default.futureEvents, [])
self.assertEqual(len(self.default.futureEvents),
len(self.default.events))
self.assertEqual(self.default.pastEvents, [])
def test_tick(self):
self.default.init(self.sim)
self.default.tick(self.sim)
def test_tock(self):
self.default.init(self.sim)
self.default.tick(self.sim)
self.default.tock()
self.assertEqual(len(self.default.currentEvents), 6)
self.assertEqual(len(self.default.futureEvents),
len(self.default.events) - 6)
|
nilq/baby-python
|
python
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=invalid-overridden-method
import functools
from typing import ( # pylint: disable=unused-import
Union,
Optional,
Any,
IO,
Iterable,
AnyStr,
Dict,
List,
Tuple,
TYPE_CHECKING,
)
try:
from urllib.parse import urlparse, quote, unquote # pylint: disable=unused-import
except ImportError:
from urlparse import urlparse # type: ignore
from urllib2 import quote, unquote # type: ignore
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.async_paging import AsyncItemPaged
from .._shared.base_client_async import AsyncStorageAccountHostsMixin
from .._shared.request_handlers import add_metadata_headers, serialize_iso
from .._shared.response_handlers import (
return_response_headers,
process_storage_error,
return_headers_and_deserialized,
)
from .._deserialize import deserialize_queue_properties, deserialize_queue_creation
from .._generated.version import VERSION
from .._generated.aio import AzureQueueStorage
from .._generated.models import StorageErrorException, SignedIdentifier
from .._generated.models import QueueMessage as GenQueueMessage
from .._models import QueueMessage, AccessPolicy
from ._models import MessagesPaged
from .._shared.policies_async import ExponentialRetry
from .._queue_client import QueueClient as QueueClientBase
if TYPE_CHECKING:
from datetime import datetime
from azure.core.pipeline.policies import HTTPPolicy
from .._models import QueueSasPermissions, QueueProperties
class QueueClient(AsyncStorageAccountHostsMixin, QueueClientBase):
"""A client to interact with a specific Queue.
:param str account_url:
The URL to the storage account. In order to create a client given the full URI to the queue,
use the :func:`from_queue_url` classmethod.
:param queue_name: The name of the queue.
:type queue_name: str
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string,
an instance of a AzureSasCredential from azure.core.credentials, an account
shared access key, or an instance of a TokenCredentials class from azure.identity.
:keyword str api_version:
The Storage API version to use for requests. Default value is '2019-07-07'.
Setting to an older version may result in reduced feature compatibility.
:keyword str secondary_hostname:
The hostname of the secondary endpoint.
:keyword message_encode_policy: The encoding policy to use on outgoing messages.
Default is not to encode messages. Other options include :class:`TextBase64EncodePolicy`,
:class:`BinaryBase64EncodePolicy` or `None`.
:keyword message_decode_policy: The decoding policy to use on incoming messages.
Default value is not to decode messages. Other options include :class:`TextBase64DecodePolicy`,
:class:`BinaryBase64DecodePolicy` or `None`.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_create_queue_client]
:end-before: [END async_create_queue_client]
:language: python
:dedent: 16
:caption: Create the queue client with url and credential.
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_create_queue_client_from_connection_string]
:end-before: [END async_create_queue_client_from_connection_string]
:language: python
:dedent: 8
:caption: Create the queue client with a connection string.
"""
def __init__(
self,
account_url, # type: str
queue_name, # type: str
credential=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> None
kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
loop = kwargs.pop('loop', None)
super(QueueClient, self).__init__(
account_url, queue_name=queue_name, credential=credential, loop=loop, **kwargs
)
self._client = AzureQueueStorage(self.url, pipeline=self._pipeline, loop=loop) # type: ignore
self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access
self._loop = loop
@distributed_trace_async
async def create_queue(self, **kwargs):
# type: (Optional[Any]) -> None
"""Creates a new queue in the storage account.
If a queue with the same name already exists, the operation fails with
a `ResourceExistsError`.
:keyword dict(str,str) metadata:
A dict containing name-value pairs to associate with the queue as
metadata. Note that metadata names preserve the case with which they
were created, but are case-insensitive when set or read.
:keyword int timeout:
The server timeout, expressed in seconds.
:return: None or the result of cls(response)
:rtype: None
:raises: StorageErrorException
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_hello_world_async.py
:start-after: [START async_create_queue]
:end-before: [END async_create_queue]
:language: python
:dedent: 12
:caption: Create a queue.
"""
metadata = kwargs.pop('metadata', None)
timeout = kwargs.pop('timeout', None)
headers = kwargs.pop("headers", {})
headers.update(add_metadata_headers(metadata)) # type: ignore
try:
return await self._client.queue.create( # type: ignore
metadata=metadata, timeout=timeout, headers=headers, cls=deserialize_queue_creation, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def delete_queue(self, **kwargs):
# type: (Optional[Any]) -> None
"""Deletes the specified queue and any messages it contains.
When a queue is successfully deleted, it is immediately marked for deletion
and is no longer accessible to clients. The queue is later removed from
the Queue service during garbage collection.
Note that deleting a queue is likely to take at least 40 seconds to complete.
If an operation is attempted against the queue while it was being deleted,
an :class:`HttpResponseError` will be thrown.
:keyword int timeout:
The server timeout, expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_hello_world_async.py
:start-after: [START async_delete_queue]
:end-before: [END async_delete_queue]
:language: python
:dedent: 16
:caption: Delete a queue.
"""
timeout = kwargs.pop('timeout', None)
try:
await self._client.queue.delete(timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def get_queue_properties(self, **kwargs):
# type: (Optional[Any]) -> QueueProperties
"""Returns all user-defined metadata for the specified queue.
The data returned does not include the queue's list of messages.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: User-defined metadata for the queue.
:rtype: ~azure.storage.queue.QueueProperties
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_get_queue_properties]
:end-before: [END async_get_queue_properties]
:language: python
:dedent: 16
:caption: Get the properties on the queue.
"""
timeout = kwargs.pop('timeout', None)
try:
response = await self._client.queue.get_properties(
timeout=timeout, cls=deserialize_queue_properties, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
response.name = self.queue_name
return response # type: ignore
@distributed_trace_async
async def set_queue_metadata(self, metadata=None, **kwargs):
# type: (Optional[Dict[str, Any]], Optional[Any]) -> None
"""Sets user-defined metadata on the specified queue.
Metadata is associated with the queue as name-value pairs.
:param metadata:
A dict containing name-value pairs to associate with the
queue as metadata.
:type metadata: dict(str, str)
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_set_queue_metadata]
:end-before: [END async_set_queue_metadata]
:language: python
:dedent: 16
:caption: Set metadata on the queue.
"""
timeout = kwargs.pop('timeout', None)
headers = kwargs.pop("headers", {})
headers.update(add_metadata_headers(metadata)) # type: ignore
try:
return await self._client.queue.set_metadata( # type: ignore
timeout=timeout, headers=headers, cls=return_response_headers, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def get_queue_access_policy(self, **kwargs):
# type: (Optional[Any]) -> Dict[str, Any]
"""Returns details about any stored access policies specified on the
queue that may be used with Shared Access Signatures.
:keyword int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the queue.
:rtype: dict(str, ~azure.storage.queue.AccessPolicy)
"""
timeout = kwargs.pop('timeout', None)
try:
_, identifiers = await self._client.queue.get_access_policy(
timeout=timeout, cls=return_headers_and_deserialized, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
return {s.id: s.access_policy or AccessPolicy() for s in identifiers}
@distributed_trace_async
async def set_queue_access_policy(self, signed_identifiers, **kwargs):
# type: (Dict[str, AccessPolicy], Optional[Any]) -> None
"""Sets stored access policies for the queue that may be used with Shared
Access Signatures.
When you set permissions for a queue, the existing permissions are replaced.
To update the queue's permissions, call :func:`~get_queue_access_policy` to fetch
all access policies associated with the queue, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a queue, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`HttpResponseError` until the access policy becomes active.
:param signed_identifiers:
SignedIdentifier access policies to associate with the queue.
This may contain up to 5 elements. An empty dict
will clear the access policies set on the service.
:type signed_identifiers: dict(str, ~azure.storage.queue.AccessPolicy)
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_set_access_policy]
:end-before: [END async_set_access_policy]
:language: python
:dedent: 16
:caption: Set an access policy on the queue.
"""
timeout = kwargs.pop('timeout', None)
if len(signed_identifiers) > 15:
raise ValueError(
"Too many access policies provided. The server does not support setting "
"more than 15 access policies on a single resource."
)
identifiers = []
for key, value in signed_identifiers.items():
if value:
value.start = serialize_iso(value.start)
value.expiry = serialize_iso(value.expiry)
identifiers.append(SignedIdentifier(id=key, access_policy=value))
signed_identifiers = identifiers # type: ignore
try:
await self._client.queue.set_access_policy(queue_acl=signed_identifiers or None, timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def send_message( # type: ignore
self,
content, # type: Any
**kwargs # type: Optional[Any]
):
# type: (...) -> QueueMessage
"""Adds a new message to the back of the message queue.
The visibility timeout specifies the time that the message will be
invisible. After the timeout expires, the message will become visible.
If a visibility timeout is not specified, the default value of 0 is used.
The message time-to-live specifies how long a message will remain in the
queue. The message will be deleted from the queue when the time-to-live
period expires.
If the key-encryption-key field is set on the local service object, this method will
encrypt the content before uploading.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str. The encoded message can be up to
64KB in size.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int time_to_live:
Specifies the time-to-live interval for the message, in
seconds. The time-to-live may be any positive number or -1 for infinity. If this
parameter is omitted, the default time-to-live is 7 days.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A :class:`~azure.storage.queue.QueueMessage` object.
This object is also populated with the content although it is not
returned from the service.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_send_messages]
:end-before: [END async_send_messages]
:language: python
:dedent: 16
:caption: Send messages.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
time_to_live = kwargs.pop('time_to_live', None)
timeout = kwargs.pop('timeout', None)
self._config.message_encode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
encoded_content = self._config.message_encode_policy(content)
new_message = GenQueueMessage(message_text=encoded_content)
try:
enqueued = await self._client.messages.enqueue(
queue_message=new_message,
visibilitytimeout=visibility_timeout,
message_time_to_live=time_to_live,
timeout=timeout,
**kwargs
)
queue_message = QueueMessage(content=content)
queue_message.id = enqueued[0].message_id
queue_message.inserted_on = enqueued[0].insertion_time
queue_message.expires_on = enqueued[0].expiration_time
queue_message.pop_receipt = enqueued[0].pop_receipt
queue_message.next_visible_on = enqueued[0].time_next_visible
return queue_message
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def receive_message(self, **kwargs):
# type: (Optional[Any]) -> QueueMessage
"""Removes one message from the front of the queue.
When the message is retrieved from the queue, the response includes the message
content and a pop_receipt value, which is required to delete the message.
The message is not automatically deleted from the queue, but after it has
been retrieved, it is not visible to other clients for the time interval
specified by the visibility_timeout parameter.
If the key-encryption-key or resolver field is set on the local service object, the message will be
decrypted before being returned.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
Returns a message from the Queue.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START receive_one_message]
:end-before: [END receive_one_message]
:language: python
:dedent: 12
:caption: Receive one message from the queue.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function)
try:
message = await self._client.messages.dequeue(
number_of_messages=1,
visibilitytimeout=visibility_timeout,
timeout=timeout,
cls=self._config.message_decode_policy,
**kwargs
)
wrapped_message = QueueMessage._from_generated( # pylint: disable=protected-access
message[0]) if message != [] else None
return wrapped_message
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace
def receive_messages(self, **kwargs):
# type: (Optional[Any]) -> AsyncItemPaged[QueueMessage]
"""Removes one or more messages from the front of the queue.
When a message is retrieved from the queue, the response includes the message
content and a pop_receipt value, which is required to delete the message.
The message is not automatically deleted from the queue, but after it has
been retrieved, it is not visible to other clients for the time interval
specified by the visibility_timeout parameter.
If the key-encryption-key or resolver field is set on the local service object, the messages will be
decrypted before being returned.
:keyword int messages_per_page:
A nonzero integer value that specifies the number of
messages to retrieve from the queue, up to a maximum of 32. If
fewer are visible, the visible messages are returned. By default,
a single message is retrieved from the queue with this operation.
`by_page()` can be used to provide a page iterator on the AsyncItemPaged if messages_per_page is set.
`next()` can be used to get the next page.
:keyword int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
Returns a message iterator of dict-like Message objects.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.queue.QueueMessage]
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_receive_messages]
:end-before: [END async_receive_messages]
:language: python
:dedent: 16
:caption: Receive messages from the queue.
"""
messages_per_page = kwargs.pop('messages_per_page', None)
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
try:
command = functools.partial(
self._client.messages.dequeue,
visibilitytimeout=visibility_timeout,
timeout=timeout,
cls=self._config.message_decode_policy,
**kwargs
)
return AsyncItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def update_message(
self,
message,
pop_receipt=None,
content=None,
**kwargs
):
# type: (Any, int, Optional[str], Optional[Any], Any) -> QueueMessage
"""Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
This operation can be used to continually extend the invisibility of a
queue message. This functionality can be useful if you want a worker role
to "lease" a queue message. For example, if a worker role calls :func:`~receive_messages()`
and recognizes that it needs more time to process a message, it can
continually extend the message's invisibility until it is processed. If
the worker role were to fail during processing, eventually the message
would become visible again and another worker role could process it.
If the key-encryption-key field is set on the local service object, this method will
encrypt the content before uploading.
:param message:
The message object or id identifying the message to update.
:type message: str or ~azure.storage.queue.QueueMessage
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to the :func:`~receive_messages` or :func:`~update_message` operation.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str.
:keyword int visibility_timeout:
Specifies the new visibility timeout value, in seconds,
relative to server time. The new value must be larger than or equal
to 0, and cannot be larger than 7 days. The visibility timeout of a
message cannot be set to a value later than the expiry time. A
message can be updated until it has been deleted or has expired.
The message object or message id identifying the message to update.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A :class:`~azure.storage.queue.QueueMessage` object. For convenience,
this object is also populated with the content, although it is not returned by the service.
:rtype: ~azure.storage.queue.QueueMessage
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_update_message]
:end-before: [END async_update_message]
:language: python
:dedent: 16
:caption: Update a message.
"""
visibility_timeout = kwargs.pop('visibility_timeout', None)
timeout = kwargs.pop('timeout', None)
try:
message_id = message.id
message_text = content or message.content
receipt = pop_receipt or message.pop_receipt
inserted_on = message.inserted_on
expires_on = message.expires_on
dequeue_count = message.dequeue_count
except AttributeError:
message_id = message
message_text = content
receipt = pop_receipt
inserted_on = None
expires_on = None
dequeue_count = None
if receipt is None:
raise ValueError("pop_receipt must be present")
if message_text is not None:
self._config.message_encode_policy.configure(
self.require_encryption, self.key_encryption_key, self.key_resolver_function
)
encoded_message_text = self._config.message_encode_policy(message_text)
updated = GenQueueMessage(message_text=encoded_message_text)
else:
updated = None # type: ignore
try:
response = await self._client.message_id.update(
queue_message=updated,
visibilitytimeout=visibility_timeout or 0,
timeout=timeout,
pop_receipt=receipt,
cls=return_response_headers,
queue_message_id=message_id,
**kwargs
)
new_message = QueueMessage(content=message_text)
new_message.id = message_id
new_message.inserted_on = inserted_on
new_message.expires_on = expires_on
new_message.dequeue_count = dequeue_count
new_message.pop_receipt = response["popreceipt"]
new_message.next_visible_on = response["time_next_visible"]
return new_message
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def peek_messages(self, max_messages=None, **kwargs):
# type: (Optional[int], Optional[Any]) -> List[QueueMessage]
"""Retrieves one or more messages from the front of the queue, but does
not alter the visibility of the message.
Only messages that are visible may be retrieved. When a message is retrieved
for the first time with a call to :func:`~receive_messages`, its dequeue_count property
is set to 1. If it is not deleted and is subsequently retrieved again, the
dequeue_count property is incremented. The client may use this value to
determine how many times a message has been retrieved. Note that a call
to peek_messages does not increment the value of dequeue_count, but returns
this value for the client to read.
If the key-encryption-key or resolver field is set on the local service object,
the messages will be decrypted before being returned.
:param int max_messages:
A nonzero integer value that specifies the number of
messages to peek from the queue, up to a maximum of 32. By default,
a single message is peeked from the queue with this operation.
:keyword int timeout:
The server timeout, expressed in seconds.
:return:
A list of :class:`~azure.storage.queue.QueueMessage` objects. Note that
next_visible_on and pop_receipt will not be populated as peek does
not pop the message and can only retrieve already visible messages.
:rtype: list(:class:`~azure.storage.queue.QueueMessage`)
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_peek_message]
:end-before: [END async_peek_message]
:language: python
:dedent: 16
:caption: Peek messages.
"""
timeout = kwargs.pop('timeout', None)
if max_messages and not 1 <= max_messages <= 32:
raise ValueError("Number of messages to peek should be between 1 and 32")
self._config.message_decode_policy.configure(
require_encryption=self.require_encryption,
key_encryption_key=self.key_encryption_key,
resolver=self.key_resolver_function
)
try:
messages = await self._client.messages.peek(
number_of_messages=max_messages, timeout=timeout, cls=self._config.message_decode_policy, **kwargs
)
wrapped_messages = []
for peeked in messages:
wrapped_messages.append(QueueMessage._from_generated(peeked)) # pylint: disable=protected-access
return wrapped_messages
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def clear_messages(self, **kwargs):
# type: (Optional[Any]) -> None
"""Deletes all messages from the specified queue.
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_clear_messages]
:end-before: [END async_clear_messages]
:language: python
:dedent: 16
:caption: Clears all messages.
"""
timeout = kwargs.pop('timeout', None)
try:
await self._client.messages.clear(timeout=timeout, **kwargs)
except StorageErrorException as error:
process_storage_error(error)
@distributed_trace_async
async def delete_message(self, message, pop_receipt=None, **kwargs):
# type: (Any, Optional[str], Any) -> None
"""Deletes the specified message.
Normally after a client retrieves a message with the receive messages operation,
the client is expected to process and delete the message. To delete the
message, you must have the message object itself, or two items of data: id and pop_receipt.
The id is returned from the previous receive_messages operation. The
pop_receipt is returned from the most recent :func:`~receive_messages` or
:func:`~update_message` operation. In order for the delete_message operation
to succeed, the pop_receipt specified on the request must match the
pop_receipt returned from the :func:`~receive_messages` or :func:`~update_message`
operation.
:param message:
The message object or id identifying the message to delete.
:type message: str or ~azure.storage.queue.QueueMessage
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to the :func:`~receive_messages` or :func:`~update_message`.
:keyword int timeout:
The server timeout, expressed in seconds.
.. admonition:: Example:
.. literalinclude:: ../samples/queue_samples_message_async.py
:start-after: [START async_delete_message]
:end-before: [END async_delete_message]
:language: python
:dedent: 16
:caption: Delete a message.
"""
timeout = kwargs.pop('timeout', None)
try:
message_id = message.id
receipt = pop_receipt or message.pop_receipt
except AttributeError:
message_id = message
receipt = pop_receipt
if receipt is None:
raise ValueError("pop_receipt must be present")
try:
await self._client.message_id.delete(
pop_receipt=receipt, timeout=timeout, queue_message_id=message_id, **kwargs
)
except StorageErrorException as error:
process_storage_error(error)
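# ---------------------------------------------------------------------------
# Minimal async usage sketch (not part of the SDK; the account URL, queue name
# and credential below are placeholders) exercising the methods defined above:
#
#   import asyncio
#
#   async def main():
#       queue = QueueClient(
#           account_url="https://myaccount.queue.core.windows.net",
#           queue_name="myqueue",
#           credential="<sas-token>")
#       await queue.create_queue()
#       await queue.send_message("hello")
#       message = await queue.receive_message()
#       if message:
#           await queue.delete_message(message)
#
#   asyncio.run(main())
# ---------------------------------------------------------------------------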
|
nilq/baby-python
|
python
|
from utils.QtCore import *
class CustomAddCoinBtn (QPushButton):
def __init__(
self
):
super().__init__()
self.setCursor(Qt.PointingHandCursor)
self.setText('Add coin')
self.setObjectName('add_coin_btn')
self.setSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed)
self.setStyleSheet(f'''
QPushButton {{
margin-left: 20px;
color: #777777;
font: 600 12pt "Segoe UI";
}}
QPushButton:hover {{
color: #252525;
}}
''')
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
return self.clicked.emit()
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton:
return self.released.emit()
|
nilq/baby-python
|
python
|
from typing import Callable, List
import numpy as np
from qcodes.instrument.base import Instrument
from qcodes.instrument.parameter import Parameter
from qcodes.instrument.channel import InstrumentChannel, ChannelList
from qcodes.utils import validators as vals
from .SD_Module import SD_Module, keysightSD1, SignadyneParameter, with_error_check
# Functions to log method calls from the SD_AIN class
import re, sys, types
def logmethod(value):
def method_wrapper(self, *args, **kwargs):
input_str = ', '.join(map(str, args))
if args and kwargs:
input_str += ', ' + ', '.join(
[f'{key}={val}' for key, val in kwargs.items()])
method_str = f'{value.__name__}({input_str})'
if not hasattr(self, '_method_calls'):
self._method_calls = []
self._method_calls += [method_str]
return value(self, *args, **kwargs)
return method_wrapper
def logclass(cls):
namesToCheck = cls.__dict__.keys()
for name in namesToCheck:
# unbound methods show up as mere functions in the values of
# cls.__dict__,so we have to go through getattr
value = getattr(cls, name)
if isinstance(value, types.FunctionType):
setattr(cls, name, logmethod(value))
return cls
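# For illustration only (hypothetical class, not part of the driver): wrapping a
# class with logclass records every method call as a string on the instance, e.g.
#
#   @logclass
#   class Demo:
#       def scale(self, x, factor=2):
#           return x * factor
#
#   d = Demo()
#   d.scale(3, factor=5)
#   d._method_calls   # ['scale(3, factor=5)']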
model_channels = {'M3300A': 8}
class DigitizerChannel(InstrumentChannel):
"""Signadyne digitizer channel
Args:
parent: Parent Signadyne digitizer Instrument
name: channel name (e.g. 'ch1')
id: channel id (e.g. 1)
**kwargs: Additional kwargs passed to InstrumentChannel
"""
def __init__(self, parent: Instrument, name: str, id: int, **kwargs):
super().__init__(parent=parent, name=name, **kwargs)
self.SD_AIN = self._parent.SD_AIN
self.id = id
# For channelInputConfig
self.add_parameter(
'full_scale',
unit='V',
initial_value=1,
vals=vals.Numbers(0, 3),
# self.SD_AIN.channelMinFullScale(),
# self.SD_AIN.channelMaxFullScale()),
set_function=self.SD_AIN.channelInputConfig,
set_args=['full_scale', 'impedance', 'coupling'],
docstring=f'The full scale voltage for ch{self.id}'
)
# For channelTriggerConfig
self.add_parameter(
'impedance',
initial_value='50',
val_mapping={'high': 0, '50': 1},
get_function=self.SD_AIN.channelImpedance,
set_function=self.SD_AIN.channelInputConfig,
set_args=['full_scale', 'impedance', 'coupling'],
docstring=f'The input impedance of ch{self.id}. Note that for '
f'high input impedance, the measured voltage will not be '
f'the actual voltage'
)
self.add_parameter(
'coupling',
initial_value='AC',
val_mapping={'DC': 0, 'AC': 1},
get_function=self.SD_AIN.channelCoupling,
set_function=self.SD_AIN.channelInputConfig,
set_args=['full_scale', 'impedance', 'coupling'],
docstring=f'The coupling of ch{self.id}'
)
# For channelPrescalerConfig
self.add_parameter(
'prescaler',
initial_value=0,
vals=vals.Ints(0, 4095),
get_function=self.SD_AIN.channelPrescalerConfig,
set_function=self.SD_AIN.channelPrescalerConfig,
docstring=f'The sampling frequency prescaler for ch{self.id}. '
f'Sampling rate will be max_sampling_rate/(prescaler+1)'
)
# For DAQ config
self.add_parameter(
'points_per_cycle',
initial_value=0,
vals=vals.Ints(),
set_function=self.SD_AIN.DAQconfig,
set_args=['points_per_cycle', 'n_cycles',
'trigger_delay_samples', 'trigger_mode'],
docstring=f'The number of points per cycle for ch{self.id}'
)
self.add_parameter(
'n_cycles',
initial_value=-1,
vals=vals.Ints(),
set_function=self.SD_AIN.DAQconfig,
set_args=['points_per_cycle', 'n_cycles',
'trigger_delay_samples', 'trigger_mode'],
docstring=f'The number of cycles to collect on DAQ {self.id}'
)
self.add_parameter(
'trigger_mode',
initial_value='auto',
val_mapping={'auto': 0, 'software': 1, 'digital': 2, 'analog': 3},
set_function=self.SD_AIN.DAQconfig,
set_args=['points_per_cycle', 'n_cycles',
'trigger_delay_samples', 'trigger_mode'],
docstring=f'The trigger mode for ch{self.id}'
)
self.add_parameter(
'trigger_delay_samples',
initial_value=0,
vals=vals.Numbers(),
set_parser=int,
set_function=self.SD_AIN.DAQconfig,
set_args=['points_per_cycle', 'n_cycles',
'trigger_delay_samples', 'trigger_mode'],
docstring=f'The trigger delay (in samples) for ch{self.id}. '
f'Can be negative'
)
# For channelTriggerConfig
self.add_parameter(
'analog_trigger_edge',
initial_value='rising',
val_mapping={'rising': 1, 'falling': 2, 'both': 3},
set_function=self.SD_AIN.channelTriggerConfig,
set_args=['analog_trigger_edge', 'analog_trigger_threshold'],
docstring=f'The analog trigger edge for ch{self.id}.'
f'This is only used when the channel is set as the analog'
f'trigger channel'
)
self.add_parameter(
'analog_trigger_threshold',
initial_value=0,
vals=vals.Numbers(-3, 3),
set_function=self.SD_AIN.channelTriggerConfig,
set_args=['analog_trigger_edge', 'analog_trigger_threshold'],
docstring=f'the value in volts for the trigger threshold'
)
self.add_parameter(
'analog_trigger_mask',
initial_value=0,
vals=vals.Ints(),
set_function=self.SD_AIN.DAQanalogTriggerConfig,
docstring='the trigger mask you are using. Each bit signifies '
'which analog channel to trigger on. The channel trigger'
' behaviour must be configured separately (trigger_edge '
'and trigger_threshold). Needs to be double checked, but '
'it seems multiple analog trigger channels can be used.'
)
# For DAQ trigger Config
self.add_parameter(
'digital_trigger_mode',
initial_value='rising',
val_mapping={'active_high': 1, 'active_low': 2,
'rising': 3, 'falling': 4},
set_function=self.SD_AIN.DAQdigitalTriggerConfig,
set_args=['digital_trigger_source', 'digital_trigger_mode'],
docstring='The digital trigger mode. Can be `active_high`, '
'`active_low`, `rising`, `falling`'
)
self.add_parameter(
'digital_trigger_source',
initial_value='trig_in',
val_mapping={'trig_in': 0, **{f'pxi{k}': 4000+k for k in range(8)}},
set_function=self.SD_AIN.DAQdigitalTriggerConfig,
set_args=['digital_trigger_source', 'digital_trigger_mode'],
docstring='the trigger source you are using. Can be trig_in '
'(external IO) or pxi0 to pxi7'
)
# For DAQ read
self.add_parameter(
'n_points',
initial_value=0,
vals=vals.Ints(),
set_cmd=None,
docstring='the number of points to be read from specified DAQ'
)
self.add_parameter(
'timeout',
unit='s',
initial_value=-1,
vals=vals.Numbers(min_value=0),
set_cmd=None,
docstring=f'The read timeout in seconds. 0 means infinite.'
f'Warning: setting to 0 will freeze the digitizer until'
f'acquisition has completed.'
)
self.add_parameter(
'data_multiplier',
initial_value=1,
vals=vals.Numbers(),
set_cmd=None,
docstring=f'Value to multiply all acquisition data by'
)
def add_parameter(self, name: str,
parameter_class: type=SignadyneParameter, **kwargs):
"""Use SignadyneParameter by default"""
super().add_parameter(name=name, parameter_class=parameter_class,
parent=self, **kwargs)
@with_error_check
def start(self):
""" Start acquiring data or waiting for a trigger on the specified DAQ
Acquisition data can then be read using `daq_read`
Raises:
AssertionError if DAQstart was unsuccessful
"""
return self.SD_AIN.DAQstart(self.id)
@with_error_check
def read(self) -> np.ndarray:
""" Read from the specified DAQ.
Channel acquisition must first be started using `daq_start`
Uses channel parameters `n_points` and `timeout`
Returns:
Numpy array with acquisition data
Raises:
AssertionError if DAQread was unsuccessful
"""
value = self.SD_AIN.DAQread(self.id, self.n_points(),
int(self.timeout() * 1e3)) # ms
if not isinstance(value, int):
            # DAQread returns an int error code on failure and an array of raw
            # samples on success; only scale array data from raw ints to volts
int_min, int_max = -0x8000, 0x7FFF
v_min, v_max = -self.full_scale(), self.full_scale()
relative_value = (value.astype(float) - int_min) / (int_max - int_min)
scaled_value = v_min + (v_max-v_min) * relative_value
else:
scaled_value = value
scaled_value *= self.data_multiplier()
return scaled_value
@with_error_check
def stop(self):
""" Stop acquiring data on the specified DAQ
Raises:
AssertionError if DAQstop was unsuccessful
"""
return self.SD_AIN.DAQstop(self.id)
@with_error_check
def flush(self):
""" Flush the DAQ channel
Raises:
AssertionError if DAQflush was unsuccessful
"""
return self.SD_AIN.DAQflush(self.id)
@with_error_check
def trigger(self):
""" Manually trigger the specified DAQ
Raises:
AssertionError if DAQtrigger was unsuccessful
"""
return self.SD_AIN.DAQtrigger(self.id)
class SD_DIG(SD_Module):
"""Qcodes driver for a generic Keysight Digitizer of the M32/33XX series.
This driver is written with the M3300A in mind.
This driver makes use of the Python library provided by Keysight as part of
the SD1 Software package (v.2.01.00).
Args:
name: the name of the digitizer card
model: Digitizer model (e.g. 'M3300A').
Used to retrieve number of channels if not specified
chassis: Signadyne chassis (usually 0).
slot: module slot in chassis (starting at 1)
channels: the number of input channels the specified card has
triggers: the number of pxi trigger inputs the specified card has
"""
def __init__(self,
name: str,
model: str,
chassis: int,
slot: int,
channels: int = None,
triggers: int = 8,
**kwargs):
super().__init__(name, model, chassis, slot, triggers, **kwargs)
if channels is None:
channels = model_channels[self.model]
# Create instance of keysight SD_AIN class
# We wrap it in a logclass so that any method call is recorded in
# self.SD_AIN._method_calls
self.SD_AIN = logclass(keysightSD1.SD_AIN)()
# store card-specifics
self.n_channels = channels
# Open the device, using the specified chassis and slot number
self.initialize(chassis=chassis, slot=slot)
# for triggerIOconfig
self.add_parameter(
'trigger_direction',
label='Trigger direction for trigger port',
val_mapping={'out': 0, 'in': 1},
set_cmd=self.SD_AIN.triggerIOconfig,
docstring='The trigger direction for digitizer trigger port'
)
# for clockSetFrequency
self.add_parameter(
'system_frequency',
label='System clock frequency',
vals=vals.Numbers(),
set_cmd=None,
initial_value=100e6,
# clockGetFrequency seems to give issues
# set_cmd=self.SD_AIN.clockSetFrequency,
# get_cmd=self.SD_AIN.clockGetFrequency,
docstring='The frequency of internal CLKsys in Hz'
)
# for clockGetSyncFrequency
self.add_parameter(
'sync_frequency',
label='Clock synchronization frequency',
vals=vals.Ints(),
get_cmd=self.SD_AIN.clockGetSyncFrequency,
docstring='The frequency of internal CLKsync in Hz'
)
self.add_parameter('trigger_io',
label='trigger io',
get_function=self.SD_AIN.triggerIOread,
set_function=self.SD_AIN.triggerIOwrite,
docstring='The trigger input value, 0 (OFF) or 1 (ON)',
val_mapping={'off': 0, 'on': 1})
channels = ChannelList(self,
name='channels',
chan_type=DigitizerChannel)
for ch in range(self.n_channels):
channel = DigitizerChannel(self, name=f'ch{ch}', id=ch)
setattr(self, f'ch{ch}', channel)
channels.append(channel)
self.add_submodule('channels', channels)
def add_parameter(self, name: str,
parameter_class: type=SignadyneParameter, **kwargs):
"""Use SignadyneParameter by default"""
super().add_parameter(name=name, parameter_class=parameter_class,
parent=self, **kwargs)
def initialize(self, chassis: int, slot: int):
"""Open connection to digitizer
Args:
chassis: Signadyne chassis number (usually 1)
slot: Module slot in chassis
Returns:
Name of digitizer
Raises:
AssertionError if connection to digitizer was unsuccessful
"""
digitizer_name = self.SD_AIN.getProductNameBySlot(chassis, slot)
assert isinstance(digitizer_name, str), \
f'No SD_DIG found at chassis {chassis}, slot {slot}'
result_code = self.SD_AIN.openWithSlot(digitizer_name, chassis, slot)
assert result_code > 0, f'Could not open SD_DIG error code {result_code}'
return digitizer_name
@with_error_check
def start_channels(self, channels: List[int]):
""" Start acquiring data or waiting for a trigger on the specified DAQs
Args:
channels: list of channels to start
Raises:
AssertionError if DAQstartMultiple was unsuccessful
"""
# DAQ channel mask, where LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
channel_mask = sum(2**channel for channel in channels)
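        # e.g. channels=[0, 2] gives channel_mask = 2**0 + 2**2 = 0b101 = 5 (DAQ_0 and DAQ_2)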
return self.SD_AIN.DAQstartMultiple(channel_mask)
@with_error_check
def stop_channels(self, channels: List[int]):
""" Stop acquiring data on the specified DAQs
Args:
channels: List of DAQ channels to stop
Raises:
AssertionError if DAQstopMultiple was unsuccessful
"""
# DAQ channel mask, where LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
channel_mask = sum(2**channel for channel in channels)
return self.SD_AIN.DAQstopMultiple(channel_mask)
@with_error_check
def trigger_channels(self, channels):
""" Manually trigger the specified DAQs
Args:
channels: List of DAQ channels to trigger
Raises:
AssertionError if DAQtriggerMultiple was unsuccessful
"""
# DAQ channel mask, where LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
channel_mask = sum(2**channel for channel in channels)
return self.SD_AIN.DAQtriggerMultiple(channel_mask)
@with_error_check
def flush_channels(self, channels: List[int]):
""" Flush the specified DAQ channels
Args:
channels: List of DAQ channels to flush
Raises:
AssertionError if DAQflushMultiple was unsuccessful
"""
# DAQ channel mask, where LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
channel_mask = sum(2**channel for channel in channels)
return self.SD_AIN.DAQflushMultiple(channel_mask)
@with_error_check
def reset_clock_phase(self,
trigger_behaviour: int,
trigger_source: int,
skew: float = 0.0):
""" Reset the clock phase between CLKsync and CLKsys
Args:
trigger_behaviour:
trigger_source: the PXI trigger number
skew: the skew between PXI_CLK10 and CLKsync in multiples of 10ns
Raises:
AssertionError if clockResetPhase was unsuccessful
"""
return self.SD_AIN.clockResetPhase(trigger_behaviour, trigger_source, skew)
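# ---------------------------------------------------------------------------
# Minimal usage sketch (requires the Keysight SD1 libraries and real hardware;
# the instrument name, chassis and slot below are placeholders):
#
#   digitizer = SD_DIG('dig', model='M3300A', chassis=1, slot=8)
#   ch0 = digitizer.ch0
#   ch0.points_per_cycle(1000)
#   ch0.n_cycles(1)
#   ch0.n_points(1000)
#   ch0.timeout(10)
#   ch0.trigger_mode('auto')
#   ch0.start()
#   trace = ch0.read()   # numpy array scaled to volts
#   ch0.stop()
# ---------------------------------------------------------------------------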
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from urllib import request
import sys, os
import time, types, json, subprocess
import os.path as op
import win32api, win32con, win32gui
# default_encoding = 'utf-8'
# if sys.getdefaultencoding() != default_encoding:
# reload(sys)
# sys.setdefaultencoding(default_encoding)
TRY_TIMES = 1
DEFAULT_PIC_PATH = ""
if DEFAULT_PIC_PATH == "":
DEFAULT_PIC_PATH = os.path.expanduser("~") + "\\Pictures\\Bing"
def schedule(a,b,c):
per = 100.0 * a * b / c
if per > 100 :
print("\r100.00%")
return
print("\r%.2f%%" % per, end="")
def get_pic_URL():
bing_json = ''
req = request.Request(
url = 'http://cn.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1'
)
i = TRY_TIMES
while True:
try:
bing_json = request.urlopen(req).read()
except request.HTTPError as e:
print(e)
i = i - 1
if i == 0:
break
time.sleep(5)
else :
break
if bing_json:
bing_dic = json.loads(bing_json)
if bing_dic != None:
return "http://cn.bing.com%s" % bing_dic['images'][0]['url']
print("无法获取URL!")
return ""
def set_wallpaper(pic_path):
if sys.platform == 'win32':
        k = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, r'Control Panel\Desktop', 0, win32con.KEY_ALL_ACCESS)
curpath = win32api.RegQueryValueEx(k, 'Wallpaper')[0]
if curpath == pic_path:
pass
else:
# win32api.RegSetValueEx(k, "WallpaperStyle", 0, win32con.REG_SZ, "2")#2 for tile,0 for center
# win32api.RegSetValueEx(k, "TileWallpaper", 0, win32con.REG_SZ, "0")
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, pic_path, 1+2)
win32api.RegCloseKey(k)
else:
        curpath = subprocess.getstatusoutput('gsettings get org.gnome.desktop.background picture-uri')[1][1:-1]
        if curpath == pic_path:
            pass
        else:
            subprocess.getstatusoutput('DISPLAY=:0 gsettings set org.gnome.desktop.background picture-uri "%s"' % (pic_path))
try:
print("开始运行。")
localtime = time.localtime(time.time())
url = get_pic_URL()
if url != '':
print("URL:" + url)
pic_name = url.split('/')[-1].split('&')[0].split('OHR.')[-1]
pic_name = "%04d.%02d.%02d.%s" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday, pic_name)
pic_path = "%s\\%s" % (DEFAULT_PIC_PATH, pic_name)
        if os.path.exists(pic_path):
            print("Image already exists!")
            sys.exit()
        print("Image name: " + pic_name)
        print("Starting download...")
try:
request.urlretrieve(url, pic_path, schedule)
set_wallpaper(pic_path)
print("成功")
except Exception as e:
print(e)
exit()
except KeyboardInterrupt:
pass
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from django.urls import reverse, reverse_lazy
from django.views.generic import ListView, DetailView, DeleteView
from django.views.generic.edit import CreateView, UpdateView
from django.contrib.auth.mixins import PermissionRequiredMixin
from .models import AdvThreatEvent, NonAdvThreatEvent
from .models import AdvThreatSource, NonAdvThreatSource
from .models import Vulnerability, RiskCondition
from .models import Impact, RiskResponse
# Create your views here.
def index(request):
return render(request, 'risk/index.html')
def at_index(request):
return render(request, 'risk/at_index.html')
def nt_index(request):
return render(request, 'risk/nt_index.html')
def help_index(request):
return render(request, 'risk/help_index.html')
def help_adv_threat(request):
return render(request, 'risk/help_adv_threat.html')
def help_nonadv_threat(request):
return render(request, 'risk/help_nonadv_threat.html')
class ATEIndexView(ListView):
model = AdvThreatEvent
template_name = 'risk/ate_index.html'
context_object_name = 'ate_list'
permission_required = 'risk.view_advthreatevent'
def get_queryset(self):
return AdvThreatEvent.objects.order_by('-assigned_risk')
class NTEIndexView(ListView):
model = NonAdvThreatEvent
template_name = 'risk/nte_index.html'
context_object_name = 'nte_list'
permission_required = 'risk.view_nonadvthreatevent'
def get_queryset(self):
return NonAdvThreatEvent.objects.order_by('-assigned_risk')
class ATSIndexView(ListView):
model = AdvThreatSource
template_name = 'risk/ats_index.html'
context_object_name = 'ats_list'
permission_required = 'risk.view_advthreatsource'
class NTSIndexView(ListView):
model = NonAdvThreatSource
template_name = 'risk/nts_index.html'
context_object_name = 'nts_list'
permission_required = 'risk.view_nonadvthreatsource'
class VulnIndexView(ListView):
model = Vulnerability
template_name = 'risk/vuln_index.html'
context_object_name = 'vuln_list'
permission_required = 'risk.view_vulnerability'
class CondIndexView(ListView):
model = RiskCondition
template_name = 'risk/cond_index.html'
context_object_name = 'cond_list'
permission_required = 'risk.view_riskcondition'
class ImpactIndexView(ListView):
model = Impact
template_name = 'risk/impact_index.html'
context_object_name = 'impact_list'
permission_required = 'risk.view_impact'
class ResponseIndexView(ListView):
model = RiskResponse
template_name = 'risk/response_index.html'
context_object_name = 'response_list'
permission_required = 'risk.view_riskresponse'
class ATEDetailView(DetailView):
model = AdvThreatEvent
template_name = 'risk/ate_detail.html'
context_object_name = 'ate'
permission_required = 'risk.view_advthreatevent'
class NTEDetailView(DetailView):
model = NonAdvThreatEvent
template_name = 'risk/nte_detail.html'
context_object_name = 'nte'
permission_required = 'risk.view_nonadvthreatevent'
class ATSDetailView(DetailView):
model = AdvThreatSource
template_name = 'risk/ats_detail.html'
context_object_name = 'ats'
permission_required = 'risk.view_advthreatsource'
class NTSDetailView(DetailView):
model = NonAdvThreatSource
template_name = 'risk/nts_detail.html'
context_object_name = 'nts'
permission_required = 'risk.view_nonadvthreatsource'
class VulnDetailView(DetailView):
model = Vulnerability
template_name = 'risk/vuln_detail.html'
context_object_name = 'vuln'
permission_required = 'risk.view_vulnerability'
class CondDetailView(DetailView):
model = RiskCondition
template_name = 'risk/cond_detail.html'
context_object_name = 'cond'
permission_required = 'risk.view_riskcondition'
class ImpactDetailView(DetailView):
model = Impact
template_name = 'risk/impact_detail.html'
context_object_name = 'impact'
permission_required = 'risk.view_impact'
class ResponseDetailView(DetailView):
model = RiskResponse
template_name = 'risk/response_detail.html'
context_object_name = 'response'
permission_required = 'risk.view_riskresponse'
class ATECreateView(PermissionRequiredMixin, CreateView):
model = AdvThreatEvent
permission_required = 'risk.add_advthreatevent'
fields = ['name', 'desc', 'event_type', 'sources', 'relevance',
'info_source', 'tier', 'likelihood_initiation',
'likelihood_impact', 'vulnerabilities', 'impacts',
'responses', 'assigned_risk']
def get_success_url(self):
return reverse_lazy('risk:ate_detail', args=(self.object.id,))
class NTECreateView(PermissionRequiredMixin, CreateView):
model = NonAdvThreatEvent
permission_required = 'risk.add_nonadvthreatevent'
fields = ['name', 'desc', 'event_type', 'sources', 'relevance',
'info_source', 'tier', 'likelihood_initiation',
'likelihood_impact', 'risk_conditions', 'impacts',
'responses', 'assigned_risk']
def get_success_url(self):
return reverse_lazy('risk:nte_detail', args=(self.object.id,))
class ATSCreateView(PermissionRequiredMixin, CreateView):
model = AdvThreatSource
permission_required = 'risk.add_advthreatsource'
fields = ['name', 'desc', 'source_type', 'info_source', 'tier',
'in_scope', 'capability', 'intent', 'targeting']
def get_success_url(self):
return reverse_lazy('risk:ats_detail', args=(self.object.id,))
class NTSCreateView(PermissionRequiredMixin, CreateView):
model = NonAdvThreatSource
permission_required = 'risk.add_nonadvthreatsource'
fields = ['name', 'desc', 'source_type', 'info_source', 'tier',
'in_scope', 'range_of_effect']
def get_success_url(self):
return reverse_lazy('risk:nts_detail', args=(self.object.id,))
class VulnCreateView(PermissionRequiredMixin, CreateView):
model = Vulnerability
permission_required = 'risk.add_vulnerability'
fields = ['name', 'desc', 'vuln_type', 'severity',
'info_source', 'tier']
def get_success_url(self):
return reverse_lazy('risk:vuln_detail', args=(self.object.id,))
class CondCreateView(PermissionRequiredMixin, CreateView):
model = RiskCondition
permission_required = 'risk.add_riskcondition'
fields = ['name', 'desc', 'condition_type', 'pervasiveness',
'info_source', 'tier']
def get_success_url(self):
return reverse_lazy('risk:cond_detail', args=(self.object.id,))
class ImpactCreateView(PermissionRequiredMixin, CreateView):
model = Impact
permission_required = 'risk.add_impact'
fields = ['name', 'desc', 'impact_type', 'info_source', 'tier',
'severity', 'impact_tier']
def get_success_url(self):
return reverse_lazy('risk:impact_detail', args=(self.object.id,))
class ResponseCreateView(PermissionRequiredMixin, CreateView):
model = RiskResponse
permission_required = 'risk.add_riskresponse'
fields = ['name', 'desc', 'response_type', 'effectiveness', 'status']
def get_success_url(self):
return reverse_lazy('risk:response_detail', args=(self.object.id,))
class ATEUpdateView(PermissionRequiredMixin, UpdateView):
model = AdvThreatEvent
    permission_required = 'risk.change_advthreatevent'
template_name = 'risk/advthreatevent_update_form.html'
fields = ['name', 'desc', 'event_type', 'sources', 'relevance',
'info_source', 'tier', 'likelihood_initiation',
'likelihood_impact', 'vulnerabilities', 'impacts',
'responses', 'assigned_risk']
def get_success_url(self):
return reverse_lazy('risk:ate_detail', args=(self.object.id,))
class NTEUpdateView(PermissionRequiredMixin, UpdateView):
model = NonAdvThreatEvent
    permission_required = 'risk.change_nonadvthreatevent'
template_name = 'risk/nonadvthreatevent_update_form.html'
fields = ['name', 'desc', 'event_type', 'sources', 'relevance',
'info_source', 'tier', 'likelihood_initiation',
'likelihood_impact', 'risk_conditions', 'impacts',
'responses', 'assigned_risk']
def get_success_url(self):
return reverse_lazy('risk:nte_detail', args=(self.object.id,))
class ATSUpdateView(PermissionRequiredMixin, UpdateView):
model = AdvThreatSource
    permission_required = 'risk.change_advthreatsource'
template_name = 'risk/advthreatsource_update_form.html'
fields = ['name', 'desc', 'source_type', 'info_source', 'tier',
'in_scope', 'capability', 'intent', 'targeting']
def get_success_url(self):
return reverse_lazy('risk:ats_detail', args=(self.object.id,))
class NTSUpdateView(PermissionRequiredMixin, UpdateView):
model = NonAdvThreatSource
    permission_required = 'risk.change_nonadvthreatsource'
template_name = 'risk/nonadvthreatsource_update_form.html'
fields = ['name', 'desc', 'source_type', 'info_source', 'tier',
'in_scope', 'range_of_effect']
def get_success_url(self):
return reverse_lazy('risk:nts_detail', args=(self.object.id,))
class VulnUpdateView(PermissionRequiredMixin, UpdateView):
model = Vulnerability
    permission_required = 'risk.change_vulnerability'
template_name = 'risk/vulnerability_update_form.html'
fields = ['name', 'desc', 'vuln_type', 'severity',
'info_source', 'tier']
def get_success_url(self):
return reverse_lazy('risk:vuln_detail', args=(self.object.id,))
class CondUpdateView(PermissionRequiredMixin, UpdateView):
model = RiskCondition
    permission_required = 'risk.change_riskcondition'
template_name = 'risk/riskcondition_update_form.html'
fields = ['name', 'desc', 'condition_type', 'pervasiveness',
'info_source', 'tier']
def get_success_url(self):
return reverse_lazy('risk:cond_detail', args=(self.object.id,))
class ImpactUpdateView(PermissionRequiredMixin, UpdateView):
model = Impact
    permission_required = 'risk.change_impact'
template_name = 'risk/impact_update_form.html'
fields = ['name', 'desc', 'impact_type', 'info_source', 'tier',
'severity', 'impact_tier']
def get_success_url(self):
return reverse_lazy('risk:impact_detail', args=(self.object.id,))
class ResponseUpdateView(PermissionRequiredMixin, UpdateView):
model = RiskResponse
    permission_required = 'risk.change_riskresponse'
template_name = 'risk/riskresponse_update_form.html'
fields = ['name', 'desc', 'response_type', 'effectiveness', 'status']
def get_success_url(self):
return reverse_lazy('risk:response_detail', args=(self.object.id,))
class ATEDeleteView(PermissionRequiredMixin, DeleteView):
model = AdvThreatEvent
    permission_required = 'risk.delete_advthreatevent'
template_name = 'risk/ate_delete.html'
context_object_name = 'ate'
def get_success_url(self):
return reverse('risk:ate_index')
class NTEDeleteView(PermissionRequiredMixin, DeleteView):
model = NonAdvThreatEvent
    permission_required = 'risk.delete_nonadvthreatevent'
template_name = 'risk/nte_delete.html'
context_object_name = 'nte'
def get_success_url(self):
return reverse('risk:nte_index')
class ATSDeleteView(PermissionRequiredMixin, DeleteView):
model = AdvThreatSource
    permission_required = 'risk.delete_advthreatsource'
template_name = 'risk/ats_delete.html'
context_object_name = 'ats'
def get_success_url(self):
return reverse('risk:ats_index')
class NTSDeleteView(PermissionRequiredMixin, DeleteView):
model = NonAdvThreatSource
    permission_required = 'risk.delete_nonadvthreatsource'
template_name = 'risk/nts_delete.html'
context_object_name = 'nts'
def get_success_url(self):
return reverse('risk:nts_index')
class VulnDeleteView(PermissionRequiredMixin, DeleteView):
model = Vulnerability
    permission_required = 'risk.delete_vulnerability'
template_name = 'risk/vuln_delete.html'
context_object_name = 'vuln'
def get_success_url(self):
return reverse('risk:vuln_index')
class CondDeleteView(PermissionRequiredMixin, DeleteView):
model = RiskCondition
    permission_required = 'risk.delete_riskcondition'
template_name = 'risk/cond_delete.html'
context_object_name = 'cond'
def get_success_url(self):
return reverse('risk:cond_index')
class ImpactDeleteView(PermissionRequiredMixin, DeleteView):
model = Impact
    permission_required = 'risk.delete_impact'
template_name = 'risk/impact_delete.html'
context_object_name = 'impact'
def get_success_url(self):
return reverse('risk:impact_index')
class ResponseDeleteView(PermissionRequiredMixin, DeleteView):
model = RiskResponse
    permission_required = 'risk.delete_riskresponse'
template_name = 'risk/response_delete.html'
context_object_name = 'response'
def get_success_url(self):
return reverse('risk:response_index')
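# Hedged sketch (not part of this module): the views above reverse URL names such as
# 'risk:ate_index' and 'risk:ate_detail', so the app's urls.py is assumed to look
# roughly like the following. The path strings are illustrative assumptions; only the
# view classes and URL names come from this file.
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'risk'
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('ate/', views.ATEIndexView.as_view(), name='ate_index'),
#         path('ate/<int:pk>/', views.ATEDetailView.as_view(), name='ate_detail'),
#     ]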
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.clm.urls
"""
from django.conf.urls import patterns, include, url
from clm.utils.decorators import decorated_functions
from clm.views.guest.user import *
from clm.views.guest.cluster import *
from clm.views.guest.message import *
from clm.views.guest.news import *
from clm.views.admin_clm.cluster import *
from clm.views.admin_clm.news import *
from clm.views.admin_clm.user import *
from clm.views.admin_cm.user import *
from clm.views.admin_cm.admin import *
from clm.views.admin_cm.cluster import *
from clm.views.admin_cm.farm import *
from clm.views.admin_cm.node import *
from clm.views.admin_cm.storage import *
from clm.views.admin_cm.template import *
from clm.views.admin_cm.vm import *
from clm.views.admin_cm.network import *
from clm.views.admin_cm.iso_image import *
from clm.views.admin_cm.storage_image import *
from clm.views.admin_cm.system_image import *
from clm.views.admin_cm.monia import *
from clm.views.admin_cm.public_ip import *
from clm.views.user.ctx import *
from clm.views.user.group import *
from clm.views.user.iso_image import *
from clm.views.user.storage_image import *
from clm.views.user.system_image import *
from clm.views.user.key import *
from clm.views.user.message import *
from clm.views.user.template import *
from clm.views.user.user import *
from clm.views.user.vm import *
from clm.views.user.farm import *
from clm.views.user.public_ip import *
from clm.views.user.network import *
from clm.views.user.admin import *
from clm.views.user.monia import *
global decorated_functions
urlpatterns = patterns('',)
for fun in decorated_functions:
urlpatterns += patterns('', url(r'^%s/%s/' % (fun.__module__.replace('clm.views.', '').replace('.', '/'),
fun.__name__), fun)
)
# TODO: Remove it when it will be logged somewhere
f = open('/tmp/log-clm', 'w')
for u in urlpatterns:
f.write(str(u) + '\n')
f.close()
|
nilq/baby-python
|
python
|
import os
from optparse import make_option
from django.core.management import call_command, BaseCommand
from django.conf import settings
from fixture_generator.base import get_available_fixtures
from django.db.models.loading import get_app
class Command(BaseCommand):
"""
Regenerate fixtures for all applications.
"""
option_list = BaseCommand.option_list + (
make_option("--format", default="json", dest="format",
help="Specifies the output serialization format for fixtures."),
make_option("--indent", default=4, dest="indent", type="int",
help="Specifies the indent level to use when pretty-printing output"),
make_option("--not-natural", default=True, dest="use_natural_keys", action="store_false",
help="Don't use natural keys."),
make_option("--databases", dest="dbs", default="",
help="Comma separeted list of databases to dump. All databases are used by default")
)
args = '<app app ... app>'
def handle(self, *apps, **options):
fixtures = get_available_fixtures(apps or settings.INSTALLED_APPS)
for fixture in fixtures.itervalues():
if not isinstance(fixture.export, basestring):
continue
print fixture
app = get_app(fixture.app)
destdir = os.path.dirname(app.__file__)
if app.__file__.rsplit('.', 1)[0].endswith("__init__"):
destdir = os.path.dirname(destdir)
destdir = os.path.join(destdir, "fixtures")
call_command("generate_fixture", fixture.label, prefix=fixture.export, dest_dir=destdir, **options)
|
nilq/baby-python
|
python
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
Builtin tools that come with pyiron base.
"""
from abc import ABC
from pyiron_base.job.factory import JobFactory
__author__ = "Liam Huber"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Liam Huber"
__email__ = "huber@mpie.de"
__status__ = "production"
__date__ = "Sep 7, 2021"
class Toolkit(ABC):
def __init__(self, project):
self._project = project
class BaseTools(Toolkit):
def __init__(self, project):
super().__init__(project)
self._job = JobFactory(project)
@property
def job(self) -> JobFactory:
return self._job
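# Hedged usage sketch (illustration only; assumes a pyiron Project instance named pr):
# BaseTools simply exposes the project's JobFactory through its .job property.
#
#     tools = BaseTools(pr)
#     job_factory = tools.job  # the JobFactory(pr) created in __init__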
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from yandex_checkout import ReceiptItem
from yandex_checkout.domain.common.receipt_type import ReceiptType
from yandex_checkout.domain.common.request_object import RequestObject
from yandex_checkout.domain.models.receipt_customer import ReceiptCustomer
from yandex_checkout.domain.models.settlement import Settlement
class ReceiptRequest(RequestObject):
__type = None
__send = None
__customer = None
__tax_system_code = None
__items = None
__settlements = None
__payment_id = None
__refund_id = None
@property
def type(self):
return self.__type
@type.setter
def type(self, value):
self.__type = str(value)
@property
def send(self):
return self.__send
@send.setter
def send(self, value):
if isinstance(value, bool):
self.__send = value
else:
raise TypeError('Invalid send value type in receipt_request')
@property
def customer(self):
return self.__customer
@customer.setter
def customer(self, value):
if isinstance(value, dict):
self.__customer = ReceiptCustomer(value)
elif isinstance(value, ReceiptCustomer):
self.__customer = value
else:
raise TypeError('Invalid customer value type in receipt_request')
@property
def tax_system_code(self):
return self.__tax_system_code
@tax_system_code.setter
def tax_system_code(self, value):
if isinstance(value, int):
self.__tax_system_code = value
else:
raise TypeError('Invalid tax_system_code value type in receipt_request')
@property
def items(self):
return self.__items
@items.setter
def items(self, value):
if isinstance(value, list):
items = []
for item in value:
if isinstance(item, dict):
items.append(ReceiptItem(item))
elif isinstance(item, ReceiptItem):
items.append(item)
else:
raise TypeError('Invalid item type in receipt.items')
self.__items = items
else:
raise TypeError('Invalid items value type in receipt_request')
@property
def settlements(self):
return self.__settlements
@settlements.setter
def settlements(self, value):
if isinstance(value, list):
items = []
for item in value:
if isinstance(item, dict):
items.append(Settlement(item))
elif isinstance(item, Settlement):
items.append(item)
else:
raise TypeError('Invalid settlement type in receipt.settlements')
self.__settlements = items
else:
raise TypeError('Invalid settlements value type in receipt_request')
@property
def payment_id(self):
return self.__payment_id
@payment_id.setter
def payment_id(self, value):
self.__refund_id = None
self.__payment_id = str(value)
@property
def refund_id(self):
return self.__refund_id
@refund_id.setter
def refund_id(self, value):
self.__payment_id = None
self.__refund_id = str(value)
def validate(self):
if self.type is None:
self.__set_validation_error('Receipt type not specified')
if self.send is None:
self.__set_validation_error('Receipt send not specified')
if self.customer is not None:
email = self.customer.email
phone = self.customer.phone
if not email and not phone:
self.__set_validation_error('Both email and phone values are empty in customer')
else:
self.__set_validation_error('Receipt customer not specified')
if not self.has_items():
self.__set_validation_error('Receipt items not specified')
if not self.has_settlements():
self.__set_validation_error('Receipt settlements not specified')
        if self.type == ReceiptType.PAYMENT and self.payment_id is None:
            self.__set_validation_error('Receipt payment_id not specified')
        if self.type == ReceiptType.REFUND and self.refund_id is None:
            self.__set_validation_error('Receipt refund_id not specified')
def has_items(self):
return bool(self.items)
def has_settlements(self):
return bool(self.settlements)
def __set_validation_error(self, message):
raise ValueError(message)
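# Hedged usage sketch (illustration only; the concrete dict layouts for customer,
# items and settlements are assumptions inferred from the setters above, not a
# verified API reference):
#
#     receipt = ReceiptRequest()
#     receipt.type = ReceiptType.PAYMENT
#     receipt.send = True
#     receipt.customer = {'email': 'user@example.com'}
#     receipt.items = [{'description': 'Product', 'quantity': 1.0,
#                       'amount': {'value': 100.0, 'currency': 'RUB'}, 'vat_code': 2}]
#     receipt.settlements = [{'type': 'prepayment',
#                             'amount': {'value': 100.0, 'currency': 'RUB'}}]
#     receipt.payment_id = '215d8da0-000f-50be-b000-0003308c89be'  # illustrative id
#     receipt.validate()  # raises ValueError if a required field is missing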
|
nilq/baby-python
|
python
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skywater 130 PDK support rules.
These rules generate PDK providers for downstream tools.
"""
load("//pdk:build_defs.bzl", "CornerInfo", "StandardCellInfo")
def _skywater_corner_impl(ctx):
# Choose user supplied root, or default to build directory.
standard_cell_root = ctx.attr.standard_cell_root
# Choose the build target name as the corner first unless overwritten.
corner = ctx.attr.corner if ctx.attr.corner else ctx.attr.name
corner_suffix = ""
args = ctx.actions.args()
if ctx.attr.with_leakage:
corner_suffix = "_pwrlkg"
args.add("--leakage")
if ctx.attr.with_ccsnoise:
corner_suffix = "_ccsnoise"
args.add("--ccsnoise")
timing_output = ctx.actions.declare_file("timing/{}__{}{}.lib".format(
ctx.attr.standard_cell_name,
corner,
corner_suffix,
))
args.add_all("-o", [timing_output.dirname])
args.add(standard_cell_root)
args.add(corner)
ctx.actions.run(
outputs = [timing_output],
inputs = ctx.files.srcs,
arguments = [args],
executable = ctx.executable._liberty_tool,
)
return [
DefaultInfo(files = depset([timing_output])),
CornerInfo(
liberty = timing_output,
with_ccsnoise = ctx.attr.with_ccsnoise,
with_leakage = ctx.attr.with_leakage,
corner_name = corner,
),
]
def _skywater_cell_library_impl(ctx):
corners = dict([(dep[CornerInfo].corner_name, dep[CornerInfo]) for dep in ctx.attr.process_corners])
return [
DefaultInfo(files = depset([])),
StandardCellInfo(corners = corners, default_corner = corners.get(ctx.attr.default_corner, None)),
]
skywater_cell_library = rule(
implementation = _skywater_cell_library_impl,
attrs = {
"srcs": attr.label_list(allow_files = True),
"process_corners": attr.label_list(
providers = [CornerInfo],
),
"default_corner": attr.string(mandatory = True),
},
)
skywater_corner = rule(
implementation = _skywater_corner_impl,
attrs = {
"srcs": attr.label_list(
allow_files = True,
allow_empty = False,
),
"corner": attr.string(
default = "",
doc = "The selected process corner to generate liberty files for.",
),
"standard_cell_root": attr.string(
default = "",
doc = "The root directory of the standard cell variants.",
mandatory = True,
),
"with_ccsnoise": attr.bool(
default = False,
doc = "Wheter to generate ccsnoise.",
),
"standard_cell_name": attr.string(
mandatory = True,
doc = "The name of the standar cell variant ex. sky130_fd_sc_hd",
),
"with_leakage": attr.bool(
default = False,
doc = "Wheter to generate leakage",
),
"_liberty_tool": attr.label(
default = Label("@com_google_skywater_pdk//:liberty"),
executable = True,
cfg = "exec",
),
},
)
|
nilq/baby-python
|
python
|
from flaky import flaky
from .. import SemparseTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestAtisParserPredictor(SemparseTestCase):
@flaky
def test_atis_parser_uses_named_inputs(self):
inputs = {"utterance": "show me the flights to seattle"}
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_json(inputs)
action_sequence = result.get("best_action_sequence")
if action_sequence:
            # An untrained model will likely get into a loop and not produce any finished states.
# When the model gets into a loop it will not produce any valid SQL, so we don't get
# any actions. This basically just tests if the model runs.
assert len(action_sequence) > 1
assert all([isinstance(action, str) for action in action_sequence])
predicted_sql_query = result.get("predicted_sql_query")
assert predicted_sql_query is not None
@flaky
def test_atis_parser_predicted_sql_present(self):
inputs = {"utterance": "show me flights to seattle"}
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_json(inputs)
predicted_sql_query = result.get("predicted_sql_query")
assert predicted_sql_query is not None
@flaky
def test_atis_parser_batch_predicted_sql_present(self):
inputs = [{"utterance": "show me flights to seattle"}]
archive_path = self.FIXTURES_ROOT / "atis" / "serialization" / "model.tar.gz"
archive = load_archive(archive_path)
predictor = Predictor.from_archive(archive, "atis-parser")
result = predictor.predict_batch_json(inputs)
predicted_sql_query = result[0].get("predicted_sql_query")
assert predicted_sql_query is not None
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.3 on 2021-05-29 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('discordbot', '0021_auto_20210529_2045'),
]
operations = [
migrations.AddField(
model_name='member',
name='settings',
field=models.JSONField(default=dict, verbose_name='Settings'),
),
]
|
nilq/baby-python
|
python
|
# Author: Jintao Huang
# Time: 2020-5-24
import torch.nn as nn
from .utils import FrozenBatchNorm2d
default_config = {
# backbone
"pretrained_backbone": True,
"backbone_norm_layer": nn.BatchNorm2d,
"backbone_freeze": ["conv_first", "layer1", "layer2"],
# "backbone_freeze": [""], # freeze backbone all
# anchor:
"anchor_scales": (1., 2 ** (1 / 3.), 2 ** (2 / 3.)), # scales on a single feature
"anchor_aspect_ratios": ((1., 1.), (0.7, 1.4), (1.4, 0.7)), # H, W
# focal loss
"alpha": 0.25,
"gamma": 2, # 1.5
# other:
"other_norm_layer": nn.BatchNorm2d,
}
config_dict = {
# resolution[% 128 == 0], backbone, fpn_channels, fpn_num_repeat, regressor_classifier_num_repeat,
# anchor_base_scale(anchor_size / stride)(基准尺度)
'efficientdet_d0': (512, 'efficientnet_b0', 64, 3, 3, 4.), #
'efficientdet_d1': (640, 'efficientnet_b1', 88, 4, 3, 4.), #
'efficientdet_d2': (768, 'efficientnet_b2', 112, 5, 3, 4.), #
'efficientdet_d3': (896, 'efficientnet_b3', 160, 6, 4, 4.), #
'efficientdet_d4': (1024, 'efficientnet_b4', 224, 7, 4, 4.), #
'efficientdet_d5': (1280, 'efficientnet_b5', 288, 7, 4, 4.),
'efficientdet_d6': (1280, 'efficientnet_b6', 384, 8, 5, 4.), #
'efficientdet_d7': (1536, 'efficientnet_b6', 384, 8, 5, 5.)
}
# 官方配置 official configuration
# config_dict = {
# # resolution[% 128 == 0], backbone, fpn_channels, fpn_num_repeat, regressor_classifier_num_repeat,
# # anchor_base_scale(anchor_size / stride)(基准尺度)
# 'efficientdet_d0': (512, 'efficientnet_b0', 64, *2, 3, 4.), #
# 'efficientdet_d1': (640, 'efficientnet_b1', 88, *3, 3, 4.), #
# 'efficientdet_d2': (768, 'efficientnet_b2', 112, *4, 3, 4.), #
# 'efficientdet_d3': (896, 'efficientnet_b3', 160, *5, 4, 4.), #
# 'efficientdet_d4': (1024, 'efficientnet_b4', 224, *6, 4, 4.), #
# 'efficientdet_d5': (1280, 'efficientnet_b5', 288, 7, 4, 4.),
# 'efficientdet_d6': (*1408, 'efficientnet_b6', 384, 8, 5, 4.), #
# 'efficientdet_d7': (1536, 'efficientnet_b6', 384, 8, 5, 5.)
# }
|
nilq/baby-python
|
python
|
# TOO EASY
T = int(input())
for _ in range(T):
lower, upper = map(int, input().split())
n = int(input())
# a < num <= b
for _ in range(n):
mid = (lower+upper)//2
print(mid)
res = input()
if res == "TOO_SMALL":
lower = mid + 1
elif res == "TOO_BIG":
upper = mid - 1
else:
break
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from odoo import _, api, fields, models
class Trainer(models.Model):
_name = "bista.trainer"
_description = "Bista Training Management System - Trainer"
_rec_name = "name"
    profile_image = fields.Binary(string="Profile Image", attachment=True)
first_name = fields.Char(string="First Name", required=True)
last_name = fields.Char(string="Last Name")
name = fields.Char(string="Name", compute="_get_name", store=True)
@api.depends('first_name', 'last_name')
def _get_name(self):
for record in self:
if record.last_name:
record.name = record.first_name + ' ' + record.last_name
else:
record.name = record.first_name
class TrainerNotes(models.Model):
_name = "bista.trainer.note"
_description = "Bista Training Management System - Trainer Notes"
_rec_name = "subject"
added_by = fields.Many2one('res.users', string="Added By")
subject = fields.Char(string="Subject", required=True)
date = fields.Date(
string="Date", default=lambda self: fields.datetime.today())
note = fields.Char(string="Note")
|
nilq/baby-python
|
python
|
#!python3
# coding: utf-8
# 5) Ask for the price of an item and the discount percentage. Display the discount
# amount and the price to pay:
p_produto = float(input('Product price: '))
p_desconto = float(input('Discount percentage: '))
p_valor_desconto = p_produto * p_desconto / 100
p_produto_atual = p_produto - p_valor_desconto
print('Discount amount: ', p_valor_desconto)
print('Final price: ', p_produto_atual)
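# Worked example: price 200.00 with a 15% discount -> discount amount 30.00, final price 170.00.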
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.16 on 2020-10-02 08:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0047_source_id_nullable'),
('routes', '0048_athlete_activities_imported'),
]
operations = [
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import unittest
import numpy as np
from selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver
def run_cruise_simulation(cruise, t_end=100.):
man = Maneuver(
'',
duration=t_end,
initial_speed=float(0.),
lead_relevancy=True,
initial_distance_lead=100,
cruise_values=[cruise],
prob_lead_values=[0.0],
breakpoints=[0.],
)
valid, output = man.evaluate()
assert valid
return output[-1,3]
class TestCruiseSpeed(unittest.TestCase):
def test_cruise_speed(self):
for speed in np.arange(5, 40, 5):
print(f'Testing {speed} m/s')
cruise_speed = float(speed)
simulation_steady_state = run_cruise_simulation(cruise_speed)
self.assertAlmostEqual(simulation_steady_state, cruise_speed, delta=.01, msg=f'Did not reach {speed} m/s')
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
from __future__ import print_function
from mmstage import MicroManagerStage
|
nilq/baby-python
|
python
|
# Copyright: 2006-2011 Brian Harring <ferringb@gmail.com>
# License: GPL2/BSD
"""
exceptions thrown by the MergeEngine
"""
__all__ = ("ModificationError", "BlockModification",
"TriggerUnknownCset",
)
class ModificationError(Exception):
"""Base Exception class for modification errors"""
def __init__(self, trigger, msg):
self.trigger = trigger
self.msg = msg
Exception.__init__(self, "%s: modification error: %s" %
(self.trigger, self.msg))
class BlockModification(ModificationError):
"""Merging cannot proceed"""
def __str__(self):
return "Modification was blocked by %s: %s" % (
self.trigger.__class__.__name__, self.msg)
class TriggerUnknownCset(ModificationError):
"""Trigger's required content set isn't known"""
def __init__(self, trigger, csets):
if not isinstance(csets, (tuple, list)):
csets = (csets,)
        ModificationError.__init__(self, trigger,
                                   "unknown cset: %r" % (csets,))
self.trigger, self.csets = trigger, csets
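# Hedged usage sketch (illustration only): triggers raise these exceptions to abort
# a merge, and callers can inspect .trigger and .msg on the caught error.
#
#     class DummyTrigger:
#         pass
#
#     try:
#         raise BlockModification(DummyTrigger(), "filesystem is read-only")
#     except ModificationError as exc:
#         print(exc.trigger, exc.msg)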
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.7 on 2019-04-16 15:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('materials', '0068_auto_20190415_2140'),
]
operations = [
migrations.RenameField(
model_name='dataset',
old_name='experimental',
new_name='is_experimental',
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
flask_security.recoverable
~~~~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security recoverable module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app as app
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from .signals import password_reset, reset_password_instructions_sent
from .utils import config_value, encrypt_password, get_token_status, md5, \
send_mail, url_for_security
# Convenient references
_security = LocalProxy(lambda: app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)
def send_reset_password_instructions(user):
"""Sends the reset password instructions email for the specified user.
:param user: The user to send the instructions to
"""
token = generate_reset_password_token(user)
reset_link = url_for_security(
'reset_password', token=token, _external=True
)
send_mail(config_value('EMAIL_SUBJECT_PASSWORD_RESET'), user.email,
'reset_instructions',
user=user, reset_link=reset_link)
reset_password_instructions_sent.send(
app._get_current_object(), user=user, token=token
)
def send_password_reset_notice(user):
"""Sends the password reset notice email for the specified user.
:param user: The user to send the notice to
"""
if config_value('SEND_PASSWORD_RESET_NOTICE_EMAIL'):
send_mail(config_value('EMAIL_SUBJECT_PASSWORD_NOTICE'), user.email,
'reset_notice', user=user)
def generate_reset_password_token(user):
"""Generates a unique reset password token for the specified user.
:param user: The user to work with
"""
password_hash = md5(user.password) if user.password else None
data = [str(user.id), password_hash]
return _security.reset_serializer.dumps(data)
def reset_password_token_status(token):
"""Returns the expired status, invalid status, and user of a password reset
token. For example::
expired, invalid, user, data = reset_password_token_status('...')
:param token: The password reset token
"""
expired, invalid, user, data = get_token_status(
token, 'reset', 'RESET_PASSWORD', return_data=True
)
if not invalid:
if user.password:
password_hash = md5(user.password)
if not safe_str_cmp(password_hash, data[1]):
invalid = True
return expired, invalid, user
def update_password(user, password):
"""Update the specified user's password
    :param user: The user whose password should be updated
:param password: The unencrypted new password
"""
user.password = encrypt_password(password)
_datastore.put(user)
send_password_reset_notice(user)
password_reset.send(app._get_current_object(), user=user)
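# Hedged usage sketch (illustration only; assumes an application context and a user
# object from the configured datastore):
#
#     token = generate_reset_password_token(user)
#     expired, invalid, token_user = reset_password_token_status(token)
#     if not (expired or invalid):
#         update_password(token_user, 'new-password')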
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,accuracy_score,f1_score,precision_score,recall_score
from scikitplot.metrics import plot_confusion_matrix
class eval_metrics():
def __init__(self,targets,preds,classes):
try:
self.targets = targets.cpu().numpy()
self.preds = preds.cpu().numpy()
self.classes = classes
self.num_classes = len(self.classes)
        except AttributeError:
            # targets/preds are already numpy arrays, so .cpu() is not available
self.targets = targets
self.preds = preds
self.classes = classes
self.num_classes = len(self.classes)
def plot_conf_matx(self,normalized=False):
fig, axs = plt.subplots(figsize=(16, 12))
plot_confusion_matrix(self.targets, self.preds, ax=axs,normalize=normalized)
tick_marks = np.arange(self.num_classes)
plt.xticks(tick_marks, self.classes, rotation=45)
plt.yticks(tick_marks, self.classes)
plt.savefig(os.path.join(os.getcwd(),'confusion_matrix.png'))
return fig
def accuracy(self):
return accuracy_score(self.targets,self.preds,normalize=True)
def f1_score_weighted(self):
return f1_score(self.targets,self.preds,average='weighted')
def precision_weighted(self):
return precision_score(self.targets,self.preds,average='weighted')
def recall_weighted(self):
return recall_score(self.targets,self.preds,average='weighted')
def classify_report(self):
return classification_report(self.targets,self.preds,
target_names=self.classes)
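# Hedged usage sketch (illustration only, with made-up labels and predictions):
#
#     import numpy as np
#     targets = np.array([0, 1, 1, 0])
#     preds = np.array([0, 1, 0, 0])
#     metrics = eval_metrics(targets, preds, classes=['neg', 'pos'])
#     print(metrics.accuracy(), metrics.f1_score_weighted())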
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import itertools
from saliency_map import *
from utils import OpencvIo
class GaussianPyramidTest(unittest.TestCase):
def setUp(self):
oi = OpencvIo()
src = oi.imread('./images/fruit.jpg')
self.__gp = GaussianPyramid(src)
def test_get_intensity(self):
its = self.__gp._GaussianPyramid__get_intensity(10, 20, 30)
self.assertEqual(20, its)
self.assertNotEqual(type(1), type(its))
def test_get_colors(self):
real = self.__gp._GaussianPyramid__get_colors(0.9, 0.9, 0.9, 0.9, 10)
self.assertEqual([0.0, 0.0, 0.0, 0.0], real)
class FeatureMapTest(unittest.TestCase):
def setUp(self):
oi = OpencvIo()
src = oi.imread('./images/fruit.jpg')
gp = GaussianPyramid(src)
self.__fm = FeatureMap(gp.maps)
def test_scale_diff(self):
c, s = np.zeros((4, 6)), np.zeros((2, 3))
expect = np.ones((4, 6))
for y, x in itertools.product(xrange(len(s)), xrange(len(s[0]))):
s[y][x] = (-1) ** x
self.assertTrue(np.array_equal(expect, self.__fm._FeatureMap__scale_diff(c, s)))
def test_scale_color_diff(self):
c1, s1 = np.zeros((4, 6)), np.zeros((2, 3))
c2, s2 = np.zeros((4, 6)), np.zeros((2, 3))
expect = np.ones((4, 6))
for y, x in itertools.product(xrange(len(s1)), xrange(len(s1[0]))):
s1[y][x] = (-1) ** x
real = self.__fm._FeatureMap__scale_color_diff((c1, s1), (c2, s2))
self.assertTrue(np.array_equal(expect, real))
class ConspicuityMapTest(unittest.TestCase):
def setUp(self):
oi = OpencvIo()
src = oi.imread('./images/fruit.jpg')
gp = GaussianPyramid(src)
fm = FeatureMap(gp.maps)
self.__cm = ConspicuityMap(fm.maps)
def test_scale_add(self):
srcs = [np.ones((4, 6)), np.zeros((2, 3))]
expect = np.ones((4, 6))
self.assertTrue(np.array_equal(expect, self.__cm._ConspicuityMap__scale_add(srcs)))
class SaliencyMapTest(unittest.TestCase):
def setUp(self):
self.sm = SaliencyMap()
def suite():
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(GaussianPyramidTest))
suite.addTests(unittest.makeSuite(FeatureMapTest))
suite.addTests(unittest.makeSuite(ConspicuityMapTest))
suite.addTests(unittest.makeSuite(SaliencyMapTest))
return suite
|
nilq/baby-python
|
python
|
# MODULE: TypeRig / Core / Objects
# -----------------------------------------------------------
# (C) Vassil Kateliev, 2017-2020 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#------------------------------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
__version__ = '0.26.0'
from collection import *
from cubicbezier import *
from line import *
from matrix import *
from point import *
from array import *
from transform import *
from utils import *
|
nilq/baby-python
|
python
|
from pyomac.clustering.cluster_utils import (
ModalSet,
IndexedModalSet,
indexed_modal_sets_from_sequence,
modal_sets_from_lists,
modal_clusters,
single_set_statistics,
filter_clusters,
plot_indexed_clusters
)
|
nilq/baby-python
|
python
|
# TextChart - Roll The Dice
import pygwidgets
from Constants import *
class TextView():
def __init__(self, window, oModel):
self.window = window
self.oModel = oModel
totalText = ['Roll total', '']
for rollTotal in range(MIN_TOTAL, MAX_TOTAL_PLUS_1):
totalText.append(rollTotal)
self.oTotalDisplay = pygwidgets.DisplayText(self.window, (200, 135), totalText,
fontSize=36, width=120, justified='right')
self.oCountDisplay = pygwidgets.DisplayText(self.window, (320, 135),
fontSize=36, width=120, justified='right')
self.oPercentDisplay = pygwidgets.DisplayText(self.window, (440, 135),
fontSize=36, width=120, justified='right')
def update(self):
nRounds, resultsDict, percentsDict = self.oModel.getRoundsRollsPercents()
countList = ['Count', ''] # extra empty string for a blank line
percentList = ['Percent', '']
for rollTotal in range(MIN_TOTAL, MAX_TOTAL_PLUS_1):
count = resultsDict[rollTotal]
percent = percentsDict[rollTotal]
countList.append(count)
# Build percent as a string with one decimal digit
percent = '{:.1%}'.format(percent)
percentList.append(percent)
self.oCountDisplay.setValue(countList)
self.oPercentDisplay.setValue(percentList)
def draw(self):
self.oTotalDisplay.draw()
self.oCountDisplay.draw()
self.oPercentDisplay.draw()
|
nilq/baby-python
|
python
|
"""
Exercise 1
Write a function that takes a string as an argument and displays the letters
backward, one per line.
"""
def backwards(word):
x = len(word) - 1
while x >= 0:
print(word[x])
x -= 1
backwards("hello")
|
nilq/baby-python
|
python
|
# with open('./space.text', 'w') as message:
#     message.write('This is the written data\n')
#     message.write('Writing another line of text\n')
#     message.write('Keep writing\n')
with open('./space.text', 'a') as msg:
    msg.write('Appending a line of text\n')
    msg.write('Appending another line\n')
|
nilq/baby-python
|
python
|
import modeli, dobi_zneske
from bottle import *
import hashlib # racunaje md5
import json,requests
import pandas as pd
secret = "to skrivnost je zelo tezko uganiti 1094107c907cw982982c42"
def get_administrator():
username = request.get_cookie('administrator', secret=secret)
return username
def get_user(auto_login = True):
"""Poglej cookie in ugotovi, kdo je prijavljeni uporabnik,
vrni njegov username in ime. Če ni prijavljen, presumeri
na stran za prijavo ali vrni None (advisno od auto_login).
"""
    # Get the username from the cookie
username = request.get_cookie('username', secret=secret)
    # Check whether this user exists
if username is not None:
r = modeli.mail(username)
if r is not None:
            # the user exists, return their data
return r
    # If we get here, the user is not logged in; redirect unless the administrator cookie is set
if auto_login and not get_administrator():
redirect('/prijava')
else:
return None
def password_md5(s):
"""Vrni MD5 hash danega UTF-8 niza. Gesla vedno spravimo v bazo
kodirana s to funkcijo."""
h = hashlib.md5()
h.update(s.encode('utf-8'))
return h.hexdigest()
@get('/')
def glavniMenu():
valute = modeli.seznam_valut()
data = requests.get(r'https://www.bitstamp.net/api/v2/order_book/ethbtc')
data = data.json()
bids = pd.DataFrame()
bids['quantity'] = [i[1] for i in data['bids']]
bids['price'] = [i[0] for i in data['bids']]
asks = pd.DataFrame()
asks['price'] = [i[0] for i in data['asks']]
asks['quantity'] = [i[1] for i in data['asks']]
asks.price = asks.price.apply(float)
asks.quantity = asks.quantity.apply(float)
bids.price = bids.price.apply(float)
bids.quantity = bids.quantity.apply(float)
bids_dict = {x[1]:x[0] for x in bids.itertuples(index=False)}
asks_dict = {x[0]:x[1] for x in asks.itertuples(index=False)}
bidask = dict()
bidask['asks'] = asks_dict
bidask['bids'] = bids_dict
data['asks'] = [{'price':float(i[0]), 'amount':float(i[1])} for i in data['asks']][:100]
data['bids'] = [{'price':float(i[0]), 'amount':float(i[1])} for i in data['bids']][:100]
return template('glavni.html', mail=None, geslo=None,ime=None,priimek=None, valute=valute,napaka_registriraj=None,napaka_prijava=None, orderbook=data)
@get('/static/<filename:path>')
def static(filename):
return static_file(filename, root='static')
@get('/oseba/<id_st>')
def oOsebi(id_st):
mail=get_user()
admin = get_administrator()
uporabnik = modeli.podatki(id_st)
vsota = 0
if admin or (uporabnik is not None and mail[0] == uporabnik[3]):
id, ime, priimek, mail, geslo = uporabnik
valute = modeli.seznam_valut()
lastnistvo = modeli.vsi_podatki(id_st)
for _, _ , _, nova_vrednost, kol, _ in lastnistvo:
vsota+=nova_vrednost*kol
vsota = round(vsota,2)
zasluzek = modeli.zasluzek(id)
return template('oseba.html', id=id, ime = ime, priimek=priimek, mail=mail,valute=valute,kolicina=None,lastnistvo=lastnistvo, zasluzek=zasluzek, vsota=vsota)
abort(404,"Not found: '/oseba/{0}'".format(id_st))
@post('/kupi')
def nakup():
mail = get_user()
admin = get_administrator()
id = request.forms.id
ime = request.forms.k
vrednost = request.forms.vrednost
kolicina = request.forms.kolicina
modeli.kupi_valuto(id, ime, vrednost, kolicina)
redirect('/oseba/'+str(id))
return template('oseba.html', id=id, ime = ime, kolicina=kolicina,vrednost=vrednost,k=k)
@post('/prodaj')
def prodaj():
mail = get_user()
admin = get_administrator()
id = request.forms.id
ime = request.forms.valut
vred = request.forms.vredn
kol = float(request.forms.kol)
kolicina = float(request.forms.kolicina)
kolicina = min(kol, kolicina)
modeli.prodaj_valuto(id, ime, kolicina, vred)
redirect('/oseba/'+str(id))
return template('oseba.html', id=id, ime = ime, kol=kol, vred=vred, kolicina=kolicina)
@get('/administrator')
def administrator():
if get_administrator():
valute = modeli.seznam_valut()
return template('administrator.html', valute=valute)
abort(404, "Not found: '/administrator'")
@get('/administrator/osebe')
def administrator_osebe():
if get_administrator():
sez = {}
rezultat = modeli.podatki_vsi()
for el in rezultat:
sez[el[0]]=modeli.zasluzek(el[0])
return template('seznam_oseb.html', rezultat=rezultat,zasluzek=sez)
abort(404,"Not found: '/administrator/osebe")
@get('/administrator/valute')
def administrator_valute():
if get_administrator():
rezultat = modeli.seznam_valut()
return template('seznam_valut.html', rezultat=rezultat)
abort(404,"Not found: '/administrator/valute")
@get('/isci')
def isci():
id_st = request.query.iskalniNiz
rezultat = modeli.podatki(id_st)
if rezultat is not None:
return template('isci.html', rezultat = rezultat)
@get('/registracija')
def glavni_r():
return template('registriraj.html', ime = None, priimek = None, mail = None, napaka_registriraj=None, geslo = None)
@post('/registracija')
def dodaj():
ime = request.forms.ime
priimek = request.forms.priimek
mail = request.forms.mail
geslo = password_md5(request.forms.geslo)
if ime and priimek and mail and geslo:
je_v_bazi = modeli.mail_v_bazi(mail)
if je_v_bazi or mail=="admin@admin":
redirect('/registracija')
return template('registriraj.html', ime=None, priimek=None, mail=None, geslo=None, napaka_registriraj = 'Uporabnik obstaja')
modeli.dodaj_osebo(ime, priimek, mail, geslo)
id_1 = modeli.id_st(mail)
response.set_cookie('username', mail, path='/', secret=secret)
redirect('/oseba/'+str(id_1))
return template('registriraj.html', ime = ime, priimek = priimek, mail = mail, geslo = geslo, napaka_registriraj=None)
#redirect('/registracija')
redirect('/#registracija')
return template('registriraj.html', ime=None, priimek=None, mail=None, geslo=None, napaka_registriraj = 'Neveljavna registracija')
@get('/oseba/<id>/spremeni')
def spremen(id):
if get_user() is not None:
return template('spremeni.html', ime = None, priimek = None, mail = get_user()[0], staro_geslo = None, geslo = None, napaka=None)
return template('spremeni.html', ime = None, priimek = None, mail = None, staro_geslo = None, geslo = None, napaka=None)
@post('/spremeni')
def spremeni():
mail = None or get_user()[0]
id = modeli.id_st(mail)
ime = request.forms.ime or modeli.ime(id)
priimek = request.forms.priimek or modeli.priimek(id)
staro_geslo = request.forms.staro_geslo
geslo = password_md5(request.forms.geslo)
if password_md5(staro_geslo) == modeli.geslo(id):
modeli.spremeni_osebo(id, ime, priimek, mail, geslo)
modeli.spremeni_osebo(id, ime, priimek, mail, modeli.geslo(id))
response.set_cookie('username', mail, path='/', secret=secret)
redirect('/oseba/'+str(id))
return template('spremeni.html', ime = ime, priimek = priimek, staro_geslo = staro_geslo, mail = mail, geslo = geslo, napaka=None)
@get('/administrator/luzerji')
def luzerji():
if get_administrator():
rezultat = modeli.lozerji()
return template('loserji.html', lastnistvo=rezultat)
abort(404,"Not found: '/administrator/luzerji")
@get('/prijava')
def glavni():
return template('prijava.html', mail = None, napaka_prijava=None, geslo = None)
@post('/prijava')
def glavni_p():
mail = request.forms.mail
geslo = password_md5(request.forms.geslo)
if mail == "admin@admin" and geslo == password_md5("admin"):
response.set_cookie('administrator', mail, path='/', secret=secret)
redirect('/administrator')
return template('prijava.html', mail = mail, napaka_prijava=None, geslo = geslo)
id_s = modeli.id_st(mail)
podatki = modeli.podatki(id_s)
if podatki is not None:
_, _, _, email, psw = podatki
if email == mail and geslo == psw:
response.set_cookie('username', mail, path='/', secret=secret)
redirect('/oseba/'+str(id_s))
return template('prijava.html', mail = mail, napaka_prijava=None, geslo = geslo)
else:
redirect('/#prijava')
return template('prijava.html', mail=None, geslo=None, napaka_prijava='Neveljavna prijava')
else:
redirect('/#prijava')
return template('prijava.html', mail = None, geslo = None, napaka_prijava = 'Izpolni polja')
@get('/zapri_racun')
def odstrani_g():
return template('zapri_racun.html',mail=None,geslo=None,napaka=None)
@post('/zapri_racun')
def odstrani():
mail = request.forms.mail
geslo = password_md5(request.forms.geslo)
id = modeli.id_st(mail)
podatki = modeli.podatki(id)
if podatki is not None:
id_s, _, _, email, psw = podatki
if email == mail and geslo == psw and id==id_s:
modeli.zapri_racun(id)
redirect('/')
return template('zapri_racun.html', mail=mail, geslo=geslo,napaka=None)
redirect('/zapri_racun')
return template('zapri_racun.html', mail=mail, geslo=geslo, napaka='Nepravilno mail/geslo')
return template('zapri_racun.html', mail=None, geslo=None, napaka=None)
@post('/administrator/zapri_racun_admin')
def zapri_racun_admin():
id = request.forms.id
modeli.zapri_racun(id)
redirect('/administrator/luzerji')
@post('/administrator/zapri_racun_adm')
def zapri_racun_adm():
id = request.forms.id
modeli.zapri_racun(id)
redirect('/administrator/osebe')
@post('/administrator/zbrisi_valute')
def zbrisi_valuto():
id = request.forms.id
modeli.zbrisi_valuto(id)
redirect('/administrator/valute')
@post('/odstrani_valute')
def zbrisi_valute():
modeli.zbrisi_valute()
redirect('/administrator/valute')
@post('/zbrisi_osebe')
def zbrisi_osebe():
modeli.zbrisi_vse_osebe()
redirect('/administrator/osebe')
@get('/dodaj_valute')
def dodaj_valute():
if get_administrator():
rezultat = modeli.seznam_valut()
redirect('/administrator/valute')
return template('seznam_valut.html', rezultat=rezultat)
abort(404,"Not found: '/dodaj_valute")
@post('/dodaj_valute')
def dodaj_valute():
if get_administrator():
modeli.dodaj_valute()
rezultat = modeli.seznam_valut()
redirect('/administrator/valute')
return template('seznam_valut.html', rezultat=rezultat)
@get('/dodaj_nove_valute')
def dodaj_valute():
if get_administrator():
rezultat = modeli.seznam_valut()
redirect('/administrator/valute')
return template('seznam_valut.html', rezultat=rezultat)
abort(404,"Not found: '/dodaj_nove_valute")
@post('/dodaj_nove_valute')
def dodaj_nove_valute():
if get_administrator():
modeli.dodaj_nove_valute()
rezultat = modeli.seznam_valut()
redirect('/administrator/valute')
return template('seznam_valut.html', rezultat=rezultat)
@get('/oseba/<id>/zgodovina')
def zgodovina(id):
mail = get_user()
uporabnik = modeli.podatki(id)
if get_administrator() or uporabnik is not None and mail[0] == uporabnik[3]:
zgodovina_transakcij = modeli.vrni_zgodovino(id)
zasluzek = modeli.zasluzek(id)
return template('zgodovina.html',zasluzek=zasluzek,lastnistvo=zgodovina_transakcij)
else:
odjava()
@get('/odjavi')
def odjava():
response.delete_cookie('username')
redirect('/')
@get('/odjava')
def odjavi():
response.delete_cookie('administrator')
redirect('/')
# start the server on port 8080, see http://localhost:8080/
run(host='localhost', port=8080, debug=True, reloader=False)  # reloader disabled: it causes problems (e.g. under IDLE)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
filepath = sys.argv[1]
path, filename = os.path.split(filepath)
filename, ext = os.path.splitext(filename)
for i in os.listdir(os.getcwd()+'/'+path):
file_i, ext = os.path.splitext(i)
if i.startswith(filename+'_segmented') and ext == '.ttl':
# print 'Converting {0} to dot format'.format(file_i)
os.system('~/.virtualenvs/tc/bin/rdf2dot {2}/{0} > {2}/{1}'.format(i, file_i+'.dot', os.getcwd()+'/'+path))
|
nilq/baby-python
|
python
|
from datetime import datetime, timedelta
import pendulum
import prefect
from prefect import task, Flow
from prefect.schedules import CronSchedule
import pandas as pd
from io import BytesIO
import zipfile
import requests
schedule = CronSchedule(
cron="*/30 * * * *",
start_date=pendulum.datetime(2021, 3, 12, 17, 00, tz='America/Sao_Paulo')
)
@task
def get_raw_data():
url = 'http://download.inep.gov.br/microdados/microdados_enem_2019.zip'
filebytes = BytesIO(
requests.get(url).content
)
logger = prefect.context.get('logger')
    logger.info('Data downloaded')
    # Extract the zipfile contents
myzip = zipfile.ZipFile(filebytes)
myzip.extractall()
path = './DADOS/'
return path
@task
def aplica_filtros(path):
enade = pd.read_csv(path + 'MICRODADOS_ENEM_2019.csv',
sep=';', decimal=',', nrows=1000)
logger = prefect.context.get('logger')
    logger.info(f'Dataframe columns: {enade.columns}')
enade = enade.loc[
(enade.NU_IDADE > 20) &
(enade.NU_IDADE < 40) &
(enade.NT_GER > 0)
]
return enade
@task
def constroi_idade_centralizada(df):
idade = df[['NU_IDADE']]
idade['idadecent'] = idade.NU_IDADE - idade.NU_IDADE.mean()
return idade[['idadecent']]
@task
def constroi_idade_cent_quad(df):
idadecent = df.copy()
idadecent['idade2'] = idadecent.idadecent ** 2
return idadecent[['idade2']]
@task
def constroi_est_civil(df):
filtro = df[['QE_I01']]
filtro['estcivil'] = filtro.QE_I01.replace({
'A': 'Solteiro',
'B': 'Casado',
'C': 'Separado',
'D': 'Viuvo',
'E': 'Outro'
})
return filtro[['estcivil']]
@task
def constroi_cor(df):
filtro = df[['QE_I02']]
filtro['cor'] = filtro.QE_I02.replace({
'A': 'Branca',
'B': 'Preta',
'C': 'Amarela',
'D': 'Parda',
'E': 'Indigena',
'F': '',
' ': ''
})
return filtro[['cor']]
@task
def constroi_escopai(df):
    filtro = df[['QE_I04']]
filtro['escopai'] = filtro.QE_I04.replace({
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5
})
return filtro[['escopai']]
@task
def constroi_escomae(df):
    filtro = df[['QE_I05']]
filtro['escomae'] = filtro.QE_I05.replace({
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5
})
return filtro[['escomae']]
@task
def constroi_renda(df):
    filtro = df[['QE_I08']]
filtro['renda'] = filtro.QE_I08.replace({
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5,
'G': 6
})
return filtro[['renda']]
@task
def join_data(df, idadecent, idadequadrado, estcivil, cor,
escopai, escomae, renda):
final = pd.concat([df, idadecent, idadequadrado, estcivil, cor,
escopai, escomae, renda],
axis=1)
logger = prefect.context.get('logger')
logger.info(final.head().to_json())
final.to_csv('enade_tratato.csv', index=False)
with Flow('Enade', schedule) as flow:
path = get_raw_data()
filtro = aplica_filtros(path)
idadecent = constroi_idade_centralizada(filtro)
idadequadrado = constroi_idade_cent_quad(idadecent)
estcivil = constroi_est_civil(filtro)
cor = constroi_cor(filtro)
escomae = constroi_escomae(filtro)
escopai = constroi_escopai(filtro)
renda = constroi_renda(filtro)
j = join_data(filtro, idadecent, idadequadrado, estcivil, cor,
escomae, escopai, renda)
# prefect create project IGTI --description "Projetos do bootcamp de engenharia de dados do IGTI"
flow.register(project_name='IGTI', idempotency_key=flow.serialized_hash())
# prefect auth create-token -n my-runner-token -s RUNNER
flow.run_agent(token='dE5zGVFdfzZpNj6bTBcweg')
|
nilq/baby-python
|
python
|
import ROOT as root
qMap_Ag_C0_V0 = root.TProfile2D("qMap_Ag_C0_V0","qMap_Ag_C0 (V0)",52,0,52,80,0,80,0,0);
qMap_Ag_C0_V0.SetBinEntries(3585,29768);
qMap_Ag_C0_V0.SetBinEntries(3586,79524);
qMap_Ag_C0_V0.SetBinEntries(3639,83953);
qMap_Ag_C0_V0.SetBinEntries(3640,124982);
qMap_Ag_C0_V0.SetBinEntries(3641,14345);
qMap_Ag_C0_V0.SetBinEntries(3693,31598);
qMap_Ag_C0_V0.SetBinEntries(3694,91098);
qMap_Ag_C0_V0.SetBinContent(3585,3245287);
qMap_Ag_C0_V0.SetBinContent(3586,1.615629e+07);
qMap_Ag_C0_V0.SetBinContent(3639,2.731302e+07);
qMap_Ag_C0_V0.SetBinContent(3640,3.14566e+08);
qMap_Ag_C0_V0.SetBinContent(3641,1444064);
qMap_Ag_C0_V0.SetBinContent(3693,3763256);
qMap_Ag_C0_V0.SetBinContent(3694,2.928397e+07);
qMap_Ag_C0_V0.SetBinError(3585,174261.7);
qMap_Ag_C0_V0.SetBinError(3586,278676.5);
qMap_Ag_C0_V0.SetBinError(3639,960499.6);
qMap_Ag_C0_V0.SetBinError(3640,4324021);
qMap_Ag_C0_V0.SetBinError(3641,12124.33);
qMap_Ag_C0_V0.SetBinError(3693,148045.1);
qMap_Ag_C0_V0.SetBinError(3694,865776);
qMap_Ag_C0_V0.SetMinimum(0);
qMap_Ag_C0_V0.SetEntries(455268);
qMap_Ag_C0_V0.SetStats(0);
qMap_Ag_C0_V0.SetContour(20);
qMap_Ag_C0_V0.SetContourLevel(0,0);
qMap_Ag_C0_V0.SetContourLevel(1,125.8445);
qMap_Ag_C0_V0.SetContourLevel(2,251.689);
qMap_Ag_C0_V0.SetContourLevel(3,377.5335);
qMap_Ag_C0_V0.SetContourLevel(4,503.3781);
qMap_Ag_C0_V0.SetContourLevel(5,629.2226);
qMap_Ag_C0_V0.SetContourLevel(6,755.0671);
qMap_Ag_C0_V0.SetContourLevel(7,880.9116);
qMap_Ag_C0_V0.SetContourLevel(8,1006.756);
qMap_Ag_C0_V0.SetContourLevel(9,1132.601);
qMap_Ag_C0_V0.SetContourLevel(10,1258.445);
qMap_Ag_C0_V0.SetContourLevel(11,1384.29);
qMap_Ag_C0_V0.SetContourLevel(12,1510.134);
qMap_Ag_C0_V0.SetContourLevel(13,1635.979);
qMap_Ag_C0_V0.SetContourLevel(14,1761.823);
qMap_Ag_C0_V0.SetContourLevel(15,1887.668);
qMap_Ag_C0_V0.SetContourLevel(16,2013.512);
qMap_Ag_C0_V0.SetContourLevel(17,2139.357);
qMap_Ag_C0_V0.SetContourLevel(18,2265.201);
qMap_Ag_C0_V0.SetContourLevel(19,2391.046);
ci = root.TColor.GetColor("#000099");
qMap_Ag_C0_V0.SetLineColor(ci);
qMap_Ag_C0_V0.GetXaxis().SetTitle("col");
qMap_Ag_C0_V0.GetXaxis().SetRange(17,29);
qMap_Ag_C0_V0.GetXaxis().SetNdivisions(508);
qMap_Ag_C0_V0.GetXaxis().SetLabelFont(42);
qMap_Ag_C0_V0.GetXaxis().SetLabelSize(0.05);
qMap_Ag_C0_V0.GetXaxis().SetTitleSize(0.05);
qMap_Ag_C0_V0.GetXaxis().SetTitleOffset(1.1);
qMap_Ag_C0_V0.GetXaxis().SetTitleFont(42);
qMap_Ag_C0_V0.GetYaxis().SetTitle("row");
qMap_Ag_C0_V0.GetYaxis().SetRange(55,76);
qMap_Ag_C0_V0.GetYaxis().SetLabelFont(42);
qMap_Ag_C0_V0.GetYaxis().SetLabelSize(0.05);
qMap_Ag_C0_V0.GetYaxis().SetTitleSize(0.05);
qMap_Ag_C0_V0.GetYaxis().SetTitleOffset(1.1);
qMap_Ag_C0_V0.GetYaxis().SetTitleFont(42);
qMap_Ag_C0_V0.GetZaxis().SetLabelFont(42);
qMap_Ag_C0_V0.GetZaxis().SetLabelSize(0.035);
qMap_Ag_C0_V0.GetZaxis().SetTitleSize(0.035);
qMap_Ag_C0_V0.GetZaxis().SetTitleFont(42);
|
nilq/baby-python
|
python
|
from . import start_watcher
def main():
start_watcher()
|
nilq/baby-python
|
python
|
from .market import Market, TradeException
import time
import hmac
import urllib.parse
import urllib.request
import requests
import hashlib
import config
import database
from datetime import datetime
class PrivateBter(Market):
url = "https://bter.com/api/1/private/"
def __init__(self):
super().__init__()
self.key = config.bter_key
self.secret = config.bter_secret
self.min_tx_volume = 0.001
try:
self.get_balances()
except Exception:
self.s_coin_balance = 0
self.p_coin_balance = 0
def query(self, method, req=None):
    # generate POST data string; use a None default to avoid a shared mutable dict
    if req is None:
        req = {}
    req["nonce"] = int(time.time())
post_data = urllib.parse.urlencode(req)
# sign it
sign = hmac.new(self.secret.encode("ascii"), post_data.encode("ascii"), hashlib.sha512).hexdigest()
# extra headers for request
headers = {"Sign": sign, "Key": self.key}
full_url = self.url + method
try:
res = requests.post(full_url, data=req, headers=headers)
except Exception as e:
raise Exception("Error sending request to %s - %s" % (self.name, e))
try:
value = res.json()
except Exception as e:
raise Exception("Unable to decode response from %s - %s" %
(self.name, e))
return value
def get_open_orders(self):
# Might not be necessary
response = self.query('orderlist', {})
if not response["result"]:
raise TradeException(response["msg"])
return response
def _buy(self, amount, price):
"""Create a buy limit order"""
currency_pair = self.p_coin.lower() + "_" + self.s_coin.lower()
req = {"pair": currency_pair, "type": "BUY", "rate": price, "amount": amount}
response = self.query("placeorder", req)
if not response["result"]:
raise TradeException(response["msg"])
order_id = response['order_id']
# Check open order list to see if the most recent open order matches this order:
# match by price and primary coin type. If we find it output the real order id,
# otherwise return the dummy order id returned from the order request.
time.sleep(10)
open_orders = self.get_open_orders()['orders']
open_orders.sort(key=lambda x: x['id'], reverse=True)
if open_orders and float(open_orders[0]['sell_amount']) == (price * 1000) and \
open_orders[0]['buy_type'] == self.p_coin:
order_id = open_orders[0]['id']
return order_id
def _sell(self, amount, price):
"""Create a sell limit order"""
currency_pair = self.p_coin.lower() + "_" + self.s_coin.lower()
req = {"pair": currency_pair, "type": "SELL", "rate": price, "amount": amount}
response = self.query("placeorder", req)
if not response["result"]:
raise TradeException(response["msg"])
order_id = response['order_id']
# Check open order list to see if the most recent open order matches this order:
# match by price and primary coin type. If we find it output the real order id,
# otherwise return the dummy order id returned from the order request.
time.sleep(10)
open_orders = self.get_open_orders()['orders']
open_orders.sort(key=lambda x: x['id'], reverse=True)
if open_orders and float(open_orders[0]['buy_amount']) == (price * 1000) and \
open_orders[0]['sell_type'] == self.p_coin:
order_id = open_orders[0]['id']
return order_id
def update_order_status(self):
if not self.open_orders:
return
response = self.query('orderlist')
remaining_open_orders = []
completed_order_ids = []
for open_order in self.open_orders:
found_order = [found_order for found_order in response['orders'] if
found_order['id'] == open_order['order_id']]
if not found_order:
completed_order_ids.append(open_order['order_id'])
else:
remaining_open_orders.append(open_order)
if completed_order_ids:
self.open_orders = remaining_open_orders
database.order_completed(self.name, completed_order_ids)
def get_balances(self):
"""Get balance of primary coin and secondary coin"""
try:
res = self.query("getfunds")
if self.p_coin in res["available_funds"]:
self.p_coin_balance = float(res["available_funds"][self.p_coin])
else:
self.p_coin_balance = 0
if self.s_coin in res["available_funds"]:
self.s_coin_balance = float(res["available_funds"][self.s_coin])
else:
self.s_coin_balance = 0
except Exception:
raise Exception("Error getting balance")
|
nilq/baby-python
|
python
|
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.launcher_util import run_experiment
import os
from rlkit.misc.asset_loader import sync_down
def experiment(variant):
from rlkit.core import logger
demo_path = sync_down(variant['demo_path'])
off_policy_path = sync_down(variant['off_policy_path'])
logdir = logger.get_snapshot_dir()
os.system('python -m BEAR.main' +
' --demo_data='+demo_path+
' --off_policy_data='+off_policy_path+
' --eval_freq='+variant['eval_freq']+
' --algo_name='+variant['algo_name']+
' --env_name='+variant['env_name']+
' --log_dir='+logdir+
' --lagrange_thresh='+variant['lagrange_thresh']+
' --distance_type='+variant['distance_type']+
' --mode='+variant['mode']+
' --num_samples_match='+variant['num_samples_match']+
' --lamda='+variant['lambda_']+
' --version='+variant['version']+
' --mmd_sigma='+variant['mmd_sigma']+
' --kernel_type='+variant['kernel_type']+
' --use_ensemble_variance='+variant['use_ensemble_variance'])
if __name__ == "__main__":
variant = dict(
demo_path='demos/ant_action_noise_15.npy',
off_policy_path='demos/ant_off_policy_15_demos_100.npy',
eval_freq='1000',
algo_name='BEAR',
env_name='Ant-v2',
lagrange_thresh='10.0',
distance_type='MMD',
mode='auto',
num_samples_match='5',
lambda_='0.0',
version='0.0',
mmd_sigma='10.0',
kernel_type='laplacian',
use_ensemble_variance='"False"',
)
search_space = {
'mmd_sigma':['10.0', '20.0'],
'num_samples_match':['5', '10', '20'],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_name = 'test'
# n_seeds = 1
# mode = 'ec2'
# exp_name = 'ant_bear_sweep_v1'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
experiment,
exp_name=exp_name,
mode=mode,
unpack_variant=False,
variant=variant,
num_exps_per_instance=1,
use_gpu=False,
gcp_kwargs=dict(
preemptible=False,
),
)
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.7 on 2021-09-27 02:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user_type',
field=models.CharField(choices=[('user', 'User'), ('admin', 'Admin')], default='user', max_length=200),
),
]
|
nilq/baby-python
|
python
|
# encoding: utf-8
"""
route.py
Created by Thomas Mangin on 2015-06-22.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
from exabgp.configuration.core import Section
from exabgp.configuration.flow.match import ParseFlowMatch
from exabgp.configuration.flow.then import ParseFlowThen
from exabgp.configuration.flow.scope import ParseFlowScope
from exabgp.configuration.static.mpls import route_distinguisher
from exabgp.configuration.flow.parser import flow
from exabgp.configuration.flow.parser import next_hop
class ParseFlowRoute(Section):
syntax = (
'route give-me-a-name {\n'
' (optional) rd 255.255.255.255:65535|65535:65536|65536:65535;\n'
' next-hop 1.2.3.4; (to use with redirect-to-nexthop)\n'
' %s\n'
' %s\n'
' %s\n'
'}\n'
% (
'\n '.join(ParseFlowMatch.syntax.split('\n')),
'\n '.join(ParseFlowScope.syntax.split('\n')),
'\n '.join(ParseFlowThen.syntax.split('\n')),
)
)
known = {
'rd': route_distinguisher,
'route-distinguisher': route_distinguisher,
'next-hop': next_hop,
}
action = {
'rd': 'nlri-set',
'route-distinguisher': 'nlri-set',
'next-hop': 'nlri-nexthop',
}
assign = {
'rd': 'rd',
'route-distinguisher': 'rd',
}
name = 'flow/route'
def __init__(self, tokeniser, scope, error, logger):
Section.__init__(self, tokeniser, scope, error, logger)
def clear(self):
pass
def pre(self):
self.scope.append_route(flow(None))
return True
def post(self):
route = self.scope.get_route()
if route.nlri.rd is not RouteDistinguisher.NORD:
route.nlri.safi = SAFI.flow_vpn
return True
def _check(self, change):
self.logger.debug('warning: no check on flows are implemented', 'configuration')
return True
|
nilq/baby-python
|
python
|
'''This file provides editor completions while working on DFHack using ycmd:
https://github.com/Valloric/ycmd
'''
# pylint: disable=import-error,invalid-name,missing-docstring,unused-argument
import os
import ycm_core
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
# We need to tell YouCompleteMe how to compile this project. We do this using
# clang's "Compilation Database" system, which essentially just dumps a big
# json file into the build folder.
# More details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# We don't use clang, but luckily CMake supports generating a database on its
# own, using:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
for potential_build_folder in ['build', 'build-osx']:
if os.path.exists(DirectoryOfThisScript() + os.path.sep + potential_build_folder
+ os.path.sep + 'compile_commands.json'):
database = ycm_core.CompilationDatabase(potential_build_folder)
break
else:
raise RuntimeError("Can't find dfhack build folder: not one of build, build-osx")
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return list(flags)
new_flags = []
make_next_absolute = False
path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[len(path_flag):]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def IsHeaderFile(filename):
extension = os.path.splitext(filename)[1]
return extension in ['.h', '.hxx', '.hpp', '.hh']
SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']
def PotentialAlternatives(header):
dirname, filename = os.path.split(header)
basename, _ = os.path.splitext(filename)
source_dirs = [dirname]
if dirname.endswith(os.path.sep + 'include'):
# if we're in a folder 'include', also look in its parent
parent = os.path.abspath(os.path.join(dirname, os.path.pardir))
source_dirs.append(parent)
# and ../src (used by lua dependency)
source_dirs.append(os.path.join(parent, 'src'))
include_idx = dirname.rfind(os.path.sep + 'include' + os.path.sep)
if include_idx != -1:
# we're in a subfolder of a parent '/include/'
# .../include/subdir/path
# look in .../subdir/path
source_dirs.append(
dirname[:include_idx] +
os.path.sep +
dirname[include_idx + len('include') + 2*len(os.path.sep):]
)
for source_dir in source_dirs:
for ext in SOURCE_EXTENSIONS:
yield source_dir + os.path.sep + basename + ext
def GetCompilationInfoForFile(filename):
# The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile(filename):
for alternative in PotentialAlternatives(filename):
if os.path.exists(alternative):
compilation_info = database.GetCompilationInfoForFile(
alternative
)
if compilation_info.compiler_flags_:
return compilation_info
return None
else:
return database.GetCompilationInfoForFile(filename)
def FlagsForFile(filename, **kwargs):
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile(filename)
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_
)
return {
'flags': final_flags,
'do_cache': True
}
|
nilq/baby-python
|
python
|
from twisted.trial import unittest
from signing.persistence import Persistence
class PersistenceTests(unittest.TestCase):
def setUp(self):
self.persistence = Persistence()
def test_set_get(self):
d = self.persistence.set('somekey', 'somefield', 'somevalue')
d.addCallback(lambda _: self.persistence.get('somekey', 'somefield'))
return d.addCallback(self.assertEquals, 'somevalue')
def test_update(self):
d = self.persistence.set('updatekey', 'updatefield', 'firstvalue')
d.addCallback(lambda _: self.persistence.set('updatekey', 'updatefield', 'secondvalue'))
d.addCallback(lambda _: self.persistence.get('updatekey', 'updatefield'))
return d.addCallback(self.assertEquals, 'secondvalue')
def test_delete(self):
d = self.persistence.set('deletekey', 'deletefield', 'somevalue')
d.addCallback(lambda _: self.persistence.delete('deletekey', 'deletefield'))
d.addCallback(lambda _: self.persistence.get('deletekey', 'deletefield'))
return d.addCallback(self.assertEquals, None)
def test_getAll(self):
d = self.persistence.set('getallkey', 'getallfield', 'getallvalue')
d.addCallback(lambda _: self.persistence.set('getallkey', 'getallfield2', 'getallvalue2'))
d.addCallback(lambda _: self.persistence.get_all('getallkey'))
return d.addCallback(lambda result: self.assertTrue('getallfield' in result and 'getallfield2' in result))
def test_deleteAll(self):
d = self.persistence.set('deleteall_key', 'deleteall_field', 'deleteall_value')
d.addCallback(lambda _: self.persistence.delete('deleteall_key'))
d.addCallback(lambda _: self.persistence.get_all('deleteall_key'))
return d.addCallback(self.assertEquals, [])
|
nilq/baby-python
|
python
|
""" Module storing the `~halotools.sim_manager.CachedHaloCatalog`,
the class responsible for retrieving halo catalogs from shorthand
keyword inputs such as ``simname`` and ``redshift``.
"""
import os
from warnings import warn
from copy import deepcopy
import numpy as np
from astropy.table import Table
from ..utils.python_string_comparisons import _passively_decode_string, compare_strings_py23_safe
try:
import h5py
_HAS_H5PY = True
except ImportError:
_HAS_H5PY = False
warn("Most of the functionality of the "
"sim_manager sub-package requires h5py to be installed,\n"
"which can be accomplished either with pip or conda. ")
from ..sim_manager import sim_defaults, supported_sims
from ..utils import broadcast_host_halo_property, add_halo_hostid
from .halo_table_cache import HaloTableCache
from .ptcl_table_cache import PtclTableCache
from .halo_table_cache_log_entry import get_redshift_string
from ..custom_exceptions import HalotoolsError, InvalidCacheLogEntry
__all__ = ('CachedHaloCatalog', )
class CachedHaloCatalog(object):
"""
Container class for the halo catalogs and particle data
that are stored in the Halotools cache log.
`CachedHaloCatalog` is used to retrieve halo catalogs
from shorthand keyword inputs such as
``simname``, ``halo_finder`` and ``redshift``.
The halos are stored in the ``halo_table`` attribute
in the form of an Astropy `~astropy.table.Table`.
If available, another `~astropy.table.Table` storing
a random downsampling of dark matter particles
is stored in the ``ptcl_table`` attribute.
See the Examples section below for details on how to
access and manipulate this data.
For a list of available snapshots provided by Halotools,
see :ref:`supported_sim_list`.
For information about the subhalo vs. host halo nomenclature
conventions used throughout Halotools, see :ref:`rockstar_subhalo_nomenclature`.
For a thorough discussion of the meaning of each column in the Rockstar halo catalogs,
see the appendix of `Rodriguez Puebla et al 2016 <http://arxiv.org/abs/1602.04813>`_.
"""
acceptable_kwargs = ('ptcl_version_name', 'fname', 'simname',
'halo_finder', 'redshift', 'version_name', 'dz_tol', 'update_cached_fname',
'preload_halo_table')
def __init__(self, *args, **kwargs):
"""
Parameters
------------
simname : string, optional
Nickname of the simulation used as a shorthand way to keep track
of the halo catalogs in your cache.
The simnames of the Halotools-provided catalogs are
'bolshoi', 'bolplanck', 'consuelo' and 'multidark'.
Default is set by the ``default_simname`` variable in the
`~halotools.sim_manager.sim_defaults` module.
halo_finder : string, optional
Nickname of the halo-finder used to generate the hlist file from particle data.
Default is set by the ``default_halo_finder`` variable in the
`~halotools.sim_manager.sim_defaults` module.
redshift : float, optional
Redshift of the halo catalog.
Default is set by the ``default_redshift`` variable in the
`~halotools.sim_manager.sim_defaults` module.
version_name : string, optional
Nickname of the version of the halo catalog.
Default is set by the ``default_version_name`` variable in the
`~halotools.sim_manager.sim_defaults` module.
ptcl_version_name : string, optional
Nickname of the version of the particle catalog associated with
the halos.
This argument is typically only used if you have cached your own
particles via the `~halotools.sim_manager.UserSuppliedPtclCatalog` class.
Default is set by the ``default_version_name`` variable in the
`~halotools.sim_manager.sim_defaults` module.
fname : string, optional
Absolute path to the location on disk storing the hdf5 file
of halo data. If passing ``fname``, do not pass the metadata keys
``simname``, ``halo_finder``, ``version_name`` or ``redshift``.
update_cached_fname : bool, optional
If the hdf5 file storing the halos has been relocated to a new
disk location after storing the data in cache,
the ``update_cached_fname`` input can be used together with the
``fname`` input to update the cache log with the new disk location.
See :ref:`relocating_simulation_data_instructions` for
further instructions.
dz_tol : float, optional
Tolerance within which to search for a catalog with a matching redshift.
Halo catalogs in cache with a redshift that differs by greater
than ``dz_tol`` will be ignored. Default is 0.05.
Examples
---------
If you followed the instructions in the
:ref:`download_default_halos` section of the :ref:`getting_started` guide,
then you can load the default halo catalog into memory by calling the
`~halotools.sim_manager.CachedHaloCatalog` with no arguments:
>>> halocat = CachedHaloCatalog() # doctest: +SKIP
The halos are stored in the ``halo_table`` attribute
in the form of an Astropy `~astropy.table.Table`.
>>> halos = halocat.halo_table # doctest: +SKIP
As with any Astropy `~astropy.table.Table`, the properties of the
halos can be accessed in the same manner as a Numpy structured array
or python dictionary:
>>> array_of_masses = halocat.halo_table['halo_mvir'] # doctest: +SKIP
>>> x_positions = halocat.halo_table['halo_x'] # doctest: +SKIP
Note that all keys of a cached halo catalog begin with the substring
``halo_``. This is a bookkeeping device used to help
the internals of Halotools differentiate
between halo properties and the properties of mock galaxies
populated into the halos with ambiguously similar names.
The ``simname``, ``halo_finder``, ``version_name`` and ``redshift``
keyword arguments fully specify the halo catalog that will be loaded.
Omitting any of them will select the corresponding default value
set in the `~halotools.sim_manager.sim_defaults` module.
>>> halocat = CachedHaloCatalog(redshift = 1, simname = 'multidark') # doctest: +SKIP
If you forget which catalogs you have stored in cache,
you have two options for how to remind yourself.
First, you can use the `~halotools.sim_manager.HaloTableCache` class:
>>> from halotools.sim_manager import HaloTableCache
>>> cache = HaloTableCache()
>>> for entry in cache.log: print(entry) # doctest: +SKIP
Alternatively, you can simply use a text editor to open the cache log,
which is stored as ASCII data in the following location on your machine:
$HOME/.astropy/cache/halotools/halo_table_cache_log.txt
See also
----------
:ref:`halo_catalog_analysis_quickstart`
:ref:`halo_catalog_analysis_tutorial`
"""
self._verify_acceptable_constructor_call(*args, **kwargs)
assert _HAS_H5PY, "Must have h5py package installed to use CachedHaloCatalog objects"
try:
dz_tol = kwargs['dz_tol']
except KeyError:
dz_tol = 0.05
self._dz_tol = dz_tol
try:
update_cached_fname = kwargs['update_cached_fname']
except KeyError:
update_cached_fname = False
self._update_cached_fname = update_cached_fname
self.halo_table_cache = HaloTableCache()
self._disallow_catalogs_with_known_bugs(**kwargs)
self.log_entry = self._determine_cache_log_entry(**kwargs)
self.simname = self.log_entry.simname
self.halo_finder = self.log_entry.halo_finder
self.version_name = self.log_entry.version_name
self.redshift = self.log_entry.redshift
self.fname = self.log_entry.fname
self._bind_additional_metadata()
try:
preload_halo_table = kwargs['preload_halo_table']
except KeyError:
preload_halo_table = False
if preload_halo_table is True:
_ = self.halo_table
del _
self._set_publication_list(self.simname)
def _set_publication_list(self, simname):
try:
simclass = supported_sims.supported_sim_dict[simname]
simobj = simclass()
self.publications = simobj.publications
except (KeyError, AttributeError):
self.publications = []
def _verify_acceptable_constructor_call(self, *args, **kwargs):
"""
"""
try:
assert len(args) == 0
except AssertionError:
msg = ("\nCachedHaloCatalog only accepts keyword arguments, not position arguments. \n")
raise HalotoolsError(msg)
for key in list(kwargs.keys()):
try:
assert key in self.acceptable_kwargs
except AssertionError:
msg = ("\nCachedHaloCatalog got an unexpected keyword ``" + key + "``\n"
"The only acceptable keywords are listed below:\n\n")
for acceptable_key in self.acceptable_kwargs:
msg += "``" + acceptable_key + "``\n"
raise HalotoolsError(msg)
def _determine_cache_log_entry(self, **kwargs):
"""
"""
try:
self.ptcl_version_name = kwargs['ptcl_version_name']
self._default_ptcl_version_name_choice = False
except KeyError:
self.ptcl_version_name = sim_defaults.default_ptcl_version_name
self._default_ptcl_version_name_choice = True
if 'fname' in kwargs:
fname = kwargs['fname']
if not os.path.isfile(fname):
msg = ("\nThe ``fname`` you passed to the CachedHaloCatalog "
"constructor is a non-existent path.\n")
raise HalotoolsError(msg)
try:
assert 'simname' not in kwargs
except AssertionError:
msg = ("\nIf you specify an input ``fname``, "
"do not also specify ``simname``.\n")
raise HalotoolsError(msg)
try:
assert 'halo_finder' not in kwargs
except AssertionError:
msg = ("\nIf you specify an input ``fname``, "
"do not also specify ``halo_finder``.\n")
raise HalotoolsError(msg)
try:
assert 'redshift' not in kwargs
except AssertionError:
msg = ("\nIf you specify an input ``fname``, "
"do not also specify ``redshift``.\n")
raise HalotoolsError(msg)
try:
assert 'version_name' not in kwargs
except AssertionError:
msg = ("\nIf you specify an input ``fname``, "
"do not also specify ``version_name``.\n")
raise HalotoolsError(msg)
return self._retrieve_matching_log_entry_from_fname(fname)
else:
try:
simname = str(kwargs['simname'])
self._default_simname_choice = False
except KeyError:
simname = sim_defaults.default_simname
self._default_simname_choice = True
try:
halo_finder = str(kwargs['halo_finder'])
self._default_halo_finder_choice = False
except KeyError:
halo_finder = sim_defaults.default_halo_finder
self._default_halo_finder_choice = True
try:
version_name = str(kwargs['version_name'])
self._default_version_name_choice = False
except KeyError:
version_name = sim_defaults.default_version_name
self._default_version_name_choice = True
try:
redshift = float(kwargs['redshift'])
self._default_redshift_choice = False
except KeyError:
redshift = sim_defaults.default_redshift
self._default_redshift_choice = True
return self._retrieve_matching_log_entry_from_metadata(
simname, halo_finder, version_name, redshift)
def _retrieve_matching_log_entry_from_fname(self, fname):
"""
"""
log_entry = self.halo_table_cache.determine_log_entry_from_fname(fname,
overwrite_fname_metadata=False)
if not compare_strings_py23_safe(log_entry.fname, fname):
if self._update_cached_fname is True:
old_fname = deepcopy(log_entry.fname)
log_entry = (
self.halo_table_cache.determine_log_entry_from_fname(fname,
overwrite_fname_metadata=self._update_cached_fname)
)
self.halo_table_cache.update_cached_file_location(
fname, old_fname)
else:
msg = ("\nThe ``fname`` you passed as an input to the "
"CachedHaloCatalog class \ndoes not match the ``fname`` "
"stored as metadata in the hdf5 file.\n"
"This means that at some point you manually relocated the catalog on disk \n"
"after storing its location in cache, "
"but you did not yet update the Halotools cache log. \n"
"When possible, try to keep your halo catalogs "
"at a fixed disk location \n"
"as this helps ensure reproducibility. \n"
"If the ``fname`` you passed to CachedHaloCatalog is the "
"new location you want to store the catalog, \n"
"then you can update the cache by calling the CachedHaloCatalog \n"
"constructor again and setting the ``update_cached_fname`` variable to True.\n")
raise HalotoolsError(msg)
return log_entry
def _retrieve_matching_ptcl_cache_log_entry(self):
"""
"""
ptcl_table_cache = PtclTableCache()
if len(ptcl_table_cache.log) == 0:
msg = ("\nThe Halotools cache log has no record of any particle catalogs.\n"
"If you have never used Halotools before, "
"you should read the Getting Started guide on halotools.readthedocs.io.\n"
"If you have previously used the package before, \n"
"try running the halotools/scripts/rebuild_ptcl_table_cache_log.py script.\n")
raise HalotoolsError(msg)
gen0 = ptcl_table_cache.matching_log_entry_generator(
simname=self.simname, version_name=self.ptcl_version_name,
redshift=self.redshift, dz_tol=self._dz_tol)
gen1 = ptcl_table_cache.matching_log_entry_generator(
simname=self.simname, version_name=self.ptcl_version_name)
gen2 = ptcl_table_cache.matching_log_entry_generator(simname=self.simname)
matching_entries = list(gen0)
msg = ("\nYou tried to load a cached particle catalog "
"with the following characteristics:\n\n")
if self._default_simname_choice is True:
msg += ("simname = ``" + str(self.simname) +
"`` (set by sim_defaults.default_simname)\n")
else:
msg += "simname = ``" + str(self.simname) + "``\n"
if self._default_ptcl_version_name_choice is True:
msg += ("ptcl_version_name = ``" + str(self.ptcl_version_name) +
"`` (set by sim_defaults.default_version_name)\n")
else:
msg += "ptcl_version_name = ``" + str(self.ptcl_version_name) + "``\n"
if self._default_redshift_choice is True:
msg += ("redshift = ``" + str(self.redshift) +
"`` (set by sim_defaults.default_redshift)\n")
else:
msg += "redshift = ``" + str(self.redshift) + "``\n"
msg += ("\nThere is no matching catalog in cache "
"within dz_tol = "+str(self._dz_tol)+" of these inputs.\n"
)
if len(matching_entries) == 0:
suggestion_preamble = ("\nThe following entries in the cache log "
"most closely match your inputs:\n\n")
alt_list1 = list(gen1) # discard the redshift requirement
if len(alt_list1) > 0:
msg += suggestion_preamble
for entry in alt_list1:
msg += str(entry) + "\n\n"
else:
alt_list2 = list(gen2) # discard the version_name requirement
if len(alt_list2) > 0:
msg += suggestion_preamble
for entry in alt_list2:
msg += str(entry) + "\n\n"
else:
msg += "There are no simulations matching your input simname.\n"
raise InvalidCacheLogEntry(msg)
elif len(matching_entries) == 1:
log_entry = matching_entries[0]
return log_entry
else:
msg += ("There are multiple entries in the cache log \n"
"within dz_tol = "+str(self._dz_tol)+" of your inputs. \n"
"Try using the exact redshift and/or decreasing dz_tol.\n"
"Now printing the matching entries:\n\n")
for entry in matching_entries:
msg += str(entry) + "\n"
raise InvalidCacheLogEntry(msg)
def _retrieve_matching_log_entry_from_metadata(self,
simname, halo_finder, version_name, redshift):
"""
"""
if len(self.halo_table_cache.log) == 0:
msg = ("\nThe Halotools cache log is empty.\n"
"If you have never used Halotools before, "
"you should read the Getting Started guide on halotools.readthedocs.io.\n"
"If you have previously used the package before, \n"
"try running the halotools/scripts/rebuild_halo_table_cache_log.py script.\n")
raise HalotoolsError(msg)
gen0 = self.halo_table_cache.matching_log_entry_generator(
simname=simname, halo_finder=halo_finder,
version_name=version_name, redshift=redshift,
dz_tol=self._dz_tol)
gen1 = self.halo_table_cache.matching_log_entry_generator(
simname=simname,
halo_finder=halo_finder, version_name=version_name)
gen2 = self.halo_table_cache.matching_log_entry_generator(
simname=simname, halo_finder=halo_finder)
gen3 = self.halo_table_cache.matching_log_entry_generator(
simname=simname)
matching_entries = list(gen0)
msg = ("\nYou tried to load a cached halo catalog "
"with the following characteristics:\n\n")
if self._default_simname_choice is True:
msg += ("simname = ``" + str(simname) +
"`` (set by sim_defaults.default_simname)\n")
else:
msg += "simname = ``" + str(simname) + "``\n"
if self._default_halo_finder_choice is True:
msg += ("halo_finder = ``" + str(halo_finder) +
"`` (set by sim_defaults.default_halo_finder)\n")
else:
msg += "halo_finder = ``" + str(halo_finder) + "``\n"
if self._default_version_name_choice is True:
msg += ("version_name = ``" + str(version_name) +
"`` (set by sim_defaults.default_version_name)\n")
else:
msg += "version_name = ``" + str(version_name) + "``\n"
if self._default_redshift_choice is True:
msg += ("redshift = ``" + str(redshift) +
"`` (set by sim_defaults.default_redshift)\n")
else:
msg += "redshift = ``" + str(redshift) + "``\n"
msg += ("\nThere is no matching catalog in cache "
"within dz_tol = "+str(self._dz_tol)+" of these inputs.\n"
)
if len(matching_entries) == 0:
suggestion_preamble = ("\nThe following entries in the cache log "
"most closely match your inputs:\n\n")
alt_list1 = list(gen1) # discard the redshift requirement
if len(alt_list1) > 0:
msg += suggestion_preamble
for entry in alt_list1:
msg += str(entry) + "\n\n"
else:
alt_list2 = list(gen2) # discard the version_name requirement
if len(alt_list2) > 0:
msg += suggestion_preamble
for entry in alt_list2:
msg += str(entry) + "\n\n"
else:
alt_list3 = list(gen3) # discard the halo_finder requirement
if len(alt_list3) > 0:
msg += suggestion_preamble
for entry in alt_list3:
msg += str(entry) + "\n\n"
else:
msg += "There are no simulations matching your input simname.\n"
raise InvalidCacheLogEntry(msg)
elif len(matching_entries) == 1:
log_entry = matching_entries[0]
return log_entry
else:
msg += ("There are multiple entries in the cache log \n"
"within dz_tol = "+str(self._dz_tol)+" of your inputs. \n"
"Try using the exact redshift and/or decreasing dz_tol.\n"
"Now printing the matching entries:\n\n")
for entry in matching_entries:
msg += str(entry) + "\n"
raise InvalidCacheLogEntry(msg)
@property
def halo_table(self):
"""
Astropy `~astropy.table.Table` object storing a catalog of dark matter halos.
You can access the array storing, say, halo virial mass using the following syntax:
>>> halocat = CachedHaloCatalog() # doctest: +SKIP
>>> mass_array = halocat.halo_table['halo_mvir'] # doctest: +SKIP
To see what halo properties are available in the catalog:
>>> print(halocat.halo_table.keys()) # doctest: +SKIP
"""
try:
return self._halo_table
except AttributeError:
if self.log_entry.safe_for_cache is True:
self._halo_table = Table.read(_passively_decode_string(self.fname), path='data')
self._add_new_derived_columns(self._halo_table)
return self._halo_table
else:
raise InvalidCacheLogEntry(self.log_entry._cache_safety_message)
def _add_new_derived_columns(self, t):
if 'halo_hostid' not in list(t.keys()):
add_halo_hostid(t)
if 'halo_mvir_host_halo' not in list(t.keys()):
broadcast_host_halo_property(t, 'halo_mvir')
def _bind_additional_metadata(self):
""" Create convenience bindings of all metadata to the `CachedHaloCatalog` instance.
"""
if not os.path.isfile(self.log_entry.fname):
msg = ("The following input fname does not exist: \n\n" +
self.log_entry.fname + "\n\n")
raise InvalidCacheLogEntry(msg)
f = h5py.File(self.log_entry.fname, 'r')
for attr_key in list(f.attrs.keys()):
if attr_key == 'redshift':
setattr(self, attr_key, float(get_redshift_string(f.attrs[attr_key])))
elif attr_key == 'Lbox':
self.Lbox = np.empty(3)
self.Lbox[:] = f.attrs['Lbox']
else:
setattr(self, attr_key, f.attrs[attr_key])
f.close()
matching_sim = self._retrieve_supported_sim()
if matching_sim is not None:
for attr in matching_sim._attrlist:
if hasattr(self, attr):
try:
a = _passively_decode_string(getattr(self, attr))
b = _passively_decode_string(getattr(matching_sim, attr))
assert np.all(a == b)
except AssertionError:
msg = ("The ``" + attr + "`` metadata of the hdf5 file \n"
"is inconsistent with the corresponding attribute of the \n" +
matching_sim.__class__.__name__ + " class in the "
"sim_manager.supported_sims module.\n"
"Double-check the value of this attribute in the \n"
"NbodySimulation sub-class you added to the supported_sims module. \n"
)
raise HalotoolsError(msg)
else:
setattr(self, attr, getattr(matching_sim, attr))
def _retrieve_supported_sim(self):
"""
"""
matching_sim = None
for clname in supported_sims.__all__:
try:
cl = getattr(supported_sims, clname)
obj = cl()
if isinstance(obj, supported_sims.NbodySimulation):
if compare_strings_py23_safe(self.simname, obj.simname):
matching_sim = obj
except TypeError:
pass
return matching_sim
@property
def ptcl_table(self):
"""
Astropy `~astropy.table.Table` object storing
a collection of ~1e6 randomly selected dark matter particles.
"""
try:
return self._ptcl_table
except AttributeError:
try:
ptcl_log_entry = self.ptcl_log_entry
except AttributeError:
self.ptcl_log_entry = (
self._retrieve_matching_ptcl_cache_log_entry()
)
ptcl_log_entry = self.ptcl_log_entry
if ptcl_log_entry.safe_for_cache is True:
self._ptcl_table = Table.read(_passively_decode_string(ptcl_log_entry.fname), path='data')
return self._ptcl_table
else:
raise InvalidCacheLogEntry(ptcl_log_entry._cache_safety_message)
def _disallow_catalogs_with_known_bugs(self, simname=sim_defaults.default_simname,
version_name=sim_defaults.default_version_name, **kwargs):
"""
"""
if (simname == 'bolplanck') and ('halotools_alpha_version' in version_name):
msg = ("The ``{0}`` version of the ``{1}`` simulation \n"
"is known to be spatially incomplete and should not be used.\n"
"See https://github.com/astropy/halotools/issues/598.\n"
"You can either download the original ASCII data and process it yourself, \n"
"or use version_name = ``halotools_v0p4`` instead.\n")
raise HalotoolsError(msg.format(version_name, simname))
|
nilq/baby-python
|
python
|
import unittest, os
import cuisine
USER = os.popen("whoami").read()[:-1]
class Text(unittest.TestCase):
def testEnsureLine( self ):
some_text = "foo"
some_text = cuisine.text_ensure_line(some_text, "bar")
assert some_text == 'foo\nbar'
some_text = cuisine.text_ensure_line(some_text, "bar")
assert some_text == 'foo\nbar'
class Users(unittest.TestCase):
def testUserCheck( self ):
user_data = cuisine.user_check(USER)
print "USER_DATA", user_data
class Files(unittest.TestCase):
def testB( self ):
cuisine.file_read("/etc/passwd")
def testC( self ):
pass
class Packages(unittest.TestCase):
def testInstall( self ):
pass
#with cuisine.mode_sudo():
# cuisine.package_ensure("tmux")
class SSHKeys(unittest.TestCase):
def testKeygen( self ):
if cuisine.ssh_keygen(USER):
print "SSH keys already there"
else:
print "SSH keys created"
def testAuthorize( self ):
key = "ssh-dss XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX= user@cuisine"""
cuisine.ssh_authorize(USER, key)
# FIXME: Should check that the key is present, and only one
if __name__ == "__main__":
# We bypass fabric as we want the tests to be run locally
cuisine.mode_local()
unittest.main()
# EOF
|
nilq/baby-python
|
python
|
import math
import numpy as np
from utils.functions.math.coordinate_trans import coordinate_transformation_in_angle
def circle_make(center_x, center_y, radius):
'''
Create circle matrix(2D)
Parameters
-------
center_x : float in meters
the center position of the circle coordinate x
center_y : float in meters
the center position of the circle coordinate y
radius : float in meters
Returns
-------
circle x : numpy.ndarray
circle y : numpy.ndarray
'''
point_num = 100
circle_xs = []
circle_ys = []
for i in range(point_num + 1):
circle_xs.append(center_x + radius * math.cos(i*2*math.pi/point_num))
circle_ys.append(center_y + radius * math.sin(i*2*math.pi/point_num))
return np.array(circle_xs), np.array(circle_ys)
def circle_make_with_angles(center_x, center_y, radius, angle):
'''
Create circle matrix with angle line matrix(2D)
Parameters
-------
center_x : float in meters
the center position of the circle coordinate x
center_y : float in meters
the center position of the circle coordinate y
radius : float in meters
angle : float in radians
Returns
-------
circle xs : numpy.ndarray
circle ys : numpy.ndarray
angle line xs : numpy.ndarray
angle line ys : numpy.ndarray
'''
point_num = 100
circle_xs = []
circle_ys = []
for i in range(point_num + 1):
circle_xs.append(center_x + radius * math.cos(i*2*math.pi/point_num))
circle_ys.append(center_y + radius * math.sin(i*2*math.pi/point_num))
angle_line_xs = [center_x, center_x + math.cos(angle) * radius]
angle_line_ys = [center_y, center_y + math.sin(angle) * radius]
return np.array(circle_xs), np.array(circle_ys), np.array(angle_line_xs), np.array(angle_line_ys)
def square_make_with_angles(center_x, center_y, size, angle):
'''
Create square matrix with angle line matrix(2D)
Parameters
-------
center_x : float in meters
the center x position of the square
center_y : float in meters
the center y position of the square
size : float in meters
the square's half-size
angle : float in radians
Returns
-------
square xs : numpy.ndarray
length is 5 (counterclockwise from right-up)
square ys : numpy.ndarray
length is 5 (counterclockwise from right-up)
angle line xs : numpy.ndarray
angle line ys : numpy.ndarray
'''
# start with the up right points
# create point in counterclockwise
square_xys = np.array([[size, size], [-size, size], [-size, -size], [size, -size], [size, size]])
trans_points = coordinate_transformation_in_angle(square_xys.T, -angle) # this is inverse type
trans_points += np.array([[center_x], [center_y]])
square_xs = trans_points[0, :]
square_ys = trans_points[1, :]
angle_line_xs = [center_x, center_x + math.cos(angle) * size]
angle_line_ys = [center_y, center_y + math.sin(angle) * size]
return square_xs, square_ys, np.array(angle_line_xs), np.array(angle_line_ys)
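# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how the helpers above might be called, assuming the
# utils.functions.math.coordinate_trans dependency imported at the top is
# available; the center/size/angle values are arbitrary illustration values.
if __name__ == '__main__':
    cx, cy = 1.0, 2.0
    circle_xs, circle_ys = circle_make(cx, cy, radius=0.5)
    sq_xs, sq_ys, line_xs, line_ys = square_make_with_angles(cx, cy, size=0.5, angle=math.pi / 4)
    print(circle_xs.shape, circle_ys.shape)   # (101,) (101,)
    print(sq_xs.shape, line_xs.shape)         # (5,) (2,)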
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import MaxAbsScaler
from tpot.builtins import StackingEstimator
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'].values, random_state=None)
# Average CV score on the training set was:-2018.8830873326278
exported_pipeline = make_pipeline(
MaxAbsScaler(),
StackingEstimator(estimator=ExtraTreesRegressor(bootstrap=False, max_features=0.9500000000000001, min_samples_leaf=6, min_samples_split=5, n_estimators=100)),
GradientBoostingRegressor(alpha=0.75, learning_rate=0.5, loss="huber", max_depth=4, max_features=0.7500000000000001, min_samples_leaf=10, min_samples_split=9, n_estimators=100, subsample=0.9000000000000001)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
|
nilq/baby-python
|
python
|
# 454. 4Sum II
import collections
class Solution:
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
ab = {}
for a in A:
for b in B:
ab[a+b] = ab.get(a+b, 0) + 1
count = 0
for c in C:
for d in D:
count += ab.get(-c-d, 0)
return count
# short solution
def fourSumCount2(self, A, B, C, D):
ab = collections.Counter([a+b for a in A for b in B])
return sum([ab[-c-d] for c in C for d in D])
sol = Solution()
print(sol.fourSumCount2([1,2], [-2,-1], [-1,2], [0,2]))
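# Hedged sanity check (added for illustration): both implementations should
# agree on the sample input above; the expected count for these lists is 2.
print(sol.fourSumCount([1,2], [-2,-1], [-1,2], [0,2]))  # 2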
|
nilq/baby-python
|
python
|
import soundfile as sf
import numpy as np
import librosa
from scipy import signal
import cPickle
import src.config as cfg
def to_mono(wav):
if wav.ndim == 1:
return wav
elif wav.ndim == 2:
return np.mean(wav, axis=-1)
def calculate_logmel(rd_fd):
wav, fs = sf.read(rd_fd)
wav = to_mono(wav)
#assert fs == cfg.fs
ham_win = np.hamming(cfg.n_fft)
[f, t, x] = signal.spectrogram(x=wav,
window=ham_win,
nperseg=cfg.n_fft,
noverlap=0,
detrend=False,
return_onesided=True,
mode='magnitude') #Compute a spectrogram with consecutive Fourier transforms.
x = x.T
print x.shape
if globals().get('melW') is None:
global melW
melW = librosa.filters.mel(sr=fs,
n_fft=cfg.n_fft,
n_mels=64,
fmin=0.,
fmax=22100)
x = np.dot(x, melW.T)
x = np.log(x + 1e-8)
print x
rd_fd +=".f"
cPickle.dump(x, open(rd_fd , 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
def make_pred(rd_path):
calculate_logmel(rd_path)
import kera_pred
msg = kera_pred.others(rd_path+".f",cfg.ld_md)
return msg
|
nilq/baby-python
|
python
|
from datetime import datetime as dt
from ..ff.window import Window
class BrowserSession(object):
def __init__(self, ss_json):
self._windows = WindowSet(ss_json["windows"])
self._start_time = dt.fromtimestamp(ss_json["session"]["startTime"] / 1000)
self._selected_window = ss_json["selectedWindow"] - 1
return
def __repr__(self):
n_win = len(self.windows)
return f"BrowserSession of {n_win} windows, since {self.start_time}"
@property
def windows(self):
return self._windows
@property
def start_time(self):
return self._start_time
class WindowSet(list):
"""
A class which reads the 'windows' list of a Firefox recovery JSON file and
instantiates a Window object for each of the listed entries.
"""
def __init__(self, json_list):
self.extend([Window(j) for j in json_list])
return
def __repr__(self):
n_win = len(self)
window_reprs = "\n\n".join([str(w) for w in self])
return f"WindowSet of {n_win} windows\n\n{window_reprs}"
|
nilq/baby-python
|
python
|
import numpy as np
import xlrd
import xlwt
sheet = xlrd.open_workbook('data1.xls')
workbook = xlwt.Workbook(encoding='ascii')
worksheet = workbook.add_sheet('school')
data = sheet.sheets()[0]
row = data.nrows
col = data.ncols
for i in range(0, row):
    for j in range(0, col):
        worksheet.write(i, j, label=str(data.cell(i, j).value).split('.')[0])
workbook.save('/Users/tinoryj/Desktop/ans.xls')
|
nilq/baby-python
|
python
|
import json
import sys
import os
import typer
from typing import List
import time
from .logger import get_logger
from .config.imports import check_imports
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger = get_logger()
cli = typer.Typer()
@cli.command('setup')
def setup_auto():
chk_libs = ['tensorflow', 'torch', 'transformers']
typer.echo(f'Setting Up Libraries and Checking Installed')
installed_libs = check_imports()
for lib in chk_libs:
_is_installed = f'{lib} - {installed_libs[lib]} is installed' if installed_libs[lib] else f'{lib} is not installed'
typer.echo(_is_installed)
if typer.confirm(f"Update {lib}?"):
os.system(f'pip install -q --upgrade {lib}')
if __name__ == "__main__":
cli()
|
nilq/baby-python
|
python
|
import re
import matplotlib.pyplot as plt
import numpy as np
import sklearn
from sklearn import svm
from sklearn import linear_model, tree
from sklearn.ensemble import RandomForestClassifier
from sklearn import neural_network
import copy
path = r"D:\THU\curriculum\Software Engineering\Course_Project\data_1109_0427_RGB_fixed.txt"
f = open(path, "r")
#Read data from file
channels = []
for line in f:
data = re.split(" ", line)
data = [float(i) for i in data]
channels.append(data)
#Smoothing
length = len(channels[0])
smoothed = copy.deepcopy(channels)
for i in [2,3,4]:
for j in range(length):
smoothed[i][j] = np.mean(channels[i][max(j - 5, 0):min(length - 1, j + 5)])
#Specify each channel
time = channels[0]
index = np.arange(len(time))
ppg = channels[1]
colors = np.transpose(smoothed[3:])
#Split the training set and test set
training_data = colors[:int(len(colors)*0.9)]
training_label = ppg[:int(len(colors)*0.9)]
test_data = colors[int(len(colors)*0.9):]
test_label = ppg[int(len(colors)*0.9):]
#Rescale the ppg data into the range of 0-1
training_label = np.divide(np.subtract(training_label,50),200)
test_label = np.divide(np.subtract(test_label,50),200)
#reg = linear_model.LinearRegression()
reg = linear_model.BayesianRidge()
reg.fit(training_data, training_label)
predicted_label = np.add(np.matmul(test_data, reg.coef_), reg.intercept_)
plt.plot(index[:200], test_label[:200])
plt.show()
plt.plot(index[:200], np.transpose(test_data)[0][:200])
plt.show()
print(np.shape(training_label))
plt.plot(index[:200], np.transpose(test_data)[1][:200])
plt.show()
print(np.shape(training_label))
plt.plot(index[:200], predicted_label[:200])
plt.show()
print(np.shape(training_label))
print(reg.coef_, reg.intercept_)
|
nilq/baby-python
|
python
|
import csv
import re
from os import path
import numpy as np
FIELDNAMES = 'timeStamp', 'response_time', 'request_name', "status_code", "responseMessage", "threadName", "dataType",\
"success", "failureMessage", "bytes", "sentBytes", "grpThreads", "allThreads", "URL", "Latency",\
"IdleTime", "Connect"
class JTLParser(object):
def parse_jtl(self):
log_file = "/tmp/reports/jmeter.jtl"
unparsed_counter = 0
requests = {}
if not path.exists(log_file):
return requests
start_timestamp, end_timestamp = float('inf'), 0
with open(log_file, 'r+', encoding="utf-8") as tsv:
entries = csv.DictReader(tsv, delimiter=",", fieldnames=FIELDNAMES, restval="not_found")
for entry in entries:
try:
if entry['request_name'] != 'label':
if re.search(r'-\d+$', entry['request_name']):
continue
if start_timestamp > int(entry['timeStamp']):
start_timestamp = int(entry['timeStamp']) - int(entry['response_time'])
if end_timestamp < int(entry['timeStamp']):
end_timestamp = int(entry['timeStamp'])
if entry['request_name'] not in requests:
data = {'request_name': entry['request_name'],
'response_time': [int(entry['response_time'])]}
if entry['success'] == 'true':
data['OK'], data['KO'] = 1, 0
else:
data['OK'], data['KO'] = 0, 1
requests[entry['request_name']] = data
else:
requests[entry['request_name']]['response_time'].append(int(entry['response_time']))
if entry['success'] == 'true':
requests[entry['request_name']]['OK'] += 1
else:
requests[entry['request_name']]['KO'] += 1
except Exception as e:
print(e)
unparsed_counter += 1
pass
if unparsed_counter > 0:
print("Unparsed errors: %d" % unparsed_counter)
for req in requests:
requests[req]['response_time'] = int(np.percentile(requests[req]['response_time'], 95, interpolation="linear"))
duration = int((end_timestamp - start_timestamp)/1000)
throughput = self.calculate_throughput(requests, duration)
error_rate = self.calculate_error_rate(requests)
results = {"requests": requests, "throughput": throughput, "error_rate": error_rate}
return results
@staticmethod
def calculate_throughput(requests, duration):
count = 0
for req in requests:
count += requests[req]['OK']
return round(float(count/duration), 2)
@staticmethod
def calculate_error_rate(requests):
count, failed = 0, 0
for req in requests:
count += requests[req]['OK']
count += requests[req]['KO']
failed += requests[req]['KO']
return round(float(failed/count) * 100, 2)
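# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how the parser above might be invoked; it assumes a JMeter
# results file exists at the hard-coded /tmp/reports/jmeter.jtl path used by
# parse_jtl(), otherwise an empty dict is returned.
if __name__ == "__main__":
    parser = JTLParser()
    results = parser.parse_jtl()
    if results:
        print("throughput: %s req/s, error rate: %s%%" % (results["throughput"], results["error_rate"]))
    else:
        print("no jmeter.jtl found")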
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
setup(
name='taxies',
version='0.1.dev',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Click',
],
entry_points='''
[console_scripts]
taxies=taxies.scripts.taxies:cli
''',
)
|
nilq/baby-python
|
python
|
"""App.
"""
import logging
import sys
from django.apps import AppConfig
from configs.part_detection import DF_PD_VIDEO_SOURCE_IS_OPENCV
logger = logging.getLogger(__name__)
class AzurePartDetectionConfig(AppConfig):
"""App Config."""
name = "vision_on_edge.azure_part_detections"
def ready(self):
"""ready."""
if "runserver" in sys.argv:
# pylint: disable=unused-import, import-outside-toplevel
logger.info("ready while running server")
logger.info("Importing Signals")
from ..azure_part_detections.models import PartDetection, PDScenario
from ..azure_projects.models import Project
from ..cameras.models import Camera
from ..inference_modules.models import InferenceModule
from . import signals # noqa: F401
# pylint: enable=unused-import, import-outside-toplevel
create_demo = True
if create_demo:
project_obj = Project.objects.filter(is_demo=False).first()
inference_obj = InferenceModule.objects.first()
else:
project_obj = inference_obj = None
if PartDetection.objects.count() == 0:
PartDetection.objects.create(
name="Part Detection",
project=project_obj,
inference_module=inference_obj,
inference_source=(
"opencv" if DF_PD_VIDEO_SOURCE_IS_OPENCV else "lva"
),
)
PDScenario.objects.all().delete()
# =============================================
# Simple Part Detection ===
# =============================================
pd_scenario = PDScenario.objects.create(
name="Simple Part Detection",
inference_mode="PD",
project=Project.objects.get(name="Demo Part Detection Project"),
)
pd_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Part Detection Project"
).part_set.all()
)
# =============================================
# Part Counting ===
# =============================================
pc_scenario = PDScenario.objects.create(
name="Counting objects",
inference_mode="PC",
project=Project.objects.get(name="Demo Part Counting Project"),
)
pc_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 1 - Counting Objects"
)
)
pc_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Part Counting Project"
).part_set.all()
)
# =============================================
# Employee safety ===
# =============================================
es_scenario = PDScenario.objects.create(
name="Employee safety",
inference_mode="ES",
project=Project.objects.get(name="Demo Employee Safety Project"),
)
es_scenario.cameras.set(
Camera.objects.filter(is_demo=True, name="Scenario 2 - Employ Safety")
)
es_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Employee Safety Project"
).part_set.all()
)
# =============================================
# Defect Detection ===
# =============================================
dd_scenario = PDScenario.objects.create(
name="Defect detection",
inference_mode="DD",
project=Project.objects.get(name="Demo Defect Detection Project"),
)
dd_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 3 - Defect Detection"
)
)
dd_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Defect Detection Project"
).part_set.all()
)
# =============================================
# Empty Shelf Alert ===
# =============================================
esa_scenario = PDScenario.objects.create(
name="Empty shelf alert",
inference_mode="ESA",
project=Project.objects.get(name="Demo Empty Shelf Alert Project"),
)
esa_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 4 - Empty Shelf Alert"
)
)
esa_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Empty Shelf Alert Project"
).part_set.all()
)
# =============================================
# Total Customer Counting ===
# =============================================
tcc_scenario = PDScenario.objects.create(
name="People counting",
inference_mode="TCC",
project=Project.objects.get(
name="Demo Total Customer Counting Project"
),
)
tcc_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 5 - Total Customer Counting"
)
)
tcc_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Total Customer Counting Project"
).part_set.all()
)
# =============================================
# Crowded Queue Alert ===
# =============================================
cqa_scenario = PDScenario.objects.create(
name="Crowded queue alert",
inference_mode="CQA",
project=Project.objects.get(name="Demo Crowded Queue Alert Project"),
)
cqa_scenario.cameras.set(
Camera.objects.filter(
is_demo=True, name="Scenario 6 - Crowded Queue Alert"
)
)
cqa_scenario.parts.set(
Project.objects.get(
is_demo=True, name="Demo Crowded Queue Alert Project"
).part_set.all()
)
|
nilq/baby-python
|
python
|
"""
Query suggestion hierarchical encoder-decoder code.
The code is inspired by the nmt encdec code in groundhog,
but we do not rely on the groundhog infrastructure.
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Alessandro Sordoni")
__contact__ = "Alessandro Sordoni <sordonia@iro.umontreal>"
import theano
import theano.tensor as T
import numpy as np
import cPickle
import logging
logger = logging.getLogger(__name__)
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv3d2d import *
from collections import OrderedDict
from model import *
from utils import *
import operator
# Theano speed-up
theano.config.scan.allow_gc = False
def add_to_params(params, new_param):
params.append(new_param)
return new_param
class EncoderDecoderBase():
def __init__(self, state, rng, parent):
self.rng = rng
self.parent = parent
self.state = state
self.__dict__.update(state)
self.session_rec_activation = eval(self.session_rec_activation)
self.query_rec_activation = eval(self.query_rec_activation)
self.params = []
class Encoder(EncoderDecoderBase):
def init_params(self):
""" sent weights """
self.W_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='W_emb'))
self.W_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in'))
self.W_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh'))
self.b_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_hh'))
if self.query_step_type == "gated":
self.W_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_r'))
self.W_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_z'))
self.W_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh_r'))
self.W_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh_z'))
self.b_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_z'))
self.b_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_r'))
""" Context weights """
self.Ws_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in'))
self.Ws_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh'))
self.bs_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_hh'))
if self.session_step_type == "gated":
self.Ws_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in_r'))
self.Ws_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in_z'))
self.Ws_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh_r'))
self.Ws_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh_z'))
self.bs_z = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_z'))
self.bs_r = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_r'))
def plain_query_step(self, x_t, m_t, h_tm1, hr_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
h_t = self.query_rec_activation(T.dot(x_t, self.W_in) + T.dot(hr_tm1, self.W_hh) + self.b_hh)
hr_t = m_t * h_t
return h_t, hr_t,
def gated_query_step(self, x_t, m_t, h_tm1, hr_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
r_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_r) + T.dot(hr_tm1, self.W_hh_r) + self.b_r)
z_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_z) + T.dot(hr_tm1, self.W_hh_z) + self.b_z)
h_tilde = self.query_rec_activation(T.dot(x_t, self.W_in) + T.dot(r_t * hr_tm1, self.W_hh) + self.b_hh)
h_t = (np.float32(1.0) - z_t) * hr_tm1 + z_t * h_tilde
hr_t = m_t * h_t
# return both reset state and non-reset state
return h_t, hr_t, r_t, z_t, h_tilde
def plain_session_step(self, h_t, m_t, hs_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hs_update = self.session_rec_activation(T.dot(h_t, self.Ws_in) + T.dot(hs_tm1, self.Ws_hh) + self.bs_hh)
hs_t = (m_t) * hs_tm1 + (1 - m_t) * hs_update
return hs_t,
def gated_session_step(self, h_t, m_t, hs_tm1):
rs_t = T.nnet.sigmoid(T.dot(h_t, self.Ws_in_r) + T.dot(hs_tm1, self.Ws_hh_r) + self.bs_r)
zs_t = T.nnet.sigmoid(T.dot(h_t, self.Ws_in_z) + T.dot(hs_tm1, self.Ws_hh_z) + self.bs_z)
hs_tilde = self.session_rec_activation(T.dot(h_t, self.Ws_in) + T.dot(rs_t * hs_tm1, self.Ws_hh) + self.bs_hh)
hs_update = (np.float32(1.) - zs_t) * hs_tm1 + zs_t * hs_tilde
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hs_t = (m_t) * hs_tm1 + (1 - m_t) * hs_update
return hs_t, hs_tilde, rs_t, zs_t
def approx_embedder(self, x):
return self.W_emb[x]
def build_encoder(self, x, xmask=None, **kwargs):
one_step = False
if len(kwargs):
one_step = True
# if x.ndim == 2 then
# x = (n_steps, batch_size)
if x.ndim == 2:
batch_size = x.shape[1]
# else x = (word_1, word_2, word_3, ...)
# or x = (last_word_1, last_word_2, last_word_3, ..)
        # in this case batch_size is 1
else:
batch_size = 1
# if it is not one_step then we initialize everything to 0
if not one_step:
h_0 = T.alloc(np.float32(0), batch_size, self.qdim)
hr_0 = T.alloc(np.float32(0), batch_size, self.qdim)
hs_0 = T.alloc(np.float32(0), batch_size, self.sdim)
        # in sampling mode (i.e. one step) we require the previous hidden states to be passed in kwargs
else:
# in this case x.ndim != 2
assert x.ndim != 2
assert 'prev_h' in kwargs
assert 'prev_hr' in kwargs
assert 'prev_hs' in kwargs
h_0 = kwargs['prev_h']
hr_0 = kwargs['prev_hr']
hs_0 = kwargs['prev_hs']
xe = self.approx_embedder(x)
        if xmask is None:
xmask = T.neq(x, self.eoq_sym)
# Gated Encoder
if self.query_step_type == "gated":
f_enc = self.gated_query_step
o_enc_info = [h_0, hr_0, None, None, None]
else:
f_enc = self.plain_query_step
o_enc_info = [h_0, hr_0]
if self.session_step_type == "gated":
f_hier = self.gated_session_step
o_hier_info = [hs_0, None, None, None]
else:
f_hier = self.plain_session_step
o_hier_info = [hs_0]
# Run through all the sentence (encode everything)
if not one_step:
_res, _ = theano.scan(
f_enc, sequences=[xe, xmask], outputs_info=o_enc_info)
# Make just one step further
else:
_res = f_enc(xe, xmask, h_0, hr_0)
# Get the hidden state sequence
h = _res[0]
hr = _res[1]
# All hierarchical sentence
# The hs sequence is based on the original mask
if not one_step:
_res, _ = theano.scan(
f_hier, sequences=[h, xmask], outputs_info=o_hier_info)
# Just one step further
else:
_res = f_hier(h, xmask, hs_0)
if isinstance(_res, list) or isinstance(_res, tuple):
hs = _res[0]
else:
hs = _res
return (h, hr), hs, (_res[2], _res[3])
def __init__(self, state, rng, parent):
EncoderDecoderBase.__init__(self, state, rng, parent)
self.init_params()
class Decoder(EncoderDecoderBase):
EVALUATION = 0
BEAM_SEARCH = 1
def __init__(self, state, rng, parent, encoder):
EncoderDecoderBase.__init__(self, state, rng, parent)
# Take as input the encoder instance for the embeddings..
# To modify in the future
self.encoder = encoder
self.trng = MRG_RandomStreams(self.seed)
self.init_params()
def init_params(self):
""" Decoder weights """
self.Wd_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='Wd_emb'))
self.Wd_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh'))
self.Wd_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in'))
self.bd_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_hh'))
self.Wd_s_0 = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_0'))
self.bd_s_0 = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_s_0'))
if self.decoder_bias_type == 'all':
self.Wd_s_q = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_q'))
if self.query_step_type == "gated":
self.Wd_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in_r'))
self.Wd_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in_z'))
self.Wd_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh_r'))
self.Wd_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh_z'))
self.bd_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_r'))
self.bd_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_z'))
if self.decoder_bias_type == 'all':
self.Wd_s_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_z'))
self.Wd_s_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_r'))
out_target_dim = self.qdim
if not self.maxout_out:
out_target_dim = self.rankdim
self.Wd_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, out_target_dim), name='Wd_out'))
self.bd_out = add_to_params(self.params, theano.shared(value=np.zeros((self.idim,), dtype='float32'), name='bd_out'))
# Set up deep output
if self.deep_out:
self.Wd_e_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, out_target_dim), name='Wd_e_out'))
self.bd_e_out = add_to_params(self.params, theano.shared(value=np.zeros((out_target_dim,), dtype='float32'), name='bd_e_out'))
if self.decoder_bias_type != 'first':
self.Wd_s_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, out_target_dim), name='Wd_s_out'))
""" Rank """
if hasattr(self, 'train_rank'):
self.Wr_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, 1), name='Wr_out'))
self.br_out = add_to_params(self.params, theano.shared(value=np.zeros((1,), dtype='float32'), name='br_out'))
def build_rank_layer(self, hs):
return T.dot(hs, self.Wr_out) + self.br_out
def build_output_layer(self, hs, xd, hd):
pre_activ = T.dot(hd, self.Wd_out)
if self.deep_out:
pre_activ += T.dot(xd, self.Wd_e_out) + self.bd_e_out
if self.decoder_bias_type != 'first':
pre_activ += T.dot(hs, self.Wd_s_out)
# ^ if bias all, bias the deep output
if self.maxout_out:
pre_activ = Maxout(2)(pre_activ)
return pre_activ
def build_next_probs_predictor(self, hs, x, prev_hd):
"""
        Return output probabilities given prev_words x, hierarchical pass hs, and previous hd.
hs should always be the same (and should not be updated).
"""
return self.build_decoder(hs, x, mode=Decoder.BEAM_SEARCH, prev_hd=prev_hd)
def approx_embedder(self, x):
# Here we use the same embeddings learnt in the encoder !!!
return self.encoder.approx_embedder(x)
def output_softmax(self, pre_activ):
# returns a (timestep, bs, idim) matrix (huge)
return SoftMax(T.dot(pre_activ, self.Wd_emb.T) + self.bd_out)
def build_decoder(self, hs, x, xmask=None, y=None, y_neg=None, mode=EVALUATION, prev_hd=None, step_num=None):
# Check parameter consistency
if mode == Decoder.EVALUATION:
assert not prev_hd
assert y
else:
assert not y
assert prev_hd
# if mode == EVALUATION
# xd = (timesteps, batch_size, qdim)
#
# if mode != EVALUATION
# xd = (n_samples, dim)
xd = self.approx_embedder(x)
        if xmask is None:
xmask = T.neq(x, self.eoq_sym)
# we must zero out the </s> embedding
# i.e. the embedding x_{-1} is the 0 vector
        # as well as hd_{-1} which will be reset in the scan functions
if xd.ndim != 3:
assert mode != Decoder.EVALUATION
xd = (xd.dimshuffle((1, 0)) * xmask).dimshuffle((1, 0))
else:
assert mode == Decoder.EVALUATION
xd = (xd.dimshuffle((2,0,1)) * xmask).dimshuffle((1,2,0))
# Run the decoder
if mode == Decoder.EVALUATION:
hd_init = T.alloc(np.float32(0), x.shape[1], self.qdim)
else:
hd_init = prev_hd
if self.query_step_type == "gated":
f_dec = self.gated_step
o_dec_info = [hd_init, None, None, None]
else:
f_dec = self.plain_step
o_dec_info = [hd_init]
# If the mode of the decoder is EVALUATION
# then we evaluate by default all the sentence
# xd - i.e. xd.ndim == 3, xd = (timesteps, batch_size, qdim)
if mode == Decoder.EVALUATION:
_res, _ = theano.scan(f_dec,
sequences=[xd, xmask, hs],\
outputs_info=o_dec_info)
# else we evaluate only one step of the recurrence using the
# previous hidden states and the previous computed hierarchical
# states.
else:
_res = f_dec(xd, xmask, hs, prev_hd)
if isinstance(_res, list) or isinstance(_res, tuple):
hd = _res[0]
else:
hd = _res
pre_activ = self.build_output_layer(hs, xd, hd)
# EVALUATION : Return target_probs + all the predicted ranks
# target_probs.ndim == 3
if mode == Decoder.EVALUATION:
target_probs = GrabProbs(self.output_softmax(pre_activ), y)
return target_probs, hd, _res
# BEAM_SEARCH : Return output (the softmax layer) + the new hidden states
elif mode == Decoder.BEAM_SEARCH:
return self.output_softmax(pre_activ), hd
def gated_step(self, xd_t, m_t, hs_t, hd_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hd_tm1 = (m_t) * hd_tm1 + (1 - m_t) * self.query_rec_activation(T.dot(hs_t, self.Wd_s_0) + self.bd_s_0)
# hd_{t - 1} = tanh(W_s_0 hs_t + bd_s_0) else hd_{t - 1} is left unchanged (m_t = 1)
# In the 'all' decoder bias type each hidden state of the decoder
# RNN receives the hs_t vector as bias without modification
if self.decoder_bias_type == 'all':
rd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_r) + T.dot(hd_tm1, self.Wd_hh_r) + T.dot(hs_t, self.Wd_s_r) + self.bd_r)
zd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_z) + T.dot(hd_tm1, self.Wd_hh_z) + T.dot(hs_t, self.Wd_s_z) + self.bd_z)
hd_tilde = self.query_rec_activation(T.dot(xd_t, self.Wd_in)
+ T.dot(rd_t * hd_tm1, self.Wd_hh)
+ T.dot(hs_t, self.Wd_s_q)
+ self.bd_hh)
hd_t = (np.float32(1.) - zd_t) * hd_tm1 + zd_t * hd_tilde
output = (hd_t, rd_t, zd_t, hd_tilde)
else:
# Do not bias all the decoder (force to store very useful information in the first state)
rd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_r) + T.dot(hd_tm1, self.Wd_hh_r) + self.bd_r)
zd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_z) + T.dot(hd_tm1, self.Wd_hh_z) + self.bd_z)
hd_tilde = self.query_rec_activation(T.dot(xd_t, self.Wd_in)
+ T.dot(rd_t * hd_tm1, self.Wd_hh)
+ self.bd_hh)
hd_t = (np.float32(1.) - zd_t) * hd_tm1 + zd_t * hd_tilde
output = (hd_t, rd_t, zd_t, hd_tilde)
return output
def plain_step(self, xd_t, m_t, hs_t, hd_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
# We already assume that xd are zeroed out
hd_tm1 = (m_t) * hd_tm1 + (1 - m_t) * self.query_rec_activation(T.dot(hs_t, self.Wd_s_0) + self.bd_s_0)
# ^ iff x_{t - 1} = </s> (m_t = 0) then x_{t-1} = 0
# and hd_{t - 1} = 0 else hd_{t - 1} is left unchanged (m_t = 1)
if self.decoder_bias_type == 'first':
# Do not bias all the decoder (force to store very useful information in the first state)
hd_t = self.query_rec_activation( T.dot(xd_t, self.Wd_in)
+ T.dot(hd_tm1, self.Wd_hh)
+ self.bd_hh )
output = (hd_t,)
elif self.decoder_bias_type == 'all':
hd_t = self.query_rec_activation( T.dot(xd_t, self.Wd_in)
+ T.dot(hd_tm1, self.Wd_hh)
+ T.dot(hs_t, self.Wd_s_q)
+ self.bd_hh )
output = (hd_t,)
return output
####
class SessionEncoderDecoder(Model):
def indices_to_words(self, seq, exclude_start_end=False):
"""
        Converts a sequence of word indices back to a list of words.
        Conversion stops at eos_sym; the query start/end symbols can
        optionally be excluded via exclude_start_end.
"""
def convert():
for word_index in seq:
if word_index > len(self.idx_to_str):
raise ValueError('Word index is too large for the model vocabulary!')
if word_index == self.eos_sym:
break
if not exclude_start_end or (word_index != self.eoq_sym and word_index != self.soq_sym):
yield self.idx_to_str[word_index]
return list(convert())
def words_to_indices(self, seq):
"""
Converts a list of words to a list
of word ids. Use unk_sym if a word is not
known.
"""
return [self.str_to_idx.get(word, self.unk_sym) for word in seq]
def compute_updates(self, training_cost, params):
updates = []
grads = T.grad(training_cost, params)
grads = OrderedDict(zip(params, grads))
# Clip stuff
c = numpy.float32(self.cutoff)
clip_grads = []
norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))
normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))
notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))
for p, g in grads.items():
clip_grads.append((p, T.switch(notfinite, numpy.float32(.1) * p, g * normalization)))
grads = OrderedDict(clip_grads)
if self.updater == 'adagrad':
updates = Adagrad(grads, self.lr)
elif self.updater == 'sgd':
raise Exception("Sgd not implemented!")
elif self.updater == 'adadelta':
updates = Adadelta(grads)
elif self.updater == 'rmsprop':
updates = RMSProp(grads, self.lr)
elif self.updater == 'adam':
updates = Adam(grads)
else:
raise Exception("Updater not understood!")
return updates
def build_train_function(self):
if not hasattr(self, 'train_fn'):
# Compile functions
logger.debug("Building train function")
self.train_fn = theano.function(
inputs=[self.x_data, self.x_ranks, self.x_max_length, self.x_cost_mask],
outputs=self.training_cost, updates=self.updates, name="train_fn")
return self.train_fn
def build_eval_function(self):
if not hasattr(self, 'eval_fn'):
# Compile functions
logger.debug("Building evaluation function")
self.eval_fn = theano.function(inputs=[self.x_data, self.x_ranks, self.x_max_length, self.x_cost_mask],
outputs=self.training_cost, name="eval_fn")
return self.eval_fn
def build_score_function(self):
if not hasattr(self, 'score_fn'):
self.score_fn = theano.function(
inputs=[self.x_data, self.x_max_length],
outputs=[self.per_example_cost],
name="score_fn")
return self.score_fn
def build_rank_prediction_function(self):
if not hasattr(self, 'rank_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
ranks = self.decoder.build_rank_layer(hs)
self.rank_fn = theano.function(
inputs=[self.x_data],
outputs=[ranks],
name="rank_fn")
return self.rank_fn
def build_get_states_function(self):
if not hasattr(self, 'get_states_fn'):
# Compile functions
logger.debug("Get states of the network")
outputs = [self.h, self.hs, self.hd, self.rs, self.us] + [x for x in self.decoder_states]
self.get_states_fn = theano.function(inputs=[self.x_data, self.x_max_length],
outputs=outputs, name="get_states_fn")
return self.get_states_fn
def build_next_probs_function(self):
if not hasattr(self, 'next_probs_fn'):
outputs, hd = self.decoder.build_next_probs_predictor(
self.beam_hs, self.beam_source, prev_hd=self.beam_hd)
self.next_probs_fn = theano.function(
inputs=[self.beam_hs, self.beam_source, self.beam_hd],
outputs=[outputs, hd],
name="next_probs_fn")
return self.next_probs_fn
def build_first_vector(self):
if not hasattr(self, 'first_vec_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
hd0 = self.decoder.query_rec_activation(T.dot(hs, self.decoder.Wd_s_0) + self.decoder.bd_s_0)
self.first_vec_fn = theano.function(inputs=[self.x_data],
outputs=[h, hs, hd0], name="first_vec_fn")
return self.first_vec_fn
def build_encoder_function(self):
if not hasattr(self, 'encoder_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
self.encoder_fn = theano.function(inputs=[self.x_data],
outputs=[h, hr, hs], name="encoder_fn")
return self.encoder_fn
def __init__(self, state):
Model.__init__(self)
self.state = state
# Compatibility towards older models
self.__dict__.update(state)
self.rng = numpy.random.RandomState(state['seed'])
# Load dictionary
raw_dict = cPickle.load(open(self.dictionary, 'r'))
# Probabilities for each term in the corpus
self.noise_probs = [x[2] for x in sorted(raw_dict, key=operator.itemgetter(1))]
self.noise_probs = numpy.array(self.noise_probs, dtype='float64')
self.noise_probs /= numpy.sum(self.noise_probs)
self.noise_probs = self.noise_probs ** 0.75
self.noise_probs /= numpy.sum(self.noise_probs)
self.t_noise_probs = theano.shared(self.noise_probs.astype('float32'), 't_noise_probs')
# Dictionaries to convert str to idx and vice-versa
self.str_to_idx = dict([(tok, tok_id) for tok, tok_id, _ in raw_dict])
self.idx_to_str = dict([(tok_id, tok) for tok, tok_id, freq in raw_dict])
if '</q>' not in self.str_to_idx \
or '</s>' not in self.str_to_idx:
raise Exception("Error, malformed dictionary!")
# Number of words in the dictionary
self.idim = len(self.str_to_idx)
self.state['idim'] = self.idim
logger.debug("Initializing encoder")
self.encoder = Encoder(self.state, self.rng, self)
logger.debug("Initializing decoder")
self.decoder = Decoder(self.state, self.rng, self, self.encoder)
# Init params
self.params = self.encoder.params + self.decoder.params
assert len(set(self.params)) == (len(self.encoder.params) + len(self.decoder.params))
self.y_neg = T.itensor3('y_neg')
self.x_data = T.imatrix('x_data')
self.x_ranks = T.imatrix('x_ranks')
self.x_cost_mask = T.matrix('cost_mask')
self.x_max_length = T.iscalar('x_max_length')
# The training is done with a trick. We append a special </q> at the beginning of the dialog
        # so that we can also predict the first sentence in the dialog, starting from the dialog beginning token (</q>).
self.aug_x_data = T.concatenate([T.alloc(np.int32(self.eoq_sym), 1, self.x_data.shape[1]), self.x_data])
training_x = self.aug_x_data[:self.x_max_length]
training_y = self.aug_x_data[1:self.x_max_length+1]
training_ranks = self.x_ranks[:self.x_max_length-1].flatten()
training_ranks_mask = T.neq(training_ranks, 0).flatten()
# Here we find the end-of-sentence tokens in the minibatch.
training_hs_mask = T.neq(training_x, self.eoq_sym)
training_x_cost_mask = self.x_cost_mask[:self.x_max_length].flatten()
# Backward compatibility
if 'decoder_bias_type' in self.state:
logger.debug("Decoder bias type {}".format(self.decoder_bias_type))
logger.info("Build encoder")
(self.h, _), self.hs, (self.rs, self.us) = \
self.encoder.build_encoder(training_x, xmask=training_hs_mask)
logger.info("Build decoder (EVAL)")
target_probs, self.hd, self.decoder_states = \
self.decoder.build_decoder(self.hs, training_x, xmask=training_hs_mask, \
y=training_y, mode=Decoder.EVALUATION)
logger.info("Build rank predictor")
self.predicted_ranks = self.decoder.build_rank_layer(self.hs)
# Prediction cost and rank cost
self.per_example_cost = -T.log2(target_probs).reshape((self.x_max_length, self.x_data.shape[1]))
self.rank_cost = T.sum(((self.predicted_ranks[1:].flatten() - training_ranks) ** 2) * (training_ranks_mask)) / T.sum(training_ranks_mask)
self.training_cost = T.sum(-T.log2(target_probs) * training_x_cost_mask) + np.float32(self.lambda_rank) * self.rank_cost
self.updates = self.compute_updates(self.training_cost / training_x.shape[1], self.params)
# Beam-search variables
self.beam_source = T.lvector("beam_source")
self.beam_hs = T.matrix("beam_hs")
self.beam_step_num = T.lscalar("beam_step_num")
self.beam_hd = T.matrix("beam_hd")
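# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original model): the gated_*_step
# methods above implement a GRU-style update. The standalone helper below
# mirrors the same equations (reset gate r_t, update gate z_t, candidate
# h_tilde, interpolated h_t) using plain NumPy on made-up toy dimensions, so
# the recurrence can be sanity-checked by hand; all names and sizes here are
# arbitrary and not taken from the original code.
# ---------------------------------------------------------------------------
def _gru_step_reference(x_t, h_tm1, W_in, W_hh, W_in_r, W_hh_r, W_in_z, W_hh_z,
                        b_hh, b_r, b_z):
    sigmoid = lambda v: 1.0 / (1.0 + np.exp(-v))
    r_t = sigmoid(np.dot(x_t, W_in_r) + np.dot(h_tm1, W_hh_r) + b_r)
    z_t = sigmoid(np.dot(x_t, W_in_z) + np.dot(h_tm1, W_hh_z) + b_z)
    h_tilde = np.tanh(np.dot(x_t, W_in) + np.dot(r_t * h_tm1, W_hh) + b_hh)
    return (1.0 - z_t) * h_tm1 + z_t * h_tilde
if __name__ == "__main__":
    # Toy shapes: 1 example, 4-dim embedding (rankdim), 6-dim hidden state (qdim).
    rng_ref = np.random.RandomState(0)
    x_t = rng_ref.randn(1, 4).astype('float32')
    h_tm1 = np.zeros((1, 6), dtype='float32')
    W_in, W_in_r, W_in_z = (rng_ref.randn(4, 6).astype('float32') for _ in range(3))
    W_hh, W_hh_r, W_hh_z = (rng_ref.randn(6, 6).astype('float32') for _ in range(3))
    b_hh = b_r = b_z = np.zeros((6,), dtype='float32')
    print(_gru_step_reference(x_t, h_tm1, W_in, W_hh, W_in_r, W_hh_r,
                              W_in_z, W_hh_z, b_hh, b_r, b_z))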
|
nilq/baby-python
|
python
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import re
import string
import shutil
import time
from collections import Counter
import pexpect
from wlauto import BigLittleDevice, RuntimeParameter, Parameter, settings
from wlauto.exceptions import ConfigError, DeviceError
from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
from wlauto.utils.serial_port import open_serial_connection
from wlauto.utils.misc import merge_dicts
from wlauto.utils.types import boolean
BOOT_FIRMWARE = {
'uefi': {
'SCC_0x010': '0x000003E0',
'reboot_attempts': 0,
},
'bootmon': {
'SCC_0x010': '0x000003D0',
'reboot_attempts': 2,
},
}
MODES = {
'mp_a7_only': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a7',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7', 'a7'],
},
'mp_a7_bootcluster': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a7bc',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
'mp_a15_only': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a15',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15'],
},
'mp_a15_bootcluster': {
'images_file': 'images_mp.txt',
'dtb': 'mp_a15bc',
'initrd': 'init_mp',
'kernel': 'kern_mp',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15', 'a7', 'a7', 'a7'],
},
'iks_cpu': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x1032F003',
'cpus': ['a7', 'a7'],
},
'iks_a15': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a15', 'a15'],
},
'iks_a7': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7'],
},
'iks_ns_a15': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
'iks_ns_a7': {
'images_file': 'images_iks.txt',
'dtb': 'iks',
'initrd': 'init_iks',
'kernel': 'kern_iks',
'SCC_0x700': '0x0032F003',
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
},
}
A7_ONLY_MODES = ['mp_a7_only', 'iks_a7', 'iks_cpu']
A15_ONLY_MODES = ['mp_a15_only', 'iks_a15']
DEFAULT_A7_GOVERNOR_TUNABLES = {
'interactive': {
'above_hispeed_delay': 80000,
'go_hispeed_load': 85,
'hispeed_freq': 800000,
'min_sample_time': 80000,
'timer_rate': 20000,
},
'ondemand': {
'sampling_rate': 50000,
},
}
DEFAULT_A15_GOVERNOR_TUNABLES = {
'interactive': {
'above_hispeed_delay': 80000,
'go_hispeed_load': 85,
'hispeed_freq': 1000000,
'min_sample_time': 80000,
'timer_rate': 20000,
},
'ondemand': {
'sampling_rate': 50000,
},
}
ADB_SHELL_TIMEOUT = 30
class _TC2DeviceConfig(object):
name = 'TC2 Configuration'
device_name = 'TC2'
def __init__(self, # pylint: disable=R0914,W0613
root_mount='/media/VEMSD',
disable_boot_configuration=False,
boot_firmware=None,
mode=None,
fs_medium='usb',
device_working_directory='/data/local/usecase',
bm_image='bm_v519r.axf',
serial_device='/dev/ttyS0',
serial_baud=38400,
serial_max_timeout=600,
serial_log=sys.stdout,
init_timeout=120,
always_delete_uefi_entry=True,
psci_enable=True,
host_working_directory=None,
a7_governor_tunables=None,
a15_governor_tunables=None,
adb_name=None,
# Compatibility with other android devices.
enable_screen_check=None, # pylint: disable=W0613
**kwargs
):
self.root_mount = root_mount
self.disable_boot_configuration = disable_boot_configuration
if not disable_boot_configuration:
self.boot_firmware = boot_firmware or 'uefi'
self.default_mode = mode or 'mp_a7_bootcluster'
elif boot_firmware or mode:
raise ConfigError('boot_firmware and/or mode cannot be specified when disable_boot_configuration is enabled.')
self.mode = self.default_mode
self.working_directory = device_working_directory
self.serial_device = serial_device
self.serial_baud = serial_baud
self.serial_max_timeout = serial_max_timeout
self.serial_log = serial_log
self.bootmon_prompt = re.compile('^([KLM]:\\\)?>', re.MULTILINE)
self.fs_medium = fs_medium.lower()
self.bm_image = bm_image
self.init_timeout = init_timeout
self.always_delete_uefi_entry = always_delete_uefi_entry
self.psci_enable = psci_enable
self.resource_dir = os.path.join(os.path.dirname(__file__), 'resources')
self.board_dir = os.path.join(self.root_mount, 'SITE1', 'HBI0249A')
self.board_file = 'board.txt'
self.board_file_bak = 'board.bak'
self.images_file = 'images.txt'
self.host_working_directory = host_working_directory or settings.meta_directory
if not a7_governor_tunables:
self.a7_governor_tunables = DEFAULT_A7_GOVERNOR_TUNABLES
else:
self.a7_governor_tunables = merge_dicts(DEFAULT_A7_GOVERNOR_TUNABLES, a7_governor_tunables)
if not a15_governor_tunables:
self.a15_governor_tunables = DEFAULT_A15_GOVERNOR_TUNABLES
else:
self.a15_governor_tunables = merge_dicts(DEFAULT_A15_GOVERNOR_TUNABLES, a15_governor_tunables)
self.adb_name = adb_name
@property
def src_images_template_file(self):
return os.path.join(self.resource_dir, MODES[self.mode]['images_file'])
@property
def src_images_file(self):
return os.path.join(self.host_working_directory, 'images.txt')
@property
def src_board_template_file(self):
return os.path.join(self.resource_dir, 'board_template.txt')
@property
def src_board_file(self):
return os.path.join(self.host_working_directory, 'board.txt')
@property
def kernel_arguments(self):
kernel_args = ' console=ttyAMA0,38400 androidboot.console=ttyAMA0 selinux=0'
if self.fs_medium == 'usb':
kernel_args += ' androidboot.hardware=arm-versatileexpress-usb'
if 'iks' in self.mode:
kernel_args += ' no_bL_switcher=0'
return kernel_args
@property
def kernel(self):
return MODES[self.mode]['kernel']
@property
def initrd(self):
return MODES[self.mode]['initrd']
@property
def dtb(self):
return MODES[self.mode]['dtb']
@property
def SCC_0x700(self):
return MODES[self.mode]['SCC_0x700']
@property
def SCC_0x010(self):
return BOOT_FIRMWARE[self.boot_firmware]['SCC_0x010']
@property
def reboot_attempts(self):
return BOOT_FIRMWARE[self.boot_firmware]['reboot_attempts']
def validate(self):
valid_modes = MODES.keys()
if self.mode not in valid_modes:
message = 'Invalid mode: {}; must be in {}'.format(
self.mode, valid_modes)
raise ConfigError(message)
valid_boot_firmware = BOOT_FIRMWARE.keys()
if self.boot_firmware not in valid_boot_firmware:
message = 'Invalid boot_firmware: {}; must be in {}'.format(
self.boot_firmware,
valid_boot_firmware)
raise ConfigError(message)
if self.fs_medium not in ['usb', 'sdcard']:
            message = 'Invalid filesystem medium: {}; allowed values are: usb, sdcard'.format(self.fs_medium)
raise ConfigError(message)
class TC2Device(BigLittleDevice):
name = 'TC2'
description = """
TC2 is a development board, which has three A7 cores and two A15 cores.
TC2 has a number of boot parameters which are:
:root_mount: Defaults to '/media/VEMSD'
:boot_firmware: It has only two boot firmware options, which are
uefi and bootmon. Defaults to 'uefi'.
:fs_medium: Defaults to 'usb'.
    :device_working_directory: The directory that WA will be using to copy
                               files to. Defaults to '/data/local/usecase'.
:serial_device: The serial device which TC2 is connected to. Defaults to
'/dev/ttyS0'.
:serial_baud: Defaults to 38400.
:serial_max_timeout: Serial timeout value in seconds. Defaults to 600.
:serial_log: Defaults to standard output.
    :init_timeout: The timeout in seconds to init the device. Defaults
                   to 120.
    :always_delete_uefi_entry: If true, the existing UEFI boot entry will be
                               deleted before booting. Defaults to True.
    :psci_enable: Whether PSCI should be enabled. Defaults to True.
:host_working_directory: The host working directory. Defaults to None.
:disable_boot_configuration: Disables boot configuration through images.txt and board.txt. When
this is ``True``, those two files will not be overwritten in VEMSD.
This option may be necessary if the firmware version in the ``TC2``
is not compatible with the templates in WA. Please note that enabling
                                 this will prevent you from being able to set ``boot_firmware`` and
                                 ``mode`` parameters. Defaults to ``False``.
    TC2 also supports a number of different boot modes, which are:
:mp_a7_only: Only the A7 cluster.
:mp_a7_bootcluster: Both A7 and A15 clusters, but it boots on A7
cluster.
:mp_a15_only: Only the A15 cluster.
:mp_a15_bootcluster: Both A7 and A15 clusters, but it boots on A15
                         cluster.
:iks_cpu: Only A7 cluster with only 2 cpus.
:iks_a15: Only A15 cluster.
:iks_a7: Same as iks_cpu
:iks_ns_a15: Both A7 and A15 clusters.
:iks_ns_a7: Both A7 and A15 clusters.
The difference between mp and iks is the scheduling policy.
TC2 takes the following runtime parameters
:a7_cores: Number of active A7 cores.
:a15_cores: Number of active A15 cores.
:a7_governor: CPUFreq governor for the A7 cluster.
:a15_governor: CPUFreq governor for the A15 cluster.
:a7_min_frequency: Minimum CPU frequency for the A7 cluster.
:a15_min_frequency: Minimum CPU frequency for the A15 cluster.
:a7_max_frequency: Maximum CPU frequency for the A7 cluster.
    :a15_max_frequency: Maximum CPU frequency for the A15 cluster.
    :irq_affinity: Which cluster will receive IRQs.
:cpuidle: Whether idle states should be enabled.
:sysfile_values: A dict mapping a complete file path to the value that
should be echo'd into it. By default, the file will be
subsequently read to verify that the value was written
into it with DeviceError raised otherwise. For write-only
files, this check can be disabled by appending a ``!`` to
the end of the file path.
"""
has_gpu = False
a15_only_modes = A15_ONLY_MODES
a7_only_modes = A7_ONLY_MODES
not_configurable_modes = ['iks_a7', 'iks_cpu', 'iks_a15']
parameters = [
Parameter('core_names', mandatory=False, override=True,
description='This parameter will be ignored for TC2'),
Parameter('core_clusters', mandatory=False, override=True,
description='This parameter will be ignored for TC2'),
]
runtime_parameters = [
RuntimeParameter('irq_affinity', lambda d, x: d.set_irq_affinity(x.lower()), lambda: None),
RuntimeParameter('cpuidle', lambda d, x: d.enable_idle_states() if boolean(x) else d.disable_idle_states(),
lambda d: d.get_cpuidle())
]
def get_mode(self):
return self.config.mode
def set_mode(self, mode):
if self._has_booted:
raise DeviceError('Attempting to set boot mode when already booted.')
valid_modes = MODES.keys()
if mode is None:
mode = self.config.default_mode
if mode not in valid_modes:
message = 'Invalid mode: {}; must be in {}'.format(mode, valid_modes)
raise ConfigError(message)
self.config.mode = mode
mode = property(get_mode, set_mode)
def _get_core_names(self):
return MODES[self.mode]['cpus']
def _set_core_names(self, value):
pass
core_names = property(_get_core_names, _set_core_names)
def _get_core_clusters(self):
seen = set([])
core_clusters = []
cluster_id = -1
for core in MODES[self.mode]['cpus']:
if core not in seen:
seen.add(core)
cluster_id += 1
core_clusters.append(cluster_id)
return core_clusters
def _set_core_clusters(self, value):
pass
core_clusters = property(_get_core_clusters, _set_core_clusters)
@property
def cpu_cores(self):
return MODES[self.mode]['cpus']
@property
def max_a7_cores(self):
return Counter(MODES[self.mode]['cpus'])['a7']
@property
def max_a15_cores(self):
return Counter(MODES[self.mode]['cpus'])['a15']
@property
def a7_governor_tunables(self):
return self.config.a7_governor_tunables
@property
def a15_governor_tunables(self):
return self.config.a15_governor_tunables
def __init__(self, **kwargs):
super(TC2Device, self).__init__()
self.config = _TC2DeviceConfig(**kwargs)
self.working_directory = self.config.working_directory
self._serial = None
self._has_booted = None
def boot(self, **kwargs): # NOQA
mode = kwargs.get('os_mode', None)
self._is_ready = False
self._has_booted = False
self.mode = mode
self.logger.debug('Booting in {} mode'.format(self.mode))
with open_serial_connection(timeout=self.config.serial_max_timeout,
port=self.config.serial_device,
baudrate=self.config.serial_baud) as target:
if self.config.boot_firmware == 'bootmon':
self._boot_using_bootmon(target)
elif self.config.boot_firmware == 'uefi':
self._boot_using_uefi(target)
else:
message = 'Unexpected boot firmware: {}'.format(self.config.boot_firmware)
raise ConfigError(message)
try:
target.sendline('')
self.logger.debug('Waiting for the Android prompt.')
target.expect(self.android_prompt, timeout=40) # pylint: disable=E1101
except pexpect.TIMEOUT:
# Try a second time before giving up.
self.logger.debug('Did not get Android prompt, retrying...')
target.sendline('')
target.expect(self.android_prompt, timeout=10) # pylint: disable=E1101
self.logger.debug('Waiting for OS to initialize...')
started_waiting_time = time.time()
        time.sleep(20)  # we know it's not going to take less time than this.
boot_completed, got_ip_address = False, False
while True:
try:
if not boot_completed:
target.sendline('getprop sys.boot_completed')
boot_completed = target.expect(['0.*', '1.*'], timeout=10)
if not got_ip_address:
target.sendline('getprop dhcp.eth0.ipaddress')
# regexes are processed in order, so ip regex has to
# come first (as we only want to match new line if we
                    # don't match the IP). We do a "not" to make the logic
# consistent with boot_completed.
got_ip_address = not target.expect(['[1-9]\d*.\d+.\d+.\d+', '\n'], timeout=10)
except pexpect.TIMEOUT:
pass # We have our own timeout -- see below.
if boot_completed and got_ip_address:
break
time.sleep(5)
if (time.time() - started_waiting_time) > self.config.init_timeout:
raise DeviceError('Timed out waiting for the device to initialize.')
self._has_booted = True
def connect(self):
if not self._is_ready:
if self.config.adb_name:
self.adb_name = self.config.adb_name # pylint: disable=attribute-defined-outside-init
else:
with open_serial_connection(timeout=self.config.serial_max_timeout,
port=self.config.serial_device,
baudrate=self.config.serial_baud) as target:
# Get IP address and push the Gator and PMU logger.
target.sendline('su') # as of Android v5.0.2, Linux does not boot into root shell
target.sendline('netcfg')
ipaddr_re = re.compile('eth0 +UP +(.+)/.+', re.MULTILINE)
target.expect(ipaddr_re)
output = target.after
match = re.search('eth0 +UP +(.+)/.+', output)
if not match:
raise DeviceError('Could not get adb IP address.')
ipaddr = match.group(1)
# Connect to device using adb.
target.expect(self.android_prompt) # pylint: disable=E1101
self.adb_name = ipaddr + ":5555" # pylint: disable=W0201
if self.adb_name in adb_list_devices():
adb_disconnect(self.adb_name)
adb_connect(self.adb_name)
self._is_ready = True
self.execute("input keyevent 82", timeout=ADB_SHELL_TIMEOUT)
self.execute("svc power stayon true", timeout=ADB_SHELL_TIMEOUT)
def disconnect(self):
adb_disconnect(self.adb_name)
self._is_ready = False
# TC2-specific methods. You should avoid calling these in
# Workloads/Instruments as that would tie them to TC2 (and if that is
# the case, then you should set the supported_devices parameter in the
    # Workload/Instrument accordingly). Most of these can be replaced with a
# call to set_runtime_parameters.
def get_cpuidle(self):
return self.get_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable')
def enable_idle_states(self):
"""
Fully enables idle states on TC2.
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
"""
# Enable C1 (cluster shutdown).
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 0, verify=False)
# Enable C0 on A15 cluster.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0, verify=False)
# Enable C0 on A7 cluster.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 1, verify=False)
def disable_idle_states(self):
"""
Disable idle states on TC2.
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
"""
# Disable C1 (cluster shutdown).
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 1, verify=False)
# Disable C0.
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0xFF, verify=False)
def set_irq_affinity(self, cluster):
"""
        Sets IRQ affinity to the specified cluster.
This method will only work if the device mode is mp_a7_bootcluster or
mp_a15_bootcluster. This operation does not make sense if there is only one
cluster active (all IRQs will obviously go to that), and it will not work for
IKS kernel because clusters are not exposed to sysfs.
:param cluster: must be either 'a15' or 'a7'.
"""
if self.config.mode not in ('mp_a7_bootcluster', 'mp_a15_bootcluster'):
raise ConfigError('Cannot set IRQ affinity with mode {}'.format(self.config.mode))
if cluster == 'a7':
self.execute('/sbin/set_irq_affinity.sh 0xc07', check_exit_code=False)
elif cluster == 'a15':
self.execute('/sbin/set_irq_affinity.sh 0xc0f', check_exit_code=False)
else:
            raise ConfigError('cluster must be either "a15" or "a7"; got {}'.format(cluster))
def _boot_using_uefi(self, target):
self.logger.debug('Booting using UEFI.')
self._wait_for_vemsd_mount(target)
self._setup_before_reboot()
self._perform_uefi_reboot(target)
# Get to the UEFI menu.
self.logger.debug('Waiting for UEFI default selection.')
target.sendline('reboot')
target.expect('The default boot selection will start in'.rstrip())
time.sleep(1)
target.sendline(''.rstrip())
# If delete every time is specified, try to delete entry.
if self.config.always_delete_uefi_entry:
self._delete_uefi_entry(target, entry='workload_automation_MP')
self.config.always_delete_uefi_entry = False
# Specify argument to be passed specifying that psci is (or is not) enabled
if self.config.psci_enable:
psci_enable = ' psci=enable'
else:
psci_enable = ''
# Identify the workload automation entry.
selection_pattern = r'\[([0-9]*)\] '
try:
target.expect(re.compile(selection_pattern + 'workload_automation_MP'), timeout=5)
wl_menu_item = target.match.group(1)
except pexpect.TIMEOUT:
self._create_uefi_entry(target, psci_enable, entry_name='workload_automation_MP')
# At this point the board should be rebooted so we need to retry to boot
self._boot_using_uefi(target)
else: # Did not time out.
try:
#Identify the boot manager menu item
target.expect(re.compile(selection_pattern + 'Boot Manager'))
boot_manager_menu_item = target.match.group(1)
#Update FDT
target.sendline(boot_manager_menu_item)
target.expect(re.compile(selection_pattern + 'Update FDT path'), timeout=15)
update_fdt_menu_item = target.match.group(1)
target.sendline(update_fdt_menu_item)
target.expect(re.compile(selection_pattern + 'NOR Flash .*'), timeout=15)
bootmonfs_menu_item = target.match.group(1)
target.sendline(bootmonfs_menu_item)
target.expect('File path of the FDT blob:')
target.sendline(self.config.dtb)
                #Return to main menu and boot from wl automation
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
return_to_main_menu_item = target.match.group(1)
target.sendline(return_to_main_menu_item)
target.sendline(wl_menu_item)
except pexpect.TIMEOUT:
raise DeviceError('Timed out')
def _setup_before_reboot(self):
if not self.config.disable_boot_configuration:
self.logger.debug('Performing pre-boot setup.')
substitution = {
'SCC_0x010': self.config.SCC_0x010,
'SCC_0x700': self.config.SCC_0x700,
}
with open(self.config.src_board_template_file, 'r') as fh:
template_board_txt = string.Template(fh.read())
with open(self.config.src_board_file, 'w') as wfh:
wfh.write(template_board_txt.substitute(substitution))
with open(self.config.src_images_template_file, 'r') as fh:
template_images_txt = string.Template(fh.read())
with open(self.config.src_images_file, 'w') as wfh:
wfh.write(template_images_txt.substitute({'bm_image': self.config.bm_image}))
shutil.copyfile(self.config.src_board_file,
os.path.join(self.config.board_dir, self.config.board_file))
shutil.copyfile(self.config.src_images_file,
os.path.join(self.config.board_dir, self.config.images_file))
os.system('sync') # make sure everything is flushed to microSD
else:
self.logger.debug('Boot configuration disabled proceeding with existing board.txt and images.txt.')
def _delete_uefi_entry(self, target, entry): # pylint: disable=R0201
"""
        This method deletes the UEFI boot entry given as a parameter.
        As a precondition, the serial port input must have been parsed at most
        up to the point BEFORE this entry is recognized (neither the entry nor
        the Boot Manager item has been consumed yet).
"""
try:
selection_pattern = r'\[([0-9]+)\] *'
try:
target.expect(re.compile(selection_pattern + entry), timeout=5)
wl_menu_item = target.match.group(1)
except pexpect.TIMEOUT:
return # Entry does not exist, nothing to delete here...
# Identify and select boot manager menu item
target.expect(selection_pattern + 'Boot Manager', timeout=15)
bootmanager_item = target.match.group(1)
target.sendline(bootmanager_item)
# Identify and select 'Remove entry'
target.expect(selection_pattern + 'Remove Boot Device Entry', timeout=15)
new_entry_item = target.match.group(1)
target.sendline(new_entry_item)
# Delete entry
target.expect(re.compile(selection_pattern + entry), timeout=5)
wl_menu_item = target.match.group(1)
target.sendline(wl_menu_item)
            # Return to main menu
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
return_to_main_menu_item = target.match.group(1)
target.sendline(return_to_main_menu_item)
except pexpect.TIMEOUT:
raise DeviceError('Timed out while deleting UEFI entry.')
def _create_uefi_entry(self, target, psci_enable, entry_name):
"""
Creates the default boot entry that is expected when booting in uefi mode.
"""
self._wait_for_vemsd_mount(target)
try:
selection_pattern = '\[([0-9]+)\] *'
# Identify and select boot manager menu item.
target.expect(selection_pattern + 'Boot Manager', timeout=15)
bootmanager_item = target.match.group(1)
target.sendline(bootmanager_item)
# Identify and select 'add new entry'.
target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15)
new_entry_item = target.match.group(1)
target.sendline(new_entry_item)
# Identify and select BootMonFs.
target.expect(selection_pattern + 'NOR Flash .*', timeout=15)
BootMonFs_item = target.match.group(1)
target.sendline(BootMonFs_item)
# Specify the parameters of the new entry.
target.expect('.+the kernel', timeout=5)
target.sendline(self.config.kernel) # kernel path
target.expect('Has FDT support\?.*\[y\/n\].*', timeout=5)
time.sleep(0.5)
target.sendline('y') # Has Fdt support? -> y
target.expect('Add an initrd.*\[y\/n\].*', timeout=5)
time.sleep(0.5)
target.sendline('y') # add an initrd? -> y
target.expect('.+the initrd.*', timeout=5)
time.sleep(0.5)
target.sendline(self.config.initrd) # initrd path
target.expect('.+to the binary.*', timeout=5)
time.sleep(0.5)
_slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary
time.sleep(0.5)
target.expect('.+new Entry.+', timeout=5)
_slow_sendline(target, entry_name) # Entry name
target.expect('Choice.+', timeout=15)
time.sleep(2)
except pexpect.TIMEOUT:
raise DeviceError('Timed out while creating UEFI entry.')
self._perform_uefi_reboot(target)
def _perform_uefi_reboot(self, target):
self._wait_for_vemsd_mount(target)
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
def _wait_for_vemsd_mount(self, target, timeout=100):
attempts = 1 + self.config.reboot_attempts
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
return
self.logger.debug('Waiting for VEMSD to mount...')
for i in xrange(attempts):
if i: # Do not reboot on the first attempt.
target.sendline('reboot')
target.sendline('usb_on')
for _ in xrange(timeout):
time.sleep(1)
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
return
raise DeviceError('Timed out waiting for VEMSD to mount.')
def _boot_using_bootmon(self, target):
"""
        This method boots TC2 using the bootmon interface.
"""
self.logger.debug('Booting using bootmon.')
try:
self._wait_for_vemsd_mount(target, timeout=20)
except DeviceError:
# OK, something's wrong. Reboot the board and try again.
self.logger.debug('VEMSD not mounted, attempting to power cycle device.')
target.sendline(' ')
state = target.expect(['Cmd> ', self.config.bootmon_prompt, self.android_prompt]) # pylint: disable=E1101
if state == 0 or state == 1:
# Reboot - Bootmon
target.sendline('reboot')
target.expect('Powering up system...')
elif state == 2:
target.sendline('reboot -n')
target.expect('Powering up system...')
else:
raise DeviceError('Unexpected board state {}; should be 0, 1 or 2'.format(state))
self._wait_for_vemsd_mount(target)
self._setup_before_reboot()
# Reboot - Bootmon
self.logger.debug('Rebooting into bootloader...')
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
target.expect('Powering up system...')
target.expect(self.config.bootmon_prompt)
# Wait for VEMSD to mount
self._wait_for_vemsd_mount(target)
#Boot Linux - Bootmon
target.sendline('fl linux fdt ' + self.config.dtb)
target.expect(self.config.bootmon_prompt)
target.sendline('fl linux initrd ' + self.config.initrd)
target.expect(self.config.bootmon_prompt)
#Workaround TC2 bootmon serial issue for loading large initrd blob
target.sendline(' ')
target.expect(self.config.bootmon_prompt)
target.sendline('fl linux boot ' + self.config.kernel + self.config.kernel_arguments)
# Utility functions.
def _slow_sendline(target, line):
for c in line:
target.send(c)
time.sleep(0.1)
target.sendline('')
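# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Workload Automation): it reproduces how the
# core_clusters property above derives cluster ids from the 'cpus' lists in
# MODES -- each newly seen core type gets the next cluster id. Running it
# assumes the wlauto imports at the top of this file resolve.
# ---------------------------------------------------------------------------
def _derive_core_clusters(cpus):
    seen, clusters, cluster_id = set(), [], -1
    for core in cpus:
        if core not in seen:
            seen.add(core)
            cluster_id += 1
        clusters.append(cluster_id)
    return clusters
if __name__ == '__main__':
    for mode_name in sorted(MODES):
        cpus = MODES[mode_name]['cpus']
        print('{}: {} -> {}'.format(mode_name, cpus, _derive_core_clusters(cpus)))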
|
nilq/baby-python
|
python
|
# Source file name
SOURCE_FILE = "{filename}.hs"
# Output (compiled program) file name
OUTPUT_FILE = "{filename}.out"
# Compile command line
COMPILE = "ghc {source} -o {output} {extra}"
# Run command line
RUN = 'sh -c "./{program} {redirect}"'
# Display name
DISPLAY = "Haskell"
# Version
VERSION = "GHC 8.0.2"
# Ace.js mode
ACE_MODE = "haskell"
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.7 on 2019-10-03 20:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0017_auto_20190921_1849'),
]
operations = [
migrations.RemoveField(
model_name='estruturacurricular',
name='ano_periodo',
),
migrations.RemoveField(
model_name='estruturacurricular',
name='sigla',
),
migrations.AddField(
model_name='estruturacurricular',
name='ano_entrada_vigor',
field=models.IntegerField(default=2019),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_atividade_obrigatoria',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_complementar_minima',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_ideal_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_maxima_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_minima_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_nao_atividade_obrigatoria',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_optativas_minima',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='ch_total_minima',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_ideal_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_maximo_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_minimo_semestre',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_nao_atividade_obrigatorio',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='cr_total_minimo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='curso',
field=models.ForeignKey(default=7191770, on_delete=django.db.models.deletion.PROTECT, to='core.Curso'),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='id_curriculo',
field=models.IntegerField(default=0, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='max_eletivos',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='meses_conclusao_ideal',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='meses_conclusao_maximo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='meses_conclusao_minimo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='observacao',
field=models.TextField(default='', max_length=500),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='periodo_entrada_vigor',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='semestre_conclusao_ideal',
field=models.IntegerField(default=8),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='semestre_conclusao_maximo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='estruturacurricular',
name='semestre_conclusao_minimo',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AlterField(
model_name='estruturacurricular',
name='codigo',
field=models.CharField(max_length=10, unique=True),
),
]
|
nilq/baby-python
|
python
|
import torch
from torch.optim import Adam,SGD
from opt import opt
import math
import random
import collections
from torch.utils.data import sampler
import torch.nn as nn
def extract_feature( model, loader):
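    # Sums the features of the original and horizontally flipped inputs
    # (flip test-time augmentation), L2-normalises the result, and
    # concatenates the per-batch features over the whole loader.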
features = torch.FloatTensor()
for (inputs, labels) in loader:
ff = torch.FloatTensor(inputs.size(0),2048).zero_()
for i in range(2):
if i == 1:
inputs = inputs.index_select(3, torch.arange(inputs.size(3) - 1, -1, -1).long())
input_img = inputs.to('cuda')
outputs = model(input_img)
f = outputs[0].data.cpu()
ff = ff + f
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
ff = ff.div(fnorm.expand_as(ff))
features = torch.cat((features, ff), 0)
return features
def get_optimizer(net):
if opt.freeze:
for p in net.parameters():
p.requires_grad = True
for q in net.backbone.parameters():
q.requires_grad = False
optimizer = Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=opt.lr, weight_decay=5e-4,amsgrad=True)
else:
#optimizer = SGD(net.parameters(), lr=opt.lr,momentum=0.9, weight_decay=5e-4)
optimizer = Adam(net.parameters(), lr=opt.lr, weight_decay=5e-4, amsgrad=True)
return optimizer
class TripletLoss(nn.Module):
"""Triplet loss with hard positive/negative mining.
Reference:
Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.
Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.
Args:
margin (float): margin for triplet.
"""
def __init__(self, margin=0.3, mutual_flag=False):
super(TripletLoss, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
self.mutual = mutual_flag
def forward(self, inputs, targets):
"""
Args:
inputs: feature matrix with shape (batch_size, feat_dim)
            targets: ground truth labels with shape (batch_size)
"""
n = inputs.size(0)
# inputs = 1. * inputs / (torch.norm(inputs, 2, dim=-1, keepdim=True).expand_as(inputs) + 1e-12)
# Compute pairwise distance, replace by the official when merged
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
dist = dist + dist.t()
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)  # dist = dist - 2 * inputs @ inputs.t()
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
# For each anchor, find the hardest positive and negative
mask = targets.expand(n, n).eq(targets.expand(n, n).t())
dist_ap, dist_an = [], []
for i in range(n):
dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
dist_ap = torch.cat(dist_ap)
dist_an = torch.cat(dist_an)
# Compute ranking hinge loss
y = torch.ones_like(dist_an)
loss = self.ranking_loss(dist_an, dist_ap, y)
if self.mutual:
return loss, dist
return loss
class RandomSampler(sampler.Sampler):
def __init__(self, data_source, batch_id, batch_image):
super(RandomSampler, self).__init__(data_source)
self.data_source = data_source
self.batch_image = batch_image
self.batch_id = batch_id
self._id2index = collections.defaultdict(list)
for idx, path in enumerate(data_source.imgs):
_id = data_source.id(path)
self._id2index[_id].append(idx)
def __iter__(self):
unique_ids = self.data_source.unique_ids
random.shuffle(unique_ids)
imgs = []
for _id in unique_ids:
imgs.extend(self._sample(self._id2index[_id], self.batch_image))
return iter(imgs)
def __len__(self):
return len(self._id2index) * self.batch_image
@staticmethod
def _sample(population, k):
if len(population) < k:
population = population * k
return random.sample(population, k)
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) > self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
class SquareErasing(object):
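    """Zeroes out fixed-size width x height patches in the four corners of an image tensor."""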
def __init__(self,width=30,height=30):
self.width=width
self.height=height
def __call__(self, img):
channel=img.size()[0]
h=img.size()[1]
w=img.size()[2]
if channel==1:
img[0,0:self.height,0:self.width]=0
img[0,h-self.height:h,0:self.width]=0
img[0,0:self.height,w-self.width:w]=0
img[0,h-self.height:h,w-self.width:w]=0
else:
for i in range(3):
img[i, 0:self.height, 0:self.width] = 0
img[i, h - self.height:h, 0:self.width] = 0
img[i, 0:self.height, w - self.width:w] = 0
img[i, h - self.height:h, w - self.width:w] = 0
return img
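# ---------------------------------------------------------------------------
# Minimal smoke test (not from the original project): it exercises TripletLoss,
# RandomErasing and SquareErasing on random tensors with made-up shapes and
# labels. It assumes the project-local import at the top of this file (opt)
# resolves and a PyTorch version that accepts the addmm_ call in
# TripletLoss.forward.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    feats = torch.randn(8, 128)                       # 8 samples, 128-dim features
    labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])   # two images per identity
    print('triplet loss:', TripletLoss(margin=0.3)(feats, labels).item())
    img = torch.rand(3, 256, 128)                     # C x H x W image tensor
    print('random-erased shape:', tuple(RandomErasing(probability=1.0)(img).size()))
    print('corner-erased shape:', tuple(SquareErasing()(torch.rand(3, 256, 128)).size()))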
|
nilq/baby-python
|
python
|
# WRITE YOUR SOLUTION HERE:
def add_numbers_to_list(numbers: list):
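    # Recursively appends consecutive integers (last element + 1) until the
    # list length becomes a multiple of five.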
if len(numbers) % 5 != 0:
numbers.append(numbers[-1] +1 )
add_numbers_to_list(numbers)
if __name__=="__main__":
numbers = [1,3,4,5,10,11]
add_numbers_to_list(numbers)
print(numbers)
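# Worked example: the starting list has 6 elements, so the recursion appends
# 12, 13, 14 and 15 until the length reaches 10, printing
# [1, 3, 4, 5, 10, 11, 12, 13, 14, 15].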
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os, subprocess
from autopkglib import Processor, ProcessorError
__all__ = ["IzPackExecutor"]
class IzPackExecutor(Processor):
"""Runs IzPack installer with all install options checked."""
input_variables = {
"app_root": {
"required": True,
"description": "Path where the app should be temporarily unpacked (installed in this case)"
},
"app_installer": {
"required": True,
"description": "Path to IzPack installer JAR"
}
}
output_variables = {
}
description = __doc__
def main(self):
real_path = os.path.realpath(__file__)
expect_path = real_path.replace(".pyc", "-install.expect").replace(".py", "-install.expect")
subprocess.call(["expect", expect_path, self.env["app_installer"], self.env["app_root"]])
zsh_path = real_path.replace(".pyc", "-get_version.zsh").replace(".py", "-get_version.zsh")
izpack_app_ver = subprocess.check_output(["zsh", zsh_path, self.env["app_root"]]).decode('ascii').replace("\r\n", "").replace("\r", "").replace("\n", "")
self.env["izpack_app_ver"] = izpack_app_ver
print(izpack_app_ver)
if __name__ == "__main__":
processor = IzPackExecutor()
processor.execute_shell()
|
nilq/baby-python
|
python
|
"""
@author Yuto Watanabe
@version 1.0.0
Copyright (c) 2020 Yuto Watanabe
"""
|
nilq/baby-python
|
python
|
import cProfile
import argparse
from app import Application
def parse_args():
parser = argparse.ArgumentParser(
description="A keyboard-oriented image viewer")
parser.add_argument("path", type=str, nargs='?', default="",
help="the file or directory to open")
parser.add_argument("--profile", action="store_true", default=False,
help="the file or directory to open")
return parser.parse_args()
def run(args):
try:
app = Application(args.path)
app.run()
except IOError as e:
print("error: failed to open file \"%s\"" % args.path)
def main():
args = parse_args()
if args.profile:
profiler = cProfile.Profile()
profiler.runcall(run, args)
profiler.print_stats(sort=1)
else:
run(args)
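# The original file never invokes the entry point; a minimal, assumed hook-up
# would be:
if __name__ == "__main__":
    main()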
|
nilq/baby-python
|
python
|
__author__ = 'Mario'
import wx
import wx.xrc
from Engine_Asian import AsianOption
###########################################################################
## Class MainPanel
###########################################################################
class PanelAsian ( wx.Panel ):
def __init__( self, parent ):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 500,300 ), style = wx.TAB_TRAVERSAL )
txtCtrlSizer = wx.BoxSizer( wx.VERTICAL )
self.StockPrice = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.StockPrice, 0, wx.ALL, 5 )
self.StockPriceText = wx.StaticText(self, -1, 'Stock Price', pos = wx.Point(125, 10))
self.OptionPrice = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.OptionPrice, 0, wx.ALL, 5 )
self.OptionStrikeText = wx.StaticText(self, -1, 'Option Strike Price', pos = wx.Point(125, 42))
self.OptionYears = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.OptionYears, 0, wx.ALL, 5 )
self.OptionYearsText = wx.StaticText(self, -1, 'Option Time Length', pos = wx.Point(125, 75))
self.Riskfree = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.Riskfree, 0, wx.ALL, 5 )
self.RiskFreeText = wx.StaticText(self, -1, 'Risk Free Rate', pos = wx.Point(125, 110))
self.Volatility = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
txtCtrlSizer.Add( self.Volatility, 0, wx.ALL, 5 )
self.VolatilityText = wx.StaticText(self, -1, 'Input Volatility', pos = wx.Point(125, 142))
self.Fixings = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
txtCtrlSizer.Add(self.Fixings, 0, wx.ALL, 5)
self.FixingsText = wx.StaticText(self, -1, 'Number of Price Fixings', pos = wx.Point(125, 174))
self.Iterations = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
txtCtrlSizer.Add(self.Iterations, 0, wx.ALL, 5)
self.IterationsText = wx.StaticText(self, -1, 'Number of Iterations', pos = wx.Point(125, 206))
Choices = ['Call', 'Put']
self.ChoiceBox = wx.Choice(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, Choices, 0)
# self.ChoiceBox.SetSelection(0)
txtCtrlSizer.Add(self.ChoiceBox, 0, wx.ALL, 5)
buttonSizer = wx.BoxSizer( wx.HORIZONTAL )
self.computeButton = wx.Button( self, wx.ID_ANY, u"Compute", wx.DefaultPosition, wx.DefaultSize, 0 )
buttonSizer.Add( self.computeButton, 0, wx.ALL, 5 )
self.clearButton = wx.Button( self, wx.ID_ANY, u"Clear", wx.DefaultPosition, wx.DefaultSize, 0 )
buttonSizer.Add( self.clearButton, 0, wx.ALL, 5 )
## Bindings
self.computeButton.Bind(wx.EVT_BUTTON, self.OnCompute)
self.clearButton.Bind(wx.EVT_BUTTON, self.OnClear)
txtCtrlSizer.Add( buttonSizer, 1, wx.EXPAND, 5 )
self.SetSizer( txtCtrlSizer )
self.Layout()
def OnCompute(self, event):
stockPrice = self.StockPrice.GetValue()
optionStrike = self.OptionPrice.GetValue()
optionYears = self.OptionYears.GetValue()
Riskfree = self.Riskfree.GetValue()
Volatility = self.Volatility.GetValue()
Fixings = self.Fixings.GetValue()
Iter = self.Iterations.GetValue()
        flag = 'c' if self.ChoiceBox.GetString(self.ChoiceBox.GetCurrentSelection()) == 'Call' else 'p'
        asianOption = AsianOption(stockPrice, Riskfree, Volatility, optionYears, Fixings, Iter, optionStrike, flag)
        # Run the Monte Carlo simulation once and reuse the result, so the
        # reported standard deviation and standard error match the printed price
        price, std_dev, std_err = asianOption.GetPrice()[:3]
        print('The Monte Carlo price of the Asian option is:', price)
        print('The associated standard deviation and standard error are:', std_dev, std_err)
# print(stockPrice, optionStrike, optionYears, Riskfree, Volatility)
#
def OnClear(self, event):
self.StockPrice.Clear()
self.OptionPrice.Clear()
self.OptionYears.Clear()
self.Riskfree.Clear()
self.Volatility.Clear()
self.Fixings.Clear()
self.Iterations.Clear()
        self.ChoiceBox.SetSelection(wx.NOT_FOUND)  # reset the selection instead of removing the Call/Put items
# pass
def __del__( self ):
pass
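# A hedged usage sketch (the frame title and size are assumptions): the panel
# above is meant to be hosted in a top-level frame driven by a wx main loop.
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, title='Asian Option Pricer', size=wx.Size(520, 340))
    PanelAsian(frame)
    frame.Show()
    app.MainLoop()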
|
nilq/baby-python
|
python
|
import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from models.generator import ResNetGenerator
from train_script.utils import *
from utils.val import validation
from utils.quantize_model import *
def adjust_learning_rate(optimizer, epoch, base_lr):
lr = base_lr * (0.1 ** (epoch // 100))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def train_GDFQ(fp_model, q_model, val_dataloder,
num_class=1000, batch_size = 32, img_size = 224,
warmup_epoch = 4, total_epoch = 400, iter_per_epoch = 200,
q_lr = 1e-6, g_lr = 1e-3,
beta=0.1, gamma=1, for_incep=False):
    # Number of generator/quantizer updates per epoch, taken from the argument
    train_iter = iter_per_epoch
FloatTensor = torch.cuda.FloatTensor
LongTensor = torch.cuda.LongTensor
generator = ResNetGenerator(num_classes=num_class, dim_z=100, img_size=img_size)
fp_model.cuda()
# freeze fp model weight and bn
for param in fp_model.parameters():
param.requires_grad = False
fp_model = freeze_bn(fp_model)
generator.train()
q_model.train()
q_model = freeze_bn(q_model)
q_model = un_freeze_act(q_model)
# fp_model = nn.DataParallel(fp_model).cuda()
generator = nn.DataParallel(generator).cuda()
q_model = nn.DataParallel(q_model).cuda()
g_optimizer = torch.optim.Adam(generator.parameters(), lr=g_lr)
q_optimizer = torch.optim.SGD(q_model.parameters(), lr=q_lr, momentum=0.9, weight_decay = 1e-4)
hooks, hook_handles, bn_stats = [], [], []
# get number of BatchNorm layers in the model
layers = sum([
1 if isinstance(layer, nn.BatchNorm2d) else 0
for layer in fp_model.modules()
])
eps = 0.8
for n, m in fp_model.named_modules():
        # The last (linear) layer is not followed by a BatchNorm, so it is skipped here
if isinstance(m, nn.Conv2d) and len(hook_handles) < layers:
hook = output_hook()
hooks.append(hook)
hook_handles.append(m.register_forward_hook(hook.hook))
if isinstance(m, nn.BatchNorm2d):
# get the statistics in the BatchNorm layers
bn_stats.append(
(m.running_mean.detach().clone().flatten().cuda(),
torch.sqrt(m.running_var + eps).detach().clone().flatten().cuda()))
assert len(hooks) == len(bn_stats)
criterion = nn.CrossEntropyLoss()
for epoch in range(total_epoch):
# both decay by 0.1 every 100 epoch
adjust_learning_rate(g_optimizer, epoch, g_lr)
adjust_learning_rate(q_optimizer, epoch, q_lr)
pbar = tqdm.trange(train_iter)
for _ in pbar:
input_mean = torch.zeros(1, 3).cuda()
input_std = torch.ones(1, 3).cuda()
fp_model.zero_grad()
g_optimizer.zero_grad()
train_gaussian_noise = np.random.normal(0, 1, (batch_size, 100))
train_gaussian_label = np.random.randint(0, num_class, batch_size)
input_data = Variable(FloatTensor(train_gaussian_noise)).cuda()
input_label = Variable(LongTensor(train_gaussian_label)).cuda()
fake_data = generator(input_data, input_label)
for hook in hooks:
hook.clear()
fake_label = fp_model(fake_data)
# BNS loss
mean_loss = 0
std_loss = 0
# compute the loss according to the BatchNorm statistics and the statistics of intermediate output
for cnt, (bn_stat, hook) in enumerate(zip(bn_stats, hooks)):
tmp_output = hook.outputs
bn_mean, bn_std = bn_stat[0], bn_stat[1]
                # Per-channel mean and std of this layer's output for the fake batch
tmp_mean = torch.mean(
tmp_output.view(
tmp_output.size(0),
tmp_output.size(1),
-1), dim=2)
tmp_std = torch.sqrt(
torch.var(
tmp_output.view(tmp_output.size(0),
tmp_output.size(1), -1),
dim=2
) + eps
)
mean_loss += own_loss(bn_mean, tmp_mean)
std_loss += own_loss(bn_std, tmp_std)
tmp_mean = torch.mean(fake_data.view(fake_data.size(0), 3,-1), dim=2)
tmp_std = torch.sqrt( torch.var(fake_data.view(fake_data.size(0), 3, -1), dim=2) + eps)
mean_loss += own_loss(input_mean, tmp_mean)
std_loss += own_loss(input_std, tmp_std)
bns_loss = mean_loss + std_loss
g_loss = criterion(fake_label, input_label)
g_loss = g_loss + beta * bns_loss
g_loss.backward()
g_optimizer.step()
# train q model
q_optimizer.zero_grad()
fp_model.zero_grad()
detach_fake_data = fake_data.detach()
# update activation
q_result = q_model(detach_fake_data)
if epoch >= warmup_epoch:
q_loss = criterion(q_result, input_label)
q_logit = F.log_softmax(q_model(detach_fake_data), dim = 1)
with torch.no_grad():
fp_logit = F.log_softmax(fp_model(detach_fake_data), dim = 1)
kd_loss = F.kl_div(q_logit, fp_logit, reduction='batchmean')
q_loss = q_loss + gamma * kd_loss
q_loss.backward()
q_optimizer.step()
pbar.set_description("epoch: {}, G_lr:{}, G_loss: {}, Q_lr:{}, Q_loss: {}".format(epoch+1,
get_lr(g_optimizer) , g_loss.item(),
get_lr(q_optimizer), q_loss.item()))
else:
pbar.set_description("epoch: {}, g_lr: {} ==> warm up ==> G_loss: {}".format(epoch+1, get_lr(g_optimizer), g_loss.item()))
if (epoch+1) < warmup_epoch:
pass
elif (epoch+1) == warmup_epoch:
print("Free activaiton after warm up")
q_model = freeze_act(q_model)
print("Eval after warmup")
q_top_1, q_top_5 = validation(val_dataloder, q_model, criterion)
else:
if (epoch+1) % 10 == 0:
q_top_1, q_top_5 = validation(val_dataloder, q_model, criterion)
torch.save(q_model.state_dict(), "q_model.pkl")
torch.save(generator.state_dict(), "generator.pkl")
for handle in hook_handles:
handle.remove()
return q_model
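# A hedged usage sketch (model construction and data loading are assumed to be
# handled elsewhere in the repository):
#
#   q_model = train_GDFQ(fp_model, q_model, val_loader,
#                        num_class=1000, batch_size=32, img_size=224,
#                        warmup_epoch=4, total_epoch=400,
#                        q_lr=1e-6, g_lr=1e-3, beta=0.1, gamma=1)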
|
nilq/baby-python
|
python
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root@localhost/flask_db'
db = SQLAlchemy(app)
class UserDB(db.Model):
id = db.Column(db.Integer,primary_key=True)
username = db.Column(db.String(32),unique=True)
password = db.Column(db.String(32))
def __init__(self,username,password):
self.username = username
self.password = password
    def add(self):
        try:
            db.session.add(self)
            db.session.commit()
            return self.id
        except Exception as e:
            db.session.rollback()
            return e
def isExisted(self):
temUser = UserDB.query.filter_by(username=self.username,password=self.password).first()
if temUser is None:
return 0
else:
return 1
class User(object):
def __init__(self,user_id,user_name):
self.user_id = user_id
self.user_name = user_name
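# A hedged usage sketch (credentials are placeholders): create the table once,
# then persist a row and check that it can be found again.
if __name__ == '__main__':
    db.create_all()
    user = UserDB('alice', 'secret')
    user.add()
    print(user.isExisted())  # prints 1 if the row was stored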
|
nilq/baby-python
|
python
|
from .DtnAbstractParser import DtnAbstractParser
from enum import Enum
from pydantic import PositiveInt, PositiveFloat
import sys
from typing import List, Optional, Set, Union
class RouterMode(str, Enum):
FAST = 'fast'
SLOW = 'slow'
class RouterAlgorithm(str, Enum):
CGR = 'cgr'
BFS = 'bfs'
class DtnLookupRouterParser(DtnAbstractParser):
""" Validator for YAML configuration parameters of DtnCGRouter """
# Excel file containing routes
routes: str
# Router mode
mode: RouterMode = RouterMode.FAST
    # If True, all routes will be recomputed even if a route file is provided
recompute_routes: bool = False
# Excluded routes specified as a list
# Example: [['MOC', 'PSH', 'MCC'], ['MCC', 'MRO', 'MCC']]
excluded_routes: Optional[List[List[str]]] = None
# Maximum number of hops a valid route can have
max_relay_hops: PositiveInt = sys.maxsize
# Number of cores to use during the computation of the routes
num_cores: PositiveInt = 1
# Maximum number of neighbors to send a critical bundle.
# e.g. if a node has 10 neighbors and ``max_crit=2``, then only the
# two best neighbors will be used
max_crit: Optional[int] = None
# List of nodes that can be used as relays
relays: Union[Set[str], List[str], str] = set()
# Maximum speed of any node in the system in [miles/sec]
# Based on latest SABR specification
max_speed: PositiveFloat = 125
# Algorithm to use for route computation.
algorithm: RouterAlgorithm = RouterAlgorithm.BFS
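# A hedged illustration (all values are placeholders): a YAML block validated
# by this parser might look like
#
#   routes: routes.xlsx
#   mode: fast
#   max_relay_hops: 3
#   relays: [relay1, relay2]
#   algorithm: bfs
#
# Fields required by DtnAbstractParser are defined elsewhere in the repository
# and are omitted here.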
|
nilq/baby-python
|
python
|
import numpy as np
import tensorflow as tf
class InferenceGraph:
def __init__(self):
pass
def run_inference_for_single_input_frame(self, model, input_frame,log, log_path):
"""
Method Name: run_inference_for_single_input_frame
Description: This function make prediction on the given input frame and provides us the results
in a dictionary format
Output: output_dict
"""
log_file = open(log_path + 'run_inference_for_single_input_frame.txt', 'a+')
try:
input_tensor = tf.convert_to_tensor(input_frame)
# Initialize the model with a default set of data attributes that were used to build it
model_fn = model.signatures['serving_default']
# Make predictions for the input_frame from the model
output_dict = model_fn(input_tensor)
            # Pop num_detections from the dictionary because of its 1-D shape (1,)
num_detections = int(output_dict.pop('num_detections'))
# Convert the output dictionary tensor values in numpy array
output_dict = {key: value[0, :num_detections].numpy() for key, value in output_dict.items()}
output_dict['num_detections'] = num_detections
output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int16)
log.log(log_file, 'Prediction from the input frame was successful')
log_file.close()
return output_dict
except Exception as e:
log.log(log_file, 'Error during prediction from the input frame')
log.log(log_file, str(e))
log_file.close()
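# A hedged usage sketch (paths and the logger object are placeholders): load a
# TF2 SavedModel object detector and run a single frame through it.
#
#   model = tf.saved_model.load('/path/to/saved_model')
#   frame = np.asarray(image)[np.newaxis, ...]   # add a batch dimension
#   output = InferenceGraph().run_inference_for_single_input_frame(
#       model, frame, logger, './logs/')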
|
nilq/baby-python
|
python
|
import selenium
import glob
from numpy import arange
from random import sample
from sys import exit
from time import sleep
from progress.spinner import Spinner
from progress.bar import ChargingBar
from threading import Thread
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
def worker():
finish = False
sp = Spinner('Loading ')
cont = 0
while(not finish):
sleep(1)
cont +=1
if(cont == 60):
finish = True
sp.next()
return
class YouMotivate:
def __init__(self):
        # UNCOMMENT FOR A TEMPORARY LOADING SPINNER THAT RUNS FOR 60 SECONDS {
# print('Managing Firefox Info')
# t = Thread(target=worker)
# t.start()
# }
opts = Options()
# UNCOMMENT FOR adding firefox user info {
users = glob.glob(r"C:\Users\*")
print("PC USERS:")
users = [user.split("\\")[len(user.split("\\"))-1] for user in users]
print(users)
print("Choose one: ")
user = input()
if(not user in users):
print("That user does not exist")
exit()
binary = FirefoxBinary(r'C:\Program Files\Mozilla Firefox\firefox.exe')
profiles = glob.glob('C:\\Users\\'+str(user)+'\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\*')
profiles = [profile.split("\\")[len(profile.split("\\"))-1] for profile in profiles]
print("choose profile (normally the one with default-release): ")
print(profiles)
profile = input()
if(not profile in profiles):
print("That profile does not exist")
exit()
fp = ('C:\\Users\\'+str(user)+'\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\'+str(profile))
opts.profile = fp
# }
self.driver = webdriver.Firefox(options=opts,
executable_path='geckodriver')
        print('Firefox Info Loaded successfully')
print('Opening Youtube...')
self.driver.get("https://www.youtube.com/playlist?list=FLHcrPEhUkZW37RI7c5FQvtw")
sleep(4)
#get num of videos in the list
num = self.driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-browse/ytd-playlist-sidebar-renderer/div/ytd-playlist-sidebar-primary-info-renderer/div[1]/yt-formatted-string[1]')
num = int(num.text.split(' ')[0])
# print('NUM OF VIDEOS:\t' + str(num))
vids = sample(list(arange(1,num+1)), 3)
# print('CHOOSEN:\t' + str(vids))
#choose those videos and open it in new tabs
bar = ChargingBar(' Opening videos', max=len(vids))
for i in vids:
vid_ref = self.driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-browse/ytd-two-column-browse-results-renderer/div[1]/ytd-section-list-renderer/div[2]/ytd-item-section-renderer/div[3]/ytd-playlist-video-list-renderer/div[3]/ytd-playlist-video-renderer['+str(i)+']/div[2]/a')
ref = vid_ref.get_attribute("href")
# print(ref)
self.driver.execute_script("window.open('"+str(ref)+"', 'new_window_"+str(i)+"')")
# self.driver.execute_script('browser.tabs.create({url:"'+str(ref)+'"});')
bar.next()
bar.finish()
ym = YouMotivate()
exit()
|
nilq/baby-python
|
python
|
from .page_article import *
|
nilq/baby-python
|
python
|