hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b9792aaefc85c0b127b76e6415b188054e8619c1 | 4,402 | py | Python | test/functional/merkle_blocks.py | aixinwang/Gfc | 4a7fdac234f5f51055e471e77aaff62cfa4c6eab | [
"MIT"
] | null | null | null | test/functional/merkle_blocks.py | aixinwang/Gfc | 4a7fdac234f5f51055e471e77aaff62cfa4c6eab | [
"MIT"
] | null | null | null | test/functional/merkle_blocks.py | aixinwang/Gfc | 4a7fdac234f5f51055e471e77aaff62cfa4c6eab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The GFC coin bt developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gettxoutproof and verifytxoutproof RPCs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(BitcoinTestFramework):
    """Functional test for the gettxoutproof/verifytxoutproof RPCs."""
    def __init__(self):
        super().__init__()
        # Start from a fresh chain so block heights are deterministic.
        self.setup_clean_chain = True
        self.num_nodes = 4
        # Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing
        self.extra_args = [[], [], [], ["-txindex"]]
    def setup_network(self):
        # Star topology: node 0 connects to every other node, so sync_all
        # propagates blocks/txs to the whole network.
        self.setup_nodes()
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.sync_all()
    def run_test(self):
        self.log.info("Mining blocks...")
        # 105 blocks: enough for the earliest coinbases to mature (>100 confirmations).
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Spend two mature outputs to node 1; both txs sit in the mempool for now.
        node0utxos = self.nodes[0].listunspent(1)
        tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
        tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
        # This will raise an exception because the transaction is not yet in a block
        assert_raises_jsonrpc(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()
        # blocktxn[0] is the coinbase; txid1/txid2 occupy positions 1 and 2.
        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])
        # Proofs verify for a single tx, for a pair, and with an explicit block hash.
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
        # Spend one of the two new outputs so exactly one of txid1/txid2 becomes fully spent.
        txin_spent = self.nodes[1].listunspent(1).pop()
        tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})
        txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        txid_spent = txin_spent["txid"]
        txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
        # We can't find the block from a fully-spent tx
        assert_raises_jsonrpc(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent])
        # We can get the proof if we specify the block
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
        # We can't get the proof if we specify a non-existent block
        assert_raises_jsonrpc(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
        # We can get the proof if the transaction is unspent
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
        # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
        assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist))
        assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist))
        # We can always get a proof if we have a -txindex
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
        # We can't get a proof if we specify transactions from different blocks
        assert_raises_jsonrpc(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3])
if __name__ == '__main__':
    MerkleBlockTest().main()
| 51.186047 | 140 | 0.683099 |
66ae8bcd4ae9ae32e098a0f12994c9a728a67114 | 3,745 | py | Python | chul/filters.py | Jenks18/mfl_api | ecbb8954053be06bbcac7e1132811d73534c78d9 | [
"MIT"
] | 19 | 2015-04-16T09:37:08.000Z | 2022-02-10T11:50:30.000Z | chul/filters.py | Jenks18/mfl_api | ecbb8954053be06bbcac7e1132811d73534c78d9 | [
"MIT"
] | 125 | 2015-03-26T14:05:49.000Z | 2020-05-14T08:16:50.000Z | chul/filters.py | Jenks18/mfl_api | ecbb8954053be06bbcac7e1132811d73534c78d9 | [
"MIT"
] | 39 | 2015-04-15T09:17:33.000Z | 2022-03-28T18:08:16.000Z | import django_filters
from django.db.models import Q
from distutils.util import strtobool
from .models import (
CommunityHealthUnit,
CommunityHealthWorker,
CommunityHealthWorkerContact,
Status,
CommunityHealthUnitContact,
CHUService,
CHURating,
ChuUpdateBuffer
)
from common.filters.filter_shared import (
CommonFieldsFilterset,
ListCharFilter)
from common.constants import BOOLEAN_CHOICES, TRUTH_NESS
class ChuUpdateBufferFilter(CommonFieldsFilterset):
    """Filter buffered (not yet approved) community health unit updates."""
    class Meta(object):
        model = ChuUpdateBuffer
class CHUServiceFilter(CommonFieldsFilterset):
    """Filter CHU services by partial, case-insensitive name/description."""
    name = django_filters.CharFilter(lookup_type='icontains')
    description = django_filters.CharFilter(lookup_type='icontains')
    class Meta(object):
        model = CHUService
class StatusFilter(CommonFieldsFilterset):
    """Filter statuses by partial, case-insensitive name/description."""
    name = django_filters.CharFilter(lookup_type='icontains')
    description = django_filters.CharFilter(lookup_type='icontains')
    class Meta(object):
        model = Status
class CommunityHealthUnitContactFilter(CommonFieldsFilterset):
    """Filter CHU contacts by exact health-unit id and/or contact id."""
    health_unit = django_filters.AllValuesFilter(lookup_type='exact')
    contact = django_filters.AllValuesFilter(lookup_type='exact')
    class Meta(object):
        model = CommunityHealthUnitContact
class CommunityHealthUnitFilter(CommonFieldsFilterset):
    """Filter community health units by name, location and approval workflow state."""
    def chu_pending_approval(self, value):
        # MethodFilter action: `self` is the queryset being filtered and
        # `value` is the raw query-string value.
        if value in TRUTH_NESS:
            # "Pending": never acted on at all, or previously approved/rejected
            # but edited afterwards (has_edits=True) so it needs a fresh review.
            return self.filter(
                Q(is_approved=False, is_rejected=False, has_edits=False) |
                Q(is_approved=True, is_rejected=False, has_edits=True) |
                Q(is_approved=False, is_rejected=True, has_edits=True)
            )
        else:
            # "Not pending": a final approve/reject decision with no later edits.
            return self.filter(
                Q(is_approved=True, is_rejected=False, has_edits=False) |
                Q(is_approved=False, is_rejected=True, has_edits=False)
            )
    # Partial, case-insensitive match on the unit name.
    name = django_filters.CharFilter(lookup_type='icontains')
    # Location filters traverse the owning facility's administrative hierarchy.
    ward = ListCharFilter(name='facility__ward')
    constituency = ListCharFilter(
        name='facility__ward__constituency')
    county = ListCharFilter(
        name='facility__ward__constituency__county')
    # Boolean query-string values ('true'/'false'/'1'/'0' ...) coerced via strtobool.
    is_approved = django_filters.TypedChoiceFilter(
        choices=BOOLEAN_CHOICES, coerce=strtobool
    )
    is_rejected = django_filters.TypedChoiceFilter(
        choices=BOOLEAN_CHOICES, coerce=strtobool
    )
    has_edits = django_filters.TypedChoiceFilter(
        choices=BOOLEAN_CHOICES, coerce=strtobool
    )
    pending_approval = django_filters.MethodFilter(
        action=chu_pending_approval)
    class Meta(object):
        model = CommunityHealthUnit
class CommunityHealthWorkerFilter(CommonFieldsFilterset):
    """Filter community health workers by name and by location of their unit."""
    first_name = django_filters.CharFilter(lookup_type='icontains')
    last_name = django_filters.CharFilter(lookup_type='icontains')
    username = django_filters.CharFilter(lookup_type='icontains')
    ward = django_filters.CharFilter(name='health_unit__community__ward')
    # NOTE(review): this path uses 'community_ward' (single underscore) while
    # the ward/county filters use 'community__ward' -- confirm against the model.
    constituency = django_filters.CharFilter(
        name='health_unit__community_ward__constituency')
    county = django_filters.CharFilter(
        name='health_unit__community__ward__constituency__county')
    class Meta(object):
        model = CommunityHealthWorker
class CommunityHealthWorkerContactFilter(CommonFieldsFilterset):
    """Filter health worker contacts by exact worker id and contact."""
    health_worker = django_filters.AllValuesFilter(lookup_type='exact')
    contact = django_filters.AllValuesFilter(lookup_type='icontains')
    class Meta(object):
        model = CommunityHealthWorkerContact
class CHURatingFilter(CommonFieldsFilterset):
    """Filter CHU ratings by exact unit id and exact rating value."""
    chu = django_filters.AllValuesFilter(lookup_type='exact')
    rating = django_filters.NumberFilter(lookup_type='exact')
    class Meta(object):
        model = CHURating
| 31.208333 | 74 | 0.73725 |
84db43d40c527d5188e018ec2a222dc81e2021f6 | 2,545 | py | Python | chorderator/utils/pipeline.py | billyblu2000/Chorderator | 6e5e077da649966e872dbd1494f3606a23937e8b | [
"MIT"
] | 7 | 2021-04-12T08:17:10.000Z | 2022-01-30T05:55:25.000Z | chorderator/utils/pipeline.py | billyblu2000/Chorderator | 6e5e077da649966e872dbd1494f3606a23937e8b | [
"MIT"
] | null | null | null | chorderator/utils/pipeline.py | billyblu2000/Chorderator | 6e5e077da649966e872dbd1494f3606a23937e8b | [
"MIT"
] | null | null | null | from ..chords.ChordProgression import read_progressions
from .excp import handle_exception
from .utils import Logging, pickle_read, combine_ins
class Pipeline:
    """Three-stage chord-generation pipeline.

    ``pipeline`` must be a sequence of exactly three processor classes:
    (pre-processor, main model, post-processor).  ``send_in`` runs a MIDI
    file through all three stages; ``send_out`` returns the combined result.

    ``state`` tracks progress: 0 = idle, 1 = pre-processing, 2 = solving,
    3 = post-processing, 4 = done.
    """

    def __init__(self, pipeline):
        self.meta = None
        self.melo = None
        self.final_output = None
        self.final_output_log = None
        self.state = 0
        self.pipeline = pipeline
        if len(pipeline) != 3:
            Logging.critical('Pipeline length not match!')

    def send_in(self, midi_path, **kwargs):
        """Run the full pipeline on ``midi_path``.

        Expected kwargs: ``phrase``, ``meta``, ``output_chord_style``,
        ``output_progression_style`` (consumed by the individual stages).
        """
        self.state = 1
        Logging.warning('Pre-processing...')
        self.melo, splited_melo, self.meta = self.__preprocess(midi_path, **kwargs)
        Logging.warning('Pre-process done!')
        self.state = 2
        Logging.warning('Solving...')
        progression_list = self.__main_model(splited_melo, self.meta)
        Logging.warning('Solved!')
        self.state = 3
        Logging.warning('Post-processing...')
        self.final_output, self.final_output_log = self.__postprocess(progression_list, **kwargs)
        Logging.warning('Post-process done!')
        self.state = 4

    def __preprocess(self, midi_path, **kwargs):
        """Stage 1: split the melody into phrases and extract metadata."""
        try:
            processor = self.pipeline[0](midi_path, kwargs['phrase'], kwargs['meta'])
            return processor.get()
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            handle_exception(500)

    def __main_model(self, splited_melo, meta):
        """Stage 2: solve for a chord progression using template progressions."""
        templates = read_progressions('rep')
        # Downstream code expects the British-spelling key as well.
        meta['metre'] = meta['meter']
        try:
            processor = self.pipeline[1](splited_melo, meta, templates)
            processor.solve()
            return processor.get()
        except Exception:
            handle_exception(600)

    def __postprocess(self, progression_list, **kwargs):
        """Stage 3: render the chosen progressions in the requested styles."""
        templates = read_progressions('dict')
        lib = pickle_read('lib')
        try:
            processor = self.pipeline[2](progression_list,
                                         templates,
                                         lib,
                                         self.meta,
                                         kwargs['output_chord_style'],
                                         kwargs['output_progression_style'])
            return processor.get()
        except Exception:
            handle_exception(700)

    def send_out(self):
        """Return ``(combined_result, log)``; logs an error if nothing is ready."""
        if self.final_output:
            return combine_ins(self.melo, self.final_output), self.final_output_log
        else:
            Logging.critical('Nothing is in pipeline yet!')


if __name__ == '__main__':
    pass
| 34.863014 | 97 | 0.576031 |
9f45fb0494e57243b1c60bb75e69410362d57269 | 4,623 | py | Python | toykoin/tests/test_block.py | fakecoinbase/giacomocaironislashtoykoin | cd5c891819338479eab50bc83bf7cf867394ed5a | [
"MIT"
] | null | null | null | toykoin/tests/test_block.py | fakecoinbase/giacomocaironislashtoykoin | cd5c891819338479eab50bc83bf7cf867394ed5a | [
"MIT"
] | null | null | null | toykoin/tests/test_block.py | fakecoinbase/giacomocaironislashtoykoin | cd5c891819338479eab50bc83bf7cf867394ed5a | [
"MIT"
] | null | null | null | from toykoin.core.tx import TxIn, TxOut, Tx, OutPoint
from toykoin.core.script import Script
from toykoin.core.block import Block, BlockHeader, RevBlock
from toykoin.core.utils import generate_merkle_root
import pytest
def test_valid_serialization_1():
    # A block with a well-formed 32-byte previous outpoint round-trips
    # through serialize/deserialize unchanged.
    tx_in = TxIn(OutPoint("ff" * 32, 0), Script())
    tx_out = TxOut(10, Script())
    tx_1 = Tx([tx_in], [tx_out])
    header = BlockHeader()
    block = Block(header, [tx_1])
    header.merkle_root = generate_merkle_root(block.transactions)
    header.previous_pow = "00" * 32
    assert Block.deserialize(block.serialize()) == block
def test_invalid_serialization_1():
    # A 31-byte outpoint txid is malformed, so the round-trip does not
    # reproduce the original block.
    tx_in = TxIn(OutPoint("ff" * 31, 0), Script())
    tx_out = TxOut(10, Script())
    tx_1 = Tx([tx_in], [tx_out])
    header = BlockHeader()
    block = Block(header, [tx_1])
    header.merkle_root = generate_merkle_root(block.transactions)
    header.previous_pow = "00" * 32
    assert not Block.deserialize(block.serialize()) == block
def test_validation_1():
    # A block whose only tx spends a non-null outpoint has no coinbase,
    # so the block fails validation even once the header is valid.
    tx_in = TxIn(OutPoint("ff" * 32, 0), Script())
    tx_out = TxOut(10 ** 10, Script())
    tx_1 = Tx([tx_in], [tx_out])
    header = BlockHeader()
    block = Block(header, [tx_1])
    assert not block.is_valid()
    header.merkle_root = generate_merkle_root(block.transactions)
    header.previous_pow = "00" * 32
    assert header.is_valid()
    assert not block.is_valid()  # has not a coinbase tx
def test_validation_2():
    # A null (all-zero) previous txid marks the tx as a coinbase, so the
    # otherwise-identical block now validates.
    tx_in = TxIn(OutPoint("00" * 32, 0), Script())
    tx_out = TxOut(10 ** 10, Script())
    tx_1 = Tx([tx_in], [tx_out])
    header = BlockHeader()
    block = Block(header, [tx_1])
    assert not block.is_valid()
    header.merkle_root = generate_merkle_root(block.transactions)
    header.previous_pow = "00" * 32
    assert header.is_valid()
    assert block.is_valid()  # has a coinbase tx
def test_validation_3():
    # Two transactions both using the maximal outpoint index (0xffff) act
    # as two coinbases; only one is allowed, so the block is invalid.
    tx_in = TxIn(OutPoint("ff" * 32, 256 ** 2 - 1), Script())
    tx_out = TxOut(10, Script())
    tx_1 = Tx([tx_in], [tx_out])
    tx_in_2 = TxIn(OutPoint("ff" * 32, 256 ** 2 - 1), Script())
    tx_out_2 = TxOut(10, Script())
    tx_2 = Tx([tx_in_2], [tx_out_2])
    header = BlockHeader()
    block = Block(header, [tx_1, tx_2])
    header.merkle_root = generate_merkle_root(block.transactions)
    header.previous_pow = "00" * 32
    assert header.is_valid()
    assert not block.is_valid()  # two coinbases
def test_reverse_serialization():
    # A known-good serialized RevBlock round-trips byte-for-byte.
    rev_block_bytes = b"U\x91\xfb\x04\xe7t\x1c4\xc5_\xef\xd9\x00\xa6Nc\x9c5[\xd9\xa4\x86:\xeb\xdahH\x8c\xfeY\xb1\x8e\x00\x01\xb8eq\x0e\x05\x8a\xca\x8c\x02\xf2\xae\xfa)\xd1\x0bZP\x94L<9\xbc\x11N1\xb5\xc9CZ\x89\xdb\x1e\x00\x00\x00\n\x00\x00\x00\x02T\x0b\xe4\x00\x00\x00\x00\x02U\xa1\xdf\xbd.g5{(\x18\xf0P\x9f\x9a?\xca/j\xc4\x99\xf1<\xba0\xfd\xb5\x18|\x9c>\x1f\xbc\x00\x00\xc4|v\xb4\x07\x08\x08\x9fQ\xc8?\x9d\xd6\x81b\x16Y)0\x800^\x98\x9d\xfa\xae.4\xfft\x7f\x13\x00\x00"
    assert RevBlock.deserialize(rev_block_bytes).serialize() == rev_block_bytes
def test_double_coinbase():
    # Two distinct coinbase txs (different scripts) in one block -> invalid.
    coinbase_1 = Tx(
        [TxIn(OutPoint("00" * 32, 0), Script.from_hex("00030000aa"))],
        [TxOut(10 ** 10, Script())],
    )
    coinbase_2 = Tx(
        [TxIn(OutPoint("00" * 32, 0), Script.from_hex("00030000bb"))],
        [TxOut(10 ** 10, Script())],
    )
    header = BlockHeader()
    block = Block(header, [coinbase_1, coinbase_2])
    header.merkle_root = generate_merkle_root(block.transactions)
    header.previous_pow = "00" * 32
    assert not block.is_valid()
def test_block_header_invalid_length():
    # A header with only previous_pow set (no merkle root) is invalid.
    header = BlockHeader()
    header.previous_pow = "00" * 32
    assert not header.is_valid()
def test_empty_block():
    # A block with no transactions at all is invalid.
    header = BlockHeader()
    block = Block(header, [])
    header.merkle_root = "00" * 32
    header.previous_pow = "00" * 32
    assert not block.is_valid()
def test_invalid_merkleroot():
    # A merkle root that does not match the block's transactions -> invalid.
    tx_in = TxIn(OutPoint("ff" * 32, 0), Script())
    tx_out = TxOut(10, Script())
    tx_1 = Tx([tx_in], [tx_out])
    header = BlockHeader()
    block = Block(header, [tx_1])
    header.merkle_root = "00" * 32
    header.previous_pow = "00" * 32
    assert not block.is_valid()
def test_rev_block_invalid_1():
    # A RevBlock with no pow at all is invalid.
    rev_block = RevBlock("", [], [])
    assert not rev_block.is_valid()
def test_rev_block_invalid_2():
    # A restored TxOut with a negative value makes the RevBlock invalid.
    rev_block = RevBlock("", [], [])
    rev_block.pow = "00" * 32
    rev_block.old_txout = [[OutPoint("ff" * 32, 0).hex, TxOut(-1)]]
    assert not rev_block.is_valid()
def test_rev_block_invalid_3():
    # A restored output pointing at the null (coinbase) txid is invalid.
    rev_block = RevBlock("", [], [])
    rev_block.pow = "00" * 32
    rev_block.old_txout = [[OutPoint("00" * 32, 0).hex, TxOut()]]
    assert not rev_block.is_valid()
| 32.104167 | 467 | 0.658447 |
d5afffe3d88fc35314d0dcbc8576f72efde80cf9 | 1,207 | py | Python | Greedy/55_Jump_Game.py | hren-ron/LeetCode | 3ba2766f8e6ad2bfb5c9686b362f000824e78474 | [
"Apache-2.0"
] | null | null | null | Greedy/55_Jump_Game.py | hren-ron/LeetCode | 3ba2766f8e6ad2bfb5c9686b362f000824e78474 | [
"Apache-2.0"
] | null | null | null | Greedy/55_Jump_Game.py | hren-ron/LeetCode | 3ba2766f8e6ad2bfb5c9686b362f000824e78474 | [
"Apache-2.0"
] | null | null | null | '''
Given an array of non-negative integers, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Determine if you are able to reach the last index.
Example 1:
Input: [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum
jump length is 0, which makes it impossible to reach the last index.
'''
class Solution {
public:
    // Naive top-down recursion kept for reference only: tries every index
    // reachable from `position`; exponential in the worst case.
    /*
    bool canjump(int position,vector<int>& nums){
        if(position==nums.size()-1)
            return true;
        int n=position+nums[position]<=nums.size()-1?position+nums[position]:nums.size()-1;
        for(int i=position+1;i<=n;i++){
            if(canjump(i,nums))
                return true;
        }
        return(false);
    }
    */
    // Greedy O(n) scan from the right: `last` is the leftmost index known to
    // reach the end; index i can reach the end iff i + nums[i] >= last.
    // The array is jumpable from the start iff `last` finishes at index 0.
    bool canJump(vector<int>& nums) {
        //return canjump(0,nums);
        int last=nums.size()-1;
        for(int i=nums.size()-2;i>=0;i--){
            if(i+nums[i]>=last)
                last=i;
        }
        return(last==0);
    }
};
| 26.23913 | 102 | 0.597349 |
b2dab487ea9550fb54485637a426376eee75c1fa | 5,472 | py | Python | database/test_mongo_connector.py | timburbank/openrvdas | ba77d3958075abd21ff94a396e4a97879962ac0c | [
"BSD-2-Clause"
] | 1 | 2020-06-29T17:25:44.000Z | 2020-06-29T17:25:44.000Z | database/test_mongo_connector.py | timburbank/openrvdas | ba77d3958075abd21ff94a396e4a97879962ac0c | [
"BSD-2-Clause"
] | null | null | null | database/test_mongo_connector.py | timburbank/openrvdas | ba77d3958075abd21ff94a396e4a97879962ac0c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import logging
import random
import sys
import time
import unittest
import warnings
sys.path.append('.')
from logger.utils.das_record import DASRecord
from logger.utils.nmea_parser import NMEAParser
try:
from database.settings import MONGO_ENABLED
from database.mongo_connector import MongoConnector
# from mysql.connector.errors import ProgrammingError
except ModuleNotFoundError:
MONGO_ENABLED = False
# Raw input lines fed to NMEAParser: "<instrument id> <ISO timestamp> <NMEA sentence>".
SAMPLE_DATA = [
  's330 2017-11-04T05:12:19.479303Z $INZDA,000000.17,07,08,2014,,*78',
  's330 2017-11-04T05:12:19.729748Z $INGGA,000000.16,3934.831698,S,03727.695242,W,1,12,0.7,0.82,M,-3.04,M,,*6F',
  's330 2017-11-04T05:12:19.984911Z $INVTG,227.19,T,245.64,M,10.8,N,20.0,K,A*36',
  's330 2017-11-04T05:12:20.240177Z $INRMC,000000.16,A,3934.831698,S,03727.695242,W,10.8,227.19,070814,18.5,W,A*00',
  's330 2017-11-04T05:12:20.495430Z $INHDT,235.18,T*18',
  's330 2017-11-04T05:12:20.748665Z $PSXN,20,1,0,0,0*3A',
  's330 2017-11-04T05:12:21.000716Z $PSXN,22,-0.05,-0.68*32',
  's330 2017-11-04T05:12:21.256010Z $PSXN,23,-2.82,1.00,235.18,-1.66*3D',
]
# Expected results of single-field reads, one dict per db.read() call,
# mapping field name -> [(timestamp, value)].
SINGLE_RESULTS = [
  {'S330GPSTime': [(1509772339.479303, 0.17)]},
  {'S330GPSDay': [(1509772339.479303, 7)]},
  {'S330GPSMonth': [(1509772339.479303, 8)]},
  {'S330GPSYear': [(1509772339.479303, 2014)]},
  {'S330GPSTime': [(1509772339.729748, 0.16)]},
  {'S330Lat': [(1509772339.729748, 3934.831698)]},
  {'S330NorS': [(1509772339.729748, 'S')]},
  {'S330Lon': [(1509772339.729748, 3727.695242)]},
  {'S330EorW': [(1509772339.729748, 'W')]},
  {'S330FixQuality': [(1509772339.729748, 1)]},
  {'S330NumSats': [(1509772339.729748, 12)]},
  {'S330HDOP': [(1509772339.729748, 0.7)]},
  {'S330AntennaHeight': [(1509772339.729748, 0.82)]},
  {'S330CourseTrue': [(1509772339.984911, 227.19)]},
  {'S330CourseMag': [(1509772339.984911, 245.64)]},
  {'S330SOGKt': [(1509772339.984911, 10.8)]},
  {'S330GPSTime': [(1509772340.240177, 0.16)]},
  {'S330Lat': [(1509772340.240177, 3934.831698)]},
  {'S330NorS': [(1509772340.240177, 'S')]},
  {'S330Lon': [(1509772340.240177, 3727.695242)]},
  {'S330EorW': [(1509772340.240177, 'W')]},
  {'S330Speed': [(1509772340.240177, 10.8)]},
  {'S330CourseTrue': [(1509772340.240177, 227.19)]},
  {'S330Date': [(1509772340.240177, '070814')]},
  {'S330MagVar': [(1509772340.240177, 18.5)]},
  {'S330MagVarEorW': [(1509772340.240177, 'W')]},
  {'S330HeadingTrue': [(1509772340.49543, 235.18)]},
  {'S330HorizQual': [(1509772340.748665, 1)]},
  {'S330HeightQual': [(1509772340.748665, 0)]},
  {'S330HeadingQual': [(1509772340.748665, 0)]},
  {'S330RollPitchQual': [(1509772340.748665, 0)]},
  {'S330GyroCal': [(1509772341.000716, -0.05)]},
  {'S330GyroOffset': [(1509772341.000716, -0.68)]},
  {'S330Roll': [(1509772341.25601, -2.82)]},
  {'S330Pitch': [(1509772341.25601, 1.0)]},
  {'S330HeadingTrue': [(1509772341.25601, 235.18)]}
]
# Expected reads after seeking back to start, restricted to the course fields.
RESET_RESULTS = [
  {'S330CourseTrue': [(1509772339.984911, 227.19)]},
  {'S330CourseMag': [(1509772339.984911, 245.64)]},
  {'S330CourseTrue': [(1509772340.240177, 227.19)]}
]
# Expected result of one unbounded (num_records=None) batch read of those fields.
BATCH_RESULTS = [
  {'S330CourseTrue': [(1509772339.984911, 227.19), (1509772340.240177, 227.19)], 'S330CourseMag': [(1509772339.984911, 245.64)]},
]
class TestDatabase(unittest.TestCase):
  """Round-trip test of MongoConnector write/read/seek semantics."""
  ############################
  @unittest.skipUnless(MONGO_ENABLED, 'Mongo not installed; tests of MongoDB '
                       'functionality will not be run.')
  def test_mongo_connector(self):
    # Write parsed NMEA records, then read them back three ways:
    # one field-at-a-time, filtered after a seek, and as one batch.
    parser = NMEAParser()
    try:
      db = MongoConnector(database='test', host='localhost',
                          user='test', password='test')
      # db.exec_sql_command('truncate table data')
    except Exception as e:
      self.assertTrue(False,'Unable to create database connection. Have you '
                      'set up the appropriate setup script in database/setup?')
    records = [parser.parse_record(s) for s in SAMPLE_DATA]
    for record in records:
      db.write_record(record)
    # Sequential single-field reads must come back in write order.
    for r in SINGLE_RESULTS:
      result = db.read()
      self.assertEqual(result, r)
      logging.info('Read record: %s', str(result))
    # Cursor exhausted: further reads return an empty dict.
    self.assertEqual(db.read(), {})
    logging.info('###### Resetting')
    db.seek(0, 'start')
    # After rewinding, filtered reads return only the requested fields.
    for r in RESET_RESULTS:
      result = db.read('S330CourseTrue,S330CourseMag')
      self.assertEqual(result, r)
      logging.info('Read record: %s', str(result))
    self.assertEqual(db.read('S330CourseTrue,S330CourseMag'), {})
    logging.info('###### Resetting')
    db.seek(0, 'start')
    # num_records=None reads everything remaining in one batch.
    for r in BATCH_RESULTS:
      result = db.read('S330CourseTrue,S330CourseMag', num_records=None)
      self.assertEqual(result, r)
      logging.info('Read record: %s', str(result))
    self.assertEqual(db.read('S330CourseTrue,S330CourseMag', num_records=None), {})
    # Drop the test collections so reruns start clean.
    logging.info('Cleaning up test database')
    db.delete_table("data")
    db.delete_table("source")
    db.close()
if __name__ == '__main__':
  import argparse
  parser = argparse.ArgumentParser()
  # Repeatable -v flag: -v = INFO, -vv = DEBUG.
  parser.add_argument('-v', '--verbosity', dest='verbosity',
                      default=0, action='count',
                      help='Increase output verbosity')
  args = parser.parse_args()
  LOGGING_FORMAT = '%(asctime)-15s %(filename)s:%(lineno)d %(message)s'
  logging.basicConfig(format=LOGGING_FORMAT)
  LOG_LEVELS ={0:logging.WARNING, 1:logging.INFO, 2:logging.DEBUG}
  # Clamp the count so -vvv and beyond still maps to DEBUG.
  args.verbosity = min(args.verbosity, max(LOG_LEVELS))
  logging.getLogger().setLevel(LOG_LEVELS[args.verbosity])
  unittest.main(warnings='ignore')
| 37.479452 | 129 | 0.658991 |
4e91fcf120f0a34f063a1ff4554eefaf3f0dfe4a | 22,338 | py | Python | pandapower/pypower/pips.py | hmaschke/pandapower-1 | 2e93969050d3d468ce57f73d358e97fabc6e5141 | [
"BSD-3-Clause"
] | 2 | 2019-11-01T11:01:41.000Z | 2022-02-07T12:55:55.000Z | pandapower/pypower/pips.py | hmaschke/pandapower-1 | 2e93969050d3d468ce57f73d358e97fabc6e5141 | [
"BSD-3-Clause"
] | null | null | null | pandapower/pypower/pips.py | hmaschke/pandapower-1 | 2e93969050d3d468ce57f73d358e97fabc6e5141 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
"""Python Interior Point Solver (PIPS).
"""
from numpy import array, Inf, any, isnan, ones, r_, finfo, \
zeros, dot, absolute, log, flatnonzero as find
from numpy.linalg import norm
from pandapower.pypower.pipsver import pipsver
from scipy.sparse import vstack, hstack, eye, csr_matrix as sparse
from scipy.sparse.linalg import spsolve
EPS = finfo(float).eps
def pips(f_fcn, x0=None, A=None, l=None, u=None, xmin=None, xmax=None,
gh_fcn=None, hess_fcn=None, opt=None):
"""Primal-dual interior point method for NLP (nonlinear programming).
Minimize a function F(X) beginning from a starting point M{x0}, subject to
optional linear and nonlinear constraints and variable bounds::
min f(x)
x
subject to::
g(x) = 0 (nonlinear equalities)
h(x) <= 0 (nonlinear inequalities)
l <= A*x <= u (linear constraints)
xmin <= x <= xmax (variable bounds)
Note: The calling syntax is almost identical to that of FMINCON from
MathWorks' Optimization Toolbox. The main difference is that the linear
constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},
C{Aeq}, C{Beq}. The functions for evaluating the objective function,
constraints and Hessian are identical.
Example from U{http://en.wikipedia.org/wiki/Nonlinear_programming}:
>>> from numpy import array, r_, float64, dot
>>> from scipy.sparse import csr_matrix
>>> def f2(x):
... f = -x[0] * x[1] - x[1] * x[2]
... df = -r_[x[1], x[0] + x[2], x[1]]
... # actually not used since 'hess_fcn' is provided
... d2f = -array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], float64)
... return f, df, d2f
>>> def gh2(x):
... h = dot(array([[1, -1, 1],
... [1, 1, 1]]), x**2) + array([-2.0, -10.0])
... dh = 2 * csr_matrix(array([[ x[0], x[0]],
... [-x[1], x[1]],
... [ x[2], x[2]]]))
... g = array([])
... dg = None
... return h, g, dh, dg
>>> def hess2(x, lam, cost_mult=1):
... mu = lam["ineqnonlin"]
... a = r_[dot(2 * array([1, 1]), mu), -1, 0]
... b = r_[-1, dot(2 * array([-1, 1]), mu),-1]
... c = r_[0, -1, dot(2 * array([1, 1]), mu)]
... Lxx = csr_matrix(array([a, b, c]))
... return Lxx
>>> x0 = array([1, 1, 0], float64)
>>> solution = pips(f2, x0, gh_fcn=gh2, hess_fcn=hess2)
>>> round(solution["f"], 11) == -7.07106725919
True
>>> solution["output"]["iterations"]
8
Ported by Richard Lincoln from the MATLAB Interior Point Solver (MIPS)
(v1.9) by Ray Zimmerman. MIPS is distributed as part of the MATPOWER
project, developed at the Power System Engineering Research Center (PSERC) (PSERC),
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
MIPS was ported by Ray Zimmerman from C code written by H. Wang for his
PhD dissertation:
- "On the Computation and Application of Multi-period
Security-Constrained Optimal Power Flow for Real-time
Electricity Market Operations", Cornell University, May 2007.
See also:
- H. Wang, C. E. Murillo-Sanchez, R. D. Zimmerman, R. J. Thomas,
"On Computational Issues of Market-Based Optimal Power Flow",
IEEE Transactions on Power Systems, Vol. 22, No. 3, Aug. 2007,
pp. 1185-1193.
All parameters are optional except C{f_fcn} and C{x0}.
@param f_fcn: Function that evaluates the objective function, its gradients
and Hessian for a given value of M{x}. If there are
nonlinear constraints, the Hessian information is provided
by the 'hess_fcn' argument and is not required here.
@type f_fcn: callable
@param x0: Starting value of optimization vector M{x}.
@type x0: array
@param A: Optional linear constraints.
@type A: csr_matrix
@param l: Optional linear constraints. Default values are M{-Inf}.
@type l: array
@param u: Optional linear constraints. Default values are M{Inf}.
@type u: array
@param xmin: Optional lower bounds on the M{x} variables, defaults are
M{-Inf}.
@type xmin: array
@param xmax: Optional upper bounds on the M{x} variables, defaults are
M{Inf}.
@type xmax: array
@param gh_fcn: Function that evaluates the optional nonlinear constraints
and their gradients for a given value of M{x}.
@type gh_fcn: callable
@param hess_fcn: Handle to function that computes the Hessian of the
Lagrangian for given values of M{x}, M{lambda} and M{mu},
where M{lambda} and M{mu} are the multipliers on the
equality and inequality constraints, M{g} and M{h},
respectively.
@type hess_fcn: callable
@param opt: optional options dictionary with the following keys, all of
which are also optional (default values shown in parentheses)
- C{verbose} (False) - Controls level of progress output
displayed
- C{feastol} (1e-6) - termination tolerance for feasibility
condition
- C{gradtol} (1e-6) - termination tolerance for gradient
condition
- C{comptol} (1e-6) - termination tolerance for
complementarity condition
- C{costtol} (1e-6) - termination tolerance for cost
condition
- C{max_it} (150) - maximum number of iterations
- C{step_control} (False) - set to True to enable step-size
control
- C{max_red} (20) - maximum number of step-size reductions if
step-control is on
- C{cost_mult} (1.0) - cost multiplier used to scale the
objective function for improved conditioning. Note: This
value is also passed as the 3rd argument to the Hessian
evaluation function so that it can appropriately scale the
objective function term in the Hessian of the Lagrangian.
@type opt: dict
@rtype: dict
@return: The solution dictionary has the following keys:
- C{x} - solution vector
- C{f} - final objective function value
- C{converged} - exit status
- True = first order optimality conditions satisfied
- False = maximum number of iterations reached
- None = numerically failed
- C{output} - output dictionary with keys:
- C{iterations} - number of iterations performed
- C{hist} - list of arrays with trajectories of the
following: feascond, gradcond, compcond, costcond, gamma,
stepsize, obj, alphap, alphad
- C{message} - exit message
- C{lmbda} - dictionary containing the Langrange and Kuhn-Tucker
multipliers on the constraints, with keys:
- C{eqnonlin} - nonlinear equality constraints
- C{ineqnonlin} - nonlinear inequality constraints
- C{mu_l} - lower (left-hand) limit on linear constraints
- C{mu_u} - upper (right-hand) limit on linear constraints
- C{lower} - lower bound on optimization variables
- C{upper} - upper bound on optimization variables
@see: U{http://www.pserc.cornell.edu/matpower/}
@author: Ray Zimmerman (PSERC Cornell)
@author: Richard Lincoln
"""
if isinstance(f_fcn, dict): ## problem dict
p = f_fcn
f_fcn = p['f_fcn']
x0 = p['x0']
if 'opt' in p: opt = p['opt']
if 'hess_fcn' in p: hess_fcn = p['hess_fcn']
if 'gh_fcn' in p: gh_fcn = p['gh_fcn']
if 'xmax' in p: xmax = p['xmax']
if 'xmin' in p: xmin = p['xmin']
if 'u' in p: u = p['u']
if 'l' in p: l = p['l']
if 'A' in p: A = p['A']
nx = x0.shape[0] # number of variables
nA = A.shape[0] if A is not None else 0 # number of original linear constr
# default argument values
if l is None or len(l) == 0: l = -Inf * ones(nA)
if u is None or len(u) == 0: u = Inf * ones(nA)
if xmin is None or len(xmin) == 0: xmin = -Inf * ones(x0.shape[0])
if xmax is None or len(xmax) == 0: xmax = Inf * ones(x0.shape[0])
if gh_fcn is None:
nonlinear = False
gn = array([])
hn = array([])
else:
nonlinear = True
if opt is None: opt = {}
# options
if "feastol" not in opt:
opt["feastol"] = 1e-06
if "gradtol" not in opt:
opt["gradtol"] = 1e-06
if "comptol" not in opt:
opt["comptol"] = 1e-06
if "costtol" not in opt:
opt["costtol"] = 1e-06
if "max_it" not in opt:
opt["max_it"] = 150
if "max_red" not in opt:
opt["max_red"] = 20
if "step_control" not in opt:
opt["step_control"] = False
if "cost_mult" not in opt:
opt["cost_mult"] = 1
if "verbose" not in opt:
opt["verbose"] = 0
# initialize history
hist = []
# constants
xi = 0.99995
sigma = 0.1
z0 = 1
alpha_min = 1e-8
rho_min = 0.95
rho_max = 1.05
mu_threshold = 1e-5
# initialize
i = 0 # iteration counter
converged = False # flag
eflag = False # exit flag
# add var limits to linear constraints
eyex = eye(nx, nx, format="csr")
AA = eyex if A is None else vstack([eyex, A], "csr")
ll = r_[xmin, l]
uu = r_[xmax, u]
# split up linear constraints
ieq = find( absolute(uu - ll) <= EPS )
igt = find( (uu >= 1e10) & (ll > -1e10) )
ilt = find( (ll <= -1e10) & (uu < 1e10) )
ibx = find( (absolute(uu - ll) > EPS) & (uu < 1e10) & (ll > -1e10) )
# zero-sized sparse matrices unsupported
Ae = AA[ieq, :] if len(ieq) else None
if len(ilt) or len(igt) or len(ibx):
idxs = [(1, ilt), (-1, igt), (1, ibx), (-1, ibx)]
Ai = vstack([sig * AA[idx, :] for sig, idx in idxs if len(idx)], 'csr')
else:
Ai = None
be = uu[ieq]
bi = r_[uu[ilt], -ll[igt], uu[ibx], -ll[ibx]]
# evaluate cost f(x0) and constraints g(x0), h(x0)
x = x0
f, df = f_fcn(x) # cost
f = f * opt["cost_mult"]
df = df * opt["cost_mult"]
if nonlinear:
hn, gn, dhn, dgn = gh_fcn(x) # nonlinear constraints
h = hn if Ai is None else r_[hn.reshape(len(hn),), Ai * x - bi] # inequality constraints
g = gn if Ae is None else r_[gn, Ae * x - be] # equality constraints
if (dhn is None) and (Ai is None):
dh = None
elif dhn is None:
dh = Ai.T
elif Ai is None:
dh = dhn
else:
dh = hstack([dhn, Ai.T])
if (dgn is None) and (Ae is None):
dg = None
elif dgn is None:
dg = Ae.T
elif Ae is None:
dg = dgn
else:
dg = hstack([dgn, Ae.T])
else:
h = -bi if Ai is None else Ai * x - bi # inequality constraints
g = -be if Ae is None else Ae * x - be # equality constraints
dh = None if Ai is None else Ai.T # 1st derivative of inequalities
dg = None if Ae is None else Ae.T # 1st derivative of equalities
# some dimensions
neq = g.shape[0] # number of equality constraints
niq = h.shape[0] # number of inequality constraints
neqnln = gn.shape[0] # number of nonlinear equality constraints
niqnln = hn.shape[0] # number of nonlinear inequality constraints
nlt = len(ilt) # number of upper bounded linear inequalities
ngt = len(igt) # number of lower bounded linear inequalities
nbx = len(ibx) # number of doubly bounded linear inequalities
# initialize gamma, lam, mu, z, e
gamma = 1 # barrier coefficient
lam = zeros(neq)
z = z0 * ones(niq)
mu = z0 * ones(niq)
k = find(h < -z0)
z[k] = -h[k]
k = find((gamma / z) > z0)
mu[k] = gamma / z[k]
e = ones(niq)
# check tolerance
f0 = f
if opt["step_control"]:
L = f + dot(lam, g) + dot(mu, h + z) - gamma * sum(log(z))
Lx = df.copy()
Lx = Lx + dg * lam if dg is not None else Lx
Lx = Lx + dh * mu if dh is not None else Lx
maxh = zeros(1) if len(h) == 0 else max(h)
gnorm = norm(g, Inf) if len(g) else 0.0
lam_norm = norm(lam, Inf) if len(lam) else 0.0
mu_norm = norm(mu, Inf) if len(mu) else 0.0
znorm = norm(z, Inf) if len(z) else 0.0
feascond = \
max([gnorm, maxh]) / (1 + max([norm(x, Inf), znorm]))
gradcond = \
norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
compcond = dot(z, mu) / (1 + norm(x, Inf))
costcond = absolute(f - f0) / (1 + absolute(f0))
# save history
hist.append({'feascond': feascond, 'gradcond': gradcond,
'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
'stepsize': 0, 'obj': f / opt["cost_mult"], 'alphap': 0, 'alphad': 0})
if opt["verbose"]: # pragma: no cover
s = '-sc' if opt["step_control"] else ''
v = pipsver('all')
print('Python Interior Point Solver - PIPS%s, Version %s, %s' %
(s, v['Version'], v['Date']))
if opt['verbose'] > 1:
print(" it objective step size feascond gradcond "
"compcond costcond ")
print("---- ------------ --------- ------------ ------------ "
"------------ ------------")
print("%3d %12.8g %10s %12g %12g %12g %12g" %
(i, (f / opt["cost_mult"]), "",
feascond, gradcond, compcond, costcond))
if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
compcond < opt["comptol"] and costcond < opt["costtol"]:
converged = True
if opt["verbose"]:
print("Converged!")
# do Newton iterations
while (not converged) and (i < opt["max_it"]):
# update iteration counter
i += 1
# compute update step
lmbda = {"eqnonlin": lam[range(neqnln)],
"ineqnonlin": mu[range(niqnln)]}
if nonlinear:
if hess_fcn is None:
print("pips: Hessian evaluation via finite differences "
"not yet implemented.\nPlease provide "
"your own hessian evaluation function.")
Lxx = hess_fcn(x, lmbda, opt["cost_mult"])
else:
_, _, d2f = f_fcn(x, True) # cost
Lxx = d2f * opt["cost_mult"]
rz = range(len(z))
zinvdiag = sparse((1.0 / z, (rz, rz))) if len(z) else None
rmu = range(len(mu))
mudiag = sparse((mu, (rmu, rmu))) if len(mu) else None
dh_zinv = None if dh is None else dh * zinvdiag
M = Lxx if dh is None else Lxx + dh_zinv * mudiag * dh.T
N = Lx if dh is None else Lx + dh_zinv * (mudiag * h + gamma * e)
Ab = sparse(M) if dg is None else vstack([
hstack([M, dg]),
hstack([dg.T, sparse((neq, neq))])
])
bb = r_[-N, -g]
dxdlam = spsolve(Ab.tocsr(), bb)
if any(isnan(dxdlam)):
if opt["verbose"]:
print('\nNumerically Failed\n')
eflag = -1
break
dx = dxdlam[:nx]
dlam = dxdlam[nx:nx + neq]
dz = -h - z if dh is None else -h - z - dh.T * dx
dmu = -mu if dh is None else -mu + zinvdiag * (gamma * e - mudiag * dz)
# do the update
k = find(dz < 0.0)
alphap = min([xi * min(z[k] / -dz[k]), 1]) if len(k) else 1.0
k = find(dmu < 0.0)
alphad = min([xi * min(mu[k] / -dmu[k]), 1]) if len(k) else 1.0
x = x + alphap * dx
z = z + alphap * dz
lam = lam + alphad * dlam
mu = mu + alphad * dmu
if niq > 0:
gamma = sigma * dot(z, mu) / niq
# evaluate cost, constraints, derivatives
f, df = f_fcn(x) # cost
f = f * opt["cost_mult"]
df = df * opt["cost_mult"]
if nonlinear:
hn, gn, dhn, dgn = gh_fcn(x) # nln constraints
# g = gn if Ai is None else r_[gn, Ai * x - bi] # ieq constraints
# h = hn if Ae is None else r_[hn, Ae * x - be] # eq constraints
h = hn if Ai is None else r_[hn.reshape(len(hn),), Ai * x - bi] # ieq constr
g = gn if Ae is None else r_[gn, Ae * x - be] # eq constr
if (dhn is None) and (Ai is None):
dh = None
elif dhn is None:
dh = Ai.T
elif Ai is None:
dh = dhn
else:
dh = hstack([dhn, Ai.T])
if (dgn is None) and (Ae is None):
dg = None
elif dgn is None:
dg = Ae.T
elif Ae is None:
dg = dgn
else:
dg = hstack([dgn, Ae.T])
else:
h = -bi if Ai is None else Ai * x - bi # inequality constraints
g = -be if Ae is None else Ae * x - be # equality constraints
# 1st derivatives are constant, still dh = Ai.T, dg = Ae.T
Lx = df
Lx = Lx + dg * lam if dg is not None else Lx
Lx = Lx + dh * mu if dh is not None else Lx
if len(h) == 0:
maxh = zeros(1)
else:
maxh = max(h)
gnorm = norm(g, Inf) if len(g) else 0.0
lam_norm = norm(lam, Inf) if len(lam) else 0.0
mu_norm = norm(mu, Inf) if len(mu) else 0.0
znorm = norm(z, Inf) if len(z) else 0.0
feascond = \
max([gnorm, maxh]) / (1 + max([norm(x, Inf), znorm]))
gradcond = \
norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
compcond = dot(z, mu) / (1 + norm(x, Inf))
costcond = float(absolute(f - f0) / (1 + absolute(f0)))
hist.append({'feascond': feascond, 'gradcond': gradcond,
'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
'stepsize': norm(dx), 'obj': f / opt["cost_mult"],
'alphap': alphap, 'alphad': alphad})
if opt["verbose"] > 1:
print("%3d %12.8g %10.5g %12g %12g %12g %12g" %
(i, (f / opt["cost_mult"]), norm(dx), feascond, gradcond,
compcond, costcond))
if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
compcond < opt["comptol"] and costcond < opt["costtol"]:
converged = True
if opt["verbose"]:
print("Converged!")
else:
if any(isnan(x)) or (alphap < alpha_min) or \
(alphad < alpha_min) or (gamma < EPS) or (gamma > 1.0 / EPS):
if opt["verbose"]:
print("Numerically failed.")
eflag = -1
break
f0 = f
if opt["step_control"]:
L = f + dot(lam, g) + dot(mu, (h + z)) - gamma * sum(log(z))
if opt["verbose"]:
if not converged:
print("Did not converge in %d iterations." % i)
# package results
if eflag != -1:
eflag = converged
if eflag == 0:
message = 'Did not converge'
elif eflag == 1:
message = 'Converged'
elif eflag == -1:
message = 'Numerically failed'
else:
raise
output = {"iterations": i, "hist": hist, "message": message}
# zero out multipliers on non-binding constraints
mu[find( (h < -opt["feastol"]) & (mu < mu_threshold) )] = 0.0
# un-scale cost and prices
f = f / opt["cost_mult"]
lam = lam / opt["cost_mult"]
mu = mu / opt["cost_mult"]
# re-package multipliers into struct
lam_lin = lam[neqnln:neq] # lambda for linear constraints
mu_lin = mu[niqnln:niq] # mu for linear constraints
kl = find(lam_lin < 0.0) # lower bound binding
ku = find(lam_lin > 0.0) # upper bound binding
mu_l = zeros(nx + nA)
mu_l[ieq[kl]] = -lam_lin[kl]
mu_l[igt] = mu_lin[nlt:nlt + ngt]
mu_l[ibx] = mu_lin[nlt + ngt + nbx:nlt + ngt + nbx + nbx]
mu_u = zeros(nx + nA)
mu_u[ieq[ku]] = lam_lin[ku]
mu_u[ilt] = mu_lin[:nlt]
mu_u[ibx] = mu_lin[nlt + ngt:nlt + ngt + nbx]
lmbda = {'mu_l': mu_l[nx:], 'mu_u': mu_u[nx:],
'lower': mu_l[:nx], 'upper': mu_u[:nx]}
if niqnln > 0:
lmbda['ineqnonlin'] = mu[:niqnln]
if neqnln > 0:
lmbda['eqnonlin'] = lam[:neqnln]
# lmbda = {"eqnonlin": lam[:neqnln], 'ineqnonlin': mu[:niqnln],
# "mu_l": mu_l[nx:], "mu_u": mu_u[nx:],
# "lower": mu_l[:nx], "upper": mu_u[:nx]}
solution = {"x": x, "f": f, "eflag": converged,
"output": output, "lmbda": lmbda}
return solution
| 40.032258 | 97 | 0.51155 |
52ff354ba84b6edb439bd73cb831c5c618d0dcda | 5,995 | py | Python | tests/test_create_post.py | gc-plp/reddit-moderator-bot | 7fe7003002ec2605004608752a9cc60d76a16e84 | [
"Unlicense"
] | 5 | 2019-02-28T05:35:52.000Z | 2022-01-05T09:39:51.000Z | tests/test_create_post.py | gc-plp/reddit-moderator-bot | 7fe7003002ec2605004608752a9cc60d76a16e84 | [
"Unlicense"
] | 5 | 2019-12-20T11:29:43.000Z | 2020-03-14T15:00:39.000Z | tests/test_create_post.py | gc-plp/reddit-moderator-bot | 7fe7003002ec2605004608752a9cc60d76a16e84 | [
"Unlicense"
] | 1 | 2022-01-05T09:39:53.000Z | 2022-01-05T09:39:53.000Z | import pytest
import modbot.input.test as test
TEST_SUBREDDIT = "testsub123"
@pytest.fixture
def create_bot():
    """Spin up a fresh moderator bot bound to TEST_SUBREDDIT for each test."""
    test.create_bot(TEST_SUBREDDIT)
def test_create_post(create_bot):
    """Exercise /create_post plus comment integration and restickying."""
    # Ask the bot (via private message) to create a sticky post.
    test.get_reddit().inbox.add_message(
        "mod1",
        "/create_post --subreddit=testsub123 --sticky --title test1 test2 test3 --body zzz ddd")
    test.advance_time_10m()

    # The bot's reply carries the shortlink as the second word of line one.
    _, reply_body = test.get_user("mod1").inbox[-1]
    first_line = reply_body.split("\n")[0]
    post_link = first_line.split(" ")[1]
    target_sub = None
    for candidate in test.cache_submissions.values():
        if candidate.shortlink == post_link:
            target_sub = candidate

    # Attach two comments and ask the bot to integrate both into the post.
    first_comment = target_sub.add_comment("asd", "xxx1")
    second_comment = target_sub.add_comment("asd", "qwe1")
    for comment in (first_comment, second_comment):
        test.get_reddit().inbox.add_message(
            "mod1",
            "/integrate_comment --sub_link %s --comment_link %s" %
            (target_sub.shortlink, comment.permalink))
    test.advance_time_10m()
    assert "xxx1" in target_sub.body
    assert "qwe1" in target_sub.body

    # Edited comments must be re-synchronised into the post body.
    first_comment.edit("xxx2")
    second_comment.edit("qwe2")
    test.advance_time_10m()
    assert "xxx2" in target_sub.body
    assert "qwe2" in target_sub.body

    # De-integrating the first comment removes it, the second one stays.
    test.get_reddit().inbox.add_message(
        "mod1",
        "/nointegrate_comment --sub_link %s --comment_link %s" %
        (target_sub.shortlink, first_comment.permalink))
    test.advance_time_10m()
    assert "xxx2" not in target_sub.body
    assert "qwe2" in target_sub.body

    # While the post is unstickied, comment edits must not propagate.
    target_sub.mod.sticky(False, False)
    second_comment.edit("qwe3")
    test.advance_time_10m()
    assert "qwe2" in target_sub.body

    # Restickying resumes the synchronisation.
    test.get_reddit().inbox.add_message(
        "mod1",
        "/resticky --sub_link %s" % (target_sub.shortlink))
    test.advance_time_10m()
    assert "qwe3" in target_sub.body
def test_clone_post(create_bot):
    """A cloned post must track the body of the post it was cloned from."""
    source_post = test.FakeSubmission(
        subreddit_name=TEST_SUBREDDIT,
        author_name="JohnDoe1",
        title="title_test",
        body="asd1234")
    test.get_reddit().inbox.add_message(
        "mod1",
        "/clone_post --subreddit=testsub123 --sticky --title=test2 --sub_link=%s" % source_post.shortlink)
    test.advance_time_10m()

    # The bot's reply carries the clone's shortlink on its first line.
    _, reply_body = test.get_user("mod1").inbox[-1]
    clone_link = reply_body.split("\n")[0].split(" ")[1]
    cloned_post = None
    for candidate in test.cache_submissions.values():
        if candidate.shortlink == clone_link:
            cloned_post = candidate

    # The clone starts out with the source body ...
    assert "asd1234" in cloned_post.body

    # ... and follows edits made to the source post.
    source_post.edit("asd5678")
    test.advance_time_10m()
    assert "asd5678" in cloned_post.body
def test_create_from_wiki(create_bot):
    """A post whose body is backed by a wiki page must follow wiki edits."""
    content = """
content
multi
line
"""
    sub = test.get_subreddit(TEST_SUBREDDIT)

    # Have the bot pick up the subreddit configuration.
    test.get_reddit().inbox.add_message(
        "mod1", "/update_control_panel --subreddit %s" % TEST_SUBREDDIT)

    # Publish the wiki page, then request a post sourced from it.
    sub.edit_wiki("wiki123", content)
    test.get_reddit().inbox.add_message(
        "mod1",
        "/create_post --subreddit=%s --sticky --title=test --wikibody=wiki123" % TEST_SUBREDDIT)
    test.advance_time_10m()

    # Extract the created post's shortlink from the bot's reply.
    _, reply_body = test.get_user("mod1").inbox[-1]
    post_link = reply_body.split("\n")[0].split(" ")[1]
    wiki_post = None
    for candidate in test.cache_submissions.values():
        if candidate.shortlink == post_link:
            wiki_post = candidate

    # The post body mirrors the wiki content ...
    assert "multi" in wiki_post.body

    # ... and is refreshed when the wiki page changes.
    sub.edit_wiki("wiki123", content + "XXX")
    test.advance_time_10m()
    assert "XXX" in wiki_post.body
def test_sched_post(create_bot):
    """End-to-end check of the schedule_posts plugin: three scheduled posts
    (inline body, wiki-backed body, cloned post) must each be created at
    their configured 12AM / 1AM / 2AM slot."""
    enable_sched_posts = """
[Enabled Plugins]
schedule_posts
"""
    sub = test.get_subreddit(TEST_SUBREDDIT)
    # Start the fake clock at 22:00 so the scheduled slots fire soon.
    test.set_time(22 * 60 * 60)
    # Source post for the clonepost= entry below.
    test_submission = test.FakeSubmission(
        subreddit_name=TEST_SUBREDDIT,
        author_name="JohnDoe1",
        title="title_test",
        body="asd1234")
    wiki_sched_posts = r"""
[post_at_12AM]
title=test1 test2 ${DAY}.${MONTH}.${YEAR}
body=aaa
bbb
ccc
interval= 0 0 * * * *
[post_at_1AM]
title=test3 test4
wikibody=post1AM
interval= 0 1 * * * *
[post_at_2AM]
title=test5 test6
clonepost=%s
interval= 0 2 * * * *
""" % test_submission.permalink
    # Wiki page that backs the 1AM post's body.
    sub.edit_wiki("post1AM", "xx1")
    # Update control panel and plugin wiki
    sub.edit_wiki("control_panel", enable_sched_posts)
    sub.edit_wiki("schedule_posts", wiki_sched_posts)
    # Tell the bot to update the control panel
    test.get_reddit().inbox.add_message(
        "mod1", "/update_control_panel --subreddit %s" % TEST_SUBREDDIT)
    test.advance_time_10m()
    # Step past midnight, 1AM and 2AM so every scheduled slot fires.
    test.advance_time_1h()
    test.advance_time_1h()
    test.advance_time_1h()
    test.advance_time_1h()
    post_12am = None
    post_1am = None
    post_2am = None
    # Get the posts
    for post in test.cache_submissions.values():
        # 12AM title has a date suffix, so match by prefix only.
        if post.title.startswith("test1 test2"):
            post_12am = post
        if post.title == "test3 test4":
            post_1am = post
        if post.title == "test5 test6":
            post_2am = post
    assert post_12am
    assert post_1am
    assert post_2am
    # Creation times must land within a minute of each slot
    # (86400 = 24h, 90000 = 25h, 93600 = 26h after fake epoch).
    assert post_12am.created_utc - 86400 < 60
    assert post_1am.created_utc - 90000 < 60
    assert post_2am.created_utc - 93600 < 60
| 24.569672 | 110 | 0.637531 |
b09d9b508d3e5793a830b4b57d452963756cdea9 | 861 | py | Python | tests/application/test_utils.py | racedisparityaudit/rd_cms | a12f0e3f5461cc41eed0077ed02e11efafc5dd76 | [
"MIT"
] | 1 | 2021-10-06T13:48:36.000Z | 2021-10-06T13:48:36.000Z | tests/application/test_utils.py | racedisparityaudit/ethnicity-facts-and-figures-publisher | 63a3bd5618a04b2b853868aae35d54730077f14c | [
"MIT"
] | 116 | 2018-11-02T17:20:47.000Z | 2022-02-09T11:06:22.000Z | tests/application/test_utils.py | racedisparityaudit/rd_cms | a12f0e3f5461cc41eed0077ed02e11efafc5dd76 | [
"MIT"
] | 2 | 2018-11-09T16:47:35.000Z | 2020-04-09T13:06:48.000Z | from application.utils import get_csv_data_for_download
def test_adds_quotes():
    """Every unquoted CSV value should come back wrapped in double quotes."""
    source_file = "./tests/test_data/csv_with_no_quotes.csv"
    expected = '"Ethnicity","Value"\n"Black","10"\n"White","12.2"\n'
    assert get_csv_data_for_download(source_file) == expected
def test_only_adds_quotes_to_non_quoted_values():
    """Embedded quotes must stay escaped (doubled), not be re-wrapped."""
    source_file = "./tests/test_data/csv_with_embedded_quotes.csv"
    expected = '"Ethnicity","Value","Description"\n"Black","10","Test"\n"White","12.2","This is a ""test"""\n'
    assert get_csv_data_for_download(source_file) == expected
def test_base_template_renders_page_built_at_comment(test_app_client, logged_in_rdu_user):
    """The base template should embed a build-timestamp HTML comment."""
    response = test_app_client.get("/", follow_redirects=True)
    page_html = response.get_data(as_text=True)
    assert "<!-- Page built at" in page_html
| 37.434783 | 117 | 0.770035 |
94beef65234943b029426211598f6812ba54786b | 11,110 | py | Python | tempest/api/workloadmgr/upgrade/test_before_upgrade.py | deepanshusagar/tempest | 910919eef3e5dea089a82e4074e85704bb8f7a2b | [
"Apache-2.0"
] | null | null | null | tempest/api/workloadmgr/upgrade/test_before_upgrade.py | deepanshusagar/tempest | 910919eef3e5dea089a82e4074e85704bb8f7a2b | [
"Apache-2.0"
] | null | null | null | tempest/api/workloadmgr/upgrade/test_before_upgrade.py | deepanshusagar/tempest | 910919eef3e5dea089a82e4074e85704bb8f7a2b | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.workloadmgr import base
from tempest import config
from tempest import test
import json
import sys
from tempest import api
from oslo_log import log as logging
from tempest.common import waiters
from tempest import tvaultconf
from tempest import reporting
from tempest import command_argument_string
from tempest.util import cli_parser
import time
LOG = logging.getLogger(__name__)
CONF = config.CONF
class WorkloadsTest(base.BaseWorkloadmgrTest):
    """Pre-upgrade data seeding test.

    Creates workloads, snapshots and TrilioVault settings, and records every
    created id in ``tempest/upgrade_data_conf.py`` so that the matching
    post-upgrade test can verify the data survived the upgrade.
    """

    credentials = ['primary']

    @classmethod
    def setup_clients(cls):
        super(WorkloadsTest, cls).setup_clients()
        cls.client = cls.os.wlm_client
        reporting.add_test_script(str(__name__))

    @test.attr(type='smoke')
    @test.idempotent_id('9fe07175-912e-49a5-a629-5f52eeada4c9')
    def test_before_upgrade(self):
        self.vms_per_workload = 1
        self.volume_size = 1
        self.workload_instances = []
        self.workload_volumes = []
        try:
            # All ids created below are persisted here for the post-upgrade
            # verification test.  NOTE(review): the file is only closed on
            # the success path; a failure leaks the handle until GC.
            f = open("tempest/upgrade_data_conf.py", "w")
            # Get global job scheduler status and bring it into the state
            # requested by the test configuration.
            self.scheduler_status = self.get_global_job_scheduler_status()
            if tvaultconf.global_job_scheduler:
                self.scheduler_status = self.enable_global_job_scheduler()
                if (self.scheduler_status == 'false'):
                    reporting.add_test_step("Enable global job scheduler", tvaultconf.FAIL)
                    raise Exception("Enable global job scheduler failed")
                else:
                    reporting.add_test_step("Enable global job scheduler", tvaultconf.PASS)
            else:
                self.scheduler_status = self.disable_global_job_scheduler()
                if (self.scheduler_status == 'true'):
                    reporting.add_test_step("Disable global job scheduler", tvaultconf.FAIL)
                    raise Exception("Disable global job scheduler failed")
                else:
                    reporting.add_test_step("Disable global job scheduler", tvaultconf.PASS)
            # Fetch license details
            self.license_details = self.get_license_list()
            LOG.debug("License details: " + str(self.license_details))
            f.write("license_details=" + str(self.license_details) + "\n")
            # Update user email in openstack.  NOTE(review): this rebinds the
            # method name on the instance with its result; works once only.
            self.update_user_email = self.update_user_email(CONF.identity.user_id, CONF.identity.user_email, CONF.identity.tenant_id)
            f.write("update_user_email_in_openstack=" + str(self.update_user_email) + "\n")
            if self.update_user_email:
                reporting.add_test_step("Update email for user in openstack", tvaultconf.PASS)
                # Fetch existing settings
                self.existing_setting = self.get_settings_list()
                LOG.debug("Existing setting list: " + str(self.existing_setting))
                # Delete any existing settings
                flag = False
                if(self.existing_setting != {}):
                    for k, v in self.existing_setting.items():
                        if (self.delete_setting(k) == False):
                            flag = True
                if flag:
                    reporting.add_test_step("Delete existing setting", tvaultconf.FAIL)
                else:
                    # Update trilioVault email settings
                    self.settings_resp = self.update_email_setings(tvaultconf.setting_data)
                    f.write("settings_list=" + str(self.settings_resp) + "\n")
                    self.setting_data_from_resp = {}
                    for i in range(0, len(self.settings_resp)):
                        self.setting_data_from_resp[self.settings_resp[i]['name']] = self.settings_resp[i]['value']
                    LOG.debug("Settings data from response: " + str(self.setting_data_from_resp) + " ; original setting data: " + str(tvaultconf.setting_data))
                    # Plain dict equality instead of the Python-2-only cmp()
                    # builtin (removed in Python 3); equivalent on both.
                    if self.setting_data_from_resp == tvaultconf.setting_data:
                        reporting.add_test_step("Update email settings", tvaultconf.PASS)
                        # Enable email notification for project
                        self.enable_email_resp = self.update_email_setings(tvaultconf.enable_email_notification)[0]
                        f.write("email_enabled_settings=" + str(self.enable_email_resp) + "\n")
                        if((str(self.enable_email_resp['name']) == 'smtp_email_enable') and (str(self.enable_email_resp['value']) == '1')):
                            reporting.add_test_step("Enable email notification for project", tvaultconf.PASS)
                        else:
                            reporting.add_test_step("Enable email notification for project", tvaultconf.FAIL)
                            reporting.set_test_script_status(tvaultconf.FAIL)
                    else:
                        reporting.add_test_step("Update email settings", tvaultconf.FAIL)
                        reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Update email for user in openstack", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
            # Create workload-1: instance(s) with an attached data volume and
            # the job scheduler enabled.
            for vm in range(0, self.vms_per_workload):
                volume_id1 = self.create_volume()
                self.workload_volumes.append(volume_id1)
                vm_id = self.create_vm(vm_cleanup=False)
                self.workload_instances.append(vm_id)
                f.write("instance_id=" + str(self.workload_instances) + "\n")
                self.attach_volume(volume_id1, vm_id, device="/dev/vdb")
                f.write("volume_ids=" + str(self.workload_volumes) + "\n")
            self.start_date = time.strftime("%x")
            self.start_time = time.strftime("%X")
            self.jobschedule = {"fullbackup_interval": "-1",
                                "retention_policy_type": tvaultconf.retention_policy_type,
                                "enabled": True,
                                "start_date": self.start_date,
                                "start_time": self.start_time,
                                "interval": tvaultconf.interval,
                                "retention_policy_value": tvaultconf.retention_policy_value}
            self.workload_id = self.workload_create(self.workload_instances, tvaultconf.parallel, self.jobschedule, workload_cleanup=False)
            if(self.wait_for_workload_tobe_available(self.workload_id)):
                reporting.add_test_step("Create Workload 1 for attached volume instance with scheduler enabled", tvaultconf.PASS)
            else:
                reporting.add_test_step("Create Workload 1 for attached volume instance with scheduler enabled", tvaultconf.FAIL)
                raise Exception("Workload creation failed")
            f.write("workload_id=\"" + str(self.workload_id) + "\"\n")
            # Create workload-2: boot-from-volume instance, scheduler off.
            self.volumes = []
            self.instances = []
            self.volume_id = self.create_volume(size=tvaultconf.bootfromvol_vol_size, image_id=CONF.compute.image_ref, volume_type_id=CONF.volume.volume_type_id)
            self.set_volume_as_bootable(self.volume_id)
            self.block_mapping_details = [{"source_type": "volume",
                                           "delete_on_termination": "false",
                                           "boot_index": 0,
                                           "uuid": self.volume_id,
                                           "destination_type": "volume"}]
            self.volumes.append(self.volume_id)
            f.write("volume_ids_2=" + str(self.volumes) + "\n")
            self.vm_id = self.create_vm(image_id="", block_mapping_data=self.block_mapping_details)
            self.instances.append(self.vm_id)
            f.write("instance_id_2=" + str(self.instances) + "\n")
            self.workload_id2 = self.workload_create(self.instances, tvaultconf.parallel, jobschedule={'enabled': False}, workload_cleanup=False)
            if(self.wait_for_workload_tobe_available(self.workload_id2)):
                reporting.add_test_step("Create Workload 2 for boot from volume instance with scheduler disabled", tvaultconf.PASS)
            else:
                reporting.add_test_step("Create Workload 2 for boot from volume instance with scheduler disabled", tvaultconf.FAIL)
                raise Exception("Workload creation failed")
            f.write("workload_id_2=\"" + str(self.workload_id2) + "\"\n")
            # Fetch workload scheduler and retention settings for workloads
            self.workloads = [self.workload_id, self.workload_id2]
            for i in range(0, len(self.workloads)):
                self.scheduler_settings = self.getSchedulerDetails(self.workloads[i])
                LOG.debug("Workload scheduler settings: " + str(self.scheduler_settings))
                if(i == 0):
                    f.write("scheduler_settings=" + str(self.scheduler_settings) + "\n")
                else:
                    f.write("scheduler_settings_2=" + str(self.scheduler_settings) + "\n")
            # Create full snapshots for workloads 1 & 2
            self.snapshots = []
            for i in range(0, len(self.workloads)):
                self.snapshot_id = self.workload_snapshot(self.workloads[i], True, snapshot_cleanup=False)
                self.snapshots.append(self.snapshot_id)
                if(i == 0):
                    f.write("full_snapshot_id=\"" + str(self.snapshot_id) + "\"\n")
                else:
                    f.write("full_snapshot_id_2=\"" + str(self.snapshot_id) + "\"\n")
            for i in range(0, len(self.workloads)):
                self.wait_for_workload_tobe_available(self.workloads[i])
                if(self.getSnapshotStatus(self.workloads[i], self.snapshots[i]) == "available"):
                    reporting.add_test_step("Create full snapshot for workload " + str(i+1), tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create full snapshot for workload " + str(i+1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            # Fetch trust details
            self.trust_details = self.get_trust_list()
            LOG.debug("Trust details: " + str(self.trust_details))
            f.write("trust_details=" + str(self.trust_details) + "\n")
            f.close()
            reporting.test_case_to_write()
        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
| 52.654028 | 162 | 0.616832 |
8bfe838ed33b811e0ec6e8c39e6327f50af49fa7 | 31 | py | Python | QueryLMS/__init__.py | txoof/querylms | b3c0bb587d76bf71cccf647292a4e286f0e0f7d5 | [
"MIT"
] | null | null | null | QueryLMS/__init__.py | txoof/querylms | b3c0bb587d76bf71cccf647292a4e286f0e0f7d5 | [
"MIT"
] | null | null | null | QueryLMS/__init__.py | txoof/querylms | b3c0bb587d76bf71cccf647292a4e286f0e0f7d5 | [
"MIT"
] | 1 | 2021-10-09T16:20:59.000Z | 2021-10-09T16:20:59.000Z | from .QueryLMS import QueryLMS
| 15.5 | 30 | 0.83871 |
09ff00ba28d02b47e4ecfc59b00889baa9bd04e8 | 1,206 | py | Python | src/the_tale/the_tale/game/exceptions.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 85 | 2017-11-21T12:22:02.000Z | 2022-03-27T23:07:17.000Z | src/the_tale/the_tale/game/exceptions.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 545 | 2017-11-04T14:15:04.000Z | 2022-03-27T14:19:27.000Z | src/the_tale/the_tale/game/exceptions.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 45 | 2017-11-11T12:36:30.000Z | 2022-02-25T06:10:44.000Z |
import smart_imports
smart_imports.all()
class GameError(utils_exceptions.TheTaleError):
    """Base class for all errors raised by the game package."""
    # MSG appears to be the message template consumed by the TheTaleError
    # base (format fields filled from constructor kwargs) -- confirm in
    # utils_exceptions.
    MSG = 'game error'
class HeroAlreadyRegisteredError(GameError):
    """Raised when the same hero id is registered into storage twice."""
    # Fixed typo in the error message: "registerd" -> "registered".
    MSG = 'Hero with id "%(hero_id)d" has already registered in storage, probably on initialization step'
class RemoveActionFromMiddleError(GameError):
    """Raised when an action other than the last one is removed from a hero's action list."""
    MSG = 'try to remove action (%(action)r) from the middle of actions list, last action: (%(last_action)r). Actions list: %(actions_list)r'
class SupervisorTaskMemberMissedError(GameError):
    """Raised when a supervisor task is processed before all its members are captured."""
    MSG = 'try process supervisor task %(task_id)d when not all members captured; members: %(members)r, captured members: %(captured_members)r'
class UnknownNextStepError(GameError):
    """Raised when a ComplexChangeTask carries an unrecognised next_step value."""
    MSG = 'unknown next_step value %(next_step)s in ComplexChangeTask'
class DublicateAccountRegistration(GameError):
    """Raised on a second registration attempt for the same account."""
    # NOTE: class name misspells "Duplicate"; kept unchanged for backward
    # compatibility with existing callers.
    MSG = 'try to double register one account: id=%(account_id)s, owner: %(owner)s'
#########################
# highlevel
#########################
class HighlevelError(GameError):
    """Base class for errors raised by the highlevel worker."""
    MSG = 'highlevel error'
class WrongHighlevelTurnNumber(HighlevelError):
    """Raised when the highlevel worker's turn counter diverges from the turn
    number carried by an incoming command."""
    # Fixed misspelling in the error message: "desinchonization".
    MSG = 'desynchronization: workers turn number %(expected_turn_number)d not equal to command turn number %(new_turn_number)d'
| 28.714286 | 143 | 0.729685 |
877dcacdb511a9b55a5d5fd65c0b7333c78af524 | 1,687 | py | Python | tests/test_scipts.py | pozytywnie/webapp-health-monitor | c90486d1ba0e079bc03b197e693c2b85a0038ae4 | [
"MIT"
] | null | null | null | tests/test_scipts.py | pozytywnie/webapp-health-monitor | c90486d1ba0e079bc03b197e693c2b85a0038ae4 | [
"MIT"
] | 20 | 2015-01-08T09:22:05.000Z | 2021-06-05T20:36:49.000Z | tests/test_scipts.py | pozytywnie/webapp-health-monitor | c90486d1ba0e079bc03b197e693c2b85a0038ae4 | [
"MIT"
] | 1 | 2015-07-21T10:08:24.000Z | 2015-07-21T10:08:24.000Z | from unittest import TestCase
from webapp_health_monitor.scripts import _webapp_health_monitor
from webapp_health_monitor import verificators_register
try:
from unittest import mock
except ImportError:
import mock
class WebbappHealthMonitorTest(TestCase):
    """Tests for the ``_webapp_health_monitor`` CLI entry point."""

    @mock.patch('sys.stderr')
    @mock.patch('sys.stdout')
    def test_no_arguments(self, stdout, stderr):
        # argparse exits the process when the module argument is missing.
        self.assertRaises(SystemExit, _webapp_health_monitor, [])

    @mock.patch('sys.stdout')
    @mock.patch('webapp_health_monitor.scripts.importlib.import_module')
    def test_import(self, import_module, stdout):
        # An unimportable verificator module propagates the ImportError
        # after being looked up by name.
        import_module.side_effect = ImportError()
        self.assertRaises(ImportError,
                          _webapp_health_monitor, ['random_module'])
        import_module.assert_called_with('random_module')

    @mock.patch('webapp_health_monitor.scripts.importlib.import_module')
    def test_no_verificators(self, import_module):
        # With no registered verificators the script reports that and
        # returns exit code 1.
        with mock.patch('sys.stdout') as stdout:
            result = _webapp_health_monitor(['random_module'])
        self.assertEqual(1, result)
        self.assertEqual([mock.call('No verificators found.\n\n')],
                         stdout.write.mock_calls)

    @mock.patch('webapp_health_monitor.scripts.importlib.import_module')
    def test_success(self, import_module):
        # A registered, passing verificator yields exit code 0 and one
        # "<verificator>: OK" line on stdout.
        verificator = mock.Mock()
        verificators_register.register(verificator)
        with mock.patch('sys.stdout') as stdout:
            result = _webapp_health_monitor(['random_module'])
        self.assertEqual(0, result)
        self.assertEqual(
            [mock.call('{}: OK\n'.format(verificator.return_value))],
            stdout.write.mock_calls)
| 39.232558 | 72 | 0.698281 |
a91fff607051bca97143c8d904f54c6e4917ed0c | 4,232 | py | Python | nicos_mlz/poli/devices/magnet.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/poli/devices/magnet.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 4 | 2019-11-08T10:18:16.000Z | 2021-01-13T13:07:29.000Z | nicos_mlz/poli/devices/magnet.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <georg.brandl@frm2.tum.de>
#
# *****************************************************************************
"""Special devices for magnets."""
from nicos.core import Attach, ComputationError, Moveable, Param, listof, \
tupleof
def to_range(x):
    """Fold the angle *x* (in degrees) into the closed interval [-180, 180]."""
    # Shift by full turns, one at a time, until the angle is in range.
    while not -180 <= x <= 180:
        x += 360 if x < -180 else -360
    return x
def in_range(x, a1, a2):
    """Check whether angle *x* lies on the arc from *a1* to *a2* (degrees).

    All angles are taken modulo 360.  The arc runs from a1 to a2; if
    a1 > a2 after reduction, the arc wraps around 0.
    """
    # BUG FIX: the docstring promised "(modulo 360) x", but x was never
    # normalized; callers passing x outside [0, 360) got wrong answers
    # for non-wrapping arcs.  Existing callers pass x in [0, 360) and are
    # unaffected.
    x %= 360.
    a1 %= 360.
    a2 %= 360.
    if a1 <= a2:  # "normal" arc (not crossing 0)
        return a1 <= x <= a2
    # "wrapping" arc (around 0)
    return a1 <= x or x <= a2
class MagnetSampleTheta(Moveable):
    """Class for controlling the sample rotation inside a magnet that is built
    with significant dark angles that must be avoided for incoming and
    outgoing beam, by rotating the magnet itself on the sample table.

    The effective sample angle is the sum of the magnet-table rotation and
    the sample-only rotation (see ``doRead``).
    """

    attached_devices = {
        'sample_theta': Attach('Sample-only theta motor', Moveable),
        'magnet_theta': Attach('Magnet-plus-sample motor', Moveable),
        'two_theta': Attach('Scattering angle', Moveable),
    }

    parameters = {
        'blocked': Param('Blocked angle range in the magnet. 0 is the '
                         'incoming beam direction', unit='deg',
                         type=listof(tupleof(float, float))),
        'windowstep': Param('Steps in which to move the magnet when looking '
                            'for free windows', unit='deg', type=int,
                            default=5),
    }

    def _find_window(self, gamma, magnet):
        # Find magnet positions where neither the incoming nor the outgoing
        # beam falls into a blocked ("dark") angle range.  *gamma* is the
        # scattering angle, *magnet* the current magnet table position.
        result = []
        for pos in range(0, 360, self.windowstep):
            for (a1, a2) in self.blocked:
                # check for blocked incoming beam
                if in_range(pos, -a2, -a1):
                    break
                # check for blocked outgoing beam
                if in_range(pos, -a2 + 180 + gamma, -a1 + 180 + gamma):
                    break
            else:  # no "break": both beams are free at this position
                result.append(pos)
        self.log.debug('gamma: %.3f, magnet: %.3f', gamma, magnet)
        self.log.debug('new possible positions: %s', result)
        if not result:
            raise ComputationError(self, 'no position found for magnet with '
                                   'incoming and outgoing beam free')
        # NOTE(review): this key picks the candidate closest to 0.1 deg
        # (i.e. near zero), not the one closest to the current *magnet*
        # position as the wording above suggests; confirm whether
        # ``abs(to_range(pos - magnet))`` was intended.
        return min(result, key=lambda pos: abs(pos - 0.1))

    def doStart(self, target):
        # get target for scattering angle
        gamma = self._attached_two_theta.target
        magnet = self._attached_magnet_theta.read(0)
        # determine nearest free window for the magnet rotation
        new_magnet = self._find_window(gamma, magnet)
        self._attached_magnet_theta.start(to_range(new_magnet))
        # the sample-only motor provides the remainder of the target angle
        self._attached_sample_theta.start(to_range(target - new_magnet))

    def _getWaiters(self):
        # Wait on both motors started in doStart().
        return [self._attached_sample_theta, self._attached_magnet_theta]

    def doRead(self, maxage=0):
        # Effective sample angle = magnet table angle + sample-only angle,
        # folded back into [-180, 180] degrees.
        angle = self._attached_magnet_theta.read(maxage) + \
            self._attached_sample_theta.read(maxage)
        return to_range(angle)
| 39.185185 | 79 | 0.600898 |
fe822764296a3ab8c1095f4d77aa6784b0d59c75 | 3,479 | py | Python | tools/generate_taint_models/tests/get_graphql_sources_test.py | fabiomassimo/pyre-check | e2f2be7c14a4125d19158b265aebcc666fdd0600 | [
"MIT"
] | 1 | 2020-08-12T14:33:46.000Z | 2020-08-12T14:33:46.000Z | tools/generate_taint_models/tests/get_graphql_sources_test.py | fabiomassimo/pyre-check | e2f2be7c14a4125d19158b265aebcc666fdd0600 | [
"MIT"
] | null | null | null | tools/generate_taint_models/tests/get_graphql_sources_test.py | fabiomassimo/pyre-check | e2f2be7c14a4125d19158b265aebcc666fdd0600 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os # noqa
import unittest
from typing import Callable
from unittest.mock import patch
from graphql.type import (
GraphQLBoolean,
GraphQLField,
GraphQLID,
GraphQLNonNull,
GraphQLObjectType,
)
from graphql.type.definition import GraphQLType
from tools.pyre.tools.generate_taint_models import get_graphql_sources
from tools.pyre.tools.generate_taint_models.get_graphql_sources import (
GraphQLSourceGenerator,
)
from .test_functions import __name__ as qualifier, all_functions
class GetGraphQLSourcesTest(unittest.TestCase):
    """Tests for GraphQLSourceGenerator resolver discovery and model output."""

    @patch.object(get_graphql_sources, "Configuration")
    def test_gather_functions_to_model(self, configuration) -> None:
        """Resolvers attached to GraphQL objects in the configured module
        should be discovered; 'graphql_module' may be a string or a list."""
        configuration.graphql_module = "tools.pyre.tools.generate_taint_models.tests"
        configuration.graphql_object_type = GraphQLObjectType
        functions = GraphQLSourceGenerator().gather_functions_to_model()
        self.assertSetEqual(set(functions), {function_1, function_2})

        # Run the same test again, passing in a list for 'graphql_module', to
        # ensure both work
        configuration.graphql_module = ["tools.pyre.tools.generate_taint_models.tests"]
        configuration.graphql_object_type = GraphQLObjectType
        functions = GraphQLSourceGenerator().gather_functions_to_model()
        self.assertSetEqual(set(functions), {function_1, function_2})

    def test_compute_models(self) -> None:
        """Generated stubs should mark *args/**kwargs as sources and the
        return value as a sink."""
        source = "TaintSource[UserControlled]"
        sink = "TaintSink[ReturnedToUser]"
        self.assertEqual(
            list(GraphQLSourceGenerator().compute_models(all_functions)),
            [
                f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
                f"def {qualifier}.TestClass.methodB(self, *args: {source}) -> {sink}: ...",
                f"def {qualifier}.testA() -> {sink}: ...",
                f"def {qualifier}.testB(x) -> {sink}: ...",
                f"def {qualifier}.testC(x) -> {sink}: ...",
                f"def {qualifier}.testD(x, *args: {source}) -> {sink}: ...",
                f"def {qualifier}.testE(x, **kwargs: {source}) -> {sink}: ...",
            ],
        )
# Defined for testing purposes (see 'test_gather_functions_to_model')
# These functions are not used otherwise.
def function_1() -> None:
    """Placeholder resolver registered below; intentionally does nothing."""
def function_2() -> None:
    """Placeholder resolver attached indirectly below; does nothing."""
# Create an object directly at the top level of the file so that
# 'test_gather_functions_to_model' can verify that we correctly identify the
# resolver
DirectObjectType = GraphQLObjectType(
    name="DirectObjectType",
    description="GraphQLObject directly created at top level",
    fields={
        # No resolver attached: should not be modeled.
        "no_resolver": GraphQLField(GraphQLNonNull(GraphQLID)),
        # Named function resolver: expected to be discovered (function_1).
        "resolver": GraphQLField(GraphQLBoolean, resolver=function_1),
        # Lambda resolver: has no importable name, expected to be skipped.
        "lambda_resolver": GraphQLField(GraphQLBoolean, resolver=lambda x: x),
    },
)
def add_field(type: GraphQLType, name: str, resolver: Callable) -> None:
    """Attach a non-null ID field named *name* with *resolver* to *type*."""
    new_field = GraphQLField(GraphQLNonNull(GraphQLID), resolver=resolver)
    # pyre-ignore[16]: Undefined attribute
    type._fields[name] = new_field
# Indirectly add an additional resolver, so that
# 'test_gather_functions_to_model' can verify that resolver is detected.
# NOTE: add_field() returns None, so IndirectObjectType is bound to None;
# the assignment exists only to run the side effect on DirectObjectType at
# import time.
IndirectObjectType = add_field(
    type=DirectObjectType, name="indirect", resolver=function_2
)
| 35.865979 | 91 | 0.698189 |
37002220078a4021cf64eb26826129a2f2d84490 | 10,311 | py | Python | hexmachina/parametrization.py | dnkrtz/hexmachina | 4f1ec7407fb903efe2c1d3d38874eb114611d072 | [
"MIT"
] | 21 | 2017-10-29T20:04:53.000Z | 2022-02-11T10:08:02.000Z | hexmachina/parametrization.py | dnkrtz/hexmachina | 4f1ec7407fb903efe2c1d3d38874eb114611d072 | [
"MIT"
] | 3 | 2017-08-20T11:08:33.000Z | 2018-04-30T16:53:42.000Z | hexmachina/parametrization.py | dnkrtz/hexmachina | 4f1ec7407fb903efe2c1d3d38874eb114611d072 | [
"MIT"
] | 11 | 2017-07-29T05:33:59.000Z | 2021-07-01T09:22:17.000Z | '''
File: parametrization.py
License: MIT
Author: Aidan Kurtz
Created: 25/08/2016
Python Version: 3.5
========================
Hexahedral parametrization based on the discrete 3D frame field.
(This module is currently broken)
'''
import bisect
import numpy as np
from scipy import sparse
import sys
from machina import *
from transforms import *
from utils import *
from visual import *
#
def var_index(ti, vi, ci):
    """Flattened variable index/indices for tet *ti*, local vertex *vi* and
    coordinate(s) *ci* (a scalar or a range).  Always returns a list."""
    base = 12 * ti + 3 * vi
    coords = ci if isinstance(ci, range) else [ci]
    return [base + c for c in coords]
def drop_rows(M, i):
    """Return matrix *M* (as LIL) with the rows listed in *i* removed."""
    lil = M.tolil()
    lil.rows = np.delete(lil.rows, i)
    lil.data = np.delete(lil.data, i)
    # Shrink the recorded shape to account for the removed rows.
    lil._shape = (lil._shape[0] - len(i), lil._shape[1])
    return lil
def reduce_system(A, x, b, i):
    """Remove variable(s) with indices *i* from the linear system.

    The entries ``b[j]`` for j in *i* must be set (to the fixed variable
    values) before calling: the fixed variables are absorbed into the
    right-hand side, then the corresponding rows/columns are dropped.

    Returns the reduced (A, x, b) as sparse matrices.
    """
    # Convert all to lil format for efficient row surgery.
    A = sparse.lil_matrix(A)
    x = sparse.lil_matrix(x.reshape((len(x), 1)))
    b = sparse.lil_matrix(b.reshape((len(b), 1)))
    # Update rhs b (absorb the fixed variables).
    # BUG FIX: this loop previously iterated over the undefined name
    # 'var_i', raising NameError at runtime.
    for vi in i:
        b = b - x[vi, 0] * A.getcol(vi)
    # Drop rows from the b vector.
    b = drop_rows(b, i)
    # Drop rows from the x vector.
    x = drop_rows(x, i)
    # Drop rows from the A matrix.
    A = drop_rows(A, i)
    # Drop columns from the A matrix (via transpose), restoring the
    # original orientation afterwards.  BUG FIX: the result was previously
    # returned transposed (harmless only for symmetric A).
    A = drop_rows(A.transpose(), i).transpose()
    return A, x, b
def linear_system(machina, mst_edges, singular_vertices):
    """Define the linear system that represents the parametrization.

    This involves an atlas of maps defining a uvw iso-value at each vertex;
    a single vertex can have multiple uvw values (one per adjacent tet).

    Returns (L, C): L is a 12*ne x 12*ne laplacian-style matrix over all
    per-tet vertex variables, C the constraint matrix coupling the maps
    across faces (zero rows pruned).  ``singular_vertices`` is currently
    unused in this function.
    """
    ne = len(machina.tet_mesh.elements)
    # Over-allocate constraint rows; all-zero rows are pruned at the end.
    C = sparse.lil_matrix( (9 * 12 * ne, 12*ne) )
    ccount = 0 # constraint counter
    for fi, adj_ti in enumerate(machina.tet_mesh.adjacent_elements):
        s, t = adj_ti[0], adj_ti[1]
        # Boundary face.
        if -1 in [s, t]:
            t = s if s != -1 else t # tet index
            vi_t = [] # local tet vertex indices of face.
            for vi in machina.tet_mesh.faces[fi]:
                vi_t.append(machina.tet_mesh.elements[t].index(vi))
            # Constrain surface normal: tie the w coordinate (index 2) of
            # all three face vertices together.
            pqr_w = [ var_index(t, vi_t[i], 2) for i in range(3) ]
            for i in [1,2]: # points qr
                C[ccount, pqr_w[0]] = 1
                C[ccount, pqr_w[i]] = -1
                ccount += 1
        # Internal face with two tets in common.
        else:
            # Chiral symmetry matrix matching frames across the face.
            match = chiral_symmetries[machina.matchings[fi]]
            # Get local tet vertex indices of shared face vertices.
            vi_s, vi_t = [], []
            for vi in machina.tet_mesh.faces[fi]:
                # Store the ordered indices of each vertex on the face.
                # In other words, vi_s[0] and vi_t[0] are the same vertex.
                vi_s.append(machina.tet_mesh.elements[s].index(vi))
                vi_t.append(machina.tet_mesh.elements[t].index(vi))
            # The point variable index range for the uvw values of each point.
            pqr_t = [ var_index(t, vi_t[i], range(3)) for i in range(3) ]
            pqr_s = [ var_index(s, vi_s[i], range(3)) for i in range(3) ]
            # Next, apply constraints.
            # If gap is 0 (minimum spanning tree).
            if fi in mst_edges:
                for i in [0,1,2]: # points pqr
                    # Enforce 0 translation, but possible chiral rotation.
                    C[ccount:ccount+3, pqr_t[i]] = - sparse.eye(3)
                    C[ccount:ccount+3, pqr_s[i]] = match
                    ccount += 3
            else:
                # If gap isn't 0, enforce that it be constant.
                # In other words, constrain edges.
                for i in [1,2]: # points qr
                    # Constraint.
                    C[ccount:ccount+3, pqr_t[0]] = sparse.eye(3)
                    C[ccount:ccount+3, pqr_t[i]] = - sparse.eye(3)
                    C[ccount:ccount+3, pqr_s[0]] = - match
                    C[ccount:ccount+3, pqr_s[i]] = match
                    ccount += 3
    # Remove zero-rows from constraint matrix.
    C = C.tocsr()
    num_nonzeros = np.diff(C.indptr)
    C = C[num_nonzeros != 0]
    # Create laplacian of local tetrahedron connectivity.
    L = sparse.diags([1,1,1,-3,1,1,1],[-9,-6,-3,0,3,6,9],(12*ne,12*ne))
    return L, C
def flag_integer_vars(machina, singular_vertices):
    """Compute which variables are integer-constrained.

    Returns a set of flattened variable indices (see ``var_index``).
    Singular vertices constrain two of their three uvw coordinates
    (which two depends on the singularity type); surface vertices
    constrain their w coordinate.
    """
    int_vars = set()
    # Iterate through all variables.
    for ti, tet in enumerate(machina.tet_mesh.elements):
        for local_vi, vi in enumerate(tet):
            # BUG FIX: var_index() always returns a *list*, which is
            # unhashable; set.add() raised TypeError.  Use set.update()
            # so the contained integer indices themselves are stored.
            if vi in singular_vertices:
                sing_type = singular_vertices[vi]
                if sing_type < 4:
                    # Jw-type singularity: u and v must be integers.
                    int_vars.update(var_index(ti, local_vi, 0))
                    int_vars.update(var_index(ti, local_vi, 1))
                elif sing_type < 7:
                    int_vars.update(var_index(ti, local_vi, 1))
                    int_vars.update(var_index(ti, local_vi, 2))
                else:  # Doesn't check for improper.
                    int_vars.update(var_index(ti, local_vi, 2))
                    int_vars.update(var_index(ti, local_vi, 0))
            # Surface vertices must be integer in w.
            if vi in machina.surf_mesh.vertex_map:
                int_vars.update(var_index(ti, local_vi, 2))
    return int_vars
def adaptive_rounding(machina, A, x, b, singular_vertices):
    """Adaptively round the solution vector in a greedy manner.

    Integer-constrained variables (see ``flag_integer_vars``) are fixed a
    batch at a time: first any variables already within 1e-4 of an integer,
    otherwise the single variable with the smallest deviation.  After each
    batch the system is reduced (``reduce_system``) and re-solved with
    conjugate gradient.  Returns the full-length uvw map with fixed and
    solved values merged back into global order.
    """
    int_vars = flag_integer_vars(machina, singular_vertices)
    # Enforce integer variables
    vars_fixed = dict()
    ne = len(machina.tet_mesh.elements)
    # The reduction array is used to keep track of global vs. reduced indices
    # as we progressively round the variables.
    # row index: reduced index, col 0: global index, col 1: is_int boolean.
    reduction_arr = np.zeros((12*ne, 2))
    reduction_arr[:,0] = np.arange(12*ne)
    for vi in int_vars:
        reduction_arr[vi,1] = 1
    # Loop until all integer variables are fixed.
    while (len(vars_fixed) < len(int_vars)):
        # Identify integer variables not yet fixed.
        vars_left = dict()
        for ri in range(reduction_arr.shape[0]):
            if reduction_arr[ri,1]:
                vars_left[ri] = reduction_arr[ri,0]
        print('Conjugate gradient... (%i integers left)' % len(vars_left))
        # Identify fixeable variables
        # First, variables with a small deviation should
        # be rounded to their nearest integer.
        vars_to_fix = []
        # gvi: global variable index, rvi: reduced variable index.
        for rvi, gvi in vars_left.items():
            value = x[rvi]
            rounded = int(round(value))
            if np.abs(value - rounded) > 1e-4:
                continue
            # Otherwise, delta is small enough to round.
            x[rvi] = rounded
            # NOTE(review): gvi comes from a float numpy array, so
            # vars_fixed is keyed by floats; the membership test in the
            # final merge below relies on float/int equality for exact
            # integral values.
            vars_fixed[gvi] = rounded
            vars_to_fix.append(rvi)
        # If no variable is fixed, fix the one with the smallest
        # deviation from its rounded integer.
        if len(vars_to_fix) == 0:
            key_list = list(vars_left.keys())
            rvi = np.argmin([ np.abs(x[rvi] - round(x[rvi])) for rvi in key_list ])
            rvi = key_list[rvi]
            x[rvi] = round(x[rvi])
            vars_fixed[vars_left[rvi]] = x[rvi]
            vars_to_fix.append(rvi)
        # Update linear system (absorbs the fixed values into b).
        A, x, b = reduce_system(A, x, b, vars_to_fix)
        b = b.toarray()
        x = x.toarray()
        # Run conjugate gradient on reduced system.
        x, info = sparse.linalg.cg(A, b, x0=x, tol = 1e-2)
        # Update the reduction array.
        reduction_arr = np.delete(reduction_arr, vars_to_fix, axis=0)
    # Final map: merge fixed integers and remaining solved values back
    # into global index order.
    uvw_map = np.zeros(12*ne)
    count = 0
    for i in range(12*ne):
        if i in vars_fixed:
            uvw_map[i] = vars_fixed[i]
            count += 1
        else:
            uvw_map[i] = x[i - count]
    return uvw_map
def parametrize_volume(machina, singular_vertices, h):
    """Parametrize the volume as an atlas of maps based on the 3d frame field.

    Returns the discretized uvw map atlas (vertex-based): a flat array of
    12 * n_tets values (3 coordinates for each of the 4 vertices of every
    tet, indexed as in ``var_index``).  *h* scales the right-hand side
    (target edge length of the parametrization).
    """
    # Each vertex has multiple values, depending
    # on the number of tets it's a part of.
    ne = len(machina.tet_mesh.elements)
    # Minimum spanning tree of dual mesh as list of face indices.
    # Span until all tets have been visited.
    ti = 0
    mst_edges = set()
    visited_tets = set()
    while ti < len(machina.tet_mesh.elements):
        for neigh_ti in machina.tet_mesh.neighbors[ti]:
            if neigh_ti in visited_tets or neigh_ti == -1:
                continue
            # Get face index from s-t tet pair.
            fi = machina.dual_edges[frozenset([ti, neigh_ti])]
            mst_edges.add(fi)
        visited_tets.add(ti)
        ti += 1
    print('Computing laplacian and constraints...')
    # Create linear system based on laplacian and constraints.
    laplacian, cons = linear_system(machina, mst_edges, singular_vertices)
    n_cons = cons.get_shape()[0]
    # KKT-style system: [[L, C^T], [C, 0]].
    A = sparse.bmat(([[laplacian, cons.transpose()],[cons, None]]), dtype=np.int32)
    # Discrete frame divergence (right-hand side); the per-tet divergence
    # is repeated for each of the tet's 4 vertices.
    b = np.zeros((12*ne + n_cons))
    for ti in range(ne):
        # NOTE(review): tet_vol is computed but never used below.
        tet_vol = tet_volume(machina.tet_mesh, ti)
        frame = machina.frames[ti]
        div = [ np.sum(frame.uvw[:,0]),
                np.sum(frame.uvw[:,1]),
                np.sum(frame.uvw[:,2]) ]
        b[12*ti : 12*(ti+1)] = np.hstack([ div for _ in range(4)])
    b = np.divide(b, h)
    print("Conjugate Gradient... (Round 1)", end=" ")
    sys.stdout.flush()
    # info (cg convergence flag) is ignored here.
    x, info = sparse.linalg.cg(A, b, tol = 1e-4)
    say_ok()
    print('Adaptive rounding...')
    # Adaptive rounding is currently disabled; the raw cg solution is
    # returned as-is.
    # uvw_map = adaptive_rounding(machina, A, x, b, singular_vertices)
    uvw_map = x
    return uvw_map
| 35.802083 | 83 | 0.580545 |
479385722967fd418f39099dd82df3396be4d6c9 | 30,965 | py | Python | cinder/tests/unit/volume/test_image.py | alexisries/openstack-cinder | 7cc6e45c5ddb8bf771bdb01b867628e41761ae11 | [
"Apache-2.0"
] | 1 | 2018-09-02T11:13:23.000Z | 2018-09-02T11:13:23.000Z | cinder/tests/unit/volume/test_image.py | alexisries/openstack-cinder | 7cc6e45c5ddb8bf771bdb01b867628e41761ae11 | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/volume/test_image.py | alexisries/openstack-cinder | 7cc6e45c5ddb8bf771bdb01b867628e41761ae11 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for volume and images."""
import datetime
import mock
import os
import tempfile
from oslo_utils import imageutils
from oslo_utils import units
from cinder import db
from cinder import exception
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder.tests import fake_driver
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
import cinder.volume
from cinder.volume import manager as vol_manager
QUOTAS = quota.QUOTAS
NON_EXISTENT_IMAGE_ID = '003f540f-ec6b-4293-a3f9-7c68646b0f5c'
class FakeImageService(object):
    """Trivial image-service double that always reports a 2 GiB raw image."""

    def __init__(self, db_driver=None, image_service=None):
        # Signature mirrors the real service; both arguments are ignored.
        pass

    def show(self, context, image_id):
        """Return fixed metadata regardless of *image_id*."""
        return {
            'size': 2 * units.Gi,
            'disk_format': 'raw',
            'container_format': 'bare',
            'status': 'active',
        }
class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
    """Tests for VolumeManager.copy_volume_to_image (volume upload)."""

    def fake_local_path(self, volume):
        # Substitute driver local_path so data goes to a temp file.
        return self.dst_path

    def setUp(self):
        super(CopyVolumeToImageTestCase, self).setUp()
        self.dst_fd, self.dst_path = tempfile.mkstemp()
        self.addCleanup(os.unlink, self.dst_path)

        os.close(self.dst_fd)
        self.mock_object(self.volume.driver, 'local_path',
                         self.fake_local_path)
        self.mock_cache = mock.MagicMock()
        self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
        self.image_meta = {
            'id': self.image_id,
            'container_format': 'bare',
            'disk_format': 'raw'
        }
        self.volume_id = fake.VOLUME_ID
        self.addCleanup(db.volume_destroy, self.context, self.volume_id)

        self.volume_attrs = {
            'id': self.volume_id,
            'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'display_description': 'Test Desc',
            'size': 20,
            'status': 'uploading',
            'host': 'dummy'
        }

    def test_copy_volume_to_image_status_available(self):
        """Detached volume returns to 'available' after upload."""
        # creating volume testdata
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)

        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

    def test_copy_volume_to_image_over_image_quota(self):
        """Exceeding the image quota raises and creates a user message."""
        # creating volume testdata
        self.volume_attrs['instance_uuid'] = None
        volume = db.volume_create(self.context, self.volume_attrs)

        with mock.patch.object(self.volume.driver,
                               'copy_volume_to_image') as driver_copy_mock:
            driver_copy_mock.side_effect = exception.ImageLimitExceeded

            # test with image not in queued state
            self.assertRaises(exception.ImageLimitExceeded,
                              self.volume.copy_volume_to_image,
                              self.context,
                              self.volume_id,
                              self.image_meta)
            # Assert a user message was created
            self.volume.message_api.create.assert_called_once_with(
                self.context,
                message_field.Action.COPY_VOLUME_TO_IMAGE,
                resource_uuid=volume['id'],
                exception=mock.ANY,
                detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME)

    def test_copy_volume_to_image_instance_deleted(self):
        # During uploading volume to image if instance is deleted,
        # volume should be in available status.
        self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        # Creating volume testdata
        self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
                                             '45b1161abb02'
        db.volume_create(self.context, self.volume_attrs)

        method = 'volume_update_status_based_on_attachment'
        with mock.patch.object(db, method,
                               wraps=getattr(db, method)) as mock_update:
            # Start test
            self.volume.copy_volume_to_image(self.context,
                                             self.volume_id,
                                             self.image_meta)

            # Check 'volume_update_status_after_copy_volume_to_image'
            # is called 1 time
            self.assertEqual(1, mock_update.call_count)

        # Check volume status has changed to available because
        # instance is deleted
        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

    def test_copy_volume_to_image_status_use(self):
        """Attached (in-use) volume is restored to 'available' here since
        the test volume has no live attachment record."""
        self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        # creating volume testdata
        db.volume_create(self.context, self.volume_attrs)

        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

    def test_copy_volume_to_image_exception(self):
        """Unknown image id raises ImageNotFound; volume status recovers."""
        self.image_meta['id'] = NON_EXISTENT_IMAGE_ID
        # creating volume testdata
        self.volume_attrs['status'] = 'in-use'
        db.volume_create(self.context, self.volume_attrs)

        # start test
        self.assertRaises(exception.ImageNotFound,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

    def test_copy_volume_to_image_driver_not_initialized(self):
        """Uninitialized driver raises; volume status recovers."""
        # creating volume testdata
        db.volume_create(self.context, self.volume_attrs)

        # set initialized to False
        self.volume.driver._initialized = False

        # start test
        self.assertRaises(exception.DriverNotInitialized,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume.status)

    def test_copy_volume_to_image_driver_exception(self):
        """On driver failure, queued/saving images are deleted but an
        already-active image is preserved."""
        self.image_meta['id'] = self.image_id

        image_service = fake_image.FakeImageService()
        # create new image in queued state
        queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9'
        queued_image_meta = image_service.show(self.context, self.image_id)
        queued_image_meta['id'] = queued_image_id
        queued_image_meta['status'] = 'queued'
        image_service.create(self.context, queued_image_meta)

        # create new image in saving state
        saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
        saving_image_meta = image_service.show(self.context, self.image_id)
        saving_image_meta['id'] = saving_image_id
        saving_image_meta['status'] = 'saving'
        image_service.create(self.context, saving_image_meta)

        # create volume
        self.volume_attrs['status'] = 'available'
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)

        with mock.patch.object(self.volume.driver,
                               'copy_volume_to_image') as driver_copy_mock:
            driver_copy_mock.side_effect = exception.VolumeDriverException(
                "Error")

            # test with image not in queued state
            self.assertRaises(exception.VolumeDriverException,
                              self.volume.copy_volume_to_image,
                              self.context,
                              self.volume_id,
                              self.image_meta)
            # Make sure we are passing an OVO instance and not an ORM instance
            # to the driver
            self.assertIsInstance(driver_copy_mock.call_args[0][1],
                                  objects.Volume)
            volume = db.volume_get(self.context, self.volume_id)
            self.assertEqual('available', volume['status'])
            # image shouldn't be deleted if it is not in queued state
            image_service.show(self.context, self.image_id)

            # test with image in queued state
            self.assertRaises(exception.VolumeDriverException,
                              self.volume.copy_volume_to_image,
                              self.context,
                              self.volume_id,
                              queued_image_meta)
            volume = db.volume_get(self.context, self.volume_id)
            self.assertEqual('available', volume['status'])
            # queued image should be deleted
            self.assertRaises(exception.ImageNotFound,
                              image_service.show,
                              self.context,
                              queued_image_id)

            # test with image in saving state
            self.assertRaises(exception.VolumeDriverException,
                              self.volume.copy_volume_to_image,
                              self.context,
                              self.volume_id,
                              saving_image_meta)
            volume = db.volume_get(self.context, self.volume_id)
            self.assertEqual('available', volume['status'])
            # image in saving state should be deleted
            self.assertRaises(exception.ImageNotFound,
                              image_service.show,
                              self.context,
                              saving_image_id)

    @mock.patch.object(QUOTAS, 'reserve')
    @mock.patch.object(QUOTAS, 'commit')
    @mock.patch.object(vol_manager.VolumeManager, 'create_volume')
    @mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
                       'copy_volume_to_image')
    def _test_copy_volume_to_image_with_image_volume(
            self, mock_copy, mock_create, mock_quota_commit,
            mock_quota_reserve):
        # Shared helper: upload with image_upload_use_cinder_backend enabled
        # and return the resulting image metadata.
        self.volume.driver.configuration.image_upload_use_cinder_backend = True
        self.addCleanup(fake_image.FakeImageService_reset)
        image_service = fake_image.FakeImageService()

        def add_location_wrapper(ctx, id, uri, metadata):
            # Verify 'image_owner' metadata is set on the image volume
            # before the location is registered.
            try:
                volume = db.volume_get(ctx, id)
                self.assertEqual(ctx.project_id,
                                 volume['metadata']['image_owner'])
            except exception.VolumeNotFound:
                pass
            return image_service.add_location_orig(ctx, id, uri, metadata)

        image_service.add_location_orig = image_service.add_location
        image_service.add_location = add_location_wrapper

        image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
        self.image_meta['id'] = image_id
        self.image_meta['status'] = 'queued'
        image_service.create(self.context, self.image_meta)

        # creating volume testdata
        self.volume_attrs['instance_uuid'] = None
        db.volume_create(self.context, self.volume_attrs)

        def fake_create(context, volume, **kwargs):
            db.volume_update(context, volume.id, {'status': 'available'})

        mock_create.side_effect = fake_create

        # start test
        self.volume.copy_volume_to_image(self.context,
                                         self.volume_id,
                                         self.image_meta)

        volume = db.volume_get(self.context, self.volume_id)
        self.assertEqual('available', volume['status'])

        # return create image
        image = image_service.show(self.context, image_id)
        image_service.delete(self.context, image_id)
        return image

    def test_copy_volume_to_image_with_image_volume(self):
        """Raw uploads via cinder backend register a cinder:// location."""
        image = self._test_copy_volume_to_image_with_image_volume()
        self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))

    def test_copy_volume_to_image_with_image_volume_qcow2(self):
        """Non-raw (qcow2) uploads must not register a cinder:// location."""
        self.image_meta['disk_format'] = 'qcow2'
        image = self._test_copy_volume_to_image_with_image_volume()
        self.assertNotIn('locations', image)

    @mock.patch.object(vol_manager.VolumeManager, 'delete_volume')
    @mock.patch.object(fake_image._FakeImageService, 'add_location',
                       side_effect=exception.Invalid)
    def test_copy_volume_to_image_with_image_volume_failure(
            self, mock_add_location, mock_delete):
        """If registering the location fails, the image volume is deleted."""
        image = self._test_copy_volume_to_image_with_image_volume()
        self.assertNotIn('locations', image)
        self.assertTrue(mock_delete.called)

    @mock.patch('cinder.volume.manager.'
                'VolumeManager._clone_image_volume')
    @mock.patch('cinder.volume.manager.'
                'VolumeManager._create_image_cache_volume_entry')
    def test_create_image_cache_volume_entry(self,
                                             mock_cache_entry,
                                             mock_clone_image_volume):
        image_id = self.image_id
        image_meta = self.image_meta

        self.mock_cache.get_entry.return_value = mock_cache_entry

        # NOTE(review): mock_cache_entry is a MagicMock and therefore
        # always truthy, so the else branch below appears unreachable;
        # it also references self.image_volume_cache, which setUp() never
        # defines (only self.mock_cache) -- confirm intended behavior.
        if mock_cache_entry:
            # Entry is in cache, so basically don't do anything.
            # Make sure we didn't try and create a cache entry
            self.assertFalse(self.mock_cache.ensure_space.called)
            self.assertFalse(self.mock_cache.create_cache_entry.called)
        else:
            result = self.volume._create_image_cache_volume_entry(
                self.context, mock_clone_image_volume, image_id, image_meta)
            self.assertNotEqual(False, result)
            cache_entry = self.image_volume_cache.get_entry(
                self.context, mock_clone_image_volume, image_id, image_meta)
            self.assertIsNotNone(cache_entry)
class ImageVolumeCacheTestCase(base.BaseVolumeTestCase):
    """Tests for image-volume cache wiring and cleanup on volume delete."""

    def setUp(self):
        super(ImageVolumeCacheTestCase, self).setUp()
        self.volume.driver.set_initialized()

    @mock.patch('oslo_utils.importutils.import_object')
    def test_cache_configs(self, mock_import_object):
        """Cache size/count options from driver config reach the cache."""
        opts = {
            'image_volume_cache_enabled': True,
            'image_volume_cache_max_size_gb': 100,
            'image_volume_cache_max_count': 20
        }

        def conf_get(option):
            # Emulate configuration.safe_get for the options above.
            if option in opts:
                return opts[option]
            else:
                return None

        mock_driver = mock.Mock()
        mock_driver.configuration.safe_get.side_effect = conf_get
        mock_driver.configuration.extra_capabilities = 'null'

        def import_obj(*args, **kwargs):
            return mock_driver

        mock_import_object.side_effect = import_obj

        manager = vol_manager.VolumeManager(volume_driver=mock_driver)
        self.assertIsNotNone(manager)
        self.assertIsNotNone(manager.image_volume_cache)
        self.assertEqual(100, manager.image_volume_cache.max_cache_size_gb)
        self.assertEqual(20, manager.image_volume_cache.max_cache_size_count)

    def test_delete_image_volume(self):
        """Deleting a cached image volume removes its cache entry."""
        volume_params = {
            'status': 'creating',
            'host': 'some_host',
            'cluster_name': 'some_cluster',
            'size': 1
        }
        volume_api = cinder.volume.api.API()
        volume = tests_utils.create_volume(self.context, **volume_params)
        volume.status = 'available'
        volume.save()
        image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
        db.image_volume_cache_create(self.context,
                                     volume['host'],
                                     volume_params['cluster_name'],
                                     image_id,
                                     datetime.datetime.utcnow(),
                                     volume['id'],
                                     volume['size'])
        volume_api.delete(self.context, volume)
        entry = db.image_volume_cache_get_by_volume_id(self.context,
                                                       volume['id'])
        self.assertIsNone(entry)

    def test_delete_volume_with_keymanager_exception(self):
        """A key-manager failure during delete must not propagate."""
        volume_params = {
            'host': 'some_host',
            'size': 1
        }
        volume_api = cinder.volume.api.API()
        volume = tests_utils.create_volume(self.context, **volume_params)

        with mock.patch.object(
                volume_api.key_manager, 'delete') as key_del_mock:
            key_del_mock.side_effect = Exception("Key not found")
            volume_api.delete(self.context, volume)
class ImageVolumeTestCases(base.BaseVolumeTestCase):
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_cloned_volume')
@mock.patch('cinder.quota.QUOTAS.rollback')
@mock.patch('cinder.quota.QUOTAS.commit')
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
def test_clone_image_volume(self, mock_reserve, mock_commit,
mock_rollback, mock_cloned_volume):
vol = tests_utils.create_volume(self.context,
**self.volume_params)
# unnecessary attributes should be removed from image volume
vol.consistencygroup = None
result = self.volume._clone_image_volume(self.context, vol,
{'id': fake.VOLUME_ID})
self.assertNotEqual(False, result)
mock_reserve.assert_called_once_with(self.context, volumes=1,
gigabytes=vol.size)
mock_commit.assert_called_once_with(self.context, ["RESERVATION"],
project_id=vol.project_id)
@mock.patch('cinder.quota.QUOTAS.rollback')
@mock.patch('cinder.quota.QUOTAS.commit')
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
def test_clone_image_volume_creation_failure(self, mock_reserve,
mock_commit, mock_rollback):
vol = tests_utils.create_volume(self.context, **self.volume_params)
with mock.patch.object(objects, 'Volume', side_effect=ValueError):
self.assertIsNone(self.volume._clone_image_volume(
self.context, vol, {'id': fake.VOLUME_ID}))
mock_reserve.assert_called_once_with(self.context, volumes=1,
gigabytes=vol.size)
mock_rollback.assert_called_once_with(self.context, ["RESERVATION"])
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_cloned_status_available(
self, mock_qemu_info):
"""Test create volume from image via cloning.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume = self._create_volume_from_image()
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_not_cloned_status_available(
self, mock_qemu_info):
"""Test create volume from image via full copy.
Verify that after copying image to volume, it is in available
state and is bootable.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume = self._create_volume_from_image(fakeout_clone_image=True)
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume)
def test_create_volume_from_image_exception(self):
"""Test create volume from a non-existing image.
Verify that create volume from a non-existing image, the volume
status is 'error' and is not bootable.
"""
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.mock_object(self.volume.driver, 'local_path', lambda x: dst_path)
# creating volume testdata
kwargs = {'display_description': 'Test Desc',
'size': 20,
'availability_zone': 'fake_availability_zone',
'status': 'creating',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'host': 'dummy'}
volume = objects.Volume(context=self.context, **kwargs)
volume.create()
self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
self.context,
volume,
{'image_id': NON_EXISTENT_IMAGE_ID})
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual("error", volume['status'])
self.assertFalse(volume['bootable'])
# cleanup
volume.destroy()
os.unlink(dst_path)
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_copy_exception_rescheduling(
            self, mock_qemu_info):
        """Test create volume with ImageCopyFailure

        This exception should not trigger rescheduling and allocated_capacity
        should be incremented so we're having assert for that here.
        """
        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'  # 1 GiB virtual size
        mock_qemu_info.return_value = image_info

        # Force the driver's copy step to fail with ImageCopyFailure.
        def fake_copy_image_to_volume(context, volume, image_service,
                                      image_id):
            raise exception.ImageCopyFailure()

        self.mock_object(self.volume.driver, 'copy_image_to_volume',
                         fake_copy_image_to_volume)
        mock_delete = self.mock_object(self.volume.driver, 'delete_volume')
        self.assertRaises(exception.ImageCopyFailure,
                          self._create_volume_from_image)
        # NOTE(dulek): Rescheduling should not occur, so lets assert that
        # allocated_capacity is incremented.
        self.assertDictEqual(self.volume.stats['pools'],
                             {'_pool0': {'allocated_capacity_gb': 1}})
        # NOTE(dulek): As we haven't rescheduled, make sure no delete_volume
        # was called.
        self.assertFalse(mock_delete.called)
    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.utils.brick_get_connector')
    @mock.patch('cinder.volume.driver.BaseVD.secure_file_operations_enabled')
    @mock.patch('cinder.volume.driver.BaseVD._detach_volume')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_unavailable(
            self, mock_qemu_info, mock_detach, mock_secure, *args):
        """Test create volume with ImageCopyFailure

        We'll raise an exception inside _connect_device after volume has
        already been attached to confirm that it detaches the volume.
        """
        # secure_file_operations_enabled raises after attach, forcing the
        # failure path inside BaseVD.copy_image_to_volume.
        mock_secure.side_effect = NameError
        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'  # 1 GiB virtual size
        mock_qemu_info.return_value = image_info
        # Re-bind the real BaseVD.copy_image_to_volume onto the driver so
        # the genuine attach/copy/detach code runs while the patched mocks
        # above intercept the brick/driver internals.
        unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume
        bound_copy_method = unbound_copy_method.__get__(self.volume.driver)
        with mock.patch.object(self.volume.driver, 'copy_image_to_volume',
                               side_effect=bound_copy_method):
            self.assertRaises(exception.ImageCopyFailure,
                              self._create_volume_from_image,
                              fakeout_copy_image_to_volume=False)
        # We must have called detach method.
        self.assertEqual(1, mock_detach.call_count)
    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.utils.brick_get_connector')
    @mock.patch('cinder.volume.driver.BaseVD._connect_device')
    @mock.patch('cinder.volume.driver.BaseVD._detach_volume')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_unavailable_no_attach_info(
            self, mock_qemu_info, mock_detach, mock_connect, *args):
        """Test create volume with ImageCopyFailure

        We'll raise an exception on _connect_device call to confirm that it
        detaches the volume even if the exception doesn't have attach_info.
        """
        # Fail inside _connect_device itself, before attach_info exists.
        mock_connect.side_effect = NameError
        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'  # 1 GiB virtual size
        mock_qemu_info.return_value = image_info
        # Re-bind the real BaseVD.copy_image_to_volume onto the driver so
        # the genuine code path runs under the mocks patched above.
        unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume
        bound_copy_method = unbound_copy_method.__get__(self.volume.driver)
        with mock.patch.object(self.volume.driver, 'copy_image_to_volume',
                               side_effect=bound_copy_method):
            self.assertRaises(exception.ImageCopyFailure,
                              self._create_volume_from_image,
                              fakeout_copy_image_to_volume=False)
        # We must have called detach method.
        self.assertEqual(1, mock_detach.call_count)
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_volume_from_image_clone_image_volume(self, mock_qemu_info):
        """Test create volume from image via image volume.

        Verify that after cloning image to volume, it is in available
        state and is bootable.
        """
        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'  # 1 GiB virtual size
        mock_qemu_info.return_value = image_info
        # clone_image_volume=True exercises the image-volume clone path.
        volume = self._create_volume_from_image(clone_image_volume=True)
        self.assertEqual('available', volume['status'])
        self.assertTrue(volume['bootable'])
        self.volume.delete_volume(self.context, volume)
def test_create_volume_from_exact_sized_image(self):
"""Test create volume from an image of the same size.
Verify that an image which is exactly the same size as the
volume, will work correctly.
"""
try:
volume_id = None
volume_api = cinder.volume.api.API(
image_service=FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=self.FAKE_UUID)
volume_id = volume['id']
self.assertEqual('creating', volume['status'])
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_volume_from_oversized_image(self):
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi + 1,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
    def test_create_volume_with_mindisk_error(self):
        """Verify volumes smaller than image minDisk will cause an error."""
        class _ModifiedFakeImageService(FakeImageService):
            # min_disk (5 GB) exceeds the 2 GB volume requested below.
            def show(self, context, image_id):
                return {'size': 2 * units.Gi,
                        'disk_format': 'raw',
                        'container_format': 'bare',
                        'min_disk': 5,
                        'status': 'active'}

        volume_api = cinder.volume.api.API(
            image_service=_ModifiedFakeImageService())
        self.assertRaises(exception.InvalidInput,
                          volume_api.create,
                          self.context, 2,
                          'name', 'description', image_id=1)
    def test_create_volume_with_deleted_imaged(self):
        """Verify create volume from image will cause an error."""
        class _ModifiedFakeImageService(FakeImageService):
            # The image reports status 'deleted', which must be rejected.
            def show(self, context, image_id):
                return {'size': 2 * units.Gi,
                        'disk_format': 'raw',
                        'container_format': 'bare',
                        'min_disk': 5,
                        'status': 'deleted'}

        volume_api = cinder.volume.api.API(
            image_service=_ModifiedFakeImageService())
        self.assertRaises(exception.InvalidInput,
                          volume_api.create,
                          self.context, 2,
                          'name', 'description', image_id=1)
    def test_copy_volume_to_image_maintenance(self):
        """Test copy volume to image in maintenance."""
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        # Force the volume into maintenance; the API must refuse the
        # upload even with force=True.
        volume['status'] = 'maintenance'
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume,
                          volume_api.copy_volume_to_image,
                          self.context,
                          volume,
                          test_meta1,
                          force=True)
| 43.126741 | 79 | 0.617342 |
f1f7e3941d8eba5551c3b8c9e9c17125b601b93c | 1,669 | py | Python | Fig6_S7/Siegel_testSet/fastUTR_predict.py | vagarwal87/saluki_paper | 3aa4e56a19bbbf87ac9f5f0a251098cf749ad6bc | [
"Apache-2.0"
] | null | null | null | Fig6_S7/Siegel_testSet/fastUTR_predict.py | vagarwal87/saluki_paper | 3aa4e56a19bbbf87ac9f5f0a251098cf749ad6bc | [
"Apache-2.0"
] | null | null | null | Fig6_S7/Siegel_testSet/fastUTR_predict.py | vagarwal87/saluki_paper | 3aa4e56a19bbbf87ac9f5f0a251098cf749ad6bc | [
"Apache-2.0"
] | null | null | null | import os, sys
import argparse, json, h5py, time
import numpy as np
import tensorflow as tf
from basenji import dataset
from basenji import dna_io
import pandas as pd
# `rnann` is importable either standalone or from inside basenji,
# depending on the installed basenji version.
try:
    import rnann
except:
    from basenji import rnann
# TF1 needs eager execution switched on explicitly; TF2 is eager by default.
if tf.__version__[0] == '1':
    tf.compat.v1.enable_eager_execution()
# Fixed model input length; shorter sequences are zero-padded up to this.
MAXLEN = 12288
##########
# inputs #
##########
parser = argparse.ArgumentParser(description='In silico MPRA experiment')
parser.add_argument(dest='pfile', help='params file')
parser.add_argument(dest='mfile', help='model file')
parser.add_argument(dest='inputfile', help='input file')
args = parser.parse_args()
params_file = args.pfile  # e.g. train_gru/params.json
model_file = args.mfile  # e.g. train_gru/f0_c0/train/model0_best.h5
inputfile = args.inputfile  # table of variant sequences, one per line
# read model parameters
with open(params_file) as params_open:
    params = json.load(params_open)
    params_model = params['model']
    params_train = params['train']
# initialize model and load trained weights
seqnn_model = rnann.RnaNN(params_model)
seqnn_model.restore(model_file)
# BTV_construct.txt holds four reporter segments: row 1 is the coding
# region (codon-aligned below); rows 0 and 2 flank it and row 3 is
# appended after the variable insert -- TODO confirm segment identities
# against the construct design.
construct = pd.read_table("BTV_construct.txt", index_col=0, header=None).values
aa_len = int(len(construct[1][0])/3)
# Per-base coding track: 0 over the 5' segment, then 1,0,0 per codon.
coding = np.append(np.zeros(len(construct[0][0])), np.tile([1,0,0], aa_len))
reporter = construct[0]+construct[1]+construct[2]
seq = pd.read_table(inputfile, header=None)[[0]][0].values
print('%s\t%s' % ("seq", "pred"))
for i in seq: # iterate through all sequences
    # Channels 0-3: one-hot DNA; channel 4: coding track; channel 5 is
    # left all-zero by this encoding.
    batch = np.zeros((1,MAXLEN,6))
    myseq = (reporter+i+construct[3])[0]
    batch[0,0:len(myseq),0:4] = dna_io.dna_1hot(myseq)
    batch[0,0:len(coding),4] = coding
    pred = seqnn_model.predict(batch)
    print('%s\t%s' % (i, pred[0][0]))
f8dd04f1ea96f3156afab92483d20bbd37f617ca | 28,769 | py | Python | Adelphi Academic Calendar/skill/skill_env/Lib/site.py | EnriqueGambra/Amazon-Alexa-Skill | 198ed51bef555eee006041fef0bcbf5c955142d5 | [
"MIT"
] | null | null | null | Adelphi Academic Calendar/skill/skill_env/Lib/site.py | EnriqueGambra/Amazon-Alexa-Skill | 198ed51bef555eee006041fef0bcbf5c955142d5 | [
"MIT"
] | null | null | null | Adelphi Academic Calendar/skill/skill_env/Lib/site.py | EnriqueGambra/Amazon-Alexa-Skill | 198ed51bef555eee006041fef0bcbf5c955142d5 | [
"MIT"
] | 1 | 2019-10-11T17:15:20.000Z | 2019-10-11T17:15:20.000Z | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import os
import sys
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, "maxsize", None) or getattr(sys, "maxint")) > 2 ** 32
_is_pypy = hasattr(sys, "pypy_version_info")
def makepath(*paths):
    """Join *paths*, absolutize the result, and return the pair
    (absolute path, case-normalized absolute path)."""
    joined = os.path.abspath(os.path.join(*paths))
    return joined, os.path.normcase(joined)
def abs__file__():
    """Rewrite every loaded module's ``__file__`` as an absolute path."""
    for module in sys.modules.values():
        filename = getattr(module, "__file__", None)
        if filename is not None:
            module.__file__ = os.path.abspath(filename)
def removeduppaths():
    """ Remove duplicate entries from sys.path along with making them
    absolute"""
    # This ensures that the initial path provided by the interpreter contains
    # only absolute pathnames, even if we're running from the build directory.
    L = []
    known_paths = set()
    for dir in sys.path:
        # Filter out duplicate paths (on case-insensitive file systems also
        # if they only differ in case); turn relative paths into absolute
        # paths.
        dir, dircase = makepath(dir)
        if not dircase in known_paths:
            L.append(dir)
            known_paths.add(dircase)
    sys.path[:] = L
    # Return the case-normalized set so callers (addsitedir/addpackage)
    # can keep deduplicating against it.
    return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)"""
    from distutils.util import get_platform

    s = "build/lib.{}-{}.{}".format(get_platform(), *sys.version_info)
    # Debug (--with-pydebug) builds keep extensions in a separate
    # "-pydebug" directory; sys.gettotalrefcount only exists on them.
    if hasattr(sys, "gettotalrefcount"):
        s += "-pydebug"
    # The caller only invokes this when sys.path[-1] is <srcdir>/Modules,
    # so dirname(sys.path[-1]) is the source tree root.
    s = os.path.join(os.path.dirname(sys.path[-1]), s)
    sys.path.append(s)
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path"""
    existing_dirs = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                # Inline of makepath(): absolute path, normalized case.
                absolute = os.path.abspath(entry)
                existing_dirs.add(os.path.normcase(absolute))
        except TypeError:
            # Non-string entries on sys.path are simply skipped.
            continue
    return existing_dirs
def addpackage(sitedir, name, known_paths):
    """Add a new path to known_paths by combining sitedir and 'name' or execute
    sitedir if it starts with 'import'.

    Each non-comment line of the .pth file is either executed (lines
    starting with 'import') or joined with sitedir and appended to
    sys.path if it names an existing, not-yet-known directory.
    """
    if known_paths is None:
        # BUG FIX: the return value of _init_pathinfo() used to be
        # discarded, leaving known_paths as None and crashing on the
        # membership test below whenever addpackage() was called directly.
        # Modern CPython performs this assignment as well.
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "r")
    except IOError:
        return
    try:
        for line in f:
            if line.startswith("#"):
                continue
            if line.startswith("import"):
                # NOTE: .pth files can run arbitrary code here -- this is
                # standard site.py behavior, but only trusted site dirs
                # should ever be processed.
                exec(line)
                continue
            line = line.rstrip()
            dir, dircase = makepath(sitedir, line)
            if not dircase in known_paths and os.path.exists(dir):
                sys.path.append(dir)
                known_paths.add(dircase)
    finally:
        f.close()
    if reset:
        # The set was built locally; don't leak it to the caller.
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if not sitedircase in known_paths:
        sys.path.append(sitedir)  # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        return
    # Process .pth files in sorted order so sys.path is reproducible.
    names.sort()
    for name in names:
        if name.endswith(os.extsep + "pth"):
            addpackage(sitedir, name, known_paths)
    if reset:
        # The set was built locally; don't leak it to the caller.
        known_paths = None
    return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
    """Add site-packages (and possibly site-python) to sys.path.

    Candidate directories are computed per platform (OS/2, RiscOS, PyPy,
    macOS, generic POSIX incl. Debian dist-packages, Windows); only the
    ones that actually exist are passed to addsitedir().
    """
    prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
    if exec_prefix != sys_prefix:
        prefixes.append(os.path.join(exec_prefix, "local"))
    for prefix in prefixes:
        if prefix:
            if sys.platform in ("os2emx", "riscos"):
                sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
            elif _is_pypy:
                sitedirs = [os.path.join(prefix, "site-packages")]
            elif sys.platform == "darwin" and prefix == sys_prefix:
                if prefix.startswith("/System/Library/Frameworks/"):  # Apple's Python
                    sitedirs = [
                        os.path.join("/Library/Python", "{}.{}".format(*sys.version_info), "site-packages"),
                        os.path.join(prefix, "Extras", "lib", "python"),
                    ]
                else:  # any other Python distros on OSX work this way
                    sitedirs = [os.path.join(prefix, "lib", "python{}.{}".format(*sys.version_info), "site-packages")]
            elif os.sep == "/":
                sitedirs = [
                    os.path.join(prefix, "lib", "python{}.{}".format(*sys.version_info), "site-packages"),
                    os.path.join(prefix, "lib", "site-python"),
                    os.path.join(prefix, "python{}.{}".format(*sys.version_info), "lib-dynload"),
                ]
                # lib64 variant (e.g. Fedora/SUSE); put it first on 64-bit
                # builds, last otherwise, unless it's just a symlink dupe.
                lib64_dir = os.path.join(prefix, "lib64", "python{}.{}".format(*sys.version_info), "site-packages")
                if os.path.exists(lib64_dir) and os.path.realpath(lib64_dir) not in [
                    os.path.realpath(p) for p in sitedirs
                ]:
                    if _is_64bit:
                        sitedirs.insert(0, lib64_dir)
                    else:
                        sitedirs.append(lib64_dir)
                try:
                    # sys.getobjects only available in --with-pydebug build
                    sys.getobjects
                    sitedirs.insert(0, os.path.join(sitedirs[0], "debug"))
                except AttributeError:
                    pass
                # Debian-specific dist-packages directories:
                sitedirs.append(
                    os.path.join(prefix, "local/lib", "python{}.{}".format(*sys.version_info), "dist-packages")
                )
                if sys.version_info[0] == 2:
                    sitedirs.append(
                        os.path.join(prefix, "lib", "python{}.{}".format(*sys.version_info), "dist-packages")
                    )
                else:
                    sitedirs.append(
                        os.path.join(prefix, "lib", "python{}".format(sys.version_info[0]), "dist-packages")
                    )
                sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
            else:
                sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
            if sys.platform == "darwin":
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if "Python.framework" in prefix:
                    home = os.environ.get("HOME")
                    if home:
                        sitedirs.append(
                            os.path.join(home, "Library", "Python", "{}.{}".format(*sys.version_info), "site-packages")
                        )
            for sitedir in sitedirs:
                if os.path.isdir(sitedir):
                    addsitedir(sitedir, known_paths)
    return None
def check_enableusersite():
    """Check if user site directory is safe for inclusion

    The function tests for the command line flag (including environment var),
    process uid/gid equal to effective uid/gid.

    None: Disabled for security reasons
    False: Disabled by user (command line option)
    True: Safe and enabled
    """
    # -s / PYTHONNOUSERSITE surface as sys.flags.no_user_site.
    if getattr(getattr(sys, "flags", None), "no_user_site", False):
        return False
    # Refuse the user site when running setuid/setgid (real id differs
    # from effective id): importing user-controlled code would be unsafe.
    for real_name, effective_name in (("getuid", "geteuid"),
                                      ("getgid", "getegid")):
        if hasattr(os, real_name) and hasattr(os, effective_name):
            if getattr(os, effective_name)() != getattr(os, real_name)():
                return None
    return True
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.

    USER_BASE is the root directory for all Python versions

    USER_SITE is the user specific site-packages directory

    USER_SITE/.. can be used for tmp.
    """
    global USER_BASE, USER_SITE, ENABLE_USER_SITE
    # PYTHONUSERBASE overrides the platform default root.
    env_base = os.environ.get("PYTHONUSERBASE", None)

    def joinuser(*args):
        # Join and expand '~' to the user's home directory.
        return os.path.expanduser(os.path.join(*args))

    # if sys.platform in ('os2emx', 'riscos'):
    #     # Don't know what to put here
    #     USER_BASE = ''
    #     USER_SITE = ''
    if os.name == "nt":
        base = os.environ.get("APPDATA") or "~"
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser(base, "Python")
        USER_SITE = os.path.join(USER_BASE, "Python{}{}".format(*sys.version_info), "site-packages")
    else:
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser("~", ".local")
        USER_SITE = os.path.join(USER_BASE, "lib", "python{}.{}".format(*sys.version_info), "site-packages")
    if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
        addsitedir(USER_SITE, known_paths)
    if ENABLE_USER_SITE:
        # Debian layout: per-user dist-packages directories.
        for dist_libdir in ("lib", "local/lib"):
            user_site = os.path.join(USER_BASE, dist_libdir, "python{}.{}".format(*sys.version_info), "dist-packages")
            if os.path.isdir(user_site):
                addsitedir(user_site, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """The OS/2 EMX port has optional extension modules that do double duty
    as DLLs (and must use the .DLL file extension) for other extensions.
    The library search path needs to be amended so these will be found
    during module import. Use BEGINLIBPATH so that these are at the start
    of the library search path.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    libpath = os.environ["BEGINLIBPATH"].split(";")
    if libpath[-1]:
        libpath.append(dllpath)
    else:
        # A trailing ';' left an empty final element; reuse that slot.
        libpath[-1] = dllpath
    os.environ["BEGINLIBPATH"] = ";".join(libpath)
def setquit():
    """Install 'quit' and 'exit' builtins whose repr explains how to
    leave the interactive interpreter, and which raise SystemExit when
    called."""
    eof_hints = {":": "Cmd-Q", "\\": "Ctrl-Z plus Return"}
    eof = eof_hints.get(os.sep, "Ctrl-D (i.e. EOF)")

    class Quitter(object):
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return "Use {}() or {} to exit".format(self.name, eof)

        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    builtins.quit = Quitter("quit")
    builtins.exit = Quitter("exit")
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    MAXLINES = 23  # lines shown per page before prompting

    def __init__(self, name, data, files=(), dirs=()):
        # name: builtin name ('license', ...); data: fallback text;
        # files/dirs: candidate on-disk locations for the full text.
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None

    def __setup(self):
        # Lazily load the text: the first readable file found under
        # self.__dirs wins, otherwise fall back to the built-in data.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = open(filename, "r")
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split("\n")
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        self.__setup()
        # Short texts are shown inline; long ones tell the user to call
        # the object (e.g. license()) to page through them.
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,) * 2)

    def __call__(self):
        # Page through the text MAXLINES at a time; 'q' quits early.
        self.__setup()
        prompt = "Hit Return for more, or q (and Return) to quit: "
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    try:
                        key = raw_input(prompt)  # Python 2
                    except NameError:
                        key = input(prompt)  # Python 3
                    if key not in ("", "q"):
                        key = None
                if key == "q":
                    break
def setcopyright():
    """Set 'copyright' and 'credits' in __builtin__"""
    builtins.copyright = _Printer("copyright", sys.copyright)
    if _is_pypy:
        builtins.credits = _Printer("credits", "PyPy is maintained by the PyPy developers: http://pypy.org/")
    else:
        builtins.credits = _Printer(
            "credits",
            """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""",
        )
    # The license text is looked up on disk first, falling back to a URL.
    here = os.path.dirname(os.__file__)
    builtins.license = _Printer(
        "license",
        "See https://www.python.org/psf/license/",
        ["LICENSE.txt", "LICENSE"],
        [sys.prefix, os.path.join(here, os.pardir), here, os.curdir],
    )
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " "or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    # Install the interactive help() builtin (a _Helper instance).
    builtins.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case."""
    if sys.platform == "win32":
        import locale, codecs

        enc = locale.getdefaultlocale()[1]
        # locale.getdefaultlocale() can return (None, None) when no
        # locale is configured; guard against None.startswith(...).
        if enc and enc.startswith("cp"):  # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                import encodings

                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = "mbcs"
def setencoding():
    """Set the string encoding used by the Unicode implementation. The
    default is 'ascii', but if you're willing to experiment, you can
    change this."""
    encoding = "ascii"  # Default value set by _PyUnicode_Init()
    if 0:
        # Enable to support locale aware default string encodings.
        import locale

        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding)  # Needs Python Unicode build !
    # NOTE(review): both `if 0:` branches are intentionally dead code kept
    # as documentation, so as shipped this function is a no-op.
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        import sitecustomize
    except ImportError:
        # No sitecustomize module installed -- that's fine.
        pass
def virtual_install_main_packages():
    """Add the base ("real") interpreter's standard-library directories to
    sys.path, reading the base prefix from the virtualenv's orig-prefix.txt
    and exposing it as sys.real_prefix.
    """
    f = open(os.path.join(os.path.dirname(__file__), "orig-prefix.txt"))
    sys.real_prefix = f.read().strip()
    f.close()
    # NOTE(review): `pos` is computed but never used afterwards -- dead
    # code retained for fidelity with upstream virtualenv.
    pos = 2
    hardcoded_relative_dirs = []
    if sys.path[0] == "":
        pos += 1
    if _is_pypy:
        if sys.version_info > (3, 2):
            cpyver = "%d" % sys.version_info[0]
        elif sys.pypy_version_info >= (1, 5):
            cpyver = "%d.%d" % sys.version_info[:2]
        else:
            cpyver = "%d.%d.%d" % sys.version_info[:3]
        paths = [os.path.join(sys.real_prefix, "lib_pypy"), os.path.join(sys.real_prefix, "lib-python", cpyver)]
        if sys.pypy_version_info < (1, 9):
            paths.insert(1, os.path.join(sys.real_prefix, "lib-python", "modified-%s" % cpyver))
        hardcoded_relative_dirs = paths[:]  # for the special 'darwin' case below
        #
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        for path in paths[:]:
            plat_path = os.path.join(path, "plat-%s" % sys.platform)
            if os.path.exists(plat_path):
                paths.append(plat_path)
    elif sys.platform == "win32":
        paths = [os.path.join(sys.real_prefix, "Lib"), os.path.join(sys.real_prefix, "DLLs")]
    else:
        paths = [os.path.join(sys.real_prefix, "lib", "python{}.{}".format(*sys.version_info))]
        hardcoded_relative_dirs = paths[:]  # for the special 'darwin' case below
        lib64_path = os.path.join(sys.real_prefix, "lib64", "python{}.{}".format(*sys.version_info))
        if os.path.exists(lib64_path):
            if _is_64bit:
                paths.insert(0, lib64_path)
            else:
                paths.append(lib64_path)
        # This is hardcoded in the Python executable, but relative to
        # sys.prefix. Debian change: we need to add the multiarch triplet
        # here, which is where the real stuff lives. As per PEP 421, in
        # Python 3.3+, this lives in sys.implementation, while in Python 2.7
        # it lives in sys.
        try:
            arch = getattr(sys, "implementation", sys)._multiarch
        except AttributeError:
            # This is a non-multiarch aware Python. Fallback to the old way.
            arch = sys.platform
        plat_path = os.path.join(sys.real_prefix, "lib", "python{}.{}".format(*sys.version_info), "plat-%s" % arch)
        if os.path.exists(plat_path):
            paths.append(plat_path)
    # This is hardcoded in the Python executable, but
    # relative to sys.prefix, so we have to fix up:
    for path in list(paths):
        tk_dir = os.path.join(path, "lib-tk")
        if os.path.exists(tk_dir):
            paths.append(tk_dir)
    # These are hardcoded in the Apple's Python executable,
    # but relative to sys.prefix, so we have to fix them up:
    if sys.platform == "darwin":
        hardcoded_paths = [
            os.path.join(relative_dir, module)
            for relative_dir in hardcoded_relative_dirs
            for module in ("plat-darwin", "plat-mac", "plat-mac/lib-scriptpackages")
        ]
        for path in hardcoded_paths:
            if os.path.exists(path):
                paths.append(path)
    sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
    """
    Force easy_installed eggs in the global environment to get placed
    in sys.path after all packages inside the virtualenv. This
    maintains the "least surprise" result that packages in the
    virtualenv always mask global packages, never the other way
    around.
    """
    # Find the last sys.path entry under sys.prefix past the current
    # insertion point; eggs will be inserted just after it.
    insert_at = getattr(sys, "__egginsert", 0)
    for index, entry in enumerate(sys.path):
        if index > insert_at and entry.startswith(sys.prefix):
            insert_at = index
    sys.__egginsert = insert_at + 1
def virtual_addsitepackages(known_paths):
    # Add the base interpreter's site-packages (sys.real_prefix), with
    # global eggs forced to sort after the virtualenv's own packages.
    force_global_eggs_after_local_site_packages()
    return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        import usercustomize
    except ImportError:
        # No usercustomize module installed -- that's fine.
        pass
def enablerlcompleter():
    """Enable default readline configuration on interactive prompts, by
    registering a sys.__interactivehook__.

    If the readline module can be imported, the hook will set the Tab key
    as completion key and register ~/.python_history as history file.
    This can be overridden in the sitecustomize or usercustomize module,
    or in a PYTHONSTARTUP file.
    """

    def register_readline():
        import atexit

        try:
            import readline
            import rlcompleter
        except ImportError:
            # readline is optional (e.g. absent on some Windows builds).
            return
        # Reading the initialization (config) file may not be enough to set a
        # completion key, so we set one first and then read the file.
        readline_doc = getattr(readline, "__doc__", "")
        if readline_doc is not None and "libedit" in readline_doc:
            # macOS libedit uses a different binding syntax than GNU readline.
            readline.parse_and_bind("bind ^I rl_complete")
        else:
            readline.parse_and_bind("tab: complete")
        try:
            readline.read_init_file()
        except OSError:
            # An OSError here could have many causes, but the most likely one
            # is that there's no .inputrc file (or .editrc file in the case of
            # Mac OS X + libedit) in the expected location. In that case, we
            # want to ignore the exception.
            pass
        if readline.get_current_history_length() == 0:
            # If no history was loaded, default to .python_history.
            # The guard is necessary to avoid doubling history size at
            # each interpreter exit when readline was already configured
            # through a PYTHONSTARTUP hook, see:
            # http://bugs.python.org/issue5845#msg198636
            history = os.path.join(os.path.expanduser("~"), ".python_history")
            try:
                readline.read_history_file(history)
            except OSError:
                pass

            def write_history():
                try:
                    readline.write_history_file(history)
                except (FileNotFoundError, PermissionError):
                    # home directory does not exist or is not writable
                    # https://bugs.python.org/issue19891
                    pass

            atexit.register(write_history)

    sys.__interactivehook__ = register_readline
if _is_pypy:
    # Only defined (and later called from main()) when running under PyPy.
    def import_builtin_stuff():
        """PyPy specific: some built-in modules should be pre-imported because
        some programs expect them to be in sys.modules on startup. This is ported
        from PyPy's site.py.
        """
        import encodings

        if "exceptions" in sys.builtin_module_names:
            import exceptions
        if "zipimport" in sys.builtin_module_names:
            import zipimport
def main():
    """Run the full site initialization sequence: normalize sys.path, add
    (user) site-packages, install interactive builtins, and run the
    sitecustomize/usercustomize hooks."""
    global ENABLE_USER_SITE
    virtual_install_main_packages()
    if _is_pypy:
        import_builtin_stuff()
    abs__file__()
    paths_in_sys = removeduppaths()
    if os.name == "posix" and sys.path and os.path.basename(sys.path[-1]) == "Modules":
        addbuilddir()
    # virtualenv marker file: its presence means the env was created with
    # --no-site-packages, so the base interpreter's packages stay hidden.
    GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), "no-global-site-packages.txt"))
    if not GLOBAL_SITE_PACKAGES:
        ENABLE_USER_SITE = False
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    paths_in_sys = addsitepackages(paths_in_sys)
    paths_in_sys = addusersitepackages(paths_in_sys)
    if GLOBAL_SITE_PACKAGES:
        paths_in_sys = virtual_addsitepackages(paths_in_sys)
    if sys.platform == "os2emx":
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    if sys.version_info[0] == 3:
        enablerlcompleter()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization. The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding


# site.py runs its setup as a side effect of being imported.
main()
def _script():
    """Command-line interface: print path info or USER_BASE/USER_SITE values."""
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        # No flags: dump sys.path and the user-site configuration.
        print("sys.path = [")
        for dir in sys.path:
            print("    {!r},".format(dir))
        print("]")

        def exists(path):
            # Human-readable existence marker for the report below.
            if os.path.isdir(path):
                return "exists"
            else:
                return "doesn't exist"

        print("USER_BASE: {!r} ({})".format(USER_BASE, exists(USER_BASE)))
        print("USER_SITE: {!r} ({})".format(USER_SITE, exists(USER_SITE)))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)

    buffer = []
    if "--user-base" in args:
        buffer.append(USER_BASE)
    if "--user-site" in args:
        buffer.append(USER_SITE)

    if buffer:
        print(os.pathsep.join(buffer))
        # Exit code communicates the user-site state (see help text above).
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)

if __name__ == "__main__":
    _script()
| 34.661446 | 119 | 0.602419 |
86de7156826e200201aea54ff4c78654279bf7d5 | 1,472 | py | Python | edbdeploy/spec/aws_rds.py | vincentp7212/postgres-deployment | ea0ed0e06a4eb99cc28600398eddcf2320778113 | [
"BSD-3-Clause"
] | 58 | 2020-02-24T21:02:50.000Z | 2022-03-28T14:51:56.000Z | edbdeploy/spec/aws_rds.py | vincentp7212/postgres-deployment | ea0ed0e06a4eb99cc28600398eddcf2320778113 | [
"BSD-3-Clause"
] | 108 | 2020-09-18T12:53:44.000Z | 2022-02-02T09:02:31.000Z | edbdeploy/spec/aws_rds.py | vincentp7212/postgres-deployment | ea0ed0e06a4eb99cc28600398eddcf2320778113 | [
"BSD-3-Clause"
] | 47 | 2020-03-04T15:51:01.000Z | 2022-02-27T13:48:05.000Z | from . import DefaultAWSSpec
from . import SpecValidator
# Validation spec for the AWS RDS PostgreSQL server: allowed instance types
# and io1 volume sizing/IOPS bounds with their defaults.
RDSSpec = {
    'postgres_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'db.t3.micro', 'db.r5.xlarge', 'db.r5.2xlarge',
                'db.r5.4xlarge', 'db.r5.8xlarge'
            ],
            default='db.r5.2xlarge'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['io1'],
                default='io1'
            ),
            'size': SpecValidator(
                type='integer',
                min=100,
                max=16384,
                default=1000
            ),
            'iops': SpecValidator(
                type='integer',
                min=1000,
                max=80000,
                default=10000
            )
        }
    }
}

# Full AWS RDS spec: the shared AWS defaults overlaid with the RDS specifics.
AWSRDSSpec = {**DefaultAWSSpec, **RDSSpec}
# PostgreSQL GUC presets (as strings of 8kB pages / kB units) for each
# HammerDB TPROC-C scale; shared_buffers is fixed, cache/WAL sizes grow.
TPROCC_GUC = {
    'small': {
        'effective_cache_size': '524288',
        'shared_buffers': '3145728',
        'max_wal_size': '51200',
    },
    'medium': {
        'effective_cache_size': '4718592',
        'shared_buffers': '3145728',
        'max_wal_size': '102400',
    },
    'large': {
        'effective_cache_size': '13107200',
        'shared_buffers': '3145728',
        'max_wal_size': '204800',
    },
    'xl': {
        'effective_cache_size': '29884416',
        'shared_buffers': '3145728',
        'max_wal_size': '409600',
    },
}
| 24.533333 | 63 | 0.452446 |
acb6a679e922f5cf6daff92959433ac7d1e0bbdc | 568 | py | Python | 01_basics/03_advanced_expressions/01_basic_indexing.py | johny-c/theano_exercises | 7fd43315bf7c475a6f218091316c0bd34e0688c4 | [
"BSD-3-Clause"
] | 711 | 2015-01-10T05:39:21.000Z | 2022-03-15T23:45:45.000Z | 01_basics/03_advanced_expressions/01_basic_indexing.py | dachylong/theano_exercises | 7fd43315bf7c475a6f218091316c0bd34e0688c4 | [
"BSD-3-Clause"
] | 2 | 2016-06-13T06:46:58.000Z | 2017-04-14T08:21:20.000Z | 01_basics/03_advanced_expressions/01_basic_indexing.py | dachylong/theano_exercises | 7fd43315bf7c475a6f218091316c0bd34e0688c4 | [
"BSD-3-Clause"
] | 371 | 2015-01-16T01:31:41.000Z | 2022-03-15T11:37:30.000Z | # Fill in the TODOs in this exercise, then run the script to see if your
# solution works.
import numpy as np
import theano.tensor as T
def increment_odd(x):
    """
    x: a Theano vector

    Returns:
        y: a Theano vector equal to x, but with all odd-numbered elements
        incremented by 1.
    """
    # inc_subtensor returns a copy of x with the selected slice incremented;
    # x[1::2] selects the odd-numbered (1st, 3rd, ...) elements.
    return T.inc_subtensor(x[1::2], 1)
if __name__ == "__main__":
    x = T.vector()
    xv = np.zeros((4,), dtype=x.dtype)
    yv = increment_odd(x).eval({x: xv})
    assert np.allclose(yv, np.array([0., 1., 0., 1.]))
    # Py3-compatible print (the original used a Python 2 print statement,
    # inconsistent with the rest of the exercise).
    print("SUCCESS!")
| 25.818182 | 72 | 0.644366 |
ae367bed8e4720393022a5edd561bcdd948a5b82 | 18,030 | py | Python | appengine/components/tools/gae.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | null | null | null | appengine/components/tools/gae.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | null | null | null | appengine/components/tools/gae.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | 1 | 2020-07-05T19:54:40.000Z | 2020-07-05T19:54:40.000Z | #!/usr/bin/env python
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Wrapper around GAE SDK tools to simplify working with multi module apps."""
__version__ = '1.2'
import atexit
import code
import optparse
import os
import signal
import sys
import tempfile
import urllib2
try:
import readline
except ImportError:
readline = None
# In case gae.py was run via symlink, find the original file since it's where
# third_party libs are. Handle a chain of symlinks too.
SCRIPT_PATH = os.path.abspath(__file__)
IS_SYMLINKED = False
while True:
try:
SCRIPT_PATH = os.path.abspath(
os.path.join(os.path.dirname(SCRIPT_PATH), os.readlink(SCRIPT_PATH)))
IS_SYMLINKED = True
except OSError:
break
ROOT_DIR = os.path.dirname(os.path.dirname(SCRIPT_PATH))
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, '..', 'third_party_local'))
import colorama
from depot_tools import subcommand
from tool_support import gae_sdk_utils
from tools import calculate_version
from tools import log_since
def _print_version_log(app, to_version):
    """Queries the server active version and prints the log between the active
    version and the new version.

    Python 2 code (uses `print >>`); behavior preserved as-is.
    """
    from_versions = set(service['id'] for service in app.get_actives())
    if len(from_versions) > 1:
        # Mixed active versions across modules: cannot pick a baseline.
        print >> sys.stderr, (
            'Error: found multiple modules with different active versions. Use '
            '"gae active" to get the curent list of active version. Please use the '
            'Web UI to fix. Aborting.')
        return 1
    if from_versions:
        from_version = list(from_versions)[0]
        # Version ids look like "<number>-<sha1>"; compare the numeric prefix.
        start = int(from_version.split('-', 1)[0])
        end = int(to_version.split('-', 1)[0])
        if start < end:
            pseudo_revision, mergebase = calculate_version.get_remote_pseudo_revision(
                app.app_dir, 'origin/master')
            logs, _ = log_since.get_logs(
                app.app_dir, pseudo_revision, mergebase, start, end)
            print('\nLogs between %s and %s:' % (from_version, to_version))
            print('%s\n' % logs)
##
def CMDappcfg_login(parser, args):
    """Sets up authentication for appcfg.py usage [DEPRECATED]."""
    app, _, _ = parser.parse_args(args)
    print (
        'Since appcfg.py doesn\'t support explicit login command, we\'ll run '
        'innocent "list_version" instead. It will trigger appcfg\'s login flow. '
        '\n'
        'It\'s fine if "list_version" call itself fails - at this point we have '
        'the necessary credentials cached and other subcommands should be able '
        'to use them.\n')
    gae_sdk_utils.appcfg_login(app)
    return 0
def CMDactive(parser, args):
    """Prints the active versions on the server.

    This is an approximation of querying which version is the default.
    """
    parser.add_option(
        '-b', '--bare', action='store_true',
        help='Only print the version(s), nothing else')
    app, options, _modules = parser.parse_args(args)
    data = app.get_actives()
    if options.bare:
        # De-duplicate: several services may serve the same version id.
        print('\n'.join(sorted(set(i['id'] for i in data))))
        return 0
    print('%s:' % app.app_id)
    for service in data:
        print(
            '  %s: %s by %s at %s' % (
                service['service'], service['id'], service['deployer'],
                service['creationTime']))
    return 0
def CMDapp_dir(parser, args):
"""Prints a root directory of the application."""
# parser.app_dir is None if app root directory discovery fails. Fail the
# command even before invoking CLI parser, or it will ask to pass --app_dir to
# 'app-dir' subcommand, which is ridiculous.
if not parser.app_dir:
print >> sys.stderr, 'Can\'t discover an application root directory.'
return 1
parser.add_tag_option()
app, _, _ = parser.parse_args(args)
print app.app_dir
return 0
@subcommand.usage('[version_id version_id ...]')
def CMDcleanup(parser, args):
    """Removes old versions of GAE application modules.

    Removes the specified versions from all app modules. If no versions are
    provided via command line, will ask interactively.

    When asking interactively, uses EDITOR environment variable to edit the list
    of versions. Otherwise uses notepad.exe on Windows, or vi otherwise.
    """
    parser.add_force_option()
    parser.allow_positional_args = True
    app, options, versions_to_remove = parser.parse_args(args)

    if not versions_to_remove:
        # List all deployed versions, dump them to a temp file to be edited.
        versions = app.get_uploaded_versions()
        fd, path = tempfile.mkstemp()
        atexit.register(lambda: os.remove(path))
        with os.fdopen(fd, 'w') as f:
            header = (
                '# Remove lines that correspond to versions\n'
                '# you\'d like to delete from \'%s\'.\n')
            f.write(header % app.app_id + '\n'.join(versions) + '\n')

        # Let user remove versions that are no longer needed.
        editor = os.environ.get(
            'EDITOR', 'notepad.exe' if sys.platform == 'win32' else 'vi')
        exit_code = os.system('%s %s' % (editor, path))
        if exit_code:
            print('Aborted.')
            return exit_code

        # Read back the file that now contains only versions to keep.
        keep = []
        with open(path, 'r') as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                if line not in versions:
                    print >> sys.stderr, 'Unknown version: %s' % line
                    return 1
                if line not in keep:
                    keep.append(line)

        # Calculate a list of versions to remove.
        versions_to_remove = [v for v in versions if v not in keep]

    if not versions_to_remove:
        print('Nothing to do.')
        return 0

    # Deleting a version is a destructive operation, confirm.
    if not options.force:
        ok = gae_sdk_utils.confirm(
            'Delete the following versions?', app, versions_to_remove)
        if not ok:
            print('Aborted.')
            return 1

    for version in versions_to_remove:
        print('Deleting %s...' % version)
        app.delete_version(version)
    return 0
@subcommand.usage('[extra arguments for dev_appserver.py]')
def CMDdevserver(parser, args):
    """Runs the app locally via dev_appserver.py."""
    parser.allow_positional_args = True
    parser.disable_interspersed_args()
    parser.add_option(
        '-o', '--open', action='store_true',
        help='Listen to all interfaces (less secure)')
    app, options, args = parser.parse_args(args)
    # Let dev_appserver.py handle Ctrl+C interrupts.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    return app.run_dev_appserver(args, options.open)
@subcommand.usage('[module_id version_id]')
def CMDshell(parser, args):
    """Opens interactive remote shell with app's GAE environment.

    Connects to a specific version of a specific module (an active version of
    'default' module by default). The app must have 'remote_api: on' builtin
    enabled in app.yaml.

    Always uses password based authentication.
    """
    parser.allow_positional_args = True
    parser.add_option(
        '-H', '--host', help='Only necessary if not hosted on .appspot.com')
    parser.add_option(
        '--local', action='store_true',
        help='Operates locally on an empty dev instance')
    app, options, args = parser.parse_args(args)

    # Positional args: [module [version]].
    module = 'default'
    version = None
    if len(args) == 2:
        module, version = args
    elif len(args) == 1:
        module = args[0]
    elif args:
        parser.error('Unknown args: %s' % args)
    if module not in app.modules:
        parser.error('No such module: %s' % module)

    if not options.host and not options.local:
        # e.g. "<version>-dot-<module>-dot-<app_id>.appspot.com".
        prefixes = filter(None, (version, module, app.app_id))
        options.host = '%s.appspot.com' % '-dot-'.join(prefixes)

    # Ensure remote_api is initialized and GAE sys.path is set.
    gae_sdk_utils.setup_env(
        app.app_dir, app.app_id, version, module, remote_api=True)

    if options.host:
        # Open the connection.
        from google.appengine.ext.remote_api import remote_api_stub
        try:
            print('If asked to login, run:\n')
            print(
                'gcloud auth application-default login '
                '--scopes=https://www.googleapis.com/auth/appengine.apis,'
                'https://www.googleapis.com/auth/userinfo.email\n')
            remote_api_stub.ConfigureRemoteApiForOAuth(
                options.host, '/_ah/remote_api')
        except urllib2.URLError:
            print >> sys.stderr, 'Failed to access %s' % options.host
            return 1
        remote_api_stub.MaybeInvokeAuthentication()

    def register_sys_path(*path):
        # Prepend an existing directory to sys.path exactly once.
        abs_path = os.path.abspath(os.path.join(*path))
        if os.path.isdir(abs_path) and not abs_path in sys.path:
            sys.path.insert(0, abs_path)

    # Simplify imports of app modules (with dependencies). This code is optimized
    # for layout of apps that use 'components'.
    register_sys_path(app.app_dir)
    register_sys_path(app.app_dir, 'third_party')
    register_sys_path(app.app_dir, 'components', 'third_party')

    # Import some common modules into interactive console namespace.
    def setup_context():
        # pylint: disable=unused-variable
        from google.appengine.api import app_identity
        from google.appengine.api import memcache
        from google.appengine.api import urlfetch
        from google.appengine.ext import ndb
        return locals().copy()
    context = setup_context()

    # Fancy readline support.
    if readline is not None:
        readline.parse_and_bind('tab: complete')
        history_file = os.path.expanduser(
            '~/.config/gae_tool/remote_api_%s' % app.app_id)
        if not os.path.exists(os.path.dirname(history_file)):
            os.makedirs(os.path.dirname(history_file))
        atexit.register(lambda: readline.write_history_file(history_file))
        if os.path.exists(history_file):
            readline.read_history_file(history_file)

    prompt = [
        'App Engine interactive console for "%s".' % app.app_id,
        'Available symbols:',
    ]
    prompt.extend(sorted('  %s' % symbol for symbol in context))
    code.interact('\n'.join(prompt), None, context)
    return 0
@subcommand.usage('[version_id]')
def CMDswitch(parser, args):
    """Switches default version of all app modules.

    The version must be uploaded already. If no version is provided via command
    line, will ask interactively.
    """
    parser.add_switch_option()
    parser.add_force_option()
    parser.allow_positional_args = True
    app, options, version = parser.parse_args(args)
    if len(version) > 1:
        parser.error('Unknown args: %s' % version[1:])
    version = None if not version else version[0]

    # Interactively pick a version if not passed via command line.
    if not version:
        versions = app.get_uploaded_versions()
        if not versions:
            print('Upload a version first.')
            return 1
        print('Specify a version to switch to:')
        for version in versions:
            print('  %s' % version)
        # Default to the most recently uploaded version.
        version = (
            raw_input('Switch to version [%s]: ' % versions[-1]) or versions[-1])
        if version not in versions:
            print('No such version.')
            return 1

    _print_version_log(app, version)
    # Switching a default version is disruptive operation. Require confirmation.
    if (not options.force and
            not gae_sdk_utils.confirm('Switch default version?', app, version)):
        print('Aborted.')
        return 1
    app.set_default_version(version)
    return 0
@subcommand.usage('[module_id module_id ...]')
def CMDupload(parser, args):
    """Uploads a new version of specific (or all) modules of an app.

    Note that module yamls are expected to be named module-<module name>.yaml

    Version name looks like <number>-<commit sha1>[-tainted-<who>], where:
      number      git commit number, monotonically increases with each commit
      commit sha1 upstream commit hash the branch is based of
      tainted     git repo has local modifications compared to upstream branch
      who         username who uploads the tainted version

    Doesn't make it a default unless --switch is specified. Use 'switch'
    subcommand to change default serving version.
    """
    parser.add_tag_option()
    parser.add_option(
        '-x', '--switch', action='store_true',
        help='Switch version after uploading new code')
    parser.add_switch_option()
    parser.add_force_option()
    parser.allow_positional_args = True
    app, options, modules = parser.parse_args(args)
    for module in modules:
        if module not in app.modules:
            parser.error('No such module: %s' % module)

    # Additional chars is for the app_id as well as 5 chars for '-dot-'.
    version = calculate_version.calculate_version(
        app.app_dir, options.tag, len(app.app_id)+5)

    # Updating indexes, queues, etc is a disruptive operation. Confirm.
    if not options.force:
        approved = gae_sdk_utils.confirm(
            'Upload new version, update indexes, queues and cron jobs?',
            app, version, modules, default_yes=True)
        if not approved:
            print('Aborted.')
            return 1

    app.update(version, modules)

    print('-' * 80)
    print('New version:')
    print('  %s' % version)
    print('Uploaded as:')
    print('  https://%s-dot-%s.appspot.com' % (version, app.app_id))
    print('Manage at:')
    print('  https://console.cloud.google.com/appengine/versions?project=' +
          app.app_id)
    print('-' * 80)

    if not options.switch:
        return 0
    if 'tainted-' in version:
        print('')
        print >> sys.stderr, 'Can\'t use --switch with a tainted version!'
        return 1
    _print_version_log(app, version)
    print('Switching as default version')
    app.set_default_version(version)
    return 0
def CMDversion(parser, args):
    """Prints version name that correspond to current state of the checkout.

    'update' subcommand uses this version when uploading code to GAE.

    Version name looks like <number>-<commit sha1>[-tainted-<who>], where:
      number      git commit number, monotonically increases with each commit
      commit sha1 upstream commit hash the branch is based of
      tainted     git repo has local modifications compared to upstream branch
      who         username who uploads the tainted version
    """
    parser.add_tag_option()
    app, options, _ = parser.parse_args(args)
    # Additional chars is for the app_id as well as 5 chars for '-dot-'.
    print(calculate_version.calculate_version(
        app.app_dir, options.tag, len(app.app_id)+5))
    return 0
class OptionParser(optparse.OptionParser):
    """OptionParser with some canned options."""

    def __init__(self, app_dir, **kwargs):
        optparse.OptionParser.__init__(
            self,
            version=__version__,
            description=sys.modules['__main__'].__doc__,
            **kwargs)
        # Default application root; may be None if discovery failed.
        self.default_app_dir = app_dir
        self.allow_positional_args = False

    def add_tag_option(self):
        self.add_option('-t', '--tag', help='Tag to attach to a tainted version')

    def add_switch_option(self):
        self.add_option(
            '-n', '--no-log', action='store_true',
            help='Do not print logs from the current server active version to the '
                 'one being switched to')

    def add_force_option(self):
        self.add_option(
            '-f', '--force', action='store_true',
            help='Do not ask for confirmation')

    def parse_args(self, *args, **kwargs):
        # Inject SDK-related options, then resolve them into an Application.
        gae_sdk_utils.add_sdk_options(self, self.default_app_dir)
        options, args = optparse.OptionParser.parse_args(self, *args, **kwargs)
        if not self.allow_positional_args and args:
            self.error('Unknown arguments: %s' % args)
        app = gae_sdk_utils.process_sdk_options(self, options)
        return app, options, args
def _find_app_dir(search_dir):
    """Locates GAE app root directory (or returns None if not found).

    Starts by examining search_dir, then its parent, and so on, until it discovers
    git repository root or filesystem root.

    A directory is a suspect for an app root if it looks like an app root (has
    app.yaml or some of its subdir have app.yaml), but its parent directory does
    NOT look like an app root.

    It allows to detect multi-module Go apps. Their default module directory
    usually contains app.yaml, and this directory by itself looks like one-module
    GAE app. By looking at the parent we can detect that it's indeed just one
    module of multi-module app.

    This logic gives false positives if multiple different one-module GAE apps are
    located in sibling directories of some root directory (e.g. appengine/<app1>,
    appengine/<app2). To prevent this directory to be incorrectly used as an app
    root, we forbid root directories of this kind to directly contains apps.

    A root directory is denoted either by presence of '.git' subdir, or 'ROOT'
    file.
    """
    def is_root(p):
        return (
            os.path.isdir(os.path.join(p, '.git')) or
            os.path.isfile(os.path.join(p, 'ROOT')) or
            os.path.dirname(p) == p)

    # Cache is_app_dir() results to avoid re-probing the same directories.
    cached_check = {}
    def is_app_dir(p):
        if p not in cached_check:
            cached_check[p] = not is_root(p) and gae_sdk_utils.is_app_dir(p)
        return cached_check[p]

    while not is_root(search_dir):
        parent = os.path.dirname(search_dir)
        if is_app_dir(search_dir) and not is_app_dir(parent):
            return search_dir
        search_dir = parent

    return None
def main(args):
    # gae.py may be symlinked into app's directory or its subdirectory (to avoid
    # typing --app-dir all the time). If linked into subdirectory, discover root
    # by locating app.yaml. It is used for Python GAE apps and one-module Go apps
    # that have all YAMLs in app root dir.
    default_app_dir = None
    if IS_SYMLINKED:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        default_app_dir = _find_app_dir(script_dir)

    # If not symlinked into an app directory, try to discover app root starting
    # from cwd.
    default_app_dir = default_app_dir or _find_app_dir(os.getcwd())

    colorama.init()
    dispatcher = subcommand.CommandDispatcher(__name__)
    try:
        return dispatcher.execute(OptionParser(default_app_dir), args)
    except gae_sdk_utils.Error as e:
        print >> sys.stderr, str(e)
        return 1
    except KeyboardInterrupt:
        # Don't dump stack traces on Ctrl+C, it's expected flow in some commands.
        print >> sys.stderr, '\nInterrupted'
        return 1


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 33.764045 | 80 | 0.694842 |
9b4deb821f14684f3d5053f246f6ded8c47bb717 | 4,281 | py | Python | chart_serializer.py | ConnorWhalen/wendletrap | 7b7135f4eee0d9fbb1e711a60b1693e3ed53ba05 | [
"MIT"
] | 1 | 2021-07-19T23:44:46.000Z | 2021-07-19T23:44:46.000Z | chart_serializer.py | ConnorWhalen/wendletrap | 7b7135f4eee0d9fbb1e711a60b1693e3ed53ba05 | [
"MIT"
] | null | null | null | chart_serializer.py | ConnorWhalen/wendletrap | 7b7135f4eee0d9fbb1e711a60b1693e3ed53ba05 | [
"MIT"
] | null | null | null | from copy import deepcopy
import midi_parser
STAR_POWER_LANE = 8
def serialize_file(file_data, charts_data):
    """
    Write chart file notes.chart and ini file song.ini.
    file_data is:
    {
        title: str
        artist: str
        genre: str
        author: str
        offset_secs: float
        sample_start_secs: float
        album: str
        year: str
        song_length_secs: str
        difficulty_number: str
    }
    charts_data is:
    [
        {
            midi_filename: str
        }
    ]
    """
    print(f"writing chart file...")
    with open("notes.chart", "w") as file_:
        file_.write("[Song]\n")
        file_.write("{\n")
        file_.write(f"  Name = \"{file_data['title']}\"\n")
        file_.write(f"  Artist = \"{file_data['artist']}\"\n")
        file_.write(f"  Charter = \"{file_data['author']}\"\n")
        file_.write(f"  Album = \"{file_data['album']}\"\n")
        file_.write(f"  Year = \", {file_data['year']}\"\n")
        file_.write("  Offset = 0\n")
        file_.write("  Resolution = 192\n")
        file_.write("  Player2 = bass\n")
        file_.write("  Difficulty = 0\n")
        file_.write("  PreviewStart = 0\n")
        file_.write("  PreviewEnd = 0\n")
        file_.write(f"  Genre = \"{file_data['genre']}\"\n")
        file_.write("  MediaType = \"cd\"\n")
        file_.write("  MusicStream = \"song.ogg\"\n")
        file_.write("}\n")

        file_.write("[SyncTrack]\n")
        file_.write("{\n")
        note_starts, tempos, time_sigs = midi_parser.parse_file(charts_data[0]["midi_filename"], type_="chart")
        tempos_copy = deepcopy(tempos)
        time_sigs_copy = deepcopy(time_sigs)
        # Merge tempo and time-signature events in measure order.
        while len(tempos_copy) > 0 or len(time_sigs_copy) > 0:
            if len(tempos_copy) == 0:
                tempo_or_timesigb = False
            elif len(time_sigs_copy) == 0:
                tempo_or_timesigb = True
            elif tempos_copy[0][1] < time_sigs_copy[0][1]:
                tempo_or_timesigb = True
            else:
                tempo_or_timesigb = False
            if tempo_or_timesigb:
                tempo = tempos_copy.pop(0)
                tempo_bpm = tempo[0]
                tempo_measure = tempo[1]
                file_.write(f"  {write_measure_number(tempo_measure)} = B {write_3_decimal_number(tempo_bpm)}\n")
            else:
                time_sig = time_sigs_copy.pop(0)
                time_sig_value = time_sig[0]
                time_sig_measure = time_sig[1]
                file_.write(f"  {write_measure_number(time_sig_measure)} = TS {time_sig_value}\n")
        file_.write("}\n")

        file_.write("[Events]\n")
        file_.write("{\n")
        sections = []
        for section in sections:
            section_title = section[0]
            section_measure = section[1]
            file_.write(f"  {write_measure_number(section_measure)} = E \"section {section_title}\"\n")
        file_.write("}\n")

        file_.write("[ExpertSingle]\n")
        file_.write("{\n")
        for note_start in note_starts:
            note_lane = note_start[0]
            note_start_measure = note_start[1]
            note_end_measure = note_start[2]
            if note_end_measure > 0:
                note_length = float(note_end_measure) - float(note_start_measure)
            else:
                note_length = 0
            if note_lane == STAR_POWER_LANE:
                note_type = "S"
                note_lane = 2
            else:
                note_type = "N"
            # BUG FIX: the note type was computed but "N" was hard-coded in the
            # output, so star-power phrases were emitted as regular notes.
            file_.write(f"  {write_measure_number(note_start_measure)} = {note_type} {note_lane} {write_measure_number(note_length)}\n")
        file_.write("}\n")
    print(f"chart file complete!")

    print(f"writing ini file...")
    with open("song.ini", "w") as file_:
        file_.write("[song]\n")
        file_.write(f"name = {file_data['title']}\n")
        file_.write(f"artist = {file_data['artist']}\n")
        file_.write(f"genre = {file_data['genre']}\n")
        file_.write(f"year = {file_data['year']}\n")
        file_.write(f"diff_band = -1\n")
        file_.write(f"diff_guitar = {file_data['difficulty_number']}\n")
        file_.write("diff_bass = -1\n")
        file_.write("diff_drums = -1\n")
        file_.write("diff_keys = -1\n")
        file_.write("diff_guitarghl = -1\n")
        file_.write("diff_bassghl = -1\n")
        file_.write(f"preview_start_time = {write_3_decimal_number(file_data['sample_start_secs'])}\n")
        file_.write("icon = \n")
        file_.write("album_track = 0\n")
        file_.write("playlist_track = 0\n")
        file_.write("video_start_time = 0\n")
        file_.write(f"charter = {file_data['author']}\n")
        # Clone Hero expects delay in ms, negated relative to the MIDI offset.
        file_.write(f"delay = {-write_3_decimal_number(file_data['offset_secs'])}\n")
        file_.write(f"song_length = {write_3_decimal_number(file_data['song_length_secs'])}\n")
    print(f"ini file complete!")
def write_measure_number(number_str):
    """Convert a measure position (str or float) to chart ticks (192/measure)."""
    return int(float(number_str)*192)
def write_3_decimal_number(number_str):
    """Convert a value in units (str or float) to an integer count of 1/1000s."""
    return int(float(number_str)*1000)
| 28.54 | 117 | 0.668535 |
09eb3e6dc6aae230da59a2ea972721b97e5fc5eb | 644 | py | Python | Day20_21/scoreboard.py | MHKomeili/100DaysofCode | a5799011a43f777ddc5ac9e649aa27291313b62b | [
"MIT"
] | null | null | null | Day20_21/scoreboard.py | MHKomeili/100DaysofCode | a5799011a43f777ddc5ac9e649aa27291313b62b | [
"MIT"
] | null | null | null | Day20_21/scoreboard.py | MHKomeili/100DaysofCode | a5799011a43f777ddc5ac9e649aa27291313b62b | [
"MIT"
] | null | null | null | from turtle import Turtle
ALIGNMENT = "center"
FONT = ('Courier', 20, 'normal')


class Scoreboard(Turtle):
    """Turtle that displays the current score at the top of the screen."""

    def __init__(self):
        super().__init__()
        self.color('orange')
        self.penup()
        self.goto(x=0, y=270)
        self.score = 0
        self.update_scoreboard()

    def update_scoreboard(self):
        # Redraw the score text from scratch.
        self.clear()
        self.write(f"Score: {self.score}", align=ALIGNMENT, font=FONT)
        self.hideturtle()

    def game_over(self):
        self.goto(0, 0)
        self.write(f"GAME OVER", align=ALIGNMENT, font=FONT)

    def add_score(self):
        self.score += 1
        self.update_scoreboard()
| 22.206897 | 70 | 0.590062 |
2004f96882e3835081a152ed139b7bdad4e83b97 | 2,318 | py | Python | payments/price_feed.py | SatSale/SatSale | b10ba265af1c028602c977fc6d65b5e76ea6f868 | [
"MIT"
] | 5 | 2022-03-18T22:01:52.000Z | 2022-03-27T09:17:18.000Z | payments/price_feed.py | SatSale/SatSale | b10ba265af1c028602c977fc6d65b5e76ea6f868 | [
"MIT"
] | 8 | 2022-03-17T01:41:13.000Z | 2022-03-31T20:48:38.000Z | payments/price_feed.py | SatSale/SatSale | b10ba265af1c028602c977fc6d65b5e76ea6f868 | [
"MIT"
] | 1 | 2022-03-30T05:13:47.000Z | 2022-03-30T05:13:47.000Z | import requests
import logging
import config
def get_currency_provider(currency, currency_provider):
    """Return provider-specific price-feed settings for the given fiat currency.

    COINDESK uses upper-case tickers; the CoinGecko fallback uses lower-case.
    """
    if currency_provider == "COINDESK":
        return {
            "price_feed": "https://api.coindesk.com/v1/bpi/currentprice.json",
            "result_root": "bpi",
            "value_attribute": "rate_float",
            "ticker": currency.upper(),
        }
    else:
        return {
            "price_feed": "https://api.coingecko.com/api/v3/exchange_rates",
            "result_root": "rates",
            "value_attribute": "value",
            "ticker": currency.lower(),
        }
def get_price(currency, currency_provider=config.currency_provider, bitcoin_rate_multiplier=config.bitcoin_rate_multiplier):
    """Fetch the BTC price in `currency`, retrying up to config.connection_attempts.

    Returns the (optionally rate-adjusted) price, or None if the provider
    response does not contain the requested ticker.
    """
    provider = get_currency_provider(currency, currency_provider)
    for i in range(config.connection_attempts):
        try:
            r = requests.get(provider["price_feed"])
            price_data = r.json()
            prices = price_data[provider["result_root"]]
            break
        except Exception as e:
            logging.error(e)
            logging.info(
                "Attempting again... {}/{}...".format(i + 1, config.connection_attempts)
            )
    else:
        # BUG FIX: `raise` needs an exception instance; the original raised a
        # plain string, which is a TypeError in Python 3.
        raise Exception("Failed to reach {}.".format(provider["price_feed"]))

    try:
        price = prices[provider["ticker"]][provider["value_attribute"]]
        if bitcoin_rate_multiplier != 1.00:
            logging.debug("Adjusting BTC price from {} to {} because of rate multiplier {}.".format(
                price, price * bitcoin_rate_multiplier, bitcoin_rate_multiplier))
            price = price * bitcoin_rate_multiplier
        return price
    except Exception:
        logging.error(
            "Failed to find currency {} from {}.".format(currency, provider["price_feed"])
        )
    return None
def get_btc_value(base_amount, currency):
    """Convert a fiat amount to its BTC value using the current price feed."""
    price = get_price(currency)
    if price is not None:
        try:
            float_value = float(base_amount) / float(price)
            if not isinstance(float_value, float):
                raise Exception("Fiat value should be a float.")
        except Exception as e:
            # BUG FIX: the original logged and then fell through to
            # `return float_value`, which raised NameError when the conversion
            # failed. Re-raise after logging instead.
            logging.error(e)
            raise
        return float_value
    raise Exception("Failed to get fiat value.")
| 32.194444 | 124 | 0.608714 |
7964d878fc54bb52d22576efd4eb15da5f7e8522 | 9,559 | py | Python | simpletransformers/ner/ner_utils.py | hjc3613/simpletransformers | bce58639f3fa8f45f445b053b5aaae428c3c5429 | [
"Apache-2.0"
] | null | null | null | simpletransformers/ner/ner_utils.py | hjc3613/simpletransformers | bce58639f3fa8f45f445b053b5aaae428c3c5429 | [
"Apache-2.0"
] | null | null | null | simpletransformers/ner/ner_utils.py | hjc3613/simpletransformers | bce58639f3fa8f45f445b053b5aaae428c3c5429 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
from __future__ import absolute_import, division, print_function
import logging
import os
from io import open
from multiprocessing import Pool, cpu_count
import re
from tqdm.auto import tqdm
import pandas as pd
class InputExample(object):
    """A single training/test example for token classification."""

    def __init__(self, guid, words, labels):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            words: list. The words of the sequence.
            labels: (Optional) list. The labels for each word of the sequence. This should be
            specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.words = words
        self.labels = labels
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_ids):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_ids = label_ids
def read_examples_from_file(data_file, mode):
    """Read CoNLL-style "word label" lines into InputExample objects.

    Sentences are separated by blank lines (or -DOCSTART- markers); each
    non-blank line is "<word> ... <label>" split on whitespace.
    """
    file_path = data_file
    guid_index = 1
    examples = []
    with open(file_path, encoding="utf-8") as f:
        words = []
        labels = []
        for line in f:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                if words:
                    examples.append(InputExample(guid="{}-{}".format(mode, guid_index), words=words, labels=labels,))
                    guid_index += 1
                    words = []
                    labels = []
            else:
                # Split on any whitespace (tabs or multiple spaces).
                splits = re.split(r'\s+', line.strip())
                words.append(splits[0])
                if len(splits) > 1:
                    labels.append(splits[-1].replace("\n", ""))
                else:
                    # Examples could have no label for mode = "test"
                    labels.append("O")
        if words:
            # BUG FIX: was '"%s-%d".format(mode, guid_index)' which never
            # substitutes (str.format needs {} placeholders), so the last
            # example's guid was the literal string "%s-%d".
            examples.append(InputExample(guid="{}-{}".format(mode, guid_index), words=words, labels=labels))
    return examples
def get_examples_from_df(data):
    """Build one InputExample per sentence from a DataFrame with
    'sentence_id', 'words' and 'labels' columns."""
    return [
        InputExample(guid=sentence_id, words=sentence_df["words"].tolist(), labels=sentence_df["labels"].tolist(),)
        for sentence_id, sentence_df in data.groupby(["sentence_id"])
    ]
def convert_example_to_feature(example_row):
    """Convert one (example, parameters...) tuple into an InputFeatures object.

    Args:
        example_row: Tuple bundling an InputExample with all tokenization and
            padding parameters; built by convert_examples_to_features so that
            each tuple is self-contained (and picklable for worker processes).

    Returns:
        InputFeatures whose input_ids, input_mask, segment_ids and label_ids
        are each padded/truncated to exactly max_seq_length.
    """
    # Unpack the bundled example and conversion parameters.
    (
        example,
        label_map,
        max_seq_length,
        tokenizer,
        cls_token_at_end,
        cls_token,
        cls_token_segment_id,
        sep_token,
        sep_token_extra,
        pad_on_left,
        pad_token,
        pad_token_segment_id,
        pad_token_label_id,
        sequence_a_segment_id,
        mask_padding_with_zero,
    ) = example_row
    tokens = []
    label_ids = []
    for word, label in zip(example.words, example.labels):
        word_tokens = tokenizer.tokenize(word)
        tokens.extend(word_tokens)
        # Use the real label id for the first token of the word, and padding ids for the remaining tokens
        if word_tokens:  # avoid non printable character like '\u200e' which are tokenized as a void token ''
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
    # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
    special_tokens_count = 3 if sep_token_extra else 2
    # Truncate (never pad here) so special tokens always fit.
    if len(tokens) > max_seq_length - special_tokens_count:
        tokens = tokens[: (max_seq_length - special_tokens_count)]
        label_ids = label_ids[: (max_seq_length - special_tokens_count)]
    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids:   0   0   0   0  0     0   0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    # NOTE(review): sep_token/cls_token are looked up in label_map below, so
    # the label list must contain the special-token strings themselves (e.g.
    # "[SEP]", "[CLS]") — confirm against the caller's label_list, otherwise
    # these lookups raise KeyError.
    tokens += [sep_token]
    label_ids += [label_map[sep_token]]
    if sep_token_extra:
        # roberta uses an extra separator b/w pairs of sentences
        tokens += [sep_token]
        label_ids += [label_map[sep_token]]
    segment_ids = [sequence_a_segment_id] * len(tokens)
    # Place [CLS] at the end (XLNet-style) or the front (BERT-style).
    if cls_token_at_end:
        tokens += [cls_token]
        label_ids += [pad_token_label_id]
        segment_ids += [cls_token_segment_id]
    else:
        tokens = [cls_token] + tokens
        label_ids = [label_map[cls_token]] + label_ids
        segment_ids = [cls_token_segment_id] + segment_ids
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
    # Zero-pad up to the sequence length.
    padding_length = max_seq_length - len(input_ids)
    if pad_on_left:
        input_ids = ([pad_token] * padding_length) + input_ids
        input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
        segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        label_ids = ([pad_token_label_id] * padding_length) + label_ids
    else:
        input_ids += [pad_token] * padding_length
        input_mask += [0 if mask_padding_with_zero else 1] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
        label_ids += [pad_token_label_id] * padding_length
    # All four sequences must line up exactly with the model input size.
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(label_ids) == max_seq_length
    return InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids,)
def convert_examples_to_features(
    examples,
    label_list,
    max_seq_length,
    tokenizer,
    cls_token_at_end=False,
    cls_token="[CLS]",
    cls_token_segment_id=1,
    sep_token="[SEP]",
    sep_token_extra=False,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    pad_token_label_id=-1,
    sequence_a_segment_id=0,
    mask_padding_with_zero=True,
    process_count=cpu_count() - 2,
    chunksize=500,
    silent=False,
    use_multiprocessing=True,
):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)

        Converts each InputExample into an InputFeatures via
        convert_example_to_feature, optionally fanning the work out over a
        multiprocessing Pool. Returns a list of InputFeatures in example order.

        NOTE(review): the process_count default (cpu_count() - 2) is evaluated
        once at import time and can be 0 or negative on 1-2 core machines,
        which would make Pool() raise — confirm callers guard against this.
    """
    # Map each label string to its integer id; ids follow label_list order.
    label_map = {label: i for i, label in enumerate(label_list)}
    # Bundle every example with the (identical) conversion parameters so each
    # work item is self-contained and picklable for the worker pool.
    examples = [
        (
            example,
            label_map,
            max_seq_length,
            tokenizer,
            cls_token_at_end,
            cls_token,
            cls_token_segment_id,
            sep_token,
            sep_token_extra,
            pad_on_left,
            pad_token,
            pad_token_segment_id,
            pad_token_label_id,
            sequence_a_segment_id,
            mask_padding_with_zero,
        )
        for example in examples
    ]
    if use_multiprocessing:
        # imap preserves input order; tqdm wraps it for progress reporting.
        with Pool(process_count) as p:
            features = list(
                tqdm(
                    p.imap(convert_example_to_feature, examples, chunksize=chunksize),
                    total=len(examples),
                    disable=silent,
                )
            )
    else:
        # Single-process fallback, e.g. when the tokenizer is not picklable.
        features = []
        for example in tqdm(examples):
            features.append(convert_example_to_feature(example))
    return features
def get_labels(path):
    """Return the list of NER label strings.

    Args:
        path: Optional path to a text file with one label per line. If falsy
            (None or ""), the default CoNLL-2003 label set is returned.

    Returns:
        list of label strings. "O" is guaranteed to be present: when it is
        missing from the file it is prepended.
    """
    if path:
        # Read labels with an explicit utf-8 encoding, for consistency with
        # read_examples_from_file (the previous default was locale-dependent).
        with open(path, "r", encoding="utf-8") as f:
            labels = f.read().splitlines()
        if "O" not in labels:
            labels = ["O"] + labels
        return labels
    else:
        return [
            "O",
            "B-MISC",
            "I-MISC",
            "B-PER",
            "I-PER",
            "B-ORG",
            "I-ORG",
            "B-LOC",
            "I-LOC",
        ]
| 34.886861 | 117 | 0.620253 |
7cd1ff4a4cf35dd69911611422be856309b8643b | 1,884 | py | Python | Core/mailer.py | AdrMXR/PhishMailer | 66831c9933969a83ec311ca16a6e0672f1545675 | [
"MIT"
] | 7 | 2020-07-04T02:57:03.000Z | 2022-01-02T03:13:31.000Z | Core/mailer.py | Ranjithkumar567/PhishMailer-Templates | 66831c9933969a83ec311ca16a6e0672f1545675 | [
"MIT"
] | null | null | null | Core/mailer.py | Ranjithkumar567/PhishMailer-Templates | 66831c9933969a83ec311ca16a6e0672f1545675 | [
"MIT"
] | 1 | 2020-07-01T07:35:07.000Z | 2020-07-01T07:35:07.000Z | import smtplib
import os
import getpass
import sys
import ssl
from email.mime.text import MIMEText
from email.utils import formataddr
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
from email.mime.text import MIMEText
red = ("\033[1;31;40m")
green = ("\033[1;32;40m")
white = ("\033[1;37;40m")
blue = ("\033[1;34;40m")
start = (green + "[" + white + "+" + green + "]" + white)
alert = (green + "[" + red + "!" + green + "]" + white)
def NormalEmail():
    """Interactively collect Gmail credentials and send an HTML bait email.

    Prompts for sender address/password, recipient, subject and the path to an
    HTML template file, then delivers the message through smtp.gmail.com:587
    using STARTTLS. Purely interactive; returns None.
    """
    os.system("clear")
    print(green)
    print("""
     __^__                                                          __^__
    ( ___ )------------------------------------------------------( ___ )
     | / |                                                        | \ |
     | / |+-------------)PhishMailer BaitMailer V1(--------------+| \ |
     |___|                                                        |___|
    (_____)------------------------------------------------------(_____)        """)
    print(alert + "It Might Take A Few Minutes Until The Target Gets The Email" + alert)
    print(alert + "You Need To Allow Less Secure Apps On You Gmail Account" + alert)
    print("")
    fromaddr = input(start + " Enter Your Email-Address: ")
    password = getpass.getpass(start + " Enter Your Password (will not be shown): ")
    toaddr = input(start + " Enter Email-Address To Send To: ")
    subject = input(start + " Enter Subject: ")
    pathfile = input(start + " Enter Path To Html File: ")
    # BUG FIX: the template file handle was previously opened and never
    # closed; use a context manager so it is released deterministically.
    with open(pathfile) as html:
        msg = MIMEText(html.read(), 'html')
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = subject
    debug = False
    if debug:
        # Dry-run: dump the raw message instead of sending it.
        print(msg.as_string())
    else:
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        server.login(fromaddr, password)
        text = msg.as_string()
        server.sendmail(fromaddr, toaddr, text)
        server.quit()
        print(alert + "Email Sent" + alert)
| 29.904762 | 85 | 0.553079 |
249ee9e538dc766282847c151aed3454862c64ca | 19,420 | py | Python | netbox/netbox/settings.py | Megzo/netbox | f8a21da9f034b31d7b91587cc6a295bbc4d9edea | [
"Apache-2.0"
] | null | null | null | netbox/netbox/settings.py | Megzo/netbox | f8a21da9f034b31d7b91587cc6a295bbc4d9edea | [
"Apache-2.0"
] | null | null | null | netbox/netbox/settings.py | Megzo/netbox | f8a21da9f034b31d7b91587cc6a295bbc4d9edea | [
"Apache-2.0"
] | null | null | null | import logging
import os
import platform
import socket
import warnings
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured
#
# Environment setup
#
VERSION = '2.7.7-dev'
# Hostname
HOSTNAME = platform.node()
# Set the base directory two levels up
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Validate Python version
if platform.python_version_tuple() < ('3', '5'):
raise RuntimeError(
"NetBox requires Python 3.5 or higher (current: Python {})".format(platform.python_version())
)
elif platform.python_version_tuple() < ('3', '6'):
warnings.warn(
"Python 3.6 or higher will be required starting with NetBox v2.8 (current: Python {})".format(
platform.python_version()
)
)
#
# Configuration import
#
# Import configuration parameters
try:
from netbox import configuration
except ImportError:
raise ImproperlyConfigured(
"Configuration file is not present. Please define netbox/netbox/configuration.py per the documentation."
)
# Enforce required configuration parameters
for parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY', 'REDIS']:
if not hasattr(configuration, parameter):
raise ImproperlyConfigured(
"Required parameter {} is missing from configuration.py.".format(parameter)
)
# Set required parameters
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')
DATABASE = getattr(configuration, 'DATABASE')
REDIS = getattr(configuration, 'REDIS')
SECRET_KEY = getattr(configuration, 'SECRET_KEY')
# Set optional parameters
ADMINS = getattr(configuration, 'ADMINS', [])
BANNER_BOTTOM = getattr(configuration, 'BANNER_BOTTOM', '')
BANNER_LOGIN = getattr(configuration, 'BANNER_LOGIN', '')
BANNER_TOP = getattr(configuration, 'BANNER_TOP', '')
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
CACHE_TIMEOUT = getattr(configuration, 'CACHE_TIMEOUT', 900)
CHANGELOG_RETENTION = getattr(configuration, 'CHANGELOG_RETENTION', 90)
CORS_ORIGIN_ALLOW_ALL = getattr(configuration, 'CORS_ORIGIN_ALLOW_ALL', False)
CORS_ORIGIN_REGEX_WHITELIST = getattr(configuration, 'CORS_ORIGIN_REGEX_WHITELIST', [])
CORS_ORIGIN_WHITELIST = getattr(configuration, 'CORS_ORIGIN_WHITELIST', [])
DATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')
DATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')
DEBUG = getattr(configuration, 'DEBUG', False)
DEVELOPER = getattr(configuration, 'DEVELOPER', False)
EMAIL = getattr(configuration, 'EMAIL', {})
ENFORCE_GLOBAL_UNIQUE = getattr(configuration, 'ENFORCE_GLOBAL_UNIQUE', False)
EXEMPT_VIEW_PERMISSIONS = getattr(configuration, 'EXEMPT_VIEW_PERMISSIONS', [])
LOGGING = getattr(configuration, 'LOGGING', {})
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
LOGIN_TIMEOUT = getattr(configuration, 'LOGIN_TIMEOUT', None)
MAINTENANCE_MODE = getattr(configuration, 'MAINTENANCE_MODE', False)
MAX_PAGE_SIZE = getattr(configuration, 'MAX_PAGE_SIZE', 1000)
MEDIA_ROOT = getattr(configuration, 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media')).rstrip('/')
STORAGE_BACKEND = getattr(configuration, 'STORAGE_BACKEND', None)
STORAGE_CONFIG = getattr(configuration, 'STORAGE_CONFIG', {})
METRICS_ENABLED = getattr(configuration, 'METRICS_ENABLED', False)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 50)
PREFER_IPV4 = getattr(configuration, 'PREFER_IPV4', False)
REPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')
SCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT', os.path.join(BASE_DIR, 'scripts')).rstrip('/')
SESSION_FILE_PATH = getattr(configuration, 'SESSION_FILE_PATH', None)
SHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')
SHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
SHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')
TIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
#
# Database
#
# Only PostgreSQL is supported
if METRICS_ENABLED:
DATABASE.update({
'ENGINE': 'django_prometheus.db.backends.postgresql'
})
else:
DATABASE.update({
'ENGINE': 'django.db.backends.postgresql'
})
DATABASES = {
'default': DATABASE,
}
#
# Media storage
#
if STORAGE_BACKEND is not None:
DEFAULT_FILE_STORAGE = STORAGE_BACKEND
# django-storages
if STORAGE_BACKEND.startswith('storages.'):
try:
import storages.utils
except ImportError:
raise ImproperlyConfigured(
"STORAGE_BACKEND is set to {} but django-storages is not present. It can be installed by running 'pip "
"install django-storages'.".format(STORAGE_BACKEND)
)
# Monkey-patch django-storages to fetch settings from STORAGE_CONFIG
        def _setting(name, default=None):
            """Look up *name* in STORAGE_CONFIG first, then this module's globals."""
            if name in STORAGE_CONFIG:
                return STORAGE_CONFIG[name]
            return globals().get(name, default)
storages.utils.setting = _setting
if STORAGE_CONFIG and STORAGE_BACKEND is None:
warnings.warn(
"STORAGE_CONFIG has been set in configuration.py but STORAGE_BACKEND is not defined. STORAGE_CONFIG will be "
"ignored."
)
#
# Redis
#
if 'webhooks' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing webhooks subsection."
)
if 'caching' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing caching subsection."
)
WEBHOOKS_REDIS = REDIS.get('webhooks', {})
WEBHOOKS_REDIS_HOST = WEBHOOKS_REDIS.get('HOST', 'localhost')
WEBHOOKS_REDIS_PORT = WEBHOOKS_REDIS.get('PORT', 6379)
WEBHOOKS_REDIS_SENTINELS = WEBHOOKS_REDIS.get('SENTINELS', [])
WEBHOOKS_REDIS_USING_SENTINEL = all([
isinstance(WEBHOOKS_REDIS_SENTINELS, (list, tuple)),
len(WEBHOOKS_REDIS_SENTINELS) > 0
])
WEBHOOKS_REDIS_SENTINEL_SERVICE = WEBHOOKS_REDIS.get('SENTINEL_SERVICE', 'default')
WEBHOOKS_REDIS_PASSWORD = WEBHOOKS_REDIS.get('PASSWORD', '')
WEBHOOKS_REDIS_DATABASE = WEBHOOKS_REDIS.get('DATABASE', 0)
WEBHOOKS_REDIS_DEFAULT_TIMEOUT = WEBHOOKS_REDIS.get('DEFAULT_TIMEOUT', 300)
WEBHOOKS_REDIS_SSL = WEBHOOKS_REDIS.get('SSL', False)
CACHING_REDIS = REDIS.get('caching', {})
CACHING_REDIS_HOST = CACHING_REDIS.get('HOST', 'localhost')
CACHING_REDIS_PORT = CACHING_REDIS.get('PORT', 6379)
CACHING_REDIS_SENTINELS = CACHING_REDIS.get('SENTINELS', [])
CACHING_REDIS_USING_SENTINEL = all([
isinstance(CACHING_REDIS_SENTINELS, (list, tuple)),
len(CACHING_REDIS_SENTINELS) > 0
])
CACHING_REDIS_SENTINEL_SERVICE = CACHING_REDIS.get('SENTINEL_SERVICE', 'default')
CACHING_REDIS_PASSWORD = CACHING_REDIS.get('PASSWORD', '')
CACHING_REDIS_DATABASE = CACHING_REDIS.get('DATABASE', 0)
CACHING_REDIS_DEFAULT_TIMEOUT = CACHING_REDIS.get('DEFAULT_TIMEOUT', 300)
CACHING_REDIS_SSL = CACHING_REDIS.get('SSL', False)
#
# Sessions
#
if LOGIN_TIMEOUT is not None:
# Django default is 1209600 seconds (14 days)
SESSION_COOKIE_AGE = LOGIN_TIMEOUT
if SESSION_FILE_PATH is not None:
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
#
# Email
#
EMAIL_HOST = EMAIL.get('SERVER')
EMAIL_PORT = EMAIL.get('PORT', 25)
EMAIL_HOST_USER = EMAIL.get('USERNAME')
EMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')
EMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)
SERVER_EMAIL = EMAIL.get('FROM_EMAIL')
EMAIL_SUBJECT_PREFIX = '[NetBox] '
#
# Django
#
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'cacheops',
'corsheaders',
'debug_toolbar',
'django_filters',
'django_rq',
'django_tables2',
'django_prometheus',
'mptt',
'rest_framework',
'taggit',
'taggit_serializer',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'secrets',
'tenancy',
'users',
'utilities',
'virtualization',
'drf_yasg',
]
# Middleware
MIDDLEWARE = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'utilities.middleware.ExceptionHandlingMiddleware',
'utilities.middleware.LoginRequiredMiddleware',
'utilities.middleware.APIVersionMiddleware',
'extras.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
)
ROOT_URLCONF = 'netbox.urls'
TEMPLATES_DIR = BASE_DIR + '/templates'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'utilities.context_processors.settings',
],
},
},
]
# Authentication
AUTHENTICATION_BACKENDS = [
'utilities.auth_backends.ViewExemptModelBackend',
]
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_TZ = True
# WSGI
WSGI_APPLICATION = 'netbox.wsgi.application'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
USE_X_FORWARDED_HOST = True
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static'
STATIC_URL = '/{}static/'.format(BASE_PATH)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "project-static"),
)
# Media
MEDIA_URL = '/{}media/'.format(BASE_PATH)
# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Authentication URLs
LOGIN_URL = '/{}login/'.format(BASE_PATH)
CSRF_TRUSTED_ORIGINS = ALLOWED_HOSTS
#
# LDAP authentication (optional)
#
try:
from netbox import ldap_config as LDAP_CONFIG
except ImportError:
LDAP_CONFIG = None
if LDAP_CONFIG is not None:
# Check that django_auth_ldap is installed
try:
import ldap
import django_auth_ldap
except ImportError:
raise ImproperlyConfigured(
"LDAP authentication has been configured, but django-auth-ldap is not installed. Remove "
"netbox/ldap_config.py to disable LDAP."
)
# Required configuration parameters
try:
AUTH_LDAP_SERVER_URI = getattr(LDAP_CONFIG, 'AUTH_LDAP_SERVER_URI')
except AttributeError:
raise ImproperlyConfigured(
"Required parameter AUTH_LDAP_SERVER_URI is missing from ldap_config.py."
)
# Optional configuration parameters
AUTH_LDAP_ALWAYS_UPDATE_USER = getattr(LDAP_CONFIG, 'AUTH_LDAP_ALWAYS_UPDATE_USER', True)
AUTH_LDAP_AUTHORIZE_ALL_USERS = getattr(LDAP_CONFIG, 'AUTH_LDAP_AUTHORIZE_ALL_USERS', False)
AUTH_LDAP_BIND_AS_AUTHENTICATING_USER = getattr(LDAP_CONFIG, 'AUTH_LDAP_BIND_AS_AUTHENTICATING_USER', False)
AUTH_LDAP_BIND_DN = getattr(LDAP_CONFIG, 'AUTH_LDAP_BIND_DN', '')
AUTH_LDAP_BIND_PASSWORD = getattr(LDAP_CONFIG, 'AUTH_LDAP_BIND_PASSWORD', '')
AUTH_LDAP_CACHE_TIMEOUT = getattr(LDAP_CONFIG, 'AUTH_LDAP_CACHE_TIMEOUT', 0)
AUTH_LDAP_CONNECTION_OPTIONS = getattr(LDAP_CONFIG, 'AUTH_LDAP_CONNECTION_OPTIONS', {})
AUTH_LDAP_DENY_GROUP = getattr(LDAP_CONFIG, 'AUTH_LDAP_DENY_GROUP', None)
AUTH_LDAP_FIND_GROUP_PERMS = getattr(LDAP_CONFIG, 'AUTH_LDAP_FIND_GROUP_PERMS', False)
AUTH_LDAP_GLOBAL_OPTIONS = getattr(LDAP_CONFIG, 'AUTH_LDAP_GLOBAL_OPTIONS', {})
AUTH_LDAP_GROUP_SEARCH = getattr(LDAP_CONFIG, 'AUTH_LDAP_GROUP_SEARCH', None)
AUTH_LDAP_GROUP_TYPE = getattr(LDAP_CONFIG, 'AUTH_LDAP_GROUP_TYPE', None)
AUTH_LDAP_MIRROR_GROUPS = getattr(LDAP_CONFIG, 'AUTH_LDAP_MIRROR_GROUPS', None)
AUTH_LDAP_MIRROR_GROUPS_EXCEPT = getattr(LDAP_CONFIG, 'AUTH_LDAP_MIRROR_GROUPS_EXCEPT', None)
AUTH_LDAP_PERMIT_EMPTY_PASSWORD = getattr(LDAP_CONFIG, 'AUTH_LDAP_PERMIT_EMPTY_PASSWORD', False)
AUTH_LDAP_REQUIRE_GROUP = getattr(LDAP_CONFIG, 'AUTH_LDAP_REQUIRE_GROUP', None)
AUTH_LDAP_NO_NEW_USERS = getattr(LDAP_CONFIG, 'AUTH_LDAP_NO_NEW_USERS', False)
AUTH_LDAP_START_TLS = getattr(LDAP_CONFIG, 'AUTH_LDAP_START_TLS', False)
AUTH_LDAP_USER_QUERY_FIELD = getattr(LDAP_CONFIG, 'AUTH_LDAP_USER_QUERY_FIELD', None)
AUTH_LDAP_USER_ATTRLIST = getattr(LDAP_CONFIG, 'AUTH_LDAP_USER_ATTRLIST', None)
AUTH_LDAP_USER_ATTR_MAP = getattr(LDAP_CONFIG, 'AUTH_LDAP_USER_ATTR_MAP', {})
AUTH_LDAP_USER_DN_TEMPLATE = getattr(LDAP_CONFIG, 'AUTH_LDAP_USER_DN_TEMPLATE', None)
AUTH_LDAP_USER_FLAGS_BY_GROUP = getattr(LDAP_CONFIG, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {})
AUTH_LDAP_USER_SEARCH = getattr(LDAP_CONFIG, 'AUTH_LDAP_USER_SEARCH', None)
# Optionally disable strict certificate checking
if getattr(LDAP_CONFIG, 'LDAP_IGNORE_CERT_ERRORS', False):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
# Prepend LDAPBackend to the authentication backends list
AUTHENTICATION_BACKENDS.insert(0, 'django_auth_ldap.backend.LDAPBackend')
# Enable logging for django_auth_ldap
ldap_logger = logging.getLogger('django_auth_ldap')
ldap_logger.addHandler(logging.StreamHandler())
ldap_logger.setLevel(logging.DEBUG)
#
# Caching
#
if CACHING_REDIS_USING_SENTINEL:
CACHEOPS_SENTINEL = {
'locations': CACHING_REDIS_SENTINELS,
'service_name': CACHING_REDIS_SENTINEL_SERVICE,
'db': CACHING_REDIS_DATABASE,
}
else:
if CACHING_REDIS_SSL:
REDIS_CACHE_CON_STRING = 'rediss://'
else:
REDIS_CACHE_CON_STRING = 'redis://'
if CACHING_REDIS_PASSWORD:
REDIS_CACHE_CON_STRING = '{}:{}@'.format(REDIS_CACHE_CON_STRING, CACHING_REDIS_PASSWORD)
REDIS_CACHE_CON_STRING = '{}{}:{}/{}'.format(
REDIS_CACHE_CON_STRING,
CACHING_REDIS_HOST,
CACHING_REDIS_PORT,
CACHING_REDIS_DATABASE
)
CACHEOPS_REDIS = REDIS_CACHE_CON_STRING
if not CACHE_TIMEOUT:
CACHEOPS_ENABLED = False
else:
CACHEOPS_ENABLED = True
CACHEOPS_DEFAULTS = {
'timeout': CACHE_TIMEOUT
}
CACHEOPS = {
'auth.user': {'ops': 'get', 'timeout': 60 * 15},
'auth.*': {'ops': ('fetch', 'get')},
'auth.permission': {'ops': 'all'},
'circuits.*': {'ops': 'all'},
'dcim.*': {'ops': 'all'},
'ipam.*': {'ops': 'all'},
'extras.*': {'ops': 'all'},
'secrets.*': {'ops': 'all'},
'users.*': {'ops': 'all'},
'tenancy.*': {'ops': 'all'},
'virtualization.*': {'ops': 'all'},
}
CACHEOPS_DEGRADE_ON_FAILURE = True
#
# Django Prometheus
#
PROMETHEUS_EXPORT_MIGRATIONS = False
#
# Django filters
#
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = 'null'
#
# Django REST framework (API)
#
REST_FRAMEWORK_VERSION = VERSION[0:3] # Use major.minor as API version
REST_FRAMEWORK = {
'ALLOWED_VERSIONS': [REST_FRAMEWORK_VERSION],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'netbox.api.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_PAGINATION_CLASS': 'netbox.api.OptionalLimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': (
'netbox.api.TokenPermissions',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'netbox.api.FormlessBrowsableAPIRenderer',
),
'DEFAULT_VERSION': REST_FRAMEWORK_VERSION,
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
'PAGE_SIZE': PAGINATE_COUNT,
'VIEW_NAME_FUNCTION': 'netbox.api.get_view_name',
}
#
# drf_yasg (OpenAPI/Swagger)
#
SWAGGER_SETTINGS = {
'DEFAULT_AUTO_SCHEMA_CLASS': 'utilities.custom_inspectors.NetBoxSwaggerAutoSchema',
'DEFAULT_FIELD_INSPECTORS': [
'utilities.custom_inspectors.NullableBooleanFieldInspector',
'utilities.custom_inspectors.CustomChoiceFieldInspector',
'utilities.custom_inspectors.TagListFieldInspector',
'utilities.custom_inspectors.SerializedPKRelatedFieldInspector',
'drf_yasg.inspectors.CamelCaseJSONFilter',
'drf_yasg.inspectors.ReferencingSerializerInspector',
'drf_yasg.inspectors.RelatedFieldInspector',
'drf_yasg.inspectors.ChoiceFieldInspector',
'drf_yasg.inspectors.FileFieldInspector',
'drf_yasg.inspectors.DictFieldInspector',
'drf_yasg.inspectors.SerializerMethodFieldInspector',
'drf_yasg.inspectors.SimpleFieldInspector',
'drf_yasg.inspectors.StringDefaultFieldInspector',
],
'DEFAULT_FILTER_INSPECTORS': [
'utilities.custom_inspectors.IdInFilterInspector',
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'DEFAULT_INFO': 'netbox.urls.openapi_info',
'DEFAULT_MODEL_DEPTH': 1,
'DEFAULT_PAGINATOR_INSPECTORS': [
'utilities.custom_inspectors.NullablePaginatorInspector',
'drf_yasg.inspectors.DjangoRestResponsePagination',
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header',
}
},
'VALIDATOR_URL': None,
}
#
# Django RQ (Webhooks backend)
#
RQ_QUEUES = {
'default': {
'HOST': WEBHOOKS_REDIS_HOST,
'PORT': WEBHOOKS_REDIS_PORT,
'DB': WEBHOOKS_REDIS_DATABASE,
'PASSWORD': WEBHOOKS_REDIS_PASSWORD,
'DEFAULT_TIMEOUT': WEBHOOKS_REDIS_DEFAULT_TIMEOUT,
'SSL': WEBHOOKS_REDIS_SSL,
} if not WEBHOOKS_REDIS_USING_SENTINEL else {
'SENTINELS': WEBHOOKS_REDIS_SENTINELS,
'MASTER_NAME': WEBHOOKS_REDIS_SENTINEL_SERVICE,
'DB': WEBHOOKS_REDIS_DATABASE,
'PASSWORD': WEBHOOKS_REDIS_PASSWORD,
'SOCKET_TIMEOUT': None,
'CONNECTION_KWARGS': {
'socket_connect_timeout': WEBHOOKS_REDIS_DEFAULT_TIMEOUT
},
}
}
#
# Django debug toolbar
#
INTERNAL_IPS = (
'127.0.0.1',
'::1',
)
#
# NetBox internal settings
#
# Secrets
SECRETS_MIN_PUBKEY_SIZE = 2048
# Pagination
PER_PAGE_DEFAULTS = [
25, 50, 100, 250, 500, 1000
]
if PAGINATE_COUNT not in PER_PAGE_DEFAULTS:
PER_PAGE_DEFAULTS.append(PAGINATE_COUNT)
PER_PAGE_DEFAULTS = sorted(PER_PAGE_DEFAULTS)
| 32.693603 | 119 | 0.723223 |
7cd3feb7a77ef6f24b4fdc02f708000dd8710a32 | 676 | py | Python | pracgram/users/migrations/0003_auto_20180520_2058.py | lowosiriskgn/pracgram | db33b7969636628b2f562fb6ebd17c18f40c34e4 | [
"MIT"
] | 1 | 2018-07-15T05:03:50.000Z | 2018-07-15T05:03:50.000Z | pracgram/users/migrations/0003_auto_20180520_2058.py | lowosiriskgn/pracgram | db33b7969636628b2f562fb6ebd17c18f40c34e4 | [
"MIT"
] | null | null | null | pracgram/users/migrations/0003_auto_20180520_2058.py | lowosiriskgn/pracgram | db33b7969636628b2f562fb6ebd17c18f40c34e4 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2018-05-20 11:58
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds self-referential many-to-many
    # fields for the follower/following relationship on the custom user model.

    dependencies = [
        ('users', '0002_auto_20180520_2002'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='followers',
            # The '+' suffix in related_name disables the reverse accessor.
            field=models.ManyToManyField(related_name='_user_followers_+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='user',
            name='following',
            field=models.ManyToManyField(related_name='_user_following_+', to=settings.AUTH_USER_MODEL),
        ),
    ]
cfaafee00f1bf4feef74750275638a94fa2dcb10 | 4,722 | gyp | Python | third_party/libpng/libpng.gyp | dimitrilongo/mod_pagespeed | d0d3bc51aa4feddf010b7085872c64cc46b5aae0 | [
"Apache-2.0"
] | 2 | 2019-11-02T07:54:17.000Z | 2020-04-16T09:26:51.000Z | third_party/libpng/libpng.gyp | dimitrilongo/mod_pagespeed | d0d3bc51aa4feddf010b7085872c64cc46b5aae0 | [
"Apache-2.0"
] | 12 | 2017-03-14T18:26:11.000Z | 2021-10-01T15:33:50.000Z | third_party/libpng/libpng.gyp | dimitrilongo/mod_pagespeed | d0d3bc51aa4feddf010b7085872c64cc46b5aae0 | [
"Apache-2.0"
] | 1 | 2020-04-16T09:28:30.000Z | 2020-04-16T09:28:30.000Z | # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'conditions': [
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
# Link to system .so since we already use it due to GTK.
'use_system_libpng%': 1,
}, { # OS!="linux" and OS!="freebsd" and OS!="openbsd"
'use_system_libpng%': 0,
}],
],
},
'conditions': [
['use_system_libpng==0', {
'targets': [
{
'target_name': 'libpng',
'type': '<(component)',
'dependencies': [
'../zlib/zlib.gyp:zlib',
],
'msvs_guid': 'C564F145-9172-42C3-BFCB-6014CA97DBCD',
'sources': [
'src/png.c',
'src/png.h',
'src/pngconf.h',
'src/pngerror.c',
'src/pnggccrd.c',
'src/pngget.c',
'src/pngmem.c',
'src/pngpread.c',
'src/pngread.c',
'src/pngrio.c',
'src/pngrtran.c',
'src/pngrutil.c',
'src/pngset.c',
'src/pngtrans.c',
'src/pngusr.h',
'src/pngvcrd.c',
'src/pngwio.c',
'src/pngwrite.c',
'src/pngwtran.c',
'src/pngwutil.c',
],
'direct_dependent_settings': {
'include_dirs': [
'src/',
],
'defines': [
# We end up including setjmp.h directly, but libpng
# doesn't like that. This define tells libpng to not
# complain about our inclusion of setjmp.h.
'PNG_SKIP_SETJMP_CHECK',
],
},
'export_dependent_settings': [
'../zlib/zlib.gyp:zlib',
],
'conditions': [
['OS!="win"', {'product_name': 'png'}],
['OS=="win" and component=="shared_library"', {
'defines': [
'PNG_BUILD_DLL',
'PNG_NO_MODULEDEF',
],
'direct_dependent_settings': {
'defines': [
'PNG_USE_DLL',
],
},
}],
],
},
]
}, {
'conditions': [
['sysroot!=""', {
'variables': {
'pkg-config': '../../build/linux/pkg-config-wrapper "<(sysroot)"',
},
}, {
'variables': {
'pkg-config': 'pkg-config'
},
}],
],
'targets': [
{
'target_name': 'libpng',
'type': 'none',
'dependencies': [
'../zlib/zlib.gyp:zlib',
],
'variables': {
# Quoth libpagespeed's libpng.gyp:
# "The PNG_FREE_ME_SUPPORTED define was dropped in libpng
# 1.4.0beta78, with its behavior becoming the default
# behavior."
#
# Hence, we define it ourselves for version >= 1.4.0 so that
# libpagespeed's code (which checks PNG_FREE_ME_SUPPORTED for
# compatibility with earlier versions) will run with both earlier
# and later versions of libpng.
#
# This detects the version and sets the variable to non-zero for
# pre-1.4 versions.
'png_free_me_suported_define_in_libpng' :
'<!(<(pkg-config) --atleast-version=1.4.0 libpng; echo $?)'
},
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags libpng)',
],
'defines+': [
'USE_SYSTEM_LIBPNG',
'DBG=<(png_free_me_suported_define_in_libpng)',
# We end up including setjmp.h directly, but libpng
# doesn't like that. This define tells libpng to not
# complain about our inclusion of setjmp.h.
'PNG_SKIP_SETJMP_CHECK',
],
},
'conditions': [
['<(png_free_me_suported_define_in_libpng)==0', {
'direct_dependent_settings': {
'defines+': [
'PNG_FREE_ME_SUPPORTED',
],
}
}],
],
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other libpng)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l libpng)',
],
},
},
],
}],
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| 30.464516 | 78 | 0.448115 |
888c5eea5fb7b921f878194b9481451a6a1df23b | 1,716 | py | Python | StudArt/tests/core/views/test_EditSelfEmailAPIView.py | YuriyLisovskiy/OOA_Team_X-A | f8a977f5f498e33c69df1ed503d1e44d5f5b99a5 | [
"MIT"
] | null | null | null | StudArt/tests/core/views/test_EditSelfEmailAPIView.py | YuriyLisovskiy/OOA_Team_X-A | f8a977f5f498e33c69df1ed503d1e44d5f5b99a5 | [
"MIT"
] | 10 | 2020-11-06T08:37:02.000Z | 2020-12-09T23:08:25.000Z | StudArt/tests/core/views/test_EditSelfEmailAPIView.py | YuriyLisovskiy/OOA_Team_X-A | f8a977f5f498e33c69df1ed503d1e44d5f5b99a5 | [
"MIT"
] | 1 | 2021-09-16T10:56:02.000Z | 2021-09-16T10:56:02.000Z | import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import force_authenticate
from rest_framework_simplejwt.state import User
from core.views import EditSelfEmailAPIView
from tests.common import APIFactoryTestCase
class EditSelfEmailAPITestCase(APIFactoryTestCase):
	"""Tests for EditSelfEmailAPIView (PUT to api_v1:core:edit_self_email).

	The endpoint requires the user's current password alongside the new email.
	Users 'User' and 'User3' are loaded by the APIFactoryTestCase fixtures.
	"""
	def setUp(self) -> None:
		super(EditSelfEmailAPITestCase, self).setUp()
		self.view = EditSelfEmailAPIView.as_view()
		self.user = User.objects.get(username='User')
		self.user_3 = User.objects.get(username='User3')
	def test_EditValid(self):
		"""Correct password + well-formed email -> 200 OK."""
		request = self.request_factory.put(reverse('api_v1:core:edit_self_email'), {
			'password': 'qwerty',
			'email': 'q@q.com'
		})
		force_authenticate(request, self.user)
		response = self.view(request)
		self.assertEqual(response.status_code, status.HTTP_200_OK)
	def test_EditInvalid(self):
		"""Correct password but malformed email -> 400 Bad Request."""
		request = self.request_factory.put(reverse('api_v1:core:edit_self_email'), {
			'password': 'qwerty',
			'email': 'qq.q'
		})
		force_authenticate(request, self.user)
		response = self.view(request)
		self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
	def test_EditInvalidPassword(self):
		"""Wrong current password -> 401 Unauthorized (even when authenticated)."""
		request = self.request_factory.put(reverse('api_v1:core:edit_self_email'), {
			'password': 'qwer',
			'email': 'q@q.q'
		})
		force_authenticate(request, self.user)
		response = self.view(request)
		self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
	def test_EditUnauthenticated(self):
		"""Request without authentication -> 401 Unauthorized."""
		request = self.request_factory.put(reverse('api_v1:core:edit_self_email'), {
			'password': 'qwerty',
			'email': 'q@q.q'
		})
		response = self.view(request)
		self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
036199067b824c864bceb452a3f8697d53c50fa4 | 1,656 | py | Python | src/plugins/PythonFileIO/PythonFileIO/__init__.py | webgme/bindings | 985ea14d159f3001bc831a464c2d60d5f970333e | [
"MIT"
] | null | null | null | src/plugins/PythonFileIO/PythonFileIO/__init__.py | webgme/bindings | 985ea14d159f3001bc831a464c2d60d5f970333e | [
"MIT"
] | 15 | 2018-10-30T19:02:54.000Z | 2021-04-01T10:52:29.000Z | src/plugins/PythonFileIO/PythonFileIO/__init__.py | webgme/bindings | 985ea14d159f3001bc831a464c2d60d5f970333e | [
"MIT"
] | 4 | 2019-09-27T20:21:50.000Z | 2021-04-21T00:49:26.000Z | """
This is where the implementation of the plugin code goes.
The PythonFileIO-class is imported from both run_plugin.py and run_debug.py
"""
import sys
import logging
import os
from webgme_bindings import PluginBase
# Setup a logger
# Configure the module logger to emit INFO records on stdout
# (a bare StreamHandler would log to stderr by default).
logger = logging.getLogger('PythonFileIO')
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
class PythonFileIO(PluginBase):
    """Plugin exercising webgme blob/file round-trips.

    Uploads a binary payload both as a plain file and embedded inside an
    artifact, then reads each back and marks the plugin result as failed
    on any content mismatch.
    """

    def main(self):
        core = self.core
        active_node = self.active_node
        # Read the node name up front (kept for parity with other plugins;
        # the value itself is not used below).
        name = core.get_attribute(active_node, 'name')

        # Use a context manager so the handle is always closed
        # (the original code leaked the open file object).
        with open('./src/plugins/PythonFileIO/PythonFileIO/heart.png', 'rb') as binary_file:
            binary_content = binary_file.read()

        # Round-trip 1: plain blob upload/download.
        bin_hash = self.add_file('heart.png', binary_content)
        retrieved_content = self.get_bin_file(bin_hash)
        if binary_content != retrieved_content:
            self.logger.error('issue in simple binary')
            self.result_set_success(False)
            self.result_set_error('simple binary content mismatch')

        # Round-trip 2: the same payload embedded in a multi-file artifact.
        arti_hash = self.add_artifact('myArti', {'text.txt': 'just because', 'heart.png': binary_content})
        retrieved_content_from_arti = self.get_bin_file(arti_hash, 'heart.png')
        if binary_content != retrieved_content_from_arti:
            self.logger.error('issue in complex blob')
            self.result_set_success(False)
            self.result_set_error('embedded binary content mismatch')
| 36 | 104 | 0.705314 |
b96ad3630aa50e4bfd7f5a745ece9723346bd97b | 1,048 | py | Python | psh/tools/basic.py | m-boniecki/phoenix-rtos-tests | 3650fe1c04d676d371059abccdc60004b7d830b1 | [
"BSD-3-Clause"
] | null | null | null | psh/tools/basic.py | m-boniecki/phoenix-rtos-tests | 3650fe1c04d676d371059abccdc60004b7d830b1 | [
"BSD-3-Clause"
] | null | null | null | psh/tools/basic.py | m-boniecki/phoenix-rtos-tests | 3650fe1c04d676d371059abccdc60004b7d830b1 | [
"BSD-3-Clause"
] | null | null | null |
# Phoenix-RTOS
#
# phoenix-rtos-tests
#
# basic tools for psh related tests
#
# Copyright 2021 Phoenix Systems
# Author: Jakub Sarzyński
#
# This file is part of Phoenix-RTOS.
#
# %LICENSE%
#
import pexpect
def run_psh(p):
    """Start the psh shell on the pexpect handle `p` and wait for its echo."""
    launch_cmd = 'psh\r\n'
    p.send(launch_cmd)
    p.expect(r'psh(\r+)\n')
def assert_only_prompt(p):
    """Assert the next bytes on `p` are exactly one prompt.

    The prompt consists of an erase-in-display ANSI escape sequence
    followed by the psh prompt sign.
    """
    expected = '\r\x1b[0J' + '(psh)% '
    received = p.read(len(expected))
    assert received == expected, f'Expected:\n{expected}\nGot:\n{received}'
def assert_prompt(p, msg=None, timeout=-1, catch_timeout=True):
    """Assert that the next output on `p` is the psh prompt.

    With catch_timeout=True a pexpect timeout is turned into an
    assertion failure; otherwise pexpect raises its own exception.
    """
    patterns = ['(psh)% ', pexpect.TIMEOUT] if catch_timeout else ['(psh)% ']
    matched = p.expect_exact(patterns, timeout=timeout)
    assert matched == 0, msg if msg else ''
def assert_prompt_fail(p, msg=None, timeout=-1):
    """Assert that no psh prompt appears before a pexpect timeout."""
    matched = p.expect_exact(['(psh)% ', pexpect.TIMEOUT], timeout=timeout)
    # Index 1 is pexpect.TIMEOUT: the prompt must NOT have been seen.
    assert matched == 1, msg if msg else ''
| 20.54902 | 72 | 0.640267 |
1e557326a5ef2e59513468c21f78739304911b88 | 12,950 | py | Python | mrjob/tools/emr/create_cluster.py | etiennebatise/mrjob | 2803b7310afc72d986752aa816c9d48ae4632f95 | [
"Apache-2.0"
] | null | null | null | mrjob/tools/emr/create_cluster.py | etiennebatise/mrjob | 2803b7310afc72d986752aa816c9d48ae4632f95 | [
"Apache-2.0"
] | null | null | null | mrjob/tools/emr/create_cluster.py | etiennebatise/mrjob | 2803b7310afc72d986752aa816c9d48ae4632f95 | [
"Apache-2.0"
] | null | null | null | # Copyright 2009-2013 Yelp and Contributors
# Copyright 2015-2016 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a persistent EMR cluster to run clusters in, and print its ID to
stdout.
.. warning::
Do not run this without ``mrjob terminate-idle-clusters`` in
your crontab; clusters left idle can quickly become expensive!
Usage::
mrjob create-cluster
Options::
-h, --help show this help message and exit
--additional-emr-info=ADDITIONAL_EMR_INFO
A JSON string for selecting additional features on EMR
--ami-version=AMI_VERSION
AMI Version to use, e.g. "2.4.11" (default "latest").
--aws-availability-zone=AWS_AVAILABILITY_ZONE
Availability zone to run the cluster on
--aws-region=AWS_REGION
Region to connect to S3 and EMR on (e.g. us-west-1).
--bootstrap=BOOTSTRAP
A shell command to set up libraries etc. before any
steps (e.g. "sudo apt-get -qy install python3"). You
may interpolate files available via URL or locally
with Hadoop Distributed Cache syntax ("sudo dpkg -i
foo.deb#")
--bootstrap-action=BOOTSTRAP_ACTIONS
Raw bootstrap action scripts to run before any of the
other bootstrap steps. You can use --bootstrap-action
more than once. Local scripts will be automatically
uploaded to S3. To add arguments, just use quotes:
"foo.sh arg1 arg2"
--bootstrap-cmd=BOOTSTRAP_CMDS
Commands to run on the master node to set up
libraries, etc. You can use --bootstrap-cmd more than
once. Use mrjob.conf to specify arguments as a list to
be run directly.
--bootstrap-file=BOOTSTRAP_FILES
File to upload to the master node before running
bootstrap_cmds (for example, debian packages). These
will be made public on S3 due to a limitation of the
bootstrap feature. You can use --bootstrap-file more
than once.
--bootstrap-mrjob Automatically tar up the mrjob library and install it
when we run the mrjob. This is the default. Use --no-
bootstrap-mrjob if you've already installed mrjob on
your Hadoop cluster.
--no-bootstrap-mrjob Don't automatically tar up the mrjob library and
install it when we run this job. Use this if you've
already installed mrjob on your Hadoop cluster.
--bootstrap-python-package=BOOTSTRAP_PYTHON_PACKAGES
Path to a Python module to install on EMR. These
should be standard python module tarballs where you
can cd into a subdirectory and run ``sudo python
setup.py install``. You can use --bootstrap-python-
package more than once.
--bootstrap-script=BOOTSTRAP_SCRIPTS
Script to upload and then run on the master node (a
combination of bootstrap_cmds and bootstrap_files).
These are run after the command from bootstrap_cmds.
You can use --bootstrap-script more than once.
-c CONF_PATHS, --conf-path=CONF_PATHS
Path to alternate mrjob.conf file to read from
--no-conf Don't load mrjob.conf even if it's available
--ec2-core-instance-bid-price=EC2_CORE_INSTANCE_BID_PRICE
Bid price to specify for core (or "slave") nodes when
setting them up as EC2 spot instances (you probably
only want to set a bid price for task instances).
--ec2-core-instance-type=EC2_CORE_INSTANCE_TYPE,
--ec2-slave-instance-type=EC2_CORE_INSTANCE_TYPE
Type of EC2 instance for core (or "slave") nodes only
--ec2-instance-type=EC2_INSTANCE_TYPE
Type of EC2 instance(s) to launch (e.g. m1.small,
c1.xlarge, m2.xlarge). See http://aws.amazon.com/ec2
/instance-types/ for the full list.
--ec2-key-pair=EC2_KEY_PAIR
Name of the SSH key pair you set up for EMR
--ec2-master-instance-bid-price=EC2_MASTER_INSTANCE_BID_PRICE
Bid price to specify for the master node when setting
it up as an EC2 spot instance (you probably only want
to set a bid price for task instances).
--ec2-master-instance-type=EC2_MASTER_INSTANCE_TYPE
Type of EC2 instance for master node only
--ec2-task-instance-bid-price=EC2_TASK_INSTANCE_BID_PRICE
Bid price to specify for task nodes when setting them
up as EC2 spot instances.
--ec2-task-instance-type=EC2_TASK_INSTANCE_TYPE
Type of EC2 instance for task nodes only
--emr-api-param=EMR_API_PARAMS
Additional parameters to pass directly to the EMR API
when creating a cluster. Should take the form
KEY=VALUE. You can use --emr-api-param multiple times.
--emr-endpoint=EMR_ENDPOINT
Optional host to connect to when communicating with S3
(e.g. us-west-1.elasticmapreduce.amazonaws.com).
Default is to infer this from aws_region.
--pool-name=POOL_NAME
Specify a pool name to join. Set to "default" if not
specified.
--disable-emr-debugging
Disable storage of Hadoop logs in SimpleDB
--enable-emr-debugging
Enable storage of Hadoop logs in SimpleDB
--iam-instance-profile=IAM_INSTANCE_PROFILE
EC2 instance profile to use for the EMR cluster - see
"Configure IAM Roles for Amazon EMR" in AWS docs
--iam-service-role=IAM_SERVICE_ROLE
IAM service role to use for the EMR cluster - see
"Configure IAM Roles for Amazon EMR" in AWS docs
--label=LABEL custom prefix for job name, to help us identify the
job
--max-hours-idle=MAX_HOURS_IDLE
If we create a persistent cluster, have it
automatically terminate itself after it's been idle
this many hours.
--mins-to-end-of-hour=MINS_TO_END_OF_HOUR
If --max-hours-idle is set, control how close to the
end of an EC2 billing hour the cluster can
automatically terminate itself (default is 5 minutes).
--no-emr-api-param=NO_EMR_API_PARAMS
Parameters to be unset when calling EMR API. You can
use --no-emr-api-param multiple times.
--num-ec2-core-instances=NUM_EC2_CORE_INSTANCES
Number of EC2 instances to start as core (or "slave")
nodes. Incompatible with --num-ec2-instances.
--num-ec2-instances=NUM_EC2_INSTANCES
Total number of EC2 instances to launch
--num-ec2-task-instances=NUM_EC2_TASK_INSTANCES
Number of EC2 instances to start as task nodes.
Incompatible with --num-ec2-instances.
--owner=OWNER custom username to use, to help us identify who ran
the job
--no-pool-clusters
Don't try to run our job on a pooled cluster.
--pool-clusters Add to an existing cluster or create a new one that
does not terminate when the job completes. Overrides
other cluster-related options including EC2 instance
configuration. Joins pool "default" if
--pool-name is not specified. WARNING: do
not run this without
mrjob terminate-idle-clusters in your
crontab; clusters left idle can quickly become
expensive!
-q, --quiet Don't print anything to stderr
--s3-endpoint=S3_ENDPOINT
Host to connect to when communicating with S3 (e.g. s3
-us-west-1.amazonaws.com). Default is to infer this
from region (see --aws-region).
--s3-log-uri=S3_LOG_URI
URI on S3 to write logs into
--s3-scratch-uri=S3_SCRATCH_URI
URI on S3 to use as our temp directory.
--s3-sync-wait-time=S3_SYNC_WAIT_TIME
How long to wait for S3 to reach eventual consistency.
This is typically less than a second (zero in us-west)
but the default is 5.0 to be safe.
--s3-upload-part-size=S3_UPLOAD_PART_SIZE
Upload files to S3 in parts no bigger than this many
megabytes. Default is 100 MiB. Set to 0 to disable
multipart uploading entirely.
-v, --verbose print more messages to stderr
--visible-to-all-users
Whether the cluster is visible to all IAM users of
the AWS account associated with the cluster. If this
value is set to True, all IAM users of that AWS
account can view and (if they have the proper policy
permissions set) manage the cluster. If it is set to
False, only the IAM user that created the cluster can
view and manage it. This option can be overridden by
--emr-api-param VisibleToAllUsers=true|false.
"""
from __future__ import print_function
from optparse import OptionParser
from mrjob.emr import EMRJobRunner
from mrjob.job import MRJob
from mrjob.options import _add_basic_opts
from mrjob.options import _add_dataproc_emr_opts
from mrjob.options import _add_emr_connect_opts
from mrjob.options import _add_emr_launch_opts
from mrjob.options import _alphabetize_options
from mrjob.options import _fix_custom_options
from mrjob.util import scrape_options_into_new_groups
def main(args=None):
    """Run the create_cluster tool with arguments from ``sys.argv`` and
    printing to ``sys.stdout``."""
    # Build the runner from parsed CLI options, create the cluster, and
    # report its ID on stdout so callers can capture it.
    kwargs = _runner_kwargs(args)
    print(EMRJobRunner(**kwargs).make_persistent_cluster())
def _runner_kwargs(cl_args=None):
    """Parse command line arguments into keyword arguments for
    :py:class:`EMRJobRunner`.
    """
    parser = _make_option_parser()
    options, positional = parser.parse_args(cl_args)

    # Normalize emr_api_params / emr_tags style options in place.
    _fix_custom_options(options, parser)

    if positional:
        parser.error('takes no arguments')

    MRJob.set_up_logging(quiet=options.quiet, verbose=options.verbose)

    # Everything except the tool-only flags becomes a runner kwarg.
    kwargs = dict(vars(options))
    for tool_only_flag in ('quiet', 'verbose', 'no_emr_api_params'):
        del kwargs[tool_only_flag]
    return kwargs
def _make_option_parser():
    """Build the OptionParser for the create-cluster tool."""
    description = (
        'Create a persistent EMR cluster to run jobs in, and print its ID to'
        ' stdout. WARNING: Do not run'
        ' this without mrjob terminate-idle-clusters in your'
        ' crontab; clusters left idle can quickly become expensive!')
    parser = OptionParser(usage='%prog [options]', description=description)

    _add_basic_opts(parser)
    # These options aren't nicely grouped anywhere; scrape the specific
    # ones we need out of MRJob's own option groups.
    scrape_options_into_new_groups(MRJob().all_option_groups(), {
        parser: (
            'bootstrap_mrjob',
            'label',
            'owner',
        ),
    })
    _add_emr_connect_opts(parser)
    _add_emr_launch_opts(parser)
    _add_dataproc_emr_opts(parser)
    _alphabetize_options(parser)
    return parser
# Allow running this tool directly as a script.
if __name__ == '__main__':
    main()
| 48.501873 | 78 | 0.606255 |
78e627a8240939f14630abba139c80cc9a8d33ca | 3,173 | py | Python | cropduster/widgets.py | pbs/django-cropduster | de4bd375421c29bb80653a01aaf263f1a9e6e626 | [
"BSD-2-Clause"
] | null | null | null | cropduster/widgets.py | pbs/django-cropduster | de4bd375421c29bb80653a01aaf263f1a9e6e626 | [
"BSD-2-Clause"
] | null | null | null | cropduster/widgets.py | pbs/django-cropduster | de4bd375421c29bb80653a01aaf263f1a9e6e626 | [
"BSD-2-Clause"
] | null | null | null | from django.forms import HiddenInput, Media
from django.template import Context, loader
from django.core.urlresolvers import reverse
from cropduster.models import SizeSet, Image as CropDusterImage, ImageRegistry
from django.contrib.contenttypes.models import ContentType
class AdminCropdusterWidget(HiddenInput):
    """Admin form widget that renders the cropduster image-upload inline.

    The underlying form value is a hidden input holding the primary key of
    a cropduster Image; the rendered template wraps it with the upload and
    crop UI.
    """

    ctx_overrides = None

    def __init__(self, model, field, size_set_slug, template="admin/inline.html", attrs=None, *args, **ctx_overrides):
        try:
            self.size_set = SizeSet.objects.get(slug=size_set_slug)
        except SizeSet.DoesNotExist:
            # Defer the error until render() so form construction never fails.
            self.size_set = None
        self.size_set_slug = size_set_slug
        self.register_image(model, field)
        self.template = template
        super(AdminCropdusterWidget, self).__init__(attrs)
        self.is_hidden = False
        self.ctx_overrides = ctx_overrides

    def _media(self):
        # Combine any inherited media with cropduster's own js/css assets.
        base = getattr(super(AdminCropdusterWidget, self), 'media', None)
        media = Media(base) if base else Media()
        media_url = reverse("cropduster-static", kwargs={"path": ""})
        media.add_js([media_url + 'js/admin.cropduster.js',])
        media.add_css({
            'all': (
                media_url + 'css/admin.cropduster.css',
            ),})
        return media

    media = property(_media)

    def register_image(self, model, field_name):
        # Register the image model/field pair so the upload view can locate
        # it later via the returned hash.
        model_id = ContentType.objects.get_for_model(model)
        field = model._meta.get_field_by_name(field_name)[0]
        image = field.rel.to
        self._image_field = image
        self.image_hash = ImageRegistry.add(model_id, field_name, image)

    def render(self, name, value, attrs=None):
        if self.size_set is None:
            raise SizeSet.DoesNotExist("SizeSet '%s' missing from database" % self.size_set_slug)
        attrs.setdefault("class", "cropduster")
        media_url = reverse("cropduster-static", kwargs={"path": ""})
        cropduster_url = reverse("cropduster-upload")
        input = super(HiddenInput, self).render(name, value, attrs)
        if not value:
            image = None
        else:
            try:
                image = self._image_field.objects.get(id=value)
            except CropDusterImage.DoesNotExist:
                image = None
        # Work on a copy of ctx_overrides: the previous code popped
        # 'derived_filter_kwargs' off self.ctx_overrides itself, so the
        # setting was silently lost after the first render of this widget.
        overrides = dict(self.ctx_overrides)
        if image:
            filter_kwargs = {
                'size__size_set': self.size_set,
                'size__auto_crop': False,
            }
            filter_kwargs.update(overrides.pop('derived_filter_kwargs', {}))
            manual = image.derived.filter(**filter_kwargs)
        else:
            manual = None
        t = loader.get_template(self.template)
        ctx = {
            "image": image,
            "image_hash": self.image_hash,
            "size_set": self.size_set,
            "media_url": media_url,
            "cropduster_url": cropduster_url,
            "input": input,
            "attrs": attrs,
            "show_original": True,
            "manual": manual,
            "has_manual": image and len(manual) > 0,
        }
        ctx.update(overrides)
        return t.render(Context(ctx))
| 34.11828 | 118 | 0.610463 |
46dec656ca405f0e028d43bd735e47e5d13994d3 | 5,417 | py | Python | fedml_api/distributed/fedavg_gRPC/FedAvgServerManager.py | WingFeiTsang/FedML_New | 755d8fc63ce08df4dc3eef326aa7693e94262c7e | [
"Apache-2.0"
] | null | null | null | fedml_api/distributed/fedavg_gRPC/FedAvgServerManager.py | WingFeiTsang/FedML_New | 755d8fc63ce08df4dc3eef326aa7693e94262c7e | [
"Apache-2.0"
] | null | null | null | fedml_api/distributed/fedavg_gRPC/FedAvgServerManager.py | WingFeiTsang/FedML_New | 755d8fc63ce08df4dc3eef326aa7693e94262c7e | [
"Apache-2.0"
] | null | null | null | import logging
import os, signal
import sys
import time
from .message_define import MyMessage
from .utils import transform_tensor_to_list, post_complete_message_to_sweep_process, transform_list_to_tensor
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../../FedML")))
try:
from fedml_core.distributed.communication.message import Message
from fedml_core.distributed.server.server_manager import ServerManager
except ImportError:
from FedML.fedml_core.distributed.communication.message import Message
from FedML.fedml_core.distributed.server.server_manager import ServerManager
class FedAVGServerManager(ServerManager):
    """gRPC server-side manager for the FedAvg training loop.

    Drives ``round_num`` communication rounds: broadcasts the global model
    to every client process, collects the locally trained models, lets the
    aggregator combine them, and broadcasts the result for the next round.
    """

    def __init__(self, args, aggregator, comm=None, rank=0, size=0, backend="MPI", is_preprocessed=False, preprocessed_client_lists=None):
        super().__init__(args, comm, rank, size, backend)
        self.args = args
        self.aggregator = aggregator
        # Total number of communication rounds to run before finishing.
        self.round_num = args.comm_round
        self.round_idx = 0
        # When True, client sampling was already done by a data preprocessor.
        self.is_preprocessed = is_preprocessed
        self.preprocessed_client_lists = preprocessed_client_lists

    def run(self):
        super().run()

    def send_init_msg(self):
        """Sample clients for round 0 and broadcast the initial global model."""
        # sampling clients
        client_indexes = self.aggregator.client_sampling(self.round_idx, self.args.client_num_in_total,
                                                         self.args.client_num_per_round)
        global_model_params = self.aggregator.get_global_model_params()
        global_model_params = transform_tensor_to_list(global_model_params)
        # newly added by zrf for error "object of type Tensor is Jason serializable"
        # NOTE(review): transform_tensor_to_list is applied a second time below
        # when is_mobile == 1 — looks accidental; confirm the helper is
        # idempotent before relying on this path.
        if self.args.is_mobile == 1:
            global_model_params = transform_tensor_to_list(global_model_params)
        for process_id in range(1, self.size+1):
            self.send_message_init_config(process_id, global_model_params, client_indexes[process_id - 1])

    def register_message_receive_handlers(self):
        # Route client model uploads to the handler below.
        self.register_message_receive_handler(MyMessage.MSG_TYPE_C2S_SEND_MODEL_TO_SERVER,
                                              self.handle_message_receive_model_from_client)

    def handle_message_receive_model_from_client(self, msg_params):
        """Record one client's trained model; when all have reported,
        aggregate, evaluate, and broadcast the model for the next round."""
        sender_id = msg_params.get(MyMessage.MSG_ARG_KEY_SENDER)
        model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
        model_params = transform_list_to_tensor(model_params)
        # new added by zrf
        local_sample_number = msg_params.get(MyMessage.MSG_ARG_KEY_NUM_SAMPLES)
        # Sender ids are 1-based process ranks; the aggregator indexes from 0.
        self.aggregator.add_local_trained_result(int(sender_id) - 1, model_params, int(local_sample_number))
        b_all_received = self.aggregator.check_whether_all_receive()
        # logging.info("b_all_received = " + str(b_all_received))
        if b_all_received:
            global_model_params = self.aggregator.aggregate()
            test_time_start = time.time()
            self.aggregator.test_on_server_for_all_clients(self.round_idx)
            test_time_end = time.time()
            logging.info("Test on Sever for All Clients: %f" % (test_time_end - test_time_start))

            # start the next round
            self.round_idx += 1
            if self.round_idx == self.round_num:
                # All rounds done: notify any sweep process and shut down.
                post_complete_message_to_sweep_process(self.args)
                self.finish()
                return
            if self.is_preprocessed:
                if self.preprocessed_client_lists is None:
                    # sampling has already been done in data preprocessor
                    client_indexes = [self.round_idx] * self.args.client_num_per_round
                else:
                    client_indexes = self.preprocessed_client_lists[self.round_idx]
            else:
                # sampling clients
                client_indexes = self.aggregator.client_sampling(self.round_idx, self.args.client_num_in_total,
                                                                 self.args.client_num_per_round)

            # print('indexes of clients: ' + str(client_indexes))
            # print("size = %d" % self.size)
            # NOTE(review): as in send_init_msg, the params may be run through
            # transform_tensor_to_list twice here — verify idempotence.
            if self.args.is_mobile == 1:
                global_model_params = transform_tensor_to_list(global_model_params)
            global_model_params = transform_tensor_to_list(global_model_params)
            # newly added by zrf
            for receiver_id in range(1, self.size+1):
                self.send_message_sync_model_to_client(receiver_id, global_model_params,
                                                       client_indexes[receiver_id - 1])

    def send_message_init_config(self, receive_id, global_model_params, client_index):
        # Initial broadcast: global model plus the client's sampled index.
        message = Message(MyMessage.MSG_TYPE_S2C_INIT_CONFIG, self.get_sender_id(), receive_id)
        message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)
        message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))
        self.send_message(message)

    def send_message_sync_model_to_client(self, receive_id, global_model_params, client_index):
        # Per-round broadcast of the freshly aggregated global model.
        message = Message(MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT, self.get_sender_id(), receive_id)
        message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)
        message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))
        self.send_message(message)
| 52.086538 | 138 | 0.685619 |
a7ceb826207444e2fd17af6552b060ed9eb31c38 | 1,945 | py | Python | tests/unit/etl_remote.py | sharabeshj/course-editor-test | 9af15d10ef1f039fdf5758134a7cb72384ccf3f5 | [
"Apache-2.0"
] | 1 | 2021-01-06T17:58:30.000Z | 2021-01-06T17:58:30.000Z | tests/unit/etl_remote.py | priyankagohil/coursebuilder-assessment | 559e867a2a846dd773471c6bc76cf6005a57098f | [
"Apache-2.0"
] | 27 | 2016-08-31T19:04:46.000Z | 2016-09-29T00:22:32.000Z | tests/unit/etl_remote.py | priyankagohil/coursebuilder-assessment | 559e867a2a846dd773471c6bc76cf6005a57098f | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for tools/etl/remote.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
from tests import suite
from tools.etl import remote
from google.appengine.ext.remote_api import remote_api_stub
class EnvironmentTests(suite.TestBase):
    """Unit tests for remote.Environment.establish()."""

    def test_establish_logs_auth_error_and_root_cause_when_oauth_errors(self):
        def raise_root_cause(unused_server, unused_path, secure=None):
            raise Exception('root cause text')

        self.swap(remote_api_stub, 'ConfigureRemoteApiForOAuth', raise_root_cause)
        env = remote.Environment('server')
        with self.assertRaises(SystemExit):
            env.establish()
        # Both the friendly hint and the underlying failure must be logged.
        self.assertLogContains('missing OAuth2 credentials')
        self.assertLogContains('root cause text')

    def test_establish_logs_sdk_error_when_oauth_method_missing(self):
        env = remote.Environment('server')
        with self.assertRaises(SystemExit):
            # A stub object lacking the OAuth method simulates an old SDK.
            env.establish(stub=object())
        self.assertLogContains('Your Google App Engine SDK is old')

    def test_establish_is_noop_when_testing_true(self):
        # Calling the real implementation without credentials would crash,
        # so testing=True must make establish() a no-op.
        remote.Environment('server', testing=True).establish()
3a8239eb39bf2492eb1d5d42b270a5787e497d27 | 164,810 | py | Python | tensorflow/lite/python/lite_v2_test.py | Nickmeagan70/tensorflow | 6bfedde8466daced9f40a0e11840f5ce274abc7d | [
"Apache-2.0"
] | 7 | 2022-03-04T21:14:47.000Z | 2022-03-22T23:07:39.000Z | tensorflow/lite/python/lite_v2_test.py | Nickmeagan70/tensorflow | 6bfedde8466daced9f40a0e11840f5ce274abc7d | [
"Apache-2.0"
] | 1 | 2022-03-08T18:28:46.000Z | 2022-03-08T18:37:20.000Z | tensorflow/lite/python/lite_v2_test.py | Nickmeagan70/tensorflow | 6bfedde8466daced9f40a0e11840f5ce274abc7d | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
import ctypes
import functools
import itertools
import os
import sys
from absl.testing import parameterized
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
# Force loaded shared object symbols to be globally visible. This is needed so
# that the interpreter_wrapper, in one .so file, can see the test_registerer,
# in a different .so file. Note that this may already be set by default.
# pylint: disable=g-import-not-at-top
if hasattr(sys, 'setdlopenflags') and hasattr(sys, 'getdlopenflags'):
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.lite.python import conversion_metadata_schema_py_generated as metadata_fb
from tensorflow.lite.python import convert
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_v2_test_util
from tensorflow.lite.python import schema_py_generated as schema_fb
from tensorflow.lite.python import test_util as tflite_test_util
from tensorflow.lite.python import util
from tensorflow.lite.python.convert import mlir_quantize
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.lite.python.interpreter import InterpreterWithCustomOps
from tensorflow.lite.python.interpreter import OpResolverType
from tensorflow.lite.python.testdata import _pywrap_test_registerer as test_registerer
from tensorflow.lite.python.testdata import double_op
from tensorflow.lite.python.util import get_conversion_metadata
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import map_ops
from tensorflow.python.ops import rnn
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model import saved_model
from tensorflow.python.saved_model.loader_impl import parse_saved_model
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
# Only run jax related tests when we can import jax.
DISABLE_JAX_TEST = False
try:
import jax
from jax import numpy as jnp
except ImportError:
DISABLE_JAX_TEST = True
# pylint: enable=g-import-not-at-top
class FromConcreteFunctionTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testTypeInvalid(self):
root = self._getSimpleVariableModel()
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_concrete_functions([root.f], root)
self.assertIn('call get_concrete_function', str(error.exception))
@test_util.run_v2_only
def testFloat(self):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
tflite_model = converter.convert()
# Check output value from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@parameterized.named_parameters(('_INT8InputOutput', dtypes.int8),
('_UINT8InputOutput', dtypes.uint8),
('_INT16InputOutput', dtypes.int16))
@test_util.run_v2_only
def testInvalidFloat(self, inference_input_output_type):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
with self.assertRaises(ValueError) as error:
converter.inference_input_type = inference_input_output_type
converter.inference_output_type = inference_input_output_type
converter.convert()
self.assertEqual(
'The inference_input_type and inference_output_type '
'must be tf.float32.', str(error.exception))
@test_util.run_v2_only
def testScalarInput(self):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testModelWithoutInputs(self):
def _get_random_number_gen():
root = tracking.AutoTrackable()
@tf.function(input_signature=[])
def func():
return tf.random.uniform(shape=[1], dtype=tf.float32)
root.f = func
to_save = root.f.get_concrete_function()
return (root, to_save)
# Model with no input
root, concrete_func = _get_random_number_gen()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
@test_util.run_v2_only
def testMultiFunctionModel(self):
"""Convert a single model in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.add.get_concrete_function(input_data)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.add(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
  @test_util.run_v2_only
  def testConvertMultipleFunctions(self):
    """Convert multiple functions in a multi-functional model.

    Each concrete function becomes a named SignatureDef ('add'/'sub') in the
    converted model; both signatures are then executed via signature runners.
    """
    root = self._getMultiFunctionModel()
    input_data = tf.constant(1., shape=[1])
    add_func = root.add.get_concrete_function(input_data)
    sub_func = root.sub.get_concrete_function(input_data)
    # Try converting multiple functions.
    converter = lite.TFLiteConverterV2.from_concrete_functions(
        [add_func, sub_func], root)
    tflite_model = converter.convert()
    # Check signatures are valid from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    signature_defs = interpreter.get_signature_list()
    # Verify the SignatureDef structure returned is as expected: two entries,
    # keyed by function name, each with a single input 'x' and one output.
    self.assertEqual(len(signature_defs), 2)
    self.assertEqual(list(signature_defs.keys()), ['add', 'sub'])
    self.assertEqual(len(signature_defs.values()), 2)
    self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs'])
    self.assertCountEqual(signature_defs['add']['inputs'], ['x'])
    self.assertEqual(list(signature_defs['add']['outputs']), ['output_0'])
    self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs'])
    self.assertCountEqual(signature_defs['sub']['inputs'], ['x'])
    self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0'])
    # Verify the Signature runner executions: for x=1 the 'add' signature
    # yields 3 and the 'sub' signature yields -2.
    add_signature_runner = interpreter.get_signature_runner('add')
    add_output = add_signature_runner(x=input_data)
    self.assertEqual(add_output['output_0'], 3)
    input_details = add_signature_runner.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('add_x:0', input_details['x']['name'])
    self.assertEqual(np.float32, input_details['x']['dtype'])
    self.assertTrue(([1] == input_details['x']['shape']).all())
    self.assertEqual((0.0, 0), input_details['x']['quantization'])
    sub_signature_runner = interpreter.get_signature_runner('sub')
    sub_output = sub_signature_runner(x=input_data)
    self.assertEqual(sub_output['output_0'], -2)
    output_details = sub_signature_runner.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('StatefulPartitionedCall:0',
                     output_details['output_0']['name'])
    self.assertEqual(np.float32, output_details['output_0']['dtype'])
    self.assertTrue(([1] == output_details['output_0']['shape']).all())
    self.assertEqual((0.0, 0), output_details['output_0']['quantization'])
    # Check the conversion metadata records a v2 concrete-function conversion
    # with no optimizations applied.
    metadata = get_conversion_metadata(tflite_model)
    self.assertIsNotNone(metadata)
    self.assertEqual(metadata.environment.apiVersion, 2)
    self.assertEqual(metadata.environment.modelType,
                     metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS)
    self.assertAllEqual([], metadata.options.modelOptimizationModes)
  def _getIntegerQuantizeModel(self, num_filters=16):
    """Returns (root, concrete_func, calibration_gen) for a conv2d+relu model.

    The model takes a [1, 5, 5, 3] float32 input; `calibration_gen` yields
    five random samples for post-training quantization calibration.
    """
    # Fixed seed keeps the calibration data deterministic across runs.
    np.random.seed(0)
    root = tracking.AutoTrackable()
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)])
    def func(inp):
      conv = tf.nn.conv2d(
          inp,
          tf.ones([3, 3, 3, num_filters]), strides=[1, 1, 1, 1], padding='SAME')
      output = tf.nn.relu(conv, name='output')
      return output
    def calibration_gen():
      # Five random samples in [-1, 1) matching the input signature shape.
      for _ in range(5):
        yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
    root.f = func
    to_save = root.f.get_concrete_function()
    return (root, to_save, calibration_gen)
  @parameterized.named_parameters(
      ('EnableMlirQuantizer', True),  # enable mlir quantizer
      ('DisableMlirQuantizer', False))  # disable mlir quantizer
  def testPostTrainingCalibrateAndQuantize(self, mlir_quantizer):
    """Tests post-training full-integer quantization and its metadata."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    # Convert float model.
    float_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                     root)
    float_tflite_model = float_converter.convert()
    self.assertIsNotNone(float_tflite_model)
    # Convert quantized model.
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calibration_gen
    quantized_converter.experimental_new_quantizer = mlir_quantizer
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    # Check the conversion metadata records version, model type, option flags
    # and the PTQ_FULL_INTEGER optimization mode.
    metadata = get_conversion_metadata(quantized_tflite_model)
    self.assertIsNotNone(metadata)
    self.assertEqual(
        metadata.environment.tensorflowVersion.decode('utf-8'),
        versions.__version__)
    self.assertEqual(metadata.environment.apiVersion, 2)
    self.assertEqual(metadata.environment.modelType,
                     metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS)
    self.assertEqual(metadata.options.allowCustomOps, False)
    self.assertEqual(metadata.options.enableSelectTfOps, False)
    self.assertEqual(metadata.options.forceSelectTfOps, False)
    self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER],
                        metadata.options.modelOptimizationModes)
    # The default input and output types should be float.
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(np.float32, input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(np.float32, output_details[0]['dtype'])
    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
  @parameterized.named_parameters(('_INT8InputOutput', dtypes.int8),
                                  ('_UINT8InputOutput', dtypes.uint8),
                                  ('_INT16InputOutput', dtypes.int16))
  @test_util.run_v2_only
  def testInvalidPostTrainingDynamicRangeQuantization(
      self, inference_input_output_type):
    """Dynamic-range quantization must reject non-float32 inference IO types."""
    root, func, _ = self._getIntegerQuantizeModel()
    # Convert float model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)
    # Convert quantized model.
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    # No representative dataset is set, so this is dynamic-range quantization;
    # integer inference IO types are invalid in that mode and must raise.
    with self.assertRaises(ValueError) as error:
      quantized_converter.inference_input_type = inference_input_output_type
      quantized_converter.inference_output_type = inference_input_output_type
      quantized_converter.convert()
    self.assertEqual(
        'The inference_input_type and inference_output_type '
        'must be tf.float32.', str(error.exception))
  @parameterized.named_parameters(
      ('EnableMlirQuantizer', True),  # enable mlir quantizer
      ('DisableMlirQuantizer', False))  # disable mlir quantizer
  def testQuantizationRemovesQDQsForFloatIO(self, mlir_quantizer):
    """Quantize/Dequantize ops around float IO should be removed after PTQ."""
    func, calibration_gen = self._getSqrtModel()
    converter = lite.TFLiteConverterV2.from_concrete_functions(
        [func.get_concrete_function()])
    converter.representative_dataset = calibration_gen
    converter.optimizations = [lite.Optimize.DEFAULT]
    converter.experimental_new_quantizer = mlir_quantizer
    quantized_model = converter.convert()
    # Because we assert on the model's op set below, opt out of applying the
    # default TFLite delegates (i.e. the XNNPACK delegate), which could fuse
    # or replace ops.
    interpreter = Interpreter(
        model_content=quantized_model,
        experimental_op_resolver_type=OpResolverType
        .BUILTIN_WITHOUT_DEFAULT_DELEGATES)
    interpreter.allocate_tensors()
    # The model should have only one sqrt op: the surrounding QDQ ops were
    # removed because the model IO stayed float.
    op_details = interpreter._get_ops_details()
    self.assertLen(op_details, 1)
    self.assertEqual(op_details[0]['op_name'], 'SQRT')
  @parameterized.named_parameters(
      ('_Default', False, False, dtypes.float32),
      ('_INT8InputOutput', False, False, dtypes.int8),
      ('_UINT8InputOutput', False, False, dtypes.uint8),
      ('_INT16Quantize', False, True, dtypes.float32),
      ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
      ('_IntOnly', True, False, dtypes.float32),
      ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
      ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
      ('_IntOnly_INT16Quantize', True, True, dtypes.float32),
      ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))
  def testIntegerQuantization(self, is_int_only, is_int16_quantize,
                              inference_input_output_type):
    """Covers integer PTQ across int-only/16x8 ops sets and IO dtypes."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    # Convert float model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)
    # Convert quantized model.
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calibration_gen
    # Select the supported-ops set per test parameters: int-only restricts to
    # integer kernels; is_int16_quantize selects the 16x8 quantization scheme.
    if is_int_only:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
    else:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS
        ]
    quantized_converter.inference_input_type = inference_input_output_type
    quantized_converter.inference_output_type = inference_input_output_type
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    # Check the conversion metadata records the matching optimization mode.
    metadata = get_conversion_metadata(quantized_tflite_model)
    self.assertIsNotNone(metadata)
    expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER]
    if is_int16_quantize:
      expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_INT16]
    self.assertAllEqual(expected_opt_options,
                        metadata.options.modelOptimizationModes)
    # Input and output dtypes must match the requested inference IO type.
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     output_details[0]['dtype'])
    # Ensure that the quantized tflite model is smaller.
    self.assertLess(len(quantized_tflite_model), len(tflite_model))
  @parameterized.named_parameters(
      ('_INT16Quantize_INT8InputOutput', True, dtypes.int8))
  def testInvalidIntegerQuantization(self, is_int16_quantize,
                                     inference_input_output_type):
    """16x8 quantization must reject int8 inference IO types."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    # Convert quantized model.
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calibration_gen
    if is_int16_quantize:
      quantized_converter.target_spec.supported_ops = [
          lite.OpsSet.
          EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
          lite.OpsSet.TFLITE_BUILTINS
      ]
    # int8 IO is not allowed with the 16x8 scheme; conversion must raise.
    with self.assertRaises(ValueError) as error:
      quantized_converter.inference_input_type = dtypes.int8
      quantized_converter.inference_output_type = dtypes.int8
      quantized_converter.convert()
    self.assertEqual(
        'The inference_input_type and inference_output_type '
        "must be in ['tf.float32', 'tf.int16'].", str(error.exception))
  def testCalibrateAndQuantizeBuiltinInt16(self):
    """Calibrates only, then quantizes to int16 via mlir_quantize directly."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    # Convert float model.
    float_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                     root)
    float_tflite_model = float_converter.convert()
    self.assertIsNotNone(float_tflite_model)
    converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)
    # TODO(b/156309549): We should add INT16 to the builtin types.
    converter.optimizations = [lite.Optimize.DEFAULT]
    converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.representative_dataset = calibration_gen
    # Stop after calibration so we can drive quantization manually below.
    converter._experimental_calibrate_only = True
    calibrated_tflite = converter.convert()
    quantized_tflite_model = mlir_quantize(
        calibrated_tflite, inference_type=_types_pb2.QUANTIZED_INT16)
    self.assertIsNotNone(quantized_tflite_model)
    # The default input and output types should be float.
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(np.float32, input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(np.float32, output_details[0]['dtype'])
    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
  @test_util.run_v2_only
  def testSignatureDefs(self):
    """Test converting SignatureDef is correct and uses SignatureDef API."""
    root = self._getMultiFunctionModel()
    input_data = tf.constant(1., shape=[1])
    add_func = root.add.get_concrete_function(input_data)
    # Construct the converter directly (not via from_concrete_functions).
    converter = lite.TFLiteConverterV2([add_func], trackable_obj=root)
    tflite_model = converter.convert()
    # Check values from converted model via the 'serving_default' signature.
    expected_value = add_func(input_data)
    interpreter = Interpreter(model_content=tflite_model)
    signature_defs = interpreter.get_signature_list()
    results = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, 'serving_default', {'x': input_data})
    self.assertLen(list(results.keys()), 1)
    self.assertStartsWith(list(results.keys())[0], 'output')
    self.assertAllClose(
        expected_value.numpy(),
        results[signature_defs['serving_default']['outputs'][0]])
    # Verify the SignatureDef structure returned is as expected: a single
    # 'serving_default' entry with one input 'x' and one 'output*' output.
    self.assertEqual(len(signature_defs), 1)
    self.assertEqual(list(signature_defs.keys()), ['serving_default'])
    self.assertEqual(len(signature_defs.values()), 1)
    self.assertEqual(
        list(signature_defs['serving_default'].keys()), ['inputs', 'outputs'])
    self.assertCountEqual(signature_defs['serving_default']['inputs'], ['x'])
    self.assertLen(list(signature_defs['serving_default']['outputs']), 1)
    self.assertStartsWith(
        list(signature_defs['serving_default']['outputs'])[0], 'output')
@test_util.run_v2_only
def testNoSignatureDefsWhenTrackingObjIsNone(self):
"""Test converting SignatureDef is correct and uses SignatureDef API."""
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
None)
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
signature_defs = interpreter.get_signature_list()
# Verify that there is no SignatureDef structure found.
self.assertEqual(len(signature_defs), 0)
@test_util.run_v2_only
def testNoSignatureDefsWhenInvalidTrackingObjIsGiven(self):
"""Test converting SignatureDef is correct and uses SignatureDef API."""
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func], trackable_obj=tracking.AutoTrackable())
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
signature_defs = interpreter.get_signature_list()
# Verify that there is no SignatureDef structure found.
self.assertEqual(len(signature_defs), 0)
@test_util.run_v2_only
def testTrackbleObject(self):
"""Test converting with trackable objects."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
converter = lite.TFLiteConverterV2.from_concrete_functions(
[add_func], trackable_obj=root)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = add_func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
  def _getTrainingTimeQuantizedModel(self):
    """Builds a Keras model containing fake-quant (training-time/QAT) ops."""
    class QLinear(tf.keras.layers.Layer):
      """Dense-like layer that fake-quantizes its input, weights and output."""
      def __init__(self, units=3, **kwargs):
        super(QLinear, self).__init__(**kwargs)
        self.units = units
      def build(self, input_shape):
        self.w = self.add_weight(
            'weight',
            shape=(input_shape[-1], self.units),
            initializer='random_normal',
            trainable=True)
        # Fixed quantization range [-6, 6] held in non-trainable weights.
        self.min_var = self.add_weight(
            'min',
            initializer=tf.keras.initializers.Constant(-6.0),
            trainable=False)
        self.max_var = self.add_weight(
            'max',
            initializer=tf.keras.initializers.Constant(6.0),
            trainable=False)
      def call(self, inputs):
        # Fake-quantize activations, weights and the matmul output using the
        # stored [min, max] range.
        x = tf.quantization.fake_quant_with_min_max_vars(
            inputs, self.min_var, self.max_var)
        w_fq = tf.quantization.fake_quant_with_min_max_vars(
            self.w, self.min_var, self.max_var)
        x = tf.matmul(x, w_fq)
        x = tf.quantization.fake_quant_with_min_max_vars(
            x, self.min_var, self.max_var)
        return x
    return tf.keras.Sequential(QLinear(3, input_shape=(2,)))
  @parameterized.named_parameters(
      ('_DefaultFLOAT32InputOutput', dtypes.float32),
      ('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8))
  @test_util.run_v2_only
  def testTrainingTimeQuantization(self, inference_input_output_type):
    """Converts a QAT (fake-quant) Keras model across inference IO types."""
    model = self._getTrainingTimeQuantizedModel()
    float_converter = lite.TFLiteConverterV2.from_keras_model(model)
    float_tflite_model = float_converter.convert()
    self.assertIsNotNone(float_tflite_model)
    quantized_converter = lite.TFLiteConverterV2.from_keras_model(model)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.inference_input_type = inference_input_output_type
    quantized_converter.inference_output_type = inference_input_output_type
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    # Check the conversion metadata records QAT as the optimization mode.
    metadata = get_conversion_metadata(quantized_tflite_model)
    self.assertIsNotNone(metadata)
    self.assertAllEqual(
        [metadata_fb.ModelOptimizationMode.QUANTIZATION_AWARE_TRAINING],
        metadata.options.modelOptimizationModes)
    # Input/output dtypes must match the requested inference IO type.
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     output_details[0]['dtype'])
    # Ensure that the quantized tflite model is smaller.
    self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
  @test_util.run_v2_only
  def testNewQuantizer(self):
    """Test the model quantized by the new converter."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    quantized_converter.representative_dataset = calibration_gen
    # default quantizer
    quantized_converter.experimental_new_quantizer = False
    old_tflite = quantized_converter.convert()
    # new quantizer
    quantized_converter.experimental_new_quantizer = True
    new_tflite = quantized_converter.convert()
    # Both quantizers must produce numerically close results on random inputs.
    for _ in range(5):
      input_data = tf.constant(
          np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))
      old_value = self._evaluateTFLiteModel(old_tflite, [input_data])
      new_value = self._evaluateTFLiteModel(new_tflite, [input_data])
      # Loose tolerance: quantization error differs between the quantizers.
      self.assertAllClose(old_value, new_value, atol=1e-01)
  @test_util.run_v2_only
  def testEmbeddings(self):
    """Test model with embeddings."""
    input_data = tf.constant(
        np.array(np.random.random_sample((20)), dtype=np.int32))
    class EmbeddingModel(tf.keras.Model):
      """Model that gathers rows from a shared (2000, 300) weight table."""
      def __init__(self):
        super(EmbeddingModel, self).__init__()
        self.shared_weights = self.add_weight(
            'weights',
            shape=(2000, 300),
            dtype=tf.float32,
            initializer=tf.random_normal_initializer(
                mean=0.0, stddev=300**(-0.5)))
      @tf.function(input_signature=[tf.TensorSpec(shape=(20), dtype=tf.int32)])
      def func(self, x):
        # Embedding lookup: select rows of the weight table by index.
        return tf.gather(self.shared_weights, x)
    # Building the model.
    root = EmbeddingModel()
    concrete_func = root.func.get_concrete_function()
    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               root)
    tflite_model = converter.convert()
    # Check values from converted model against the eager lookup.
    expected_value = root.func(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
    self.assertAllClose(expected_value.numpy(), actual_value[0], atol=1e-05)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a concrete function has debug info captured."""
root = tracking.AutoTrackable()
root.v1 = tf.Variable(3.)
root.f = tf.function(lambda x: root.v1 * x)
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
  def _getIntegerQuantizationModelWithFlexOp(self):
    """Returns (root, concrete_func, calibration_gen) for a model with a
    conv3d/erf sequence that requires the Flex (Select TF ops) delegate."""
    # Fixed seed keeps the calibration data deterministic across runs.
    np.random.seed(0)
    root = tracking.AutoTrackable()
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[3, 3, 3, 3, 3], dtype=tf.float32)
    ])
    def func(inp):
      tanh = tf.math.tanh(inp)
      # Flex delegate will merge the consecutive conv3d and erf ops into one
      # Delegate node.
      conv3d = tf.nn.conv3d(
          tanh,
          tf.ones([3, 3, 3, 3, 3]),
          strides=[1, 1, 1, 1, 1],
          padding='SAME')
      erf = tf.math.erf(conv3d)
      output = tf.math.tanh(erf)
      return output
    def calibration_gen():
      # Five random samples in [-1, 1) matching the input signature shape.
      for _ in range(5):
        yield [
            np.random.uniform(-1, 1, size=(3, 3, 3, 3, 3)).astype(np.float32)
        ]
    root.f = func
    return (root, root.f.get_concrete_function(), calibration_gen)
  @parameterized.named_parameters(
      ('_Default', False, False, dtypes.float32),
      ('_INT8InputOutput', False, False, dtypes.int8),
      ('_UINT8InputOutput', False, False, dtypes.uint8),
      ('_INT16Quantize', False, True, dtypes.float32),
      ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
      ('_IntOnly', True, False, dtypes.float32),
      ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
      ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
      ('_IntOnly_INT16Quantize', True, True, dtypes.float32),
      ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))
  @test_util.run_v2_only
  def testIntegerQuantizationWithFlexOp(self, is_int_only, is_int16_quantize,
                                        inference_input_output_type):
    """Integer PTQ with SELECT_TF_OPS enabled for ops without builtin kernels."""
    root, func, calibration_gen = self._getIntegerQuantizationModelWithFlexOp()
    quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calibration_gen
    # Every parameter combination includes SELECT_TF_OPS so the Flex delegate
    # can take the ops that have no quantized builtin kernel.
    if is_int_only:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.SELECT_TF_OPS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.SELECT_TF_OPS
        ]
    else:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS,
            lite.OpsSet.SELECT_TF_OPS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS
        ]
    quantized_converter.inference_input_type = inference_input_output_type
    quantized_converter.inference_output_type = inference_input_output_type
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    # Check the conversion metadata records Select TF ops and the matching
    # optimization mode.
    metadata = get_conversion_metadata(quantized_tflite_model)
    self.assertIsNotNone(metadata)
    self.assertEqual(metadata.options.enableSelectTfOps, True)
    expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER]
    if is_int16_quantize:
      expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_INT16]
    self.assertAllEqual(expected_opt_options,
                        metadata.options.modelOptimizationModes)
    # Input/output dtypes must match the requested inference IO type.
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     output_details[0]['dtype'])
  def _getIntegerQuantizationModelWithUnsupportedOps(self):
    """Returns (root, concrete_func, calibration_gen) for a two-input model
    containing `ceil` ops that have no int8/int16 quantized kernel."""
    # Fixed seed keeps the calibration data deterministic across runs.
    np.random.seed(0)
    root = tracking.AutoTrackable()
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[3], dtype=tf.float32),
        tf.TensorSpec(shape=[3], dtype=tf.float32)
    ])
    def func(a, b):
      # ceil kernel does not support int8 nor int16 types neither.
      left = tf.math.ceil(a)
      right = tf.nn.tanh(b)
      add = tf.math.add(left, right)
      # ceil kernel does not support int8 nor int16 types neither.
      output = tf.math.ceil(add)
      return (output, right)
    def calibration_gen():
      # Five random sample pairs in [-1, 1) matching the input signature.
      for _ in range(5):
        yield [
            np.random.uniform(-1, 1, size=(3)).astype(np.float32),
            np.random.uniform(-1, 1, size=(3)).astype(np.float32)
        ]
    root.f = func
    return (root, root.f.get_concrete_function(), calibration_gen)
  @parameterized.named_parameters(
      ('_INT8InputOutput', False, False, dtypes.int8),
      ('_UINT8InputOutput', False, False, dtypes.uint8),
      ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
      ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
      ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
      ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16),
      ('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True),
      ('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True))
  @test_util.run_v2_only
  def testIntegerQuantizationWithUnsupportedOps(self,
                                                is_int_only,
                                                is_int16_quantize,
                                                inference_input_output_type,
                                                enable_mlir_quantizer=False):
    """Integer PTQ of a model with ops lacking quantized kernels (ceil)."""
    root, func, calib_gen = self._getIntegerQuantizationModelWithUnsupportedOps(
    )
    quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calib_gen
    # TFLITE_BUILTINS is always included so the unquantizable ceil op can
    # fall back to a float builtin kernel.
    if is_int_only:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS
        ]
    else:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS
        ]
    quantized_converter.inference_input_type = inference_input_output_type
    quantized_converter.inference_output_type = inference_input_output_type
    quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    expected_dtype = inference_input_output_type.as_numpy_dtype
    # Allow float32 for fallback on non-quantizable op.
    expected_ceil_dtype = (
        expected_dtype if enable_mlir_quantizer else dtypes.float32)
    # The tanh-path tensors are quantized; the ceil-path tensors keep the
    # fallback dtype determined above.
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 2)
    self.assertEqual(input_details[0]['dtype'], expected_dtype)
    self.assertEqual(input_details[1]['dtype'], expected_ceil_dtype)
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 2)
    self.assertEqual(output_details[0]['dtype'], expected_dtype)
    self.assertEqual(output_details[1]['dtype'], expected_ceil_dtype)
  def _getIntegerQuantizationModelWithControlFlow(self):
    """Returns (model, concrete_func, calibration_gen) for a model with a
    tf.cond; the boolean second input selects the (identity) branch."""
    def true_fn(x):
      return x
    def false_fn(x):
      return x
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[1, 2], dtype=tf.float32),
        tf.TensorSpec(shape=(), dtype=tf.bool)
    ])
    def model(x, b):
      x = x + x
      # Both branches are identity, so the cond only exercises control flow.
      x = tf.cond(b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
      return x + x
    def calibration_gen():
      # Five samples per branch so calibration covers both cond paths.
      for _ in range(5):
        yield [
            np.random.uniform(-1, 1, size=(
                1,
                2,
            )).astype(np.float32),
            tf.constant(True),
        ]
      for _ in range(5):
        yield [
            np.random.uniform(-1, 1, size=(
                1,
                2,
            )).astype(np.float32),
            tf.constant(False),
        ]
    return (model, model.get_concrete_function(), calibration_gen)
  @parameterized.named_parameters(
      ('_INT8InputOutput', False, False, dtypes.int8),
      ('_UINT8InputOutput', False, False, dtypes.uint8),
      ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
      ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
      ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
      ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16),
      # TODO(b/198231624): Support control flow ops in MLIR quantizer
      # ('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True),
      # ('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True),
  )
  @test_util.run_v2_only
  def testIntegerQuantizationWithControlFlow(self,
                                             is_int_only,
                                             is_int16_quantize,
                                             inference_input_output_type,
                                             enable_mlir_quantizer=False):
    """Integer PTQ of a model containing a tf.cond control-flow op."""
    root, func, calib_gen = self._getIntegerQuantizationModelWithControlFlow()
    quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calib_gen
    # TFLITE_BUILTINS is always included so the control-flow ops can use
    # builtin kernels.
    if is_int_only:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet
            .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS
        ]
    else:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet
            .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS
        ]
    quantized_converter.inference_input_type = inference_input_output_type
    quantized_converter.inference_output_type = inference_input_output_type
    quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    expected_dtype = inference_input_output_type.as_numpy_dtype
    # The float input is quantized to the requested dtype; the boolean cond
    # predicate input stays bool.
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 2)
    self.assertEqual(input_details[0]['dtype'], expected_dtype)
    self.assertEqual(input_details[1]['dtype'], dtypes.bool)
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(output_details[0]['dtype'], expected_dtype)
  @parameterized.named_parameters(
      ('_BlocklistedNoneWithLowering', None, None, True),
      ('_BlocklistedNoneWithoutLowering', None, None, False),
      ('_BlocklistedOpsWithLowering', {'CONV_2D'}, None, True),
      ('_BlocklistedOpsWithoutLowering', {'CONV_2D'}, None, False),
      ('_BlocklistedNodesWithLowering', None, {'PartitionedCall:0'}, True),
      ('_BlocklistedNodesWithoutLowering', None, {'Identity'}, False))
  @test_util.run_v2_only
  def testNewQuantizerBlocklistingArgs(self, denylisted_ops, denylisted_nodes,
                                       lower_to_saved_model):
    """Test the model quantized by the new converter and denylisted options."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    quantized_converter.representative_dataset = calibration_gen
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.experimental_new_quantizer = True
    # Calibrate only, then apply quantization manually with denylists.
    quantized_converter._experimental_calibrate_only = True
    quantized_converter.experimental_lower_to_saved_model = lower_to_saved_model
    calibrated = quantized_converter.convert()
    quantized_tflite_model = mlir_quantize(
        calibrated,
        denylisted_ops=denylisted_ops,
        denylisted_nodes=denylisted_nodes)
    interpreter = Interpreter(model_content=quantized_tflite_model)
    details = interpreter.get_tensor_details()
    # Count tensors carrying quantization scales.
    num_quantized_tensors = sum(
        [1 for detail in details
         if len(detail['quantization_parameters']['scales'])])
    # With any denylist the whole model stays float (nothing quantized).
    if denylisted_nodes or denylisted_ops:
      self.assertEqual(num_quantized_tensors, 0)
      return
    self.assertEqual(num_quantized_tensors, 4)  # quant, filter, bias, dequant
  @parameterized.named_parameters(
      ('_SingleLayer', False),
      ('_WholeModel', True),
  )
  @test_util.run_v2_only
  def testNewQuantizerNumericVerificationDebugMode(self, whole_model_verify):
    """Test the model quantized by the new converter with numeric verify ops."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    quantized_converter.representative_dataset = calibration_gen
    # Create a TFLite model with new quantizer.
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.experimental_new_quantizer = True
    production_tflite = quantized_converter.convert()
    # Create a TFLite model with new quantizer and numeric verify ops.
    # Calibrate only, then quantize separately so that mlir_quantize can
    # inject NumericVerify ops into the debug-mode model.
    quantized_converter._experimental_calibrate_only = True
    calibrated = quantized_converter.convert()
    debug_mode_tflite = mlir_quantize(
        calibrated,
        enable_numeric_verify=True,
        enable_whole_model_verify=whole_model_verify)
    # Check if adding debug mode should output a different flatbuffer.
    self.assertNotEqual(production_tflite, debug_mode_tflite)
    # Check if newly added ops are numeric verify ops.
    input_data = tf.constant(
        np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))
    def examine_tflite_model(tflite_content, input_data):
      # Runs one inference and returns ({tensor name: value}, tensor details)
      # so output tensors can be looked up by name afterwards.
      interpreter = Interpreter(
          model_content=tflite_content,
          experimental_op_resolver_type=OpResolverType
          .BUILTIN_WITHOUT_DEFAULT_DELEGATES)
      interpreter.allocate_tensors()
      input_details = interpreter.get_input_details()
      interpreter.set_tensor(input_details[0]['index'], input_data.numpy())
      interpreter.invoke()
      tensor_details = interpreter.get_tensor_details()
      return {
          details['name']: interpreter.get_tensor(details['index'])
          for details in interpreter.get_tensor_details()
      }, tensor_details
    tflite_result, _ = examine_tflite_model(production_tflite, input_data)
    debug_mode_tflite_result, debug_tensor_details = examine_tflite_model(
        debug_mode_tflite, input_data)
    # MLIR-based quantizer should output flatbuffer model with `tfl.quantize`.
    num_production_quantize_ops = len([
        None for output_tensor_name in tflite_result
        if 'tfl.quantize' in output_tensor_name
    ])
    self.assertEqual(num_production_quantize_ops, 1)
    # MLIR-based quantizer should output flatbuffer model with `tfl.quantize`.
    num_debug_quantize_ops = len([
        None for output_tensor_name in debug_mode_tflite_result
        if 'tfl.quantize' in output_tensor_name
    ])
    # Two numbers should be equal.
    self.assertEqual(num_production_quantize_ops, num_debug_quantize_ops)
    # DebugMode TFLite flatbuffer should have NumericVerifyOps more than zero.
    # The name has the prefix "NumericVerify/{name}:{id}
    # where {name} is the tensor name of the original quantized op's activation,
    # and {id} is its tensor id.
    num_debug_ops = 0
    for output_tensor_name in debug_mode_tflite_result:
      if 'NumericVerify' in output_tensor_name:
        # Parse "NumericVerify/{name}:{id}" and check that it points back at
        # the original activation tensor it verifies.
        pos_end_prefix = len('NumericVerify/')
        pos_colon = output_tensor_name.rfind(':')
        self.assertEqual('NumericVerify/', output_tensor_name[:pos_end_prefix])
        tensor_id = int(output_tensor_name[pos_colon + 1:])
        original_tensor_name = output_tensor_name[pos_end_prefix:pos_colon]
        self.assertEqual(original_tensor_name,
                         debug_tensor_details[tensor_id]['name'])
        num_debug_ops += 1
    self.assertEqual(num_debug_ops, 1)
    # The number of debug ops should be equal to that of quantized ops.
    self.assertEqual(num_debug_ops, num_debug_quantize_ops)
@parameterized.named_parameters(
('_PerChannelQuant', False, False),
('_PerChannelMlirQuant', False, True),
('_PerTensorQuant', True, False),
('_PerTensorMlirQuant', True, True),
('_PerChannelDynamicRange', False, False, False),
('_PerTensorDynamicRange', True, False, False))
@test_util.run_v2_only
def testDisablePerChannelQuantization(self, disable_per_channel=False,
enable_mlir_quantizer=False,
representative_dataset=True):
k_conv_name = 'Conv2D1'
# Dynamic range quant requires total num elements of filters > 1024.
k_num_filters = 38
root, func, calib_gen = self._getIntegerQuantizeModel(k_num_filters)
quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
[func], root)
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calib_gen
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS
]
quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
if disable_per_channel:
quantized_converter._experimental_disable_per_channel = (
disable_per_channel)
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
detail = next((d for d in interpreter.get_tensor_details()
if d['name'] == k_conv_name))
quant_params = detail['quantization_parameters']
expected_num_params = 1 if disable_per_channel else k_num_filters
self.assertLen(quant_params['scales'], expected_num_params)
self.assertLen(quant_params['zero_points'], expected_num_params)
  @parameterized.named_parameters(('MlirQuantize', True),
                                  ('TocoQuantize', False))
  @test_util.run_v2_only
  def testQuantizeBiasOverflow(self, enable_mlir_quantizer):
    """Tests if the quantizer handles bias overflow by adjusting scales."""
    input_data = np.array([[-1e-3, 1e-3]], dtype=np.float32)
    def calibration_gen():
      # NOTE(review): due to Python's late binding this yields whatever
      # `input_data` holds when the generator runs — i.e. the tf.constant
      # rebound below, not the numpy array above. Both carry the same values
      # so calibration is unaffected, but the first assignment is shadowed.
      yield {'x': input_data}
    root = self._getMatMulModelWithSmallWeights()
    input_data = tf.constant([-1e-3, 1e-3], shape=(1, 2))
    concrete_func = root.matmul.get_concrete_function(input_data)
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               root)
    converter.optimizations = [lite.Optimize.DEFAULT]
    converter.representative_dataset = calibration_gen
    converter.experimental_new_quantizer = enable_mlir_quantizer
    quantized_model = converter.convert()
    interpreter = Interpreter(model_content=quantized_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_details = interpreter.get_output_details()
    output = interpreter.get_tensor(output_details[0]['index'])
    # the inputs and weights are far smaller than the biases, so the final
    # result should be equal to the biases.
    self.assertAllClose(root.bias, output.flatten())
@test_util.run_v2_only
def testOpVersion(self):
@tf.function(
input_signature=[tf.TensorSpec(shape=[5, 5], dtype=tf.float32)])
def custom_resize(image):
# Add "batch" and "channels" dimensions
image = image[tf.newaxis, ..., tf.newaxis]
# ResizeBilinear version 3.
resize1 = tf.compat.v1.image.resize_bilinear(
image, [2, 2], half_pixel_centers=True)
# ResizeBilinear version 1.
resize2 = tf.compat.v1.image.resize_bilinear(image, [2, 2])
return resize1 + resize2
concrete_func = custom_resize.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
custom_resize)
tflite_model = converter.convert()
model_object = schema_fb.Model.GetRootAsModel(tflite_model, 0)
model = schema_fb.ModelT.InitFromObj(model_object)
for operator in model.operatorCodes:
if operator.builtinCode == schema_fb.BuiltinOperator.RESIZE_BILINEAR:
# half_pixel_centers is supported by ResizeBilinear version 3.
self.assertEqual(operator.version, 3)
break
@test_util.run_v2_only
def testForceSelectTFOps(self):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.SELECT_TF_OPS
]
tflite_model = converter.convert()
# Check the conversion metadata.
metadata = get_conversion_metadata(tflite_model)
self.assertIsNotNone(metadata)
self.assertEqual(metadata.options.forceSelectTfOps, True)
# Check output value from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
def testExcludeConversionMetadata(self):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
root)
converter.exclude_conversion_metadata = True
tflite_model = converter.convert()
# Check the conversion metadata.
metadata = get_conversion_metadata(tflite_model)
self.assertIsNone(metadata)
def testConversionMetadataForDynamicRange(self):
func, _ = self._getSqrtModel()
converter = lite.TFLiteConverterV2.from_concrete_functions(
[func.get_concrete_function()])
converter.optimizations = [lite.Optimize.DEFAULT]
quantized_model = converter.convert()
# Check the conversion metadata.
metadata = get_conversion_metadata(quantized_model)
self.assertIsNotNone(metadata)
self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_DYNAMIC_RANGE],
metadata.options.modelOptimizationModes)
def testConversionMetadataForFloat16(self):
root, func, calibration_gen = self._getIntegerQuantizeModel()
converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)
converter.optimizations = [lite.Optimize.DEFAULT]
converter.representative_dataset = calibration_gen
converter.target_spec.supported_types = [dtypes.float16]
quantized_model = converter.convert()
# Check the conversion metadata.
metadata = get_conversion_metadata(quantized_model)
self.assertIsNotNone(metadata)
self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_FLOAT16],
metadata.options.modelOptimizationModes)
class FromSavedModelTest(lite_v2_test_util.ModelTest):
def _createV1SavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
in_tensor_1 = tf.compat.v1.placeholder(
shape=shape, dtype=tf.float32, name='inputB')
in_tensor_2 = tf.compat.v1.placeholder(
shape=shape, dtype=tf.float32, name='inputA')
variable_node = tf.Variable(1.0, name='variable_node')
out_tensor = in_tensor_1 + in_tensor_2 * variable_node
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
sess.run(tf.compat.v1.variables_initializer([variable_node]))
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def _createV2QATSavedModel(self, shape):
"""Create a simple QAT SavedModel in TF 2."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'saved_model')
input_name = 'input'
output_name = 'scores'
input_tensor = tf.keras.layers.Input((32, 32, 128), name=input_name)
x = tf.quantization.fake_quant_with_min_max_args(input_tensor, -3.0, 3.0)
x = tf.keras.layers.Conv2D(1, (3, 3))(x)
x = tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0)
scores = tf.keras.layers.Reshape((-1,), name=output_name)(x)
model = tf.keras.Model(input_tensor, scores)
model.save(saved_model_dir)
return saved_model_dir, input_name, output_name
@test_util.run_v2_only
def testV1SimpleModel(self):
"""Test a SavedModel."""
with tf.Graph().as_default():
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertStartsWith(input_details[0]['name'], 'inputA')
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertStartsWith(
input_details[1]['name'],
'inputB',
)
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue([1, 16, 16, 3], input_details[1]['shape'])
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertStartsWith(output_details[0]['name'], 'add')
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
  @parameterized.named_parameters(
      ('Default', False),
      ('UnfoldLargeConstant', True),
  )
  @test_util.run_v2_only
  def testUnfoldLargeConstant(self, unfold_large_constant):
    """Test unfolding large splat constant in a TF Lite model."""
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with tf.Graph().as_default():
      with tf.compat.v1.Session() as sess:
        in_tensor = tf.compat.v1.placeholder(
            shape=[1000, 1000], dtype=tf.float32, name='input')
        # Large splat (all-ones) constant: a candidate for being rebuilt at
        # runtime with a FILL op instead of being stored in the flatbuffer.
        constant = tf.constant(value=1, dtype=tf.float32, shape=[1000, 1000])
        out_tensor = in_tensor + constant
        inputs = {'x': in_tensor}
        outputs = {'y': out_tensor}
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter._experimental_unfold_large_splat_constant = unfold_large_constant
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)
    model = util._convert_model_from_bytearray_to_object(tflite_model)
    if unfold_large_constant:
      # The splat constant should be replaced by a FILL op feeding the ADD.
      self.assertEqual(model.operatorCodes[0].builtinCode,
                       schema_fb.BuiltinOperator.FILL)
      self.assertEqual(model.operatorCodes[1].builtinCode,
                       schema_fb.BuiltinOperator.ADD)
    else:
      self.assertEqual(model.operatorCodes[0].builtinCode,
                       schema_fb.BuiltinOperator.ADD)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual('input:0', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertAllEqual([1000, 1000], input_details[0]['shape'])
    self.assertEqual((0., 0.), input_details[0]['quantization'])
    output_details = interpreter.get_output_details()
    self.assertEqual('add:0', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertAllEqual([1000, 1000], output_details[0]['shape'])
    self.assertEqual((0., 0.), output_details[0]['quantization'])
    interpreter.set_tensor(input_details[0]['index'],
                           np.ones(shape=[1000, 1000], dtype=np.float32))
    interpreter.invoke()
    # ones + splat(1.0) == full(2.0): identical regardless of unfolding.
    self.assertAllEqual(
        np.full(shape=[1000, 1000], fill_value=2.0, dtype=np.float32),
        interpreter.get_tensor(output_details[0]['index']))
  @test_util.run_v2_only
  def testTF1HubFormattedModel(self):
    """Test a TF1 hub formatted model."""
    saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
    # TF1 hub model is based on V1 saved model and they omit the saved model
    # schema version setting.
    saved_model_proto = parse_saved_model(saved_model_dir)
    saved_model_proto.saved_model_schema_version = 0
    # Rewrite saved_model.pb in place with the zeroed schema version to
    # mimic a hub-exported model.
    saved_model_pb_file_path = os.path.join(saved_model_dir, 'saved_model.pb')
    with file_io.FileIO(saved_model_pb_file_path, 'wb') as writer:
      writer.write(saved_model_proto.SerializeToString())
    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)
  def _createV1ModelWithHashTableInitializer(self):
    """Export a TF1 SavedModel whose main_op initializes a static hash table.

    Returns:
      Path to the written SavedModel directory.
    """
    # Create a v1 saved model with hash table initializers.
    # Graph mode is required so the table initializer can be captured as the
    # SavedModel's main_op.
    tf.compat.v1.disable_eager_execution()
    saved_model_dir = os.path.join(self.get_temp_dir(),
                                   'savedmodel_with_hashtable')
    table_initializer = tf.lookup.KeyValueTensorInitializer(
        keys=['a', 'b', 'c', 'd'],
        values=[1, 2, 3, 4],
        key_dtype=tf.string,
        value_dtype=tf.int64)
    table = tf.lookup.StaticHashTable(
        table_initializer, default_value=tf.constant(-1, dtype=tf.int64))
    x = tf.compat.v1.placeholder(tf.string, shape=(), name='input')
    y = table.lookup(x)
    tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
    tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y)
    # Signature map, initializer op, and (empty) assets in one assignment.
    signature_def_map, init_op, assets_collection = {
        'serving_default':
            (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
                inputs={'x': tensor_info_x},
                outputs={'y': tensor_info_y},
                method_name='some_function'))
    }, tf.compat.v1.tables_initializer(), None
    sess = tf.compat.v1.Session()
    sess.run(tf.compat.v1.initializers.global_variables())
    builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
        saved_model_dir)
    builder.add_meta_graph_and_variables(
        sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
        signature_def_map,
        main_op=init_op,
        assets_collection=assets_collection,
        strip_default_attrs=True)
    builder.save()
    # Restore TF v2 behavior.
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.enable_eager_execution()
    return saved_model_dir
@test_util.run_v2_only
def testModelWithHashTableInitializer(self):
"""Test a model with saved_model's session initializer for hash tables."""
saved_model_dir = self._createV1ModelWithHashTableInitializer()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_data = np.array(['a', 'b', 'c', 'z'], dtype=np.string_)
interpreter.resize_tensor_input(
input_details[0]['index'], [4], strict=False)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], input_data)
# Invoke multiple times to ensure the initializer graph runs only once.
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([1, 2, 3, -1], list(actual_value))
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([1, 2, 3, -1], list(actual_value))
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([1, 2, 3, -1], list(actual_value))
  def _createV1ModelWithMutableHashTable(self):
    """Export a TF1 SavedModel containing a mutable hash table.

    Returns:
      Path to the written SavedModel directory.
    """
    # Create a v1 saved model with mutable hash table.
    tf.compat.v1.disable_eager_execution()
    saved_model_dir = os.path.join(self.get_temp_dir(),
                                   'savedmodel_with_mutable_hashtable')
    table = tf.raw_ops.MutableHashTableV2(
        key_dtype=tf.string, value_dtype=tf.int64)
    x = tf.compat.v1.placeholder(tf.string, shape=(), name='input')
    keys = tf.constant(['a', 'b'], tf.string)
    values = tf.constant([1, 5], tf.int64)
    default_value = tf.constant(-1, tf.int64)
    insert_call = tf.raw_ops.LookupTableInsertV2(
        table_handle=table, keys=keys, values=values)
    # The control dependency guarantees the insert runs before any lookup.
    with tf.control_dependencies([insert_call]):
      y = tf.raw_ops.LookupTableFindV2(
          table_handle=table, keys=x, default_value=default_value)
    tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
    tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y)
    # Signature map, initializer op, and (empty) assets in one assignment.
    signature_def_map, init_op, assets_collection = {
        'serving_default':
            (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
                inputs={'x': tensor_info_x},
                outputs={'y': tensor_info_y},
                method_name='some_function'))
    }, tf.compat.v1.tables_initializer(), None
    sess = tf.compat.v1.Session()
    builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
        saved_model_dir)
    builder.add_meta_graph_and_variables(
        sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
        signature_def_map,
        main_op=init_op,
        assets_collection=assets_collection,
        strip_default_attrs=True)
    builder.save()
    # Restore TF v2 behavior.
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.enable_eager_execution()
    return saved_model_dir
@test_util.run_v2_only
def testModelWithMutableHashTable(self):
"""Test a model with saved_model's session initializer for hash tables."""
saved_model_dir = self._createV1ModelWithMutableHashTable()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
]
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_data = np.array(['a', 'b', 'c'], dtype=np.string_)
interpreter.resize_tensor_input(
input_details[0]['index'], [3], strict=False)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([1, 5, -1], list(actual_value))
@test_util.run_v2_only
def testConstModel(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = tf.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = tf.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testVariableModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check the conversion metadata.
metadata = get_conversion_metadata(tflite_model)
self.assertIsNotNone(metadata)
self.assertEqual(metadata.environment.modelType,
metadata_fb.ModelType.TF_SAVED_MODEL)
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
  @parameterized.named_parameters(('EnableResourceVariables', True),
                                  ('DisableResourceVariables', False))
  @test_util.run_v2_only
  def testNativeVariablesModel(self, enable_resource_variables):
    """Test a basic model with Variables with saving/loading the SavedModel."""
    root = self._getSimpleModelWithVariables()
    input_data = tf.constant(1., shape=[1, 10])
    to_save = root.assign_add.get_concrete_function(input_data)
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save(root, save_dir, to_save)
    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
    converter.experimental_enable_resource_variables = enable_resource_variables
    if not enable_resource_variables:
      # Without resource-variable support, conversion must fail with an
      # error message that points at the enabling flag.
      with self.assertRaises(convert.ConverterError) as error:
        tflite_model = converter.convert()
      self.assertIn(
          'Variable constant folding is failed. Please consider using enabling '
          '`experimental_enable_resource_variables` flag in the TFLite '
          'converter object.',
          str(error.exception))
      return
    # Enable resource variables.
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = root.assign_add(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
    for tf_result, tflite_result in zip(expected_value, actual_value[0]):
      self.assertAllClose(tf_result, tflite_result, atol=1e-05)
@test_util.run_v2_only
def testSignatures(self):
"""Test values for `signature_keys` argument."""
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model with invalid `signature_keys`.
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['INVALID'])
self.assertIn("Invalid signature key 'INVALID'", str(error.exception))
# Convert model with empty `signature_keys`.
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=[])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testSignatureDefsWithFullIntegerQuantization(self):
# SETUP
# 1. Define input shapes
tf_input_shape = (32, 32, 128)
tflite_input_shape = (1,) + tf_input_shape
# 2. Define model
tf_saved_model_dir, input_name, output_name = (
self._createV2QATSavedModel(tf_input_shape))
# MODEL 1: TFLite (float) model
# 1. Create TFLite model
converter = tf.lite.TFLiteConverter.from_saved_model(tf_saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
# 2. Initialize the Intepreter
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
interpreter.resize_tensor_input(input_details['index'], tflite_input_shape)
interpreter.allocate_tensors()
signature_list = interpreter._get_full_signature_list()['serving_default']
# 3. (Skip) Verify that signature def input/output tensors are in the model.
# 4. Evaluate the model
input_data = np.random.random(tflite_input_shape).astype(np.float32)
result = self._evaluateTFLiteModelUsingSignatureDef(
tflite_model, 'serving_default', {input_name: input_data})[output_name]
# MODEL 2: TFLite (full integer quantized) model
# 1. Create TFLite model
converter = tf.lite.TFLiteConverter.from_saved_model(tf_saved_model_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
tflite_model_quant = converter.convert()
# 2. Initialize the Intepreter
interpreter = Interpreter(model_content=tflite_model_quant)
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
interpreter.resize_tensor_input(input_details['index'], tflite_input_shape)
interpreter.allocate_tensors()
# 3. Verify that signature def input/output tensors are in the model.
all_indices = {item['index'] for item in interpreter.get_tensor_details()}
signature_list = interpreter._get_full_signature_list()['serving_default']
input_tensor_indices = set(signature_list['inputs'].values())
assert input_tensor_indices.issubset(all_indices)
output_tensor_indices = set(signature_list['outputs'].values())
assert output_tensor_indices.issubset(all_indices)
# 4. Evaluate the model
input_data = np.random.random(tflite_input_shape)
input_scale, input_zero_point = input_details['quantization']
if (input_scale, input_zero_point) != (0.0, 0):
input_data = input_data / input_scale + input_zero_point
input_data = input_data.astype(input_details['dtype'])
result_quant = self._evaluateTFLiteModelUsingSignatureDef(
tflite_model_quant, 'serving_default',
{input_name: input_data})[output_name]
output_scale, output_zero_point = output_details['quantization']
if (output_scale, output_zero_point) != (0.0, 0):
result_quant = result_quant.astype(np.float32)
result_quant = (result_quant - output_zero_point) * output_scale
# COMPARE: Validate that results from both models are approx. the same.
root_mean_squared = np.sqrt(np.mean((result-result_quant)**2))
assert root_mean_squared < 1.0
  @test_util.run_v2_only
  def testSignatureDefs(self):
    """Test converting SignatureDef is correct and uses SignatureDef API."""
    root = self._getMultiFunctionModel()
    input_data_0 = tf.constant(1., shape=[1])
    input_data_1 = tf.constant(3., shape=[1])
    mul_add_func = root.mul_add.get_concrete_function(input_data_1,
                                                      input_data_0)
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save(root, save_dir, {'mul_add': mul_add_func})
    converter = lite.TFLiteConverterV2.from_saved_model(
        save_dir, signature_keys=['mul_add'])
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = root.mul_add(input_data_1, input_data_0)
    interpreter = Interpreter(model_content=tflite_model)
    signature_defs = interpreter.get_signature_list()
    # Run through the signature API using the signature's named inputs.
    results = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, 'mul_add', {
            'y': input_data_0,
            'x': input_data_1
        })
    self.assertEqual(list(results.keys()), ['output_0'])
    self.assertEqual(expected_value.numpy(), results['output_0'])
    # Verify the SignatureDef structure returned is as expected.
    self.assertEqual(len(signature_defs), 1)
    self.assertEqual(list(signature_defs.keys()), ['mul_add'])
    self.assertEqual(len(signature_defs.values()), 1)
    self.assertEqual(
        list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])
    self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y'])
    self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])
  @test_util.run_v2_only
  def testSignatureDefsWithDefaultValue(self):
    """Test converting SignatureDef is correct and uses SignatureDef API.

    This test uses None as signature_key to test default behavior.
    """
    # NOTE(review): this is a near-duplicate of testSignatureDefs; the only
    # behavioral difference is passing None as the signature key below.
    root = self._getMultiFunctionModel()
    input_data_0 = tf.constant(1., shape=[1])
    input_data_1 = tf.constant(3., shape=[1])
    mul_add_func = root.mul_add.get_concrete_function(input_data_1,
                                                      input_data_0)
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save(root, save_dir, {'mul_add': mul_add_func})
    converter = lite.TFLiteConverterV2.from_saved_model(
        save_dir, signature_keys=['mul_add'])
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = root.mul_add(input_data_1, input_data_0)
    interpreter = Interpreter(model_content=tflite_model)
    signature_defs = interpreter.get_signature_list()
    # None selects the model's single (default) signature.
    results = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, None, {
            'y': input_data_0,
            'x': input_data_1
        })
    self.assertEqual(list(results.keys()), ['output_0'])
    self.assertEqual(expected_value.numpy(), results['output_0'])
    # Verify the SignatureDef structure returned is as expected.
    self.assertEqual(len(signature_defs), 1)
    self.assertEqual(list(signature_defs.keys()), ['mul_add'])
    self.assertEqual(len(signature_defs.values()), 1)
    self.assertEqual(
        list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])
    self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y'])
    self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])
  @test_util.run_v2_only
  def testSignatureDefsQuantizedModel(self):
    """Test converting SignatureDef on quantized model."""
    root = self._getMultiFunctionModel()
    input_data_0 = tf.constant(1., shape=[1])
    input_data_1 = tf.constant(3., shape=[1])
    mul_add_func = root.mul_add.get_concrete_function(input_data_1,
                                                      input_data_0)
    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save(root, save_dir, {'mul_add': mul_add_func})
    converter = lite.TFLiteConverterV2.from_saved_model(
        save_dir, signature_keys=['mul_add'])
    def representative_dataset_gen():
      # Calibration samples keyed by the signature's input names.
      for _ in range(2):
        yield {
            'x':
                np.random.uniform(low=0, high=1,
                                  size=(1, 1)).astype(np.float32),
            'y':
                np.random.uniform(low=0, high=1, size=(1, 1)).astype(np.float32)
        }
    # Full-integer post-training quantization.
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset_gen
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    tflite_model = converter.convert()
    # Check signatures are valid from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    signature_defs = interpreter.get_signature_list()
    # Verify the SignatureDef structure returned is as expected.
    self.assertEqual(len(signature_defs), 1)
    self.assertEqual(list(signature_defs.keys()), ['mul_add'])
    self.assertEqual(len(signature_defs.values()), 1)
    self.assertEqual(
        list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])
    self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y'])
    self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])
@test_util.run_v2_only
def testMultipleFunctionModel(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'add': add_func, 'sub': sub_func})
# Try converting multiple functions.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
interpreter = tf.lite.Interpreter(model_content=tflite_model)
signature_defs = interpreter.get_signature_list()
# Verify the SignatureDef structure returned is as expected.
self.assertEqual(len(signature_defs), 2)
self.assertEqual(list(signature_defs.keys()), ['add', 'sub'])
self.assertEqual(len(signature_defs.values()), 2)
self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs'])
self.assertCountEqual(signature_defs['add']['inputs'], ['x'])
self.assertEqual(list(signature_defs['add']['outputs']), ['output_0'])
self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs'])
self.assertCountEqual(signature_defs['sub']['inputs'], ['x'])
self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0'])
# Verify the Signature runner executions.
add_signature_runner = interpreter.get_signature_runner('add')
add_output = add_signature_runner(x=input_data)
self.assertEqual(add_output['output_0'], 3)
sub_signature_runner = interpreter.get_signature_runner('sub')
sub_output = sub_signature_runner(x=input_data)
self.assertEqual(sub_output['output_0'], -2)
@parameterized.named_parameters(
('_Default', False, False, dtypes.float32, False),
('_DefaultMlirQuant', False, False, dtypes.float32, True),
('_INT8InputOutput', False, False, dtypes.int8),
('_UINT8InputOutput', False, False, dtypes.uint8),
('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16),
('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True),
('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True))
@test_util.run_v2_only
def testMultipleFunctionQuantizedModel(self,
is_int_only,
is_int16_quantize,
inference_input_output_type,
enable_mlir_quantizer=False):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'add': add_func, 'sub': sub_func})
# Try converting multiple functions.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
def representative_dataset_gen():
for _ in range(2):
yield ('add', {
'x': np.random.uniform(low=0, high=1, size=(1,)).astype(np.float32),
})
for _ in range(2):
yield ('sub', {
'x': np.random.uniform(low=0, high=1, size=(1,)).astype(np.float32),
})
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
if is_int_only:
if is_int16_quantize:
converter.target_spec.supported_ops = [
lite.OpsSet
.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
else:
converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]
else:
if is_int16_quantize:
converter.target_spec.supported_ops = [
lite.OpsSet
.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
else:
converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS]
converter.inference_input_type = inference_input_output_type
converter.inference_output_type = inference_input_output_type
converter.experimental_new_quantizer = enable_mlir_quantizer
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
interpreter = tf.lite.Interpreter(model_content=tflite_model)
signature_defs = interpreter.get_signature_list()
# Verify the SignatureDef structure returned is as expected.
self.assertEqual(len(signature_defs), 2)
self.assertEqual(list(signature_defs.keys()), ['add', 'sub'])
self.assertEqual(len(signature_defs.values()), 2)
self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs'])
self.assertCountEqual(signature_defs['add']['inputs'], ['x'])
self.assertEqual(list(signature_defs['add']['outputs']), ['output_0'])
self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs'])
self.assertCountEqual(signature_defs['sub']['inputs'], ['x'])
self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0'])
# Verify the Signature runner executions.
input_data = tf.constant(
np.random.uniform(-1, 1, size=(1,)).astype(
inference_input_output_type.as_numpy_dtype))
add_signature_runner = interpreter.get_signature_runner('add')
add_output = add_signature_runner(x=input_data)
self.assertIsNotNone(add_output['output_0'])
input_details = add_signature_runner.get_input_details()
self.assertLen(input_details, 1)
self.assertStartsWith(input_details['x']['name'], 'add_x:0')
self.assertEqual(inference_input_output_type.as_numpy_dtype,
input_details['x']['dtype'])
self.assertTrue(([1] == input_details['x']['shape']).all())
if inference_input_output_type == dtypes.float32:
self.assertEqual((0.0, 0), input_details['x']['quantization'])
sub_signature_runner = interpreter.get_signature_runner('sub')
sub_output = sub_signature_runner(x=input_data)
self.assertIsNotNone(sub_output['output_0'])
output_details = sub_signature_runner.get_output_details()
self.assertLen(output_details, 1)
self.assertStartsWith(output_details['output_0']['name'],
'StatefulPartitionedCall:0')
self.assertEqual(inference_input_output_type.as_numpy_dtype,
output_details['output_0']['dtype'])
self.assertTrue(([1] == output_details['output_0']['shape']).all())
if inference_input_output_type == dtypes.float32:
self.assertEqual((0.0, 0), output_details['output_0']['quantization'])
@test_util.run_v2_only
def testMultipleFunctionModelWithSharedWeight(self):
"""Convert multiple functions with the shared weight."""
root = self._getMultiFunctionModelWithSharedWeight()
input_data = tf.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
mul_func = root.mul.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'add': add_func, 'sub': sub_func, 'mul': mul_func})
# Try converting multiple functions.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Make sure that the weight tensors are shared.
self.assertLess(len(tflite_model), 1100000)
# TODO(b/184696047): Write down the test codes for multiple signature
# runners once the Python API is ready to use.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
signature_defs = interpreter.get_signature_list()
self.assertLen(signature_defs, 3)
add_signature_runner = interpreter.get_signature_runner('add')
sub_signature_runner = interpreter.get_signature_runner('sub')
mul_signature_runner = interpreter.get_signature_runner('mul')
self.assertIsNotNone(add_signature_runner)
self.assertIsNotNone(sub_signature_runner)
self.assertIsNotNone(mul_signature_runner)
@test_util.run_v2_only
def testNoConcreteFunctionModel(self):
root = self._getMultiFunctionModel()
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir)
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(save_dir)
self.assertIn('Only support at least one signature key.',
str(error.exception))
@test_util.run_v2_only
def testKerasSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = tf.constant(1., shape=[1, 1])
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = tf.keras.models.Sequential([
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(model, save_dir)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a SavedModel has debug info captured."""
input_data = tf.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = tf.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
options = save_options.SaveOptions(save_debug_info=True)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save, options)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
@test_util.run_v2_only
def testNonStatefulConvLSTM2D(self):
"""Test saved model with non stateful ConvLSTM2D keras layer."""
# Create keras model
model = tf.keras.Sequential([
tf.keras.layers.ConvLSTM2D(
32, (3, 3),
padding='same',
return_sequences=True,
stateful=False,
batch_input_shape=(1, 1, 10, 10, 1))
])
model.compile()
# Export the keras model to saved model.
saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_lstm_2d')
model.save(saved_model_dir, save_format='tf', include_optimizer=False)
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
]
tflite_model = converter.convert()
self.assertTrue(tflite_model)
  @test_util.run_v2_only
  def testKerasConvLSTM2DWithMoreThanOneDilationRate(self):
    """ConvLSTM2D with dilation_rate > 1 converts when SELECT_TF_OPS is on."""
    input_tensor = tf.keras.layers.Input(
        batch_size=8,
        shape=[9, 10, 11, 12],
        name='input_tensor',
        dtype=tf.float32)
    # NOTE(review): bias_initializer is set although use_bias=False, so it
    # has no effect — presumably copied from another test; confirm intent.
    output = tf.keras.layers.ConvLSTM2D(
        filters=3,
        kernel_size=3,
        strides=1,
        padding='VALID',
        dilation_rate=2,
        use_bias=False,
        bias_initializer='ones',
        data_format='channels_last')(
            input_tensor)
    model = tf.keras.Model(inputs=[input_tensor], outputs=output)
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    # Export the keras model to saved model.
    saved_model_dir = os.path.join(self.get_temp_dir(),
                                   'conv_lstm_2d_with_dilation_rate')
    model.save(saved_model_dir, save_format='tf', include_optimizer=False)
    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
    # Select TF ops are required for the dilated ConvLSTM2D lowering.
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)
def _createUnknownInputShapeModel(self):
"""Create a simple SavedModel with unknown input."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'unknown_input_shape')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
unknown_shape = tf.TensorShape(None)
in_tensor = tf.compat.v1.placeholder(
shape=unknown_shape, dtype=tf.float32, name='input')
out_tensor = in_tensor + in_tensor
inputs = {'input': in_tensor}
outputs = {'output': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
@test_util.run_v2_only
def testUnknownInputShapeModel(self):
"""Test a SavedModel with an unknown input shape."""
saved_model_dir = self._createUnknownInputShapeModel()
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_data = np.array([1., 2., 3.], dtype=np.float32)
interpreter.resize_tensor_input(
input_details[0]['index'], [3], strict=False)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([2., 4., 6.], list(actual_value))
  @parameterized.named_parameters(
      ('_PerChannelQuant', False, False),
      ('_PerChannelMlirQuant', False, True),
      ('_PerTensorQuant', True, False),
      ('_PerTensorMlirQuant', True, True),
      ('_PerChannelDynamicRange', False, False, True),
      ('_PerTensorDynamicRange', True, False, True))
  @test_util.run_v2_only
  def testDisablePerChannelQuantization(self,
                                        disable_per_channel=False,
                                        enable_mlir_quantizer=False,
                                        representative_dataset=True):
    """Per-channel vs. per-tensor weight quantization of a Conv2D model."""
    # Dynamic range quant requires total num elements of filters > 1024.
    k_num_filters = 38
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(k_num_filters, (3, 3), activation='relu')
    ])
    model.build(input_shape=(1, 5, 5, 3))
    saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_saved_model')
    save(model, saved_model_dir)
    k_conv_name = 'sequential/conv2d/Conv2D1'
    quantized_converter = tf.lite.TFLiteConverter.from_saved_model(
        saved_model_dir)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    # With a representative dataset this calibrates for full-integer quant;
    # without one it falls back to dynamic-range quantization.
    if representative_dataset:

      def calib_gen():
        for _ in range(5):
          yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]

      quantized_converter.representative_dataset = calib_gen
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS
    ]
    quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
    if disable_per_channel:
      quantized_converter._experimental_disable_per_channel = (
          disable_per_channel)
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    detail = next((d for d in interpreter.get_tensor_details()
                   if d['name'] == k_conv_name))
    quant_params = detail['quantization_parameters']
    # Per-channel: one scale/zero-point per output filter; per-tensor: one.
    expected_num_params = k_num_filters
    if disable_per_channel:
      expected_num_params = 1
    self.assertLen(quant_params['scales'], expected_num_params)
    self.assertLen(quant_params['zero_points'], expected_num_params)
  @parameterized.named_parameters(
      ('_INT8Quant_INT32Bias', False, False, dtypes.int32, True),
      ('_INT16Quant_INT64Bias', True, False, dtypes.int64, True),
      ('_INT8Quant_INT32Bias_Set', False, True, dtypes.int32, True),
      ('_INT8Quant_INT64Bias_Set', False, True, dtypes.int64, False),
      ('_INT16Quant_INT32Bias_Set', True, True, dtypes.int32, True),
      ('_INT16Quant_INT64Bias_Set', True, True, dtypes.int64, True),
      ('_INT16Quant_FLOAT32Bias_Set', True, True, dtypes.float32, False),
  )
  @test_util.run_v2_only
  def testBiasQuantization(self, is_int16_quantize, explicitly_set_bias,
                           bias_type, is_valid_bias_type):
    """Checks the quantized dtype of a Dense layer's bias tensor.

    Args:
      is_int16_quantize: use 16x8 quantization instead of full int8.
      explicitly_set_bias: set the experimental bias-type override.
      bias_type: requested bias dtype.
      is_valid_bias_type: whether conversion is expected to succeed.
    """
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(
            1024, input_shape=[1024], activation=None, bias_initializer='ones')
    ])
    saved_model_dir = os.path.join(self.get_temp_dir(), 'dense_saved_model')
    save(model, saved_model_dir)
    k_dense_bias_name = 'sequential/dense/BiasAdd/ReadVariableOp'
    quantized_converter = tf.lite.TFLiteConverter.from_saved_model(
        saved_model_dir)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    if explicitly_set_bias:
      quantized_converter._experimental_full_integer_quantization_bias_type = bias_type
    if is_int16_quantize:
      quantized_converter.target_spec.supported_ops = [
          lite.OpsSet
          .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
      ]
    else:
      quantized_converter.target_spec.supported_ops = [
          lite.OpsSet.TFLITE_BUILTINS_INT8
      ]

    def calibration_gen():
      # Five random samples matching the model's [1, 1024] input.
      for _ in range(5):
        yield [np.random.randn(1, 1024).astype(np.float32)]

    quantized_converter.representative_dataset = calibration_gen
    # An unsupported bias type must be rejected at convert() time.
    if not is_valid_bias_type:
      with self.assertRaisesRegex(ValueError, 'Expected bias type to be'):
        quantized_converter.convert()
      return
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    dense_bias = next((d for d in interpreter.get_tensor_details()
                       if d['name'] == k_dense_bias_name))
    self.assertEqual(bias_type, dense_bias['dtype'])
  @parameterized.named_parameters(
      ('_Int8PerChannelMlirDynamicRangeQuant', True, False, False),
      ('_Int8PerChannelTocoDynamicRangeQuant', False, False, False),
      ('_Int8PerTensorMlirDynamicRangeQuant', True, True, False),
      ('_Int8PerTensorTocoDynamicRangeQuant', False, True, False),
      ('_Float16DynamicRangeQuant', True, False, True))
  @test_util.run_v2_only
  def testMlirDynamicRangeQuantization(self, enable_new_dynamic_range_quantizer,
                                       disable_per_channel,
                                       enable_float16_quant):
    """Dynamic-range quantization of a Conv2D SavedModel."""
    num_filters = 1024
    conv_name = 'sequential/conv2d/Conv2D1'
    model = tf.keras.models.Sequential(
        [tf.keras.layers.Conv2D(num_filters, (3, 3), activation='relu')])
    model.build(input_shape=(1, 32, 32, 3))
    saved_model_dir = self.create_tempdir()
    save(model, saved_model_dir.full_path)
    converter = tf.lite.TFLiteConverter.from_saved_model(
        saved_model_dir.full_path)
    # No representative dataset: dynamic-range quantization path.
    converter.optimizations = [lite.Optimize.DEFAULT]
    converter.experimental_new_dynamic_range_quantizer = (
        enable_new_dynamic_range_quantizer)
    converter._experimental_disable_per_channel = disable_per_channel
    if enable_float16_quant:
      converter.target_spec.supported_types = [tf.float16]
    quantized_tflite_model = converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    quantized_weight = next(
        d for d in interpreter.get_tensor_details() if d['name'] == conv_name)
    quant_params = quantized_weight['quantization_parameters']
    # float16 weights carry no integer scales; otherwise per-channel quant
    # has one scale per filter and per-tensor quant has exactly one.
    if enable_float16_quant:
      expected_num_params = 0
    else:
      expected_num_params = 1 if disable_per_channel else num_filters
    self.assertLen(quant_params['scales'], expected_num_params)
    self.assertLen(quant_params['zero_points'], expected_num_params)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Dynamic-range quant keeps float32 at the model boundary.
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    if enable_float16_quant:
      self.assertEqual(np.float16, quantized_weight['dtype'])
    else:
      self.assertEqual(np.int8, quantized_weight['dtype'])
class FromKerasModelTest(lite_v2_test_util.ModelTest):
  """Tests for TFLiteConverterV2.from_keras_model conversion paths."""

  @test_util.run_v2_only
  def testSequentialModel(self):
    """Test a simple sequential tf.Keras model."""
    input_data = tf.constant(1., shape=[1, 1])
    # Create a simple Keras model.
    x = np.array([[1.], [2.]])
    y = np.array([[2.], [4.]])
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(units=1, input_shape=[1])
    ])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit(x, y, epochs=1)
    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    tflite_model = converter.convert()
    # Check the conversion metadata.
    metadata = get_conversion_metadata(tflite_model)
    self.assertIsNotNone(metadata)
    self.assertEqual(metadata.environment.modelType,
                     metadata_fb.ModelType.KERAS_MODEL)
    # Check values from converted model.
    expected_value = model.predict(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
    self.assertEqual(expected_value, actual_value)

  @test_util.run_v2_only
  def testSequentialMultiInputOutputModel(self):
    """Test a tf.Keras model with multiple inputs and outputs."""
    left_input_data = tf.constant(1., shape=[1, 3])
    right_input_data = tf.constant(1., shape=[1, 3])
    # Create a simple Keras model.
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_c_np = np.random.random((10, 3))
    output_d_np = np.random.random((10, 2))
    input_a = tf.keras.layers.Input(shape=(3,), name='input_a')
    input_b = tf.keras.layers.Input(shape=(3,), name='input_b')
    # Both inputs go through a shared Dense layer before concatenation.
    dense = tf.keras.layers.Dense(8, name='dense_1')
    interm_a = dense(input_a)
    interm_b = dense(input_b)
    merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge')
    output_c = tf.keras.layers.Dense(
        3, activation='softmax', name='dense_2')(
            merged)
    output_d = tf.keras.layers.Dense(
        2, activation='softmax', name='dense_3')(
            merged)
    model = tf.keras.models.Model(
        inputs=[input_a, input_b], outputs=[output_c, output_d])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1)
    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    tflite_model = converter.convert()
    # Check values from converted model.
    input_data = [left_input_data, right_input_data]
    expected_value = model.predict(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, input_data)
    for tf_result, tflite_result in zip(expected_value, actual_value):
      self.assertAllClose(tf_result, tflite_result, atol=1e-05)

  @test_util.run_v2_only
  def testGraphDebugInfo(self):
    """Test a tf.Keras model has debug info captured."""
    # Create a simple Keras model.
    x = [-1, 0, 1, 2, 3, 4]
    y = [-3, -1, 1, 3, 5, 7]
    model = tf.keras.models.Sequential(
        [tf.keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit(x, y, epochs=1)
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    converter.convert()
    self._assertValidDebugInfo(converter._debug_info)

  @test_util.run_v2_only
  def testKerasFallbackPath(self):
    """Test keras model which failed when exporting to the saved model."""
    input_data = tf.constant(
        np.array(np.random.random_sample((20)), dtype=np.float32))

    class Model(tf.keras.Model):

      def __init__(self):
        super(Model, self).__init__()
        # A None name will cause a failure in exporting to a saved model.
        self.shared_weights = self.add_weight(
            name=None,
            shape=(20, 1),
            dtype=tf.float32,
            initializer=tf.random_normal_initializer(
                mean=0.0, stddev=300**(-0.5)))

      def call(self, x):
        return tf.add(self.shared_weights, x)

    # Building the model.
    model = Model()
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit(input_data, input_data, epochs=1)
    # Convert model: the converter should fall back to a non-SavedModel path.
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

  @test_util.run_v2_only
  def testSignatureDefs(self):
    """Test converting SignatureDef is correct and uses SignatureDef API."""
    keras_model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(
            32,
            kernel_size=3,
            padding='same',
            activation='relu',
            input_shape=(32, 32, 3),
            name='tensor'),
        tf.keras.layers.Dense(10, name='output_tensor')
    ])
    converter = lite.TFLiteConverterV2.from_keras_model(keras_model)
    tflite_model = converter.convert()
    # Check values from converted model.
    input_data = tf.constant(
        np.random.uniform(-1, 1, size=(1, 32, 32, 3)).astype(np.float32))
    expected_value = keras_model(input_data)
    interpreter = Interpreter(model_content=tflite_model)
    signature_defs = interpreter.get_signature_list()
    results = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, 'serving_default', {'tensor_input': input_data})
    self.assertEqual(list(results.keys()), ['output_tensor'])
    self.assertAllClose(expected_value.numpy(), results['output_tensor'])
    # Verify the SignatureDef structure returned is as expected.
    self.assertEqual(len(signature_defs), 1)
    self.assertEqual(list(signature_defs.keys()), ['serving_default'])
    self.assertEqual(len(signature_defs.values()), 1)
    self.assertEqual(
        list(signature_defs['serving_default'].keys()), ['inputs', 'outputs'])
    self.assertCountEqual(signature_defs['serving_default']['inputs'],
                          ['tensor_input'])
    self.assertEqual(
        list(signature_defs['serving_default']['outputs']), ['output_tensor'])

  @parameterized.named_parameters(
      ('_PerChannelMlirDynamicRangeQuant', True, False, False),
      ('_PerChannelTocoDynamicRangeQuant', False, False, False),
      ('_PerTensorMlirDynamicRangeQuant', True, True, False),
      ('_PerTensorTocoDynamicRangeQuant', False, True, False),
      ('_Float16DynamicRangeQuant', True, False, True))
  @test_util.run_v2_only
  def testMlirDynamicRangeQuantization(self, enable_new_dynamic_range_quantizer,
                                       disable_per_channel,
                                       enable_float16_quant):
    """Dynamic-range quantization through the Keras-model converter path."""
    num_filters = 1024
    conv_name = 'sequential/conv2d/Conv2D1'
    model = tf.keras.models.Sequential(
        [tf.keras.Input(shape=(32, 32, 3)),
         tf.keras.layers.Conv2D(num_filters, (3, 3), activation='relu')])
    model.build()
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    # No representative dataset: dynamic-range quantization path.
    converter.optimizations = [lite.Optimize.DEFAULT]
    converter.experimental_new_dynamic_range_quantizer = (
        enable_new_dynamic_range_quantizer)
    converter._experimental_disable_per_channel = disable_per_channel
    if enable_float16_quant:
      converter.target_spec.supported_types = [tf.float16]
    quantized_tflite_model = converter.convert()
    self.assertIsNotNone(quantized_tflite_model)
    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    quantized_weight = next(
        d for d in interpreter.get_tensor_details() if d['name'] == conv_name)
    quant_params = quantized_weight['quantization_parameters']
    # float16 weights carry no integer scales; otherwise per-channel quant
    # has one scale per filter and per-tensor quant has exactly one.
    if enable_float16_quant:
      expected_num_params = 0
    else:
      expected_num_params = 1 if disable_per_channel else num_filters
    self.assertLen(quant_params['scales'], expected_num_params)
    self.assertLen(quant_params['zero_points'], expected_num_params)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Dynamic-range quant keeps float32 at the model boundary.
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    if enable_float16_quant:
      self.assertEqual(np.float16, quantized_weight['dtype'])
    else:
      self.assertEqual(np.int8, quantized_weight['dtype'])

  @parameterized.named_parameters([
      ('{}BitWeightOnly={}LowBit={}'.format(num_bits, weight_only, low_bit),
       num_bits, weight_only, low_bit) for num_bits, weight_only, low_bit
      in itertools.product((2, 4, 6), (True, False), (True, False))])
  @test_util.run_v2_only
  def testQATLowBitKerasModel(self, num_bits, weight_only, low_bit):
    """QAT model with low-bit fake-quant converts and stays in range."""
    # Symmetric integer range for the requested bit width.
    bit_max = (1 << (num_bits - 1)) - 1
    bit_min = -bit_max
    tf_input_shape = (5, 5, 3)
    tflite_input_shape = (1,) + tf_input_shape
    model, input_name, output_name = (self._createV2QATLowBitKerasModel(
        tf_input_shape, weight_only, num_bits, bit_min, bit_max))
    input_data = np.linspace(
        0, 6, np.prod(tflite_input_shape)).reshape(tflite_input_shape)
    tf_result = model(input_data)
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    if low_bit:
      converter._experimental_low_bit_qat = True
    tflite_model = converter.convert()
    result = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, 'serving_default',
        {input_name: input_data.astype(np.float32)})[output_name]
    # TFLite output must exactly match the TF model (norm of diff == 0).
    self.assertAllClose(
        [np.linalg.norm(result - tf_result.numpy().astype(np.float32))], [0.0])
    interpreter = tf.lite.Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
    num_8bit_activations = 0
    num_8bit_weights = 0
    kernel_name = ('model/conv_wrapper/Conv2D;model/conv_wrapper/'
                   'FakeQuantWithMinMaxVarsPerChannel')
    for detail in interpreter.get_tensor_details():
      if (detail['dtype'] == np.int8 and detail['name'] and
          detail['name'] == kernel_name):
        num_8bit_weights += 1
        weights = interpreter.get_tensor(detail['index'])
        # With low-bit QAT the int8 kernel stays inside the low-bit range;
        # without it, values exceed that range.
        if low_bit:
          self.assertFalse((bit_min > weights).any() or
                           (weights > bit_max).any())
        else:
          self.assertTrue((bit_min > weights).any() or
                          (weights > bit_max).any())
        self.assertIn('scales', detail['quantization_parameters'])
        if low_bit and detail['quantization_parameters']['scales']:
          self.assertAllClose(
              detail['quantization_parameters']['scales'], [1.0])
      elif detail['dtype'] == np.int8 and detail['name']:
        # Any other int8 tensor is an activation; weight-only conversion
        # must not produce them.
        self.assertFalse(weight_only)
        self.assertIn('scales', detail['quantization_parameters'])
        if detail['quantization_parameters']['scales']:
          self.assertAllClose(
              detail['quantization_parameters']['scales'], [6/255])
        num_8bit_activations += 1

    self.assertEqual(num_8bit_weights, 0 if weight_only and not low_bit else 1)
    # 3 activations with full integer: conv_input, conv_output, reshape_output
    self.assertEqual(num_8bit_activations, 0 if weight_only else 3)
class FromJaxModelTest(lite_v2_test_util.ModelTest):
  @test_util.run_v2_only
  def testInvalidInputsModel(self):
    """Each malformed from_jax() argument combination raises ValueError."""
    if DISABLE_JAX_TEST:
      return

    def simple_model(input1, input2):
      return jnp.sin(input1) + jnp.cos(input2)

    input_tensor = jnp.zeros([10, 10])
    # Invalid case: not specify serving_func
    converter = lite.TFLiteConverterV2.experimental_from_jax(
        None, [{
            'input1': input_tensor
        }])
    with self.assertRaisesRegex(ValueError, 'No serving func is specified.'):
      converter.convert()

    # Invalid case: not specify input
    converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model],
                                                             None)
    with self.assertRaisesRegex(ValueError, 'Input tensors are not specified.'):
      converter.convert()

    # An empty input list is rejected the same way as None.
    converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model], [])
    with self.assertRaisesRegex(ValueError, 'Input tensors are not specified.'):
      converter.convert()

    # Invalid case: not wrap input_tensor in a list.
    converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model],
                                                             input_tensor)
    with self.assertRaisesRegex(
        ValueError,
        'The truth value of an array with more than one element is ambiguous.'):
      converter.convert()

    # Invalid case: only partial inputs are provided.
    converter = lite.TFLiteConverterV2.experimental_from_jax(
        [simple_model], [[('input1', input_tensor)]])
    with self.assertRaisesRegex(
        ValueError, 'Failed to convert the given Jax function to hlo.'):
      converter.convert()

    # Invalid case: serving functions length does not match input mapping.
    converter = lite.TFLiteConverterV2.experimental_from_jax(
        [simple_model, simple_model], [[
            ('input1', input_tensor),
            ('input2', input_tensor),
        ]])
    with self.assertRaisesRegex(
        ValueError,
        'Input tensor mapping len 1 does not match serving func len 2.'):
      converter.convert()

    # Invalid case: multiple serving function is provided.
    converter = lite.TFLiteConverterV2.experimental_from_jax(
        [simple_model, simple_model], [[
            ('input1', input_tensor),
            ('input2', input_tensor),
        ], [
            ('input1', input_tensor),
            ('input2', input_tensor),
        ]])
    with self.assertRaisesRegex(
        ValueError, 'Currently only support single serving function.'):
      converter.convert()
@test_util.run_v2_only
def testSingleInputModel(self):
if DISABLE_JAX_TEST:
return
def single_input(input_tensor):
return jnp.sin(input_tensor)
# Convert model.
input_tensor = jnp.zeros([10, 10])
converter = lite.TFLiteConverterV2.experimental_from_jax(
[single_input], [[('input_tensor', input_tensor)]])
tflite_model = converter.convert()
# Check the conversion metadata.
metadata = get_conversion_metadata(tflite_model)
self.assertIsNotNone(metadata)
self.assertEqual(metadata.environment.modelType, metadata_fb.ModelType.JAX)
# Check values from converted_model
input_data = np.random.random_sample((10, 10))
tf_input_data = tf.constant(input_data, dtype=np.float32)
actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0]
expected_value = single_input(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
@test_util.run_v2_only
def testMultipleInputsModel(self):
if DISABLE_JAX_TEST:
return
def multiple_inputs(input1, input2):
return input1 + input2
# Convert model.
input1 = jnp.zeros([10, 10])
input2 = jnp.zeros([10, 1])
converter = lite.TFLiteConverterV2.experimental_from_jax(
[multiple_inputs], [[('input1', input1), ('input2', input2)]])
tflite_model = converter.convert()
# Check values from converted_model
input1_data = np.random.random_sample((10, 10))
tf_input1_data = tf.constant(input1_data, dtype=np.float32)
input2_data = np.random.random_sample((10, 1))
tf_input2_data = tf.constant(input2_data, dtype=np.float32)
actual_value = self._evaluateTFLiteModel(
tflite_model, [tf_input1_data, tf_input2_data])[0]
expected_value = multiple_inputs(input1_data, input2_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
  @test_util.run_v2_only
  def testInputSignaturesModel(self):
    # NOTE(review): this body is byte-identical to testMultipleInputsModel
    # and does not exercise anything signature-specific — confirm intent.
    if DISABLE_JAX_TEST:
      return

    def multiple_inputs(input1, input2):
      return input1 + input2

    # Convert model.
    input1 = jnp.zeros([10, 10])
    input2 = jnp.zeros([10, 1])
    converter = lite.TFLiteConverterV2.experimental_from_jax(
        [multiple_inputs], [[('input1', input1), ('input2', input2)]])
    tflite_model = converter.convert()
    # Check values from converted_model
    input1_data = np.random.random_sample((10, 10))
    tf_input1_data = tf.constant(input1_data, dtype=np.float32)
    input2_data = np.random.random_sample((10, 1))
    tf_input2_data = tf.constant(input2_data, dtype=np.float32)
    actual_value = self._evaluateTFLiteModel(
        tflite_model, [tf_input1_data, tf_input2_data])[0]
    expected_value = multiple_inputs(input1_data, input2_data)
    self.assertAllClose(expected_value, actual_value, atol=1e-05)
@test_util.run_v2_only
def testModelWithParams(self):
if DISABLE_JAX_TEST:
return
def model(inputs, weights):
return jnp.matmul(weights, inputs)
weights = np.random.random_sample((10, 10))
serving_func = functools.partial(model, weights=weights)
# Convert model
input_tensor = jnp.zeros([10, 10])
converter = lite.TFLiteConverterV2.experimental_from_jax(
[serving_func], [[('inputs', input_tensor)]])
tflite_model = converter.convert()
# Check values from converted_model
input_data = np.random.random_sample((10, 10))
tf_input_data = tf.constant(input_data, dtype=np.float32)
actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0]
expected_value = serving_func(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
  @test_util.run_v2_only
  def testWhileLoop(self):
    """Tests conversion of a JAX model containing jax.lax.while_loop."""
    if DISABLE_JAX_TEST:
      return
    def condition(x):
      return jnp.sum(x, keepdims=False) < 100
    def body(x):
      return jnp.add(x, 2.0)
    def model(x):
      result = jax.lax.while_loop(condition, body, x)
      # NOTE(review): only the first row of the loop result is compared;
      # presumably intentional to keep the check small — confirm.
      return result[0]
    # Convert model.
    input_tensor = jnp.zeros([3, 3])
    converter = lite.TFLiteConverterV2.experimental_from_jax(
        [model], [[('x', input_tensor)]])
    tflite_model = converter.convert()
    # Check values from converted_model
    input_data = np.random.random_sample((3, 3))
    tf_input_data = tf.constant(input_data, dtype=np.float32)
    actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0]
    expected_value = model(input_data)
    self.assertAllClose(expected_value, actual_value, atol=1e-05)
class ControlFlowTest(lite_v2_test_util.ModelTest):
  """Tests conversion of TF control-flow constructs (cond/while/RNN)."""

  @test_util.run_v2_only
  def testCond(self):
    """Converts a tf.cond model and compares TFLite vs TF outputs."""
    input_data = {
        'x': tf.constant([1., 2.], shape=[1, 2]),
        'b': tf.constant(True)
    }
    weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)
    def true_fn(x):
      return tf.matmul(x, weights)
    def false_fn(x):
      return tf.add(x, weights)
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[1, 2], dtype=tf.float32),
        tf.TensorSpec(shape=(), dtype=tf.bool)
    ])
    def model(x, b):
      return tf.cond(
          b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
    concrete_func = model.get_concrete_function()
    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = concrete_func(**input_data)
    actual_value = self._evaluateTFLiteModel(
        tflite_model, [input_data['x'], input_data['b']])[0]
    self.assertAllClose(expected_value, actual_value)

  @test_util.run_v2_only
  def testCondWithFullIntegerQuantization(self):
    """Full-integer quantization of a tf.cond model converts successfully."""
    weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)
    def true_fn(x):
      return tf.matmul(x, weights)
    def false_fn(x):
      return tf.add(x, weights)
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[1, 2], dtype=tf.float32),
        tf.TensorSpec(shape=(), dtype=tf.bool)
    ])
    def model(x, b):
      return tf.cond(
          b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
    # Calibration data covers both branches of the cond.
    def calibration_gen():
      for _ in range(5):
        yield [
            np.random.uniform(-1, 1, size=(1, 2)).astype(np.float32),
            tf.constant(True)
        ]
      for _ in range(5):
        yield [
            np.random.uniform(-1, 1, size=(1, 2)).astype(np.float32),
            tf.constant(False)
        ]
    concrete_func = model.get_concrete_function()
    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = calibration_gen
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)

  @test_util.run_v2_only
  def testConverterErrorOnControlFlowV1Ops(self):
    """Control-flow V1 ops in a SavedModel raise a ConverterError."""
    filename = resource_loader.get_path_to_datafile(
        'testdata/control_flow_v1_saved_model')
    converter = lite.TFLiteConverterV2.from_saved_model(filename)
    with self.assertRaises(convert.ConverterError) as error:
      converter.convert()
    self.assertIn(
        'Failed to functionalize Control Flow V1 ops. Consider using Control '
        'Flow V2 ops instead. See https://www.tensorflow.org/api_docs/python/'
        'tf/compat/v1/enable_control_flow_v2.', str(error.exception))

  @test_util.run_v2_only
  def testStaticRnn(self):
    """Converts a static RNN built from an LSTMCell and compares outputs."""
    input_data = tf.constant(
        np.array(np.random.random_sample((3, 10)), dtype=np.float32))
    cell = tf.keras.layers.LSTMCell(10)
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[3, 10], dtype=tf.float32)])
    def model(x):
      seq = tf.split(x, 3, 0)
      return rnn.static_rnn(cell, seq, dtype=tf.float32, sequence_length=[1])
    concrete_func = model.get_concrete_function()
    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = concrete_func(input_data)[0]
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
    for expected, actual in zip(expected_value, actual_value):
      self.assertAllClose(expected, actual)

  @test_util.run_v2_only
  def testWhileLoop(self):
    """Converts a tf.while_loop model and compares TFLite vs TF outputs."""
    input_data = tf.constant([1., 2., 3., 4.], shape=[2, 2])
    weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)
    def condition(x):
      return tf.reduce_sum(x) < 100
    def body(x):
      return tf.add(x, weights)
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[2, 2], dtype=tf.float32)])
    def model(x):
      return tf.while_loop(condition, body, [x])
    concrete_func = model.get_concrete_function()
    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = concrete_func(input_data)[0]
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    self.assertAllClose(expected_value, actual_value)

  @test_util.run_v2_only
  def testDynamicRnn(self):
    """Converts a Keras RNN layer over LSTMCell and compares outputs."""
    input_data = tf.constant(
        np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32))
    cell = tf.keras.layers.LSTMCell(10)
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[3, 10, 10], dtype=tf.float32)])
    def model(x):
      rnn_layer = tf.keras.layers.RNN([cell], return_sequences=True)
      return rnn_layer(x)
    concrete_func = model.get_concrete_function()
    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = concrete_func(input_data)
    lite_outputs = self._evaluateTFLiteModel(tflite_model, [input_data])
    self.assertLen(lite_outputs, 1)
    actual_value = lite_outputs[0]
    for expected, actual in zip(expected_value, actual_value):
      self.assertAllClose(expected, actual)

  @parameterized.named_parameters(
      ('LSTMBatchSizeOne', tf.keras.layers.LSTM, True),
      ('LSTM', tf.keras.layers.LSTM, False),
      ('SimpleRNNBatchSizeOne', tf.keras.layers.SimpleRNN, True),
      ('SimpleRNN', tf.keras.layers.SimpleRNN, False),
      ('GRUBatchSizeOne', tf.keras.layers.GRU, True),
      ('GRU', tf.keras.layers.GRU, False))
  @test_util.run_v2_only
  def testKerasRNN(self, rnn_layer, default_to_single_batch):
    """Converts Keras LSTM/SimpleRNN/GRU layers and compares outputs."""
    input_data = tf.constant(
        np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
    rnn_obj = rnn_layer(units=10, input_shape=(10, 10))
    model = tf.keras.models.Sequential([
        tf.keras.layers.Input(shape=(10, 10), name='input'),
        rnn_obj,
    ])
    # Convert model.
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch
    if not default_to_single_batch:
      # Without the single-batch lowering, Select TF ops are required.
      converter.target_spec.supported_ops = [
          tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
      ]
    tflite_model = converter.convert()
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    # Check values from converted model.
    expected_value = model.predict(input_data)
    self.assertAllClose(expected_value, actual_value, atol=1e-05)

  @parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM),
                                  ('SimpleRNN', tf.keras.layers.SimpleRNN),
                                  ('GRU', tf.keras.layers.GRU))
  @test_util.run_v2_only
  def testKerasRNNMultiBatches(self, rnn_layer):
    """Converts Keras RNN layers with a fixed batch size larger than one."""
    input_data = tf.constant(
        np.array(np.random.random_sample((4, 10, 10)), dtype=np.float32))
    # Specify a fixed batch size(4) for the test model.
    x = tf.keras.layers.Input(batch_shape=(4, 10, 10))
    y = rnn_layer(units=10, input_shape=(10, 10))(x)
    model = tf.keras.Model(inputs=[x], outputs=[y])
    # Convert model.
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    tflite_model = converter.convert()
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    # Check values from converted model.
    expected_value = model.predict(input_data)
    self.assertAllClose(expected_value, actual_value, atol=1e-05)

  @parameterized.named_parameters(('ForceToUseBatchSizeOne', True),
                                  ('DontForceToUseBatchSizeOne', False))
  @test_util.run_v2_only
  def testKerasBidirectionalRNNReturnSequence(self, default_to_single_batch):
    """Converts a bidirectional LSTM with return_sequences=True."""
    input_data = tf.constant(
        np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Input(shape=(10, 10), name='input'))
    model.add(
        tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(units=10, return_sequences=True),
            input_shape=(10, 10)))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(5))
    model.add(tf.keras.layers.Activation('softmax'))
    # Convert model.
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch
    if not default_to_single_batch:
      converter.target_spec.supported_ops = [
          tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
      ]
    tflite_model = converter.convert()
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    # Check values from converted model.
    expected_value = model.predict(input_data)
    self.assertAllClose(expected_value, actual_value, atol=1e-05)

  @parameterized.named_parameters(('ForceToUseBatchSizeOne', True),
                                  ('DontForceToUseBatchSizeOne', False))
  @test_util.run_v2_only
  def testKerasBidirectionalRNN(self, default_to_single_batch):
    """Converts a bidirectional LSTM (last-output only) and compares."""
    input_data = tf.constant(
        np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Input(shape=(10, 10), name='input'))
    model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=10)))
    model.add(tf.keras.layers.Dense(5))
    model.add(tf.keras.layers.Activation('softmax'))
    # Convert model.
    converter = lite.TFLiteConverterV2.from_keras_model(model)
    converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch
    if not default_to_single_batch:
      converter.target_spec.supported_ops = [
          tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
      ]
    tflite_model = converter.convert()
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    # Check values from converted model.
    expected_value = model.predict(input_data)
    self.assertAllClose(expected_value, actual_value, atol=1e-05)
class GrapplerTest(lite_v2_test_util.ModelTest):
  """Tests that Grappler graph optimizations apply during conversion."""

  @test_util.run_v2_only
  def testConstantFolding(self):
    """Constant-folds tf.broadcast_to; verifies float and hybrid results."""
    # Constant folding handles the tf.broadcast_to operation which was not
    # supported by the TFLite at the time this test was added.
    input_data = tf.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.], shape=[3, 3])
    @tf.function
    def func(x):
      y_const = tf.constant([1., 2., 3.])
      y_broadcast = tf.broadcast_to(y_const, [3, 3])
      return tf.matmul(x, y_broadcast)
    root = tracking.AutoTrackable()
    root.f = func
    concrete_func = root.f.get_concrete_function(input_data)
    # Convert model.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               root)
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = root.f(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    self.assertAllClose(expected_value, actual_value)
    # Enable hybrid quantization, same result
    converter.optimizations = [lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    self.assertAllClose(expected_value, actual_value)
class UnknownShapes(lite_v2_test_util.ModelTest):
  """Tests conversion and quantization of models with unknown input shapes."""

  @test_util.run_v2_only
  def testMatMul(self):
    """Converts a matmul whose first dimension is dynamic."""
    input_data = tf.constant(
        np.array(np.random.random_sample((10, 4)), dtype=np.float32))
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None, 4], dtype=tf.float32)])
    def model(in_tensor):
      shape = tf.shape(in_tensor)
      fill = tf.transpose(tf.fill(shape, 1.))
      return tf.matmul(fill, in_tensor)
    concrete_func = model.get_concrete_function()
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = concrete_func(input_data)
    actual_value = self._evaluateTFLiteModel(
        tflite_model, [input_data], input_shapes=[([-1, 4], [10, 4])])[0]
    self.assertAllClose(expected_value, actual_value, atol=1e-06)

  def _getIntegerQuantizeModelWithUnknownShapes(self):
    """Returns (root, concrete_func, calibration_gen) for a dynamic matmul."""
    np.random.seed(0)
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None, 33], dtype=tf.float32)])
    def model(input_tensor):
      """Define a model with tf.MatMul and unknown shapes."""
      # We need the tensor to have more than 1024 elements for quantize_weights
      # to kick in. Thus, the [33, 33] shape.
      const_tensor = tf.constant(
          np.random.uniform(low=-10., high=10., size=[33, 33]),
          shape=[33, 33],
          dtype=tf.float32,
          name='inputB')
      shape = tf.shape(input_tensor)
      fill = tf.transpose(tf.fill(shape, 1.))
      mult = tf.matmul(fill, input_tensor)
      return tf.matmul(mult, const_tensor)
    root = tracking.AutoTrackable()
    root.f = model
    concrete_func = root.f.get_concrete_function()
    # Calibration data sweeps several batch sizes to exercise dynamic shape.
    def calibration_gen():
      for batch in range(5, 20, 5):
        for _ in range(5):
          yield [np.random.uniform(-1, 1, size=(batch, 33)).astype(np.float32)]
    return root, concrete_func, calibration_gen

  @test_util.run_v2_only
  def testMatMulQuantize(self):
    """Weight-only quantization preserves float I/O and shrinks the model."""
    root, concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()
    float_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [concrete_func], root)
    float_tflite_model = float_converter.convert()
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [concrete_func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_tflite_model = quantized_converter.convert()
    # The default input and output types should be float.
    quantized_interpreter = Interpreter(model_content=quantized_tflite_model)
    quantized_interpreter.allocate_tensors()
    input_details = quantized_interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])
    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(quantized_tflite_model), len(float_tflite_model))

  @test_util.run_v2_only
  def testMatMulCalibrateAndQuantize(self):
    """Calibration-based quantization works with a dynamic input shape."""
    root, concrete_func, calibration_gen = (
        self._getIntegerQuantizeModelWithUnknownShapes())
    float_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [concrete_func], root)
    float_tflite_model = float_converter.convert()
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
        [concrete_func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calibration_gen
    quantized_tflite_model = quantized_converter.convert()
    # The default input and output types should be float.
    quantized_interpreter = Interpreter(model_content=quantized_tflite_model)
    quantized_interpreter.allocate_tensors()
    input_details = quantized_interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])
    # Ensure that the quantized weights tflite model is smaller.
    self.assertLess(len(quantized_tflite_model), len(float_tflite_model))

  # NOTE(review): unlike the other tests in this class, this one lacks
  # @test_util.run_v2_only — confirm whether that is intentional.
  def testBatchMatMul(self):
    """Converts a batch matmul with a dynamic batch dimension."""
    input_data_1 = tf.constant(
        np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))
    input_data_2 = tf.constant(
        np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32),
        tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32)
    ])
    def model(in_tensor_1, in_tensor_2):
      return tf.matmul(in_tensor_1, in_tensor_2)
    concrete_func = model.get_concrete_function()
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    tflite_model = converter.convert()
    # Check values from converted model.
    expected_value = concrete_func(input_data_1, input_data_2)
    actual_value = self._evaluateTFLiteModel(
        tflite_model, [input_data_1, input_data_2],
        input_shapes=[([-1, 256, 256], [1, 256, 256])])[0]
    # NOTE(review): atol=4 is very loose — presumably to absorb accumulated
    # float error over 256-element dot products; confirm it is intentional.
    self.assertAllClose(expected_value, actual_value, atol=4)

  def testSizeInvalid(self):
    """A None dimension after the first is rejected by the legacy converter."""
    @tf.function(input_signature=[
        tf.TensorSpec(shape=[1, None, 16, 3], dtype=tf.float32)
    ])
    def model(in_tensor):
      return in_tensor + in_tensor
    concrete_func = model.get_concrete_function()
    # Test invalid shape. None after 1st dimension. Run with TOCO in order to
    # invoke shape checking code.
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    converter.experimental_new_converter = False
    with self.assertRaises(ValueError) as error:
      converter.convert()
    self.assertEqual(
        'None is only supported in the 1st dimension. Tensor '
        '\'in_tensor\' has invalid shape \'[1, None, 16, 3]\'.',
        str(error.exception))
class ResourceAndVariantTypes(lite_v2_test_util.ModelTest):
  """Tests conversion of models using variant (TensorMap/TensorList) and
  resource (Stack) types, with and without control flow."""

  @test_util.run_v2_only
  def testVariants(self):
    """A TensorMap insert/size model converts and runs repeatedly."""
    @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)])
    def model(v):
      m = map_ops.empty_tensor_map()
      k = tf.constant(1.0)
      p = tf.add(k, v)
      with ops.control_dependencies([m]):
        m2 = map_ops.tensor_map_insert(m, p, v)
        with ops.control_dependencies([m2]):
          return map_ops.tensor_map_size(m2)
    concrete_func = model.get_concrete_function()
    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],
                                                               model)
    # Variant ops require Select TF ops.
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    input_data = np.array([1.0], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    # Invoke repeatedly to check the result is stable across invocations.
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(1, actual_value)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(1, actual_value)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(1, actual_value)

  @test_util.run_v2_only
  def testVariantsWithCond(self):
    """A TensorMap used under tf.cond in a v1 SavedModel converts and runs."""
    def create_v1_saved_model():
      saved_model_dir = os.path.join(self.get_temp_dir(), 'variants_with_cond')
      with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
          m = map_ops.empty_tensor_map()
          def body(i, m):
            m = map_ops.tensor_map_insert(m, i, i)
            return i + 1, m
          in_tensor = tf.compat.v1.placeholder(
              shape=[1], dtype=tf.int32, name='input')
          _, result_m = tf.cond(in_tensor < 10, lambda: body(in_tensor, m),
                                lambda: body(in_tensor + 1, m))
          out_tensor = in_tensor + map_ops.tensor_map_size(result_m)
          inputs = {'x': in_tensor}
          outputs = {'z': out_tensor}
          saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
      return saved_model_dir
    saved_model_dir = create_v1_saved_model()
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    input_data = np.array([0], dtype=np.int32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # Input 0 + map size 1 after a single insert.
    expected_value = np.array([1], dtype=np.int32)
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(expected_value, actual_value)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(expected_value, actual_value)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(expected_value, actual_value)

  @test_util.run_v2_only
  def testVariantsWithWhile(self):
    """A TensorMap filled inside tf.while_loop converts and runs."""
    def create_v1_saved_model():
      saved_model_dir = os.path.join(self.get_temp_dir(), 'variants_with_while')
      with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
          m = map_ops.empty_tensor_map()
          def cond(i, m):
            del m
            return i < 10
          def body(i, m):
            m = map_ops.tensor_map_insert(m, i, i)
            return i + 1, m
          _, result_m = tf.while_loop(cond, body, [0, m])
          in_tensor = tf.compat.v1.placeholder(
              shape=[1], dtype=tf.int32, name='input')
          out_tensor = in_tensor + map_ops.tensor_map_size(result_m)
          inputs = {'x': in_tensor}
          outputs = {'z': out_tensor}
          saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
      return saved_model_dir
    saved_model_dir = create_v1_saved_model()
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    input_data = np.array([0], dtype=np.int32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    # The loop inserts keys 0..9, so the map size is 10 on every invoke.
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(10, actual_value)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(10, actual_value)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(10, actual_value)

  @test_util.run_v2_only
  def testResources(self):
    """A StackV2 push/pop model converts and runs repeatedly."""
    def create_v1_saved_model():
      saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_resources')
      with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
          in_tensor = tf.compat.v1.placeholder(
              shape=[1], dtype=tf.float32, name='input')
          stack = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
          w = tf.raw_ops.StackPushV2(handle=stack, elem=in_tensor)
          with ops.control_dependencies([w]):
            a = in_tensor + in_tensor
            with ops.control_dependencies([a]):
              out_tensor = a + tf.raw_ops.StackPopV2(
                  handle=stack, elem_type=tf.float32)
          inputs = {'x': in_tensor}
          outputs = {'z': out_tensor}
          saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
      return saved_model_dir
    saved_model_dir = create_v1_saved_model()
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    input_data = np.array([1.0], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    # (1 + 1) + popped 1 == 3 on every invocation.
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(3.0, actual_value)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(3.0, actual_value)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(3.0, actual_value)

  @test_util.run_v2_only
  def testResourcesWithCond(self):
    """A StackV2 pushed under tf.cond converts and runs."""
    def create_v1_saved_model():
      saved_model_dir = os.path.join(self.get_temp_dir(), 'resources_with_cond')
      with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
          in_tensor = tf.compat.v1.placeholder(
              shape=[1], dtype=tf.float32, name='input')
          def body(i, arr):
            n = tf.raw_ops.StackPushV2(
                handle=arr, elem=tf.cast(i, dtype=tf.float32))
            return n, arr
          arr = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
          n, result_arr = tf.cond(in_tensor < 10, lambda: body(0, arr),
                                  lambda: body(1, arr))
          with ops.control_dependencies([result_arr, n]):
            out_tensor = tf.raw_ops.StackPopV2(
                handle=result_arr, elem_type=tf.float32)
          inputs = {'x': in_tensor}
          outputs = {'a': out_tensor}
          saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
      return saved_model_dir
    saved_model_dir = create_v1_saved_model()
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    input_data = np.array([1.0], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # Input 1.0 < 10 takes the true branch, which pushed 0.
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(0.0, actual_value)

  @test_util.run_v2_only
  def testResourcesWithWhile(self):
    """A StackV2 pushed inside tf.while_loop converts and runs."""
    def create_v1_saved_model():
      saved_model_dir = os.path.join(self.get_temp_dir(),
                                     'resources_with_while')
      with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
          in_tensor = tf.compat.v1.placeholder(
              shape=[1], dtype=tf.float32, name='input')
          def cond(i, arr, m):
            del arr
            del m
            return i < 10
          def body(i, arr, m):
            del m
            n = tf.raw_ops.StackPushV2(
                handle=arr, elem=tf.cast(i, dtype=tf.float32))
            return i + 1, arr, n
          arr = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
          _, result_arr, n = tf.while_loop(cond, body, [0, arr, 0.0])
          with ops.control_dependencies([result_arr, n]):
            out_tensor = tf.raw_ops.StackPopV2(
                handle=result_arr, elem_type=tf.float32)
          inputs = {'x': in_tensor}
          outputs = {'a': out_tensor}
          saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
      return saved_model_dir
    saved_model_dir = create_v1_saved_model()
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    input_data = np.array([1.0], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # The loop pushed 0..9; the pop returns the last pushed value, 9.
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(9.0, actual_value)

  @parameterized.named_parameters(('EnableLoweringTensorListOps', True),
                                  ('DisableLoweringTensorListOps', False))
  @test_util.run_v2_only
  def testTensorListWithStaticSize(self, lower_tensor_list_ops):
    """A fixed-size TensorArray converts with or without list lowering."""
    def create_v1_saved_model():
      saved_model_dir = os.path.join(self.get_temp_dir(),
                                     'simple_mutable_variable')
      with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
          in_tensor = tf.compat.v1.placeholder(
              shape=[1], dtype=tf.float32, name='input')
          ta = tf.TensorArray(
              tf.float32, size=3, dynamic_size=False, clear_after_read=False)
          ta = ta.write(0, 10.0)
          ta = ta.write(1, 20.0)
          ta = ta.write(2, 30.0)
          out_tensor = ta.read(0) + ta.read(2)
          inputs = {'x': in_tensor}
          outputs = {'z': out_tensor}
          saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
      return saved_model_dir
    saved_model_dir = create_v1_saved_model()
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    if not lower_tensor_list_ops:
      converter.target_spec.supported_ops = [
          tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
      ]
    converter._experimental_lower_tensor_list_ops = lower_tensor_list_ops
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    input_data = np.array([1.0], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # ta.read(0) + ta.read(2) == 10.0 + 30.0.
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(40.0, actual_value)

  @parameterized.named_parameters(('EnableLoweringTensorListOps', True),
                                  ('DisableLoweringTensorListOps', False))
  @test_util.run_v2_only
  def testTensorListWithDynamicSize(self, lower_tensor_list_ops):
    """A dynamic-size TensorArray fails lowering, then converts via Select."""
    def create_v1_saved_model():
      saved_model_dir = os.path.join(self.get_temp_dir(),
                                     'simple_mutable_variable')
      with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
          in_tensor = tf.compat.v1.placeholder(
              shape=[1], dtype=tf.float32, name='input')
          ta = tf.TensorArray(
              tf.float32, size=0, dynamic_size=True, clear_after_read=False)
          ta = ta.write(0, 10.0)
          ta = ta.write(1, 20.0)
          ta = ta.write(2, 30.0)
          out_tensor = ta.read(0) + ta.read(2)
          inputs = {'x': in_tensor}
          outputs = {'z': out_tensor}
          saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
      return saved_model_dir
    saved_model_dir = create_v1_saved_model()
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    if lower_tensor_list_ops:
      # Lowering dynamic tensor lists is unsupported and must error out.
      with self.assertRaises(convert.ConverterError) as error:
        converter.convert()
      self.assertIn(
          'Lowering tensor list ops is failed. Please consider using Select '
          'TF ops and disabling `_experimental_lower_tensor_list_ops` flag in '
          'the TFLite converter object.', str(error.exception))
    # Fall back to Select TF ops, which handle dynamic tensor lists.
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()
    self.assertIsNotNone(tflite_model)
    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.allocate_tensors()
    input_data = np.array([1.0], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual(40.0, actual_value)
class CalibrateAndQuantizeWithCustomOpTest(lite_v2_test_util.ModelTest):
  def _createGraphWithCustomOp(self):
    """Builds a v1 SavedModel containing one custom 'Double' op.

    Returns:
      Tuple of (saved_model_dir, calibration_gen) where calibration_gen
      yields random (1, 4) float32 inputs for post-training calibration.
    """
    # Create a graph that has one double op.
    np.random.seed(0)
    saved_model_dir = os.path.join(self.get_temp_dir(), 'double_model')
    with ops.Graph().as_default():
      with tf.compat.v1.Session() as sess:
        in_tensor = tf.compat.v1.placeholder(
            shape=[1, 4], dtype=dtypes.float32, name='input')
        out_tensor = double_op.double(in_tensor)
        inputs = {'x': in_tensor}
        outputs = {'z': out_tensor}
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    def calibration_gen():
      for _ in range(100):
        yield [np.random.uniform(-1, 1, size=(1, 4)).astype(np.float32)]
    return (saved_model_dir, calibration_gen)
def testCustomOpRegistererByName(self):
"""Test a calibration with custom op registered by name."""
saved_model_dir, calibration_gen = self._createGraphWithCustomOp()
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.optimizations = [lite.Optimize.DEFAULT]
converter.representative_dataset = calibration_gen
converter.allow_custom_ops = True
converter.target_spec._experimental_custom_op_registerers = [
'TF_TestRegisterer'
]
tflite_model = converter.convert()
self.assertTrue(tflite_model)
self.assertGreater(test_registerer.get_num_test_registerer_calls(), 0)
self.assertIn('Double', tflite_test_util.get_ops_list(tflite_model))
# Check the conversion metadata.
metadata = get_conversion_metadata(tflite_model)
self.assertIsNotNone(metadata)
self.assertEqual(metadata.options.allowCustomOps, True)
# Check the model works with custom ops.
interpreter = InterpreterWithCustomOps(
model_content=tflite_model, custom_op_registerers=['TF_TestRegisterer'])
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([[0.0, 0.1, 0.2, 0.3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([[0.0, 0.2, 0.4, 0.6]], dtype=np.float32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertArrayNear(expected_output[0], output_data[0], err=1e-2)
def testCustomOpRegistererByFunc(self):
"""Test a calibration with custom op registered by function."""
saved_model_dir, calibration_gen = self._createGraphWithCustomOp()
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.optimizations = [lite.Optimize.DEFAULT]
converter.representative_dataset = calibration_gen
converter.allow_custom_ops = True
converter.target_spec._experimental_custom_op_registerers = [
test_registerer.TF_TestRegisterer
]
tflite_model = converter.convert()
self.assertTrue(tflite_model)
self.assertGreater(test_registerer.get_num_test_registerer_calls(), 0)
self.assertIn('Double', tflite_test_util.get_ops_list(tflite_model))
# Check the model works with custom ops.
interpreter = InterpreterWithCustomOps(
model_content=tflite_model,
custom_op_registerers=[test_registerer.TF_TestRegisterer])
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
test_input = np.array([[0.0, 0.1, 0.2, 0.3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_details = interpreter.get_output_details()
expected_output = np.array([[0.0, 0.2, 0.4, 0.6]], dtype=np.float32)
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertArrayNear(expected_output[0], output_data[0], err=1e-2)
def testCustomOpRegistererFailure(self):
"""Test a calibration with wrong custom op registerer."""
saved_model_dir, calibration_gen = self._createGraphWithCustomOp()
bogus_name = 'CompletelyBogusRegistererName'
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.optimizations = [lite.Optimize.DEFAULT]
converter.representative_dataset = calibration_gen
converter.allow_custom_ops = True
converter.target_spec._experimental_custom_op_registerers = [bogus_name]
with self.assertRaisesRegex(
ValueError, 'Looking up symbol \'' + bogus_name + '\' failed'):
converter.convert()
class IntermediatesTest(lite_v2_test_util.ModelTest):
  """Tests reading intermediate tensor values back from the interpreter."""

  def _run(self, experimental_preserve_all_tensors):
    """Convert a small add-chain model, invoke it, and dump every tensor.

    Returns:
      Tuple of ({tensor name: value or None}, model output value).
    """

    @tf.function
    def f(x):
      y = tf.add(x, x, name='y')
      z = tf.add(y, y, name='z')
      w = tf.add(z, z, name='w')
      return w

    # NOTE this is exactly representable as a float, as are the intermediates
    # of f, so direct comparison is ok below.
    input_data = np.array(2.0, np.float32)
    flatbuffer = lite.TFLiteConverterV2.from_concrete_functions(
        [f.get_concrete_function(input_data)], f).convert()

    interp = Interpreter(
        model_content=flatbuffer,
        experimental_preserve_all_tensors=experimental_preserve_all_tensors)
    interp.allocate_tensors()
    interp.set_tensor(interp.get_input_details()[0]['index'], input_data)
    interp.invoke()
    result = interp.get_tensor(interp.get_output_details()[0]['index'])

    by_name = {}
    for detail in interp.get_tensor_details():
      # With Tensorflow Lite default delegate applied to the model graph, the
      # access to original tensors of a delegated op could cause a ValueError
      # (i.e. 'Tensor data is null. Run allocate_tensors() first') to be thrown
      # out because the tensor memory isn't allocated at all.
      value = None
      try:
        value = interp.get_tensor(detail['index'])
      except ValueError:
        pass
      by_name[detail['name']] = value
    return (by_name, result)

  def testPreserve(self):
    tensors, result = self._run(experimental_preserve_all_tensors=True)
    # All intermediates should be readable and result should be correct.
    self.assertAllClose(tensors['x'], 2.0)
    self.assertAllClose(tensors['y'], 4.0)
    self.assertAllClose(tensors['z'], 8.0)
    self.assertAllClose(result, 16.0)

  def testNoPreserve(self):
    tensors, result = self._run(experimental_preserve_all_tensors=False)
    # One of the intermediates should be wrong if preserve is not true, but
    # the result should be ok. Input should still be ok for repeated
    # invocation.
    self.assertAllClose(tensors['x'], 2.0)
    self.assertTrue(tensors['y'] != 4.0 or tensors['z'] != 8.0)
    self.assertAllClose(result, 16.0)
class DatasetOpsTest(lite_v2_test_util.ModelTest):
  """Conversion coverage for tf.data ops through Select TF ops."""

  @test_util.run_v2_only
  def testReduceDataset(self):
    """dataset.reduce should convert and evaluate via Select TF ops."""

    @tf.function
    def model():
      dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4])
      output = dataset.reduce(np.int32(0), lambda x, y: x + y)
      return output

    converter = lite.TFLiteConverterV2.from_concrete_functions(
        [model.get_concrete_function()], model)
    # Dataset ops are not TFLite builtins, so flex (Select TF) ops are needed.
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    flatbuffer = converter.convert()
    self.assertIsNotNone(flatbuffer)

    # Run the converted model and check the reduction result (1+2+3+4 = 10).
    interp = Interpreter(model_content=flatbuffer)
    out_detail = interp.get_output_details()[0]
    interp.allocate_tensors()
    interp.invoke()
    self.assertEqual(10, interp.get_tensor(out_detail['index']))
class SparsityTest(lite_v2_test_util.ModelTest):
  """Checks the sparsity modes recorded in conversion metadata."""

  def _getSparsificableModel(self, matrix_b_values):
    """Return (root, concrete_function) for matmul+relu over a constant.

    The 32 values in *matrix_b_values* become a [4, 8] weight matrix whose
    zero pattern determines which sparsity mode the converter detects.
    """
    np.random.seed(0)
    root = tracking.AutoTrackable()

    @tf.function(
        input_signature=[tf.TensorSpec(shape=[16, 4], dtype=tf.float32)])
    def func(inp):
      matrix_b = tf.constant(matrix_b_values, dtype=tf.float32)
      matrix_b = tf.reshape(matrix_b, [4, 8])
      matmul = tf.matmul(inp, matrix_b, transpose_a=False, transpose_b=False)
      output = tf.nn.relu(matmul, name='output')
      return output

    root.f = func
    return (root, root.f.get_concrete_function())

  def _sparsifyAndGetMetadata(self, matrix_b_values):
    """Convert with EXPERIMENTAL_SPARSITY and return conversion metadata."""
    root, func = self._getSparsificableModel(matrix_b_values)
    converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)
    converter.optimizations = [lite.Optimize.EXPERIMENTAL_SPARSITY]
    model_bytes = converter.convert()
    self.assertIsNotNone(model_bytes)
    metadata = get_conversion_metadata(model_bytes)
    self.assertIsNotNone(metadata)
    return metadata

  def testRandomSparsity(self):
    metadata = self._sparsifyAndGetMetadata([
        0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 1
    ])
    self.assertAllEqual([metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY],
                        metadata.options.modelOptimizationModes)

  def testBlockSparsity(self):
    # Same 32-element weight layout, but with zeros everywhere except one
    # entry, which the converter reports as block sparsity.
    metadata = self._sparsifyAndGetMetadata([0] * 30 + [1, 0])
    self.assertAllEqual([metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY],
                        metadata.options.modelOptimizationModes)
# Allow the test suite to be executed directly as a script.
if __name__ == '__main__':
  test.main()
| 41.243744 | 96 | 0.695631 |
1b8efba30539a9d1440df27644567346e0fc689a | 2,959 | py | Python | python/geopandas.py | mapattacker/cheatsheets | e25bec531fdd06e01b39d6c55b11226ba26dac5b | [
"MIT"
] | 7 | 2017-10-18T22:42:42.000Z | 2021-02-17T08:47:11.000Z | python/geopandas.py | mapattacker/cheatsheets | e25bec531fdd06e01b39d6c55b11226ba26dac5b | [
"MIT"
] | null | null | null | python/geopandas.py | mapattacker/cheatsheets | e25bec531fdd06e01b39d6c55b11226ba26dac5b | [
"MIT"
] | 5 | 2019-12-16T20:07:27.000Z | 2022-01-31T20:23:49.000Z | # https://github.com/jorisvandenbossche/geopandas-tutorial
# NOTE(review): this file is a cheat sheet, not an executable script -- it
# mixes shell commands, an IPython magic and Python snippets for reference,
# so it will not run as-is.
# https://github.com/jorisvandenbossche/geopandas-tutorial
# https://github.com/geopandas/geopandas
# http://geopandas.org/index.html
# Installation, create a virtual env (shell commands, run in a terminal)
conda create -n geopandas
source activate geopandas # activate vm
conda install -c conda-forge geopandas
conda install jupyter geopandas # for using within jupyter
source deactivate geopandas # deactivate vm
import geopandas as gpd
import matplotlib.pyplot as plt
# IPython magic: high-resolution inline figures in Jupyter
%config InlineBackend.figure_format = 'retina'
# READ SHAPEFILE --------------------
df = gpd.read_file(r'ne_110m_admin_0_countries.shp')
df.to_file('new.shp')
# supported export file types ('r' = read, 'w' = write, 'a' = append)
import fiona; fiona.supported_drivers
{'AeronavFAA': 'r',
'ARCGEN': 'r',
'BNA': 'raw',
'DXF': 'raw',
'OpenFileGDB': 'r',
'ESRI Shapefile': 'raw',
'GeoJSON': 'rw',
'GPKG': 'rw',
'GPX': 'raw',
'GPSTrackMaker': 'raw',
'Idrisi': 'r',
'MapInfo File': 'raw',
'DGN': 'raw',
'PCIDSK': 'r',
'SEGY': 'r',
'SUA': 'r'}
# DISPLAY MAP --------------------
# choose colors
# https://matplotlib.org/users/colormaps.html
df.plot(figsize=(10,10), cmap='tab20'); #categorical
df.plot(figsize=(10,10), column='numeric', cmap='YlOrRd', legend=True); #chloropeth
# arguments are similar to matplotlib
borneo.plot(figsize=(15,15),
column='id',
marker='s',
s=8);
plt.show()
# COORDINATE REFERENCE SYSTEM --------------------
# SVY21; epsg=3414
# WGS84; epsg=4326
# Web Mercator; epsg=3857
df.crs
# {'init': 'epsg:4326'}
df_mercator = df.to_crs(epsg=3857) # change CRS to mercator
df_mercator.crs
# {'init': 'epsg:3857', 'no_defs': True}
# CONVERT CSV INTO GEOPANDAS
import geopandas as gpd
from shapely.geometry import Point
geometry = [Point(xy) for xy in zip(df.Lon, df.Lat)]
df = df.drop(['Lon', 'Lat'], axis=1)
crs = {'init': 'epsg:4326'}
gdf = gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
# CONVERT DF INTO GEOPANDAS
gdf = gpd.GeoDataFrame(coord, geometry=gpd.points_from_xy(coord.long, coord.lat))
# FILTER, as with pandas --------------------
df[df.SOVEREIGNT=='Australia']
# DISSOLVE --------------------
df2 = df.dissolve(by='CONTINENT')
df2 = df.dissolve(by='CONTINENT', aggfunc='sum') # sum up all continuous columns
# SIMPLE MANIPULATIONS --------------------
# CENTROID
world['centroid_column'] = world.centroid # set centroid column
world = world.set_geometry('centroid_column') # change geometry from polygon to centroid point
# AREA
df2['area'] = df2.area
# JOIN --------------------
# attribute join
# can only use a left join by merge
df.merge(df2, on='iso_a3')
# spatial join
# op can be set to "intersects", "within" or "contains"
cities_with_country = geopandas.sjoin(cities, countries, how="inner", op='intersects')
# OVERLAY --------------------
geopandas.overlay(df1, df2, how='union')
geopandas.overlay(df1, df2, how='intersection')
geopandas.overlay(df1, df2, how='symmetric_difference')
geopandas.overlay(df1, df2, how='difference')
8a33b57e9d13fe62f056a866ca28a6aac3bef786 | 9,622 | py | Python | homeassistant/helpers/script.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 1 | 2019-04-22T06:05:09.000Z | 2019-04-22T06:05:09.000Z | homeassistant/helpers/script.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | null | null | null | homeassistant/helpers/script.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 1 | 2021-09-20T01:52:31.000Z | 2021-09-20T01:52:31.000Z | """Helpers to execute scripts."""
import logging
from itertools import islice
from typing import Optional, Sequence
import voluptuous as vol
from homeassistant.core import HomeAssistant, Context, callback
from homeassistant.const import CONF_CONDITION, CONF_TIMEOUT
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import (
service, condition, template as template,
config_validation as cv)
from homeassistant.helpers.event import (
async_track_point_in_utc_time, async_track_template)
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.dt as date_util
from homeassistant.util.async_ import (
run_coroutine_threadsafe, run_callback_threadsafe)
_LOGGER = logging.getLogger(__name__)
CONF_ALIAS = 'alias'
CONF_SERVICE = 'service'
CONF_SERVICE_DATA = 'data'
CONF_SEQUENCE = 'sequence'
CONF_EVENT = 'event'
CONF_EVENT_DATA = 'event_data'
CONF_EVENT_DATA_TEMPLATE = 'event_data_template'
CONF_DELAY = 'delay'
CONF_WAIT_TEMPLATE = 'wait_template'
CONF_CONTINUE = 'continue_on_timeout'
def call_from_config(hass: HomeAssistant, config: ConfigType,
                     variables: Optional[Sequence] = None,
                     context: Optional[Context] = None) -> None:
    """Validate *config* as a script sequence and run it synchronously."""
    script_obj = Script(hass, cv.SCRIPT_SCHEMA(config))
    script_obj.run(variables, context)
class Script():
    """Representation of a script.

    Executes a validated action *sequence* step by step on the Home Assistant
    event loop.  Delay and wait-template actions suspend execution: the
    script stores its position in ``_cur`` and re-enters ``async_run`` from a
    scheduled callback to continue where it left off.
    """

    def __init__(self, hass: HomeAssistant, sequence, name: str = None,
                 change_listener=None) -> None:
        """Initialize the script."""
        self.hass = hass
        self.sequence = sequence
        template.attach(hass, self.sequence)
        self.name = name
        # Callable invoked whenever the running state of the script changes.
        self._change_listener = change_listener
        # Index of the next action to execute; -1 means "not running".
        self._cur = -1
        self.last_action = None
        self.last_triggered = None
        # Only scripts that can suspend (delay/wait) can be cancelled.
        self.can_cancel = any(CONF_DELAY in action or CONF_WAIT_TEMPLATE
                              in action for action in self.sequence)
        # Unsubscribe callbacks for pending delay/wait/timeout listeners.
        self._async_listener = []
        self._template_cache = {}
        self._config_cache = {}

    @property
    def is_running(self) -> bool:
        """Return true if script is on."""
        return self._cur != -1

    def run(self, variables=None, context=None):
        """Run script."""
        run_coroutine_threadsafe(
            self.async_run(variables, context), self.hass.loop).result()

    async def async_run(self, variables: Optional[Sequence] = None,
                        context: Optional[Context] = None) -> None:
        """Run script.

        This method is a coroutine.
        """
        self.last_triggered = date_util.utcnow()
        if self._cur == -1:
            self._log('Running script')
            self._cur = 0

        # Unregister callback if we were in a delay or wait but turn on is
        # called again. In that case we just continue execution.
        self._async_remove_listener()

        # Resume from self._cur, which is 0 on a fresh run or the action
        # after the delay/wait when re-entered from a scheduled callback.
        for cur, action in islice(enumerate(self.sequence), self._cur, None):

            if CONF_DELAY in action:
                # Call ourselves in the future to continue work
                unsub = None

                @callback
                def async_script_delay(now):
                    """Handle delay."""
                    # pylint: disable=cell-var-from-loop
                    self._async_listener.remove(unsub)
                    self.hass.async_create_task(
                        self.async_run(variables, context))

                delay = action[CONF_DELAY]

                try:
                    # The delay may be a template; render and re-validate it.
                    if isinstance(delay, template.Template):
                        delay = vol.All(
                            cv.time_period,
                            cv.positive_timedelta)(
                                delay.async_render(variables))
                except (TemplateError, vol.Invalid) as ex:
                    _LOGGER.error("Error rendering '%s' delay template: %s",
                                  self.name, ex)
                    break

                unsub = async_track_point_in_utc_time(
                    self.hass, async_script_delay,
                    date_util.utcnow() + delay
                )
                self._async_listener.append(unsub)

                # Remember where to resume, then suspend execution.
                self._cur = cur + 1
                if self._change_listener:
                    self.hass.async_add_job(self._change_listener)
                return

            if CONF_WAIT_TEMPLATE in action:
                # Call ourselves in the future to continue work
                wait_template = action[CONF_WAIT_TEMPLATE]
                wait_template.hass = self.hass

                # check if condition already okay
                if condition.async_template(
                        self.hass, wait_template, variables):
                    continue

                @callback
                def async_script_wait(entity_id, from_s, to_s):
                    """Handle script after template condition is true."""
                    self._async_remove_listener()
                    self.hass.async_create_task(
                        self.async_run(variables, context))

                self._async_listener.append(async_track_template(
                    self.hass, wait_template, async_script_wait, variables))

                # Remember where to resume, then suspend execution.
                self._cur = cur + 1
                if self._change_listener:
                    self.hass.async_add_job(self._change_listener)

                # Optionally abort (or continue) if the wait takes too long.
                if CONF_TIMEOUT in action:
                    self._async_set_timeout(
                        action, variables, context,
                        action.get(CONF_CONTINUE, True))

                return

            if CONF_CONDITION in action:
                # A failing condition stops the whole script.
                if not self._async_check_condition(action, variables):
                    break

            elif CONF_EVENT in action:
                self._async_fire_event(action, variables, context)

            else:
                await self._async_call_service(action, variables, context)

        # Sequence finished (or aborted): reset to the not-running state.
        self._cur = -1
        self.last_action = None
        if self._change_listener:
            self.hass.async_add_job(self._change_listener)

    def stop(self) -> None:
        """Stop running script."""
        run_callback_threadsafe(self.hass.loop, self.async_stop).result()

    def async_stop(self) -> None:
        """Stop running script."""
        if self._cur == -1:
            return

        self._cur = -1
        self._async_remove_listener()
        if self._change_listener:
            self.hass.async_add_job(self._change_listener)

    async def _async_call_service(self, action, variables, context):
        """Call the service specified in the action.

        This method is a coroutine.
        """
        self.last_action = action.get(CONF_ALIAS, 'call service')
        self._log("Executing step %s" % self.last_action)
        await service.async_call_from_config(
            self.hass, action,
            blocking=True,
            variables=variables,
            validate_config=False,
            context=context
        )

    def _async_fire_event(self, action, variables, context):
        """Fire an event."""
        self.last_action = action.get(CONF_ALIAS, action[CONF_EVENT])
        self._log("Executing step %s" % self.last_action)
        event_data = dict(action.get(CONF_EVENT_DATA, {}))
        if CONF_EVENT_DATA_TEMPLATE in action:
            try:
                # Rendered template data overrides the static event data.
                event_data.update(template.render_complex(
                    action[CONF_EVENT_DATA_TEMPLATE], variables))
            except TemplateError as ex:
                _LOGGER.error('Error rendering event data template: %s', ex)

        self.hass.bus.async_fire(action[CONF_EVENT],
                                 event_data, context=context)

    def _async_check_condition(self, action, variables):
        """Test if condition is matching."""
        # Cache the compiled condition keyed by the action's config items.
        config_cache_key = frozenset((k, str(v)) for k, v in action.items())
        config = self._config_cache.get(config_cache_key)
        if not config:
            config = condition.async_from_config(action, False)
            self._config_cache[config_cache_key] = config

        self.last_action = action.get(CONF_ALIAS, action[CONF_CONDITION])
        check = config(self.hass, variables)
        self._log("Test condition {}: {}".format(self.last_action, check))
        return check

    def _async_set_timeout(self, action, variables, context,
                           continue_on_timeout):
        """Schedule a timeout to abort or continue script."""
        timeout = action[CONF_TIMEOUT]
        unsub = None

        @callback
        def async_script_timeout(now):
            """Call after timeout is retrieve."""
            self._async_listener.remove(unsub)

            # Check if we want to continue to execute
            # the script after the timeout
            if continue_on_timeout:
                self.hass.async_create_task(
                    self.async_run(variables, context))
            else:
                self._log("Timeout reached, abort script.")
                self.async_stop()

        unsub = async_track_point_in_utc_time(
            self.hass, async_script_timeout,
            date_util.utcnow() + timeout
        )
        self._async_listener.append(unsub)

    def _async_remove_listener(self):
        """Remove point in time listener, if any."""
        for unsub in self._async_listener:
            unsub()
        self._async_listener.clear()

    def _log(self, msg):
        """Logger helper."""
        if self.name is not None:
            msg = "Script {}: {}".format(self.name, msg)

        _LOGGER.info(msg)
| 36.037453 | 77 | 0.590002 |
a6750c566cebd318a96ba4cab6d91c60c097d597 | 116 | py | Python | literacy/__init__.py | tonyfast/literacy | c1713a1e2f0aa68fe190a33c73d6a97eccf2ee1e | [
"BSD-3-Clause"
] | 13 | 2016-04-10T19:11:11.000Z | 2021-01-25T00:22:23.000Z | literacy/__init__.py | tonyfast/literacy | c1713a1e2f0aa68fe190a33c73d6a97eccf2ee1e | [
"BSD-3-Clause"
] | 5 | 2017-09-25T16:08:36.000Z | 2017-10-18T03:26:22.000Z | literacy/__init__.py | tonyfast/literacy | c1713a1e2f0aa68fe190a33c73d6a97eccf2ee1e | [
"BSD-3-Clause"
] | 1 | 2016-04-13T00:08:52.000Z | 2016-04-13T00:08:52.000Z | from .literate import load_ipython_extension, unload_ipython_extension
from . import literate
from . import template | 38.666667 | 70 | 0.862069 |
cafdb267580b60695b0805c0ea65811218bb7872 | 1,884 | py | Python | Lexicographer/orchestration_fields_generator.py | GaryHughes/Swift.Fix | 48cc96ee626073d4c653417e3fe174e8a8697526 | [
"MIT"
] | null | null | null | Lexicographer/orchestration_fields_generator.py | GaryHughes/Swift.Fix | 48cc96ee626073d4c653417e3fe174e8a8697526 | [
"MIT"
] | null | null | null | Lexicographer/orchestration_fields_generator.py | GaryHughes/Swift.Fix | 48cc96ee626073d4c653417e3fe174e8a8697526 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
def generate_orchestration_fields(prefix, orchestration, outdir, namespace):
filename = '{}Fields.swift'.format(prefix)
path = os.path.join(outdir, filename)
print('regenerating ' + path)
sorted_fields = sorted(orchestration.fields_by_tag.values(), key=lambda x: int(x.id))
with open(path, 'w') as file:
file.write('public struct {} {{\n\n'.format(namespace))
for field in sorted_fields:
try:
code_set = orchestration.code_sets[field.type]
file.write(' public enum {} : String, CaseIterable {{\n\n'.format(field.name))
file.write(' public static var tag: Int {\n')
file.write(' {}\n'.format(field.id))
file.write(' }\n\n')
for code in code_set.codes:
file.write(' case {} = "{}"\n'.format(code.name, code.value))
file.write(' }\n\n')
except:
# The Swift compiler cannot synthesize RawRepresentable for String if the enum has no cases
file.write(' public enum {} : RawRepresentable {{\n\n'.format(field.name))
file.write(' public typealias RawValue = String\n\n')
file.write(' public static var tag: Int {\n')
file.write(' {}\n'.format(field.id))
file.write(' }\n\n')
file.write(' public init?(rawValue: RawValue) {\n')
file.write(' return nil')
file.write(' }\n\n')
file.write(' public var rawValue: RawValue {\n')
file.write(' return ""\n')
file.write(' }\n\n')
file.write(' }\n\n')
file.write('}\n')
| 49.578947 | 107 | 0.494692 |
abdd1ade96559e0a9f0023800421b47840f21caa | 2,524 | py | Python | lastversion/GitLabRepoSession.py | dvershinin/whatversion | 72341917136c35cde24fa12c92c9616abc65e7f3 | [
"BSD-2-Clause"
] | null | null | null | lastversion/GitLabRepoSession.py | dvershinin/whatversion | 72341917136c35cde24fa12c92c9616abc65e7f3 | [
"BSD-2-Clause"
] | null | null | null | lastversion/GitLabRepoSession.py | dvershinin/whatversion | 72341917136c35cde24fa12c92c9616abc65e7f3 | [
"BSD-2-Clause"
] | null | null | null | import logging
import os
from dateutil import parser
from .ProjectHolder import ProjectHolder
log = logging.getLogger(__name__)
class GitLabRepoSession(ProjectHolder):
    """Project holder backed by the GitLab REST API v4."""

    DEFAULT_HOSTNAME = 'gitlab.com'
    # Domains gitlab.example.com
    SUBDOMAIN_INDICATOR = "gitlab"

    def __init__(self, repo, hostname):
        """Set up an API session for *repo* hosted at *hostname*.

        Reads an optional personal access token from the ``GITLAB_PA_TOKEN``
        environment variable and sends it as the ``Private-Token`` header.
        """
        super(GitLabRepoSession, self).__init__()
        self.pa_token = os.getenv("GITLAB_PA_TOKEN")
        self.hostname = hostname
        if not self.hostname:
            self.hostname = self.DEFAULT_HOSTNAME
        if self.pa_token:
            log.info('Using Personal Access token.')
            self.headers.update({'Private-Token': "{}".format(self.pa_token)})
        self.api_base = 'https://{}/api/v4'.format(self.hostname)
        self.set_repo(repo)
        # GitLab accepts the URL-encoded "namespace/project" path as the
        # project id in API URLs.
        self.repo_id = self.repo.replace('/', '%2F')

    def repo_query(self, uri):
        """GET *uri* under this project's /repository API subtree."""
        url = '{}/projects/{}/repository{}'.format(self.api_base, self.repo_id, uri)
        return self.get(url)

    def get_latest(self, pre_ok=False, major=None):
        """Return the latest tag as a release dict, or None if none found."""
        ret = None
        # gitlab returns tags by updated in desc order, this is just what we want :)
        r = self.repo_query('/tags')
        if r.status_code == 200:
            for t in r.json():
                tag = t['name']
                version = self.sanitize_version(tag, pre_ok, major)
                if not version:
                    # Tag did not yield an acceptable version; try the next.
                    continue
                if not ret or ret and version > ret['version']:
                    log.info("Setting version as current selection: {}.".format(version))
                    ret = t
                    ret['tag_name'] = tag
                    ret['tag_date'] = parser.parse(t['commit']['created_at'])
                    ret['version'] = version
                    ret['type'] = 'tag'
                # stop on first tag, because gitlab is good (c)
                break
        return ret

    def release_download_url(self, release, shorter=False):
        """Get release download URL."""
        if shorter:
            log.info('Shorter URLs are not supported for GitLab yet')
        # https://gitlab.com/onedr0p/sonarr-episode-prune/-/archive/v3.0.0/sonarr-episode-prune-v3.0.0.tar.gz
        ext = 'zip' if os.name == 'nt' else 'tar.gz'
        tag = release['tag_name']
        url_format = 'https://{}/{}/-/archive/{}/{}-{}.{}'
        return url_format.format(self.hostname, self.repo, tag, self.repo.split('/')[1], tag, ext)

    def repo_license(self, tag):
        """Return license information for *tag* (not implemented yet)."""
        # TODO implement
        pass
| 36.57971 | 109 | 0.573296 |
a06000d5987a36e62dfb47374a0975dbe6592e7a | 844 | py | Python | examples/Spark-ETL+XGBoost/utility/python/com/nvidia/spark/examples/main.py | gerashegalov/spark-rapids-examples | b487413ed37bde2791ada67557a4742e54711261 | [
"Apache-2.0"
] | 23 | 2021-08-17T15:20:10.000Z | 2022-03-04T02:31:07.000Z | examples/Spark-ETL+XGBoost/utility/python/com/nvidia/spark/examples/main.py | gerashegalov/spark-rapids-examples | b487413ed37bde2791ada67557a4742e54711261 | [
"Apache-2.0"
] | 85 | 2021-08-18T06:30:07.000Z | 2022-03-30T23:21:19.000Z | examples/Spark-ETL+XGBoost/utility/python/com/nvidia/spark/examples/main.py | gerashegalov/spark-rapids-examples | b487413ed37bde2791ada67557a4742e54711261 | [
"Apache-2.0"
] | 23 | 2021-08-18T01:17:10.000Z | 2022-02-17T03:23:11.000Z | #
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from com.nvidia.spark.examples.utility.args import parse_arguments
from importlib import import_module
def main():
    """Parse CLI arguments and dispatch to the requested example module.

    The target module is named by the parsed arguments' ``mainClass``
    attribute and must expose a ``main(args, xgboost_args)`` function.
    """
    args, xgboost_args = parse_arguments()
    getattr(import_module(args.mainClass), 'main')(args, xgboost_args)
| 38.363636 | 74 | 0.768957 |
ac3f2df30380baa1477aab4ea7b4925d0944bdef | 13,039 | py | Python | src/p_detector/network.py | SummerOf15/tiny-instance-segmentation | bfb3f3403a4637d97763443e56841acda9405498 | [
"Apache-2.0"
] | null | null | null | src/p_detector/network.py | SummerOf15/tiny-instance-segmentation | bfb3f3403a4637d97763443e56841acda9405498 | [
"Apache-2.0"
] | null | null | null | src/p_detector/network.py | SummerOf15/tiny-instance-segmentation | bfb3f3403a4637d97763443e56841acda9405498 | [
"Apache-2.0"
] | null | null | null | """ This file defines network functions and classes.
"""
import logging
import math
import torch.nn as nn
import torchvision
import os
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool
from scipy.optimize import linear_sum_assignment
from torch import nn
from typing import Dict, List
from collections import OrderedDict
from detectron2.layers import ShapeSpec
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks, PolygonMasks
from detectron2.utils.logger import log_first_n
from fvcore.nn import giou_loss, smooth_l1_loss
from p_detector.coco import convert_coco_poly_to_mask
from p_detector.backbone import Joiner
from p_detector.detr import DETR, SetCriterion
from p_detector.matcher import HungarianMatcher
from p_detector.position_encoding import PositionEmbeddingSine
from p_detector.transformer import Transformer
from p_detector.segmentation import DETRsegm, PostProcessPanoptic, PostProcessSegm
from p_detector.boxops_utils import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from p_detector.utils import NestedTensor
os.environ['TORCH_HOME'] = './'
def fast_rcnn_resnet18(progress=True, num_classes=2, pretrained_backbone=True, **kwargs):
    """Faster R-CNN with a ResNet-18 FPN backbone.

    Args:
        progress: unused; kept for signature compatibility.
        num_classes: number of detection classes (including background).
        pretrained_backbone: load pretrained backbone weights.
        **kwargs: forwarded to torchvision's FasterRCNN.
    """
    backbone = resnet_fpn_backbone('resnet18', pretrained_backbone)
    # Rebuild the FPN so its lateral connections match the [64, 128, 256,
    # 512] stage channel counts used here.
    stage_channels = [64, 128, 256, 512]
    fpn_out_channels = 256
    backbone.fpn = FeaturePyramidNetwork(
        in_channels_list=stage_channels,
        out_channels=fpn_out_channels,
        extra_blocks=LastLevelMaxPool(),
    )
    return FasterRCNN(backbone=backbone, num_classes=num_classes, **kwargs)
def fast_rcnn_resnet34(progress=True, num_classes=2, pretrained_backbone=True, **kwargs):
    """Faster R-CNN with a ResNet-34 FPN backbone.

    Args:
        progress: unused; kept for signature compatibility.
        num_classes: number of detection classes (including background).
        pretrained_backbone: load pretrained backbone weights.
        **kwargs: forwarded to torchvision's FasterRCNN.
    """
    backbone = resnet_fpn_backbone('resnet34', pretrained_backbone)
    # Rebuild the FPN so its lateral connections match the [64, 128, 256,
    # 512] stage channel counts used here.
    stage_channels = [64, 128, 256, 512]
    fpn_out_channels = 256
    backbone.fpn = FeaturePyramidNetwork(
        in_channels_list=stage_channels,
        out_channels=fpn_out_channels,
        extra_blocks=LastLevelMaxPool(),
    )
    return FasterRCNN(backbone=backbone, num_classes=num_classes, **kwargs)
def fast_rcnn_resnet50(progress=True, num_classes=2, pretrained_backbone=True, **kwargs):
    """Faster R-CNN with the stock torchvision ResNet-50 FPN backbone.

    Args:
        progress: unused; kept for signature compatibility.
        num_classes: number of detection classes (including background).
        pretrained_backbone: load pretrained backbone weights.
        **kwargs: forwarded to torchvision's FasterRCNN.
    """
    fpn_backbone = resnet_fpn_backbone('resnet50', pretrained_backbone)
    return FasterRCNN(backbone=fpn_backbone, num_classes=num_classes, **kwargs)
class MaskedBackbone(nn.Module):
    """Thin wrapper around a detectron2 backbone adding padding masks.

    Each feature level is returned as a NestedTensor whose mask is True over
    the padded region of every image in the batch.
    """

    def __init__(self, cfg):
        super().__init__()
        self.backbone = build_backbone(cfg)
        shape_specs = self.backbone.output_shape()
        # Per-level downsampling strides, in the backbone's output order.
        self.feature_strides = [spec.stride for spec in shape_specs.values()]
        # Channel count of the last (coarsest) feature level.
        self.num_channels = list(shape_specs.values())[-1].channels

    def forward(self, images):
        feats = self.backbone(images.tensor)
        pad_masks = self.mask_out_padding(
            [feat.shape for feat in feats.values()],
            images.image_sizes,
            images.tensor.device,
        )
        assert len(feats) == len(pad_masks)
        for level, key in enumerate(feats.keys()):
            feats[key] = NestedTensor(feats[key], pad_masks[level])
        return feats

    def mask_out_padding(self, feature_shapes, image_sizes, device):
        """Build one boolean padding mask per feature level.

        Args:
            feature_shapes: per-level (N, C, H, W) tuples.
            image_sizes: per-image (height, width) before batch padding.
            device: device the masks are allocated on.

        Returns:
            List of (N, H, W) bool tensors; True marks padded positions.
        """
        assert len(feature_shapes) == len(self.feature_strides)
        masks = []
        for stride, shape in zip(self.feature_strides, feature_shapes):
            batch, _, height, width = shape
            level_mask = torch.ones(
                (batch, height, width), dtype=torch.bool, device=device)
            for img_idx, (h, w) in enumerate(image_sizes):
                # Valid (non-padded) extent of this image at this stride.
                valid_h = int(np.ceil(float(h) / stride))
                valid_w = int(np.ceil(float(w) / stride))
                level_mask[img_idx, :valid_h, :valid_w] = 0
            masks.append(level_mask)
        return masks
@META_ARCH_REGISTRY.register()
class Detr(nn.Module):
"""
Implementation of Detr. Detectron 2 wrapper.
If class ID = 1 (e.g. tuft detection), set the num. classes to 2.
"""
def __init__(self, cfg):
super().__init__()
# Generic settings:
self.device = torch.device(cfg.MODEL.DEVICE)
self.num_classes = cfg.MODEL.DETR.NUM_CLASSES
self.mask_on = cfg.MODEL.MASK_ON
hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM
num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES
# Transformer parameters:
nheads = cfg.MODEL.DETR.NHEADS
dropout = cfg.MODEL.DETR.DROPOUT
dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD
enc_layers = cfg.MODEL.DETR.ENC_LAYERS
dec_layers = cfg.MODEL.DETR.DEC_LAYERS
pre_norm = cfg.MODEL.DETR.PRE_NORM
# Loss parameters:
giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT
l1_weight = cfg.MODEL.DETR.L1_WEIGHT
deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION
no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT
# Backbone
N_steps = hidden_dim // 2
d2_backbone = MaskedBackbone(cfg)
backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True))
backbone.num_channels = d2_backbone.num_channels
# Transformers
transformer = Transformer(
d_model=hidden_dim,
dropout=dropout,
nhead=nheads,
dim_feedforward=dim_feedforward,
num_encoder_layers=enc_layers,
num_decoder_layers=dec_layers,
normalize_before=pre_norm,
return_intermediate_dec=deep_supervision,
)
# initializing Detr module:
self.detr = DETR(
backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision
)
if self.mask_on:
frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS
if frozen_weights != '':
print("LOAD pre-trained weights")
weight = torch.load(frozen_weights, map_location=lambda storage, loc: storage)['model']
new_weight = {}
for k, v in weight.items():
if 'detr.' in k:
new_weight[k.replace('detr.', '')] = v
else:
print(f"Skipping loading weight {k} from frozen model")
del weight
self.detr.load_state_dict(new_weight)
del new_weight
self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != ''))
self.seg_postprocess = PostProcessSegm
self.detr.to(self.device)
# building criterion:
matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight)
weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight}
weight_dict["loss_giou"] = giou_weight
if deep_supervision:
aux_weight_dict = {}
for i in range(dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes", "cardinality"]
if self.mask_on:
losses += ["masks"]
self.criterion = SetCriterion(
self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses,
)
self.criterion.to(self.device)
# normalize the image:
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def forward(self, batched_inputs):
    """
    Args:
        batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
            Each item in the list contains the inputs for one image.
            For now, each item in the list is a dict that contains:
            * image: Tensor, image in (C, H, W) format.
            * instances: Instances
            Other information that's included in the original dicts, such as:
            * "height", "width" (int): the output resolution of the model, used in inference.
              See :meth:`postprocess` for details.
    Returns:
        dict[str: Tensor]:
            mapping from a named loss to a tensor storing the loss. Used during training only.
    """
    images = self.preprocess_image(batched_inputs)
    output = self.detr(images)
    if self.training:
        # Training path: convert ground truth to DETR-style targets and
        # compute the Hungarian-matched losses.
        gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        targets = self.prepare_targets(gt_instances)
        loss_dict = self.criterion(output, targets)
        weight_dict = self.criterion.weight_dict
        # Scale each raw loss by its configured weight; losses with no
        # weight entry are returned unscaled.
        for k in loss_dict.keys():
            if k in weight_dict:
                loss_dict[k] *= weight_dict[k]
        return loss_dict
    else:
        # Inference path: decode raw predictions into Instances and rescale
        # them to each image's requested output resolution.
        box_cls = output["pred_logits"]
        box_pred = output["pred_boxes"]
        mask_pred = output["pred_masks"] if self.mask_on else None
        results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes)
        processed_results = []
        for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):
            # Fall back to the network input size when the caller did not
            # supply the desired output resolution.
            height = input_per_image.get("height", image_size[0])
            width = input_per_image.get("width", image_size[1])
            r = detector_postprocess(results_per_image, height, width)
            processed_results.append({"instances": r})
        return processed_results
def prepare_targets(self, targets):
    """Convert detectron2 ``Instances`` ground truth into DETR target dicts.

    Boxes are normalized by image size and converted from xyxy to
    (cx, cy, w, h); polygon masks are rasterized when mask prediction is on.
    """
    new_targets = []
    for targets_per_image in targets:
        h, w = targets_per_image.image_size
        # Per-coordinate normalizer in xyxy order: (w, h, w, h).
        image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)
        gt_classes = targets_per_image.gt_classes
        gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy
        gt_boxes = box_xyxy_to_cxcywh(gt_boxes)
        new_targets.append({"labels": gt_classes, "boxes": gt_boxes})
        if self.mask_on and hasattr(targets_per_image, 'gt_masks'):
            # assumes gt_masks is a PolygonMasks instance — TODO confirm
            gt_masks = targets_per_image.gt_masks
            gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)
            new_targets[-1].update({'masks': gt_masks})
    return new_targets
def inference(self, box_cls, box_pred, mask_pred, image_sizes):
    """
    Arguments:
        box_cls (Tensor): tensor of shape (batch_size, num_queries, K).
            The tensor predicts the classification probability for each query.
        box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).
            The tensor predicts 4-vector (x,y,w,h) box
            regression values for every query.
        mask_pred (Tensor or None): predicted masks; only used when mask
            prediction is enabled.
        image_sizes (List[torch.Size]): the input image sizes
    Returns:
        results (List[Instances]): a list of #images elements.
    """
    assert len(box_cls) == len(image_sizes)
    results = []
    # For each box we assign the best class or the second best if the best one
    # is `no_object`. The last logit is the `no_object` class and is dropped
    # before taking the per-query max.
    scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)
    for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(zip(
        scores, labels, box_pred, image_sizes
    )):
        result = Instances(image_size)
        # Boxes are predicted in normalized cxcywh; convert to xyxy and
        # scale back to pixel coordinates.
        result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))
        result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])
        if self.mask_on:
            mask = F.interpolate(mask_pred[i].unsqueeze(0), size=image_size, mode='bilinear', align_corners=False)
            mask = mask[0].sigmoid() > 0.5
            B, N, H, W = mask_pred.shape
            # Crop each binary mask to its predicted box at a fixed 32x32
            # resolution (detectron2 BitMasks convention).
            mask = BitMasks(mask.cpu()).crop_and_resize(result.pred_boxes.tensor.cpu(), 32)
            result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device)
        result.scores = scores_per_image
        result.pred_classes = labels_per_image
        results.append(result)
    return results
def preprocess_image(self, batched_inputs):
    """Turn raw per-image tensors into a normalized, padded ImageList batch."""
    normalized = []
    for entry in batched_inputs:
        tensor = entry["image"].to(self.device)
        normalized.append(self.normalizer(tensor))
    return ImageList.from_tensors(normalized)
30e98cefc5873b648485473bc717e8f015e9de69 | 450 | py | Python | lesson3/lesson3_task3.py | nekdfl/GB-python-developer | ca3f34bac2a92a930779f89357941bfa9634b3d4 | [
"MIT"
] | null | null | null | lesson3/lesson3_task3.py | nekdfl/GB-python-developer | ca3f34bac2a92a930779f89357941bfa9634b3d4 | [
"MIT"
] | null | null | null | lesson3/lesson3_task3.py | nekdfl/GB-python-developer | ca3f34bac2a92a930779f89357941bfa9634b3d4 | [
"MIT"
] | null | null | null | """
Реализовать функцию my_func(), которая принимает три позиционных аргумента, и возвращает сумму наибольших двух аргументов.
"""
def my_func(num1, num2, num3):
    """Return the sum of the two largest of the three arguments.

    Replaces the original list/pop bookkeeping (which also began with a
    dead ``pass`` statement): the sum of all three values minus the
    smallest one is exactly the sum of the two largest.
    """
    return num1 + num2 + num3 - min(num1, num2, num3)
def main():
    """Demonstrate my_func on a sample input and print the result."""
    # The original body started with a dead ``pass`` statement; removed.
    res = my_func(1, 2, 3)
    print(res)


if __name__ == "__main__":
    main()
| 17.307692 | 122 | 0.648889 |
995512e7250d77eb19a0a6abaf6d612f085c741b | 4,234 | py | Python | DQMOffline/L1Trigger/test/runDQMOffline_L1TMuonEfficiency_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | DQMOffline/L1Trigger/test/runDQMOffline_L1TMuonEfficiency_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | DQMOffline/L1Trigger/test/runDQMOffline_L1TMuonEfficiency_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# CMSSW configuration: offline DQM job for L1T muon trigger efficiency.
import FWCore.ParameterSet.Config as cms

process = cms.Process("L1TDQMOffline")

import os
import sys
import commands  # NOTE(review): Python 2-only module (removed in Python 3); appears unused here.

process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load('DQMOffline.Configuration.DQMOffline_cff')
process.load('Configuration.EventContent.EventContent_cff')

import FWCore.ParameterSet.Config as cms  # NOTE(review): duplicate of the import above.

# DQM file saver module
# NOTE(review): this module-level `dqmSaver` is never attached to the process;
# the `process.dqmSaver` tweaked near the bottom comes from DQMEnvironment_cfi.
dqmSaver = cms.EDAnalyzer("DQMFileSaver",
    # Possible conventions are "Online", "Offline" and "RelVal".
    convention = cms.untracked.string('Offline'),
    # Save files in plain ROOT or encode ROOT objects in ProtocolBuffer
    fileFormat = cms.untracked.string('ROOT'),
    # Name of the producer.
    producer = cms.untracked.string('DQM'),
    # Name of the processing workflow.
    workflow = cms.untracked.string(''),
    # Directory in which to save the files.
    dirName = cms.untracked.string('.'),
    # Only save this directory
    filterName = cms.untracked.string(''),
    # Version name to be used in file name.
    version = cms.untracked.int32(1),
    # runIsComplete
    runIsComplete = cms.untracked.bool(False),
    # Save file every N lumi sections (-1: disabled)
    saveByLumiSection = cms.untracked.int32(-1),
    # Save file every N runs (-1: disabled)
    saveByRun = cms.untracked.int32(-1),
    # Save file at the end of the job
    saveAtJobEnd = cms.untracked.bool(True),
    # Ignore run number for MC data (-1: disabled)
    forceRunNumber = cms.untracked.int32(-1),
    # Control reference saving (default / skip / qtests / all)
    referenceHandling = cms.untracked.string('all'),
    # Control which references are saved for qtests (default: STATUS_OK)
    referenceRequireStatus = cms.untracked.int32(100)
)

# Report progress every 50 events.
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(50)
# NOTE(review): overridden by the second `process.options` assignment below.
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(False))

# Input: 2016 Run D SingleMuon AOD files.
process.source = cms.Source('PoolSource',
    fileNames = cms.untracked.vstring(
        '/store/data/Run2016D/SingleMuon/AOD/PromptReco-v2/000/276/315/00000/023D6C02-F844-E611-BE27-02163E014773.root',
        '/store/data/Run2016D/SingleMuon/AOD/PromptReco-v2/000/276/315/00000/02D20100-F844-E611-8AB4-02163E0141D8.root',
        '/store/data/Run2016D/SingleMuon/AOD/PromptReco-v2/000/276/315/00000/06C984E1-F744-E611-AB0A-02163E011D06.root',
        '/store/data/Run2016D/SingleMuon/AOD/PromptReco-v2/000/276/315/00000/0A20BBE6-F744-E611-B965-02163E011AA6.root',
        '/store/data/Run2016D/SingleMuon/AOD/PromptReco-v2/000/276/315/00000/0C1381D6-F744-E611-A5C6-02163E0125A4.root',
        '/store/data/Run2016D/SingleMuon/AOD/PromptReco-v2/000/276/315/00000/0C8BE40E-F844-E611-8FB4-02163E011F24.root'
    )
)
# Process all events in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1))

process.load('DQMOffline.L1Trigger.L1TEfficiencyHarvesting_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load("TrackingTools.Configuration.TrackingTools_cff")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')

# Conditions: ICHEP 2016 reprocessing global tag.
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '80X_dataRun2_ICHEP16_repro_v0', '')

process.load('DQMOffline.L1Trigger.L1TEfficiencyMuonsOffline_cff')

# Event-setup dump helper path (debugging aid).
process.dumpES = cms.EDAnalyzer("PrintEventSetupContent")
process.l1tdumpeventsetup = cms.Path(process.dumpES)

# Configure the muon efficiency analyzer: quiet output, stage-2 GMT input.
process.l1tEfficiencyMuons_offline.verbose = cms.untracked.bool(False)
process.l1tEfficiencyMuons_offline.gmtInputTag = cms.untracked.InputTag("gmtStage2Digis:Muon")

process.L1TMuonSeq = cms.Sequence(process.l1tEfficiencyMuons_offline)
process.L1TMuonPath = cms.Path(process.L1TMuonSeq)

process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")

# Saver settings: offline convention, write once at job end.
process.dqmSaver.convention = 'Offline'
process.dqmSaver.workflow = '/RelVal/DQMOffline/L1Trigger'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd = cms.untracked.bool(True)

process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))

# End path: harvesting followed by DQM file saving.
process.ppost = cms.EndPath(process.l1tEfficiencyHarvesting + process.dqmSaver)
| 51.012048 | 116 | 0.775862 |
e15d4ac73a8e49a7987dd8bb889e7760ed138b5b | 401 | py | Python | deprecated/tests/mock/schedule.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 4 | 2015-02-10T14:28:12.000Z | 2016-12-26T22:52:07.000Z | deprecated/tests/mock/schedule.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 17 | 2015-01-22T21:58:36.000Z | 2018-01-25T19:47:43.000Z | deprecated/tests/mock/schedule.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 9 | 2015-01-26T19:25:45.000Z | 2018-11-01T20:14:12.000Z | from rcbu.common.schedule import ScheduleFrequency
def schedule(freq,
             interval=None, weekday=None,
             hour=None, minute=None, period=None):
    """Build a mock schedule payload in the backup API's key format.

    ``freq`` is translated through ScheduleFrequency; all other fields are
    passed through unchanged (None when not supplied).
    """
    payload = dict(
        Frequency=ScheduleFrequency.to_api(freq),
        StartTimeMinute=minute,
        StartTimeHour=hour,
        StartTimeAmPm=period,
        HourInterval=interval,
        DayOfWeekId=weekday,
    )
    return payload
| 26.733333 | 52 | 0.618454 |
55f54dc20d27a18ed57e93941269b3927402f2bd | 2,059 | py | Python | module/user/usersmadmingroup.py | arvin-chou/mc | b82305a4a91fe6150caa5423205a0798f3815724 | [
"MIT"
] | null | null | null | module/user/usersmadmingroup.py | arvin-chou/mc | b82305a4a91fe6150caa5423205a0798f3815724 | [
"MIT"
] | null | null | null | module/user/usersmadmingroup.py | arvin-chou/mc | b82305a4a91fe6150caa5423205a0798f3815724 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from sqlalchemy import Table, Column, Integer, String, \
MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint
from sqlalchemy.orm import relationship, backref
from config.config import db, metadata
from schema.users import User
from schema.admingroup import AdminGroup
# Physical table name, reused both by the ORM model and the Table() mapping.
__tablename__ = 'usersmadmingroup'


class UsersMAdminGroup(db.Model):
    """Association (many-to-many) model linking users to admin groups."""
    __tablename__ = __tablename__
    id = Column(Integer, primary_key=True)
    #user_id = Column(Integer)
    #admingroup_id = Column(Integer)
    # NOTE(review): `id`, `user_id` and `admingroup_id` are all marked
    # primary_key, which yields a composite primary key over all three
    # columns — confirm this is intentional.
    user_id = Column(Integer, ForeignKey(User.id), primary_key=True)
    admingroup_id = Column(Integer, ForeignKey(AdminGroup.id), primary_key=True)
    #user_id = Column(Integer, ForeignKey(User.id))
    #admingroup_id = Column(Integer, ForeignKey(AdminGroup.id))
    # Each (user, admin group) pair may appear at most once.
    __table_args__ = (UniqueConstraint('user_id', 'admingroup_id',
                                       name='_user_admingroup'),
                      )
    # if user delete, this mapping also delete too.
    user_obj = relationship('User', backref=backref('users', uselist=True,
                            cascade='delete,all', lazy='dynamic'))
    admingroup_obj = relationship('AdminGroup',
                                  backref=backref('admingroups', lazy='dynamic'))
    #user_obj = relationship('User', lazy='dynamic', cascade='all')
    #admingroup_obj = relationship('AdminGroup', lazy='dynamic', cascade='all')
    #__table_args__ = (ForeignKeyConstraint(
    #    #[user_id, admingroup_id],[User.id, AdminGroup.id]), {})
    #    [user_id, admingroup_id],['user.id', 'admingroup.id']), {})
# Core-level Table definition of the same association table, registered on
# the shared `metadata` object (used for schema creation/reflection).
SchemaUsersMAdminGroup = Table(__tablename__, metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', None, ForeignKey('users.id')),
    #ForeignKey("new.new_id", onupdate="CASCADE",
    #           ondelete="CASCADE"),
    #Column('location_code', Unicode(10)),
    Column('admingroup_id', None, ForeignKey('admingroup.id')),
    # Mirror of the model's uniqueness rule: one row per (user, group) pair.
    UniqueConstraint('user_id', 'admingroup_id')
    #ForeignKeyConstraint(['user_id', 'admingroup_id'], ['user.id',
    #                     'admingroup.id'])
)
603b5066d3d84d003ca19d6730f8a8654b66dbe7 | 10,289 | py | Python | tag_remote/tag_remote.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | 1 | 2021-08-09T19:28:49.000Z | 2021-08-09T19:28:49.000Z | tag_remote/tag_remote.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | null | null | null | tag_remote/tag_remote.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | 2 | 2017-07-14T00:15:54.000Z | 2019-03-02T09:46:21.000Z | import asyncio
import random
import json
import discord
from jshbot import utilities, configurations, plugins, logger, data
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('Tag remote')
uses_configuration = False
DATA_VERSION = 1
WEBHOOK_SET = set()
TAG_CONVERTER = None
@plugins.command_spawner
def get_commands(bot):
    """Declare the `tagremote` command tree: status, start, stop, update."""
    return [Command(
        'tagremote', subcommands=[
            SubCommand(doc='Gets the current remote session.', function=tagremote),
            SubCommand(
                Opt('start'),
                doc='Starts a sound tag remote session.',
                function=tagremote_start),
            SubCommand(
                Opt('stop'),
                doc='Stops the current sound tag remote session.',
                function=tagremote_stop),
            SubCommand(
                Opt('update'),
                doc='Provides a refreshed tag list. Updates can be '
                    'applied in the settings menu of the tag remote app.',
                function=tagremote_update)
        ],
        description='Call sound tags through your phone.',
        allow_direct=False  # guild-only: sessions are bound to a guild
    )]
async def tagremote(bot, context):
    """Gets the current session data as a link."""
    session_data = data.get(bot, __name__, 'data', guild_id=context.guild.id)
    if not session_data:
        raise CBException(
            "No session available.\nStart one with `{}tagremote start`".format(
                utilities.get_invoker(bot, guild=context.guild)))
    channel_id, session_code = session_data['channel'], session_data['session']
    voice_channel_id = session_data['voice_channel']
    # Resolve the stored IDs back into mentionable channel objects.
    channel_mention = data.get_channel(bot, channel_id, guild=context.guild).mention
    voice_channel_mention = data.get_channel(bot, voice_channel_id, guild=context.guild).mention
    description = 'The session code is:\n`{}`\nThe session is attached to {} and {}'.format(
        session_code, channel_mention, voice_channel_mention)
    # The web app reads the session code from the URL fragment.
    return Response(embed=discord.Embed(
        title='Tap here on your phone to use the tag remote',
        url='https://jkchen2.github.io/tag-remote/#{}'.format(session_code),
        description=description))
def _get_tag_dictionary(bot, guild):
    """Retrieves the tag dictionary of the server.

    Returns a mapping of tag key -> {'name': ..., 'hits': ...} for every
    non-private sound tag; raises CBException when none exist.
    """
    # Tags may live in a single global table or a per-guild table.
    if configurations.get(bot, 'tags.py', 'global_tags'):
        table_suffix = 'global'
    else:
        table_suffix = str(guild.id)
    tags_plugin = bot.plugins['tags.py']
    sound_bit = tags_plugin._get_flag_bits(['sound'])
    private_bit = tags_plugin._get_flag_bits(['private'])
    # Bitmask filter: sound flag set AND private flag unset.
    cursor = data.db_select(
        bot, from_arg='tags', table_suffix=table_suffix,
        where_arg='flags & %s = %s AND flags & %s = 0',
        input_args=[sound_bit, sound_bit, private_bit])
    raw_tag_list = cursor.fetchall() if cursor else []
    if not raw_tag_list:
        raise CBException("No sound tags available.")
    tag_dictionary = {}
    for tag in raw_tag_list:
        tag_dictionary[tag.key] = {'name': tag.name, 'hits': tag.hits}
    return tag_dictionary
async def _upload_session_data(bot, channel, voice_channel, webhook, tag_dictionary):
    """Uploads the tag dictionary and returns the session code.

    The session data is serialized to JSON and uploaded as a Discord
    attachment; the returned code is "<channel_id>:<attachment_id>", the
    two path segments preceding the filename in the attachment URL.
    """
    tag_data = utilities.get_text_as_file(json.dumps({
        'version': DATA_VERSION,
        'bot_id': str(bot.user.id),
        'guild': str(channel.guild.id),
        'guild_name': channel.guild.name,
        'channel': str(channel.id),
        'channel_name': channel.name,
        'voice_channel': str(voice_channel.id),
        'voice_channel_name': voice_channel.name,
        'webhook': [str(webhook.id), webhook.token],
        'tags': tag_dictionary
    }))
    url = await utilities.upload_to_discord(bot, tag_data, filename='remote_data', close=True)
    # Attachment URLs end with .../<channel_id>/<attachment_id>/<filename>.
    # Take the two IDs directly; this replaces the original (equivalent but
    # opaque) double string-reversal slice trick.
    channel_segment, attachment_segment = url.split('/')[-3:-1]
    return '{}:{}'.format(channel_segment, attachment_segment)
async def tagremote_start(bot, context):
    """Starts a tag remote session.

    Creates a channel webhook, uploads session data, records the session,
    and responds with the remote link. Raises CBException on any precondition
    failure (existing session, missing permission, no voice channel).
    """
    # Check for an existing session
    session_data = data.get(bot, __name__, 'data', guild_id=context.guild.id)
    if session_data:
        raise CBException("Session already exists.")
    if not context.channel.permissions_for(context.guild.me).manage_webhooks:
        raise CBException("Missing the `Manage Webhooks` permission.")

    # Retrieve and format tag data
    tag_dictionary = _get_tag_dictionary(bot, context.guild)

    # Check that the user is in an unblocked voice channel
    if not context.author.voice:
        raise CBException("You must be in a voice channel.")
    voice_channel = context.author.voice.channel
    await utilities.join_and_ready(bot, voice_channel, is_mod=context.elevation >= 1)

    # Create webhook (renamed later by tagremote_update with the session code)
    webhook = await context.channel.create_webhook(name='Tag Remote []')

    # Upload session data
    session_code = await _upload_session_data(
        bot, context.channel, voice_channel, webhook, tag_dictionary)

    # Track session data, both per-guild and in the global webhook cache
    session_data = {
        'webhook': webhook.id,
        'channel': context.channel.id,
        'voice_channel': voice_channel.id,
        'session': session_code
    }
    data.add(bot, __name__, 'data', session_data, guild_id=context.guild.id)
    data.list_data_append(bot, __name__, 'webhooks', webhook.id, duplicates=False)
    WEBHOOK_SET.add(webhook.id)

    # Reply with the same embed the status subcommand produces.
    return await tagremote(bot, context)
async def tagremote_stop(bot, context):
    """Stops the current session and confirms to the invoking user."""
    await _delete_session(bot, context.guild)
    return Response(content="The session has been stopped.")
async def tagremote_update(bot, context):
    """Renames the webhook with an updated tag list file.

    Re-uploads the tag dictionary and embeds the new attachment ID in the
    webhook name so the web app can pick it up. Any broken piece of session
    state (missing channel/webhook) tears the session down.
    """
    # Check for an existing session
    session_data = data.get(bot, __name__, 'data', guild_id=context.guild.id)
    if not session_data:
        raise CBException("No session available.")
    channel = data.get_channel(bot, session_data['channel'])
    if not channel:
        await _delete_session(bot, context.guild)
        raise CBException("Failed to get the channel.")
    voice_channel = data.get_channel(bot, session_data['voice_channel'])
    if not voice_channel:
        await _delete_session(bot, context.guild)
        raise CBException("Failed to get the voice channel.")
    webhooks = await channel.webhooks()
    if not webhooks:
        await _delete_session(bot, context.guild)
        raise CBException("No webhooks available.")
    # Find the session's webhook among the channel's webhooks.
    for webhook in webhooks:
        if webhook.id == session_data['webhook']:
            break
    else:
        await _delete_session(bot, context.guild)
        raise CBException("Webhook not found.")
    tag_dictionary = _get_tag_dictionary(bot, context.guild)
    session_code = await _upload_session_data(bot, channel, voice_channel, webhook, tag_dictionary)
    # Only the attachment ID part changes between uploads; publish it via
    # the webhook name, which the remote app polls.
    updated_code = session_code.split(':')[1]
    await webhook.edit(name='Tag Remote [{}]'.format(updated_code))
    return Response(
        content="Tag data refreshed. Update the remote on your phone via the options menu.")
async def _delete_session(bot, guild):
    """Deletes the session for the given guild.

    Removes persisted session data, deletes the channel webhook, clears the
    webhook caches, and stops audio if the bot is still in the session's
    voice channel. Raises CBException when no session exists.
    """
    session_data = data.remove(bot, __name__, 'data', guild_id=guild.id, safe=True)
    if not session_data:
        raise CBException("Session does not exist.")
    channel_id, webhook_id = session_data['channel'], session_data['webhook']
    channel = data.get_channel(bot, channel_id, safe=True)
    webhooks = await channel.webhooks()
    for webhook in webhooks:
        if webhook.id == webhook_id:
            await webhook.delete()
            break
    else:
        # Webhook was deleted externally; log and continue cleanup.
        logger.warn('Webhook to delete (%s) not found!', webhook_id)
    try:
        WEBHOOK_SET.remove(webhook_id)
    except KeyError:
        logger.warn("Webhook not found in WEBHOOK_SET")
    data.list_data_remove(bot, __name__, 'webhooks', value=webhook_id, safe=True)
    # Only disconnect audio if the voice client is still in this session's channel.
    if guild.voice_client and guild.voice_client.channel.id == session_data['voice_channel']:
        await utilities.stop_audio(bot, guild)
@plugins.permissions_spawner
def setup_permissions(bot):
    """Declare the Discord permissions this plugin requires."""
    return { 'manage_webhooks': "Allows tags to be called by webhook." }
@plugins.listen_for('bot_on_ready_boot')
async def setup_globals(bot):
    """Initialize module globals once the bot is booted and ready."""
    global WEBHOOK_SET, TAG_CONVERTER
    # Tag lookup converter that validates tags but bypasses voice channel checks.
    TAG_CONVERTER = bot.plugins['tags.py'].TagConverter(
        apply_checks=True, voice_channel_bypass=True)
    # In-memory cache of session webhook IDs for fast message filtering.
    WEBHOOK_SET = set(data.get(bot, __name__, 'webhooks', default=[]))
@plugins.listen_for('on_message')
async def check_webhook_messages(bot, message):
    """Reads webhook messages and calls tags if necessary.

    Messages from session webhooks carry commands from the web remote:
    '[Retrieve] <tag>' plays a sound tag, '[Stop audio]' stops playback.
    The command message itself is deleted after a short delay.
    """
    if message.author.id in WEBHOOK_SET:
        session_data = data.get(bot, __name__, 'data', guild_id=message.guild.id)
        voice_channel = data.get_channel(bot, session_data['voice_channel'], guild=message.guild)
        # Ignore if nobody (no non-bot member) is in the channel
        if not [it for it in voice_channel.members if not it.bot]:
            pass
        # Retrieve tag
        elif message.content.startswith('[Retrieve]'):
            tag_name = message.content[10:].strip()
            try:
                tag = TAG_CONVERTER(bot, message, tag_name, channel_bypass=voice_channel)
            except BotException as e:
                logger.warn("Failed to retrieve tag: %s", e)
            else:
                tags_plugin = bot.plugins['tags.py']
                # Sound tags can hold multiple URLs; pick one at random.
                url = random.choice(tag.value)
                try:
                    await tags_plugin._play_sound_tag(bot, tag, url, voice_channel, delay=-1)
                except BotException as e:
                    logger.warn("Failed to play tag: %s", e)
                else:
                    tags_plugin._update_hits(bot, tag.key, message.author.id, message.guild.id)
        # Stop audio
        elif message.content == '[Stop audio]':
            voice_client = message.guild.voice_client
            if (voice_client and
                    voice_client.channel == voice_channel and
                    voice_client.is_playing()):
                voice_client.stop()
        # Always remove messages (best-effort: deletion may race with other cleanup)
        await asyncio.sleep(3)
        try:
            await message.delete()
        except:  # NOTE(review): bare except; narrowing to Exception would be safer
            pass
| 38.973485 | 99 | 0.662649 |
5155b18769e1fb5e3833c647164931ee4bc4c6de | 2,545 | py | Python | DjangoBlog/urls.py | ch3czjl/dianputuoguan | e5915462ae13655cb5ff9afb8b1588cc7eac92d7 | [
"MIT"
] | null | null | null | DjangoBlog/urls.py | ch3czjl/dianputuoguan | e5915462ae13655cb5ff9afb8b1588cc7eac92d7 | [
"MIT"
] | 9 | 2021-03-19T03:54:59.000Z | 2022-03-12T00:31:13.000Z | DjangoBlog/urls.py | ch3czjl/dianputuoguan | e5915462ae13655cb5ff9afb8b1588cc7eac92d7 | [
"MIT"
] | null | null | null | """DjangoBlog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from DjangoBlog.sitemap import StaticViewSitemap, ArticleSiteMap, CategorySiteMap, TagSiteMap, UserSiteMap
from DjangoBlog.feeds import DjangoBlogFeed
from django.views.decorators.cache import cache_page
from django.conf import settings
from django.conf.urls.static import static
from DjangoBlog.admin_site import admin_site
# from DjangoBlog.login_site import login_site
from django.urls import include, path
# Sitemap sections served at /sitemap.xml.
sitemaps = {
    'blog': ArticleSiteMap,
    'Category': CategorySiteMap,
    'Tag': TagSiteMap,
    'User': UserSiteMap,
    'static': StaticViewSitemap
}

# Custom error handlers (dotted view paths resolved by Django).
handler404 = 'blog.views.page_not_found_view'
handler500 = 'blog.views.server_error_view'
# BUG FIX: Django looks up the module-level name `handler403`; the original
# binding `handle403` was a typo and was silently ignored, so the custom 403
# view never ran. The old name is kept bound for backward compatibility.
handler403 = 'blog.views.permission_denied_view'
handle403 = handler403

urlpatterns = [
    url(r'^admin/', admin_site.urls),
    # url(r'^dianptg/',dianptg.urls),
    path('dianptg/', include('dianptg.urls')),
    url(r'', include('blog.urls', namespace='blog')),
    url(r'mdeditor/', include('mdeditor.urls')),
    url(r'', include('comments.urls', namespace='comment')),
    url(r'', include('accounts.urls', namespace='account')),
    url(r'', include('oauth.urls', namespace='oauth')),
    url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
    url(r'^feed/$', DjangoBlogFeed()),
    # url(r'^loginn/', include('axf_app.urls', namespace='axf')),
    url(r'^rss/$', DjangoBlogFeed()),
    url(r'^search', include('haystack.urls'), name='search'),
    url(r'', include('servermanager.urls', namespace='servermanager')),
    url(r'', include('owntracks.urls', namespace='owntracks'))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

# Serve user-uploaded media through Django only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| 41.721311 | 106 | 0.711198 |
31b5bb65f4089c47a605718ec8a39fc66acb97bf | 8,672 | py | Python | library/os_network.py | pgraziano/ursula | b70ccc4a6bda2830559b99991025ee275301c121 | [
"MIT"
] | 193 | 2015-01-27T13:47:49.000Z | 2022-01-14T23:05:15.000Z | library/os_network.py | pgraziano/ursula | b70ccc4a6bda2830559b99991025ee275301c121 | [
"MIT"
] | 1,812 | 2015-01-01T01:26:39.000Z | 2019-04-22T19:33:11.000Z | library/os_network.py | pgraziano/ursula | b70ccc4a6bda2830559b99991025ee275301c121 | [
"MIT"
] | 258 | 2015-01-23T17:09:44.000Z | 2020-08-26T19:41:14.000Z | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# shade is an optional dependency: record availability so main() can fail
# with a clean Ansible error message instead of an ImportError traceback.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_network
short_description: Creates/removes networks from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or remove network from OpenStack.
options:
name:
description:
- Name to be assigned to the network.
required: true
shared:
description:
- Whether this network is shared or not.
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down.
required: false
default: true
external:
description:
- Whether this network is externally accessible.
required: false
default: false
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
required: false
default: present
provider_physical_network:
description:
- The physical network where this network object is implemented.
required: false
default: None
version_added: "2.1"
provider_network_type:
description:
- The type of physical network that maps to this network resource.
choices: ['flat', 'vlan', 'vxlan', 'gre', 'uplink', 'local', 'geneve']
required: false
default: None
version_added: "2.1"
provider_segmentation_id:
description:
- An isolated segment on the physical network. The I(network_type)
attribute defines the segmentation model. For example, if the
I(network_type) value is vlan, this ID is a vlan identifier. If
the I(network_type) value is gre, this ID is a gre key.
required: false
default: None
version_added: "2.1"
project:
description:
- Project name or ID containing the network (name admin-only)
required: false
default: None
version_added: "2.1"
requirements: ["shade"]
'''
EXAMPLES = '''
# Create an externally accessible network named 'ext_network'.
- os_network:
cloud: mycloud
state: present
name: ext_network
external: true
'''
RETURN = '''
network:
description: Dictionary describing the network.
returned: On success when I(state) is 'present'.
type: dictionary
contains:
id:
description: Network ID.
type: string
sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
name:
description: Network name.
type: string
sample: "ext_network"
shared:
description: Indicates whether this network is shared across all tenants.
type: bool
sample: false
status:
description: Network status.
type: string
sample: "ACTIVE"
mtu:
description: The MTU of a network resource.
type: integer
sample: 0
admin_state_up:
description: The administrative state of the network.
type: bool
sample: true
port_security_enabled:
description: The port security status
type: bool
sample: true
router:external:
description: Indicates whether this network is externally accessible.
type: bool
sample: true
tenant_id:
description: The tenant ID.
type: string
sample: "06820f94b9f54b119636be2728d216fc"
subnets:
description: The associated subnets.
type: list
sample: []
"provider:physical_network":
description: The physical network where this network object is implemented.
type: string
sample: my_vlan_net
"provider:network_type":
description: The type of physical network that maps to this network resource.
type: string
sample: vlan
"provider:segmentation_id":
description: An isolated segment on the physical network.
type: string
sample: 101
'''
def main():
    """Ansible module entry point: ensure an OpenStack network exists or not.

    Reads module parameters, talks to OpenStack via shade, and exits through
    module.exit_json / module.fail_json (both of which terminate the process).
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        shared=dict(default=False, type='bool'),
        admin_state_up=dict(default=True, type='bool'),
        external=dict(default=False, type='bool'),
        provider_physical_network=dict(required=False),
        provider_network_type=dict(required=False, default=None,
                                   choices=['flat', 'vlan', 'vxlan', 'gre',
                                            'uplink', 'local', 'geneve']),
        provider_segmentation_id=dict(required=False),
        state=dict(default='present', choices=['absent', 'present']),
        project=dict(default=None)
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    if (module.params['project'] and
            StrictVersion(shade.__version__) < StrictVersion('1.6.0')):
        # BUG FIX: the adjacent string literals previously concatenated to
        # "version ofthe shade" — a separating space was missing.
        module.fail_json(msg="To utilize project, the installed version of "
                             "the shade library MUST be >=1.6.0")

    state = module.params['state']
    name = module.params['name']
    shared = module.params['shared']
    admin_state_up = module.params['admin_state_up']
    external = module.params['external']
    provider_physical_network = module.params['provider_physical_network']
    provider_network_type = module.params['provider_network_type']
    provider_segmentation_id = module.params['provider_segmentation_id']
    # Pop 'project' so it is not forwarded to shade.openstack_cloud() below.
    project = module.params.pop('project')

    try:
        cloud = shade.openstack_cloud(**module.params)
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None
        net = cloud.get_network(name, filters=filters)

        if state == 'present':
            if not net:
                # Only pass provider attributes that were actually supplied.
                provider = {}
                if provider_physical_network:
                    provider['physical_network'] = provider_physical_network
                if provider_network_type:
                    provider['network_type'] = provider_network_type
                if provider_segmentation_id:
                    provider['segmentation_id'] = provider_segmentation_id

                if provider and StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
                    module.fail_json(msg="Shade >= 1.5.0 required to use provider options")

                if project_id is not None:
                    net = cloud.create_network(name, shared, admin_state_up,
                                               external, provider, project_id)
                else:
                    net = cloud.create_network(name, shared, admin_state_up,
                                               external, provider)
                changed = True
            else:
                # Network already exists; this module does not reconcile
                # attribute drift, so report no change.
                changed = False
            module.exit_json(changed=changed, network=net, id=net['id'])

        elif state == 'absent':
            if not net:
                module.exit_json(changed=False)
            else:
                cloud.delete_network(name)
                module.exit_json(changed=True)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| 34.27668 | 91 | 0.618427 |
2b4f83ee3434ecc89ad5956ed72b0675b3a844e7 | 132 | py | Python | _version.py | agile-geoscience/seisplot | 4afaea9d6825873ed99311a70778ebc5a4f17299 | [
"Apache-2.0"
] | 87 | 2016-01-21T00:52:47.000Z | 2022-02-16T21:08:53.000Z | _version.py | agile-geoscience/seisplot | 4afaea9d6825873ed99311a70778ebc5a4f17299 | [
"Apache-2.0"
] | 41 | 2016-01-22T14:01:17.000Z | 2020-05-18T12:48:46.000Z | _version.py | agilescientific/seisplot | 5d489950c0065de8c36ee48a0c79bc3e908bf87b | [
"Apache-2.0"
] | 46 | 2016-01-21T11:03:00.000Z | 2022-01-10T06:08:33.000Z | # -*- coding: utf-8 -*-
"""
Version.
Doing it this way provides for access in setup.py and via __version__
"""
__version__ = "0.4"
| 16.5 | 69 | 0.659091 |
216d6a345f7ccd177e4da0e692644c2e45d9c0ee | 2,394 | py | Python | src/normatrix/plugged/nested_branches.py | Saverio976/NorMatrix | a26b2d3814990b126c9f8b40cacd6d62b4e82ac5 | [
"MIT"
] | 6 | 2022-01-11T16:53:37.000Z | 2022-03-20T23:27:04.000Z | src/normatrix/plugged/nested_branches.py | Saverio976/NorMatrix | a26b2d3814990b126c9f8b40cacd6d62b4e82ac5 | [
"MIT"
] | 7 | 2022-01-07T18:37:32.000Z | 2022-03-03T21:49:31.000Z | src/normatrix/plugged/nested_branches.py | Saverio976/NorMatrix | a26b2d3814990b126c9f8b40cacd6d62b4e82ac5 | [
"MIT"
] | 4 | 2022-01-07T18:03:17.000Z | 2022-03-20T18:45:14.000Z | try:
from normatrix.source.file_parser import CFileParse
from normatrix.source.config import TypeLine
from normatrix.source.custom_regex import re_sub
except ModuleNotFoundError:
from src.normatrix.source.file_parser import CFileParse
from src.normatrix.source.config import TypeLine
from src.normatrix.source.custom_regex import re_sub
import re
def add_if_error(line: str, in_switch: bool, file: CFileParse, list_error: list, i: int) -> tuple:
    """Flag *line* (index *i* of ``file.sub_parsedline``) when its deep
    indentation suggests too many nested branches.

    Returns ``(in_switch, nb_error)``: the updated switch-tracking flag and
    a 0-or-1 error count.  (The original ``-> bool`` annotation was wrong —
    the function has always returned a 2-tuple.)
    """
    nb_error = 0
    # Track whether we are inside a `switch` block; a higher indentation
    # threshold applies there (cases sit one level deeper).
    if "switch " in line and line.endswith(" {"):
        in_switch = True
    if in_switch and line.endswith(" }"):
        in_switch = False
    # Deep-indentation trigger: 20 leading spaces inside a switch, 15 otherwise.
    condition = line.startswith(" " * 20 if in_switch else " " * 15)
    if condition:
        # Exemption: presumably a wrapped call-argument line — TODO confirm.
        if line.startswith(" " * 16) and line.endswith(");"):
            return in_switch, nb_error
        # Exemption: block openers and ternary expressions.
        if line.endswith(") {") or ") ? " in line:
            return in_switch, nb_error
        # Exemption: continuation of a backslash-terminated previous line.
        if i != 0 and file.real_parsedline[i - 1][1].endswith("\\"):
            return in_switch, nb_error
        # Exemption: tail of a multi-line if/while/for condition.
        if line.endswith(")") and \
            ("if (" in file.sub_parsedline[i - 1][1] or \
            "while (" in file.sub_parsedline[i - 1][1] or \
            "for (" in file.sub_parsedline[i - 1][1]):
            return in_switch, nb_error
        list_error.append((i + 1, f"maybe too many branch ? ({line})"))
        nb_error += 1
    return (in_switch, nb_error)
def check(context, file: CFileParse) -> tuple:
    """NorMatrix plugin entry point: report possibly over-nested branches.

    Returns ``(nb_error, 1, list_error)`` where *list_error* holds
    ``(line_number, message)`` pairs.  Makefiles are skipped entirely.
    """
    nb_error = 0
    in_switch = False
    # is_in_func[0]: a FUNCTION-typed line was seen (signature);
    # is_in_func[1]: its opening '{' was seen, i.e. we are inside the body.
    is_in_func = [False, False]
    list_error = []
    if file.filepath.endswith("Makefile"):
        return (nb_error, 1, list_error)
    for i in range(len(file.sub_parsedline)):
        line = file.sub_parsedline[i][1]
        # Strip C++-style `//` comments and blank out whitespace-only lines
        # (re_sub is the project's regex wrapper with a watchdog timeout).
        line = re_sub('\/\/.*', '', line, timeout=0.1)
        line = re_sub("^( )*$", '', line, timeout=0.1)
        if not is_in_func[0] and file.sub_parsedline[i][0] == TypeLine.FUNCTION:
            is_in_func[0] = True
        if is_in_func[1] and file.sub_parsedline[i][0] != TypeLine.FUNCTION:
            is_in_func[0] = False
            is_in_func[1] = False
        if is_in_func[0] and line.startswith('{'):
            is_in_func[1] = True
        if is_in_func[1] and not line.startswith('}'):
            # add_if_error returns (updated in_switch flag, 0-or-1 error count).
            in_switch, is_error = add_if_error(line, in_switch, file, list_error, i)
            nb_error += is_error
    return (nb_error, 1, list_error)
| 40.576271 | 97 | 0.606516 |
b5b7eccb207d53a4c3281dfa6414afdca1d0d5dc | 566 | py | Python | venv/Lib/site-packages/nipype/interfaces/semtools/filtering/__init__.py | richung99/digitizePlots | 6b408c820660a415a289726e3223e8f558d3e18b | [
"MIT"
] | 585 | 2015-01-12T16:06:47.000Z | 2022-03-26T14:51:08.000Z | nipype/interfaces/semtools/filtering/__init__.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | [
"Apache-2.0"
] | 2,329 | 2015-01-01T09:56:41.000Z | 2022-03-30T14:24:49.000Z | nipype/interfaces/semtools/filtering/__init__.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | [
"Apache-2.0"
] | 487 | 2015-01-20T01:04:52.000Z | 2022-03-21T21:22:47.000Z | # -*- coding: utf-8 -*-
from .denoising import UnbiasedNonLocalMeans
from .featuredetection import (
GenerateSummedGradientImage,
CannySegmentationLevelSetImageFilter,
DilateImage,
TextureFromNoiseImageFilter,
FlippedDifference,
ErodeImage,
GenerateBrainClippedImage,
NeighborhoodMedian,
GenerateTestImage,
NeighborhoodMean,
HammerAttributeCreator,
TextureMeasureFilter,
DilateMask,
DumpBinaryTrainingVectors,
DistanceMaps,
STAPLEAnalysis,
GradientAnisotropicDiffusionImageFilter,
CannyEdge,
)
| 24.608696 | 44 | 0.761484 |
c595b3bbcc9893b7b68e623ab61ad59c27acea48 | 1,793 | py | Python | concurrent_rps.py | aszychlinski/scrapbook | a5cba667a4a5eec6719b36c41c9722cf278f74a2 | [
"MIT"
] | null | null | null | concurrent_rps.py | aszychlinski/scrapbook | a5cba667a4a5eec6719b36c41c9722cf278f74a2 | [
"MIT"
] | null | null | null | concurrent_rps.py | aszychlinski/scrapbook | a5cba667a4a5eec6719b36c41c9722cf278f74a2 | [
"MIT"
] | null | null | null | from random import choice
from time import time
from concurrent.futures import ThreadPoolExecutor
class RPSPlayer:
    """A rock-paper-scissors player who favours one figure over the others."""

    figures = ['rock', 'paper', 'scissors']

    def __init__(self, name: str, preference: str):
        """Create a player *name* whose picks are biased toward *preference*."""
        self.name = name
        self.preference = preference
        self.score = 0
        self.last_choice = None

    @property
    def pattern(self):
        """Weighted pick pool: preferred figure x6, each other figure x3."""
        pool = list(RPSPlayer.figures)
        pool.remove(self.preference)  # raises ValueError for unknown figures
        return [self.preference] * 6 + pool * 3
def rps(player1: RPSPlayer, player2: RPSPlayer):
    """Play one weighted round, update the winner's score, and announce it."""
    # What each figure defeats; equivalent to the classic outcome table.
    beats = {'rock': 'scissors', 'scissors': 'paper', 'paper': 'rock'}
    pick1 = choice(player1.pattern)
    pick2 = choice(player2.pattern)
    player1.last_choice = pick1
    player2.last_choice = pick2
    if pick1 == pick2:
        print(f"Both players chose {pick1} - it's a draw!")
        return
    winner = player1 if beats[pick1] == pick2 else player2
    winner.score += 1
    print(f'{winner.name} won with {winner.last_choice}!')
# Benchmark: 10,000 games submitted to a thread pool vs. a plain loop.
john, jack = RPSPlayer('John', 'rock'), RPSPlayer('Jack', 'paper')
start = time()
with ThreadPoolExecutor(max_workers=10) as executor:
    for _ in range(10000):
        # `derp` only keeps the last Future; results are never inspected.
        # The `with` block still waits for all submitted games to finish.
        # NOTE(review): `score += 1` runs in worker threads without a lock —
        # it is a read-modify-write, so increments can be lost under contention.
        derp = executor.submit(rps, john, jack)
end = time()
phase1 = end - start
start = time()
for _ in range(10000):
    rps(john, jack)
end = time()
phase2 = end - start
print(f'Final score is... {john.name}: {john.score}, {jack.name}: {jack.score}')
print(f'10,000 games using ThreadPoolExecutor took {phase1} seconds.')
print(f'10,000 games using a single loop took {phase2} seconds.')
# turns out that concurrent.futures is needless overhead for such a simple function :)
90c9e6f0fbc593947541bbc2fc6e755d046134f8 | 14,226 | py | Python | tests/components/integration/test_sensor.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 3 | 2019-10-02T04:40:26.000Z | 2020-02-16T13:19:08.000Z | tests/components/integration/test_sensor.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 1,016 | 2019-06-18T21:27:47.000Z | 2020-03-06T11:09:58.000Z | tests/components/integration/test_sensor.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 1 | 2021-12-10T10:33:28.000Z | 2021-12-10T10:33:28.000Z | """The tests for the integration sensor platform."""
from datetime import timedelta
from unittest.mock import patch
from homeassistant.components.sensor import SensorDeviceClass, SensorStateClass
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
ENERGY_KILO_WATT_HOUR,
ENERGY_WATT_HOUR,
POWER_KILO_WATT,
POWER_WATT,
STATE_UNKNOWN,
TIME_SECONDS,
)
from homeassistant.core import HomeAssistant, State
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import mock_restore_cache
async def test_state(hass) -> None:
    """Test integration sensor state."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
            "round": 2,
        }
    }
    # Freeze utcnow so the integral's time delta is fully deterministic.
    now = dt_util.utcnow()
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        assert await async_setup_component(hass, "sensor", config)
        entity_id = config["sensor"]["source"]
        hass.states.async_set(entity_id, 1, {ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT})
        await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # Before the source reports a device_class, the integral sensor only
    # exposes its state class.
    assert state.attributes.get("state_class") is SensorStateClass.TOTAL
    assert "device_class" not in state.attributes
    # Advance time by exactly one hour and force a state write.
    future_now = dt_util.utcnow() + timedelta(seconds=3600)
    with patch("homeassistant.util.dt.utcnow", return_value=future_now):
        hass.states.async_set(
            entity_id,
            1,
            {
                "device_class": SensorDeviceClass.POWER,
                ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT,
            },
            force_update=True,
        )
        await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # Testing a power sensor at 1 KiloWatts for 1hour = 1kWh
    assert round(float(state.state), config["sensor"]["round"]) == 1.0
    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
    assert state.attributes.get("device_class") == SensorDeviceClass.ENERGY
    assert state.attributes.get("state_class") is SensorStateClass.TOTAL
async def test_restore_state(hass: HomeAssistant) -> None:
    """Test integration sensor state is restored correctly."""
    # The restore cache must be primed before the platform is set up.
    mock_restore_cache(
        hass,
        (
            State(
                "sensor.integration",
                "100.0",
                {
                    "device_class": SensorDeviceClass.ENERGY,
                    "unit_of_measurement": ENERGY_KILO_WATT_HOUR,
                },
            ),
        ),
    )
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
            "round": 2,
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state
    # "100.0" is restored and re-rendered with the configured 2-decimal rounding.
    assert state.state == "100.00"
    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
    assert state.attributes.get("device_class") == SensorDeviceClass.ENERGY
async def test_restore_state_failed(hass: HomeAssistant) -> None:
    """Test that a non-restorable cached state leaves the sensor unknown."""
    # Prime the restore cache with a non-numeric state: restoration must fail
    # gracefully instead of raising.
    mock_restore_cache(
        hass,
        (
            State(
                "sensor.integration",
                "INVALID",
                {
                    "last_reset": "2019-10-06T21:00:00.000000",
                },
            ),
        ),
    )
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state
    # The invalid cached value is discarded: state stays unknown and no
    # unit/device_class is carried over.
    assert state.state == "unknown"
    assert state.attributes.get("unit_of_measurement") is None
    assert state.attributes.get("state_class") is SensorStateClass.TOTAL
    assert "device_class" not in state.attributes
async def test_trapezoidal(hass):
    """Test integration sensor state with the (default) trapezoidal method."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
            "round": 2,
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 0, {})
    await hass.async_block_till_done()
    # Testing a power sensor with non-monotonic intervals and values
    for time, value in [(20, 10), (30, 30), (40, 5), (50, 0)]:
        now = dt_util.utcnow() + timedelta(minutes=time)
        with patch("homeassistant.util.dt.utcnow", return_value=now):
            hass.states.async_set(
                entity_id,
                value,
                {ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT},
                force_update=True,
            )
            await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # Trapezoid areas (kW, minutes -> hours):
    # (0+10)/2*20/60 + (10+30)/2*10/60 + (30+5)/2*10/60 + (5+0)/2*10/60 = 8.33
    assert round(float(state.state), config["sensor"]["round"]) == 8.33
    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
async def test_left(hass):
    """Test integration sensor state with the left Riemann method."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "method": "left",
            "source": "sensor.power",
            "round": 2,
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 0, {ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT})
    await hass.async_block_till_done()
    # Testing a power sensor with non-monotonic intervals and values
    for time, value in [(20, 10), (30, 30), (40, 5), (50, 0)]:
        now = dt_util.utcnow() + timedelta(minutes=time)
        with patch("homeassistant.util.dt.utcnow", return_value=now):
            hass.states.async_set(
                entity_id,
                value,
                {ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT},
                force_update=True,
            )
            await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # Left Riemann (hold previous value):
    # 0*20/60 + 10*10/60 + 30*10/60 + 5*10/60 = 7.5
    assert round(float(state.state), config["sensor"]["round"]) == 7.5
    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
async def test_right(hass):
    """Test integration sensor state with the right Riemann method."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "method": "right",
            "source": "sensor.power",
            "round": 2,
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 0, {ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT})
    await hass.async_block_till_done()
    # Testing a power sensor with non-monotonic intervals and values
    for time, value in [(20, 10), (30, 30), (40, 5), (50, 0)]:
        now = dt_util.utcnow() + timedelta(minutes=time)
        with patch("homeassistant.util.dt.utcnow", return_value=now):
            hass.states.async_set(
                entity_id,
                value,
                {ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT},
                force_update=True,
            )
            await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # Right Riemann (use new value):
    # 10*20/60 + 30*10/60 + 5*10/60 + 0*10/60 = 9.17 (rounded)
    assert round(float(state.state), config["sensor"]["round"]) == 9.17
    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
async def test_prefix(hass):
    """Test integration sensor state using a power source."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
            "round": 2,
            "unit_prefix": "k",
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 1000, {"unit_of_measurement": POWER_WATT})
    await hass.async_block_till_done()
    # Advance exactly one hour so the W -> kWh conversion is easy to verify.
    now = dt_util.utcnow() + timedelta(seconds=3600)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.states.async_set(
            entity_id, 1000, {"unit_of_measurement": POWER_WATT}, force_update=True
        )
        await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # Testing a power sensor at 1000 Watts for 1hour = 1kWh
    # (1000 Wh scaled by the configured "k" unit prefix).
    assert round(float(state.state), config["sensor"]["round"]) == 1.0
    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
async def test_suffix(hass):
    """Test integration sensor state using a network counter source."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.bytes_per_second",
            "round": 2,
            "unit_prefix": "k",
            "unit_time": TIME_SECONDS,
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    entity_id = config["sensor"]["source"]
    # NOTE(review): the attributes claim kW although the entity models
    # bytes/s; the assertion below only checks the magnitude, so the
    # mismatched unit constant has no effect here — consider cleaning up.
    hass.states.async_set(entity_id, 1000, {ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT})
    await hass.async_block_till_done()
    now = dt_util.utcnow() + timedelta(seconds=10)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.states.async_set(
            entity_id,
            1000,
            {ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT},
            force_update=True,
        )
        await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # Testing a network speed sensor at 1000 bytes/s over 10s = 10kbytes
    # (1000 * 10 = 10000, divided by the "k" prefix).
    assert round(float(state.state)) == 10
async def test_units(hass):
    """Test integration sensor units using a power source."""
    source_entity = "sensor.power"
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": source_entity,
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    # Replay the real start-up sequence: the source entity reports states
    # before its unit_of_measurement has been populated (None twice), and
    # only then with a real unit.
    for value, unit in ((100, None), (200, None), (300, POWER_WATT)):
        hass.states.async_set(source_entity, value, {"unit_of_measurement": unit})
        await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # The integral sensor must ignore the source's units until they are valid.
    assert state.attributes.get("unit_of_measurement") == ENERGY_WATT_HOUR
async def test_device_class(hass):
    """Test integration sensor device class using a power source."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    entity_id = config["sensor"]["source"]
    # This replicates the current sequence when HA starts up in a real runtime
    # by updating the base sensor state before the base sensor's units
    # or state have been correctly populated. Those interim updates
    # include states of None and Unknown
    hass.states.async_set(entity_id, STATE_UNKNOWN, {})
    await hass.async_block_till_done()
    hass.states.async_set(entity_id, 100, {"device_class": None})
    await hass.async_block_till_done()
    hass.states.async_set(entity_id, 200, {"device_class": None})
    await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert "device_class" not in state.attributes
    hass.states.async_set(
        entity_id, 300, {"device_class": SensorDeviceClass.POWER}, force_update=True
    )
    await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    # Testing the sensor ignored the source sensor's device class until
    # it became valid (POWER source maps to an ENERGY integral).
    assert state.attributes.get("device_class") == SensorDeviceClass.ENERGY
async def test_calc_errors(hass):
    """Test integration sensor behavior when the source state is not numeric."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
        }
    }
    assert await async_setup_component(hass, "sensor", config)
    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, None, {})
    await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    # With the source sensor in a None state, the Riemann sensor should be
    # unknown
    assert state is not None
    assert state.state == STATE_UNKNOWN
    # Moving from an unknown state to a value is a calc error and should
    # not change the value of the Riemann sensor.
    hass.states.async_set(entity_id, 0, {"device_class": None})
    await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    assert state.state == STATE_UNKNOWN
    # With the source sensor updated successfully, the Riemann sensor
    # should have a zero (known) value.
    hass.states.async_set(entity_id, 1, {"device_class": None})
    await hass.async_block_till_done()
    state = hass.states.get("sensor.integration")
    assert state is not None
    assert round(float(state.state)) == 0
| 32.854503 | 88 | 0.634332 |
45b2e6bb7b7409e7571806d70172dec83ad9f011 | 48,233 | py | Python | accelbyte_py_sdk/api/platform/models/payment_order.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/platform/models/payment_order.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/platform/models/payment_order.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ....core import StrEnum
from ..models.currency_summary import CurrencySummary
from ..models.transaction import Transaction
class ChannelEnum(StrEnum):
    """Generated enum of payment channel values (see PaymentOrder.channel)."""
    EXTERNAL = "EXTERNAL"
    INTERNAL = "INTERNAL"
class ItemTypeEnum(StrEnum):
    """Generated enum of purchasable item types (see PaymentOrder.item_type)."""
    APP = "APP"
    COINS = "COINS"
    INGAMEITEM = "INGAMEITEM"
    BUNDLE = "BUNDLE"
    CODE = "CODE"
    SUBSCRIPTION = "SUBSCRIPTION"
    SEASON = "SEASON"
    MEDIA = "MEDIA"
class PaymentProviderEnum(StrEnum):
    """Generated enum of payment providers (see PaymentOrder.payment_provider)."""
    WALLET = "WALLET"
    XSOLLA = "XSOLLA"
    ADYEN = "ADYEN"
    STRIPE = "STRIPE"
    CHECKOUT = "CHECKOUT"
    ALIPAY = "ALIPAY"
    WXPAY = "WXPAY"
    PAYPAL = "PAYPAL"
class StatusEnum(StrEnum):
    """Generated enum of payment order lifecycle states (see PaymentOrder.status)."""
    INIT = "INIT"
    AUTHORISED = "AUTHORISED"
    AUTHORISE_FAILED = "AUTHORISE_FAILED"
    CHARGED = "CHARGED"
    CHARGE_FAILED = "CHARGE_FAILED"
    NOTIFICATION_OF_CHARGEBACK = "NOTIFICATION_OF_CHARGEBACK"
    REQUEST_FOR_INFORMATION = "REQUEST_FOR_INFORMATION"
    CHARGEBACK = "CHARGEBACK"
    CHARGEBACK_REVERSED = "CHARGEBACK_REVERSED"
    REFUNDING = "REFUNDING"
    REFUNDED = "REFUNDED"
    REFUND_FAILED = "REFUND_FAILED"
    DELETED = "DELETED"
class PaymentOrder(Model):
"""Payment order (PaymentOrder)
Properties:
authorised_time: (authorisedTime) OPTIONAL str
channel: (channel) OPTIONAL Union[str, ChannelEnum]
chargeback_reversed_time: (chargebackReversedTime) OPTIONAL str
chargeback_time: (chargebackTime) OPTIONAL str
charged_time: (chargedTime) OPTIONAL str
charging: (charging) OPTIONAL bool
created_at: (createdAt) OPTIONAL str
created_time: (createdTime) OPTIONAL str
currency: (currency) OPTIONAL CurrencySummary
custom_parameters: (customParameters) OPTIONAL Dict[str, Any]
description: (description) OPTIONAL str
ext_order_no: (extOrderNo) OPTIONAL str
ext_user_id: (extUserId) OPTIONAL str
item_type: (itemType) OPTIONAL Union[str, ItemTypeEnum]
language: (language) OPTIONAL str
metadata: (metadata) OPTIONAL Dict[str, str]
namespace: (namespace) OPTIONAL str
notify_url: (notifyUrl) OPTIONAL str
omit_notification: (omitNotification) OPTIONAL bool
payment_method: (paymentMethod) OPTIONAL str
payment_method_fee: (paymentMethodFee) OPTIONAL int
payment_order_no: (paymentOrderNo) OPTIONAL str
payment_provider: (paymentProvider) OPTIONAL Union[str, PaymentProviderEnum]
payment_provider_fee: (paymentProviderFee) OPTIONAL int
payment_station_url: (paymentStationUrl) OPTIONAL str
platform: (platform) OPTIONAL str
price: (price) OPTIONAL int
recurring_payment_order_no: (recurringPaymentOrderNo) OPTIONAL str
refunded_time: (refundedTime) OPTIONAL str
region: (region) OPTIONAL str
return_url: (returnUrl) OPTIONAL str
rvn: (rvn) OPTIONAL int
sales_tax: (salesTax) OPTIONAL int
sandbox: (sandbox) OPTIONAL bool
sku: (sku) OPTIONAL str
state: (state) OPTIONAL str
status: (status) OPTIONAL Union[str, StatusEnum]
status_reason: (statusReason) OPTIONAL str
subscription_id: (subscriptionId) OPTIONAL str
subtotal_price: (subtotalPrice) OPTIONAL int
target_namespace: (targetNamespace) OPTIONAL str
target_user_id: (targetUserId) OPTIONAL str
tax: (tax) OPTIONAL int
title: (title) OPTIONAL str
total_price: (totalPrice) OPTIONAL int
total_tax: (totalTax) OPTIONAL int
transactions: (transactions) OPTIONAL List[Transaction]
updated_at: (updatedAt) OPTIONAL str
user_id: (userId) OPTIONAL str
vat: (vat) OPTIONAL int
zip_code: (zipCode) OPTIONAL str
"""
# region fields
authorised_time: str # OPTIONAL
channel: Union[str, ChannelEnum] # OPTIONAL
chargeback_reversed_time: str # OPTIONAL
chargeback_time: str # OPTIONAL
charged_time: str # OPTIONAL
charging: bool # OPTIONAL
created_at: str # OPTIONAL
created_time: str # OPTIONAL
currency: CurrencySummary # OPTIONAL
custom_parameters: Dict[str, Any] # OPTIONAL
description: str # OPTIONAL
ext_order_no: str # OPTIONAL
ext_user_id: str # OPTIONAL
item_type: Union[str, ItemTypeEnum] # OPTIONAL
language: str # OPTIONAL
metadata: Dict[str, str] # OPTIONAL
namespace: str # OPTIONAL
notify_url: str # OPTIONAL
omit_notification: bool # OPTIONAL
payment_method: str # OPTIONAL
payment_method_fee: int # OPTIONAL
payment_order_no: str # OPTIONAL
payment_provider: Union[str, PaymentProviderEnum] # OPTIONAL
payment_provider_fee: int # OPTIONAL
payment_station_url: str # OPTIONAL
platform: str # OPTIONAL
price: int # OPTIONAL
recurring_payment_order_no: str # OPTIONAL
refunded_time: str # OPTIONAL
region: str # OPTIONAL
return_url: str # OPTIONAL
rvn: int # OPTIONAL
sales_tax: int # OPTIONAL
sandbox: bool # OPTIONAL
sku: str # OPTIONAL
state: str # OPTIONAL
status: Union[str, StatusEnum] # OPTIONAL
status_reason: str # OPTIONAL
subscription_id: str # OPTIONAL
subtotal_price: int # OPTIONAL
target_namespace: str # OPTIONAL
target_user_id: str # OPTIONAL
tax: int # OPTIONAL
title: str # OPTIONAL
total_price: int # OPTIONAL
total_tax: int # OPTIONAL
transactions: List[Transaction] # OPTIONAL
updated_at: str # OPTIONAL
user_id: str # OPTIONAL
vat: int # OPTIONAL
zip_code: str # OPTIONAL
# endregion fields
# region with_x methods
def with_authorised_time(self, value: str) -> PaymentOrder:
self.authorised_time = value
return self
def with_channel(self, value: Union[str, ChannelEnum]) -> PaymentOrder:
self.channel = value
return self
def with_chargeback_reversed_time(self, value: str) -> PaymentOrder:
self.chargeback_reversed_time = value
return self
def with_chargeback_time(self, value: str) -> PaymentOrder:
self.chargeback_time = value
return self
def with_charged_time(self, value: str) -> PaymentOrder:
self.charged_time = value
return self
def with_charging(self, value: bool) -> PaymentOrder:
self.charging = value
return self
def with_created_at(self, value: str) -> PaymentOrder:
self.created_at = value
return self
def with_created_time(self, value: str) -> PaymentOrder:
self.created_time = value
return self
def with_currency(self, value: CurrencySummary) -> PaymentOrder:
self.currency = value
return self
def with_custom_parameters(self, value: Dict[str, Any]) -> PaymentOrder:
self.custom_parameters = value
return self
def with_description(self, value: str) -> PaymentOrder:
self.description = value
return self
def with_ext_order_no(self, value: str) -> PaymentOrder:
self.ext_order_no = value
return self
def with_ext_user_id(self, value: str) -> PaymentOrder:
self.ext_user_id = value
return self
def with_item_type(self, value: Union[str, ItemTypeEnum]) -> PaymentOrder:
self.item_type = value
return self
def with_language(self, value: str) -> PaymentOrder:
self.language = value
return self
def with_metadata(self, value: Dict[str, str]) -> PaymentOrder:
self.metadata = value
return self
def with_namespace(self, value: str) -> PaymentOrder:
self.namespace = value
return self
def with_notify_url(self, value: str) -> PaymentOrder:
self.notify_url = value
return self
def with_omit_notification(self, value: bool) -> PaymentOrder:
self.omit_notification = value
return self
def with_payment_method(self, value: str) -> PaymentOrder:
self.payment_method = value
return self
def with_payment_method_fee(self, value: int) -> PaymentOrder:
self.payment_method_fee = value
return self
def with_payment_order_no(self, value: str) -> PaymentOrder:
self.payment_order_no = value
return self
def with_payment_provider(self, value: Union[str, PaymentProviderEnum]) -> PaymentOrder:
self.payment_provider = value
return self
def with_payment_provider_fee(self, value: int) -> PaymentOrder:
self.payment_provider_fee = value
return self
def with_payment_station_url(self, value: str) -> PaymentOrder:
self.payment_station_url = value
return self
def with_platform(self, value: str) -> PaymentOrder:
self.platform = value
return self
def with_price(self, value: int) -> PaymentOrder:
self.price = value
return self
def with_recurring_payment_order_no(self, value: str) -> PaymentOrder:
self.recurring_payment_order_no = value
return self
def with_refunded_time(self, value: str) -> PaymentOrder:
self.refunded_time = value
return self
def with_region(self, value: str) -> PaymentOrder:
self.region = value
return self
def with_return_url(self, value: str) -> PaymentOrder:
self.return_url = value
return self
def with_rvn(self, value: int) -> PaymentOrder:
self.rvn = value
return self
def with_sales_tax(self, value: int) -> PaymentOrder:
self.sales_tax = value
return self
def with_sandbox(self, value: bool) -> PaymentOrder:
self.sandbox = value
return self
def with_sku(self, value: str) -> PaymentOrder:
self.sku = value
return self
def with_state(self, value: str) -> PaymentOrder:
self.state = value
return self
def with_status(self, value: Union[str, StatusEnum]) -> PaymentOrder:
self.status = value
return self
def with_status_reason(self, value: str) -> PaymentOrder:
self.status_reason = value
return self
def with_subscription_id(self, value: str) -> PaymentOrder:
self.subscription_id = value
return self
def with_subtotal_price(self, value: int) -> PaymentOrder:
self.subtotal_price = value
return self
def with_target_namespace(self, value: str) -> PaymentOrder:
self.target_namespace = value
return self
def with_target_user_id(self, value: str) -> PaymentOrder:
self.target_user_id = value
return self
def with_tax(self, value: int) -> PaymentOrder:
self.tax = value
return self
def with_title(self, value: str) -> PaymentOrder:
self.title = value
return self
def with_total_price(self, value: int) -> PaymentOrder:
self.total_price = value
return self
def with_total_tax(self, value: int) -> PaymentOrder:
self.total_tax = value
return self
def with_transactions(self, value: List[Transaction]) -> PaymentOrder:
self.transactions = value
return self
def with_updated_at(self, value: str) -> PaymentOrder:
self.updated_at = value
return self
def with_user_id(self, value: str) -> PaymentOrder:
self.user_id = value
return self
def with_vat(self, value: int) -> PaymentOrder:
self.vat = value
return self
def with_zip_code(self, value: str) -> PaymentOrder:
self.zip_code = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "authorised_time"):
result["authorisedTime"] = str(self.authorised_time)
elif include_empty:
result["authorisedTime"] = ""
if hasattr(self, "channel"):
result["channel"] = str(self.channel)
elif include_empty:
result["channel"] = Union[str, ChannelEnum]()
if hasattr(self, "chargeback_reversed_time"):
result["chargebackReversedTime"] = str(self.chargeback_reversed_time)
elif include_empty:
result["chargebackReversedTime"] = ""
if hasattr(self, "chargeback_time"):
result["chargebackTime"] = str(self.chargeback_time)
elif include_empty:
result["chargebackTime"] = ""
if hasattr(self, "charged_time"):
result["chargedTime"] = str(self.charged_time)
elif include_empty:
result["chargedTime"] = ""
if hasattr(self, "charging"):
result["charging"] = bool(self.charging)
elif include_empty:
result["charging"] = False
if hasattr(self, "created_at"):
result["createdAt"] = str(self.created_at)
elif include_empty:
result["createdAt"] = ""
if hasattr(self, "created_time"):
result["createdTime"] = str(self.created_time)
elif include_empty:
result["createdTime"] = ""
if hasattr(self, "currency"):
result["currency"] = self.currency.to_dict(include_empty=include_empty)
elif include_empty:
result["currency"] = CurrencySummary()
if hasattr(self, "custom_parameters"):
result["customParameters"] = {str(k0): v0 for k0, v0 in self.custom_parameters.items()}
elif include_empty:
result["customParameters"] = {}
if hasattr(self, "description"):
result["description"] = str(self.description)
elif include_empty:
result["description"] = ""
if hasattr(self, "ext_order_no"):
result["extOrderNo"] = str(self.ext_order_no)
elif include_empty:
result["extOrderNo"] = ""
if hasattr(self, "ext_user_id"):
result["extUserId"] = str(self.ext_user_id)
elif include_empty:
result["extUserId"] = ""
if hasattr(self, "item_type"):
result["itemType"] = str(self.item_type)
elif include_empty:
result["itemType"] = Union[str, ItemTypeEnum]()
if hasattr(self, "language"):
result["language"] = str(self.language)
elif include_empty:
result["language"] = ""
if hasattr(self, "metadata"):
result["metadata"] = {str(k0): str(v0) for k0, v0 in self.metadata.items()}
elif include_empty:
result["metadata"] = {}
if hasattr(self, "namespace"):
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "notify_url"):
result["notifyUrl"] = str(self.notify_url)
elif include_empty:
result["notifyUrl"] = ""
if hasattr(self, "omit_notification"):
result["omitNotification"] = bool(self.omit_notification)
elif include_empty:
result["omitNotification"] = False
if hasattr(self, "payment_method"):
result["paymentMethod"] = str(self.payment_method)
elif include_empty:
result["paymentMethod"] = ""
if hasattr(self, "payment_method_fee"):
result["paymentMethodFee"] = int(self.payment_method_fee)
elif include_empty:
result["paymentMethodFee"] = 0
if hasattr(self, "payment_order_no"):
result["paymentOrderNo"] = str(self.payment_order_no)
elif include_empty:
result["paymentOrderNo"] = ""
if hasattr(self, "payment_provider"):
result["paymentProvider"] = str(self.payment_provider)
elif include_empty:
result["paymentProvider"] = Union[str, PaymentProviderEnum]()
if hasattr(self, "payment_provider_fee"):
result["paymentProviderFee"] = int(self.payment_provider_fee)
elif include_empty:
result["paymentProviderFee"] = 0
if hasattr(self, "payment_station_url"):
result["paymentStationUrl"] = str(self.payment_station_url)
elif include_empty:
result["paymentStationUrl"] = ""
if hasattr(self, "platform"):
result["platform"] = str(self.platform)
elif include_empty:
result["platform"] = ""
if hasattr(self, "price"):
result["price"] = int(self.price)
elif include_empty:
result["price"] = 0
if hasattr(self, "recurring_payment_order_no"):
result["recurringPaymentOrderNo"] = str(self.recurring_payment_order_no)
elif include_empty:
result["recurringPaymentOrderNo"] = ""
if hasattr(self, "refunded_time"):
result["refundedTime"] = str(self.refunded_time)
elif include_empty:
result["refundedTime"] = ""
if hasattr(self, "region"):
result["region"] = str(self.region)
elif include_empty:
result["region"] = ""
if hasattr(self, "return_url"):
result["returnUrl"] = str(self.return_url)
elif include_empty:
result["returnUrl"] = ""
if hasattr(self, "rvn"):
result["rvn"] = int(self.rvn)
elif include_empty:
result["rvn"] = 0
if hasattr(self, "sales_tax"):
result["salesTax"] = int(self.sales_tax)
elif include_empty:
result["salesTax"] = 0
if hasattr(self, "sandbox"):
result["sandbox"] = bool(self.sandbox)
elif include_empty:
result["sandbox"] = False
if hasattr(self, "sku"):
result["sku"] = str(self.sku)
elif include_empty:
result["sku"] = ""
if hasattr(self, "state"):
result["state"] = str(self.state)
elif include_empty:
result["state"] = ""
if hasattr(self, "status"):
result["status"] = str(self.status)
elif include_empty:
result["status"] = Union[str, StatusEnum]()
if hasattr(self, "status_reason"):
result["statusReason"] = str(self.status_reason)
elif include_empty:
result["statusReason"] = ""
if hasattr(self, "subscription_id"):
result["subscriptionId"] = str(self.subscription_id)
elif include_empty:
result["subscriptionId"] = ""
if hasattr(self, "subtotal_price"):
result["subtotalPrice"] = int(self.subtotal_price)
elif include_empty:
result["subtotalPrice"] = 0
if hasattr(self, "target_namespace"):
result["targetNamespace"] = str(self.target_namespace)
elif include_empty:
result["targetNamespace"] = ""
if hasattr(self, "target_user_id"):
result["targetUserId"] = str(self.target_user_id)
elif include_empty:
result["targetUserId"] = ""
if hasattr(self, "tax"):
result["tax"] = int(self.tax)
elif include_empty:
result["tax"] = 0
if hasattr(self, "title"):
result["title"] = str(self.title)
elif include_empty:
result["title"] = ""
if hasattr(self, "total_price"):
result["totalPrice"] = int(self.total_price)
elif include_empty:
result["totalPrice"] = 0
if hasattr(self, "total_tax"):
result["totalTax"] = int(self.total_tax)
elif include_empty:
result["totalTax"] = 0
if hasattr(self, "transactions"):
result["transactions"] = [i0.to_dict(include_empty=include_empty) for i0 in self.transactions]
elif include_empty:
result["transactions"] = []
if hasattr(self, "updated_at"):
result["updatedAt"] = str(self.updated_at)
elif include_empty:
result["updatedAt"] = ""
if hasattr(self, "user_id"):
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = ""
if hasattr(self, "vat"):
result["vat"] = int(self.vat)
elif include_empty:
result["vat"] = 0
if hasattr(self, "zip_code"):
result["zipCode"] = str(self.zip_code)
elif include_empty:
result["zipCode"] = ""
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
authorised_time: Optional[str] = None,
channel: Optional[Union[str, ChannelEnum]] = None,
chargeback_reversed_time: Optional[str] = None,
chargeback_time: Optional[str] = None,
charged_time: Optional[str] = None,
charging: Optional[bool] = None,
created_at: Optional[str] = None,
created_time: Optional[str] = None,
currency: Optional[CurrencySummary] = None,
custom_parameters: Optional[Dict[str, Any]] = None,
description: Optional[str] = None,
ext_order_no: Optional[str] = None,
ext_user_id: Optional[str] = None,
item_type: Optional[Union[str, ItemTypeEnum]] = None,
language: Optional[str] = None,
metadata: Optional[Dict[str, str]] = None,
namespace: Optional[str] = None,
notify_url: Optional[str] = None,
omit_notification: Optional[bool] = None,
payment_method: Optional[str] = None,
payment_method_fee: Optional[int] = None,
payment_order_no: Optional[str] = None,
payment_provider: Optional[Union[str, PaymentProviderEnum]] = None,
payment_provider_fee: Optional[int] = None,
payment_station_url: Optional[str] = None,
platform: Optional[str] = None,
price: Optional[int] = None,
recurring_payment_order_no: Optional[str] = None,
refunded_time: Optional[str] = None,
region: Optional[str] = None,
return_url: Optional[str] = None,
rvn: Optional[int] = None,
sales_tax: Optional[int] = None,
sandbox: Optional[bool] = None,
sku: Optional[str] = None,
state: Optional[str] = None,
status: Optional[Union[str, StatusEnum]] = None,
status_reason: Optional[str] = None,
subscription_id: Optional[str] = None,
subtotal_price: Optional[int] = None,
target_namespace: Optional[str] = None,
target_user_id: Optional[str] = None,
tax: Optional[int] = None,
title: Optional[str] = None,
total_price: Optional[int] = None,
total_tax: Optional[int] = None,
transactions: Optional[List[Transaction]] = None,
updated_at: Optional[str] = None,
user_id: Optional[str] = None,
vat: Optional[int] = None,
zip_code: Optional[str] = None,
) -> PaymentOrder:
instance = cls()
if authorised_time is not None:
instance.authorised_time = authorised_time
if channel is not None:
instance.channel = channel
if chargeback_reversed_time is not None:
instance.chargeback_reversed_time = chargeback_reversed_time
if chargeback_time is not None:
instance.chargeback_time = chargeback_time
if charged_time is not None:
instance.charged_time = charged_time
if charging is not None:
instance.charging = charging
if created_at is not None:
instance.created_at = created_at
if created_time is not None:
instance.created_time = created_time
if currency is not None:
instance.currency = currency
if custom_parameters is not None:
instance.custom_parameters = custom_parameters
if description is not None:
instance.description = description
if ext_order_no is not None:
instance.ext_order_no = ext_order_no
if ext_user_id is not None:
instance.ext_user_id = ext_user_id
if item_type is not None:
instance.item_type = item_type
if language is not None:
instance.language = language
if metadata is not None:
instance.metadata = metadata
if namespace is not None:
instance.namespace = namespace
if notify_url is not None:
instance.notify_url = notify_url
if omit_notification is not None:
instance.omit_notification = omit_notification
if payment_method is not None:
instance.payment_method = payment_method
if payment_method_fee is not None:
instance.payment_method_fee = payment_method_fee
if payment_order_no is not None:
instance.payment_order_no = payment_order_no
if payment_provider is not None:
instance.payment_provider = payment_provider
if payment_provider_fee is not None:
instance.payment_provider_fee = payment_provider_fee
if payment_station_url is not None:
instance.payment_station_url = payment_station_url
if platform is not None:
instance.platform = platform
if price is not None:
instance.price = price
if recurring_payment_order_no is not None:
instance.recurring_payment_order_no = recurring_payment_order_no
if refunded_time is not None:
instance.refunded_time = refunded_time
if region is not None:
instance.region = region
if return_url is not None:
instance.return_url = return_url
if rvn is not None:
instance.rvn = rvn
if sales_tax is not None:
instance.sales_tax = sales_tax
if sandbox is not None:
instance.sandbox = sandbox
if sku is not None:
instance.sku = sku
if state is not None:
instance.state = state
if status is not None:
instance.status = status
if status_reason is not None:
instance.status_reason = status_reason
if subscription_id is not None:
instance.subscription_id = subscription_id
if subtotal_price is not None:
instance.subtotal_price = subtotal_price
if target_namespace is not None:
instance.target_namespace = target_namespace
if target_user_id is not None:
instance.target_user_id = target_user_id
if tax is not None:
instance.tax = tax
if title is not None:
instance.title = title
if total_price is not None:
instance.total_price = total_price
if total_tax is not None:
instance.total_tax = total_tax
if transactions is not None:
instance.transactions = transactions
if updated_at is not None:
instance.updated_at = updated_at
if user_id is not None:
instance.user_id = user_id
if vat is not None:
instance.vat = vat
if zip_code is not None:
instance.zip_code = zip_code
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> PaymentOrder:
instance = cls()
if not dict_:
return instance
if "authorisedTime" in dict_ and dict_["authorisedTime"] is not None:
instance.authorised_time = str(dict_["authorisedTime"])
elif include_empty:
instance.authorised_time = ""
if "channel" in dict_ and dict_["channel"] is not None:
instance.channel = str(dict_["channel"])
elif include_empty:
instance.channel = Union[str, ChannelEnum]()
if "chargebackReversedTime" in dict_ and dict_["chargebackReversedTime"] is not None:
instance.chargeback_reversed_time = str(dict_["chargebackReversedTime"])
elif include_empty:
instance.chargeback_reversed_time = ""
if "chargebackTime" in dict_ and dict_["chargebackTime"] is not None:
instance.chargeback_time = str(dict_["chargebackTime"])
elif include_empty:
instance.chargeback_time = ""
if "chargedTime" in dict_ and dict_["chargedTime"] is not None:
instance.charged_time = str(dict_["chargedTime"])
elif include_empty:
instance.charged_time = ""
if "charging" in dict_ and dict_["charging"] is not None:
instance.charging = bool(dict_["charging"])
elif include_empty:
instance.charging = False
if "createdAt" in dict_ and dict_["createdAt"] is not None:
instance.created_at = str(dict_["createdAt"])
elif include_empty:
instance.created_at = ""
if "createdTime" in dict_ and dict_["createdTime"] is not None:
instance.created_time = str(dict_["createdTime"])
elif include_empty:
instance.created_time = ""
if "currency" in dict_ and dict_["currency"] is not None:
instance.currency = CurrencySummary.create_from_dict(dict_["currency"], include_empty=include_empty)
elif include_empty:
instance.currency = CurrencySummary()
if "customParameters" in dict_ and dict_["customParameters"] is not None:
instance.custom_parameters = {str(k0): v0 for k0, v0 in dict_["customParameters"].items()}
elif include_empty:
instance.custom_parameters = {}
if "description" in dict_ and dict_["description"] is not None:
instance.description = str(dict_["description"])
elif include_empty:
instance.description = ""
if "extOrderNo" in dict_ and dict_["extOrderNo"] is not None:
instance.ext_order_no = str(dict_["extOrderNo"])
elif include_empty:
instance.ext_order_no = ""
if "extUserId" in dict_ and dict_["extUserId"] is not None:
instance.ext_user_id = str(dict_["extUserId"])
elif include_empty:
instance.ext_user_id = ""
if "itemType" in dict_ and dict_["itemType"] is not None:
instance.item_type = str(dict_["itemType"])
elif include_empty:
instance.item_type = Union[str, ItemTypeEnum]()
if "language" in dict_ and dict_["language"] is not None:
instance.language = str(dict_["language"])
elif include_empty:
instance.language = ""
if "metadata" in dict_ and dict_["metadata"] is not None:
instance.metadata = {str(k0): str(v0) for k0, v0 in dict_["metadata"].items()}
elif include_empty:
instance.metadata = {}
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "notifyUrl" in dict_ and dict_["notifyUrl"] is not None:
instance.notify_url = str(dict_["notifyUrl"])
elif include_empty:
instance.notify_url = ""
if "omitNotification" in dict_ and dict_["omitNotification"] is not None:
instance.omit_notification = bool(dict_["omitNotification"])
elif include_empty:
instance.omit_notification = False
if "paymentMethod" in dict_ and dict_["paymentMethod"] is not None:
instance.payment_method = str(dict_["paymentMethod"])
elif include_empty:
instance.payment_method = ""
if "paymentMethodFee" in dict_ and dict_["paymentMethodFee"] is not None:
instance.payment_method_fee = int(dict_["paymentMethodFee"])
elif include_empty:
instance.payment_method_fee = 0
if "paymentOrderNo" in dict_ and dict_["paymentOrderNo"] is not None:
instance.payment_order_no = str(dict_["paymentOrderNo"])
elif include_empty:
instance.payment_order_no = ""
if "paymentProvider" in dict_ and dict_["paymentProvider"] is not None:
instance.payment_provider = str(dict_["paymentProvider"])
elif include_empty:
instance.payment_provider = Union[str, PaymentProviderEnum]()
if "paymentProviderFee" in dict_ and dict_["paymentProviderFee"] is not None:
instance.payment_provider_fee = int(dict_["paymentProviderFee"])
elif include_empty:
instance.payment_provider_fee = 0
if "paymentStationUrl" in dict_ and dict_["paymentStationUrl"] is not None:
instance.payment_station_url = str(dict_["paymentStationUrl"])
elif include_empty:
instance.payment_station_url = ""
if "platform" in dict_ and dict_["platform"] is not None:
instance.platform = str(dict_["platform"])
elif include_empty:
instance.platform = ""
if "price" in dict_ and dict_["price"] is not None:
instance.price = int(dict_["price"])
elif include_empty:
instance.price = 0
if "recurringPaymentOrderNo" in dict_ and dict_["recurringPaymentOrderNo"] is not None:
instance.recurring_payment_order_no = str(dict_["recurringPaymentOrderNo"])
elif include_empty:
instance.recurring_payment_order_no = ""
if "refundedTime" in dict_ and dict_["refundedTime"] is not None:
instance.refunded_time = str(dict_["refundedTime"])
elif include_empty:
instance.refunded_time = ""
if "region" in dict_ and dict_["region"] is not None:
instance.region = str(dict_["region"])
elif include_empty:
instance.region = ""
if "returnUrl" in dict_ and dict_["returnUrl"] is not None:
instance.return_url = str(dict_["returnUrl"])
elif include_empty:
instance.return_url = ""
if "rvn" in dict_ and dict_["rvn"] is not None:
instance.rvn = int(dict_["rvn"])
elif include_empty:
instance.rvn = 0
if "salesTax" in dict_ and dict_["salesTax"] is not None:
instance.sales_tax = int(dict_["salesTax"])
elif include_empty:
instance.sales_tax = 0
if "sandbox" in dict_ and dict_["sandbox"] is not None:
instance.sandbox = bool(dict_["sandbox"])
elif include_empty:
instance.sandbox = False
if "sku" in dict_ and dict_["sku"] is not None:
instance.sku = str(dict_["sku"])
elif include_empty:
instance.sku = ""
if "state" in dict_ and dict_["state"] is not None:
instance.state = str(dict_["state"])
elif include_empty:
instance.state = ""
if "status" in dict_ and dict_["status"] is not None:
instance.status = str(dict_["status"])
elif include_empty:
instance.status = Union[str, StatusEnum]()
if "statusReason" in dict_ and dict_["statusReason"] is not None:
instance.status_reason = str(dict_["statusReason"])
elif include_empty:
instance.status_reason = ""
if "subscriptionId" in dict_ and dict_["subscriptionId"] is not None:
instance.subscription_id = str(dict_["subscriptionId"])
elif include_empty:
instance.subscription_id = ""
if "subtotalPrice" in dict_ and dict_["subtotalPrice"] is not None:
instance.subtotal_price = int(dict_["subtotalPrice"])
elif include_empty:
instance.subtotal_price = 0
if "targetNamespace" in dict_ and dict_["targetNamespace"] is not None:
instance.target_namespace = str(dict_["targetNamespace"])
elif include_empty:
instance.target_namespace = ""
if "targetUserId" in dict_ and dict_["targetUserId"] is not None:
instance.target_user_id = str(dict_["targetUserId"])
elif include_empty:
instance.target_user_id = ""
if "tax" in dict_ and dict_["tax"] is not None:
instance.tax = int(dict_["tax"])
elif include_empty:
instance.tax = 0
if "title" in dict_ and dict_["title"] is not None:
instance.title = str(dict_["title"])
elif include_empty:
instance.title = ""
if "totalPrice" in dict_ and dict_["totalPrice"] is not None:
instance.total_price = int(dict_["totalPrice"])
elif include_empty:
instance.total_price = 0
if "totalTax" in dict_ and dict_["totalTax"] is not None:
instance.total_tax = int(dict_["totalTax"])
elif include_empty:
instance.total_tax = 0
if "transactions" in dict_ and dict_["transactions"] is not None:
instance.transactions = [Transaction.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["transactions"]]
elif include_empty:
instance.transactions = []
if "updatedAt" in dict_ and dict_["updatedAt"] is not None:
instance.updated_at = str(dict_["updatedAt"])
elif include_empty:
instance.updated_at = ""
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = ""
if "vat" in dict_ and dict_["vat"] is not None:
instance.vat = int(dict_["vat"])
elif include_empty:
instance.vat = 0
if "zipCode" in dict_ and dict_["zipCode"] is not None:
instance.zip_code = str(dict_["zipCode"])
elif include_empty:
instance.zip_code = ""
return instance
@classmethod
def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, PaymentOrder]:
return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_} if dict_ else {}
@classmethod
def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[PaymentOrder]:
return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []
@classmethod
def create_from_any(cls, any_: any, include_empty: bool = False, many: bool = False) -> Union[PaymentOrder, List[PaymentOrder], Dict[Any, PaymentOrder]]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"authorisedTime": "authorised_time",
"channel": "channel",
"chargebackReversedTime": "chargeback_reversed_time",
"chargebackTime": "chargeback_time",
"chargedTime": "charged_time",
"charging": "charging",
"createdAt": "created_at",
"createdTime": "created_time",
"currency": "currency",
"customParameters": "custom_parameters",
"description": "description",
"extOrderNo": "ext_order_no",
"extUserId": "ext_user_id",
"itemType": "item_type",
"language": "language",
"metadata": "metadata",
"namespace": "namespace",
"notifyUrl": "notify_url",
"omitNotification": "omit_notification",
"paymentMethod": "payment_method",
"paymentMethodFee": "payment_method_fee",
"paymentOrderNo": "payment_order_no",
"paymentProvider": "payment_provider",
"paymentProviderFee": "payment_provider_fee",
"paymentStationUrl": "payment_station_url",
"platform": "platform",
"price": "price",
"recurringPaymentOrderNo": "recurring_payment_order_no",
"refundedTime": "refunded_time",
"region": "region",
"returnUrl": "return_url",
"rvn": "rvn",
"salesTax": "sales_tax",
"sandbox": "sandbox",
"sku": "sku",
"state": "state",
"status": "status",
"statusReason": "status_reason",
"subscriptionId": "subscription_id",
"subtotalPrice": "subtotal_price",
"targetNamespace": "target_namespace",
"targetUserId": "target_user_id",
"tax": "tax",
"title": "title",
"totalPrice": "total_price",
"totalTax": "total_tax",
"transactions": "transactions",
"updatedAt": "updated_at",
"userId": "user_id",
"vat": "vat",
"zipCode": "zip_code",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"authorisedTime": False,
"channel": False,
"chargebackReversedTime": False,
"chargebackTime": False,
"chargedTime": False,
"charging": False,
"createdAt": False,
"createdTime": False,
"currency": False,
"customParameters": False,
"description": False,
"extOrderNo": False,
"extUserId": False,
"itemType": False,
"language": False,
"metadata": False,
"namespace": False,
"notifyUrl": False,
"omitNotification": False,
"paymentMethod": False,
"paymentMethodFee": False,
"paymentOrderNo": False,
"paymentProvider": False,
"paymentProviderFee": False,
"paymentStationUrl": False,
"platform": False,
"price": False,
"recurringPaymentOrderNo": False,
"refundedTime": False,
"region": False,
"returnUrl": False,
"rvn": False,
"salesTax": False,
"sandbox": False,
"sku": False,
"state": False,
"status": False,
"statusReason": False,
"subscriptionId": False,
"subtotalPrice": False,
"targetNamespace": False,
"targetUserId": False,
"tax": False,
"title": False,
"totalPrice": False,
"totalTax": False,
"transactions": False,
"updatedAt": False,
"userId": False,
"vat": False,
"zipCode": False,
}
@staticmethod
def get_enum_map() -> Dict[str, List[Any]]:
return {
"channel": ["EXTERNAL", "INTERNAL"],
"itemType": ["APP", "COINS", "INGAMEITEM", "BUNDLE", "CODE", "SUBSCRIPTION", "SEASON", "MEDIA"],
"paymentProvider": ["WALLET", "XSOLLA", "ADYEN", "STRIPE", "CHECKOUT", "ALIPAY", "WXPAY", "PAYPAL"],
"status": ["INIT", "AUTHORISED", "AUTHORISE_FAILED", "CHARGED", "CHARGE_FAILED", "NOTIFICATION_OF_CHARGEBACK", "REQUEST_FOR_INFORMATION", "CHARGEBACK", "CHARGEBACK_REVERSED", "REFUNDING", "REFUNDED", "REFUND_FAILED", "DELETED"],
}
# endregion static methods
| 41.014456 | 240 | 0.564945 |
aee855d93f7871033c902c466ebfda82ea548236 | 3,624 | py | Python | test/test_report_open_shift_aws_storage_inventory_all_of.py | chargio/using-koku-api-test | 2f41fd83ab730705352b116b7a6e05ae3d9a8ebd | [
"MIT"
] | 1 | 2020-03-18T11:32:09.000Z | 2020-03-18T11:32:09.000Z | test/test_report_open_shift_aws_storage_inventory_all_of.py | chargio/using-koku-api-test | 2f41fd83ab730705352b116b7a6e05ae3d9a8ebd | [
"MIT"
] | null | null | null | test/test_report_open_shift_aws_storage_inventory_all_of.py | chargio/using-koku-api-test | 2f41fd83ab730705352b116b7a6e05ae3d9a8ebd | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Cost Management
The API for Project Koku and OpenShift cost management. You can find out more about Cost Management at [https://github.com/project-koku/](https://github.com/project-koku/). # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.report_open_shift_aws_storage_inventory_all_of import ReportOpenShiftAWSStorageInventoryAllOf # noqa: E501
from openapi_client.rest import ApiException
class TestReportOpenShiftAWSStorageInventoryAllOf(unittest.TestCase):
    """Unit test stubs for the ReportOpenShiftAWSStorageInventoryAllOf model."""
    def setUp(self):
        # No shared fixtures needed: make_instance builds models inline.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def make_instance(self, include_optional):
        """Build a ReportOpenShiftAWSStorageInventoryAllOf test instance.

        ``include_optional`` is a boolean: when False only required
        params are included, when True both required and
        optional params are included.
        """
        # model = openapi_client.models.report_open_shift_aws_storage_inventory_all_of.ReportOpenShiftAWSStorageInventoryAllOf()  # noqa: E501
        if include_optional :
            return ReportOpenShiftAWSStorageInventoryAllOf(
                group_by = {"account":["*"]},
                order_by = {"cost":"asc"},
                filter = openapi_client.models.report_open_shift_aws_filter.ReportOpenShiftAWSFilter(
                    limit = 5,
                    offset = 5,
                    resolution = 'daily',
                    time_scope_value = -10,
                    time_scope_units = 'day',
                    resource_scope = [],
                    account = [
                        '0'
                        ],
                    service = [
                        '0'
                        ],
                    region = [
                        '0'
                        ],
                    az = [
                        '0'
                        ],
                    tag = [
                        '0'
                        ],
                    project = [
                        '0'
                        ],
                    cluster = [
                        '0'
                        ],
                    node = [
                        '0'
                        ], ),
                data = [
                    [{"date":"2019-01","accounts":[{"account":"9999999999999","values":[{"date":"2019-01","account":"9999999999999","account_alias":"9999999999999","infrastructure_cost":{"value":0,"units":"USD"},"derived_cost":{"value":24,"units":"USD"},"cost":{"value":24,"units":"USD"},"usage":{"value":24,"units":"GB-Mo"}}]}]}]
                ]
            )
        else :
            return ReportOpenShiftAWSStorageInventoryAllOf(
                data = [
                    [{"date":"2019-01","accounts":[{"account":"9999999999999","values":[{"date":"2019-01","account":"9999999999999","account_alias":"9999999999999","infrastructure_cost":{"value":0,"units":"USD"},"derived_cost":{"value":24,"units":"USD"},"cost":{"value":24,"units":"USD"},"usage":{"value":24,"units":"GB-Mo"}}]}]}]
                ],
            )
    def testReportOpenShiftAWSStorageInventoryAllOf(self):
        """Smoke-test construction with required-only and full parameters."""
        # NOTE(review): generated stub — only verifies that construction
        # does not raise; no assertions are made on the built instances.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 39.824176 | 330 | 0.532285 |
502ccb0fb3d822e9581af338f0bfa3ec69540aa9 | 1,742 | py | Python | popmon/hist/filling/__init__.py | lnxpy/popmon | b0a05001ccca189648cfd861533573d1ecb5acff | [
"MIT"
] | null | null | null | popmon/hist/filling/__init__.py | lnxpy/popmon | b0a05001ccca189648cfd861533573d1ecb5acff | [
"MIT"
] | null | null | null | popmon/hist/filling/__init__.py | lnxpy/popmon | b0a05001ccca189648cfd861533573d1ecb5acff | [
"MIT"
] | null | null | null | # Copyright (c) 2020 ING Wholesale Banking Advanced Analytics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ...hist.filling.make_histograms import (get_bin_specs, get_one_time_axis,
get_time_axes, has_one_time_axis,
make_histograms)
from ...hist.filling.numpy_histogrammar import NumpyHistogrammar
from ...hist.filling.pandas_histogrammar import PandasHistogrammar
from ...hist.filling.spark_histogrammar import SparkHistogrammar
# Public API of the histogram "filling" subpackage: the three backend
# histogrammars plus the make_histograms helpers re-exported above.
__all__ = [
    "PandasHistogrammar",
    "SparkHistogrammar",
    "NumpyHistogrammar",
    "make_histograms",
    "get_time_axes",
    "get_one_time_axis",
    "has_one_time_axis",
    "get_bin_specs",
]
| 45.842105 | 82 | 0.741102 |
b21076c69b802e57779b4db96b235cc1bed322de | 29,709 | py | Python | pybleau/app/ui/dataframe_analyzer_model_view.py | KBIbiopharma/pybleau | 5cdfce603ad29af874f74f0f527adc6b4c9066e8 | [
"MIT"
] | 4 | 2020-02-27T22:38:29.000Z | 2021-05-03T05:32:11.000Z | pybleau/app/ui/dataframe_analyzer_model_view.py | KBIbiopharma/pybleau | 5cdfce603ad29af874f74f0f527adc6b4c9066e8 | [
"MIT"
] | 85 | 2020-02-04T21:57:14.000Z | 2021-05-03T14:29:40.000Z | pybleau/app/ui/dataframe_analyzer_model_view.py | KBIbiopharma/pybleau | 5cdfce603ad29af874f74f0f527adc6b4c9066e8 | [
"MIT"
] | 1 | 2020-02-20T00:45:09.000Z | 2020-02-20T00:45:09.000Z | import logging
from copy import copy
import numpy as np
import pandas as pd
import traitsui
from app_common.pyface.ui.extra_file_dialogs import request_csv_file
from app_common.std_lib.filepath_utils import open_file
from app_common.std_lib.logging_utils import ACTION_LEVEL
from app_common.traitsui.common_traitsui_groups import make_window_title_group
from pyface.api import warning
from traits.api import Any, Bool, Button, cached_property, Dict, Either, \
Enum, Instance, Int, List, on_trait_change, Property, Set, Str, \
ToolbarButton
from traitsui.api import ButtonEditor, CheckListEditor, HGroup, HSplit, \
InstanceEditor, Item, Label, ModelView, OKButton, Spring, Tabbed, VGroup, \
View, VSplit
from traitsui.ui_editors.data_frame_editor import DataFrameEditor
from pybleau.app.image_resources import pop_out_img, apply_img, \
manage_img, save_img, load_img
from pybleau.app.model.dataframe_analyzer import DataFrameAnalyzer, \
CATEGORICAL_COL_TYPES
from pybleau.app.ui.filter_expression_editor import \
FilterExpressionEditorView
try:
from pybleau.app.ui.dataframe_plot_manager_view import \
DataFramePlotManager, DataFramePlotManagerView
except ImportError:
DataFramePlotManager = object
DataFramePlotManagerView = object
from pybleau.app.tools.filter_expression_manager import FilterExpression, \
FilterExpressionManager
logger = logging.getLogger(__name__)
DEFAULT_FONT = 'Courier'
class DataFrameAnalyzerView(ModelView):
""" Flexible ModelView class for a DataFrameAnalyzer.
The view is built using many methods to build each component of the view so
it can easily be subclassed and customized.
TODO: add traits events to pass update/refresh notifications to the
DFEditors once we have updated TraitsUI.
TODO: Add traits events to receive notifications that a column/row was
clicked/double-clicked.
"""
#: Model being viewed
model = Instance(DataFrameAnalyzer)
#: Selected list of data columns to display and analyze
visible_columns = List(Str)
#: Check box to hide/show what stats are included in the summary DF
show_summary_controls = Bool
#: Show the summary categorical df
show_categorical_summary = Bool(True)
#: Check box to hide/show what columns to analyze (panel when few columns)
show_column_controls = Bool
#: Open control for what columns to analyze (popup when many columns)
open_column_controls = Button("Show column control")
#: Button to launch the plotter tool when plotter_layout=popup
plotter_launcher = Button("Launch Plot Tool")
# Plotting tool attributes ------------------------------------------------
#: Does the UI expose a DF plotter?
include_plotter = Bool
#: Plot manager view to display. Ignored if include_plotter is False.
plotter = Instance(DataFramePlotManagerView)
# Styling and branding attributes -----------------------------------------
#: String describing the font to use, or dict mapping column names to font
fonts = Either(Str, Dict)
#: Name of the font to use if same across all columns
font_name = Str(DEFAULT_FONT)
#: Size of the font to use if same across all columns
font_size = Int(14)
#: Number of digits to display in the tables
display_precision = Int(-1)
#: Formatting to use to include
formats = Either(Str, Dict)
#: UI title for the Data section
data_section_title = Str("Data")
#: Exploration group label: visible only when plotter_layout="Tabbed"
exploration_group_label = Str("Exploration Tools")
#: Plotting group label: visible only when plotter_layout="Tabbed"
plotting_group_label = Str("Plotting Tools")
#: UI title for the data summary section
summary_section_title = Str
#: UI title for the categorical data summary section
cat_summary_section_title = Str("Categorical data summary")
#: UI title for the column list section
column_list_section_title = Str("Column content")
#: UI title for the summary content section
summary_content_section_title = Str("Summary content")
#: UI summary group (tab) name for numerical columns
num_summary_group_name = Str("Numerical data")
#: UI summary group (tab) name for categorical columns
cat_summary_group_name = Str("Categorical data")
#: Text to display in title bar of the containing window (if applicable)
app_title = Str("Tabular Data Analyzer")
#: How to place the plotter tool with respect to the exploration tool?
plotter_layout = Enum("Tabbed", "HSplit", "VSplit", "popup")
#: DFPlotManager traits to customize it
plotter_kw = Dict
#: Message displayed below the table if truncated
truncation_msg = Property(Str, depends_on="model.num_displayed_rows")
# Functionality controls --------------------------------------------------
#: Button to shuffle the order of the filtered data
shuffle_button = Button("Shuffle")
show_shuffle_button = Bool(True)
#: Button to display more rows in the data table
show_more_button = Button
#: Button to display all rows in the data table
show_all_button = Button("Show All")
#: Apply button for the filter if model not in auto-apply mode
apply_filter_button = ToolbarButton(image=apply_img)
#: Edit the filter in a pop-out dialog
pop_out_filter_button = ToolbarButton(image=pop_out_img)
#: Whether to support saving, and loading filters
filter_manager = Bool
#: Button to launch filter expression manager to load an existing filter
load_filter_button = ToolbarButton(image=load_img)
#: Button to save current filter expression
save_filter_button = ToolbarButton(image=save_img)
#: Button to launch filter expression manager to modify saved filters
manage_filter_button = ToolbarButton(image=manage_img)
#: List of saved filtered expressions
_known_expr = Property(Set, depends_on="model.known_filter_exps")
#: Show the bottom panel with the summary of the data:
_show_summary = Bool(True)
allow_show_summary = Bool(True)
#: Button to export the analyzed data to a CSV file
data_exporter = Button("Export Data to CSV")
#: Button to export the summary data to a CSV file
summary_exporter = Button("Export Summary to CSV")
# Detailed configuration traits -------------------------------------------
#: View class to use. Modify to customize.
view_klass = Any(View)
#: Width of the view
view_width = Int(1100)
#: Height of the view
view_height = Int(700)
#: Width of the filter box
filter_item_width = Int(400)
max_names_per_column = Int(12)
truncation_msg_template = Str("Table truncated at {} rows")
warn_if_sel_hidden = Bool(True)
hidden_selection_msg = Str
#: Column names (as a list) to include in filter editor assistant
filter_editor_cols = List
# Implementation details --------------------------------------------------
#: Evaluate number of columns to select panel or popup column control
_many_columns = Property(Bool, depends_on="model.column_list")
#: Popped-up UI to control the visible columns
_control_popup = Any
#: Collected traitsUI editors for both the data DF and the summary DF
_df_editors = Dict
# HasTraits interface -----------------------------------------------------
    def __init__(self, **traits):
        """ Build the ModelView.

        For convenience, ``model`` may be passed as a plain
        ``pandas.DataFrame``; it is then wrapped in a ``DataFrameAnalyzer``
        automatically before the regular trait initialization runs.
        """
        if "model" in traits and isinstance(traits["model"], pd.DataFrame):
            traits["model"] = DataFrameAnalyzer(source_df=traits["model"])
        super(DataFrameAnalyzerView, self).__init__(**traits)
        if self.include_plotter:
            # If a plotter view was specified, its model should be in the
            # model's list of plot managers:
            if self.plotter.model not in self.model.plot_manager_list:
                self.model.plot_manager_list.append(self.plotter.model)
def traits_view(self):
""" Putting the view components together.
Each component of the view is built in a separate method so it can
easily be subclassed and customized.
"""
# Construction of view groups -----------------------------------------
data_group = self.view_data_group_builder()
column_controls_group = self.view_data_control_group_builder()
summary_group = self.view_summary_group_builder()
summary_controls_group = self.view_summary_control_group_builder()
if self.show_categorical_summary:
cat_summary_group = self.view_cat_summary_group_builder()
else:
cat_summary_group = None
plotter_group = self.view_plotter_group_builder()
button_content = [
Item("data_exporter", show_label=False),
Spring(),
Item("summary_exporter", show_label=False)
]
if self.plotter_layout == "popup":
button_content += [
Spring(),
Item("plotter_launcher", show_label=False)
]
button_group = HGroup(*button_content)
# Organization of item groups -----------------------------------------
# If both types of summary are available, display as Tabbed view:
if summary_group is not None and cat_summary_group is not None:
summary_container = Tabbed(
HSplit(
summary_controls_group,
summary_group,
label=self.num_summary_group_name
),
cat_summary_group,
)
elif cat_summary_group is not None:
summary_container = cat_summary_group
else:
summary_container = HSplit(
summary_controls_group,
summary_group
)
# Allow to hide all summary information:
summary_container.visible_when = "_show_summary"
exploration_groups = VGroup(
VSplit(
HSplit(
column_controls_group,
data_group,
),
summary_container
),
button_group,
label=self.exploration_group_label
)
if self.include_plotter and self.plotter_layout != "popup":
layout = getattr(traitsui.api, self.plotter_layout)
groups = layout(
exploration_groups,
plotter_group
)
else:
groups = exploration_groups
view = self.view_klass(
groups,
resizable=True,
title=self.app_title,
width=self.view_width, height=self.view_height
)
return view
# Traits view building methods --------------------------------------------
def view_data_group_builder(self):
""" Build view element for the Data display
"""
editor_kw = dict(show_index=True, columns=self.visible_columns,
fonts=self.fonts, formats=self.formats)
data_editor = DataFrameEditor(selected_row="selected_idx",
multi_select=True, **editor_kw)
filter_group = HGroup(
Item("model.filter_exp", label="Filter",
width=self.filter_item_width),
Item("pop_out_filter_button", show_label=False, style="custom",
tooltip="Open filter editor..."),
Item("apply_filter_button", show_label=False,
visible_when="not model.filter_auto_apply", style="custom",
tooltip="Apply current filter"),
Item("save_filter_button", show_label=False,
enabled_when="model.filter_exp not in _known_expr",
visible_when="filter_manager", style="custom",
tooltip="Save current filter"),
Item("load_filter_button", show_label=False,
visible_when="filter_manager", style="custom",
tooltip="Load a filter..."),
Item("manage_filter_button", show_label=False,
visible_when="filter_manager", style="custom",
tooltip="Manage filters..."),
)
truncated = ("len(model.displayed_df) < len(model.filtered_df) and "
"not model.show_selected_only")
more_label = "Show {} More".format(self.model.num_display_increment)
display_control_group = HGroup(
Item("model.show_selected_only", label="Selected rows only"),
Item("truncation_msg", style="readonly", show_label=False,
visible_when=truncated),
Item("show_more_button", editor=ButtonEditor(label=more_label),
show_label=False, visible_when=truncated),
Item("show_all_button", show_label=False,
visible_when=truncated),
)
data_group = VGroup(
make_window_title_group(self.data_section_title, title_size=3,
include_blank_spaces=False),
HGroup(
Item("model.sort_by_col", label="Sort by"),
Item("shuffle_button", show_label=False,
visible_when="show_shuffle_button"),
Spring(),
filter_group
),
HGroup(
Item("model.displayed_df", editor=data_editor,
show_label=False),
),
HGroup(
Item("show_column_controls",
label="\u2190 Show column control",
visible_when="not _many_columns"),
Item("open_column_controls", show_label=False,
visible_when="_many_columns"),
Spring(),
Item("_show_summary", label=u'\u2193 Show summary',
visible_when="allow_show_summary"),
Spring(),
display_control_group
),
show_border=True
)
return data_group
def view_data_control_group_builder(self, force_visible=False):
""" Build view element for the Data column control.
Parameters
----------
force_visible : bool
Controls visibility of the created group. Don't force for the group
embedded in the global view, but force it when opened as a popup.
"""
num_cols = 1 + len(self.model.column_list) // self.max_names_per_column
column_controls_group = VGroup(
make_window_title_group(self.column_list_section_title,
title_size=3, include_blank_spaces=False),
Item("visible_columns", show_label=False,
editor=CheckListEditor(values=self.model.column_list,
cols=num_cols),
# The custom style allows to control a list of options rather
# than having a checklist editor for a single value:
style='custom'),
show_border=True
)
if force_visible:
column_controls_group.visible_when = ""
else:
column_controls_group.visible_when = "show_column_controls"
return column_controls_group
def view_summary_group_builder(self):
""" Build view element for the numerical data summary display
"""
editor_kw = dict(show_index=True, columns=self.visible_columns,
fonts=self.fonts, formats=self.formats)
summary_editor = DataFrameEditor(**editor_kw)
summary_group = VGroup(
make_window_title_group(self.summary_section_title, title_size=3,
include_blank_spaces=False),
Item("model.summary_df", editor=summary_editor, show_label=False,
visible_when="len(model.summary_df) != 0"),
# Workaround the fact that the Label's visible_when is buggy:
# encapsulate it into a group and add the visible_when to the group
HGroup(
Label("No data columns with numbers were found."),
visible_when="len(model.summary_df) == 0"
),
HGroup(
Item("show_summary_controls"),
Spring(),
visible_when="len(model.summary_df) != 0"
),
show_border=True,
)
return summary_group
def view_summary_control_group_builder(self):
""" Build view element for the column controls for data summary.
"""
summary_controls_group = VGroup(
make_window_title_group(self.summary_content_section_title,
title_size=3, include_blank_spaces=False),
Item("model.summary_index", show_label=False),
visible_when="show_summary_controls",
show_border=True
)
return summary_controls_group
def view_cat_summary_group_builder(self):
""" Build view element for the categorical data summary display.
"""
editor_kw = dict(show_index=True, fonts=self.fonts,
formats=self.formats)
summary_editor = DataFrameEditor(**editor_kw)
cat_summary_group = VGroup(
make_window_title_group(self.cat_summary_section_title,
title_size=3, include_blank_spaces=False),
Item("model.summary_categorical_df", editor=summary_editor,
show_label=False,
visible_when="len(model.summary_categorical_df)!=0"),
# Workaround the fact that the Label's visible_when is buggy:
# encapsulate it into a group and add the visible_when to the group
HGroup(
Label("No data columns with numbers were found."),
visible_when="len(model.summary_categorical_df)==0"
),
show_border=True, label=self.cat_summary_group_name
)
return cat_summary_group
def view_plotter_group_builder(self):
""" Build view element for the plotter tool.
"""
plotter_group = VGroup(
Item("plotter", editor=InstanceEditor(), show_label=False,
style="custom"),
label=self.plotting_group_label
)
return plotter_group
# Public interface --------------------------------------------------------
    def destroy(self):
        """ Clean up resources.

        Disposes of the pop-up column-control window (if one was opened via
        the `open_column_controls` button) so its UI resources are released
        together with the main view.
        """
        if self._control_popup:
            self._control_popup.dispose()
# Traits listeners --------------------------------------------------------
def _open_column_controls_fired(self):
""" Pop-up a new view on the column list control.
"""
if self._control_popup and self._control_popup.control:
# If there is an existing window, bring it in focus:
# Discussion: https://stackoverflow.com/questions/2240717/in-qt-how-do-i-make-a-window-be-the-current-window # noqa
self._control_popup.control._mw.activateWindow()
return
# Before viewing self with a simplified view, make sure the original
# view editors are collected so they can be modified when the controls
# are used:
if not self._df_editors:
self._collect_df_editors()
view = self.view_klass(
self.view_data_control_group_builder(force_visible=True),
buttons=[OKButton],
width=600, resizable=True,
title="Control visible columns"
)
# WARNING: this will modify the info object the view points to!
self._control_popup = self.edit_traits(view=view, kind="live")
    def _shuffle_button_fired(self):
        # Delegate to the model to shuffle the rows of the filtered data.
        self.model.shuffle_filtered_df()
    def _apply_filter_button_fired(self):
        """ Apply the current filter expression to the source data.

        Only reachable from the UI when the model is not in auto-apply mode
        (the button's visible_when is "not model.filter_auto_apply").
        """
        flt = self.model.filter_exp
        # ACTION_LEVEL records this as a user action in the application log:
        msg = f"Applying filter {flt}."
        logger.log(ACTION_LEVEL, msg)
        self.model.recompute_filtered_df()
def _pop_out_filter_button_fired(self):
if not self.filter_editor_cols:
# if there are no included columns, then use all categorical cols
df = self.model.source_df
cat_df = df.select_dtypes(include=CATEGORICAL_COL_TYPES)
self.filter_editor_cols = list(cat_df.columns)
filter_editor = FilterExpressionEditorView(
expr=self.model.filter_exp, view_klass=self.view_klass,
source_df=self.model.source_df,
included_cols=self.filter_editor_cols)
ui = filter_editor.edit_traits(kind="livemodal")
if ui.result:
self.model.filter_exp = filter_editor.expr
self.apply_filter_button = True
def _manage_filter_button_fired(self):
""" TODO: review if replacing the copy by a deepcopy or removing the
copy altogether would help traits trigger listeners correctly
"""
msg = "Opening filter manager."
logger.log(ACTION_LEVEL, msg)
# Make a copy of the list of filters so the model can listen to changes
# even if only a field of an existing filter is modified:
filter_manager = FilterExpressionManager(
known_filter_exps=copy(self.model.known_filter_exps),
mode="manage", view_klass=self.view_klass
)
ui = filter_manager.edit_traits(kind="livemodal")
if ui.result:
# FIXME: figure out why this simpler assignment doesn't trigger the
# traits listener on the model when changing a FilterExpression
# attribute:
# self.model.known_filter_exps = filter_manager.known_filter_exps
self.model.known_filter_exps = [
FilterExpression(name=e.name, expression=e.expression) for e in
filter_manager.known_filter_exps
]
def _load_filter_button_fired(self):
filter_manager = FilterExpressionManager(
known_filter_exps=self.model.known_filter_exps,
mode="load", view_klass=self.view_klass
)
ui = filter_manager.edit_traits(kind="livemodal")
if ui.result:
selection = filter_manager.selected_expression
self.model.filter_exp = selection.expression
def _save_filter_button_fired(self):
exp = self.model.filter_exp
if exp in [e.expression for e in self.model.known_filter_exps]:
return
expr = FilterExpression(name=exp, expression=exp)
self.model.known_filter_exps.append(expr)
    def _show_more_button_fired(self):
        # Grow the number of displayed rows by the model's increment (the
        # same value shown in the "Show N More" button label).
        self.model.num_displayed_rows += self.model.num_display_increment
    def _show_all_button_fired(self):
        # -1 requests that the model display every row (button is labeled
        # "Show All").
        self.model.num_displayed_rows = -1
@on_trait_change("model:selected_data_in_plotter_updated", post_init=True)
def warn_if_selection_hidden(self):
""" Pop up warning msg if some of the selected rows aren't displayed.
"""
if not self.warn_if_sel_hidden:
return
if not self.model.selected_idx:
return
truncated = len(self.model.displayed_df) < len(self.model.filtered_df)
max_displayed = self.model.displayed_df.index.max()
some_selection_hidden = max(self.model.selected_idx) > max_displayed
if truncated and some_selection_hidden:
warning(None, self.hidden_selection_msg, "Hidden selection")
@on_trait_change("visible_columns[]", post_init=True)
def update_filtered_df_on_columns(self):
""" Just show the columns that are set to visible.
Notes
-----
We are not modifying the filtered data because if we remove a column
and then bring it back, the adapter breaks because it is missing data.
Breakage happen when removing a column if the model is changed first,
or when bring a column back if the adapter column list is changed
first.
"""
if not self.info.initialized:
return
if not self._df_editors:
self._collect_df_editors()
# Rebuild the column list (col name, column id) for the tabular
# adapter:
all_visible_cols = [(col, col) for col in self.visible_columns]
df = self.model.source_df
cat_dtypes = self.model.categorical_dtypes
summarizable_df = df.select_dtypes(exclude=cat_dtypes)
summary_visible_cols = [(col, col) for col in self.visible_columns
if col in summarizable_df.columns]
for df_name, cols in zip(["displayed_df", "summary_df"],
[all_visible_cols, summary_visible_cols]):
df = getattr(self.model, df_name)
index_name = df.index.name
if index_name is None:
index_name = ''
# This grabs the corresponding _DataFrameEditor (not the editor
# factory) which has access to the adapter object:
editor = self._df_editors[df_name]
editor.adapter.columns = [(index_name, 'index')] + cols
def _collect_df_editors(self):
for df_name in ["displayed_df", "summary_df"]:
try:
# This grabs the corresponding _DataFrameEditor (not the editor
# factory) which has access to the adapter object:
self._df_editors[df_name] = getattr(self.info, df_name)
except Exception as e:
msg = "Error trying to collect the tabular adapter: {}"
logger.error(msg.format(e))
def _plotter_launcher_fired(self):
""" Pop up plot manager view. Only when self.plotter_layout="popup".
"""
self.plotter.edit_traits(kind="livemodal")
    def _data_exporter_fired(self):
        """ Prompt for a target CSV file, export the filtered data to it and
        open the resulting file.
        """
        filepath = request_csv_file(action="save as")
        # filepath is expected to be falsy when the dialog is dismissed, in
        # which case nothing is exported:
        if filepath:
            self.model.filtered_df.to_csv(filepath)
            open_file(filepath)
    def _summary_exporter_fired(self):
        """ Prompt for a target CSV file, export the (numerical) summary
        DataFrame to it and open the resulting file.
        """
        filepath = request_csv_file(action="save as")
        # filepath is expected to be falsy when the dialog is dismissed, in
        # which case nothing is exported:
        if filepath:
            self.model.summary_df.to_csv(filepath)
            open_file(filepath)
# Traits property getters/setters -----------------------------------------
    def _get__known_expr(self):
        # Set of expression strings already saved in the model; used by the
        # save button's enabled_when to grey the button out for duplicates.
        return {e.expression for e in self.model.known_filter_exps}
    @cached_property
    def _get_truncation_msg(self):
        # Recomputed whenever model.num_displayed_rows changes (see the
        # depends_on of the truncation_msg Property declaration above).
        num_displayed_rows = self.model.num_displayed_rows
        return self.truncation_msg_template.format(num_displayed_rows)
    @cached_property
    def _get__many_columns(self):
        # "Many" means the column names would span more than 2 columns of
        # the column-control panel (max_names_per_column names per column);
        # in that case the embedded panel is replaced by a popup window
        # (see the visible_when conditions on show_column_controls vs
        # open_column_controls in view_data_group_builder).
        return len(self.model.column_list) > 2 * self.max_names_per_column
# Traits initialization methods -------------------------------------------
def _plotter_default(self):
if self.include_plotter:
if self.model.plot_manager_list:
if len(self.model.plot_manager_list) > 1:
num_plotters = len(self.model.plot_manager_list)
msg = "Model contains {} plot manager, but only " \
"initializing the Analyzer view with the first " \
"plot manager available.".format(num_plotters)
logger.warning(msg)
plot_manager = self.model.plot_manager_list[0]
else:
plot_manager = DataFramePlotManager(
data_source=self.model.filtered_df,
source_analyzer=self.model,
**self.plotter_kw
)
view = DataFramePlotManagerView(model=plot_manager,
view_klass=self.view_klass)
return view
def _formats_default(self):
if self.display_precision < 0:
return '%s'
else:
formats = {}
float_format = '%.{}g'.format(self.display_precision)
for col in self.model.source_df.columns:
col_dtype = self.model.source_df.dtypes[col]
if np.issubdtype(col_dtype, np.number):
formats[col] = float_format
else:
formats[col] = '%s'
return formats
def _visible_columns_default(self):
return self.model.column_list
def _hidden_selection_msg_default(self):
msg = "The displayed data is truncated and some of the selected " \
"rows isn't displayed in the data table."
return msg
    def _summary_section_title_default(self):
        # Only qualify the title as "Numerical" when a categorical summary
        # exists as well, i.e. when there is something to distinguish from:
        if len(self.model.summary_categorical_df) == 0:
            return "Data summary"
        else:
            return "Numerical data summary"
def _fonts_default(self):
return "{} {}".format(self.font_name, self.font_size)
if __name__ == "__main__":
from pandas import DataFrame
from numpy import random
df = DataFrame({"a": [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
"b": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
"c": random.randn(16),
"d": list("abcdefghijklmnop")},
dtype=float)
df.index.name = "BALH"
summarizer = DataFrameAnalyzer(source_df=df, num_displayed_rows=5,
filter_auto_apply=False)
print(summarizer.compute_summary())
view = DataFrameAnalyzerView(model=summarizer, include_plotter=True,
display_precision=5, filter_manager=True,
plotter_layout="HSplit")
view.configure_traits()
| 38.334194 | 128 | 0.619004 |
c4d7e4f334cdd6121d3777b5d69eb76ad658c709 | 3,857 | py | Python | plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py | cam-parra/indy-plenum | a891defac546488c6ec2f4a12d23894742d1427f | [
"Apache-2.0"
] | null | null | null | plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py | cam-parra/indy-plenum | a891defac546488c6ec2f4a12d23894742d1427f | [
"Apache-2.0"
] | null | null | null | plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py | cam-parra/indy-plenum | a891defac546488c6ec2f4a12d23894742d1427f | [
"Apache-2.0"
] | null | null | null | import pytest
from plenum.server.view_change.view_changer import ViewChanger
from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check, view_change_timeout
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected
from plenum.test.test_node import get_master_primary_node
from plenum.test.view_change.helper import start_stopped_node, ensure_view_change_by_primary_restart
TestRunningTimeLimitSec = 150
@pytest.fixture(scope="module")
def tconf(tconf):
    # Module-scoped override of the session ``tconf`` fixture: run every test
    # in this module with a view change timeout of 20 seconds (presumably a
    # value tuned for this scenario -- confirm against the default). Yielding
    # inside the context manager restores the original timeout on teardown.
    with view_change_timeout(tconf, 20):
        yield tconf
def test_view_change_after_back_to_quorum_with_disconnected_primary(txnPoolNodeSet, looper,
                                                                    sdk_pool_handle,
                                                                    sdk_wallet_client,
                                                                    tdir, tconf, allPluginsPath):
    """
    Scenario (4-node pool, Alpha is the initial primary):

    1. Restart the primary (Alpha) so a view change elects Beta.
    2. Stop a non-primary (Delta); a quorum remains, no view change expected.
    3. Disconnect the new primary (Beta); only 2 of 4 nodes remain, which is
       below quorum, so no instance change messages must be sent.
    4. Restart Delta; quorum is restored while the primary is still gone, so
       a view change to view_no + 1 must happen and the pool must be able to
       order transactions again.
    """
    assert len(txnPoolNodeSet) == 4
    pr_node = get_master_primary_node(txnPoolNodeSet)
    assert pr_node.name == "Alpha"
    # 1. Initiate a view change by restarting the primary (Alpha)
    nodes = ensure_view_change_by_primary_restart(looper,
                                                  txnPoolNodeSet,
                                                  tconf,
                                                  tdir,
                                                  allPluginsPath,
                                                  customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT,
                                                  exclude_from_check=['check_last_ordered_3pc_backup'])
    # Now primary should be Beta
    pr_node = get_master_primary_node(nodes)
    assert pr_node.name == "Beta"
    # 2. Stop non-primary node Delta; no view changes are expected
    non_primary_to_stop = [n for n in nodes if n.name == "Delta"][0]
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, non_primary_to_stop)
    looper.removeProdable(non_primary_to_stop)
    remaining_nodes = list(set(nodes) - {non_primary_to_stop})
    # Primary is going to be stopped, remember instance change messages count
    # to ensure that no view change happened as number of connected nodes is less
    # than quorum.
    ic_cnt = {}
    for n in remaining_nodes:
        ic_cnt[n.name] = n.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)
    # 3. Disconnect primary
    disconnect_node_and_ensure_disconnected(
        looper, remaining_nodes, pr_node)
    looper.removeProdable(pr_node)
    # Wait for more than the ToleratePrimaryDisconnection timeout and check
    # that no instance change (IC) messages were sent.
    looper.runFor(tconf.ToleratePrimaryDisconnection + 5)
    remaining_nodes = list(set(remaining_nodes) - {pr_node})
    for n in remaining_nodes:
        assert ic_cnt[n.name] == n.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)
    view_no = checkViewNoForNodes(remaining_nodes)
    # 4. Start Delta (non-primary); now primary (Beta) is disconnected but
    # there is a quorum to choose a new one.
    restartedNode = start_stopped_node(non_primary_to_stop, looper, tconf,
                                       tdir, allPluginsPath,
                                       delay_instance_change_msgs=False)
    remaining_nodes = remaining_nodes + [restartedNode]
    # 5. Check that view change happened.
    waitForViewChange(looper, remaining_nodes, expectedViewNo=(view_no + 1),
                      customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, remaining_nodes, sdk_pool_handle,
                              sdk_wallet_client, 3)
    ensure_all_nodes_have_same_data(looper, nodes=remaining_nodes)
| 46.46988 | 117 | 0.655432 |
8f1a41b0e7a6fe554429f4b4cc797321daf47813 | 1,793 | py | Python | docs/names/examples/gethostbyname.py | mathieui/twisted | 35546d2b50742a32edba54719ce3e752dc50dd2a | [
"MIT",
"Unlicense"
] | 1 | 2019-02-08T18:37:42.000Z | 2019-02-08T18:37:42.000Z | docs/names/examples/gethostbyname.py | mathieui/twisted | 35546d2b50742a32edba54719ce3e752dc50dd2a | [
"MIT",
"Unlicense"
] | 5 | 2020-06-05T18:16:39.000Z | 2022-01-13T00:45:49.000Z | docs/names/examples/gethostbyname.py | mathieui/twisted | 35546d2b50742a32edba54719ce3e752dc50dd2a | [
"MIT",
"Unlicense"
] | 1 | 2021-12-13T10:46:13.000Z | 2021-12-13T10:46:13.000Z | #!/usr/bin/env python
# -*- test-case-name: twisted.names.test.test_examples -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Print the IP address for a given hostname. eg
python gethostbyname.py www.google.com
This script does a host lookup using the default Twisted Names
resolver, a chained resolver, which attempts to lookup a name from:
* local hosts file
* memory cache of previous lookup results
* system recursive DNS servers
"""
import sys
from twisted.names import client, error
from twisted.internet.task import react
from twisted.python import usage
class Options(usage.Options):
    """ Command line options: a single positional HOSTNAME argument. """
    synopsis = 'Usage: gethostbyname.py HOSTNAME'
    def parseArgs(self, hostname):
        # Store the single positional argument under the 'hostname' key.
        self['hostname'] = hostname
def printResult(address, hostname):
    """
    Write the resolved IP address to stdout, or an error message to
    stderr when no IP address was found.
    """
    if not address:
        sys.stderr.write(
            'ERROR: No IP addresses found for name %r\n' % (hostname,))
        return
    sys.stdout.write(address + '\n')
def printError(failure, hostname):
    """
    Print a friendly error message if the hostname could not be
    resolved.
    """
    # trap() re-raises the failure unless it is a DNSNameError, so any other
    # kind of failure propagates to the next errback / react()'s reporting.
    failure.trap(error.DNSNameError)
    sys.stderr.write('ERROR: hostname not found %r\n' % (hostname,))
def main(reactor, *argv):
    """
    Parse the command line, start a hostname lookup and return the
    Deferred so ``react`` keeps the reactor running until it fires.
    """
    options = Options()
    try:
        options.parseOptions(argv)
    except usage.UsageError as errortext:
        # Print the usage string and the specific parsing error, then exit
        # with a non-zero status.
        sys.stderr.write(str(options) + '\n')
        sys.stderr.write('ERROR: %s\n' % (errortext,))
        raise SystemExit(1)
    hostname = options['hostname']
    d = client.getHostByName(hostname)
    # Success -> printResult; DNSNameError -> printError (other failures
    # propagate and are reported when the Deferred is returned to react()).
    d.addCallback(printResult, hostname)
    d.addErrback(printError, hostname)
    return d
if __name__ == '__main__':
    # react() starts the reactor, calls main() with the command line
    # arguments, and stops the reactor once the returned Deferred fires.
    react(main, sys.argv[1:])
| 23.285714 | 71 | 0.677078 |
49ee43b2c2ae9da4bbe42ad722ea106cc936f1fd | 532 | py | Python | cride/circles/urls.py | sgg10/cride | e241028430656c54aa0133a173efad656cab233d | [
"MIT"
] | null | null | null | cride/circles/urls.py | sgg10/cride | e241028430656c54aa0133a173efad656cab233d | [
"MIT"
] | null | null | null | cride/circles/urls.py | sgg10/cride | e241028430656c54aa0133a173efad656cab233d | [
"MIT"
] | null | null | null | """Circle urls."""
# Django
from django.urls import path, include
# Django REST Framework
from rest_framework.routers import DefaultRouter
# Views
from .views import circles as circles_views
from .views import memberships as membership_views
router = DefaultRouter()
# /circles/ -> CRUD and custom actions on circles.
router.register(r'circles', circles_views.CircleViewSet, basename='circle')
# /circles/<slug_name>/members/ -> memberships nested under a circle; the
# parent circle is identified by the slug_name captured from the URL.
router.register(
    r'circles/(?P<slug_name>[-a-zA-Z0-9_-]+)/members',
    membership_views.MembershipViewSet,
    basename='membership'
)
urlpatterns = [
    path('', include(router.urls))
] | 23.130435 | 75 | 0.763158
ddc9c9e688ef40153ac61aa5609e26c467f7ebf4 | 5,460 | py | Python | src/zenmake/zm/buildconf/select.py | pustotnik/zenmake | 0c089b35d2dcfd1825440c2561fc57e79e7383f0 | [
"BSD-3-Clause"
] | 2 | 2019-10-14T05:05:34.000Z | 2022-03-28T04:55:00.000Z | src/zenmake/zm/buildconf/select.py | pustotnik/zenmake | 0c089b35d2dcfd1825440c2561fc57e79e7383f0 | [
"BSD-3-Clause"
] | 42 | 2020-08-25T07:59:32.000Z | 2021-11-15T03:12:29.000Z | src/zenmake/zm/buildconf/select.py | pustotnik/zenmake | 0c089b35d2dcfd1825440c2561fc57e79e7383f0 | [
"BSD-3-Clause"
] | 1 | 2021-08-13T13:59:51.000Z | 2021-08-13T13:59:51.000Z | # coding=utf-8
#
"""
Copyright (c) 2020, Alexander Magola. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
import os
from zm.constants import PLATFORM, KNOWN_PLATFORMS, HOST_OS, DISTRO_INFO, CPU_ARCH
from zm.utils import toList
from zm.error import ZenMakeLogicError, ZenMakeConfError
from zm.buildconf.scheme import KNOWN_CONDITION_PARAM_NAMES
from zm.buildconf.processing import convertTaskParamValue
from zm.buildconf.expression import Expression
from zm.features import areFeaturesLoaded
from zm.toolchains import getAllNames as getAllToolchainNames
# (state name, current value) pairs describing the running system; a
# condition may restrict any of these states to a tuple of allowed values
# (see _tryToSelect below).
_SYS_STATES = (
    ('platform', PLATFORM),
    ('host-os', HOST_OS),
    ('distro', DISTRO_INFO.get('ID', '')),
    ('cpu-arch', CPU_ARCH),
)
# Handler for boolean expressions with 'and'/'or'/'not' operators;
# presumably used when evaluating '*.select' conditions -- confirm in the
# rest of this module.
_exprHandler = Expression(['and', 'or', 'not'])
# Module-level cache of computed conditions; cleared by clearLocalCache().
_local = {}
def _getReadyConditions(bconf):
    """
    Return the dict of built-in ("ready") conditions for the given buildconf:
    one condition per known platform, per known toolchain, and per build type
    supported by this buildconf. Results are cached in the module-level
    _local dict.

    NOTE(review): the per-bconf cache is keyed on id(bconf) and never
    evicted except via clearLocalCache(); entries for garbage-collected
    bconf objects could be stale if an id is reused -- confirm lifetime
    assumptions.
    """
    bconfId = id(bconf)
    _local.setdefault('ready-conditions', {})
    conditions = _local['ready-conditions'].get(bconfId)
    if conditions is not None:
        return conditions
    # Toolchain names are only meaningful after task features were loaded:
    if not areFeaturesLoaded():
        msg = "Programming error: task features have not been loaded yet"
        raise ZenMakeLogicError(msg)
    # Conditions shared by all buildconfs (platforms + toolchains) are
    # computed once and cached separately:
    conditions = _local.get('common-ready-conditions')
    if conditions is None:
        conditions = {}
        # platform conditions
        for platform in KNOWN_PLATFORMS:
            conditions[platform] = { 'platform' : (platform, ) }
        conditions['macos'] = { 'host-os' : ('macos', ) }
        # toolchain conditions
        for toolchain in getAllToolchainNames(platform = 'all'):
            assert toolchain not in conditions
            conditions[toolchain] = { 'toolchain' : (toolchain, ) }
        _local['common-ready-conditions'] = conditions
    # don't change common conditions
    conditions = conditions.copy()
    # Per-bconf additions: one condition per supported build type, unless a
    # platform/toolchain condition with the same name already exists:
    buildtypes = bconf.supportedBuildTypes
    for buildtype in buildtypes:
        if buildtype not in conditions:
            conditions[buildtype] = { 'buildtype' : (buildtype, ) }
    _local['ready-conditions'][bconfId] = conditions
    return conditions
def _tryToSelect(bconf, condName, taskParams, paramName):
    """
    Check whether the named condition holds for the given task.

    Returns True only if every filter present in the condition (system
    state, task name, buildtype, toolchain, env vars) matches the current
    state. Raises ZenMakeConfError for unknown condition names or an
    invalid toolchain filter.
    """
    # pylint: disable = too-many-return-statements

    readyConditions = _getReadyConditions(bconf)
    condition = bconf.conditions.get(condName, readyConditions.get(condName))
    if condition is None:
        msg = "Task %r: " % taskParams['name']
        msg += "there is no condition %r in buildconf.conditions" % condName
        raise ZenMakeConfError(msg, confpath = bconf.path)

    # guard against unknown parameters in the condition
    assert frozenset(condition.keys()) <= KNOWN_CONDITION_PARAM_NAMES

    # system state filters: platform, host-os, distro, cpu-arch
    for stateName, stateVal in _SYS_STATES:
        allowed = condition.get(stateName)
        if allowed is not None and stateVal not in allowed:
            return False

    # task name filter
    allowed = condition.get('task')
    if allowed is not None and taskParams['name'] not in allowed:
        return False

    # buildtype filter
    allowed = condition.get('buildtype')
    if allowed is not None and bconf.selectedBuildType not in allowed:
        return False

    # toolchain filter
    allowed = condition.get('toolchain')
    if allowed is not None:
        if paramName == 'toolchain':
            msg = "Task %r: " % taskParams['name']
            msg += "Condition %r in buildconf.conditions" % condName
            msg += " cannot be used to select toolchain because it"
            msg += " contains the 'toolchain' parameter."
            raise ZenMakeConfError(msg, confpath = bconf.path)
        taskToolchains = toList(taskParams.get('toolchain', []))
        if not set(allowed).issubset(taskToolchains):
            return False

    # environment variable filters
    for envName, envVal in condition.get('env', {}).items():
        if os.environ.get(envName) != envVal:
            return False

    return True
def clearLocalCache():
    """ Clear the module-level condition caches. It's mostly for tests """
    _local.clear()
def handleOneTaskParamSelect(bconf, taskParams, paramName):
    """
    Handle one <param name>.select

    Every key of the '<param>.select' dict except 'default' is a condition
    expression. The value of the first expression that evaluates to true
    becomes the task param value; otherwise the 'default' value (or the
    plain param value) is used. The '*.select' entry is removed afterwards.
    """

    selectName = '%s.select' % paramName
    selectParam = taskParams.get(selectName)
    if selectParam is None:
        return

    defaultValue = selectParam.get('default', taskParams.get(paramName))
    detectedValue = None

    def handleCond(name):
        return _tryToSelect(bconf, name, taskParams, paramName)

    for label, param in selectParam.items():
        if label == 'default':
            continue

        # try one record of conditions.
        # BUG FIX: the condition callback itself must be passed so each term
        # of the expression is actually evaluated; previously a lambda that
        # merely returned the function object (always truthy) was passed,
        # which made every expression match.
        if _exprHandler.eval(label, handleCond):
            # found
            detectedValue = param

        if detectedValue is not None:
            # already found, stop loop
            break

    if detectedValue is None:
        detectedValue = defaultValue

    if detectedValue is None:
        taskParams.pop(paramName, None)
    else:
        taskParams[paramName] = detectedValue
        convertTaskParamValue(taskParams, paramName)

    # remove *.select param
    taskParams.pop(selectName, None)
def handleTaskParamSelects(bconf):
    """
    Resolve every '*.select' parameter of every task in the buildconf.
    """

    for params in bconf.tasks.values():
        selectNames = [key[:key.rfind('.')]
                       for key in params if key.endswith('.select')]
        for paramName in selectNames:
            handleOneTaskParamSelect(bconf, params, paramName)
| 31.022727 | 84 | 0.655495 |
9598ab944ce39d838f8e635bd5f862970bac42a2 | 306 | py | Python | lib/ops/__init__.py | BarneyQiao/pcl.pytorch | 4e0280e5e1470f705e620eda26f881d627c5016c | [
"MIT"
] | 233 | 2019-05-10T07:17:42.000Z | 2022-03-30T09:24:16.000Z | lib/ops/__init__.py | Michael-Steven/Crack_Image_WSOD | 4e8591a7c0768cee9eb7240bb9debd54824f5b33 | [
"MIT"
] | 78 | 2019-05-10T21:10:47.000Z | 2022-03-29T13:57:32.000Z | lib/ops/__init__.py | Michael-Steven/Crack_Image_WSOD | 4e8591a7c0768cee9eb7240bb9debd54824f5b33 | [
"MIT"
] | 57 | 2019-05-10T07:17:37.000Z | 2022-03-24T04:43:24.000Z | # This file is added for back-compatibility. Thus, downstream codebase
# could still use and import mmdet.ops.
# yapf: disable
from mmcv.ops import (RoIPool, RoIAlign, roi_pool, roi_align, nms, soft_nms)
# yapf: enable
# Public API: names re-exported from mmcv.ops for backward compatibility.
__all__ = [
    'RoIPool', 'RoIAlign', 'roi_pool', 'roi_align', 'nms', 'soft_nms'
]
| 25.5 | 76 | 0.712418 |
e1558e195fdb00baebb28c928d814deb9fd9ea0e | 54 | py | Python | python/metaparticle_pkg/__init__.py | radu-matei/metaparticle-package | 5c1640db16079aea02f738d37612a6b68fc10bc0 | [
"MIT"
] | null | null | null | python/metaparticle_pkg/__init__.py | radu-matei/metaparticle-package | 5c1640db16079aea02f738d37612a6b68fc10bc0 | [
"MIT"
] | null | null | null | python/metaparticle_pkg/__init__.py | radu-matei/metaparticle-package | 5c1640db16079aea02f738d37612a6b68fc10bc0 | [
"MIT"
] | null | null | null | from metaparticle_pkg.containerize import Containerize | 54 | 54 | 0.925926 |
fad94b857b0d7082616495124cb995176ad5665d | 5,897 | py | Python | archive/script_check_quote.py | mit-ll/MIT-keylime | b530e931aab74b32d375e4bb611767e297f06ace | [
"BSD-2-Clause"
] | 2 | 2020-04-02T07:19:41.000Z | 2020-05-06T16:05:48.000Z | scripts/script_check_quote.py | bu3alwa/keylime | 02305afa2e917554e54c2c5a2f150dc2c25dd290 | [
"BSD-2-Clause"
] | null | null | null | scripts/script_check_quote.py | bu3alwa/keylime | 02305afa2e917554e54c2c5a2f150dc2c25dd290 | [
"BSD-2-Clause"
] | 1 | 2019-11-06T22:48:52.000Z | 2019-11-06T22:48:52.000Z | #!/usr/bin/env python
'''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import keylime.common
keylime.common.USE_CLIME=True
from keylime.tpm_quote import check_deep_quote, check_quote
from timeit import timeit
from timeit import default_timer as timer
import logging
import sys
import os
import tempfile
import subprocess
import base64
logging.basicConfig(stream=sys.stdout, level=logging.WARN,format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('test_check_quote')
# benchmark configuration: number of iterations and whether to time the
# cLime-backed implementation as well
runs = 250
test_clime=True
tpm_policy = {'22':'ffffffffffffffffffffffffffffffffffffffff','16':'0000000000000000000000000000000000000000'}
quote = keylime.common.TEST_QUOTE
aik=keylime.common.TEST_AIK
# --- benchmark 1: raw quote check via the external 'checkquote' binary ---
# NOTE(review): .decode("zlib") on a byte string is Python-2-only; this
# script appears to target Python 2 despite the print(..., end='') calls.
# NOTE(review): aikFile/quoteFile are referenced in 'finally' but would be
# undefined if tempfile.mkstemp() itself raised — TODO confirm acceptable.
# now do it raw
try:
    # write out quote
    qfd, qtemp = tempfile.mkstemp()
    quoteFile = open(qtemp,"wb")
    quoteFile.write(base64.b64decode(quote).decode("zlib"))
    quoteFile.close()
    os.close(qfd)
    afd, atemp = tempfile.mkstemp()
    aikFile = open(atemp,"w")
    aikFile.write(aik)
    aikFile.close()
    os.close(afd)
    print('Checking quote raw %d times ... '%(runs), end='')
    # shell loop so process startup cost is included in each of the N runs
    cmd = "for i in `seq 1 %d`; do checkquote -aik %s -quote %s -nonce %s > /dev/null; done"%(runs,aikFile.name, quoteFile.name, keylime.common.TEST_NONCE)
    start = timer()
    proc = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    proc.wait()
    end = timer()
    c = end - start
    print("DONE")
#     while True:
#         line = proc.stdout.readline()
#         if line=="":
#             break
#         print(line)
    print("check_quote(raw): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
except Exception as e:
    logger.exception(e)
finally:
    # clean up the temp files regardless of benchmark outcome
    if aikFile is not None:
        os.remove(aikFile.name)
    if quoteFile is not None:
        os.remove(quoteFile.name)
    pass
# --- benchmark 2: in-process check_quote (pure Python, then cLime) ---
print('Checking quote %s times ... '%(runs), end='')
keylime.common.STUB_TPM=True
keylime.common.USE_CLIME=False
setup = 'from __main__ import quote,aik,logger,tpm_policy, check_quote'
c = timeit('check_quote(None, None, quote,aik,logger,tpm_policy)', number=runs, setup=setup)
print('DONE')
print("check_quote: %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
if test_clime:
    keylime.common.USE_CLIME=True
    print('Checking quote %s times with cLime... '%(runs), end='')
    setup = 'from __main__ import quote,aik,logger,tpm_policy, check_quote'
    c = timeit('check_quote(None, None, quote,aik,logger,tpm_policy)', number=runs, setup=setup)
    print('DONE')
    print("check_quote(cLime): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
print("\n================================\n\n")
# --- benchmark 3: deep (vTPM) quote checks, same pattern as above ---
keylime.common.USE_CLIME=True
tpm_policy = {'22':'ffffffffffffffffffffffffffffffffffffffff','16':'0000000000000000000000000000000000000000'}
vtpm_policy = {'23':'0000000000000000000000000000000000000000','16':'0000000000000000000000000000000000000000'}
quote = keylime.common.TEST_DQ
vaik=keylime.common.TEST_VAIK
haik=keylime.common.TEST_HAIK
# now do it raw
try:
    # write out quote
    qfd, qtemp = tempfile.mkstemp()
    quoteFile = open(qtemp,"wb")
    quoteFile.write(base64.b64decode(quote).decode("zlib"))
    quoteFile.close()
    os.close(qfd)
    afd, atemp = tempfile.mkstemp()
    vAIKFile = open(atemp,"w")
    vAIKFile.write(vaik)
    vAIKFile.close()
    os.close(afd)
    afd, atemp = tempfile.mkstemp()
    hAIKFile = open(atemp,"w")
    hAIKFile.write(haik)
    hAIKFile.close()
    os.close(afd)
    print('Checking deep quote raw %d times ... '%(runs), end='')
    cmd = "for i in `seq 1 %d`; do checkdeepquote -aik %s -deepquote %s -nonce %s -vaik %s > /dev/null ; done"%(runs,hAIKFile.name, quoteFile.name, keylime.common.TEST_DQ_NONCE,vAIKFile.name)
    start = timer()
    proc = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    proc.wait()
    end = timer()
    c = end - start
    print("DONE")
#     while True:
#         line = proc.stdout.readline()
#         if line=="":
#             break
#         print("="+line)
    print("check_deep_quote (raw): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
except Exception as e:
    logger.exception(e)
finally:
    if vAIKFile is not None:
        os.remove(vAIKFile.name)
    if hAIKFile is not None:
        os.remove(hAIKFile.name)
    if quoteFile is not None:
        os.remove(quoteFile.name)
    pass
print('Checking deep quote %s times ... '%(runs), end='')
keylime.common.STUB_TPM=True
setup = 'from __main__ import quote,vaik,haik,logger,vtpm_policy,tpm_policy, check_deep_quote'
c = timeit('check_deep_quote(None, None, quote,vaik,haik,logger,vtpm_policy,tpm_policy)', number=runs, setup=setup)
print('DONE')
print("check_deep_quote: %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
print("\n================================\n\n")
| 33.890805 | 191 | 0.690012 |
ee7fe7948b1b9c62fff333542904390bfac6dd68 | 3,448 | py | Python | code/datasets/unreal_DTU.py | simon-donne/defusr | fa4275070af4024eea128e99d7c6df2358d129a5 | [
"MIT"
] | 65 | 2019-04-08T20:24:01.000Z | 2021-09-22T22:16:13.000Z | code/datasets/unreal_DTU.py | simon-donne/defusr | fa4275070af4024eea128e99d7c6df2358d129a5 | [
"MIT"
] | 4 | 2019-07-22T05:30:27.000Z | 2020-05-27T05:36:52.000Z | code/datasets/unreal_DTU.py | simon-donne/defusr | fa4275070af4024eea128e99d7c6df2358d129a5 | [
"MIT"
] | 13 | 2019-05-01T22:22:06.000Z | 2021-09-24T07:19:13.000Z |
from datasets.DTU import DTUAdapter
import torch
from local_config import base_data_folder
import os
class UnrealDTUAdapter(DTUAdapter):
    """Adapter for a homebrew Unreal Engine version of the DTU MVS dataset."""
    datapath = os.path.join(base_data_folder, 'unrealDTU/')
    nr_views = 49

    def _set_default_splits(self):
        # fixed test split; _complete_splits fills in the remaining elements
        self.split['train'] = []
        self.split['test'] = [8, 16, 24, 32, 40, 48, 56, 64]
        self.split['val'] = []
        self._complete_splits()

    @staticmethod
    def _all_elements():
        # element ids 1..68
        return range(1, 69)

    def get_dataset_name(self):
        return "uDTU"

    def _get_image_scale_subfolder(self):
        """Image subfolder matching the current image scale."""
        if self.im_scale > 0.25:
            return "Rectified/"
        if self.im_scale <= 0.125:
            return "Rectified_rescaled/0.125/"
        return "Rectified_rescaled/0.25/"

    def _get_depth_map_scale_subfolder(self):
        """Depth-map subfolder matching the current image scale."""
        if self.im_scale > 0.25:
            return "Depth/"
        if self.im_scale <= 0.125:
            return "Depth/0.125/"
        return "Depth/0.25/"

    def get_depth_map_path(self, element, view, gt=True):
        """Path of the ground-truth (gt=True) or estimated depth map for a view."""
        prefix = "" if gt else self.depth_map_prefix
        return "%s/%s%s/%s/rect_%03d_points.npy" % (
            self.datapath,
            prefix,
            self._get_depth_map_scale_subfolder(),
            self._get_element_folder(element),
            view
        )

    def _get_normal_map_scale_subfolder(self):
        """Normal-map subfolder matching the current image scale."""
        if self.im_scale > 0.25:
            return "Normals/"
        if self.im_scale <= 0.125:
            return "Normals/0.125/"
        return "Normals/0.25/"

    def get_element_worldtf(self, element):
        """4x4 world transform, identical for all elements: scale 70, fixed offset."""
        tf = torch.eye(4, 4)
        tf[0, 0] = tf[1, 1] = tf[2, 2] = 70
        tf[0, 3] = -35
        tf[1, 3] = -35
        tf[2, 3] = -10
        return tf

    valid_centerviews = range(0, nr_views)

    def get_view_neighbours(self, cameras, center_view, nr_neighbours):
        """Indices of neighbouring views, picked by camera-center distance."""
        if nr_neighbours == 0:
            return []

        # camera centers: c = -(K R)^-1 t for each 3x4 camera matrix
        centers = []
        for cam in cameras:
            invKR = torch.inverse(cam[:3, :3])
            centers.append(-torch.matmul(invKR, cam[:3, 3]))

        ref = centers[center_view]
        dists = [torch.norm(c - ref).item() for c in centers]
        order = sorted(range(len(dists)), key=dists.__getitem__)

        if nr_neighbours >= len(dists):
            return order

        # index 0 is the center view itself, hence the 1-based slices
        if self._neighbour_selection == "closest":
            return order[1:1 + nr_neighbours]
        if self._neighbour_selection == "furthest":
            return order[-nr_neighbours:]
        if self._neighbour_selection == "mixed":
            half = nr_neighbours // 2
            return order[1:1 + half] + order[-(nr_neighbours - half):]
        raise ValueError(
            "Unsupported neighbourhood selection approach '%s'"
            % self._neighbour_selection)
| 33.475728 | 109 | 0.588457 |
d366dfba204d90d86f535a62fce06a6c5a2bb2f5 | 163 | py | Python | textract/parsers/psv_parser.py | Pandaaaa906/textract | cee75460d3d43f0aa6f4967c6ccf069ee79fc560 | [
"MIT"
] | 1,950 | 2015-01-01T18:30:11.000Z | 2022-03-30T21:06:41.000Z | textract/parsers/psv_parser.py | Pandaaaa906/textract | cee75460d3d43f0aa6f4967c6ccf069ee79fc560 | [
"MIT"
] | 322 | 2015-01-05T09:54:45.000Z | 2022-03-28T17:47:15.000Z | textract/parsers/psv_parser.py | Pandaaaa906/textract | cee75460d3d43f0aa6f4967c6ccf069ee79fc560 | [
"MIT"
] | 470 | 2015-01-14T11:51:42.000Z | 2022-03-23T07:05:46.000Z | from .csv_parser import Parser as BaseParser
class Parser(BaseParser):
    """Parser for pipe separated values files (.psv).

    Identical to CSV parsing, just with '|' as the delimiter.
    """
    delimiter = '|'
| 18.111111 | 60 | 0.674847 |
6855b9fafaac9e59c5f9cb4766f7c008e043d67a | 4,462 | py | Python | models/noise2true_trainer.py | P0lyFish/noise2-series | a21ad1b7cb20e44161393156efd7dcdab729b4a3 | [
"MIT"
] | 4 | 2021-01-05T05:27:36.000Z | 2022-01-07T12:39:54.000Z | models/noise2true_trainer.py | P0lyFish/noise2-series | a21ad1b7cb20e44161393156efd7dcdab729b4a3 | [
"MIT"
] | null | null | null | models/noise2true_trainer.py | P0lyFish/noise2-series | a21ad1b7cb20e44161393156efd7dcdab729b4a3 | [
"MIT"
] | null | null | null | import logging
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel
import models.lr_scheduler as lr_scheduler
from .base_trainer import BaseTrainer
from models.loss import CharbonnierLoss
from models.unet import Unet
logger = logging.getLogger('base')
class Noise2TrueTrainer(BaseTrainer):
    """Supervised (noisy -> clean) trainer: a U-Net generator trained with a
    pixel loss against ground-truth targets."""

    def __init__(self, opt):
        """Build the generator, loss, optimizer and LR schedulers from opt."""
        super(Noise2TrueTrainer, self).__init__(opt)
        if opt['dist']:
            self.rank = torch.distributed.get_rank()
        else:
            self.rank = -1  # non dist training
        train_opt = opt['train']

        # define network and load pretrained models
        self.netG = Unet(opt['network_G']['img_channels'],
                         opt['network_G']['img_channels']).to(self.device)
        if opt['dist']:
            self.netG = DistributedDataParallel(self.netG,
                                                device_ids=[
                                                    torch.cuda.current_device()
                                                ])
        else:
            self.netG = DataParallel(self.netG)
        # print network
        self.print_network()
        self.load()

        if self.is_train:
            self.netG.train()

            # loss: l1 / l2 / Charbonnier, selected by config
            loss_type = train_opt['pixel_criterion']
            if loss_type == 'l1':
                self.cri_pix = nn.L1Loss(reduction='sum').to(self.device)
            elif loss_type == 'l2':
                self.cri_pix = nn.MSELoss(reduction='sum').to(self.device)
            elif loss_type == 'cb':
                self.cri_pix = CharbonnierLoss().to(self.device)
            else:
                raise NotImplementedError('Loss type [{:s}] is not\
                    recognized.'.format(loss_type))

            # optimizers
            wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G']\
                else 0
            # only optimize parameters that require gradients
            params = []
            for k, v in self.netG.named_parameters():
                if v.requires_grad:
                    params.append(v)
                else:
                    if self.rank <= 0:
                        logger.warning('Params [{:s}] will not\
                            optimize.'.format(k))
            optim_params = [
                {
                    'params': params,
                    'lr': train_opt['lr_G']
                },
            ]
            self.optimizer_G = torch.optim.Adam(optim_params,
                                                lr=train_opt['lr_G'],
                                                weight_decay=wd_G,
                                                betas=(train_opt['beta1'],
                                                       train_opt['beta2']))
            self.optimizers.append(self.optimizer_G)

            # schedulers: multi-step or cosine-annealing, both with restarts
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(
                        lr_scheduler.MultiStepLR_Restart(
                            optimizer,
                            train_opt['lr_steps'],
                            restarts=train_opt['restarts'],
                            weights=train_opt['restart_weights'],
                            gamma=train_opt['lr_gamma'],
                            clear_state=train_opt['clear_state']
                        )
                    )
            elif train_opt['lr_scheme'] == 'CosineAnnealingLR_Restart':
                for optimizer in self.optimizers:
                    self.schedulers.append(
                        lr_scheduler.CosineAnnealingLR_Restart(
                            optimizer, train_opt['T_period'],
                            eta_min=train_opt['eta_min'],
                            restarts=train_opt['restarts'],
                            weights=train_opt['restart_weights']
                        )
                    )
            else:
                raise NotImplementedError()

            self.log_dict = OrderedDict()

    def optimize_parameters(self, step):
        """One training step on the current (self.LQ, self.HQ) batch.

        Assumes self.LQ/self.HQ were set beforehand (presumably by a
        feed_data method on the base trainer — confirm against BaseTrainer).
        """
        batchsz, _, _, _ = self.LQ.shape
        self.optimizer_G.zero_grad()
        out = self.netG(self.LQ)
        l_total = self.cri_pix(out, self.HQ)
        l_total.backward()
        self.optimizer_G.step()
        # set log: per-sample loss (cri_pix uses reduction='sum')
        self.log_dict['l_total'] = l_total.item() / batchsz
| 36.876033 | 79 | 0.476916 |
3d5e559d72ec0809a49a8f17168a7276ccf681bd | 10,344 | py | Python | utils.py | denisyarats/exorl | a3fb07a420939280aa0918150923dcca7e82bf2a | [
"MIT"
] | 23 | 2022-02-08T20:28:47.000Z | 2022-03-31T11:00:25.000Z | utils.py | denisyarats/exorl | a3fb07a420939280aa0918150923dcca7e82bf2a | [
"MIT"
] | 1 | 2022-03-10T04:45:19.000Z | 2022-03-10T04:45:19.000Z | utils.py | denisyarats/exorl | a3fb07a420939280aa0918150923dcca7e82bf2a | [
"MIT"
] | null | null | null | import random
import re
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import OmegaConf
from torch import distributions as pyd
from torch.distributions.utils import _standard_normal
class eval_mode:
    """Context manager putting the given models into eval mode and restoring
    their previous training flags on exit."""

    def __init__(self, *models):
        self.models = models

    def __enter__(self):
        self.prev_states = [m.training for m in self.models]
        for m in self.models:
            m.train(False)

    def __exit__(self, *args):
        for m, was_training in zip(self.models, self.prev_states):
            m.train(was_training)
        return False
def set_seed_everywhere(seed):
    """Seed every RNG in use: stdlib random, numpy, torch (and CUDA)."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def chain(*iterables):
    """Yield the items of each iterable in turn (like itertools.chain)."""
    for iterable in iterables:
        for item in iterable:
            yield item
def soft_update_params(net, target_net, tau):
    """Polyak-average net into target_net in place:
    target <- tau * net + (1 - tau) * target."""
    for src, dst in zip(net.parameters(), target_net.parameters()):
        dst.data.copy_(tau * src.data + (1 - tau) * dst.data)
def hard_update_params(net, target_net):
    """Copy all of net's parameters into target_net in place."""
    for src, dst in zip(net.parameters(), target_net.parameters()):
        dst.data.copy_(src.data)
def to_torch(xs, device):
    """Convert a sequence of array-likes to a tuple of tensors on device."""
    converted = []
    for x in xs:
        converted.append(torch.as_tensor(x, device=device))
    return tuple(converted)
def weight_init(m):
    """Custom weight init: orthogonal weights for Linear/Conv layers
    (relu gain for convs), zeroed biases. Other modules are left alone."""
    if isinstance(m, nn.Linear):
        nn.init.orthogonal_(m.weight.data)
        if hasattr(m.bias, 'data'):
            m.bias.data.fill_(0.0)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        nn.init.orthogonal_(m.weight.data, nn.init.calculate_gain('relu'))
        if hasattr(m.bias, 'data'):
            m.bias.data.fill_(0.0)
def grad_norm(params, norm_type=2.0):
    """Total norm of all gradients in params (params without grads are skipped)."""
    norms = [
        torch.norm(p.grad.detach(), norm_type)
        for p in params if p.grad is not None
    ]
    return torch.norm(torch.stack(norms), norm_type).item()
def param_norm(params, norm_type=2.0):
    """Total norm of all parameter values in params."""
    norms = [torch.norm(p.detach(), norm_type) for p in params]
    return torch.norm(torch.stack(norms), norm_type).item()
class Until:
    """Step predicate: true while step < until / action_repeat
    (always true when until is None)."""

    def __init__(self, until, action_repeat=1):
        self._until = until
        self._action_repeat = action_repeat

    def __call__(self, step):
        if self._until is None:
            return True
        return step < self._until // self._action_repeat
class Every:
    """Step predicate: true every (every / action_repeat) steps
    (always false when every is None)."""

    def __init__(self, every, action_repeat=1):
        self._every = every
        self._action_repeat = action_repeat

    def __call__(self, step):
        if self._every is None:
            return False
        return step % (self._every // self._action_repeat) == 0
class Timer:
    """Tracks wall-clock time since construction and since the last reset."""

    def __init__(self):
        now = time.time()
        self._start_time = now
        self._last_time = now

    def reset(self):
        """Return (seconds since last reset, seconds since construction)
        and restart the per-interval clock."""
        now = time.time()
        elapsed_time = now - self._last_time
        self._last_time = now
        return elapsed_time, now - self._start_time

    def total_time(self):
        """Seconds since construction."""
        return time.time() - self._start_time
class TruncatedNormal(pyd.Normal):
    """Normal distribution whose samples are clamped to [low, high], with a
    straight-through gradient at the clamp boundary."""

    def __init__(self, loc, scale, low=-1.0, high=1.0, eps=1e-6):
        super().__init__(loc, scale, validate_args=False)
        self.low = low
        self.high = high
        self.eps = eps

    def _clamp(self, x):
        bounded = torch.clamp(x, self.low + self.eps, self.high - self.eps)
        # forward pass uses the clamped value, backward pass the raw one
        return x - x.detach() + bounded.detach()

    def sample(self, clip=None, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        noise = _standard_normal(shape,
                                 dtype=self.loc.dtype,
                                 device=self.loc.device)
        noise = noise * self.scale
        if clip is not None:
            # optionally bound the noise itself (used for target smoothing)
            noise = torch.clamp(noise, -clip, clip)
        return self._clamp(self.loc + noise)
class TanhTransform(pyd.transforms.Transform):
    """Bijective tanh transform mapping R onto (-1, 1)."""
    domain = pyd.constraints.real
    codomain = pyd.constraints.interval(-1.0, 1.0)
    bijective = True
    sign = +1

    def __init__(self, cache_size=1):
        super().__init__(cache_size=cache_size)

    @staticmethod
    def atanh(x):
        # numerically stable inverse of tanh
        return 0.5 * (x.log1p() - (-x).log1p())

    def __eq__(self, other):
        return isinstance(other, TanhTransform)

    def _call(self, x):
        return x.tanh()

    def _inverse(self, y):
        # No clamping at the boundary here, as that may degrade certain
        # algorithms; `cache_size=1` should be used instead.
        return self.atanh(y)

    def log_abs_det_jacobian(self, x, y):
        # Stable form of log(1 - tanh(x)^2); see
        # https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
        return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
    """Normal(loc, scale) pushed through tanh; samples lie in (-1, 1)."""

    def __init__(self, loc, scale):
        self.loc = loc
        self.scale = scale
        self.base_dist = pyd.Normal(loc, scale)
        super().__init__(self.base_dist, [TanhTransform()])

    @property
    def mean(self):
        """Image of the base mean under the transform chain, i.e. tanh(loc)."""
        result = self.loc
        for transform in self.transforms:
            result = transform(result)
        return result
def schedule(schdl, step):
    """Evaluate a schedule spec at the given step.

    Accepts a plain constant ('0.1'), 'linear(init,final,duration)', or
    'step_linear(init,final1,duration1,final2,duration2)' (two linear
    segments). Raises NotImplementedError for anything else.
    """
    try:
        return float(schdl)
    except ValueError:
        pass

    match = re.match(r'linear\((.+),(.+),(.+)\)', schdl)
    if match:
        init, final, duration = map(float, match.groups())
        mix = np.clip(step / duration, 0.0, 1.0)
        return (1.0 - mix) * init + mix * final

    match = re.match(r'step_linear\((.+),(.+),(.+),(.+),(.+)\)', schdl)
    if match:
        init, final1, duration1, final2, duration2 = map(float, match.groups())
        if step <= duration1:
            mix = np.clip(step / duration1, 0.0, 1.0)
            return (1.0 - mix) * init + mix * final1
        mix = np.clip((step - duration1) / duration2, 0.0, 1.0)
        return (1.0 - mix) * final1 + mix * final2

    raise NotImplementedError(schdl)
class RandomShiftsAug(nn.Module):
    """Random image-shift augmentation (as in DrQ-v2): replicate-pad by
    `pad` pixels, then bilinearly resample at a randomly shifted grid."""

    def __init__(self, pad):
        super().__init__()
        self.pad = pad

    def forward(self, x):
        x = x.float()
        n, c, h, w = x.size()
        assert h == w
        x = F.pad(x, (self.pad,) * 4, 'replicate')

        # normalized pixel-center coordinates of the unshifted crop inside
        # the padded image
        eps = 1.0 / (h + 2 * self.pad)
        coords = torch.linspace(-1.0 + eps,
                                1.0 - eps,
                                h + 2 * self.pad,
                                device=x.device,
                                dtype=x.dtype)[:h]
        coords = coords.unsqueeze(0).repeat(h, 1).unsqueeze(2)
        grid = torch.cat([coords, coords.transpose(1, 0)], dim=2)
        grid = grid.unsqueeze(0).repeat(n, 1, 1, 1)

        # one integer shift per sample, converted to normalized units
        shift = torch.randint(0,
                              2 * self.pad + 1,
                              size=(n, 1, 1, 2),
                              device=x.device,
                              dtype=x.dtype)
        shift *= 2.0 / (h + 2 * self.pad)

        return F.grid_sample(x,
                             grid + shift,
                             padding_mode='zeros',
                             align_corners=False)
class RMS(object):
    """Running mean and variance accumulated over batches of values."""

    def __init__(self, device, epsilon=1e-4, shape=(1,)):
        self.M = torch.zeros(shape).to(device)  # running mean
        self.S = torch.ones(shape).to(device)   # running variance
        self.n = epsilon                        # effective sample count

    def __call__(self, x):
        """Fold a batch (first dim) into the running stats; return (M, S)."""
        bs = x.size(0)
        total = self.n + bs
        delta = torch.mean(x, dim=0) - self.M
        new_M = self.M + delta * bs / total
        new_S = (self.S * self.n + torch.var(x, dim=0) * bs +
                 torch.square(delta) * self.n * bs / total) / total
        self.M, self.S, self.n = new_M, new_S, total
        return self.M, self.S
class PBE(object):
    """particle-based entropy based on knn normalized by running mean """
    def __init__(self, rms, knn_clip, knn_k, knn_avg, knn_rms, device):
        # rms: running-mean tracker used to normalize distances (see RMS)
        # knn_clip: subtract-and-floor threshold; disabled when negative
        # knn_k: number of nearest neighbours considered
        # knn_avg: average over all k neighbours instead of using the k-th only
        # knn_rms: whether to divide distances by the running mean
        self.rms = rms
        self.knn_rms = knn_rms
        self.knn_k = knn_k
        self.knn_avg = knn_avg
        self.knn_clip = knn_clip
        self.device = device
    def __call__(self, rep):
        """Return a (b1, 1) entropy-style reward from pairwise kNN distances
        within the batch of representations `rep` (first dim is batch)."""
        source = target = rep
        b1, b2 = source.size(0), target.size(0)
        # pairwise L2 distances within the batch:
        # (b1, 1, c) - (1, b2, c) -> (b1, 1, c) - (1, b2, c) -> (b1, b2, c) -> (b1, b2)
        sim_matrix = torch.norm(source[:, None, :].view(b1, 1, -1) -
                                target[None, :, :].view(1, b2, -1),
                                dim=-1,
                                p=2)
        # k smallest distances per row (includes the zero self-distance)
        reward, _ = sim_matrix.topk(self.knn_k,
                                    dim=1,
                                    largest=False,
                                    sorted=True)  # (b1, k)
        if not self.knn_avg:  # only keep k-th nearest neighbor
            reward = reward[:, -1]
            reward = reward.reshape(-1, 1)  # (b1, 1)
            # normalize by the running mean of distances (updates self.rms)
            reward /= self.rms(reward)[0] if self.knn_rms else 1.0
            reward = torch.maximum(
                reward - self.knn_clip,
                torch.zeros_like(reward).to(self.device)
            ) if self.knn_clip >= 0.0 else reward  # (b1, 1)
        else:  # average over all k nearest neighbors
            reward = reward.reshape(-1, 1)  # (b1 * k, 1)
            reward /= self.rms(reward)[0] if self.knn_rms else 1.0
            reward = torch.maximum(
                reward - self.knn_clip,
                torch.zeros_like(reward).to(
                    self.device)) if self.knn_clip >= 0.0 else reward
            reward = reward.reshape((b1, self.knn_k))  # (b1, k)
            reward = reward.mean(dim=1, keepdim=True)  # (b1, 1)
        # log(1 + d) keeps the reward non-negative and dampens large distances
        reward = torch.log(reward + 1.0)
        return reward
| 32.325 | 137 | 0.554234 |
e08d6fc696bfe8b30c58792e7acff54143e8c262 | 216 | py | Python | finished/edabit/very_easy/sum_polygon_angles.py | UltiRequiem/daily-python-practice | 31f72c45378be90b8fcadd30d7042819ee551a17 | [
"MIT"
] | 8 | 2021-05-29T23:30:12.000Z | 2021-09-24T03:25:44.000Z | finished/edabit/very_easy/sum_polygon_angles.py | UltiRequiem/daily-python-practice | 31f72c45378be90b8fcadd30d7042819ee551a17 | [
"MIT"
] | null | null | null | finished/edabit/very_easy/sum_polygon_angles.py | UltiRequiem/daily-python-practice | 31f72c45378be90b8fcadd30d7042819ee551a17 | [
"MIT"
] | 6 | 2021-06-02T14:20:24.000Z | 2021-08-19T00:49:26.000Z | # Return the total sum of internal angles (in degrees)
def sum_polygon(n: int) -> int:
    """Return the sum of the interior angles (in degrees) of an n-sided polygon."""
    return 180 * (n - 2)
# sum_polygon_lambda = lambda n: (n - 2) * 180
if __name__ == "__main__":
    # demo: interior-angle sum of a 24-gon (expected 3960)
    print(sum_polygon(24))
| 21.6 | 54 | 0.648148 |
657c7fc7206fcedc644bb2f16e5959db0eeedf24 | 79 | py | Python | table/apps.py | sainioan/extractiontool | 9908b7ff1915b00a5721405a48b13d941442e1dd | [
"MIT"
] | 2 | 2021-05-18T17:25:06.000Z | 2021-05-28T04:24:16.000Z | table/apps.py | sainioan/extractiontool | 9908b7ff1915b00a5721405a48b13d941442e1dd | [
"MIT"
] | 38 | 2021-01-20T09:38:37.000Z | 2021-05-15T13:10:05.000Z | table/apps.py | sainioan/extractiontool | 9908b7ff1915b00a5721405a48b13d941442e1dd | [
"MIT"
] | 3 | 2021-01-20T13:18:31.000Z | 2021-02-25T13:34:49.000Z | from django.apps import AppConfig
class Table(AppConfig):
    """Django application configuration for the 'table' app."""
    name = 'table'
| 13.166667 | 33 | 0.721519 |
e5cf9f4d971d4fa3cfc5c6849c0ae1589426effe | 720 | py | Python | ampa/voting/forms.py | jordiprats/django-ampa | b8e9d6076c32caa8bdc11094362ddccb12d95f8c | [
"Apache-2.0"
] | null | null | null | ampa/voting/forms.py | jordiprats/django-ampa | b8e9d6076c32caa8bdc11094362ddccb12d95f8c | [
"Apache-2.0"
] | null | null | null | ampa/voting/forms.py | jordiprats/django-ampa | b8e9d6076c32caa8bdc11094362ddccb12d95f8c | [
"Apache-2.0"
] | null | null | null | from django.forms import ModelForm
from django import forms
from voting.models import *
class ElectionForm(forms.ModelForm):
    """ModelForm for creating/editing an Election (labels in Catalan)."""

    class Meta:
        model = Election
        fields = ['titol', 'html_message', 'multianswer', 'anonymous']
        labels = {
            'titol': 'Titol',
            'html_message': 'Missatge',
            'multianswer': 'Multiresposta',
            'anonymous': 'Enquesta anònima',
        }
class OptionForm(forms.ModelForm):
    """ModelForm for creating/editing an Option (labels in Catalan).

    The text input's size is configured through Meta.widgets. The previous
    code assigned a bare widget (`text = forms.TextInput(...)`) as a class
    attribute, which a ModelForm silently ignores — only Field instances
    are collected by the metaclass — so the size never took effect.
    """

    class Meta:
        model = Option
        fields = ['text', 'order']
        labels = {
            'text': 'Text de l\'opció',
            'order': 'Ordre en la llista de opcions',
        }
        widgets = {
            'text': forms.TextInput(attrs={'size': '40'}),
        }
8da196dbb664c95e975ea97ba4e6c0183872e776 | 6,071 | py | Python | code/svm.py | lahrie/Ensemble_Twitter_Analysis | a661c7b20cd491e454faf18240f3c7f5779d2829 | [
"MIT"
] | 11 | 2021-07-15T13:21:26.000Z | 2022-01-28T03:27:24.000Z | code/svm.py | lahrie/FINAL_EXAM-Ensemble_Twitter_Analysis | a661c7b20cd491e454faf18240f3c7f5779d2829 | [
"MIT"
] | null | null | null | code/svm.py | lahrie/FINAL_EXAM-Ensemble_Twitter_Analysis | a661c7b20cd491e454faf18240f3c7f5779d2829 | [
"MIT"
] | 7 | 2021-06-27T16:37:47.000Z | 2022-02-25T03:59:07.000Z | from sklearn import svm
import utils
import random
import numpy as np
from scipy.sparse import lil_matrix
from sklearn.feature_extraction.text import TfidfTransformer
# Performs classification using SVM.
# Paths to preprocessed data and pickled frequency distributions.
FREQ_DIST_FILE = '../train-processed-freqdist.pkl'
BI_FREQ_DIST_FILE = '../train-processed-freqdist-bi.pkl'
TRAIN_PROCESSED_FILE = '../train-processed.csv'
TEST_PROCESSED_FILE = '../test-processed.csv'
# When True, hold out part of the training data for validation.
TRAIN = True
# Vocabulary sizes: top-N unigrams (and optionally bigrams).
UNIGRAM_SIZE = 15000
VOCAB_SIZE = UNIGRAM_SIZE
USE_BIGRAMS = True
if USE_BIGRAMS:
    BIGRAM_SIZE = 10000
    VOCAB_SIZE = UNIGRAM_SIZE + BIGRAM_SIZE
# 'presence' (binary) or 'frequency' (counts, later TF-IDF weighted).
FEAT_TYPE = 'frequency'
def get_feature_vector(tweet):
    """Return ([unigrams], [bigrams]) of the tweet that appear in the
    top-N vocabularies (module globals `unigrams`/`bigrams`).

    Python 2 code (uses xrange).
    """
    uni_feature_vector = []
    bi_feature_vector = []
    words = tweet.split()
    # walk consecutive word pairs; the last word is handled separately below
    for i in xrange(len(words) - 1):
        word = words[i]
        next_word = words[i + 1]
        # NOTE(review): truthiness check skips a vocabulary index of 0 —
        # a word mapped to index 0 would never be collected; verify intent.
        if unigrams.get(word):
            uni_feature_vector.append(word)
        if USE_BIGRAMS:
            if bigrams.get((word, next_word)):
                bi_feature_vector.append((word, next_word))
    if len(words) >= 1:
        if unigrams.get(words[-1]):
            uni_feature_vector.append(words[-1])
    return uni_feature_vector, bi_feature_vector
def extract_features(tweets, batch_size=500, test_file=True, feat_type='presence'):
    """Yield (features, labels) batches as (batch_size x VOCAB_SIZE) sparse
    matrices built from pre-extracted unigram/bigram feature vectors.

    For test files each tweet is (id, (unigrams, bigrams)); for train files
    it is (id, sentiment, (unigrams, bigrams)). Python 2 code (uses xrange).
    """
    num_batches = int(np.ceil(len(tweets) / float(batch_size)))
    for i in xrange(num_batches):
        batch = tweets[i * batch_size: (i + 1) * batch_size]
        # NOTE(review): the last batch keeps the full batch_size rows/labels,
        # so trailing rows may be all-zero padding — confirm callers expect this.
        features = lil_matrix((batch_size, VOCAB_SIZE))
        labels = np.zeros(batch_size)
        for j, tweet in enumerate(batch):
            if test_file:
                tweet_words = tweet[1][0]
                tweet_bigrams = tweet[1][1]
            else:
                tweet_words = tweet[2][0]
                tweet_bigrams = tweet[2][1]
                labels[j] = tweet[1]
            if feat_type == 'presence':
                # binary features: deduplicate before counting
                tweet_words = set(tweet_words)
                tweet_bigrams = set(tweet_bigrams)
            for word in tweet_words:
                idx = unigrams.get(word)
                # NOTE(review): `if idx:` also skips index 0 — verify intent.
                if idx:
                    features[j, idx] += 1
            if USE_BIGRAMS:
                for bigram in tweet_bigrams:
                    idx = bigrams.get(bigram)
                    if idx:
                        # bigram columns come after the unigram columns
                        features[j, UNIGRAM_SIZE + idx] += 1
        yield features, labels
def apply_tf_idf(X):
    """Fit and return a TF-IDF transformer on the given term-count matrix."""
    tfidf = TfidfTransformer(smooth_idf=True, sublinear_tf=True, use_idf=True)
    tfidf.fit(X)
    return tfidf
def process_tweets(csv_file, test_file=True):
    """Returns a list of tuples of type (tweet_id, feature_vector)
    or (tweet_id, sentiment, feature_vector)
    Args:
        csv_file (str): Name of processed csv file generated by preprocess.py
        test_file (bool, optional): If processing test file
    Returns:
        list: Of tuples
    """
    tweets = []
    print 'Generating feature vectors'
    # NOTE(review): the local name 'csv' shadows the stdlib module name;
    # harmless here since the csv module is not imported in this file.
    with open(csv_file, 'r') as csv:
        lines = csv.readlines()
        total = len(lines)
        for i, line in enumerate(lines):
            # NOTE(review): split(',') assumes the tweet text itself contains
            # no commas — presumably guaranteed by preprocess.py; confirm.
            if test_file:
                tweet_id, tweet = line.split(',')
            else:
                tweet_id, sentiment, tweet = line.split(',')
            # feature_vector is the (unigrams, bigrams) pair from
            # get_feature_vector.
            feature_vector = get_feature_vector(tweet)
            if test_file:
                tweets.append((tweet_id, feature_vector))
            else:
                tweets.append((tweet_id, int(sentiment), feature_vector))
            # Progress indicator on the console.
            utils.write_status(i + 1, total)
    print '\n'
    return tweets
if __name__ == '__main__':
    # Fixed seed so train/validation splits are reproducible.
    np.random.seed(1337)
    # Global vocabularies consumed by get_feature_vector/extract_features.
    unigrams = utils.top_n_words(FREQ_DIST_FILE, UNIGRAM_SIZE)
    if USE_BIGRAMS:
        bigrams = utils.top_n_bigrams(BI_FREQ_DIST_FILE, BIGRAM_SIZE)
    tweets = process_tweets(TRAIN_PROCESSED_FILE, test_file=False)
    if TRAIN:
        train_tweets, val_tweets = utils.split_data(tweets)
    else:
        random.shuffle(tweets)
        train_tweets = tweets
    del tweets
    print 'Extracting features & training batches'
    clf = svm.LinearSVC(C=0.1)
    # batch_size equals the full training set, so the loop below runs a
    # single iteration. NOTE(review): with multiple batches, clf.fit would
    # retrain from scratch per batch — confirm single-batch use is intended.
    batch_size = len(train_tweets)
    i = 1
    n_train_batches = int(np.ceil(len(train_tweets) / float(batch_size)))
    for training_set_X, training_set_y in extract_features(train_tweets, test_file=False, feat_type=FEAT_TYPE, batch_size=batch_size):
        utils.write_status(i, n_train_batches)
        i += 1
        if FEAT_TYPE == 'frequency':
            # tfidf is reused later for the validation/test transforms.
            tfidf = apply_tf_idf(training_set_X)
            training_set_X = tfidf.transform(training_set_X)
        clf.fit(training_set_X, training_set_y)
    print '\n'
    print 'Testing'
    if TRAIN:
        # Evaluate accuracy on the held-out validation split.
        correct, total = 0, len(val_tweets)
        i = 1
        batch_size = len(val_tweets)
        n_val_batches = int(np.ceil(len(val_tweets) / float(batch_size)))
        for val_set_X, val_set_y in extract_features(val_tweets, test_file=False, feat_type=FEAT_TYPE, batch_size=batch_size):
            if FEAT_TYPE == 'frequency':
                val_set_X = tfidf.transform(val_set_X)
            prediction = clf.predict(val_set_X)
            correct += np.sum(prediction == val_set_y)
            utils.write_status(i, n_val_batches)
            i += 1
        print '\nCorrect: %d/%d = %.4f %%' % (correct, total, correct * 100. / total)
    else:
        # Predict labels for the unlabeled test file and save them.
        del train_tweets
        test_tweets = process_tweets(TEST_PROCESSED_FILE, test_file=True)
        n_test_batches = int(np.ceil(len(test_tweets) / float(batch_size)))
        predictions = np.array([])
        print 'Predicting batches'
        i = 1
        for test_set_X, _ in extract_features(test_tweets, test_file=True, feat_type=FEAT_TYPE):
            if FEAT_TYPE == 'frequency':
                test_set_X = tfidf.transform(test_set_X)
            prediction = clf.predict(test_set_X)
            predictions = np.concatenate((predictions, prediction))
            utils.write_status(i, n_test_batches)
            i += 1
        # Truncate zero-padding from the last batch by indexing only
        # len(test_tweets) entries.
        predictions = [(str(j), int(predictions[j]))
                       for j in range(len(test_tweets))]
        utils.save_results_to_csv(predictions, 'svm.csv')
        print '\nSaved to svm.csv'
| 37.245399 | 134 | 0.620985 |
2f7929f0768c9ed6588b5ebf9accbf8963e5c4aa | 45,222 | py | Python | env/lib/python3.7/site-packages/sklearn/feature_extraction/tests/test_text.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 25 | 2019-03-08T01:03:03.000Z | 2022-02-14T17:38:32.000Z | env/lib/python3.7/site-packages/sklearn/feature_extraction/tests/test_text.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 9 | 2020-09-25T22:32:02.000Z | 2022-02-09T23:45:10.000Z | env/lib/python3.7/site-packages/sklearn/feature_extraction/tests/test_text.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 31 | 2019-01-15T20:16:50.000Z | 2022-03-01T05:47:38.000Z | # -*- coding: utf-8 -*-
from collections.abc import Mapping
import re
import warnings
import pytest
from scipy import sparse
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import IS_PYPY
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.utils.testing import (assert_equal, assert_not_equal,
assert_almost_equal, assert_in,
assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, ignore_warnings,
SkipTest, assert_raises, assert_no_warnings,
fails_if_pypy, assert_allclose_dense_sparse,
skip_if_32bit)
from collections import defaultdict
from functools import partial
import pickle
from io import StringIO
# Small toy corpora shared by the vectorizer tests below.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
# Concatenation of both corpora (junk docs first).
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    """Strip accents from *s* and return the result upper-cased."""
    without_accents = strip_accents_unicode(s)
    return without_accents.upper()
def strip_eacute(s):
    """Replace every occurrence of 'é' in *s* with a plain 'e'."""
    return 'e'.join(s.split('é'))
def split_tokenize(s):
    """Tokenize *s* by splitting on runs of whitespace."""
    tokens = s.split()
    return tokens
def lazy_analyze(s):
    """Analyzer that ignores its input and emits one constant feature."""
    constant_feature = 'the_ultimate_feature'
    return [constant_feature]
def test_strip_accents():
    """strip_accents_unicode transliterates accented chars to plain ones."""
    # check some classical latin accentuated symbols
    a = 'àáâãäåçèéêë'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_unicode(a), expected)
    a = 'ìíîïñòóôõöùúûüý'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_unicode(a), expected)
    # check some arabic
    a = '\u0625'  # alef with a hamza below: إ
    expected = '\u0627'  # simple alef: ا
    assert_equal(strip_accents_unicode(a), expected)
    # mix letters accentuated and not
    a = "this is à test"
    expected = 'this is a test'
    assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
    """strip_accents_ascii maps to ASCII, dropping chars with no match."""
    # check some classical latin accentuated symbols
    a = 'àáâãäåçèéêë'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_ascii(a), expected)
    a = "ìíîïñòóôõöùúûüý"
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_ascii(a), expected)
    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = ''  # halef has no direct ascii match
    assert_equal(strip_accents_ascii(a), expected)
    # mix letters accentuated and not
    a = "this is à test"
    expected = 'this is a test'
    assert_equal(strip_accents_ascii(a), expected)
@pytest.mark.parametrize('Vectorizer', (CountVectorizer, HashingVectorizer))
def test_word_analyzer_unigrams(Vectorizer):
    """Word analyzer: accent stripping, file input, custom preprocessor
    and custom tokenizer all behave as documented."""
    wa = Vectorizer(strip_accents='ascii').build_analyzer()
    text = ("J'ai mangé du kangourou ce midi, "
            "c'était pas très bon.")
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon']
    assert_equal(wa(text), expected)
    text = "This is a test, really.\n\n I met Harry yesterday."
    expected = ['this', 'is', 'test', 'really', 'met', 'harry',
                'yesterday']
    assert_equal(wa(text), expected)
    wa = Vectorizer(input='file').build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ['this', 'is', 'test', 'with', 'file', 'like',
                'object']
    assert_equal(wa(text), expected)
    # with custom preprocessor
    wa = Vectorizer(preprocessor=uppercase).build_analyzer()
    text = ("J'ai mangé du kangourou ce midi, "
            " c'était pas très bon.")
    expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
                'ETAIT', 'PAS', 'TRES', 'BON']
    assert_equal(wa(text), expected)
    # with custom tokenizer
    wa = Vectorizer(tokenizer=split_tokenize,
                    strip_accents='ascii').build_analyzer()
    text = ("J'ai mangé du kangourou ce midi, "
            "c'était pas très bon.")
    expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
                "c'etait", 'pas', 'tres', 'bon.']
    assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
    """ngram_range=(1, 2) emits all unigrams followed by all bigrams."""
    wa = CountVectorizer(analyzer="word", strip_accents='unicode',
                         ngram_range=(1, 2)).build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
                'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
                'etait pas', 'pas tres', 'tres bon']
    assert_equal(wa(text), expected)
def test_unicode_decode_error():
    """Analyzers raise UnicodeDecodeError when the declared encoding is wrong."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    text_bytes = text.encode('utf-8')
    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)
    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
    """Character n-gram analyzer over raw text and file-like input."""
    cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon"
    expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
    assert_equal(cnga(text)[:5], expected)
    expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
    assert_equal(cnga(text)[-5:], expected)
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
    expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
    """char_wb n-grams are padded with spaces at word boundaries."""
    cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [' th', 'thi', 'his', 'is ', ' thi']
    assert_equal(cnga(text)[:5], expected)
    expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char_wb',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
    assert_equal(cnga(text)[:6], expected)
def test_word_ngram_analyzer():
    """Word n-gram analyzer with order-3..6 ranges and file input."""
    cnga = CountVectorizer(analyzer='word', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ['this is test', 'is test really', 'test really met']
    assert_equal(cnga(text)[:3], expected)
    expected = ['test really met harry yesterday',
                'this is test really met harry',
                'is test really met harry yesterday']
    assert_equal(cnga(text)[-3:], expected)
    cnga_file = CountVectorizer(input='file', analyzer='word',
                                ngram_range=(3, 6)).build_analyzer()
    file = StringIO(text)
    assert_equal(cnga_file(file), cnga(text))
def test_countvectorizer_custom_vocabulary():
    """User-supplied vocabularies of several container types are honored."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())
    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            # Mappings must be preserved exactly, including indices.
            assert_equal(vect.vocabulary_, vocab)
        else:
            assert_equal(set(vect.vocabulary_), terms)
        X = vect.transform(JUNK_FOOD_DOCS)
        assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
    """A fixed vocabulary restricts the feature space inside a Pipeline."""
    what_we_like = ["pizza", "beer"]
    pipe = Pipeline([
        ('count', CountVectorizer(vocabulary=what_we_like)),
        ('tfidf', TfidfTransformer())])
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    assert_equal(set(pipe.named_steps['count'].vocabulary_),
                 set(what_we_like))
    assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
    """A vocabulary mapping two terms to the same index must be rejected.

    The original try/except silently passed when no ValueError was raised
    at all; pytest.raises makes the expectation explicit. Construction and
    fit are both inside the context so the test holds regardless of where
    the vocabulary validation happens.
    """
    vocab = {"pizza": 0, "beer": 0}
    with pytest.raises(ValueError,
                       match="(?i)vocabulary contains repeated indices"):
        vect = CountVectorizer(vocabulary=vocab)
        vect.fit(["pasta_siziliana"])
def test_countvectorizer_custom_vocabulary_gap_index():
    """A vocabulary whose indices are not a 0..n-1 range must be rejected.

    The original try/except silently passed when no ValueError was raised;
    pytest.raises makes the expectation explicit. Construction and fit are
    both inside the context so the test holds regardless of where the
    vocabulary validation happens.
    """
    vocab = {"pizza": 1, "beer": 2}
    with pytest.raises(ValueError, match="doesn't contain index"):
        vect = CountVectorizer(vocabulary=vocab)
        vect.fit(["pasta_verdura"])
def test_countvectorizer_stop_words():
    """get_stop_words resolves 'english', rejects unknown names, accepts lists."""
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
    """Fitting must fail with a clear message when the vocabulary is empty."""
    try:
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
    """Refitting on a different corpus must relearn the vocabulary."""
    cv = CountVectorizer()
    X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
    X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
    assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
    """Smoothed IDF keeps tf-idf non-negative and rows l2-normalized,
    even in the presence of all-zero features."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert (tfidf >= 0).all()
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # this is robust to features with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert (tfidf >= 0).all()
def test_tfidf_no_smoothing():
    """Unsmoothed IDF triggers a divide-by-zero warning on all-zero features."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert (tfidf >= 0).all()
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    clean_warning_registry()
    # Probe whether this numpy build emits a divide-by-zero warning at all.
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1
    in_warning_message = 'divide by zero'
    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
                                 tr.fit_transform, X).toarray()
    if not numpy_provides_div0_warning:
        raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
    """Sublinear tf (1 + log(tf)) grows strictly but slower than tf."""
    X = [[1], [2], [3]]
    tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tfidf = tr.fit_transform(X).toarray()
    assert_equal(tfidf[0], 1)
    assert_greater(tfidf[1], tfidf[0])
    assert_greater(tfidf[2], tfidf[1])
    assert_less(tfidf[1], 2)
    assert_less(tfidf[2], 3)
def test_vectorizer():
    """End-to-end checks of CountVectorizer, TfidfTransformer and
    TfidfVectorizer, including error paths for bad parameters."""
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1
    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v1 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizer give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()
        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)
        # stop word from the fixed list
        assert "the" not in vocabulary
        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert "copyright" not in vocabulary
        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)
    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert not hasattr(t2, "idf_")
    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)
    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)
    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')
    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert not tv.fixed_vocabulary_
    assert_array_almost_equal(tfidf, tfidf2)
    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)
    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)
    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)
    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)
    # error with bad analyzer type
    # BUG FIX: the original line was `v3.set_params = '_invalid_...'`,
    # which clobbered the set_params method with a string instead of
    # setting the analyzer parameter; the assertion then only passed by
    # accident via the bad strip_accents value set above.
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
    """Setting vectorizer attributes must propagate to the inner transformer."""
    tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
                         sublinear_tf=False)
    tv.norm = 'l1'
    assert_equal(tv._tfidf.norm, 'l1')
    tv.use_idf = True
    assert tv._tfidf.use_idf
    tv.smooth_idf = True
    assert tv._tfidf.smooth_idf
    tv.sublinear_tf = True
    assert tv._tfidf.sublinear_tf
@fails_if_pypy
def test_hashing_vectorizer():
    """Smoke-test HashingVectorizer output shape, value bounds and
    per-row normalization (l2 by default, l1 when requested)."""
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert np.min(X.data) > -1
    assert np.min(X.data) < 0
    assert np.max(X.data) > 0
    assert np.max(X.data) < 1
    # Check that the rows are normalized
    for i in range(X.shape[0]):
        # BUG FIX: the original checked X[0] on every iteration, so only
        # the first row's norm was ever verified.
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), norm='l1')
    X = v.transform(ALL_FOOD_DOCS)
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert ngrams_nnz > token_nnz
    assert ngrams_nnz < 2 * token_nnz
    # makes the feature values bounded
    assert np.min(X.data) > -1
    assert np.max(X.data) < 1
    # Check that the rows are normalized
    for i in range(X.shape[0]):
        # BUG FIX: same off-by-row issue as above — check row i, not row 0.
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
    """get_feature_names reflects learned or fixed vocabulary order."""
    cv = CountVectorizer(max_df=0.5)
    # test for Value error on unfitted/empty vocabulary
    assert_raises(ValueError, cv.get_feature_names)
    assert not cv.fixed_vocabulary_
    # test for vocabulary learned from data
    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert_equal(len(cv.vocabulary_), n_features)
    feature_names = cv.get_feature_names()
    assert_equal(len(feature_names), n_features)
    assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
                        'salad', 'sparkling', 'tomato', 'water'],
                       feature_names)
    for idx, name in enumerate(feature_names):
        assert_equal(idx, cv.vocabulary_.get(name))
    # test for custom vocabulary
    vocab = ['beer', 'burger', 'celeri', 'coke', 'pizza',
             'salad', 'sparkling', 'tomato', 'water']
    cv = CountVectorizer(vocabulary=vocab)
    feature_names = cv.get_feature_names()
    assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad',
                        'sparkling', 'tomato', 'water'], feature_names)
    assert cv.fixed_vocabulary_
    for idx, name in enumerate(feature_names):
        assert_equal(idx, cv.vocabulary_.get(name))
@pytest.mark.parametrize('Vectorizer', (CountVectorizer, TfidfVectorizer))
def test_vectorizer_max_features(Vectorizer):
    """max_features keeps the top-df terms; the rest become stop words."""
    expected_vocabulary = {'burger', 'beer', 'salad', 'pizza'}
    expected_stop_words = {'celeri', 'tomato', 'copyright', 'coke',
                           'sparkling', 'water', 'the'}
    # test bounded number of extracted features
    vectorizer = Vectorizer(max_df=0.6, max_features=4)
    vectorizer.fit(ALL_FOOD_DOCS)
    assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
    assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
    """max_features must always select the highest-frequency terms."""
    # Regression test: max_features didn't work correctly in 0.14.
    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)
    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    features_1 = cv_1.get_feature_names()
    features_3 = cv_3.get_feature_names()
    features_None = cv_None.get_feature_names()
    # The most common feature is "the", with frequency 7.
    assert_equal(7, counts_1.max())
    assert_equal(7, counts_3.max())
    assert_equal(7, counts_None.max())
    # The most common feature should be the same
    assert_equal("the", features_1[np.argmax(counts_1)])
    assert_equal("the", features_3[np.argmax(counts_3)])
    assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
    """Terms above max_df are dropped and recorded in stop_words_."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    vect.fit(test_data)
    assert 'a' in vect.vocabulary_.keys()
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.max_df = 0.5  # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert 'a' not in vect.vocabulary_.keys()  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)  # {bcdt} remain
    assert 'a' in vect.stop_words_
    assert_equal(len(vect.stop_words_), 2)
    vect.max_df = 1
    vect.fit(test_data)
    assert 'a' not in vect.vocabulary_.keys()  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)  # {bcdt} remain
    assert 'a' in vect.stop_words_
    assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
    """Terms below min_df are dropped and recorded in stop_words_."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', min_df=1)
    vect.fit(test_data)
    assert 'a' in vect.vocabulary_.keys()
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.min_df = 2
    vect.fit(test_data)
    assert 'c' not in vect.vocabulary_.keys()  # {bcdt} ignored
    assert_equal(len(vect.vocabulary_.keys()), 2)  # {ae} remain
    assert 'c' in vect.stop_words_
    assert_equal(len(vect.stop_words_), 4)
    vect.min_df = 0.8  # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    assert 'c' not in vect.vocabulary_.keys()  # {bcdet} ignored
    assert_equal(len(vect.vocabulary_.keys()), 1)  # {a} remains
    assert 'c' in vect.stop_words_
    assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
    """binary=True records presence instead of counts; dtype is settable."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
    assert_array_equal([[3, 1, 1, 0, 0],
                        [1, 2, 0, 1, 1]], X)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0],
                        [1, 1, 0, 1, 1]], X)
    # check the ability to change the dtype
    vect = CountVectorizer(analyzer='char', max_df=1.0,
                           binary=True, dtype=np.float32)
    X_sparse = vect.fit_transform(test_data)
    assert_equal(X_sparse.dtype, np.float32)
@fails_if_pypy
def test_hashed_binary_occurrences():
    """HashingVectorizer binary mode caps values at 1; dtype is settable."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = HashingVectorizer(alternate_sign=False, analyzer='char', norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X[0:1].data), 3)
    assert_equal(np.max(X[1:2].data), 2)
    assert_equal(X.dtype, np.float64)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = HashingVectorizer(analyzer='char', alternate_sign=False,
                             binary=True, norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X.data), 1)
    assert_equal(X.dtype, np.float64)
    # check the ability to change the dtype
    vect = HashingVectorizer(analyzer='char', alternate_sign=False,
                             binary=True, norm=None, dtype=np.float64)
    X = vect.transform(test_data)
    assert_equal(X.dtype, np.float64)
@pytest.mark.parametrize('Vectorizer', (CountVectorizer, TfidfVectorizer))
def test_vectorizer_inverse_transform(Vectorizer):
    """inverse_transform recovers each document's unique analyzed terms,
    for both sparse and dense input."""
    # raw documents
    data = ALL_FOOD_DOCS
    vectorizer = Vectorizer()
    transformed_data = vectorizer.fit_transform(data)
    inversed_data = vectorizer.inverse_transform(transformed_data)
    analyze = vectorizer.build_analyzer()
    for doc, inversed_terms in zip(data, inversed_data):
        terms = np.sort(np.unique(analyze(doc)))
        inversed_terms = np.sort(np.unique(inversed_terms))
        assert_array_equal(terms, inversed_terms)
    # Test that inverse_transform also works with numpy arrays
    transformed_data = transformed_data.toarray()
    inversed_data2 = vectorizer.inverse_transform(transformed_data)
    for terms, terms2 in zip(inversed_data, inversed_data2):
        assert_array_equal(np.sort(terms), np.sort(terms2))
@pytest.mark.filterwarnings('ignore: The default of the `iid`')  # 0.22
@pytest.mark.filterwarnings('ignore: The default value of cv')  # 0.22
def test_count_vectorizer_pipeline_grid_selection():
    """Grid search over a CountVectorizer + LinearSVC pipeline."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.2, random_state=0)
    pipeline = Pipeline([('vect', CountVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'svc__loss': ('hinge', 'squared_hinge')
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
@pytest.mark.filterwarnings('ignore: The default of the `iid`')  # 0.22
@pytest.mark.filterwarnings('ignore: The default value of cv')  # 0.22
def test_vectorizer_pipeline_grid_selection():
    """Grid search over a TfidfVectorizer + LinearSVC pipeline,
    including the vectorizer norm in the grid."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.1, random_state=0)
    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'vect__norm': ('l1', 'l2'),
        'svc__loss': ('hinge', 'squared_hinge'),
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
    assert_equal(best_vectorizer.norm, 'l2')
    assert not best_vectorizer.fixed_vocabulary_
def test_vectorizer_pipeline_cross_validation():
    """3-fold cross-validation of the tf-idf + LinearSVC pipeline is perfect
    on this toy corpus."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    cv_scores = cross_val_score(pipeline, data, target, cv=3)
    assert_array_equal(cv_scores, [1., 1., 1.])
@fails_if_pypy
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"Машинное обучение — обширный подраздел искусственного "
"интеллекта, изучающий методы построения алгоритмов, "
"способных обучаться."
)
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 12))
vect = HashingVectorizer(norm=None, alternate_sign=False)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and not alternate_sign, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert vect.fixed_vocabulary_
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
if IS_PYPY and isinstance(orig, HashingVectorizer):
continue
else:
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
    # ensure that vocabulary of type set is coerced to a list to
    # preserve iteration ordering after deserialization
    rng = np.random.RandomState(0)
    vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
                            'salad', 'sparkling', 'tomato', 'water'])
    for _ in range(100):
        vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
        original = CountVectorizer(vocabulary=vocab_set)
        restored = pickle.loads(pickle.dumps(original))
        original.fit(ALL_FOOD_DOCS)
        restored.fit(ALL_FOOD_DOCS)
        assert_equal(original.get_feature_names(),
                     restored.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
    """Check that a dict vocabulary survives a pickle round-trip.

    A vectorizer built from a {token: index} vocabulary must expose the
    same fitted feature names before and after pickling/unpickling.
    """
    rng = np.random.RandomState(0)
    vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
                            'salad', 'sparkling', 'tomato', 'water'])
    for x in range(0, 100):
        words = rng.choice(vocab_words, size=5, replace=False)
        # Idiomatic {word: index} construction instead of the manual
        # index-counting loop over range(0, 5).
        vocab_dict = {word: index for index, word in enumerate(words)}
        cv = CountVectorizer(vocabulary=vocab_dict)
        unpickled_cv = pickle.loads(pickle.dumps(cv))
        cv.fit(ALL_FOOD_DOCS)
        unpickled_cv.fit(ALL_FOOD_DOCS)
        assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
    # Ensure that deleting the stop_words_ attribute doesn't affect transform
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
    )

    for vectorizer in fitted_vectorizers:
        reference = vectorizer.transform(JUNK_FOOD_DOCS).toarray()

        # transform must be unaffected by stop_words_ being set to None ...
        vectorizer.stop_words_ = None
        with_none = vectorizer.transform(JUNK_FOOD_DOCS).toarray()

        # ... or by the attribute being deleted altogether.
        delattr(vectorizer, 'stop_words_')
        after_delete = vectorizer.transform(JUNK_FOOD_DOCS).toarray()

        assert_array_equal(with_none, reference)
        assert_array_equal(after_delete, reference)
def test_pickling_transformer():
    # A fitted TfidfTransformer must survive a pickle round-trip intact.
    counts = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    original = TfidfTransformer().fit(counts)
    restored = pickle.loads(pickle.dumps(original))
    assert_equal(type(restored), original.__class__)
    assert_array_equal(
        restored.fit_transform(counts).toarray(),
        original.fit_transform(counts).toarray())
def test_transformer_idf_setter():
    counts = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    fitted = TfidfTransformer().fit(counts)
    # A fresh transformer given the fitted idf_ vector must transform
    # identically to the transformer that learned it.
    fresh = TfidfTransformer()
    fresh.idf_ = fitted.idf_
    assert_array_equal(
        fresh.transform(counts).toarray(),
        fitted.transform(counts).toarray())
def test_tfidf_vectorizer_setter():
    fitted = TfidfVectorizer(use_idf=True)
    fitted.fit(JUNK_FOOD_DOCS)
    # A fresh vectorizer sharing the vocabulary and given the fitted idf_
    # vector must produce the same transform output.
    fresh = TfidfVectorizer(vocabulary=fitted.vocabulary_, use_idf=True)
    fresh.idf_ = fitted.idf_
    assert_array_equal(
        fresh.transform(JUNK_FOOD_DOCS).toarray(),
        fitted.transform(JUNK_FOOD_DOCS).toarray())
def test_tfidfvectorizer_invalid_idf_attr():
    fitted = TfidfVectorizer(use_idf=True)
    fitted.fit(JUNK_FOOD_DOCS)
    fresh = TfidfVectorizer(vocabulary=fitted.vocabulary_, use_idf=True)
    # An idf_ vector whose length disagrees with the vocabulary size must
    # be rejected by the setter.
    invalid_idf = [1.0] * (len(fitted.idf_) + 1)
    assert_raises(ValueError, setattr, fresh, 'idf_', invalid_idf)
def test_non_unique_vocab():
    # A vocabulary containing duplicated entries must be rejected at fit.
    duplicated_vocab = ['a', 'b', 'c', 'a', 'a']
    vectorizer = CountVectorizer(vocabulary=duplicated_vocab)
    assert_raises(ValueError, vectorizer.fit, [])
@fails_if_pypy
def test_hashingvectorizer_nan_in_docs():
    # np.nan can appear when using pandas to load text fields from a csv file
    # with missing values.
    message = "np.nan is an invalid document, expected byte or unicode string."

    def fit_with_nan():
        HashingVectorizer().fit_transform(['hello world', np.nan,
                                           'hello hello'])

    assert_raise_message(ValueError, message, fit_with_nan)
def test_tfidfvectorizer_binary():
    # Non-regression test: TfidfVectorizer used to ignore its "binary" param.
    vectorizer = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert vectorizer.binary

    docs = ['hello world', 'hello hello']
    expected = [1, 1, 1, 0]
    # Both fit_transform and transform must clamp counts to 0/1.
    assert_array_equal(
        vectorizer.fit_transform(docs).toarray().ravel(), expected)
    assert_array_equal(
        vectorizer.transform(docs).toarray().ravel(), expected)
def test_tfidfvectorizer_export_idf():
    # The public idf_ property must mirror the internal transformer's idf_.
    vectorizer = TfidfVectorizer(use_idf=True).fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vectorizer.idf_, vectorizer._tfidf.idf_)
def test_vectorizer_vocab_clone():
    # A clone must learn the same restricted vocabulary as the original.
    original = TfidfVectorizer(vocabulary=["the"])
    cloned = clone(original)
    original.fit(ALL_FOOD_DOCS)
    cloned.fit(ALL_FOOD_DOCS)
    assert_equal(cloned.vocabulary_, original.vocabulary_)
@pytest.mark.parametrize('Vectorizer',
                         (CountVectorizer, TfidfVectorizer, HashingVectorizer))
def test_vectorizer_string_object_as_input(Vectorizer):
    # Passing a bare string instead of an iterable of documents must fail
    # for every entry point, in the same order as the public API is used.
    message = ("Iterable over raw text documents expected, "
               "string object received.")
    vectorizer = Vectorizer()
    for method in (vectorizer.fit_transform, vectorizer.fit,
                   vectorizer.transform):
        assert_raise_message(ValueError, message, method, "hello world!")
@pytest.mark.parametrize("X_dtype", [np.float32, np.float64])
def test_tfidf_transformer_type(X_dtype):
    # The transformer must preserve the floating point dtype of its input.
    X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42)
    transformed = TfidfTransformer().fit_transform(X)
    assert transformed.dtype == X.dtype
def test_tfidf_transformer_sparse():
    # CSC and CSR inputs must produce the same values, each keeping its
    # own sparse format.
    X = sparse.rand(10, 20000, dtype=np.float64, random_state=42)
    from_csc = TfidfTransformer().fit_transform(sparse.csc_matrix(X))
    from_csr = TfidfTransformer().fit_transform(sparse.csr_matrix(X))
    assert_allclose_dense_sparse(from_csc, from_csr)
    assert from_csc.format == from_csr.format
@pytest.mark.parametrize(
    "vectorizer_dtype, output_dtype, warning_expected",
    [(np.int32, np.float64, True),
     (np.int64, np.float64, True),
     (np.float32, np.float32, False),
     (np.float64, np.float64, False)]
)
def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype,
                               warning_expected):
    """Integer dtypes are upcast to float64 with a warning; float dtypes
    are kept as-is and must not warn."""
    X = np.array(["numpy", "scipy", "sklearn"])
    vectorizer = TfidfVectorizer(dtype=vectorizer_dtype)

    warning_msg_match = "'dtype' should be used."
    warning_cls = UserWarning
    # pytest.warns(None) records any warnings without requiring one.
    expected_warning_cls = warning_cls if warning_expected else None
    with pytest.warns(expected_warning_cls,
                      match=warning_msg_match) as record:
        X_idf = vectorizer.fit_transform(X)
    if expected_warning_cls is None:
        # NOTE(review): `record` yields warnings.WarningMessage wrappers,
        # not warning instances, so isinstance(w, warning_cls) looks like
        # it is always False here — confirm the filter matches anything.
        relevant_warnings = [w for w in record
                             if isinstance(w, warning_cls)]
        assert len(relevant_warnings) == 0
    assert X_idf.dtype == output_dtype
@pytest.mark.parametrize("vec", [
    HashingVectorizer(ngram_range=(2, 1)),
    CountVectorizer(ngram_range=(2, 1)),
    TfidfVectorizer(ngram_range=(2, 1))
])
def test_vectorizers_invalid_ngram_range(vec):
    # vectorizers could be initialized with invalid ngram range
    # test for raising error message
    invalid_range = vec.ngram_range
    message = ("Invalid value for ngram_range=%s "
               "lower boundary larger than the upper boundary."
               % str(invalid_range))
    if isinstance(vec, HashingVectorizer):
        # NOTE(review): pytest.xfail() raises immediately, so everything
        # below — including the HashingVectorizer-only transform check at
        # the end — never runs for HashingVectorizer. Confirm whether this
        # was meant to be conditional (e.g. on running under PyPy) instead.
        pytest.xfail(reason='HashingVectorizer not supported on PyPy')

    assert_raise_message(
        ValueError, message, vec.fit, ["good news everyone"])
    assert_raise_message(
        ValueError, message, vec.fit_transform, ["good news everyone"])

    if isinstance(vec, HashingVectorizer):
        assert_raise_message(
            ValueError, message, vec.transform, ["good news everyone"])
def _check_stop_words_consistency(estimator):
    # Ask the estimator itself whether its stop word list is consistent
    # with its preprocessing/tokenization pipeline.
    words = estimator.get_stop_words()
    tokenizer = estimator.build_tokenizer()
    preprocessor = estimator.build_preprocessor()
    return estimator._check_stop_words_consistency(
        words, preprocessor, tokenizer)
@fails_if_pypy
def test_vectorizer_stop_words_inconsistent():
    """Stop words that tokenize to out-of-list tokens trigger exactly one
    UserWarning per stop list, and the assessment is cached."""
    lstr = "['and', 'll', 've']"
    message = ('Your stop_words may be inconsistent with your '
               'preprocessing. Tokenizing the stop words generated '
               'tokens %s not in stop_words.' % lstr)
    for vec in [CountVectorizer(),
                TfidfVectorizer(), HashingVectorizer()]:
        vec.set_params(stop_words=["you've", "you", "you'll", 'AND'])
        assert_warns_message(UserWarning, message, vec.fit_transform,
                             ['hello world'])
        # reset stop word validation
        del vec._stop_words_id
        assert _check_stop_words_consistency(vec) is False

        # Only one warning per stop list
        assert_no_warnings(vec.fit_transform, ['hello world'])
        # None signals "already assessed" (cached), not a fresh verdict.
        assert _check_stop_words_consistency(vec) is None

        # Test caching of inconsistency assessment: a changed stop list
        # must invalidate the cache and warn again.
        vec.set_params(stop_words=["you've", "you", "you'll", 'blah', 'AND'])
        assert_warns_message(UserWarning, message, vec.fit_transform,
                             ['hello world'])
@skip_if_32bit
def test_countvectorizer_sort_features_64bit_sparse_indices():
    """
    Check that CountVectorizer._sort_features preserves the dtype of its sparse
    feature matrix.

    This test is skipped on 32bit platforms, see:
    https://github.com/scikit-learn/scikit-learn/pull/11295
    for more details.
    """
    INDICES_DTYPE = np.int64

    X = sparse.csr_matrix((5, 5), dtype=np.int64)
    # force indices and indptr to int64.
    X.indices = X.indices.astype(INDICES_DTYPE)
    X.indptr = X.indptr.astype(INDICES_DTYPE)

    vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2}

    sorted_X = CountVectorizer()._sort_features(X, vocabulary)
    assert sorted_X.indices.dtype == INDICES_DTYPE
@fails_if_pypy
@pytest.mark.parametrize('Estimator',
                         [CountVectorizer, TfidfVectorizer, HashingVectorizer])
def test_stop_word_validation_custom_preprocessor(Estimator):
    """Custom preprocessors/tokenizers interact with stop-word validation:
    True = consistent, 'error' = check impossible, None = cached result."""
    data = [{'text': 'some text'}]

    vec = Estimator()
    assert _check_stop_words_consistency(vec) is True

    # A preprocessor that unwraps dicts means stop words cannot be checked
    # against tokenized output, so the check reports 'error'.
    vec = Estimator(preprocessor=lambda x: x['text'],
                    stop_words=['and'])
    assert _check_stop_words_consistency(vec) == 'error'
    # checks are cached
    assert _check_stop_words_consistency(vec) is None
    vec.fit_transform(data)

    # Same situation when the preprocessor comes from a subclass override.
    class CustomEstimator(Estimator):
        def build_preprocessor(self):
            return lambda x: x['text']

    vec = CustomEstimator(stop_words=['and'])
    assert _check_stop_words_consistency(vec) == 'error'

    # A custom tokenizer that still yields 'and' is consistent.
    vec = Estimator(tokenizer=lambda doc: re.compile(r'\w{1,}')
                    .findall(doc),
                    stop_words=['and'])
    assert _check_stop_words_consistency(vec) is True
@pytest.mark.parametrize(
    'Estimator',
    [CountVectorizer,
     TfidfVectorizer,
     pytest.param(HashingVectorizer, marks=fails_if_pypy)]
)
@pytest.mark.parametrize(
    'input_type, err_type, err_msg',
    [('filename', FileNotFoundError, ''),
     ('file', AttributeError, "'str' object has no attribute 'read'")]
)
def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg):
    # Even with a callable analyzer, `input` still governs how raw items
    # are interpreted, so plain text passed as filename/file must raise.
    data = ['this is text, not file or filename']
    vectorizer = Estimator(analyzer=lambda x: x.split(), input=input_type)
    with pytest.raises(err_type, match=err_msg):
        vectorizer.fit_transform(data)
@pytest.mark.parametrize(
    'Estimator',
    [CountVectorizer,
     TfidfVectorizer,
     pytest.param(HashingVectorizer, marks=fails_if_pypy)]
)
@pytest.mark.parametrize(
    'analyzer', [lambda doc: open(doc, 'r'), lambda doc: doc.read()]
)
@pytest.mark.parametrize('input_type', ['file', 'filename'])
def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type):
    """The v0.21 behavior change warning fires exactly once before the
    underlying file handling fails on the non-path input."""
    data = ['this is text, not file or filename']
    warn_msg = 'Since v0.21, vectorizer'
    # Outer raises: the analyzer ultimately fails on data that is neither
    # a file nor a filename. Inner warns: the deprecation notice must be
    # emitted before that failure.
    with pytest.raises((FileNotFoundError, AttributeError)):
        with pytest.warns(ChangedBehaviorWarning, match=warn_msg) as records:
            Estimator(analyzer=analyzer, input=input_type).fit_transform(data)
    assert len(records) == 1
    assert warn_msg in str(records[0])
@pytest.mark.parametrize(
    'Estimator',
    [CountVectorizer,
     TfidfVectorizer,
     pytest.param(HashingVectorizer, marks=fails_if_pypy)]
)
def test_callable_analyzer_reraise_error(tmpdir, Estimator):
    # check if a custom exception from the analyzer is shown to the user
    def failing_analyzer(doc):
        raise Exception("testing")

    sample_file = tmpdir.join("file.txt")
    sample_file.write("sample content\n")

    with pytest.raises(Exception, match="testing"):
        Estimator(analyzer=failing_analyzer,
                  input='file').fit_transform([sample_file])
| 36.004777 | 79 | 0.671576 |
2375aed0fc5649289cbc077e2320ecbfe21ba664 | 160 | py | Python | 3.5/scrapy_plus/middlewares/__init__.py | feel-easy/myspider | dcc65032015d7dbd8bea78f846fd3cac7638c332 | [
"Apache-2.0"
] | 1 | 2019-02-28T10:16:00.000Z | 2019-02-28T10:16:00.000Z | 3.5/scrapy_plus/middlewares/__init__.py | wasalen/myspider | dcc65032015d7dbd8bea78f846fd3cac7638c332 | [
"Apache-2.0"
] | null | null | null | 3.5/scrapy_plus/middlewares/__init__.py | wasalen/myspider | dcc65032015d7dbd8bea78f846fd3cac7638c332 | [
"Apache-2.0"
] | null | null | null | # THE WINTER IS COMING! the old driver will be driving who was a man of the world!
# -*- coding: utf-8 -*- python 3.6.7, create time is 18-11-30 上午11:32 GMT+8
| 40 | 82 | 0.68125 |
ee3e43aa04d9354394cd77ace006e633e4d0f79c | 4,345 | py | Python | scripts/experiments-evaluation/network_evaluation.py | gomerudo/nas-rl2 | 3fddf42603ec54d9d157df8515881a1469ed5eb3 | [
"MIT"
] | 5 | 2020-05-24T21:05:26.000Z | 2021-09-27T21:05:02.000Z | scripts/experiments-evaluation/network_evaluation.py | gomerudo/nas-rl2 | 3fddf42603ec54d9d157df8515881a1469ed5eb3 | [
"MIT"
] | null | null | null | scripts/experiments-evaluation/network_evaluation.py | gomerudo/nas-rl2 | 3fddf42603ec54d9d157df8515881a1469ed5eb3 | [
"MIT"
] | 4 | 2020-09-18T16:24:15.000Z | 2022-03-15T08:58:17.000Z | """Train a network specified in Neural Structure Code (NSC) for 100 epochs.
It relies on the NetEvaluation class from the NasGym. The network has to be
manually specified in this file (line 30).
Given a network in NSC code, we build a TensorFlow network with every
convolution layer using 32 -> 64 -> 128 ... filters. The network is trained
using exponential decay for 100 epochs and the accuracy on a test set is
printed.
"""
import math
import time
from datetime import datetime
import numpy as np
import pandas as pd
import nasgym.utl.configreader as cr
from nasgym import nas_logger
from nasgym import CONFIG_INI
from nasgym.net_ops.net_eval import NetEvaluation
from nasgym.envs.factories import DatasetHandlerFactory
from nasgym.envs.factories import TrainerFactory
from nasgym.utl.miscellaneous import compute_str_hash
from nasgym.utl.miscellaneous import state_to_string
if __name__ == '__main__':
state = np.array([
[0, 0, 0, 0, 0], # 1
[0, 0, 0, 0, 0], # 2
[0, 0, 0, 0, 0], # 3
[0, 0, 0, 0, 0], # 4
[0, 0, 0, 0, 0], # 5
[1, 1, 3, 0, 0], # 6
[2, 1, 3, 1, 0], # 7
[3, 1, 3, 2, 0], # 8
[4, 2, 2, 3, 0], # 9
[5, 2, 3, 4, 0], # 10
])
n_epochs = 100
dataset_handler = DatasetHandlerFactory.get_handler("meta-dataset")
hash_state = compute_str_hash(state_to_string(state))
composed_id = "{d}-{h}".format(
d=dataset_handler.current_dataset_name(), h=hash_state
)
try:
log_path = CONFIG_INI[cr.SEC_DEFAULT][cr.PROP_LOGPATH]
except KeyError:
log_path = "workspace"
log_trainer_dir = "{lp}/trainer-{h}".format(lp=log_path, h=composed_id)
batch_size, decay_steps, beta1, beta2, epsilon, fcl_units, dropout_rate, \
split_prop = TrainerFactory._load_default_trainer_attributes()
trainset_length = math.floor(
dataset_handler.current_n_observations()*(1. - split_prop)
)
evaluator = NetEvaluation(
encoded_network=state,
input_shape=dataset_handler.current_shape(),
n_classes=dataset_handler.current_n_classes(),
batch_size=batch_size,
log_path=log_trainer_dir,
variable_scope="cnn-{h}".format(h=hash_state),
n_epochs=n_epochs,
op_beta1=0.9,
op_beta2=0.999,
op_epsilon=10e-08,
fcl_units=4096,
dropout_rate=0.4,
n_obs_train=trainset_length
)
train_features, train_labels = None, None
val_features, val_labels = None, None
def custom_train_input_fn():
return dataset_handler.current_train_set()
def custom_eval_input_fn():
return dataset_handler.current_validation_set()
train_input_fn = custom_train_input_fn
eval_input_fn = custom_eval_input_fn
nas_logger.debug(
"Training architecture %s for %d epochs", composed_id, n_epochs
)
ev_results = pd.DataFrame(columns=["epoch", "test_accuracy"])
start_time = time.time()
for epoch in range(n_epochs):
nas_logger.info("Running epoch %d", epoch + 1)
evaluator.train(
train_data=train_features,
train_labels=train_labels,
train_input_fn=train_input_fn,
n_epochs=1 # As specified by BlockQNN
)
nas_logger.debug("Evaluating architecture %s", composed_id)
res = evaluator.evaluate(
eval_data=val_features,
eval_labels=val_labels,
eval_input_fn=eval_input_fn
)
accuracy = res['accuracy']*100
ev_results = ev_results.append(
{
'epoch': epoch + 1,
'test_accuracy': accuracy
},
ignore_index=True
)
end_time = time.time()
timestamp = datetime.now()
timestamp_str = timestamp.strftime("%Y%m%d%H%M%S%f")
ev_res_path = "{log}/{cid}-{ep}-{time}.csv".format(
log=log_path,
cid=composed_id,
ep=n_epochs,
time=timestamp_str
)
outfile = open(ev_res_path, 'w')
ev_results.to_csv(outfile)
outfile.close()
nas_logger.debug(
"Train-evaluation procedure finished for architecture %s",
composed_id
)
nas_logger.info("Final accuracy is %f", accuracy)
nas_logger.info("Training-evaluation time %f", (end_time - start_time))
| 30.173611 | 78 | 0.643728 |
be378ab80b57131d1ffec21bf02dd3f1bb0e2efa | 4,656 | py | Python | PROJ/LEVY/American_Options/Script_BermudanOptions.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | null | null | null | PROJ/LEVY/American_Options/Script_BermudanOptions.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | null | null | null | PROJ/LEVY/American_Options/Script_BermudanOptions.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | 1 | 2022-01-07T15:31:45.000Z | 2022-01-07T15:31:45.000Z | # Generated with SMOP 0.41-beta
try:
from smop.libsmop import *
except ImportError:
raise ImportError('File compiled with `smop3`, please install `smop3` to run it.') from None
# Script_BermudanOptions.m
##################################################################
### Bermudan OPTION PRICER
##################################################################
# Descritpion: Script to Price Bermudan/American options in Levy Models
# using the PROJ method
# Author: Justin Kirkby
# References: (1) American and exotic option pricing with jump diffusions and other Levy Processes,
# J. Compuational Finance, 2018
# (2) Efficient Option Pricing By Frame Duality with The Fast
# Fourier Transform, SIAM J. Financial Math., 2015
##################################################################
folder,name,ext=fileparts(which(mfilename('fullpath')),nargout=3)
# Script_BermudanOptions.m:13
cd(folder)
addpath('../RN_CHF')
addpath('../Helper_Functions')
############################################
### Step 1) CONTRACT/GENERAL PARAMETERS
############################################
S_0=100
# Script_BermudanOptions.m:22
W=105
# Script_BermudanOptions.m:23
r=0.05
# Script_BermudanOptions.m:24
q=0.0
# Script_BermudanOptions.m:25
T=1
# Script_BermudanOptions.m:26
M=500
# Script_BermudanOptions.m:27
############################################
### Step 2) CHOOSE MODEL PARAMETERS (Levy Models)
############################################
model=1
# Script_BermudanOptions.m:32
params=cellarray([])
# Script_BermudanOptions.m:33
if model == 1:
params.sigmaBSM = copy(0.15)
# Script_BermudanOptions.m:36
else:
if model == 2:
params.C = copy(0.02)
# Script_BermudanOptions.m:39
params.G = copy(5)
# Script_BermudanOptions.m:40
params.MM = copy(15)
# Script_BermudanOptions.m:41
params.Y = copy(1.2)
# Script_BermudanOptions.m:42
else:
if model == 3:
params.alpha = copy(15)
# Script_BermudanOptions.m:45
params.beta = copy(- 5)
# Script_BermudanOptions.m:46
params.delta = copy(0.5)
# Script_BermudanOptions.m:47
else:
if model == 4:
params.sigma = copy(0.12)
# Script_BermudanOptions.m:50
params.lam = copy(0.4)
# Script_BermudanOptions.m:51
params.muj = copy(- 0.12)
# Script_BermudanOptions.m:52
params.sigmaj = copy(0.18)
# Script_BermudanOptions.m:53
else:
if model == 5:
params.sigma = copy(0.15)
# Script_BermudanOptions.m:56
params.lam = copy(3)
# Script_BermudanOptions.m:57
params.p_up = copy(0.2)
# Script_BermudanOptions.m:58
params.eta1 = copy(25)
# Script_BermudanOptions.m:59
params.eta2 = copy(10)
# Script_BermudanOptions.m:60
############################################
### Step 3) CHOOSE PROJ PARAMETERS
############################################
UseCumulant=1
# Script_BermudanOptions.m:67
#---------------------
# APPROACH 1: Cumulant Based approach for grid width
# (see "Robust Option Pricing with Characteritics Functions and the BSpline Order of Density Projection")
#---------------------
if UseCumulant == 1:
logN=12
# Script_BermudanOptions.m:74
L1=12
# Script_BermudanOptions.m:75
#---------------------
# APPROACH 2: Manual GridWidth approach
#---------------------
else:
P=7
# Script_BermudanOptions.m:80
Pbar=3
# Script_BermudanOptions.m:81
############################################
### PRICE
############################################
### Note: rnCHF is the risk netural CHF, c1,c2,c4 are the cumulants
modelInput=getModelInput(model,T / M,r,q,params)
# Script_BermudanOptions.m:88
if UseCumulant == 1:
alpha=getTruncationAlpha(T,L1,modelInput,model)
# Script_BermudanOptions.m:91
else:
logN=P + Pbar
# Script_BermudanOptions.m:93
alpha=2 ** Pbar / 2
# Script_BermudanOptions.m:94
N=2 ** logN
# Script_BermudanOptions.m:96
tic
price=PROJ_Bermudan_Put(M,S_0,W,r,T,modelInput.rnCHF,N,alpha)
# Script_BermudanOptions.m:99
toc
fprintf('%.8f \n',price) | 32.333333 | 106 | 0.515679 |
b871d4938a36e48d54617e789e515582788b8dfc | 2,782 | py | Python | gpvdm_gui/gui/ribbon_solar.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 12 | 2016-09-13T08:58:13.000Z | 2022-01-17T07:04:52.000Z | gpvdm_gui/gui/ribbon_solar.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 3 | 2017-11-11T12:33:02.000Z | 2019-03-08T00:48:08.000Z | gpvdm_gui/gui/ribbon_solar.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 6 | 2019-01-03T06:17:12.000Z | 2022-01-01T15:59:00.000Z | # -*- coding: utf-8 -*-
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package ribbon_solar
# A ribbon for the solar spectrum window.
#
import os
from cal_path import get_css_path
#qt
from PyQt5.QtWidgets import QMainWindow, QTextEdit, QAction, QApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSize, Qt,QFile,QIODevice
from PyQt5.QtWidgets import QWidget,QSizePolicy,QVBoxLayout,QHBoxLayout,QPushButton,QDialog,QFileDialog,QToolBar,QMessageBox, QLineEdit, QToolButton
from PyQt5.QtWidgets import QTabWidget
from icon_lib import icon_get
from about import about_dlg
from util import wrap_text
from ribbon_base import ribbon_base
from play import play
class ribbon_solar(ribbon_base):
    """Ribbon (tabbed toolbar) for the solar spectrum window."""

    def optics(self):
        """Build and return the 'Spectrum' toolbar.

        Creates the Calculate and Export actions (stored on self so the
        owning window can connect their handlers), an expanding spacer,
        and the Help action.
        """
        toolbar = QToolBar()
        toolbar.setToolButtonStyle( Qt.ToolButtonTextUnderIcon)
        toolbar.setIconSize(QSize(42, 42))

        self.run = play(self,"main_play_button",run_text=wrap_text(_("Calculate"),2))
        toolbar.addAction(self.run)

        self.export = QAction(icon_get("document-export"), wrap_text(_("Export spectrum"),5), self)
        toolbar.addAction(self.export)

        # Expanding spacer pushes the help action to the right edge.
        spacer = QWidget()
        spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        toolbar.addWidget(spacer)

        self.help = QAction(icon_get("help"), _("Help"), self)
        toolbar.addAction(self.help)

        return toolbar

    def callback_about_dialog(self):
        """Show the modal About dialog."""
        dlg=about_dlg()
        dlg.exec_()

    def __init__(self):
        ribbon_base.__init__(self)
        self.setMaximumHeight(120)
        #self.setStyleSheet("QWidget { background-color:cyan; }")

        # 'About' button placed in the tab-bar corner.
        self.about = QToolButton(self)
        self.about.setText(_("About"))
        self.about.pressed.connect(self.callback_about_dialog)
        self.setCornerWidget(self.about)

        w=self.optics()
        self.addTab(w,_("Spectrum"))

        # Apply the application-wide CSS theme if it could be read.
        sheet=self.readStyleSheet(os.path.join(get_css_path(),"style.css"))
        if sheet!=None:
            sheet=str(sheet,'utf-8')
            self.setStyleSheet(sheet)
| 29.284211 | 148 | 0.75018 |
70800fe42d8cd0f23d619c4a439ac3b5114fe985 | 3,933 | py | Python | demos/Learning_Rate_Decay/Demo_applications/no_decay_lr_comparison_application.py | tdml13/NiftyNet | b35fa19ca307e81d229e2fe8269a417724833da2 | [
"Apache-2.0"
] | 1,403 | 2017-08-30T11:49:45.000Z | 2022-03-31T11:44:05.000Z | demos/Learning_Rate_Decay/Demo_applications/no_decay_lr_comparison_application.py | tdml13/NiftyNet | b35fa19ca307e81d229e2fe8269a417724833da2 | [
"Apache-2.0"
] | 360 | 2017-10-03T15:33:53.000Z | 2021-03-17T06:27:38.000Z | demos/Learning_Rate_Decay/Demo_applications/no_decay_lr_comparison_application.py | tdml13/NiftyNet | b35fa19ca307e81d229e2fe8269a417724833da2 | [
"Apache-2.0"
] | 464 | 2017-09-13T20:56:32.000Z | 2022-02-11T20:33:47.000Z | import tensorflow as tf
from niftynet.application.segmentation_application import \
SegmentationApplication
from niftynet.engine.application_factory import OptimiserFactory
from niftynet.engine.application_variables import CONSOLE
from niftynet.engine.application_variables import TF_SUMMARIES
from niftynet.layer.loss_segmentation import LossFunction
SUPPORTED_INPUT = set(['image', 'label', 'weight'])
class DecayLearningRateApplication(SegmentationApplication):
    """Segmentation demo application that feeds the learning rate through
    a placeholder at every iteration (baseline with a constant rate)."""

    REQUIRED_CONFIG_SECTION = "SEGMENTATION"

    def __init__(self, net_param, action_param, is_training):
        SegmentationApplication.__init__(
            self, net_param, action_param, is_training)
        tf.logging.info('starting decay learning segmentation application')
        # Placeholder is created later in connect_data_and_network.
        self.learning_rate = None
        self.current_lr = action_param.lr
        if self.action_param.validation_every_n > 0:
            raise NotImplementedError("validation process is not implemented "
                                      "in this demo.")

    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):
        """Build the graph: network forward pass, then either the training
        loss/optimiser path or the parent's inference path."""
        data_dict = self.get_sampler()[0][0].pop_batch_op()
        image = tf.cast(data_dict['image'], tf.float32)
        net_out = self.net(image, self.is_training)

        if self.is_training:
            with tf.name_scope('Optimiser'):
                # Learning rate is a placeholder so set_iteration_update
                # can feed a (possibly changing) value each iteration.
                self.learning_rate = tf.placeholder(tf.float32, shape=[])
                optimiser_class = OptimiserFactory.create(
                    name=self.action_param.optimiser)
                self.optimiser = optimiser_class.get_instance(
                    learning_rate=self.learning_rate)
            loss_func = LossFunction(
                n_class=self.segmentation_param.num_classes,
                loss_type=self.action_param.loss_type)
            data_loss = loss_func(
                prediction=net_out,
                ground_truth=data_dict.get('label', None),
                weight_map=data_dict.get('weight', None))
            loss = data_loss
            # Optionally add the mean of the regularisation losses.
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            if self.net_param.decay > 0.0 and reg_losses:
                reg_loss = tf.reduce_mean(
                    [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
                loss = data_loss + reg_loss
            grads = self.optimiser.compute_gradients(loss)
            # collecting gradients variables
            gradients_collector.add_to_collection([grads])
            # collecting output variables
            outputs_collector.add_to_collection(
                var=data_loss, name='dice_loss',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=self.learning_rate, name='lr',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=data_loss, name='dice_loss',
                average_over_devices=True, summary_type='scalar',
                collection=TF_SUMMARIES)
        else:
            # converting logits into final output for
            # classification probabilities or argmax classification labels
            SegmentationApplication.connect_data_and_network(
                self, outputs_collector, gradients_collector)

    def set_iteration_update(self, iteration_message):
        """
        This function will be called by the application engine at each
        iteration.
        """
        current_iter = iteration_message.current_iter
        if iteration_message.is_training:
            iteration_message.data_feed_dict[self.is_validation] = False
        elif iteration_message.is_validation:
            iteration_message.data_feed_dict[self.is_validation] = True
        # NOTE(review): current_iter is unused and the fed rate never
        # changes — constant-rate behavior appears intentional for this
        # "no decay" comparison demo; confirm.
        iteration_message.data_feed_dict[self.learning_rate] = self.current_lr
0c9a15a17abcfd47f7f90f27a16bf2d28ca51b5b | 267 | py | Python | termitolib/loans/forms.py | dmrib/termitolib | bca3c93758256114ccce0c81be29284cde003cf0 | [
"MIT"
] | 1 | 2017-11-24T21:38:19.000Z | 2017-11-24T21:38:19.000Z | termitolib/loans/forms.py | dmrib/termitolib | bca3c93758256114ccce0c81be29284cde003cf0 | [
"MIT"
] | 3 | 2021-09-07T23:49:45.000Z | 2022-02-10T12:56:39.000Z | termitolib/loans/forms.py | dmrib/termitolib | bca3c93758256114ccce0c81be29284cde003cf0 | [
"MIT"
] | 2 | 2017-07-28T22:38:44.000Z | 2017-08-04T01:09:10.000Z | from django import forms
from .models import Loan
from books.models import Book
class LoanForm(forms.ModelForm):
    """ModelForm for creating a Loan, with an extra free-text book code."""

    # Non-model field: the code identifying the book being loaned.
    # NOTE(review): presumably resolved to a Book in the view/save logic —
    # confirm; it is deliberately not listed in Meta.fields.
    code = forms.CharField(label='Book Code')

    class Meta:
        model = Loan
        # Only the borrower ('to') comes directly from the model.
        fields = [
            'to',
        ]
| 19.071429 | 46 | 0.561798 |
51d23978ba94c123b947db9269fb00d3771d7f5b | 10,089 | py | Python | intersight/model/virtualization_memory_capacity_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/virtualization_memory_capacity_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/virtualization_memory_capacity_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class VirtualizationMemoryCapacityAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Model for the 'virtualization.MemoryCapacity' schema: a point-in-time
    snapshot of total/free/used memory (in bytes) for a virtualization entity.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # Both discriminator properties accept exactly one value for this model.
    allowed_values = {
        ('class_id',): {
            'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
        },
        ('object_type',): {
            'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity",
        },
    }
    # No min/max/regex constraints are defined for any property.
    validations = {
    }
    # None => unknown extra properties are not accepted as values.
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'class_id': (str,),  # noqa: E501
            'object_type': (str,),  # noqa: E501
            'capacity': (int,),  # noqa: E501
            'free': (int,),  # noqa: E501
            'used': (int,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This allOf helper model has no polymorphic dispatch of its own.
        return None
    # Python attribute name -> JSON property name in the wire format.
    attribute_map = {
        'class_id': 'ClassId',  # noqa: E501
        'object_type': 'ObjectType',  # noqa: E501
        'capacity': 'Capacity',  # noqa: E501
        'free': 'Free',  # noqa: E501
        'used': 'Used',  # noqa: E501
    }
    _composed_schemas = {}
    # Internal bookkeeping attributes assigned in __init__ (not model fields).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """VirtualizationMemoryCapacityAllOf - a model defined in OpenAPI

        Keyword Args:
            class_id (str): Discriminator; defaults to "virtualization.MemoryCapacity", must be one of ["virtualization.MemoryCapacity", ]  # noqa: E501
            object_type (str): Same value as 'ClassId'; defaults to "virtualization.MemoryCapacity", must be one of ["virtualization.MemoryCapacity", ]  # noqa: E501
            capacity (int): The total memory capacity of the entity in bytes. [optional]  # noqa: E501
            free (int): Free memory (bytes) that is unused and available for allocation, as a point-in-time snapshot. [optional]  # noqa: E501
            used (int): Memory (bytes) that has been already used up, as a point-in-time snapshot. [optional]  # noqa: E501
            _check_type (bool): if True, values are type checked against
                openapi_types and a TypeError is raised on mismatch.
                Defaults to True.
            _path_to_item (tuple/list): keys/indices locating this model
                inside the received data, used for error reporting.
            _spec_property_naming (bool): True if the input variable names
                are serialized (spec) names; False (default) for pythonic
                snake_case names.
            _configuration (Configuration): instance used when deserializing
                a file_type parameter; enables type conversion when passed.
            _visited_composed_classes (tuple): composed classes already
                visited while walking discriminators, so the same
                discriminator is not applied twice.
        """
        class_id = kwargs.get('class_id', "virtualization.MemoryCapacity")
        object_type = kwargs.get('object_type', "virtualization.MemoryCapacity")
        # Pop bookkeeping kwargs so only model properties remain below.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Generated models never accept positional arguments.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.class_id = class_id
        self.object_type = object_type
        # Assign remaining kwargs as attributes; silently drop unknown keys
        # when the configuration requests it and no additional properties
        # are allowed by the schema.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 52.546875 | 1,678 | 0.639508 |
75444f58b28f4be4500e7f9f49d20cd8ecd290f7 | 9,928 | py | Python | profiles/serializers.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | profiles/serializers.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | profiles/serializers.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | """
Serializers for user profiles
"""
from django.db import transaction
from rest_framework.exceptions import ValidationError
from rest_framework.serializers import (
IntegerField,
ModelSerializer,
SerializerMethodField,
)
from profiles.models import (
Education,
Employment,
Profile,
)
def update_work_history(work_history_list, profile_id):
    """
    Update employment history for given profile id.

    Entries carrying an ``id`` update the matching Employment row; entries
    without one create new rows. Any Employment rows of the profile that are
    absent from ``work_history_list`` are deleted afterwards.

    Args:
        work_history_list (list): List of work history dicts.
        profile_id (int): User profile id.

    Raises:
        ValidationError: If an ``id`` does not refer to an Employment row of
            this profile, or serializer validation fails.
    """
    saved_work_history_ids = set()
    for work_history in work_history_list:
        work_history_id = work_history.get("id")
        work_history_instance = None
        # Compare against None rather than truthiness so a falsy id is still
        # treated as an update; consistent with update_education() below.
        if work_history_id is not None:
            try:
                work_history_instance = Employment.objects.get(
                    profile_id=profile_id, id=work_history_id
                )
            except Employment.DoesNotExist:
                raise ValidationError("Work history {} does not exist".format(work_history_id))
        work_history_serializer = EmploymentSerializer(instance=work_history_instance, data=work_history)
        work_history_serializer.is_valid(raise_exception=True)
        work_history_serializer.save(profile_id=profile_id)
        saved_work_history_ids.add(work_history_serializer.instance.id)
    # Delete any employment rows not present in the submitted list.
    Employment.objects.filter(profile_id=profile_id).exclude(id__in=saved_work_history_ids).delete()
def update_education(education_list, profile_id):
    """
    Synchronize a profile's Education rows with the submitted list.

    Entries with an ``id`` update the matching row; entries without one
    create new rows. Rows not present in ``education_list`` are removed.

    Args:
        education_list (list): List of education dicts.
        profile_id (int): User profile id.
    """
    kept_ids = set()
    for entry in education_list:
        entry_id = entry.get("id")
        instance = None
        if entry_id is not None:
            try:
                instance = Education.objects.get(profile_id=profile_id, id=entry_id)
            except Education.DoesNotExist:
                raise ValidationError("Education {} does not exist".format(entry_id))
        serializer = EducationSerializer(instance=instance, data=entry)
        serializer.is_valid(raise_exception=True)
        serializer.save(profile_id=profile_id)
        kept_ids.add(serializer.instance.id)
    # Anything the client did not send back gets deleted.
    Education.objects.filter(profile_id=profile_id).exclude(id__in=kept_ids).delete()
class EmploymentSerializer(ModelSerializer):
    """Serializer for Employment objects.

    The primary key is exposed as a writable field so that
    update_work_history() can match submitted entries to existing rows.
    """
    id = IntegerField(required=False)  # override the read_only flag so we can edit it
    class Meta:
        model = Employment
        fields = (
            'id',
            'city',
            'state_or_territory',
            'country',
            'company_name',
            'position',
            'industry',
            'end_date',
            'start_date'
        )
def set_fields_to_required(serializer, ignore_fields=None):
    """
    Force every field of ``serializer`` to be required (and neither null nor
    blank), skipping any field named in ``ignore_fields``.

    Args:
        serializer (rest_framework.serializers.Serializer):
            A serializer
        ignore_fields (list of str):
            If not none, a list of field names to skip

    Returns:
        None
    """
    skipped = set(ignore_fields) if ignore_fields is not None else set()
    for field in serializer.fields.values():
        if field.field_name in skipped:
            continue
        field.required = True
        field.allow_null = False
        field.allow_blank = False
class EmploymentFilledOutSerializer(EmploymentSerializer):
    """Serializer for Employment objects in filled out Profiles"""
    def __init__(self, *args, **kwargs):
        """
        Update serializer_field_mapping to use fields setting required=True.
        ``end_date`` stays optional since a position may be current.
        """
        super().__init__(*args, **kwargs)
        set_fields_to_required(self, ['end_date'])
class EducationSerializer(ModelSerializer):
    """Serializer for Education objects.

    The primary key is exposed as a writable field so that
    update_education() can match submitted entries to existing rows.
    """
    id = IntegerField(required=False)  # override the read_only flag so we can edit it
    class Meta:
        model = Education
        fields = (
            'id',
            'degree_name',
            'graduation_date',
            'field_of_study',
            'online_degree',
            'school_name',
            'school_city',
            'school_state_or_territory',
            'school_country')
class EducationFilledOutSerializer(EducationSerializer):
    """Serializer for Education objects in filled out Profiles"""
    def __init__(self, *args, **kwargs):
        """
        Update serializer_field_mapping to use fields setting required=True.
        ``field_of_study`` stays optional.
        """
        super().__init__(*args, **kwargs)
        set_fields_to_required(self, ['field_of_study'])
class ProfileBaseSerializer(ModelSerializer):
    """Base class for all the profile serializers"""
    # Derived from the related User object; read-only by construction.
    username = SerializerMethodField()
    work_history = EmploymentSerializer(many=True)
    education = EducationSerializer(many=True)
    def get_username(self, obj):
        """Getter for the username field"""
        return obj.user.username
class ProfileSerializer(ProfileBaseSerializer):
    """Serializer for Profile objects"""
    def update(self, instance, validated_data):
        """Persist scalar profile fields, then sync the nested work history
        and education lists, all inside one transaction."""
        nested_keys = ('work_history', 'education')
        with transaction.atomic():
            # Copy scalar attributes onto the model; nested relations are
            # handled by the dedicated helpers below.
            for attr, value in validated_data.items():
                if attr not in nested_keys:
                    setattr(instance, attr, value)
            instance.save(update_image='image' in validated_data)
            if 'work_history' in self.initial_data:
                update_work_history(validated_data['work_history'], instance.id)
            if 'education' in self.initial_data:
                update_education(validated_data['education'], instance.id)
        return instance
    class Meta:
        model = Profile
        fields = (
            'username',
            'filled_out',
            'agreed_to_terms_of_service',
            'account_privacy',
            'email_optin',
            'email',
            'first_name',
            'last_name',
            'full_name',
            'preferred_name',
            'country',
            'state_or_territory',
            'city',
            'address',
            'postal_code',
            'birth_country',
            'nationality',
            'date_of_birth',
            'preferred_language',
            'gender',
            'pretty_printed_student_id',
            'student_id',
            'work_history',
            'edx_level_of_education',
            'education',
            'image',
            'image_small',
            'image_medium',
            'about_me',
            'romanized_first_name',
            'romanized_last_name',
            'phone_number',
        )
        read_only_fields = (
            'edx_level_of_education',
            'agreed_to_terms_of_service',
            'image_small',
            'image_medium',
            'student_id',
        )
class ProfileLimitedSerializer(ProfileBaseSerializer):
    """
    Serializer for Profile objects, limited to fields that other users are
    allowed to see if a profile is marked public. Excludes private data
    such as email, address, date of birth, and student id.
    """
    class Meta:
        model = Profile
        fields = (
            'username',
            'account_privacy',
            'first_name',
            'last_name',
            'full_name',
            'preferred_name',
            'country',
            'state_or_territory',
            'city',
            'birth_country',
            'preferred_language',
            'gender',
            'work_history',
            'edx_level_of_education',
            'education',
            'about_me',
            'image_medium',
            'romanized_first_name',
            'romanized_last_name'
        )
        read_only_fields = (
            'edx_level_of_education',
            'agreed_to_terms_of_service',
            'image_small',
            'image_medium',
        )
class ProfileFilledOutSerializer(ProfileSerializer):
    """Serializer for Profile objects which require filled_out = True"""
    work_history = EmploymentFilledOutSerializer(many=True)
    education = EducationFilledOutSerializer(many=True)
    def __init__(self, *args, **kwargs):
        """Force every field to required except the optional set below."""
        super().__init__(*args, **kwargs)
        optional = (
            'about_me',
            'romanized_first_name',
            'romanized_last_name',
            'postal_code',
        )
        set_fields_to_required(self, ignore_fields=optional)
    def validate(self, attrs):
        """
        Assert that filled_out can't be turned off and that
        agreed_to_terms_of_service is true.
        """
        if not attrs.get('filled_out', True):
            raise ValidationError("filled_out cannot be set to false")
        if not attrs.get('agreed_to_terms_of_service', True):
            raise ValidationError("agreed_to_terms_of_service cannot be set to false")
        # Postal code is only required in United States and Canada
        if attrs.get("country", "") in ("US", "CA") and not attrs.get("postal_code", ""):
            raise ValidationError("postal_code may not be blank")
        return super().validate(attrs)
class ProfileImageSerializer(ModelSerializer):
    """Serializer for Profile objects for the Learners In Program card.
    Exposes only the username and small avatar image."""
    username = SerializerMethodField()
    def get_username(self, obj):
        """Getter for the username field"""
        return obj.user.username
    class Meta:
        model = Profile
        fields = (
            'username',
            'image_small',
        )
| 31.417722 | 105 | 0.612812 |
36f4cbf85564e86accaef75867f8490be2421d15 | 2,691 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/triggers/ha/reload/nxos/n7k/reload.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/triggers/ha/reload/nxos/n7k/reload.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/triggers/ha/reload/nxos/n7k/reload.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | '''NXOS implementation for Reload triggers'''
# import python
import logging
# import pyats
from pyats import aetest
from pyats.utils.objects import R
# Genie Libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.ha.ha import TriggerReloadFabric
log = logging.getLogger(__name__)
# Trigger required data settings
# Which key to exclude for Platform Ops comparison
platform_exclude = ['maker', 'disk_used_space','disk_total_space',
'rp_uptime', 'sn', 'disk_free_space',
'image', 'kickstart_image', 'main_mem']
class TriggerReloadFabricModule(TriggerReloadFabric):
    """Reload fabric module on device."""
    __description__ = """Reload fabric module on device.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10

    steps:
        1. Learn Platform Ops object and store the "fabric" oc(s)
           if has any, otherwise, SKIP the trigger
        2. Do reload by command "poweroff xbar <oc> no poweroff xbar <oc>"
        3. Learn Platform Ops again and the ops are the same as the Ops in step 1
        4. Update platform PTS if feature pts is enabled,
           Update global/local veirifications if enabled
    """
    # Mapping of Information between Ops and Conf
    # Also permit to dictates which key to verify
    # The '(?P<oc>.*)' regex keys capture the fabric module slot ids that
    # the trigger reloads and later re-verifies for an 'ok' state.
    mapping = Mapping(requirements={'ops.platform.platform.Platform':{
                                          'requirements':[['slot', 'oc','(?P<oc>.*)', 'state', 'ok'],
                                                          ['slot', 'oc', '(?P<oc>.*)', 'name', '(?P<name>.*Fabric.*)']],
                                          'all_keys': True,
                                          'exclude': platform_exclude}},
                      verify_ops={'ops.platform.platform.Platform':{
                                        'requirements': [\
                                            ['slot','oc', '(?P<oc>.*)', 'state', 'ok']],
                                        'exclude': platform_exclude}},
                      num_values={'oc': 'all'})
| 41.4 | 106 | 0.544036 |
a0763a98c618cb9cc4d8396a8ec620f0b5858e54 | 1,819 | py | Python | mysite/polls/views.py | bhagvank/pythonConstructs | eb6b3bb2dca0b859c4e78188f70dadc933b8ce40 | [
"Apache-2.0"
] | null | null | null | mysite/polls/views.py | bhagvank/pythonConstructs | eb6b3bb2dca0b859c4e78188f70dadc933b8ce40 | [
"Apache-2.0"
] | null | null | null | mysite/polls/views.py | bhagvank/pythonConstructs | eb6b3bb2dca0b859c4e78188f70dadc933b8ce40 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import get_object_or_404, render
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Question, Choice
class IndexView(generic.ListView):
    """Landing page listing the five most recently published questions."""
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'
    def get_queryset(self):
        """
        Return the last five published questions (not including those set to be
        published in the future).
        """
        return Question.objects.filter(
            pub_date__lte=timezone.now()
        ).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Detail page for a single question, showing its voting form."""
    model = Question
    template_name = 'polls/detail.html'
    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    """Results page showing the vote tally for a single question."""
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for one of the question's choices.

    Args:
        request: The HttpRequest; the chosen choice pk is read from POST.
        question_id: Primary key of the Question being voted on.

    Returns:
        A redirect to the results page on success, or the detail page with
        an error message when no valid choice was submitted.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # Increment in the database with an F() expression instead of a
        # Python-side read-modify-write, so two concurrent votes cannot
        # overwrite each other (lost-update race).
        from django.db.models import F
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| 33.072727 | 82 | 0.693238 |
c4859c58515d357235489e974cae9960b5a18b03 | 3,921 | py | Python | script.module.uncoded/lib/resources/lib/modules/log_utils.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2019-03-05T09:37:15.000Z | 2019-03-05T09:37:15.000Z | script.module.uncoded/lib/resources/lib/modules/log_utils.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | null | null | null | script.module.uncoded/lib/resources/lib/modules/log_utils.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2021-11-05T20:48:09.000Z | 2021-11-05T20:48:09.000Z | """
tknorris shared module
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import time
import cProfile
import StringIO
import pstats
import json
import xbmc
from resources.lib.modules import control
from xbmc import LOGDEBUG, LOGERROR, LOGFATAL, LOGINFO, LOGNONE, LOGNOTICE, LOGSEVERE, LOGWARNING # @UnusedImport
name = control.addonInfo('name')
def log(msg, level=LOGDEBUG):
    """Write ``msg`` to the Kodi log, prefixed with the addon name.

    When the addon's own debug setting is on, DEBUG messages are promoted
    to NOTICE so they appear without enabling Kodi-wide debug logging.
    Logging failures are swallowed on purpose: logging must never crash
    the addon.
    """
    # override message level to force logging when addon logging turned on
    if control.setting('addon_debug') == 'true' and level == LOGDEBUG:
        level = LOGNOTICE
    try:
        # Py2: ``unicode`` input must be utf-8 encoded before xbmc.log
        # accepts it. On Py3 every str passes the isinstance check, so the
        # old ``isinstance(msg, unicode)`` NameError path is avoided.
        if not isinstance(msg, str):
            msg = '%s (ENCODED)' % (msg.encode('utf-8'))
        xbmc.log('[%s] %s' % (name, msg), level)
    except Exception as e:
        try:
            xbmc.log('Logging Failure: %s' % (e), level)
        except:
            pass  # just give up
class Profiler(object):
    """Wrapper around ``cProfile`` that profiles decorated functions only
    when Kodi debug logging is enabled, and dumps sorted pstats output to
    ``file_path`` when the Profiler is destroyed."""
    def __init__(self, file_path, sort_by='time', builtins=False):
        # sort_by may be a single pstats sort key or a tuple of keys.
        self._profiler = cProfile.Profile(builtins=builtins)
        self.file_path = file_path
        self.sort_by = sort_by
    def profile(self, f):
        """Decorator: profile ``f`` when debugging is on, else call through."""
        def method_profile_on(*args, **kwargs):
            try:
                # NOTE(review): enable() before runcall() looks redundant —
                # runcall() manages enable/disable itself; confirm intent.
                self._profiler.enable()
                result = self._profiler.runcall(f, *args, **kwargs)
                self._profiler.disable()
                return result
            except Exception as e:
                # On any profiling failure, fall back to an unprofiled call.
                log('Profiler Error: %s' % (e), LOGWARNING)
                return f(*args, **kwargs)
        def method_profile_off(*args, **kwargs):
            return f(*args, **kwargs)
        if _is_debugging():
            return method_profile_on
        else:
            return method_profile_off
    def __del__(self):
        # Flush collected stats when the profiler object is garbage collected.
        self.dump_stats()
    def dump_stats(self):
        """Write sorted pstats output to ``self.file_path`` (if set)."""
        if self._profiler is not None:
            s = StringIO.StringIO()
            params = (self.sort_by,) if isinstance(self.sort_by, basestring) else self.sort_by
            ps = pstats.Stats(self._profiler, stream=s).sort_stats(*params)
            ps.print_stats()
            if self.file_path is not None:
                with open(self.file_path, 'w') as f:
                    f.write(s.getvalue())
def trace(method):
    """Decorator that logs the wall-clock duration and call arguments of
    ``method`` when Kodi debug logging is enabled; otherwise returns a
    plain pass-through wrapper with no timing overhead."""
    def _traced(*args, **kwargs):
        started = time.time()
        result = method(*args, **kwargs)
        elapsed = time.time() - started
        log('{name!r} time: {time:2.4f}s args: |{args!r}| kwargs: |{kwargs!r}|'.format(name=method.__name__, time=elapsed, args=args, kwargs=kwargs), LOGDEBUG)
        return result
    def _passthrough(*args, **kwargs):
        return method(*args, **kwargs)
    return _traced if _is_debugging() else _passthrough
def _is_debugging():
    """Return Kodi's 'debug.showloginfo' system setting via JSON-RPC,
    or False if the setting cannot be found in the response."""
    command = {'jsonrpc': '2.0', 'id': 1, 'method': 'Settings.getSettings', 'params': {'filter': {'section': 'system', 'category': 'logging'}}}
    settings = execute_jsonrpc(command).get('result', {}).get('settings', {})
    for setting in settings:
        if setting['id'] == 'debug.showloginfo':
            return setting['value']
    return False
def execute_jsonrpc(command):
    """Send ``command`` (dict or pre-encoded JSON string) to Kodi's
    JSON-RPC interface and return the decoded response dict."""
    payload = command if isinstance(command, basestring) else json.dumps(command)
    return json.loads(control.jsonrpc(payload))
| 32.675 | 163 | 0.625096 |
89e9617c39da0fdfd9a3cc92cbd3097cf2acdac8 | 2,604 | py | Python | life_3D/bio_sim_3d.py | BrainAnnex/life123 | 8547d6800c5fc99183c8b98068e27a414fa77705 | [
"MIT"
] | null | null | null | life_3D/bio_sim_3d.py | BrainAnnex/life123 | 8547d6800c5fc99183c8b98068e27a414fa77705 | [
"MIT"
] | null | null | null | life_3D/bio_sim_3d.py | BrainAnnex/life123 | 8547d6800c5fc99183c8b98068e27a414fa77705 | [
"MIT"
] | null | null | null | import numpy as np
class BioSim3D:
    """
    State holder for a 3-D compartment (bin) based simulation.

    Note: for at least the time being, this class doesn't get instantiated;
    all state lives in class variables.
    """
    #####################
    #  Class variables  #
    #####################
    n_cells_x = 0       # Number of x-direction spacial compartments (bins) used in the simulation
    n_cells_y = 0       # Number of y-direction spacial compartments (bins) used in the simulation
    n_cells_z = 0       # Number of z-direction spacial compartments (bins) used in the simulation
    n_species = 1       # The number of (non-water) chemical species
    # NumPy array of shape (n_cells_z, n_cells_y, n_cells_x, n_species);
    # the species index is the LAST axis (the original comment claimed
    # species-first, which contradicted the allocation below).
    system = None
    diffusion_rates = None  # NumPy array of diffusion rates for the various species
    sealed = True           # If True, no exchange with the outside; if False, immersed in a "bath"
    # Only applicable if "sealed" is False:
    bath_concentrations = None  # A NumPy array for each species
    container_diffusion = None  # A NumPy array for each species: diffusion rate in/out of the container
    #########################################################################
    #                                                                       #
    #                       SET/MODIFY CONCENTRATIONS                       #
    #                                                                       #
    #########################################################################
    @classmethod
    def initialize_system(cls, n_cells: (int, int, int), n_species: int) -> None:
        """
        Record the system geometry and allocate a zeroed concentration grid.

        :param n_cells:     The number of compartments (bins) to use in the simulation,
                            in the x-, y- and z- dimensions, as a triplet of integers
        :param n_species:   The number of (non-water) chemical species. It must be at least 1
        :return:            None
        """
        (n_cells_x, n_cells_y, n_cells_z) = n_cells
        for n in (n_cells_x, n_cells_y, n_cells_z):
            assert n >= 1, "The number of cells must be at least 1 in any dimension"
        assert n_species >= 1, "The number of (non-water) chemical species must be at least 1"
        cls.n_cells_x = n_cells_x
        cls.n_cells_y = n_cells_y
        cls.n_cells_z = n_cells_z
        cls.n_species = n_species
        # Axis order is (z, y, x, species): species is the last index.
        cls.system = np.zeros((n_cells_z, n_cells_y, n_cells_x, n_species), dtype=float)
| 44.135593 | 108 | 0.545315 |
178e7871ba2f51c4422b07f6e8f7e92fcdc7c9a2 | 8,887 | py | Python | app.py | Suiname/DDCC | 083d5a81127d9df6cf4279106ed59dee247a8bdc | [
"MIT"
] | null | null | null | app.py | Suiname/DDCC | 083d5a81127d9df6cf4279106ed59dee247a8bdc | [
"MIT"
] | null | null | null | app.py | Suiname/DDCC | 083d5a81127d9df6cf4279106ed59dee247a8bdc | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request
import requests
import os
import re
# get username and password from env variables
username = os.environ.get('GITHUB_USER')
password = os.environ.get('GITHUB_PASS')
app = Flask(__name__)
def create_response():
    """Build the skeleton response JSON (referred to as result throughout),
    with every counter zeroed and every list empty."""
    def _list_counter():
        # Shape shared by 'languages' and 'repo_topics'.
        return {'list': [], 'count': 0}
    return {
        'repo_count': {'original': 0, 'forked': 0},
        'repo_watchers': 0,
        'user_watchers': 0,
        'stars': {'received': 0, 'given': 0},
        'open_issues': 0,
        'commits': 0,
        'account_size': 0,
        'languages': _list_counter(),
        'repo_topics': _list_counter(),
    }
def get_json(url, auth=None, headers=None):
    """GET ``url`` with requests and return the decoded JSON body when the
    response is HTTP 200; any other status yields an empty dict."""
    response = requests.get(url, auth=auth, headers=headers)
    return response.json() if response.status_code == 200 else {}
def merge_bb_data(params, result):
    """Retrieve all data from Bitbucket's API and merge into the
    result object. Further detail of logic is explained in
    inline comments.

    Args:
        params (dict): must contain key 'bitbucket' with the account name.
        result (dict): response skeleton from create_response(); mutated
            in place.

    Returns:
        dict: the same ``result`` object, updated.

    NOTE(review): these are Bitbucket API v1.0 endpoints, which have been
    retired upstream — confirm whether this path is still exercised.
    """
    # retrieve the list of all repositories from the user endpoint
    bb_url = 'https://api.bitbucket.org/1.0/users/{}'.format(
        params['bitbucket']
    )
    bb_repos = get_json(bb_url)
    if bb_repos.get('repositories') and len(bb_repos['repositories']):
        # loop through each repository in the list
        for repo in bb_repos['repositories']:
            slug = repo['slug']
            # check if the repo is a fork or not
            if repo['is_fork']:
                result['repo_count']['forked'] += 1
            else:
                result['repo_count']['original'] += 1
            # add to the running total of account size
            result['account_size'] += repo['size']
            # check if the language of the repo is already in the running list
            # if not, add it to the list and increment the count.
            if(repo['language'] and
                    repo['language'].lower() not in result['languages']['list']
               ):
                result['languages']['list'].append(repo['language'].lower())
                result['languages']['count'] += 1
            # hit the individual repo endpoint
            repo_url = 'https://api.bitbucket.org/{}'.format(
                repo['resource_uri'])
            repo_data = get_json(repo_url)
            # Add the number of repo followers of the repo to the running total
            result['repo_watchers'] += repo_data.get('followers_count', 0)
            # check if there are open issues
            if repo_data.get('has_issues'):
                # perform lookup of only open issues and add to count
                issues_data = get_json(repo_url + '/issues?status=open')
                result['open_issues'] += issues_data.get('count', 0)
            # get number of user followers to the running count
            follower_url = (
                'https://api.bitbucket.org/1.0/users/{}/followers'.format(
                    params['bitbucket'])
            )
            follower_data = get_json(follower_url)
            result['user_watchers'] += follower_data.get('count', 0)
            # get the total number of commits across all branches
            commits_url = (
                'https://api.bitbucket.org/1.0/repositories/{}/{}/changesets/'
                .format(
                    params['bitbucket'], slug)
            )
            commits_data = get_json(commits_url)
            result['commits'] += commits_data.get('count', 0)
    return result
def merge_gh_data(params, result):
    """Retrieve all data from Github's API and merge into the
    result object. Further detail of logic is explained in
    inline comments.

    Args:
        params (dict): must contain key 'github' with the account name.
        result (dict): response skeleton from create_response(); mutated
            in place.

    Returns:
        dict: the same ``result`` object, updated.
    """
    # get user followers from user profile, add to count
    follower_url = 'https://api.github.com/users/{}'.format(
        params.get('github')
    )
    followers_data = get_json(follower_url, auth=(username, password))
    result['user_watchers'] += followers_data.get('followers', 0)
    # lookup the list of users' starred repos, set page size to 1
    star_url = 'https://api.github.com/users/{}/starred?per_page=1'.format(
        params.get('github')
    )
    stars_req = requests.get(star_url, auth=(username, password))
    if stars_req.status_code == 200:
        # GitHub omits the Link header entirely when all results fit on a
        # single page, so guard the lookup (previously raised KeyError /
        # IndexError for users with <= 1 starred repo).
        link_header = stars_req.headers.get('Link', '')
        if ',' in link_header:
            # because page size=1, the "last" url will contain star total
            stars_last_url = link_header.split(',')[1]
            # use regex to extract the last page value
            num_stars = re.search(r".*&page=([0-9]*)>;", stars_last_url)
            if num_stars:
                # cast as a number from string and add to count
                result['stars']['given'] += int(num_stars.group(1))
        else:
            # no pagination: the user starred at most per_page (=1) repos
            result['stars']['given'] += len(stars_req.json())
    more = True
    page = 0
    gh_repos = []
    # loop through list of github repos, 100 at a time
    while more:
        # get the next page of results
        page += 1
        repo_url = (
            'https://api.github.com/users/{}/repos?per_page=100&page={}'
            .format(params.get('github'), page)
        )
        repo_json = get_json(repo_url, auth=(username, password))
        if len(repo_json):  # result is an array
            gh_repos += repo_json
        else:  # no results, exit loop
            more = False
    for repo in gh_repos:
        if repo:
            # check if the repo is fork or original
            if repo['fork']:
                result['repo_count']['forked'] += 1
            else:
                result['repo_count']['original'] += 1
            # add to the number of repo watchers
            result['repo_watchers'] += repo['watchers']
            # update stars received
            result['stars']['received'] += repo['stargazers_count']
            # update count of open issues
            result['open_issues'] += repo['open_issues_count']
            # lookup all commits of the repo
            commit_url = (
                'https://api.github.com/repos/{}/{}/contributors'
                .format(params.get('github'), repo['name'])
            )
            commits = get_json(commit_url, auth=(username, password))
            # filter list of commits to only the user
            user_commits = (
                [x for x in commits
                 if x['login'].lower() == params.get('github').lower()]
            )
            # add to the commits count
            if user_commits and user_commits[0]:
                result['commits'] += user_commits[0].get('contributions', 0)
            # add to the account size total
            result['account_size'] += repo['size']
            # check if the repo language is not already in the list
            if (repo['language'] and
                    repo['language'].lower() not in result['languages']['list']):
                # append it to the list and increment the count
                result['languages']['list'].append(repo['language'].lower())
                result['languages']['count'] += 1
            # get all topics, experimental feature so needs special header
            topics_url = 'https://api.github.com/repos/{}/{}/topics'.format(
                params.get('github'), repo['name']
            )
            headers = {'Accept': "application/vnd.github.mercy-preview+json"}
            topics = get_json(
                topics_url,
                headers=headers,
                auth=(username, password)
            )
            if topics.get('names'):
                # concatenate array of topics
                result['repo_topics']['list'] += topics['names']
                # dedupe the list
                result['repo_topics']['list'] = list(
                    set(result['repo_topics']['list'])
                )
                # set the count
                result['repo_topics']['count'] = len(
                    result['repo_topics']['list']
                )
    return result
@app.route('/test', methods=['GET'])
def test():
"""Heartbeat route to ensure app is running."""
return jsonify({'heartbeat': True})
@app.route('/merge')
def mash():
"""Route to merge the github and bitbucket profiles.
Takes 2 query params, the bitbucket account name
and the github account name, then creates the response
object and merges the data from each profile into it.
"""
params = {
'bitbucket': request.args.get('bb_name'),
'github': request.args.get('gh_name'),
}
result = create_response()
result = merge_gh_data(params, result)
result = merge_bb_data(params, result)
return jsonify(result)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=3000)
| 37.978632 | 79 | 0.56442 |
8659ecaaf1177c8b4b393b695d3dd5c4af9b5f44 | 3,327 | py | Python | custom-interfaces/video-segmentation-beaverdam/annotator/models.py | stungkit/labelbox | 9ac7364cd2dcf9071615dd86802295eb50e5af7d | [
"Apache-2.0"
] | 1,345 | 2018-01-07T07:06:19.000Z | 2020-02-26T21:54:33.000Z | custom-interfaces/video-segmentation-beaverdam/annotator/models.py | stungkit/labelbox | 9ac7364cd2dcf9071615dd86802295eb50e5af7d | [
"Apache-2.0"
] | 135 | 2018-01-21T21:02:03.000Z | 2019-03-12T16:09:02.000Z | custom-interfaces/video-segmentation-beaverdam/annotator/models.py | stungkit/labelbox | 9ac7364cd2dcf9071615dd86802295eb50e5af7d | [
"Apache-2.0"
] | 214 | 2018-01-22T06:05:21.000Z | 2020-02-25T02:13:44.000Z | from django.db import models
from django.contrib.staticfiles import finders
class Label(models.Model):
"""The classes available for workers to choose from for each object."""
id = models.AutoField(primary_key=True)
name = models.CharField(blank=True, max_length=100, unique=True,
help_text="Name of class label option.")
color = models.CharField(blank=True, max_length=6,
help_text="6 digit hex.")
def __str__(self):
return self.name
class State(models.Model):
"""The states available for each label."""
id = models.AutoField(primary_key=True)
name = models.CharField(blank=True, max_length=100,
help_text="Name of class label option.")
color = models.CharField(blank=True, max_length=6,
help_text="6 digit hex.")
label_name = models.ForeignKey(Label, blank=True, to_field='name')
def __str__(self):
return self.name
class Video(models.Model):
annotation = models.TextField(blank=True,
help_text="A JSON blob containing all user annotation sent from client.")
source = models.CharField(max_length=1048, blank=True,
help_text=("Name of video source or type, for easier grouping/searching of videos."
"This field is not used by BeaverDam and only facilitates querying on videos by type."))
filename = models.CharField(max_length=100, blank=True,
help_text=("Name of the video file."
"The video should be publically accessible by at <host><filename>."))
image_list = models.TextField(blank=True,
help_text=("List of filenames of images to be used as video frames, in JSON format."
"When present, image list is assumed and <filename> is ignored."))
host = models.CharField(max_length=1048, blank=True,
help_text="Path to prepend to filenames to form the url for this video or the images in `image_list`.")
verified = models.BooleanField(default=False, help_text="Verified as correct by expert.")
rejected = models.BooleanField(default=False, help_text="Rejected by expert.")
labels = models.ManyToManyField(Label, blank=True)
@classmethod
def from_list(cls, path_to_list, *, source, host, filename_prefix=''):
created = []
for line in open(path_to_list, 'r'):
if line:
video = cls(source=source, filename=filename_prefix + line.strip(), host=host)
video.save()
created.append(video)
return created
def __str__(self):
return '/video/{}'.format(self.id)
@property
def url(self):
if self.image_list:
return 'Image List'
elif finders.find('videos/{}.mp4'.format(self.id)):
return '/static/videos/{}.mp4'.format(self.id)
elif self.filename and self.host:
return self.host + self.filename
else:
raise Exception('Video {0} does not have a filename, host or image_list. Possible fixes: \n1) Place {0}.mp4 into static/videos to serve locally. \n2) Update the filename & host fields of the Video with id={0}'.format(self.id))
def count_keyframes(self, at_time=None):
if at_time is None:
return self.annotation.count('"frame"')
else:
return self.annotation.count('"frame": {}'.format(at_time))
| 43.207792 | 238 | 0.662158 |
bc81094504d97815f995ed7477856e8b65b4de44 | 228 | py | Python | backend/build_migration/users/urls.py | witold-gren/django-migration | be068f43fd2fb55247ebe50dc0631a51234c8f50 | [
"MIT"
] | 1 | 2020-08-25T18:39:10.000Z | 2020-08-25T18:39:10.000Z | backend/build_migration/users/urls.py | witold-gren/django-migration | be068f43fd2fb55247ebe50dc0631a51234c8f50 | [
"MIT"
] | null | null | null | backend/build_migration/users/urls.py | witold-gren/django-migration | be068f43fd2fb55247ebe50dc0631a51234c8f50 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from rest_framework import routers
from build_migration.users import views
app_name = "users"
router = routers.DefaultRouter()
router.register("", views.UserViewSet)
urlpatterns = router.urls
| 20.727273 | 39 | 0.802632 |
44a1025ec98e22332996edd90cb41c21cdd48c10 | 6,116 | py | Python | src/tests/control/test_search.py | MaxRink/pretix | f561ece9d1591673a495a6226db812e809ab3aec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/control/test_search.py | MaxRink/pretix | f561ece9d1591673a495a6226db812e809ab3aec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/control/test_search.py | MaxRink/pretix | f561ece9d1591673a495a6226db812e809ab3aec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import datetime
from decimal import Decimal
from django.utils.timezone import now
from tests.base import SoupTest
from pretix.base.models import (
Event, InvoiceAddress, Item, Order, OrderPosition, Organizer, Team, User,
)
class OrderSearchTest(SoupTest):
def setUp(self):
super().setUp()
self.user = User.objects.create_user('dummy@dummy.dummy', 'dummy')
self.orga1 = Organizer.objects.create(name='CCC', slug='ccc')
self.event1 = Event.objects.create(
organizer=self.orga1, name='30C3', slug='30c3',
date_from=datetime.datetime(2013, 12, 26, tzinfo=datetime.timezone.utc),
plugins='pretix.plugins.banktransfer,tests.testdummy'
)
self.event2 = Event.objects.create(
organizer=self.orga1, name='31C3', slug='31c3',
date_from=datetime.datetime(2014, 12, 26, tzinfo=datetime.timezone.utc),
)
o1 = Order.objects.create(
code='FO1A', event=self.event1, email='dummy1@dummy.test',
status=Order.STATUS_PENDING,
datetime=now(), expires=now() + datetime.timedelta(days=10),
total=14, payment_provider='banktransfer', locale='en'
)
InvoiceAddress.objects.create(order=o1, company="Test Ltd.", name="Peter Miller")
ticket1 = Item.objects.create(event=self.event1, name='Early-bird ticket',
category=None, default_price=23,
admission=True)
OrderPosition.objects.create(
order=o1,
item=ticket1,
variation=None,
price=Decimal("14"),
attendee_name="Peter",
attendee_email="att@att.com"
)
o2 = Order.objects.create(
code='FO2', event=self.event2, email='dummy2@dummy.test',
status=Order.STATUS_PENDING,
datetime=now(), expires=now() + datetime.timedelta(days=10),
total=14, payment_provider='banktransfer', locale='en'
)
ticket2 = Item.objects.create(event=self.event1, name='Early-bird ticket',
category=None, default_price=23,
admission=True)
OrderPosition.objects.create(
order=o2,
item=ticket2,
variation=None,
price=Decimal("14"),
attendee_name="Mark"
)
self.team = Team.objects.create(organizer=self.orga1, can_view_orders=True)
self.team.members.add(self.user)
self.team.limit_events.add(self.event1)
self.client.login(email='dummy@dummy.dummy', password='dummy')
def test_team_limit_event(self):
resp = self.client.get('/control/search/orders/').rendered_content
assert 'FO1' in resp
assert 'FO2' not in resp
def test_team_limit_event_wrong_permission(self):
self.team.can_view_orders = False
self.team.save()
resp = self.client.get('/control/search/orders/').rendered_content
assert 'FO1' not in resp
assert 'FO2' not in resp
def test_team_all_events(self):
self.team.all_events = True
self.team.save()
resp = self.client.get('/control/search/orders/').rendered_content
assert 'FO1' in resp
assert 'FO2' in resp
def test_team_all_events_wrong_permission(self):
self.team.all_events = True
self.team.can_view_orders = False
self.team.save()
resp = self.client.get('/control/search/orders/').rendered_content
assert 'FO1' not in resp
assert 'FO2' not in resp
def test_team_none(self):
self.team.members.clear()
resp = self.client.get('/control/search/orders/').rendered_content
assert 'FO1' not in resp
assert 'FO2' not in resp
def test_superuser(self):
self.user.is_staff = True
self.user.staffsession_set.create(date_start=now(), session_key=self.client.session.session_key)
self.user.save()
self.team.members.clear()
resp = self.client.get('/control/search/orders/').rendered_content
assert 'FO1' in resp
assert 'FO2' in resp
def test_filter_email(self):
resp = self.client.get('/control/search/orders/?query=dummy1@dummy').rendered_content
assert 'FO1' in resp
resp = self.client.get('/control/search/orders/?query=dummynope').rendered_content
assert 'FO1' not in resp
def test_filter_attendee_name(self):
resp = self.client.get('/control/search/orders/?query=Pete').rendered_content
assert 'FO1' in resp
resp = self.client.get('/control/search/orders/?query=Mark').rendered_content
assert 'FO1' not in resp
def test_filter_attendee_email(self):
resp = self.client.get('/control/search/orders/?query=att.com').rendered_content
assert 'FO1' in resp
resp = self.client.get('/control/search/orders/?query=nope.com').rendered_content
assert 'FO1' not in resp
def test_filter_invoice_address(self):
resp = self.client.get('/control/search/orders/?query=Ltd').rendered_content
assert 'FO1' in resp
resp = self.client.get('/control/search/orders/?query=Miller').rendered_content
assert 'FO1' in resp
def test_filter_code(self):
resp = self.client.get('/control/search/orders/?query=FO1').rendered_content
assert '30C3-FO1' in resp
resp = self.client.get('/control/search/orders/?query=30c3-FO1').rendered_content
assert '30C3-FO1' in resp
resp = self.client.get('/control/search/orders/?query=30C3-fO1A').rendered_content
assert '30C3-FO1' in resp
resp = self.client.get('/control/search/orders/?query=30C3-fo14').rendered_content
assert '30C3-FO1' in resp
resp = self.client.get('/control/search/orders/?query=31c3-FO1').rendered_content
assert '30C3-FO1' not in resp
resp = self.client.get('/control/search/orders/?query=FO2').rendered_content
assert '30C3-FO1' not in resp
| 41.324324 | 104 | 0.630641 |
ae34c3db25895600b3648a0695479a50ff19a368 | 1,264 | py | Python | server/products/migrations/0008_review.py | jinsub1999/django_react_bootstrap | 7b77a93f046da25445ff7088709c5aaac3bda412 | [
"MIT"
] | 1 | 2021-08-28T12:09:50.000Z | 2021-08-28T12:09:50.000Z | server/products/migrations/0008_review.py | jinsub1999/django_react_bootstrap | 7b77a93f046da25445ff7088709c5aaac3bda412 | [
"MIT"
] | null | null | null | server/products/migrations/0008_review.py | jinsub1999/django_react_bootstrap | 7b77a93f046da25445ff7088709c5aaac3bda412 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-31 10:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0007_auto_20210828_1358'),
]
operations = [
migrations.CreateModel(
name='Review',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('content', models.TextField()),
('added_date', models.DateTimeField()),
('modded_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_author', to=settings.AUTH_USER_MODEL)),
('downvotes', models.ManyToManyField(related_name='review_downvotes', to=settings.AUTH_USER_MODEL)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_product', to='products.product')),
('upvotes', models.ManyToManyField(related_name='review_upvotes', to=settings.AUTH_USER_MODEL)),
],
),
]
| 42.133333 | 150 | 0.651108 |
713973d30a027a6f6ef3ba60ce211069d1d95b45 | 34,658 | py | Python | bagpipe/bgp/tests/test_tracker_worker.py | ThomasHeinlein/bagpipe-bgp | f196da35b00925a0743b38243773e528fc5b122f | [
"Apache-2.0"
] | null | null | null | bagpipe/bgp/tests/test_tracker_worker.py | ThomasHeinlein/bagpipe-bgp | f196da35b00925a0743b38243773e528fc5b122f | [
"Apache-2.0"
] | null | null | null | bagpipe/bgp/tests/test_tracker_worker.py | ThomasHeinlein/bagpipe-bgp | f196da35b00925a0743b38243773e528fc5b122f | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: test_tracker_worker
:synopsis: a module that defines several test cases for the tracker_worker
module.
In particular, unit tests for TrackerWorker class.
Setup: Run TrackerWorker instance.
TearDown: Stop TrackerWorker instance.
TrackerWorker is in charge to receive RouteEvent from RouteTableManager.
A RouteEvent contains an event type ADVERTIZE or WITHDRAW, and a RouteEntry.
TrackerWorker should call _newBestRoute and/or _bestRouteRemoved if the new
RouteEntry changes the current list of the known best routes. The current
list of the known best routes, which can be modified by the new RouteEntry,
is selected thanks to the trackedEntry associated to the new RouteEntry.
The trackedEntry is obtained thanks to _route2TrackedEntry.
_compareRoutes is used to compare 2 RouteEntry.
Unit tests are organized as follow:
TestA: basic tests, advertise several routes with different NLRI and same or
different sources
TestB: same routes (with _compareRoutes) announced by different sources
TestC: different routes (with _compareRoutes) announced by different
sources, TrackerWorker selects the best route.
TestD: ECMP routes or same routes (with _compareRoutes), same source, same
attributes except NextHop
TestE: different routes (with compareRoutes announced by the same source
with replacedRoute not none
"""
import mock
from copy import copy
from testtools import TestCase
from threading import Thread
from bagpipe.bgp.tests import BaseTestBagPipeBGP, RT1, RT2, NLRI1, NLRI2, \
NH1, NH2, NH3, NBR, BRR
from bagpipe.bgp.engine import RouteEvent
from bagpipe.bgp.engine.worker import Worker
from bagpipe.bgp.engine.tracker_worker import TrackerWorker
from bagpipe.exabgp.message.update.attribute import AttributeID
import logging
log = logging.getLogger()
def _test_compareRoutes(self, routeA, routeB):
if (routeA.nlri != routeB.nlri or
routeA.afi != routeB.afi or
routeA.safi != routeB.safi):
raise Exception('Bug: compareRoutes called with routes having '
'different nlri/afi/safi')
else:
if (routeA.attributes.sameValuesAs(routeB.attributes)):
return 0
else:
lpA = routeA.attributes[AttributeID.LOCAL_PREF].localpref
nhA = routeA.attributes[AttributeID.NEXT_HOP].next_hop
lpB = routeB.attributes[AttributeID.LOCAL_PREF].localpref
nhB = routeB.attributes[AttributeID.NEXT_HOP].next_hop
if nhA != nhB and lpA == lpB:
# ECMP routes
return 0
else:
return cmp(lpA, lpB)
class TrackerWorkerThread(TrackerWorker, Thread):
def __init__(self):
Thread.__init__(self, name='TrackerWorkerThread')
self.setDaemon(True)
TrackerWorker.__init__(
self, 'BGPManager', 'TrackerWorker', _test_compareRoutes)
def stop(self):
self._pleaseStop.set()
self._queue.put(self.stopEvent)
self._stopped()
def _route2trackedEntry(self, route):
return route.nlri
# the definitions below are needed because TrackerWorker is an abstract
# class
def _newBestRoute(self, entry, route):
pass
def _bestRouteRemoved(self, entry, route):
pass
class TestTrackerWorker(TestCase, BaseTestBagPipeBGP):
def setUp(self):
super(TestTrackerWorker, self).setUp()
self.trackerWorker = TrackerWorkerThread()
self.trackerWorker.start()
self.setEventTargetWorker(self.trackerWorker)
self._calls = []
def tearDown(self):
super(TestTrackerWorker, self).tearDown()
self.trackerWorker.stop()
self.trackerWorker.join()
def _checkCalls(self, call_args_list, expected_list):
for ((callArgs, _), expected) in zip(call_args_list, expected_list):
self.assertEquals(expected[0], callArgs[0], 'Bad prefix')
observedRouteEntry = copy(callArgs[1])
observedRouteEntry.source = None
expectedRouteEntry = copy(expected[1])
expectedRouteEntry.source = None
self.assertEquals(expectedRouteEntry, observedRouteEntry,
"bad route Entry")
if len(expected) >= 3:
self.assertEquals(expected[2], callArgs[2], 'wrong last flag')
def _callList(self, method):
def side_effect(*args, **kwargs):
self._append_call(method)
return side_effect
def testA1_differentNLRISameSource(self):
# A source A advertises and withdraws routes for different NLRI.
# Mock objects
self.trackerWorker._newBestRoute = mock.Mock()
self.trackerWorker._bestRouteRemoved = mock.Mock()
# Only 1 source A
workerA = Worker('BGPManager', 'Worker-A')
# Source A advertises a route for NLRI1
routeNlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source A advertises a route for NLRI2
routeNlri2A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI2, [RT1, RT2], workerA, NH1, 100)
# Source A withdraws the route for NLRI1
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source A withdraws the route for NLRI2
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI2, [RT1, RT2], workerA, NH1, 100)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
self.assertEqual(2, self.trackerWorker._newBestRoute.call_count,
'2 new best routes: 1 for NLRI1 and 1 for NLRI2')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, routeNlri1A.routeEntry),
(NLRI2, routeNlri2A.routeEntry)])
self.assertEqual(2, self.trackerWorker._bestRouteRemoved.call_count,
'2 old routes removed: 1 for NLRI1 and 1 for NLRI2')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, routeNlri1A.routeEntry, True),
(NLRI2, routeNlri2A.routeEntry, True)])
def testA2_differentNLRIDifferentSource(self):
# 2 sources A and B advertise and withdraw routes for different NLRI.
# Mock objects
self.trackerWorker._newBestRoute = mock.Mock()
self.trackerWorker._bestRouteRemoved = mock.Mock()
# 2 sources: A and B
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
# Source A advertises a route for NLRI1
routeNlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source B advertises a route for NLRI2
routeNlri2B = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI2, [RT1, RT2], workerB, NH1, 100)
# Source A withdraws the route for NLRI1
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source B withdraws the route for NLRI2
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI2, [RT1, RT2], workerB, NH1, 100)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
self.assertEqual(2, self.trackerWorker._newBestRoute.call_count,
'2 newBestRoute calls: 1 for NLRI1 and 1 for NLRI2')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, routeNlri1A.routeEntry),
(NLRI2, routeNlri2B.routeEntry)])
self.assertEqual(2, self.trackerWorker._bestRouteRemoved.call_count,
'2 bestRouteRemoved calls: 1 for NLRI1 and 1 for '
'NLRI2')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, routeNlri1A.routeEntry, True),
(NLRI2, routeNlri2B.routeEntry, True)])
def testA3_sameNLRISameSource(self):
# A source A advertises the same route for the same NLRI
# Mock objects
self.trackerWorker._newBestRoute = mock.Mock()
self.trackerWorker._bestRouteRemoved = mock.Mock()
# 1 source: A
workerA = Worker('BGPManager', 'Worker-A')
# Source A advertises a route for NLRI1
routeNlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source A advertises the same route for NLRI1
self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
self.assertEqual(1, self.trackerWorker._newBestRoute.call_count,
'expected 1 newBestRoute call for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, routeNlri1A.routeEntry),
(NLRI1, routeNlri1A.routeEntry)])
def testA4_withdrawNLRINotKnown(self):
# A source A withdraws a route that does not exist.
self.trackerWorker._newBestRoute = mock.Mock()
self.trackerWorker._bestRouteRemoved = mock.Mock()
# 1 source: A
workerA = Worker('BGPManager', 'Worker-A')
# Source A withdraws a route for NLRI1 which is not known by
# trackerWorker
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Check calls to _newBestRoute and _bestRouteRemoved
self.assertEqual(0, self.trackerWorker._newBestRoute.call_count,
'newBestRoute should not have been called')
self.assertEqual(0, self.trackerWorker._bestRouteRemoved.call_count,
'bestRouteRemoved should not have been called')
def testB1_isTheCurrentBestRoute(self):
# The route which is advertised by another source is the current best
# route
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 2 sources: A and B
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
# Source A advertises a route for NLRI1
self._append_call("RE1")
routeNlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source B advertises the same route for NLRI1
self._append_call("RE2")
routeNlri1B = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 100)
# Source A withdraws the route for NLRI1
self._append_call("RE3")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source B withdraws the route for NLRI1
self._append_call("RE4")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerB, NH1, 100)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
self.assertEqual(
1, self.trackerWorker._newBestRoute.call_count,
'1 new best route call for NLRI1')
self._checkCalls(
self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, routeNlri1A.routeEntry)])
self.assertEqual(
1, self.trackerWorker._bestRouteRemoved.call_count,
'1 bestRouteRemoved call for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, routeNlri1B.routeEntry, True)])
expectedCalls = ["RE1", NBR, "RE2", "RE3", "RE4", BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
def testB2_isNotTheCurrentBestRoute(self):
# The route which is advertised by an other source is not the current
# best route but will become the best route
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 3 sources: A, B and C
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
workerC = Worker('BGPManager', 'Worker-C')
# Source A advertises route1 for NLRI1
self._append_call("RE1")
route1Nlri1 = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B advertises route2 for NLRI1 : route1 is better than route2
self._append_call("RE2")
route2Nlri1 = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source C advertises also route2
self._append_call("RE3")
self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerC, NH1, 200)
# Source A withdraws route1
self._append_call("RE4")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", "RE3", "RE4", NBR, BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
2, self.trackerWorker._newBestRoute.call_count,
'2 new best route call for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1.routeEntry),
(NLRI1, route2Nlri1.routeEntry)])
self.assertEqual(
1, self.trackerWorker._bestRouteRemoved.call_count,
'1 bestRouteRemoved call for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1.routeEntry, False)])
def testC1_route1BestRoute(self):
# Route1 is the best route
# Mock objects
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 2 sources : A and B
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
# Source A advertises a route1 for NLRI1
self._append_call("RE1")
route1Nlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B advertises a route2 for NLRI1 with different attributes.
# Route1 is better than Route2
self._append_call("RE2")
route2Nrli1B = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source A withdraws route1 for NLRI1
self._append_call("RE3")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B withdraws route2 for NLRI1
self._append_call("RE4")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", "RE3", NBR, BRR, "RE4", BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
2, self.trackerWorker._newBestRoute.call_count,
'2 new newBestRoute calls for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry),
(NLRI1, route2Nrli1B.routeEntry)])
self.assertEqual(
2, self.trackerWorker._bestRouteRemoved.call_count,
'2 bestRouteRemoved calls for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry, False),
(NLRI1, route2Nrli1B.routeEntry, True)])
def testC2_route2BestRoute(self):
# Route2 is the best route
# Mock objects
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 2 sources: A and B
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
# Source A advertises a route1 for NLRI1
self._append_call("RE1")
route1Nlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source B advertises a route2 for NLRI1. Route2 is better than Route1
self._append_call("RE2")
route2Nrli1B = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source A withdraws route1 for NLRI1
self._append_call("RE3")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", NBR, BRR, "RE3"]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
2, self.trackerWorker._newBestRoute.call_count,
'2 new newBestRoute calls for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry),
(NLRI1, route2Nrli1B.routeEntry)])
self.assertEqual(
1, self.trackerWorker._bestRouteRemoved.call_count,
'1 bestRouteRemoved call for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry, False)])
def testC3_selectNewBestRouteAmongSeveral(self):
# When current best route is withdrawn, the new best route should be
# selected among several routes
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 3 sources: A, B and C
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
workerC = Worker('BGPManager', 'Worker-C')
# Source A advertises a route1 for NLRI1
self._append_call("RE1")
route1Nlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B advertises a route2 for NLRI1. Route1 is better than Route2
self._append_call("RE2")
route2Nrli1B = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source C advertises a route3 for NLRI1. Route2 is better than Route3
self._append_call("RE3")
route3Nrli1C = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerC, NH1, 100)
# Source A withdraws route1 for NLRI1
self._append_call("RE4")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B withdraws route2 for NLRI1
self._append_call("RE5")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source C withdraws route3 for NLRI1
self._append_call("RE6")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerC, NH1, 100)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", "RE3",
"RE4", NBR, BRR, "RE5", NBR, BRR, "RE6", BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
3, self.trackerWorker._newBestRoute.call_count,
'3 new newBestRoute calls for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry),
(NLRI1, route2Nrli1B.routeEntry),
(NLRI1, route3Nrli1C.routeEntry)])
self.assertEqual(
3, self.trackerWorker._bestRouteRemoved.call_count,
'3 bestRouteRemoved calls for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry, False),
(NLRI1, route2Nrli1B.routeEntry, False),
(NLRI1, route3Nrli1C.routeEntry, True)])
def testD1_ECMPRoutes(self):
# ECMP routes are routes advertised by the same worker with the same
# LP and different NH
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 1 source: A
workerA = Worker('BGPManager', 'Worker-A')
# Source A advertises a route1 for NLRI1
self._append_call("RE1")
route1Nlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source A advertises a route2 for NLRI1. route2 is equal to route1
# with compareRoutes, but the next_hop are different
self._append_call("RE2")
route2Nrli1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH2, 100)
# Source A withdraws route1 for NLRI1
self._append_call("RE3")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 100)
# Source A withdraws route2 for NLRI1
self._append_call("RE4")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH2, 100)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", NBR, "RE3", BRR, "RE4", BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
2, self.trackerWorker._newBestRoute.call_count,
'2 new newBestRoute calls for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry),
(NLRI1, route2Nrli1A.routeEntry)])
self.assertEqual(
2, self.trackerWorker._bestRouteRemoved.call_count,
'2 bestRouteRemoved calls for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry, False),
(NLRI1, route2Nrli1A.routeEntry, True)])
def testE1_replaceBRisNBR(self):
# Advertise a route that replaces the best route and becomes the new
# best route
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 1 source: A
workerA = Worker('BGPManager', 'Worker-A')
# Source A advertises a route1 for NLRI1
self._append_call("RE1")
route1Nlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 200)
# Source A advertises a route2 for NLRI1. Route1 is better than Route2
# BUT Route2 replaces Route1
self._append_call("RE2")
route2Nrli1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2],
workerA, NH1, 100, route1Nlri1A.routeEntry)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", NBR, BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
2, self.trackerWorker._newBestRoute.call_count,
'2 new newBestRoute calls for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry),
(NLRI1, route2Nrli1A.routeEntry)])
self.assertEqual(
1, self.trackerWorker._bestRouteRemoved.call_count,
'1 bestRouteRemoved call for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry, False)])
def testE2_replaceBRisNotNBR(self):
# Advertise a route that replaces the best route but does not become
# the new best route
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 2 sources : A and B
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
# Source A advertises a route1 for NLRI1
self._append_call("RE1")
route1Nlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B advertises a route2. Route1 is better than Route2
self._append_call("RE2")
route2Nrli1B = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source A advertises a route3 for NLRI1. Route3 replaces Route1.
# Route2 is better than route3.
self._append_call("RE3")
route3Nrli1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2],
workerA, NH1, 100, route1Nlri1A.routeEntry)
# Source B withdraws route2 for NLRI1
self._append_call("RE4")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", "RE3", NBR, BRR, "RE4", NBR, BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
3, self.trackerWorker._newBestRoute.call_count,
'3 new newBestRoute calls for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry),
(NLRI1, route2Nrli1B.routeEntry),
(NLRI1, route3Nrli1A.routeEntry)])
self.assertEqual(
2, self.trackerWorker._bestRouteRemoved.call_count,
'2 bestRouteRemoved calls for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry, False),
(NLRI1, route2Nrli1B.routeEntry, False)])
def testE3_replaceBRisNotNBR(self):
# Advertise a route that replaces the best route but does not become
# the new best route
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 3 sources: A, B and C
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
workerC = Worker('BGPManager', 'Worker-C')
# Source A advertises route1 for NLRI1
self._append_call("RE1")
route1Nlri1 = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B advertises route2 for NLRI1 : route1 is better than route2
self._append_call("RE2")
route2Nlri1 = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source C advertises also route2
self._append_call("RE3")
self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerC, NH1, 200)
# Source A advertises route3 which replaces route1
self._append_call("RE4")
self._newRouteEvent(RouteEvent.ADVERTISE, NLRI1, [RT1, RT2],
workerA, NH1, 100, route1Nlri1.routeEntry)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", "RE3", "RE4", NBR, BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
2, self.trackerWorker._newBestRoute.call_count,
'2 new best route call for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1.routeEntry),
(NLRI1, route2Nlri1.routeEntry)])
self.assertEqual(
1, self.trackerWorker._bestRouteRemoved.call_count,
'1 bestRouteRemoved call for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1.routeEntry)])
def testE4_notReplaceBR(self):
# Advertise a route that does not replaces the best route and becomes
# the new best route when the best route is withdrawn
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 2 sources : A and B
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
# Source A advertises a route1 for NLRI1
self._append_call("RE1")
route1Nlri1A = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B advertises a route2. Route1 is better than Route2
self._append_call("RE2")
route2Nrli1B = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source B advertises a route3 for NLRI1. Route3 replaces Route2.
# Route1 is better than Route3
self._append_call("RE3")
route3Nrli1B = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2],
workerB, NH1, 100, route2Nrli1B.routeEntry)
# Source A withdraws route1 for NLRI1
self._append_call("RE4")
self._newRouteEvent(
RouteEvent.WITHDRAW, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", "RE3", "RE4", NBR, BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self.assertEqual(
2, self.trackerWorker._newBestRoute.call_count,
'2 new newBestRoute calls for NLRI1')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry),
(NLRI1, route3Nrli1B.routeEntry)])
self.assertEqual(
1, self.trackerWorker._bestRouteRemoved.call_count,
'1 bestRouteRemoved call for NLRI1')
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1Nlri1A.routeEntry, False)])
def testE5_replaceBRisNBREqual(self):
# Same as E3, but the route that replaces our current best compares
# equally to the two initially less preferred routes, and becomes best
# route with them
self.trackerWorker._newBestRoute = mock.Mock(
side_effect=self._callList(NBR))
self.trackerWorker._bestRouteRemoved = mock.Mock(
side_effect=self._callList(BRR))
# 3 sources: A, B and C
workerA = Worker('BGPManager', 'Worker-A')
workerB = Worker('BGPManager', 'Worker-B')
workerC = Worker('BGPManager', 'Worker-C')
# Source A advertises route1 for NLRI1
self._append_call("RE1")
route1 = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerA, NH1, 300)
# Source B advertises route2 for NLRI1 : route1 is better than route2
self._append_call("RE2")
route2 = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerB, NH1, 200)
# Source C advertises also route2
self._append_call("RE3")
route3 = self._newRouteEvent(
RouteEvent.ADVERTISE, NLRI1, [RT1, RT2], workerC, NH2, 200)
# Source A advertises route3 which replaces route1
self._append_call("RE4")
route4 = self._newRouteEvent(RouteEvent.ADVERTISE, NLRI1, [RT1, RT2],
workerA, NH3, 200, route1.routeEntry)
# Check calls and arguments list to _newBestRoute and _bestRouteRemoved
expectedCalls = ["RE1", NBR, "RE2", "RE3", "RE4", NBR, NBR, NBR, BRR]
self.assertEqual(expectedCalls, self._calls, 'Wrong call sequence')
self._checkCalls(self.trackerWorker._newBestRoute.call_args_list,
[(NLRI1, route1.routeEntry),
(NLRI1, route2.routeEntry),
(NLRI1, route3.routeEntry),
(NLRI1, route4.routeEntry)])
# FIXME: the order of route2, route3, route4 is not important in the
# test above, we should test independently of the order
self._checkCalls(
self.trackerWorker._bestRouteRemoved.call_args_list,
[(NLRI1, route1.routeEntry, False)])
| 44.835705 | 79 | 0.638006 |
891acfbd928c941b9341bbf7248314b930ce2c48 | 879 | py | Python | pava/implementation/natives/java/security/cert/X509CertSelector.py | laffra/pava | 54d10cf7f8def2f96e254c0356623d08f221536f | [
"MIT"
] | 4 | 2017-03-30T16:51:16.000Z | 2020-10-05T12:25:47.000Z | pava/implementation/natives/java/security/cert/X509CertSelector.py | laffra/pava | 54d10cf7f8def2f96e254c0356623d08f221536f | [
"MIT"
] | null | null | null | pava/implementation/natives/java/security/cert/X509CertSelector.py | laffra/pava | 54d10cf7f8def2f96e254c0356623d08f221536f | [
"MIT"
def add_native_methods(clazz):
    """Install stub implementations of X509CertSelector's native methods.

    Each stub mirrors the mangled JVM signature of the corresponding
    java.security.cert.X509CertSelector method and simply raises
    NotImplementedError, since no native implementation is available.
    """
    def setSubjectAlternativeNames__java_util_Collection_java_util_List______(a0, a1):
        raise NotImplementedError()
    clazz.setSubjectAlternativeNames__java_util_Collection_java_util_List______ = setSubjectAlternativeNames__java_util_Collection_java_util_List______

    def addSubjectAlternativeName__int__java_lang_String__(a0, a1, a2):
        raise NotImplementedError()
    clazz.addSubjectAlternativeName__int__java_lang_String__ = addSubjectAlternativeName__int__java_lang_String__

    def addSubjectAlternativeName__int__byte____(a0, a1, a2):
        raise NotImplementedError()
    clazz.addSubjectAlternativeName__int__byte____ = addSubjectAlternativeName__int__byte____

    def getSubjectAlternativeNames____(a0):
        raise NotImplementedError()
    clazz.getSubjectAlternativeNames____ = getSubjectAlternativeNames____
cdc6592f90e4af1fe9ff791fc27c05449f711560 | 2,624 | py | Python | python/ql/test/library-tests/frameworks/twisted/taint_test.py | timoles/codeql | 2d24387e9e300bf03be35694816b1e76ae88a50c | [
"MIT"
] | 4,036 | 2020-04-29T00:09:57.000Z | 2022-03-31T14:16:38.000Z | python/ql/test/library-tests/frameworks/twisted/taint_test.py | timoles/codeql | 2d24387e9e300bf03be35694816b1e76ae88a50c | [
"MIT"
] | 2,970 | 2020-04-28T17:24:18.000Z | 2022-03-31T22:40:46.000Z | python/ql/test/library-tests/frameworks/twisted/taint_test.py | ScriptBox99/github-codeql | 2ecf0d3264db8fb4904b2056964da469372a235c | [
"MIT"
] | 794 | 2020-04-29T00:28:25.000Z | 2022-03-30T08:21:46.000Z | from twisted.web.resource import Resource
from twisted.web.server import Request
class MyTaintTest(Resource):
    """CodeQL taint-tracking test for twisted.web request handlers.

    The trailing ``# $ ...`` comments are CodeQL inline test expectations,
    and ``ensure_tainted``/``ensure_not_tainted`` are test-harness
    assertions; keep them on the same lines as the expressions they check.
    """
    def getChild(self, path, request): # $ requestHandler
        """Both the requested child path and the request are user-controlled."""
        ensure_tainted(path, request) # $ tainted
    def render(self, request): # $ requestHandler
        """The generic dispatch entry point receives a tainted request."""
        ensure_tainted(request) # $ tainted
    def render_GET(self, request: Request): # $ requestHandler
        """Check taint propagation to the individual attributes of a Request."""
        # see https://twistedmatrix.com/documents/21.2.0/api/twisted.web.server.Request.html
        ensure_tainted(
            request, # $ tainted
            request.uri, # $ tainted
            request.path, # $ tainted
            request.prepath, # $ tainted
            request.postpath, # $ tainted
            # file-like
            request.content, # $ tainted
            request.content.read(), # $ MISSING: tainted
            # Dict[bytes, List[bytes]] (for query args)
            request.args, # $ tainted
            request.args[b"key"], # $ tainted
            request.args[b"key"][0], # $ tainted
            request.args.get(b"key"), # $ tainted
            request.args.get(b"key")[0], # $ tainted
            request.received_cookies, # $ tainted
            request.received_cookies["key"], # $ tainted
            request.received_cookies.get("key"), # $ tainted
            request.getCookie(b"key"), # $ tainted
            # twisted.web.http_headers.Headers
            # see https://twistedmatrix.com/documents/21.2.0/api/twisted.web.http_headers.Headers.html
            request.requestHeaders, # $ tainted
            request.requestHeaders.getRawHeaders("key"), # $ MISSING: tainted
            request.requestHeaders.getRawHeaders("key")[0], # $ MISSING: tainted
            request.requestHeaders.getAllRawHeaders(), # $ MISSING: tainted
            list(request.requestHeaders.getAllRawHeaders()), # $ MISSING: tainted
            request.getHeader("key"), # $ tainted
            request.getAllHeaders(), # $ tainted
            request.getAllHeaders()["key"], # $ tainted
            request.user, # $ tainted
            request.getUser(), # $ tainted
            request.password, # $ tainted
            request.getPassword(), # $ tainted
            request.host, # $ tainted
            request.getHost(), # $ tainted
            request.getRequestHostname(), # $ tainted
        )
        # technically user-controlled, but unlikely to lead to vulnerabilities.
        ensure_not_tainted(
            request.method,
        )
        # not tainted at all
        ensure_not_tainted(
            # outgoing things
            request.cookies,
            request.responseHeaders,
        )
8b02b481535d5b23f7417cb1da7af063191932bd | 82,903 | py | Python | hoomd/data.py | atravitz/hoomd-blue | 54762a4ec1925efa89be8f48001e676d5c4ffb52 | [
"BSD-3-Clause"
] | null | null | null | hoomd/data.py | atravitz/hoomd-blue | 54762a4ec1925efa89be8f48001e676d5c4ffb52 | [
"BSD-3-Clause"
] | null | null | null | hoomd/data.py | atravitz/hoomd-blue | 54762a4ec1925efa89be8f48001e676d5c4ffb52 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: joaander
R""" Access system configuration data.
Code in the data package provides high-level access to all of the particle, bond and other data that define the
current state of the system. You can use python code to directly read and modify this data, allowing you to analyze
simulation results while the simulation runs, or to create custom initial configurations with python code.
There are two ways to access the data.
1. Snapshots record the system configuration at one instant in time. You can store this state to analyze the data,
restore it at a future point in time, or to modify it and reload it. Use snapshots for initializing simulations,
or when you need to access or modify the entire simulation state.
2. Data proxies directly access the current simulation state. Use data proxies if you need to only touch a few
particles or bonds at a time.
.. rubric:: Snapshots
Relevant methods:
* :py:meth:`hoomd.data.system_data.take_snapshot()` captures a snapshot of the current system state. A snapshot is a
copy of the simulation state. As the simulation continues to progress, data in a captured snapshot will remain
constant.
* :py:meth:`hoomd.data.system_data.restore_snapshot()` replaces the current system state with the state stored in
a snapshot.
* :py:meth:`hoomd.data.make_snapshot()` creates an empty snapshot that you can populate with custom data.
* :py:func:`hoomd.init.read_snapshot()` initializes a simulation from a snapshot.
Examples::
snapshot = system.take_snapshot()
system.restore_snapshot(snapshot)
snapshot = data.make_snapshot(N=100, particle_types=['A', 'B'], box=data.boxdim(L=10))
# ... populate snapshot with data ...
init.read_snapshot(snapshot)
.. rubric:: Snapshot and MPI
In MPI simulations, the snapshot is only valid on rank 0 by default. make_snapshot, read_snapshot, and take_snapshot,
restore_snapshot are collective calls, and need to be called on all ranks. But only rank 0 can access data
in the snapshot::
snapshot = system.take_snapshot(all=True)
if comm.get_rank() == 0:
        print(numpy.mean(snapshot.particles.velocity))
snapshot.particles.position[0] = [1,2,3];
system.restore_snapshot(snapshot);
snapshot = data.make_snapshot(N=10, box=data.boxdim(L=10))
if comm.get_rank() == 0:
snapshot.particles.position[:] = ....
init.read_snapshot(snapshot)
You can explicitly broadcast the information contained in the snapshot to all other ranks, using **broadcast**.
snapshot = system.take_snapshot(all=True)
snapshot.broadcast() # broadcast from rank 0 to all other ranks using MPI
snapshot.broadcast_all() # broadcast from partition 0 to all other ranks and partitions using MPI
.. rubric:: Simulation box
You can access the simulation box from a snapshot::
>>> print(snapshot.box)
Box: Lx=17.3646569289 Ly=17.3646569289 Lz=17.3646569289 xy=0.0 xz=0.0 yz=0.0 dimensions=3
and can change it::
>>> snapshot.box = data.boxdim(Lx=10, Ly=20, Lz=30, xy=1.0, xz=0.1, yz=2.0)
>>> print(snapshot.box)
Box: Lx=10 Ly=20 Lz=30 xy=1.0 xz=0.1 yz=2.0 dimensions=3
*All* particles must be inside the box before using the snapshot to initialize a simulation or restoring it.
The dimensionality of the system (2D/3D) cannot change after initialization.
.. rubric:: Particle properties
Particle properties are present in `snapshot.particles`. Each property is stored in a numpy array that directly
accesses the memory of the snapshot. References to these arrays will become invalid when the snapshot itself is
garbage collected.
* `N` is the number of particles in the particle data snapshot::
>>> print(snapshot.particles.N)
64000
* Change the number of particles in the snapshot with resize. Existing particle properties are
preserved after the resize. Any newly created particles will have default values. After resizing,
existing references to the numpy arrays will be invalid, access them again
from `snapshot.particles.*`::
>>> snapshot.particles.resize(1000);
* The list of all particle types in the simulation can be accessed and modified::
>>> print(snapshot.particles.types)
['A', 'B', 'C']
>>> snapshot.particles.types = ['1', '2', '3', '4'];
* Individual particles properties are stored in numpy arrays. Vector quantities are stored in Nx3 arrays of floats
(or doubles) and scalar quantities are stored in N length 1D arrays::
>>> print(snapshot.particles.position[10])
[ 1.2398 -10.2687 100.6324]
* Various properties can be accessed of any particle, and the numpy arrays can be sliced or passed whole to other
routines::
>>> print(snapshot.particles.typeid[10])
2
>>> print(snapshot.particles.velocity[10])
(-0.60267972946166992, 2.6205904483795166, -1.7868227958679199)
>>> print(snapshot.particles.mass[10])
1.0
>>> print(snapshot.particles.diameter[10])
1.0
* Particle properties can be set in the same way. This modifies the data in the snapshot, not the
current simulation state::
>>> snapshot.particles.position[10] = [1,2,3]
>>> print(snapshot.particles.position[10])
[ 1. 2. 3.]
* Snapshots store particle types as integers that index into the type name array::
>>> print(snapshot.particles.typeid)
[ 0. 1. 2. 0. 1. 2. 0. 1. 2. 0.]
>>> snapshot.particles.types = ['A', 'B', 'C'];
>>> snapshot.particles.typeid[0] = 2; # C
>>> snapshot.particles.typeid[1] = 0; # A
>>> snapshot.particles.typeid[2] = 1; # B
For a list of all particle properties in the snapshot see :py:class:`hoomd.data.SnapshotParticleData`.
.. rubric:: Bonds
Bonds are stored in `snapshot.bonds`. :py:meth:`hoomd.data.system_data.take_snapshot()` does not record the bonds
by default, you need to request them with the argument `bonds=True`.
* `N` is the number of bonds in the bond data snapshot::
>>> print(snapshot.bonds.N)
100
* Change the number of bonds in the snapshot with resize. Existing bonds are
preserved after the resize. Any newly created bonds will be initialized to 0. After resizing,
existing references to the numpy arrays will be invalid, access them again
from `snapshot.bonds.*`::
>>> snapshot.bonds.resize(1000);
* Bonds are stored in an Nx2 numpy array `group`. The first axis accesses the bond `i`. The second axis `j` goes over
the individual particles in the bond. The value of each element is the tag of the particle participating in the
bond::
>>> print(snapshot.bonds.group)
[[0 1]
[1 2]
[3 4]
[4 5]]
>>> snapshot.bonds.group[0] = [10,11]
* Snapshots store bond types as integers that index into the type name array::
>>> print(snapshot.bonds.typeid)
[ 0. 1. 2. 0. 1. 2. 0. 1. 2. 0.]
>>> snapshot.bonds.types = ['A', 'B', 'C'];
>>> snapshot.bonds.typeid[0] = 2; # C
>>> snapshot.bonds.typeid[1] = 0; # A
>>> snapshot.bonds.typeid[2] = 1; # B
.. rubric:: Angles, dihedrals and impropers
Angles, dihedrals, and impropers are stored similar to bonds. The only difference is that the group array is sized
appropriately to store the number needed for each type of bond.
* `snapshot.angles.group` is Nx3
* `snapshot.dihedrals.group` is Nx4
* `snapshot.impropers.group` is Nx4
.. rubric:: Special pairs
Special pairs are exactly handled like bonds. The snapshot entry is called **pairs**.
.. rubric:: Constraints
Pairwise distance constraints are added and removed like bonds. They are defined between two particles.
The only difference is that instead of a type, constraints take a distance as parameter.
* `N` is the number of constraints in the constraint data snapshot::
>>> print(snapshot.constraints.N)
99
* Change the number of constraints in the snapshot with resize. Existing constraints are
preserved after the resize. Any newly created constraints will be initialized to 0. After resizing,
existing references to the numpy arrays will be invalid, access them again
from `snapshot.constraints.*`::
>>> snapshot.constraints.resize(1000);
* Constraints are stored in an Nx2 numpy array `group`. The first axis accesses the constraint `i`. The second axis `j` goes over
the individual particles in the constraint. The value of each element is the tag of the particle participating in the
constraint::
>>> print(snapshot.constraints.group)
[[4 5]
[6 7]
[6 8]
[7 8]]
>>> snapshot.constraints.group[0] = [10,11]
* Snapshots store constraint distances as floats::
>>> print(snapshot.constraints.value)
[ 1.5 2.3 1.0 0.1 ]
.. rubric:: data_proxy Proxy access
For most of the cases below, it is assumed that the result of the initialization command was saved at the beginning
of the script::
system = init.read_xml(filename="input.xml")
Warning:
The performance of the proxy access is very slow. Use snapshots to access the whole system configuration
efficiently.
.. rubric:: Simulation box
You can access the simulation box::
>>> print(system.box)
Box: Lx=17.3646569289 Ly=17.3646569289 Lz=17.3646569289 xy=0.0 xz=0.0 yz=0.0
and can change it::
>>> system.box = data.boxdim(Lx=10, Ly=20, Lz=30, xy=1.0, xz=0.1, yz=2.0)
>>> print(system.box)
Box: Lx=10 Ly=20 Lz=30 xy=1.0 xz=0.1 yz=2.0
**All** particles must **always** remain inside the box. If a box is set in this way such that a particle ends up outside of the box, expect
errors to be thrown or for hoomd to just crash. The dimensionality of the system cannot change after initialization.
.. rubric:: Particle properties
For a list of all particle properties that can be read and/or set, see :py:class:`hoomd.data.particle_data_proxy`.
The examples here only demonstrate changing a few of them.
``system.particles`` is a window into all of the particles in the system.
It behaves like standard python list in many ways.
* Its length (the number of particles in the system) can be queried::
>>> len(system.particles)
64000
* A short summary can be printed of the list::
>>> print(system.particles)
Particle Data for 64000 particles of 1 type(s)
* The list of all particle types in the simulation can be accessed::
>>> print(system.particles.types)
['A']
>>> print system.particles.types
Particle types: ['A']
* Particle types can be added between :py:func:`hoomd.run()` commands::
>>> system.particles.types.add('newType')
* Individual particles can be accessed at random::
>>> i = 4
>>> p = system.particles[i]
* Various properties can be accessed of any particle (note that p can be replaced with system.particles[i]
and the results are the same)::
>>> p.tag
4
>>> p.position
(27.296911239624023, -3.5986068248748779, 10.364067077636719)
>>> p.velocity
(-0.60267972946166992, 2.6205904483795166, -1.7868227958679199)
>>> p.mass
1.0
>>> p.diameter
1.0
>>> p.type
'A'
>>> p.tag
4
* Particle properties can be set in the same way::
>>> p.position = (1,2,3)
>>> p.position
(1.0, 2.0, 3.0)
* Finally, all particles can be easily looped over::
for p in system.particles:
p.velocity = (0,0,0)
Particles may be added at any time in the job script, and a unique tag is returned::
>>> system.particles.add('A')
>>> t = system.particles.add('B')
Particles may be deleted by index::
>>> del system.particles[0]
>>> print(system.particles[0])
tag : 1
position : (23.846603393554688, -27.558368682861328, -20.501256942749023)
image : (0, 0, 0)
velocity : (0.0, 0.0, 0.0)
acceleration: (0.0, 0.0, 0.0)
charge : 0.0
mass : 1.0
diameter : 1.0
type : A
typeid : 0
body : 4294967295
orientation : (1.0, 0.0, 0.0, 0.0)
net_force : (0.0, 0.0, 0.0)
net_energy : 0.0
net_torque : (0.0, 0.0, 0.0)
Note:
The particle with tag 1 is now at index 0. No guarantee is made about how the
order of particles by index will or will not change, so do not write any job scripts which assume
a given ordering.
To access particles in an index-independent manner, use their tags. For example, to remove all particles
of type 'A', do::
tags = []
for p in system.particles:
if p.type == 'A'
tags.append(p.tag)
Then remove each of the particles by their unique tag::
for t in tags:
system.particles.remove(t)
Particles can also be accessed through their unique tag::
t = system.particles.add('A')
p = system.particles.get(t)
Any defined group can be used in exactly the same way as ``system.particles`` above, only the particles accessed
will be those just belonging to the group. For a specific example, the following will set the velocity of all
particles of type A to 0::
groupA = group.type(name="a-particles", type='A')
for p in groupA:
p.velocity = (0,0,0)
.. rubric:: Bond Data
Bonds may be added at any time in the job script::
>>> system.bonds.add("bondA", 0, 1)
>>> system.bonds.add("bondA", 1, 2)
>>> system.bonds.add("bondA", 2, 3)
>>> system.bonds.add("bondA", 3, 4)
Individual bonds may be accessed by index::
>>> bnd = system.bonds[0]
>>> print(bnd)
tag : 0
typeid : 0
a : 0
b : 1
type : bondA
>>> print(bnd.type)
bondA
>>> print(bnd.a)
0
>>> print(bnd.b)
1
Warning:
The order in which bonds appear by index is not static and may change at any time!
Bonds may be deleted by index::
>>> del system.bonds[0]
>>> print(system.bonds[0])
tag : 3
typeid : 0
a : 3
b : 4
type : bondA
To access bonds in an index-independent manner, use their tags. For example, to delete all bonds which connect to
particle 2, first loop through the bonds and build a list of bond tags that match the criteria::
tags = []
for b in system.bonds:
if b.a == 2 or b.b == 2:
tags.append(b.tag)
Then remove each of the bonds by their unique tag::
for t in tags:
system.bonds.remove(t)
Bonds can also be accessed through their unique tag::
t = system.bonds.add('polymer',0,1)
p = system.bonds.get(t)
.. rubric:: Angle, Dihedral, and Improper Data
Angles, Dihedrals, and Impropers may be added at any time in the job script::
>>> system.angles.add("angleA", 0, 1, 2)
>>> system.dihedrals.add("dihedralA", 1, 2, 3, 4)
>>> system.impropers.add("dihedralA", 2, 3, 4, 5)
Individual angles, dihedrals, and impropers may be accessed, deleted by index or removed by tag with the same syntax
as described for bonds, just replace *bonds* with *angles*, *dihedrals*, or, *impropers* and access the
appropriate number of tag elements (a,b,c for angles) (a,b,c,d for dihedrals/impropers).
.. rubric:: Constraints
Constraints may be added and removed from within the job script.
To add a constraint of length 1.5 between particles 0 and 1::
>>> t = system.constraints.add(0, 1, 1.5)
To remove it again::
>>> system.constraints.remove(t)
.. rubric:: Forces
Forces can be accessed in a similar way::
>>> lj = pair.lj(r_cut=3.0)
>>> lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
>>> print(lj.forces[0])
tag : 0
force : (-0.077489577233791351, -0.029512746259570122, -0.13215918838977814)
virial : -0.0931386947632
energy : -0.0469368174672
>>> f0 = lj.forces[0]
>>> print(f0.force)
(-0.077489577233791351, -0.029512746259570122, -0.13215918838977814)
>>> print(f0.virial)
    -0.0931386947632
>>> print(f0.energy)
-0.0469368174672
In this manner, forces due to the lj pair force, bonds, and any other force commands in hoomd can be accessed
independently from one another. See :py:class:`hoomd.data.force_data_proxy` for a definition of each data field.
.. Proxy references
For advanced code using the particle data access from python, it is important to understand that the hoomd
particles, forces, bonds, et cetera, are accessed as proxies. This means that after::
p = system.particles[i]
is executed, *p* **does not** store the position, velocity, ... of particle *i*. Instead, it stores *i* and
provides an interface to get/set the properties on demand. This has some side effects you need to be aware of.
* First, it means that *p* (or any other proxy reference) always references the current state of the particle.
As an example, note how the position of particle p moves after the run() command::
>>> p.position
(-21.317455291748047, -23.883811950683594, -22.159387588500977)
>>> run(1000)
** starting run **
** run complete **
>>> p.position
(-19.774742126464844, -23.564577102661133, -21.418502807617188)
* Second, it means that copies of the proxy reference cannot be changed independently::
p.position
>>> a = p
>>> a.position
(-19.774742126464844, -23.564577102661133, -21.418502807617188)
>>> p.position = (0,0,0)
>>> a.position
(0.0, 0.0, 0.0)
"""
from hoomd import _hoomd
import hoomd
class boxdim(hoomd.meta._metadata):
R""" Define box dimensions.
Args:
Lx (float): box extent in the x direction (distance units)
Ly (float): box extent in the y direction (distance units)
Lz (float): box extent in the z direction (distance units)
xy (float): tilt factor xy (dimensionless)
xz (float): tilt factor xz (dimensionless)
yz (float): tilt factor yz (dimensionless)
dimensions (int): Number of dimensions in the box (2 or 3).
L (float): shorthand for specifying Lx=Ly=Lz=L (distance units)
volume (float): Scale the given box dimensions up to the this volume (area if dimensions=2)
Simulation boxes in hoomd are specified by six parameters, *Lx*, *Ly*, *Lz*, *xy*, *xz* and *yz*. For full details,
see :ref:`boxdim`. A boxdim provides a way to specify all six parameters for a given box and perform some common
operations with them. Modifying a boxdim does not modify the underlying simulation box in hoomd. A boxdim can be passed
to an initialization method or to assigned to a saved sysdef variable (``system.box = new_box``) to set the simulation
box.
Access attributes directly::
b = data.boxdim(L=20)
b.xy = 1.0
b.yz = 0.5
b.Lz = 40
.. rubric:: Two dimensional systems
2D simulations in hoomd are embedded in 3D boxes with short heights in the z direction. To create a 2D box,
set dimensions=2 when creating the boxdim. This will force Lz=1 and xz=yz=0. init commands that support 2D boxes
will pass the dimensionality along to the system. When you assign a new boxdim to an already initialized system,
the dimensionality flag is ignored. Changing the number of dimensions during a simulation run is not supported.
In 2D boxes, *volume* is in units of area.
.. rubric:: Shorthand notation
data.boxdim accepts the keyword argument ``L=x`` as shorthand notation for ``Lx=x, Ly=x, Lz=x`` in 3D
and ``Lx=x, Ly=x, Lz=1`` in 2D. If you specify both ``L`` and ``Lx``, ``Ly``, or ``Lz``, then the value for ``L`` will override
the others.
Examples:
* Cubic box with given volume: ``data.boxdim(volume=V)``
* Triclinic box in 2D with given area: ``data.boxdim(xy=1.0, dimensions=2, volume=A)``
* Rectangular box in 2D with given area and aspect ratio: ``data.boxdim(Lx=1, Ly=aspect, dimensions=2, volume=A)``
* Cubic box with given length: ``data.boxdim(L=10)``
* Fully define all box parameters: ``data.boxdim(Lx=10, Ly=20, Lz=30, xy=1.0, xz=0.5, yz=0.1)``
"""
def __init__(self, Lx=1.0, Ly=1.0, Lz=1.0, xy=0.0, xz=0.0, yz=0.0, dimensions=3, L=None, volume=None):
if L is not None:
Lx = L;
Ly = L;
Lz = L;
if dimensions == 2:
Lz = 1.0;
xz = yz = 0.0;
self.Lx = Lx;
self.Ly = Ly;
self.Lz = Lz;
self.xy = xy;
self.xz = xz;
self.yz = yz;
self.dimensions = dimensions;
if volume is not None:
self.set_volume(volume);
# base class constructor
hoomd.meta._metadata.__init__(self)
def scale(self, sx=1.0, sy=1.0, sz=1.0, s=None):
    R""" Scale box dimensions.

    Args:
        sx (float): scale factor in the x direction
        sy (float): scale factor in the y direction
        sz (float): scale factor in the z direction
        s (float): Shorthand for sx=s, sy=s, sz=s

    Scales the box by the given scale factors. Tilt factors are not modified.

    Returns:
        A reference to the modified box.
    """
    # the s shorthand overrides the per-axis factors
    if s is not None:
        sx = s;
        sy = s;
        sz = s;

    self.Lx = self.Lx * sx;
    self.Ly = self.Ly * sy;
    self.Lz = self.Lz * sz;
    return self
def set_volume(self, volume):
    R""" Set the box volume.

    Args:
        volume (float): new box volume (area if dimensions=2)

    Scale the box to the given volume (or area).

    Returns:
        A reference to the modified box.
    """
    current = self.get_volume()

    # uniform scale factor mapping the current volume (area in 2D)
    # onto the requested one
    if self.dimensions == 3:
        factor = (volume / current) ** (1.0 / 3.0)
        self.scale(factor, factor, factor)
    else:
        factor = (volume / current) ** (1.0 / 2.0)
        self.scale(factor, factor, 1.0)
    return self
def get_volume(self):
    R""" Get the box volume.

    Returns:
        The box volume (area in 2D).
    """
    # delegate to the C++ box; the flag selects area (2D) semantics
    return self._getBoxDim().getVolume(self.dimensions == 2)
def get_lattice_vector(self, i):
    R""" Get a lattice vector.

    Args:
        i (int): (=0,1,2) direction of lattice vector

    Returns:
        The lattice vector (3-tuple) along direction *i*.
    """
    vec = self._getBoxDim().getLatticeVector(int(i))
    return (vec.x, vec.y, vec.z)
def wrap(self, v, img=(0,0,0)):
    R""" Wrap a vector using the periodic boundary conditions.

    Args:
        v (tuple): The vector to wrap
        img (tuple): A vector of integer image flags that will be updated (optional)

    Returns:
        The wrapped vector and the image flags in a tuple.
    """
    # convert to the C++ POD types; BoxDim.wrap updates the position
    # and image flags in place
    pos = _hoomd.make_scalar3(float(v[0]), float(v[1]), float(v[2]))
    image = _hoomd.make_int3(int(img[0]), int(img[1]), int(img[2]))
    flags = _hoomd.make_char3(0, 0, 0)
    self._getBoxDim().wrap(pos, image, flags)
    return (pos.x, pos.y, pos.z), (image.x, image.y, image.z)
def min_image(self, v):
    R""" Apply the minimum image convention to a vector using periodic boundary conditions.

    Args:
        v (tuple): The vector to apply minimum image to

    Returns:
        The minimum image as a tuple.
    """
    # cast components explicitly to float so integer (or numpy scalar)
    # inputs are accepted, consistent with wrap()
    u = _hoomd.make_scalar3(float(v[0]), float(v[1]), float(v[2]))
    u = self._getBoxDim().minImage(u)
    return (u.x, u.y, u.z)
def make_fraction(self, v):
    R""" Scale a vector to fractional coordinates.

    Args:
        v (tuple): The vector to convert to fractional coordinates

    make_fraction() takes a vector in a box and computes a vector where all components are
    between 0 and 1.

    Returns:
        The scaled vector.
    """
    # cast components explicitly to float so integer (or numpy scalar)
    # inputs are accepted, consistent with wrap()
    u = _hoomd.make_scalar3(float(v[0]), float(v[1]), float(v[2]))
    w = _hoomd.make_scalar3(0, 0, 0)
    u = self._getBoxDim().makeFraction(u, w)
    return (u.x, u.y, u.z)
## \internal
# \brief Get a C++ boxdim
def _getBoxDim(self):
    # build a fresh C++ BoxDim reflecting the current python-side state
    box = _hoomd.BoxDim(self.Lx, self.Ly, self.Lz)
    box.setTiltFactors(self.xy, self.xz, self.yz)
    return box
def __str__(self):
    # one-line human-readable summary of the box geometry
    parts = ['Box:']
    for name in ('Lx', 'Ly', 'Lz', 'xy', 'xz', 'yz', 'dimensions'):
        parts.append('{}={}'.format(name, getattr(self, name)))
    return ' '.join(parts)
## \internal
# \brief Get a dictionary representation of the box dimensions
def get_metadata(self):
    # start from the base-class metadata and append the box geometry
    data = hoomd.meta._metadata.get_metadata(self)
    data['d'] = self.dimensions
    for key in ('Lx', 'Ly', 'Lz', 'xy', 'xz', 'yz'):
        data[key] = getattr(self, key)
    data['V'] = self.get_volume()
    return data
class system_data(hoomd.meta._metadata):
    R""" Access system data

    system_data provides access to the different data structures that define the current state of the simulation.

    See :py:mod:`hoomd.data` for a full explanation of how to use by example.

    Attributes:
        box (:py:class:`hoomd.data.boxdim`)
        particles (:py:class:`hoomd.data.particle_data_proxy`)
        bonds (:py:class:`hoomd.data.bond_data_proxy`)
        angles (:py:class:`hoomd.data.angle_data_proxy`)
        dihedrals (:py:class:`hoomd.data.dihedral_data_proxy`)
        impropers (:py:class:`hoomd.data.dihedral_data_proxy`)
        constraint (:py:class:`hoomd.data.constraint_data_proxy`)
        pairs (:py:class:`hoomd.data.bond_data_proxy`)

    .. versionadded:: 2.1
    """

    def __init__(self, sysdef):
        # Wrap each C++ per-topology data structure in its python proxy
        # container. Impropers reuse the dihedral proxy and special pairs
        # reuse the bond proxy because they share the same group layout.
        self.sysdef = sysdef;
        self.particles = particle_data(sysdef.getParticleData());
        self.bonds = bond_data(sysdef.getBondData());
        self.angles = angle_data(sysdef.getAngleData());
        self.dihedrals = dihedral_data(sysdef.getDihedralData());
        self.impropers = dihedral_data(sysdef.getImproperData());
        self.constraints = constraint_data(sysdef.getConstraintData());
        self.pairs = bond_data(sysdef.getPairData());

        # base class constructor
        hoomd.meta._metadata.__init__(self)

    def take_snapshot(self,
                      particles=True,
                      bonds=False,
                      pairs=False,
                      integrators=False,
                      all=False,
                      dtype='float'):
        R""" Take a snapshot of the current system data.

        Args:
            particles (bool): When True, particle data is included in the snapshot.
            bonds (bool): When true, bond, angle, dihedral, improper and constraint data is included.
            pairs (bool): When true, special pair data is included
                .. versionadded:: 2.1
            integrators (bool): When true, integrator data is included the snapshot.
            all (bool): When true, the entire system state is saved in the snapshot.
            dtype (str): Datatype for the snapshot numpy arrays. Must be either 'float' or 'double'.

        Returns:
            The snapshot object.

        This function returns a snapshot object. It contains the current,
        partial or complete simulation state. With appropriate options
        it is possible to select which data properties should be included
        in the snapshot.

        Examples::

            snapshot = system.take_snapshot()
            snapshot = system.take_snapshot(bonds=True)

        """
        hoomd.util.print_status_line();

        # all=True overrides the individual selection flags
        if all is True:
            particles=True
            bonds=True
            pairs=True
            integrators=True

        # take the snapshot; the single `bonds` flag drives the bond, angle,
        # dihedral, improper, and constraint arguments of the C++ call
        if dtype == 'float':
            cpp_snapshot = self.sysdef.takeSnapshot_float(particles,bonds,bonds,bonds,bonds,bonds,integrators,pairs)
        elif dtype == 'double':
            cpp_snapshot = self.sysdef.takeSnapshot_double(particles,bonds,bonds,bonds,bonds,bonds,integrators,pairs)
        else:
            raise ValueError("dtype must be float or double");

        return cpp_snapshot

    def replicate(self, nx=1, ny=1, nz=1):
        R""" Replicates the system along the three spatial dimensions.

        Args:
            nx (int): Number of times to replicate the system along the x-direction
            ny (int): Number of times to replicate the system along the y-direction
            nz (int): Number of times to replicate the system along the z-direction

        This method replicates particles along all three spatial directions, as
        opposed to replication implied by periodic boundary conditions.
        The box is resized and the number of particles is updated so that the new box
        holds the specified number of replicas of the old box along all directions.
        Particle coordinates are updated accordingly to fit into the new box. All velocities and
        other particle properties are replicated as well. Also bonded groups between particles
        are replicated.

        Examples::

            system = init.read_xml("some_file.xml")
            system.replicate(nx=2,ny=2,nz=2)

        Note:
            The dimensions of the processor grid are not updated upon replication. For example, if an initially
            cubic box is replicated along only one spatial direction, this could lead to decreased performance
            if the processor grid was optimal for the original box dimensions, but not for the new ones.
        """
        hoomd.util.print_status_line()

        nx = int(nx)
        ny = int(ny)
        nz = int(nz)

        if nx == ny == nz == 1:
            hoomd.context.msg.warning("All replication factors == 1. Not replicating system.\n")
            return

        if nx <= 0 or ny <= 0 or nz <= 0:
            hoomd.context.msg.error("Cannot replicate by zero or by a negative value along any direction.\n")
            raise RuntimeError("nx, ny, nz need to be positive integers")

        # Take a snapshot
        hoomd.util.quiet_status()
        cpp_snapshot = self.take_snapshot(all=True)
        hoomd.util.unquiet_status()

        # replicate only on the root rank; restore_snapshot broadcasts
        if hoomd.comm.get_rank() == 0:
            cpp_snapshot.replicate(nx, ny, nz)

        # restore from snapshot
        hoomd.util.quiet_status()
        self.restore_snapshot(cpp_snapshot)
        hoomd.util.unquiet_status()

    def restore_snapshot(self, snapshot):
        R""" Re-initializes the system from a snapshot.

        Args:
            snapshot: The snapshot to initialize the system from.

        Snapshots temporarily store system data. Snapshots contain the complete simulation state in a
        single object. They can be used to restart a simulation.

        Example use cases in which a simulation may be restarted from a snapshot include python-script-level
        Monte-Carlo schemes, where the system state is stored after a move has been accepted (according to
        some criterion), and where the system is re-initialized from that same state in the case
        when a move is not accepted.

        Example::

            system = init.read_xml("some_file.xml")
            ... run a simulation ...
            snapshot = system.take_snapshot(all=True)
            ...
            system.restore_snapshot(snapshot)

        Warning:
            restore_snapshot() may invalidate force coefficients, neighborlist r_cut values, and other per type
            quantities if called within a callback during a run(). You can restore a snapshot during a run only
            if the snapshot is of a previous state of the currently running system. Otherwise, you need to use
            restore_snapshot() between run() commands to ensure that all per type coefficients are updated properly.
        """
        hoomd.util.print_status_line();

        # validation happens only on the root rank; snapshot data is only
        # fully populated there
        if hoomd.comm.get_rank() == 0:
            if snapshot.has_particle_data and len(snapshot.particles.types) != self.sysdef.getParticleData().getNTypes():
                raise RuntimeError("Number of particle types must remain the same")
            # bond-type check deliberately disabled
            # if snapshot.has_bond_data and len(snapshot.bonds.types) != self.sysdef.getBondData().getNTypes():
            #     raise RuntimeError("Number of bond types must remain the same")
            if snapshot.has_angle_data and len(snapshot.angles.types) != self.sysdef.getAngleData().getNTypes():
                raise RuntimeError("Number of angle types must remain the same")
            if snapshot.has_dihedral_data and len(snapshot.dihedrals.types) != self.sysdef.getDihedralData().getNTypes():
                raise RuntimeError("Number of dihedral types must remain the same")
            if snapshot.has_improper_data and len(snapshot.impropers.types) != self.sysdef.getImproperData().getNTypes():
                raise RuntimeError("Number of improper types must remain the same")
            if snapshot.has_pair_data and len(snapshot.pairs.types) != self.sysdef.getPairData().getNTypes():
                raise RuntimeError("Number of pair types must remain the same")

        self.sysdef.initializeFromSnapshot(snapshot);

    ## \internal
    # \brief Get system metadata (box, topology containers, and timestep)
    def get_metadata(self):
        data = hoomd.meta._metadata.get_metadata(self)
        data['box'] = self.box
        data['particles'] = self.particles
        data['number_density'] = len(self.particles)/self.box.get_volume()
        data['bonds'] = self.bonds
        data['angles'] = self.angles
        data['dihedrals'] = self.dihedrals
        data['impropers'] = self.impropers
        data['constraints'] = self.constraints
        data['pairs'] = self.pairs
        data['timestep'] = hoomd.context.current.system.getCurrentTimeStep()
        return data

    ## Get the system box
    @property
    def box(self):
        # build a python boxdim from the current C++ global box
        b = self.sysdef.getParticleData().getGlobalBox();
        L = b.getL();
        return boxdim(Lx=L.x, Ly=L.y, Lz=L.z, xy=b.getTiltFactorXY(), xz=b.getTiltFactorXZ(), yz=b.getTiltFactorYZ(), dimensions=self.sysdef.getNDimensions());

    ## Set the system box
    # \param value The new boundaries (a data.boxdim object)
    @box.setter
    def box(self, value):
        if not isinstance(value, boxdim):
            raise TypeError('box must be a data.boxdim object');
        self.sysdef.getParticleData().setGlobalBox(value._getBoxDim());
## \internal
# \brief Access the list of types
#
# pdata_types_proxy provides access to the type names and the possibility to add types to the simulation
# This documentation is intentionally left sparse, see hoomd.data for a full explanation of how to use
# particle_data, documented by example.
#
class pdata_types_proxy(object):
    ## \internal
    # \brief particle_data iterator
    class pdata_types_iterator(object):
        def __init__(self, data):
            self.data = data
            self.index = 0

        def __iter__(self):
            return self

        def __next__(self):
            # stop once every type name has been yielded
            if self.index == len(self.data):
                raise StopIteration
            name = self.data[self.index]
            self.index += 1
            return name

        # support python2
        next = __next__

    ## \internal
    # \brief create a pdata_types_proxy
    #
    # \param pdata ParticleData to connect
    def __init__(self, pdata):
        self.pdata = pdata

    ## \var pdata
    # \internal
    # \brief ParticleData to which this instance is connected

    ## \internal
    # \brief Get the name of a type
    # \param type_idx Type index
    def __getitem__(self, type_idx):
        if not 0 <= type_idx < self.pdata.getNTypes():
            raise IndexError
        return self.pdata.getNameByType(type_idx)

    ## \internal
    # \brief Set the name of a type
    # \param type_idx Type index to rename
    # \param name New type name
    def __setitem__(self, type_idx, name):
        if not 0 <= type_idx < self.pdata.getNTypes():
            raise IndexError
        self.pdata.setTypeName(type_idx, name)

    ## \internal
    # \brief Get the number of types
    def __len__(self):
        return self.pdata.getNTypes()

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        ntypes = self.pdata.getNTypes()
        result = "Particle types: ["
        for i in range(ntypes):
            result += "'" + self.pdata.getNameByType(i) + "'"
            # comma-separate entries; the final entry closes the bracket
            result += ", " if i != ntypes - 1 else "]"
        return result

    ## \internal
    # \brief Return an iterator
    def __iter__(self):
        return pdata_types_proxy.pdata_types_iterator(self)

    ## \internal
    # \brief Add a new particle type
    # \param name Name of type to add
    # \returns Index of newly added type
    def add(self, name):
        # reuse the existing id when the type name is already defined
        for i in range(self.pdata.getNTypes()):
            if self.pdata.getNameByType(i) == name:
                hoomd.context.msg.warning("Type '" + name + "' already defined.\n")
                return i

        return self.pdata.addType(name)
## \internal
# \brief Access particle data
#
# particle_data provides access to the per-particle data of all particles in the system.
# This documentation is intentionally left sparse, see hoomd.data for a full explanation of how to use
# particle_data, documented by example.
#
class particle_data(hoomd.meta._metadata):
    ## \internal
    # \brief particle_data iterator
    #
    # Iterates by contiguous index 0..N-1; each step yields a
    # particle_data_proxy via particle_data.__getitem__.
    class particle_data_iterator:
        def __init__(self, data):
            self.data = data;
            self.index = 0;
        def __iter__(self):
            return self;
        def __next__(self):
            # stop once every contiguous id has been visited
            if self.index == len(self.data):
                raise StopIteration;
            result = self.data[self.index];
            self.index += 1;
            return result;
        # support python2
        next = __next__;

    ## \internal
    # \brief create a particle_data
    #
    # \param pdata ParticleData to connect
    def __init__(self, pdata):
        self.pdata = pdata;
        # NOTE(review): the types proxy is built from the *global* context's
        # particle data, not the pdata argument — confirm intentional.
        self.types = pdata_types_proxy(hoomd.context.current.system_definition.getParticleData())

        # base class constructor
        hoomd.meta._metadata.__init__(self)

    ## \var pdata
    # \internal
    # \brief ParticleData to which this instance is connected

    ## \internal
    # \brief Get a particle_proxy reference to the particle with contiguous id \a id
    # \param id Contiguous particle id to access
    def __getitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;
        # translate contiguous index -> stable tag before constructing the proxy
        tag = self.pdata.getNthTag(id);
        return particle_data_proxy(self.pdata, tag);

    ## \internal
    # \brief Get a particle_proxy reference to the particle with tag \a tag
    # \param tag Particle tag to access
    def get(self, tag):
        if tag > self.pdata.getMaximumTag() or tag < 0:
            raise IndexError;
        return particle_data_proxy(self.pdata, tag);

    ## \internal
    # \brief Set a particle's properties
    # \param tag Particle tag to set
    # \param p Value containing properties to set
    def __setitem__(self, tag, p):
        # properties are set through the proxy's attributes instead
        raise RuntimeError('__setitem__ not implemented');

    ## \internal
    # \brief Add a new particle
    # \param type Type name of the particle to add
    # \returns Unique tag identifying this particle
    def add(self, type):
        typeid = self.pdata.getTypeByName(type);
        return self.pdata.addParticle(typeid);

    ## \internal
    # \brief Remove a particle by tag
    # \param tag Unique tag of the particle to remove
    def remove(self, tag):
        self.pdata.removeParticle(tag);

    ## \internal
    # \brief Delete a particle by contiguous id
    # \param id Particle id to delete
    def __delitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;
        tag = self.pdata.getNthTag(id);
        self.pdata.removeParticle(tag);

    ## \internal
    # \brief Get the global number of particles (across all MPI ranks)
    def __len__(self):
        return self.pdata.getNGlobal();

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        result = "Particle Data for %d particles of %d type(s)" % (self.pdata.getNGlobal(), self.pdata.getNTypes());
        return result

    ## \internal
    # \brief Return an iterator
    def __iter__(self):
        return particle_data.particle_data_iterator(self);

    ## \internal
    # \brief Return metadata for this particle_data instance
    def get_metadata(self):
        data = hoomd.meta._metadata.get_metadata(self)
        data['N'] = len(self)
        data['types'] = list(self.types);
        return data
class particle_data_proxy(object):
    R""" Access a single particle via a proxy.

    particle_data_proxy provides access to all of the properties of a single particle in the system.
    See :py:mod:`hoomd.data` for examples.

    Attributes:
        tag (int): A unique name for the particle in the system. Tags run from 0 to N-1.
        acceleration (tuple): A 3-tuple of floats (x, y, z). Acceleration is a calculated quantity and cannot be set. (in acceleration units)
        typeid (int): The type id of the particle.
        position (tuple): (x, y, z) (float, in distance units).
        image (tuple): (x, y, z) (int).
        velocity (tuple): (x, y, z) (float, in velocity units).
        charge (float): Particle charge.
        mass (float): (in mass units).
        diameter (float): (in distance units).
        type (str): Particle type name.
        body (int): Body id. -1 for free particles, 0 or larger for rigid bodies, and -2 or lesser for floppy bodies.
        orientation (tuple) : (w,x,y,z) (float, quaternion).
        net_force (tuple): Net force on particle (x, y, z) (float, in force units).
        net_energy (float): Net contribution of particle to the potential energy (in energy units).
        net_torque (tuple): Net torque on the particle (x, y, z) (float, in torque units).
        net_virial (tuple): Net virial for the particle (xx,yy,zz, xy, xz, yz)
    """
    ## \internal
    # \brief create a particle_data_proxy
    #
    # \param pdata ParticleData to which this proxy belongs
    # \param tag Tag of this particle in \a pdata
    def __init__(self, pdata, tag):
        self.pdata = pdata;
        self.tag = tag

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        result = "";
        result += "tag : " + str(self.tag) + "\n"
        result += "position : " + str(self.position) + "\n";
        result += "image : " + str(self.image) + "\n";
        result += "velocity : " + str(self.velocity) + "\n";
        result += "acceleration: " + str(self.acceleration) + "\n";
        result += "charge : " + str(self.charge) + "\n";
        result += "mass : " + str(self.mass) + "\n";
        result += "diameter : " + str(self.diameter) + "\n";
        result += "type : " + str(self.type) + "\n";
        result += "typeid : " + str(self.typeid) + "\n";
        result += "body : " + str(self.body) + "\n";
        result += "orientation : " + str(self.orientation) + "\n";
        result += "mom. inertia: " + str(self.moment_inertia) + "\n";
        result += "angular_momentum: " + str(self.angular_momentum) + "\n";
        result += "net_force : " + str(self.net_force) + "\n";
        result += "net_energy : " + str(self.net_energy) + "\n";
        result += "net_torque : " + str(self.net_torque) + "\n";
        result += "net_virial : " + str(self.net_virial) + "\n";
        return result;

    # Particle position (x, y, z); the setter wraps back into the box
    # (the trailing True argument to setPosition).
    @property
    def position(self):
        pos = self.pdata.getPosition(self.tag);
        return (pos.x, pos.y, pos.z);

    @position.setter
    def position(self, value):
        if len(value) != 3:
            raise ValueError("The input value/vector should be exactly length 3.")
        v = _hoomd.Scalar3();
        v.x = float(value[0]);
        v.y = float(value[1]);
        v.z = float(value[2]);
        self.pdata.setPosition(self.tag, v, True);

    # Particle velocity (x, y, z).
    @property
    def velocity(self):
        vel = self.pdata.getVelocity(self.tag);
        return (vel.x, vel.y, vel.z);

    @velocity.setter
    def velocity(self, value):
        if len(value) != 3:
            raise ValueError("The input value/vector should be exactly length 3.")
        v = _hoomd.Scalar3();
        v.x = float(value[0]);
        v.y = float(value[1]);
        v.z = float(value[2]);
        self.pdata.setVelocity(self.tag, v);

    # Acceleration is computed by the integrator; read-only (no setter).
    @property
    def acceleration(self):
        accel = self.pdata.getAcceleration(self.tag);
        return (accel.x, accel.y, accel.z);

    # Periodic image flags (x, y, z), integers.
    @property
    def image(self):
        image = self.pdata.getImage(self.tag);
        return (image.x, image.y, image.z);

    @image.setter
    def image(self, value):
        if len(value) != 3:
            raise ValueError("The input value/vector should be exactly length 3.")
        v = _hoomd.int3();
        v.x = int(value[0]);
        v.y = int(value[1]);
        v.z = int(value[2]);
        self.pdata.setImage(self.tag, v);

    @property
    def charge(self):
        return self.pdata.getCharge(self.tag);

    @charge.setter
    def charge(self, value):
        self.pdata.setCharge(self.tag, float(value));

    @property
    def mass(self):
        return self.pdata.getMass(self.tag);

    @mass.setter
    def mass(self, value):
        self.pdata.setMass(self.tag, float(value));

    @property
    def diameter(self):
        return self.pdata.getDiameter(self.tag);

    @diameter.setter
    def diameter(self, value):
        self.pdata.setDiameter(self.tag, float(value));

    # Numeric type id; read-only — change type via the `type` property.
    @property
    def typeid(self):
        return self.pdata.getType(self.tag);

    @property
    def body(self):
        return self.pdata.getBody(self.tag);

    @body.setter
    def body(self, value):
        self.pdata.setBody(self.tag, value);

    # Type name; the setter resolves the name to an id, so it must already
    # be a defined type.
    @property
    def type(self):
        typeid = self.pdata.getType(self.tag);
        return self.pdata.getNameByType(typeid);

    @type.setter
    def type(self, value):
        typeid = self.pdata.getTypeByName(value);
        self.pdata.setType(self.tag, typeid);

    # Orientation quaternion; note the tuple order here follows the stored
    # Scalar4 component order (x, y, z, w).
    @property
    def orientation(self):
        o = self.pdata.getOrientation(self.tag);
        return (o.x, o.y, o.z, o.w);

    @orientation.setter
    def orientation(self, value):
        if len(value) != 4:
            raise ValueError("The input value/vector should be exactly length 4.")
        o = _hoomd.Scalar4();
        o.x = float(value[0]);
        o.y = float(value[1]);
        o.z = float(value[2]);
        o.w = float(value[3]);
        self.pdata.setOrientation(self.tag, o);

    # Angular momentum quaternion (x, y, z, w component order).
    @property
    def angular_momentum(self):
        a = self.pdata.getAngularMomentum(self.tag);
        return (a.x, a.y, a.z, a.w);

    @angular_momentum.setter
    def angular_momentum(self, value):
        if len(value) != 4:
            raise ValueError("The input value/vector should be exactly length 4.")
        a = _hoomd.Scalar4();
        a.x = float(value[0]);
        a.y = float(value[1]);
        a.z = float(value[2]);
        a.w = float(value[3]);
        self.pdata.setAngularMomentum(self.tag, a);

    # Principal moments of inertia (x, y, z).
    @property
    def moment_inertia(self):
        m = self.pdata.getMomentsOfInertia(self.tag)
        return (m.x, m.y, m.z);

    @moment_inertia.setter
    def moment_inertia(self, value):
        if len(value) != 3:
            raise ValueError("The input value/vector should be exactly length 3.")
        m = _hoomd.Scalar3();
        m.x = float(value[0]);
        m.y = float(value[1]);
        m.z = float(value[2]);
        self.pdata.setMomentsOfInertia(self.tag, m);

    # Net force on the particle; read-only computed quantity.
    @property
    def net_force(self):
        f = self.pdata.getPNetForce(self.tag);
        return (f.x, f.y, f.z);

    # Six-component virial (xx, yy, zz, xy, xz, yz); read-only.
    @property
    def net_virial(self):
        v = (self.pdata.getPNetVirial(self.tag,0),
             self.pdata.getPNetVirial(self.tag,1),
             self.pdata.getPNetVirial(self.tag,2),
             self.pdata.getPNetVirial(self.tag,3),
             self.pdata.getPNetVirial(self.tag,4),
             self.pdata.getPNetVirial(self.tag,5));
        return v

    # Potential energy contribution; stored in the w component of the
    # net force Scalar4.
    @property
    def net_energy(self):
        f = self.pdata.getPNetForce(self.tag);
        return f.w;

    @property
    def net_torque(self):
        f = self.pdata.getNetTorque(self.tag);
        return (f.x, f.y, f.z);
## \internal
# Access force data
#
# force_data provides access to the per-particle data of all forces in the system.
# This documentation is intentionally left sparse, see hoomd.data for a full explanation of how to use
# force_data, documented by example.
#
class force_data(object):
    ## \internal
    # \brief force_data iterator
    #
    # Iterates particle indices 0..N-1, yielding a force_data_proxy for each.
    class force_data_iterator(object):
        def __init__(self, data):
            self.data = data;
            self.index = 0;
        def __iter__(self):
            return self;
        def __next__(self):
            # stop once every particle has been visited
            if self.index == len(self.data):
                raise StopIteration;
            result = self.data[self.index];
            self.index += 1;
            return result;
        # support python2
        next = __next__;

    ## \internal
    # \brief create a force_data
    #
    # \param force ForceCompute to connect
    def __init__(self, force):
        self.force = force;

    ## \var force
    # \internal
    # \brief ForceCompute to which this instance is connected

    ## \internal
    # \brief Get a force_proxy reference to the particle with tag \a tag
    # \param tag Particle tag to access
    # NOTE(review): `tag` is range-checked like a contiguous index
    # (0 <= tag < N) rather than translated via getNthTag — confirm that
    # tags and indices coincide for this use.
    def __getitem__(self, tag):
        if tag >= len(self) or tag < 0:
            raise IndexError;
        return force_data_proxy(self.force, tag);

    ## \internal
    # \brief Set a particle's properties
    # \param tag Particle tag to set
    # \param p Value containing properties to set
    def __setitem__(self, tag, p):
        # forces are computed quantities; they cannot be assigned
        raise RuntimeError('__setitem__ not implemented');

    ## \internal
    # \brief Get the number of particles
    # (read from the global context, not from self.force)
    def __len__(self):
        return hoomd.context.current.system_definition.getParticleData().getNGlobal();

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        result = "Force Data for %d particles" % (len(self));
        return result

    ## \internal
    # \brief Return an iterator
    def __iter__(self):
        return force_data.force_data_iterator(self);
class force_data_proxy(object):
    R""" Access the force on a single particle via a proxy.

    force_data_proxy provides access to the current force, virial, and energy of a single particle due to a single
    force computation. See :py:mod:`hoomd.data` for examples.

    Attributes:
        force (tuple): (float, x, y, z) - the current force on the particle (force units)
        virial (tuple): This particle's contribution to the total virial tensor.
        energy (float): This particle's contribution to the total potential energy (energy units)
        torque (float): (float x, y, z) - current torque on the particle (torque units)
    """
    ## \internal
    # \brief create a force_data_proxy
    #
    # \param force ForceCompute to which this proxy belongs
    # \param tag Tag of this particle in \a force
    def __init__(self, force, tag):
        self.fdata = force
        self.tag = tag

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        parts = []
        parts.append("tag : " + str(self.tag) + "\n")
        parts.append("force : " + str(self.force) + "\n")
        parts.append("virial : " + str(self.virial) + "\n")
        parts.append("energy : " + str(self.energy) + "\n")
        parts.append("torque : " + str(self.torque) + "\n")
        return "".join(parts)

    # current force on the particle, as an (x, y, z) tuple
    @property
    def force(self):
        vec = self.fdata.cpp_force.getForce(self.tag)
        return (vec.x, vec.y, vec.z)

    # the six independent virial tensor components for this particle
    @property
    def virial(self):
        cpp = self.fdata.cpp_force
        return tuple(cpp.getVirial(self.tag, i) for i in range(6))

    # this particle's contribution to the potential energy
    @property
    def energy(self):
        return self.fdata.cpp_force.getEnergy(self.tag)

    # current torque on the particle, as an (x, y, z) tuple
    @property
    def torque(self):
        vec = self.fdata.cpp_force.getTorque(self.tag)
        return (vec.x, vec.y, vec.z)
## \internal
# \brief Access bond data
#
# bond_data provides access to the bonds in the system.
# This documentation is intentionally left sparse, see hoomd.data for a full explanation of how to use
# bond_data, documented by example.
#
class bond_data(hoomd.meta._metadata):
    ## \internal
    # \brief bond_data iterator
    #
    # NOTE(review): the attribute is named `tag` but it is used as a
    # contiguous index 0..N-1 (it is passed to __getitem__, which
    # translates index -> tag via getNthTag).
    class bond_data_iterator:
        def __init__(self, data):
            self.data = data;
            self.tag = 0;
        def __iter__(self):
            return self;
        def __next__(self):
            # stop once every bond has been visited
            if self.tag == len(self.data):
                raise StopIteration;
            result = self.data[self.tag];
            self.tag += 1;
            return result;
        # support python2
        next = __next__;

    ## \internal
    # \brief create a bond_data
    #
    # \param bdata BondData to connect
    def __init__(self, bdata):
        self.bdata = bdata;

        # base class constructor
        hoomd.meta._metadata.__init__(self)

    ## \internal
    # \brief Add a new bond
    # \param type Type name of the bond to add (must already be defined)
    # \param a Tag of the first particle in the bond
    # \param b Tag of the second particle in the bond
    # \returns Unique tag identifying this bond
    def add(self, type, a, b):
        typeid = self.bdata.getTypeByName(type);
        return self.bdata.addBondedGroup(_hoomd.Bond(typeid, int(a), int(b)));

    ## \internal
    # \brief Remove a bond by tag
    # \param tag Unique tag of the bond to remove
    def remove(self, tag):
        self.bdata.removeBondedGroup(tag);

    ## \var bdata
    # \internal
    # \brief BondData to which this instance is connected

    ## \internal
    # \brief Get a bond_data_proxy reference to the bond with contiguous id \a id
    # \param id Bond id to access
    def __getitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;
        # translate contiguous index -> stable tag before constructing the proxy
        tag = self.bdata.getNthTag(id);
        return bond_data_proxy(self.bdata, tag);

    ## \internal
    # \brief Get a bond_data_proxy reference to the bond with tag \a tag
    # \param tag Bond tag to access
    # (tags of removed bonds below the maximum tag still pass this check)
    def get(self, tag):
        if tag > self.bdata.getMaximumTag() or tag < 0:
            raise IndexError;
        return bond_data_proxy(self.bdata, tag);

    ## \internal
    # \brief Set a bond's properties
    # \param id Bond id to set
    # \param b Value containing properties to set
    def __setitem__(self, id, b):
        raise RuntimeError('Cannot change bonds once they are created');

    ## \internal
    # \brief Delete a bond by id
    # \param id Bond id to delete
    def __delitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;
        tag = self.bdata.getNthTag(id);
        self.bdata.removeBondedGroup(tag);

    ## \internal
    # \brief Get the global number of bonds
    def __len__(self):
        return self.bdata.getNGlobal();

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        result = "Bond Data for %d bonds of %d typeid(s)" % (self.bdata.getNGlobal(), self.bdata.getNTypes());
        return result

    ## \internal
    # \brief Return an iterator
    def __iter__(self):
        return bond_data.bond_data_iterator(self);

    ## \internal
    # \brief Return metadata for this bond_data instance
    def get_metadata(self):
        data = hoomd.meta._metadata.get_metadata(self)
        data['N'] = len(self)
        data['types'] = [self.bdata.getNameByType(i) for i in range(self.bdata.getNTypes())];
        return data
class bond_data_proxy(object):
    R""" Access a single bond via a proxy.

    bond_data_proxy provides access to all of the properties of a single bond in the system.
    See :py:mod:`hoomd.data` for examples.

    Attributes:
        tag (int): A unique integer attached to each bond (not in any particular range). A bond's tag remains fixed
                   during its lifetime. (Tags previously used by removed bonds may be recycled).
        typeid (int): Type id of the bond.
        a (int): The tag of the first particle in the bond.
        b (int): The tag of the second particle in the bond.
        type (str): Bond type name.

    In the current version of the API, only already defined type names can be used. A future improvement will allow
    dynamic creation of new type names from within the python API.
    """
    ## \internal
    # \brief create a bond_data_proxy
    #
    # \param bdata BondData to which this proxy belongs
    # \param tag Tag of this bond in \a bdata
    def __init__(self, bdata, tag):
        self.bdata = bdata
        self.tag = tag

    ## \internal
    # \brief Fetch the C++ bond object for this proxy's tag
    def _group(self):
        return self.bdata.getGroupByTag(self.tag)

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        lines = ("typeid : " + str(self.typeid) + "\n",
                 "a : " + str(self.a) + "\n",
                 "b : " + str(self.b) + "\n",
                 "type : " + str(self.type) + "\n")
        return "".join(lines)

    # tag of the first particle in the bond
    @property
    def a(self):
        return self._group().a

    # tag of the second particle in the bond
    @property
    def b(self):
        return self._group().b

    # numeric type id of the bond
    @property
    def typeid(self):
        return self._group().type

    # bond type name, resolved from the numeric type id
    @property
    def type(self):
        return self.bdata.getNameByType(self._group().type)
## \internal
# \brief Access constraint data
#
# constraint_data provides access to the distance constraints in the system.
# This documentation is intentionally left sparse, see hoomd.data for a full explanation of how to use
# constraint_data, documented by example.
#
class constraint_data(hoomd.meta._metadata):
    ## \internal
    # \brief constraint_data iterator
    #
    # NOTE(review): the attribute is named `tag` but it is used as a
    # contiguous index 0..N-1 (it is passed to __getitem__, which
    # translates index -> tag via getNthTag).
    class constraint_data_iterator:
        def __init__(self, data):
            self.data = data;
            self.tag = 0;
        def __iter__(self):
            return self;
        def __next__(self):
            # stop once every constraint has been visited
            if self.tag == len(self.data):
                raise StopIteration;
            result = self.data[self.tag];
            self.tag += 1;
            return result;
        # support python2
        next = __next__;

    ## \internal
    # \brief create a constraint_data
    #
    # \param cdata ConstraintData to connect
    def __init__(self, cdata):
        self.cdata = cdata;

        # base class constructor
        hoomd.meta._metadata.__init__(self)

    ## \internal
    # \brief Add a new distance constraint
    # \param a Tag of the first particle in the constraint
    # \param b Tag of the second particle in the constraint
    # \param d Distance of the constraint to add
    # \returns Unique tag identifying this constraint
    def add(self, a, b, d):
        return self.cdata.addBondedGroup(_hoomd.Constraint(float(d), int(a), int(b)));

    ## \internal
    # \brief Remove a constraint by tag
    # \param tag Unique tag of the constraint to remove
    def remove(self, tag):
        self.cdata.removeBondedGroup(tag);

    ## \var cdata
    # \internal
    # \brief ConstraintData to which this instance is connected

    ## \internal
    # \brief Get a constraint_data_proxy reference to the constraint with contiguous id \a id
    # \param id Constraint id to access
    def __getitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;
        # translate contiguous index -> stable tag before constructing the proxy
        tag = self.cdata.getNthTag(id);
        return constraint_data_proxy(self.cdata, tag);

    ## \internal
    # \brief Get a constraint_data_proxy reference to the constraint with tag \a tag
    # \param tag Constraint tag to access
    # (tags of removed constraints below the maximum tag still pass this check)
    def get(self, tag):
        if tag > self.cdata.getMaximumTag() or tag < 0:
            raise IndexError;
        return constraint_data_proxy(self.cdata, tag);

    ## \internal
    # \brief Set a constraint's properties
    # \param id constraint id to set
    # \param b Value containing properties to set
    def __setitem__(self, id, b):
        raise RuntimeError('Cannot change constraints once they are created');

    ## \internal
    # \brief Delete a constraint by id
    # \param id Constraint id to delete
    def __delitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;
        tag = self.cdata.getNthTag(id);
        self.cdata.removeBondedGroup(tag);

    ## \internal
    # \brief Get the global number of constraints
    def __len__(self):
        return self.cdata.getNGlobal();

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        result = "Constraint Data for %d constraints" % (self.cdata.getNGlobal());
        return result

    ## \internal
    # \brief Return an iterator
    def __iter__(self):
        return constraint_data.constraint_data_iterator(self);

    ## \internal
    # \brief Return metadata for this constraint_data instance
    def get_metadata(self):
        data = hoomd.meta._metadata.get_metadata(self)
        data['N'] = len(self)
        return data
class constraint_data_proxy(object):
    R""" Access a single constraint via a proxy.

    constraint_data_proxy provides access to all of the properties of a single constraint in the system.

    See :py:mod:`hoomd.data` for examples.

    Attributes:
        tag (int): A unique integer attached to each constraint (not in any particular range). A constraint's tag remains fixed
            during its lifetime. (Tags previously used by removed constraints may be recycled).
        d (float): The constraint distance.
        a (int): The tag of the first particle in the constraint.
        b (int): The tag of the second particle in the constraint.
    """

    def __init__(self, cdata, tag):
        # ConstraintData container that owns the constraint, plus its tag
        self.cdata = cdata;
        self.tag = tag;

    ## \internal
    # \brief Informal string: one "name : value" line per attribute
    def __str__(self):
        fields = (("a", self.a), ("b", self.b), ("d", self.d))
        return "".join("%s : %s\n" % (name, value) for name, value in fields)

    @property
    def a(self):
        # tag of the first particle in the constraint
        return self.cdata.getGroupByTag(self.tag).a

    @property
    def b(self):
        # tag of the second particle in the constraint
        return self.cdata.getGroupByTag(self.tag).b

    @property
    def d(self):
        # constraint distance
        return self.cdata.getGroupByTag(self.tag).d
## \internal
# \brief Access angle data
#
# angle_data provides access to the angles in the system.
# This documentation is intentionally left sparse, see hoomd.data for a full explanation of how to use
# angle_data, documented by example.
#
class angle_data(hoomd.meta._metadata):
    ## \internal
    # \brief angle_data iterator
    class angle_data_iterator:
        def __init__(self, data):
            self.data = data;
            self.index = 0;
        def __iter__(self):
            return self;
        def __next__(self):
            if self.index == len(self.data):
                raise StopIteration;

            result = self.data[self.index];
            self.index += 1;
            return result;

        # support python2
        next = __next__;

    ## \internal
    # \brief create a angle_data
    #
    # \param adata AngleData to connect
    def __init__(self, adata):
        self.adata = adata;

        # base class constructor
        hoomd.meta._metadata.__init__(self)

    ## \internal
    # \brief Add a new angle
    # \param type Type name of the angle to add
    # \param a Tag of the first particle in the angle
    # \param b Tag of the second particle in the angle
    # \param c Tag of the third particle in the angle
    # \returns Unique tag identifying this angle
    def add(self, type, a, b, c):
        # resolve the type name to its numeric id before constructing the angle
        typeid = self.adata.getTypeByName(type);
        return self.adata.addBondedGroup(_hoomd.Angle(typeid, int(a), int(b), int(c)));

    ## \internal
    # \brief Remove an angle by tag
    # \param tag Unique tag of the angle to remove
    def remove(self, tag):
        self.adata.removeBondedGroup(tag);

    ## \var adata
    # \internal
    # \brief AngleData to which this instance is connected

    ## \internal
    # \brief Get an angle_data_proxy reference to the angle with contiguous id \a id
    # \param id Angle id to access
    def __getitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;
        tag = self.adata.getNthTag(id);
        return angle_data_proxy(self.adata, tag);

    ## \internal
    # \brief Get a angle_data_proxy reference to the angle with tag \a tag
    # \param tag Angle tag to access
    def get(self, tag):
        if tag > self.adata.getMaximumTag() or tag < 0:
            raise IndexError;

        return angle_data_proxy(self.adata, tag);

    ## \internal
    # \brief Set an angle's properties
    # \param id Angle id to set
    # \param b Value containing properties to set
    def __setitem__(self, id, b):
        raise RuntimeError('Cannot change angles once they are created');

    ## \internal
    # \brief Delete an angle by id
    # \param id Angle id to delete
    def __delitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;

        # Get the tag of the angle to delete
        tag = self.adata.getNthTag(id);
        self.adata.removeBondedGroup(tag);

    ## \internal
    # \brief Get the number of angles
    def __len__(self):
        return self.adata.getNGlobal();

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        result = "Angle Data for %d angles of %d typeid(s)" % (self.adata.getNGlobal(), self.adata.getNTypes());
        return result;

    ## \internal
    # \brief Return an iterator
    def __iter__(self):
        return angle_data.angle_data_iterator(self);

    ## \internal
    # \brief Return metadata for this angle_data instance
    def get_metadata(self):
        data = hoomd.meta._metadata.get_metadata(self)
        data['N'] = len(self)
        data['types'] = [self.adata.getNameByType(i) for i in range(self.adata.getNTypes())];
        return data
class angle_data_proxy(object):
    R""" Access a single angle via a proxy.

    angle_data_proxy provides access to all of the properties of a single angle in the system.

    See :py:mod:`hoomd.data` for examples.

    Attributes:
        tag (int): A unique integer attached to each angle (not in any particular range). A angle's tag remains fixed
            during its lifetime. (Tags previously used by removed angles may be recycled).
        typeid (int): Type id of the angle.
        a (int): The tag of the first particle in the angle.
        b (int): The tag of the second particle in the angle.
        c (int): The tag of the third particle in the angle.
        type (str): angle type name.

        In the current version of the API, only already defined type names can be used. A future improvement will allow
        dynamic creation of new type names from within the python API.
    """

    def __init__(self, adata, tag):
        # AngleData container that owns the angle, plus its unique tag
        self.adata = adata;
        self.tag = tag;

    ## \internal
    # \brief Informal string: one "name : value" line per attribute
    def __str__(self):
        fields = (("tag", self.tag), ("typeid", self.typeid), ("a", self.a),
                  ("b", self.b), ("c", self.c), ("type", self.type))
        return "".join("%s : %s\n" % (name, value) for name, value in fields)

    @property
    def a(self):
        # tag of the first particle in the angle
        return self.adata.getGroupByTag(self.tag).a

    @property
    def b(self):
        # tag of the second (central) particle in the angle
        return self.adata.getGroupByTag(self.tag).b

    @property
    def c(self):
        # tag of the third particle in the angle
        return self.adata.getGroupByTag(self.tag).c

    @property
    def typeid(self):
        # numeric type id of the angle
        return self.adata.getGroupByTag(self.tag).type

    @property
    def type(self):
        # map the numeric type id back to its name
        return self.adata.getNameByType(self.typeid)
## \internal
# \brief Access dihedral data
#
# dihedral_data provides access to the dihedrals in the system.
# This documentation is intentionally left sparse, see hoomd.data for a full explanation of how to use
# dihedral_data, documented by example.
#
class dihedral_data(hoomd.meta._metadata):
    ## \internal
    # \brief dihedral_data iterator
    class dihedral_data_iterator:
        def __init__(self, data):
            self.data = data;
            self.index = 0;
        def __iter__(self):
            return self;
        def __next__(self):
            if self.index == len(self.data):
                raise StopIteration;

            result = self.data[self.index];
            self.index += 1;
            return result;

        # support python2
        next = __next__;

    ## \internal
    # \brief create a dihedral_data
    #
    # \param ddata DihedralData to connect
    def __init__(self, ddata):
        self.ddata = ddata;

        # base class constructor
        hoomd.meta._metadata.__init__(self)

    ## \internal
    # \brief Add a new dihedral
    # \param type Type name of the dihedral to add
    # \param a Tag of the first particle in the dihedral
    # \param b Tag of the second particle in the dihedral
    # \param c Tag of the third particle in the dihedral
    # \param d Tag of the fourth particle in the dihedral
    # \returns Unique tag identifying this dihedral
    def add(self, type, a, b, c, d):
        # resolve the type name to its numeric id before constructing the dihedral
        typeid = self.ddata.getTypeByName(type);
        return self.ddata.addBondedGroup(_hoomd.Dihedral(typeid, int(a), int(b), int(c), int(d)));

    ## \internal
    # \brief Remove a dihedral by tag
    # \param tag Unique tag of the dihedral to remove
    def remove(self, tag):
        self.ddata.removeBondedGroup(tag);

    ## \var ddata
    # \internal
    # \brief DihedralData to which this instance is connected

    ## \internal
    # \brief Get a dihedral_data_proxy reference to the dihedral with contiguous id \a id
    # \param id Dihedral id to access
    def __getitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;
        tag = self.ddata.getNthTag(id);
        return dihedral_data_proxy(self.ddata, tag);

    ## \internal
    # \brief Get a dihedral_data_proxy reference to the dihedral with tag \a tag
    # \param tag Dihedral tag to access
    def get(self, tag):
        if tag > self.ddata.getMaximumTag() or tag < 0:
            raise IndexError;

        return dihedral_data_proxy(self.ddata, tag);

    ## \internal
    # \brief Set a dihedral's properties
    # \param id dihedral id to set
    # \param b Value containing properties to set
    def __setitem__(self, id, b):
        # fix: the original message said "angles" (copy/paste from angle_data)
        raise RuntimeError('Cannot change dihedrals once they are created');

    ## \internal
    # \brief Delete a dihedral by id
    # \param id Dihedral id to delete
    def __delitem__(self, id):
        if id >= len(self) or id < 0:
            raise IndexError;

        # Get the tag of the dihedral to delete
        tag = self.ddata.getNthTag(id);
        self.ddata.removeBondedGroup(tag);

    ## \internal
    # \brief Get the number of dihedrals
    def __len__(self):
        return self.ddata.getNGlobal();

    ## \internal
    # \brief Get an informal string representing the object
    def __str__(self):
        # fix: the original message reported "angles" for dihedral data
        result = "Dihedral Data for %d dihedrals of %d typeid(s)" % (self.ddata.getNGlobal(), self.ddata.getNTypes());
        return result;

    ## \internal
    # \brief Return an iterator
    def __iter__(self):
        return dihedral_data.dihedral_data_iterator(self);

    ## \internal
    # \brief Return metadata for this dihedral_data instance
    def get_metadata(self):
        data = hoomd.meta._metadata.get_metadata(self)
        data['N'] = len(self)
        data['types'] = [self.ddata.getNameByType(i) for i in range(self.ddata.getNTypes())];
        return data
class dihedral_data_proxy(object):
    R""" Access a single dihedral via a proxy.

    dihedral_data_proxy provides access to all of the properties of a single dihedral in the system.

    See :py:mod:`hoomd.data` for examples.

    Attributes:
        tag (int): A unique integer attached to each dihedral (not in any particular range). A dihedral's tag remains fixed
            during its lifetime. (Tags previously used by removed dihedrals may be recycled).
        typeid (int): Type id of the dihedral.
        a (int): The tag of the first particle in the dihedral.
        b (int): The tag of the second particle in the dihedral.
        c (int): The tag of the third particle in the dihedral.
        d (int): The tag of the fourth particle in the dihedral.
        type (str): dihedral type name.

        In the current version of the API, only already defined type names can be used. A future improvement will allow
        dynamic creation of new type names from within the python API.
    """

    def __init__(self, ddata, tag):
        # DihedralData container that owns the dihedral, plus its unique tag
        self.ddata = ddata;
        self.tag = tag;

    ## \internal
    # \brief Informal string: one "name : value" line per attribute
    def __str__(self):
        fields = (("tag", self.tag), ("typeid", self.typeid), ("a", self.a),
                  ("b", self.b), ("c", self.c), ("d", self.d), ("type", self.type))
        return "".join("%s : %s\n" % (name, value) for name, value in fields)

    @property
    def a(self):
        # tag of the first particle in the dihedral
        return self.ddata.getGroupByTag(self.tag).a

    @property
    def b(self):
        # tag of the second particle in the dihedral
        return self.ddata.getGroupByTag(self.tag).b

    @property
    def c(self):
        # tag of the third particle in the dihedral
        return self.ddata.getGroupByTag(self.tag).c

    @property
    def d(self):
        # tag of the fourth particle in the dihedral
        return self.ddata.getGroupByTag(self.tag).d

    @property
    def typeid(self):
        # numeric type id of the dihedral
        return self.ddata.getGroupByTag(self.tag).type

    @property
    def type(self):
        # map the numeric type id back to its name
        return self.ddata.getNameByType(self.typeid)
## \internal
# \brief Get data.boxdim from a SnapshotSystemData
def get_snapshot_box(snapshot):
    """Build a :py:class:`boxdim` from the global box stored in *snapshot*."""
    global_box = snapshot._global_box;
    edge = global_box.getL();
    return boxdim(Lx=edge.x, Ly=edge.y, Lz=edge.z,
                  xy=global_box.getTiltFactorXY(),
                  xz=global_box.getTiltFactorXZ(),
                  yz=global_box.getTiltFactorYZ(),
                  dimensions=snapshot._dimensions);
## \internal
# \brief Set data.boxdim to a SnapshotSystemData
def set_snapshot_box(snapshot, box):
    """Store the :py:class:`boxdim` *box* into *snapshot*'s global box."""
    # the two assignments are independent; convert the boxdim last
    snapshot._dimensions = box.dimensions;
    snapshot._global_box = box._getBoxDim();
## \internal
# \brief Broadcast snapshot to all ranks
def broadcast_snapshot(cpp_snapshot):
    """Broadcast the particle data held on rank 0 of *cpp_snapshot* to all MPI ranks."""
    hoomd.context._verify_init();
    hoomd.util.print_status_line();

    # broadcast from rank 0
    cpp_snapshot._broadcast(0, hoomd.context.exec_conf);
## \internal
# \brief Broadcast snapshot to all ranks
def broadcast_snapshot_all(cpp_snapshot):
    """Broadcast the complete snapshot held on rank 0 of *cpp_snapshot* to all MPI ranks."""
    hoomd.context._verify_init();
    hoomd.util.print_status_line();

    # broadcast from rank 0
    cpp_snapshot._broadcast_all(0, hoomd.context.exec_conf);
# Inject a box property into SnapshotSystemData that provides and accepts boxdim objects.
# Both the float and double precision C++ snapshot types get the same Python-level API.
_hoomd.SnapshotSystemData_float.box = property(get_snapshot_box, set_snapshot_box);
_hoomd.SnapshotSystemData_double.box = property(get_snapshot_box, set_snapshot_box);

# Inject broadcast methods into SnapshotSystemData
_hoomd.SnapshotSystemData_float.broadcast = broadcast_snapshot
_hoomd.SnapshotSystemData_double.broadcast = broadcast_snapshot
_hoomd.SnapshotSystemData_float.broadcast_all = broadcast_snapshot_all
_hoomd.SnapshotSystemData_double.broadcast_all = broadcast_snapshot_all
def make_snapshot(N, box, particle_types=['A'], bond_types=[], angle_types=[], dihedral_types=[], improper_types=[], pair_types=[], dtype='float'):
    R""" Make an empty snapshot.

    Args:
        N (int): Number of particles to create.
        box (:py:class:`hoomd.data.boxdim`): Simulation box parameters.
        particle_types (list): Particle type names (must not be zero length).
        bond_types (list): Bond type names (may be zero length).
        angle_types (list): Angle type names (may be zero length).
        dihedral_types (list): Dihedral type names (may be zero length).
        improper_types (list): Improper type names (may be zero length).
        pair_types(list): Special pair type names (may be zero length).

            .. versionadded:: 2.1

        dtype (str): Data type for the real valued numpy arrays in the snapshot. Must be either 'float' or 'double'.

    Examples::

        snapshot = data.make_snapshot(N=1000, box=data.boxdim(L=10))
        snapshot = data.make_snapshot(N=64000, box=data.boxdim(L=1, dimensions=2, volume=1000), particle_types=['A', 'B'])
        snapshot = data.make_snapshot(N=64000, box=data.boxdim(L=20), bond_types=['polymer'], dihedral_types=['dihedralA', 'dihedralB'], improper_types=['improperA', 'improperB', 'improperC'])
        ... set properties in snapshot ...
        init.read_snapshot(snapshot);

    :py:func:`hoomd.data.make_snapshot()` creates all particles with **default properties**. You must set reasonable
    values for particle properties before initializing the system with :py:func:`hoomd.init.read_snapshot()`.

    The default properties are:

    * position 0,0,0
    * velocity 0,0,0
    * image 0,0,0
    * orientation 1,0,0,0
    * typeid 0
    * charge 0
    * mass 1.0
    * diameter 1.0

    See Also:
        :py:func:`hoomd.init.read_snapshot()`
    """
    if dtype == 'float':
        snapshot = _hoomd.SnapshotSystemData_float();
    elif dtype == 'double':
        snapshot = _hoomd.SnapshotSystemData_double();
    else:
        raise ValueError("dtype must be either float or double");

    # the docstring requires at least one particle type; fail early with a
    # clear message instead of failing later during system initialization
    if len(particle_types) == 0:
        raise ValueError("particle_types must not be zero length");

    snapshot.box = box;
    # particle arrays only exist on rank 0; other ranks receive a broadcast later
    if hoomd.comm.get_rank() == 0:
        snapshot.particles.resize(N);

    snapshot.particles.types = particle_types;
    snapshot.bonds.types = bond_types;
    snapshot.angles.types = angle_types;
    snapshot.dihedrals.types = dihedral_types;
    snapshot.impropers.types = improper_types;
    snapshot.pairs.types = pair_types;

    return snapshot;
def gsd_snapshot(filename, frame=0):
    R""" Read a snapshot from a GSD file.

    Args:
        filename (str): GSD file to read the snapshot from.
        frame (int): Frame to read from the GSD file. Negative values index from the end of the file.

    :py:func:`hoomd.data.gsd_snapshot()` opens the given GSD file and reads a snapshot from it.
    """
    hoomd.context._verify_init();

    # abs(frame) plus the from-end flag (frame < 0) implements negative indexing
    reader = _hoomd.GSDReader(hoomd.context.exec_conf, filename, abs(frame), frame < 0);
    return reader.getSnapshot();
# Note: SnapshotParticleData should never be instantiated, it is a placeholder to generate sphinx documentation,
# as the real SnapshotParticleData lives in c++.
class SnapshotParticleData:
    R""" Snapshot of particle data properties.

    Users should not create SnapshotParticleData directly. Use :py:func:`hoomd.data.make_snapshot()`
    or :py:meth:`hoomd.data.system_data.take_snapshot()` to make snapshots.

    Attributes:
        N (int): Number of particles in the snapshot
        types (list): List of string type names (assignable)
        position (numpy.ndarray): (Nx3) numpy array containing the position of each particle (float or double)
        orientation (numpy.ndarray): (Nx4) numpy array containing the orientation quaternion of each particle (float or double)
        velocity (numpy.ndarray): (Nx3) numpy array containing the velocity of each particle (float or double)
        acceleration (numpy.ndarray): (Nx3) numpy array containing the acceleration of each particle (float or double)
        typeid (numpy.ndarray): Length N numpy array containing the type id of each particle (32-bit unsigned int)
        mass (numpy.ndarray): Length N numpy array containing the mass of each particle (float or double)
        charge (numpy.ndarray): Length N numpy array containing the charge of each particle (float or double)
        diameter (numpy.ndarray): Length N numpy array containing the diameter of each particle (float or double)
        image (numpy.ndarray): (Nx3) numpy array containing the image of each particle (32-bit int)
        body (numpy.ndarray): Length N numpy array containing the body of each particle (32-bit unsigned int). -1 indicates a free particle, and larger negative numbers indicate floppy bodies.
        moment_inertia (numpy.ndarray): (Nx3) numpy array containing the principal moments of inertia of each particle (float or double)
        angmom (numpy.ndarray): (Nx4) numpy array containing the angular momentum quaternion of each particle (float or double)

    See Also:
        :py:mod:`hoomd.data`
    """

    # This class is a documentation placeholder only: the real implementation
    # lives in the C++ _hoomd module (SnapshotSystemData_*.particles).
    def resize(self, N):
        R""" Resize the snapshot to hold N particles.

        Args:
            N (int): new size of the snapshot.

        :py:meth:`resize()` changes the size of the arrays in the snapshot to hold *N* particles. Existing particle
        properties are preserved after the resize. Any newly created particles will have default values. After resizing,
        existing references to the numpy arrays will be invalid, access them again
        from `snapshot.particles.*`
        """
        pass
| 35.068951 | 192 | 0.633258 |
4cd0a3fb09213bfa3ec2dd29679258926e534793 | 1,818 | py | Python | bloguers/recomendations/migrations/0001_initial.py | CamiloGato/web-empresarial-simple | 5d4aafed7aea1a580c82adfcd2102888aa983522 | [
"Apache-2.0"
] | null | null | null | bloguers/recomendations/migrations/0001_initial.py | CamiloGato/web-empresarial-simple | 5d4aafed7aea1a580c82adfcd2102888aa983522 | [
"Apache-2.0"
] | 4 | 2020-06-06T01:09:35.000Z | 2022-03-12T00:10:37.000Z | bloguers/recomendations/migrations/0001_initial.py | CamiloGato/web-empresarial-simple | 5d4aafed7aea1a580c82adfcd2102888aa983522 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.2 on 2020-01-06 23:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='Categoria')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Fecha de edición')),
],
options={
'verbose_name': 'Categoria',
'verbose_name_plural': 'Categorias',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Recomendation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Nombre')),
('order', models.SmallIntegerField(default=0, verbose_name='Posición')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Fecha de edición')),
('categories', models.ManyToManyField(related_name='get_category', to='recomendations.Category', verbose_name='Categorias')),
],
options={
'verbose_name': 'Recomendación',
'verbose_name_plural': 'Recomendaciones',
'ordering': ['-order'],
},
),
]
| 40.4 | 141 | 0.573157 |
edc872431158cbe21611262d6c1be1dbca72ebbe | 1,606 | py | Python | profiles_api/serializers.py | nomadbard916/beginner-django-rest-api | ab1169b1c0dcfa860f0cfb45ced34b39df717e15 | [
"MIT"
] | null | null | null | profiles_api/serializers.py | nomadbard916/beginner-django-rest-api | ab1169b1c0dcfa860f0cfb45ced34b39df717e15 | [
"MIT"
] | 7 | 2020-06-06T01:56:25.000Z | 2022-02-10T11:44:24.000Z | profiles_api/serializers.py | nomadbard916/beginner-django-rest-api | ab1169b1c0dcfa860f0cfb45ced34b39df717e15 | [
"MIT"
] | null | null | null | from rest_framework import serializers
# from rest_framework.serializers import ModelSerializer
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our APIView"""
    # short free-form name; max_length keeps test payloads small
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes a user profile object"""

    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # never echo the password back; render it as a password input in forms
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {'input_type': 'password'}
            }
        }

    def create(self, validated_data):
        """Create and return a new user"""
        # create_user hashes the password rather than storing it verbatim
        return models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password']
        )

    # bug in profile serializer, see the teacher's instructions in EP46
    def update(self, instance, validated_data):
        """Handle updating user account"""
        # route password changes through set_password so they get hashed
        if 'password' in validated_data:
            instance.set_password(validated_data.pop('password'))
        return super().update(instance, validated_data)
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """Serializers profile feed items"""

    class Meta:
        model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text', 'created_on')
        # user_profile is set from the authenticated request, never by the client
        extra_kwargs = {'user_profile': {'read_only': True}}
| 30.884615 | 71 | 0.643836 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.