hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4030e385e2e10be1ee444a3a75529c03c39c75
| 182
|
py
|
Python
|
om.metaL.py
|
ponyatov/om
|
e972b566537dd168e506a3a42f2fb10c5f697c17
|
[
"MIT"
] | null | null | null |
om.metaL.py
|
ponyatov/om
|
e972b566537dd168e506a3a42f2fb10c5f697c17
|
[
"MIT"
] | null | null | null |
om.metaL.py
|
ponyatov/om
|
e972b566537dd168e506a3a42f2fb10c5f697c17
|
[
"MIT"
] | null | null | null |
## @file
## @brief meta: Object Memory (Smalltalk-like)
from metaL import *
p = Project(
title='''Object Memory (Smalltalk-like)''',
about='''''') \
| Rust()
p.sync()
| 15.166667
| 47
| 0.576923
|
ject Memory (Smalltalk-like)''',
about='''''') \
| Rust()
p.sync()
| true
| true
|
1c4030fea59d53e9d76e40551c3902b116fe92cb
| 212
|
py
|
Python
|
hard-gists/1943426/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/1943426/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/1943426/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
from django.template.loader import add_to_builtins
add_to_builtins('django.contrib.staticfiles.templatetags.staticfiles')
add_to_builtins('django.templatetags.i18n')
add_to_builtins('django.templatetags.future')
| 42.4
| 70
| 0.863208
|
from django.template.loader import add_to_builtins
add_to_builtins('django.contrib.staticfiles.templatetags.staticfiles')
add_to_builtins('django.templatetags.i18n')
add_to_builtins('django.templatetags.future')
| true
| true
|
1c4031fbd2d9af86d329c35827ec5a14f555759a
| 5,152
|
py
|
Python
|
beer/harvester/harvester.py
|
petrus-hanks/flax-blockchain
|
6e180dc84ca24c757555c9947f44bd724b1af3eb
|
[
"Apache-2.0"
] | null | null | null |
beer/harvester/harvester.py
|
petrus-hanks/flax-blockchain
|
6e180dc84ca24c757555c9947f44bd724b1af3eb
|
[
"Apache-2.0"
] | null | null | null |
beer/harvester/harvester.py
|
petrus-hanks/flax-blockchain
|
6e180dc84ca24c757555c9947f44bd724b1af3eb
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import concurrent
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Callable, Dict, List, Optional, Set, Tuple
from blspy import G1Element
import beer.server.ws_connection as ws # lgtm [py/import-and-import-from]
from beer.consensus.constants import ConsensusConstants
from beer.plotting.plot_tools import PlotInfo
from beer.plotting.plot_tools import add_plot_directory as add_plot_directory_pt
from beer.plotting.plot_tools import get_plot_directories as get_plot_directories_pt
from beer.plotting.plot_tools import load_plots
from beer.plotting.plot_tools import remove_plot_directory as remove_plot_directory_pt
log = logging.getLogger(__name__)
class Harvester:
provers: Dict[Path, PlotInfo]
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
root_path: Path
_is_shutdown: bool
executor: ThreadPoolExecutor
state_changed_callback: Optional[Callable]
cached_challenges: List
constants: ConsensusConstants
_refresh_lock: asyncio.Lock
def __init__(self, root_path: Path, config: Dict, constants: ConsensusConstants):
self.root_path = root_path
# From filename to prover
self.provers = {}
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self._is_shutdown = False
self.farmer_public_keys = []
self.pool_public_keys = []
self.match_str = None
self.show_memo: bool = False
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=config["num_threads"])
self.state_changed_callback = None
self.server = None
self.constants = constants
self.cached_challenges = []
self.log = log
self.state_changed_callback: Optional[Callable] = None
self.last_load_time: float = 0
self.plot_load_frequency = config.get("plot_loading_frequency_seconds", 120)
async def _start(self):
self._refresh_lock = asyncio.Lock()
def _close(self):
self._is_shutdown = True
self.executor.shutdown(wait=True)
async def _await_closed(self):
pass
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
def on_disconnect(self, connection: ws.WSBeerConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self._state_changed("close_connection")
def get_plots(self) -> Tuple[List[Dict], List[str], List[str]]:
response_plots: List[Dict] = []
for path, plot_info in self.provers.items():
prover = plot_info.prover
response_plots.append(
{
"filename": str(path),
"size": prover.get_size(),
"plot-seed": prover.get_id(),
"pool_public_key": plot_info.pool_public_key,
"pool_contract_puzzle_hash": plot_info.pool_contract_puzzle_hash,
"plot_public_key": plot_info.plot_public_key,
"file_size": plot_info.file_size,
"time_modified": plot_info.time_modified,
}
)
return (
response_plots,
[str(s) for s, _ in self.failed_to_open_filenames.items()],
[str(s) for s in self.no_key_filenames],
)
async def refresh_plots(self):
locked: bool = self._refresh_lock.locked()
changed: bool = False
if not locked:
async with self._refresh_lock:
# Avoid double refreshing of plots
(changed, self.provers, self.failed_to_open_filenames, self.no_key_filenames,) = load_plots(
self.provers,
self.failed_to_open_filenames,
self.farmer_public_keys,
self.pool_public_keys,
self.match_str,
self.show_memo,
self.root_path,
)
if changed:
self._state_changed("plots")
def delete_plot(self, str_path: str):
path = Path(str_path).resolve()
if path in self.provers:
del self.provers[path]
# Remove absolute and relative paths
if path.exists():
path.unlink()
self._state_changed("plots")
return True
async def add_plot_directory(self, str_path: str) -> bool:
add_plot_directory_pt(str_path, self.root_path)
await self.refresh_plots()
return True
async def get_plot_directories(self) -> List[str]:
return get_plot_directories_pt(self.root_path)
async def remove_plot_directory(self, str_path: str) -> bool:
remove_plot_directory_pt(str_path, self.root_path)
return True
def set_server(self, server):
self.server = server
| 35.287671
| 108
| 0.650427
|
import asyncio
import concurrent
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Callable, Dict, List, Optional, Set, Tuple
from blspy import G1Element
import beer.server.ws_connection as ws
from beer.consensus.constants import ConsensusConstants
from beer.plotting.plot_tools import PlotInfo
from beer.plotting.plot_tools import add_plot_directory as add_plot_directory_pt
from beer.plotting.plot_tools import get_plot_directories as get_plot_directories_pt
from beer.plotting.plot_tools import load_plots
from beer.plotting.plot_tools import remove_plot_directory as remove_plot_directory_pt
log = logging.getLogger(__name__)
class Harvester:
provers: Dict[Path, PlotInfo]
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
root_path: Path
_is_shutdown: bool
executor: ThreadPoolExecutor
state_changed_callback: Optional[Callable]
cached_challenges: List
constants: ConsensusConstants
_refresh_lock: asyncio.Lock
def __init__(self, root_path: Path, config: Dict, constants: ConsensusConstants):
self.root_path = root_path
self.provers = {}
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self._is_shutdown = False
self.farmer_public_keys = []
self.pool_public_keys = []
self.match_str = None
self.show_memo: bool = False
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=config["num_threads"])
self.state_changed_callback = None
self.server = None
self.constants = constants
self.cached_challenges = []
self.log = log
self.state_changed_callback: Optional[Callable] = None
self.last_load_time: float = 0
self.plot_load_frequency = config.get("plot_loading_frequency_seconds", 120)
async def _start(self):
self._refresh_lock = asyncio.Lock()
def _close(self):
self._is_shutdown = True
self.executor.shutdown(wait=True)
async def _await_closed(self):
pass
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
def on_disconnect(self, connection: ws.WSBeerConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self._state_changed("close_connection")
def get_plots(self) -> Tuple[List[Dict], List[str], List[str]]:
response_plots: List[Dict] = []
for path, plot_info in self.provers.items():
prover = plot_info.prover
response_plots.append(
{
"filename": str(path),
"size": prover.get_size(),
"plot-seed": prover.get_id(),
"pool_public_key": plot_info.pool_public_key,
"pool_contract_puzzle_hash": plot_info.pool_contract_puzzle_hash,
"plot_public_key": plot_info.plot_public_key,
"file_size": plot_info.file_size,
"time_modified": plot_info.time_modified,
}
)
return (
response_plots,
[str(s) for s, _ in self.failed_to_open_filenames.items()],
[str(s) for s in self.no_key_filenames],
)
async def refresh_plots(self):
locked: bool = self._refresh_lock.locked()
changed: bool = False
if not locked:
async with self._refresh_lock:
(changed, self.provers, self.failed_to_open_filenames, self.no_key_filenames,) = load_plots(
self.provers,
self.failed_to_open_filenames,
self.farmer_public_keys,
self.pool_public_keys,
self.match_str,
self.show_memo,
self.root_path,
)
if changed:
self._state_changed("plots")
def delete_plot(self, str_path: str):
path = Path(str_path).resolve()
if path in self.provers:
del self.provers[path]
if path.exists():
path.unlink()
self._state_changed("plots")
return True
async def add_plot_directory(self, str_path: str) -> bool:
add_plot_directory_pt(str_path, self.root_path)
await self.refresh_plots()
return True
async def get_plot_directories(self) -> List[str]:
return get_plot_directories_pt(self.root_path)
async def remove_plot_directory(self, str_path: str) -> bool:
remove_plot_directory_pt(str_path, self.root_path)
return True
def set_server(self, server):
self.server = server
| true
| true
|
1c403211e74cfa993b839c5d9ed606a973736e18
| 57,718
|
py
|
Python
|
test/functional/feature_block.py
|
TransFastCore/pivx533
|
b2168d6c2b447c9bf9c7175ffdfc8342b2861179
|
[
"MIT"
] | 15
|
2019-08-28T13:34:30.000Z
|
2021-12-15T22:01:08.000Z
|
test/functional/feature_block.py
|
TransFastCore/pivx533
|
b2168d6c2b447c9bf9c7175ffdfc8342b2861179
|
[
"MIT"
] | 9
|
2019-07-17T22:42:46.000Z
|
2022-03-02T12:41:27.000Z
|
test/functional/feature_block.py
|
TransFastCore/pivx533
|
b2168d6c2b447c9bf9c7175ffdfc8342b2861179
|
[
"MIT"
] | 13
|
2019-06-30T22:44:30.000Z
|
2022-02-19T16:07:54.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Copyright (c) 2020-2021 The TrumpCoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing."""
import copy
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction, get_legacy_sigopcount_block
from test_framework.key import CECKey
from test_framework.messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
uint256_from_compact,
uint256_from_str,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_FALSE,
OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SignatureHash,
hash160,
)
from test_framework.test_framework import TrumpCoinTestFramework
from test_framework.util import assert_equal
MAX_BLOCK_SIGOPS = MAX_BLOCK_BASE_SIZE // 50
class PreviousSpendableOutput():
def __init__(self, tx=CTransaction(), n=-1):
self.tx = tx
self.n = n # the output we're spending
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
return super().serialize()
class FullBlockTest(TrumpCoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-acceptnonstdtxn=1"]] # This is a consensus block test, we don't care about tx policy
def run_test(self):
node = self.nodes[0] # convenience reference to the node
# reconnect_p2p() expects the network thread to be running
self.log.info("Starting network thread...")
self.reconnect_p2p()
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
self.spendable_outputs = []
# Create a new block
self.log.info("Creating and sending a block...")
b0 = self.next_block(0)
self.save_spendable_output()
self.send_blocks([b0])
# Allow the block to mature
self.log.info("Maturing the block...")
blocks = []
for i in range(99):
blocks.append(self.next_block(4000 + i))
self.save_spendable_output()
self.log.info("Sending all new blocks")
self.send_blocks(blocks)
# collect spendable outputs now to avoid cluttering the code later on
self.log.info("Collect spendable outputs...")
out = []
for i in range(33):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
self.log.info("Build b1 and b2")
b1 = self.next_block(1, spend=out[0])
self.save_spendable_output()
b2 = self.next_block(2, spend=out[1])
self.save_spendable_output()
self.send_blocks([b1, b2])
# Fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
self.log.info("Don't reorg to a chain of the same length")
self.move_tip(1)
b3 = self.next_block(3, spend=out[1])
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
self.send_blocks([b3], False)
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
self.log.info("Reorg to a longer chain")
b4 = self.next_block(4, spend=out[2])
self.send_blocks([b4])
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
self.move_tip(2)
b5 = self.next_block(5, spend=out[2])
self.save_spendable_output()
self.send_blocks([b5], False)
self.log.info("Reorg back to the original chain")
b6 = self.next_block(6, spend=out[3])
self.send_blocks([b6], True)
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain with a double spend, even if it is longer")
self.move_tip(5)
b7 = self.next_block(7, spend=out[2])
self.send_blocks([b7], False)
b8 = self.next_block(8, spend=out[4])
self.send_blocks([b8], False, reconnect=True)
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block where the miner creates too much coinbase reward")
self.move_tip(6)
b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
self.send_blocks([b9], False, "bad-blk-amount", reconnect=True)
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
self.move_tip(5)
b10 = self.next_block(10, spend=out[3])
self.send_blocks([b10], False)
b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
self.send_blocks([b11], False, "bad-blk-amount", reconnect=True)
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
self.move_tip(5)
b12 = self.next_block(12, spend=out[3])
self.save_spendable_output()
b13 = self.next_block(13, spend=out[4])
self.save_spendable_output()
b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
self.send_blocks([b12, b13, b14], False, "bad-blk-amount", reconnect=True)
# New tip should be b13.
assert_equal(node.getbestblockhash(), b13.hash)
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block with lots of checksigs")
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
self.move_tip(13)
b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
self.save_spendable_output()
self.send_blocks([b15], True)
self.log.info("Reject a block with too many checksigs")
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
self.send_blocks([b16], False, "bad-blk-sigops", reconnect=True)
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx")
self.move_tip(15)
b17 = self.next_block(17, spend=txout_b3)
self.send_blocks([b17], False, "bad-txns-inputs-missingorspent", reconnect=True)
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
self.move_tip(13)
b18 = self.next_block(18, spend=txout_b3)
self.send_blocks([b18], False)
b19 = self.next_block(19, spend=out[6])
self.send_blocks([b19], False, "bad-txns-inputs-missingorspent", reconnect=True)
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase.")
self.move_tip(15)
b20 = self.next_block(20, spend=out[7])
self.send_blocks([b20], False, "bad-txns-premature-spend-of-coinbase")
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase (on a forked chain)")
self.move_tip(13)
b21 = self.next_block(21, spend=out[6])
self.send_blocks([b21], False)
b22 = self.next_block(22, spend=out[5])
self.send_blocks([b22], False, "bad-txns-premature-spend-of-coinbase")
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
self.move_tip(15)
height = self.block_heights[self.tip.sha256] + 1
b23 = self.next_block(23)
b23 = self.create_sized_block(b23, out[6], MAX_BLOCK_BASE_SIZE)
self.block_heights[b23.sha256] = height
self.blocks[23] = b23
self.send_blocks([b23], True)
self.save_spendable_output()
self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 2")
self.move_tip(15)
b24 = self.next_block(24)
b24 = self.create_sized_block(b24, out[6], MAX_BLOCK_BASE_SIZE + 2)
self.block_heights[b24.sha256] = height
self.blocks[24] = b24
self.send_blocks([b24], False, "bad-blk-length", reconnect=True)
b25 = self.next_block(25, spend=out[7])
self.send_blocks([b25], False)
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with coinbase input script size out of range")
self.move_tip(15)
b26 = self.next_block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = self.update_block(26, [])
self.send_blocks([b26], False, "bad-cb-length", reconnect=True)
# Extend the b26 chain to make sure trumpcoind isn't accepting b26
b27 = self.next_block(27, spend=out[7])
self.send_blocks([b27], False)
# Now try a too-large-coinbase script
self.move_tip(15)
b28 = self.next_block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig += b'\x00' * 151
b28.vtx[0].rehash()
b28 = self.update_block(28, [])
self.send_blocks([b28], False, "bad-cb-length", reconnect=True)
# Extend the b28 chain to make sure trumpcoind isn't accepting b28
b29 = self.next_block(29, spend=out[7])
self.send_blocks([b29], False)
# b30
self.move_tip(23)
b30 = self.next_block(30)
b30 = self.update_block(30, [])
self.send_blocks([b30], True)
self.save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops")
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
self.send_blocks([b31], True)
self.save_spendable_output()
# this goes over the limit because the coinbase has one sigop
self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = self.next_block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
self.send_blocks([b32], False, "bad-blk-sigops", reconnect=True)
# CHECKMULTISIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops")
self.move_tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs)
self.send_blocks([b33], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
b34 = self.next_block(34, spend=out[10], script=too_many_multisigs)
self.send_blocks([b34], False, "bad-blk-sigops", reconnect=True)
# CHECKSIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops")
self.move_tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs)
self.send_blocks([b35], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops")
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
b36 = self.next_block(36, spend=out[11], script=too_many_checksigs)
self.send_blocks([b36], False, "bad-blk-sigops", reconnect=True)
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
self.log.info("Reject a block spending transaction from a block which failed to connect")
self.move_tip(35)
b37 = self.next_block(37, spend=out[11])
txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
tx = self.create_and_sign_transaction(out[11].tx, out[11].n, 0)
b37 = self.update_block(37, [tx])
self.send_blocks([b37], False, "bad-txns-inputs-missingorspent", reconnect=True)
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
self.move_tip(35)
b38 = self.next_block(38, spend=txout_b37)
self.send_blocks([b38], False, "bad-txns-inputs-missingorspent", reconnect=True)
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
self.log.info("Check P2SH SIGOPS are correctly counted")
self.move_tip(35)
b39 = self.next_block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = self.create_tx(spend.tx, spend.n, 1, p2sh_script)
tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend.tx, spend.n)
tx.rehash()
b39 = self.update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size = len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = self.update_block(39, [])
self.send_blocks([b39], True)
self.save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
self.log.info("Reject a block with too many P2SH sigops")
self.move_tip(39)
b40 = self.next_block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes + 1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
self.update_block(40, new_txs)
self.send_blocks([b40], False, "bad-blk-sigops", reconnect=True)
# same as b40, but one less sigop
self.log.info("Accept a block with the max number of P2SH sigops")
self.move_tip(39)
b41 = self.next_block(41, spend=None)
self.update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
self.update_block(41, [tx])
self.send_blocks([b41], True)
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
self.move_tip(39)
b42 = self.next_block(42, spend=out[12])
self.save_spendable_output()
b43 = self.next_block(43, spend=out[13])
self.save_spendable_output()
self.send_blocks([b42, b43], True)
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
self.log.info("Build block 44 manually")
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
self.send_blocks([b44], True)
self.log.info("Reject a block with a non-coinbase as the first tx")
non_coinbase = self.create_tx(out[15].tx, out[15].n, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
self.tip = b45
self.blocks[45] = b45
self.send_blocks([b45], False, "bad-cb-missing", reconnect=True)
self.log.info("Reject a block with no transactions")
self.move_tip(44)
b46 = CBlock()
b46.nTime = b44.nTime + 1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
self.send_blocks([b46], False, "bad-blk-length", reconnect=True)
self.log.info("Reject a block with invalid work")
self.move_tip(44)
b47 = self.next_block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.sha256 < target:
b47.nNonce += 1
b47.rehash()
self.send_blocks([b47], False)
self.log.info("Reject a block with invalid merkle hash")
self.move_tip(44)
b49 = self.next_block(49)
b49.hashMerkleRoot += 1
b49.solve()
self.send_blocks([b49], False, "bad-txnmrklroot", reconnect=True)
self.log.info("Reject a block with incorrect POW limit")
self.move_tip(44)
b50 = self.next_block(50)
b50.nBits = b50.nBits - 1
b50.solve()
self.send_blocks([b50], False, reconnect=True)
self.log.info("Reject a block with two coinbase transactions")
self.move_tip(44)
b51 = self.next_block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = self.update_block(51, [cb2])
self.send_blocks([b51], False, "bad-cb-multiple", reconnect=True)
self.log.info("Reject a block with duplicate transactions")
# Note: txns have to be in the right position in the merkle tree to trigger this error
self.move_tip(44)
b52 = self.next_block(52, spend=out[15])
tx = self.create_tx(b52.vtx[1], 0, 1)
b52 = self.update_block(52, [tx, tx])
self.send_blocks([b52], False, "bad-txns-duplicate", reconnect=True)
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
self.move_tip(43)
b53 = self.next_block(53, spend=out[14])
self.send_blocks([b53], False)
self.save_spendable_output()
# TrumpCoin: timestamp checks disabled for regtest
# valid timestamp
self.move_tip(53)
b55 = self.next_block(55, spend=out[15])
b55.nTime = b35.nTime
self.update_block(55, [])
self.send_blocks([b55], True)
self.save_spendable_output()
# Test Merkle tree malleability
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
# b57 - a good block with 2 txs, don't submit until end
self.move_tip(55)
b57 = self.next_block(57)
tx = self.create_and_sign_transaction(out[16].tx, out[16].n, 1)
tx1 = self.create_tx(tx, 0, 1)
b57 = self.update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx), 3)
b56 = self.update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
self.send_blocks([b56], False, "bad-txns-duplicate", reconnect=True)
# b57p2 - a good block with 6 tx'es, don't submit until end
self.move_tip(55)
b57p2 = self.next_block("57p2")
tx = self.create_and_sign_transaction(out[16].tx, out[16].n, 1)
tx1 = self.create_tx(tx, 0, 1)
tx2 = self.create_tx(tx1, 0, 1)
tx3 = self.create_tx(tx2, 0, 1)
tx4 = self.create_tx(tx3, 0, 1)
b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx), 6)
b56p2 = self.update_block("b56p2", [tx3, tx4])
self.send_blocks([b56p2], False, "bad-txns-duplicate", reconnect=True)
self.move_tip("57p2")
self.send_blocks([b57p2], True)
self.move_tip(57)
self.send_blocks([b57], False) # The tip is not updated because 57p2 seen first
self.save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
self.log.info("Reject a block with a transaction with prevout.n out of range")
self.move_tip(57)
b58 = self.next_block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].tx.vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx.calc_sha256()
b58 = self.update_block(58, [tx])
self.send_blocks([b58], False, "bad-txns-inputs-missingorspent", reconnect=True)
# tx with output value > input value
self.log.info("Reject a block with a transaction with outputs > inputs")
self.move_tip(57)
b59 = self.next_block(59)
tx = self.create_and_sign_transaction(out[17].tx, out[17].n, 251 * COIN)
b59 = self.update_block(59, [tx])
self.send_blocks([b59], False, "bad-txns-in-belowout", reconnect=True)
# reset to good chain
self.move_tip(57)
b60 = self.next_block(60, spend=out[17])
self.send_blocks([b60], True)
self.save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
self.move_tip(60)
b61 = self.next_block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig # Equalize the coinbases
b61.vtx[0].rehash()
b61 = self.update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
self.send_blocks([b61], False, "bad-cb-height", reconnect=True)
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
self.log.info("Reject a block with a transaction with a nonfinal locktime")
self.move_tip(60)
b62 = self.next_block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff # this locktime is non-final
assert(out[18].n < len(out[18].tx.vout))
tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = self.update_block(62, [tx])
self.send_blocks([b62], False, "bad-txns-nonfinal")
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime")
self.move_tip(60)
b63 = self.next_block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = self.update_block(63, [])
self.send_blocks([b63], False, "bad-txns-nonfinal")
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
#
self.move_tip(60)
b64 = self.next_block(64)
b64 = self.update_block(64, [])
self.send_blocks([b64], True)
self.save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
self.log.info("Accept a block with a transaction spending an output created in the same block")
self.move_tip(64)
b65 = self.next_block(65)
tx1 = self.create_and_sign_transaction(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0, 0)
b65 = self.update_block(65, [tx1, tx2])
self.send_blocks([b65], True)
self.save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
self.log.info("Reject a block with a transaction spending an output created later in the same block")
self.move_tip(65)
b66 = self.next_block(66)
tx1 = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0, 1)
b66 = self.update_block(66, [tx2, tx1])
self.send_blocks([b66], False, "bad-txns-inputs-missingorspent", reconnect=True)
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
self.log.info("Reject a block with a transaction double spending a transaction created in the same block")
self.move_tip(65)
b67 = self.next_block(67)
tx1 = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0, 1)
tx3 = self.create_and_sign_transaction(tx1, 0, 2)
b67 = self.update_block(67, [tx1, tx2, tx3])
self.send_blocks([b67], False, "bad-txns-inputs-missingorspent", reconnect=True)
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
self.move_tip(65)
b68 = self.next_block(68, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9)
b68 = self.update_block(68, [tx])
self.send_blocks([b68], False, "bad-blk-amount", reconnect=True)
self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction")
self.move_tip(65)
b69 = self.next_block(69, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10)
self.update_block(69, [tx])
self.send_blocks([b69], True)
self.save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
self.log.info("Reject a block containing a transaction spending from a non-existent input")
self.move_tip(69)
b70 = self.next_block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
b70 = self.update_block(70, [tx])
self.send_blocks([b70], False, "bad-txns-inputs-missingorspent", reconnect=True)
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability)")
self.move_tip(69)
b72 = self.next_block(72)
tx1 = self.create_and_sign_transaction(out[21].tx, out[21].n, 2)
tx2 = self.create_and_sign_transaction(tx1, 0, 1)
b72 = self.update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
self.move_tip(71)
self.send_blocks([b71], False, "bad-txns-duplicate", reconnect=True)
self.move_tip(72)
self.send_blocks([b72], True)
self.save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
self.log.info("Reject a block containing too many sigops after a large script element")
self.move_tip(72)
b73 = self.next_block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
a[MAX_BLOCK_SIGOPS + 2] = 0
a[MAX_BLOCK_SIGOPS + 3] = 0
tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a))
b73 = self.update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
self.send_blocks([b73], False, 'bad-blk-sigops', reconnect=True)
# b74/75 - if we push an invalid script element, all prevous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
self.log.info("Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
b74 = self.next_block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS + 1] = 0xfe
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
a[MAX_BLOCK_SIGOPS + 4] = 0xff
tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a))
b74 = self.update_block(74, [tx])
self.send_blocks([b74], False, 'bad-blk-sigops', reconnect=True)
self.move_tip(72)
b75 = self.next_block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS + 1] = 0xff
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a))
b75 = self.update_block(75, [tx])
self.send_blocks([b75], True)
self.save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
self.move_tip(75)
b76 = self.next_block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = self.create_and_sign_transaction(out[23].tx, 0, 1, CScript(a))
b76 = self.update_block(76, [tx])
self.send_blocks([b76], True)
self.save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the trumpcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
self.log.info("Test transaction resurrection during a re-org")
self.move_tip(76)
b77 = self.next_block(77)
tx77 = self.create_and_sign_transaction(out[24].tx, out[24].n, 10 * COIN)
b77 = self.update_block(77, [tx77])
self.send_blocks([b77], True)
self.save_spendable_output()
b78 = self.next_block(78)
tx78 = self.create_tx(tx77, 0, 9 * COIN)
b78 = self.update_block(78, [tx78])
self.send_blocks([b78], True)
b79 = self.next_block(79)
tx79 = self.create_tx(tx78, 0, 8 * COIN)
b79 = self.update_block(79, [tx79])
self.send_blocks([b79], True)
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.move_tip(77)
b80 = self.next_block(80, spend=out[25])
self.send_blocks([b80], False)
self.save_spendable_output()
b81 = self.next_block(81, spend=out[26])
self.send_blocks([b81], False) # other chain is same length
self.save_spendable_output()
b82 = self.next_block(82, spend=out[27])
self.send_blocks([b82], True) # now this chain is longer, triggers re-org
self.save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
self.log.info("Accept a block with invalid opcodes in dead execution paths")
b83 = self.next_block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = self.create_and_sign_transaction(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
tx2 = self.create_and_sign_transaction(tx1, 0, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
b83 = self.update_block(83, [tx1, tx2])
self.send_blocks([b83], True)
self.save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
self.log.info("Test re-orging blocks with OP_RETURN in them")
b84 = self.next_block(84)
tx1 = self.create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29].tx, out[29].n)
tx1.rehash()
tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN]))
b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
self.send_blocks([b84], True)
self.save_spendable_output()
self.move_tip(83)
b85 = self.next_block(85, spend=out[29])
self.send_blocks([b85], False) # other chain is same length
b86 = self.next_block(86, spend=out[30])
self.send_blocks([b86], True)
self.move_tip(84)
b87 = self.next_block(87, spend=out[30])
self.send_blocks([b87], False) # other chain is same length
self.save_spendable_output()
b88 = self.next_block(88, spend=out[31])
self.send_blocks([b88], True)
self.save_spendable_output()
# trying to spend the OP_RETURN output is rejected
b89a = self.next_block("89a", spend=out[32])
tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
b89a = self.update_block("89a", [tx])
self.send_blocks([b89a], False, 'bad-txns-inputs-missingorspent', reconnect=True)
# !TODO: add long-reorg test
# Helper methods
################
def add_transactions_to_block(self, block, tx_list):
    """Rehash every transaction in *tx_list* and append them to ``block.vtx``.

    The txids must be up to date before the caller recomputes the block's
    merkle root, so each tx is rehashed here first.
    """
    # A plain loop, not a list comprehension: rehash() is called purely for
    # its side effect and the comprehension's result list was discarded.
    for tx in tx_list:
        tx.rehash()
    block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
    """Return an unsigned tx spending output *n* of *spend_tx* for *value*.

    Convenience wrapper around blocktools.create_transaction that always
    passes an empty scriptSig.
    """
    new_tx = create_transaction(spend_tx, n, b"", value, script)
    return new_tx
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx, n):
    """Sign input 0 of *tx*, assumed to spend output *n* of *spend_tx*,
    using the test's coinbase key."""
    spk = spend_tx.vout[n].scriptPubKey
    if bytearray(spk)[0] == OP_TRUE:
        # Anyone-can-spend output: an empty scriptSig is sufficient.
        tx.vin[0].scriptSig = CScript()
        return
    (sighash, _err) = SignatureHash(spk, tx, 0, SIGHASH_ALL)
    signature = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
    tx.vin[0].scriptSig = CScript([signature])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
    """Build, sign, and rehash a tx spending output *n* of *spend_tx*."""
    signed_tx = self.create_tx(spend_tx, n, value, script)
    self.sign_tx(signed_tx, spend_tx, n)
    signed_tx.rehash()
    return signed_tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
    """Create a block on top of self.tip, register it under *number*, and return it.

    If *spend* is given, the block includes one tx spending 1 satoshi of that
    output, with the remainder (minus 1 satoshi) claimed as fees by the
    coinbase. With solve=False the block is left unsolved for the caller to
    tamper with.
    """
    if self.tip is None:
        prev_hash, block_time = self.genesis_hash, int(time.time()) + 1
    else:
        prev_hash, block_time = self.tip.sha256, self.tip.nTime + 1
    # First build the coinbase, with its final value, before the block.
    height = self.block_heights[prev_hash] + 1
    coinbase = create_coinbase(height, self.coinbase_pubkey)
    coinbase.vout[0].nValue += additional_coinbase_value
    if spend is not None:
        # All but one satoshi of the spent output goes to fees, claimed here.
        coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1
    coinbase.rehash()
    block = create_block(prev_hash, coinbase, block_time)
    if spend is not None:
        # Spend 1 satoshi of the given output and refresh the merkle root.
        spending_tx = create_transaction(spend.tx, spend.n, b"", 1, script)
        self.sign_tx(spending_tx, spend.tx, spend.n)
        self.add_transactions_to_block(block, [spending_tx])
        block.hashMerkleRoot = block.calc_merkle_root()
    if solve:
        block.solve()
    # Bookkeeping mirrored by update_block(): tip, height map, block index.
    self.tip = block
    self.block_heights[block.sha256] = height
    assert number not in self.blocks
    self.blocks[number] = block
    return block
# save the current tip so it can be spent by a later block
def save_spendable_output(self):
    """Queue the current tip so its coinbase can be spent by a later block."""
    tip = self.tip
    self.log.debug("saving spendable output %s" % tip.vtx[0])
    self.spendable_outputs.append(tip)
# get an output that we previously marked as spendable
def get_spendable_output(self):
    """Pop the oldest saved block and return its coinbase output (vout 0)."""
    block = self.spendable_outputs.pop(0)
    self.log.debug("getting spendable output %s" % block.vtx[0])
    return PreviousSpendableOutput(block.vtx[0], 0)
# move the tip back to a previous block
def move_tip(self, number):
    # Re-point self.tip at an already-created block (keyed by *number* in
    # self.blocks) so the next next_block() call forks from there.
    # Raises KeyError if no block was registered under that number.
    self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(self, block_number, new_transactions):
    """Append *new_transactions* to a stored block, re-solve it, make it the tip.

    Re-solving changes the block hash, so the recorded height is moved from
    the old hash to the new one.
    """
    block = self.blocks[block_number]
    self.add_transactions_to_block(block, new_transactions)
    previous_hash = block.sha256
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    # Mirror the bookkeeping done by next_block().
    self.tip = block
    if block.sha256 != previous_hash:
        self.block_heights[block.sha256] = self.block_heights.pop(previous_hash)
    self.blocks[block_number] = block
    return block
def reconnect_p2p(self):
    """Add a P2P connection to the node.

    The node gets disconnected several times in this test. This helper
    method reconnects the p2p and restarts the network thread."""
    node = self.nodes[0]
    node.disconnect_p2ps()
    node.add_p2p_connection(P2PDataStore())
    node.p2p.wait_for_verack()
def send_blocks(self, blocks, success=True, reject_reason=None, reconnect=False, timeout=60):
    """Relay *blocks* to the node over P2P and verify the resulting tip.

    With success=True the tip must advance to the last block sent; with
    success=False it must not. Pass reconnect=True when the node is expected
    to disconnect the peer (e.g. after a consensus failure).
    """
    peer = self.nodes[0].p2p
    peer.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, expect_disconnect=reconnect, timeout=timeout)
    if reconnect:
        self.reconnect_p2p()
# TrumpCoin
# create a block with a tx spending a given out, and lots of txes spending the outputs created
# in the first one. Keep the tx size under 150 kB limit.
def create_sized_block(self, block, spend, block_size):
    """Pad *block* out to (approximately) *block_size* serialized bytes.

    A first tx (tx1) spends *spend* into num_of_txes 1-satoshi OP_TRUE
    outputs; then filler txs, each spending one of those outputs and
    carrying a large zero-byte script output, are appended until the block
    reaches the target size. Warns if the final size misses the target.
    """
    # One fan-out output per ~150 kB of target size; presumably 150000 is
    # the per-tx size limit mentioned in the comment above — TODO confirm.
    num_of_txes = int(block_size // 150000) + 1
    # Everything from the spent output except 1 satoshi per filler tx is
    # claimed as fees by the coinbase.
    block.vtx[0].vout[0].nValue += spend.tx.vout[spend.n].nValue - num_of_txes
    block.vtx[0].rehash()
    tx1 = CTransaction()
    tx1.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))
    tx1.vout = [CTxOut(1, CScript([OP_TRUE]))] * num_of_txes
    tx1.calc_sha256()
    self.sign_tx(tx1, spend.tx, spend.n)
    self.add_transactions_to_block(block, [tx1])
    available_space = block_size - len(block.serialize())
    idx = 0
    # NOTE(review): 138 / 141 look like per-tx serialization overheads in
    # bytes, and 149999 a just-under-150kB tx cap — not verifiable from
    # this file alone; confirm against the serialization format.
    while available_space > 138 and idx < num_of_txes:
        tx = CTransaction()
        if 149999 < available_space:
            script_length = 149999 - 141
        else:
            script_length = available_space - 138
        script_output = CScript([b'\x00' * script_length])
        tx.vout.append(CTxOut(0, script_output))
        # Each filler tx spends output `idx` of tx1 (block.vtx[1]).
        tx.vin.append(CTxIn(COutPoint(block.vtx[1].sha256, idx)))
        tx.calc_sha256()
        self.sign_tx(tx, spend.tx, spend.n)
        self.add_transactions_to_block(block, [tx])
        available_space = block_size - len(block.serialize())
        idx += 1
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    # Make sure the math above worked out to produce a block_size-sized block
    bsize = len(block.serialize())
    if bsize not in [block_size - 1, block_size]:
        self.log.warning("Created block of size %d" % bsize)
    return block
# Run the full block-acceptance test when executed as a script.
if __name__ == '__main__':
    FullBlockTest().main()
| 45.662975
| 161
| 0.574535
|
import copy
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction, get_legacy_sigopcount_block
from test_framework.key import CECKey
from test_framework.messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
uint256_from_compact,
uint256_from_str,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_FALSE,
OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SignatureHash,
hash160,
)
from test_framework.test_framework import TrumpCoinTestFramework
from test_framework.util import assert_equal
MAX_BLOCK_SIGOPS = MAX_BLOCK_BASE_SIZE // 50
class PreviousSpendableOutput():
    """Reference to a spendable output: transaction *tx* and vout index *n*."""

    def __init__(self, tx=None, n=-1):
        # Build the default per-instance. The original `tx=CTransaction()`
        # default was evaluated once at class-definition time, so every
        # caller that omitted `tx` shared one mutable CTransaction object
        # (the classic mutable-default-argument pitfall).
        self.tx = CTransaction() if tx is None else tx
        self.n = n  # vout index into self.tx; -1 means "not set"
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
    """Block whose serialization encodes the tx count as a bloated varint."""

    def initialize(self, base_block):
        # Deep-copy so mutating this block's txs never touches base_block.
        self.vtx = copy.deepcopy(base_block.vtx)
        self.hashMerkleRoot = self.calc_merkle_root()

    def serialize(self, with_witness=False):
        r = b""
        # super(CBlock, self) skips CBlock's own serialize and calls the
        # next class in the MRO — presumably the block-header class, so only
        # the 80-byte header is emitted here; confirm against messages.py.
        r += super(CBlock, self).serialize()
        # Non-canonical varint for the tx count: 0xff marker + 8-byte
        # little-endian length, even though the count fits in one byte.
        r += struct.pack("<BQ", 255, len(self.vtx))
        for tx in self.vtx:
            if with_witness:
                r += tx.serialize_with_witness()
            else:
                r += tx.serialize_without_witness()
        return r

    def normal_serialize(self):
        # The ordinary (canonical) CBlock serialization, for comparison.
        return super().serialize()
class FullBlockTest(TrumpCoinTestFramework):
def set_test_params(self):
    # Single node on a fresh chain: every block in the test is hand-built
    # and submitted over P2P, so no pre-mined chain is wanted.
    self.num_nodes = 1
    self.setup_clean_chain = True
    self.extra_args = [["-acceptnonstdtxn=1"]]  # This is a consensus block test, we don't care about tx policy
def run_test(self):
    """Drive the node through a long sequence of hand-built valid and invalid
    blocks, asserting acceptance or the expected reject reason for each.

    BUGFIX: the original body ended with `end(tx_list)` — both names are
    undefined anywhere in the file, so every run would finish with a
    NameError after the final assertions. The bogus line has been removed.
    """
    node = self.nodes[0]
    self.log.info("Starting network thread...")
    self.reconnect_p2p()
    self.block_heights = {}
    self.coinbase_key = CECKey()
    self.coinbase_key.set_secretbytes(b"horsebattery")
    self.coinbase_pubkey = self.coinbase_key.get_pubkey()
    self.tip = None
    self.blocks = {}
    self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
    self.block_heights[self.genesis_hash] = 0
    self.spendable_outputs = []
    self.log.info("Creating and sending a block...")
    b0 = self.next_block(0)
    self.save_spendable_output()
    self.send_blocks([b0])
    self.log.info("Maturing the block...")
    blocks = []
    for i in range(99):
        blocks.append(self.next_block(4000 + i))
        self.save_spendable_output()
    self.log.info("Sending all new blocks")
    self.send_blocks(blocks)
    self.log.info("Collect spendable outputs...")
    out = []
    for i in range(33):
        out.append(self.get_spendable_output())
    self.log.info("Build b1 and b2")
    b1 = self.next_block(1, spend=out[0])
    self.save_spendable_output()
    b2 = self.next_block(2, spend=out[1])
    self.save_spendable_output()
    self.send_blocks([b1, b2])
    self.log.info("Don't reorg to a chain of the same length")
    self.move_tip(1)
    b3 = self.next_block(3, spend=out[1])
    txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
    self.send_blocks([b3], False)
    # Now we add another block to make the alternative chain longer.
    #
    # genesis -> b1 (0) -> b2 (1)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reorg to a longer chain")
    b4 = self.next_block(4, spend=out[2])
    self.send_blocks([b4])
    # ... and back to the first chain.
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                  \-> b3 (1) -> b4 (2)
    self.move_tip(2)
    b5 = self.next_block(5, spend=out[2])
    self.save_spendable_output()
    self.send_blocks([b5], False)
    self.log.info("Reorg back to the original chain")
    b6 = self.next_block(6, spend=out[3])
    self.send_blocks([b6], True)
    # Try to create a fork that double-spends
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b7 (2) -> b8 (4)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reject a chain with a double spend, even if it is longer")
    self.move_tip(5)
    b7 = self.next_block(7, spend=out[2])
    self.send_blocks([b7], False)
    b8 = self.next_block(8, spend=out[4])
    self.send_blocks([b8], False, reconnect=True)
    # Try to create a block that has too much fee
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                                        \-> b9 (4)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reject a block where the miner creates too much coinbase reward")
    self.move_tip(6)
    b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
    self.send_blocks([b9], False, "bad-blk-amount", reconnect=True)
    # Create a fork that ends in a block with too much fee (the one that causes the reorg)
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b10 (3) -> b11 (4)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
    self.move_tip(5)
    b10 = self.next_block(10, spend=out[3])
    self.send_blocks([b10], False)
    b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
    self.send_blocks([b11], False, "bad-blk-amount", reconnect=True)
    # Try again, but with a valid fork first
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b12 (3) -> b13 (4) -> b14 (5)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
    self.move_tip(5)
    b12 = self.next_block(12, spend=out[3])
    self.save_spendable_output()
    b13 = self.next_block(13, spend=out[4])
    self.save_spendable_output()
    b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
    self.send_blocks([b12, b13, b14], False, "bad-blk-amount", reconnect=True)
    # New tip should be b13.
    assert_equal(node.getbestblockhash(), b13.hash)
    # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Accept a block with lots of checksigs")
    lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
    self.move_tip(13)
    b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
    self.save_spendable_output()
    self.send_blocks([b15], True)
    self.log.info("Reject a block with too many checksigs")
    too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
    b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
    self.send_blocks([b16], False, "bad-blk-sigops", reconnect=True)
    # Attempt to spend a transaction created on a different fork
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reject a block with a spend from a re-org'ed out tx")
    self.move_tip(15)
    b17 = self.next_block(17, spend=txout_b3)
    self.send_blocks([b17], False, "bad-txns-inputs-missingorspent", reconnect=True)
    self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
    self.move_tip(13)
    b18 = self.next_block(18, spend=txout_b3)
    self.send_blocks([b18], False)
    b19 = self.next_block(19, spend=out[6])
    self.send_blocks([b19], False, "bad-txns-inputs-missingorspent", reconnect=True)
    # Attempt to spend a coinbase at depth too low
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reject a block spending an immature coinbase.")
    self.move_tip(15)
    b20 = self.next_block(20, spend=out[7])
    self.send_blocks([b20], False, "bad-txns-premature-spend-of-coinbase")
    # Attempt to spend a coinbase at depth too low (on a fork this time)
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b12 (3) -> b13 (4) -> b15 (5)
    #                                         \-> b21 (6) -> b22 (5)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reject a block spending an immature coinbase (on a forked chain)")
    self.move_tip(13)
    b21 = self.next_block(21, spend=out[6])
    self.send_blocks([b21], False)
    b22 = self.next_block(22, spend=out[5])
    self.send_blocks([b22], False, "bad-txns-premature-spend-of-coinbase")
    # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
    #                                                                \-> b24 (6) -> b25 (7)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
    self.move_tip(15)
    height = self.block_heights[self.tip.sha256] + 1
    b23 = self.next_block(23)
    b23 = self.create_sized_block(b23, out[6], MAX_BLOCK_BASE_SIZE)
    self.block_heights[b23.sha256] = height
    self.blocks[23] = b23
    self.send_blocks([b23], True)
    self.save_spendable_output()
    self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 2")
    self.move_tip(15)
    b24 = self.next_block(24)
    b24 = self.create_sized_block(b24, out[6], MAX_BLOCK_BASE_SIZE + 2)
    self.block_heights[b24.sha256] = height
    self.blocks[24] = b24
    self.send_blocks([b24], False, "bad-blk-length", reconnect=True)
    b25 = self.next_block(25, spend=out[7])
    self.send_blocks([b25], False)
    # Create blocks with a coinbase input script size out of range
    # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
    #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
    #                                                                \-> ... (6) -> ... (7)
    #                  \-> b3 (1) -> b4 (2)
    self.log.info("Reject a block with coinbase input script size out of range")
    self.move_tip(15)
    b26 = self.next_block(26, spend=out[6])
    b26.vtx[0].vin[0].scriptSig = b'\x00'
    b26.vtx[0].rehash()
    # update_block causes the merkle root to get updated, even with no new
    # transactions, and updates the required state.
    b26 = self.update_block(26, [])
    self.send_blocks([b26], False, "bad-cb-length", reconnect=True)
    # Extend the b26 chain to make sure trumpcoind isn't accepting b26
    b27 = self.next_block(27, spend=out[7])
    self.send_blocks([b27], False)
    self.move_tip(15)
    b28 = self.next_block(28, spend=out[6])
    b28.vtx[0].vin[0].scriptSig += b'\x00' * 151
    b28.vtx[0].rehash()
    b28 = self.update_block(28, [])
    self.send_blocks([b28], False, "bad-cb-length", reconnect=True)
    b29 = self.next_block(29, spend=out[7])
    self.send_blocks([b29], False)
    # b30
    self.move_tip(23)
    b30 = self.next_block(30)
    b30 = self.update_block(30, [])
    self.send_blocks([b30], True)
    self.save_spendable_output()
    # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
    #
    # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
    #                                                 \-> b36 (11)
    #                                      \-> b34 (10)
    #                           \-> b32 (9)
    #
    # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
    self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops")
    lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
    b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs)
    assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
    self.send_blocks([b31], True)
    self.save_spendable_output()
    # this goes over the limit because the coinbase has one sigop
    self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops")
    too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
    b32 = self.next_block(32, spend=out[9], script=too_many_multisigs)
    assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
    self.send_blocks([b32], False, "bad-blk-sigops", reconnect=True)
    # CHECKMULTISIGVERIFY
    self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops")
    self.move_tip(31)
    lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
    b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs)
    self.send_blocks([b33], True)
    self.save_spendable_output()
    self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops")
    too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
    b34 = self.next_block(34, spend=out[10], script=too_many_multisigs)
    self.send_blocks([b34], False, "bad-blk-sigops", reconnect=True)
    # CHECKSIGVERIFY
    self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops")
    self.move_tip(33)
    lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
    b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs)
    self.send_blocks([b35], True)
    self.save_spendable_output()
    self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops")
    too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
    b36 = self.next_block(36, spend=out[11], script=too_many_checksigs)
    self.send_blocks([b36], False, "bad-blk-sigops", reconnect=True)
    # Check spending of a transaction in a block which failed to connect
    #
    # b6  (3)
    # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
    #                                                                                     \-> b37 (11)
    #                                                                                     \-> b38 (11/37)
    #
    # save 37's spendable output, but then double-spend out11 to invalidate the block
    self.log.info("Reject a block spending transaction from a block which failed to connect")
    self.move_tip(35)
    b37 = self.next_block(37, spend=out[11])
    txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
    tx = self.create_and_sign_transaction(out[11].tx, out[11].n, 0)
    b37 = self.update_block(37, [tx])
    self.send_blocks([b37], False, "bad-txns-inputs-missingorspent", reconnect=True)
    self.move_tip(35)
    b38 = self.next_block(38, spend=txout_b37)
    self.send_blocks([b38], False, "bad-txns-inputs-missingorspent", reconnect=True)
    # Check P2SH SigOp counting
    #
    #
    #   13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
    #                                                                                        \-> b40 (12)
    #
    # b39 - create some P2SH outputs that will require 6 sigops to spend:
    #
    #           redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
    #           p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
    #
    self.log.info("Check P2SH SIGOPS are correctly counted")
    self.move_tip(35)
    b39 = self.next_block(39)
    b39_outputs = 0
    b39_sigops_per_output = 6
    # Build the redeem script, hash it, use hash to create the p2sh script
    redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
    redeem_script_hash = hash160(redeem_script)
    p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
    # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
    # This must be signed because it is spending a coinbase
    spend = out[11]
    tx = self.create_tx(spend.tx, spend.n, 1, p2sh_script)
    tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
    self.sign_tx(tx, spend.tx, spend.n)
    tx.rehash()
    b39 = self.update_block(39, [tx])
    b39_outputs += 1
    # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
    tx_new = None
    tx_last = tx
    total_size = len(b39.serialize())
    while(total_size < MAX_BLOCK_BASE_SIZE):
        tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
        tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
        tx_new.rehash()
        total_size += len(tx_new.serialize())
        if total_size >= MAX_BLOCK_BASE_SIZE:
            break
        b39.vtx.append(tx_new)
        tx_last = tx_new
        b39_outputs += 1
    b39 = self.update_block(39, [])
    self.send_blocks([b39], True)
    self.save_spendable_output()
    # The first tx has one sigop and then at the end we add 2 more to put us just over the max.
    #
    # b41 does the same, less one, so it has the maximum sigops permitted.
    #
    self.log.info("Reject a block with too many P2SH sigops")
    self.move_tip(39)
    b40 = self.next_block(40, spend=out[12])
    sigops = get_legacy_sigopcount_block(b40)
    numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
    assert_equal(numTxes <= b39_outputs, True)
    lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
    new_txs = []
    for i in range(1, numTxes + 1):
        tx = CTransaction()
        tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
        tx.vin.append(CTxIn(lastOutpoint, b''))
        # second input is corresponding P2SH output from b39
        tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
        # Note: must pass the redeem_script (not p2sh_script) to the signature hash function
        (sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
        sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
        scriptSig = CScript([sig, redeem_script])
        tx.vin[1].scriptSig = scriptSig
        tx.rehash()
        new_txs.append(tx)
        lastOutpoint = COutPoint(tx.sha256, 0)
    b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
    tx = CTransaction()
    tx.vin.append(CTxIn(lastOutpoint, b''))
    tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
    tx.rehash()
    new_txs.append(tx)
    self.update_block(40, new_txs)
    self.send_blocks([b40], False, "bad-blk-sigops", reconnect=True)
    # same as b40, but one less sigop
    self.log.info("Accept a block with the max number of P2SH sigops")
    self.move_tip(39)
    b41 = self.next_block(41, spend=None)
    self.update_block(41, b40.vtx[1:-1])
    b41_sigops_to_fill = b40_sigops_to_fill - 1
    tx = CTransaction()
    tx.vin.append(CTxIn(lastOutpoint, b''))
    tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
    tx.rehash()
    self.update_block(41, [tx])
    self.send_blocks([b41], True)
    # Fork off of b39 to create a constant base again
    #
    # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
    #                                                                 \-> b41 (12)
    #
    self.move_tip(39)
    b42 = self.next_block(42, spend=out[12])
    self.save_spendable_output()
    b43 = self.next_block(43, spend=out[13])
    self.save_spendable_output()
    self.send_blocks([b42, b43], True)
    # Test a number of really invalid scenarios
    #
    #  -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
    #                                                                       \-> ??? (15)
    # The next few blocks are going to be created "by hand" since they'll do funky things, such as having
    self.log.info("Build block 44 manually")
    height = self.block_heights[self.tip.sha256] + 1
    coinbase = create_coinbase(height, self.coinbase_pubkey)
    b44 = CBlock()
    b44.nTime = self.tip.nTime + 1
    b44.hashPrevBlock = self.tip.sha256
    b44.nBits = 0x207fffff
    b44.vtx.append(coinbase)
    b44.hashMerkleRoot = b44.calc_merkle_root()
    b44.solve()
    self.tip = b44
    self.block_heights[b44.sha256] = height
    self.blocks[44] = b44
    self.send_blocks([b44], True)
    self.log.info("Reject a block with a non-coinbase as the first tx")
    non_coinbase = self.create_tx(out[15].tx, out[15].n, 1)
    b45 = CBlock()
    b45.nTime = self.tip.nTime + 1
    b45.hashPrevBlock = self.tip.sha256
    b45.nBits = 0x207fffff
    b45.vtx.append(non_coinbase)
    b45.hashMerkleRoot = b45.calc_merkle_root()
    b45.calc_sha256()
    b45.solve()
    self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
    self.tip = b45
    self.blocks[45] = b45
    self.send_blocks([b45], False, "bad-cb-missing", reconnect=True)
    self.log.info("Reject a block with no transactions")
    self.move_tip(44)
    b46 = CBlock()
    b46.nTime = b44.nTime + 1
    b46.hashPrevBlock = b44.sha256
    b46.nBits = 0x207fffff
    b46.vtx = []
    b46.hashMerkleRoot = 0
    b46.solve()
    self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
    self.tip = b46
    assert 46 not in self.blocks
    self.blocks[46] = b46
    self.send_blocks([b46], False, "bad-blk-length", reconnect=True)
    self.log.info("Reject a block with invalid work")
    self.move_tip(44)
    b47 = self.next_block(47, solve=False)
    target = uint256_from_compact(b47.nBits)
    while b47.sha256 < target:
        b47.nNonce += 1
        b47.rehash()
    self.send_blocks([b47], False)
    self.log.info("Reject a block with invalid merkle hash")
    self.move_tip(44)
    b49 = self.next_block(49)
    b49.hashMerkleRoot += 1
    b49.solve()
    self.send_blocks([b49], False, "bad-txnmrklroot", reconnect=True)
    self.log.info("Reject a block with incorrect POW limit")
    self.move_tip(44)
    b50 = self.next_block(50)
    b50.nBits = b50.nBits - 1
    b50.solve()
    self.send_blocks([b50], False, reconnect=True)
    self.log.info("Reject a block with two coinbase transactions")
    self.move_tip(44)
    b51 = self.next_block(51)
    cb2 = create_coinbase(51, self.coinbase_pubkey)
    b51 = self.update_block(51, [cb2])
    self.send_blocks([b51], False, "bad-cb-multiple", reconnect=True)
    self.log.info("Reject a block with duplicate transactions")
    self.move_tip(44)
    b52 = self.next_block(52, spend=out[15])
    tx = self.create_tx(b52.vtx[1], 0, 1)
    b52 = self.update_block(52, [tx, tx])
    self.send_blocks([b52], False, "bad-txns-duplicate", reconnect=True)
    self.move_tip(43)
    b53 = self.next_block(53, spend=out[14])
    self.send_blocks([b53], False)
    self.save_spendable_output()
    self.move_tip(53)
    b55 = self.next_block(55, spend=out[15])
    b55.nTime = b35.nTime
    self.update_block(55, [])
    self.send_blocks([b55], True)
    self.save_spendable_output()
    self.move_tip(55)
    b57 = self.next_block(57)
    tx = self.create_and_sign_transaction(out[16].tx, out[16].n, 1)
    tx1 = self.create_tx(tx, 0, 1)
    b57 = self.update_block(57, [tx, tx1])
    # b56 - copy b57, add a duplicate tx
    self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)")
    self.move_tip(55)
    b56 = copy.deepcopy(b57)
    self.blocks[56] = b56
    assert_equal(len(b56.vtx), 3)
    b56 = self.update_block(56, [tx1])
    assert_equal(b56.hash, b57.hash)
    self.send_blocks([b56], False, "bad-txns-duplicate", reconnect=True)
    # b57p2 - a good block with 6 tx'es, don't submit until end
    self.move_tip(55)
    b57p2 = self.next_block("57p2")
    tx = self.create_and_sign_transaction(out[16].tx, out[16].n, 1)
    tx1 = self.create_tx(tx, 0, 1)
    tx2 = self.create_tx(tx1, 0, 1)
    tx3 = self.create_tx(tx2, 0, 1)
    tx4 = self.create_tx(tx3, 0, 1)
    b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4])
    # b56p2 - copy b57p2, duplicate two non-consecutive tx's
    self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)")
    self.move_tip(55)
    b56p2 = copy.deepcopy(b57p2)
    self.blocks["b56p2"] = b56p2
    assert_equal(b56p2.hash, b57p2.hash)
    assert_equal(len(b56p2.vtx), 6)
    b56p2 = self.update_block("b56p2", [tx3, tx4])
    self.send_blocks([b56p2], False, "bad-txns-duplicate", reconnect=True)
    self.move_tip("57p2")
    self.send_blocks([b57p2], True)
    self.move_tip(57)
    self.send_blocks([b57], False)
    self.save_spendable_output()
    self.log.info("Reject a block with a transaction with prevout.n out of range")
    self.move_tip(57)
    b58 = self.next_block(58, spend=out[17])
    tx = CTransaction()
    assert(len(out[17].tx.vout) < 42)
    tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
    tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
    tx.calc_sha256()
    b58 = self.update_block(58, [tx])
    self.send_blocks([b58], False, "bad-txns-inputs-missingorspent", reconnect=True)
    self.log.info("Reject a block with a transaction with outputs > inputs")
    self.move_tip(57)
    b59 = self.next_block(59)
    tx = self.create_and_sign_transaction(out[17].tx, out[17].n, 251 * COIN)
    b59 = self.update_block(59, [tx])
    self.send_blocks([b59], False, "bad-txns-in-belowout", reconnect=True)
    self.move_tip(57)
    b60 = self.next_block(60, spend=out[17])
    self.send_blocks([b60], True)
    self.save_spendable_output()
    self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
    self.move_tip(60)
    b61 = self.next_block(61, spend=out[18])
    b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig
    b61.vtx[0].rehash()
    b61 = self.update_block(61, [])
    assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
    self.send_blocks([b61], False, "bad-cb-height", reconnect=True)
    self.log.info("Reject a block with a transaction with a nonfinal locktime")
    self.move_tip(60)
    b62 = self.next_block(62)
    tx = CTransaction()
    tx.nLockTime = 0xffffffff
    assert(out[18].n < len(out[18].tx.vout))
    tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n)))
    tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
    assert(tx.vin[0].nSequence < 0xffffffff)
    tx.calc_sha256()
    b62 = self.update_block(62, [tx])
    self.send_blocks([b62], False, "bad-txns-nonfinal")
    # Test a non-final coinbase is also rejected
    #
    #  -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
    #                                                                         \-> b63 (-)
    #
    self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime")
    self.move_tip(60)
    b63 = self.next_block(63)
    b63.vtx[0].nLockTime = 0xffffffff
    b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
    b63.vtx[0].rehash()
    b63 = self.update_block(63, [])
    self.send_blocks([b63], False, "bad-txns-nonfinal")
    #
    #  -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
    #
    self.move_tip(60)
    b64 = self.next_block(64)
    b64 = self.update_block(64, [])
    self.send_blocks([b64], True)
    self.save_spendable_output()
    # Spend an output created in the block itself
    #
    # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
    #
    self.log.info("Accept a block with a transaction spending an output created in the same block")
    self.move_tip(64)
    b65 = self.next_block(65)
    tx1 = self.create_and_sign_transaction(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
    tx2 = self.create_and_sign_transaction(tx1, 0, 0)
    b65 = self.update_block(65, [tx1, tx2])
    self.send_blocks([b65], True)
    self.save_spendable_output()
    # Attempt to spend an output created later in the same block
    #
    # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
    #                                                                        \-> b66 (20)
    self.log.info("Reject a block with a transaction spending an output created later in the same block")
    self.move_tip(65)
    b66 = self.next_block(66)
    tx1 = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
    tx2 = self.create_and_sign_transaction(tx1, 0, 1)
    b66 = self.update_block(66, [tx2, tx1])
    self.send_blocks([b66], False, "bad-txns-inputs-missingorspent", reconnect=True)
    # Attempt to double-spend a transaction created in a block
    #
    # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
    #                                                                        \-> b67 (20)
    #
    #
    self.log.info("Reject a block with a transaction double spending a transaction created in the same block")
    self.move_tip(65)
    b67 = self.next_block(67)
    tx1 = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
    tx2 = self.create_and_sign_transaction(tx1, 0, 1)
    tx3 = self.create_and_sign_transaction(tx1, 0, 2)
    b67 = self.update_block(67, [tx1, tx2, tx3])
    self.send_blocks([b67], False, "bad-txns-inputs-missingorspent", reconnect=True)
    # More tests of block subsidy
    #
    # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
    #                                                                                    \-> b68 (20)
    #
    # b68 - coinbase with an extra 10 satoshis,
    #       creates a tx that has 9 satoshis from out[20] go to fees
    #       this fails because the coinbase is trying to claim 1 satoshi too much in fees
    #
    # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
    #       this succeeds
    #
    self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
    self.move_tip(65)
    b68 = self.next_block(68, additional_coinbase_value=10)
    tx = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9)
    b68 = self.update_block(68, [tx])
    self.send_blocks([b68], False, "bad-blk-amount", reconnect=True)
    self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction")
    self.move_tip(65)
    b69 = self.next_block(69, additional_coinbase_value=10)
    tx = self.create_and_sign_transaction(out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10)
    self.update_block(69, [tx])
    self.send_blocks([b69], True)
    self.save_spendable_output()
    # Test spending the outpoint of a non-existent transaction
    #
    # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
    #                                                                        \-> b70 (21)
    #
    self.log.info("Reject a block containing a transaction spending from a non-existent input")
    self.move_tip(69)
    b70 = self.next_block(70, spend=out[21])
    bogus_tx = CTransaction()
    bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
    tx.vout.append(CTxOut(1, b""))
    b70 = self.update_block(70, [tx])
    self.send_blocks([b70], False, "bad-txns-inputs-missingorspent", reconnect=True)
    # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
    #
    # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
    #                                                                                    \-> b71 (21)
    #
    # b72 is a good block.
    # b71 is a copy of 72, but re-adds one of its transactions.  However, it has the same hash as b72.
    self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability)")
    self.move_tip(69)
    b72 = self.next_block(72)
    tx1 = self.create_and_sign_transaction(out[21].tx, out[21].n, 2)
    tx2 = self.create_and_sign_transaction(tx1, 0, 1)
    b72 = self.update_block(72, [tx1, tx2])  # now tip is 72
    b71 = copy.deepcopy(b72)
    b71.vtx.append(tx2)  # add duplicate tx2
    self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1  # b71 builds off b69
    self.blocks[71] = b71
    assert_equal(len(b71.vtx), 4)
    assert_equal(len(b72.vtx), 3)
    assert_equal(b72.sha256, b71.sha256)
    self.move_tip(71)
    self.send_blocks([b71], False, "bad-txns-duplicate", reconnect=True)
    self.move_tip(72)
    self.send_blocks([b72], True)
    self.save_spendable_output()
    # Test some invalid scripts and MAX_BLOCK_SIGOPS
    #
    # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
    #                                                                        \-> b** (22)
    #
    # b73 - tx with excessive sigops that are placed after an excessively large script element.
    #       The purpose of the test is to make sure those sigops are counted.
    #
    #       script is a bytearray of size 20,526
    #
    #       bytearray[0-19,998]     : OP_CHECKSIG
    #       bytearray[19,999]       : OP_PUSHDATA4
    #       bytearray[20,000-20,003]: 521  (max_script_element_size+1, in little-endian format)
    #       bytearray[20,004-20,525]: unread data (script_element)
    #       bytearray[20,526]       : OP_CHECKSIG (this puts us over the limit)
    self.log.info("Reject a block containing too many sigops after a large script element")
    self.move_tip(72)
    b73 = self.next_block(73)
    size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
    a = bytearray([OP_CHECKSIG] * size)
    a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16)  # OP_PUSHDATA4
    element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
    a[MAX_BLOCK_SIGOPS] = element_size % 256
    a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
    a[MAX_BLOCK_SIGOPS + 2] = 0
    a[MAX_BLOCK_SIGOPS + 3] = 0
    tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a))
    b73 = self.update_block(73, [tx])
    assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
    self.send_blocks([b73], False, 'bad-blk-sigops', reconnect=True)
    # b74/75 - if we push an invalid script element, all prevous sigops are counted,
    #          but sigops after the element are not counted.
    #
    #          The invalid script element is that the push_data indicates that
    #          there will be a large amount of data (0xffffff bytes), but we only
    #          provide a much smaller number.  These bytes are CHECKSIGS so they would
    #          cause b75 to fail for excessive sigops, if those bytes were counted.
    #
    #          b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
    #          b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
    self.log.info("Check sigops are counted correctly after an invalid script element")
    self.move_tip(72)
    b74 = self.next_block(74)
    size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42  # total = 20,561
    a = bytearray([OP_CHECKSIG] * size)
    a[MAX_BLOCK_SIGOPS] = 0x4e
    a[MAX_BLOCK_SIGOPS + 1] = 0xfe
    a[MAX_BLOCK_SIGOPS + 2] = 0xff
    a[MAX_BLOCK_SIGOPS + 3] = 0xff
    a[MAX_BLOCK_SIGOPS + 4] = 0xff
    tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a))
    b74 = self.update_block(74, [tx])
    self.send_blocks([b74], False, 'bad-blk-sigops', reconnect=True)
    self.move_tip(72)
    b75 = self.next_block(75)
    size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
    a = bytearray([OP_CHECKSIG] * size)
    a[MAX_BLOCK_SIGOPS - 1] = 0x4e
    a[MAX_BLOCK_SIGOPS] = 0xff
    a[MAX_BLOCK_SIGOPS + 1] = 0xff
    a[MAX_BLOCK_SIGOPS + 2] = 0xff
    a[MAX_BLOCK_SIGOPS + 3] = 0xff
    tx = self.create_and_sign_transaction(out[22].tx, 0, 1, CScript(a))
    b75 = self.update_block(75, [tx])
    self.send_blocks([b75], True)
    self.save_spendable_output()
    # Check that if we push an element filled with CHECKSIGs, they are not counted
    self.move_tip(75)
    b76 = self.next_block(76)
    size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
    a = bytearray([OP_CHECKSIG] * size)
    a[MAX_BLOCK_SIGOPS - 1] = 0x4e  # PUSHDATA4, but leave the following bytes as just checksigs
    tx = self.create_and_sign_transaction(out[23].tx, 0, 1, CScript(a))
    b76 = self.update_block(76, [tx])
    self.send_blocks([b76], True)
    self.save_spendable_output()
    # Test transaction resurrection
    #
    # -> b77 (24) -> b78 (25) -> b79 (26)
    #            \-> b80 (25) -> b81 (26) -> b82 (27)
    #
    # b78 creates a tx, which is spent in b79. After b82, both should be in mempool
    #
    # The tx'es must be unsigned and pass the node's mempool policy.  It is unsigned for the
    # rather obscure reason that the Python signature code does not distinguish between
    # Low-S and High-S values (whereas the trumpcoin code has custom code which does so);
    # as a result of which, the odds are 50% that the python code will use the right
    # value and the transaction will be accepted into the mempool. Until we modify the
    # test framework to support low-S signing, we are out of luck.
    #
    # To get around this issue, we construct transactions which are not signed and which
    # spend to OP_TRUE.  If the standard-ness rules change, this test would need to be
    # updated.  (Perhaps to spend to a P2SH OP_TRUE script)
    self.log.info("Test transaction resurrection during a re-org")
    self.move_tip(76)
    b77 = self.next_block(77)
    tx77 = self.create_and_sign_transaction(out[24].tx, out[24].n, 10 * COIN)
    b77 = self.update_block(77, [tx77])
    self.send_blocks([b77], True)
    self.save_spendable_output()
    b78 = self.next_block(78)
    tx78 = self.create_tx(tx77, 0, 9 * COIN)
    b78 = self.update_block(78, [tx78])
    self.send_blocks([b78], True)
    b79 = self.next_block(79)
    tx79 = self.create_tx(tx78, 0, 8 * COIN)
    b79 = self.update_block(79, [tx79])
    self.send_blocks([b79], True)
    # mempool should be empty
    assert_equal(len(self.nodes[0].getrawmempool()), 0)
    self.move_tip(77)
    b80 = self.next_block(80, spend=out[25])
    self.send_blocks([b80], False)
    self.save_spendable_output()
    b81 = self.next_block(81, spend=out[26])
    self.send_blocks([b81], False)  # other chain is same length
    self.save_spendable_output()
    b82 = self.next_block(82, spend=out[27])
    self.send_blocks([b82], True)  # now this chain is longer, triggers re-org
    self.save_spendable_output()
    # now check that tx78 and tx79 have been put back into the peer's mempool
    mempool = self.nodes[0].getrawmempool()
    assert_equal(len(mempool), 2)
    assert(tx78.hash in mempool)
    assert(tx79.hash in mempool)
    self.log.info("Accept a block with invalid opcodes in dead execution paths")
    b83 = self.next_block(83)
    op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
    script = CScript(op_codes)
    tx1 = self.create_and_sign_transaction(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
    tx2 = self.create_and_sign_transaction(tx1, 0, 0, CScript([OP_TRUE]))
    tx2.vin[0].scriptSig = CScript([OP_FALSE])
    tx2.rehash()
    b83 = self.update_block(83, [tx1, tx2])
    self.send_blocks([b83], True)
    self.save_spendable_output()
    self.log.info("Test re-orging blocks with OP_RETURN in them")
    b84 = self.next_block(84)
    tx1 = self.create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
    tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
    tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
    tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
    tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
    tx1.calc_sha256()
    self.sign_tx(tx1, out[29].tx, out[29].n)
    tx1.rehash()
    tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN]))
    tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
    tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN]))
    tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
    tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE]))
    tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
    tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN]))
    b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
    self.send_blocks([b84], True)
    self.save_spendable_output()
    self.move_tip(83)
    b85 = self.next_block(85, spend=out[29])
    self.send_blocks([b85], False)
    b86 = self.next_block(86, spend=out[30])
    self.send_blocks([b86], True)
    self.move_tip(84)
    b87 = self.next_block(87, spend=out[30])
    self.send_blocks([b87], False)
    self.save_spendable_output()
    b88 = self.next_block(88, spend=out[31])
    self.send_blocks([b88], True)
    self.save_spendable_output()
    b89a = self.next_block("89a", spend=out[32])
    tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
    b89a = self.update_block("89a", [tx])
    self.send_blocks([b89a], False, 'bad-txns-inputs-missingorspent', reconnect=True)
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
    """Create an unsigned transaction spending output *n* of *spend_tx*.

    The input scriptSig is left empty (b""); *script* becomes the
    scriptPubKey of the single output of value *value*.
    """
    return create_transaction(spend_tx, n, b"", value, script)
def sign_tx(self, tx, spend_tx, n):
    """Sign input 0 of *tx* against output *n* of *spend_tx*.

    Outputs whose scriptPubKey begins with OP_TRUE are anyone-can-spend,
    so an empty scriptSig suffices; otherwise a SIGHASH_ALL signature
    with the coinbase key is attached.
    """
    spk = bytearray(spend_tx.vout[n].scriptPubKey)
    if spk[0] == OP_TRUE:
        # Anyone-can-spend output: no signature needed.
        tx.vin[0].scriptSig = CScript()
        return
    sighash, _err = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
    signature = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
    tx.vin[0].scriptSig = CScript([signature])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
    """Create, sign, and rehash a transaction spending output *n* of *spend_tx*."""
    tx = self.create_tx(spend_tx, n, value, script)
    self.sign_tx(tx, spend_tx, n)
    tx.rehash()
    return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
    """Build block *number* on top of the current tip and make it the tip.

    If *spend* is given, the block also contains one transaction spending
    that output (all but 1 satoshi of its value is folded into the
    coinbase). The block is solved unless ``solve=False``.
    """
    if self.tip is None:
        base_block_hash = self.genesis_hash
        block_time = int(time.time()) + 1
    else:
        base_block_hash = self.tip.sha256
        block_time = self.tip.nTime + 1
    height = self.block_heights[base_block_hash] + 1

    coinbase = create_coinbase(height, self.coinbase_pubkey)
    coinbase.vout[0].nValue += additional_coinbase_value
    if spend is not None:
        # All but one satoshi of the spent output goes to the coinbase.
        coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1
    coinbase.rehash()

    block = create_block(base_block_hash, coinbase, block_time)
    if spend is not None:
        # Spend 1 satoshi to the requested script.
        spend_tx = create_transaction(spend.tx, spend.n, b"", 1, script)
        self.sign_tx(spend_tx, spend.tx, spend.n)
        self.add_transactions_to_block(block, [spend_tx])
        block.hashMerkleRoot = block.calc_merkle_root()
    if solve:
        block.solve()

    self.tip = block
    self.block_heights[block.sha256] = height
    assert number not in self.blocks
    self.blocks[number] = block
    return block
def save_spendable_output(self):
    """Remember the current tip block so its coinbase can be spent later."""
    self.log.debug("saving spendable output %s" % self.tip.vtx[0])
    self.spendable_outputs.append(self.tip)
def get_spendable_output(self):
    """Pop the oldest saved block and return its coinbase output 0."""
    oldest = self.spendable_outputs.pop(0)
    self.log.debug("getting spendable output %s" % oldest.vtx[0])
    return PreviousSpendableOutput(oldest.vtx[0], 0)
def move_tip(self, number):
    """Point the tip at a previously built block (used to set up reorgs)."""
    self.tip = self.blocks[number]
def update_block(self, block_number, new_transactions):
    """Append *new_transactions* to a stored block, re-solve it, and
    make it the tip.

    The height bookkeeping is re-keyed from the old block hash to the
    new one when appending the transactions changed the block hash.
    """
    block = self.blocks[block_number]
    self.add_transactions_to_block(block, new_transactions)
    old_sha256 = block.sha256
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    # Update internal state just like next_block does.
    self.tip = block
    if block.sha256 != old_sha256:
        # Move the height entry to the new hash.
        self.block_heights[block.sha256] = self.block_heights.pop(old_sha256)
    self.blocks[block_number] = block
    return block
def reconnect_p2p(self):
    """Tear down and re-establish the P2P connection to node 0.

    The node may disconnect us after a bad block; this restores a fresh
    connection so the next test step can proceed.
    """
    self.nodes[0].disconnect_p2ps()
    self.nodes[0].add_p2p_connection(P2PDataStore())
    self.nodes[0].p2p.wait_for_verack()
def send_blocks(self, blocks, success=True, reject_reason=None, reconnect=False, timeout=60):
    """Send *blocks* to node 0 over P2P and verify the expected outcome.

    success: whether the final block is expected to be accepted as tip.
    reject_reason: expected rejection message, if any.
    reconnect: set when the node is expected to disconnect us; the P2P
        connection is then re-established afterwards.
    """
    self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, expect_disconnect=reconnect, timeout=timeout)
    if reconnect:
        self.reconnect_p2p()
def create_sized_block(self, block, spend, block_size):
    """Grow *block* to approximately *block_size* serialized bytes.

    A fan-out transaction (tx1) spending *spend* creates one 1-satoshi
    OP_TRUE output per filler transaction; filler transactions with
    large zero-byte script outputs are then appended until the target
    size is reached.

    NOTE(review): the constants 150000 / 149999 / 141 / 138 appear to be
    per-transaction serialized-size allowances and overheads — TODO
    confirm against the serialization format.
    """
    # One filler tx per ~150 kB of target size.
    num_of_txes = int(block_size // 150000) + 1
    # Fold the spent value (minus 1 satoshi per filler output) into the coinbase.
    block.vtx[0].vout[0].nValue += spend.tx.vout[spend.n].nValue - num_of_txes
    block.vtx[0].rehash()
    tx1 = CTransaction()
    tx1.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff))
    # One 1-satoshi anyone-can-spend output per filler transaction.
    tx1.vout = [CTxOut(1, CScript([OP_TRUE]))] * num_of_txes
    tx1.calc_sha256()
    self.sign_tx(tx1, spend.tx, spend.n)
    self.add_transactions_to_block(block, [tx1])
    available_space = block_size - len(block.serialize())
    idx = 0
    # Append filler transactions while room (and fan-out outputs) remain.
    while available_space > 138 and idx < num_of_txes:
        tx = CTransaction()
        if 149999 < available_space:
            script_length = 149999 - 141
        else:
            script_length = available_space - 138
        script_output = CScript([b'\x00' * script_length])
        tx.vout.append(CTxOut(0, script_output))
        tx.vin.append(CTxIn(COutPoint(block.vtx[1].sha256, idx)))
        tx.calc_sha256()
        self.sign_tx(tx, spend.tx, spend.n)
        self.add_transactions_to_block(block, [tx])
        available_space = block_size - len(block.serialize())
        idx += 1
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    bsize = len(block.serialize())
    # Warn (do not fail) when we missed the target by more than one byte.
    if bsize not in [block_size - 1, block_size]:
        self.log.warning("Created block of size %d" % bsize)
    return block
# Script entry point.
if __name__ == '__main__':
    FullBlockTest().main()
| true
| true
|
1c403246a49c98f7fe3edc9d3278aaa3b9b09ed4
| 5,973
|
py
|
Python
|
sdks/python/apache_beam/testing/test_utils.py
|
kjmrknsn/beam
|
6a6adc8433deff10a5594bbf77cc9148ce0a951a
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/testing/test_utils.py
|
kjmrknsn/beam
|
6a6adc8433deff10a5594bbf77cc9148ce0a951a
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/testing/test_utils.py
|
kjmrknsn/beam
|
6a6adc8433deff10a5594bbf77cc9148ce0a951a
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility methods for testing
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import hashlib
import imp
import os
import shutil
import tempfile
from builtins import object
from mock import Mock
from mock import patch
from apache_beam.io.filesystems import FileSystems
from apache_beam.utils import retry
DEFAULT_HASHING_ALG = 'sha1'
class TempDir(object):
    """Context manager owning a temporary directory, removed on exit."""

    def __init__(self):
        self._tempdir = tempfile.mkdtemp()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Tolerate the directory having been removed inside the block.
        if os.path.exists(self._tempdir):
            shutil.rmtree(self._tempdir)

    def get_path(self):
        """Return the path to the temporary directory."""
        return self._tempdir

    def create_temp_file(self, suffix='', lines=None):
        """Create a temporary file in the managed directory.

        Args:
          suffix (str): The filename suffix of the temporary file (e.g. '.txt')
          lines (List[str]): A list of lines that will be written to the
            temporary file.

        Returns:
          The name of the temporary file created.
        """
        with tempfile.NamedTemporaryFile(
                delete=False, dir=self._tempdir, suffix=suffix) as handle:
            for line in (lines or []):
                handle.write(line)
            return handle.name
def compute_hash(content, hashing_alg=DEFAULT_HASHING_ALG):
    """Compute a hash of a list of objects via their sorted string
    representations (so the result is order-independent)."""
    encoded = [x if isinstance(x, bytes) else str(x).encode('utf-8')
               for x in content]
    digest = hashlib.new(hashing_alg)
    for item in sorted(encoded):
        digest.update(item)
    return digest.hexdigest()
def patch_retry(testcase, module):
    """Patch the retry decorator used by *module* with mock clock/logger.

    Replacing the clock and logger defined in the retry decorator skips
    the sleep phase when a retry happens during tests.

    Args:
      testcase: An instance of unittest.TestCase that calls this function
        to patch the retry module.
      module: The module that uses retry and needs to be reloaded with the
        mock clock and logger in place.
    """
    unpatched_decorator = retry.with_exponential_backoff

    def _mock_backed_decorator(num_retries, retry_filter):
        """Delegate to the real decorator, injecting Mock clock/logger."""
        return unpatched_decorator(
            num_retries=num_retries,
            retry_filter=retry_filter,
            logger=Mock(),
            clock=Mock())

    patch.object(retry, 'with_exponential_backoff',
                 side_effect=_mock_backed_decorator).start()
    # The module binds the decorator at import time, so reload it now that
    # the patch is active.
    imp.reload(module)

    def remove_patches():
        patch.stopall()
        # Reload once more so the module picks the real decorator back up.
        imp.reload(module)

    testcase.addCleanup(remove_patches)
@retry.with_exponential_backoff(
    num_retries=3,
    retry_filter=retry.retry_on_beam_io_error_filter)
def delete_files(file_paths):
    """Delete files or directories using ``FileSystems``.

    Glob is supported in file paths and directories are deleted
    recursively.

    Args:
      file_paths: A list of strings containing file paths or directories.
    """
    if not file_paths:
        raise RuntimeError('Clean up failed. Invalid file path: %s.' %
                           file_paths)
    FileSystems.delete(file_paths)
def cleanup_subscriptions(sub_client, subs):
    """Delete each PubSub subscription in *subs* via *sub_client*."""
    for subscription in subs:
        sub_client.delete_subscription(subscription.name)
def cleanup_topics(pub_client, topics):
    """Delete each PubSub topic in *topics* via *pub_client*."""
    for topic in topics:
        pub_client.delete_topic(topic.name)
class PullResponseMessage(object):
    """Data representing a pull request response.

    Utility class for ``create_pull_response``.
    """
    def __init__(self, data, attributes=None,
                 publish_time_secs=None, publish_time_nanos=None, ack_id=None):
        # Message payload, copied into the response message's data field.
        self.data = data
        # Optional mapping of message attributes.
        self.attributes = attributes
        # Optional publish timestamp, split into seconds and nanoseconds.
        self.publish_time_secs = publish_time_secs
        self.publish_time_nanos = publish_time_nanos
        # Optional acknowledgement id of the received message.
        self.ack_id = ack_id
def create_pull_response(responses):
    """Create an instance of ``google.cloud.pubsub.types.PullResponse``.

    Used to simulate the response from pubsub.SubscriberClient().pull().

    Args:
      responses: list of ``PullResponseMessage``

    Returns:
      A ``google.cloud.pubsub.types.PullResponse`` populated with the
      given responses.
    """
    from google.cloud import pubsub

    result = pubsub.types.PullResponse()
    for item in responses:
        received = result.received_messages.add()
        msg = received.message
        msg.data = item.data
        # Only populate the optional fields that were actually supplied.
        if item.attributes is not None:
            for key, value in item.attributes.items():
                msg.attributes[key] = value
        if item.publish_time_secs is not None:
            msg.publish_time.seconds = item.publish_time_secs
        if item.publish_time_nanos is not None:
            msg.publish_time.nanos = item.publish_time_nanos
        if item.ack_id is not None:
            received.ack_id = item.ack_id
    return result
| 29.865
| 78
| 0.729784
|
from __future__ import absolute_import
import hashlib
import imp
import os
import shutil
import tempfile
from builtins import object
from mock import Mock
from mock import patch
from apache_beam.io.filesystems import FileSystems
from apache_beam.utils import retry
DEFAULT_HASHING_ALG = 'sha1'
class TempDir(object):
def __init__(self):
self._tempdir = tempfile.mkdtemp()
def __enter__(self):
return self
def __exit__(self, *args):
if os.path.exists(self._tempdir):
shutil.rmtree(self._tempdir)
def get_path(self):
return self._tempdir
def create_temp_file(self, suffix='', lines=None):
with tempfile.NamedTemporaryFile(
delete=False, dir=self._tempdir, suffix=suffix) as f:
if lines:
for line in lines:
f.write(line)
return f.name
def compute_hash(content, hashing_alg=DEFAULT_HASHING_ALG):
content = [str(x).encode('utf-8') if not isinstance(x, bytes) else x
for x in content]
content.sort()
m = hashlib.new(hashing_alg)
for elem in content:
m.update(elem)
return m.hexdigest()
def patch_retry(testcase, module):
real_retry_with_exponential_backoff = retry.with_exponential_backoff
def patched_retry_with_exponential_backoff(num_retries, retry_filter):
return real_retry_with_exponential_backoff(
num_retries=num_retries, retry_filter=retry_filter, logger=Mock(),
clock=Mock())
patch.object(retry, 'with_exponential_backoff',
side_effect=patched_retry_with_exponential_backoff).start()
imp.reload(module)
def remove_patches():
patch.stopall()
imp.reload(module)
testcase.addCleanup(remove_patches)
@retry.with_exponential_backoff(
num_retries=3,
retry_filter=retry.retry_on_beam_io_error_filter)
def delete_files(file_paths):
if len(file_paths) == 0:
raise RuntimeError('Clean up failed. Invalid file path: %s.' %
file_paths)
FileSystems.delete(file_paths)
def cleanup_subscriptions(sub_client, subs):
for sub in subs:
sub_client.delete_subscription(sub.name)
def cleanup_topics(pub_client, topics):
for topic in topics:
pub_client.delete_topic(topic.name)
class PullResponseMessage(object):
def __init__(self, data, attributes=None,
publish_time_secs=None, publish_time_nanos=None, ack_id=None):
self.data = data
self.attributes = attributes
self.publish_time_secs = publish_time_secs
self.publish_time_nanos = publish_time_nanos
self.ack_id = ack_id
def create_pull_response(responses):
from google.cloud import pubsub
res = pubsub.types.PullResponse()
for response in responses:
received_message = res.received_messages.add()
message = received_message.message
message.data = response.data
if response.attributes is not None:
for k, v in response.attributes.items():
message.attributes[k] = v
if response.publish_time_secs is not None:
message.publish_time.seconds = response.publish_time_secs
if response.publish_time_nanos is not None:
message.publish_time.nanos = response.publish_time_nanos
if response.ack_id is not None:
received_message.ack_id = response.ack_id
return res
| true
| true
|
1c4032ade22a1b70e4e3c0cd00453e5ed6e4717d
| 596
|
py
|
Python
|
test/animatedledstrip/test_animation_info.py
|
AnimatedLEDStrip/client-python
|
0502b1b70faf6ce9b9ff7b53ee4740372fccb4c3
|
[
"MIT"
] | null | null | null |
test/animatedledstrip/test_animation_info.py
|
AnimatedLEDStrip/client-python
|
0502b1b70faf6ce9b9ff7b53ee4740372fccb4c3
|
[
"MIT"
] | null | null | null |
test/animatedledstrip/test_animation_info.py
|
AnimatedLEDStrip/client-python
|
0502b1b70faf6ce9b9ff7b53ee4740372fccb4c3
|
[
"MIT"
] | null | null | null |
from animatedledstrip import AnimationInfo
def test_constructor():
    """A default-constructed AnimationInfo has empty/zero/False fields."""
    info = AnimationInfo()
    for attr in ('name', 'abbr', 'description'):
        assert getattr(info, attr) == ''
    for attr in ('run_count_default', 'minimum_colors'):
        assert getattr(info, attr) == 0
    assert info.unlimited_colors is False
    for attr in ('dimensionality', 'int_params', 'double_params',
                 'string_params', 'location_params', 'distance_params',
                 'rotation_params', 'equation_params'):
        assert getattr(info, attr) == []
| 28.380952
| 42
| 0.671141
|
from animatedledstrip import AnimationInfo
def test_constructor():
info = AnimationInfo()
assert info.name == ''
assert info.abbr == ''
assert info.description == ''
assert info.run_count_default == 0
assert info.minimum_colors == 0
assert info.unlimited_colors is False
assert info.dimensionality == []
assert info.int_params == []
assert info.double_params == []
assert info.string_params == []
assert info.location_params == []
assert info.distance_params == []
assert info.rotation_params == []
assert info.equation_params == []
| true
| true
|
1c4034ad05f99a26315901a18c8f402d5fa64c10
| 10,357
|
py
|
Python
|
tools/manifest/item.py
|
kosta111121/wpt
|
a9e454c8001472320dc3f049f6180427256a44dc
|
[
"BSD-3-Clause"
] | 1
|
2019-12-16T19:31:40.000Z
|
2019-12-16T19:31:40.000Z
|
tools/manifest/item.py
|
kosta111121/wpt
|
a9e454c8001472320dc3f049f6180427256a44dc
|
[
"BSD-3-Clause"
] | null | null | null |
tools/manifest/item.py
|
kosta111121/wpt
|
a9e454c8001472320dc3f049f6180427256a44dc
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import copy
from inspect import isabstract
from six import iteritems, with_metaclass
from six.moves.urllib.parse import urljoin, urlparse
from abc import ABCMeta, abstractproperty
from .utils import to_os_path
MYPY = False
if MYPY:
# MYPY is set to True when run under Mypy.
from typing import Optional
from typing import Text
from typing import Dict
from typing import Tuple
from typing import List
from typing import Union
from typing import Type
from typing import Any
from typing import Sequence
from typing import Hashable
from .manifest import Manifest
Fuzzy = Dict[Optional[Tuple[Text, Text, Text]], List[int]]
item_types = {} # type: Dict[str, Type[ManifestItem]]
class ManifestItemMeta(ABCMeta):
    """Custom metaclass that registers all the subclasses in the
    item_types dictionary according to the value of their item_type
    attribute, and otherwise behaves like an ABCMeta."""
    def __new__(cls, name, bases, attrs):
        # type: (Type[ManifestItemMeta], str, Tuple[ManifestItemMeta, ...], Dict[str, Any]) -> ManifestItemMeta
        rv = super(ManifestItemMeta, cls).__new__(cls, name, bases, attrs)
        if not isabstract(rv):
            # Only concrete item classes are registered for lookup by type tag.
            assert issubclass(rv, ManifestItem)
            assert isinstance(rv.item_type, str)
            item_types[rv.item_type] = rv

        return rv  # type: ignore
class ManifestItem(with_metaclass(ManifestItemMeta)):
    """Abstract base for all manifest entries; ties a file path under the
    tests root to an id, and defines equality/hashing on (item_type, id)."""

    __slots__ = ("_tests_root", "path")

    def __init__(self, tests_root, path):
        # type: (Text, Text) -> None
        self._tests_root = tests_root
        self.path = path

    @abstractproperty
    def id(self):
        # type: () -> Text
        """The test's id (usually its url)"""
        pass

    @abstractproperty
    def item_type(self):
        # type: () -> str
        """The item's type"""
        pass

    def key(self):
        # type: () -> Hashable
        """A unique identifier for the test"""
        return (self.item_type, self.id)

    def __eq__(self, other):
        # type: (Any) -> bool
        # Objects without a key() (unrelated types) compare unequal.
        if not hasattr(other, "key"):
            return False
        return bool(self.key() == other.key())

    def __hash__(self):
        # type: () -> int
        return hash(self.key())

    def __repr__(self):
        # type: () -> str
        return "<%s.%s id=%r, path=%r>" % (self.__module__, self.__class__.__name__, self.id, self.path)

    def to_json(self):
        # type: () -> Tuple[Any, ...]
        # Base items serialize to an empty tuple; subclasses add fields.
        return ()

    @classmethod
    def from_json(cls,
                  manifest,  # type: Manifest
                  path,  # type: Text
                  obj  # type: Any
                  ):
        # type: (...) -> ManifestItem
        path = to_os_path(path)
        tests_root = manifest.tests_root
        assert tests_root is not None
        return cls(tests_root, path)
class URLManifestItem(ManifestItem):
    """Manifest entry addressed by a URL (url_base + relative url)."""

    __slots__ = ("url_base", "_url", "_extras")

    def __init__(self,
                 tests_root,  # type: Text
                 path,  # type: Text
                 url_base,  # type: Text
                 url,  # type: Text
                 **extras  # type: Any
                 ):
        # type: (...) -> None
        super(URLManifestItem, self).__init__(tests_root, path)
        # url_base must be absolute, the url relative (joined in .url).
        assert url_base[0] == "/"
        self.url_base = url_base
        assert url[0] != "/"
        self._url = url
        self._extras = extras

    @property
    def id(self):
        # type: () -> Text
        return self.url

    @property
    def url(self):
        # type: () -> Text
        # we can outperform urljoin, because we know we just have path relative URLs
        if self.url_base == "/":
            return "/" + self._url
        return urljoin(self.url_base, self._url)

    @property
    def https(self):
        # type: () -> bool
        # Flags are the dotted filename components between the first and
        # last, e.g. "foo.https.html" -> {"https"}.
        flags = set(urlparse(self.url).path.rsplit("/", 1)[1].split(".")[1:-1])
        return ("https" in flags or "serviceworker" in flags)

    def to_json(self):
        # type: () -> Tuple[Text, Dict[Any, Any]]
        rv = (self._url, {})  # type: Tuple[Text, Dict[Any, Any]]
        return rv

    @classmethod
    def from_json(cls,
                  manifest,  # type: Manifest
                  path,  # type: Text
                  obj  # type: Tuple[Text, Dict[Any, Any]]
                  ):
        # type: (...) -> URLManifestItem
        path = to_os_path(path)
        url, extras = obj
        tests_root = manifest.tests_root
        assert tests_root is not None
        return cls(tests_root,
                   path,
                   manifest.url_base,
                   url,
                   **extras)
class TestharnessTest(URLManifestItem):
    """Manifest entry for a testharness.js test."""

    __slots__ = ()

    item_type = "testharness"

    @property
    def timeout(self):
        # type: () -> Optional[Text]
        return self._extras.get("timeout")

    @property
    def testdriver(self):
        # type: () -> Optional[Text]
        return self._extras.get("testdriver")

    @property
    def jsshell(self):
        # type: () -> Optional[Text]
        return self._extras.get("jsshell")

    @property
    def script_metadata(self):
        # type: () -> Optional[Text]
        return self._extras.get("script_metadata")

    def to_json(self):
        # type: () -> Tuple[Text, Dict[Text, Any]]
        # Only non-default extras are serialized, keeping the JSON compact.
        rv = super(TestharnessTest, self).to_json()
        if self.timeout is not None:
            rv[-1]["timeout"] = self.timeout
        if self.testdriver:
            rv[-1]["testdriver"] = self.testdriver
        if self.jsshell:
            rv[-1]["jsshell"] = True
        if self.script_metadata:
            rv[-1]["script_metadata"] = self.script_metadata
        return rv
class RefTestBase(URLManifestItem):
    """Shared implementation for reftest entries (RefTest / RefTestNode)."""

    __slots__ = ("references",)

    def __init__(self,
                 tests_root,  # type: Text
                 path,  # type: Text
                 url_base,  # type: Text
                 url,  # type: Text
                 references=None,  # type: Optional[List[Tuple[Text, Text]]]
                 **extras  # type: Any
                 ):
        super(RefTestBase, self).__init__(tests_root, path, url_base, url, **extras)
        # Default to a fresh list per instance (avoid a shared mutable default).
        if references is None:
            self.references = []  # type: List[Tuple[Text, Text]]
        else:
            self.references = references

    @property
    def timeout(self):
        # type: () -> Optional[Text]
        return self._extras.get("timeout")

    @property
    def viewport_size(self):
        # type: () -> Optional[Text]
        return self._extras.get("viewport_size")

    @property
    def dpi(self):
        # type: () -> Optional[Text]
        return self._extras.get("dpi")

    @property
    def fuzzy(self):
        # type: () -> Fuzzy
        # Fuzzy data may arrive in list form (from JSON); normalize it to a
        # dict keyed by an optional 3-element tuple.
        fuzzy = self._extras.get("fuzzy", {})  # type: Union[Fuzzy, List[Tuple[Optional[Sequence[Text]], List[int]]]]
        if not isinstance(fuzzy, list):
            return fuzzy

        rv = {}  # type: Fuzzy
        for k, v in fuzzy:  # type: Tuple[Optional[Sequence[Text]], List[int]]
            if k is None:
                key = None  # type: Optional[Tuple[Text, Text, Text]]
            else:
                # mypy types this as Tuple[Text, ...]
                assert len(k) == 3
                key = tuple(k)  # type: ignore
            rv[key] = v
        return rv

    def to_json(self):  # type: ignore
        # type: () -> Tuple[Text, List[Tuple[Text, Text]], Dict[Text, Any]]
        # Only non-default extras are serialized, keeping the JSON compact.
        rv = (self._url, self.references, {})  # type: Tuple[Text, List[Tuple[Text, Text]], Dict[Text, Any]]
        extras = rv[-1]
        if self.timeout is not None:
            extras["timeout"] = self.timeout
        if self.viewport_size is not None:
            extras["viewport_size"] = self.viewport_size
        if self.dpi is not None:
            extras["dpi"] = self.dpi
        if self.fuzzy:
            extras["fuzzy"] = list(iteritems(self.fuzzy))
        return rv

    @classmethod
    def from_json(cls,  # type: ignore
                  manifest,  # type: Manifest
                  path,  # type: Text
                  obj  # type: Tuple[Text, List[Tuple[Text, Text]], Dict[Any, Any]]
                  ):
        # type: (...) -> RefTestBase
        tests_root = manifest.tests_root
        assert tests_root is not None
        path = to_os_path(path)
        url, references, extras = obj
        return cls(tests_root,
                   path,
                   manifest.url_base,
                   url,
                   references,
                   **extras)

    def to_RefTest(self):
        # type: () -> RefTest
        # Reinterpret this instance as a RefTest without copying state twice;
        # safe because the subclasses add no slots.
        if type(self) == RefTest:
            assert isinstance(self, RefTest)
            return self
        rv = copy(self)
        rv.__class__ = RefTest
        assert isinstance(rv, RefTest)
        return rv

    def to_RefTestNode(self):
        # type: () -> RefTestNode
        # Reinterpret this instance as a RefTestNode (see to_RefTest).
        if type(self) == RefTestNode:
            assert isinstance(self, RefTestNode)
            return self
        rv = copy(self)
        rv.__class__ = RefTestNode
        assert isinstance(rv, RefTestNode)
        return rv
class RefTestNode(RefTestBase):
    """Reftest entry that participates in a reference chain."""
    __slots__ = ()

    item_type = "reftest_node"
class RefTest(RefTestBase):
    """Top-level reftest entry."""
    __slots__ = ()

    item_type = "reftest"
class ManualTest(URLManifestItem):
    """Manifest entry for a manually-run test."""
    __slots__ = ()

    item_type = "manual"
class ConformanceCheckerTest(URLManifestItem):
    """Manifest entry for a conformance-checker test."""
    __slots__ = ()

    item_type = "conformancechecker"
class VisualTest(URLManifestItem):
    """Manifest entry for a visual test."""
    __slots__ = ()

    item_type = "visual"
class CrashTest(URLManifestItem):
    """Manifest entry for a crashtest; never carries a custom timeout."""
    __slots__ = ()

    item_type = "crashtest"

    @property
    def timeout(self):
        # type: () -> Optional[Text]
        return None
class WebDriverSpecTest(URLManifestItem):
    """Manifest entry for a WebDriver spec (wdspec) test."""
    __slots__ = ()

    item_type = "wdspec"

    @property
    def timeout(self):
        # type: () -> Optional[Text]
        return self._extras.get("timeout")

    def to_json(self):
        # type: () -> Tuple[Text, Dict[Text, Any]]
        # Serialize the timeout only when one is set.
        rv = super(WebDriverSpecTest, self).to_json()
        if self.timeout is not None:
            rv[-1]["timeout"] = self.timeout
        return rv
class SupportFile(ManifestItem):
    """Manifest entry for a non-test support file; its id is its path."""
    __slots__ = ()

    item_type = "support"

    @property
    def id(self):
        # type: () -> Text
        return self.path
| 27.991892
| 117
| 0.552283
|
from copy import copy
from inspect import isabstract
from six import iteritems, with_metaclass
from six.moves.urllib.parse import urljoin, urlparse
from abc import ABCMeta, abstractproperty
from .utils import to_os_path
MYPY = False
if MYPY:
from typing import Optional
from typing import Text
from typing import Dict
from typing import Tuple
from typing import List
from typing import Union
from typing import Type
from typing import Any
from typing import Sequence
from typing import Hashable
from .manifest import Manifest
Fuzzy = Dict[Optional[Tuple[Text, Text, Text]], List[int]]
item_types = {}
class ManifestItemMeta(ABCMeta):
def __new__(cls, name, bases, attrs):
rv = super(ManifestItemMeta, cls).__new__(cls, name, bases, attrs)
if not isabstract(rv):
assert issubclass(rv, ManifestItem)
assert isinstance(rv.item_type, str)
item_types[rv.item_type] = rv
return rv
class ManifestItem(with_metaclass(ManifestItemMeta)):
__slots__ = ("_tests_root", "path")
def __init__(self, tests_root, path):
self._tests_root = tests_root
self.path = path
@abstractproperty
def id(self):
pass
@abstractproperty
def item_type(self):
pass
def key(self):
return (self.item_type, self.id)
def __eq__(self, other):
if not hasattr(other, "key"):
return False
return bool(self.key() == other.key())
def __hash__(self):
return hash(self.key())
def __repr__(self):
return "<%s.%s id=%r, path=%r>" % (self.__module__, self.__class__.__name__, self.id, self.path)
def to_json(self):
return ()
@classmethod
def from_json(cls,
manifest,
path,
obj
):
path = to_os_path(path)
tests_root = manifest.tests_root
assert tests_root is not None
return cls(tests_root, path)
class URLManifestItem(ManifestItem):
__slots__ = ("url_base", "_url", "_extras")
def __init__(self,
tests_root,
path,
url_base,
url,
**extras
):
super(URLManifestItem, self).__init__(tests_root, path)
assert url_base[0] == "/"
self.url_base = url_base
assert url[0] != "/"
self._url = url
self._extras = extras
@property
def id(self):
return self.url
@property
def url(self):
if self.url_base == "/":
return "/" + self._url
return urljoin(self.url_base, self._url)
@property
def https(self):
flags = set(urlparse(self.url).path.rsplit("/", 1)[1].split(".")[1:-1])
return ("https" in flags or "serviceworker" in flags)
def to_json(self):
rv = (self._url, {})
return rv
@classmethod
def from_json(cls,
manifest,
path,
obj
):
path = to_os_path(path)
url, extras = obj
tests_root = manifest.tests_root
assert tests_root is not None
return cls(tests_root,
path,
manifest.url_base,
url,
**extras)
class TestharnessTest(URLManifestItem):
__slots__ = ()
item_type = "testharness"
@property
def timeout(self):
return self._extras.get("timeout")
@property
def testdriver(self):
return self._extras.get("testdriver")
@property
def jsshell(self):
return self._extras.get("jsshell")
@property
def script_metadata(self):
return self._extras.get("script_metadata")
def to_json(self):
rv = super(TestharnessTest, self).to_json()
if self.timeout is not None:
rv[-1]["timeout"] = self.timeout
if self.testdriver:
rv[-1]["testdriver"] = self.testdriver
if self.jsshell:
rv[-1]["jsshell"] = True
if self.script_metadata:
rv[-1]["script_metadata"] = self.script_metadata
return rv
class RefTestBase(URLManifestItem):
__slots__ = ("references",)
def __init__(self,
tests_root,
path,
url_base,
url,
references=None,
**extras
):
super(RefTestBase, self).__init__(tests_root, path, url_base, url, **extras)
if references is None:
self.references = []
else:
self.references = references
@property
def timeout(self):
return self._extras.get("timeout")
@property
def viewport_size(self):
return self._extras.get("viewport_size")
@property
def dpi(self):
return self._extras.get("dpi")
@property
def fuzzy(self):
fuzzy = self._extras.get("fuzzy", {})
if not isinstance(fuzzy, list):
return fuzzy
rv = {}
for k, v in fuzzy:
if k is None:
key = None
else:
assert len(k) == 3
key = tuple(k)
rv[key] = v
return rv
def to_json(self):
rv = (self._url, self.references, {})
extras = rv[-1]
if self.timeout is not None:
extras["timeout"] = self.timeout
if self.viewport_size is not None:
extras["viewport_size"] = self.viewport_size
if self.dpi is not None:
extras["dpi"] = self.dpi
if self.fuzzy:
extras["fuzzy"] = list(iteritems(self.fuzzy))
return rv
@classmethod
def from_json(cls,
manifest,
path,
obj
):
tests_root = manifest.tests_root
assert tests_root is not None
path = to_os_path(path)
url, references, extras = obj
return cls(tests_root,
path,
manifest.url_base,
url,
references,
**extras)
def to_RefTest(self):
if type(self) == RefTest:
assert isinstance(self, RefTest)
return self
rv = copy(self)
rv.__class__ = RefTest
assert isinstance(rv, RefTest)
return rv
def to_RefTestNode(self):
if type(self) == RefTestNode:
assert isinstance(self, RefTestNode)
return self
rv = copy(self)
rv.__class__ = RefTestNode
assert isinstance(rv, RefTestNode)
return rv
class RefTestNode(RefTestBase):
__slots__ = ()
item_type = "reftest_node"
class RefTest(RefTestBase):
__slots__ = ()
item_type = "reftest"
class ManualTest(URLManifestItem):
__slots__ = ()
item_type = "manual"
class ConformanceCheckerTest(URLManifestItem):
__slots__ = ()
item_type = "conformancechecker"
class VisualTest(URLManifestItem):
__slots__ = ()
item_type = "visual"
class CrashTest(URLManifestItem):
__slots__ = ()
item_type = "crashtest"
@property
def timeout(self):
return None
class WebDriverSpecTest(URLManifestItem):
__slots__ = ()
item_type = "wdspec"
@property
def timeout(self):
return self._extras.get("timeout")
def to_json(self):
rv = super(WebDriverSpecTest, self).to_json()
if self.timeout is not None:
rv[-1]["timeout"] = self.timeout
return rv
class SupportFile(ManifestItem):
__slots__ = ()
item_type = "support"
@property
def id(self):
return self.path
| true
| true
|
1c40351ffa2ec866a11e9370d900bba8e95acb5e
| 4,998
|
py
|
Python
|
tdvt/tdvt/tabquery.py
|
tabBhargav/connector-plugin-sdk
|
3a78d519c502260c6aa37654215dc0e8b6b93931
|
[
"MIT"
] | 80
|
2018-10-26T15:08:25.000Z
|
2022-03-29T04:44:53.000Z
|
tdvt/tdvt/tabquery.py
|
tabBhargav/connector-plugin-sdk
|
3a78d519c502260c6aa37654215dc0e8b6b93931
|
[
"MIT"
] | 531
|
2018-10-26T14:17:10.000Z
|
2022-03-31T23:24:29.000Z
|
tdvt/tdvt/tabquery.py
|
tabBhargav/connector-plugin-sdk
|
3a78d519c502260c6aa37654215dc0e8b6b93931
|
[
"MIT"
] | 85
|
2018-11-28T23:37:27.000Z
|
2022-03-31T21:01:27.000Z
|
import configparser
import os
import sys
from .resources import *
from .tabquery_path import TabQueryPath
tab_cli_exe = ''
def configure_tabquery_path():
    """Resolve the tabquery executable path into the module global.

    The TABQUERY_CLI_PATH environment variable takes precedence;
    otherwise the platform-specific entry from the tdvt ini file is used.
    """
    global tab_cli_exe
    env_path = os.environ.get('TABQUERY_CLI_PATH')
    if env_path:
        tab_cli_exe = env_path
        logging.info(
            "Tabquery path from TABQUERY_CLI_PATH environment variable is: {}"
            .format(tab_cli_exe)
        )
        return

    logging.info("TABQUERY_CLI_PATH environment variable not set. Trying ini files.")
    config = configparser.ConfigParser()
    tdvt_cfg = get_ini_path_local_first('config/tdvt', 'tdvt')
    logging.debug("Reading tdvt ini file [{}]".format(tdvt_cfg))
    config.read(tdvt_cfg)

    # Pick the ini key matching the current platform.
    if sys.platform.startswith("darwin"):
        ini_key = 'TAB_CLI_EXE_MAC'
    elif sys.platform.startswith("linux"):
        ini_key = 'TAB_CLI_EXE_LINUX'
    else:
        ini_key = 'TAB_CLI_EXE_X64'
    tab_cli_exe = config['DEFAULT'][ini_key]
    logging.debug("Reading tdvt ini file tabquerycli path is [{}]".format(tab_cli_exe))
def get_max_process_level_of_parallelization(desired_threads):
    """Cap the thread count: tabquerytool on macOS must run single-threaded."""
    if sys.platform.startswith("darwin") and 'tabquerytool' in tab_cli_exe:
        return 1
    return desired_threads
def build_tabquery_command_line(work):
    """Build the tabquery command line for *work*.

    Uses a ``TabqueryCommandLineExtension`` from the extensions directory
    when one is importable, otherwise falls back to the default
    ``TabqueryCommandLine``.

    Fixes over the previous version: the bare ``except:`` (which also
    swallowed KeyboardInterrupt/SystemExit) is narrowed to ``Exception``,
    and sys.path is restored in a ``finally`` so the extensions directory
    no longer leaks onto sys.path when the import fails.
    """
    sys.path.insert(0, get_extensions_dir())
    try:
        from extend_tabquery import TabqueryCommandLineExtension
        tb = TabqueryCommandLineExtension()
        logging.debug("Imported extension extend_tabquery")
    except Exception:
        # Best-effort: no (working) extension found, use the default builder.
        tb = TabqueryCommandLine()
    finally:
        # Always remove the extensions dir we pushed onto sys.path.
        sys.path.pop(0)
    return tb.build_tabquery_command_line(work)
def build_connectors_test_tabquery_command_line(conn_test_name, conn_test_file_name, conn_test_password_file):
    """Assemble the tabquery invocation for a connectors test."""
    cmdline = [
        tab_cli_exe,
        "--conn-test", conn_test_name,
        "--conn-test-file", conn_test_file_name,
    ]
    if conn_test_password_file:
        # Password file is optional.
        cmdline += ["--conn-test-password-file", conn_test_password_file]
    return cmdline
class TabqueryCommandLine(object):
    """Builds the tabquerycli command line; subclasses may extend it."""

    def extend_command_line(self, cmdline, work):
        # Hook for subclasses (e.g. the extend_tabquery extension) to append
        # extra arguments; the base implementation adds nothing.
        pass

    def build_tabquery_command_line(self, work):
        """Build the command line string for calling tabquerycli."""
        global tab_cli_exe
        cli_arg = "--query-file-list" if work.test_config.logical else "--expression-file-list"
        cmdline = [tab_cli_exe]
        # A tested run-time config may substitute its own tabquery binary.
        if work.test_config.tested_run_time_config is not None and work.test_config.tested_run_time_config.has_customized_tabquery_path():
            cmdline = [work.test_config.tested_run_time_config.tabquery_paths.get_path(sys.platform)]
        cmdline_base = [cli_arg, work.test_list_path]
        cmdline.extend(cmdline_base)
        tds_arg = ["-d", work.test_config.tds]
        cmdline.extend(tds_arg)
        cmdline.extend(["--combined"])
        # Password file is only passed when it actually exists on disk.
        password_file = work.test_set.get_password_file_name()
        if os.path.isfile(password_file):
            password_arg = ["--password-file", password_file]
            cmdline.extend(password_arg)
        if work.test_config.output_dir:
            cmdline.extend(["--output-dir", work.test_config.output_dir])
        # Save all the log files from the core Tableau process.
        cmdline.extend(["-DLogDir=" + work.test_config.log_dir])
        cmdline.extend(["-DOverride=ProtocolServerNewLog"])
        # Caller-supplied -D overrides, space-separated.
        if work.test_config.d_override:
            for override in work.test_config.d_override.split(' '):
                cmdline.extend([override])
        # Only add the default rewrite-disable flag when the caller did not
        # already supply a -DLogicalQueryRewriteDisable override above.
        logical_rewrite_iter = next((i for i in cmdline if i.find('-DLogicalQueryRewriteDisable') != -1), None)
        if logical_rewrite_iter == None:
            # Disable constant expression folding. This will bypass the VizEngine for certain simple calculations. This way we run a full database query
            # that tests what you would expect.
            cmdline.extend(["-DLogicalQueryRewriteDisable=Funcall:RewriteConstantFuncall"])
        # LogicalQuery cache can cache results across multiple expressions, and prevent
        # issuance of queries to the underlying database, so disable it.
        cmdline.extend(["-DInMemoryLogicalCacheDisable"])
        self.extend_command_line(cmdline, work)
        work.test_config.command_line = cmdline
        return cmdline
def tabquerycli_exists(tabquery_cli_path: TabQueryPath = None):
    """Return True if a tabquery binary can be found on disk.

    Checks the explicit `tabquery_cli_path` first (resolved for the current
    platform), then falls back to the module-level tab_cli_exe.
    """
    global tab_cli_exe
    if tabquery_cli_path:
        candidate = tabquery_cli_path.get_path(sys.platform)
        if os.path.isfile(candidate):
            logging.debug("Found tabquery at [{0}]".format(candidate))
            return True

    if not os.path.isfile(tab_cli_exe):
        logging.debug("Could not find tabquery at [{0}]".format(tab_cli_exe))
        return False
    logging.debug("Found tabquery at [{0}]".format(tab_cli_exe))
    return True
| 39.046875
| 151
| 0.690876
|
import configparser
import os
import sys
from .resources import *
from .tabquery_path import TabQueryPath
tab_cli_exe = ''
def configure_tabquery_path():
global tab_cli_exe
if os.environ.get('TABQUERY_CLI_PATH'):
tab_cli_exe = os.environ.get('TABQUERY_CLI_PATH')
logging.info(
"Tabquery path from TABQUERY_CLI_PATH environment variable is: {}"
.format(tab_cli_exe)
)
else:
logging.info("TABQUERY_CLI_PATH environment variable not set. Trying ini files.")
config = configparser.ConfigParser()
tdvt_cfg = get_ini_path_local_first('config/tdvt', 'tdvt')
logging.debug("Reading tdvt ini file [{}]".format(tdvt_cfg))
config.read(tdvt_cfg)
if sys.platform.startswith("darwin"):
tab_cli_exe = config['DEFAULT']['TAB_CLI_EXE_MAC']
elif sys.platform.startswith("linux"):
tab_cli_exe = config['DEFAULT']['TAB_CLI_EXE_LINUX']
else:
tab_cli_exe = config['DEFAULT']['TAB_CLI_EXE_X64']
logging.debug("Reading tdvt ini file tabquerycli path is [{}]".format(tab_cli_exe))
def get_max_process_level_of_parallelization(desired_threads):
if sys.platform.startswith("darwin") and 'tabquerytool' in tab_cli_exe:
return 1
return desired_threads
def build_tabquery_command_line(work):
try:
sys.path.insert(0, get_extensions_dir())
from extend_tabquery import TabqueryCommandLineExtension
sys.path.pop(0)
tb = TabqueryCommandLineExtension()
logging.debug("Imported extension extend_tabquery")
except:
tb = TabqueryCommandLine()
cmdline = tb.build_tabquery_command_line(work)
return cmdline
def build_connectors_test_tabquery_command_line(conn_test_name, conn_test_file_name, conn_test_password_file):
global tab_cli_exe
cmdline = [tab_cli_exe]
cmdline.extend(["--conn-test", conn_test_name])
cmdline.extend(["--conn-test-file", conn_test_file_name])
if conn_test_password_file:
cmdline.extend(["--conn-test-password-file", conn_test_password_file])
return cmdline
class TabqueryCommandLine(object):
def extend_command_line(self, cmdline, work):
pass
def build_tabquery_command_line(self, work):
global tab_cli_exe
cli_arg = "--query-file-list" if work.test_config.logical else "--expression-file-list"
cmdline = [tab_cli_exe]
if work.test_config.tested_run_time_config is not None and work.test_config.tested_run_time_config.has_customized_tabquery_path():
cmdline = [work.test_config.tested_run_time_config.tabquery_paths.get_path(sys.platform)]
cmdline_base = [cli_arg, work.test_list_path]
cmdline.extend(cmdline_base)
tds_arg = ["-d", work.test_config.tds]
cmdline.extend(tds_arg)
cmdline.extend(["--combined"])
password_file = work.test_set.get_password_file_name()
if os.path.isfile(password_file):
password_arg = ["--password-file", password_file]
cmdline.extend(password_arg)
if work.test_config.output_dir:
cmdline.extend(["--output-dir", work.test_config.output_dir])
cmdline.extend(["-DLogDir=" + work.test_config.log_dir])
cmdline.extend(["-DOverride=ProtocolServerNewLog"])
if work.test_config.d_override:
for override in work.test_config.d_override.split(' '):
cmdline.extend([override])
logical_rewrite_iter = next((i for i in cmdline if i.find('-DLogicalQueryRewriteDisable') != -1), None)
if logical_rewrite_iter == None:
cmdline.extend(["-DLogicalQueryRewriteDisable=Funcall:RewriteConstantFuncall"])
cmdline.extend(["-DInMemoryLogicalCacheDisable"])
self.extend_command_line(cmdline, work)
work.test_config.command_line = cmdline
return cmdline
def tabquerycli_exists(tabquery_cli_path: TabQueryPath = None):
global tab_cli_exe
if tabquery_cli_path:
resolved_path = tabquery_cli_path.get_path(sys.platform)
if os.path.isfile(resolved_path):
logging.debug("Found tabquery at [{0}]".format(resolved_path))
return True
if os.path.isfile(tab_cli_exe):
logging.debug("Found tabquery at [{0}]".format(tab_cli_exe))
return True
logging.debug("Could not find tabquery at [{0}]".format(tab_cli_exe))
return False
| true
| true
|
1c4036521e3762da652af83b989980f4698545e6
| 160
|
py
|
Python
|
_setup.py
|
reverendbedford/scikit-fem
|
bc57d968e56e6b89a99e35eac26ef7bc81b7a46a
|
[
"BSD-3-Clause"
] | 238
|
2018-01-28T11:11:55.000Z
|
2022-03-31T15:13:52.000Z
|
_setup.py
|
reverendbedford/scikit-fem
|
bc57d968e56e6b89a99e35eac26ef7bc81b7a46a
|
[
"BSD-3-Clause"
] | 559
|
2018-04-13T09:06:49.000Z
|
2022-03-22T12:59:25.000Z
|
_setup.py
|
reverendbedford/scikit-fem
|
bc57d968e56e6b89a99e35eac26ef7bc81b7a46a
|
[
"BSD-3-Clause"
] | 50
|
2018-04-13T07:02:28.000Z
|
2022-03-24T18:01:22.000Z
|
# Minimal setuptools packaging script for the scikit-fem distribution.
from setuptools import setup
# NOTE(review): only name, version, and runtime dependencies are declared
# here; other metadata (description, packages, ...) is presumably supplied
# elsewhere (e.g. setup.cfg) — confirm against the repository layout.
setup(
    name='scikit-fem',
    version='0.1.0.dev0',
    install_requires=['numpy', 'scipy', 'matplotlib', 'meshio>=4.0.4'],
)
| 20
| 71
| 0.6375
|
from setuptools import setup
setup(
name='scikit-fem',
version='0.1.0.dev0',
install_requires=['numpy', 'scipy', 'matplotlib', 'meshio>=4.0.4'],
)
| true
| true
|
1c4038d7593ed0769c4d455116cc1ffe6c117b27
| 614
|
py
|
Python
|
catalog/forms.py
|
choia/django-library-tutorial
|
a44fd078b6188bed095b7246316b3bc6ff696dd1
|
[
"MIT"
] | null | null | null |
catalog/forms.py
|
choia/django-library-tutorial
|
a44fd078b6188bed095b7246316b3bc6ff696dd1
|
[
"MIT"
] | null | null | null |
catalog/forms.py
|
choia/django-library-tutorial
|
a44fd078b6188bed095b7246316b3bc6ff696dd1
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from datetime import date, timedelta
from django import forms
class RenewBookForm(forms.Form):
    """Form for renewing a borrowed book's due date."""

    renewal_date = forms.DateField(help_text="Enter a date between today and 4 weeks (default 3).")

    def clean_renewal_date(self):
        """Validate that the renewal date falls between today and 4 weeks ahead.

        Raises ValidationError otherwise; returns the cleaned date.
        """
        data = self.cleaned_data['renewal_date']
        # Renewals cannot be back-dated.
        # Fixed typo in user-facing messages: "Invalidate date" -> "Invalid date".
        if data < date.today():
            raise ValidationError(_('Invalid date - renewal in past'))
        # The renewal window is capped at 4 weeks from today.
        if data > date.today() + timedelta(weeks=4):
            raise ValidationError(_('Invalid date - renewal more than 4 weeks ahead'))
        return data
| 32.315789
| 96
| 0.760586
|
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from datetime import date, timedelta
from django import forms
class RenewBookForm(forms.Form):
renewal_date = forms.DateField(help_text="Enter a date between today and 4 weeks (default 3).")
def clean_renewal_date(self):
data = self.cleaned_data['renewal_date']
if data < date.today():
raise ValidationError(_('Invalidate date - renewal in past'))
if data > date.today() + timedelta(weeks=4):
raise ValidationError(_('Invalidate date - renewal more than 4 weeks ahead'))
return data
| true
| true
|
1c4038f05efa89ab152a2e6b15a13fa3a2742bfb
| 66,095
|
py
|
Python
|
oscar/lib/python2.7/site-packages/whoosh/searching.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/whoosh/searching.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
oscar/lib/python2.7/site-packages/whoosh/searching.py
|
sainjusajan/django-oscar
|
466e8edc807be689b0a28c9e525c8323cc48b8e1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""This module contains classes and functions related to searching the index.
"""
from __future__ import division
import copy
import weakref
from math import ceil
from whoosh import classify, highlight, query, scoring
from whoosh.compat import iteritems, itervalues, iterkeys, xrange
from whoosh.idsets import DocIdSet, BitSet
from whoosh.reading import TermNotFound
from whoosh.util.cache import lru_cache
class NoTermsException(Exception):
    """Raised when matched terms are requested from a :class:`Results`
    object that was created without recording them. To make per-document
    term information available, pass ``terms=True`` to
    :meth:`Searcher.search`.
    """

    message = "Results were created without recording terms"
class TimeLimit(Exception):
    """Raised by :class:`TimeLimitedCollector` when the time limit expires
    before the search completes. If you hold a reference to the collector,
    partial results remain available via
    :meth:`TimeLimitedCollector.results`.
    """

    pass
# Context class
class SearchContext(object):
    """Carries per-search settings that collectors and query objects can
    consult to adjust how they operate.
    """

    def __init__(self, needs_current=False, weighting=None, top_query=None,
                 limit=0):
        """
        :param needs_current: when True, the matcher tree must stay "valid"
            and able to report on the current match (value, weight, etc.);
            collectors should then advance doc-by-doc instead of using
            shortcuts such as all_ids().
        :param weighting: the Weighting object used to score documents.
        :param top_query: reference to the top-level query object.
        :param limit: the number of results requested by the user.
        """
        self.needs_current = needs_current
        self.weighting = weighting
        self.top_query = top_query
        self.limit = limit

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.__dict__)

    def set(self, **kwargs):
        # Return a shallow copy with the given attributes overridden; the
        # original context is left untouched.
        updated = copy.copy(self)
        updated.__dict__.update(kwargs)
        return updated
# Searcher class
class Searcher(object):
"""Wraps an :class:`~whoosh.reading.IndexReader` object and provides
methods for searching the index.
"""
    def __init__(self, reader, weighting=scoring.BM25F, closereader=True,
                 fromindex=None, parent=None):
        """
        :param reader: An :class:`~whoosh.reading.IndexReader` object for
            the index to search.
        :param weighting: A :class:`whoosh.scoring.Weighting` object to use to
            score found documents.
        :param closereader: Whether the underlying reader will be closed when
            the searcher is closed.
        :param fromindex: An optional reference to the index of the underlying
            reader. This is required for :meth:`Searcher.up_to_date` and
            :meth:`Searcher.refresh` to work.
        :param parent: the parent Searcher when this instance wraps a single
            leaf reader of a multi-segment index; shares the parent's schema
            and caches.
        """
        self.ixreader = reader
        self.is_closed = False
        self._closereader = closereader
        self._ix = fromindex
        self._doccount = self.ixreader.doc_count_all()
        # Cache for PostingCategorizer objects (supports fields without columns)
        self._field_caches = {}
        if parent:
            # Hold the parent weakly to avoid a reference cycle between the
            # parent searcher and its sub-searchers; share its caches so IDF
            # and filter results are computed once globally.
            self.parent = weakref.ref(parent)
            self.schema = parent.schema
            self._idf_cache = parent._idf_cache
            self._filter_cache = parent._filter_cache
        else:
            self.parent = None
            self.schema = self.ixreader.schema
            self._idf_cache = {}
            self._filter_cache = {}
        # Accept either a Weighting class (instantiate it) or an instance.
        if type(weighting) is type:
            self.weighting = weighting()
        else:
            self.weighting = weighting
        self.leafreaders = None
        self.subsearchers = None
        if not self.ixreader.is_atomic():
            # Multi-segment index: build one sub-searcher per leaf reader,
            # each paired with its global document-number offset.
            self.leafreaders = self.ixreader.leaf_readers()
            self.subsearchers = [(self._subsearcher(r), offset) for r, offset
                                 in self.leafreaders]
        # Copy attributes/methods from wrapped reader
        for name in ("stored_fields", "all_stored_fields", "has_vector",
                     "vector", "vector_as", "lexicon", "field_terms",
                     "frequency", "doc_frequency", "term_info",
                     "doc_field_length", "corrector", "iter_docs"):
            setattr(self, name, getattr(self.ixreader, name))
    def __enter__(self):
        # Support "with searcher:" usage; the searcher closes itself on exit.
        return self

    def __exit__(self, *exc_info):
        self.close()

    def _subsearcher(self, reader):
        # Build a child Searcher over one leaf reader, sharing this
        # searcher's weighting and caches (via parent=self).
        return self.__class__(reader, fromindex=self._ix,
                              weighting=self.weighting, parent=self)

    def _offset_for_subsearcher(self, subsearcher):
        # Return the document-number offset of the given subsearcher, or
        # None (implicitly) when it is not one of ours.
        for ss, offset in self.subsearchers:
            if ss is subsearcher:
                return offset

    def leaf_searchers(self):
        # Return (searcher, docnum_offset) pairs covering the whole index;
        # a single (self, 0) pair for an atomic (single-segment) reader.
        if self.is_atomic():
            return [(self, 0)]
        else:
            return self.subsearchers

    def is_atomic(self):
        return self.reader().is_atomic()

    def has_parent(self):
        return self.parent is not None

    def get_parent(self):
        """Returns the parent of this searcher (if has_parent() is True), or
        else self.
        """
        if self.has_parent():
            # Call the weak reference to get the parent searcher
            return self.parent()
        else:
            return self

    def doc_count(self):
        """Returns the number of UNDELETED documents in the index.
        """
        return self.ixreader.doc_count()

    def doc_count_all(self):
        """Returns the total number of documents, DELETED OR UNDELETED, in
        the index.
        """
        return self._doccount

    def field_length(self, fieldname):
        # Field length is a global statistic, so delegate to the top-level
        # searcher when this is a sub-searcher.
        if self.has_parent():
            return self.get_parent().field_length(fieldname)
        else:
            return self.reader().field_length(fieldname)

    def max_field_length(self, fieldname):
        # Same delegation pattern as field_length().
        if self.has_parent():
            return self.get_parent().max_field_length(fieldname)
        else:
            return self.reader().max_field_length(fieldname)

    def up_to_date(self):
        """Returns True if this Searcher represents the latest version of the
        index, for backends that support versioning.
        """
        if not self._ix:
            raise Exception("No reference to index")
        return self._ix.latest_generation() == self.ixreader.generation()
    def refresh(self):
        """Returns a fresh searcher for the latest version of the index::

            my_searcher = my_searcher.refresh()

        If the index has not changed since this searcher was created, this
        searcher is simply returned.

        This method may CLOSE underlying resources that are no longer needed
        by the refreshed searcher, so you CANNOT continue to use the original
        searcher after calling ``refresh()`` on it.
        """
        if not self._ix:
            raise Exception("No reference to index")
        if self._ix.latest_generation() == self.reader().generation():
            return self
        # Get a new reader, re-using resources from the current reader if
        # possible
        self.is_closed = True
        newreader = self._ix.reader(reuse=self.ixreader)
        return self.__class__(newreader, fromindex=self._ix,
                              weighting=self.weighting)

    def close(self):
        # Close the underlying reader only when this searcher owns it
        # (the closereader flag from the constructor), then mark unusable.
        if self._closereader:
            self.ixreader.close()
        self.is_closed = True
def avg_field_length(self, fieldname, default=None):
if not self.schema[fieldname].scorable:
return default
return self.field_length(fieldname) / (self._doccount or 1)
def reader(self):
"""Returns the underlying :class:`~whoosh.reading.IndexReader`.
"""
return self.ixreader
def context(self, **kwargs):
"""Generates a :class:`SearchContext` for this searcher.
"""
if "weighting" not in kwargs:
kwargs["weighting"] = self.weighting
return SearchContext(**kwargs)
def boolean_context(self):
"""Shortcut returns a SearchContext set for unscored (boolean)
searching.
"""
return self.context(needs_current=False, weighting=None)
    def postings(self, fieldname, text, weighting=None, qf=1):
        """Returns a :class:`whoosh.matching.Matcher` for the postings of the
        given term. Unlike the :func:`whoosh.reading.IndexReader.postings`
        method, this method automatically sets the scoring functions on the
        matcher from the searcher's weighting object.
        """
        weighting = weighting or self.weighting
        globalscorer = weighting.scorer(self, fieldname, text, qf=qf)
        if self.is_atomic():
            # Single-segment index: one matcher scored with global statistics.
            return self.ixreader.postings(fieldname, text, scorer=globalscorer)
        else:
            from whoosh.matching import MultiMatcher

            # Multi-segment: collect one matcher per segment that actually
            # contains the term, remembering each segment's docnum offset.
            matchers = []
            docoffsets = []
            term = (fieldname, text)
            for subsearcher, offset in self.subsearchers:
                r = subsearcher.reader()
                if term in r:
                    # Make a segment-specific scorer; the scorer should call
                    # searcher.parent() to get global stats
                    scorer = weighting.scorer(subsearcher, fieldname, text, qf=qf)
                    m = r.postings(fieldname, text, scorer=scorer)
                    matchers.append(m)
                    docoffsets.append(offset)
            if not matchers:
                raise TermNotFound(fieldname, text)
            return MultiMatcher(matchers, docoffsets, globalscorer)
def idf(self, fieldname, text):
"""Calculates the Inverse Document Frequency of the current term (calls
idf() on the searcher's Weighting object).
"""
# This method just calls the Weighting object's idf() method, but
# caches the result. So Weighting objects should call *this* method
# which will then call *their own* idf() methods.
cache = self._idf_cache
term = (fieldname, text)
if term in cache:
return cache[term]
idf = self.weighting.idf(self, fieldname, text)
cache[term] = idf
return idf
def document(self, **kw):
"""Convenience method returns the stored fields of a document
matching the given keyword arguments, where the keyword keys are
field names and the values are terms that must appear in the field.
This method is equivalent to::
searcher.stored_fields(searcher.document_number(<keyword args>))
Where Searcher.documents() returns a generator, this function returns
either a dictionary or None. Use it when you assume the given keyword
arguments either match zero or one documents (i.e. at least one of the
fields is a unique key).
>>> stored_fields = searcher.document(path=u"/a/b")
>>> if stored_fields:
... print(stored_fields['title'])
... else:
... print("There is no document with the path /a/b")
"""
for p in self.documents(**kw):
return p
def documents(self, **kw):
"""Convenience method returns the stored fields of a document
matching the given keyword arguments, where the keyword keys are field
names and the values are terms that must appear in the field.
Returns a generator of dictionaries containing the stored fields of any
documents matching the keyword arguments. If you do not specify any
arguments (``Searcher.documents()``), this method will yield **all**
documents.
>>> for stored_fields in searcher.documents(emailto=u"matt@whoosh.ca"):
... print("Email subject:", stored_fields['subject'])
"""
ixreader = self.ixreader
return (ixreader.stored_fields(docnum)
for docnum in self.document_numbers(**kw))
def _kw_to_text(self, kw):
for k, v in iteritems(kw):
field = self.schema[k]
kw[k] = field.to_bytes(v)
def _query_for_kw(self, kw):
subqueries = []
for key, value in iteritems(kw):
subqueries.append(query.Term(key, value))
if subqueries:
q = query.And(subqueries).normalize()
else:
q = query.Every()
return q
    def document_number(self, **kw):
        """Returns the document number of the document matching the given
        keyword arguments, where the keyword keys are field names and the
        values are terms that must appear in the field.

        >>> docnum = searcher.document_number(path=u"/a/b")

        Where Searcher.document_numbers() returns a generator, this function
        returns either an int or None. Use it when you assume the given keyword
        arguments either match zero or one documents (i.e. at least one of the
        fields is a unique key).

        :rtype: int
        """
        # In the common case where only one keyword was given, just use
        # first_id() instead of building a query.
        self._kw_to_text(kw)
        if len(kw) == 1:
            k, v = list(kw.items())[0]
            try:
                return self.reader().first_id(k, v)
            except TermNotFound:
                return None
        else:
            # Multiple keywords: build an And query and return the first
            # matching docnum; falls through to an implicit None when the
            # matcher has no matches.
            m = self._query_for_kw(kw).matcher(self, self.boolean_context())
            if m.is_active():
                return m.id()

    def document_numbers(self, **kw):
        """Returns a generator of the document numbers for documents matching
        the given keyword arguments, where the keyword keys are field names and
        the values are terms that must appear in the field. If you do not
        specify any arguments (``Searcher.document_numbers()``), this method
        will yield **all** document numbers.

        >>> docnums = list(searcher.document_numbers(emailto="matt@whoosh.ca"))
        """
        # Field values must be converted to bytes terms before querying.
        self._kw_to_text(kw)
        return self.docs_for_query(self._query_for_kw(kw))
def _find_unique(self, uniques):
# uniques is a list of ("unique_field_name", "field_value") tuples
delset = set()
for name, value in uniques:
docnum = self.document_number(**{name: value})
if docnum is not None:
delset.add(docnum)
return delset
def _query_to_comb(self, fq):
return BitSet(self.docs_for_query(fq), size=self.doc_count_all())
def _filter_to_comb(self, obj):
if obj is None:
return None
if isinstance(obj, (set, DocIdSet)):
c = obj
elif isinstance(obj, Results):
c = obj.docs()
elif isinstance(obj, ResultsPage):
c = obj.results.docs()
elif isinstance(obj, query.Query):
c = self._query_to_comb(obj)
else:
raise Exception("Don't know what to do with filter object %r"
% obj)
return c
def suggest(self, fieldname, text, limit=5, maxdist=2, prefix=0):
"""Returns a sorted list of suggested corrections for the given
mis-typed word ``text`` based on the contents of the given field::
>>> searcher.suggest("content", "specail")
["special"]
This is a convenience method. If you are planning to get suggestions
for multiple words in the same field, it is more efficient to get a
:class:`~whoosh.spelling.Corrector` object and use it directly::
corrector = searcher.corrector("fieldname")
for word in words:
print(corrector.suggest(word))
:param limit: only return up to this many suggestions. If there are not
enough terms in the field within ``maxdist`` of the given word, the
returned list will be shorter than this number.
:param maxdist: the largest edit distance from the given word to look
at. Numbers higher than 2 are not very effective or efficient.
:param prefix: require suggestions to share a prefix of this length
with the given word. This is often justifiable since most
misspellings do not involve the first letter of the word. Using a
prefix dramatically decreases the time it takes to generate the
list of words.
"""
c = self.reader().corrector(fieldname)
return c.suggest(text, limit=limit, maxdist=maxdist, prefix=prefix)
def key_terms(self, docnums, fieldname, numterms=5,
model=classify.Bo1Model, normalize=True):
"""Returns the 'numterms' most important terms from the documents
listed (by number) in 'docnums'. You can get document numbers for the
documents your interested in with the document_number() and
document_numbers() methods.
"Most important" is generally defined as terms that occur frequently in
the top hits but relatively infrequently in the collection as a whole.
>>> docnum = searcher.document_number(path=u"/a/b")
>>> keywords_and_scores = searcher.key_terms([docnum], "content")
This method returns a list of ("term", score) tuples. The score may be
useful if you want to know the "strength" of the key terms, however to
just get the terms themselves you can just do this:
>>> kws = [kw for kw, score in searcher.key_terms([docnum], "content")]
:param fieldname: Look at the terms in this field. This field must
store vectors.
:param docnums: A sequence of document numbers specifying which
documents to extract key terms from.
:param numterms: Return this number of important terms.
:param model: The classify.ExpansionModel to use. See the classify
module.
:param normalize: normalize the scores.
:returns: a list of ("term", score) tuples.
"""
expander = classify.Expander(self.ixreader, fieldname, model=model)
for docnum in docnums:
expander.add_document(docnum)
return expander.expanded_terms(numterms, normalize=normalize)
def key_terms_from_text(self, fieldname, text, numterms=5,
model=classify.Bo1Model, normalize=True):
"""Return the 'numterms' most important terms from the given text.
:param numterms: Return this number of important terms.
:param model: The classify.ExpansionModel to use. See the classify
module.
"""
expander = classify.Expander(self.ixreader, fieldname, model=model)
expander.add_text(text)
return expander.expanded_terms(numterms, normalize=normalize)
def more_like(self, docnum, fieldname, text=None, top=10, numterms=5,
model=classify.Bo1Model, normalize=False, filter=None):
"""Returns a :class:`Results` object containing documents similar to
the given document, based on "key terms" in the given field::
# Get the ID for the document you're interested in
docnum = search.document_number(path=u"/a/b/c")
r = searcher.more_like(docnum)
print("Documents like", searcher.stored_fields(docnum)["title"])
for hit in r:
print(hit["title"])
:param fieldname: the name of the field to use to test similarity.
:param text: by default, the method will attempt to load the contents
of the field from the stored fields for the document, or from a
term vector. If the field isn't stored or vectored in the index,
but you have access to the text another way (for example, loading
from a file or a database), you can supply it using the ``text``
parameter.
:param top: the number of results to return.
:param numterms: the number of "key terms" to extract from the hit and
search for. Using more terms is slower but gives potentially more
and more accurate results.
:param model: (expert) a :class:`whoosh.classify.ExpansionModel` to use
to compute "key terms".
:param normalize: whether to normalize term weights.
:param filter: a query, Results object, or set of docnums. The results
will only contain documents that are also in the filter object.
"""
if text:
kts = self.key_terms_from_text(fieldname, text, numterms=numterms,
model=model, normalize=normalize)
else:
kts = self.key_terms([docnum], fieldname, numterms=numterms,
model=model, normalize=normalize)
# Create an Or query from the key terms
q = query.Or([query.Term(fieldname, word, boost=weight)
for word, weight in kts])
return self.search(q, limit=top, filter=filter, mask=set([docnum]))
def search_page(self, query, pagenum, pagelen=10, **kwargs):
"""This method is Like the :meth:`Searcher.search` method, but returns
a :class:`ResultsPage` object. This is a convenience function for
getting a certain "page" of the results for the given query, which is
often useful in web search interfaces.
For example::
querystring = request.get("q")
query = queryparser.parse("content", querystring)
pagenum = int(request.get("page", 1))
pagelen = int(request.get("perpage", 10))
results = searcher.search_page(query, pagenum, pagelen=pagelen)
print("Page %d of %d" % (results.pagenum, results.pagecount))
print("Showing results %d-%d of %d"
% (results.offset + 1, results.offset + results.pagelen + 1,
len(results)))
for hit in results:
print("%d: %s" % (hit.rank + 1, hit["title"]))
(Note that results.pagelen might be less than the pagelen argument if
there aren't enough results to fill a page.)
Any additional keyword arguments you supply are passed through to
:meth:`Searcher.search`. For example, you can get paged results of a
sorted search::
results = searcher.search_page(q, 2, sortedby="date", reverse=True)
Currently, searching for page 100 with pagelen of 10 takes the same
amount of time as using :meth:`Searcher.search` to find the first 1000
results. That is, this method does not have any special optimizations
or efficiencies for getting a page from the middle of the full results
list. (A future enhancement may allow using previous page results to
improve the efficiency of finding the next page.)
This method will raise a ``ValueError`` if you ask for a page number
higher than the number of pages in the resulting query.
:param query: the :class:`whoosh.query.Query` object to match.
:param pagenum: the page number to retrieve, starting at ``1`` for the
first page.
:param pagelen: the number of results per page.
:returns: :class:`ResultsPage`
"""
if pagenum < 1:
raise ValueError("pagenum must be >= 1")
results = self.search(query, limit=pagenum * pagelen, **kwargs)
return ResultsPage(results, pagenum, pagelen)
def find(self, defaultfield, querystring, **kwargs):
from whoosh.qparser import QueryParser
qp = QueryParser(defaultfield, schema=self.ixreader.schema)
q = qp.parse(querystring)
return self.search(q, **kwargs)
def docs_for_query(self, q, for_deletion=False):
"""Returns an iterator of document numbers for documents matching the
given :class:`whoosh.query.Query` object.
"""
# If we're getting the document numbers so we can delete them, use the
# deletion_docs method instead of docs; this lets special queries
# (e.g. nested queries) override what gets deleted
if for_deletion:
method = q.deletion_docs
else:
method = q.docs
if self.subsearchers:
for s, offset in self.subsearchers:
for docnum in method(s):
yield docnum + offset
else:
for docnum in method(self):
yield docnum
    def collector(self, limit=10, sortedby=None, reverse=False, groupedby=None,
                  collapse=None, collapse_limit=1, collapse_order=None,
                  optimize=True, filter=None, mask=None, terms=False,
                  maptype=None, scored=True):
        """Low-level method: returns a configured
        :class:`whoosh.collectors.Collector` object based on the given
        arguments. You can use this object with
        :meth:`Searcher.search_with_collector` to search.

        See the documentation for the :meth:`Searcher.search` method for a
        description of the parameters.

        This method may be useful to get a basic collector object and then wrap
        it with another collector from ``whoosh.collectors`` or with a custom
        collector of your own::

            # Equivalent of
            # results = mysearcher.search(myquery, limit=10)
            # but with a time limit...

            # Create a TopCollector
            c = mysearcher.collector(limit=10)

            # Wrap it with a TimeLimitCollector with a time limit of
            # 10.5 seconds
            from whoosh.collectors import TimeLimitCollector
            c = TimeLimitCollector(c, 10.5)

            # Search using the custom collector
            results = mysearcher.search_with_collector(myquery, c)
        """
        from whoosh import collectors

        if limit is not None and limit < 1:
            raise ValueError("limit must be >= 1")

        # Choose the base collector depending on scoring/sorting needs
        if not scored and not sortedby:
            # Unscored, unsorted matching: the cheapest collector
            c = collectors.UnsortedCollector()
        elif sortedby:
            c = collectors.SortingCollector(sortedby, limit=limit,
                                            reverse=reverse)
        elif groupedby or reverse or not limit or limit >= self.doc_count():
            # A collector that gathers every matching document
            c = collectors.UnlimitedCollector(reverse=reverse)
        else:
            # A collector that uses block quality optimizations and a heap
            # queue to only collect the top N documents
            c = collectors.TopCollector(limit, usequality=optimize)

        # Wrap the base collector with optional feature collectors
        if groupedby:
            c = collectors.FacetCollector(c, groupedby, maptype=maptype)
        if terms:
            c = collectors.TermsCollector(c)
        if collapse:
            c = collectors.CollapseCollector(c, collapse, limit=collapse_limit,
                                             order=collapse_order)
        # Filtering wraps last so it sees the docs first
        if filter or mask:
            c = collectors.FilterCollector(c, filter, mask)

        return c
    def search(self, q, **kwargs):
        """Runs a :class:`whoosh.query.Query` object on this searcher and
        returns a :class:`Results` object. See :doc:`/searching` for more
        information.

        This method takes many keyword arguments (documented below).

        See :doc:`/facets` for information on using ``sortedby`` and/or
        ``groupedby``. See :ref:`collapsing` for more information on using
        ``collapse``, ``collapse_limit``, and ``collapse_order``.

        :param q: a :class:`whoosh.query.Query` object to use to match
            documents.
        :param limit: the maximum number of documents to score. If you're only
            interested in the top N documents, you can set limit=N to limit the
            scoring for a faster search. Default is 10.
        :param scored: whether to score the results. Overridden by ``sortedby``.
            If both ``scored=False`` and ``sortedby=None``, the results will be
            in arbitrary order, but will usually be computed faster than
            scored or sorted results.
        :param sortedby: see :doc:`/facets`.
        :param reverse: Reverses the direction of the sort. Default is False.
        :param groupedby: see :doc:`/facets`.
        :param optimize: use optimizations to get faster results when possible.
            Default is True.
        :param filter: a query, Results object, or set of docnums. The results
            will only contain documents that are also in the filter object.
        :param mask: a query, Results object, or set of docnums. The results
            will not contain any documents that are in the mask object.
        :param terms: if True, record which terms were found in each matching
            document. See :doc:`/searching` for more information. Default is
            False.
        :param maptype: by default, the results of faceting with ``groupedby``
            is a dictionary mapping group names to ordered lists of document
            numbers in the group. You can pass a
            :class:`whoosh.sorting.FacetMap` subclass to this keyword argument
            to specify a different (usually faster) method for grouping. For
            example, ``maptype=sorting.Count`` would store only the count of
            documents in each group, instead of the full list of document IDs.
        :param collapse: a :doc:`facet </facets>` to use to collapse the
            results. See :ref:`collapsing` for more information.
        :param collapse_limit: the maximum number of documents to allow with
            the same collapse key. See :ref:`collapsing` for more information.
        :param collapse_order: an optional ordering :doc:`facet </facets>`
            to control which documents are kept when collapsing. The default
            (``collapse_order=None``) uses the results order (e.g. the highest
            scoring documents in a scored search).
        :rtype: :class:`Results`
        """
        # Call the collector() method to build a collector based on the
        # parameters passed to this method
        c = self.collector(**kwargs)

        # Call the lower-level method to run the collector
        self.search_with_collector(q, c)

        # Return the results object from the collector
        return c.results()
    def search_with_collector(self, q, collector, context=None):
        """Low-level method: runs a :class:`whoosh.query.Query` object on this
        searcher using the given :class:`whoosh.collectors.Collector` object
        to collect the results::

            myquery = query.Term("content", "cabbage")

            uc = collectors.UnlimitedCollector()
            tc = TermsCollector(uc)

            mysearcher.search_with_collector(myquery, tc)
            print(tc.docterms)
            print(tc.results())

        Note that this method does not return a :class:`Results` object. You
        need to access the collector to get a results object or other
        information the collector might hold after the search.

        :param q: a :class:`whoosh.query.Query` object to use to match
            documents.
        :param collector: a :class:`whoosh.collectors.Collector` object to feed
            the results into.
        :param context: an optional :class:`SearchContext`; if None, a default
            context is built with this searcher's weighting.
        """
        # Get the search context object from the searcher
        context = context or self.context()

        # Allow collector to set up based on the top-level information
        collector.prepare(self, q, context)

        # run() performs the search; results stay on the collector afterwards
        collector.run()
def correct_query(self, q, qstring, correctors=None, terms=None, maxdist=2,
prefix=0, aliases=None):
"""
Returns a corrected version of the given user query using a default
:class:`whoosh.spelling.ReaderCorrector`.
The default:
* Corrects any words that don't appear in the index.
* Takes suggestions from the words in the index. To make certain fields
use custom correctors, use the ``correctors`` argument to pass a
dictionary mapping field names to :class:`whoosh.spelling.Corrector`
objects.
Expert users who want more sophisticated correction behavior can create
a custom :class:`whoosh.spelling.QueryCorrector` and use that instead
of this method.
Returns a :class:`whoosh.spelling.Correction` object with a ``query``
attribute containing the corrected :class:`whoosh.query.Query` object
and a ``string`` attributes containing the corrected query string.
>>> from whoosh import qparser, highlight
>>> qtext = 'mary "litle lamb"'
>>> q = qparser.QueryParser("text", myindex.schema)
>>> mysearcher = myindex.searcher()
>>> correction = mysearcher().correct_query(q, qtext)
>>> correction.query
<query.And ...>
>>> correction.string
'mary "little lamb"'
>>> mysearcher.close()
You can use the ``Correction`` object's ``format_string`` method to
format the corrected query string using a
:class:`whoosh.highlight.Formatter` object. For example, you can format
the corrected string as HTML, emphasizing the changed words.
>>> hf = highlight.HtmlFormatter(classname="change")
>>> correction.format_string(hf)
'mary "<strong class="change term0">little</strong> lamb"'
:param q: the :class:`whoosh.query.Query` object to correct.
:param qstring: the original user query from which the query object was
created. You can pass None instead of a string, in which the
second item in the returned tuple will also be None.
:param correctors: an optional dictionary mapping fieldnames to
:class:`whoosh.spelling.Corrector` objects. By default, this method
uses the contents of the index to spell check the terms in the
query. You can use this argument to "override" some fields with a
different correct, for example a
:class:`whoosh.spelling.GraphCorrector`.
:param terms: a sequence of ``("fieldname", "text")`` tuples to correct
in the query. By default, this method corrects terms that don't
appear in the index. You can use this argument to override that
behavior and explicitly specify the terms that should be corrected.
:param maxdist: the maximum number of "edits" (insertions, deletions,
subsitutions, or transpositions of letters) allowed between the
original word and any suggestion. Values higher than ``2`` may be
slow.
:param prefix: suggested replacement words must share this number of
initial characters with the original word. Increasing this even to
just ``1`` can dramatically speed up suggestions, and may be
justifiable since spellling mistakes rarely involve the first
letter of a word.
:param aliases: an optional dictionary mapping field names in the query
to different field names to use as the source of spelling
suggestions. The mappings in ``correctors`` are applied after this.
:rtype: :class:`whoosh.spelling.Correction`
"""
reader = self.reader()
# Dictionary of field name alias mappings
if aliases is None:
aliases = {}
# Dictionary of custom per-field correctors
if correctors is None:
correctors = {}
# Remap correctors dict according to aliases
d = {}
for fieldname, corr in iteritems(correctors):
fieldname = aliases.get(fieldname, fieldname)
d[fieldname] = corr
correctors = d
# Fill in default corrector objects for fields that don't have a custom
# one in the "correctors" dictionary
fieldnames = self.schema.names()
for fieldname in fieldnames:
fieldname = aliases.get(fieldname, fieldname)
if fieldname not in correctors:
correctors[fieldname] = self.reader().corrector(fieldname)
# Get any missing terms in the query in the fields we're correcting
if terms is None:
terms = []
for token in q.all_tokens():
aname = aliases.get(token.fieldname, token.fieldname)
text = token.text
if aname in correctors and (aname, text) not in reader:
# Note that we use the original, not aliases fieldname here
# so if we correct the query we know what it was
terms.append((token.fieldname, token.text))
# Make q query corrector
from whoosh import spelling
sqc = spelling.SimpleQueryCorrector(correctors, terms, aliases)
return sqc.correct_query(q, qstring)
class Results(object):
    """This object is returned by a Searcher. This object represents the
    results of a search query. You can mostly use it as if it was a list of
    dictionaries, where each dictionary is the stored fields of the document at
    that position in the results.

    Note that a Results object keeps a reference to the Searcher that created
    it, so keeping a reference to a Results object keeps the Searcher alive and
    so keeps all files used by it open.
    """

    def __init__(self, searcher, q, top_n, docset=None, facetmaps=None,
                 runtime=0, highlighter=None):
        """
        :param searcher: the :class:`Searcher` object that produced these
            results.
        :param q: the original query that created these results.
        :param top_n: a list of (score, docnum) tuples representing the top
            N search results.
        """
        self.searcher = searcher
        self.q = q
        # List of (score, docnum) tuples in rank order
        self.top_n = top_n
        # Set of all matching docnums; may be None until computed lazily
        self.docset = docset
        self._facetmaps = facetmaps or {}
        self.runtime = runtime
        self.highlighter = highlighter or highlight.Highlighter()
        # Set by the collector after the search; used for counting
        self.collector = None
        # Cached total count of matching documents
        self._total = None
        self._char_cache = {}

    def __repr__(self):
        return "<Top %s Results for %r runtime=%s>" % (len(self.top_n),
                                                       self.q,
                                                       self.runtime)

    def __len__(self):
        """Returns the total number of documents that matched the query. Note
        this may be more than the number of scored documents, given the value
        of the ``limit`` keyword argument to :meth:`Searcher.search`.

        If this Results object was created by searching with a ``limit``
        keyword, then computing the exact length of the result set may be
        expensive for large indexes or large result sets. You may consider
        using :meth:`Results.has_exact_length`,
        :meth:`Results.estimated_length`, and
        :meth:`Results.estimated_min_length` to display an estimated size of
        the result set instead of an exact number.
        """
        if self._total is None:
            self._total = self.collector.count()
        return self._total

    def __getitem__(self, n):
        # Supports both slices (returns a list of Hits) and single indexes
        if isinstance(n, slice):
            start, stop, step = n.indices(len(self.top_n))
            return [Hit(self, self.top_n[i][1], i, self.top_n[i][0])
                    for i in xrange(start, stop, step)]
        else:
            if n >= len(self.top_n):
                raise IndexError("results[%r]: Results only has %s hits"
                                 % (n, len(self.top_n)))
            return Hit(self, self.top_n[n][1], n, self.top_n[n][0])

    def __iter__(self):
        """Yields a :class:`Hit` object for each result in ranked order.
        """
        for i in xrange(len(self.top_n)):
            yield Hit(self, self.top_n[i][1], i, self.top_n[i][0])

    def __contains__(self, docnum):
        """Returns True if the given document number matched the query.
        """
        return docnum in self.docs()

    def __nonzero__(self):
        return not self.is_empty()

    # Python 3 truthiness protocol
    __bool__ = __nonzero__

    def is_empty(self):
        """Returns True if no documents matched the query.
        """
        return self.scored_length() == 0

    def items(self):
        """Returns an iterator of (docnum, score) pairs for the scored
        documents in the results.
        """
        return ((docnum, score) for score, docnum in self.top_n)

    def fields(self, n):
        """Returns the stored fields for the document at the ``n`` th position
        in the results. Use :meth:`Results.docnum` if you want the raw
        document number instead of the stored fields.
        """
        return self.searcher.stored_fields(self.top_n[n][1])

    def facet_names(self):
        """Returns the available facet names, for use with the ``groups()``
        method.
        """
        return self._facetmaps.keys()

    def groups(self, name=None):
        """If you generated facet groupings for the results using the
        `groupedby` keyword argument to the ``search()`` method, you can use
        this method to retrieve the groups. You can use the ``facet_names()``
        method to get the list of available facet names.

        >>> results = searcher.search(my_query, groupedby=["tag", "price"])
        >>> results.facet_names()
        ["tag", "price"]
        >>> results.groups("tag")
        {"new": [12, 1, 4], "apple": [3, 10, 5], "search": [11]}

        If you only used one facet, you can call the method without a facet
        name to get the groups for the facet.

        >>> results = searcher.search(my_query, groupedby="tag")
        >>> results.groups()
        {"new": [12, 1, 4], "apple": [3, 10, 5, 0], "search": [11]}

        By default, this returns a dictionary mapping category names to a list
        of document numbers, in the same relative order as they appear in the
        results.

        >>> results = mysearcher.search(myquery, groupedby="tag")
        >>> docnums = results.groups()
        >>> docnums['new']
        [12, 1, 4]

        You can then use :meth:`Searcher.stored_fields` to get the stored
        fields associated with a document ID.

        If you specified a different ``maptype`` for the facet when you
        searched, the values in the dictionary depend on the
        :class:`whoosh.sorting.FacetMap`.

        >>> myfacet = sorting.FieldFacet("tag", maptype=sorting.Count)
        >>> results = mysearcher.search(myquery, groupedby=myfacet)
        >>> counts = results.groups()
        {"new": 3, "apple": 4, "search": 1}
        """
        if (name is None or name == "facet") and len(self._facetmaps) == 1:
            # If there's only one facet, just use it; convert keys() to list
            # for Python 3
            name = list(self._facetmaps.keys())[0]
        elif name not in self._facetmaps:
            raise KeyError("%r not in facet names %r"
                           % (name, self.facet_names()))
        return self._facetmaps[name].as_dict()

    def has_exact_length(self):
        """Returns True if this results object already knows the exact number
        of matching documents.
        """
        if self.collector:
            return self.collector.computes_count()
        else:
            return self._total is not None

    def estimated_length(self):
        """The estimated maximum number of matching documents, or the
        exact number of matching documents if it's known.
        """
        if self.has_exact_length():
            return len(self)
        else:
            return self.q.estimate_size(self.searcher.reader())

    def estimated_min_length(self):
        """The estimated minimum number of matching documents, or the
        exact number of matching documents if it's known.
        """
        if self.has_exact_length():
            return len(self)
        else:
            return self.q.estimate_min_size(self.searcher.reader())

    def scored_length(self):
        """Returns the number of scored documents in the results, equal to or
        less than the ``limit`` keyword argument to the search.

        >>> r = mysearcher.search(myquery, limit=20)
        >>> len(r)
        1246
        >>> r.scored_length()
        20

        This may be fewer than the total number of documents that match the
        query, which is what ``len(Results)`` returns.
        """
        return len(self.top_n)

    def docs(self):
        """Returns a set-like object containing the document numbers that
        matched the query.
        """
        # Computed lazily from the collector and cached
        if self.docset is None:
            self.docset = set(self.collector.all_ids())
        return self.docset

    def copy(self):
        """Returns a deep copy of this results object.
        """
        # Shallow copy self to get attributes
        r = copy.copy(self)
        # Deep copies of docset and top_n in case they're modified
        r.docset = copy.deepcopy(self.docset)
        r.top_n = copy.deepcopy(self.top_n)
        return r

    def score(self, n):
        """Returns the score for the document at the Nth position in the list
        of ranked documents. If the search was not scored, this may return
        None.
        """
        return self.top_n[n][0]

    def docnum(self, n):
        """Returns the document number of the result at position n in the list
        of ranked documents.
        """
        return self.top_n[n][1]

    def query_terms(self, expand=False, fieldname=None):
        # Delegates to the query's existing_terms() against this searcher's
        # reader
        return self.q.existing_terms(self.searcher.reader(),
                                     fieldname=fieldname, expand=expand)

    def has_matched_terms(self):
        """Returns True if the search recorded which terms matched in which
        documents.

        >>> r = searcher.search(myquery)
        >>> r.has_matched_terms()
        False
        >>>
        """
        # docterms/termdocs are set externally (by the terms collector) when
        # the search was run with terms=True
        return hasattr(self, "docterms") and hasattr(self, "termdocs")

    def matched_terms(self):
        """Returns the set of ``("fieldname", "text")`` tuples representing
        terms from the query that matched one or more of the TOP N documents
        (this does not report terms for documents that match the query but did
        not score high enough to make the top N results). You can compare this
        set to the terms from the original query to find terms which didn't
        occur in any matching documents.

        This is only valid if you used ``terms=True`` in the search call to
        record matching terms. Otherwise it will raise an exception.

        >>> q = myparser.parse("alfa OR bravo OR charlie")
        >>> results = searcher.search(q, terms=True)
        >>> results.terms()
        set([("content", "alfa"), ("content", "charlie")])
        >>> q.all_terms() - results.terms()
        set([("content", "bravo")])
        """
        if not self.has_matched_terms():
            raise NoTermsException
        return set(self.termdocs.keys())

    # The following properties proxy attributes of the results' highlighter
    # object, for convenience

    def _get_fragmenter(self):
        return self.highlighter.fragmenter

    def _set_fragmenter(self, f):
        self.highlighter.fragmenter = f

    fragmenter = property(_get_fragmenter, _set_fragmenter)

    def _get_formatter(self):
        return self.highlighter.formatter

    def _set_formatter(self, f):
        self.highlighter.formatter = f

    formatter = property(_get_formatter, _set_formatter)

    def _get_scorer(self):
        return self.highlighter.scorer

    def _set_scorer(self, s):
        self.highlighter.scorer = s

    scorer = property(_get_scorer, _set_scorer)

    def _get_order(self):
        return self.highlighter.order

    def _set_order(self, o):
        self.highlighter.order = o

    order = property(_get_order, _set_order)

    def key_terms(self, fieldname, docs=10, numterms=5,
                  model=classify.Bo1Model, normalize=True):
        """Returns the 'numterms' most important terms from the top 'docs'
        documents in these results. "Most important" is generally defined as
        terms that occur frequently in the top hits but relatively infrequently
        in the collection as a whole.

        :param fieldname: Look at the terms in this field. This field must
            store vectors.
        :param docs: Look at this many of the top documents of the results.
        :param numterms: Return this number of important terms.
        :param model: The classify.ExpansionModel to use. See the classify
            module.
        :returns: list of unicode strings.
        """
        if not len(self):
            return []
        docs = min(docs, len(self))

        reader = self.searcher.reader()

        expander = classify.Expander(reader, fieldname, model=model)
        for _, docnum in self.top_n[:docs]:
            expander.add_document(docnum)

        return expander.expanded_terms(numterms, normalize=normalize)

    def extend(self, results):
        """Appends hits from 'results' (that are not already in this
        results object) to the end of these results.

        :param results: another results object.
        """
        docs = self.docs()
        for item in results.top_n:
            if item[1] not in docs:
                self.top_n.append(item)
        self.docset = docs | results.docs()
        self._total = len(self.docset)

    def filter(self, results):
        """Removes any hits that are not also in the other results object.
        """
        if not len(results):
            return

        otherdocs = results.docs()
        items = [item for item in self.top_n if item[1] in otherdocs]
        self.docset = self.docs() & otherdocs
        self.top_n = items

    def upgrade(self, results, reverse=False):
        """Re-sorts the results so any hits that are also in 'results' appear
        before hits not in 'results', otherwise keeping their current relative
        positions. This does not add the documents in the other results object
        to this one.

        :param results: another results object.
        :param reverse: if True, lower the position of hits in the other
            results object instead of raising them.
        """
        if not len(results):
            return

        otherdocs = results.docs()
        arein = [item for item in self.top_n if item[1] in otherdocs]
        notin = [item for item in self.top_n if item[1] not in otherdocs]

        if reverse:
            items = notin + arein
        else:
            items = arein + notin

        self.top_n = items

    def upgrade_and_extend(self, results):
        """Combines the effects of extend() and upgrade(): hits that are also
        in 'results' are raised. Then any hits from the other results object
        that are not in this results object are appended to the end.

        :param results: another results object.
        """
        if not len(results):
            return

        docs = self.docs()
        otherdocs = results.docs()

        arein = [item for item in self.top_n if item[1] in otherdocs]
        notin = [item for item in self.top_n if item[1] not in otherdocs]
        other = [item for item in results.top_n if item[1] not in docs]

        self.docset = docs | otherdocs
        self.top_n = arein + notin + other
class Hit(object):
    """Represents a single search result ("hit") in a Results object.

    This object acts like a dictionary of the matching document's stored
    fields. If for some reason you need an actual ``dict`` object, use
    ``Hit.fields()`` to get one.

    >>> r = searcher.search(query.Term("content", "render"))
    >>> r[0]
    < Hit {title = u"Rendering the scene"} >
    >>> r[0].rank
    0
    >>> r[0].docnum == 4592
    True
    >>> r[0].score
    2.52045682
    >>> r[0]["title"]
    "Rendering the scene"
    >>> r[0].keys()
    ["title"]
    """

    def __init__(self, results, docnum, pos=None, score=None):
        """
        :param results: the Results object this hit belongs to.
        :param pos: the position in the results list of this hit, for example
            pos = 0 means this is the first (highest scoring) hit.
        :param docnum: the document number of this hit.
        :param score: the score of this hit.
        """
        self.results = results
        self.searcher = results.searcher
        self.reader = self.searcher.reader()
        self.pos = self.rank = pos
        self.docnum = docnum
        self.score = score
        # Stored fields are loaded lazily by fields()
        self._fields = None

    def fields(self):
        """Returns a dictionary of the stored fields of the document this
        object represents.
        """
        if self._fields is None:
            self._fields = self.searcher.stored_fields(self.docnum)
        return self._fields

    def matched_terms(self):
        """Returns the set of ``("fieldname", "text")`` tuples representing
        terms from the query that matched in this document. You can
        compare this set to the terms from the original query to find terms
        which didn't occur in this document.

        This is only valid if you used ``terms=True`` in the search call to
        record matching terms. Otherwise it will raise an exception.

        >>> q = myparser.parse("alfa OR bravo OR charlie")
        >>> results = searcher.search(q, terms=True)
        >>> for hit in results:
        ...   print(hit["title"])
        ...   print("Contains:", hit.matched_terms())
        ...   print("Doesn't contain:", q.all_terms() - hit.matched_terms())
        """
        if not self.results.has_matched_terms():
            raise NoTermsException
        return self.results.docterms.get(self.docnum, [])

    def highlights(self, fieldname, text=None, top=3, minscore=1):
        """Returns highlighted snippets from the given field::

            r = searcher.search(myquery)
            for hit in r:
                print(hit["title"])
                print(hit.highlights("content"))

        See :doc:`/highlight`.

        To change the fragmeter, formatter, order, or scorer used in
        highlighting, you can set attributes on the results object::

            from whoosh import highlight

            results = searcher.search(myquery, terms=True)
            results.fragmenter = highlight.SentenceFragmenter()

        ...or use a custom :class:`whoosh.highlight.Highlighter` object::

            hl = highlight.Highlighter(fragmenter=sf)
            results.highlighter = hl

        :param fieldname: the name of the field you want to highlight.
        :param text: by default, the method will attempt to load the contents
            of the field from the stored fields for the document. If the field
            you want to highlight isn't stored in the index, but you have
            access to the text another way (for example, loading from a file or
            a database), you can supply it using the ``text`` parameter.
        :param top: the maximum number of fragments to return.
        :param minscore: the minimum score for fragments to appear in the
            highlights.
        """
        hliter = self.results.highlighter
        return hliter.highlight_hit(self, fieldname, text=text, top=top,
                                    minscore=minscore)

    def more_like_this(self, fieldname, text=None, top=10, numterms=5,
                       model=classify.Bo1Model, normalize=True, filter=None):
        """Returns a new Results object containing documents similar to this
        hit, based on "key terms" in the given field::

            r = searcher.search(myquery)
            for hit in r:
                print(hit["title"])
                print("Top 3 similar documents:")
                for subhit in hit.more_like_this("content", top=3):
                  print("  ", subhit["title"])

        :param fieldname: the name of the field to use to test similarity.
        :param text: by default, the method will attempt to load the contents
            of the field from the stored fields for the document, or from a
            term vector. If the field isn't stored or vectored in the index,
            but you have access to the text another way (for example, loading
            from a file or a database), you can supply it using the ``text``
            parameter.
        :param top: the number of results to return.
        :param numterms: the number of "key terms" to extract from the hit and
            search for. Using more terms is slower but gives potentially more
            and more accurate results.
        :param model: (expert) a :class:`whoosh.classify.ExpansionModel` to use
            to compute "key terms".
        :param normalize: whether to normalize term weights.
        """
        return self.searcher.more_like(self.docnum, fieldname, text=text,
                                       top=top, numterms=numterms, model=model,
                                       normalize=normalize, filter=filter)

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.fields())

    def __eq__(self, other):
        if isinstance(other, Hit):
            return self.fields() == other.fields()
        elif isinstance(other, dict):
            return self.fields() == other
        else:
            return False

    def __len__(self):
        return len(self.fields())

    def __iter__(self):
        return iterkeys(self.fields())

    def __getitem__(self, fieldname):
        # Prefer the stored fields; fall back to a column reader if the
        # field isn't stored but has a column
        if fieldname in self.fields():
            return self._fields[fieldname]

        reader = self.reader
        if reader.has_column(fieldname):
            cr = reader.column_reader(fieldname)
            return cr[self.docnum]

        raise KeyError(fieldname)

    def __contains__(self, key):
        return (key in self.fields()
                or self.reader.has_column(key))

    def items(self):
        return list(self.fields().items())

    def keys(self):
        return list(self.fields().keys())

    def values(self):
        return list(self.fields().values())

    def iteritems(self):
        return iteritems(self.fields())

    def iterkeys(self):
        return iterkeys(self.fields())

    def itervalues(self):
        return itervalues(self.fields())

    def get(self, key, default=None):
        return self.fields().get(key, default)

    def __setitem__(self, key, value):
        raise NotImplementedError("You cannot modify a search result")

    def __delitem__(self, key):
        # Fixed signature: __delitem__ takes only the key. The previous
        # (self, key, value) signature made "del hit[key]" raise TypeError
        # instead of the intended NotImplementedError.
        raise NotImplementedError("You cannot modify a search result")

    def clear(self):
        raise NotImplementedError("You cannot modify a search result")

    def update(self, dict=None, **kwargs):
        raise NotImplementedError("You cannot modify a search result")
class ResultsPage(object):
    """Represents a single page out of a longer list of results, as returned
    by :func:`whoosh.searching.Searcher.search_page`. Supports a subset of the
    interface of the :class:`~whoosh.searching.Results` object, namely getting
    stored fields with __getitem__ (square brackets), iterating, and the
    ``score()`` and ``docnum()`` methods.

    The ``offset`` attribute contains the results number this page starts at
    (numbered from 0). For example, if the page length is 10, the ``offset``
    attribute on the second page will be ``10``.

    The ``pagecount`` attribute contains the number of pages available.

    The ``pagenum`` attribute contains the page number. This may be less than
    the page you requested if the results had too few pages. For example, if
    you do::

        ResultsPage(results, 5)

    but the results object only contains 3 pages worth of hits, ``pagenum``
    will be 3.

    The ``pagelen`` attribute contains the number of results on this page
    (which may be less than the page length you requested if this is the last
    page of the results).

    The ``total`` attribute contains the total number of hits in the results.

    >>> mysearcher = myindex.searcher()
    >>> pagenum = 2
    >>> page = mysearcher.find_page(pagenum, myquery)
    >>> print("Page %s of %s, results %s to %s of %s" %
    ...       (pagenum, page.pagecount, page.offset+1,
    ...        page.offset+page.pagelen, page.total))
    >>> for i, fields in enumerate(page):
    ...   print("%s. %r" % (page.offset + i + 1, fields))
    >>> mysearcher.close()

    To set highlighter attributes (for example ``formatter``), access the
    underlying :class:`Results` object::

        page.results.formatter = highlight.UppercaseFormatter()
    """

    def __init__(self, results, pagenum, pagelen=10):
        """
        :param results: a :class:`~whoosh.searching.Results` object.
        :param pagenum: which page of the results to use, numbered from ``1``.
        :param pagelen: the number of hits per page.
        """
        self.results = results
        self.total = len(results)

        if pagenum < 1:
            raise ValueError("pagenum must be >= 1")

        self.pagecount = int(ceil(self.total / pagelen))
        self.pagenum = min(self.pagecount, pagenum)

        offset = (self.pagenum - 1) * pagelen
        if offset < 0:
            # Fix: with an empty results object pagecount is 0, which made
            # the computed offset negative (-pagelen) and leaked negative
            # indexes into slicing, score() and docnum(). Clamp to an empty
            # page starting at 0 instead.
            offset = 0
            pagelen = 0
        elif (offset + pagelen) > self.total:
            # Last page may be shorter than the requested page length
            pagelen = self.total - offset

        self.offset = offset
        self.pagelen = pagelen

    def __getitem__(self, n):
        # Translate page-relative indexes/slices into results indexes
        offset = self.offset
        if isinstance(n, slice):
            start, stop, step = n.indices(self.pagelen)
            return self.results.__getitem__(slice(start + offset,
                                                  stop + offset, step))
        else:
            return self.results.__getitem__(n + offset)

    def __iter__(self):
        return iter(self.results[self.offset:self.offset + self.pagelen])

    def __len__(self):
        # Note: like Results, len() is the TOTAL number of hits, not the
        # number of hits on this page (use pagelen or scored_length for that)
        return self.total

    def scored_length(self):
        return self.results.scored_length()

    def score(self, n):
        """Returns the score of the hit at the nth position on this page.
        """
        return self.results.score(n + self.offset)

    def docnum(self, n):
        """Returns the document number of the hit at the nth position on this
        page.
        """
        return self.results.docnum(n + self.offset)

    def is_last_page(self):
        """Returns True if this object represents the last page of results.
        """
        return self.pagecount == 0 or self.pagenum == self.pagecount
| 39.960701
| 83
| 0.60696
|
from __future__ import division
import copy
import weakref
from math import ceil
from whoosh import classify, highlight, query, scoring
from whoosh.compat import iteritems, itervalues, iterkeys, xrange
from whoosh.idsets import DocIdSet, BitSet
from whoosh.reading import TermNotFound
from whoosh.util.cache import lru_cache
class NoTermsException(Exception):
    """Raised when you try to access matched terms on a :class:`Results`
    object that was created without ``terms=True``.
    """

    # Legacy message attribute kept for backwards compatibility
    message = "Results were created without recording terms"
class TimeLimit(Exception):
    """Raised when a search exceeds its allotted time limit."""
    pass
class SearchContext(object):
    """A simple container for per-search information that is passed down
    through the search machinery.
    """

    def __init__(self, needs_current=False, weighting=None, top_query=None,
                 limit=0):
        """
        :param needs_current: whether the search requires matchers to stay on
            the current document.
        :param weighting: the scoring object to use, or None for unscored
            (boolean) searching.
        :param top_query: the query at the top of the query tree.
        :param limit: the requested result limit.
        """
        self.needs_current = needs_current
        self.weighting = weighting
        self.top_query = top_query
        self.limit = limit

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.__dict__)

    def set(self, **kwargs):
        """Returns a shallow copy of this context with the given attributes
        replaced.
        """
        clone = copy.copy(self)
        clone.__dict__.update(kwargs)
        return clone
class Searcher(object):
    def __init__(self, reader, weighting=scoring.BM25F, closereader=True,
                 fromindex=None, parent=None):
        """
        :param reader: An :class:`~whoosh.reading.IndexReader` object for
            the index to search.
        :param weighting: A :class:`whoosh.scoring.Weighting` object (or
            class; a class is instantiated with no arguments) to use to
            score found documents.
        :param closereader: Whether the underlying reader will be closed
            when the searcher is closed.
        :param fromindex: An optional reference to the index of the
            underlying reader. This is required for :meth:`Searcher.up_to_date`
            and :meth:`Searcher.refresh` to work.
        :param parent: the parent searcher when this is a subsearcher for a
            leaf reader; caches and schema are shared with the parent.
        """
        self.ixreader = reader
        self.is_closed = False
        self._closereader = closereader
        self._ix = fromindex
        # Cache the total doc count (including deleted docs) up front
        self._doccount = self.ixreader.doc_count_all()
        self._field_caches = {}

        if parent:
            # Hold the parent weakly to avoid a reference cycle, and share
            # its schema and caches
            self.parent = weakref.ref(parent)
            self.schema = parent.schema
            self._idf_cache = parent._idf_cache
            self._filter_cache = parent._filter_cache
        else:
            self.parent = None
            self.schema = self.ixreader.schema
            self._idf_cache = {}
            self._filter_cache = {}

        # Allow passing a weighting class instead of an instance
        if type(weighting) is type:
            self.weighting = weighting()
        else:
            self.weighting = weighting

        self.leafreaders = None
        self.subsearchers = None
        if not self.ixreader.is_atomic():
            # Multi-reader: build a (subsearcher, docnum_offset) pair for
            # each leaf reader
            self.leafreaders = self.ixreader.leaf_readers()
            self.subsearchers = [(self._subsearcher(r), offset) for r, offset
                                 in self.leafreaders]

        # Copy ixreader methods onto this object for convenience/delegation
        for name in ("stored_fields", "all_stored_fields", "has_vector",
                     "vector", "vector_as", "lexicon", "field_terms",
                     "frequency", "doc_frequency", "term_info",
                     "doc_field_length", "corrector", "iter_docs"):
            setattr(self, name, getattr(self.ixreader, name))
    def __enter__(self):
        # Support "with ix.searcher() as s:" usage
        return self
    def __exit__(self, *exc_info):
        # Close the searcher when the "with" block exits, even on error
        self.close()
    def _subsearcher(self, reader):
        # Build a child searcher for a leaf reader, sharing this searcher's
        # weighting and (via the parent link) its schema and caches
        return self.__class__(reader, fromindex=self._ix,
                              weighting=self.weighting, parent=self)
def _offset_for_subsearcher(self, subsearcher):
for ss, offset in self.subsearchers:
if ss is subsearcher:
return offset
def leaf_searchers(self):
if self.is_atomic():
return [(self, 0)]
else:
return self.subsearchers
def is_atomic(self):
return self.reader().is_atomic()
def has_parent(self):
return self.parent is not None
def get_parent(self):
if self.has_parent():
return self.parent()
else:
return self
def doc_count(self):
return self.ixreader.doc_count()
def doc_count_all(self):
return self._doccount
def field_length(self, fieldname):
if self.has_parent():
return self.get_parent().field_length(fieldname)
else:
return self.reader().field_length(fieldname)
def max_field_length(self, fieldname):
if self.has_parent():
return self.get_parent().max_field_length(fieldname)
else:
return self.reader().max_field_length(fieldname)
def up_to_date(self):
if not self._ix:
raise Exception("No reference to index")
return self._ix.latest_generation() == self.ixreader.generation()
def refresh(self):
if not self._ix:
raise Exception("No reference to index")
if self._ix.latest_generation() == self.reader().generation():
return self
self.is_closed = True
newreader = self._ix.reader(reuse=self.ixreader)
return self.__class__(newreader, fromindex=self._ix,
weighting=self.weighting)
def close(self):
if self._closereader:
self.ixreader.close()
self.is_closed = True
def avg_field_length(self, fieldname, default=None):
if not self.schema[fieldname].scorable:
return default
return self.field_length(fieldname) / (self._doccount or 1)
def reader(self):
return self.ixreader
def context(self, **kwargs):
if "weighting" not in kwargs:
kwargs["weighting"] = self.weighting
return SearchContext(**kwargs)
def boolean_context(self):
return self.context(needs_current=False, weighting=None)
def postings(self, fieldname, text, weighting=None, qf=1):
weighting = weighting or self.weighting
globalscorer = weighting.scorer(self, fieldname, text, qf=qf)
if self.is_atomic():
return self.ixreader.postings(fieldname, text, scorer=globalscorer)
else:
from whoosh.matching import MultiMatcher
matchers = []
docoffsets = []
term = (fieldname, text)
for subsearcher, offset in self.subsearchers:
r = subsearcher.reader()
if term in r:
scorer = weighting.scorer(subsearcher, fieldname, text, qf=qf)
m = r.postings(fieldname, text, scorer=scorer)
matchers.append(m)
docoffsets.append(offset)
if not matchers:
raise TermNotFound(fieldname, text)
return MultiMatcher(matchers, docoffsets, globalscorer)
def idf(self, fieldname, text):
# caches the result. So Weighting objects should call *this* method
# which will then call *their own* idf() methods.
cache = self._idf_cache
term = (fieldname, text)
if term in cache:
return cache[term]
idf = self.weighting.idf(self, fieldname, text)
cache[term] = idf
return idf
def document(self, **kw):
for p in self.documents(**kw):
return p
def documents(self, **kw):
ixreader = self.ixreader
return (ixreader.stored_fields(docnum)
for docnum in self.document_numbers(**kw))
def _kw_to_text(self, kw):
for k, v in iteritems(kw):
field = self.schema[k]
kw[k] = field.to_bytes(v)
def _query_for_kw(self, kw):
subqueries = []
for key, value in iteritems(kw):
subqueries.append(query.Term(key, value))
if subqueries:
q = query.And(subqueries).normalize()
else:
q = query.Every()
return q
def document_number(self, **kw):
# In the common case where only one keyword was given, just use
# first_id() instead of building a query.
self._kw_to_text(kw)
if len(kw) == 1:
k, v = list(kw.items())[0]
try:
return self.reader().first_id(k, v)
except TermNotFound:
return None
else:
m = self._query_for_kw(kw).matcher(self, self.boolean_context())
if m.is_active():
return m.id()
def document_numbers(self, **kw):
self._kw_to_text(kw)
return self.docs_for_query(self._query_for_kw(kw))
def _find_unique(self, uniques):
# uniques is a list of ("unique_field_name", "field_value") tuples
delset = set()
for name, value in uniques:
docnum = self.document_number(**{name: value})
if docnum is not None:
delset.add(docnum)
return delset
def _query_to_comb(self, fq):
return BitSet(self.docs_for_query(fq), size=self.doc_count_all())
def _filter_to_comb(self, obj):
if obj is None:
return None
if isinstance(obj, (set, DocIdSet)):
c = obj
elif isinstance(obj, Results):
c = obj.docs()
elif isinstance(obj, ResultsPage):
c = obj.results.docs()
elif isinstance(obj, query.Query):
c = self._query_to_comb(obj)
else:
raise Exception("Don't know what to do with filter object %r"
% obj)
return c
def suggest(self, fieldname, text, limit=5, maxdist=2, prefix=0):
c = self.reader().corrector(fieldname)
return c.suggest(text, limit=limit, maxdist=maxdist, prefix=prefix)
def key_terms(self, docnums, fieldname, numterms=5,
model=classify.Bo1Model, normalize=True):
expander = classify.Expander(self.ixreader, fieldname, model=model)
for docnum in docnums:
expander.add_document(docnum)
return expander.expanded_terms(numterms, normalize=normalize)
def key_terms_from_text(self, fieldname, text, numterms=5,
model=classify.Bo1Model, normalize=True):
expander = classify.Expander(self.ixreader, fieldname, model=model)
expander.add_text(text)
return expander.expanded_terms(numterms, normalize=normalize)
def more_like(self, docnum, fieldname, text=None, top=10, numterms=5,
model=classify.Bo1Model, normalize=False, filter=None):
if text:
kts = self.key_terms_from_text(fieldname, text, numterms=numterms,
model=model, normalize=normalize)
else:
kts = self.key_terms([docnum], fieldname, numterms=numterms,
model=model, normalize=normalize)
q = query.Or([query.Term(fieldname, word, boost=weight)
for word, weight in kts])
return self.search(q, limit=top, filter=filter, mask=set([docnum]))
def search_page(self, query, pagenum, pagelen=10, **kwargs):
if pagenum < 1:
raise ValueError("pagenum must be >= 1")
results = self.search(query, limit=pagenum * pagelen, **kwargs)
return ResultsPage(results, pagenum, pagelen)
def find(self, defaultfield, querystring, **kwargs):
from whoosh.qparser import QueryParser
qp = QueryParser(defaultfield, schema=self.ixreader.schema)
q = qp.parse(querystring)
return self.search(q, **kwargs)
def docs_for_query(self, q, for_deletion=False):
# deletion_docs method instead of docs; this lets special queries
# (e.g. nested queries) override what gets deleted
if for_deletion:
method = q.deletion_docs
else:
method = q.docs
if self.subsearchers:
for s, offset in self.subsearchers:
for docnum in method(s):
yield docnum + offset
else:
for docnum in method(self):
yield docnum
def collector(self, limit=10, sortedby=None, reverse=False, groupedby=None,
collapse=None, collapse_limit=1, collapse_order=None,
optimize=True, filter=None, mask=None, terms=False,
maptype=None, scored=True):
from whoosh import collectors
if limit is not None and limit < 1:
raise ValueError("limit must be >= 1")
if not scored and not sortedby:
c = collectors.UnsortedCollector()
elif sortedby:
c = collectors.SortingCollector(sortedby, limit=limit,
reverse=reverse)
elif groupedby or reverse or not limit or limit >= self.doc_count():
# A collector that gathers every matching document
c = collectors.UnlimitedCollector(reverse=reverse)
else:
# A collector that uses block quality optimizations and a heap
# queue to only collect the top N documents
c = collectors.TopCollector(limit, usequality=optimize)
if groupedby:
c = collectors.FacetCollector(c, groupedby, maptype=maptype)
if terms:
c = collectors.TermsCollector(c)
if collapse:
c = collectors.CollapseCollector(c, collapse, limit=collapse_limit,
order=collapse_order)
# Filtering wraps last so it sees the docs first
if filter or mask:
c = collectors.FilterCollector(c, filter, mask)
return c
def search(self, q, **kwargs):
# Call the collector() method to build a collector based on the
# parameters passed to this method
c = self.collector(**kwargs)
# Call the lower-level method to run the collector
self.search_with_collector(q, c)
# Return the results object from the collector
return c.results()
def search_with_collector(self, q, collector, context=None):
# Get the search context object from the searcher
context = context or self.context()
# Allow collector to set up based on the top-level information
collector.prepare(self, q, context)
collector.run()
def correct_query(self, q, qstring, correctors=None, terms=None, maxdist=2,
prefix=0, aliases=None):
reader = self.reader()
# Dictionary of field name alias mappings
if aliases is None:
aliases = {}
# Dictionary of custom per-field correctors
if correctors is None:
correctors = {}
# Remap correctors dict according to aliases
d = {}
for fieldname, corr in iteritems(correctors):
fieldname = aliases.get(fieldname, fieldname)
d[fieldname] = corr
correctors = d
# Fill in default corrector objects for fields that don't have a custom
fieldnames = self.schema.names()
for fieldname in fieldnames:
fieldname = aliases.get(fieldname, fieldname)
if fieldname not in correctors:
correctors[fieldname] = self.reader().corrector(fieldname)
if terms is None:
terms = []
for token in q.all_tokens():
aname = aliases.get(token.fieldname, token.fieldname)
text = token.text
if aname in correctors and (aname, text) not in reader:
# Note that we use the original, not aliases fieldname here
# so if we correct the query we know what it was
terms.append((token.fieldname, token.text))
# Make q query corrector
from whoosh import spelling
sqc = spelling.SimpleQueryCorrector(correctors, terms, aliases)
return sqc.correct_query(q, qstring)
class Results(object):
    """The results of a search: an ordered list of scored documents, with
    optional facet maps and matched-term information.

    ``top_n`` is a list of ``(score, docnum)`` pairs, best first.
    """
    def __init__(self, searcher, q, top_n, docset=None, facetmaps=None,
                 runtime=0, highlighter=None):
        """
        :param searcher: the Searcher that produced these results.
        :param q: the query that was run.
        :param top_n: list of ``(score, docnum)`` pairs.
        :param docset: optional pre-computed set of all matching docnums.
        :param facetmaps: optional mapping of facet name -> facet map.
        :param runtime: how long the search took.
        :param highlighter: Highlighter used by Hit.highlights().
        """
        self.searcher = searcher
        self.q = q
        self.top_n = top_n
        self.docset = docset
        self._facetmaps = facetmaps or {}
        self.runtime = runtime
        self.highlighter = highlighter or highlight.Highlighter()
        # Set by the collector after construction.
        self.collector = None
        # Lazily computed total hit count.
        self._total = None
        # NOTE(review): not used anywhere in this chunk -- presumably a
        # cache for highlighting character data; confirm before relying on it.
        self._char_cache = {}
    def __repr__(self):
        return "<Top %s Results for %r runtime=%s>" % (len(self.top_n),
                                                       self.q,
                                                       self.runtime)
    def __len__(self):
        """Return the TOTAL number of matching documents (not just the
        scored top N); computed by the collector on first use."""
        if self._total is None:
            self._total = self.collector.count()
        return self._total
    def __getitem__(self, n):
        """Return the *n*-th Hit, or a list of Hits for a slice."""
        if isinstance(n, slice):
            start, stop, step = n.indices(len(self.top_n))
            return [Hit(self, self.top_n[i][1], i, self.top_n[i][0])
                    for i in xrange(start, stop, step)]
        else:
            if n >= len(self.top_n):
                raise IndexError("results[%r]: Results only has %s hits"
                                 % (n, len(self.top_n)))
            return Hit(self, self.top_n[n][1], n, self.top_n[n][0])
    def __iter__(self):
        """Yield a Hit object for each scored result, best first."""
        for i in xrange(len(self.top_n)):
            yield Hit(self, self.top_n[i][1], i, self.top_n[i][0])
    def __contains__(self, docnum):
        """Return True if the document number matched the query (even if it
        is not in the scored top N)."""
        return docnum in self.docs()
    def __nonzero__(self):
        return not self.is_empty()
    # Python 3 truthiness uses the same implementation.
    __bool__ = __nonzero__
    def is_empty(self):
        """Return True if no scored documents were collected."""
        return self.scored_length() == 0
    def items(self):
        """Yield ``(docnum, score)`` pairs for the scored results."""
        return ((docnum, score) for score, docnum in self.top_n)
    def fields(self, n):
        """Return the stored fields of the *n*-th scored document."""
        return self.searcher.stored_fields(self.top_n[n][1])
    def facet_names(self):
        """Return the names of the facet maps recorded for this search."""
        return self._facetmaps.keys()
    def groups(self, name=None):
        """Return the grouping dict for the named facet; when only one
        facet exists the name may be omitted."""
        if (name is None or name == "facet") and len(self._facetmaps) == 1:
            # If there's only one facet, just use it; convert keys() to list
            name = list(self._facetmaps.keys())[0]
        elif name not in self._facetmaps:
            raise KeyError("%r not in facet names %r"
                           % (name, self.facet_names()))
        return self._facetmaps[name].as_dict()
    def has_exact_length(self):
        """Return True if len(self) is exact rather than an estimate."""
        if self.collector:
            return self.collector.computes_count()
        else:
            return self._total is not None
    def estimated_length(self):
        """Return the exact length if known, otherwise the query's upper
        estimate of matching documents."""
        if self.has_exact_length():
            return len(self)
        else:
            return self.q.estimate_size(self.searcher.reader())
    def estimated_min_length(self):
        """Return the exact length if known, otherwise the query's lower
        estimate of matching documents."""
        if self.has_exact_length():
            return len(self)
        else:
            return self.q.estimate_min_size(self.searcher.reader())
    def scored_length(self):
        """Return the number of scored documents actually collected (at
        most the search limit; compare len(self))."""
        return len(self.top_n)
    def docs(self):
        """Return (and cache) the set of ALL matching document numbers."""
        if self.docset is None:
            self.docset = set(self.collector.all_ids())
        return self.docset
    def copy(self):
        """Return a copy of these results with independent docset/top_n."""
        r = copy.copy(self)
        r.docset = copy.deepcopy(self.docset)
        r.top_n = copy.deepcopy(self.top_n)
        return r
    def score(self, n):
        """Return the score of the *n*-th scored document."""
        return self.top_n[n][0]
    def docnum(self, n):
        """Return the document number of the *n*-th scored document."""
        return self.top_n[n][1]
    def query_terms(self, expand=False, fieldname=None):
        """Return the terms of the query that exist in the index."""
        return self.q.existing_terms(self.searcher.reader(),
                                     fieldname=fieldname, expand=expand)
    def has_matched_terms(self):
        """Return True if the search recorded which terms matched."""
        return hasattr(self, "docterms") and hasattr(self, "termdocs")
    def matched_terms(self):
        """Return the set of terms that matched any document; raises
        NoTermsException if terms were not recorded."""
        if not self.has_matched_terms():
            raise NoTermsException
        return set(self.termdocs.keys())
    # The fragmenter/formatter/scorer/order properties below simply proxy
    # the corresponding attributes of self.highlighter.
    def _get_fragmenter(self):
        return self.highlighter.fragmenter
    def _set_fragmenter(self, f):
        self.highlighter.fragmenter = f
    fragmenter = property(_get_fragmenter, _set_fragmenter)
    def _get_formatter(self):
        return self.highlighter.formatter
    def _set_formatter(self, f):
        self.highlighter.formatter = f
    formatter = property(_get_formatter, _set_formatter)
    def _get_scorer(self):
        return self.highlighter.scorer
    def _set_scorer(self, s):
        self.highlighter.scorer = s
    scorer = property(_get_scorer, _set_scorer)
    def _get_order(self):
        return self.highlighter.order
    def _set_order(self, o):
        self.highlighter.order = o
    order = property(_get_order, _set_order)
    def key_terms(self, fieldname, docs=10, numterms=5,
                  model=classify.Bo1Model, normalize=True):
        """Return the most distinctive terms of the top *docs* results in
        the given field; empty list for empty results."""
        if not len(self):
            return []
        docs = min(docs, len(self))
        reader = self.searcher.reader()
        expander = classify.Expander(reader, fieldname, model=model)
        for _, docnum in self.top_n[:docs]:
            expander.add_document(docnum)
        return expander.expanded_terms(numterms, normalize=normalize)
    def extend(self, results):
        """Append hits from another Results object that are not already in
        this one, and merge the doc sets."""
        docs = self.docs()
        for item in results.top_n:
            if item[1] not in docs:
                self.top_n.append(item)
        self.docset = docs | results.docs()
        self._total = len(self.docset)
    def filter(self, results):
        """Keep only hits that also appear in the other Results object."""
        if not len(results):
            return
        otherdocs = results.docs()
        items = [item for item in self.top_n if item[1] in otherdocs]
        self.docset = self.docs() & otherdocs
        self.top_n = items
    def upgrade(self, results, reverse=False):
        """Reorder hits so those that also appear in the other Results come
        first (or last, with ``reverse=True``)."""
        if not len(results):
            return
        otherdocs = results.docs()
        arein = [item for item in self.top_n if item[1] in otherdocs]
        notin = [item for item in self.top_n if item[1] not in otherdocs]
        if reverse:
            items = notin + arein
        else:
            items = arein + notin
        self.top_n = items
    def upgrade_and_extend(self, results):
        """Combine upgrade() and extend(): shared hits first, then this
        object's remaining hits, then the other object's new hits."""
        if not len(results):
            return
        docs = self.docs()
        otherdocs = results.docs()
        arein = [item for item in self.top_n if item[1] in otherdocs]
        notin = [item for item in self.top_n if item[1] not in otherdocs]
        other = [item for item in results.top_n if item[1] not in docs]
        self.docset = docs | otherdocs
        self.top_n = arein + notin + other
class Hit(object):
    """One matched document in a Results object.

    Behaves as a read-only mapping of the document's stored fields (with
    fall-through to column values), plus accessors for the score, rank and
    optionally the matched terms. All mutating mapping methods raise
    NotImplementedError.
    """
    def __init__(self, results, docnum, pos=None, score=None):
        """
        :param results: the owning Results object.
        :param docnum: the document number of this hit.
        :param pos: the zero-based rank of this hit in the results.
        :param score: the score of this hit, if the search was scored.
        """
        self.results = results
        self.searcher = results.searcher
        self.reader = self.searcher.reader()
        self.pos = self.rank = pos
        self.docnum = docnum
        self.score = score
        # Stored fields are loaded lazily by fields().
        self._fields = None
    def fields(self):
        """Return (and cache) the stored fields of this document."""
        if self._fields is None:
            self._fields = self.searcher.stored_fields(self.docnum)
        return self._fields
    def matched_terms(self):
        """Return the terms that matched this document; raises
        NoTermsException if the search did not record terms."""
        if not self.results.has_matched_terms():
            raise NoTermsException
        return self.results.docterms.get(self.docnum, [])
    def highlights(self, fieldname, text=None, top=3, minscore=1):
        """Return highlighted snippets from the given field, delegating to
        the results' highlighter."""
        hliter = self.results.highlighter
        return hliter.highlight_hit(self, fieldname, text=text, top=top,
                                    minscore=minscore)
    def more_like_this(self, fieldname, text=None, top=10, numterms=5,
                       model=classify.Bo1Model, normalize=True, filter=None):
        """Search for documents similar to this one (see
        Searcher.more_like)."""
        return self.searcher.more_like(self.docnum, fieldname, text=text,
                                       top=top, numterms=numterms, model=model,
                                       normalize=normalize, filter=filter)
    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.fields())
    def __eq__(self, other):
        # Hits compare equal by their stored fields, so a Hit can also be
        # compared directly against a plain dict.
        if isinstance(other, Hit):
            return self.fields() == other.fields()
        elif isinstance(other, dict):
            return self.fields() == other
        else:
            return False
    def __len__(self):
        return len(self.fields())
    def __iter__(self):
        return iterkeys(self.fields())
    def __getitem__(self, fieldname):
        """Return a stored field value, falling back to a column value."""
        if fieldname in self.fields():
            return self._fields[fieldname]
        reader = self.reader
        if reader.has_column(fieldname):
            cr = reader.column_reader(fieldname)
            return cr[self.docnum]
        raise KeyError(fieldname)
    def __contains__(self, key):
        return (key in self.fields()
                or self.reader.has_column(key))
    def items(self):
        return list(self.fields().items())
    def keys(self):
        return list(self.fields().keys())
    def values(self):
        return list(self.fields().values())
    def iteritems(self):
        return iteritems(self.fields())
    def iterkeys(self):
        return iterkeys(self.fields())
    def itervalues(self):
        return itervalues(self.fields())
    def get(self, key, default=None):
        return self.fields().get(key, default)
    def __setitem__(self, key, value):
        raise NotImplementedError("You cannot modify a search result")
    def __delitem__(self, key):
        # BUG FIX: Python calls __delitem__ as (self, key); the previous
        # (self, key, value) signature made ``del hit[k]`` raise TypeError
        # instead of the intended NotImplementedError.
        raise NotImplementedError("You cannot modify a search result")
    def clear(self):
        raise NotImplementedError("You cannot modify a search result")
    def update(self, dict=None, **kwargs):
        raise NotImplementedError("You cannot modify a search result")
class ResultsPage(object):
    """A read-only window onto one page of a Results object.

    Indexing and iteration are relative to the page: ``page[0]`` is the
    first hit on the page; ``len(page)`` is the TOTAL number of results.
    """
    def __init__(self, results, pagenum, pagelen=10):
        """
        :param results: the Results object (anything with __len__ and
            __getitem__ over hits).
        :param pagenum: 1-based page number; clamped to the last page.
        :param pagelen: number of hits per page.
        :raises ValueError: if ``pagenum`` is less than 1.
        """
        self.results = results
        self.total = len(results)
        if pagenum < 1:
            raise ValueError("pagenum must be >= 1")
        self.pagecount = int(ceil(self.total / pagelen))
        # BUG FIX: clamp to the last available page but never below page 1,
        # so the offset stays non-negative even when there are no results
        # (previously min(0, pagenum) produced a negative offset).
        self.pagenum = max(1, min(self.pagecount, pagenum))
        offset = (self.pagenum - 1) * pagelen
        # Shrink the last page to the number of remaining results.
        if (offset + pagelen) > self.total:
            pagelen = self.total - offset
        self.offset = offset
        self.pagelen = pagelen
    def __getitem__(self, n):
        """Return the *n*-th hit on this page (or a page-relative slice)."""
        offset = self.offset
        if isinstance(n, slice):
            start, stop, step = n.indices(self.pagelen)
            return self.results.__getitem__(slice(start + offset,
                                                  stop + offset, step))
        else:
            return self.results.__getitem__(n + offset)
    def __iter__(self):
        return iter(self.results[self.offset:self.offset + self.pagelen])
    def __len__(self):
        # Note: total result count, NOT the number of hits on this page.
        return self.total
    def scored_length(self):
        """Return the number of scored documents in the whole results."""
        return self.results.scored_length()
    def score(self, n):
        """Return the score of the *n*-th hit on this page."""
        return self.results.score(n + self.offset)
    def docnum(self, n):
        """Return the document number of the *n*-th hit on this page."""
        return self.results.docnum(n + self.offset)
    def is_last_page(self):
        """Return True if this is the last page (or there are no results)."""
        return self.pagecount == 0 or self.pagenum == self.pagecount
| true
| true
|
1c40391a980152fa9fcf2a8aea56d280ba128f52
| 1,813
|
py
|
Python
|
jenni/utils.py
|
synamedia-jenni/Jenni
|
44a25453d3f7dc08ca22f75b4d817dfa5c141904
|
[
"Apache-2.0"
] | 2
|
2021-05-11T15:47:52.000Z
|
2021-06-24T21:55:04.000Z
|
jenni/utils.py
|
synamedia-jenni/Jenni
|
44a25453d3f7dc08ca22f75b4d817dfa5c141904
|
[
"Apache-2.0"
] | 2
|
2021-05-19T07:24:41.000Z
|
2021-06-24T21:54:19.000Z
|
jenni/utils.py
|
synamedia-jenni/Jenni
|
44a25453d3f7dc08ca22f75b4d817dfa5c141904
|
[
"Apache-2.0"
] | 1
|
2021-05-14T10:37:53.000Z
|
2021-05-14T10:37:53.000Z
|
from html import escape
from typing import List
from textwrap import dedent
def bool2groovy(b: bool) -> str:
    """Render a Python boolean as a Groovy boolean literal."""
    if b:
        return "true"
    return "false"
def groovy_string_list(l: List[str]) -> str:
    """Render *l* as a comma-separated sequence of quoted Groovy string
    literals; an empty or falsy list yields the empty string."""
    if not l:
        return ""
    return ", ".join(quote1or3xs(item) for item in l)
def tidy_text(s: str) -> str:
    """Strip leading newlines and trailing whitespace, then un-indent."""
    return dedent(s.lstrip("\n").rstrip())
def quote3xs(s: str) -> str:
    """Return *s* as a Groovy triple-single-quoted string literal.

    Backslashes are doubled first, then embedded ''' runs are escaped so
    they cannot terminate the literal early.
    """
    out = s
    for target, repl in (("\\", "\\\\"), ("'''", "\\'''")):
        if target in out:
            out = out.replace(target, repl)
    return "'''%s'''" % out
def quote1s(s: str) -> str:
    """Return *s* as a Groovy single-quoted string literal.

    Backslashes are doubled first, then single quotes are escaped.
    """
    out = s
    for target, repl in (("\\", "\\\\"), ("'", "\\'")):
        if target in out:
            out = out.replace(target, repl)
    return "'%s'" % out
def quote1or3xs(s: str) -> str:
    """Return *s* as a Groovy string literal: triple-quoted when the value
    is multi-line, single-quoted otherwise."""
    if "\n" in s:
        return quote3xs(s)
    out = s
    for target, repl in (("\\", "\\\\"), ("'", "\\'")):
        if target in out:
            out = out.replace(target, repl)
    return "'%s'" % out
def quote_list(list_of_str: List[str]) -> str:
    """Render a Python list of strings as a Groovy list literal,
    e.g. ``['a', 'b']``."""
    inner = ", ".join(quote1or3xs(item) for item in list_of_str)
    return "[%s]" % inner
def html_link(url: str, text: str = "") -> str:
    """Return an HTML anchor element for *url*.

    :param url: the link target; escaped for use in the href attribute.
    :param text: optional link body, inserted verbatim (may contain markup);
        when empty the escaped URL is used as the body.
    """
    body = text if text else escape(url)
    # BUG FIX: the URL must be HTML-escaped inside the attribute too,
    # otherwise '&' or quotes in the URL produce invalid/unsafe markup.
    return f'<a href="{escape(url)}">{body}</a>'
| 22.6625
| 73
| 0.529509
|
from html import escape
from typing import List
from textwrap import dedent
def bool2groovy(b: bool) -> str:
return "true" if b else "false"
def groovy_string_list(l: List[str]) -> str:
if l:
return ", ".join([quote1or3xs(s) for s in l])
else:
return ""
def tidy_text(s: str) -> str:
s = s.lstrip("\n")
s = s.rstrip()
return dedent(s)
def quote3xs(s: str) -> str:
if "\\" in s:
s = s.replace("\\", "\\\\")
if "'''" in s:
s = s.replace("'''", "\\'''")
return f"'''{s}'''"
def quote1s(s: str) -> str:
if "\\" in s:
s = s.replace("\\", "\\\\")
if "'" in s:
s = s.replace("'", "\\'")
return f"'{s}'"
def quote1or3xs(s: str) -> str:
if "\n" in s:
return quote3xs(s)
if "\\" in s:
s = s.replace("\\", "\\\\")
if "'" in s:
s = s.replace("'", "\\'")
return f"'{s}'"
def quote_list(list_of_str: List[str]) -> str:
return "[" + (", ".join([quote1or3xs(s) for s in list_of_str])) + "]"
def html_link(url: str, text: str = "") -> str:
if text:
body = text
else:
body = escape(url)
return f'<a href="{url}">{body}</a>'
| true
| true
|
1c403949edbafeb3dc3e1894d723f59918cf2de8
| 3,210
|
py
|
Python
|
main.py
|
JDKdevStudio/GraficadorFunciones
|
e8505b47f80fbd189b1825537cdd115859b980d4
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
JDKdevStudio/GraficadorFunciones
|
e8505b47f80fbd189b1825537cdd115859b980d4
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
JDKdevStudio/GraficadorFunciones
|
e8505b47f80fbd189b1825537cdd115859b980d4
|
[
"CC0-1.0"
] | null | null | null |
# Importar librerías
import tkinter
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib import style
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from tkinter import messagebox
from math import *
# --- GUI setup -------------------------------------------------------------
# Create the main Tk window.
root = tkinter.Tk()
root.wm_title("Graficador")
ta = root.geometry("1000x700")
style.use('fivethirtyeight')
# Matplotlib figure with a single axes used for all plots.
fig = Figure()
ax1 = fig.add_subplot(111)
# Embed the figure in the Tk window.
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
# Standard Matplotlib navigation toolbar.
toolbar = NavigationToolbar2Tk(canvas, root)
toolbar.update()
canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
# Plot-range state shared with the callbacks below.
act_rango = False  # True while a user-entered x-range is pending validation
ul_ran = ""        # last valid [min, max] x-range, or "" if none yet
ran = ""           # raw ["min", "max"] strings split from the range entry
# Mapping of bare math-function/constant names to their NumPy equivalents,
# used to rewrite the user-entered expression before eval().
funciones = {"sin": "np.sin", "cos": "np.cos", "tan": "np.tan", "log": "np.log",
             "pi": "np.pi", "sqrt": "np.sqrt", "exp": "np.exp"}
def reemplazo(s):
    """Rewrite expression *s* so bare names like ``sin`` call into NumPy.

    BUG FIX: the old naive substring replacement mangled identifiers that
    merely *contain* a known name (e.g. "atan" -> "anp.tan"); matching on
    word boundaries replaces whole names only.
    """
    import re
    pattern = re.compile(
        r"\b(" + "|".join(sorted(funciones, key=len, reverse=True)) + r")\b")
    return pattern.sub(lambda m: funciones[m.group(1)], s)
# Animation callback: recompute the x range and redraw the current function.
def animate(i):
    """Matplotlib FuncAnimation callback.

    Validates a pending user-entered x-range (falling back to the last valid
    range, or 1..10), evaluates the prepared expression ``graph_data`` with
    ``x`` in scope, redraws the axes, and stops the animation until the SET
    button restarts it.

    NOTE(review): relies on module globals (ran, ul_ran, act_rango,
    graph_data, ax1, ets, ani, messagebox, np) set up elsewhere in the
    script; ``eval`` of user input is inherently unsafe.
    """
    global act_rango
    global ul_ran
    if act_rango == True:
        try:
            lmin = float(ran[0]);
            lmax = float(ran[1])
            if lmin < lmax:
                x = np.arange(lmin, lmax, .01)  # sample step of 0.01
                ul_ran = [lmin, lmax]
            else:
                # Invalid range (min >= max): ignore it and use the default.
                act_rango = False
        except:
            messagebox.showwarning("Error", "Introduzca los valores del rango de x, separado por coma.")
            act_rango = False
            ets.delete(0, len(ets.get()))
    else:
        if ul_ran != "":
            # Reuse the last valid range.
            x = np.arange(ul_ran[0], ul_ran[1], .01)  # sample step of 0.01
        else:
            x = np.arange(1, 10, .01)  # default x range 1..10
    try:
        solo = eval(graph_data)
        ax1.clear()
        ax1.plot(x, solo)
    except:
        # Evaluation failed (bad expression): draw an empty plot.
        ax1.plot()
    ax1.axhline(0, color="gray")
    ax1.axvline(0, color="gray")
    ani.event_source.stop()  # STOPS THE ANIMATION until SET restarts it
# SET-button callback: read the entries and (re)start the animation.
def represent():
    """Read the function expression and optional "min,max" x-range from the
    entry widgets, rewrite the expression for NumPy, and restart the
    animation so animate() redraws the plot.

    NOTE(review): relies on module globals (et, ets, ani) and the
    reemplazo() helper defined elsewhere in the script.
    """
    global graph_data
    global ran
    global act_rango
    texto_orig = et.get()
    if ets.get() != "":
        rann = ets.get()
        ran = rann.split(",")
        act_rango = True
    graph_data = reemplazo(texto_orig)
    ani.event_source.start()  # STARTS/RESUMES THE ANIMATION
# Animation configured to call animate() once per second while running.
ani = animation.FuncAnimation(fig, animate, interval=1000)
# Show the figure.
plt.show()
# Entry widget for the function expression.
et = tkinter.Entry(master=root, width=60)
et.config(bg="gray87", justify="left")
# SET button: parse the entries and start plotting.
button = tkinter.Button(master=root, text="SET", bg="gray69", command=represent)
button.pack(side=tkinter.BOTTOM)
et.pack(side=tkinter.BOTTOM)
# Entry widget for the optional "min,max" x-range.
ets = tkinter.Entry(master=root, width=20)
ets.config(bg="gray87")
ets.pack(side=tkinter.RIGHT)
# Enter the Tk main loop (blocks until the window is closed).
tkinter.mainloop()
| 27.672414
| 105
| 0.626791
|
import tkinter
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib import style
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from tkinter import messagebox
from math import *
root = tkinter.Tk()
root.wm_title("Graficador")
ta = root.geometry("1000x700")
style.use('fivethirtyeight')
fig = Figure()
ax1 = fig.add_subplot(111)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
toolbar = NavigationToolbar2Tk(canvas, root)
toolbar.update()
canvas.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
act_rango = False
ul_ran = ""
ran = ""
funciones = {"sin": "np.sin", "cos": "np.cos", "tan": "np.tan", "log": "np.log",
"pi": "np.pi", "sqrt": "np.sqrt", "exp": "np.exp"}
def reemplazo(s):
for i in funciones:
if i in s:
s = s.replace(i, funciones[i])
return s
def animate(i):
global act_rango
global ul_ran
if act_rango == True:
try:
lmin = float(ran[0]);
lmax = float(ran[1])
if lmin < lmax:
x = np.arange(lmin, lmax, .01)
ul_ran = [lmin, lmax]
else:
act_rango = False
except:
messagebox.showwarning("Error", "Introduzca los valores del rango de x, separado por coma.")
act_rango = False
ets.delete(0, len(ets.get()))
else:
if ul_ran != "":
x = np.arange(ul_ran[0], ul_ran[1], .01)
else:
x = np.arange(1, 10, .01)
try:
solo = eval(graph_data)
ax1.clear()
ax1.plot(x, solo)
except:
ax1.plot()
ax1.axhline(0, color="gray")
ax1.axvline(0, color="gray")
ani.event_source.stop()
def represent():
global graph_data
global ran
global act_rango
texto_orig = et.get()
if ets.get() != "":
rann = ets.get()
ran = rann.split(",")
act_rango = True
graph_data = reemplazo(texto_orig)
ani.event_source.start()
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
et = tkinter.Entry(master=root, width=60)
et.config(bg="gray87", justify="left")
button = tkinter.Button(master=root, text="SET", bg="gray69", command=represent)
button.pack(side=tkinter.BOTTOM)
et.pack(side=tkinter.BOTTOM)
ets = tkinter.Entry(master=root, width=20)
ets.config(bg="gray87")
ets.pack(side=tkinter.RIGHT)
tkinter.mainloop()
| true
| true
|
1c403be0633980d1b0fb1e4f024a208cb83a7afa
| 86
|
py
|
Python
|
src/ShapeTemplate/Field.py
|
DaDudek/Tetris
|
b3848132fd315e883e714e1882ec4bfd38b890e1
|
[
"MIT"
] | 2
|
2022-01-16T20:44:08.000Z
|
2022-01-18T13:41:32.000Z
|
src/ShapeTemplate/Field.py
|
DaDudek/Tetris
|
b3848132fd315e883e714e1882ec4bfd38b890e1
|
[
"MIT"
] | null | null | null |
src/ShapeTemplate/Field.py
|
DaDudek/Tetris
|
b3848132fd315e883e714e1882ec4bfd38b890e1
|
[
"MIT"
] | null | null | null |
class Field:
    """One cell of the game board grid."""
    def __init__(self, isFill: bool):
        # Whether this cell is currently occupied.
        self.isFill: bool = isFill
| 21.5
| 37
| 0.639535
|
class Field:
def __init__(self, isFill: bool):
self.isFill: bool = isFill
| true
| true
|
1c403c61d6391010474c94d303ed009feb41ce0f
| 1,575
|
py
|
Python
|
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/test/test_context.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | 4
|
2016-03-30T14:31:52.000Z
|
2019-02-02T05:01:32.000Z
|
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/test/test_context.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | 1
|
2020-03-06T04:49:42.000Z
|
2020-03-06T04:49:42.000Z
|
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/test/test_context.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | 2
|
2019-08-30T23:36:13.000Z
|
2019-11-08T16:52:01.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.context}.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import SynchronousTestCase
from twisted.python import context
class ContextTest(SynchronousTestCase):
    """
    Tests for the module-scope APIs for L{twisted.python.context}.
    """
    def test_notPresentIfNotSet(self):
        """
        Arbitrary keys which have not been set in the context have an associated
        value of C{None}.
        """
        self.assertEqual(context.get("x"), None)
    def test_setByCall(self):
        """
        Values may be associated with keys by passing them in a dictionary as
        the first argument to L{twisted.python.context.call}.
        """
        self.assertEqual(context.call({"x": "y"}, context.get, "x"), "y")
    def test_unsetAfterCall(self):
        """
        After a L{twisted.python.context.call} completes, keys specified in the
        call are no longer associated with the values from that call.
        """
        context.call({"x": "y"}, lambda: None)
        self.assertEqual(context.get("x"), None)
    def test_setDefault(self):
        """
        A default value may be set for a key in the context using
        L{twisted.python.context.setDefault}.
        """
        key = object()
        # Defaults live in a module-level dict; remove the key afterwards so
        # this test does not leak state into other tests.
        self.addCleanup(context.defaultContextDict.pop, key, None)
        context.setDefault(key, "y")
        self.assertEqual("y", context.get(key))
| 30.288462
| 81
| 0.620317
|
from __future__ import division, absolute_import
from twisted.trial.unittest import SynchronousTestCase
from twisted.python import context
class ContextTest(SynchronousTestCase):
def test_notPresentIfNotSet(self):
self.assertEqual(context.get("x"), None)
def test_setByCall(self):
self.assertEqual(context.call({"x": "y"}, context.get, "x"), "y")
def test_unsetAfterCall(self):
context.call({"x": "y"}, lambda: None)
self.assertEqual(context.get("x"), None)
def test_setDefault(self):
key = object()
self.addCleanup(context.defaultContextDict.pop, key, None)
context.setDefault(key, "y")
self.assertEqual("y", context.get(key))
| true
| true
|
1c403df1f61deb78c19aa7ef3027eb475fa15a29
| 6,187
|
py
|
Python
|
telemetry/telemetry/internal/platform/tracing_agent/cpu_tracing_agent_unittest.py
|
tdresser/catapult-csm
|
8f69b07e80198c1af0d5bd368d8ad8ced968884a
|
[
"BSD-3-Clause"
] | 4
|
2017-12-29T03:17:40.000Z
|
2021-07-04T03:28:11.000Z
|
telemetry/telemetry/internal/platform/tracing_agent/cpu_tracing_agent_unittest.py
|
tdresser/catapult-csm
|
8f69b07e80198c1af0d5bd368d8ad8ced968884a
|
[
"BSD-3-Clause"
] | 1
|
2021-08-13T18:39:43.000Z
|
2021-08-13T18:39:43.000Z
|
telemetry/telemetry/internal/platform/tracing_agent/cpu_tracing_agent_unittest.py
|
tdresser/catapult-csm
|
8f69b07e80198c1af0d5bd368d8ad8ced968884a
|
[
"BSD-3-Clause"
] | 6
|
2017-12-05T07:15:08.000Z
|
2021-07-04T03:28:13.000Z
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import time
import unittest
from telemetry import decorators
from telemetry.internal.platform.tracing_agent import cpu_tracing_agent
from telemetry.internal.platform import tracing_agent
from telemetry.internal.platform import linux_platform_backend
from telemetry.internal.platform import mac_platform_backend
from telemetry.internal.platform import win_platform_backend
from telemetry.timeline import tracing_config
from tracing.trace_data import trace_data
# Expected keys of a per-process CPU snapshot and of a trace event.
# NOTE(review): not referenced in the visible tests -- presumably consumed
# by assertions later in this file; confirm before removing.
SNAPSHOT_KEYS = ['pid', 'ppid', 'name', 'pCpu', 'pMem']
TRACE_EVENT_KEYS = ['name', 'tid', 'pid', 'ph', 'args', 'local', 'id', 'ts']
class FakeAndroidPlatformBackend(object):
    """Minimal stand-in for an Android platform backend.

    Exposes just what CpuTracingAgent.IsSupported() consults: a ``device``
    attribute and GetOSName() returning 'android', which must be rejected
    since CPU tracing is desktop-only.
    """
    def __init__(self):
        self.device = 'fake_device'
    def GetOSName(self):
        """Identify this fake backend as Android."""
        return 'android'
class CpuTracingAgentTest(unittest.TestCase):
  """Tests CpuTracingAgent against the real platform backend for this OS.

  Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
  replaced with ``assertEqual`` throughout.
  """

  def setUp(self):
    self._config = tracing_config.TracingConfig()
    self._config.enable_cpu_trace = True
    # Instantiate the platform backend matching the host OS so the agent
    # talks to the real process collector.
    if sys.platform.startswith('win'):
      self._desktop_backend = win_platform_backend.WinPlatformBackend()
    elif sys.platform.startswith('darwin'):
      self._desktop_backend = mac_platform_backend.MacPlatformBackend()
    else:
      self._desktop_backend = linux_platform_backend.LinuxPlatformBackend()
    self._agent = cpu_tracing_agent.CpuTracingAgent(self._desktop_backend)

  @decorators.Enabled('linux', 'mac', 'win')
  def testInit(self):
    """A fresh agent is a TracingAgent with no snapshots collected."""
    self.assertTrue(isinstance(self._agent,
                               tracing_agent.TracingAgent))
    self.assertFalse(self._agent._snapshots)
    self.assertFalse(self._agent._snapshot_ongoing)

  @decorators.Enabled('linux', 'mac', 'win')
  def testIsSupported(self):
    """CPU tracing is supported on desktop but not on Android."""
    self.assertTrue(cpu_tracing_agent.CpuTracingAgent.IsSupported(
        self._desktop_backend))
    self.assertFalse(cpu_tracing_agent.CpuTracingAgent.IsSupported(
        FakeAndroidPlatformBackend()))

  @decorators.Enabled('linux', 'mac', 'win')
  def testStartAgentTracing(self):
    """Starting tracing kicks off snapshot collection in the background."""
    self.assertFalse(self._agent._snapshot_ongoing)
    self.assertFalse(self._agent._snapshots)
    self.assertTrue(self._agent.StartAgentTracing(self._config, 0))
    self.assertTrue(self._agent._snapshot_ongoing)
    # Allow the collector time to record at least one snapshot.
    time.sleep(2)
    self.assertTrue(self._agent._snapshots)
    self._agent.StopAgentTracing()

  @decorators.Enabled('linux', 'mac', 'win')
  def testStartAgentTracingNotEnabled(self):
    """No snapshots are collected when enable_cpu_trace is False."""
    self._config.enable_cpu_trace = False
    self.assertFalse(self._agent._snapshot_ongoing)
    self.assertFalse(self._agent.StartAgentTracing(self._config, 0))
    self.assertFalse(self._agent._snapshot_ongoing)
    self.assertFalse(self._agent._snapshots)
    time.sleep(2)
    self.assertFalse(self._agent._snapshots)

  @decorators.Enabled('linux', 'mac', 'win')
  def testStopAgentTracingBeforeStart(self):
    """Stopping before starting is a programming error."""
    self.assertRaises(AssertionError, self._agent.StopAgentTracing)

  @decorators.Enabled('linux', 'mac', 'win')
  def testStopAgentTracing(self):
    self._agent.StartAgentTracing(self._config, 0)
    self._agent.StopAgentTracing()
    self.assertFalse(self._agent._snapshot_ongoing)

  @decorators.Enabled('linux', 'mac', 'win')
  def testCollectAgentTraceDataBeforeStop(self):
    """Collecting while tracing is still ongoing is a programming error."""
    self._agent.StartAgentTracing(self._config, 0)
    self.assertRaises(AssertionError, self._agent.CollectAgentTraceData,
                      trace_data.TraceDataBuilder())
    self._agent.StopAgentTracing()

  @decorators.Enabled('linux', 'mac', 'win')
  def testCollectAgentTraceData(self):
    builder = trace_data.TraceDataBuilder()
    self._agent.StartAgentTracing(self._config, 0)
    self._agent.StopAgentTracing()
    self._agent.CollectAgentTraceData(builder)
    self.assertFalse(self._agent._snapshot_ongoing)
    builder = builder.AsData()
    self.assertTrue(builder.HasTracesFor(trace_data.CPU_TRACE_DATA))

  @decorators.Enabled('linux', 'mac', 'win')
  def testCollectAgentTraceDataFormat(self):
    """Collected events carry the expected trace-event and snapshot keys."""
    builder = trace_data.TraceDataBuilder()
    self._agent.StartAgentTracing(self._config, 0)
    time.sleep(2)
    self._agent.StopAgentTracing()
    self._agent.CollectAgentTraceData(builder)
    builder = builder.AsData()
    data = builder.GetTraceFor(trace_data.CPU_TRACE_DATA)['traceEvents']
    self.assertEqual(set(data[0].keys()), set(TRACE_EVENT_KEYS))
    self.assertEqual(set(data[0]['args']['snapshot'].keys()),
                     set(['processes']))
    self.assertTrue(data[0]['args']['snapshot']['processes'])
    self.assertEqual(set(data[0]['args']['snapshot']['processes'][0].keys()),
                     set(SNAPSHOT_KEYS))

  @decorators.Enabled('linux', 'mac', 'win')
  def testContainsRealProcesses(self):
    """Every snapshot should include this test-runner process."""
    builder = trace_data.TraceDataBuilder()
    self._agent.StartAgentTracing(self._config, 0)
    time.sleep(2)
    self._agent.StopAgentTracing()
    self._agent.CollectAgentTraceData(builder)
    builder = builder.AsData()
    data = builder.GetTraceFor(trace_data.CPU_TRACE_DATA)['traceEvents']
    for snapshot in data:
      found_unittest_process = False
      processes = snapshot['args']['snapshot']['processes']
      for process in processes:
        if 'run_tests' in process['name']:
          found_unittest_process = True
      self.assertTrue(found_unittest_process)

  @decorators.Enabled('linux', 'mac', 'win')
  def testTraceSpecifiesTelemetryClockDomain(self):
    builder = trace_data.TraceDataBuilder()
    self._agent.StartAgentTracing(self._config, 0)
    self._agent.StopAgentTracing()
    self._agent.CollectAgentTraceData(builder)
    cpu_trace = builder.AsData().GetTraceFor(trace_data.CPU_TRACE_DATA)
    self.assertEqual(cpu_trace['metadata']['clock-domain'], 'TELEMETRY')

  @decorators.Enabled('win')
  def testWindowsCanHandleProcessesWithSpaces(self):
    """Process names containing spaces must parse into the right fields."""
    proc_collector = cpu_tracing_agent.WindowsProcessCollector()
    proc_collector.Init()
    proc = proc_collector._ParseProcessString(
        '0 1 Multi Word Process 50 75')
    self.assertEqual(proc['ppid'], 0)
    self.assertEqual(proc['pid'], 1)
    self.assertEqual(proc['name'], 'Multi Word Process')
    self.assertEqual(proc['pCpu'], 50)
| 38.66875
| 78
| 0.737353
|
import sys
import time
import unittest
from telemetry import decorators
from telemetry.internal.platform.tracing_agent import cpu_tracing_agent
from telemetry.internal.platform import tracing_agent
from telemetry.internal.platform import linux_platform_backend
from telemetry.internal.platform import mac_platform_backend
from telemetry.internal.platform import win_platform_backend
from telemetry.timeline import tracing_config
from tracing.trace_data import trace_data
# Keys expected in each per-process entry of a CPU snapshot.
SNAPSHOT_KEYS = ['pid', 'ppid', 'name', 'pCpu', 'pMem']
# Keys expected in each trace event emitted by the CPU tracing agent.
TRACE_EVENT_KEYS = ['name', 'tid', 'pid', 'ph', 'args', 'local', 'id', 'ts']
class FakeAndroidPlatformBackend(object):
  """Minimal fake Android backend; used to check that CPU tracing is
  reported as unsupported on Android (see testIsSupported)."""
  def __init__(self):
    # Placeholder device identifier; never dereferenced by the tests.
    self.device = 'fake_device'
  def GetOSName(self):
    # Reports 'android' so CpuTracingAgent.IsSupported() rejects it.
    return 'android'
class CpuTracingAgentTest(unittest.TestCase):
  """Tests CpuTracingAgent against the real platform backend for this OS.

  Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
  replaced with ``assertEqual`` throughout.
  """

  def setUp(self):
    self._config = tracing_config.TracingConfig()
    self._config.enable_cpu_trace = True
    # Instantiate the platform backend matching the host OS.
    if sys.platform.startswith('win'):
      self._desktop_backend = win_platform_backend.WinPlatformBackend()
    elif sys.platform.startswith('darwin'):
      self._desktop_backend = mac_platform_backend.MacPlatformBackend()
    else:
      self._desktop_backend = linux_platform_backend.LinuxPlatformBackend()
    self._agent = cpu_tracing_agent.CpuTracingAgent(self._desktop_backend)

  @decorators.Enabled('linux', 'mac', 'win')
  def testInit(self):
    """A fresh agent is a TracingAgent with no snapshots collected."""
    self.assertTrue(isinstance(self._agent,
                               tracing_agent.TracingAgent))
    self.assertFalse(self._agent._snapshots)
    self.assertFalse(self._agent._snapshot_ongoing)

  @decorators.Enabled('linux', 'mac', 'win')
  def testIsSupported(self):
    """CPU tracing is supported on desktop but not on Android."""
    self.assertTrue(cpu_tracing_agent.CpuTracingAgent.IsSupported(
        self._desktop_backend))
    self.assertFalse(cpu_tracing_agent.CpuTracingAgent.IsSupported(
        FakeAndroidPlatformBackend()))

  @decorators.Enabled('linux', 'mac', 'win')
  def testStartAgentTracing(self):
    """Starting tracing kicks off snapshot collection in the background."""
    self.assertFalse(self._agent._snapshot_ongoing)
    self.assertFalse(self._agent._snapshots)
    self.assertTrue(self._agent.StartAgentTracing(self._config, 0))
    self.assertTrue(self._agent._snapshot_ongoing)
    # Allow the collector time to record at least one snapshot.
    time.sleep(2)
    self.assertTrue(self._agent._snapshots)
    self._agent.StopAgentTracing()

  @decorators.Enabled('linux', 'mac', 'win')
  def testStartAgentTracingNotEnabled(self):
    """No snapshots are collected when enable_cpu_trace is False."""
    self._config.enable_cpu_trace = False
    self.assertFalse(self._agent._snapshot_ongoing)
    self.assertFalse(self._agent.StartAgentTracing(self._config, 0))
    self.assertFalse(self._agent._snapshot_ongoing)
    self.assertFalse(self._agent._snapshots)
    time.sleep(2)
    self.assertFalse(self._agent._snapshots)

  @decorators.Enabled('linux', 'mac', 'win')
  def testStopAgentTracingBeforeStart(self):
    """Stopping before starting is a programming error."""
    self.assertRaises(AssertionError, self._agent.StopAgentTracing)

  @decorators.Enabled('linux', 'mac', 'win')
  def testStopAgentTracing(self):
    self._agent.StartAgentTracing(self._config, 0)
    self._agent.StopAgentTracing()
    self.assertFalse(self._agent._snapshot_ongoing)

  @decorators.Enabled('linux', 'mac', 'win')
  def testCollectAgentTraceDataBeforeStop(self):
    """Collecting while tracing is still ongoing is a programming error."""
    self._agent.StartAgentTracing(self._config, 0)
    self.assertRaises(AssertionError, self._agent.CollectAgentTraceData,
                      trace_data.TraceDataBuilder())
    self._agent.StopAgentTracing()

  @decorators.Enabled('linux', 'mac', 'win')
  def testCollectAgentTraceData(self):
    builder = trace_data.TraceDataBuilder()
    self._agent.StartAgentTracing(self._config, 0)
    self._agent.StopAgentTracing()
    self._agent.CollectAgentTraceData(builder)
    self.assertFalse(self._agent._snapshot_ongoing)
    builder = builder.AsData()
    self.assertTrue(builder.HasTracesFor(trace_data.CPU_TRACE_DATA))

  @decorators.Enabled('linux', 'mac', 'win')
  def testCollectAgentTraceDataFormat(self):
    """Collected events carry the expected trace-event and snapshot keys."""
    builder = trace_data.TraceDataBuilder()
    self._agent.StartAgentTracing(self._config, 0)
    time.sleep(2)
    self._agent.StopAgentTracing()
    self._agent.CollectAgentTraceData(builder)
    builder = builder.AsData()
    data = builder.GetTraceFor(trace_data.CPU_TRACE_DATA)['traceEvents']
    self.assertEqual(set(data[0].keys()), set(TRACE_EVENT_KEYS))
    self.assertEqual(set(data[0]['args']['snapshot'].keys()),
                     set(['processes']))
    self.assertTrue(data[0]['args']['snapshot']['processes'])
    self.assertEqual(set(data[0]['args']['snapshot']['processes'][0].keys()),
                     set(SNAPSHOT_KEYS))

  @decorators.Enabled('linux', 'mac', 'win')
  def testContainsRealProcesses(self):
    """Every snapshot should include this test-runner process."""
    builder = trace_data.TraceDataBuilder()
    self._agent.StartAgentTracing(self._config, 0)
    time.sleep(2)
    self._agent.StopAgentTracing()
    self._agent.CollectAgentTraceData(builder)
    builder = builder.AsData()
    data = builder.GetTraceFor(trace_data.CPU_TRACE_DATA)['traceEvents']
    for snapshot in data:
      found_unittest_process = False
      processes = snapshot['args']['snapshot']['processes']
      for process in processes:
        if 'run_tests' in process['name']:
          found_unittest_process = True
      self.assertTrue(found_unittest_process)

  @decorators.Enabled('linux', 'mac', 'win')
  def testTraceSpecifiesTelemetryClockDomain(self):
    builder = trace_data.TraceDataBuilder()
    self._agent.StartAgentTracing(self._config, 0)
    self._agent.StopAgentTracing()
    self._agent.CollectAgentTraceData(builder)
    cpu_trace = builder.AsData().GetTraceFor(trace_data.CPU_TRACE_DATA)
    self.assertEqual(cpu_trace['metadata']['clock-domain'], 'TELEMETRY')

  @decorators.Enabled('win')
  def testWindowsCanHandleProcessesWithSpaces(self):
    """Process names containing spaces must parse into the right fields."""
    proc_collector = cpu_tracing_agent.WindowsProcessCollector()
    proc_collector.Init()
    proc = proc_collector._ParseProcessString(
        '0 1 Multi Word Process 50 75')
    self.assertEqual(proc['ppid'], 0)
    self.assertEqual(proc['pid'], 1)
    self.assertEqual(proc['name'], 'Multi Word Process')
    self.assertEqual(proc['pCpu'], 50)
| true
| true
|
1c403e410b2f69d84e79c61035f85fe8687ac95e
| 2,162
|
py
|
Python
|
models/user.py
|
Leaniz/gordologo
|
fcd8b8a3bfea1fb6e597dfd1701884ddd07db107
|
[
"MIT"
] | 1
|
2021-08-03T20:06:42.000Z
|
2021-08-03T20:06:42.000Z
|
models/user.py
|
Leaniz/gordologo
|
fcd8b8a3bfea1fb6e597dfd1701884ddd07db107
|
[
"MIT"
] | null | null | null |
models/user.py
|
Leaniz/gordologo
|
fcd8b8a3bfea1fb6e597dfd1701884ddd07db107
|
[
"MIT"
] | null | null | null |
from elasticsearch import Elasticsearch
class UserModel:
    """User record backed by an Elasticsearch index.

    Fix: find_by_username and find_by_email were verbatim duplicates of
    each other; the shared term-query logic now lives in _find_by_term.
    """

    # NOTE(review): client is created at import time against localhost:9200
    # -- confirm this is intended outside local development.
    elast = Elasticsearch('localhost', port=9200)
    elast_idx = "gordologo-users"

    def __init__(self, _id, username, email, password_hash):
        self.id = _id
        self.username = username
        self.email = email
        self.password_hash = password_hash

    def to_dict(self):
        """Public representation; deliberately omits password_hash."""
        d = {
            "id": self.id,
            "username": self.username,
            "email": self.email
        }
        return d

    @classmethod
    def _find_by_term(cls, field, value):
        """Return the first user whose keyword `field` equals `value`
        (case-insensitive), or None when there is no match."""
        query = {
            "query": {
                "term": {
                    field: {
                        "value": value,
                        "case_insensitive": True
                    }
                }
            }
        }
        res = UserModel.elast.search(index=UserModel.elast_idx,
                                     body=query)
        hits = res["hits"]["hits"]
        if len(hits):
            user_dict = hits[0]["_source"]
            # The document id becomes the model's _id constructor argument.
            user_dict["_id"] = hits[0]["_id"]
            return cls(**user_dict)
        return None

    @classmethod
    def find_by_username(cls, username):
        """Look up a user by username (case-insensitive exact match)."""
        return cls._find_by_term("username.keyword", username)

    @classmethod
    def find_by_email(cls, email):
        """Look up a user by email (case-insensitive exact match)."""
        return cls._find_by_term("email.keyword", email)

    @classmethod
    def find_by_id(cls, id_):
        """Fetch a user by document id; returns None when absent."""
        # ignore=404 makes a missing document report found == False
        # instead of raising.
        res = UserModel.elast.get(index=UserModel.elast_idx,
                                  id=id_,
                                  ignore=404)
        if res["found"]:
            user_dict = res["_source"]
            user_dict["_id"] = res["_id"]
            return cls(**user_dict)
        else:
            return None
| 27.717949
| 64
| 0.440796
|
from elasticsearch import Elasticsearch
class UserModel:
    """User record backed by an Elasticsearch index."""
    # NOTE(review): client is created at import time against localhost:9200
    # -- confirm this is intended outside local development.
    elast = Elasticsearch('localhost', port=9200)
    elast_idx = "gordologo-users"
    def __init__(self, _id, username, email, password_hash):
        self.id = _id
        self.username = username
        self.email = email
        self.password_hash = password_hash
    def to_dict(self):
        # Public view of the user; deliberately omits password_hash.
        d = {
            "id": self.id,
            "username": self.username,
            "email": self.email
        }
        return d
    @classmethod
    def find_by_username(cls, username):
        """Return the user with this username (case-insensitive exact
        match on the keyword sub-field), or None."""
        query = {
            "query": {
                "term": {
                    "username.keyword": {
                        "value": username,
                        "case_insensitive": True
                    }
                }
            }
        }
        res = UserModel.elast.search(index=UserModel.elast_idx,
                                     body=query)
        hits = res["hits"]["hits"]
        if len(hits):
            user_dict = hits[0]["_source"]
            # The document id becomes the model's _id constructor argument.
            user_dict["_id"] = hits[0]["_id"]
            return cls(**user_dict)
        else:
            return None
    @classmethod
    def find_by_email(cls, email):
        """Same lookup as find_by_username, keyed on the email field."""
        query = {
            "query": {
                "term": {
                    "email.keyword": {
                        "value": email,
                        "case_insensitive": True
                    }
                }
            }
        }
        res = UserModel.elast.search(index=UserModel.elast_idx,
                                     body=query)
        hits = res["hits"]["hits"]
        if len(hits):
            user_dict = hits[0]["_source"]
            user_dict["_id"] = hits[0]["_id"]
            return cls(**user_dict)
        else:
            return None
    @classmethod
    def find_by_id(cls, id_):
        """Fetch a user by document id; returns None when absent."""
        # ignore=404 makes a missing document report found == False
        # instead of raising.
        res = UserModel.elast.get(index=UserModel.elast_idx,
                                  id=id_,
                                  ignore=404)
        if res["found"]:
            user_dict = res["_source"]
            user_dict["_id"] = res["_id"]
            return cls(**user_dict)
        else:
            return None
| true
| true
|
1c403e61eae75de53b3c4cc91917228410ca239f
| 552
|
py
|
Python
|
rldb/db/paper__dqn2013/algo__dqn2013/__init__.py
|
seungjaeryanlee/sotarl
|
8c471c4666d6210c68f3cb468e439a2b168c785d
|
[
"MIT"
] | 45
|
2019-05-13T17:39:33.000Z
|
2022-03-07T23:44:13.000Z
|
rldb/db/paper__dqn2013/algo__dqn2013/__init__.py
|
seungjaeryanlee/sotarl
|
8c471c4666d6210c68f3cb468e439a2b168c785d
|
[
"MIT"
] | 2
|
2019-03-29T01:41:59.000Z
|
2019-07-02T02:48:31.000Z
|
rldb/db/paper__dqn2013/algo__dqn2013/__init__.py
|
seungjaeryanlee/sotarl
|
8c471c4666d6210c68f3cb468e439a2b168c785d
|
[
"MIT"
] | 2
|
2020-04-07T20:57:30.000Z
|
2020-07-08T12:55:15.000Z
|
"""
DQN2013 scores from DQN2013 paper.
7 entries
------------------------------------------------------------------------
7 unique entries
"""
from .entries import entries
# Specify ALGORITHM
algo = {
# ALGORITHM
"algo-title": "Deep Q-Network (2013)",
"algo-nickname": "DQN2013",
"algo-source-title": "Playing Atari with Deep Reinforcement Learning",
# HYPERPARAMETERS
"algo-frames": 10 * 1000 * 1000, # Number of frames
}
# Populate entries
entries = [{**entry, **algo} for entry in entries]
assert len(entries) == 7
| 20.444444
| 74
| 0.576087
|
from .entries import entries
# Algorithm metadata shared by every DQN2013 score entry.
algo = {
    "algo-title": "Deep Q-Network (2013)",
    "algo-nickname": "DQN2013",
    "algo-source-title": "Playing Atari with Deep Reinforcement Learning",
    # 10 million frames.
    "algo-frames": 10 * 1000 * 1000,
}
# Merge the shared metadata into each entry (algo keys win on conflict).
entries = [{**entry, **algo} for entry in entries]
# The paper reports exactly 7 scores.
assert len(entries) == 7
| true
| true
|
1c403ee9a8c848544a66cf3a4bc06b9e8cb3cbcb
| 3,568
|
py
|
Python
|
cinder/volume/drivers/huawei/huawei_utils.py
|
whitepages/cinder
|
bd70ce6f4dd58ba904a7c941700cdce54e5a705e
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/huawei/huawei_utils.py
|
whitepages/cinder
|
bd70ce6f4dd58ba904a7c941700cdce54e5a705e
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:38:29.000Z
|
2021-03-21T11:38:29.000Z
|
cinder/volume/drivers/huawei/huawei_utils.py
|
isabella232/cinder
|
bd70ce6f4dd58ba904a7c941700cdce54e5a705e
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:37:47.000Z
|
2021-03-21T11:37:47.000Z
|
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import json
import six
import time
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.volume.drivers.huawei import constants
LOG = logging.getLogger(__name__)
def encode_name(name):
    """Shorten a volume UUID for array-side naming.

    The UUID string (hyphens removed) is re-parsed, base64url-encoded,
    and the trailing '=' padding is stripped, yielding a 22-character
    identifier.
    """
    parsed = uuid.UUID('urn:uuid:%s' % name.replace("-", ""))
    # decode() keeps the result a str on Python 3.
    encoded = base64.urlsafe_b64encode(parsed.bytes).decode("utf-8")
    return encoded.replace("=", "")
def encode_host_name(name):
    """Return `name`, or a hash-derived stand-in when it is longer than
    the array's maximum host name length.

    NOTE(review): built-in hash() on str is not stable across Python 3
    runs (hash randomization) -- presumably the array-side mapping
    tolerates this; confirm.
    """
    needs_shortening = bool(name) and len(name) > constants.MAX_HOSTNAME_LENGTH
    if needs_shortening:
        return six.text_type(hash(name))
    return name
def wait_for_condition(func, interval, timeout):
    """Poll ``func()`` every ``interval`` seconds until it returns truthy.

    :param func: zero-argument callable; a truthy return ends the wait.
    :param interval: seconds between polls.
    :param timeout: seconds after which the wait is abandoned.
    :raises VolumeBackendAPIException: if ``func`` raises, or on timeout.
    """
    start_time = time.time()
    def _inner():
        try:
            res = func()
        except Exception as ex:
            # Surface any poll failure as a backend API error.
            raise exception.VolumeBackendAPIException(data=ex)
        if res:
            # Truthy result: tell the looping call to stop normally.
            raise loopingcall.LoopingCallDone()
        # NOTE(review): int() truncation makes the timeout check coarse by
        # up to a second -- presumably acceptable here.
        if int(time.time()) - start_time > timeout:
            msg = (_('wait_for_condition: %s timed out.')
                   % func.__name__)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    timer = loopingcall.FixedIntervalLoopingCall(_inner)
    timer.start(interval=interval).wait()
def get_volume_size(volume):
    """Calculate the volume size.

    We should divide the given volume size by 512 for the 18000 system
    calculates volume size with sectors, which is 512 bytes.

    :param volume: volume dict/object with a 'size' entry in GiB.
    :returns: size in 512-byte sectors, as an int.
    """
    # Use floor division: on Python 3, `units.Gi / 512` is true division
    # and would silently turn the sector count into a float. Gi is a
    # power of two, so the numeric value is unchanged.
    volume_size = units.Gi // 512  # 1G
    if int(volume['size']) != 0:
        volume_size = int(volume['size']) * units.Gi // 512
    return volume_size
def get_remote_device_info(valid_hypermetro_devices):
    """Parse the hypermetro remote-device JSON configuration.

    Returns the single device's info dict when exactly one device is
    configured; otherwise returns None. Raises VolumeBackendAPIException
    on malformed JSON.
    """
    if not valid_hypermetro_devices:
        return None
    try:
        devices = json.loads(valid_hypermetro_devices)
    except ValueError as err:
        msg = _("Get remote device info error. %s.") % err
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    if len(devices) != 1:
        return None
    # Exactly one configured device: hand back its value.
    return next(iter(devices.values()))
def get_volume_metadata(volume):
    """Return the volume's metadata as a plain {key: value} dict.

    Accepts either a versioned-objects Volume or a legacy dict-like
    volume carrying a 'volume_metadata' list of {key, value} items.
    """
    # isinstance is the idiomatic type check and also accepts subclasses
    # of objects.Volume, unlike the previous `type(...) is` comparison.
    if isinstance(volume, objects.Volume):
        return volume.metadata
    if 'volume_metadata' in volume:
        metadata = volume.get('volume_metadata')
        return {item['key']: item['value'] for item in metadata}
    return {}
def get_snapshot_metadata_value(snapshot):
    """Return the snapshot's metadata as a plain {key: value} dict.

    Accepts either a versioned-objects Snapshot or a legacy dict-like
    snapshot carrying a 'snapshot_metadata' list of {key, value} items.
    """
    # isinstance is the idiomatic type check and also accepts subclasses
    # of objects.Snapshot, unlike the previous `type(...) is` comparison.
    if isinstance(snapshot, objects.Snapshot):
        return snapshot.metadata
    if 'snapshot_metadata' in snapshot:
        metadata = snapshot.get('snapshot_metadata')
        return {item['key']: item['value'] for item in metadata}
    return {}
| 29.245902
| 78
| 0.681614
|
import base64
import json
import six
import time
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.volume.drivers.huawei import constants
LOG = logging.getLogger(__name__)
def encode_name(name):
    """Base64url-encode a volume UUID and strip '=' padding."""
    uuid_str = name.replace("-", "")
    vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
    vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes)
    # decode() keeps the result a str on Python 3.
    vol_encoded = vol_encoded.decode("utf-8")
    newuuid = vol_encoded.replace("=", "")
    return newuuid
def encode_host_name(name):
    """Replace over-long host names with a hash-derived stand-in."""
    # NOTE(review): built-in hash() is not stable across Python 3 runs
    # (str hash randomization) -- confirm callers tolerate this.
    if name and (len(name) > constants.MAX_HOSTNAME_LENGTH):
        name = six.text_type(hash(name))
    return name
def wait_for_condition(func, interval, timeout):
    """Poll func() every `interval` seconds until truthy; raise
    VolumeBackendAPIException if func raises or `timeout` elapses."""
    start_time = time.time()
    def _inner():
        try:
            res = func()
        except Exception as ex:
            raise exception.VolumeBackendAPIException(data=ex)
        if res:
            # Truthy result: stop the polling loop normally.
            raise loopingcall.LoopingCallDone()
        if int(time.time()) - start_time > timeout:
            msg = (_('wait_for_condition: %s timed out.')
                   % func.__name__)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
    timer = loopingcall.FixedIntervalLoopingCall(_inner)
    timer.start(interval=interval).wait()
def get_volume_size(volume):
    """Return the volume size in 512-byte sectors (minimum 1 GiB)."""
    # NOTE(review): on Python 3 this is true division and yields a float;
    # the sibling copy of this file fixes it with //.
    volume_size = units.Gi / 512
    if int(volume['size']) != 0:
        volume_size = int(volume['size']) * units.Gi / 512
    return volume_size
def get_remote_device_info(valid_hypermetro_devices):
    """Parse the hypermetro device JSON; return the single device's value,
    or None when the config is empty or holds more than one device."""
    remote_device_info = {}
    try:
        if valid_hypermetro_devices:
            remote_device_info = json.loads(valid_hypermetro_devices)
        else:
            return
    except ValueError as err:
        msg = _("Get remote device info error. %s.") % err
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    if len(remote_device_info) == 1:
        for device_key, device_value in remote_device_info.items():
            return remote_device_info.get(device_key)
def get_volume_metadata(volume):
    """Return volume metadata as a plain {key: value} dict."""
    if type(volume) is objects.Volume:
        return volume.metadata
    if 'volume_metadata' in volume:
        metadata = volume.get('volume_metadata')
        return {item['key']: item['value'] for item in metadata}
    return {}
def get_snapshot_metadata_value(snapshot):
    """Return snapshot metadata as a plain {key: value} dict."""
    if type(snapshot) is objects.Snapshot:
        return snapshot.metadata
    if 'snapshot_metadata' in snapshot:
        metadata = snapshot.get('snapshot_metadata')
        return {item['key']: item['value'] for item in metadata}
    return {}
| true
| true
|
1c40402432cf3ee52f098bce63ed42d524eccb02
| 1,175
|
py
|
Python
|
products/pubsub/helpers/python/provider_subscription.py
|
btorresgil/magic-modules
|
f1a5e5ac9f921c3122466d153d3b99ad45e24a4f
|
[
"Apache-2.0"
] | 1
|
2019-10-23T06:16:05.000Z
|
2019-10-23T06:16:05.000Z
|
products/pubsub/helpers/python/provider_subscription.py
|
btorresgil/magic-modules
|
f1a5e5ac9f921c3122466d153d3b99ad45e24a4f
|
[
"Apache-2.0"
] | 65
|
2019-06-30T00:26:56.000Z
|
2019-12-04T05:23:56.000Z
|
products/pubsub/helpers/python/provider_subscription.py
|
btorresgil/magic-modules
|
f1a5e5ac9f921c3122466d153d3b99ad45e24a4f
|
[
"Apache-2.0"
] | 5
|
2019-03-12T02:11:18.000Z
|
2019-10-23T06:16:08.000Z
|
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def decode_request(response, module):
    """Strip fully-qualified Pub/Sub resource paths down to short names.

    Mutates and returns `response`: values of 'name' and 'topic' like
    'projects/p/subscriptions/s' become their final path segment. The
    `module` argument is unused but kept for interface symmetry with
    encode_request.
    """
    for field in ('name', 'topic'):
        if field in response:
            response[field] = response[field].rsplit('/', 1)[-1]
    return response
def encode_request(request, module):
    """Expand short resource names into fully-qualified Pub/Sub paths.

    Mutates and returns `request`: 'topic' and 'name' become
    'projects/<project>/topics/<topic>' and
    'projects/<project>/subscriptions/<name>' respectively, using the
    project and subscription name from `module.params`.
    """
    project = module.params['project']
    topic_name = replace_resource_dict(request['topic'], 'name')
    request['topic'] = '/'.join(['projects', project, 'topics', topic_name])
    request['name'] = '/'.join(['projects', project,
                                'subscriptions', module.params['name']])
    return request
| 37.903226
| 92
| 0.662128
|
def decode_request(response, module):
    # Reduce fully-qualified resource paths ('projects/p/subscriptions/s')
    # to their trailing short name; mutates and returns `response`.
    if 'name' in response:
        response['name'] = response['name'].split('/')[-1]
    if 'topic' in response:
        response['topic'] = response['topic'].split('/')[-1]
    return response
def encode_request(request, module):
    # Rebuild fully-qualified resource paths from short names before the
    # request is sent to the Pub/Sub API; mutates and returns `request`.
    request['topic'] = '/'.join(['projects', module.params['project'],
                                 'topics', replace_resource_dict(request['topic'], 'name')])
    request['name'] = '/'.join(['projects', module.params['project'],
                                'subscriptions', module.params['name']])
    return request
| true
| true
|
1c40419f0bf533355ac9e1b0ad221b90295e7c6b
| 46,268
|
py
|
Python
|
zerver/tests/test_realm.py
|
sa2c/zulip
|
a00d911ed1071e6a8bbaa17d8df9e96115973588
|
[
"Apache-2.0"
] | 1
|
2021-05-15T00:44:42.000Z
|
2021-05-15T00:44:42.000Z
|
zerver/tests/test_realm.py
|
sa2c/zulip
|
a00d911ed1071e6a8bbaa17d8df9e96115973588
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_realm.py
|
sa2c/zulip
|
a00d911ed1071e6a8bbaa17d8df9e96115973588
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import re
from typing import Any, Dict, List, Mapping, Union
from unittest import mock
import orjson
from django.conf import settings
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
do_add_deactivated_redirect,
do_change_plan_type,
do_change_realm_subdomain,
do_create_realm,
do_deactivate_realm,
do_deactivate_stream,
do_scrub_realm,
do_send_realm_reactivation_email,
do_set_realm_property,
)
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.send_email import send_future_email
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import reset_emails_in_zulip_realm, tornado_redirected_to_list
from zerver.models import (
Attachment,
CustomProfileField,
Message,
Realm,
RealmAuditLog,
ScheduledEmail,
UserMessage,
UserProfile,
get_realm,
get_stream,
get_user_profile_by_id,
)
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(
self, user_profile: UserProfile, new_realm_name: str
) -> None:
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_realm_creation_ensures_internal_realms(self) -> None:
with mock.patch("zerver.lib.actions.server_initialized", return_value=False):
with mock.patch(
"zerver.lib.actions.create_internal_realm"
) as mock_create_internal, self.assertLogs(level="INFO") as info_logs:
do_create_realm("testrealm", "Test Realm")
mock_create_internal.assert_called_once()
self.assertEqual(
info_logs.output,
["INFO:root:Server not yet initialized. Creating the internal realm first."],
)
def test_do_set_realm_name_caching(self) -> None:
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
realm = get_realm("zulip")
new_name = "Zed You Elle Eye Pea"
do_set_realm_property(realm, "name", new_name, acting_user=None)
self.assertEqual(get_realm(realm.string_id).name, new_name)
self.assert_user_profile_cache_gets_new_name(self.example_user("hamlet"), new_name)
def test_update_realm_name_events(self) -> None:
realm = get_realm("zulip")
new_name = "Puliz"
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
do_set_realm_property(realm, "name", new_name, acting_user=None)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="name",
value=new_name,
),
)
def test_update_realm_description_events(self) -> None:
realm = get_realm("zulip")
new_description = "zulip dev group"
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
do_set_realm_property(realm, "description", new_description, acting_user=None)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="description",
value=new_description,
),
)
def test_update_realm_description(self) -> None:
self.login("iago")
new_description = "zulip dev group"
data = dict(description=new_description)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch("/json/realm", data)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.description, new_description)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="description",
value=new_description,
),
)
def test_realm_description_length(self) -> None:
new_description = "A" * 1001
data = dict(description=new_description)
# create an admin user
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "description is too long (limit: 1000 characters)")
realm = get_realm("zulip")
self.assertNotEqual(realm.description, new_description)
def test_realm_name_length(self) -> None:
new_name = "A" * (Realm.MAX_REALM_NAME_LENGTH + 1)
data = dict(name=new_name)
# create an admin user
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "name is too long (limit: 40 characters)")
realm = get_realm("zulip")
self.assertNotEqual(realm.name, new_name)
def test_admin_restrictions_for_changing_realm_name(self) -> None:
new_name = "Mice will play while the cat is away"
self.login("othello")
req = dict(name=new_name)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization administrator")
def test_unauthorized_name_change(self) -> None:
data = {"full_name": "Sir Hamlet"}
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
do_set_realm_property(user_profile.realm, "name_changes_disabled", True, acting_user=None)
url = "/json/settings"
result = self.client_patch(url, data)
self.assertEqual(result.status_code, 200)
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
# Realm admins can change their name even setting is disabled.
data = {"full_name": "New Iago"}
self.login("iago")
url = "/json/settings"
result = self.client_patch(url, data)
self.assert_in_success_response(['"full_name":"New Iago"'], result)
def test_do_deactivate_realm_clears_user_realm_cache(self) -> None:
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
hamlet_id = self.example_user("hamlet").id
get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
user = get_user_profile_by_id(hamlet_id)
self.assertTrue(user.realm.deactivated)
def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None:
"""The main complicated thing about changing realm subdomains is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
hamlet_id = self.example_user("hamlet").id
user = get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
iago = self.example_user("iago")
do_change_realm_subdomain(realm, "newzulip", acting_user=iago)
user = get_user_profile_by_id(hamlet_id)
self.assertEqual(user.realm.string_id, "newzulip")
placeholder_realm = get_realm("zulip")
self.assertTrue(placeholder_realm.deactivated)
self.assertEqual(placeholder_realm.deactivated_redirect, user.realm.uri)
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_SUBDOMAIN_CHANGED, acting_user=iago
).last()
expected_extra_data = {"old_subdomain": "zulip", "new_subdomain": "newzulip"}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None:
user = self.example_user("hamlet")
send_future_email(
"zerver/emails/followup_day1",
user.realm,
to_user_ids=[user.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
do_deactivate_realm(user.realm, acting_user=None)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_do_change_realm_description_clears_cached_descriptions(self) -> None:
realm = get_realm("zulip")
rendered_description = get_realm_rendered_description(realm)
text_description = get_realm_text_description(realm)
realm.description = "New description"
realm.save(update_fields=["description"])
new_rendered_description = get_realm_rendered_description(realm)
self.assertNotEqual(rendered_description, new_rendered_description)
self.assertIn(realm.description, new_rendered_description)
new_text_description = get_realm_text_description(realm)
self.assertNotEqual(text_description, new_text_description)
self.assertEqual(realm.description, new_text_description)
def test_do_deactivate_realm_on_deactivated_realm(self) -> None:
"""Ensure early exit is working in realm deactivation"""
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
def test_do_set_deactivated_redirect_on_deactivated_realm(self) -> None:
"""Ensure that the redirect url is working when deactivating realm"""
realm = get_realm("zulip")
redirect_url = "new_server.zulip.com"
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
do_add_deactivated_redirect(realm, redirect_url)
self.assertEqual(realm.deactivated_redirect, redirect_url)
new_redirect_url = "test.zulip.com"
do_add_deactivated_redirect(realm, new_redirect_url)
self.assertEqual(realm.deactivated_redirect, new_redirect_url)
self.assertNotEqual(realm.deactivated_redirect, redirect_url)
    def test_realm_reactivation_link(self) -> None:
        """Following a valid reactivation confirmation link reactivates the realm."""
        realm = get_realm("zulip")
        do_deactivate_realm(realm, acting_user=None)
        self.assertTrue(realm.deactivated)
        confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
        response = self.client_get(confirmation_url)
        self.assert_in_success_response(
            ["Your organization has been successfully reactivated"], response
        )
        # Re-fetch: the view mutated the realm row in the database.
        realm = get_realm("zulip")
        self.assertFalse(realm.deactivated)
    def test_realm_reactivation_confirmation_object(self) -> None:
        """The reactivation Confirmation row must point at the realm itself."""
        realm = get_realm("zulip")
        do_deactivate_realm(realm, acting_user=None)
        self.assertTrue(realm.deactivated)
        create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
        # The link above created the most recent Confirmation object.
        confirmation = Confirmation.objects.last()
        self.assertEqual(confirmation.content_object, realm)
        self.assertEqual(confirmation.realm, realm)
    def test_do_send_realm_reactivation_email(self) -> None:
        """The reactivation email has the right headers/body, its link works,
        and the send is recorded in RealmAuditLog."""
        realm = get_realm("zulip")
        iago = self.example_user("iago")
        do_send_realm_reactivation_email(realm, acting_user=iago)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)
        self.assertEqual(self.email_envelope_from(outbox[0]), settings.NOREPLY_EMAIL_ADDRESS)
        # Display From must be the tokenized noreply address.
        self.assertRegex(
            self.email_display_from(outbox[0]),
            fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
        )
        self.assertIn("Reactivate your Zulip organization", outbox[0].subject)
        self.assertIn("Dear former administrators", outbox[0].body)
        # Following the emailed confirmation link reactivates the realm.
        admins = realm.get_human_admin_users()
        confirmation_url = self.get_confirmation_url_from_outbox(admins[0].delivery_email)
        response = self.client_get(confirmation_url)
        self.assert_in_success_response(
            ["Your organization has been successfully reactivated"], response
        )
        realm = get_realm("zulip")
        self.assertFalse(realm.deactivated)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                event_type=RealmAuditLog.REALM_REACTIVATION_EMAIL_SENT, acting_user=iago
            ).count(),
            1,
        )
def test_realm_reactivation_with_random_link(self) -> None:
random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b"
response = self.client_get(random_link)
self.assert_in_success_response(
["The organization reactivation link has expired or is not valid."], response
)
    def test_change_notifications_stream(self) -> None:
        """notifications_stream can be disabled (-1), set to a valid stream,
        and rejects a nonexistent stream id."""
        # We need an admin user.
        self.login("iago")
        # -1 is the sentinel meaning "disable the notifications stream".
        disabled_notif_stream_id = -1
        req = dict(notifications_stream_id=orjson.dumps(disabled_notif_stream_id).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.notifications_stream, None)
        # Assumes stream id 4 exists in the test fixtures — TODO confirm.
        new_notif_stream_id = 4
        req = dict(notifications_stream_id=orjson.dumps(new_notif_stream_id).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        assert realm.notifications_stream is not None
        self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
        # A nonexistent stream id must be rejected and leave the setting alone.
        invalid_notif_stream_id = 1234
        req = dict(notifications_stream_id=orjson.dumps(invalid_notif_stream_id).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Invalid stream id")
        realm = get_realm("zulip")
        assert realm.notifications_stream is not None
        self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)
    def test_get_default_notifications_stream(self) -> None:
        """get_notifications_stream() returns the configured stream, and None
        once that stream has been deactivated."""
        realm = get_realm("zulip")
        verona = get_stream("verona", realm)
        realm.notifications_stream_id = verona.id
        realm.save(update_fields=["notifications_stream"])
        notifications_stream = realm.get_notifications_stream()
        assert notifications_stream is not None
        self.assertEqual(notifications_stream.id, verona.id)
        # Deactivating the stream makes the accessor return None.
        do_deactivate_stream(notifications_stream, acting_user=None)
        self.assertIsNone(realm.get_notifications_stream())
    def test_change_signup_notifications_stream(self) -> None:
        """signup_notifications_stream can be disabled (-1), set to a valid
        stream, and rejects a nonexistent stream id."""
        # We need an admin user.
        self.login("iago")
        # -1 is the sentinel meaning "disable the signup notifications stream".
        disabled_signup_notifications_stream_id = -1
        req = dict(
            signup_notifications_stream_id=orjson.dumps(
                disabled_signup_notifications_stream_id
            ).decode()
        )
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.signup_notifications_stream, None)
        # Assumes stream id 4 exists in the test fixtures — TODO confirm.
        new_signup_notifications_stream_id = 4
        req = dict(
            signup_notifications_stream_id=orjson.dumps(new_signup_notifications_stream_id).decode()
        )
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        assert realm.signup_notifications_stream is not None
        self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id)
        # A nonexistent stream id must be rejected and leave the setting alone.
        invalid_signup_notifications_stream_id = 1234
        req = dict(
            signup_notifications_stream_id=orjson.dumps(
                invalid_signup_notifications_stream_id
            ).decode()
        )
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Invalid stream id")
        realm = get_realm("zulip")
        assert realm.signup_notifications_stream is not None
        self.assertNotEqual(
            realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id
        )
    def test_get_default_signup_notifications_stream(self) -> None:
        """get_signup_notifications_stream() returns the configured stream, and
        None once that stream has been deactivated."""
        realm = get_realm("zulip")
        verona = get_stream("verona", realm)
        realm.signup_notifications_stream = verona
        realm.save(update_fields=["signup_notifications_stream"])
        signup_notifications_stream = realm.get_signup_notifications_stream()
        assert signup_notifications_stream is not None
        self.assertEqual(signup_notifications_stream, verona)
        # Deactivating the stream makes the accessor return None.
        do_deactivate_stream(signup_notifications_stream, acting_user=None)
        self.assertIsNone(realm.get_signup_notifications_stream())
    def test_change_realm_default_language(self) -> None:
        """Admins can change default_language; unknown codes are rejected
        without being saved."""
        new_lang = "de"
        realm = get_realm("zulip")
        self.assertNotEqual(realm.default_language, new_lang)
        # we need an admin user.
        self.login("iago")
        req = dict(default_language=new_lang)
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.default_language, new_lang)
        # Test to make sure that when invalid languages are passed
        # as the default realm language, correct validation error is
        # raised and the invalid language is not saved in db
        invalid_lang = "invalid_lang"
        req = dict(default_language=invalid_lang)
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, f"Invalid language '{invalid_lang}'")
        realm = get_realm("zulip")
        self.assertNotEqual(realm.default_language, invalid_lang)
def test_deactivate_realm_by_owner(self) -> None:
self.login("desdemona")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
result = self.client_post("/json/realm/deactivate")
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertTrue(realm.deactivated)
def test_deactivate_realm_by_non_owner(self) -> None:
self.login("iago")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
result = self.client_post("/json/realm/deactivate")
self.assert_json_error(result, "Must be an organization owner")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
def test_change_bot_creation_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(bot_creation_policy=orjson.dumps(Realm.BOT_CREATION_LIMIT_GENERIC_BOTS).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_add_bot_permission = 4
req = dict(bot_creation_policy=orjson.dumps(invalid_add_bot_permission).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid bot_creation_policy")
    def test_change_email_address_visibility(self) -> None:
        """email_address_visibility controls which users see real delivery
        emails: invalid values are rejected; ADMINS hides them from normal
        users but not admins; NOBODY hides them from everyone."""
        # We need an admin user.
        user_profile = self.example_user("iago")
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        self.login_user(user_profile)
        invalid_value = 12
        req = dict(email_address_visibility=orjson.dumps(invalid_value).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Invalid email_address_visibility")
        reset_emails_in_zulip_realm()
        realm = get_realm("zulip")
        req = dict(
            email_address_visibility=orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS).decode()
        )
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        # With ADMINS, .email becomes a dummy user{id}@host address.
        edited_user_profile = get_user_profile_by_id(user_profile.id)
        self.assertEqual(
            edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver"
        )
        # Check normal user cannot access email
        result = self.api_get(cordelia, f"/api/v1/users/{hamlet.id}")
        self.assert_json_success(result)
        self.assertEqual(result.json()["user"]["email"], f"user{hamlet.id}@zulip.testserver")
        self.assertEqual(result.json()["user"].get("delivery_email"), None)
        # Check administrator gets delivery_email with EMAIL_ADDRESS_VISIBILITY_ADMINS
        result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}")
        self.assert_json_success(result)
        self.assertEqual(result.json()["user"]["email"], f"user{hamlet.id}@zulip.testserver")
        self.assertEqual(result.json()["user"].get("delivery_email"), hamlet.delivery_email)
        req = dict(
            email_address_visibility=orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY).decode()
        )
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        realm = get_realm("zulip")
        self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY)
        edited_user_profile = get_user_profile_by_id(user_profile.id)
        self.assertEqual(
            edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver"
        )
        # Check even administrator doesn't get delivery_email with
        # EMAIL_ADDRESS_VISIBILITY_NOBODY
        result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}")
        self.assert_json_success(result)
        self.assertEqual(result.json()["user"]["email"], f"user{hamlet.id}@zulip.testserver")
        self.assertEqual(result.json()["user"].get("delivery_email"), None)
def test_change_stream_creation_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(create_stream_policy=orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(create_stream_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid create_stream_policy")
def test_change_invite_to_stream_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(invite_to_stream_policy=orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(invite_to_stream_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid invite_to_stream_policy")
def test_change_invite_to_realm_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(invite_to_realm_policy=orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(invite_to_realm_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid invite_to_realm_policy")
def test_change_move_messages_between_streams_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(
move_messages_between_streams_policy=orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(move_messages_between_streams_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid move_messages_between_streams_policy")
def test_user_group_edit_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(
user_group_edit_policy=orjson.dumps(Realm.USER_GROUP_EDIT_POLICY_ADMINS).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(user_group_edit_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid user_group_edit_policy")
def test_private_message_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(
private_message_policy=orjson.dumps(Realm.PRIVATE_MESSAGE_POLICY_DISABLED).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(private_message_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid private_message_policy")
def test_change_wildcard_mention_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(
wildcard_mention_policy=orjson.dumps(Realm.WILDCARD_MENTION_POLICY_EVERYONE).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(wildcard_mention_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid wildcard_mention_policy")
    def test_invalid_integer_attribute_values(self) -> None:
        """Every integer realm property must reject an out-of-range value.
        Deliberately fails if a new int property is added to
        Realm.property_types without a matching entry below."""
        integer_values = [key for key, value in Realm.property_types.items() if value is int]
        # One known-invalid value per integer property.
        invalid_values = dict(
            bot_creation_policy=10,
            create_stream_policy=10,
            invite_to_stream_policy=10,
            email_address_visibility=10,
            message_retention_days=10,
            video_chat_provider=10,
            giphy_rating=10,
            waiting_period_threshold=-10,
            digest_weekday=10,
            user_group_edit_policy=10,
            private_message_policy=10,
            message_content_delete_limit_seconds=-10,
            wildcard_mention_policy=10,
            invite_to_realm_policy=10,
            move_messages_between_streams_policy=10,
        )
        # We need an admin user.
        self.login("iago")
        for name in integer_values:
            invalid_value = invalid_values.get(name)
            if invalid_value is None:
                raise AssertionError(f"No test created for {name}")
            self.do_test_invalid_integer_attribute_value(name, invalid_value)
def do_test_invalid_integer_attribute_value(self, val_name: str, invalid_val: int) -> None:
possible_messages = {
f"Invalid {val_name}",
f"Bad value for '{val_name}'",
f"Bad value for '{val_name}': {invalid_val}",
f"Invalid {val_name} {invalid_val}",
}
req = {val_name: invalid_val}
result = self.client_patch("/json/realm", req)
msg = self.get_json_error(result)
self.assertTrue(msg in possible_messages)
def test_change_video_chat_provider(self) -> None:
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
self.login("iago")
invalid_video_chat_provider_value = 10
req = {"video_chat_provider": orjson.dumps(invalid_video_chat_provider_value).decode()}
result = self.client_patch("/json/realm", req)
self.assert_json_error(
result, ("Invalid video_chat_provider {}").format(invalid_video_chat_provider_value)
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["disabled"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["disabled"]["id"]
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["big_blue_button"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider,
Realm.VIDEO_CHAT_PROVIDERS["big_blue_button"]["id"],
)
req = {
"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS["zoom"]["id"]).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
    def test_initial_plan_type(self) -> None:
        """New realms start on LIMITED when billing is enabled and SELF_HOSTED
        otherwise, with the matching quota/visibility limits."""
        with self.settings(BILLING_ENABLED=True):
            self.assertEqual(do_create_realm("hosted", "hosted").plan_type, Realm.LIMITED)
            self.assertEqual(
                get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX
            )
            self.assertEqual(
                get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED
            )
            self.assertEqual(get_realm("hosted").upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
        with self.settings(BILLING_ENABLED=False):
            # Self-hosted realms have no message-visibility or upload limits.
            self.assertEqual(do_create_realm("onpremise", "onpremise").plan_type, Realm.SELF_HOSTED)
            self.assertEqual(
                get_realm("onpremise").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX
            )
            self.assertEqual(get_realm("onpremise").message_visibility_limit, None)
            self.assertEqual(get_realm("onpremise").upload_quota_gb, None)
    def test_change_plan_type(self) -> None:
        """do_change_plan_type updates quotas/limits for each plan and writes
        a REALM_PLAN_TYPE_CHANGED audit-log entry."""
        realm = get_realm("zulip")
        iago = self.example_user("iago")
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, None)
        do_change_plan_type(realm, Realm.STANDARD, acting_user=iago)
        realm = get_realm("zulip")
        realm_audit_log = RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED
        ).last()
        # extra_data is stored as the str() of a dict in this audit-log entry.
        expected_extra_data = {"old_value": Realm.SELF_HOSTED, "new_value": Realm.STANDARD}
        self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
        self.assertEqual(realm_audit_log.acting_user, iago)
        self.assertEqual(realm.plan_type, Realm.STANDARD)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
        # LIMITED restores the default invite cap and restricts visibility.
        do_change_plan_type(realm, Realm.LIMITED, acting_user=iago)
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.LIMITED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
        self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
        # STANDARD_FREE mirrors STANDARD's limits.
        do_change_plan_type(realm, Realm.STANDARD_FREE, acting_user=iago)
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.STANDARD_FREE)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
        # And back to SELF_HOSTED clears all limits again.
        do_change_plan_type(realm, Realm.LIMITED, acting_user=iago)
        do_change_plan_type(realm, Realm.SELF_HOSTED, acting_user=iago)
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
        self.assertEqual(realm.message_visibility_limit, None)
        self.assertEqual(realm.upload_quota_gb, None)
    def test_message_retention_days(self) -> None:
        """message_retention_days requires an owner, validates its value, and
        is gated on the realm's plan type."""
        self.login("iago")
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
        # Admin-but-not-owner is rejected.
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Must be an organization owner")
        self.login("desdemona")
        # 0 and negative values (other than the special -1) are invalid.
        req = dict(message_retention_days=orjson.dumps(0).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': 0")
        req = dict(message_retention_days=orjson.dumps(-10).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': -10")
        req = dict(message_retention_days=orjson.dumps("invalid").decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': invalid")
        self.assert_json_error(result, "Bad value for 'message_retention_days': invalid")
        req = dict(message_retention_days=orjson.dumps(-1).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Bad value for 'message_retention_days': -1")
        # "forever" is the accepted string form for unlimited retention.
        req = dict(message_retention_days=orjson.dumps("forever").decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
        # LIMITED plans cannot customize retention; STANDARD can.
        do_change_plan_type(realm, Realm.LIMITED, acting_user=None)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_error(result, "Available on Zulip Standard. Upgrade to access.")
        do_change_plan_type(realm, Realm.STANDARD, acting_user=None)
        req = dict(message_retention_days=orjson.dumps(10).decode())
        result = self.client_patch("/json/realm", req)
        self.assert_json_success(result)
class RealmAPITest(ZulipTestCase):
    """Parameterized tests of updating realm settings via PATCH /json/realm."""
    def setUp(self) -> None:
        # All endpoints below are exercised as an organization owner.
        super().setUp()
        self.login("desdemona")
    def set_up_db(self, attr: str, value: Any) -> None:
        """Set a realm attribute directly in the DB, bypassing the API."""
        realm = get_realm("zulip")
        setattr(realm, attr, value)
        realm.save(update_fields=[attr])
    def update_with_api(self, name: str, value: Union[int, str]) -> Realm:
        """PATCH a single realm property and return the refreshed Realm."""
        if not isinstance(value, str):
            value = orjson.dumps(value).decode()
        result = self.client_patch("/json/realm", {name: value})
        self.assert_json_success(result)
        return get_realm("zulip")  # refresh data
    def update_with_api_multiple_value(self, data_dict: Dict[str, Any]) -> Realm:
        """PATCH a pre-encoded request dict and return the refreshed Realm."""
        result = self.client_patch("/json/realm", data_dict)
        self.assert_json_success(result)
        return get_realm("zulip")
    def do_test_realm_update_api(self, name: str) -> None:
        """Test updating realm properties.
        If new realm properties have been added to the Realm model but the
        test_values dict below has not been updated, this will raise an
        assertion error.
        """
        bool_tests: List[bool] = [False, True]
        # At least two values per non-bool property, so the round-trip
        # below actually changes state both ways.
        test_values: Dict[str, Any] = dict(
            default_language=["de", "en"],
            default_code_block_language=["javascript", ""],
            description=["Realm description", "New description"],
            digest_weekday=[0, 1, 2],
            message_retention_days=[10, 20],
            name=["Zulip", "New Name"],
            waiting_period_threshold=[10, 20],
            create_stream_policy=[
                Realm.POLICY_ADMINS_ONLY,
                Realm.POLICY_MEMBERS_ONLY,
                Realm.POLICY_FULL_MEMBERS_ONLY,
                Realm.POLICY_MODERATORS_ONLY,
            ],
            user_group_edit_policy=[
                Realm.USER_GROUP_EDIT_POLICY_ADMINS,
                Realm.USER_GROUP_EDIT_POLICY_MEMBERS,
            ],
            private_message_policy=[
                Realm.PRIVATE_MESSAGE_POLICY_UNLIMITED,
                Realm.PRIVATE_MESSAGE_POLICY_DISABLED,
            ],
            invite_to_stream_policy=[
                Realm.POLICY_ADMINS_ONLY,
                Realm.POLICY_MEMBERS_ONLY,
                Realm.POLICY_FULL_MEMBERS_ONLY,
                Realm.POLICY_MODERATORS_ONLY,
            ],
            wildcard_mention_policy=[
                Realm.WILDCARD_MENTION_POLICY_EVERYONE,
                Realm.WILDCARD_MENTION_POLICY_MEMBERS,
                Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS,
                Realm.WILDCARD_MENTION_POLICY_STREAM_ADMINS,
                Realm.WILDCARD_MENTION_POLICY_ADMINS,
                Realm.WILDCARD_MENTION_POLICY_NOBODY,
                Realm.WILDCARD_MENTION_POLICY_MODERATORS,
            ],
            bot_creation_policy=[1, 2],
            email_address_visibility=[
                Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
                Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
                Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY,
            ],
            video_chat_provider=[
                dict(
                    video_chat_provider=orjson.dumps(
                        Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
                    ).decode(),
                ),
            ],
            giphy_rating=[
                Realm.GIPHY_RATING_OPTIONS["y"]["id"],
                Realm.GIPHY_RATING_OPTIONS["r"]["id"],
            ],
            message_content_delete_limit_seconds=[1000, 1100, 1200],
            invite_to_realm_policy=[
                Realm.POLICY_ADMINS_ONLY,
                Realm.POLICY_MEMBERS_ONLY,
                Realm.POLICY_FULL_MEMBERS_ONLY,
                Realm.POLICY_MODERATORS_ONLY,
            ],
            move_messages_between_streams_policy=[
                Realm.POLICY_ADMINS_ONLY,
                Realm.POLICY_MEMBERS_ONLY,
                Realm.POLICY_FULL_MEMBERS_ONLY,
                Realm.POLICY_MODERATORS_ONLY,
            ],
        )
        vals = test_values.get(name)
        if Realm.property_types[name] is bool:
            vals = bool_tests
        if vals is None:
            raise AssertionError(f"No test created for {name}")
        if name == "video_chat_provider":
            # This property's test value is a pre-encoded request dict.
            self.set_up_db(name, vals[0][name])
            realm = self.update_with_api_multiple_value(vals[0])
            self.assertEqual(getattr(realm, name), orjson.loads(vals[0][name]))
        else:
            # Seed with vals[0], flip to vals[1] via the API, flip back.
            self.set_up_db(name, vals[0])
            realm = self.update_with_api(name, vals[1])
            self.assertEqual(getattr(realm, name), vals[1])
            realm = self.update_with_api(name, vals[0])
            self.assertEqual(getattr(realm, name), vals[0])
    def test_update_realm_properties(self) -> None:
        """Run do_test_realm_update_api for every declared realm property."""
        for prop in Realm.property_types:
            with self.subTest(property=prop):
                self.do_test_realm_update_api(prop)
    def test_update_realm_allow_message_editing(self) -> None:
        """Tests updating the realm property 'allow_message_editing'."""
        self.set_up_db("allow_message_editing", False)
        self.set_up_db("message_content_edit_limit_seconds", 0)
        self.set_up_db("allow_community_topic_editing", False)
        realm = self.update_with_api("allow_message_editing", True)
        realm = self.update_with_api("message_content_edit_limit_seconds", 100)
        realm = self.update_with_api("allow_community_topic_editing", True)
        self.assertEqual(realm.allow_message_editing, True)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        # The three settings are independent: toggling one must not
        # disturb the others.
        realm = self.update_with_api("allow_message_editing", False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api("message_content_edit_limit_seconds", 200)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api("allow_community_topic_editing", False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, False)
    def test_update_realm_allow_message_deleting(self) -> None:
        """Tests updating the realm property 'allow_message_deleting'."""
        self.set_up_db("allow_message_deleting", True)
        self.set_up_db("message_content_delete_limit_seconds", 0)
        realm = self.update_with_api("allow_message_deleting", False)
        self.assertEqual(realm.allow_message_deleting, False)
        self.assertEqual(realm.message_content_delete_limit_seconds, 0)
        realm = self.update_with_api("allow_message_deleting", True)
        realm = self.update_with_api("message_content_delete_limit_seconds", 100)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 100)
        realm = self.update_with_api("message_content_delete_limit_seconds", 600)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 600)
class ScrubRealmTest(ZulipTestCase):
    """Tests for do_scrub_realm (GDPR-style data removal for one realm)."""
    def test_scrub_realm(self) -> None:
        """Scrubbing the zulip realm removes its messages, attachments,
        custom profile fields, and PII — while the lear realm is untouched."""
        zulip = get_realm("zulip")
        lear = get_realm("lear")
        iago = self.example_user("iago")
        othello = self.example_user("othello")
        cordelia = self.lear_user("cordelia")
        king = self.lear_user("king")
        create_stream_if_needed(lear, "Shakespeare")
        self.subscribe(cordelia, "Shakespeare")
        self.subscribe(king, "Shakespeare")
        # Start from a clean slate so the counts below are exact.
        Message.objects.all().delete()
        UserMessage.objects.all().delete()
        # 5 messages per sender: 10 in each realm.
        for i in range(5):
            self.send_stream_message(iago, "Scotland")
            self.send_stream_message(othello, "Scotland")
            self.send_stream_message(cordelia, "Shakespeare")
            self.send_stream_message(king, "Shakespeare")
        Attachment.objects.filter(realm=zulip).delete()
        Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt", size=512)
        Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt", size=512)
        Attachment.objects.filter(realm=lear).delete()
        Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt", size=512)
        Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt", size=512)
        CustomProfileField.objects.create(realm=lear)
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        # do_scrub_realm logs warnings; assertLogs keeps test output clean.
        with self.assertLogs(level="WARNING"):
            do_scrub_realm(zulip, acting_user=None)
        # zulip-realm data is gone; lear-realm data is intact.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)
        self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
        self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)
        self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)
        # Scrubbed users get anonymized names and emails.
        zulip_users = UserProfile.objects.filter(realm=zulip)
        for user in zulip_users:
            self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
        lear_users = UserProfile.objects.filter(realm=lear)
        for user in lear_users:
            self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
| 43.731569
| 100
| 0.671285
|
import datetime
import re
from typing import Any, Dict, List, Mapping, Union
from unittest import mock
import orjson
from django.conf import settings
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
do_add_deactivated_redirect,
do_change_plan_type,
do_change_realm_subdomain,
do_create_realm,
do_deactivate_realm,
do_deactivate_stream,
do_scrub_realm,
do_send_realm_reactivation_email,
do_set_realm_property,
)
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.send_email import send_future_email
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import reset_emails_in_zulip_realm, tornado_redirected_to_list
from zerver.models import (
Attachment,
CustomProfileField,
Message,
Realm,
RealmAuditLog,
ScheduledEmail,
UserMessage,
UserProfile,
get_realm,
get_stream,
get_user_profile_by_id,
)
class RealmTest(ZulipTestCase):
    def assert_user_profile_cache_gets_new_name(
        self, user_profile: UserProfile, new_realm_name: str
    ) -> None:
        """Check that *user_profile*'s realm object reflects the new name."""
        self.assertEqual(user_profile.realm.name, new_realm_name)
    def test_realm_creation_ensures_internal_realms(self) -> None:
        """Creating a realm on an uninitialized server must first create the
        internal realm (and log that it is doing so)."""
        with mock.patch("zerver.lib.actions.server_initialized", return_value=False):
            with mock.patch(
                "zerver.lib.actions.create_internal_realm"
            ) as mock_create_internal, self.assertLogs(level="INFO") as info_logs:
                do_create_realm("testrealm", "Test Realm")
            mock_create_internal.assert_called_once()
            self.assertEqual(
                info_logs.output,
                ["INFO:root:Server not yet initialized. Creating the internal realm first."],
            )
    def test_do_set_realm_name_caching(self) -> None:
        """The main complicated thing about setting realm names is fighting the
        realm cache: the renamed realm must be visible both via get_realm and
        via cached user profiles."""
        realm = get_realm("zulip")
        new_name = "Zed You Elle Eye Pea"
        do_set_realm_property(realm, "name", new_name, acting_user=None)
        self.assertEqual(get_realm(realm.string_id).name, new_name)
        self.assert_user_profile_cache_gets_new_name(self.example_user("hamlet"), new_name)
    def test_update_realm_name_events(self) -> None:
        """Renaming the realm must emit a realm/update event for 'name'."""
        realm = get_realm("zulip")
        new_name = "Puliz"
        events: List[Mapping[str, Any]] = []
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, "name", new_name, acting_user=None)
        event = events[0]["event"]
        self.assertEqual(
            event,
            dict(
                type="realm",
                op="update",
                property="name",
                value=new_name,
            ),
        )
    def test_update_realm_description_events(self) -> None:
        """Changing the description must emit a realm/update event for it."""
        realm = get_realm("zulip")
        new_description = "zulip dev group"
        events: List[Mapping[str, Any]] = []
        with tornado_redirected_to_list(events):
            do_set_realm_property(realm, "description", new_description, acting_user=None)
        event = events[0]["event"]
        self.assertEqual(
            event,
            dict(
                type="realm",
                op="update",
                property="description",
                value=new_description,
            ),
        )
def test_update_realm_description(self) -> None:
self.login("iago")
new_description = "zulip dev group"
data = dict(description=new_description)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_patch("/json/realm", data)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.description, new_description)
event = events[0]["event"]
self.assertEqual(
event,
dict(
type="realm",
op="update",
property="description",
value=new_description,
),
)
def test_realm_description_length(self) -> None:
new_description = "A" * 1001
data = dict(description=new_description)
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "description is too long (limit: 1000 characters)")
realm = get_realm("zulip")
self.assertNotEqual(realm.description, new_description)
def test_realm_name_length(self) -> None:
new_name = "A" * (Realm.MAX_REALM_NAME_LENGTH + 1)
data = dict(name=new_name)
self.login("iago")
result = self.client_patch("/json/realm", data)
self.assert_json_error(result, "name is too long (limit: 40 characters)")
realm = get_realm("zulip")
self.assertNotEqual(realm.name, new_name)
def test_admin_restrictions_for_changing_realm_name(self) -> None:
new_name = "Mice will play while the cat is away"
self.login("othello")
req = dict(name=new_name)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization administrator")
def test_unauthorized_name_change(self) -> None:
data = {"full_name": "Sir Hamlet"}
user_profile = self.example_user("hamlet")
self.login_user(user_profile)
do_set_realm_property(user_profile.realm, "name_changes_disabled", True, acting_user=None)
url = "/json/settings"
result = self.client_patch(url, data)
self.assertEqual(result.status_code, 200)
self.assert_in_response("", result)
data = {"full_name": "New Iago"}
self.login("iago")
url = "/json/settings"
result = self.client_patch(url, data)
self.assert_in_success_response(['"full_name":"New Iago"'], result)
def test_do_deactivate_realm_clears_user_realm_cache(self) -> None:
hamlet_id = self.example_user("hamlet").id
get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
user = get_user_profile_by_id(hamlet_id)
self.assertTrue(user.realm.deactivated)
def test_do_change_realm_subdomain_clears_user_realm_cache(self) -> None:
hamlet_id = self.example_user("hamlet").id
user = get_user_profile_by_id(hamlet_id)
realm = get_realm("zulip")
iago = self.example_user("iago")
do_change_realm_subdomain(realm, "newzulip", acting_user=iago)
user = get_user_profile_by_id(hamlet_id)
self.assertEqual(user.realm.string_id, "newzulip")
placeholder_realm = get_realm("zulip")
self.assertTrue(placeholder_realm.deactivated)
self.assertEqual(placeholder_realm.deactivated_redirect, user.realm.uri)
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_SUBDOMAIN_CHANGED, acting_user=iago
).last()
expected_extra_data = {"old_subdomain": "zulip", "new_subdomain": "newzulip"}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
def test_do_deactivate_realm_clears_scheduled_jobs(self) -> None:
user = self.example_user("hamlet")
send_future_email(
"zerver/emails/followup_day1",
user.realm,
to_user_ids=[user.id],
delay=datetime.timedelta(hours=1),
)
self.assertEqual(ScheduledEmail.objects.count(), 1)
do_deactivate_realm(user.realm, acting_user=None)
self.assertEqual(ScheduledEmail.objects.count(), 0)
def test_do_change_realm_description_clears_cached_descriptions(self) -> None:
realm = get_realm("zulip")
rendered_description = get_realm_rendered_description(realm)
text_description = get_realm_text_description(realm)
realm.description = "New description"
realm.save(update_fields=["description"])
new_rendered_description = get_realm_rendered_description(realm)
self.assertNotEqual(rendered_description, new_rendered_description)
self.assertIn(realm.description, new_rendered_description)
new_text_description = get_realm_text_description(realm)
self.assertNotEqual(text_description, new_text_description)
self.assertEqual(realm.description, new_text_description)
def test_do_deactivate_realm_on_deactivated_realm(self) -> None:
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
def test_do_set_deactivated_redirect_on_deactivated_realm(self) -> None:
realm = get_realm("zulip")
redirect_url = "new_server.zulip.com"
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
do_add_deactivated_redirect(realm, redirect_url)
self.assertEqual(realm.deactivated_redirect, redirect_url)
new_redirect_url = "test.zulip.com"
do_add_deactivated_redirect(realm, new_redirect_url)
self.assertEqual(realm.deactivated_redirect, new_redirect_url)
self.assertNotEqual(realm.deactivated_redirect, redirect_url)
def test_realm_reactivation_link(self) -> None:
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
response = self.client_get(confirmation_url)
self.assert_in_success_response(
["Your organization has been successfully reactivated"], response
)
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
def test_realm_reactivation_confirmation_object(self) -> None:
realm = get_realm("zulip")
do_deactivate_realm(realm, acting_user=None)
self.assertTrue(realm.deactivated)
create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
confirmation = Confirmation.objects.last()
self.assertEqual(confirmation.content_object, realm)
self.assertEqual(confirmation.realm, realm)
def test_do_send_realm_reactivation_email(self) -> None:
realm = get_realm("zulip")
iago = self.example_user("iago")
do_send_realm_reactivation_email(realm, acting_user=iago)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
self.assertEqual(self.email_envelope_from(outbox[0]), settings.NOREPLY_EMAIL_ADDRESS)
self.assertRegex(
self.email_display_from(outbox[0]),
fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
)
self.assertIn("Reactivate your Zulip organization", outbox[0].subject)
self.assertIn("Dear former administrators", outbox[0].body)
admins = realm.get_human_admin_users()
confirmation_url = self.get_confirmation_url_from_outbox(admins[0].delivery_email)
response = self.client_get(confirmation_url)
self.assert_in_success_response(
["Your organization has been successfully reactivated"], response
)
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
self.assertEqual(
RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_REACTIVATION_EMAIL_SENT, acting_user=iago
).count(),
1,
)
def test_realm_reactivation_with_random_link(self) -> None:
random_link = "/reactivate/5e89081eb13984e0f3b130bf7a4121d153f1614b"
response = self.client_get(random_link)
self.assert_in_success_response(
["The organization reactivation link has expired or is not valid."], response
)
def test_change_notifications_stream(self) -> None:
self.login("iago")
disabled_notif_stream_id = -1
req = dict(notifications_stream_id=orjson.dumps(disabled_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.notifications_stream, None)
new_notif_stream_id = 4
req = dict(notifications_stream_id=orjson.dumps(new_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
assert realm.notifications_stream is not None
self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
invalid_notif_stream_id = 1234
req = dict(notifications_stream_id=orjson.dumps(invalid_notif_stream_id).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid stream id")
realm = get_realm("zulip")
assert realm.notifications_stream is not None
self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)
def test_get_default_notifications_stream(self) -> None:
realm = get_realm("zulip")
verona = get_stream("verona", realm)
realm.notifications_stream_id = verona.id
realm.save(update_fields=["notifications_stream"])
notifications_stream = realm.get_notifications_stream()
assert notifications_stream is not None
self.assertEqual(notifications_stream.id, verona.id)
do_deactivate_stream(notifications_stream, acting_user=None)
self.assertIsNone(realm.get_notifications_stream())
def test_change_signup_notifications_stream(self) -> None:
self.login("iago")
disabled_signup_notifications_stream_id = -1
req = dict(
signup_notifications_stream_id=orjson.dumps(
disabled_signup_notifications_stream_id
).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.signup_notifications_stream, None)
new_signup_notifications_stream_id = 4
req = dict(
signup_notifications_stream_id=orjson.dumps(new_signup_notifications_stream_id).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
assert realm.signup_notifications_stream is not None
self.assertEqual(realm.signup_notifications_stream.id, new_signup_notifications_stream_id)
invalid_signup_notifications_stream_id = 1234
req = dict(
signup_notifications_stream_id=orjson.dumps(
invalid_signup_notifications_stream_id
).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid stream id")
realm = get_realm("zulip")
assert realm.signup_notifications_stream is not None
self.assertNotEqual(
realm.signup_notifications_stream.id, invalid_signup_notifications_stream_id
)
def test_get_default_signup_notifications_stream(self) -> None:
realm = get_realm("zulip")
verona = get_stream("verona", realm)
realm.signup_notifications_stream = verona
realm.save(update_fields=["signup_notifications_stream"])
signup_notifications_stream = realm.get_signup_notifications_stream()
assert signup_notifications_stream is not None
self.assertEqual(signup_notifications_stream, verona)
do_deactivate_stream(signup_notifications_stream, acting_user=None)
self.assertIsNone(realm.get_signup_notifications_stream())
def test_change_realm_default_language(self) -> None:
new_lang = "de"
realm = get_realm("zulip")
self.assertNotEqual(realm.default_language, new_lang)
self.login("iago")
req = dict(default_language=new_lang)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.default_language, new_lang)
invalid_lang = "invalid_lang"
req = dict(default_language=invalid_lang)
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, f"Invalid language '{invalid_lang}'")
realm = get_realm("zulip")
self.assertNotEqual(realm.default_language, invalid_lang)
def test_deactivate_realm_by_owner(self) -> None:
self.login("desdemona")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
result = self.client_post("/json/realm/deactivate")
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertTrue(realm.deactivated)
def test_deactivate_realm_by_non_owner(self) -> None:
self.login("iago")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
result = self.client_post("/json/realm/deactivate")
self.assert_json_error(result, "Must be an organization owner")
realm = get_realm("zulip")
self.assertFalse(realm.deactivated)
def test_change_bot_creation_policy(self) -> None:
self.login("iago")
req = dict(bot_creation_policy=orjson.dumps(Realm.BOT_CREATION_LIMIT_GENERIC_BOTS).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_add_bot_permission = 4
req = dict(bot_creation_policy=orjson.dumps(invalid_add_bot_permission).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid bot_creation_policy")
def test_change_email_address_visibility(self) -> None:
user_profile = self.example_user("iago")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
self.login_user(user_profile)
invalid_value = 12
req = dict(email_address_visibility=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid email_address_visibility")
reset_emails_in_zulip_realm()
realm = get_realm("zulip")
req = dict(
email_address_visibility=orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
edited_user_profile = get_user_profile_by_id(user_profile.id)
self.assertEqual(
edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver"
)
result = self.api_get(cordelia, f"/api/v1/users/{hamlet.id}")
self.assert_json_success(result)
self.assertEqual(result.json()["user"]["email"], f"user{hamlet.id}@zulip.testserver")
self.assertEqual(result.json()["user"].get("delivery_email"), None)
result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}")
self.assert_json_success(result)
self.assertEqual(result.json()["user"]["email"], f"user{hamlet.id}@zulip.testserver")
self.assertEqual(result.json()["user"].get("delivery_email"), hamlet.delivery_email)
req = dict(
email_address_visibility=orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
realm = get_realm("zulip")
self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY)
edited_user_profile = get_user_profile_by_id(user_profile.id)
self.assertEqual(
edited_user_profile.email, f"user{edited_user_profile.id}@zulip.testserver"
)
# EMAIL_ADDRESS_VISIBILITY_NOBODY
result = self.api_get(user_profile, f"/api/v1/users/{hamlet.id}")
self.assert_json_success(result)
self.assertEqual(result.json()["user"]["email"], f"user{hamlet.id}@zulip.testserver")
self.assertEqual(result.json()["user"].get("delivery_email"), None)
def test_change_stream_creation_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(create_stream_policy=orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(create_stream_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid create_stream_policy")
def test_change_invite_to_stream_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(invite_to_stream_policy=orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(invite_to_stream_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid invite_to_stream_policy")
def test_change_invite_to_realm_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(invite_to_realm_policy=orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(invite_to_realm_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid invite_to_realm_policy")
def test_change_move_messages_between_streams_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(
move_messages_between_streams_policy=orjson.dumps(Realm.POLICY_ADMINS_ONLY).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(move_messages_between_streams_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid move_messages_between_streams_policy")
def test_user_group_edit_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(
user_group_edit_policy=orjson.dumps(Realm.USER_GROUP_EDIT_POLICY_ADMINS).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(user_group_edit_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid user_group_edit_policy")
def test_private_message_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(
private_message_policy=orjson.dumps(Realm.PRIVATE_MESSAGE_POLICY_DISABLED).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(private_message_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid private_message_policy")
def test_change_wildcard_mention_policy(self) -> None:
# We need an admin user.
self.login("iago")
req = dict(
wildcard_mention_policy=orjson.dumps(Realm.WILDCARD_MENTION_POLICY_EVERYONE).decode()
)
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
invalid_value = 10
req = dict(wildcard_mention_policy=orjson.dumps(invalid_value).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Invalid wildcard_mention_policy")
def test_invalid_integer_attribute_values(self) -> None:
integer_values = [key for key, value in Realm.property_types.items() if value is int]
invalid_values = dict(
bot_creation_policy=10,
create_stream_policy=10,
invite_to_stream_policy=10,
email_address_visibility=10,
message_retention_days=10,
video_chat_provider=10,
giphy_rating=10,
waiting_period_threshold=-10,
digest_weekday=10,
user_group_edit_policy=10,
private_message_policy=10,
message_content_delete_limit_seconds=-10,
wildcard_mention_policy=10,
invite_to_realm_policy=10,
move_messages_between_streams_policy=10,
)
# We need an admin user.
self.login("iago")
for name in integer_values:
invalid_value = invalid_values.get(name)
if invalid_value is None:
raise AssertionError(f"No test created for {name}")
self.do_test_invalid_integer_attribute_value(name, invalid_value)
def do_test_invalid_integer_attribute_value(self, val_name: str, invalid_val: int) -> None:
possible_messages = {
f"Invalid {val_name}",
f"Bad value for '{val_name}'",
f"Bad value for '{val_name}': {invalid_val}",
f"Invalid {val_name} {invalid_val}",
}
req = {val_name: invalid_val}
result = self.client_patch("/json/realm", req)
msg = self.get_json_error(result)
self.assertTrue(msg in possible_messages)
def test_change_video_chat_provider(self) -> None:
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
self.login("iago")
invalid_video_chat_provider_value = 10
req = {"video_chat_provider": orjson.dumps(invalid_video_chat_provider_value).decode()}
result = self.client_patch("/json/realm", req)
self.assert_json_error(
result, ("Invalid video_chat_provider {}").format(invalid_video_chat_provider_value)
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["disabled"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["disabled"]["id"]
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider, Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
)
req = {
"video_chat_provider": orjson.dumps(
Realm.VIDEO_CHAT_PROVIDERS["big_blue_button"]["id"]
).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
self.assertEqual(
get_realm("zulip").video_chat_provider,
Realm.VIDEO_CHAT_PROVIDERS["big_blue_button"]["id"],
)
req = {
"video_chat_provider": orjson.dumps(Realm.VIDEO_CHAT_PROVIDERS["zoom"]["id"]).decode()
}
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
def test_initial_plan_type(self) -> None:
with self.settings(BILLING_ENABLED=True):
self.assertEqual(do_create_realm("hosted", "hosted").plan_type, Realm.LIMITED)
self.assertEqual(
get_realm("hosted").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX
)
self.assertEqual(
get_realm("hosted").message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED
)
self.assertEqual(get_realm("hosted").upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
with self.settings(BILLING_ENABLED=False):
self.assertEqual(do_create_realm("onpremise", "onpremise").plan_type, Realm.SELF_HOSTED)
self.assertEqual(
get_realm("onpremise").max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX
)
self.assertEqual(get_realm("onpremise").message_visibility_limit, None)
self.assertEqual(get_realm("onpremise").upload_quota_gb, None)
def test_change_plan_type(self) -> None:
realm = get_realm("zulip")
iago = self.example_user("iago")
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, None)
do_change_plan_type(realm, Realm.STANDARD, acting_user=iago)
realm = get_realm("zulip")
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED
).last()
expected_extra_data = {"old_value": Realm.SELF_HOSTED, "new_value": Realm.STANDARD}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
self.assertEqual(realm.plan_type, Realm.STANDARD)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_plan_type(realm, Realm.LIMITED, acting_user=iago)
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.LIMITED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, Realm.MESSAGE_VISIBILITY_LIMITED)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_LIMITED)
do_change_plan_type(realm, Realm.STANDARD_FREE, acting_user=iago)
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.STANDARD_FREE)
self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, Realm.UPLOAD_QUOTA_STANDARD)
do_change_plan_type(realm, Realm.LIMITED, acting_user=iago)
do_change_plan_type(realm, Realm.SELF_HOSTED, acting_user=iago)
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
self.assertEqual(realm.message_visibility_limit, None)
self.assertEqual(realm.upload_quota_gb, None)
def test_message_retention_days(self) -> None:
self.login("iago")
realm = get_realm("zulip")
self.assertEqual(realm.plan_type, Realm.SELF_HOSTED)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Must be an organization owner")
self.login("desdemona")
req = dict(message_retention_days=orjson.dumps(0).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': 0")
req = dict(message_retention_days=orjson.dumps(-10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': -10")
req = dict(message_retention_days=orjson.dumps("invalid").decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': invalid")
req = dict(message_retention_days=orjson.dumps(-1).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Bad value for 'message_retention_days': -1")
req = dict(message_retention_days=orjson.dumps("forever").decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
do_change_plan_type(realm, Realm.LIMITED, acting_user=None)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_error(result, "Available on Zulip Standard. Upgrade to access.")
do_change_plan_type(realm, Realm.STANDARD, acting_user=None)
req = dict(message_retention_days=orjson.dumps(10).decode())
result = self.client_patch("/json/realm", req)
self.assert_json_success(result)
class RealmAPITest(ZulipTestCase):
    """Generic, table-driven tests that every realm property declared in
    Realm.property_types can be read from the DB and updated through
    PATCH /json/realm."""

    def setUp(self) -> None:
        super().setUp()
        # Log in as an organization owner so owner-only properties
        # (e.g. message_retention_days) can be updated too.
        self.login("desdemona")

    def set_up_db(self, attr: str, value: Any) -> None:
        # Seed the property directly in the database, bypassing the API, so
        # the subsequent API update is a genuine state change.
        realm = get_realm("zulip")
        setattr(realm, attr, value)
        realm.save(update_fields=[attr])

    def update_with_api(self, name: str, value: Union[int, str]) -> Realm:
        """PATCH a single realm property; non-string values are JSON-encoded
        as the endpoint expects. Returns a freshly fetched Realm."""
        if not isinstance(value, str):
            value = orjson.dumps(value).decode()
        result = self.client_patch("/json/realm", {name: value})
        self.assert_json_success(result)
        return get_realm("zulip")  # refresh data

    def update_with_api_multiple_value(self, data_dict: Dict[str, Any]) -> Realm:
        # Variant for properties whose test data is already a full,
        # pre-encoded request dict (e.g. video_chat_provider).
        result = self.client_patch("/json/realm", data_dict)
        self.assert_json_success(result)
        return get_realm("zulip")

    def do_test_realm_update_api(self, name: str) -> None:
        """Test updating realm properties.

        Seeds the property to its first test value via the DB, then cycles
        through the remaining values via the API, asserting the saved value
        after each update. Boolean properties use [False, True]; every other
        property must have an entry in test_values or we fail loudly.
        """
        bool_tests: List[bool] = [False, True]
        test_values: Dict[str, Any] = dict(
            default_language=["de", "en"],
            default_code_block_language=["javascript", ""],
            description=["Realm description", "New description"],
            digest_weekday=[0, 1, 2],
            message_retention_days=[10, 20],
            name=["Zulip", "New Name"],
            waiting_period_threshold=[10, 20],
            create_stream_policy=[
                Realm.POLICY_ADMINS_ONLY,
                Realm.POLICY_MEMBERS_ONLY,
                Realm.POLICY_FULL_MEMBERS_ONLY,
                Realm.POLICY_MODERATORS_ONLY,
            ],
            user_group_edit_policy=[
                Realm.USER_GROUP_EDIT_POLICY_ADMINS,
                Realm.USER_GROUP_EDIT_POLICY_MEMBERS,
            ],
            private_message_policy=[
                Realm.PRIVATE_MESSAGE_POLICY_UNLIMITED,
                Realm.PRIVATE_MESSAGE_POLICY_DISABLED,
            ],
            invite_to_stream_policy=[
                Realm.POLICY_ADMINS_ONLY,
                Realm.POLICY_MEMBERS_ONLY,
                Realm.POLICY_FULL_MEMBERS_ONLY,
                Realm.POLICY_MODERATORS_ONLY,
            ],
            wildcard_mention_policy=[
                Realm.WILDCARD_MENTION_POLICY_EVERYONE,
                Realm.WILDCARD_MENTION_POLICY_MEMBERS,
                Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS,
                Realm.WILDCARD_MENTION_POLICY_STREAM_ADMINS,
                Realm.WILDCARD_MENTION_POLICY_ADMINS,
                Realm.WILDCARD_MENTION_POLICY_NOBODY,
                Realm.WILDCARD_MENTION_POLICY_MODERATORS,
            ],
            bot_creation_policy=[1, 2],
            email_address_visibility=[
                Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
                Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
                Realm.EMAIL_ADDRESS_VISIBILITY_NOBODY,
            ],
            # video_chat_provider uses pre-encoded request dicts; see the
            # special-case branch below.
            video_chat_provider=[
                dict(
                    video_chat_provider=orjson.dumps(
                        Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
                    ).decode(),
                ),
            ],
            giphy_rating=[
                Realm.GIPHY_RATING_OPTIONS["y"]["id"],
                Realm.GIPHY_RATING_OPTIONS["r"]["id"],
            ],
            message_content_delete_limit_seconds=[1000, 1100, 1200],
            invite_to_realm_policy=[
                Realm.POLICY_ADMINS_ONLY,
                Realm.POLICY_MEMBERS_ONLY,
                Realm.POLICY_FULL_MEMBERS_ONLY,
                Realm.POLICY_MODERATORS_ONLY,
            ],
            move_messages_between_streams_policy=[
                Realm.POLICY_ADMINS_ONLY,
                Realm.POLICY_MEMBERS_ONLY,
                Realm.POLICY_FULL_MEMBERS_ONLY,
                Realm.POLICY_MODERATORS_ONLY,
            ],
        )

        vals = test_values.get(name)
        if Realm.property_types[name] is bool:
            vals = bool_tests
        if vals is None:
            raise AssertionError(f"No test created for {name}")

        if name == "video_chat_provider":
            self.set_up_db(name, vals[0][name])
            realm = self.update_with_api_multiple_value(vals[0])
            self.assertEqual(getattr(realm, name), orjson.loads(vals[0][name]))
        else:
            self.set_up_db(name, vals[0])
            realm = self.update_with_api(name, vals[1])
            self.assertEqual(getattr(realm, name), vals[1])
            realm = self.update_with_api(name, vals[0])
            self.assertEqual(getattr(realm, name), vals[0])

    def test_update_realm_properties(self) -> None:
        # subTest keeps one property's failure from masking the rest.
        for prop in Realm.property_types:
            with self.subTest(property=prop):
                self.do_test_realm_update_api(prop)

    def test_update_realm_allow_message_editing(self) -> None:
        """allow_message_editing and its companion settings are independent:
        disabling editing leaves the limit and topic-editing flags intact."""
        self.set_up_db("allow_message_editing", False)
        self.set_up_db("message_content_edit_limit_seconds", 0)
        self.set_up_db("allow_community_topic_editing", False)
        realm = self.update_with_api("allow_message_editing", True)
        realm = self.update_with_api("message_content_edit_limit_seconds", 100)
        realm = self.update_with_api("allow_community_topic_editing", True)
        self.assertEqual(realm.allow_message_editing, True)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api("allow_message_editing", False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 100)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api("message_content_edit_limit_seconds", 200)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, True)
        realm = self.update_with_api("allow_community_topic_editing", False)
        self.assertEqual(realm.allow_message_editing, False)
        self.assertEqual(realm.message_content_edit_limit_seconds, 200)
        self.assertEqual(realm.allow_community_topic_editing, False)

    def test_update_realm_allow_message_deleting(self) -> None:
        """allow_message_deleting and message_content_delete_limit_seconds
        update independently of each other."""
        self.set_up_db("allow_message_deleting", True)
        self.set_up_db("message_content_delete_limit_seconds", 0)
        realm = self.update_with_api("allow_message_deleting", False)
        self.assertEqual(realm.allow_message_deleting, False)
        self.assertEqual(realm.message_content_delete_limit_seconds, 0)
        realm = self.update_with_api("allow_message_deleting", True)
        realm = self.update_with_api("message_content_delete_limit_seconds", 100)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 100)
        realm = self.update_with_api("message_content_delete_limit_seconds", 600)
        self.assertEqual(realm.allow_message_deleting, True)
        self.assertEqual(realm.message_content_delete_limit_seconds, 600)
class ScrubRealmTest(ZulipTestCase):
    """Tests for scrubbing (anonymizing and wiping) a single realm."""

    def test_scrub_realm(self) -> None:
        """Scrub the "zulip" realm and verify the "lear" realm is untouched.

        Sets up messages, attachments and a custom profile field in both
        realms, runs ``do_scrub_realm`` on one, then asserts that only that
        realm's data was deleted/anonymized.
        """
        zulip = get_realm("zulip")
        lear = get_realm("lear")

        # Two users per realm so each realm gets its own message traffic.
        iago = self.example_user("iago")
        othello = self.example_user("othello")

        cordelia = self.lear_user("cordelia")
        king = self.lear_user("king")

        create_stream_if_needed(lear, "Shakespeare")

        self.subscribe(cordelia, "Shakespeare")
        self.subscribe(king, "Shakespeare")

        # Clear out pre-existing messages so the counts below are exact.
        Message.objects.all().delete()
        UserMessage.objects.all().delete()
        # 5 messages per sender => 10 messages per realm.
        for i in range(5):
            self.send_stream_message(iago, "Scotland")
            self.send_stream_message(othello, "Scotland")
            self.send_stream_message(cordelia, "Shakespeare")
            self.send_stream_message(king, "Shakespeare")

        # Two attachments per realm, plus one custom profile field in lear.
        Attachment.objects.filter(realm=zulip).delete()
        Attachment.objects.create(realm=zulip, owner=iago, path_id="a/b/temp1.txt", size=512)
        Attachment.objects.create(realm=zulip, owner=othello, path_id="a/b/temp2.txt", size=512)

        Attachment.objects.filter(realm=lear).delete()
        Attachment.objects.create(realm=lear, owner=cordelia, path_id="c/d/temp1.txt", size=512)
        Attachment.objects.create(realm=lear, owner=king, path_id="c/d/temp2.txt", size=512)

        CustomProfileField.objects.create(realm=lear)

        # Sanity-check the fixture state before scrubbing.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 10)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 20)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)

        self.assertNotEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)

        # Scrubbing logs warnings; capture them so the test output stays clean.
        with self.assertLogs(level="WARNING"):
            do_scrub_realm(zulip, acting_user=None)

        # All zulip-realm data gone; lear-realm data fully intact.
        self.assertEqual(Message.objects.filter(sender__in=[iago, othello]).count(), 0)
        self.assertEqual(Message.objects.filter(sender__in=[cordelia, king]).count(), 10)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[iago, othello]).count(), 0)
        self.assertEqual(UserMessage.objects.filter(user_profile__in=[cordelia, king]).count(), 20)

        self.assertEqual(Attachment.objects.filter(realm=zulip).count(), 0)
        self.assertEqual(Attachment.objects.filter(realm=lear).count(), 2)

        self.assertEqual(CustomProfileField.objects.filter(realm=zulip).count(), 0)
        self.assertNotEqual(CustomProfileField.objects.filter(realm=lear).count(), 0)

        # Scrubbed users get randomized names/emails of a fixed shape.
        zulip_users = UserProfile.objects.filter(realm=zulip)
        for user in zulip_users:
            self.assertTrue(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertTrue(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))

        # Lear users keep their original identities.
        lear_users = UserProfile.objects.filter(realm=lear)
        for user in lear_users:
            self.assertIsNone(re.search("Scrubbed [a-z0-9]{15}", user.full_name))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.email))
            self.assertIsNone(re.search("scrubbed-[a-z0-9]{15}@" + zulip.host, user.delivery_email))
| true
| true
|
1c40421ca57f259b646740244643e36fd23b79df
| 2,420
|
py
|
Python
|
predico/predicates.py
|
pauleveritt/predico
|
2bbf88a7775d31907d2229a32a89490172acd988
|
[
"Apache-2.0"
] | null | null | null |
predico/predicates.py
|
pauleveritt/predico
|
2bbf88a7775d31907d2229a32a89490172acd988
|
[
"Apache-2.0"
] | null | null | null |
predico/predicates.py
|
pauleveritt/predico
|
2bbf88a7775d31907d2229a32a89490172acd988
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from typing import Type, Any, Optional
from predico.services.request.base_request import Request
from predico.services.resource.base_resource import Resource
@dataclass
class Predicate:
    """Base class for registration predicates.

    A predicate pairs a ``key`` with a ``value`` to match against, plus a
    ``rank`` used to order competing matches (higher wins).
    """
    value: Any
    key: str
    rank: int = 10

    def __str__(self):
        # Prefer a class's __name__ for the label; fall back to the raw
        # value, which is expected to be a string in that case.
        label = getattr(self.value, '__name__', False)
        return f'{self.key}-{label if label else self.value}'

    def matches(self, request: Request, **args) -> bool:
        # Subclasses implement the actual matching rule.
        raise NotImplementedError
@dataclass
class ForPredicate(Predicate):
    """Match when the registration's ``for_`` target is exactly this class."""
    value: Type[Any]
    key: str = 'for_'

    def matches(self, request: Request, **args) -> bool:
        # Both sides are classes, so identity comparison is the test.
        return args['for_'] is self.value
@dataclass
class ResourcePredicate(Predicate):
    """Match when the current resource is an instance of this exact class."""
    value: Type[Resource]
    key: str = 'resource'

    def matches(self, request: Request, **args) -> bool:
        # The resource may be passed explicitly; otherwise use the request's.
        target = args.get('resource', request.resource)
        return target.__class__ is self.value
@dataclass
class ResourceIdPredicate(Predicate):
    """Match on the resource id of yourself."""
    value: str
    key: str = 'resourceid'
    rank: int = 30

    def matches(self, request: Request, **args) -> bool:
        target = args.get('resource', request.resource)
        return target.id == self.value
@dataclass
class ParentIdPredicate(Predicate):
    """Match on the resource id of a parent."""
    value: str
    key: str = 'parentid'
    rank: int = 20

    def matches(self, request: Request, **args) -> bool:
        target = args.get('resource', request.resource)
        # True when this predicate's id appears anywhere in the ancestry.
        return self.value in target.parentids
@dataclass
class TemplateStringPredicate(Predicate):
    """Non-matching predicate that carries a template string for rendering."""
    value: str
    key: str = 'template_string'

    def __str__(self):
        # Only the key identifies this predicate; the value is payload.
        return f'{self.key}'

    def matches(self, request: Request, **args) -> bool:
        # Never used for matching.
        raise NotImplementedError
@dataclass
class RendererPredicate(Predicate):
    """Non-matching predicate naming the renderer adapter to use."""
    value: Type[Any]
    key: str = 'renderer'

    def __str__(self):
        # Only the key identifies this predicate; the value is payload.
        return f'{self.key}'

    def matches(self, request: Request, **args) -> bool:
        # Never used for matching.
        raise NotImplementedError
| 24.2
| 66
| 0.655372
|
from dataclasses import dataclass
from typing import Type, Any, Optional
from predico.services.request.base_request import Request
from predico.services.resource.base_resource import Resource
@dataclass
class Predicate:
value: Any
key: str
rank: int = 10
def __str__(self):
value = getattr(self.value, '__name__', False)
if not value:
value = self.value
return f'{self.key}-{value}'
def matches(self, request: Request, **args) -> bool:
raise NotImplementedError
@dataclass
class ForPredicate(Predicate):
value: Type[Any]
key: str = 'for_'
def matches(self, request: Request, **args) -> bool:
# for_ and self.value are both classes. Are they the same?
for_ = args['for_']
return for_ is self.value
@dataclass
class ResourcePredicate(Predicate):
value: Type[Resource]
key: str = 'resource'
def matches(self, request: Request, **args) -> bool:
resource = args.get('resource', request.resource)
resource_class = resource.__class__
return resource_class is self.value
@dataclass
class ResourceIdPredicate(Predicate):
value: str
key: str = 'resourceid'
rank: int = 30
def matches(self, request: Request, **args) -> bool:
resource = args.get('resource', request.resource)
resourceid = resource.id
return self.value == resourceid
@dataclass
class ParentIdPredicate(Predicate):
value: str
key: str = 'parentid'
rank: int = 20
def matches(self, request: Request, **args) -> bool:
resource = args.get('resource', request.resource)
return self.value in resource.parentids
@dataclass
class TemplateStringPredicate(Predicate):
value: str
key: str = 'template_string'
def __str__(self):
return f'{self.key}'
def matches(self, request: Request, **args) -> bool:
raise NotImplementedError
@dataclass
class RendererPredicate(Predicate):
value: Type[Any]
key: str = 'renderer'
def __str__(self):
return f'{self.key}'
def matches(self, request: Request, **args) -> bool:
raise NotImplementedError
| true
| true
|
1c404227fdcac709e21234d010f4c8fb5da52912
| 5,015
|
py
|
Python
|
api.py
|
YAJATapps/Messenger-API
|
815ab70bdfcb74d37f1c967b2a083beda17965b0
|
[
"Apache-2.0"
] | null | null | null |
api.py
|
YAJATapps/Messenger-API
|
815ab70bdfcb74d37f1c967b2a083beda17965b0
|
[
"Apache-2.0"
] | null | null | null |
api.py
|
YAJATapps/Messenger-API
|
815ab70bdfcb74d37f1c967b2a083beda17965b0
|
[
"Apache-2.0"
] | null | null | null |
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import hashlib
from mangum import Mangum
import mysql.connector
import time
# FastAPI application instance for the Messenger API.
app = FastAPI()

# Browser origins allowed to call this API (production front end + local dev).
origins = [
    "https://messenger.yajatkumar.com",
    "http://localhost:3000",
]

# Permit cross-origin requests (any method/header) from the listed origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Single module-level MySQL connection, configured via environment variables.
# NOTE(review): one shared connection is not safe under concurrent requests —
# consider a connection pool; confirm against the deployment model.
appDb = mysql.connector.connect(
    host=os.environ['DB_HOST'],
    user=os.environ['DB_USER'],
    password=os.environ['DB_PASS'],
    database=os.environ['DB_NAME']
)
@app.post('/api/v1/users/add')
async def add_user(user: str = None, pwd: str = None):
    """Create a new account, storing the password as a SHA-256 digest.

    Returns 'user/pwdMissing' when either field is absent, 'alreadyExists'
    when the username is taken, and 'addedUser' on success.
    """
    # `is None` instead of `== None` (PEP 8 identity comparison).
    if user is None or pwd is None:
        return 'user/pwdMissing'
    # Guard clause replaces the original if/else nesting.
    if await valid_username(user):
        return 'alreadyExists'
    appCursor = appDb.cursor()
    # Parameterized INSERT keeps the username safe from SQL injection.
    sql = "INSERT INTO UsersAuth (username, password) VALUES (%s, %s)"
    appCursor.execute(sql, (user, sha256(pwd)))
    appDb.commit()
    return 'addedUser'
@app.post('/api/v1/users/user')
async def valid_username(user: str = None):
    """Return True when a user with this exact username exists.

    Returns the string 'userMissing' when no username was supplied.
    """
    if user is None:
        return 'userMissing'
    appCursor = appDb.cursor()
    appCursor.execute("SELECT * FROM UsersAuth WHERE username=%s", (user,))
    result = appCursor.fetchone()
    appDb.commit()
    # fetchone() yields None when no row matched.
    return result is not None
@app.post('/api/v1/users/login')
async def valid_login(user: str = None, pwd: str = None):
    """Return True when the username/password pair is correct."""
    if user is None or pwd is None:
        return 'user/pwdMissing'
    appCursor = appDb.cursor()
    # Compare against the stored SHA-256 digest of the password.
    sql = "SELECT * FROM UsersAuth WHERE username=%s AND password=%s"
    appCursor.execute(sql, (user, sha256(pwd)))
    result = appCursor.fetchone()
    appDb.commit()
    return result is not None
@app.post('/api/v1/messages/add')
async def add_message(frm: str = None, to: str = None, msg: str = None):
    """Store a message between two user ids, timestamped with server time."""
    if frm is None or to is None or msg is None:
        return 'frm/to/msgMissing'
    appCursor = appDb.cursor()
    currentTime = time.strftime('%Y-%m-%d %H:%M:%S')
    sql = "INSERT INTO Messages (msgFrom, msgTo, message, msgTime) VALUES (%s, %s, %s, %s)"
    # NOTE(review): int() raises ValueError on non-numeric ids — the list
    # endpoint validates with isdigit(); consider doing the same here.
    appCursor.execute(sql, (int(frm), int(to), msg, currentTime))
    appDb.commit()
    return 'addedMessage'
@app.post('/api/v1/users/find')
async def search_users(user: str = None):
    """Return users whose username contains the given substring.

    Each result is a dict with 'id' and 'name' keys.
    """
    if user is None:
        return 'userMissing'
    appCursor = appDb.cursor()
    # SECURITY FIX: the search term was previously concatenated directly
    # into the SQL string, allowing SQL injection. Bind it as a parameter
    # and build the LIKE pattern in Python instead.
    sql = "SELECT id, username FROM UsersAuth WHERE username LIKE %s"
    appCursor.execute(sql, ("%" + user + "%",))
    result = appCursor.fetchall()
    appDb.commit()
    return [{"id": row[0], "name": row[1]} for row in result]
@app.post('/api/v1/messages/list')
async def fetch_messages(frm: str = None, to: str = None):
    """Return the conversation between two user ids, oldest first.

    Each entry has 'msg' (text) and 'sent' (True when `frm` was the sender).
    """
    if frm is None or to is None:
        return 'frm/toMissing'
    # Validate before the int() conversion below can raise.
    if not frm.isdigit():
        return 'frmNotInt'
    if not to.isdigit():
        return 'toNotInt'
    appCursor = appDb.cursor()
    # Messages in either direction between the two ids, ordered by time.
    sql = ("SELECT * FROM Messages WHERE (msgFrom=%s AND msgTo=%s) "
           "OR (msgFrom=%s AND msgTo=%s) ORDER BY msgTime")
    appCursor.execute(sql, (frm, to, to, frm))
    result = appCursor.fetchall()
    appDb.commit()
    # Hoisted conversion: compute int(frm) once rather than per row.
    sender = int(frm)
    return [{"msg": row[3], "sent": row[1] == sender} for row in result]
@app.post('/api/v1/users/id')
async def fetch_id(user: str = None):
    """Return the (id,) row for a username, or None when not found."""
    if user is None:
        return 'userMissing'
    appCursor = appDb.cursor()
    appCursor.execute("SELECT id FROM UsersAuth WHERE username=%s", (user,))
    result = appCursor.fetchone()
    appDb.commit()
    return result
@app.post('/api/v1/users/contacts')
async def fetch_contacts(user: str = None):
    """Return every user this id has messaged or been messaged by."""
    if user is None:
        return 'userMissing'
    appCursor = appDb.cursor()
    # Union of outgoing recipients and incoming senders for this user id.
    sql = ("SELECT id, username FROM UsersAuth "
           "WHERE id IN (SELECT msgTo FROM Messages WHERE msgFrom=%s) "
           "OR id IN (SELECT msgFrom FROM Messages WHERE msgTo=%s)")
    appCursor.execute(sql, (user, user))
    result = appCursor.fetchall()
    appDb.commit()
    return [{"id": row[0], "name": row[1]} for row in result]
def sha256(text: str) -> str:
    """Return the hex SHA-256 digest of *text* (UTF-8 encoded).

    The parameter was renamed from ``hash``, which shadowed the builtin;
    all call sites pass the argument positionally.
    """
    return hashlib.sha256(text.encode()).hexdigest()
handler = Mangum(app)
| 22.795455
| 112
| 0.635892
|
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import hashlib
from mangum import Mangum
import mysql.connector
import time
app = FastAPI()
origins = [
"https://messenger.yajatkumar.com",
"http://localhost:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
appDb = mysql.connector.connect(
host=os.environ['DB_HOST'],
user=os.environ['DB_USER'],
password=os.environ['DB_PASS'],
database=os.environ['DB_NAME']
)
@app.post('/api/v1/users/add')
async def add_user(user: str = None, pwd: str = None):
if user == None or pwd == None:
return 'user/pwdMissing'
exists = await valid_username(user)
if exists:
return 'alreadyExists'
else:
appCursor = appDb.cursor()
sql = "INSERT INTO UsersAuth (username, password) VALUES (%s, %s)"
val = (user, sha256(pwd))
appCursor.execute(sql, val)
appDb.commit()
return 'addedUser'
@app.post('/api/v1/users/user')
async def valid_username(user: str = None):
if user == None:
return 'userMissing'
appCursor = appDb.cursor()
sql = "SELECT * FROM UsersAuth WHERE username=%s"
val = (user,)
appCursor.execute(sql, val)
result = appCursor.fetchone()
appDb.commit()
return result != None
@app.post('/api/v1/users/login')
async def valid_login(user: str = None, pwd: str = None):
if user == None or pwd == None:
return 'user/pwdMissing'
appCursor = appDb.cursor()
sql = "SELECT * FROM UsersAuth WHERE username=%s AND password=%s"
val = (user, sha256(pwd))
appCursor.execute(sql, val)
result = appCursor.fetchone()
appDb.commit()
return result != None
@app.post('/api/v1/messages/add')
async def add_message(frm: str = None, to: str = None, msg: str = None):
if frm == None or to == None or msg == None:
return 'frm/to/msgMissing'
appCursor = appDb.cursor()
currentTime = time.strftime('%Y-%m-%d %H:%M:%S')
sql = "INSERT INTO Messages (msgFrom, msgTo, message, msgTime) VALUES (%s, %s, %s, %s)"
val = (int(frm), int(to), msg, currentTime)
appCursor.execute(sql, val)
appDb.commit()
return 'addedMessage'
@app.post('/api/v1/users/find')
async def search_users(user: str = None):
if user == None:
return 'userMissing'
appCursor = appDb.cursor()
sql = "SELECT id, username FROM UsersAuth WHERE username LIKE '%" + user + "%'"
appCursor.execute(sql)
result = appCursor.fetchall()
appDb.commit()
users = []
for x in result:
users.append({"id": x[0], "name": x[1]})
return users
@app.post('/api/v1/messages/list')
async def fetch_messages(frm: str = None, to: str = None):
if frm == None or to == None:
return 'frm/toMissing'
if not frm.isdigit():
return 'frmNotInt'
if not to.isdigit():
return 'toNotInt'
appCursor = appDb.cursor()
sql = "SELECT * FROM Messages WHERE (msgFrom=%s AND msgTo=%s) OR (msgFrom=%s AND msgTo=%s) ORDER BY msgTime"
val = (frm, to, to, frm)
appCursor.execute(sql, val)
result = appCursor.fetchall()
appDb.commit()
messages = []
for x in result:
messages.append({"msg": x[3], "sent": x[1] == int(frm)})
return messages
@app.post('/api/v1/users/id')
async def fetch_id(user: str = None):
if user == None:
return 'userMissing'
appCursor = appDb.cursor()
sql = "SELECT id FROM UsersAuth WHERE username=%s"
val = (user,)
appCursor.execute(sql, val)
result = appCursor.fetchone()
appDb.commit()
return result
@app.post('/api/v1/users/contacts')
async def fetch_contacts(user: str = None):
if user == None:
return 'userMissing'
appCursor = appDb.cursor()
sql = "SELECT id, username FROM UsersAuth WHERE id IN (SELECT msgTo FROM Messages WHERE msgFrom=%s)\
OR id IN (SELECT msgFrom FROM Messages WHERE msgTo=%s)"
val = (user, user)
appCursor.execute(sql, val)
result = appCursor.fetchall()
appDb.commit()
contacts = []
for x in result:
contacts.append({"id": x[0], "name": x[1]})
return contacts
def sha256(hash: str):
return hashlib.sha256(hash.encode()).hexdigest()
handler = Mangum(app)
| true
| true
|
1c404242f0b6bd6bdb19467bcc2784e17f1dafc5
| 228
|
py
|
Python
|
backend/apps/contact/admin.py
|
skiv23/portfolio
|
3c1a7b0cf0fb67148ce4b0491132e3a01375c9b0
|
[
"MIT"
] | null | null | null |
backend/apps/contact/admin.py
|
skiv23/portfolio
|
3c1a7b0cf0fb67148ce4b0491132e3a01375c9b0
|
[
"MIT"
] | null | null | null |
backend/apps/contact/admin.py
|
skiv23/portfolio
|
3c1a7b0cf0fb67148ce4b0491132e3a01375c9b0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import admin
from . import models
# Register the contact app's models with the default admin site.
for model in (models.Title, models.Contact, models.ContactMeEntry, models.Photo):
    admin.site.register(model)
| 20.727273
| 42
| 0.776316
|
from django.contrib import admin
from . import models
admin.site.register(models.Title)
admin.site.register(models.Contact)
admin.site.register(models.ContactMeEntry)
admin.site.register(models.Photo)
| true
| true
|
1c40427349091ac0fefe2bc8521d70b844ce44a4
| 2,339
|
py
|
Python
|
outdated-unused/OldImageProcessing.py
|
KhangOP/PaladinsAssistantBot
|
9b705dc688610ba52909f0b0e152d8684006c6a6
|
[
"MIT"
] | null | null | null |
outdated-unused/OldImageProcessing.py
|
KhangOP/PaladinsAssistantBot
|
9b705dc688610ba52909f0b0e152d8684006c6a6
|
[
"MIT"
] | null | null | null |
outdated-unused/OldImageProcessing.py
|
KhangOP/PaladinsAssistantBot
|
9b705dc688610ba52909f0b0e152d8684006c6a6
|
[
"MIT"
] | null | null | null |
"""
# Creates a image desks
async def create_deck_image2(player_name, champ_name, deck):
image_size_x = 256
image_size_y = 196
# Main image
# deck_image = Image.new('RGB', (image_size_x * 5, image_size_x * 2), color=color)
# Champ icon image
champ_url = await get_champ_image(champ_name)
response = requests.get(champ_url)
champ_icon_image = Image.open(BytesIO(response.content))
champ_icon_image = champ_icon_image.resize((image_size_x, image_size_x))
img2 = champ_icon_image.resize((1, 1))
color = img2.getpixel((0, 0))
deck_image = Image.new('RGB', (image_size_x * 5, image_size_x * 2), color=color)
deck_image.paste(champ_icon_image, (0, 0, image_size_x, image_size_x))
# Loop to add all the cards in
for i, card in enumerate(deck.cards):
card_m = str(card).split("(")
number = str(card_m[1]).split(")")[0]
card_icon_url = await get_deck_cards_url(card_m[0])
response = requests.get(card_icon_url)
card_icon_image = Image.open(BytesIO(response.content))
# box – The crop rectangle, as a (left, upper, right, lower)- tuple.
deck_image.paste(card_icon_image, (image_size_x*i, image_size_x,
image_size_x * (i + 1), image_size_x*2-(image_size_x-image_size_y)))
draw = ImageDraw.Draw(deck_image)
draw.text((image_size_x * i, image_size_x*2 + 10 - (image_size_x-image_size_y)), str(card),
font=ImageFont.truetype("arial", 32))
# This works, found online
# img2 = champ_icon_image.resize((1, 1))
# color = img2.getpixel((0, 0))
color = (255, 255, 255)
# Adding in other text on image
draw = ImageDraw.Draw(deck_image)
draw.text((image_size_x, 0), str(player_name), color, font=ImageFont.truetype("arial", 64))
draw.text((image_size_x, 64), str(champ_name), color, font=ImageFont.truetype("arial", 64))
draw.text((image_size_x, 128), str(deck.deckName), color, font=ImageFont.truetype("arial", 64))
# Creates a buffer to store the image in
final_buffer = BytesIO()
# Store the pillow image we just created into the buffer with the PNG format
deck_image.save(final_buffer, "png")
# seek back to the start of the buffer stream
final_buffer.seek(0)
return final_buffer
"""
| 37.126984
| 111
| 0.662676
| true
| true
|
|
1c4042fed14fd9b63c76fd881336abf66138e572
| 9,544
|
py
|
Python
|
scikit-learn-weighted_kde/sklearn/feature_selection/from_model.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | null | null | null |
scikit-learn-weighted_kde/sklearn/feature_selection/from_model.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | null | null | null |
scikit-learn-weighted_kde/sklearn/feature_selection/from_model.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | null | null | null |
# Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
# License: BSD 3 clause
import numpy as np
from .base import SelectorMixin
from ..base import TransformerMixin, BaseEstimator, clone
from ..externals import six
from ..utils import safe_mask, check_array, deprecated
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
def _get_feature_importances(estimator):
"""Retrieve or aggregate feature importances from estimator"""
if hasattr(estimator, "feature_importances_"):
importances = estimator.feature_importances_
elif hasattr(estimator, "coef_"):
if estimator.coef_.ndim == 1:
importances = np.abs(estimator.coef_)
else:
importances = np.sum(np.abs(estimator.coef_), axis=0)
else:
raise ValueError(
"The underlying estimator %s has no `coef_` or "
"`feature_importances_` attribute. Either pass a fitted estimator"
" to SelectFromModel or call fit before calling transform."
% estimator.__class__.__name__)
return importances
def _calculate_threshold(estimator, importances, threshold):
    """Interpret the threshold value"""
    if threshold is None:
        # determine default from estimator
        est_name = estimator.__class__.__name__
        is_l1 = hasattr(estimator, "penalty") and estimator.penalty == "l1"
        if is_l1 or "Lasso" in est_name:
            # the natural default threshold is 0 when l1 penalty was used
            threshold = 1e-5
        else:
            threshold = "mean"

    # Numeric thresholds are used as-is (coerced to float).
    if not isinstance(threshold, six.string_types):
        return float(threshold)

    # "scale*reference" form, e.g. "1.25*mean".
    if "*" in threshold:
        scale, reference = threshold.split("*")
        reference = reference.strip()
        if reference == "median":
            ref_value = np.median(importances)
        elif reference == "mean":
            ref_value = np.mean(importances)
        else:
            raise ValueError("Unknown reference: " + reference)
        return float(scale.strip()) * ref_value

    if threshold == "median":
        return np.median(importances)
    if threshold == "mean":
        return np.mean(importances)
    raise ValueError("Expected threshold='mean' or threshold='median' "
                     "got %s" % threshold)
class _LearntSelectorMixin(TransformerMixin):
    # Note because of the extra threshold parameter in transform, this does
    # not naturally extend from SelectorMixin
    """Transformer mixin selecting features based on importance weights.

    This implementation can be mixin on any estimator that exposes a
    ``feature_importances_`` or ``coef_`` attribute to evaluate the relative
    importance of individual features for feature selection.
    """

    @deprecated('Support to use estimators as feature selectors will be '
                'removed in version 0.19. Use SelectFromModel instead.')
    def transform(self, X, threshold=None):
        """Reduce X to its most important features.

        Uses ``coef_`` or ``feature_importances_`` to determine the most
        important features. For models with a ``coef_`` for each class, the
        absolute sum over the classes is used.

        Parameters
        ----------
        X : array or scipy sparse matrix of shape [n_samples, n_features]
            The input samples.

        threshold : string, float or None, optional (default=None)
            The threshold value to use for feature selection. Features whose
            importance is greater or equal are kept while the others are
            discarded. If "median" (resp. "mean"), then the threshold value is
            the median (resp. the mean) of the feature importances. A scaling
            factor (e.g., "1.25*mean") may also be used. If None and if
            available, the object attribute ``threshold`` is used. Otherwise,
            "mean" is used by default.

        Returns
        -------
        X_r : array of shape [n_samples, n_selected_features]
            The input samples with only the selected features.
        """
        # Either attribute proves the estimator has been fitted.
        check_is_fitted(self, ('coef_', 'feature_importances_'),
                        all_or_any=any)

        X = check_array(X, 'csc')
        importances = _get_feature_importances(self)
        if len(importances) != X.shape[1]:
            raise ValueError("X has different number of features than"
                             " during model fitting.")

        # An explicit threshold argument wins over the estimator attribute.
        if threshold is None:
            threshold = getattr(self, 'threshold', None)
        threshold = _calculate_threshold(self, importances, threshold)

        # Selection
        try:
            mask = importances >= threshold
        except TypeError:
            # Fails in Python 3.x when threshold is str;
            # result is array of True
            raise ValueError("Invalid threshold: all features are discarded.")
        if np.any(mask):
            mask = safe_mask(X, mask)
            return X[:, mask]
        else:
            raise ValueError("Invalid threshold: all features are discarded.")
class SelectFromModel(BaseEstimator, SelectorMixin):
    """Meta-transformer for selecting features based on importance weights.

    .. versionadded:: 0.17

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.
        This can be both a fitted (if ``prefit`` is set to True)
        or a non-fitted estimator.

    threshold : string, float, optional default None
        The threshold value to use for feature selection. Features whose
        importance is greater or equal are kept while the others are
        discarded. If "median" (resp. "mean"), then the ``threshold`` value is
        the median (resp. the mean) of the feature importances. A scaling
        factor (e.g., "1.25*mean") may also be used. If None and if the
        estimator has a parameter penalty set to l1, either explicitly
        or implicitly (e.g, Lasso), the threshold is used is 1e-5.
        Otherwise, "mean" is used by default.

    prefit : bool, default False
        Whether a prefit model is expected to be passed into the constructor
        directly or not. If True, ``transform`` must be called directly
        and SelectFromModel cannot be used with ``cross_val_score``,
        ``GridSearchCV`` and similar utilities that clone the estimator.
        Otherwise train the model using ``fit`` and then ``transform`` to do
        feature selection.

    Attributes
    ----------
    `estimator_`: an estimator
        The base estimator from which the transformer is built.
        This is stored only when a non-fitted estimator is passed to the
        ``SelectFromModel``, i.e when prefit is False.

    `threshold_`: float
        The threshold value used for feature selection.
    """

    def __init__(self, estimator, threshold=None, prefit=False):
        self.estimator = estimator
        self.threshold = threshold
        self.prefit = prefit

    def _get_support_mask(self):
        """Boolean mask of features whose importance meets the threshold."""
        # SelectFromModel can directly call on transform.
        if self.prefit:
            estimator = self.estimator
        elif hasattr(self, 'estimator_'):
            estimator = self.estimator_
        else:
            raise ValueError(
                'Either fit the model before transform or set "prefit=True"'
                ' while passing the fitted estimator to the constructor.')
        scores = _get_feature_importances(estimator)
        # threshold_ is (re)computed here so a prefit estimator works too.
        self.threshold_ = _calculate_threshold(estimator, scores,
                                               self.threshold)
        return scores >= self.threshold_

    def fit(self, X, y=None, **fit_params):
        """Fit the SelectFromModel meta-transformer.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples.

        y : array-like, shape (n_samples,)
            The target values (integers that correspond to classes in
            classification, real numbers in regression).

        **fit_params : Other estimator specific parameters

        Returns
        -------
        self : object
            Returns self.
        """
        if self.prefit:
            raise NotFittedError(
                "Since 'prefit=True', call transform directly")
        # Clone once so repeated fits keep using a fresh copy of the
        # user-supplied estimator.
        if not hasattr(self, "estimator_"):
            self.estimator_ = clone(self.estimator)
        self.estimator_.fit(X, y, **fit_params)
        return self

    def partial_fit(self, X, y=None, **fit_params):
        """Fit the SelectFromModel meta-transformer only once.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples.

        y : array-like, shape (n_samples,)
            The target values (integers that correspond to classes in
            classification, real numbers in regression).

        **fit_params : Other estimator specific parameters

        Returns
        -------
        self : object
            Returns self.
        """
        if self.prefit:
            raise NotFittedError(
                "Since 'prefit=True', call transform directly")
        if not hasattr(self, "estimator_"):
            self.estimator_ = clone(self.estimator)
        self.estimator_.partial_fit(X, y, **fit_params)
        return self
| 36.849421
| 79
| 0.620914
|
import numpy as np
from .base import SelectorMixin
from ..base import TransformerMixin, BaseEstimator, clone
from ..externals import six
from ..utils import safe_mask, check_array, deprecated
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
def _get_feature_importances(estimator):
if hasattr(estimator, "feature_importances_"):
importances = estimator.feature_importances_
elif hasattr(estimator, "coef_"):
if estimator.coef_.ndim == 1:
importances = np.abs(estimator.coef_)
else:
importances = np.sum(np.abs(estimator.coef_), axis=0)
else:
raise ValueError(
"The underlying estimator %s has no `coef_` or "
"`feature_importances_` attribute. Either pass a fitted estimator"
" to SelectFromModel or call fit before calling transform."
% estimator.__class__.__name__)
return importances
def _calculate_threshold(estimator, importances, threshold):
if threshold is None:
est_name = estimator.__class__.__name__
if ((hasattr(estimator, "penalty") and estimator.penalty == "l1") or
"Lasso" in est_name):
threshold = 1e-5
else:
threshold = "mean"
if isinstance(threshold, six.string_types):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold
class _LearntSelectorMixin(TransformerMixin):
@deprecated('Support to use estimators as feature selectors will be '
'removed in version 0.19. Use SelectFromModel instead.')
def transform(self, X, threshold=None):
check_is_fitted(self, ('coef_', 'feature_importances_'),
all_or_any=any)
X = check_array(X, 'csc')
importances = _get_feature_importances(self)
if len(importances) != X.shape[1]:
raise ValueError("X has different number of features than"
" during model fitting.")
if threshold is None:
threshold = getattr(self, 'threshold', None)
threshold = _calculate_threshold(self, importances, threshold)
try:
mask = importances >= threshold
except TypeError:
raise ValueError("Invalid threshold: all features are discarded.")
if np.any(mask):
mask = safe_mask(X, mask)
return X[:, mask]
else:
raise ValueError("Invalid threshold: all features are discarded.")
class SelectFromModel(BaseEstimator, SelectorMixin):
def __init__(self, estimator, threshold=None, prefit=False):
self.estimator = estimator
self.threshold = threshold
self.prefit = prefit
def _get_support_mask(self):
if self.prefit:
estimator = self.estimator
elif hasattr(self, 'estimator_'):
estimator = self.estimator_
else:
raise ValueError(
'Either fit the model before transform or set "prefit=True"'
' while passing the fitted estimator to the constructor.')
scores = _get_feature_importances(estimator)
self.threshold_ = _calculate_threshold(estimator, scores,
self.threshold)
return scores >= self.threshold_
def fit(self, X, y=None, **fit_params):
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
if not hasattr(self, "estimator_"):
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y, **fit_params)
return self
def partial_fit(self, X, y=None, **fit_params):
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
if not hasattr(self, "estimator_"):
self.estimator_ = clone(self.estimator)
self.estimator_.partial_fit(X, y, **fit_params)
return self
| true
| true
|
1c404332924554e60f631695c9e565921d0c6fc1
| 3,116
|
py
|
Python
|
Amazon-Alexa-Reviews-feedback-prediction/code.py
|
Sufi737/ga-learner-dsmp-repo
|
a71c9bf138d5d9ba1ff4f37238d5e152cbf4380c
|
[
"MIT"
] | 2
|
2020-06-16T16:54:37.000Z
|
2021-07-08T13:16:59.000Z
|
Amazon-Alexa-Reviews-feedback-prediction/code.py
|
Sufi737/ga-learner-dsmp-repo
|
a71c9bf138d5d9ba1ff4f37238d5e152cbf4380c
|
[
"MIT"
] | null | null | null |
Amazon-Alexa-Reviews-feedback-prediction/code.py
|
Sufi737/ga-learner-dsmp-repo
|
a71c9bf138d5d9ba1ff4f37238d5e152cbf4380c
|
[
"MIT"
] | null | null | null |
# --------------
# import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")

# Load the dataset — tab-separated reviews file.
# NOTE(review): `path` is assumed to be injected by the hosting platform;
# it is not defined in this file.
df = pd.read_csv(path, sep ="\t")
print(type(df['date']))

# Converting date attribute from string to datetime.date datatype
df['date'] = pd.to_datetime(df['date'])

# calculate the total length of word
# (character count of each review, stored in a new 'length' column)
df['length'] = df['verified_reviews'].apply(lambda x: len(x))
# --------------
## Rating vs feedback
# set figure size
plt.figure(figsize=(12,7))
# generate countplot
sns.countplot(data=df, x='rating', hue='feedback')
# display plot
plt.show()
## Product rating vs feedback
# set figure size
plt.figure(figsize=(12,7))
# generate barplot
sns.barplot(data=df, x='rating', y='variation', hue='feedback')
# display plot
# --------------
# import packages
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# declare empty list 'corpus'
corpus=[]
# for loop to fill in corpus
for i in range(0,3150):
# retain alphabets
review = re.sub('[^a-zA-Z]', ' ', df['verified_reviews'][i] )
# convert to lower case
review=review.lower()
# tokenize
review=review.split()
# initialize stemmer object
ps=PorterStemmer()
# perform stemming
review=[ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
# join elements of list
review=' '.join(review)
# add to 'corpus'
corpus.append(review)
# --------------
# import libraries
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
# Instantiate count vectorizer
cv = CountVectorizer(max_features = 1500)
# Independent variable
X = cv.fit_transform(corpus).toarray()
# dependent variable
y = df['feedback']
# Counts
count = y.value_counts()
print(count)
# Split the dataset
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# --------------
# import packages
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score
# Instantiate calssifier
rf = RandomForestClassifier(random_state=2)
# fit model on training data
rf.fit(X_train, y_train)
# predict on test data
y_pred = rf.predict(X_test)
# calculate the accuracy score
score = accuracy_score(y_test, y_pred)
# calculate the precision
precision = precision_score(y_test, y_pred)
# display 'score' and 'precision'
print(precision)
print(score)
# --------------
# import packages
from imblearn.over_sampling import SMOTE
# Instantiate smote
smote = SMOTE(random_state=9)
# fit_sample onm training data
X_train, y_train = smote.fit_sample(X_train, y_train)
# fit modelk on training data
rf.fit(X_train, y_train)
# predict on test data
y_pred = rf.predict(X_test)
# calculate the accuracy score
score = accuracy_score(y_test, y_pred)
# calculate the precision
precision = precision_score(y_test, y_pred)
# display precision and score
print(score)
print(precision)
| 20.912752
| 101
| 0.720475
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv(path, sep ="\t")
print(type(df['date']))
df['date'] = pd.to_datetime(df['date'])
df['length'] = df['verified_reviews'].apply(lambda x: len(x))
e=(12,7))
sns.countplot(data=df, x='rating', hue='feedback')
plt.show()
)
sns.barplot(data=df, x='rating', y='variation', hue='feedback')
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus=[]
for i in range(0,3150):
review = re.sub('[^a-zA-Z]', ' ', df['verified_reviews'][i] )
review=review.lower()
review=review.split()
ps=PorterStemmer()
review=[ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
review=' '.join(review)
corpus.append(review)
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
cv = CountVectorizer(max_features = 1500)
X = cv.fit_transform(corpus).toarray()
y = df['feedback']
count = y.value_counts()
print(count)
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score
rf = RandomForestClassifier(random_state=2)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
score = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
print(precision)
print(score)
from imblearn.over_sampling import SMOTE
smote = SMOTE(random_state=9)
X_train, y_train = smote.fit_sample(X_train, y_train)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
score = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
print(score)
print(precision)
| true
| true
|
1c4043c02674893a7b35774dafdb1e1cced67274
| 1,864
|
py
|
Python
|
dasladen/log.py
|
pagotti/dasladen
|
75bb384a2048619bd8fe5dd4588cb438dc02e589
|
[
"MIT"
] | 2
|
2020-06-01T09:34:45.000Z
|
2021-03-16T15:42:30.000Z
|
dasladen/log.py
|
pagotti/dasladen
|
75bb384a2048619bd8fe5dd4588cb438dc02e589
|
[
"MIT"
] | 2
|
2021-04-20T16:45:46.000Z
|
2021-07-02T15:48:46.000Z
|
dasladen/log.py
|
pagotti/dasladen
|
75bb384a2048619bd8fe5dd4588cb438dc02e589
|
[
"MIT"
] | 2
|
2020-04-17T23:28:30.000Z
|
2020-06-01T09:37:56.000Z
|
"""
Log Module
Features:
- Log to file
- Log to console
"""
import time
import logging
from . import compat
class FileHandler(object):
    """Log handler that appends timestamped records to log/<key>.log."""
    def __init__(self):
        # File object; populated by open(), closed by close().
        self.file = None
    def open(self, key):
        # Append mode with buffering=0 so records hit disk immediately.
        # NOTE(review): buffering=0 is only valid for binary files on
        # Python 3 — presumably compat.open handles this; confirm.
        self.file = compat.open('log/{}.log'.format(key), 'a', 0)
    def write(self, data):
        # One record per line, prefixed with the shared timestamp format.
        self.file.write(u"{} {}\n".format(get_time(), data))
    def close(self):
        self.file.close()
class ConsoleHandler(object):
    """Log handler that prints timestamped records to stdout."""
    def open(self, key):
        # Nothing to acquire for console output.
        pass
    # noinspection PyMethodMayBeStatic
    def write(self, data):
        # Same "timestamp message" shape as FileHandler, without the newline
        # (print supplies it).
        print(u"{} {}".format(get_time(), data))
    def close(self):
        pass
class DebugHandler(object):
    """Log handler that forwards records to the stdlib logging module."""
    def open(self, key):
        pass
    # noinspection PyMethodMayBeStatic
    def write(self, data):
        # No timestamp prefix here — the logging module adds its own
        # formatting per its configuration.
        logging.info(data)
    def close(self):
        pass
class LogManager(object):
    """Registry mapping log keys to the shared list of handlers."""
    def __init__(self):
        # key -> handler list. Every key maps to the SAME `handlers` list,
        # so handlers registered later apply to all keys.
        self.log_manager_instance = dict()
        self.handlers = []
    def add(self, key):
        # Register `key`; it shares the single module-wide handler list.
        self.log_manager_instance[key] = self.handlers
    def get(self, key):
        # Raises KeyError if `key` was never add()ed.
        return self.log_manager_instance[key]
# Module-level singleton registry shared by all Logger instances.
_manager = LogManager()
def add_log_handler(log_handler):
    """Register a handler that will receive records for every log key."""
    _manager.handlers.append(log_handler)
def get_time():
    """Return the current local time as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
def get_time_filename():
    """Return the current local time as 'YYYYMMDD_HHMMSS' (filename-safe)."""
    stamp = time.localtime()
    return time.strftime("%Y%m%d_%H%M%S", stamp)
class Logger(object):
    """Context-managed log writer that fans records out to every handler
    registered for its key in the module-level manager."""

    def __init__(self, filename):
        self.key = filename
        _manager.add(filename)

    def write(self, data):
        # Broadcast the record to each handler registered for this key.
        for sink in _manager.get(self.key):
            sink.write(data)

    def __enter__(self):
        # Open every handler on entry so writes inside the block succeed.
        for sink in _manager.get(self.key):
            sink.open(self.key)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close every handler regardless of whether an exception occurred.
        for sink in _manager.get(self.key):
            sink.close()
| 18.64
| 65
| 0.614807
|
import time
import logging
from . import compat
class FileHandler(object):
def __init__(self):
self.file = None
def open(self, key):
self.file = compat.open('log/{}.log'.format(key), 'a', 0)
def write(self, data):
self.file.write(u"{} {}\n".format(get_time(), data))
def close(self):
self.file.close()
class ConsoleHandler(object):
def open(self, key):
pass
def write(self, data):
print(u"{} {}".format(get_time(), data))
def close(self):
pass
class DebugHandler(object):
def open(self, key):
pass
def write(self, data):
logging.info(data)
def close(self):
pass
class LogManager(object):
def __init__(self):
self.log_manager_instance = dict()
self.handlers = []
def add(self, key):
self.log_manager_instance[key] = self.handlers
def get(self, key):
return self.log_manager_instance[key]
_manager = LogManager()
def add_log_handler(log_handler):
_manager.handlers.append(log_handler)
def get_time():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def get_time_filename():
return time.strftime("%Y%m%d_%H%M%S", time.localtime())
class Logger(object):
def __init__(self, filename):
self.key = filename
_manager.add(filename)
def write(self, data):
for handler in _manager.get(self.key):
handler.write(data)
def __enter__(self):
for handler in _manager.get(self.key):
handler.open(self.key)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for handler in _manager.get(self.key):
handler.close()
| true
| true
|
1c4043df8db9292235261a8d7600bc65b6fee9d9
| 268
|
py
|
Python
|
1st Year - CS/Python - Language/Lab 15 - 28th May/swap1stlast.py
|
rahularepaka/CSLab
|
6d223b8814ad04a5821cbe63bf059f5726ff22ce
|
[
"MIT"
] | null | null | null |
1st Year - CS/Python - Language/Lab 15 - 28th May/swap1stlast.py
|
rahularepaka/CSLab
|
6d223b8814ad04a5821cbe63bf059f5726ff22ce
|
[
"MIT"
] | null | null | null |
1st Year - CS/Python - Language/Lab 15 - 28th May/swap1stlast.py
|
rahularepaka/CSLab
|
6d223b8814ad04a5821cbe63bf059f5726ff22ce
|
[
"MIT"
] | null | null | null |
import math  # retained from the original file; unused after removing the dead length computation


def swap_first_last(number):
    """Return `number` rendered as a string with its first and last digits
    swapped (e.g. 12345 -> '52341', 100 -> '001').

    Working on the string directly fixes two defects of the original:
    float division (`n/10`) lost precision for very large integers, and a
    single-digit input was printed twice instead of once.
    """
    digits = str(number)
    if len(digits) == 1:
        # A single digit is unchanged by the swap.
        return digits
    # Keep the middle digits, exchange the two ends.
    return digits[-1] + digits[1:-1] + digits[0]


if __name__ == "__main__":
    n = int(input())
    print(swap_first_last(n))
| 12.761905
| 42
| 0.652985
|
import math
n = int(input())
length = (math.log10(n)) + 1
last = n%10
n_str = str(n)
first = int(n_str[0])
temp = first
first = last
last = temp
n_last = n%10
n_last = n/10
n_last = int(n_last)
n_last_str = str(n_last)
print(str(first)+n_last_str[1:]+str(last))
| true
| true
|
1c4044d2b136624b7d29bcf8552335ac76d0db28
| 353
|
py
|
Python
|
wagtail/wagtailembeds/apps.py
|
patphongs/wagtail
|
32555f7a1c599c139e0f26c22907c9612af2e015
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailembeds/apps.py
|
patphongs/wagtail
|
32555f7a1c599c139e0f26c22907c9612af2e015
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailembeds/apps.py
|
patphongs/wagtail
|
32555f7a1c599c139e0f26c22907c9612af2e015
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
from .finders import get_finders
class WagtailEmbedsAppConfig(AppConfig):
    """Django AppConfig for the wagtailembeds app."""
    name = 'wagtail.wagtailembeds'
    label = 'wagtailembeds'
    verbose_name = "Wagtail embeds"
    def ready(self):
        # Check configuration on startup: calling get_finders() at app load
        # presumably surfaces embed-finder misconfiguration early rather
        # than at request time — confirm against finders.get_finders.
        get_finders()
| 22.0625
| 56
| 0.739377
|
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
from .finders import get_finders
class WagtailEmbedsAppConfig(AppConfig):
name = 'wagtail.wagtailembeds'
label = 'wagtailembeds'
verbose_name = "Wagtail embeds"
def ready(self):
get_finders()
| true
| true
|
1c4045a0c7e598cc9b3628a0f6c725ceda8892bb
| 676
|
py
|
Python
|
meteors.py
|
VaultHack/Codename-GAME-
|
86faefb872298dc71494110bdc22ebb3f31a0350
|
[
"Apache-2.0"
] | 3
|
2018-03-21T18:27:40.000Z
|
2018-03-29T06:25:50.000Z
|
meteors.py
|
VaultHack/Codename-GAME-
|
86faefb872298dc71494110bdc22ebb3f31a0350
|
[
"Apache-2.0"
] | null | null | null |
meteors.py
|
VaultHack/Codename-GAME-
|
86faefb872298dc71494110bdc22ebb3f31a0350
|
[
"Apache-2.0"
] | 2
|
2018-03-21T18:27:55.000Z
|
2018-03-29T06:25:37.000Z
|
import pygame, random
from pygame.sprite import Sprite
class Meteors(Sprite):
    """A meteor sprite that falls down the screen and removes itself once
    it has fully passed the bottom edge."""
    def __init__(self, game_settings, screen):
        super().__init__()
        self.screen = screen
        self.game_settings = game_settings
        # Load the meteor artwork and cache its rect plus the screen rect.
        self.image = pygame.image.load("images/meteor1.png")
        self.rect = self.image.get_rect()
        self.screen_rect = self.screen.get_rect()
        # Spawn one sprite-width/height in from the top-left corner.
        self.rect.x = self.rect.width
        self.rect.y = self.rect.height
        # Track the vertical position as a float for sub-pixel movement;
        # rect.y only stores integers.
        self.y = float(self.rect.y)
    def blitme(self):
        """Draw the meteor at its current position."""
        self.screen.blit(self.image, self.rect)
    def update(self):
        """Advance the meteor downward; kill it once fully off-screen."""
        self.y += self.game_settings.meteors_speed_factor
        self.rect.y = self.y
        if self.rect.top >= self.screen_rect.bottom:
            self.kill()
| 23.310345
| 54
| 0.701183
|
import pygame, random
from pygame.sprite import Sprite
class Meteors(Sprite):
def __init__(self, game_settings, screen):
super().__init__()
self.screen = screen
self.game_settings = game_settings
self.image = pygame.image.load("images/meteor1.png")
self.rect = self.image.get_rect()
self.screen_rect = self.screen.get_rect()
self.rect.x = self.rect.width
self.rect.y = self.rect.height
self.y = float(self.rect.y)
def blitme(self):
self.screen.blit(self.image, self.rect)
def update(self):
self.y += self.game_settings.meteors_speed_factor
self.rect.y = self.y
if self.rect.top >= self.screen_rect.bottom:
self.kill()
| true
| true
|
1c404714760ad68eec15387c4f7353a92c6813b3
| 3,593
|
py
|
Python
|
test/pathod/test_language_actions.py
|
dolfly/mitmproxy
|
4604c25c6055a37e5f25a238d2a089759bd5d98a
|
[
"MIT"
] | null | null | null |
test/pathod/test_language_actions.py
|
dolfly/mitmproxy
|
4604c25c6055a37e5f25a238d2a089759bd5d98a
|
[
"MIT"
] | null | null | null |
test/pathod/test_language_actions.py
|
dolfly/mitmproxy
|
4604c25c6055a37e5f25a238d2a089759bd5d98a
|
[
"MIT"
] | null | null | null |
import cStringIO
from pathod.language import actions
from pathod import language
def parse_request(s):
    """Parse a pathoc request spec and return the first parsed request.

    Python 2 iterator protocol (.next()) — this module predates Python 3.
    """
    return language.parse_pathoc(s).next()
def test_unique_name():
    # PauseAt reports no unique_name; DisconnectAt reports one.
    assert not actions.PauseAt(0, "f").unique_name
    assert actions.DisconnectAt(0).unique_name
class TestDisconnects:
    """Tests for the 'd' (disconnect) action."""
    def test_parse_pathod(self):
        # Disconnect actions round-trip through a full pathod parse.
        a = language.parse_pathod("400:d0").next().actions[0]
        assert a.spec() == "d0"
        a = language.parse_pathod("400:dr").next().actions[0]
        assert a.spec() == "dr"
    def test_at(self):
        # Offsets parse as integers, or as the symbolic value "r".
        e = actions.DisconnectAt.expr()
        v = e.parseString("d0")[0]
        assert isinstance(v, actions.DisconnectAt)
        assert v.offset == 0
        v = e.parseString("d100")[0]
        assert v.offset == 100
        e = actions.DisconnectAt.expr()
        v = e.parseString("dr")[0]
        assert v.offset == "r"
    def test_spec(self):
        # spec() regenerates the textual form for both offset kinds.
        assert actions.DisconnectAt("r").spec() == "dr"
        assert actions.DisconnectAt(10).spec() == "d10"
class TestInject:
    """Tests for the 'i' (inject) action."""

    def test_parse_pathod(self):
        # Injected generated data ("@100") parses with datatype and size.
        a = language.parse_pathod("400:ir,@100").next().actions[0]
        assert a.offset == "r"
        assert a.value.datatype == "bytes"
        assert a.value.usize == 100
        a = language.parse_pathod("400:ia,@100").next().actions[0]
        assert a.offset == "a"

    def test_at(self):
        # Offsets parse as integers or as the symbolic value "r".
        e = actions.InjectAt.expr()
        v = e.parseString("i0,'foo'")[0]
        assert v.value.val == "foo"
        assert v.offset == 0
        assert isinstance(v, actions.InjectAt)
        v = e.parseString("ir,'foo'")[0]
        assert v.offset == "r"

    def test_serve(self):
        s = cStringIO.StringIO()
        r = language.parse_pathod("400:i0,'foo'").next()
        assert language.serve(r, s, {})

    def test_spec(self):
        e = actions.InjectAt.expr()
        v = e.parseString("i0,'foo'")[0]
        assert v.spec() == 'i0,"foo"'

    # BUG FIX: this method was also named test_spec, which shadowed the
    # method above so the spec assertion never ran under test discovery.
    # Renamed to describe what it actually exercises: freezing an InjectAt.
    def test_freeze(self):
        e = actions.InjectAt.expr()
        v = e.parseString("i0,@100")[0]
        v2 = v.freeze({})
        v3 = v2.freeze({})
        assert v2.value.val == v3.value.val
class TestPauses:
    """Tests for the 'p' (pause) action."""
    def test_parse_pathod(self):
        # Both numeric and symbolic ("f", "r", "a") fields parse through.
        e = actions.PauseAt.expr()
        v = e.parseString("p10,10")[0]
        assert v.seconds == 10
        assert v.offset == 10
        v = e.parseString("p10,f")[0]
        assert v.seconds == "f"
        v = e.parseString("pr,f")[0]
        assert v.offset == "r"
        v = e.parseString("pa,f")[0]
        assert v.offset == "a"
    def test_request(self):
        # Pause actions round-trip through a full pathod parse.
        r = language.parse_pathod('400:p10,10').next()
        assert r.actions[0].spec() == "p10,10"
    def test_spec(self):
        assert actions.PauseAt("r", 5).spec() == "pr,5"
        assert actions.PauseAt(0, 5).spec() == "p0,5"
        assert actions.PauseAt(0, "f").spec() == "p0,f"
    def test_freeze(self):
        # Freezing with no settings must leave the spec unchanged.
        l = actions.PauseAt("r", 5)
        assert l.freeze({}).spec() == l.spec()
class Test_Action:
    """Tests for behavior common to all action types."""
    def test_cmp(self):
        # Actions compare and sort by offset.
        a = actions.DisconnectAt(0)
        b = actions.DisconnectAt(1)
        c = actions.DisconnectAt(0)
        assert a < b
        assert a == c
        l = sorted([b, a])
        assert l[0].offset == 0
    def test_resolve(self):
        # resolve() turns the symbolic "r" offset into a concrete integer.
        r = parse_request('GET:"/foo"')
        e = actions.DisconnectAt("r")
        ret = e.resolve({}, r)
        assert isinstance(ret.offset, int)
    def test_repr(self):
        e = actions.DisconnectAt("r")
        assert repr(e)
    def test_freeze(self):
        # Freezing a fully-concrete action leaves the spec unchanged.
        l = actions.DisconnectAt(5)
        assert l.freeze({}).spec() == l.spec()
| 26.419118
| 66
| 0.561091
|
import cStringIO
from pathod.language import actions
from pathod import language
def parse_request(s):
return language.parse_pathoc(s).next()
def test_unique_name():
assert not actions.PauseAt(0, "f").unique_name
assert actions.DisconnectAt(0).unique_name
class TestDisconnects:
def test_parse_pathod(self):
a = language.parse_pathod("400:d0").next().actions[0]
assert a.spec() == "d0"
a = language.parse_pathod("400:dr").next().actions[0]
assert a.spec() == "dr"
def test_at(self):
e = actions.DisconnectAt.expr()
v = e.parseString("d0")[0]
assert isinstance(v, actions.DisconnectAt)
assert v.offset == 0
v = e.parseString("d100")[0]
assert v.offset == 100
e = actions.DisconnectAt.expr()
v = e.parseString("dr")[0]
assert v.offset == "r"
def test_spec(self):
assert actions.DisconnectAt("r").spec() == "dr"
assert actions.DisconnectAt(10).spec() == "d10"
class TestInject:
def test_parse_pathod(self):
a = language.parse_pathod("400:ir,@100").next().actions[0]
assert a.offset == "r"
assert a.value.datatype == "bytes"
assert a.value.usize == 100
a = language.parse_pathod("400:ia,@100").next().actions[0]
assert a.offset == "a"
def test_at(self):
e = actions.InjectAt.expr()
v = e.parseString("i0,'foo'")[0]
assert v.value.val == "foo"
assert v.offset == 0
assert isinstance(v, actions.InjectAt)
v = e.parseString("ir,'foo'")[0]
assert v.offset == "r"
def test_serve(self):
s = cStringIO.StringIO()
r = language.parse_pathod("400:i0,'foo'").next()
assert language.serve(r, s, {})
def test_spec(self):
e = actions.InjectAt.expr()
v = e.parseString("i0,'foo'")[0]
assert v.spec() == 'i0,"foo"'
def test_spec(self):
e = actions.InjectAt.expr()
v = e.parseString("i0,@100")[0]
v2 = v.freeze({})
v3 = v2.freeze({})
assert v2.value.val == v3.value.val
class TestPauses:
def test_parse_pathod(self):
e = actions.PauseAt.expr()
v = e.parseString("p10,10")[0]
assert v.seconds == 10
assert v.offset == 10
v = e.parseString("p10,f")[0]
assert v.seconds == "f"
v = e.parseString("pr,f")[0]
assert v.offset == "r"
v = e.parseString("pa,f")[0]
assert v.offset == "a"
def test_request(self):
r = language.parse_pathod('400:p10,10').next()
assert r.actions[0].spec() == "p10,10"
def test_spec(self):
assert actions.PauseAt("r", 5).spec() == "pr,5"
assert actions.PauseAt(0, 5).spec() == "p0,5"
assert actions.PauseAt(0, "f").spec() == "p0,f"
def test_freeze(self):
l = actions.PauseAt("r", 5)
assert l.freeze({}).spec() == l.spec()
class Test_Action:
def test_cmp(self):
a = actions.DisconnectAt(0)
b = actions.DisconnectAt(1)
c = actions.DisconnectAt(0)
assert a < b
assert a == c
l = sorted([b, a])
assert l[0].offset == 0
def test_resolve(self):
r = parse_request('GET:"/foo"')
e = actions.DisconnectAt("r")
ret = e.resolve({}, r)
assert isinstance(ret.offset, int)
def test_repr(self):
e = actions.DisconnectAt("r")
assert repr(e)
def test_freeze(self):
l = actions.DisconnectAt(5)
assert l.freeze({}).spec() == l.spec()
| true
| true
|
1c4047fca52bbc1922c2ef1b4056844b19f0cf9c
| 402
|
py
|
Python
|
Projects/urls.py
|
Shreya549/Project-Generator-Backend
|
f77b945dd9a84bde0aecb8755a5157a5cf4fec4f
|
[
"MIT"
] | null | null | null |
Projects/urls.py
|
Shreya549/Project-Generator-Backend
|
f77b945dd9a84bde0aecb8755a5157a5cf4fec4f
|
[
"MIT"
] | null | null | null |
Projects/urls.py
|
Shreya549/Project-Generator-Backend
|
f77b945dd9a84bde0aecb8755a5157a5cf4fec4f
|
[
"MIT"
] | 1
|
2022-02-22T17:20:13.000Z
|
2022-02-22T17:20:13.000Z
|
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from .views import (
    ProjectViewSet,
    ViewProjectViewSet,
    MyProjectsViewSet,
)
# SimpleRouter generates the URL patterns for the three viewsets; the
# registered prefixes ('new', 'view', 'my') become the URL path segments
# and the basenames are used for reverse URL lookups.
router = SimpleRouter()
router.register('new', ProjectViewSet, basename="new")
router.register('view', ViewProjectViewSet, basename="view")
router.register('my', MyProjectsViewSet, basename='my')
urlpatterns = router.urls
| 23.647059
| 60
| 0.771144
|
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from .views import (
ProjectViewSet,
ViewProjectViewSet,
MyProjectsViewSet,
)
router = SimpleRouter()
router.register('new', ProjectViewSet, basename="new")
router.register('view', ViewProjectViewSet, basename="view")
router.register('my', MyProjectsViewSet, basename='my')
urlpatterns = router.urls
| true
| true
|
1c404828cc0e3e521538877140a9b2e03c49c434
| 7,270
|
py
|
Python
|
cogs/twittercog.py
|
nluedtke/brochat-bot
|
9a9a3e89fbcd35a791b1842d73f3aa0e2c3f985d
|
[
"MIT"
] | 18
|
2017-08-23T17:26:30.000Z
|
2021-04-04T03:05:04.000Z
|
cogs/twittercog.py
|
nluedtke/brochat-bot
|
9a9a3e89fbcd35a791b1842d73f3aa0e2c3f985d
|
[
"MIT"
] | 36
|
2017-08-21T14:23:43.000Z
|
2020-06-29T14:11:57.000Z
|
cogs/twittercog.py
|
nluedtke/brochat-bot
|
9a9a3e89fbcd35a791b1842d73f3aa0e2c3f985d
|
[
"MIT"
] | 5
|
2017-08-24T04:05:41.000Z
|
2021-08-13T09:44:36.000Z
|
import asyncio
from datetime import datetime
from random import randint, shuffle
import common
from cogs.duelcog import item_chance_roll
from discord.ext import commands
from twython import TwythonError
class Twitter(commands.Cog):
    """Discord cog with commands that fetch and post tweets."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command(name="trump")
    async def get_trump(self, ctx):
        """Get Trump's latest Yuge success!"""
        if common.twitter is None:
            await ctx.send("Twitter not activated.")
            return
        twitter_id = 'realdonaldtrump'
        tweet_text = \
            ':pen_ballpoint::monkey: Trump has been saying things, as ' \
            'usual...'
        rt_text = \
            ':pen_ballpoint::monkey: Trump has been repeating things, as ' \
            'usual... (RT ALERT)'
        try:
            # Fetch every tweet missed since the last check, at least one.
            await get_last_tweet(twitter_id, tweet_text,
                                 rt_text, ctx, max(1, common.missed_trumps))
        except TwythonError:
            await ctx.send("Twitter is acting up, try again later.")
        else:
            if common.trump_chance_roll_rdy:
                # Item roll threshold shrinks by 2 per missed tweet —
                # presumably improving drop odds; see item_chance_roll.
                await item_chance_roll(ctx.message.author.display_name,
                                       ctx.message.channel,
                                       92 - (common.missed_trumps * 2))
            common.trump_chance_roll_rdy = False
            common.missed_trumps = 0
    @commands.command(name='news')
    async def get_news(self, ctx):
        """Grab a news story"""
        if common.twitter is None:
            return
        shuffle(common.news_handles)
        found_art = False
        # Rotate through sources until one returns a tweet successfully.
        while not found_art:
            source = common.news_handles.pop(0)
            common.news_handles.append(source)
            tweet_text = "It looks like @" + source + " is reporting:"
            rt_text = "It looks like @" + source + " is retweeting:"
            try:
                await get_last_tweet(source, tweet_text, rt_text, ctx)
            except TwythonError:
                print("Error in get_news, trying another source")
            else:
                found_art = True
        return
    @commands.command(name='toggle-news', hidden=True)
    async def toggle_news(self, ctx):
        """Toggle the news feed on and off"""
        if common.NEWS_FEED_ON:
            common.NEWS_FEED_ON = False
            await ctx.send("News Feed turned off.")
        else:
            # Spawn the background task only once; subsequent toggles just
            # flip the flag that the running task checks.
            if not common.NEWS_FEED_CREATED:
                ctx.loop.create_task(handle_news(ctx, self.bot))
                common.NEWS_FEED_CREATED = True
            common.NEWS_FEED_ON = True
            await ctx.send("News Feed turned on.")
async def get_last_tweet(_id, tweet_text, rt_text, ctx, c=1):
    """
    Send the last `c` tweets from a twitter handle to the channel.

    :param _id: Twitter screen name
    :param tweet_text: flavor text for regular tweets
    :param rt_text: flavor text for retweets
    :param ctx: Context to send messages through
    :param c: number of tweets to fetch
    :return: None
    :raises TwythonError: when the Twitter API call fails
    """
    if common.twitter is None:
        await ctx.send("Twitter not activated.")
        return
    # The previous `except TwythonError as e: raise e` wrapper was a no-op;
    # let the exception propagate to the caller unchanged.
    last_tweet = common.twitter.get_user_timeline(screen_name=_id, count=c,
                                                  include_retweets=True)
    for i in range(c):
        tweet = last_tweet[i]
        if 'retweeted_status' in tweet:
            # Retweet: link the original tweet with the RT flavor text.
            original = tweet['retweeted_status']
            await ctx.send('{}\n\nhttps://twitter.com/{}/status/{}'
                           .format(rt_text,
                                   original['user']['screen_name'],
                                   str(original['id'])))
        else:
            # Regular tweet: link it directly.
            await ctx.send('{}\n\nhttps://twitter.com/{}/status/{}'
                           .format(tweet_text,
                                   tweet['user']['screen_name'],
                                   str(tweet['id'])))
async def check_trumps_mouth(bot):
    """
    Background task that polls for new tweets from @realdonaldtrump and
    announces them in the configured channel.

    :param bot: The bot instance
    :return: None
    """
    c_to_send = None
    decay = 0
    await bot.wait_until_ready()
    for channel in bot.get_all_channels():
        if channel.name == 'gen_testing' \
                or channel.name == common.ARGS['channel']:
            c_to_send = channel
            break
    if common.twitter is None:
        return
    # BUG FIX: previously, if no matching channel existed, c_to_send stayed
    # None and c_to_send.send(...) below raised AttributeError.
    if c_to_send is None:
        print("No announcement channel found; trump watcher exiting")
        return
    common.last_id = common.twitter.get_user_timeline(
        screen_name='realdonaldtrump', count=1, include_retweets=False)[0]['id']
    delay = common.trump_del * 60
    while not bot.is_closed():
        await asyncio.sleep(delay)
        print("Checked trump at {}".format(datetime.now()))
        try:
            trumps_lt_id = common.twitter.get_user_timeline(
                screen_name='realdonaldtrump', count=1,
                include_retweets=False)[0]['id']
        # BUG FIX: a bare `except:` also swallowed SystemExit,
        # KeyboardInterrupt, and (pre-3.8) task cancellation.
        except Exception:
            print("Error caught in check_trump, shortening delay")
            delay = 60
        else:
            if decay > 0:
                # Recently saw a tweet: poll faster, easing back each cycle.
                delay = (common.trump_del - decay) * 60
                decay -= 1
            else:
                delay = common.trump_del * 60
            if trumps_lt_id != common.last_id:
                common.trump_tweets_seen += 1
                await c_to_send.send("New Message from the prez! Try !trump")
                decay = common.trump_del - 1
                delay = (common.trump_del - decay) * 60
                common.last_id = trumps_lt_id
                common.trump_chance_roll_rdy = True
                common.missed_trumps += 1
async def handle_news(ctx, bot):
    """
    Background task that periodically posts a tweet from a rotating list
    of news sources to the news channel.

    :param ctx: Context used to report errors
    :param bot: The bot instance
    :return: None
    """
    shuffle(common.news_handles)
    await bot.wait_until_ready()
    c_to_send = None
    for channel in bot.get_all_channels():
        if channel.name == 'gen_testing' or channel.name == 'newsfeed':
            c_to_send = channel
            break
    if common.twitter is None:
        await ctx.send("Twitter not activated.")
        return
    # BUG FIX: previously, if no matching channel existed, c_to_send was
    # never bound and the send below raised NameError.
    if c_to_send is None:
        print("No news channel found; news feed task exiting")
        common.NEWS_FEED_CREATED = False
        return
    delay = (common.news_del * 60) + (randint(0, 10) * 60)
    while not bot.is_closed():
        # Rotate to the next source (round-robin over the shuffled list).
        next_source = common.news_handles.pop(0)
        common.news_handles.append(next_source)
        print("Next news source will be {}".format(next_source))
        await asyncio.sleep(delay)
        if common.NEWS_FEED_ON:
            try:
                news = common.twitter.get_user_timeline(
                    screen_name=next_source, count=1,
                    include_retweets=False)
            # BUG FIX: a bare `except:` also swallowed SystemExit,
            # KeyboardInterrupt, and (pre-3.8) task cancellation.
            except Exception:
                print("Error caught in news, shortening delay")
                delay = 30
            else:
                delay = (common.news_del * 60) + (randint(0, 10) * 60)
                await c_to_send.send("https://twitter.com/{0}/status/{1}"
                                     .format(news[0]['user']['screen_name'],
                                             str(news[0]['id'])))
        else:
            # Feed toggled off: let the task die; toggle_news recreates it.
            common.NEWS_FEED_CREATED = False
            print("Destroying News Feed Task")
            return
def setup(bot):
    """Extension entry point: register the Twitter cog with the bot."""
    bot.add_cog(Twitter(bot))
    # Left disabled by the author: background task polling for new tweets.
    # bot.loop.create_task(check_trumps_mouth(bot))
| 33.657407
| 80
| 0.550894
|
import asyncio
from datetime import datetime
from random import randint, shuffle
import common
from cogs.duelcog import item_chance_roll
from discord.ext import commands
from twython import TwythonError
class Twitter(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="trump")
async def get_trump(self, ctx):
if common.twitter is None:
await ctx.send("Twitter not activated.")
return
twitter_id = 'realdonaldtrump'
tweet_text = \
':pen_ballpoint::monkey: Trump has been saying things, as ' \
'usual...'
rt_text = \
':pen_ballpoint::monkey: Trump has been repeating things, as ' \
'usual... (RT ALERT)'
try:
await get_last_tweet(twitter_id, tweet_text,
rt_text, ctx, max(1, common.missed_trumps))
except TwythonError:
await ctx.send("Twitter is acting up, try again later.")
else:
if common.trump_chance_roll_rdy:
await item_chance_roll(ctx.message.author.display_name,
ctx.message.channel,
92 - (common.missed_trumps * 2))
common.trump_chance_roll_rdy = False
common.missed_trumps = 0
@commands.command(name='news')
async def get_news(self, ctx):
if common.twitter is None:
return
shuffle(common.news_handles)
found_art = False
while not found_art:
source = common.news_handles.pop(0)
common.news_handles.append(source)
tweet_text = "It looks like @" + source + " is reporting:"
rt_text = "It looks like @" + source + " is retweeting:"
try:
await get_last_tweet(source, tweet_text, rt_text, ctx)
except TwythonError:
print("Error in get_news, trying another source")
else:
found_art = True
return
@commands.command(name='toggle-news', hidden=True)
async def toggle_news(self, ctx):
if common.NEWS_FEED_ON:
common.NEWS_FEED_ON = False
await ctx.send("News Feed turned off.")
else:
if not common.NEWS_FEED_CREATED:
ctx.loop.create_task(handle_news(ctx, self.bot))
common.NEWS_FEED_CREATED = True
common.NEWS_FEED_ON = True
await ctx.send("News Feed turned on.")
async def get_last_tweet(_id, tweet_text, rt_text, ctx, c=1):
if common.twitter is None:
await ctx.send("Twitter not activated.")
return
try:
last_tweet = common.twitter.get_user_timeline(screen_name=_id, count=c,
include_retweets=True)
except TwythonError as e:
raise e
else:
for i in range(c):
if 'retweeted_status' in last_tweet[i]:
rt_id = last_tweet[i]['retweeted_status']['id']
rt_screen_name = last_tweet[i]['retweeted_status']['user'][
'screen_name']
await ctx.send('{}\n\nhttps://twitter.com/{}/status/{}'
.format(rt_text, rt_screen_name, str(rt_id)))
# otherwise, send the tweet
else:
await ctx.send('{}\n\nhttps://twitter.com/{}/status/{}'
.format(tweet_text, last_tweet[i]['user']
['screen_name'],
str(last_tweet[i]['id'])))
async def check_trumps_mouth(bot):
c_to_send = None
decay = 0
await bot.wait_until_ready()
for channel in bot.get_all_channels():
if channel.name == 'gen_testing' \
or channel.name == common.ARGS['channel']:
c_to_send = channel
break
if common.twitter is None:
return
common.last_id = common.twitter.get_user_timeline(
screen_name='realdonaldtrump', count=1, include_retweets=False)[0]['id']
delay = common.trump_del * 60
while not bot.is_closed():
await asyncio.sleep(delay)
print("Checked trump at {}".format(datetime.now()))
try:
trumps_lt_id = common.twitter.get_user_timeline(
screen_name='realdonaldtrump', count=1,
include_retweets=False)[0]['id']
except:
print("Error caught in check_trump, shortening delay")
delay = 60
else:
if decay > 0:
delay = (common.trump_del - decay) * 60
decay -= 1
else:
delay = common.trump_del * 60
if trumps_lt_id != common.last_id:
common.trump_tweets_seen += 1
await c_to_send.send("New Message from the prez! Try !trump")
decay = common.trump_del - 1
delay = (common.trump_del - decay) * 60
common.last_id = trumps_lt_id
common.trump_chance_roll_rdy = True
common.missed_trumps += 1
async def handle_news(ctx, bot):
shuffle(common.news_handles)
await bot.wait_until_ready()
for channel in bot.get_all_channels():
if channel.name == 'gen_testing' or channel.name == 'newsfeed':
c_to_send = channel
break
if common.twitter is None:
await ctx.send("Twitter not activated.")
return
delay = (common.news_del * 60) + (randint(0, 10) * 60)
while not bot.is_closed():
next_source = common.news_handles.pop(0)
common.news_handles.append(next_source)
print("Next news source will be {}".format(next_source))
await asyncio.sleep(delay)
if common.NEWS_FEED_ON:
try:
news = common.twitter.get_user_timeline(
screen_name=next_source, count=1,
include_retweets=False)
except:
print("Error caught in news, shortening delay")
delay = 30
else:
delay = (common.news_del * 60) + (randint(0, 10) * 60)
await c_to_send.send("https://twitter.com/{0}/status/{1}"
.format(news[0]['user']['screen_name'],
str(news[0]['id'])))
else:
common.NEWS_FEED_CREATED = False
print("Destroying News Feed Task")
return
def setup(bot):
bot.add_cog(Twitter(bot))
# bot.loop.create_task(check_trumps_mouth(bot))
| true
| true
|
1c4048e58ed6a692da19af14e93f886adc543c8c
| 4,683
|
py
|
Python
|
openpeerpower/components/fritzbox_netmonitor/sensor.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
openpeerpower/components/fritzbox_netmonitor/sensor.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
openpeerpower/components/fritzbox_netmonitor/sensor.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Support for monitoring an AVM Fritz!Box router."""
from datetime import timedelta
import logging
from fritzconnection.core.exceptions import FritzConnectionException
from fritzconnection.lib.fritzstatus import FritzStatus
from requests.exceptions import RequestException
import voluptuous as vol
from openpeerpower.components.sensor import PLATFORM_SCHEMA, SensorEntity
from openpeerpower.const import CONF_HOST, CONF_NAME, STATE_UNAVAILABLE
import openpeerpower.helpers.config_validation as cv
from openpeerpower.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "fritz_netmonitor"
DEFAULT_HOST = "169.254.1.1" # This IP is valid for all FRITZ!Box routers.
ATTR_BYTES_RECEIVED = "bytes_received"
ATTR_BYTES_SENT = "bytes_sent"
ATTR_TRANSMISSION_RATE_UP = "transmission_rate_up"
ATTR_TRANSMISSION_RATE_DOWN = "transmission_rate_down"
ATTR_EXTERNAL_IP = "external_ip"
ATTR_IS_CONNECTED = "is_connected"
ATTR_IS_LINKED = "is_linked"
ATTR_MAX_BYTE_RATE_DOWN = "max_byte_rate_down"
ATTR_MAX_BYTE_RATE_UP = "max_byte_rate_up"
ATTR_UPTIME = "uptime"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
STATE_ONLINE = "online"
STATE_OFFLINE = "offline"
ICON = "mdi:web"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
}
)
def setup_platform(opp, config, add_entities, discovery_info=None):
    """Set up the FRITZ!Box monitor sensors."""
    host = config[CONF_HOST]
    name = config[CONF_NAME]
    fstatus = None
    try:
        fstatus = FritzStatus(address=host)
    except (ValueError, TypeError, FritzConnectionException):
        # Leave fstatus as None; the failure is reported below.
        pass
    if fstatus is None:
        _LOGGER.error("Failed to establish connection to FRITZ!Box: %s", host)
        # NOTE(review): returns the truthy sentinel 1 on failure, as upstream
        # did — kept unchanged for backward compatibility.
        return 1
    _LOGGER.info("Successfully connected to FRITZ!Box")
    add_entities([FritzboxMonitorSensor(name, fstatus)], True)
class FritzboxMonitorSensor(SensorEntity):
    """Implementation of a fritzbox monitor sensor."""

    def __init__(self, name, fstatus):
        """Initialize the sensor."""
        self._name = name
        self._fstatus = fstatus
        self._state = STATE_UNAVAILABLE
        # All measurements start out unknown until the first update().
        self._is_linked = None
        self._is_connected = None
        self._external_ip = None
        self._uptime = None
        self._bytes_sent = None
        self._bytes_received = None
        self._transmission_rate_up = None
        self._transmission_rate_down = None
        self._max_byte_rate_up = None
        self._max_byte_rate_down = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name.rstrip()

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        # Don't expose stale attributes while the FritzBox is unreachable.
        if self._state == STATE_UNAVAILABLE:
            return {}
        return {
            ATTR_IS_LINKED: self._is_linked,
            ATTR_IS_CONNECTED: self._is_connected,
            ATTR_EXTERNAL_IP: self._external_ip,
            ATTR_UPTIME: self._uptime,
            ATTR_BYTES_SENT: self._bytes_sent,
            ATTR_BYTES_RECEIVED: self._bytes_received,
            ATTR_TRANSMISSION_RATE_UP: self._transmission_rate_up,
            ATTR_TRANSMISSION_RATE_DOWN: self._transmission_rate_down,
            ATTR_MAX_BYTE_RATE_UP: self._max_byte_rate_up,
            ATTR_MAX_BYTE_RATE_DOWN: self._max_byte_rate_down,
        }

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Retrieve information from the FritzBox."""
        status = self._fstatus
        try:
            self._is_linked = status.is_linked
            self._is_connected = status.is_connected
            self._external_ip = status.external_ip
            self._uptime = status.uptime
            self._bytes_sent = status.bytes_sent
            self._bytes_received = status.bytes_received
            tx_rates = status.transmission_rate
            self._transmission_rate_up = tx_rates[0]
            self._transmission_rate_down = tx_rates[1]
            max_rates = status.max_byte_rate
            self._max_byte_rate_up = max_rates[0]
            self._max_byte_rate_down = max_rates[1]
            self._state = STATE_ONLINE if self._is_connected else STATE_OFFLINE
        except RequestException as err:
            # Keep the last known measurements but flag the entity unavailable.
            self._state = STATE_UNAVAILABLE
            _LOGGER.warning("Could not reach FRITZ!Box: %s", err)
| 35.477273
| 79
| 0.704036
|
from datetime import timedelta
import logging
from fritzconnection.core.exceptions import FritzConnectionException
from fritzconnection.lib.fritzstatus import FritzStatus
from requests.exceptions import RequestException
import voluptuous as vol
from openpeerpower.components.sensor import PLATFORM_SCHEMA, SensorEntity
from openpeerpower.const import CONF_HOST, CONF_NAME, STATE_UNAVAILABLE
import openpeerpower.helpers.config_validation as cv
from openpeerpower.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "fritz_netmonitor"
DEFAULT_HOST = "169.254.1.1"
ATTR_BYTES_RECEIVED = "bytes_received"
ATTR_BYTES_SENT = "bytes_sent"
ATTR_TRANSMISSION_RATE_UP = "transmission_rate_up"
ATTR_TRANSMISSION_RATE_DOWN = "transmission_rate_down"
ATTR_EXTERNAL_IP = "external_ip"
ATTR_IS_CONNECTED = "is_connected"
ATTR_IS_LINKED = "is_linked"
ATTR_MAX_BYTE_RATE_DOWN = "max_byte_rate_down"
ATTR_MAX_BYTE_RATE_UP = "max_byte_rate_up"
ATTR_UPTIME = "uptime"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
STATE_ONLINE = "online"
STATE_OFFLINE = "offline"
ICON = "mdi:web"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
}
)
def setup_platform(opp, config, add_entities, discovery_info=None):
name = config[CONF_NAME]
host = config[CONF_HOST]
try:
fstatus = FritzStatus(address=host)
except (ValueError, TypeError, FritzConnectionException):
fstatus = None
if fstatus is None:
_LOGGER.error("Failed to establish connection to FRITZ!Box: %s", host)
return 1
_LOGGER.info("Successfully connected to FRITZ!Box")
add_entities([FritzboxMonitorSensor(name, fstatus)], True)
class FritzboxMonitorSensor(SensorEntity):
def __init__(self, name, fstatus):
self._name = name
self._fstatus = fstatus
self._state = STATE_UNAVAILABLE
self._is_linked = self._is_connected = None
self._external_ip = self._uptime = None
self._bytes_sent = self._bytes_received = None
self._transmission_rate_up = None
self._transmission_rate_down = None
self._max_byte_rate_up = self._max_byte_rate_down = None
@property
def name(self):
return self._name.rstrip()
@property
def icon(self):
return ICON
@property
def state(self):
return self._state
@property
def extra_state_attributes(self):
if self._state == STATE_UNAVAILABLE:
return {}
return {
ATTR_IS_LINKED: self._is_linked,
ATTR_IS_CONNECTED: self._is_connected,
ATTR_EXTERNAL_IP: self._external_ip,
ATTR_UPTIME: self._uptime,
ATTR_BYTES_SENT: self._bytes_sent,
ATTR_BYTES_RECEIVED: self._bytes_received,
ATTR_TRANSMISSION_RATE_UP: self._transmission_rate_up,
ATTR_TRANSMISSION_RATE_DOWN: self._transmission_rate_down,
ATTR_MAX_BYTE_RATE_UP: self._max_byte_rate_up,
ATTR_MAX_BYTE_RATE_DOWN: self._max_byte_rate_down,
}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
try:
self._is_linked = self._fstatus.is_linked
self._is_connected = self._fstatus.is_connected
self._external_ip = self._fstatus.external_ip
self._uptime = self._fstatus.uptime
self._bytes_sent = self._fstatus.bytes_sent
self._bytes_received = self._fstatus.bytes_received
transmission_rate = self._fstatus.transmission_rate
self._transmission_rate_up = transmission_rate[0]
self._transmission_rate_down = transmission_rate[1]
self._max_byte_rate_up = self._fstatus.max_byte_rate[0]
self._max_byte_rate_down = self._fstatus.max_byte_rate[1]
self._state = STATE_ONLINE if self._is_connected else STATE_OFFLINE
except RequestException as err:
self._state = STATE_UNAVAILABLE
_LOGGER.warning("Could not reach FRITZ!Box: %s", err)
| true
| true
|
1c404903e53fc5a58dce23929d40cd330cab97d2
| 8,596
|
py
|
Python
|
tensorflow/contrib/learn/python/learn/metric_spec.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 65
|
2016-09-26T01:30:40.000Z
|
2021-08-11T17:00:41.000Z
|
tensorflow/contrib/learn/python/learn/metric_spec.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 5
|
2017-02-21T08:37:52.000Z
|
2017-03-29T05:46:05.000Z
|
tensorflow/contrib/learn/python/learn/metric_spec.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 11
|
2017-09-10T16:22:21.000Z
|
2021-08-09T09:24:50.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The metric spec class to flexibly connect models and metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import tf_logging as logging
class MetricSpec(object):
  """MetricSpec connects a model to metric functions.

  The MetricSpec class contains all information necessary to connect the
  output of a `model_fn` to the metrics (usually, streaming metrics) that are
  used in evaluation.

  It is passed in the `metrics` argument of `Estimator.evaluate`. The
  `Estimator` then knows which predictions, labels, and weight to use to call a
  given metric function.

  When building the ops to run in evaluation, `Estimator` will call
  `create_metric_ops`, which will connect the given `metric_fn` to the model
  as detailed in the docstring for `create_metric_ops`, and return the metric.

  Example:

  Assuming a model has an input function which returns inputs containing
  (among other things) a tensor with key "input_key", and a labels dictionary
  containing "label_key". Let's assume that the `model_fn` for this model
  returns a prediction with key "prediction_key".

  In order to compute the accuracy of the "prediction_key" prediction, we
  would add

  ```
  "prediction accuracy": MetricSpec(metric_fn=prediction_accuracy_fn,
                                    prediction_key="prediction_key",
                                    label_key="label_key")
  ```

  to the metrics argument to `evaluate`. `prediction_accuracy_fn` can be either
  a predefined function in metric_ops (e.g., `streaming_accuracy`) or a custom
  function you define.

  If we would like the accuracy to be weighted by "input_key", we can add that
  as the `weight_key` argument.

  ```
  "prediction accuracy": MetricSpec(metric_fn=prediction_accuracy_fn,
                                    prediction_key="prediction_key",
                                    label_key="label_key",
                                    weight_key="input_key")
  ```
  """

  def __init__(self,
               metric_fn,
               prediction_key=None,
               label_key=None,
               weight_key=None):
    """Constructor.

    Creates a MetricSpec.

    Args:
      metric_fn: A function to use as a metric. Must accept `predictions`,
        `labels` and optionally, `weights` tensors as inputs, and must return
        either a single tensor which is interpreted as a value of this metric,
        or a pair `(value_op, update_op)`, where value_op is the op to call to
        obtain the value of the metric, and update_op should be evaluated for
        each batch in order to update internal state.
      prediction_key: The key for a tensor in the `predictions` dict (output
        from the `model_fn`) to use as the `predictions` input to the
        `metric_fn`. Optional. If `None`, the `model_fn` must return a single
        tensor or a dict with only a single entry as `predictions`.
      label_key: The key for a tensor in the `labels` dict (output from the
        `input_fn`) to use as the `labels` input to the `metric_fn`.
        Optional. If `None`, the `input_fn` must return a single tensor or a
        dict with only a single entry as `labels`.
      weight_key: The key for a tensor in the `inputs` dict (output from the
        `input_fn`) to use as the `weights` input to the `metric_fn`.
        Optional. If `None`, no weights will be passed to the `metric_fn`.
    """
    self._metric_fn = metric_fn
    self._prediction_key = prediction_key
    self._label_key = label_key
    self._weight_key = weight_key

  @property
  def prediction_key(self):
    return self._prediction_key

  @property
  def label_key(self):
    return self._label_key

  @property
  def weight_key(self):
    return self._weight_key

  @property
  def metric_fn(self):
    return self._metric_fn

  def __str__(self):
    if hasattr(self.metric_fn, '__name__'):
      fn_name = self.metric_fn.__name__
    elif (hasattr(self.metric_fn, 'func') and
          hasattr(self.metric_fn.func, '__name__')):
      fn_name = self.metric_fn.func.__name__  # If it's a functools.partial.
    else:
      fn_name = '%s' % self.metric_fn
    return ('MetricSpec(metric_fn=%s, ' % fn_name +
            'prediction_key=%s, ' % self.prediction_key +
            'label_key=%s, ' % self.label_key +
            'weight_key=%s)' % self.weight_key
           )

  def create_metric_ops(self, inputs, labels, predictions):
    """Connect our `metric_fn` to the specified members of the given dicts.

    This function will call the `metric_fn` given in our constructor as follows:

    ```
      metric_fn(predictions[self.prediction_key],
                labels[self.label_key],
                weights=weights[self.weight_key])
    ```

    And returns the result. The `weights` argument is only passed if
    `self.weight_key` is not `None`.

    `predictions` and `labels` may be single tensors as well as dicts. If
    `predictions` is a single tensor, `self.prediction_key` must be `None`. If
    `predictions` is a single element dict, `self.prediction_key` is allowed to
    be `None`. Conversely, if `labels` is a single tensor, `self.label_key` must
    be `None`. If `labels` is a single element dict, `self.label_key` is allowed
    to be `None`.

    Args:
      inputs: A dict of inputs produced by the `input_fn`
      labels: A dict of labels or a single label tensor produced by the
        `input_fn`.
      predictions: A dict of predictions or a single tensor produced by the
        `model_fn`.

    Returns:
      The result of calling `metric_fn`.

    Raises:
      ValueError: If `predictions` or `labels` is a single `Tensor` and
        `self.prediction_key` or `self.label_key` is not `None`; or if
        `self.label_key` is `None` but `labels` is a dict with more than one
        element, or if `self.prediction_key` is `None` but `predictions` is a
        dict with more than one element.
    """

    def _get_dict(name, dict_or_tensor, key):
      """Get a single tensor or an element of a dict or raise ValueError."""
      if key:
        if not isinstance(dict_or_tensor, dict):
          raise ValueError('MetricSpec with ' + name + '_key specified'
                           ' requires ' +
                           name + 's dict, got %s' % dict_or_tensor)
        if key not in dict_or_tensor:
          raise KeyError(
              'Key \'%s\' missing from %s.' % (key, dict_or_tensor.keys()))
        return dict_or_tensor[key]
      else:
        if isinstance(dict_or_tensor, dict):
          if len(dict_or_tensor) != 1:
            raise ValueError('MetricSpec without specified ' + name + '_key'
                             ' requires ' + name + 's tensor or single element'
                             ' dict, got %s' % dict_or_tensor)
          # `dict.values()` is a non-indexable view on Python 3, so
          # `values()[0]` raised TypeError; extract the single element via
          # an iterator instead.
          return next(iter(dict_or_tensor.values()))
        else:
          return dict_or_tensor

    # Get the predictions
    prediction = _get_dict('prediction', predictions, self.prediction_key)

    # Get the labels
    label = _get_dict('label', labels, self.label_key)

    try:
      if self.weight_key:
        return self.metric_fn(prediction, label,
                              weights=inputs[self.weight_key])
      else:
        return self.metric_fn(prediction, label)
    except:  # pylint: disable=bare-except
      logging.error('Could not create metric ops for %s.' % self)
      raise
| 38.035398
| 80
| 0.648557
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import tf_logging as logging
class MetricSpec(object):
def __init__(self,
metric_fn,
prediction_key=None,
label_key=None,
weight_key=None):
self._metric_fn = metric_fn
self._prediction_key = prediction_key
self._label_key = label_key
self._weight_key = weight_key
@property
def prediction_key(self):
return self._prediction_key
@property
def label_key(self):
return self._label_key
@property
def weight_key(self):
return self._weight_key
@property
def metric_fn(self):
return self._metric_fn
def __str__(self):
if hasattr(self.metric_fn, '__name__'):
fn_name = self.metric_fn.__name__
elif (hasattr(self.metric_fn, 'func') and
hasattr(self.metric_fn.func, '__name__')):
fn_name = self.metric_fn.func.__name__
else:
fn_name = '%s' % self.metric_fn
return ('MetricSpec(metric_fn=%s, ' % fn_name +
'prediction_key=%s, ' % self.prediction_key +
'label_key=%s, ' % self.label_key +
'weight_key=%s)' % self.weight_key
)
def create_metric_ops(self, inputs, labels, predictions):
def _get_dict(name, dict_or_tensor, key):
if key:
if not isinstance(dict_or_tensor, dict):
raise ValueError('MetricSpec with ' + name + '_key specified'
' requires ' +
name + 's dict, got %s' % dict_or_tensor)
if key not in dict_or_tensor:
raise KeyError(
'Key \'%s\' missing from %s.' % (key, dict_or_tensor.keys()))
return dict_or_tensor[key]
else:
if isinstance(dict_or_tensor, dict):
if len(dict_or_tensor) != 1:
raise ValueError('MetricSpec without specified ' + name + '_key'
' requires ' + name + 's tensor or single element'
' dict, got %s' % dict_or_tensor)
return dict_or_tensor.values()[0]
else:
return dict_or_tensor
# Get the predictions
prediction = _get_dict('prediction', predictions, self.prediction_key)
# Get the labels
label = _get_dict('label', labels, self.label_key)
try:
if self.weight_key:
return self.metric_fn(prediction, label,
weights=inputs[self.weight_key])
else:
return self.metric_fn(prediction, label)
except: # pylint: disable=bare-except
logging.error('Could not create metric ops for %s.' % self)
raise
| true
| true
|
1c404b4e8623172045b89333c0e18f6f6df29823
| 7,991
|
py
|
Python
|
tests/attributes/test_datetime.py
|
yaal-coop/sheraf
|
774e3781bc6ff2e16c6cc39f268d475b5e64fcea
|
[
"MIT"
] | 1
|
2020-03-18T09:54:52.000Z
|
2020-03-18T09:54:52.000Z
|
tests/attributes/test_datetime.py
|
yaal-fr/sheraf
|
9821a53d8b0ea0aba420175e4cfa81529262f88c
|
[
"MIT"
] | null | null | null |
tests/attributes/test_datetime.py
|
yaal-fr/sheraf
|
9821a53d8b0ea0aba420175e4cfa81529262f88c
|
[
"MIT"
] | null | null | null |
import datetime
import libfaketime
import pytest
import pytz
import sheraf
import tests
def test_datetime_timestamp(sheraf_connection):
    """DateTimeAttribute round-trips values and persists them as epoch seconds."""
    class Model(tests.UUIDAutoModel):
        date = sheraf.DateTimeAttribute()
    m = Model.create()
    assert m.date is None
    m = Model.read(m.id)
    test_date = datetime.datetime(2014, 8, 16, 10, 10, 1, 1)
    m.date = test_date
    assert test_date == m.date
    # The underlying mapping stores seconds since the Unix epoch.
    assert (test_date - datetime.datetime(1970, 1, 1)).total_seconds() == m.mapping[
        "date"
    ]
    test_date2 = datetime.datetime(2032, 8, 16, 10, 10, 1, 1)
    m.toto = test_date2
    assert test_date2 == m.toto
    m = Model.read(m.id)
    # "toto" was never declared as an attribute, so it is not persisted.
    with pytest.raises(AttributeError):
        m.toto
    assert test_date == m.date
    m.date = None
    m = Model.read(m.id)
    assert m.date is None
def test_datetime_timezoned_timestamp(sheraf_connection):
    """Timezone info is stripped: aware datetimes are stored and read back naive."""
    class Model(tests.UUIDAutoModel):
        date = sheraf.DateTimeAttribute()
    m = Model.create()
    test_date = datetime.datetime(
        2014, 8, 16, 10, 10, 1, 1, tzinfo=pytz.timezone("US/Eastern")
    )
    m.date = test_date
    # The attribute drops tzinfo; compare against the naive equivalent.
    sanitized_test_date = test_date.replace(tzinfo=None)
    assert sanitized_test_date == m.date
    m = Model.read(m.id)
    assert sanitized_test_date == m.date
    assert (
        sanitized_test_date - datetime.datetime(1970, 1, 1)
    ).total_seconds() == m.mapping["date"]
def test_datetime_default(sheraf_connection):
    """A DateTimeAttribute default is returned until an explicit value is set."""
    _default_test_date = datetime.datetime(2016, 8, 16, 10, 10, 1, 1)
    class Model(tests.UUIDAutoModel):
        _date = sheraf.DateTimeAttribute(default=_default_test_date)
    m = Model.create()
    assert _default_test_date == m._date
    m = Model.read(m.id)
    _test_date = datetime.datetime(2014, 8, 16, 10, 10, 1, 1)
    m._date = _test_date
    assert _test_date == m._date
    _test_date2 = datetime.datetime(2032, 8, 16, 10, 10, 1, 1)
    m.toto = _test_date2
    assert _test_date2 == m.toto
    m = Model.read(m.id)
    # Undeclared attributes are not persisted across reads.
    with pytest.raises(AttributeError):
        m.toto
    assert _test_date == m._date
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_creation_datetime(sheraf_database):
    """creation_datetime() is None until the creating transaction is committed."""
    sheraf_database.reset()
    class Model(tests.UUIDAutoModel):
        something = sheraf.SimpleAttribute()
    with libfaketime.fake_time("2014-08-04 02:00:00") as fk:
        with sheraf.connection() as conn:
            m = Model.create()
            fk.tick()
            assert m.creation_datetime() is None
            conn.transaction_manager.commit()
            # TODO: The creation datetime should have the transaction commit datetime and not the object creation one
            assert datetime.datetime(2014, 8, 4, 2, 0, 0) == m.creation_datetime()
    with libfaketime.fake_time("2014-08-04 03:00:00"):
        with sheraf.connection():
            m = Model.read(m.id)
            # The creation datetime is stable across later connections.
            assert datetime.datetime(2014, 8, 4, 2, 0, 0) == m.creation_datetime()
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_creation_datetime_deprecated_format(sheraf_database):
    """For old models using the legacy string format for meta dates."""
    sheraf_database.reset()
    class Model(tests.UUIDAutoModel):
        pass
    with sheraf.connection() as conn:
        _datetime = datetime.datetime(2014, 8, 4, 1, 1)
        m = Model.create()
        conn.transaction_manager.commit()
        # Legacy models stored _creation as a formatted string (with or
        # without seconds/microseconds) instead of a timestamp.
        for date_format in ("%d/%m/%Y %H:%M:%S:%f", "%d/%m/%Y %H:%M"):
            m._creation = _datetime.strftime(date_format)
            assert _datetime == m.creation_datetime()
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_lastupdate_datetime(sheraf_database):
    """last_update_datetime() reflects the commit time of the latest save."""
    sheraf_database.reset()
    class Model(tests.UUIDAutoModel):
        pass
    with libfaketime.fake_time("2014-08-04 02:00:00") as fk:
        with sheraf.connection(commit=True) as conn:
            m = Model.create()
            fk.tick()
            # Not set until the transaction is committed.
            assert m.last_update_datetime() is None
            conn.transaction_manager.commit()
            assert datetime.datetime(2014, 8, 4, 2, 0, 1) == m.last_update_datetime()
    with sheraf.connection():
        m = Model.read(m.id)
        assert datetime.datetime(2014, 8, 4, 2, 0, 1) == m.last_update_datetime()
    with libfaketime.fake_time("2014-08-04 08:00:00") as fk:
        with sheraf.connection(commit=True) as conn:
            m = Model.read(m.id)
            m.save()
            fk.tick()
            # Still the previous value until this commit lands.
            assert datetime.datetime(2014, 8, 4, 2, 0, 1) == m.last_update_datetime()
            conn.transaction_manager.commit()
            assert datetime.datetime(2014, 8, 4, 8, 0, 1) == m.last_update_datetime()
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_lastupdate_datetime_setattr(sheraf_database):
    """Setting an attribute updates last_update_datetime() on commit."""
    sheraf_database.reset()
    class Model(tests.UUIDAutoModel):
        attr = sheraf.SimpleAttribute()
    with libfaketime.fake_time("2014-08-04 02:00:00"):
        with sheraf.connection(commit=True):
            m = Model.create()
    with libfaketime.fake_time("2014-08-04 06:00:00"):
        with sheraf.connection() as conn:
            m = Model.read(m.id)
            m.attr = 42
            # Unchanged until the modifying transaction commits.
            assert datetime.datetime(2014, 8, 4, 2) == m.last_update_datetime()
            conn.transaction_manager.commit()
            assert datetime.datetime(2014, 8, 4, 6) == m.last_update_datetime()
    with sheraf.connection():
        m = Model.read(m.id)
        assert datetime.datetime(2014, 8, 4, 6) == m.last_update_datetime()
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_default_meta_datetimes(sheraf_database):
    """Old models without meta dates (creation and last update) return None."""
    # The explanatory string used to sit after reset() as a dead bare-string
    # statement; it now serves as the actual docstring.
    sheraf_database.reset()
    class Model(tests.UUIDAutoModel):
        pass
    with sheraf.connection():
        m = Model.create()
        assert m.creation_datetime() is None
        assert m.last_update_datetime() is None
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_auto_initialization(sheraf_database):
    """A non-lazy DateTimeAttribute default callable is evaluated at creation."""
    sheraf_database.reset()
    class Model(tests.UUIDAutoModel):
        # lazy=False forces the default (now, under faked time) at create().
        dta = sheraf.DateTimeAttribute(lazy=False, default=datetime.datetime.now)
    with sheraf.connection():
        assert datetime.datetime(2014, 8, 4, 1, 1, 1) == Model.create().dta
def test_time_attribute(sheraf_database):
    """TimeAttribute stores times as microseconds since midnight; None as -1."""
    class Model(tests.UUIDAutoModel):
        time = sheraf.TimeAttribute()
    with sheraf.connection(commit=True):
        m = Model.create()
        assert m.time is None
        m.time = datetime.time(12, 13, 14)
        # 12:13:14 == 43994 s == 43994000000 µs since midnight.
        assert m.mapping["time"] == 43994000000
        assert m.time == datetime.time(12, 13, 14)
    with sheraf.connection():
        m = Model.read(m.id)
        m.time = datetime.time(12, 13, 14)
        assert m.mapping["time"] == 43994000000
        assert m.time == datetime.time(12, 13, 14)
    with sheraf.connection(commit=True):
        m = Model.read(m.id)
        m.time = None
        assert m.time is None
        # None is encoded with the sentinel value -1.
        assert m.mapping["time"] == -1
    with sheraf.connection():
        m = Model.read(m.id)
        assert m.time is None
        assert m.mapping["time"] == -1
def test_date_attribute(sheraf_database):
    """DateAttribute stores dates as days since the Unix epoch; None as -1."""
    class Model(tests.UUIDAutoModel):
        date = sheraf.DateAttribute()
    with sheraf.connection(commit=True):
        m = Model.create()
        assert m.date is None
        m.date = datetime.date(1971, 1, 1)
        # 1971-01-01 is 365 days after 1970-01-01.
        assert m.mapping["date"] == 365
        assert m.date == datetime.date(1971, 1, 1)
    with sheraf.connection():
        m = Model.read(m.id)
        m.date = datetime.date(1971, 1, 1)
        assert m.mapping["date"] == 365
        assert m.date == datetime.date(1971, 1, 1)
    with sheraf.connection(commit=True):
        m = Model.read(m.id)
        m.date = None
        assert m.date is None
        # None is encoded with the sentinel value -1.
        assert m.mapping["date"] == -1
    with sheraf.connection():
        m = Model.read(m.id)
        assert m.date is None
        assert m.mapping["date"] == -1
| 30.268939
| 117
| 0.636341
|
import datetime
import libfaketime
import pytest
import pytz
import sheraf
import tests
def test_datetime_timestamp(sheraf_connection):
class Model(tests.UUIDAutoModel):
date = sheraf.DateTimeAttribute()
m = Model.create()
assert m.date is None
m = Model.read(m.id)
test_date = datetime.datetime(2014, 8, 16, 10, 10, 1, 1)
m.date = test_date
assert test_date == m.date
assert (test_date - datetime.datetime(1970, 1, 1)).total_seconds() == m.mapping[
"date"
]
test_date2 = datetime.datetime(2032, 8, 16, 10, 10, 1, 1)
m.toto = test_date2
assert test_date2 == m.toto
m = Model.read(m.id)
with pytest.raises(AttributeError):
m.toto
assert test_date == m.date
m.date = None
m = Model.read(m.id)
assert m.date is None
def test_datetime_timezoned_timestamp(sheraf_connection):
class Model(tests.UUIDAutoModel):
date = sheraf.DateTimeAttribute()
m = Model.create()
test_date = datetime.datetime(
2014, 8, 16, 10, 10, 1, 1, tzinfo=pytz.timezone("US/Eastern")
)
m.date = test_date
sanitized_test_date = test_date.replace(tzinfo=None)
assert sanitized_test_date == m.date
m = Model.read(m.id)
assert sanitized_test_date == m.date
assert (
sanitized_test_date - datetime.datetime(1970, 1, 1)
).total_seconds() == m.mapping["date"]
def test_datetime_default(sheraf_connection):
_default_test_date = datetime.datetime(2016, 8, 16, 10, 10, 1, 1)
class Model(tests.UUIDAutoModel):
_date = sheraf.DateTimeAttribute(default=_default_test_date)
m = Model.create()
assert _default_test_date == m._date
m = Model.read(m.id)
_test_date = datetime.datetime(2014, 8, 16, 10, 10, 1, 1)
m._date = _test_date
assert _test_date == m._date
_test_date2 = datetime.datetime(2032, 8, 16, 10, 10, 1, 1)
m.toto = _test_date2
assert _test_date2 == m.toto
m = Model.read(m.id)
with pytest.raises(AttributeError):
m.toto
assert _test_date == m._date
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_creation_datetime(sheraf_database):
sheraf_database.reset()
class Model(tests.UUIDAutoModel):
something = sheraf.SimpleAttribute()
with libfaketime.fake_time("2014-08-04 02:00:00") as fk:
with sheraf.connection() as conn:
m = Model.create()
fk.tick()
assert m.creation_datetime() is None
conn.transaction_manager.commit()
assert datetime.datetime(2014, 8, 4, 2, 0, 0) == m.creation_datetime()
with libfaketime.fake_time("2014-08-04 03:00:00"):
with sheraf.connection():
m = Model.read(m.id)
assert datetime.datetime(2014, 8, 4, 2, 0, 0) == m.creation_datetime()
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_creation_datetime_deprecated_format(sheraf_database):
sheraf_database.reset()
class Model(tests.UUIDAutoModel):
pass
with sheraf.connection() as conn:
_datetime = datetime.datetime(2014, 8, 4, 1, 1)
m = Model.create()
conn.transaction_manager.commit()
for date_format in ("%d/%m/%Y %H:%M:%S:%f", "%d/%m/%Y %H:%M"):
m._creation = _datetime.strftime(date_format)
assert _datetime == m.creation_datetime()
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_lastupdate_datetime(sheraf_database):
sheraf_database.reset()
class Model(tests.UUIDAutoModel):
pass
with libfaketime.fake_time("2014-08-04 02:00:00") as fk:
with sheraf.connection(commit=True) as conn:
m = Model.create()
fk.tick()
assert m.last_update_datetime() is None
conn.transaction_manager.commit()
assert datetime.datetime(2014, 8, 4, 2, 0, 1) == m.last_update_datetime()
with sheraf.connection():
m = Model.read(m.id)
assert datetime.datetime(2014, 8, 4, 2, 0, 1) == m.last_update_datetime()
with libfaketime.fake_time("2014-08-04 08:00:00") as fk:
with sheraf.connection(commit=True) as conn:
m = Model.read(m.id)
m.save()
fk.tick()
assert datetime.datetime(2014, 8, 4, 2, 0, 1) == m.last_update_datetime()
conn.transaction_manager.commit()
assert datetime.datetime(2014, 8, 4, 8, 0, 1) == m.last_update_datetime()
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_lastupdate_datetime_setattr(sheraf_database):
sheraf_database.reset()
class Model(tests.UUIDAutoModel):
attr = sheraf.SimpleAttribute()
with libfaketime.fake_time("2014-08-04 02:00:00"):
with sheraf.connection(commit=True):
m = Model.create()
with libfaketime.fake_time("2014-08-04 06:00:00"):
with sheraf.connection() as conn:
m = Model.read(m.id)
m.attr = 42
assert datetime.datetime(2014, 8, 4, 2) == m.last_update_datetime()
conn.transaction_manager.commit()
assert datetime.datetime(2014, 8, 4, 6) == m.last_update_datetime()
with sheraf.connection():
m = Model.read(m.id)
assert datetime.datetime(2014, 8, 4, 6) == m.last_update_datetime()
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_default_meta_datetimes(sheraf_database):
sheraf_database.reset()
class Model(tests.UUIDAutoModel):
pass
with sheraf.connection():
m = Model.create()
assert m.creation_datetime() is None
assert m.last_update_datetime() is None
@libfaketime.fake_time("2014-08-04 01:01:01")
def test_datetime_auto_initialization(sheraf_database):
sheraf_database.reset()
class Model(tests.UUIDAutoModel):
dta = sheraf.DateTimeAttribute(lazy=False, default=datetime.datetime.now)
with sheraf.connection():
assert datetime.datetime(2014, 8, 4, 1, 1, 1) == Model.create().dta
def test_time_attribute(sheraf_database):
class Model(tests.UUIDAutoModel):
time = sheraf.TimeAttribute()
with sheraf.connection(commit=True):
m = Model.create()
assert m.time is None
m.time = datetime.time(12, 13, 14)
assert m.mapping["time"] == 43994000000
assert m.time == datetime.time(12, 13, 14)
with sheraf.connection():
m = Model.read(m.id)
m.time = datetime.time(12, 13, 14)
assert m.mapping["time"] == 43994000000
assert m.time == datetime.time(12, 13, 14)
with sheraf.connection(commit=True):
m = Model.read(m.id)
m.time = None
assert m.time is None
assert m.mapping["time"] == -1
with sheraf.connection():
m = Model.read(m.id)
assert m.time is None
assert m.mapping["time"] == -1
def test_date_attribute(sheraf_database):
class Model(tests.UUIDAutoModel):
date = sheraf.DateAttribute()
with sheraf.connection(commit=True):
m = Model.create()
assert m.date is None
m.date = datetime.date(1971, 1, 1)
assert m.mapping["date"] == 365
assert m.date == datetime.date(1971, 1, 1)
with sheraf.connection():
m = Model.read(m.id)
m.date = datetime.date(1971, 1, 1)
assert m.mapping["date"] == 365
assert m.date == datetime.date(1971, 1, 1)
with sheraf.connection(commit=True):
m = Model.read(m.id)
m.date = None
assert m.date is None
assert m.mapping["date"] == -1
with sheraf.connection():
m = Model.read(m.id)
assert m.date is None
assert m.mapping["date"] == -1
| true
| true
|
1c404bbb89804ba8726d68feddaa3e1754470abe
| 12,814
|
py
|
Python
|
tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_householder.py
|
chrism0dwk/probability
|
ab260f15cae94c6802c2f2769fb448ad213b79cd
|
[
"Apache-2.0"
] | 2
|
2020-02-21T06:30:00.000Z
|
2021-08-08T19:29:15.000Z
|
tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_householder.py
|
chrism0dwk/probability
|
ab260f15cae94c6802c2f2769fb448ad213b79cd
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_householder.py
|
chrism0dwk/probability
|
ab260f15cae94c6802c2f2769fb448ad213b79cd
|
[
"Apache-2.0"
] | 1
|
2020-05-31T13:08:33.000Z
|
2020-05-31T13:08:33.000Z
|
# Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.
# DO NOT MODIFY DIRECTLY.
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# pylint: disable=line-too-long
# pylint: disable=reimported
# pylint: disable=g-bool-id-comparison
# pylint: disable=g-statement-before-imports
# pylint: disable=bad-continuation
# pylint: disable=useless-import-alias
# pylint: disable=property-with-parameters
# pylint: disable=trailing-whitespace
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a Householder transformation."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
from tensorflow_probability.python.internal.backend.numpy import errors
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops
from tensorflow_probability.python.internal.backend.numpy import control_flow as control_flow_ops
from tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_util
# from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorHouseholder",]
# @tf_export("linalg.LinearOperatorHouseholder")
class LinearOperatorHouseholder(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] of Householder transformations.
This operator acts like a [batch] of householder reflections with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorHouseholder` is initialized with a (batch) vector.
A Householder reflection, defined via a vector `v`, which reflects points
in `R^n` about the hyperplane orthogonal to `v` and through the origin.
```python
# Create a 2 x 2 householder transform.
vec = [1 / np.sqrt(2), 1. / np.sqrt(2)]
operator = LinearOperatorHouseholder(vec)
operator.to_dense()
==> [[0., -1.]
[-1., -0.]]
tensor_shape.TensorShape(operator.shape)
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
tensor_shape.TensorShape(operator.shape) = [B1,...,Bb] + [N, N], with b >= 0
tensor_shape.TensorShape(x.shape) = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
reflection_axis,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorHouseholder"):
r"""Initialize a `LinearOperatorHouseholder`.
Args:
reflection_axis: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The vector defining the hyperplane to reflect about.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. This is autoset to true
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
This is autoset to false.
is_square: Expect that this operator acts like square [batch] matrices.
This is autoset to true.
name: A name for this `LinearOperator`.
Raises:
ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is
not `False` or `is_square` is not `True`.
"""
parameters = dict(
reflection_axis=reflection_axis,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
with ops.name_scope(name, values=[reflection_axis]):
self._reflection_axis = linear_operator_util.convert_nonref_to_tensor(
reflection_axis, name="reflection_axis")
self._check_reflection_axis(self._reflection_axis)
# Check and auto-set hints.
if is_self_adjoint is False: # pylint:disable=g-bool-id-comparison
raise ValueError("A Householder operator is always self adjoint.")
else:
is_self_adjoint = True
if is_positive_definite is True: # pylint:disable=g-bool-id-comparison
raise ValueError(
"A Householder operator is always non-positive definite.")
else:
is_positive_definite = False
if is_square is False: # pylint:disable=g-bool-id-comparison
raise ValueError("A Householder operator is always square.")
is_square = True
super(LinearOperatorHouseholder, self).__init__(
dtype=self._reflection_axis.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
# TODO(b/143910018) Remove graph_parents in V3.
self._set_graph_parents([self._reflection_axis])
def _check_reflection_axis(self, reflection_axis):
"""Static check of reflection_axis."""
if (tensor_shape.TensorShape(reflection_axis.shape).ndims is not None and
tensor_shape.TensorShape(reflection_axis.shape).ndims < 1):
raise ValueError(
"Argument reflection_axis must have at least 1 dimension. "
"Found: %s" % reflection_axis)
def _shape(self):
# If d_shape = [5, 3], we return [5, 3, 3].
d_shape = tensor_shape.TensorShape(self._reflection_axis.shape)
return d_shape.concatenate(d_shape[-1:])
def _shape_tensor(self):
d_shape = array_ops.shape(self._reflection_axis)
k = d_shape[-1]
return array_ops.concat((d_shape, [k]), 0)
def _assert_non_singular(self):
return control_flow_ops.no_op("assert_non_singular")
def _assert_positive_definite(self):
raise errors.InvalidArgumentError(
node_def=None, op=None, message="Householder operators are always "
"non-positive definite.")
def _assert_self_adjoint(self):
return control_flow_ops.no_op("assert_self_adjoint")
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Given a vector `v`, we would like to reflect `x` about the hyperplane
# orthogonal to `v` going through the origin. We first project `x` to `v`
# to get v * dot(v, x) / dot(v, v). After we project, we can reflect the
# projection about the hyperplane by flipping sign to get
# -v * dot(v, x) / dot(v, v). Finally, we can add back the component
# that is orthogonal to v. This is invariant under reflection, since the
# whole hyperplane is invariant. This component is equal to x - v * dot(v,
# x) / dot(v, v), giving the formula x - 2 * v * dot(v, x) / dot(v, v)
# for the reflection.
# Note that because this is a reflection, it lies in O(n) (for real vector
# spaces) or U(n) (for complex vector spaces), and thus is its own adjoint.
reflection_axis = ops.convert_to_tensor(
self.reflection_axis)
x = linalg.adjoint(x) if adjoint_arg else x
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
mat = normalized_axis[..., _ops.newaxis]
x_dot_normalized_v = _linalg.matmul(mat, x, adjoint_a=True)
return x - 2 * mat * x_dot_normalized_v
def _trace(self):
# We have (n - 1) +1 eigenvalues and a single -1 eigenvalue.
shape = self.shape_tensor()
return _ops.cast(
self._domain_dimension_tensor(shape=shape) - 2,
self.dtype) * array_ops.ones(
shape=self._batch_shape_tensor(shape=shape), dtype=self.dtype)
def _determinant(self):
# For householder transformations, the determinant is -1.
return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _log_abs_determinant(self):
# Orthogonal matrix -> log|Q| = 0.
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# A householder reflection is a reflection, hence is idempotent. Thus we
# can just apply a matmul.
return self._matmul(rhs, adjoint, adjoint_arg)
def _to_dense(self):
reflection_axis = ops.convert_to_tensor(
self.reflection_axis)
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
mat = normalized_axis[..., _ops.newaxis]
matrix = -2 * _linalg.matmul(mat, mat, adjoint_b=True)
return _linalg.set_diag(
matrix, 1. + _linalg.diag_part(matrix))
def _diag_part(self):
reflection_axis = ops.convert_to_tensor(
self.reflection_axis)
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis)
def _eigvals(self):
# We have (n - 1) +1 eigenvalues and a single -1 eigenvalue.
result_shape = array_ops.shape(self.reflection_axis)
n = result_shape[-1]
ones_shape = array_ops.concat([result_shape[:-1], [n - 1]], axis=-1)
neg_shape = array_ops.concat([result_shape[:-1], [1]], axis=-1)
eigvals = array_ops.ones(shape=ones_shape, dtype=self.dtype)
eigvals = array_ops.concat(
[-array_ops.ones(shape=neg_shape, dtype=self.dtype), eigvals], axis=-1)
return eigvals
def _cond(self):
# Householder matrices are rotations which have condition number 1.
return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
@property
def reflection_axis(self):
return self._reflection_axis
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg
from tensorflow_probability.python.internal.backend.numpy import ops as _ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
from tensorflow_probability.python.internal.backend.numpy import private
distribution_util = private.LazyLoader(
"distribution_util", globals(),
"tensorflow_probability.substrates.numpy.internal.distribution_util")
tensorshape_util = private.LazyLoader(
"tensorshape_util", globals(),
"tensorflow_probability.substrates.numpy.internal.tensorshape_util")
| 41.603896
| 99
| 0.699938
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.internal.backend.numpy import errors
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops
from tensorflow_probability.python.internal.backend.numpy import control_flow as control_flow_ops
from tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator
from tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_util
__all__ = ["LinearOperatorHouseholder",]
class LinearOperatorHouseholder(linear_operator.LinearOperator):
def __init__(self,
reflection_axis,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorHouseholder"):
parameters = dict(
reflection_axis=reflection_axis,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
with ops.name_scope(name, values=[reflection_axis]):
self._reflection_axis = linear_operator_util.convert_nonref_to_tensor(
reflection_axis, name="reflection_axis")
self._check_reflection_axis(self._reflection_axis)
if is_self_adjoint is False:
raise ValueError("A Householder operator is always self adjoint.")
else:
is_self_adjoint = True
if is_positive_definite is True:
raise ValueError(
"A Householder operator is always non-positive definite.")
else:
is_positive_definite = False
if is_square is False:
raise ValueError("A Householder operator is always square.")
is_square = True
super(LinearOperatorHouseholder, self).__init__(
dtype=self._reflection_axis.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
self._set_graph_parents([self._reflection_axis])
def _check_reflection_axis(self, reflection_axis):
if (tensor_shape.TensorShape(reflection_axis.shape).ndims is not None and
tensor_shape.TensorShape(reflection_axis.shape).ndims < 1):
raise ValueError(
"Argument reflection_axis must have at least 1 dimension. "
"Found: %s" % reflection_axis)
def _shape(self):
d_shape = tensor_shape.TensorShape(self._reflection_axis.shape)
return d_shape.concatenate(d_shape[-1:])
def _shape_tensor(self):
d_shape = array_ops.shape(self._reflection_axis)
k = d_shape[-1]
return array_ops.concat((d_shape, [k]), 0)
def _assert_non_singular(self):
return control_flow_ops.no_op("assert_non_singular")
def _assert_positive_definite(self):
raise errors.InvalidArgumentError(
node_def=None, op=None, message="Householder operators are always "
"non-positive definite.")
def _assert_self_adjoint(self):
return control_flow_ops.no_op("assert_self_adjoint")
def _matmul(self, x, adjoint=False, adjoint_arg=False):
reflection_axis = ops.convert_to_tensor(
self.reflection_axis)
x = linalg.adjoint(x) if adjoint_arg else x
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
mat = normalized_axis[..., _ops.newaxis]
x_dot_normalized_v = _linalg.matmul(mat, x, adjoint_a=True)
return x - 2 * mat * x_dot_normalized_v
def _trace(self):
shape = self.shape_tensor()
return _ops.cast(
self._domain_dimension_tensor(shape=shape) - 2,
self.dtype) * array_ops.ones(
shape=self._batch_shape_tensor(shape=shape), dtype=self.dtype)
def _determinant(self):
return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _log_abs_determinant(self):
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
return self._matmul(rhs, adjoint, adjoint_arg)
def _to_dense(self):
reflection_axis = ops.convert_to_tensor(
self.reflection_axis)
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
mat = normalized_axis[..., _ops.newaxis]
matrix = -2 * _linalg.matmul(mat, mat, adjoint_b=True)
return _linalg.set_diag(
matrix, 1. + _linalg.diag_part(matrix))
def _diag_part(self):
reflection_axis = ops.convert_to_tensor(
self.reflection_axis)
normalized_axis = reflection_axis / linalg.norm(
reflection_axis, axis=-1, keepdims=True)
return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis)
def _eigvals(self):
result_shape = array_ops.shape(self.reflection_axis)
n = result_shape[-1]
ones_shape = array_ops.concat([result_shape[:-1], [n - 1]], axis=-1)
neg_shape = array_ops.concat([result_shape[:-1], [1]], axis=-1)
eigvals = array_ops.ones(shape=ones_shape, dtype=self.dtype)
eigvals = array_ops.concat(
[-array_ops.ones(shape=neg_shape, dtype=self.dtype), eigvals], axis=-1)
return eigvals
def _cond(self):
return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
@property
def reflection_axis(self):
return self._reflection_axis
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg
from tensorflow_probability.python.internal.backend.numpy import ops as _ops
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
from tensorflow_probability.python.internal.backend.numpy import private
distribution_util = private.LazyLoader(
"distribution_util", globals(),
"tensorflow_probability.substrates.numpy.internal.distribution_util")
tensorshape_util = private.LazyLoader(
"tensorshape_util", globals(),
"tensorflow_probability.substrates.numpy.internal.tensorshape_util")
| true
| true
|
1c404c22e8d3ea4700a7f2e9200aa33937f5295d
| 479
|
py
|
Python
|
compsocsite/polls/migrations/0057_item_item_description.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | 8
|
2017-03-07T19:46:51.000Z
|
2021-06-01T01:41:37.000Z
|
compsocsite/polls/migrations/0057_item_item_description.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | null | null | null |
compsocsite/polls/migrations/0057_item_item_description.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | 9
|
2016-06-09T03:36:20.000Z
|
2019-09-11T20:56:23.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-25 14:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0056_auto_20160722_1606'),
]
operations = [
migrations.AddField(
model_name='item',
name='item_description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| 22.809524
| 75
| 0.626305
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0056_auto_20160722_1606'),
]
operations = [
migrations.AddField(
model_name='item',
name='item_description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| true
| true
|
1c404c92f557fecce42c589d4825172437f2ff78
| 3,228
|
py
|
Python
|
lib/setupDeployment.py
|
cmusatyalab/PyEdgeSim
|
edc13488d66ec9582543e5f116c6611ef7b65d9d
|
[
"Apache-2.0"
] | null | null | null |
lib/setupDeployment.py
|
cmusatyalab/PyEdgeSim
|
edc13488d66ec9582543e5f116c6611ef7b65d9d
|
[
"Apache-2.0"
] | null | null | null |
lib/setupDeployment.py
|
cmusatyalab/PyEdgeSim
|
edc13488d66ec9582543e5f116c6611ef7b65d9d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
import os
import pprint
import json
import shutil
sys.path.append("./lib")
from pyutils import *
from config import *
from simlogging import mconsole,configureLogging
from advantedgeapilib import *
from setupBuildEnv import setMEEPPATH
cnf = initConfig()
api = AdvantEDGEApi()
def main():
configureLogging()
getOpenRTiST(cnf)
startOpenRTiST(cnf)
pass
def deployAdvantEDGE(cnf):
# meepctl = os.path.join(*[cnf['ADVANTEDGEDIR'],"bin","meepctl","meepctl"])
entry = input("Deploy AdvantEDGE? [y/N] ") or "n"
if entry in ['Y','y']:
setMEEPPATH(cnf['ADVANTEDGEDIR'])
meepctl = "meepctl"
if oscmd("{} deploy dep".format(meepctl)) != 0: return -1
if oscmd("{} dockerize all".format(meepctl)) != 0: return -1
if oscmd("{} deploy core".format(meepctl)) != 0: return -1
return 0
def getOpenRTiST(cnf):
entry = input("Get OpenRTiST? [y/N] ") or "n"
if entry in ['Y','y']:
srcimg = "cmusatyalab/openrtist"
dstimg = "meep-docker-registry:30001/openrtist:real"
oscmd("docker pull {}".format(srcimg))
oscmd("docker tag {} {}".format(srcimg,dstimg))
oscmd("docker push {}".format(dstimg))
return 0
def startOpenRTiST(cnf):
entry = input("Deploy the scenario? [y/N] ") or "n"
if entry in ['Y','y']:
scenname = cnf['SCENARIO']
sandbox = cnf['SANDBOX']
api.setSandbox(sandbox)
mconsole("(Re)starting OpenRTiST scenario {} in sandbox {}".format(scenname,sandbox))
api.startScenario(scenname,restart=True)
return 0
def stopDeployment(cnf,settletime=120):
mconsole("Shutting down deployment and waiting {} seconds".format(settletime))
cmdstr = "kubectl get namespace -o json|jq -r '.items[] | select( .metadata.name | test(\"{}\")) | .metadata.name'" \
.format(cnf['SANDBOX'])
ns = cmd0(cmdstr)
if len(ns) > 0:
oscmd("kubectl delete namespace {}".format(cnf['SANDBOX']))
cmdstr = "bash -c 'meepctl delete core;meepctl delete dep;sleep {}'".format(settletime)
oscmd(cmdstr)
def startDeployment(cnf,settletime=0):
mconsole("Starting deployment and waiting {} seconds".format(settletime))
cmdstr = "bash -c 'meepctl deploy dep;meepctl deploy core;sleep {}'".format(settletime)
oscmd(cmdstr)
mconsole("You will need to recreate sandbox {} and deploy scenario {} in it" \
.format(cnf['SANDBOX'],cnf['SCENARIO']))
def installCharts(cnf):
datadir = "./data"
destdir = os.path.expanduser("~/.meep/virt-engine")
entry = input("Install the scenario charts? [y/N] ") or "n"
if entry in ['Y','y']:
for root, _, files in os.walk(datadir):
if len(files) > 0:
fdestdir = root.replace(datadir,destdir)
os.makedirs(fdestdir,exist_ok=True)
for fn in files:
srcfn = os.path.join(root,fn)
dstfn = os.path.join(fdestdir,fn)
shutil.copy2(srcfn,dstfn)
mconsole("In AdvantEDGE console, import and save scenario {} and create sandbox {}".format(cnf['SCENARIO'],cnf['SANDBOX']))
return 0
if __name__ == '__main__': main()
| 35.472527
| 131
| 0.625465
|
import sys
import os
import pprint
import json
import shutil
sys.path.append("./lib")
from pyutils import *
from config import *
from simlogging import mconsole,configureLogging
from advantedgeapilib import *
from setupBuildEnv import setMEEPPATH
cnf = initConfig()
api = AdvantEDGEApi()
def main():
configureLogging()
getOpenRTiST(cnf)
startOpenRTiST(cnf)
pass
def deployAdvantEDGE(cnf):
entry = input("Deploy AdvantEDGE? [y/N] ") or "n"
if entry in ['Y','y']:
setMEEPPATH(cnf['ADVANTEDGEDIR'])
meepctl = "meepctl"
if oscmd("{} deploy dep".format(meepctl)) != 0: return -1
if oscmd("{} dockerize all".format(meepctl)) != 0: return -1
if oscmd("{} deploy core".format(meepctl)) != 0: return -1
return 0
def getOpenRTiST(cnf):
entry = input("Get OpenRTiST? [y/N] ") or "n"
if entry in ['Y','y']:
srcimg = "cmusatyalab/openrtist"
dstimg = "meep-docker-registry:30001/openrtist:real"
oscmd("docker pull {}".format(srcimg))
oscmd("docker tag {} {}".format(srcimg,dstimg))
oscmd("docker push {}".format(dstimg))
return 0
def startOpenRTiST(cnf):
entry = input("Deploy the scenario? [y/N] ") or "n"
if entry in ['Y','y']:
scenname = cnf['SCENARIO']
sandbox = cnf['SANDBOX']
api.setSandbox(sandbox)
mconsole("(Re)starting OpenRTiST scenario {} in sandbox {}".format(scenname,sandbox))
api.startScenario(scenname,restart=True)
return 0
def stopDeployment(cnf,settletime=120):
mconsole("Shutting down deployment and waiting {} seconds".format(settletime))
cmdstr = "kubectl get namespace -o json|jq -r '.items[] | select( .metadata.name | test(\"{}\")) | .metadata.name'" \
.format(cnf['SANDBOX'])
ns = cmd0(cmdstr)
if len(ns) > 0:
oscmd("kubectl delete namespace {}".format(cnf['SANDBOX']))
cmdstr = "bash -c 'meepctl delete core;meepctl delete dep;sleep {}'".format(settletime)
oscmd(cmdstr)
def startDeployment(cnf,settletime=0):
mconsole("Starting deployment and waiting {} seconds".format(settletime))
cmdstr = "bash -c 'meepctl deploy dep;meepctl deploy core;sleep {}'".format(settletime)
oscmd(cmdstr)
mconsole("You will need to recreate sandbox {} and deploy scenario {} in it" \
.format(cnf['SANDBOX'],cnf['SCENARIO']))
def installCharts(cnf):
datadir = "./data"
destdir = os.path.expanduser("~/.meep/virt-engine")
entry = input("Install the scenario charts? [y/N] ") or "n"
if entry in ['Y','y']:
for root, _, files in os.walk(datadir):
if len(files) > 0:
fdestdir = root.replace(datadir,destdir)
os.makedirs(fdestdir,exist_ok=True)
for fn in files:
srcfn = os.path.join(root,fn)
dstfn = os.path.join(fdestdir,fn)
shutil.copy2(srcfn,dstfn)
mconsole("In AdvantEDGE console, import and save scenario {} and create sandbox {}".format(cnf['SCENARIO'],cnf['SANDBOX']))
return 0
if __name__ == '__main__': main()
| true
| true
|
1c404e29b56c9d3befa180e62b8cbeb33576e541
| 23,687
|
py
|
Python
|
certbot/reverter.py
|
cybersimon/certbot
|
174e59486338b8f3e17300ef9937a3182e733ac7
|
[
"Apache-2.0"
] | null | null | null |
certbot/reverter.py
|
cybersimon/certbot
|
174e59486338b8f3e17300ef9937a3182e733ac7
|
[
"Apache-2.0"
] | null | null | null |
certbot/reverter.py
|
cybersimon/certbot
|
174e59486338b8f3e17300ef9937a3182e733ac7
|
[
"Apache-2.0"
] | null | null | null |
"""Reverter class saves configuration checkpoints and allows for recovery."""
import csv
import glob
import logging
import os
import shutil
import time
import traceback
import zope.component
from certbot import constants
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.display import util as display_util
logger = logging.getLogger(__name__)
class Reverter(object):
"""Reverter Class - save and revert configuration checkpoints.
This class can be used by the plugins, especially Installers, to
undo changes made to the user's system. Modifications to files and
commands to do undo actions taken by the plugin should be registered
with this class before the action is taken.
Once a change has been registered with this class, there are three
states the change can be in. First, the change can be a temporary
change. This should be used for changes that will soon be reverted,
such as config changes for the purpose of solving a challenge.
Changes are added to this state through calls to
:func:`~add_to_temp_checkpoint` and reverted when
:func:`~revert_temporary_config` or :func:`~recovery_routine` is
called.
The second state a change can be in is in progress. These changes
are not temporary, however, they also have not been finalized in a
checkpoint. A change must become in progress before it can be
finalized. Changes are added to this state through calls to
:func:`~add_to_checkpoint` and reverted when
:func:`~recovery_routine` is called.
The last state a change can be in is finalized in a checkpoint. A
change is put into this state by first becoming an in progress
change and then calling :func:`~finalize_checkpoint`. Changes
in this state can be reverted through calls to
:func:`~rollback_checkpoints`.
As a final note, creating new files and registering undo commands
are handled specially and use the methods
:func:`~register_file_creation` and :func:`~register_undo_command`
respectively. Both of these methods can be used to create either
temporary or in progress changes.
.. note:: Consider moving everything over to CSV format.
:param config: Configuration.
:type config: :class:`certbot.interfaces.IConfig`
"""
def __init__(self, config):
self.config = config
util.make_or_verify_dir(
config.backup_dir, constants.CONFIG_DIRS_MODE, os.geteuid(),
self.config.strict_permissions)
def revert_temporary_config(self):
"""Reload users original configuration files after a temporary save.
This function should reinstall the users original configuration files
for all saves with temporary=True
:raises .ReverterError: when unable to revert config
"""
if os.path.isdir(self.config.temp_checkpoint_dir):
try:
self._recover_checkpoint(self.config.temp_checkpoint_dir)
except errors.ReverterError:
# We have a partial or incomplete recovery
logger.fatal("Incomplete or failed recovery for %s",
self.config.temp_checkpoint_dir)
raise errors.ReverterError("Unable to revert temporary config")
def rollback_checkpoints(self, rollback=1):
"""Revert 'rollback' number of configuration checkpoints.
:param int rollback: Number of checkpoints to reverse. A str num will be
cast to an integer. So "2" is also acceptable.
:raises .ReverterError:
if there is a problem with the input or if the function is
unable to correctly revert the configuration checkpoints
"""
try:
rollback = int(rollback)
except ValueError:
logger.error("Rollback argument must be a positive integer")
raise errors.ReverterError("Invalid Input")
# Sanity check input
if rollback < 0:
logger.error("Rollback argument must be a positive integer")
raise errors.ReverterError("Invalid Input")
backups = os.listdir(self.config.backup_dir)
backups.sort()
if not backups:
logger.warning(
"Certbot hasn't modified your configuration, so rollback "
"isn't available.")
elif len(backups) < rollback:
logger.warning("Unable to rollback %d checkpoints, only %d exist",
rollback, len(backups))
while rollback > 0 and backups:
cp_dir = os.path.join(self.config.backup_dir, backups.pop())
try:
self._recover_checkpoint(cp_dir)
except errors.ReverterError:
logger.fatal("Failed to load checkpoint during rollback")
raise errors.ReverterError(
"Unable to load checkpoint during rollback")
rollback -= 1
def view_config_changes(self, for_logging=False, num=None):
"""Displays all saved checkpoints.
All checkpoints are printed by
:meth:`certbot.interfaces.IDisplay.notification`.
.. todo:: Decide on a policy for error handling, OSError IOError...
:raises .errors.ReverterError: If invalid directory structure.
"""
backups = os.listdir(self.config.backup_dir)
backups.sort(reverse=True)
if num:
backups = backups[:num]
if not backups:
logger.info("Certbot has not saved backups of your configuration")
return
# Make sure there isn't anything unexpected in the backup folder
# There should only be timestamped (float) directories
try:
for bkup in backups:
float(bkup)
except ValueError:
raise errors.ReverterError(
"Invalid directories in {0}".format(self.config.backup_dir))
output = []
for bkup in backups:
output.append(time.ctime(float(bkup)))
cur_dir = os.path.join(self.config.backup_dir, bkup)
with open(os.path.join(cur_dir, "CHANGES_SINCE")) as changes_fd:
output.append(changes_fd.read())
output.append("Affected files:")
with open(os.path.join(cur_dir, "FILEPATHS")) as paths_fd:
filepaths = paths_fd.read().splitlines()
for path in filepaths:
output.append(" {0}".format(path))
if os.path.isfile(os.path.join(cur_dir, "NEW_FILES")):
with open(os.path.join(cur_dir, "NEW_FILES")) as new_fd:
output.append("New Configuration Files:")
filepaths = new_fd.read().splitlines()
for path in filepaths:
output.append(" {0}".format(path))
output.append(os.linesep)
if for_logging:
return os.linesep.join(output)
zope.component.getUtility(interfaces.IDisplay).notification(
os.linesep.join(output), display_util.HEIGHT)
def add_to_temp_checkpoint(self, save_files, save_notes):
"""Add files to temporary checkpoint.
:param set save_files: set of filepaths to save
:param str save_notes: notes about changes during the save
"""
self._add_to_checkpoint_dir(
self.config.temp_checkpoint_dir, save_files, save_notes)
def add_to_checkpoint(self, save_files, save_notes):
"""Add files to a permanent checkpoint.
:param set save_files: set of filepaths to save
:param str save_notes: notes about changes during the save
"""
# Check to make sure we are not overwriting a temp file
self._check_tempfile_saves(save_files)
self._add_to_checkpoint_dir(
self.config.in_progress_dir, save_files, save_notes)
def _add_to_checkpoint_dir(self, cp_dir, save_files, save_notes):
"""Add save files to checkpoint directory.
:param str cp_dir: Checkpoint directory filepath
:param set save_files: set of files to save
:param str save_notes: notes about changes made during the save
:raises IOError: if unable to open cp_dir + FILEPATHS file
:raises .ReverterError: if unable to add checkpoint
"""
util.make_or_verify_dir(
cp_dir, constants.CONFIG_DIRS_MODE, os.geteuid(),
self.config.strict_permissions)
op_fd, existing_filepaths = self._read_and_append(
os.path.join(cp_dir, "FILEPATHS"))
idx = len(existing_filepaths)
for filename in save_files:
# No need to copy/index already existing files
# The oldest copy already exists in the directory...
if filename not in existing_filepaths:
# Tag files with index so multiple files can
# have the same filename
logger.debug("Creating backup of %s", filename)
try:
shutil.copy2(filename, os.path.join(
cp_dir, os.path.basename(filename) + "_" + str(idx)))
op_fd.write(filename + os.linesep)
# http://stackoverflow.com/questions/4726260/effective-use-of-python-shutil-copy2
except IOError:
op_fd.close()
logger.error(
"Unable to add file %s to checkpoint %s",
filename, cp_dir)
raise errors.ReverterError(
"Unable to add file {0} to checkpoint "
"{1}".format(filename, cp_dir))
idx += 1
op_fd.close()
with open(os.path.join(cp_dir, "CHANGES_SINCE"), "a") as notes_fd:
notes_fd.write(save_notes)
def _read_and_append(self, filepath): # pylint: disable=no-self-use
"""Reads the file lines and returns a file obj.
Read the file returning the lines, and a pointer to the end of the file.
"""
# Open up filepath differently depending on if it already exists
if os.path.isfile(filepath):
op_fd = open(filepath, "r+")
lines = op_fd.read().splitlines()
else:
lines = []
op_fd = open(filepath, "w")
return op_fd, lines
    def _recover_checkpoint(self, cp_dir):
        """Recover a specific checkpoint.

        Recover a specific checkpoint provided by cp_dir
        Note: this function does not reload augeas.

        :param str cp_dir: checkpoint directory file path

        :raises errors.ReverterError: If unable to recover checkpoint

        """
        # Undo all commands
        if os.path.isfile(os.path.join(cp_dir, "COMMANDS")):
            self._run_undo_commands(os.path.join(cp_dir, "COMMANDS"))
        # Revert all changed files
        if os.path.isfile(os.path.join(cp_dir, "FILEPATHS")):
            try:
                with open(os.path.join(cp_dir, "FILEPATHS")) as paths_fd:
                    filepaths = paths_fd.read().splitlines()
                    # Backups were saved as "<basename>_<index>", with the
                    # index equal to the path's line number in FILEPATHS
                    # (see _add_to_checkpoint_dir).
                    for idx, path in enumerate(filepaths):
                        shutil.copy2(os.path.join(
                            cp_dir,
                            os.path.basename(path) + "_" + str(idx)), path)
            except (IOError, OSError):
                # This file is required in all checkpoints.
                logger.error("Unable to recover files from %s", cp_dir)
                raise errors.ReverterError(
                    "Unable to recover files from %s" % cp_dir)
        # Remove any newly added files if they exist
        self._remove_contained_files(os.path.join(cp_dir, "NEW_FILES"))
        try:
            # Checkpoint fully applied; delete it so it cannot be replayed.
            shutil.rmtree(cp_dir)
        except OSError:
            logger.error("Unable to remove directory: %s", cp_dir)
            raise errors.ReverterError(
                "Unable to remove directory: %s" % cp_dir)
    def _run_undo_commands(self, filepath):  # pylint: disable=no-self-use
        """Run all commands in a file.

        Commands are replayed in reverse registration order so the most
        recently registered action is undone first.  Failures are logged
        but do not abort the remaining commands (best effort).
        """
        # NOTE(review): binary mode is what the Python 2 csv module expects
        # (this file uses xrange elsewhere, so py2 is assumed); on Python 3
        # this open would need mode "r" with newline="" -- confirm target
        # interpreter before changing.
        with open(filepath, 'rb') as csvfile:
            csvreader = csv.reader(csvfile)
            for command in reversed(list(csvreader)):
                try:
                    util.run_script(command)
                except errors.SubprocessError:
                    logger.error(
                        "Unable to run undo command: %s", " ".join(command))
def _check_tempfile_saves(self, save_files):
"""Verify save isn't overwriting any temporary files.
:param set save_files: Set of files about to be saved.
:raises certbot.errors.ReverterError:
when save is attempting to overwrite a temporary file.
"""
protected_files = []
# Get temp modified files
temp_path = os.path.join(self.config.temp_checkpoint_dir, "FILEPATHS")
if os.path.isfile(temp_path):
with open(temp_path, "r") as protected_fd:
protected_files.extend(protected_fd.read().splitlines())
# Get temp new files
new_path = os.path.join(self.config.temp_checkpoint_dir, "NEW_FILES")
if os.path.isfile(new_path):
with open(new_path, "r") as protected_fd:
protected_files.extend(protected_fd.read().splitlines())
# Verify no save_file is in protected_files
for filename in protected_files:
if filename in save_files:
raise errors.ReverterError(
"Attempting to overwrite challenge "
"file - %s" % filename)
def register_file_creation(self, temporary, *files):
r"""Register the creation of all files during certbot execution.
Call this method before writing to the file to make sure that the
file will be cleaned up if the program exits unexpectedly.
(Before a save occurs)
:param bool temporary: If the file creation registry is for
a temp or permanent save.
:param \*files: file paths (str) to be registered
:raises certbot.errors.ReverterError: If
call does not contain necessary parameters or if the file creation
is unable to be registered.
"""
# Make sure some files are provided... as this is an error
# Made this mistake in my initial implementation of apache.dvsni.py
if not files:
raise errors.ReverterError("Forgot to provide files to registration call")
cp_dir = self._get_cp_dir(temporary)
# Append all new files (that aren't already registered)
new_fd = None
try:
new_fd, ex_files = self._read_and_append(os.path.join(cp_dir, "NEW_FILES"))
for path in files:
if path not in ex_files:
new_fd.write("{0}{1}".format(path, os.linesep))
except (IOError, OSError):
logger.error("Unable to register file creation(s) - %s", files)
raise errors.ReverterError(
"Unable to register file creation(s) - {0}".format(files))
finally:
if new_fd is not None:
new_fd.close()
def register_undo_command(self, temporary, command):
"""Register a command to be run to undo actions taken.
.. warning:: This function does not enforce order of operations in terms
of file modification vs. command registration. All undo commands
are run first before all normal files are reverted to their previous
state. If you need to maintain strict order, you may create
checkpoints before and after the the command registration. This
function may be improved in the future based on demand.
:param bool temporary: Whether the command should be saved in the
IN_PROGRESS or TEMPORARY checkpoints.
:param command: Command to be run.
:type command: list of str
"""
commands_fp = os.path.join(self._get_cp_dir(temporary), "COMMANDS")
command_file = None
try:
if os.path.isfile(commands_fp):
command_file = open(commands_fp, "ab")
else:
command_file = open(commands_fp, "wb")
csvwriter = csv.writer(command_file)
csvwriter.writerow(command)
except (IOError, OSError):
logger.error("Unable to register undo command")
raise errors.ReverterError(
"Unable to register undo command.")
finally:
if command_file is not None:
command_file.close()
def _get_cp_dir(self, temporary):
"""Return the proper reverter directory."""
if temporary:
cp_dir = self.config.temp_checkpoint_dir
else:
cp_dir = self.config.in_progress_dir
util.make_or_verify_dir(
cp_dir, constants.CONFIG_DIRS_MODE, os.geteuid(),
self.config.strict_permissions)
return cp_dir
    def recovery_routine(self):
        """Revert configuration to most recent finalized checkpoint.

        Remove all changes (temporary and permanent) that have not been
        finalized. This is useful to protect against crashes and other
        execution interruptions.

        :raises .errors.ReverterError: If unable to recover the configuration
        """
        # First, any changes found in IConfig.temp_checkpoint_dir are removed,
        # then IN_PROGRESS changes are removed The order is important.
        # IN_PROGRESS is unable to add files that are already added by a TEMP
        # change. Thus TEMP must be rolled back first because that will be the
        # 'latest' occurrence of the file.
        self.revert_temporary_config()
        if os.path.isdir(self.config.in_progress_dir):
            try:
                self._recover_checkpoint(self.config.in_progress_dir)
            except errors.ReverterError:
                # We have a partial or incomplete recovery
                logger.fatal("Incomplete or failed recovery for IN_PROGRESS "
                             "checkpoint - %s",
                             self.config.in_progress_dir)
                raise errors.ReverterError(
                    "Incomplete or failed recovery for IN_PROGRESS checkpoint "
                    "- %s" % self.config.in_progress_dir)
def _remove_contained_files(self, file_list): # pylint: disable=no-self-use
"""Erase all files contained within file_list.
:param str file_list: file containing list of file paths to be deleted
:returns: Success
:rtype: bool
:raises certbot.errors.ReverterError: If
all files within file_list cannot be removed
"""
# Check to see that file exists to differentiate can't find file_list
# and can't remove filepaths within file_list errors.
if not os.path.isfile(file_list):
return False
try:
with open(file_list, "r") as list_fd:
filepaths = list_fd.read().splitlines()
for path in filepaths:
# Files are registered before they are added... so
# check to see if file exists first
if os.path.lexists(path):
os.remove(path)
else:
logger.warning(
"File: %s - Could not be found to be deleted %s - "
"LE probably shut down unexpectedly",
os.linesep, path)
except (IOError, OSError):
logger.fatal(
"Unable to remove filepaths contained within %s", file_list)
raise errors.ReverterError(
"Unable to remove filepaths contained within "
"{0}".format(file_list))
return True
    def finalize_checkpoint(self, title):
        """Finalize the checkpoint.

        Timestamps and permanently saves all changes made through the use
        of :func:`~add_to_checkpoint` and :func:`~register_file_creation`

        :param str title: Title describing checkpoint

        :raises certbot.errors.ReverterError: when the
            checkpoint is not able to be finalized.
        """
        # Check to make sure an "in progress" directory exists
        if not os.path.isdir(self.config.in_progress_dir):
            return
        changes_since_path = os.path.join(self.config.in_progress_dir, "CHANGES_SINCE")
        changes_since_tmp_path = os.path.join(self.config.in_progress_dir, "CHANGES_SINCE.tmp")
        # An empty checkpoint still needs a CHANGES_SINCE file so the title
        # block below has something to prepend to.
        if not os.path.exists(changes_since_path):
            logger.info("Rollback checkpoint is empty (no changes made?)")
            with open(changes_since_path, 'w') as f:
                f.write("No changes\n")
        # Add title to self.config.in_progress_dir CHANGES_SINCE
        try:
            # Write the title header followed by the original notes into a
            # temp file, then replace the original via rename (atomic on
            # POSIX when both paths are on the same filesystem).
            with open(changes_since_tmp_path, "w") as changes_tmp:
                changes_tmp.write("-- %s --\n" % title)
                with open(changes_since_path, "r") as changes_orig:
                    changes_tmp.write(changes_orig.read())
            # Move self.config.in_progress_dir to Backups directory
            shutil.move(changes_since_tmp_path, changes_since_path)
        except (IOError, OSError):
            logger.error("Unable to finalize checkpoint - adding title")
            logger.debug("Exception was:\n%s", traceback.format_exc())
            raise errors.ReverterError("Unable to add title")
        # rename the directory as a timestamp
        self._timestamp_progress_dir()
def _checkpoint_timestamp(self):
"Determine the timestamp of the checkpoint, enforcing monotonicity."
timestamp = str(time.time())
others = glob.glob(os.path.join(self.config.backup_dir, "[0-9]*"))
others = [os.path.basename(d) for d in others]
others.append(timestamp)
others.sort()
if others[-1] != timestamp:
timetravel = str(float(others[-1]) + 1)
logger.warning("Current timestamp %s does not correspond to newest reverter "
"checkpoint; your clock probably jumped. Time travelling to %s",
timestamp, timetravel)
timestamp = timetravel
elif len(others) > 1 and others[-2] == timestamp:
# It is possible if the checkpoints are made extremely quickly
# that will result in a name collision.
logger.debug("Race condition with timestamp %s, incrementing by 0.01", timestamp)
timetravel = str(float(others[-1]) + 0.01)
timestamp = timetravel
return timestamp
    def _timestamp_progress_dir(self):
        """Timestamp the checkpoint.

        Renames the IN_PROGRESS checkpoint directory to its finalized
        timestamp name inside ``backup_dir``.
        """
        # It is possible save checkpoints faster than 1 per second resulting in
        # collisions in the naming convention.
        for _ in xrange(2):
            timestamp = self._checkpoint_timestamp()
            final_dir = os.path.join(self.config.backup_dir, timestamp)
            try:
                os.rename(self.config.in_progress_dir, final_dir)
                return
            except OSError:
                # Rename failed (e.g. the target name already exists);
                # retry once with a freshly computed timestamp.
                logger.warning("Extreme, unexpected race condition, retrying (%s)", timestamp)
        # After both attempts failed... something is probably wrong here...
        # (the old comment claimed 10 attempts; the loop only makes 2)
        logger.error(
            "Unable to finalize checkpoint, %s -> %s",
            self.config.in_progress_dir, final_dir)
        raise errors.ReverterError(
            "Unable to finalize checkpoint renaming")
| 40.352641
| 97
| 0.615612
|
import csv
import glob
import logging
import os
import shutil
import time
import traceback
import zope.component
from certbot import constants
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.display import util as display_util
logger = logging.getLogger(__name__)
class Reverter(object):
def __init__(self, config):
self.config = config
util.make_or_verify_dir(
config.backup_dir, constants.CONFIG_DIRS_MODE, os.geteuid(),
self.config.strict_permissions)
def revert_temporary_config(self):
if os.path.isdir(self.config.temp_checkpoint_dir):
try:
self._recover_checkpoint(self.config.temp_checkpoint_dir)
except errors.ReverterError:
logger.fatal("Incomplete or failed recovery for %s",
self.config.temp_checkpoint_dir)
raise errors.ReverterError("Unable to revert temporary config")
def rollback_checkpoints(self, rollback=1):
try:
rollback = int(rollback)
except ValueError:
logger.error("Rollback argument must be a positive integer")
raise errors.ReverterError("Invalid Input")
if rollback < 0:
logger.error("Rollback argument must be a positive integer")
raise errors.ReverterError("Invalid Input")
backups = os.listdir(self.config.backup_dir)
backups.sort()
if not backups:
logger.warning(
"Certbot hasn't modified your configuration, so rollback "
"isn't available.")
elif len(backups) < rollback:
logger.warning("Unable to rollback %d checkpoints, only %d exist",
rollback, len(backups))
while rollback > 0 and backups:
cp_dir = os.path.join(self.config.backup_dir, backups.pop())
try:
self._recover_checkpoint(cp_dir)
except errors.ReverterError:
logger.fatal("Failed to load checkpoint during rollback")
raise errors.ReverterError(
"Unable to load checkpoint during rollback")
rollback -= 1
def view_config_changes(self, for_logging=False, num=None):
backups = os.listdir(self.config.backup_dir)
backups.sort(reverse=True)
if num:
backups = backups[:num]
if not backups:
logger.info("Certbot has not saved backups of your configuration")
return
# There should only be timestamped (float) directories
try:
for bkup in backups:
float(bkup)
except ValueError:
raise errors.ReverterError(
"Invalid directories in {0}".format(self.config.backup_dir))
output = []
for bkup in backups:
output.append(time.ctime(float(bkup)))
cur_dir = os.path.join(self.config.backup_dir, bkup)
with open(os.path.join(cur_dir, "CHANGES_SINCE")) as changes_fd:
output.append(changes_fd.read())
output.append("Affected files:")
with open(os.path.join(cur_dir, "FILEPATHS")) as paths_fd:
filepaths = paths_fd.read().splitlines()
for path in filepaths:
output.append(" {0}".format(path))
if os.path.isfile(os.path.join(cur_dir, "NEW_FILES")):
with open(os.path.join(cur_dir, "NEW_FILES")) as new_fd:
output.append("New Configuration Files:")
filepaths = new_fd.read().splitlines()
for path in filepaths:
output.append(" {0}".format(path))
output.append(os.linesep)
if for_logging:
return os.linesep.join(output)
zope.component.getUtility(interfaces.IDisplay).notification(
os.linesep.join(output), display_util.HEIGHT)
def add_to_temp_checkpoint(self, save_files, save_notes):
self._add_to_checkpoint_dir(
self.config.temp_checkpoint_dir, save_files, save_notes)
def add_to_checkpoint(self, save_files, save_notes):
# Check to make sure we are not overwriting a temp file
self._check_tempfile_saves(save_files)
self._add_to_checkpoint_dir(
self.config.in_progress_dir, save_files, save_notes)
def _add_to_checkpoint_dir(self, cp_dir, save_files, save_notes):
util.make_or_verify_dir(
cp_dir, constants.CONFIG_DIRS_MODE, os.geteuid(),
self.config.strict_permissions)
op_fd, existing_filepaths = self._read_and_append(
os.path.join(cp_dir, "FILEPATHS"))
idx = len(existing_filepaths)
for filename in save_files:
# No need to copy/index already existing files
# The oldest copy already exists in the directory...
if filename not in existing_filepaths:
# Tag files with index so multiple files can
# have the same filename
logger.debug("Creating backup of %s", filename)
try:
shutil.copy2(filename, os.path.join(
cp_dir, os.path.basename(filename) + "_" + str(idx)))
op_fd.write(filename + os.linesep)
# http://stackoverflow.com/questions/4726260/effective-use-of-python-shutil-copy2
except IOError:
op_fd.close()
logger.error(
"Unable to add file %s to checkpoint %s",
filename, cp_dir)
raise errors.ReverterError(
"Unable to add file {0} to checkpoint "
"{1}".format(filename, cp_dir))
idx += 1
op_fd.close()
with open(os.path.join(cp_dir, "CHANGES_SINCE"), "a") as notes_fd:
notes_fd.write(save_notes)
def _read_and_append(self, filepath): # pylint: disable=no-self-use
# Open up filepath differently depending on if it already exists
if os.path.isfile(filepath):
op_fd = open(filepath, "r+")
lines = op_fd.read().splitlines()
else:
lines = []
op_fd = open(filepath, "w")
return op_fd, lines
def _recover_checkpoint(self, cp_dir):
# Undo all commands
if os.path.isfile(os.path.join(cp_dir, "COMMANDS")):
self._run_undo_commands(os.path.join(cp_dir, "COMMANDS"))
# Revert all changed files
if os.path.isfile(os.path.join(cp_dir, "FILEPATHS")):
try:
with open(os.path.join(cp_dir, "FILEPATHS")) as paths_fd:
filepaths = paths_fd.read().splitlines()
for idx, path in enumerate(filepaths):
shutil.copy2(os.path.join(
cp_dir,
os.path.basename(path) + "_" + str(idx)), path)
except (IOError, OSError):
# This file is required in all checkpoints.
logger.error("Unable to recover files from %s", cp_dir)
raise errors.ReverterError(
"Unable to recover files from %s" % cp_dir)
# Remove any newly added files if they exist
self._remove_contained_files(os.path.join(cp_dir, "NEW_FILES"))
try:
shutil.rmtree(cp_dir)
except OSError:
logger.error("Unable to remove directory: %s", cp_dir)
raise errors.ReverterError(
"Unable to remove directory: %s" % cp_dir)
def _run_undo_commands(self, filepath): # pylint: disable=no-self-use
with open(filepath, 'rb') as csvfile:
csvreader = csv.reader(csvfile)
for command in reversed(list(csvreader)):
try:
util.run_script(command)
except errors.SubprocessError:
logger.error(
"Unable to run undo command: %s", " ".join(command))
def _check_tempfile_saves(self, save_files):
protected_files = []
# Get temp modified files
temp_path = os.path.join(self.config.temp_checkpoint_dir, "FILEPATHS")
if os.path.isfile(temp_path):
with open(temp_path, "r") as protected_fd:
protected_files.extend(protected_fd.read().splitlines())
# Get temp new files
new_path = os.path.join(self.config.temp_checkpoint_dir, "NEW_FILES")
if os.path.isfile(new_path):
with open(new_path, "r") as protected_fd:
protected_files.extend(protected_fd.read().splitlines())
# Verify no save_file is in protected_files
for filename in protected_files:
if filename in save_files:
raise errors.ReverterError(
"Attempting to overwrite challenge "
"file - %s" % filename)
def register_file_creation(self, temporary, *files):
# Make sure some files are provided... as this is an error
# Made this mistake in my initial implementation of apache.dvsni.py
if not files:
raise errors.ReverterError("Forgot to provide files to registration call")
cp_dir = self._get_cp_dir(temporary)
# Append all new files (that aren't already registered)
new_fd = None
try:
new_fd, ex_files = self._read_and_append(os.path.join(cp_dir, "NEW_FILES"))
for path in files:
if path not in ex_files:
new_fd.write("{0}{1}".format(path, os.linesep))
except (IOError, OSError):
logger.error("Unable to register file creation(s) - %s", files)
raise errors.ReverterError(
"Unable to register file creation(s) - {0}".format(files))
finally:
if new_fd is not None:
new_fd.close()
def register_undo_command(self, temporary, command):
commands_fp = os.path.join(self._get_cp_dir(temporary), "COMMANDS")
command_file = None
try:
if os.path.isfile(commands_fp):
command_file = open(commands_fp, "ab")
else:
command_file = open(commands_fp, "wb")
csvwriter = csv.writer(command_file)
csvwriter.writerow(command)
except (IOError, OSError):
logger.error("Unable to register undo command")
raise errors.ReverterError(
"Unable to register undo command.")
finally:
if command_file is not None:
command_file.close()
def _get_cp_dir(self, temporary):
if temporary:
cp_dir = self.config.temp_checkpoint_dir
else:
cp_dir = self.config.in_progress_dir
util.make_or_verify_dir(
cp_dir, constants.CONFIG_DIRS_MODE, os.geteuid(),
self.config.strict_permissions)
return cp_dir
def recovery_routine(self):
self.revert_temporary_config()
if os.path.isdir(self.config.in_progress_dir):
try:
self._recover_checkpoint(self.config.in_progress_dir)
except errors.ReverterError:
logger.fatal("Incomplete or failed recovery for IN_PROGRESS "
"checkpoint - %s",
self.config.in_progress_dir)
raise errors.ReverterError(
"Incomplete or failed recovery for IN_PROGRESS checkpoint "
"- %s" % self.config.in_progress_dir)
def _remove_contained_files(self, file_list):
# and can't remove filepaths within file_list errors.
if not os.path.isfile(file_list):
return False
try:
with open(file_list, "r") as list_fd:
filepaths = list_fd.read().splitlines()
for path in filepaths:
if os.path.lexists(path):
os.remove(path)
else:
logger.warning(
"File: %s - Could not be found to be deleted %s - "
"LE probably shut down unexpectedly",
os.linesep, path)
except (IOError, OSError):
logger.fatal(
"Unable to remove filepaths contained within %s", file_list)
raise errors.ReverterError(
"Unable to remove filepaths contained within "
"{0}".format(file_list))
return True
def finalize_checkpoint(self, title):
if not os.path.isdir(self.config.in_progress_dir):
return
changes_since_path = os.path.join(self.config.in_progress_dir, "CHANGES_SINCE")
changes_since_tmp_path = os.path.join(self.config.in_progress_dir, "CHANGES_SINCE.tmp")
if not os.path.exists(changes_since_path):
logger.info("Rollback checkpoint is empty (no changes made?)")
with open(changes_since_path, 'w') as f:
f.write("No changes\n")
try:
with open(changes_since_tmp_path, "w") as changes_tmp:
changes_tmp.write("-- %s --\n" % title)
with open(changes_since_path, "r") as changes_orig:
changes_tmp.write(changes_orig.read())
shutil.move(changes_since_tmp_path, changes_since_path)
except (IOError, OSError):
logger.error("Unable to finalize checkpoint - adding title")
logger.debug("Exception was:\n%s", traceback.format_exc())
raise errors.ReverterError("Unable to add title")
self._timestamp_progress_dir()
def _checkpoint_timestamp(self):
timestamp = str(time.time())
others = glob.glob(os.path.join(self.config.backup_dir, "[0-9]*"))
others = [os.path.basename(d) for d in others]
others.append(timestamp)
others.sort()
if others[-1] != timestamp:
timetravel = str(float(others[-1]) + 1)
logger.warning("Current timestamp %s does not correspond to newest reverter "
"checkpoint; your clock probably jumped. Time travelling to %s",
timestamp, timetravel)
timestamp = timetravel
elif len(others) > 1 and others[-2] == timestamp:
logger.debug("Race condition with timestamp %s, incrementing by 0.01", timestamp)
timetravel = str(float(others[-1]) + 0.01)
timestamp = timetravel
return timestamp
def _timestamp_progress_dir(self):
for _ in xrange(2):
timestamp = self._checkpoint_timestamp()
final_dir = os.path.join(self.config.backup_dir, timestamp)
try:
os.rename(self.config.in_progress_dir, final_dir)
return
except OSError:
logger.warning("Extreme, unexpected race condition, retrying (%s)", timestamp)
logger.error(
"Unable to finalize checkpoint, %s -> %s",
self.config.in_progress_dir, final_dir)
raise errors.ReverterError(
"Unable to finalize checkpoint renaming")
| true
| true
|
1c404edecd0b785e89bb58d20b2579dc0a826c04
| 5,675
|
py
|
Python
|
airflow/providers/apache/hive/transfers/mssql_to_hive.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3
|
2021-01-29T20:33:56.000Z
|
2021-08-06T17:35:16.000Z
|
airflow/providers/apache/hive/transfers/mssql_to_hive.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 210
|
2021-07-17T00:25:52.000Z
|
2021-12-29T00:44:48.000Z
|
airflow/providers/apache/hive/transfers/mssql_to_hive.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-04-14T11:15:17.000Z
|
2021-12-15T16:58:24.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member
"""This module contains operator to move data from MSSQL to Hive."""
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from typing import Dict, Optional
import pymssql
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
class MsSqlToHiveOperator(BaseOperator):
    """
    Moves data from Microsoft SQL Server to Hive. The operator runs
    your query against Microsoft SQL Server, stores the file locally
    before loading it into a Hive table. If the ``create`` or
    ``recreate`` arguments are set to ``True``,
    a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
    Hive data types are inferred from the cursor's metadata.
    Note that the table generated in Hive uses ``STORED AS textfile``
    which isn't the most efficient serialization format. If a
    large amount of data is loaded and/or if the table gets
    queried considerably, you may want to use this operator only to
    stage the data into a temporary table before loading it into its
    final destination using a ``HiveOperator``.

    :param sql: SQL query to execute against the Microsoft SQL Server
        database. (templated)
    :type sql: str
    :param hive_table: target Hive table, use dot notation to target a specific
        database. (templated)
    :type hive_table: str
    :param create: whether to create the table if it doesn't exist
    :type create: bool
    :param recreate: whether to drop and recreate the table at every execution
    :type recreate: bool
    :param partition: target partition as a dict of partition columns and
        values. (templated)
    :type partition: dict
    :param delimiter: field delimiter in the file
    :type delimiter: str
    :param mssql_conn_id: source Microsoft SQL Server connection
    :type mssql_conn_id: str
    :param hive_cli_conn_id: Reference to the
        :ref:`Hive CLI connection id <howto/connection:hive_cli>`.
    :type hive_cli_conn_id: str
    :param tblproperties: TBLPROPERTIES of the hive table being created
    :type tblproperties: dict
    """

    template_fields = ('sql', 'partition', 'hive_table')
    template_ext = ('.sql',)
    ui_color = '#a0e08c'

    def __init__(
        self,
        *,
        sql: str,
        hive_table: str,
        create: bool = True,
        recreate: bool = False,
        partition: Optional[Dict] = None,
        delimiter: str = chr(1),  # chr(1) (SOH) is Hive's default field delimiter
        mssql_conn_id: str = 'mssql_default',
        hive_cli_conn_id: str = 'hive_cli_default',
        tblproperties: Optional[Dict] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.sql = sql
        self.hive_table = hive_table
        self.create = create
        self.recreate = recreate
        self.delimiter = delimiter
        self.mssql_conn_id = mssql_conn_id
        self.hive_cli_conn_id = hive_cli_conn_id
        # Normalize once; the earlier ``self.partition = partition`` line was
        # a dead store immediately overwritten by this assignment.
        self.partition = partition or {}
        self.tblproperties = tblproperties

    @classmethod
    # pylint: disable=c-extension-no-member,no-member
    def type_map(cls, mssql_type: int) -> str:
        """Maps MsSQL type to Hive type; unknown types fall back to STRING."""
        map_dict = {
            pymssql.BINARY.value: 'INT',
            pymssql.DECIMAL.value: 'FLOAT',
            pymssql.NUMBER.value: 'INT',
        }
        return map_dict.get(mssql_type, 'STRING')

    def execute(self, context: Dict[str, str]):
        """Dump the MSSQL query result to a local temp file, then load it into Hive."""
        mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
        self.log.info("Dumping Microsoft SQL Server query results to local file")
        with mssql.get_conn() as conn:
            with conn.cursor() as cursor:
                cursor.execute(self.sql)
                with NamedTemporaryFile("w") as tmp_file:
                    csv_writer = csv.writer(tmp_file, delimiter=self.delimiter, encoding='utf-8')
                    field_dict = OrderedDict()
                    col_count = 0
                    for field in cursor.description:
                        col_count += 1
                        col_position = f"Column{col_count}"
                        # Unnamed result columns get a positional name so the
                        # Hive DDL has a valid identifier for every column.
                        field_dict[col_position if field[0] == '' else field[0]] = self.type_map(field[1])
                    csv_writer.writerows(cursor)
                    tmp_file.flush()
                    hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
                    self.log.info("Loading file into Hive")
                    # Load while the temp file is still alive; it is deleted
                    # when the NamedTemporaryFile context exits.
                    hive.load_file(
                        tmp_file.name,
                        self.hive_table,
                        field_dict=field_dict,
                        create=self.create,
                        partition=self.partition,
                        delimiter=self.delimiter,
                        recreate=self.recreate,
                        tblproperties=self.tblproperties,
                    )
| 39.964789
| 106
| 0.656916
|
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from typing import Dict, Optional
import pymssql
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
class MsSqlToHiveOperator(BaseOperator):
template_fields = ('sql', 'partition', 'hive_table')
template_ext = ('.sql',)
ui_color = '#a0e08c'
def __init__(
self,
*,
sql: str,
hive_table: str,
create: bool = True,
recreate: bool = False,
partition: Optional[Dict] = None,
delimiter: str = chr(1),
mssql_conn_id: str = 'mssql_default',
hive_cli_conn_id: str = 'hive_cli_default',
tblproperties: Optional[Dict] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.hive_table = hive_table
self.partition = partition
self.create = create
self.recreate = recreate
self.delimiter = delimiter
self.mssql_conn_id = mssql_conn_id
self.hive_cli_conn_id = hive_cli_conn_id
self.partition = partition or {}
self.tblproperties = tblproperties
@classmethod
def type_map(cls, mssql_type: int) -> str:
map_dict = {
pymssql.BINARY.value: 'INT',
pymssql.DECIMAL.value: 'FLOAT',
pymssql.NUMBER.value: 'INT',
}
return map_dict.get(mssql_type, 'STRING')
def execute(self, context: Dict[str, str]):
mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
self.log.info("Dumping Microsoft SQL Server query results to local file")
with mssql.get_conn() as conn:
with conn.cursor() as cursor:
cursor.execute(self.sql)
with NamedTemporaryFile("w") as tmp_file:
csv_writer = csv.writer(tmp_file, delimiter=self.delimiter, encoding='utf-8')
field_dict = OrderedDict()
col_count = 0
for field in cursor.description:
col_count += 1
col_position = f"Column{col_count}"
field_dict[col_position if field[0] == '' else field[0]] = self.type_map(field[1])
csv_writer.writerows(cursor)
tmp_file.flush()
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
self.log.info("Loading file into Hive")
hive.load_file(
tmp_file.name,
self.hive_table,
field_dict=field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties,
)
| true
| true
|
1c404ef125034a509e7cc02ad5734de3ad8881d4
| 596
|
py
|
Python
|
nnaps/tests/test_mesa_compress.py
|
vosjo/nnaps
|
bc4aac715b511c5df897ef24fb953ad7265927ea
|
[
"MIT"
] | 4
|
2020-09-24T12:55:58.000Z
|
2021-05-19T14:46:10.000Z
|
nnaps/tests/test_mesa_compress.py
|
vosjo/nnaps
|
bc4aac715b511c5df897ef24fb953ad7265927ea
|
[
"MIT"
] | 4
|
2021-06-02T09:28:35.000Z
|
2021-06-04T08:32:24.000Z
|
nnaps/tests/test_mesa_compress.py
|
vosjo/nnaps
|
bc4aac715b511c5df897ef24fb953ad7265927ea
|
[
"MIT"
] | 3
|
2020-10-05T13:18:27.000Z
|
2021-06-02T09:29:11.000Z
|
import pytest
import numpy as np
from nnaps.mesa.compress_mesa import read_mesa_output
from pathlib import Path
base_path = Path(__file__).parent
class TestReadMesaOutput:
    """Tests for :func:`nnaps.mesa.compress_mesa.read_mesa_output`."""

    def test_read_mesa_output_new_format(self):
        """History files written by MESA >= 15140 should parse correctly."""
        filename = base_path / 'test_data/history_mesa_v15140.data'
        model = read_mesa_output(filename=filename, only_first=False)
        # Header values are parsed as numpy strings.
        assert type(model[0]['version_number'][0]) == np.str_
        assert model[0]['version_number'][0] == '15140'
        # np.float64 is the concrete type behind the np.float_ alias, which
        # was removed in NumPy 2.0 -- same check, future-proof spelling.
        assert type(model[1]['model_number'][0]) == np.float64
        assert model[1]['model_number'][0] == 521
| 28.380952
| 69
| 0.704698
|
import pytest
import numpy as np
from nnaps.mesa.compress_mesa import read_mesa_output
from pathlib import Path
base_path = Path(__file__).parent
class TestReadMesaOutput:
def test_read_mesa_output_new_format(self):
filename = base_path / 'test_data/history_mesa_v15140.data'
model = read_mesa_output(filename=filename, only_first=False)
assert type(model[0]['version_number'][0]) == np.str_
assert model[0]['version_number'][0] == '15140'
assert type(model[1]['model_number'][0]) == np.float_
assert model[1]['model_number'][0] == 521
| true
| true
|
1c404f65d738535acb8ea84bc81dd3184b2d119e
| 5,991
|
py
|
Python
|
spider_scripts_20201122/download_picture.py
|
uvioresistant/python_tool_scripts
|
4f61aaaf7955a18401091e3bd04321e588344de3
|
[
"Apache-2.0"
] | null | null | null |
spider_scripts_20201122/download_picture.py
|
uvioresistant/python_tool_scripts
|
4f61aaaf7955a18401091e3bd04321e588344de3
|
[
"Apache-2.0"
] | null | null | null |
spider_scripts_20201122/download_picture.py
|
uvioresistant/python_tool_scripts
|
4f61aaaf7955a18401091e3bd04321e588344de3
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
from time import time, sleep
from multiprocessing.pool import ThreadPool
# Send the request and write the jpg to the local disk
def url_response(url):
path, url = url
r = requests.get(url, stream=True)
with open(path, 'wb') as f:
for ch in r:
f.write(ch)
start = time()
if __name__ == '__main__':
path = []
for count in range(1, 55):
path.append('G:\\J\\p2018.10.6_zero\\For_bro\\chapter_7\\粉体技术与工程' + str(count) + '.jpg')
origin_url = "http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/245ef5d4-73d5-425e-aab1-712d125c7ecb_page-0,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/7c070ff8-bbe6-41e9-b7af-253f171b732f_page-1,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/58174fc7-05e8-452f-a31c-e6f0fdb2560f_page-2,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/c0d9602f-009c-436d-b3dd-05023c05ec1b_page-3,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/337eead9-bb90-4ca2-8222-0210a6f765b4_page-4,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a7f5874a-11e7-438e-9988-7e08492e047e_page-5,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/74706502-2acb-407a-a8f0-bb3be730e0ad_page-6,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/5a56cb23-520e-4f31-b272-2955d8d1f281_page-7,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/efe75be7-512b-432f-a772-acd0b27ecfb2_page-8,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/e83c8107-e5f0-439c-98bd-53b8dd4151f6_page-9,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/68f1d794-337e-49d4-ba5e-18517d1ca884_page-10,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/cdcde5f0-c96b-4bbd-bede-bc61626dc327_page-11,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/7a2c35c9-f2aa-4e41-aff8-f17e24eb5bde_page-12,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a1850526-3cf3-4586-8476-51592bd5fdf6_page-13,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/c5d2795f-e677-4a82-819d-ca31ef5ddd7f_page-14,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/6d21580a-69a8-4cee-9447-b19c589734e1_page-15,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/9ef168c3-6c4c-4f9a-ba36-0c09dd2a0419_page-16,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/eafbb3fe-9e7f-4e6a-840f-c32c0472a156_page-17,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/8b5c844f-fd80-43cc-8b2a-178fb0920a50_page-18,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2606828b-d748-428b-a8e4-9843fc57786a_page-19,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/3810
9ef7-8ab6-4c1c-8b99-0bdfa8224c13_page-20,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a1f007be-1025-4605-8650-071269621848_page-21,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/9dd15db1-a39c-41a1-aac0-53cb7504bd3e_page-22,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/c0836422-0090-4e4e-be8e-09188d52dfdd_page-23,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/93f9379b-07e3-4d50-a02d-1305e5e8004d_page-24,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/17f6dc02-d295-48b7-94e9-b00a04a420ec_page-25,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/281908f0-a12d-46d7-82e3-723447842b36_page-26,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2dc2bf2e-6a1d-4435-8b3e-bd04f13d9aa3_page-27,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/24eb9cfb-0239-4790-9c40-9ed4caebd7ee_page-28,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/df8d1628-6987-454f-a9ce-96ae34bbda6f_page-29,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/048423c6-956e-4e20-af2c-e3246afb85aa_page-30,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/7014d5e9-db3a-460c-82da-9ddb075bf58b_page-31,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/81ff23f3-3924-43d6-b3c0-888008818fa0_page-32,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/c58e13a8-430f-431b-82b1-98eee5d4da65_page-33,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2daa477f-c409-4f30-aef6-a40427db647e_page-34,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/51ee30ea-64e7-4f71-ab5a-65f3298f3abf_page-35,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/3dc8e404-131c-497e-8daf-8ffd31da4426_page-36,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/703db686-ecea-4dd2-a315-04949da120bc_page-37,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/1176f9df-45b4-4ce6-8c6c-e80fba7a221f_page-38,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/551c6db3-2fb8-42a1-8e54-e2115bd874a0_page-39,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/bb609e95-4a99-44bf-b7d0-751140f20665_page-40,http://met2.fzu.edu
.cn/meol/data/convert/2020/10/12/d02c91e7-a15b-4f20-9cf4-bea3ce0c1540_page-41,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/418e8d58-5dd0-4f31-a63a-e21935556437_page-42,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/9321613a-5743-46fc-b1fb-0516c82f73f9_page-43,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/8e207131-7c9a-43ca-8a77-646d542a2190_page-44,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a6a81ede-09ed-4e22-becf-e436a05195f6_page-45,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/beaa18f1-d893-47c7-90f8-a24363ffef79_page-46,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a6bd98f2-e6d7-4d47-a888-623f4b4eda4f_page-47,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a10b2f86-d90a-4969-aa56-8c3ddd1f072d_page-48,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/161faab5-44b1-41e6-8a3e-c3b070dc6c54_page-49,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/13442f6b-d93c-42a6-bbc3-5138f72035d3_page-50,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2da67a84-da12-45b0-959b-5e6ad0391185_page-51,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2f475124-d0df-4bdf-9f0c-b093db44e448_page-52,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/31c31630-8c0d-459e-9593-9ce6fee6f2b8_page-53"
url = origin_url.split(',')
urls = list(zip(path, url))
for x in urls:
sleep(5)
url_response(x)
# pool = ThreadPool(9).imap_unordered(url_response, urls) # 暂时别用这句代码,没找到sleep方法,可能就是这句代码,把网站给跑崩的
print(f"Time to download: {time() - start}")
| 157.657895
| 5,246
| 0.780003
|
import os
import requests
from time import time, sleep
from multiprocessing.pool import ThreadPool
def url_response(url):
path, url = url
r = requests.get(url, stream=True)
with open(path, 'wb') as f:
for ch in r:
f.write(ch)
start = time()
if __name__ == '__main__':
path = []
for count in range(1, 55):
path.append('G:\\J\\p2018.10.6_zero\\For_bro\\chapter_7\\粉体技术与工程' + str(count) + '.jpg')
origin_url = "http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/245ef5d4-73d5-425e-aab1-712d125c7ecb_page-0,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/7c070ff8-bbe6-41e9-b7af-253f171b732f_page-1,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/58174fc7-05e8-452f-a31c-e6f0fdb2560f_page-2,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/c0d9602f-009c-436d-b3dd-05023c05ec1b_page-3,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/337eead9-bb90-4ca2-8222-0210a6f765b4_page-4,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a7f5874a-11e7-438e-9988-7e08492e047e_page-5,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/74706502-2acb-407a-a8f0-bb3be730e0ad_page-6,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/5a56cb23-520e-4f31-b272-2955d8d1f281_page-7,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/efe75be7-512b-432f-a772-acd0b27ecfb2_page-8,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/e83c8107-e5f0-439c-98bd-53b8dd4151f6_page-9,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/68f1d794-337e-49d4-ba5e-18517d1ca884_page-10,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/cdcde5f0-c96b-4bbd-bede-bc61626dc327_page-11,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/7a2c35c9-f2aa-4e41-aff8-f17e24eb5bde_page-12,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a1850526-3cf3-4586-8476-51592bd5fdf6_page-13,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/c5d2795f-e677-4a82-819d-ca31ef5ddd7f_page-14,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/6d21580a-69a8-4cee-9447-b19c589734e1_page-15,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/9ef168c3-6c4c-4f9a-ba36-0c09dd2a0419_page-16,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/eafbb3fe-9e7f-4e6a-840f-c32c0472a156_page-17,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/8b5c844f-fd80-43cc-8b2a-178fb0920a50_page-18,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2606828b-d748-428b-a8e4-9843fc57786a_page-19,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/3810
9ef7-8ab6-4c1c-8b99-0bdfa8224c13_page-20,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a1f007be-1025-4605-8650-071269621848_page-21,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/9dd15db1-a39c-41a1-aac0-53cb7504bd3e_page-22,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/c0836422-0090-4e4e-be8e-09188d52dfdd_page-23,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/93f9379b-07e3-4d50-a02d-1305e5e8004d_page-24,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/17f6dc02-d295-48b7-94e9-b00a04a420ec_page-25,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/281908f0-a12d-46d7-82e3-723447842b36_page-26,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2dc2bf2e-6a1d-4435-8b3e-bd04f13d9aa3_page-27,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/24eb9cfb-0239-4790-9c40-9ed4caebd7ee_page-28,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/df8d1628-6987-454f-a9ce-96ae34bbda6f_page-29,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/048423c6-956e-4e20-af2c-e3246afb85aa_page-30,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/7014d5e9-db3a-460c-82da-9ddb075bf58b_page-31,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/81ff23f3-3924-43d6-b3c0-888008818fa0_page-32,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/c58e13a8-430f-431b-82b1-98eee5d4da65_page-33,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2daa477f-c409-4f30-aef6-a40427db647e_page-34,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/51ee30ea-64e7-4f71-ab5a-65f3298f3abf_page-35,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/3dc8e404-131c-497e-8daf-8ffd31da4426_page-36,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/703db686-ecea-4dd2-a315-04949da120bc_page-37,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/1176f9df-45b4-4ce6-8c6c-e80fba7a221f_page-38,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/551c6db3-2fb8-42a1-8e54-e2115bd874a0_page-39,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/bb609e95-4a99-44bf-b7d0-751140f20665_page-40,http://met2.fzu.edu
.cn/meol/data/convert/2020/10/12/d02c91e7-a15b-4f20-9cf4-bea3ce0c1540_page-41,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/418e8d58-5dd0-4f31-a63a-e21935556437_page-42,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/9321613a-5743-46fc-b1fb-0516c82f73f9_page-43,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/8e207131-7c9a-43ca-8a77-646d542a2190_page-44,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a6a81ede-09ed-4e22-becf-e436a05195f6_page-45,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/beaa18f1-d893-47c7-90f8-a24363ffef79_page-46,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a6bd98f2-e6d7-4d47-a888-623f4b4eda4f_page-47,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/a10b2f86-d90a-4969-aa56-8c3ddd1f072d_page-48,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/161faab5-44b1-41e6-8a3e-c3b070dc6c54_page-49,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/13442f6b-d93c-42a6-bbc3-5138f72035d3_page-50,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2da67a84-da12-45b0-959b-5e6ad0391185_page-51,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/2f475124-d0df-4bdf-9f0c-b093db44e448_page-52,http://met2.fzu.edu.cn/meol/data/convert/2020/10/12/31c31630-8c0d-459e-9593-9ce6fee6f2b8_page-53"
url = origin_url.split(',')
urls = list(zip(path, url))
for x in urls:
sleep(5)
url_response(x)
- start}")
| true
| true
|
1c404fa40746e8ead0655123648395b5752349ff
| 4,363
|
py
|
Python
|
examples/faster-rcnn/generate_anchors.py
|
rohitbhio/neon
|
4fb5ff6a4b622facfb07b28da94b992159aac8cc
|
[
"Apache-2.0"
] | 1
|
2020-12-30T10:13:41.000Z
|
2020-12-30T10:13:41.000Z
|
examples/faster-rcnn/generate_anchors.py
|
rohitbhio/neon
|
4fb5ff6a4b622facfb07b28da94b992159aac8cc
|
[
"Apache-2.0"
] | null | null | null |
examples/faster-rcnn/generate_anchors.py
|
rohitbhio/neon
|
4fb5ff6a4b622facfb07b28da94b992159aac8cc
|
[
"Apache-2.0"
] | null | null | null |
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
from __future__ import division
from __future__ import print_function
from builtins import range
import numpy as np
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
# array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_all_anchors(conv_size_x, conv_size_y, im_scale, scales=np.array((8, 16, 32))):
anchors = generate_anchors(scales=scales)
num_anchors = anchors.shape[0]
# generate shifts to apply to anchors
# note: 1/self.SCALE is the feature stride
shift_x = np.arange(0, conv_size_x) * 1.0 / im_scale
shift_y = np.arange(0, conv_size_y) * 1.0 / im_scale
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add K anchors (1, K, 4) to A shifts (A, 1, 4) to get
# shift anchors (A, K, 4), then reshape to (A*K, 4) shifted anchors
K = num_anchors
A = shifts.shape[0]
# Generate anchors in A*K order (different from Caffe) so that we don't have to
# reshape and transpose before loading back to GPU
all_anchors = (anchors.reshape((1, K, 4)).transpose(
(1, 0, 2)) + shifts.reshape((1, A, 4)))
all_anchors = all_anchors.reshape((A * K, 4))
return all_anchors
# all_anchors is in (CHW) format, matching the CHWN output of the conv
# layer.
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
scales=2**np.arange(3, 6)):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
"""
Enumerate a set of anchors for each aspect ratio wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
"""
Enumerate a set of anchors for each scale wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
if __name__ == '__main__':
import time
t = time.time()
a = generate_anchors()
print(time.time() - t)
print(a)
from IPython import embed
embed()
| 30.089655
| 91
| 0.556727
|
from __future__ import division
from __future__ import print_function
from builtins import range
import numpy as np
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
# array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_all_anchors(conv_size_x, conv_size_y, im_scale, scales=np.array((8, 16, 32))):
anchors = generate_anchors(scales=scales)
num_anchors = anchors.shape[0]
# generate shifts to apply to anchors
# note: 1/self.SCALE is the feature stride
shift_x = np.arange(0, conv_size_x) * 1.0 / im_scale
shift_y = np.arange(0, conv_size_y) * 1.0 / im_scale
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add K anchors (1, K, 4) to A shifts (A, 1, 4) to get
# shift anchors (A, K, 4), then reshape to (A*K, 4) shifted anchors
K = num_anchors
A = shifts.shape[0]
# Generate anchors in A*K order (different from Caffe) so that we don't have to
all_anchors = (anchors.reshape((1, K, 4)).transpose(
(1, 0, 2)) + shifts.reshape((1, A, 4)))
all_anchors = all_anchors.reshape((A * K, 4))
return all_anchors
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
scales=2**np.arange(3, 6)):
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors
def _whctrs(anchor):
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
if __name__ == '__main__':
import time
t = time.time()
a = generate_anchors()
print(time.time() - t)
print(a)
from IPython import embed
embed()
| true
| true
|
1c4050cf536e965eec609a12a6ad864299e1cdae
| 597
|
py
|
Python
|
datacube/index/__init__.py
|
agdc-research-trial/gdf
|
82ed29c263eaf65f5c1fbb4e9207c99e9700b85c
|
[
"Apache-2.0"
] | 1
|
2015-06-01T01:31:44.000Z
|
2015-06-01T01:31:44.000Z
|
datacube/index/__init__.py
|
agdc-research-trial/gdf
|
82ed29c263eaf65f5c1fbb4e9207c99e9700b85c
|
[
"Apache-2.0"
] | null | null | null |
datacube/index/__init__.py
|
agdc-research-trial/gdf
|
82ed29c263eaf65f5c1fbb4e9207c99e9700b85c
|
[
"Apache-2.0"
] | null | null | null |
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
"""
Modules for interfacing with the index/database.
"""
from ._api import index_connect
from .fields import UnknownFieldError
from .exceptions import DuplicateRecordError, MissingRecordError, IndexSetupError
from datacube.index.abstract import AbstractIndex as Index
__all__ = [
'index_connect',
'Index',
'DuplicateRecordError',
'IndexSetupError',
'MissingRecordError',
'UnknownFieldError',
]
| 25.956522
| 92
| 0.760469
|
from ._api import index_connect
from .fields import UnknownFieldError
from .exceptions import DuplicateRecordError, MissingRecordError, IndexSetupError
from datacube.index.abstract import AbstractIndex as Index
__all__ = [
'index_connect',
'Index',
'DuplicateRecordError',
'IndexSetupError',
'MissingRecordError',
'UnknownFieldError',
]
| true
| true
|
1c405150879a0f0714a98304287b9c17bdd4928d
| 3,872
|
py
|
Python
|
Python/design-circular-queue.py
|
xiaohalo/LeetCode
|
68211ba081934b21bb1968046b7e3c1459b3da2d
|
[
"MIT"
] | 9
|
2019-06-30T07:15:18.000Z
|
2022-02-10T20:13:40.000Z
|
Python/design-circular-queue.py
|
xiaohalo/LeetCode
|
68211ba081934b21bb1968046b7e3c1459b3da2d
|
[
"MIT"
] | null | null | null |
Python/design-circular-queue.py
|
xiaohalo/LeetCode
|
68211ba081934b21bb1968046b7e3c1459b3da2d
|
[
"MIT"
] | 9
|
2019-01-16T22:16:49.000Z
|
2022-02-06T17:33:41.000Z
|
# Time: O(1)
# Space: O(k)
# Design your implementation of the circular queue.
# The circular queue is a linear data structure in which
# the operations are performed based on FIFO (First In First Out)
# principle and the last position is connected back to
# the first position to make a circle. It is also called ‘Ring Buffer’.
# One of the Benefits of the circular queue is that
# we can make use of the spaces in front of the queue.
# In a normal queue, once the queue becomes full,
# we can not insert the next element even if there is a space in front of the queue.
# But using the circular queue, we can use the space to store new values.
# Your implementation should support following operations:
#
# MyCircularQueue(k): Constructor, set the size of the queue to be k.
# Front: Get the front item from the queue. If the queue is empty, return -1.
# Rear: Get the last item from the queue. If the queue is empty, return -1.
# enQueue(value): Insert an element into the circular queue. Return true if the operation is successful.
# deQueue(): Delete an element from the circular queue. Return true if the operation is successful.
# isEmpty(): Checks whether the circular queue is empty or not.
# isFull(): Checks whether the circular queue is full or not.
# Example:
#
# MyCircularQueue circularQueue = new MycircularQueue(3); // set the size to be 3
# circularQueue.enQueue(1); // return true
# circularQueue.enQueue(2); // return true
# circularQueue.enQueue(3); // return true
# circularQueue.enQueue(4); // return false, the queue is full
# circularQueue.Rear(); // return 3
# circularQueue.isFull(); // return true
# circularQueue.deQueue(); // return true
# circularQueue.enQueue(4); // return true
# circularQueue.Rear(); // return 4
#
# Note:
# - All values will be in the range of [1, 1000].
# - The number of operations will be in the range of [1, 1000].
# - Please do not use the built-in Queue library.
class MyCircularQueue(object):
def __init__(self, k):
"""
Initialize your data structure here. Set the size of the queue to be k.
:type k: int
"""
self.__start = 0
self.__size = 0
self.__buffer = [0] * k
def enQueue(self, value):
"""
Insert an element into the circular queue. Return true if the operation is successful.
:type value: int
:rtype: bool
"""
if self.isFull():
return False
self.__buffer[(self.__start+self.__size) % len(self.__buffer)] = value
self.__size += 1
return True
def deQueue(self):
"""
Delete an element from the circular queue. Return true if the operation is successful.
:rtype: bool
"""
if self.isEmpty():
return False
self.__start = (self.__start+1) % len(self.__buffer)
self.__size -= 1
return True
def Front(self):
"""
Get the front item from the queue.
:rtype: int
"""
return -1 if self.isEmpty() else self.__buffer[self.__start]
def Rear(self):
"""
Get the last item from the queue.
:rtype: int
"""
return -1 if self.isEmpty() else self.__buffer[(self.__start+self.__size-1) % len(self.__buffer)]
def isEmpty(self):
"""
Checks whether the circular queue is empty or not.
:rtype: bool
"""
return self.__size == 0
def isFull(self):
"""
Checks whether the circular queue is full or not.
:rtype: bool
"""
return self.__size == len(self.__buffer)
# Your MyCircularQueue object will be instantiated and called as such:
# obj = MyCircularQueue(k)
# param_1 = obj.enQueue(value)
# param_2 = obj.deQueue()
# param_3 = obj.Front()
# param_4 = obj.Rear()
# param_5 = obj.isEmpty()
# param_6 = obj.isFull()
| 34.571429
| 105
| 0.647211
|
class MyCircularQueue(object):
def __init__(self, k):
self.__start = 0
self.__size = 0
self.__buffer = [0] * k
def enQueue(self, value):
if self.isFull():
return False
self.__buffer[(self.__start+self.__size) % len(self.__buffer)] = value
self.__size += 1
return True
def deQueue(self):
if self.isEmpty():
return False
self.__start = (self.__start+1) % len(self.__buffer)
self.__size -= 1
return True
def Front(self):
return -1 if self.isEmpty() else self.__buffer[self.__start]
def Rear(self):
return -1 if self.isEmpty() else self.__buffer[(self.__start+self.__size-1) % len(self.__buffer)]
def isEmpty(self):
return self.__size == 0
def isFull(self):
return self.__size == len(self.__buffer)
| true
| true
|
1c40516d6c979e49fd898f56797b8adbeeec9eb8
| 31
|
py
|
Python
|
models/main.py
|
feamster/internet_video
|
53a88778155e50dfdfe581d4041c64bd411a0701
|
[
"MIT"
] | null | null | null |
models/main.py
|
feamster/internet_video
|
53a88778155e50dfdfe581d4041c64bd411a0701
|
[
"MIT"
] | null | null | null |
models/main.py
|
feamster/internet_video
|
53a88778155e50dfdfe581d4041c64bd411a0701
|
[
"MIT"
] | 1
|
2022-03-08T10:04:02.000Z
|
2022-03-08T10:04:02.000Z
|
import os
import pandas as pd
| 7.75
| 19
| 0.774194
|
import os
import pandas as pd
| true
| true
|
1c40523fc1f2c8fbb02985f67c895b5f8b7c6506
| 7,397
|
py
|
Python
|
ci_scripts/chinese_samplecode_processor.py
|
zhangting2020/FluidDoc
|
1a08e8aedce1e44829ae652f6c588adf61f2c2f6
|
[
"Apache-2.0"
] | null | null | null |
ci_scripts/chinese_samplecode_processor.py
|
zhangting2020/FluidDoc
|
1a08e8aedce1e44829ae652f6c588adf61f2c2f6
|
[
"Apache-2.0"
] | null | null | null |
ci_scripts/chinese_samplecode_processor.py
|
zhangting2020/FluidDoc
|
1a08e8aedce1e44829ae652f6c588adf61f2c2f6
|
[
"Apache-2.0"
] | 1
|
2020-11-25T11:42:24.000Z
|
2020-11-25T11:42:24.000Z
|
import math
import os
import pickle
import shutil
import subprocess
import multiprocessing
import sys
def remove_desc_code(srcls, filename):
if filename == 'fluid_cn/one_hot_cn.rst':
srcls.pop(13)
srcls.pop(28)
srcls.pop(44)
if filename == 'layers_cn/one_hot_cn.rst':
srcls.pop(15)
srcls.pop(30)
srcls.pop(46)
if filename == 'profiler_cn/profiler_cn.rst':
srcls.pop(41)
if filename == 'layers_cn/natural_exp_decay_cn.rst':
srcls.pop(13)
if filename == 'layers_cn/transpose_cn.rst':
srcls.pop(20)
if filename == 'layers_cn/array_length_cn.rst':
srcls.pop(36)
if filename == 'layers_cn/inverse_time_decay_cn.rst':
srcls.pop(13)
if filename == 'layers_cn/stack_cn.rst':
srcls.pop(12)
srcls.pop(33)
if filename == 'layers_cn/sums_cn.rst':
srcls.pop(11)
if filename == 'layers_cn/sum_cn.rst':
for i in range(len(srcls) - 1, 61, -1):
srcls.pop(i)
if filename == 'layers_cn/softmax_cn.rst':
srcls.pop(30)
srcls.pop(57)
if filename == 'layers_cn/array_write_cn.rst':
srcls.pop(37)
if filename == 'layers_cn/lod_append_cn.rst':
srcls.pop(11)
if filename == 'layers_cn/reorder_lod_tensor_by_rank_cn.rst':
srcls.pop(25)
if filename == 'layers_cn/round_cn.rst':
srcls.pop(10)
if filename == 'layers_cn/squeeze_cn.rst':
srcls.pop(11)
srcls.pop(19)
srcls.pop(27)
if filename == 'layers_cn/unsqueeze_cn.rst':
srcls.pop(11)
if filename == 'layers_cn/array_read_cn.rst':
srcls.pop(51)
if filename == 'layers_cn/scatter_cn.rst':
srcls.pop(9)
if filename == 'layers_cn/topk_cn.rst':
srcls.pop(11)
if filename == 'optimizer_cn/ModelAverage_cn.rst':
srcls.pop(15)
return srcls
def check_indent(code_line):
indent = ""
for c in code_line:
if c == '\t':
indent += ' '
elif c == ' ':
indent += ' '
if c != ' ' and c != '\t':
break
return indent
def find_all(src_str, substr):
indices = []
get_one = src_str.find(substr)
while get_one != -1:
indices.append(get_one)
get_one = src_str.find(substr, get_one + 1)
return indices
def extract_sample_code(srcfile, status_all):
filename = srcfile.name
srcc = srcfile.read()
srcfile.seek(0, 0)
srcls = srcfile.readlines()
srcls = remove_desc_code(
srcls, filename) # remove description info for samplecode
status = []
sample_code_begins = find_all(srcc, " code-block:: python")
if len(sample_code_begins) == 0:
status.append(-1)
else:
for i in range(0, len(srcls)):
if srcls[i].find(".. code-block:: python") != -1:
content = ""
start = i
blank_line = 1
while srcls[start + blank_line].strip() == '':
blank_line += 1
startindent = ""
# remove indent error
if srcls[start + blank_line].find("from") != -1:
startindent += srcls[start + blank_line][:srcls[
start + blank_line].find("from")]
elif srcls[start + blank_line].find("import") != -1:
startindent += srcls[start + blank_line][:srcls[
start + blank_line].find("import")]
else:
startindent += check_indent(srcls[start + blank_line])
content += srcls[start + blank_line][len(startindent):]
for j in range(start + blank_line + 1, len(srcls)):
# planish a blank line
if not srcls[j].startswith(startindent) and srcls[
j] != '\n':
break
if srcls[j].find(" code-block:: python") != -1:
break
content += srcls[j].replace(startindent, "", 1)
status.append(run_sample_code(content, filename))
status_all[filename] = status
return status_all
def run_sample_code(content, filename):
# three status ,-1:no sample code; 1: running error; 0:normal
fname = filename.split("/")[-1].replace("_cn", "").replace(".rst",
"") + ".py"
tempf = open("temp/" + fname, 'w')
content = "# -*- coding: utf-8 -*-\n" + content
tempf.write(content)
tempf.close()
cmd = ["python", "temp/" + fname]
subprc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, error = subprc.communicate()
err = "".join(error.decode(encoding='utf-8'))
if subprc.returncode != 0:
print("\nSample code error found in ", filename, ":\n")
print(err)
status = 1
else:
status = 0
os.remove("temp/" + fname)
return status
def test(file):
temp = []
src = open(file, 'r')
status_all = {}
extract_sample_code(src, status_all)
temp.append(status_all)
src.close()
return temp
if os.path.isdir("temp"):
shutil.rmtree("temp")
if os.path.isdir("infer_model"):
shutil.rmtree("infer_model")
if os.path.isdir("image"):
shutil.rmtree("image")
if os.path.isdir("my_paddle_model"):
shutil.rmtree("my_paddle_model")
if os.path.isdir("my_paddle_vars"):
shutil.rmtree("my_paddle_vars")
if not os.path.isdir("temp"):
os.mkdir("temp")
output = []
if len(sys.argv) < 2:
print("Error: inadequate number of arguments")
print("Please one file")
sys.exit(1)
else:
if not os.path.exists(sys.argv[1]):
print("File not found")
sys.exit(1)
res = test(sys.argv[1])
output.append(res)
status_groups = {-1: [], 0: [], 1: []}
# polishes show format
ci_pass = True
for one_file in output:
for dicts in one_file:
for key in dicts:
status = dicts[key]
for ele in status:
if ele != 0:
ci_pass = False
break
if len(status) == 1:
status_groups[status[0]].append(key)
else:
for u in range(0, len(status)):
status_groups[status[u]].append(key + '_' + str(u + 1))
error_api = status_groups[-1] + status_groups[1]
total_error_number = len(error_api)
print("****************************************************")
print("----------------End of the Check--------------------")
print("****************************************************")
if total_error_number > 0:
print("Error sample code number is:{}".format(total_error_number))
type_one_number = len(status_groups[-1])
type_two_number = len(status_groups[1])
if type_one_number > 0:
print("Error type one sample number is:{}".format(type_one_number))
print("Error raised from type one:no sample code.",
str(status_groups[-1]))
if type_two_number > 0:
print("Error type two sample number is:{}".format(type_two_number))
print("Error raised from type two:running error sample code.",
str(status_groups[1]))
if not ci_pass:
print("Mistakes found in sample codes.")
exit(1)
else:
print("Sample code check is successful!")
| 31.476596
| 75
| 0.55536
|
import math
import os
import pickle
import shutil
import subprocess
import multiprocessing
import sys
def remove_desc_code(srcls, filename):
    """Strip known descriptive (non-runnable) lines from a doc source.

    Certain *_cn.rst documents contain descriptive pseudo-code lines at
    fixed positions that would break the sample-code runner; this drops
    them.  Indices are applied sequentially, exactly as repeated pop()
    calls would (each pop shifts later indices).

    Args:
        srcls: list of source lines (mutated in place).
        filename: document path used to look up which lines to drop.

    Returns:
        The same list object, with the offending lines removed.
    """
    # filename -> sequence of pop indices, in the original pop order.
    drop_table = {
        'fluid_cn/one_hot_cn.rst': (13, 28, 44),
        'layers_cn/one_hot_cn.rst': (15, 30, 46),
        'profiler_cn/profiler_cn.rst': (41,),
        'layers_cn/natural_exp_decay_cn.rst': (13,),
        'layers_cn/transpose_cn.rst': (20,),
        'layers_cn/array_length_cn.rst': (36,),
        'layers_cn/inverse_time_decay_cn.rst': (13,),
        'layers_cn/stack_cn.rst': (12, 33),
        'layers_cn/sums_cn.rst': (11,),
        'layers_cn/softmax_cn.rst': (30, 57),
        'layers_cn/array_write_cn.rst': (37,),
        'layers_cn/lod_append_cn.rst': (11,),
        'layers_cn/reorder_lod_tensor_by_rank_cn.rst': (25,),
        'layers_cn/round_cn.rst': (10,),
        'layers_cn/squeeze_cn.rst': (11, 19, 27),
        'layers_cn/unsqueeze_cn.rst': (11,),
        'layers_cn/array_read_cn.rst': (51,),
        'layers_cn/scatter_cn.rst': (9,),
        'layers_cn/topk_cn.rst': (11,),
        'optimizer_cn/ModelAverage_cn.rst': (15,),
    }
    if filename == 'layers_cn/sum_cn.rst':
        # Original popped from the end down to index 62, i.e. truncate.
        del srcls[62:]
    for index in drop_table.get(filename, ()):
        srcls.pop(index)
    return srcls
def check_indent(code_line):
    """Return the leading whitespace of *code_line* as spaces.

    Each leading space OR tab contributes exactly one space character to
    the result (tabs are deliberately collapsed to a single space, as in
    the original implementation).  An empty string is returned when the
    line starts with a non-whitespace character.
    """
    width = 0
    for ch in code_line:
        if ch == ' ' or ch == '\t':
            width += 1
        else:
            break
    return ' ' * width
def find_all(src_str, substr):
    """Return the start indices of every occurrence of *substr* in *src_str*.

    Overlapping matches are included (the search resumes one character
    after each match).  Returns an empty list when there is no match.
    """
    positions = []
    start = 0
    while True:
        idx = src_str.find(substr, start)
        if idx == -1:
            break
        positions.append(idx)
        start = idx + 1
    return positions
def extract_sample_code(srcfile, status_all):
    """Extract every ``code-block:: python`` sample from *srcfile* and run it.

    For each sample found, de-indents the block and passes it to
    run_sample_code(); collects one status per sample (plus -1 when the
    document contains no sample at all) into ``status_all[filename]``.

    Args:
        srcfile: an open file object (read twice: whole text + line list).
        status_all: dict mapping filename -> list of status codes; mutated
            in place and also returned.
    """
    filename = srcfile.name
    srcc = srcfile.read()
    # Rewind so readlines() below sees the whole file again.
    srcfile.seek(0, 0)
    srcls = srcfile.readlines()
    srcls = remove_desc_code(
        srcls, filename)  # drop known non-runnable descriptive lines
    status = []
    sample_code_begins = find_all(srcc, " code-block:: python")
    if len(sample_code_begins) == 0:
        # No sample code in this document at all: status -1.
        status.append(-1)
    else:
        for i in range(0, len(srcls)):
            if srcls[i].find(".. code-block:: python") != -1:
                content = ""
                start = i
                # Skip blank lines between the directive and the code.
                blank_line = 1
                while srcls[start + blank_line].strip() == '':
                    blank_line += 1
                # Infer the block's indentation from its first code line;
                # "from"/"import" are special-cased so the prefix before
                # the keyword becomes the indent string.
                startindent = ""
                if srcls[start + blank_line].find("from") != -1:
                    startindent += srcls[start + blank_line][:srcls[
                        start + blank_line].find("from")]
                elif srcls[start + blank_line].find("import") != -1:
                    startindent += srcls[start + blank_line][:srcls[
                        start + blank_line].find("import")]
                else:
                    startindent += check_indent(srcls[start + blank_line])
                content += srcls[start + blank_line][len(startindent):]
                # Accumulate lines until the indentation drops or the next
                # code-block directive starts.
                for j in range(start + blank_line + 1, len(srcls)):
                    if not srcls[j].startswith(startindent) and srcls[
                            j] != '\n':
                        break
                    if srcls[j].find(" code-block:: python") != -1:
                        break
                    content += srcls[j].replace(startindent, "", 1)
                status.append(run_sample_code(content, filename))
    status_all[filename] = status
    return status_all
def run_sample_code(content, filename):
    """Write one extracted sample to ``temp/<name>.py`` and execute it.

    The sample is prefixed with a UTF-8 coding declaration, run with the
    ``python`` interpreter in a subprocess, and the temp script is removed
    afterwards.

    Returns:
        0 when the subprocess exits cleanly, 1 otherwise (the captured
        stderr is printed in the failure case).
    """
    script_name = filename.split("/")[-1].replace("_cn", "").replace(".rst",
                                                                     "") + ".py"
    script_path = "temp/" + script_name
    with open(script_path, 'w') as script:
        script.write("# -*- coding: utf-8 -*-\n" + content)
    proc = subprocess.Popen(
        ["python", script_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, error = proc.communicate()
    err = "".join(error.decode(encoding='utf-8'))
    if proc.returncode != 0:
        print("\nSample code error found in ", filename, ":\n")
        print(err)
        status = 1
    else:
        status = 0
    os.remove(script_path)
    return status
def test(file):
    """Run the sample-code check for one document.

    Returns:
        A single-element list containing the {filename: [status, ...]}
        dict produced by extract_sample_code().
    """
    results = []
    status_all = {}
    with open(file, 'r') as src:
        extract_sample_code(src, status_all)
    results.append(status_all)
    return results
# --- script entry: clean stale artifacts left by earlier sample runs ---
if os.path.isdir("temp"):
    shutil.rmtree("temp")
if os.path.isdir("infer_model"):
    shutil.rmtree("infer_model")
if os.path.isdir("image"):
    shutil.rmtree("image")
if os.path.isdir("my_paddle_model"):
    shutil.rmtree("my_paddle_model")
if os.path.isdir("my_paddle_vars"):
    shutil.rmtree("my_paddle_vars")
if not os.path.isdir("temp"):
    os.mkdir("temp")
# Run the check on the single file given on the command line.
output = []
if len(sys.argv) < 2:
    print("Error: inadequate number of arguments")
    print("Please one file")
    sys.exit(1)
else:
    if not os.path.exists(sys.argv[1]):
        print("File not found")
        sys.exit(1)
    res = test(sys.argv[1])
    output.append(res)
# Group results by status: -1 = no sample code, 0 = ok, 1 = run failure.
status_groups = {-1: [], 0: [], 1: []}
ci_pass = True
for one_file in output:
    for dicts in one_file:
        for key in dicts:
            status = dicts[key]
            for ele in status:
                if ele != 0:
                    ci_pass = False
                    break
            if len(status) == 1:
                status_groups[status[0]].append(key)
            else:
                # Several samples in one doc: suffix with the sample number.
                for u in range(0, len(status)):
                    status_groups[status[u]].append(key + '_' + str(u + 1))
error_api = status_groups[-1] + status_groups[1]
total_error_number = len(error_api)
print("****************************************************")
print("----------------End of the Check--------------------")
print("****************************************************")
if total_error_number > 0:
    print("Error sample code number is:{}".format(total_error_number))
    type_one_number = len(status_groups[-1])
    type_two_number = len(status_groups[1])
    if type_one_number > 0:
        print("Error type one sample number is:{}".format(type_one_number))
        print("Error raised from type one:no sample code.",
              str(status_groups[-1]))
    if type_two_number > 0:
        print("Error type two sample number is:{}".format(type_two_number))
        print("Error raised from type two:running error sample code.",
              str(status_groups[1]))
# Non-zero exit makes CI fail when any sample misbehaved.
if not ci_pass:
    print("Mistakes found in sample codes.")
    exit(1)
else:
    print("Sample code check is successful!")
| true
| true
|
1c405262bdb1dd0e3eeb6b00bc43174e6c73608a
| 4,358
|
py
|
Python
|
photologue/templatetags/photologue_tags.py
|
erdnaxe/django-photologue
|
97768fef566cb19eb3c8454d58d9abf2e7b9f2b6
|
[
"BSD-3-Clause"
] | 364
|
2015-01-03T00:06:55.000Z
|
2019-03-10T20:00:11.000Z
|
photologue/templatetags/photologue_tags.py
|
erdnaxe/django-photologue
|
97768fef566cb19eb3c8454d58d9abf2e7b9f2b6
|
[
"BSD-3-Clause"
] | 79
|
2015-01-03T03:35:36.000Z
|
2019-03-13T20:05:55.000Z
|
photologue/templatetags/photologue_tags.py
|
erdnaxe/django-photologue
|
97768fef566cb19eb3c8454d58d9abf2e7b9f2b6
|
[
"BSD-3-Clause"
] | 135
|
2015-01-09T01:36:11.000Z
|
2019-03-12T02:54:59.000Z
|
import random
from django import template
from ..models import Gallery, Photo
register = template.Library()
@register.inclusion_tag('photologue/tags/next_in_gallery.html')
def next_in_gallery(photo, gallery):
    """Render the photo that follows *photo* within *gallery*."""
    following = photo.get_next_in_gallery(gallery)
    return {'photo': following}
@register.inclusion_tag('photologue/tags/prev_in_gallery.html')
def previous_in_gallery(photo, gallery):
    """Render the photo that precedes *photo* within *gallery*."""
    preceding = photo.get_previous_in_gallery(gallery)
    return {'photo': preceding}
@register.simple_tag
def cycle_lite_gallery(gallery_title, height, width):
    """Generate image tags for a jQuery Cycle Lite slideshow gallery.

    See http://malsup.com/jquery/cycle/lite/

    The first image is marked with ``class="first"``; subsequent images
    get no extra attribute.

    BUG FIX: the original reset ``first`` to ``None`` after the first
    iteration, so every following <img> tag contained the literal string
    "None".  It is now reset to an empty string instead.  Also builds the
    result with join() rather than repeated string concatenation.
    """
    tags = []
    first = 'class="first"'
    for p in Gallery.objects.get(title=gallery_title).public():
        tags.append(u'<img src="%s" alt="%s" height="%s" width="%s" %s />' % (
            p.get_display_url(), p.title, height, width, first))
        first = ''
    return ''.join(tags)
@register.tag
def get_photo(parser, token):
    """Get a single photo from the photologue library and return the img tag to display it.

    Takes 3 args:
    - the photo to display: the slug of a photo, or a variable holding a
      Photo instance or an integer photo id.
    - the photosize to use.
    - a CSS class to apply to the img tag.
    """
    try:
        # Split the contents of the tag, i.e. tag name + arguments.
        tag_name, photo, photosize, css_class = token.split_contents()
    except ValueError:
        # BUG FIX: token.contents[0] is the first *character* of the tag
        # contents; report the actual tag name instead.
        msg = '%r tag requires 3 arguments' % token.contents.split()[0]
        raise template.TemplateSyntaxError(msg)
    return PhotoNode(photo, photosize[1:-1], css_class[1:-1])
class PhotoNode(template.Node):
    """Render one photologue photo as an <img> tag.

    ``photo`` is either a literal slug or a template variable resolving to
    a Photo instance or slug; ``photosize`` selects the
    ``get_<photosize>_url`` accessor; ``css_class`` is applied to the tag.
    Renders nothing for missing or non-public photos.
    """

    def __init__(self, photo, photosize, css_class):
        self.photo = photo
        self.photosize = photosize
        self.css_class = css_class

    def render(self, context):
        # BUG FIX: the original bare ``except:`` swallowed *every* error
        # (including programming errors); only a failed variable lookup
        # should fall back to treating the raw token as a slug.
        try:
            a = template.Variable(self.photo).resolve(context)
        except template.VariableDoesNotExist:
            a = self.photo
        if isinstance(a, Photo):
            p = a
        else:
            try:
                p = Photo.objects.get(slug=a)
            except Photo.DoesNotExist:
                # Fail silently, per Django template-tag convention.
                return None
        if not p.is_public:
            return None
        func = getattr(p, 'get_%s_url' % (self.photosize), None)
        if func is None:
            return 'A "%s" photo size has not been defined.' % (self.photosize)
        else:
            return u'<img class="%s" src="%s" alt="%s" />' % (self.css_class, func(), p.title)
@register.tag
def get_rotating_photo(parser, token):
    """Pick at random a photo from a given photologue gallery and return the img tag to display it.

    Takes 3 args:
    - the gallery to pick a photo from: a gallery slug, or a variable
      holding a Gallery instance or a gallery slug.
    - the photosize to use.
    - a CSS class to apply to the img tag.
    """
    try:
        # Split the contents of the tag, i.e. tag name + arguments.
        tag_name, gallery, photosize, css_class = token.split_contents()
    except ValueError:
        # BUG FIX: token.contents[0] is the first *character* of the tag
        # contents; report the actual tag name instead.
        msg = '%r tag requires 3 arguments' % token.contents.split()[0]
        raise template.TemplateSyntaxError(msg)
    return PhotoGalleryNode(gallery, photosize[1:-1], css_class[1:-1])
class PhotoGalleryNode(template.Node):
    """Render a randomly chosen public photo from a gallery as an <img> tag."""

    def __init__(self, gallery, photosize, css_class):
        self.gallery = gallery
        self.photosize = photosize
        self.css_class = css_class

    def render(self, context):
        # BUG FIX: template.resolve_variable() was deprecated and removed
        # from Django (1.9); use template.Variable, the same API the
        # PhotoNode tag already uses.  Also narrow the bare ``except:`` to
        # the failed-lookup case only.
        try:
            a = template.Variable(self.gallery).resolve(context)
        except template.VariableDoesNotExist:
            a = self.gallery
        if isinstance(a, Gallery):
            g = a
        else:
            try:
                g = Gallery.objects.get(slug=a)
            except Gallery.DoesNotExist:
                return None
        photos = g.public()
        if len(photos) > 1:
            r = random.randint(0, len(photos) - 1)
            p = photos[r]
        elif len(photos) == 1:
            p = photos[0]
        else:
            # Empty gallery: render nothing.
            return None
        func = getattr(p, 'get_%s_url' % (self.photosize), None)
        if func is None:
            return 'A "%s" photo size has not been defined.' % (self.photosize)
        else:
            return u'<img class="%s" src="%s" alt="%s" />' % (self.css_class, func(), p.title)
| 33.015152
| 119
| 0.61129
|
import random
from django import template
from ..models import Gallery, Photo
register = template.Library()
@register.inclusion_tag('photologue/tags/next_in_gallery.html')
def next_in_gallery(photo, gallery):
return {'photo': photo.get_next_in_gallery(gallery)}
@register.inclusion_tag('photologue/tags/prev_in_gallery.html')
def previous_in_gallery(photo, gallery):
return {'photo': photo.get_previous_in_gallery(gallery)}
@register.simple_tag
def cycle_lite_gallery(gallery_title, height, width):
html = ""
first = 'class="first"'
for p in Gallery.objects.get(title=gallery_title).public():
html += u'<img src="%s" alt="%s" height="%s" width="%s" %s />' % (
p.get_display_url(), p.title, height, width, first)
first = None
return html
@register.tag
def get_photo(parser, token):
try:
tag_name, photo, photosize, css_class = token.split_contents()
except ValueError:
msg = '%r tag requires 3 arguments' % token.contents[0]
raise template.TemplateSyntaxError(msg)
return PhotoNode(photo, photosize[1:-1], css_class[1:-1])
class PhotoNode(template.Node):
def __init__(self, photo, photosize, css_class):
self.photo = photo
self.photosize = photosize
self.css_class = css_class
def render(self, context):
try:
a = template.Variable(self.photo).resolve(context)
except:
a = self.photo
if isinstance(a, Photo):
p = a
else:
try:
p = Photo.objects.get(slug=a)
except Photo.DoesNotExist:
return None
if not p.is_public:
return None
func = getattr(p, 'get_%s_url' % (self.photosize), None)
if func is None:
return 'A "%s" photo size has not been defined.' % (self.photosize)
else:
return u'<img class="%s" src="%s" alt="%s" />' % (self.css_class, func(), p.title)
@register.tag
def get_rotating_photo(parser, token):
try:
tag_name, gallery, photosize, css_class = token.split_contents()
except ValueError:
msg = '%r tag requires 3 arguments' % token.contents[0]
raise template.TemplateSyntaxError(msg)
return PhotoGalleryNode(gallery, photosize[1:-1], css_class[1:-1])
class PhotoGalleryNode(template.Node):
def __init__(self, gallery, photosize, css_class):
self.gallery = gallery
self.photosize = photosize
self.css_class = css_class
def render(self, context):
try:
a = template.resolve_variable(self.gallery, context)
except:
a = self.gallery
if isinstance(a, Gallery):
g = a
else:
try:
g = Gallery.objects.get(slug=a)
except Gallery.DoesNotExist:
return None
photos = g.public()
if len(photos) > 1:
r = random.randint(0, len(photos) - 1)
p = photos[r]
elif len(photos) == 1:
p = photos[0]
else:
return None
func = getattr(p, 'get_%s_url' % (self.photosize), None)
if func is None:
return 'A "%s" photo size has not been defined.' % (self.photosize)
else:
return u'<img class="%s" src="%s" alt="%s" />' % (self.css_class, func(), p.title)
| true
| true
|
1c40528b610cb5e8d43e5244e7310c3a82c04fe2
| 6,169
|
py
|
Python
|
examples/lm1b/unit_test/sampled_softmax_utest.py
|
yangkevin2/count-sketch
|
164f1aaef662935043cb628421c6185b5ea6c654
|
[
"Apache-2.0"
] | 109
|
2017-12-14T10:52:19.000Z
|
2022-03-26T21:22:10.000Z
|
lm/unit_test/sampled_softmax_utest.py
|
JoPfeiff/PyTorch_GBW_LM
|
74f180ff1cecd54f6810d139d9b816aa97abd84a
|
[
"Apache-2.0"
] | 15
|
2017-12-10T16:14:10.000Z
|
2021-12-30T07:20:48.000Z
|
lm/unit_test/sampled_softmax_utest.py
|
JoPfeiff/PyTorch_GBW_LM
|
74f180ff1cecd54f6810d139d9b816aa97abd84a
|
[
"Apache-2.0"
] | 19
|
2018-01-05T05:09:51.000Z
|
2021-07-21T03:44:16.000Z
|
import unittest
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import model
from log_uniform import LogUniformSampler
def EXPECT_NEAR(x, y, epsilon):
    """Return True when x and y agree elementwise to within epsilon."""
    deviation = np.abs(np.asarray(x) - np.asarray(y))
    return np.all(deviation <= epsilon)
class ComputeSampledLogitsTest(unittest.TestCase):
    """Numerical tests for model.SampledSoftmax against a NumPy reference.

    NOTE(review): both tests move tensors with .cuda(), so they require a
    CUDA-capable device to run.
    """

    def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels, sampled, subtract_log_q):
        """Randomly generates input/output data for a single test case.

        Args:
            num_classes: int, number of embedding classes.
            dim: int, embedding dimension.
            batch_size: int, batch size.
            num_true: int, number of target classes per training example.
            labels: list of batch_size * num_true ints, the target classes.
            sampled: list of indices in [0, num_classes).
            subtract_log_q: bool, whether to subtract the log expected
                counts from the logits (mirrors the corresponding option of
                TensorFlow's _compute_sampled_logits()).

        Returns:
            Tuple (weights, biases, hidden_acts, sampled_vals, exp_logits,
            exp_labels): random embedding weights/biases, forward
            activations, a sampler-style (ids, true_exp, sampled_exp)
            tuple, and the expected logits/labels of shape
            [batch_size, num_true + len(sampled)].
        """
        weights = np.random.randn(num_classes, dim).astype(np.float32)
        biases = np.random.randn(num_classes).astype(np.float32)
        hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)
        # Expected counts are fixed at 0.5 so the log-q subtraction below
        # is deterministic.
        true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)
        sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)
        sampled_vals = (torch.LongTensor(sampled), torch.from_numpy(np.squeeze(true_exp)), torch.from_numpy(sampled_exp))
        sampled_w, sampled_b = weights[sampled], biases[sampled]
        true_w, true_b = weights[labels], biases[labels]
        # Reference computation of the true-class and sampled-class logits.
        true_logits = np.sum(hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape((batch_size, num_true, dim)), axis=2)
        true_b = true_b.reshape((batch_size, num_true))
        true_logits += true_b
        sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b
        if subtract_log_q:
            true_logits -= np.log(true_exp)
            sampled_logits -= np.log(sampled_exp[np.newaxis, :])
        exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)
        exp_labels = np.hstack((np.ones_like(true_logits) / num_true, np.zeros_like(sampled_logits)))
        return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels

    def test_SampledSoftmaxLoss(self):
        # A simple test to verify the numerics.
        def _SoftmaxCrossEntropyWithLogits(logits, targets):
            # logits, targets: float arrays of the same shape.
            assert logits.shape == targets.shape
            # Numerically stable softmax cross-entropy reference.
            stable_exp_logits = np.exp(logits - np.amax(logits, axis=1, keepdims=True))
            pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
            return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)
        np.random.seed(1000)
        num_classes = 5
        batch_size = 3
        nsampled = 4
        nhid = 10
        labels = [0, 1, 2]
        (weights, biases, hidden_acts, sampled_values, exp_logits, exp_labels) = self._GenerateTestData(
            num_classes=num_classes,
            dim=nhid,
            batch_size=batch_size,
            num_true=1,
            labels=labels,
            sampled=[1, 0, 2, 3],
            subtract_log_q=True)
        # Load the reference weights into the module under test.
        ss = model.SampledSoftmax(num_classes, nsampled, nhid, tied_weight=None)
        ss.params.weight.data = torch.from_numpy(weights)
        ss.params.bias.data = torch.from_numpy(biases)
        ss.params.cuda()
        hidden_acts = Variable(torch.from_numpy(hidden_acts)).cuda()
        labels = Variable(torch.LongTensor(labels)).cuda()
        logits, new_targets = ss.sampled(hidden_acts, labels, sampled_values)
        self.assertTrue(EXPECT_NEAR(exp_logits, logits.data.cpu().numpy(), 1e-4))
        criterion = nn.CrossEntropyLoss()
        loss = criterion(logits.view(-1, nsampled+1), new_targets)
        expected_sampled_softmax_loss = np.mean(_SoftmaxCrossEntropyWithLogits(exp_logits, exp_labels))
        self.assertTrue(EXPECT_NEAR(expected_sampled_softmax_loss, loss.item(), 1e-4))

    def test_AccidentalMatch(self):
        # Verifies that logits of sampled classes which accidentally match
        # a true label are pushed to -inf (softmax probability near 0).
        np.random.seed(1000)
        num_classes = 5
        batch_size = 3
        nsampled = 4
        nhid = 10
        labels = np.random.randint(low=0, high=num_classes, size=batch_size)
        (weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels) = self._GenerateTestData(
            num_classes=num_classes,
            dim=nhid,
            batch_size=batch_size,
            num_true=1,
            labels=labels,
            sampled=[1, 0, 2, 3],
            subtract_log_q=True)
        ss = model.SampledSoftmax(num_classes, nsampled, nhid, tied_weight=None)
        ss.params.weight.data = torch.from_numpy(weights)
        ss.params.bias.data = torch.from_numpy(biases)
        ss.params.cuda()
        hidden_acts = Variable(torch.from_numpy(hidden_acts)).cuda()
        labels = Variable(torch.LongTensor(labels)).cuda()
        sampler = LogUniformSampler(nsampled)
        sampled_values = sampler.sample(nsampled, labels.data.cpu().numpy())
        sample_ids, true_freq, sample_freq = sampled_values
        logits, new_targets = ss.sampled(hidden_acts, labels, sampled_values, remove_accidental_match=True)
        criterion = nn.CrossEntropyLoss()
        loss = criterion(logits.view(-1, nsampled+1), new_targets)
        np_logits = logits.data.cpu().numpy()
        for row in range(batch_size):
            label = labels[row]
            for col in range(nsampled):
                if sample_ids[col] == label:
                    # col+1 because column 0 holds the true-class logit.
                    self.assertTrue(EXPECT_NEAR(np.exp(np_logits[row, col+1]), 0, 1e-4))
if __name__ == '__main__':
unittest.main()
| 41.126667
| 121
| 0.702869
|
import unittest
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import model
from log_uniform import LogUniformSampler
def EXPECT_NEAR(x, y, epsilon):
return np.all(abs(x - y) <= epsilon)
class ComputeSampledLogitsTest(unittest.TestCase):
def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels, sampled, subtract_log_q):
weights = np.random.randn(num_classes, dim).astype(np.float32)
biases = np.random.randn(num_classes).astype(np.float32)
hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)
true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)
sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)
sampled_vals = (torch.LongTensor(sampled), torch.from_numpy(np.squeeze(true_exp)), torch.from_numpy(sampled_exp))
sampled_w, sampled_b = weights[sampled], biases[sampled]
true_w, true_b = weights[labels], biases[labels]
true_logits = np.sum(hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape((batch_size, num_true, dim)), axis=2)
true_b = true_b.reshape((batch_size, num_true))
true_logits += true_b
sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b
if subtract_log_q:
true_logits -= np.log(true_exp)
sampled_logits -= np.log(sampled_exp[np.newaxis, :])
exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)
exp_labels = np.hstack((np.ones_like(true_logits) / num_true, np.zeros_like(sampled_logits)))
return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels
def test_SampledSoftmaxLoss(self):
def _SoftmaxCrossEntropyWithLogits(logits, targets):
assert logits.shape == targets.shape
stable_exp_logits = np.exp(logits - np.amax(logits, axis=1, keepdims=True))
pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)
np.random.seed(1000)
num_classes = 5
batch_size = 3
nsampled = 4
nhid = 10
labels = [0, 1, 2]
(weights, biases, hidden_acts, sampled_values, exp_logits, exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=nhid,
batch_size=batch_size,
num_true=1,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=True)
ss = model.SampledSoftmax(num_classes, nsampled, nhid, tied_weight=None)
ss.params.weight.data = torch.from_numpy(weights)
ss.params.bias.data = torch.from_numpy(biases)
ss.params.cuda()
hidden_acts = Variable(torch.from_numpy(hidden_acts)).cuda()
labels = Variable(torch.LongTensor(labels)).cuda()
logits, new_targets = ss.sampled(hidden_acts, labels, sampled_values)
self.assertTrue(EXPECT_NEAR(exp_logits, logits.data.cpu().numpy(), 1e-4))
criterion = nn.CrossEntropyLoss()
loss = criterion(logits.view(-1, nsampled+1), new_targets)
expected_sampled_softmax_loss = np.mean(_SoftmaxCrossEntropyWithLogits(exp_logits, exp_labels))
self.assertTrue(EXPECT_NEAR(expected_sampled_softmax_loss, loss.item(), 1e-4))
def test_AccidentalMatch(self):
np.random.seed(1000)
num_classes = 5
batch_size = 3
nsampled = 4
nhid = 10
labels = np.random.randint(low=0, high=num_classes, size=batch_size)
(weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels) = self._GenerateTestData(
num_classes=num_classes,
dim=nhid,
batch_size=batch_size,
num_true=1,
labels=labels,
sampled=[1, 0, 2, 3],
subtract_log_q=True)
ss = model.SampledSoftmax(num_classes, nsampled, nhid, tied_weight=None)
ss.params.weight.data = torch.from_numpy(weights)
ss.params.bias.data = torch.from_numpy(biases)
ss.params.cuda()
hidden_acts = Variable(torch.from_numpy(hidden_acts)).cuda()
labels = Variable(torch.LongTensor(labels)).cuda()
sampler = LogUniformSampler(nsampled)
sampled_values = sampler.sample(nsampled, labels.data.cpu().numpy())
sample_ids, true_freq, sample_freq = sampled_values
logits, new_targets = ss.sampled(hidden_acts, labels, sampled_values, remove_accidental_match=True)
criterion = nn.CrossEntropyLoss()
loss = criterion(logits.view(-1, nsampled+1), new_targets)
np_logits = logits.data.cpu().numpy()
for row in range(batch_size):
label = labels[row]
for col in range(nsampled):
if sample_ids[col] == label:
self.assertTrue(EXPECT_NEAR(np.exp(np_logits[row, col+1]), 0, 1e-4))
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c405311e59e59e3362a45d3645408633424900f
| 5,077
|
py
|
Python
|
ipython_odoo/tracer.py
|
voronind/ipython-odoo
|
48fcc7232beb9f7459ec1f0d7e0cf9cc41f85475
|
[
"MIT"
] | null | null | null |
ipython_odoo/tracer.py
|
voronind/ipython-odoo
|
48fcc7232beb9f7459ec1f0d7e0cf9cc41f85475
|
[
"MIT"
] | null | null | null |
ipython_odoo/tracer.py
|
voronind/ipython-odoo
|
48fcc7232beb9f7459ec1f0d7e0cf9cc41f85475
|
[
"MIT"
] | null | null | null |
import inspect
import logging
import re
import sys
from contextlib import contextmanager
from functools import wraps
from odoo.models import BaseModel
from .hierarchy import recordset_models
from functools import reduce
INTEREST_METHODS = {
'sale.order': {
'_prepare',
'search',
},
'all': {
'create',
'write',
'unlink',
},
}
logger = logging.getLogger('debug')
def path_model_method(method):
    """Wrap a model method so every call emits a debug log entry.

    The wrapper preserves the original method's metadata via
    functools.wraps and forwards all arguments unchanged.
    """
    @wraps(method)
    def wrapped_method(self, *args, **kwargs):
        logger.debug('soma')  # marker entry; the call itself is unchanged
        return method(self, *args, **kwargs)
    return wrapped_method
def patch_model_methods(env):
    """Look up the methods listed in INTEREST_METHODS on their models.

    The 'all' entry is removed from INTEREST_METHODS (module-level side
    effect, kept from the original) and the remaining model-specific
    methods are resolved on each model class.  The actual patching is
    still a stub (see the TODO below).
    """
    any_model_methods = INTEREST_METHODS.pop('all', set())
    # BUG FIX: iterating a dict yields keys only, so unpacking
    # "model_name, method_names" raised ValueError; .items() is required.
    for model_name, method_names in INTEREST_METHODS.items():
        models = recordset_models(env[model_name])
        for method_name in method_names:
            method = models.__dict__.get(method_name)
            if method:
                pass
                # TODO: wrap `method` with path_model_method to TRACE calls
all_method_names = reduce(set.union, list(INTEREST_METHODS.values()))
def get_Model_methods():
    """Collect the names of all methods and properties defined on BaseModel."""
    instancemethod = type(BaseModel.create)
    return {
        attr_name
        for attr_name in dir(BaseModel)
        if isinstance(getattr(BaseModel, attr_name), (instancemethod, property))
    }
def get_skip_method_names():
    """Build the set of method names the tracer should never report.

    Starts from every generic BaseModel method/property, re-enables the
    two that *are* interesting ('create' and 'search'), then adds known
    noisy framework and project helpers.
    """
    names = get_Model_methods()
    # These generic methods are still worth tracing.
    names.difference_update({'create', 'search'})
    # mail-framework plumbing that would drown the trace in noise.
    mail_noise = {
        'message_get_reply_to',
        'message_post',
        '_get_tracked_fields',
        'message_track',
        '_message_track',
        '_track_subtype',
        '_message_track_post_template',
        '_message_get_auto_subscribe_fields',
        'message_auto_subscribe',
        '_message_post_process_attachments',
        '_track_template',
        '_message_track_get_changes',
        'message_post_with_view',
        'message_post_with_template',
    }
    # Internals plus sale/HZ project helpers to ignore.
    misc_noise = {
        '<lambda>',
        '_cache',
        'ids',
        'compute_actual_finish_date',
        'check_done',
        'loop',
        'aggregate',
    }
    return names | mail_noise | misc_noise
skip_methods = get_skip_method_names()
def get_self(frame):
    """Decide whether *frame* is an interesting Odoo model-method call.

    Returns:
        (recordset, formatted_args) when the frame is a call on a
        BaseModel recordset worth tracing, otherwise (None, None).
        ``formatted_args`` is the inspect-formatted argument string with
        the leading ``self`` stripped.
    """
    func_name = frame.f_code.co_name
    # Skip dunders, the explicit skip list, and compute methods.
    if func_name.startswith('__') and func_name.endswith('__'):
        return None, None
    if func_name in skip_methods:
        return None, None
    if func_name.startswith('_compute_'):
        return None, None
    try:
        args_info = inspect.getargvalues(frame)
    except IndexError:
        # getargvalues can fail on some synthetic frames.
        return None, None
    self = 'self' in args_info[0] and args_info.locals.get('self')
    if not isinstance(self, BaseModel):
        return None, None
    # Ignore calls originating from framework modules.
    if self._original_module in {
        'base',
        'mail',
        'decimal_precision',
        'bus',
        'account',
    }:
        return None, None
    try:
        # Drop 'self' before formatting so the trace shows only real args.
        if args_info[0] and args_info[0][0] == 'self':
            args_info[0].pop(0)
        args = inspect.formatargvalues(*args_info)
    except KeyError:
        print(frame.f_code.co_name)
        print(args_info)
        # BUG FIX: 'raise KeyError' created a new, empty exception and
        # discarded the original message/traceback; re-raise instead.
        raise
    return self, args
class Tracer(object):
    """Context manager that prints an indented call/return trace of Odoo
    model methods, using sys.settrace and get_self() as the filter.
    """

    def __init__(self):
        # Current nesting depth; None until the first traced call.
        self.level = None
    def __enter__(self):
        sys.settrace(self.trace)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always uninstall the trace function on exit.
        sys.settrace(None)
    def print_indent(self, line, newline=False):
        """Print *line* indented to the current nesting level."""
        indent = '    ' * self.level
        if newline:
            print()
        print('{}{}'.format(indent, line))
    def print_return_indent(self, line):
        """Print *line* one level deeper than the current call."""
        indent = '    ' * (self.level + 1)
        print('{}{}'.format(indent, line))
    def print_return(self, arg):
        """Pretty-print a return value; dicts get one key per line."""
        if isinstance(arg, dict):
            self.print_return_indent('return {')
            for key, value in sorted(arg.items()):
                self.print_return_indent('    {!r}: {}'.format(key, value))
            self.print_return_indent('    }')
        else:
            self.print_return_indent('{} {}'.format('return', arg))
    def trace(self, frame, event, arg):
        """sys.settrace callback: report interesting calls and returns."""
        if event == 'call':
            rs, args = get_self(frame)
            if rs is not None:
                # First interesting call starts at depth 0.
                if self.level is None:
                    self.level = 0
                else:
                    self.level += 1
                file_path = inspect.getsourcefile(frame.f_code)
                # Show the addon-relative path when the code lives in an
                # addons directory.
                match = re.match('.+/(?:addons|custom_addons)/(.+)', file_path)
                if match:
                    addon_model_path = match.groups()[0]
                    self.print_indent('# {}:{}'.format(addon_model_path, frame.f_lineno), newline=True)
                self.print_indent('{}.{}{}'.format(rs, frame.f_code.co_name, args))
                # Returning self.trace enables per-line/return events for
                # this frame.
                return self.trace
        elif event == 'return':
            rs, args = get_self(frame)
            if rs is not None:
                self.print_return(arg)
                if self.level:
                    self.level -= 1
| 23.835681
| 103
| 0.572385
|
import inspect
import logging
import re
import sys
from contextlib import contextmanager
from functools import wraps
from odoo.models import BaseModel
from .hierarchy import recordset_models
from functools import reduce
INTEREST_METHODS = {
'sale.order': {
'_prepare',
'search',
},
'all': {
'create',
'write',
'unlink',
},
}
logger = logging.getLogger('debug')
def path_model_method(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
logger.debug('soma')
return method(self, *args, **kwargs)
return wrapped_method
def patch_model_methods(env):
any_model_methods = INTEREST_METHODS.pop('all', set())
for model_name, method_names in INTEREST_METHODS:
models = recordset_models(env[model_name])
for method_name in method_names:
method = models.__dict__.get(method_name)
if method:
pass
all_method_names = reduce(set.union, list(INTEREST_METHODS.values()))
def get_Model_methods():
instancemethod = type(BaseModel.create)
method_names = set()
for attr_name in dir(BaseModel):
if isinstance(getattr(BaseModel, attr_name), (instancemethod, property)):
method_names.add(attr_name)
return method_names
def get_skip_method_names():
method_names = get_Model_methods()
method_names -= {
'create',
'search',
}
method_names |= {
'<lambda>',
'_cache',
'ids',
'message_get_reply_to',
'message_post',
'_get_tracked_fields',
'message_track',
'_message_track',
'_track_subtype',
'_message_track_post_template',
'_message_get_auto_subscribe_fields',
'message_auto_subscribe',
'_message_post_process_attachments',
'_track_template',
'_message_track_get_changes',
'message_post_with_view',
'message_post_with_template',
'compute_actual_finish_date',
'check_done',
'loop',
'aggregate',
}
return method_names
skip_methods = get_skip_method_names()
def get_self(frame):
func_name = frame.f_code.co_name
if func_name.startswith('__') and func_name.endswith('__'):
return None, None
if func_name in skip_methods:
return None, None
if func_name.startswith('_compute_'):
return None, None
try:
args_info = inspect.getargvalues(frame)
except IndexError:
return None, None
self = 'self' in args_info[0] and args_info.locals.get('self')
if not isinstance(self, BaseModel):
return None, None
if self._original_module in {
'base',
'mail',
'decimal_precision',
'bus',
'account',
}:
return None, None
try:
if args_info[0] and args_info[0][0] == 'self':
args_info[0].pop(0)
args = inspect.formatargvalues(*args_info)
except KeyError:
print(frame.f_code.co_name)
print(args_info)
raise KeyError
return self, args
class Tracer(object):
def __init__(self):
self.level = None
def __enter__(self):
sys.settrace(self.trace)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
sys.settrace(None)
def print_indent(self, line, newline=False):
indent = ' ' * self.level
if newline:
print()
print('{}{}'.format(indent, line))
def print_return_indent(self, line):
indent = ' ' * (self.level + 1)
print('{}{}'.format(indent, line))
def print_return(self, arg):
if isinstance(arg, dict):
self.print_return_indent('return {')
for key, value in sorted(arg.items()):
self.print_return_indent(' {!r}: {}'.format(key, value))
self.print_return_indent(' }')
else:
self.print_return_indent('{} {}'.format('return', arg))
def trace(self, frame, event, arg):
if event == 'call':
rs, args = get_self(frame)
if rs is not None:
if self.level is None:
self.level = 0
else:
self.level += 1
file_path = inspect.getsourcefile(frame.f_code)
match = re.match('.+/(?:addons|custom_addons)/(.+)', file_path)
if match:
addon_model_path = match.groups()[0]
self.print_indent('# {}:{}'.format(addon_model_path, frame.f_lineno), newline=True)
self.print_indent('{}.{}{}'.format(rs, frame.f_code.co_name, args))
return self.trace
elif event == 'return':
rs, args = get_self(frame)
if rs is not None:
self.print_return(arg)
if self.level:
self.level -= 1
| true
| true
|
1c4054afc1bb7fe4eaab14ed54d92e4a4cfc8153
| 412
|
py
|
Python
|
Model/Opportunity.py
|
AIESECMX/Vtiger_Automatization
|
9edf486af70ff5e353c1ad19ffceba03ef44006c
|
[
"MIT"
] | null | null | null |
Model/Opportunity.py
|
AIESECMX/Vtiger_Automatization
|
9edf486af70ff5e353c1ad19ffceba03ef44006c
|
[
"MIT"
] | null | null | null |
Model/Opportunity.py
|
AIESECMX/Vtiger_Automatization
|
9edf486af70ff5e353c1ad19ffceba03ef44006c
|
[
"MIT"
] | null | null | null |
class Opportunity:
    """Plain data container for a single EXPA opportunity record."""

    def __init__(self, expaid, title, url, programme, start_date, end_date,
                 appsClosed, created, updated, status, lc, enabler):
        # Identification and presentation fields.
        self.id = expaid
        self.title = title
        self.url = url
        self.programme = programme
        # Timeline of the opportunity.
        self.start_date = start_date
        self.end_date = end_date
        self.appsClosed = appsClosed
        self.created = created
        self.updated = updated
        # Workflow / ownership fields.
        self.status = status
        self.lc = lc
        self.enabler = enabler
| 20.6
| 113
| 0.764563
|
class Opportunity:
def __init__(self,expaid,title,url,programme,start_date,end_date,appsClosed,created,updated,status,lc,enabler):
self.id = expaid
self.title = title
self.url = url
self.programme = programme
self.start_date=start_date
self.end_date=end_date
self.appsClosed=appsClosed
self.created=created
self.updated=updated
self.status=status
self.lc =lc
self.enabler=enabler
| true
| true
|
1c4054c3a375c8595621946893894ac18fada8eb
| 6,327
|
py
|
Python
|
model/vaccination_preference_diagrams/model_phasediagram_numax_I0_1.py
|
lubo93/vaccination
|
4ddaf44455e72e9fc80cee03a6021f3ee754adfe
|
[
"MIT"
] | null | null | null |
model/vaccination_preference_diagrams/model_phasediagram_numax_I0_1.py
|
lubo93/vaccination
|
4ddaf44455e72e9fc80cee03a6021f3ee754adfe
|
[
"MIT"
] | null | null | null |
model/vaccination_preference_diagrams/model_phasediagram_numax_I0_1.py
|
lubo93/vaccination
|
4ddaf44455e72e9fc80cee03a6021f3ee754adfe
|
[
"MIT"
] | null | null | null |
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from lib.simulation import epidemic_model
from matplotlib import rcParams, colors
from matplotlib.colors import LinearSegmentedColormap
# customized settings
params = { # 'backend': 'ps',
'font.family': 'serif',
'font.serif': 'Latin Modern Roman',
'font.size': 10,
'axes.labelsize': 'medium',
'axes.titlesize': 'medium',
'legend.fontsize': 'medium',
'xtick.labelsize': 'small',
'ytick.labelsize': 'small',
'savefig.dpi': 150,
'text.usetex': True}
# tell matplotlib about your params
rcParams.update(params)
# set nice figure sizes
fig_width_pt = 2*245 # Get this from LaTeX using \showthe\columnwidth
golden_mean = (np.sqrt(5.) - 1.) / 2. # Aesthetic ratio
ratio = golden_mean
inches_per_pt = 1. / 72.27 # Convert pt to inches
fig_width = fig_width_pt * inches_per_pt # width in inches
fig_height = fig_width*ratio # height in inches
fig_size = [fig_width, 0.5*fig_width]
rcParams.update({'figure.figsize': fig_size})
### prime/boost protocols
# simulation parameters/initial conditions
I0_arr = np.linspace(1e-4,1e-1, 30)
nu_max_arr = np.linspace(0, 1e-1, 30)
f_arr = []
F_arr = []
R_0_arr = []
I0_ARR, NU_MAX = np.meshgrid(I0_arr,nu_max_arr)
beta = 3/14
for (I0,nu_max) in zip(np.ravel(I0_ARR),np.ravel(NU_MAX)):
# simulation parameters/initial conditions
# [beta, betap, betapp, beta_1, beta_1p, beta_1pp, \
# beta_2, beta_2p, beta_2pp, nu_1, nu_2, eta_1, eta_2, \
# gamma, gammap, gammapp, sigma, sigma_1, sigma_2, IFR, IFR1, IFR2, td]
params1 = [beta, beta/10, beta/20, beta/2, beta/10/2, beta/20/2, \
beta/10, beta/10/10, beta/20/10, nu_max, 0, \
1e-2, 3e-3, 1/14, 2/14, 4/14, 1/5, 1/5, 1/5, 1e-2, 1e-3, 1e-3, 21]
params2 = [beta, beta/10, beta/20, beta/2, beta/10/2, beta/20/2, \
beta/10, beta/10/10, beta/20/10, nu_max/2, nu_max/2, \
1e-2, 3e-3, 1/14, 2/14, 4/14, 1/5, 1/5, 1/5, 1e-2, 1e-3, 1e-3, 21]
# [S0, S0p, S0pp, E0, E0p, E0pp, I0, I0p, I0pp, R0, D0]
initial_conditions = [1-I0, 0, 0, 0, 0, 0, I0, 0, 0, 0, 0]
model1 = epidemic_model(params1,
initial_conditions,
time_step = 1e-1,
duration = 300,
Euler = False)
model1.simulate()
model2 = epidemic_model(params2,
initial_conditions,
time_step = 1e-1,
duration = 300,
Euler = False)
model2.simulate()
if model1.reproduction_number >= 1e2 and nu_max >= 1e2:
print(model2.delta_d, model1.delta_d)
print(model2.D_tot, model1.D_tot)
print(model2.vaccine_total, model1.vaccine_total)
print(model1.S+model1.Sp+model1.Spp+model1.I+model1.Ip+model1.Ipp+model1.R+model1.D)
print(model2.S+model2.Sp+model2.Spp+model2.I+model2.Ip+model2.Ipp+model2.R+model2.D)
fig, ax = plt.subplots()
plt.plot(model1.t_arr, model1.S_arr, label = r"$S(t)$")
plt.plot(model2.t_arr, model2.S_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.Sp_arr, label = r"$S^{\star}(t)$")
plt.plot(model2.t_arr, model2.Sp_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.Spp_arr, label = r"$S^{\star \star}(t)$")
plt.plot(model2.t_arr, model2.Spp_arr, '.', linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.I_arr, label = r"$I(t)$")
plt.plot(model2.t_arr, model2.I_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.Ip_arr, label = r"$I^{\star}(t)$")
plt.plot(model2.t_arr, model2.Ip_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.Ipp_arr, label = r"$I^{\star \star}(t)$")
plt.plot(model2.t_arr, model2.Ipp_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.R_arr, label = r"$R(t)$")
plt.plot(model2.t_arr, model2.R_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.D_arr, label = r"$D(t)$")
plt.plot(model2.t_arr, model2.D_arr, linestyle = '--', color = 'grey')
plt.legend(frameon = False, fontsize = 8, ncol = 2)
plt.xlabel(r"$t$")
plt.ylabel(r"proportion")
plt.ylim(-0.1,1)
plt.tight_layout()
plt.margins(0,0)
plt.savefig('SIR.png', dpi=480, bbox_inches = 'tight',
pad_inches = 0)
plt.show()
print(model2.delta_d)
f_arr.append((model2.delta_d-model1.delta_d)/max(model1.delta_d,model2.delta_d))
F_arr.append((model2.D_tot-model1.D_tot)/max(model1.D_tot,model2.D_tot))
R_0_arr.append(model1.reproduction_number)
f_arr = np.asarray(f_arr)
F_arr = np.asarray(F_arr)
R_0_arr = np.asarray(R_0_arr)
R_0 = R_0_arr.reshape(I0_ARR.shape)
f = f_arr.reshape(I0_ARR.shape)
F = F_arr.reshape(I0_ARR.shape)
print("f", f)
print("F", F)
#f = f < 0
#F = F < 0
#f = np.ma.masked_where(f == False, f)
#F = np.ma.masked_where(F == False, F)
cmap=LinearSegmentedColormap.from_list("", ["#b7241b", "w", "#265500"], N=128)
# set color for which f,F < 0 is True
# cmap = colors.ListedColormap(['#b7241b'])
# set color for which f,F > 0 is False
# cmap.set_bad(color='#265500')
fig, ax = plt.subplots(ncols = 2, constrained_layout = "True")
ax[0].set_title(r"$\delta(d_1,d_2)=(d_2-d_1)/\mathrm{max}(d_1,d_2)$")
cm1 = ax[0].pcolormesh(I0_ARR, NU_MAX, f, cmap=cmap, alpha = 1, linewidth=0, \
antialiased=True, vmin = -1, vmax = 1)
ax[0].axhline(y=0.013, xmin=0, xmax=1, ls="--", color="k")
ax[0].set_xlabel(r"$I(0)$")
ax[0].set_ylabel(r"$\nu_{\mathrm{max}}$")
ax[1].set_title(r"$\Delta(D_1,D_2)=(D_2-D_1)/\mathrm{max}(D_1,D_2)$")
cm2 = ax[1].pcolormesh(I0_ARR, NU_MAX, F, cmap=cmap, alpha = 1, linewidth=0, \
antialiased=True, vmin = -1, vmax = 1)
ax[1].axhline(y=0.013, xmin=0, xmax=1, ls="--", color="k")
ax[1].set_xlabel(r"$I(0)$")
ax[0].set_xlim([0,0.1])
ax[1].set_xlim([0,0.1])
ax[0].set_ylim([0,0.1])
ax[1].set_ylim([0,0.1])
ax[1].set_yticks([])
#fig.colorbar(cm1, ax=ax[0])
plt.colorbar(cm2, ax=ax[1], shrink=0.9)
plt.savefig("numax_I0_1.png", dpi = 300)
| 33.654255
| 92
| 0.607871
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from lib.simulation import epidemic_model
from matplotlib import rcParams, colors
from matplotlib.colors import LinearSegmentedColormap
params = {
'font.family': 'serif',
'font.serif': 'Latin Modern Roman',
'font.size': 10,
'axes.labelsize': 'medium',
'axes.titlesize': 'medium',
'legend.fontsize': 'medium',
'xtick.labelsize': 'small',
'ytick.labelsize': 'small',
'savefig.dpi': 150,
'text.usetex': True}
rcParams.update(params)
fig_width_pt = 2*245
golden_mean = (np.sqrt(5.) - 1.) / 2.
ratio = golden_mean
inches_per_pt = 1. / 72.27
fig_width = fig_width_pt * inches_per_pt
fig_height = fig_width*ratio
fig_size = [fig_width, 0.5*fig_width]
rcParams.update({'figure.figsize': fig_size})
rr = np.linspace(0, 1e-1, 30)
f_arr = []
F_arr = []
R_0_arr = []
I0_ARR, NU_MAX = np.meshgrid(I0_arr,nu_max_arr)
beta = 3/14
for (I0,nu_max) in zip(np.ravel(I0_ARR),np.ravel(NU_MAX)):
params1 = [beta, beta/10, beta/20, beta/2, beta/10/2, beta/20/2, \
beta/10, beta/10/10, beta/20/10, nu_max, 0, \
1e-2, 3e-3, 1/14, 2/14, 4/14, 1/5, 1/5, 1/5, 1e-2, 1e-3, 1e-3, 21]
params2 = [beta, beta/10, beta/20, beta/2, beta/10/2, beta/20/2, \
beta/10, beta/10/10, beta/20/10, nu_max/2, nu_max/2, \
1e-2, 3e-3, 1/14, 2/14, 4/14, 1/5, 1/5, 1/5, 1e-2, 1e-3, 1e-3, 21]
initial_conditions = [1-I0, 0, 0, 0, 0, 0, I0, 0, 0, 0, 0]
model1 = epidemic_model(params1,
initial_conditions,
time_step = 1e-1,
duration = 300,
Euler = False)
model1.simulate()
model2 = epidemic_model(params2,
initial_conditions,
time_step = 1e-1,
duration = 300,
Euler = False)
model2.simulate()
if model1.reproduction_number >= 1e2 and nu_max >= 1e2:
print(model2.delta_d, model1.delta_d)
print(model2.D_tot, model1.D_tot)
print(model2.vaccine_total, model1.vaccine_total)
print(model1.S+model1.Sp+model1.Spp+model1.I+model1.Ip+model1.Ipp+model1.R+model1.D)
print(model2.S+model2.Sp+model2.Spp+model2.I+model2.Ip+model2.Ipp+model2.R+model2.D)
fig, ax = plt.subplots()
plt.plot(model1.t_arr, model1.S_arr, label = r"$S(t)$")
plt.plot(model2.t_arr, model2.S_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.Sp_arr, label = r"$S^{\star}(t)$")
plt.plot(model2.t_arr, model2.Sp_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.Spp_arr, label = r"$S^{\star \star}(t)$")
plt.plot(model2.t_arr, model2.Spp_arr, '.', linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.I_arr, label = r"$I(t)$")
plt.plot(model2.t_arr, model2.I_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.Ip_arr, label = r"$I^{\star}(t)$")
plt.plot(model2.t_arr, model2.Ip_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.Ipp_arr, label = r"$I^{\star \star}(t)$")
plt.plot(model2.t_arr, model2.Ipp_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.R_arr, label = r"$R(t)$")
plt.plot(model2.t_arr, model2.R_arr, linestyle = '--', color = 'grey')
plt.plot(model1.t_arr, model1.D_arr, label = r"$D(t)$")
plt.plot(model2.t_arr, model2.D_arr, linestyle = '--', color = 'grey')
plt.legend(frameon = False, fontsize = 8, ncol = 2)
plt.xlabel(r"$t$")
plt.ylabel(r"proportion")
plt.ylim(-0.1,1)
plt.tight_layout()
plt.margins(0,0)
plt.savefig('SIR.png', dpi=480, bbox_inches = 'tight',
pad_inches = 0)
plt.show()
print(model2.delta_d)
f_arr.append((model2.delta_d-model1.delta_d)/max(model1.delta_d,model2.delta_d))
F_arr.append((model2.D_tot-model1.D_tot)/max(model1.D_tot,model2.D_tot))
R_0_arr.append(model1.reproduction_number)
f_arr = np.asarray(f_arr)
F_arr = np.asarray(F_arr)
R_0_arr = np.asarray(R_0_arr)
R_0 = R_0_arr.reshape(I0_ARR.shape)
f = f_arr.reshape(I0_ARR.shape)
F = F_arr.reshape(I0_ARR.shape)
print("f", f)
print("F", F)
cmap=LinearSegmentedColormap.from_list("", ["#b7241b", "w", "#265500"], N=128)
fig, ax = plt.subplots(ncols = 2, constrained_layout = "True")
ax[0].set_title(r"$\delta(d_1,d_2)=(d_2-d_1)/\mathrm{max}(d_1,d_2)$")
cm1 = ax[0].pcolormesh(I0_ARR, NU_MAX, f, cmap=cmap, alpha = 1, linewidth=0, \
antialiased=True, vmin = -1, vmax = 1)
ax[0].axhline(y=0.013, xmin=0, xmax=1, ls="--", color="k")
ax[0].set_xlabel(r"$I(0)$")
ax[0].set_ylabel(r"$\nu_{\mathrm{max}}$")
ax[1].set_title(r"$\Delta(D_1,D_2)=(D_2-D_1)/\mathrm{max}(D_1,D_2)$")
cm2 = ax[1].pcolormesh(I0_ARR, NU_MAX, F, cmap=cmap, alpha = 1, linewidth=0, \
antialiased=True, vmin = -1, vmax = 1)
ax[1].axhline(y=0.013, xmin=0, xmax=1, ls="--", color="k")
ax[1].set_xlabel(r"$I(0)$")
ax[0].set_xlim([0,0.1])
ax[1].set_xlim([0,0.1])
ax[0].set_ylim([0,0.1])
ax[1].set_ylim([0,0.1])
ax[1].set_yticks([])
plt.colorbar(cm2, ax=ax[1], shrink=0.9)
plt.savefig("numax_I0_1.png", dpi = 300)
| true
| true
|
1c40556aaae46f312fe9acfff59f8c7132fd844b
| 8,588
|
py
|
Python
|
edx_rest_framework_extensions/auth/jwt/authentication.py
|
openedx/edx-drf-extensions
|
f923f48da8a639f8037d917264057e456785a0e7
|
[
"Apache-2.0"
] | null | null | null |
edx_rest_framework_extensions/auth/jwt/authentication.py
|
openedx/edx-drf-extensions
|
f923f48da8a639f8037d917264057e456785a0e7
|
[
"Apache-2.0"
] | 1
|
2022-02-11T14:47:15.000Z
|
2022-02-11T14:47:15.000Z
|
edx_rest_framework_extensions/auth/jwt/authentication.py
|
openedx/edx-drf-extensions
|
f923f48da8a639f8037d917264057e456785a0e7
|
[
"Apache-2.0"
] | null | null | null |
""" JWT Authentication class. """
import logging
import jwt
from django.contrib.auth import get_user_model
from django.middleware.csrf import CsrfViewMiddleware
from edx_django_utils.monitoring import set_custom_attribute
from rest_framework import exceptions
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from edx_rest_framework_extensions.auth.jwt.constants import USE_JWT_COOKIE_HEADER
from edx_rest_framework_extensions.auth.jwt.decoder import configured_jwt_decode_handler
from edx_rest_framework_extensions.settings import get_setting
logger = logging.getLogger(__name__)
class CSRFCheck(CsrfViewMiddleware):
def _reject(self, request, reason):
# Return the failure reason instead of an HttpResponse
return reason
class JwtAuthentication(JSONWebTokenAuthentication):
"""
JSON Web Token based authentication.
This authentication class is useful for authenticating a JWT using a secret key. Clients should authenticate by
passing the token key in the "Authorization" HTTP header, prepended with the string `"JWT "`.
This class relies on the JWT_AUTH being configured for the application as well as JWT_PAYLOAD_USER_ATTRIBUTES
being configured in the EDX_DRF_EXTENSIONS config.
At a minimum, the JWT payload must contain a username. If an email address
is provided in the payload, it will be used to update the retrieved user's
email address associated with that username.
Example Header:
Authorization: JWT eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJmYzJiNzIwMTE0YmIwN2I0NjVlODQzYTc0ZWM2ODNlNiIs
ImFkbWluaXN0cmF0b3IiOmZhbHNlLCJuYW1lIjoiaG9ub3IiLCJleHA.QHDXdo8gDJ5p9uOErTLZtl2HK_61kgLs71VHp6sLx8rIqj2tt9yCfc_0
JUZpIYMkEd38uf1vj-4HZkzeNBnZZZ3Kdvq7F8ZioREPKNyEVSm2mnzl1v49EthehN9kwfUgFgPXfUh-pCvLDqwCCTdAXMcTJ8qufzEPTYYY54lY
"""
def get_jwt_claim_attribute_map(self):
""" Returns a mapping of JWT claims to user model attributes.
Returns
dict
"""
return get_setting('JWT_PAYLOAD_USER_ATTRIBUTE_MAPPING')
def get_jwt_claim_mergeable_attributes(self):
""" Returns a list of user model attributes that should be merged into from the JWT.
Returns
list
"""
return get_setting('JWT_PAYLOAD_MERGEABLE_USER_ATTRIBUTES')
def authenticate(self, request):
try:
user_and_auth = super().authenticate(request)
# Unauthenticated, CSRF validation not required
if not user_and_auth:
return user_and_auth
# Not using JWT cookies, CSRF validation not required
use_jwt_cookie_requested = request.META.get(USE_JWT_COOKIE_HEADER)
if not use_jwt_cookie_requested:
return user_and_auth
self.enforce_csrf(request)
# CSRF passed validation with authenticated user
return user_and_auth
except jwt.InvalidTokenError as token_error:
# Note: I think this case is not used, but will monitor the custom attribute to verify.
set_custom_attribute('jwt_auth_failed', 'InvalidTokenError:{}'.format(repr(token_error)))
raise exceptions.AuthenticationFailed() from token_error
except Exception as exception:
# Errors in production do not need to be logged (as they may be noisy),
# but debug logging can help quickly resolve issues during development.
logger.debug('Failed JWT Authentication,', exc_info=exception)
# Note: I think this case should only include AuthenticationFailed and PermissionDenied,
# but will monitor the custom attribute to verify.
set_custom_attribute('jwt_auth_failed', 'Exception:{}'.format(repr(exception)))
raise
def authenticate_credentials(self, payload):
"""Get or create an active user with the username contained in the payload."""
# TODO it would be good to refactor this heavily-nested function.
# pylint: disable=too-many-nested-blocks
username = payload.get('preferred_username') or payload.get('username')
if username is None:
raise exceptions.AuthenticationFailed('JWT must include a preferred_username or username claim!')
try:
user, __ = get_user_model().objects.get_or_create(username=username)
attributes_updated = False
attribute_map = self.get_jwt_claim_attribute_map()
attributes_to_merge = self.get_jwt_claim_mergeable_attributes()
for claim, attr in attribute_map.items():
payload_value = payload.get(claim)
if attr in attributes_to_merge:
# Merge new values that aren't already set in the user dictionary
if not payload_value:
continue
current_value = getattr(user, attr, None)
if current_value:
for (key, value) in payload_value.items():
if key in current_value:
if current_value[key] != value:
logger.info(
'Updating attribute %s[%s] for user %s with value %s',
attr,
key,
user.id,
value,
)
current_value[key] = value
attributes_updated = True
else:
logger.info(
'Adding attribute %s[%s] for user %s with value %s',
attr,
key,
user.id,
value,
)
current_value[key] = value
attributes_updated = True
else:
logger.info('Updating attribute %s for user %s with value %s', attr, user.id, payload_value)
setattr(user, attr, payload_value)
attributes_updated = True
else:
if getattr(user, attr) != payload_value and payload_value is not None:
logger.info('Updating attribute %s for user %s with value %s', attr, user.id, payload_value)
setattr(user, attr, payload_value)
attributes_updated = True
if attributes_updated:
user.save()
except Exception as authentication_error:
msg = 'User retrieval failed.'
logger.exception(msg)
raise exceptions.AuthenticationFailed(msg) from authentication_error
return user
def enforce_csrf(self, request):
"""
Enforce CSRF validation for Jwt cookie authentication.
Copied from SessionAuthentication.
See https://github.com/encode/django-rest-framework/blob/3f19e66d9f2569895af6e91455e5cf53b8ce5640/rest_framework/authentication.py#L131-L141 # noqa E501 line too long
"""
check = CSRFCheck() # pylint: disable=no-value-for-parameter
# populates request.META['CSRF_COOKIE'], which is used in process_view()
check.process_request(request)
reason = check.process_view(request, None, (), {})
if reason:
# CSRF failed, bail with explicit error message
raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)
def is_jwt_authenticated(request):
successful_authenticator = getattr(request, 'successful_authenticator', None)
if not isinstance(successful_authenticator, JSONWebTokenAuthentication):
return False
if not getattr(request, 'auth', None):
logger.error(
'Unexpected error: Used JwtAuthentication, '
'but the request auth attribute was not populated with the JWT.'
)
return False
return True
def get_decoded_jwt_from_auth(request):
"""
Grab jwt from request.auth in request if possible.
Returns a decoded jwt dict if it can be found.
Returns None if the jwt is not found.
"""
if not is_jwt_authenticated(request):
return None
return configured_jwt_decode_handler(request.auth)
| 43.593909
| 175
| 0.62599
|
import logging
import jwt
from django.contrib.auth import get_user_model
from django.middleware.csrf import CsrfViewMiddleware
from edx_django_utils.monitoring import set_custom_attribute
from rest_framework import exceptions
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from edx_rest_framework_extensions.auth.jwt.constants import USE_JWT_COOKIE_HEADER
from edx_rest_framework_extensions.auth.jwt.decoder import configured_jwt_decode_handler
from edx_rest_framework_extensions.settings import get_setting
logger = logging.getLogger(__name__)
class CSRFCheck(CsrfViewMiddleware):
def _reject(self, request, reason):
return reason
class JwtAuthentication(JSONWebTokenAuthentication):
def get_jwt_claim_attribute_map(self):
return get_setting('JWT_PAYLOAD_USER_ATTRIBUTE_MAPPING')
def get_jwt_claim_mergeable_attributes(self):
return get_setting('JWT_PAYLOAD_MERGEABLE_USER_ATTRIBUTES')
def authenticate(self, request):
try:
user_and_auth = super().authenticate(request)
if not user_and_auth:
return user_and_auth
use_jwt_cookie_requested = request.META.get(USE_JWT_COOKIE_HEADER)
if not use_jwt_cookie_requested:
return user_and_auth
self.enforce_csrf(request)
return user_and_auth
except jwt.InvalidTokenError as token_error:
set_custom_attribute('jwt_auth_failed', 'InvalidTokenError:{}'.format(repr(token_error)))
raise exceptions.AuthenticationFailed() from token_error
except Exception as exception:
logger.debug('Failed JWT Authentication,', exc_info=exception)
set_custom_attribute('jwt_auth_failed', 'Exception:{}'.format(repr(exception)))
raise
def authenticate_credentials(self, payload):
username = payload.get('preferred_username') or payload.get('username')
if username is None:
raise exceptions.AuthenticationFailed('JWT must include a preferred_username or username claim!')
try:
user, __ = get_user_model().objects.get_or_create(username=username)
attributes_updated = False
attribute_map = self.get_jwt_claim_attribute_map()
attributes_to_merge = self.get_jwt_claim_mergeable_attributes()
for claim, attr in attribute_map.items():
payload_value = payload.get(claim)
if attr in attributes_to_merge:
if not payload_value:
continue
current_value = getattr(user, attr, None)
if current_value:
for (key, value) in payload_value.items():
if key in current_value:
if current_value[key] != value:
logger.info(
'Updating attribute %s[%s] for user %s with value %s',
attr,
key,
user.id,
value,
)
current_value[key] = value
attributes_updated = True
else:
logger.info(
'Adding attribute %s[%s] for user %s with value %s',
attr,
key,
user.id,
value,
)
current_value[key] = value
attributes_updated = True
else:
logger.info('Updating attribute %s for user %s with value %s', attr, user.id, payload_value)
setattr(user, attr, payload_value)
attributes_updated = True
else:
if getattr(user, attr) != payload_value and payload_value is not None:
logger.info('Updating attribute %s for user %s with value %s', attr, user.id, payload_value)
setattr(user, attr, payload_value)
attributes_updated = True
if attributes_updated:
user.save()
except Exception as authentication_error:
msg = 'User retrieval failed.'
logger.exception(msg)
raise exceptions.AuthenticationFailed(msg) from authentication_error
return user
def enforce_csrf(self, request):
check = CSRFCheck() # pylint: disable=no-value-for-parameter
# populates request.META['CSRF_COOKIE'], which is used in process_view()
check.process_request(request)
reason = check.process_view(request, None, (), {})
if reason:
# CSRF failed, bail with explicit error message
raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)
def is_jwt_authenticated(request):
successful_authenticator = getattr(request, 'successful_authenticator', None)
if not isinstance(successful_authenticator, JSONWebTokenAuthentication):
return False
if not getattr(request, 'auth', None):
logger.error(
'Unexpected error: Used JwtAuthentication, '
'but the request auth attribute was not populated with the JWT.'
)
return False
return True
def get_decoded_jwt_from_auth(request):
if not is_jwt_authenticated(request):
return None
return configured_jwt_decode_handler(request.auth)
| true
| true
|
1c40578685ce22c8ce85a3699e7def9a9fb0953a
| 2,733
|
py
|
Python
|
adwords_python3_examples_10.1.0/v201802/campaign_management/graduate_trial.py
|
xyla-io/hazel
|
260ce906761d8b808c21ca61b44cc71ca3329e8c
|
[
"MIT"
] | null | null | null |
adwords_python3_examples_10.1.0/v201802/campaign_management/graduate_trial.py
|
xyla-io/hazel
|
260ce906761d8b808c21ca61b44cc71ca3329e8c
|
[
"MIT"
] | null | null | null |
adwords_python3_examples_10.1.0/v201802/campaign_management/graduate_trial.py
|
xyla-io/hazel
|
260ce906761d8b808c21ca61b44cc71ca3329e8c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Illustrates how to graduate a trial.
See the Campaign Drafts and Experiments guide for more information:
https://developers.google.com/adwords/api/docs/guides/campaign-drafts-experiments
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import uuid
from googleads import adwords
TRIAL_ID = 'INSERT_TRIAL_ID_HERE'
def main(client, trial_id):
# Initialize appropriate services.
trial_service = client.GetService('TrialService', version='v201802')
budget_service = client.GetService('BudgetService', version='v201802')
# To graduate a trial, you must specify a different budget from the base
# campaign. The base campaign (in order to have had a trial based on it) must
# have a non-shared budget, so it cannot be shared with the new independent
# campaign created by graduation.
budget = {
'name': 'Budget #%d' % uuid.uuid4(),
'amount': {'microAmount': 50000000},
'deliveryMethod': 'STANDARD'
}
budget_operation = {'operator': 'ADD', 'operand': budget}
# Add budget
budget_id = budget_service.mutate([budget_operation])['value'][0]['budgetId']
trial = {
'id': trial_id,
'budgetId': budget_id,
'status': 'GRADUATED'
}
trial_operation = {'operator': 'SET', 'operand': trial}
# Update the trial.
trial = trial_service.mutate([trial_operation])['value'][0]
# Graduation is a synchronous operation, so the campaign is already ready. If
# you promote instead, make sure to see the polling scheme demonstrated in
# add_trial.py to wait for the asynchronous operation to finish.
print(('Trial ID %d graduated. Campaign %d was given a new budget ID %d and '
'is no longer dependent on this trial.' % (
trial['id'], trial['trialCampaignId'], budget_id)))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, TRIAL_ID)
| 34.1625
| 81
| 0.72704
|
import uuid
from googleads import adwords
TRIAL_ID = 'INSERT_TRIAL_ID_HERE'
def main(client, trial_id):
trial_service = client.GetService('TrialService', version='v201802')
budget_service = client.GetService('BudgetService', version='v201802')
budget = {
'name': 'Budget #%d' % uuid.uuid4(),
'amount': {'microAmount': 50000000},
'deliveryMethod': 'STANDARD'
}
budget_operation = {'operator': 'ADD', 'operand': budget}
budget_id = budget_service.mutate([budget_operation])['value'][0]['budgetId']
trial = {
'id': trial_id,
'budgetId': budget_id,
'status': 'GRADUATED'
}
trial_operation = {'operator': 'SET', 'operand': trial}
trial = trial_service.mutate([trial_operation])['value'][0]
print(('Trial ID %d graduated. Campaign %d was given a new budget ID %d and '
'is no longer dependent on this trial.' % (
trial['id'], trial['trialCampaignId'], budget_id)))
if __name__ == '__main__':
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, TRIAL_ID)
| true
| true
|
1c4059486f0d38f447f040822c0a5cba9f9c481d
| 1,884
|
py
|
Python
|
mmtbx/command_line/refine_anomalous_substructure.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/command_line/refine_anomalous_substructure.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/command_line/refine_anomalous_substructure.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
from libtbx.utils import Sorry
import sys
def run(args, out=sys.stdout):
from mmtbx.refinement import anomalous_scatterer_groups
import mmtbx.command_line
master_phil = mmtbx.command_line.generate_master_phil_with_inputs(
phil_string="""
map_type = *anom_residual llg
.type = choice
exclude_waters = False
.type = bool
exclude_non_water_light_elements = True
.type = bool
n_cycles_max=None
.type = int
map_sigma_min = 3.0
.type = float
wavelength = None
.type = float
refine = *f_prime *f_double_prime
.type = choice(multi=True)
reset_water_u_iso = True
.type = bool
""",
enable_automatic_twin_detection=True)
usage_string = """\
mmtbx.refine_anomalous_substructure model.pdb data.mtz [options]
Iterative identification of anomalously scattering atoms in the anomalous
residual map (simple or Phaser LLG), followed by refinement of the anomalous
scattering coefficients. Intended as a diagnostic/development tool only!
"""
cmdline = mmtbx.command_line.load_model_and_data(
args=args,
master_phil=master_phil,
out=out,
process_pdb_file=False,
prefer_anomalous=True,
usage_string=usage_string)
fmodel = cmdline.fmodel
if (not fmodel.f_obs().anomalous_flag()):
raise Sorry("Anomalous data required.")
pdb_hierarchy = cmdline.pdb_hierarchy
params = cmdline.params
return anomalous_scatterer_groups.refine_anomalous_substructure(
fmodel=fmodel,
pdb_hierarchy=pdb_hierarchy,
wavelength=params.wavelength,
map_type=params.map_type,
exclude_waters=params.exclude_waters,
exclude_non_water_light_elements=params.exclude_non_water_light_elements,
n_cycles_max=params.n_cycles_max,
map_sigma_min=params.map_sigma_min,
refine=params.refine,
reset_water_u_iso=params.reset_water_u_iso,
out=out)
if (__name__ == "__main__"):
run(sys.argv[1:])
| 29.904762
| 77
| 0.771762
|
from __future__ import division
from libtbx.utils import Sorry
import sys
def run(args, out=sys.stdout):
from mmtbx.refinement import anomalous_scatterer_groups
import mmtbx.command_line
master_phil = mmtbx.command_line.generate_master_phil_with_inputs(
phil_string="""
map_type = *anom_residual llg
.type = choice
exclude_waters = False
.type = bool
exclude_non_water_light_elements = True
.type = bool
n_cycles_max=None
.type = int
map_sigma_min = 3.0
.type = float
wavelength = None
.type = float
refine = *f_prime *f_double_prime
.type = choice(multi=True)
reset_water_u_iso = True
.type = bool
""",
enable_automatic_twin_detection=True)
usage_string = """\
mmtbx.refine_anomalous_substructure model.pdb data.mtz [options]
Iterative identification of anomalously scattering atoms in the anomalous
residual map (simple or Phaser LLG), followed by refinement of the anomalous
scattering coefficients. Intended as a diagnostic/development tool only!
"""
cmdline = mmtbx.command_line.load_model_and_data(
args=args,
master_phil=master_phil,
out=out,
process_pdb_file=False,
prefer_anomalous=True,
usage_string=usage_string)
fmodel = cmdline.fmodel
if (not fmodel.f_obs().anomalous_flag()):
raise Sorry("Anomalous data required.")
pdb_hierarchy = cmdline.pdb_hierarchy
params = cmdline.params
return anomalous_scatterer_groups.refine_anomalous_substructure(
fmodel=fmodel,
pdb_hierarchy=pdb_hierarchy,
wavelength=params.wavelength,
map_type=params.map_type,
exclude_waters=params.exclude_waters,
exclude_non_water_light_elements=params.exclude_non_water_light_elements,
n_cycles_max=params.n_cycles_max,
map_sigma_min=params.map_sigma_min,
refine=params.refine,
reset_water_u_iso=params.reset_water_u_iso,
out=out)
if (__name__ == "__main__"):
run(sys.argv[1:])
| true
| true
|
1c405a2a11e8a5b9f6437e88edb582ea0632bc95
| 9,623
|
py
|
Python
|
yapf/yapflib/file_resources.py
|
spaceone/yapf
|
633744e62187b296e2f53b97defec4ac2eb9d5a6
|
[
"Apache-2.0"
] | 13,769
|
2015-03-19T20:58:55.000Z
|
2022-03-31T15:03:38.000Z
|
yapf/yapflib/file_resources.py
|
spaceone/yapf
|
633744e62187b296e2f53b97defec4ac2eb9d5a6
|
[
"Apache-2.0"
] | 973
|
2015-03-24T22:25:15.000Z
|
2022-03-23T15:40:15.000Z
|
yapf/yapflib/file_resources.py
|
spaceone/yapf
|
633744e62187b296e2f53b97defec4ac2eb9d5a6
|
[
"Apache-2.0"
] | 1,141
|
2015-03-25T03:05:22.000Z
|
2022-03-29T11:06:37.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to file resources.
This module provides functions for interfacing with files: opening, writing, and
querying.
"""
import fnmatch
import os
import re
from lib2to3.pgen2 import tokenize
from yapf.yapflib import errors
from yapf.yapflib import py3compat
from yapf.yapflib import style
CR = '\r'
LF = '\n'
CRLF = '\r\n'
def _GetExcludePatternsFromYapfIgnore(filename):
"""Get a list of file patterns to ignore from .yapfignore."""
ignore_patterns = []
if os.path.isfile(filename) and os.access(filename, os.R_OK):
with open(filename, 'r') as fd:
for line in fd:
if line.strip() and not line.startswith('#'):
ignore_patterns.append(line.strip())
if any(e.startswith('./') for e in ignore_patterns):
raise errors.YapfError('path in .yapfignore should not start with ./')
return ignore_patterns
def _GetExcludePatternsFromPyprojectToml(filename):
  """Get a list of file patterns to ignore from pyproject.toml.

  Reads the [tool.yapfignore] table's 'ignore_patterns' list. Requires the
  third-party 'toml' package; raises YapfError when that package is missing
  or when any pattern starts with './'.
  """
  ignore_patterns = []
  try:
    # Imported lazily so yapf works without 'toml' unless this feature is used.
    import toml
  except ImportError:
    raise errors.YapfError(
        "toml package is needed for using pyproject.toml as a configuration file"
    )
  if os.path.isfile(filename) and os.access(filename, os.R_OK):
    pyproject_toml = toml.load(filename)
    # Missing 'tool' or 'yapfignore' tables fall through to an empty list.
    ignore_patterns = pyproject_toml.get('tool',
                                         {}).get('yapfignore',
                                                 {}).get('ignore_patterns', [])
    if any(e.startswith('./') for e in ignore_patterns):
      raise errors.YapfError('path in pyproject.toml should not start with ./')
  return ignore_patterns
def GetExcludePatternsForDir(dirname):
  """Return patterns of files to exclude from ignorefile in a given directory.

  Collects patterns from a .yapfignore file and from the [tool.yapfignore]
  section of a pyproject.toml file, when present in dirname, in that order.

  Arguments:
    dirname: (unicode) The name of the directory.

  Returns:
    A List of file patterns to exclude if ignore file is found, otherwise empty
    List.
  """
  patterns = []
  sources = (
      ('.yapfignore', _GetExcludePatternsFromYapfIgnore),
      ('pyproject.toml', _GetExcludePatternsFromPyprojectToml),
  )
  for basename, reader in sources:
    candidate = os.path.join(dirname, basename)
    if os.path.exists(candidate):
      patterns.extend(reader(candidate))
  return patterns
def GetDefaultStyleForDir(dirname, default_style=style.DEFAULT_STYLE):
  """Return default style name for a given directory.

  Walks upward from dirname toward the filesystem root looking, in order at
  each level, for a .style.yapf file, a setup.cfg with a '[yapf]' section, or
  a pyproject.toml with a '[tool.yapf]' section. Falls back to the user's
  global style file, then to default_style.

  Arguments:
    dirname: (unicode) The name of the directory.
    default_style: The style to return if nothing is found. Defaults to the
      global default style ('pep8') unless otherwise specified.

  Returns:
    The filename if found, otherwise return the default style.
  """
  dirname = os.path.abspath(dirname)
  while True:
    # See if we have a .style.yapf file.
    style_file = os.path.join(dirname, style.LOCAL_STYLE)
    if os.path.exists(style_file):
      return style_file
    # See if we have a setup.cfg file with a '[yapf]' section.
    config_file = os.path.join(dirname, style.SETUP_CONFIG)
    try:
      fd = open(config_file)
    except IOError:
      pass  # It's okay if it's not there.
    else:
      with fd:
        config = py3compat.ConfigParser()
        config.read_file(fd)
        if config.has_section('yapf'):
          return config_file
    # See if we have a pyproject.toml file with a '[tool.yapf]' section.
    config_file = os.path.join(dirname, style.PYPROJECT_TOML)
    try:
      fd = open(config_file)
    except IOError:
      pass  # It's okay if it's not there.
    else:
      with fd:
        try:
          # 'toml' is optional; only required once a pyproject.toml is found.
          import toml
        except ImportError:
          raise errors.YapfError(
              "toml package is needed for using pyproject.toml as a configuration file"
          )
        pyproject_toml = toml.load(config_file)
        style_dict = pyproject_toml.get('tool', {}).get('yapf', None)
        if style_dict is not None:
          return config_file
    # Stop once we reach the filesystem root (or run out of path components).
    if (not dirname or not os.path.basename(dirname) or
        dirname == os.path.abspath(os.path.sep)):
      break
    dirname = os.path.dirname(dirname)
  # Nothing found in any parent directory: try the user-global style file.
  global_file = os.path.expanduser(style.GLOBAL_STYLE)
  if os.path.exists(global_file):
    return global_file
  return default_style
def GetCommandLineFiles(command_line_file_list, recursive, exclude):
  """Return the list of files specified on the command line.

  Thin wrapper over _FindPythonFiles: expands directories (when 'recursive'
  is set) and drops anything matched by the 'exclude' patterns.
  """
  return _FindPythonFiles(command_line_file_list, recursive, exclude)
def WriteReformattedCode(filename,
                         reformatted_code,
                         encoding='',
                         in_place=False):
  """Emit the reformatted code.

  Write the reformatted code into the file, if in_place is True. Otherwise,
  write to stdout.

  Arguments:
    filename: (unicode) The name of the unformatted file.
    reformatted_code: (unicode) The reformatted code.
    encoding: (unicode) The encoding of the file.
    in_place: (bool) If True, then write the reformatted code to the file.
  """
  if not in_place:
    py3compat.EncodeAndWriteToStdout(reformatted_code)
    return
  # newline='' preserves whatever line endings reformatted_code already has.
  with py3compat.open_with_encoding(
      filename, mode='w', encoding=encoding, newline='') as output_file:
    output_file.write(reformatted_code)
def LineEnding(lines):
  """Return the dominant line-ending style in *lines*.

  Counts CRLF, CR and LF terminators and returns the most frequent one; on a
  tie (including an empty input) CRLF wins over CR, which wins over LF.
  """
  counts = {'\r\n': 0, '\r': 0, '\n': 0}
  for line in lines:
    for ending in ('\r\n', '\r', '\n'):
      if line.endswith(ending):
        counts[ending] += 1
        break
  # Dict insertion order makes ties resolve CRLF -> CR -> LF, matching the
  # stable-sort behaviour of the original implementation.
  return max(counts, key=counts.get)
def _FindPythonFiles(filenames, recursive, exclude):
  """Find all Python files.

  Expands any directories in 'filenames' (only when 'recursive' is set) and
  filters out paths matching 'exclude' patterns. Plain files listed
  explicitly are returned without the IsPythonFile check.
  """
  if exclude and any(e.startswith('./') for e in exclude):
    raise errors.YapfError("path in '--exclude' should not start with ./")
  # Normalize exclude patterns: trailing separators would never match.
  exclude = exclude and [e.rstrip("/" + os.path.sep) for e in exclude]

  python_files = []
  for filename in filenames:
    if filename != '.' and exclude and IsIgnored(filename, exclude):
      continue
    if os.path.isdir(filename):
      if not recursive:
        raise errors.YapfError(
            "directory specified without '--recursive' flag: %s" % filename)

      # TODO(morbo): Look into a version of os.walk that can handle recursion.
      excluded_dirs = []
      for dirpath, dirnames, filelist in os.walk(filename):
        if dirpath != '.' and exclude and IsIgnored(dirpath, exclude):
          excluded_dirs.append(dirpath)
          continue
        elif any(dirpath.startswith(e) for e in excluded_dirs):
          # Inside a directory that was already marked excluded.
          continue
        for f in filelist:
          filepath = os.path.join(dirpath, f)
          if exclude and IsIgnored(filepath, exclude):
            continue
          if IsPythonFile(filepath):
            python_files.append(filepath)
        # To prevent it from scanning the contents excluded folders, os.walk()
        # lets you amend its list of child dirs `dirnames`. These edits must be
        # made in-place instead of creating a modified copy of `dirnames`.
        # list.remove() is slow and list.pop() is a headache. Instead clear
        # `dirnames` then repopulate it.
        dirnames_ = [dirnames.pop(0) for i in range(len(dirnames))]
        for dirname in dirnames_:
          dir_ = os.path.join(dirpath, dirname)
          if IsIgnored(dir_, exclude):
            excluded_dirs.append(dir_)
          else:
            dirnames.append(dirname)
    elif os.path.isfile(filename):
      python_files.append(filename)

  return python_files
def IsIgnored(path, exclude):
  """Return True if filename matches any patterns in exclude."""
  if exclude is None:
    return False
  # Normalize: drop leading separators and any number of leading './' parts,
  # since exclude patterns are written relative without them.
  normalized = path.lstrip(os.path.sep)
  dot_prefix = '.' + os.path.sep
  while normalized.startswith(dot_prefix):
    normalized = normalized[len(dot_prefix):]
  for pattern in exclude:
    if fnmatch.fnmatch(normalized, pattern.rstrip(os.path.sep)):
      return True
  return False
def IsPythonFile(filename):
  """Return True if filename is a Python file.

  A file counts as Python when it has a '.py' extension, or when its first
  line is a '#!' shebang mentioning python/python2/python3. Unreadable files
  and files with an invalid encoding cookie are not Python files.
  """
  if os.path.splitext(filename)[1] == '.py':
    return True

  try:
    with open(filename, 'rb') as fd:
      encoding = tokenize.detect_encoding(fd.readline)[0]

    # Check for correctness of encoding.
    with py3compat.open_with_encoding(
        filename, mode='r', encoding=encoding) as fd:
      fd.read()
  except UnicodeDecodeError:
    # The detected codec could not decode the file; latin-1 can decode any
    # byte sequence, so fall back to it for the shebang check below.
    encoding = 'latin-1'
  except (IOError, SyntaxError):
    # If we fail to detect encoding (or the encoding cookie is incorrect - which
    # will make detect_encoding raise SyntaxError), assume it's not a Python
    # file.
    return False

  try:
    with py3compat.open_with_encoding(
        filename, mode='r', encoding=encoding) as fd:
      first_line = fd.readline(256)
  except IOError:
    return False

  # NOTE: returns the re.Match object (truthy) rather than a strict bool.
  return re.match(r'^#!.*\bpython[23]?\b', first_line)
def FileEncoding(filename):
  """Return the file's encoding, detected from its PEP 263 cookie or BOM."""
  with open(filename, 'rb') as source:
    encoding, _ = tokenize.detect_encoding(source.readline)
  return encoding
| 32.731293
| 87
| 0.67401
|
import fnmatch
import os
import re
from lib2to3.pgen2 import tokenize
from yapf.yapflib import errors
from yapf.yapflib import py3compat
from yapf.yapflib import style
CR = '\r'
LF = '\n'
CRLF = '\r\n'
def _GetExcludePatternsFromYapfIgnore(filename):
ignore_patterns = []
if os.path.isfile(filename) and os.access(filename, os.R_OK):
with open(filename, 'r') as fd:
for line in fd:
if line.strip() and not line.startswith('#'):
ignore_patterns.append(line.strip())
if any(e.startswith('./') for e in ignore_patterns):
raise errors.YapfError('path in .yapfignore should not start with ./')
return ignore_patterns
def _GetExcludePatternsFromPyprojectToml(filename):
ignore_patterns = []
try:
import toml
except ImportError:
raise errors.YapfError(
"toml package is needed for using pyproject.toml as a configuration file"
)
if os.path.isfile(filename) and os.access(filename, os.R_OK):
pyproject_toml = toml.load(filename)
ignore_patterns = pyproject_toml.get('tool',
{}).get('yapfignore',
{}).get('ignore_patterns', [])
if any(e.startswith('./') for e in ignore_patterns):
raise errors.YapfError('path in pyproject.toml should not start with ./')
return ignore_patterns
def GetExcludePatternsForDir(dirname):
ignore_patterns = []
yapfignore_file = os.path.join(dirname, '.yapfignore')
if os.path.exists(yapfignore_file):
ignore_patterns += _GetExcludePatternsFromYapfIgnore(yapfignore_file)
pyproject_toml_file = os.path.join(dirname, 'pyproject.toml')
if os.path.exists(pyproject_toml_file):
ignore_patterns += _GetExcludePatternsFromPyprojectToml(pyproject_toml_file)
return ignore_patterns
def GetDefaultStyleForDir(dirname, default_style=style.DEFAULT_STYLE):
dirname = os.path.abspath(dirname)
while True:
style_file = os.path.join(dirname, style.LOCAL_STYLE)
if os.path.exists(style_file):
return style_file
config_file = os.path.join(dirname, style.SETUP_CONFIG)
try:
fd = open(config_file)
except IOError:
pass
else:
with fd:
config = py3compat.ConfigParser()
config.read_file(fd)
if config.has_section('yapf'):
return config_file
config_file = os.path.join(dirname, style.PYPROJECT_TOML)
try:
fd = open(config_file)
except IOError:
pass
else:
with fd:
try:
import toml
except ImportError:
raise errors.YapfError(
"toml package is needed for using pyproject.toml as a configuration file"
)
pyproject_toml = toml.load(config_file)
style_dict = pyproject_toml.get('tool', {}).get('yapf', None)
if style_dict is not None:
return config_file
if (not dirname or not os.path.basename(dirname) or
dirname == os.path.abspath(os.path.sep)):
break
dirname = os.path.dirname(dirname)
global_file = os.path.expanduser(style.GLOBAL_STYLE)
if os.path.exists(global_file):
return global_file
return default_style
def GetCommandLineFiles(command_line_file_list, recursive, exclude):
return _FindPythonFiles(command_line_file_list, recursive, exclude)
def WriteReformattedCode(filename,
reformatted_code,
encoding='',
in_place=False):
if in_place:
with py3compat.open_with_encoding(
filename, mode='w', encoding=encoding, newline='') as fd:
fd.write(reformatted_code)
else:
py3compat.EncodeAndWriteToStdout(reformatted_code)
def LineEnding(lines):
endings = {CRLF: 0, CR: 0, LF: 0}
for line in lines:
if line.endswith(CRLF):
endings[CRLF] += 1
elif line.endswith(CR):
endings[CR] += 1
elif line.endswith(LF):
endings[LF] += 1
return (sorted(endings, key=endings.get, reverse=True) or [LF])[0]
def _FindPythonFiles(filenames, recursive, exclude):
if exclude and any(e.startswith('./') for e in exclude):
raise errors.YapfError("path in '--exclude' should not start with ./")
exclude = exclude and [e.rstrip("/" + os.path.sep) for e in exclude]
python_files = []
for filename in filenames:
if filename != '.' and exclude and IsIgnored(filename, exclude):
continue
if os.path.isdir(filename):
if not recursive:
raise errors.YapfError(
"directory specified without '--recursive' flag: %s" % filename)
excluded_dirs = []
for dirpath, dirnames, filelist in os.walk(filename):
if dirpath != '.' and exclude and IsIgnored(dirpath, exclude):
excluded_dirs.append(dirpath)
continue
elif any(dirpath.startswith(e) for e in excluded_dirs):
continue
for f in filelist:
filepath = os.path.join(dirpath, f)
if exclude and IsIgnored(filepath, exclude):
continue
if IsPythonFile(filepath):
python_files.append(filepath)
dirnames_ = [dirnames.pop(0) for i in range(len(dirnames))]
for dirname in dirnames_:
dir_ = os.path.join(dirpath, dirname)
if IsIgnored(dir_, exclude):
excluded_dirs.append(dir_)
else:
dirnames.append(dirname)
elif os.path.isfile(filename):
python_files.append(filename)
return python_files
def IsIgnored(path, exclude):
if exclude is None:
return False
path = path.lstrip(os.path.sep)
while path.startswith('.' + os.path.sep):
path = path[2:]
return any(fnmatch.fnmatch(path, e.rstrip(os.path.sep)) for e in exclude)
def IsPythonFile(filename):
if os.path.splitext(filename)[1] == '.py':
return True
try:
with open(filename, 'rb') as fd:
encoding = tokenize.detect_encoding(fd.readline)[0]
with py3compat.open_with_encoding(
filename, mode='r', encoding=encoding) as fd:
fd.read()
except UnicodeDecodeError:
encoding = 'latin-1'
except (IOError, SyntaxError):
# file.
return False
try:
with py3compat.open_with_encoding(
filename, mode='r', encoding=encoding) as fd:
first_line = fd.readline(256)
except IOError:
return False
return re.match(r'^
def FileEncoding(filename):
with open(filename, 'rb') as fd:
return tokenize.detect_encoding(fd.readline)[0]
| true
| true
|
1c405a837b1eef2fd63d9d70b2f5f4c28f1ff4c1
| 417
|
py
|
Python
|
django_and_bootstrap/wsgi.py
|
hosseinkianmehr/solar-site
|
6e7995e70442efded3e7bde7cd776fa74dd74372
|
[
"MIT"
] | 3
|
2021-01-19T20:12:09.000Z
|
2021-11-18T10:06:45.000Z
|
django_and_bootstrap/wsgi.py
|
hosseinkianmehr/solar-site
|
6e7995e70442efded3e7bde7cd776fa74dd74372
|
[
"MIT"
] | null | null | null |
django_and_bootstrap/wsgi.py
|
hosseinkianmehr/solar-site
|
6e7995e70442efded3e7bde7cd776fa74dd74372
|
[
"MIT"
] | null | null | null |
"""
WSGI config for django_and_bootstrap project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_and_bootstrap.settings')
application = get_wsgi_application()
| 24.529412
| 80
| 0.798561
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_and_bootstrap.settings')
application = get_wsgi_application()
| true
| true
|
1c405ac6d1316e911344504e1aea921c0e05b929
| 31,666
|
py
|
Python
|
dashboard/internet_nl_dashboard/scanners/scan_internet_nl_per_account.py
|
platforminternetstandaarden/dashboard-internet.nl
|
8b8129c67703a8ae51b441046ff5a90cf72bf537
|
[
"Apache-2.0"
] | null | null | null |
dashboard/internet_nl_dashboard/scanners/scan_internet_nl_per_account.py
|
platforminternetstandaarden/dashboard-internet.nl
|
8b8129c67703a8ae51b441046ff5a90cf72bf537
|
[
"Apache-2.0"
] | null | null | null |
dashboard/internet_nl_dashboard/scanners/scan_internet_nl_per_account.py
|
platforminternetstandaarden/dashboard-internet.nl
|
8b8129c67703a8ae51b441046ff5a90cf72bf537
|
[
"Apache-2.0"
] | 1
|
2019-03-18T16:24:12.000Z
|
2019-03-18T16:24:12.000Z
|
# SPDX-License-Identifier: Apache-2.0
import logging
from copy import copy
from datetime import datetime, timedelta
from typing import Dict, List, Union
import pytz
from actstream import action
from celery import Task, chain, group
from constance import config
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from websecmap.app.constance import constance_cached_value
from websecmap.organizations.models import Url
from websecmap.reporting.report import recreate_url_reports
from websecmap.scanners.models import InternetNLV2Scan
from websecmap.scanners.scanner import add_model_filter, dns_endpoints, internet_nl_v2_websecmap
from websecmap.scanners.scanner.internet_nl_v2 import InternetNLApiSettings
from dashboard.celery import app
from dashboard.internet_nl_dashboard.logic.mail import (email_configration_is_correct,
send_scan_finished_mails)
from dashboard.internet_nl_dashboard.logic.report import optimize_calculation_and_add_statistics
from dashboard.internet_nl_dashboard.logic.urllist_dashboard_report import create_dashboard_report
from dashboard.internet_nl_dashboard.models import (AccountInternetNLScan, AccountInternetNLScanLog,
UrlList, UrlListReport)
# done: create more flexible filters
# done: map mail scans to an endpoint (changed the scanner for it)
# done: make nice tracking name for internet nl that is echoed in the scan results.
# done: map web scans to endpoints
# done: check status of scan using each individual account
# done: possibly we need to check all relevant endpoints before starting the scan. This makes sure that all
# latest changes have been picked up. Especially if manual scans will happen a lot. Probably just adding
# a task before registering a scan. This might deliver some problems as we've seen before, with a chord
# not being performed after the other task has finished. This might be a bit challenging.
# Indeed: a chord does not work. A chain might. We can verify url filters when there is a larger set of domains.
# Done: How do we get the correct list of urls at the time we're going to scan? We've to make that a task too.
# Done: This is done using chains, where each step is executed in order.
# done: create a function for this, as it is twice the same code.
# todo: probably the urllist will contain information if a scan will be done for web or mail. This cannot be managed
# yet, so this is not implemented yet.
# Todo: add the scan ID to the report, so it's easier to find which scan is what. Is that possible?
from dashboard.lockfile import remove_expired_lock, remove_lock, temporary_file_lock
log = logging.getLogger(__name__)
def create_api_settings(v2_scan_id: int) -> Dict[str, Union[str, int]]:
    """Build internet.nl API settings for the account owning the given scan.

    Looks up the AccountInternetNLScan that wraps the InternetNLV2Scan with id
    ``v2_scan_id`` and fills the API credentials/url/limits from that account
    and from constance config. Falls back to default (empty) settings when the
    scan or its account wrapper cannot be found.

    Note: the parameter was previously annotated as InternetNLV2Scan, but it
    is used as a primary-key integer throughout.
    """
    scan = InternetNLV2Scan.objects.all().filter(id=v2_scan_id).first()
    if not scan:
        log.error(f'Did not find an internetnLV2scan with id {v2_scan_id}')
        return InternetNLApiSettings().__dict__  # type: ignore
    # figure out which AccountInternetNLScan object uses this scan. Retrieve the credentials from that account.
    account_scan = AccountInternetNLScan.objects.all().filter(scan=scan).first()
    if not account_scan:
        log.error(f'Could not find accountscan from scan {scan}')
        return InternetNLApiSettings().__dict__  # type: ignore
    apisettings = InternetNLApiSettings()
    apisettings.username = account_scan.account.internet_nl_api_username
    apisettings.password = account_scan.account.decrypt_password()
    apisettings.url = config.INTERNET_NL_API_URL
    # for convenience, remove trailing slashes from the url, this will be entered incorrectly.
    apisettings.url = apisettings.url.rstrip("/")
    apisettings.maximum_domains = config.INTERNET_NL_MAXIMUM_URLS
    return apisettings.__dict__  # type: ignore
# overwrite the create API settings with one that handles credentials for every separate account. This is needed
# for internet.nl to generate some statistics over API usage.
internet_nl_v2_websecmap.create_api_settings = create_api_settings
@app.task(queue='storage')
def initialize_scan(urllist_id: int, manual_or_scheduled: str = "scheduled") -> int:
    """Create scan(s) for the given url list; return the AccountInternetNLScan id.

    For scan_type "all", a mail_dashboard scan is created as a side effect and
    the id of the web scan is returned. Returns -1 for an unknown list or an
    unrecognized scan type.
    """
    urllist = UrlList.objects.all().filter(id=urllist_id).first()
    if not urllist:
        return -1

    if urllist.scan_type == "all":
        create_scan("mail_dashboard", urllist, manual_or_scheduled)
        return create_scan("web", urllist, manual_or_scheduled)

    # Map the list's scan type to the internal scan type used by the API layer.
    internal_type = {"mail": "mail_dashboard", "web": "web"}.get(urllist.scan_type)
    if internal_type is not None:
        return create_scan(internal_type, urllist, manual_or_scheduled)

    return -1
def create_scan(internal_scan_type: str, urllist: UrlList, manual_or_scheduled: str = "scheduled") -> int:
    """Create the websecmap scan and its dashboard wrapper, then start it.

    Creates an InternetNLV2Scan of the given internal type, wraps it in an
    AccountInternetNLScan bound to the list's account, moves the wrapper to
    the 'requested' state so the progression monitor picks it up, and records
    an activity-stream action. Returns the wrapper's id.
    """
    new_scan = InternetNLV2Scan()
    new_scan.type = internal_scan_type
    new_scan.save()
    internet_nl_v2_websecmap.update_state(new_scan.id, "requested and empty",
                                          "requested a scan to be performed on internet.nl api")
    # We need to store the scan type in the InternetNLV2Scan at creation, because the type in the list might change:
    accountinternetnlscan = AccountInternetNLScan()
    accountinternetnlscan.account = urllist.account
    accountinternetnlscan.urllist = urllist
    accountinternetnlscan.started_on = datetime.now(pytz.utc)
    accountinternetnlscan.scan = new_scan
    accountinternetnlscan.state = ""
    accountinternetnlscan.save()
    # and start the process.
    update_state("requested", accountinternetnlscan.id)
    # Sprinkling an activity stream action.
    action.send(urllist.account, verb=f'started {manual_or_scheduled} scan', target=accountinternetnlscan, public=False)
    return accountinternetnlscan.id
@app.task(queue='storage')
def check_running_dashboard_scans(**kwargs) -> Task:
"""
Gets status on all running scans from internet, per account.
This action is guarded by a pid file, only one of this process may be running at a time. Even if things are slow
as otherwise multiple times the same task might be performed. (in case of slow db access, high load etc).
Note that all state tasks have to be performed instantly, so not in a celery task.
:return: None
"""
lock_timeout = 300
lock_name = 'check_running_dashboard_scans'
# You now have five minutes to perform database operations, which should have happened in 1 second.
if temporary_file_lock(lock_name, timeout_in_seconds=lock_timeout):
if kwargs:
scans = AccountInternetNLScan.objects.all()
scans = add_model_filter(scans, **kwargs)
else:
scans = AccountInternetNLScan.objects.all().exclude(
Q(state="finished")
| Q(state__startswith="error")
| Q(state__startswith="cancelled")).only('id')
log.debug(f"Checking the state of scan {scans}.")
tasks = [progress_running_scan(scan.id) for scan in scans]
# All transactional state stuff is done now, so remove the lock
remove_lock(lock_name)
return group(tasks)
# In case of crashes and such, try again with a clean lock after expiration of course:
remove_expired_lock(lock_name, timeout_in_seconds=lock_timeout)
return group([])
def progress_running_scan(scan_id: int) -> Task:
    """
    This monitors the state of a dashboard scan. Depending on the state, it determines if an action is needed and
    gathers them. This will not handle errors.
    This is used in conjunction with Celery: all tasks are performed async, which scales better.
    Steps are split into two: the active verb and the past tense verb. When something is happening, the active verb
    is used, otherwise the past tense verb. Such as: "scanning endpoints" and "scanned endpoints".
    An active verb means that something is currently being performed.
    A completed / past tense verb means that the process is ready to move on.
    All active verbs have a timeout. This timeout can be different for each verb. The timeout is set to a value that
    takes into account the possibility of the system being very busy and all queues full. Therefore, something that
    usually would last 10 seconds, will have a timeout of several hours. If this timeout triggers, there is something
    very wrong: either an unexpected exception stopped the process or there are deadlocks in the queues.
    These timeouts should never be triggered: if they do, it will mean manual intervention to fix a bug etc.
    When a timeout is reached on an active verb, it will change the state to something that is not processed in this
    monitor anymore. Manual action is required, after the manual action has been performed, the person handling it
    can set the state of the failed scan to something this process understands, and we'll happily try again.
    Note that celery can also perform several attempts on exceptions etc, this might or might not happen.
    Timeouts are stored as the following: timeout on [active verb]: timeout on scanning endpoints.
    To prevent duplicate tasks from spawning, this method will adjust the task before the actual content is called.
    This does not use django fsm, as that ties everything to a model. It also overcomplicates the process with
    branching and such. The on-error feature is nice though.
    :return:
    """
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    """
    It's not possible to safely create a scan automagically: this might be called a few times in a row, and then
    you'll end up with several new scans. Therefore, to initiate a scan, you need to call another method.
    After the scan is initiated, this will pick it up and continue.
    """
    if not scan:
        log.debug("No scan received to progress...")
        return group([])
    steps = {
        # complete state progression, using active verbs to come to the next state:
        "requested": discovering_endpoints,
        "discovered endpoints": retrieving_scannable_urls,
        "retrieved scannable urls": registering_scan_at_internet_nl,
        "registered scan at internet.nl": running_scan,
        # "registered" is an old state that somehow, due to unknown factors, still occurs
        "registered": running_scan,
        "running scan": running_scan,
        "scan results ready": storing_scan_results,
        "scan results stored": processing_scan_results,
        "imported scan results": creating_report,
        "created report": sending_mail,
        "sent mail": finishing_scan,
        "skipped sending mail: no e-mail addresses associated with account": finishing_scan,
        "skipped sending mail: no mail server configured": finishing_scan,
        # "finished"
        # handle error situations of the scan in websecmap:
        "network_error": continue_running_scan,
        "configuration_error": continue_running_scan,
        "timeout": continue_running_scan,
        # monitors on active states:
        "discovering endpoints": monitor_timeout,
        "retrieving scannable urls": monitor_timeout,
        "registering scan at internet.nl": monitor_timeout,
        "importing scan results": monitor_timeout,
        "creating report": monitor_timeout,
        "sending mail": monitor_timeout,
        "server_error": monitor_timeout,
    }
    with transaction.atomic():
        # always get the latest state, so we'll not have outdated information if we had to wait in a queue a long while.
        # also run this in a transaction, so it's only possible to get a state and update it to an active state once.
        scan = AccountInternetNLScan.objects.get(id=scan.id)
        next_step = steps.get(scan.state, handle_unknown_state)
        return next_step(scan.id)
@app.task(queue="storage")
def recover_and_retry(scan_id: int):
# check the latest valid state from progress running scan, set the state to that state.
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
log.warning(f'Trying to recover_and_retry with unknown scan: {scan_id}.')
return group([])
valid_states = ['requested', 'discovered endpoints', 'retrieved scannable urls', 'registered scan at internet.nl',
'registered', "running scan", "scan results ready", "scan results stored", "created report",
"sent mail", "skipped sending mail: no e-mail addresses associated with account",
"skipped sending mail: no mail server configured"]
error_states = ["network_error", "configuration_error", "timeout"]
if scan.state in valid_states:
# no recovery needed
return group([])
# get the latest valid state from the scan log:
latest_valid = AccountInternetNLScanLog.objects.all().filter(
scan=scan, state__in=valid_states).order_by('-id').first()
if not latest_valid:
log.error('Trying to recover from a scan that has no log history.')
return group([])
log.warning(f"No valid rollback state for scan {scan_id}.")
log.debug(f"AccountInternetNLScan scan #{scan.id} is rolled back to retry from "
f"'{scan.state}' to '{latest_valid.state}'.")
if scan.state in error_states:
update_state(latest_valid.state, scan.id)
else:
update_state(latest_valid.state, scan.id)
# Also have to rollback the underlying scan, if there already is one.
if scan.scan:
internet_nl_v2_websecmap.recover_and_retry(scan.scan.id)
return group([])
def handle_unknown_state(scan_id):
    """Terminal fallback for states missing from the progression table: log and do nothing."""
    log.warning(f'Scan {scan_id} is in unknown state. It will not progress.')
    return group([])
def discovering_endpoints(scan_id: int):
    """Schedule DNS endpoint discovery for all live urls in the scan's list.

    Chains the discovery canvas to a state update to 'discovered endpoints'.
    """
    # Update the state first to minimize duplicate scheduling; otherwise a
    # re-entrant call could bounce between "discovered endpoints" and
    # "discovering endpoints" and cause an infinite loop.
    update_state("discovering endpoints", scan_id)
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan:
        log.warning(f'Trying to discovering_endpoints with unknown scan: {scan_id}.')
        return group([])
    return (
        dns_endpoints.compose_discover_task(**{
            'urls_filter': {'urls_in_dashboard_list_2__id': scan.urllist.id, 'is_dead': False,
                            'not_resolvable': False}})
        | update_state.si("discovered endpoints", scan.id)
    )
def retrieving_scannable_urls(scan_id: int):
    """Collect the urls that actually have a relevant endpoint for this scan type.

    Guards against registering an API scan with an empty url list; the check
    task decides the next state, which is then applied.
    """
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan or not scan.scan:
        log.warning(f'Trying to retrieving_scannable_urls with unknown scan: {scan_id}.')
        return group([])
    # This step tries to prevent API calls with an empty list of urls.
    update_state("retrieving scannable urls", scan.id)
    # mail was added here, due to a problem while registering scans. We always want dns_soa endpoints.
    relevant_scan_types = {"web": "dns_a_aaaa", "mail_dashboard": "dns_soa", "mail": "dns_soa"}
    return (
        get_relevant_urls.si(scan.urllist.id, relevant_scan_types[scan.scan.type])
        | check_retrieved_scannable_urls.s()
        | update_state.s(scan.id)
    )
def registering_scan_at_internet_nl(scan_id: int):
    """Attach the scannable urls to the websecmap scan and register it at the API.

    Progresses the underlying websecmap scan and copies its state back onto
    this dashboard scan.
    """
    update_state("registering scan at internet.nl", scan_id)
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan or not scan.scan:
        log.warning(f'Trying to registering_scan_at_internet_nl with unknown scan: {scan_id}.')
        return group([])
    # mail = websecmap, mail_dashboard = internet.nl dashboard, web is the same on both. Mail here is a fallback
    # because the dashboard only understands dns_soa endpoints.
    relevant_endpoint_types = {"web": "dns_a_aaaa", "mail_dashboard": "dns_soa", "mail": "dns_soa"}
    # set() persists the m2m relation immediately; no explicit save needed.
    scan.scan.subject_urls.set(get_relevant_urls(scan.urllist.id, relevant_endpoint_types[scan.scan.type]))
    internet_nl_v2_websecmap.update_state(
        scan.scan.id, "requested", "requested a scan to be performed on internet.nl api")
    return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
                 | copy_state_from_websecmap_scan.si(scan.id))
def running_scan(scan_id: int):
    """Poll the underlying websecmap scan and mirror its state onto this scan."""
    update_state("running scan", scan_id)
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan or not scan.scan:
        log.warning(f'Trying to running_scan with unknown scan: {scan_id}.')
        return group([])
    return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
                 | copy_state_from_websecmap_scan.si(scan.id))
def continue_running_scan(scan_id: int):
    """Progress the websecmap scan after a recoverable error (network/config/timeout).

    Unlike running_scan, this does not overwrite the dashboard state first.
    """
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan or not scan.scan:
        log.warning(f'Trying to continue_running_scan with unknown scan: {scan_id}.')
        return group([])
    return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
                 | copy_state_from_websecmap_scan.si(scan.id))
def storing_scan_results(scan_id: int):
    """Have websecmap download/store the finished scan results, then mirror state."""
    update_state("storing scan results", scan_id)
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan or not scan.scan:
        log.warning(f'Trying to storing_scan_results with unknown scan: {scan_id}.')
        return group([])
    return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
                 | copy_state_from_websecmap_scan.si(scan.id))
def processing_scan_results(scan_id: int):
    """Have websecmap process the stored results into scan data, then mirror state."""
    update_state("processing scan results", scan_id)
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan or not scan.scan:
        log.warning(f'Trying to processing_scan_results with unknown scan: {scan_id}.')
        return group([])
    return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
                 | copy_state_from_websecmap_scan.si(scan.id))
@app.task(queue="storage")
def copy_state_from_websecmap_scan(scan_id: int):
    """Mirror the websecmap InternetNLV2Scan state onto our AccountInternetNLScan.

    Some websecmap states are translated into friendlier dashboard wording; the
    conflicting 'requested' state is kept as our own state so it cannot rewind
    the dashboard scan.

    :param scan_id: id of the AccountInternetNLScan to update.
    """
    account_scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if account_scan is None or not account_scan.scan:
        return

    # Fetch a fresh copy: the related object on account_scan may be stale.
    websecmap_scan = InternetNLV2Scan.objects.all().get(id=account_scan.scan.pk)
    websecmap_state = websecmap_scan.state
    log.debug(f"Copying state from websecmap, current state: '{websecmap_state}'. ")

    # Translate websecmap states to dashboard states; anything unknown is copied verbatim.
    translations = {
        # conflicting state, make sure it's ignored (keep our own state)
        "requested": account_scan.state,
        # the websecmap scan progress is not as chatty, make it nicer to better understand scan progress
        "registered": "registered scan at internet.nl",
        # there is more to do than finishing the scan
        "finished": "imported scan results",
    }
    update_state(translations.get(websecmap_state, account_scan.scan.state), account_scan.id)
def creating_report(scan_id: int):
    """Build the dashboard report for a finished scan.

    Chains, in order: per-url report regeneration, dashboard report creation,
    linking the report to this scan, adding statistics, adding unscannable
    urls, and finally setting the 'created report' state.

    :param scan_id: id of the AccountInternetNLScan to report on.
    :return: a celery canvas; an empty group when the scan is unknown.
    """
    update_state("creating report", scan_id)
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan:
        log.warning(f'Trying to creating_report with unknown scan: {scan_id}.')
        return group([])
    # Note that calling 'timezone.now()' at canvas creation time, means that you'll have a date in the past
    # at the moment the function is actually called. If you need accurate time in the function, make sure the
    # function calls 'timezone.now()' when the function is run.
    return (group(recreate_url_reports(list(scan.urllist.urls.all().values_list('id', flat=True))))
            | create_dashboard_report.si(scan.id)
            | connect_urllistreport_to_accountinternetnlscan.s(scan.id)
            | upgrade_report_with_statistics.s()
            | upgrade_report_with_unscannable_urls.s(scan.id)
            | update_state.si("created report", scan.id))
def sending_mail(scan_id: int):
    """Enter the mail step: send the 'scan finished' mail and store the outcome.

    send_after_scan_mail returns the resulting state string, which is then
    persisted via update_state.

    :param scan_id: id of the AccountInternetNLScan to mail about.
    :return: a celery canvas; an empty group when the scan is unknown.
    """
    update_state("sending mail", scan_id)

    account_scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if account_scan is None:
        log.warning(f'Trying to sending_mail with unknown scan: {scan_id}.')
        return group([])

    return chain(send_after_scan_mail.si(account_scan.id), update_state.s(account_scan.id))
def finishing_scan(scan_id: int):
    """Close out the scan: stamp finished_on and set the final 'finished' state.

    :param scan_id: id of the AccountInternetNLScan to finish.
    :return: always an empty group, there is no further work.
    """
    account_scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if account_scan is None:
        log.warning(f'Trying to finishing_scan with unknown scan: {scan_id}.')
        return group([])

    # No further actions, so not setting "finishing scan" as a state, but set it to "scan finished" directly.
    account_scan.finished_on = datetime.now(pytz.utc)
    account_scan.save()

    update_state("finished", account_scan.id)
    return group([])
def monitor_timeout(scan_id: int):
    """
    A timeout is set for a day. If the same state is static for 24 hours, the scan will be set to the previous state.
    Except when a scan is requested: the scan might be so large, and the time to process it might be so high,
    we will accept three days of timeout.

    :param scan_id: id of the AccountInternetNLScan to inspect for a timeout.
    :return: always an empty group; recovery (if any) happens via update_state.
    :raises ValueError: when the scan is in a state for which no recovery strategy is defined.
    """
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan:
        log.warning(f'Trying to monitor_timeout with unknown scan: {scan_id}.')
        return group([])
    # Warning: timeouts are only useful when crashes happened, otherwise its just a capacity issue which timeouts
    # will just increase as it creates more tasks to handle
    # todo: recover from websecmap errors, by trying to recover there and writing the status to the dashboard.
    # Per possibly-stuck state: how long to wait (configurable via constance) and
    # which earlier state to roll back to once the timeout passes.
    recovering_strategies = {
        "discovering endpoints": {
            "timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_DISCOVERING_ENDPOINTS'),  # 6 * 60,
            "state after timeout": "requested"
        },
        "retrieving scannable urls": {
            "timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_RETRIEVING_SCANABLE_URLS'),  # 6
            "state after timeout": "discovered endpoints"
        },
        "registering scan at internet.nl": {
            "timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_REGISTERING_SCAN_AT_INTERNET_NL'),  # 6
            "state after timeout": "retrieved scannable urls"
        },
        "importing scan results": {
            "timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_IMPORTING_SCAN_RESULTS'),  # 24
            "state after timeout": "scan results stored"
        },
        "creating report": {
            "timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_CREATING_REPORT'),  # 24
            "state after timeout": "imported scan results"
        },
        "sending mail": {
            "timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_SENDING_MAIL'),  # 6
            "state after timeout": "created report"
        },
        # It's unclear where in the process we are... Just try again.
        "server_error": {
            "timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_SERVER_ERROR'),  # 1
            "state after timeout": "requested"
        },
    }
    strategy = recovering_strategies.get(scan.state, {})
    if not strategy:
        # Trying to monitor something we don't know. Raise exception, we only want to handle known states.
        raise ValueError(f"Scan is at {scan.state} for which no recovery is defined.")
    # determine if there is an actual timeout.
    if scan.state_changed_on:
        scan_will_timeout_on = scan.state_changed_on + timedelta(minutes=strategy['timeout in minutes'])
        if timezone.now() > scan_will_timeout_on:
            # First log the fact that the timeout was reached, then perform the rollback.
            update_state(f"timeout reached for: '{scan.state}', "
                         f"performing recovery to '{strategy['state after timeout']}'", scan.id)
            update_state(strategy['state after timeout'], scan.id)
    # No further work to do...
    return group([])
@app.task(queue='storage')
def connect_urllistreport_to_accountinternetnlscan(urllistreport_id: int, scan_id: int) -> int:
    """Attach a UrlListReport to its AccountInternetNLScan.

    :param urllistreport_id: id of the report to attach.
    :param scan_id: id of the scan that produced the report.
    :return: the report id on success, -1 when either record is missing.
    """
    account_scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if account_scan is None:
        return -1

    report = UrlListReport.objects.all().filter(id=urllistreport_id).first()
    if report is None:
        return -1

    account_scan.report = report
    account_scan.save()
    return int(report.id)
@app.task(queue='storage')
def upgrade_report_with_statistics(urllistreport_id: int) -> int:
    """Recompute and store statistics on a report's calculation.

    :param urllistreport_id: id of the UrlListReport to upgrade.
    :return: the report pk on success, -1 when the report does not exist.
    """
    report = UrlListReport.objects.all().filter(id=urllistreport_id).first()
    if report is None:
        return -1

    log.debug(f"Creating statistics over urllistreport {report}.")
    report.calculation = optimize_calculation_and_add_statistics(report.calculation)
    report.save()
    return int(report.pk)
@app.task(queue='storage')
def upgrade_report_with_unscannable_urls(urllistreport_id: int, scan_id: int) -> None:
    """
    Urls that cannot be scanned using the internet.nl website are not allowed to be scanned. This is where endpoint
    detection comes into view. Only domains with valid endpoints are (should) be scanned. Other domains have to
    be ignored.
    Yet, when we publish a list of "top 500" domains, only 499 show up in the report. This is due to a number of
    complications.
    1: some domains show up where it is stated that the requirements for scanning were not met. Yet, somehow,
    this domain is in the report while it shouldn't be. This seems to be a bug in the reporting engine (todo) that
    tries to retrieve all results, and if the domain has another endpoint, it is added to the report (alas empty).
    These empty domains are accounted for, and are displayed correctly in the report as being ignored.
    2: some domains do not have any endpoints, such as megaupload.com. Also these should not be scanned.
    These domains however should be reflected in the report, the same as the domains that have a single endpoint.
    To account for these issues, after report generation an extra step is needed that upgrades the report. (There
    already is report upgrading code.) The upgrade will check if all domains are in the report, and if not, add
    the url as being empty. This way all urls that are requested are in the report, and if they are empty, they
    are ignored in all statistics.

    :param urllistreport_id: id of the UrlListReport to upgrade in place.
    :param scan_id: id of the AccountInternetNLScan whose url list defines completeness.
    :return: None
    """
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan:
        return
    urllistreport = UrlListReport.objects.all().filter(id=urllistreport_id).first()
    if not urllistreport:
        return
    log.debug("Adding unscannable urls to report.")
    # See if all urls in the list are also mentioned in the report, if not, add them and also make sure the stats
    # for the report are correct(!). This means all unscannable domains _will_ be in the report, as that matches
    # the list of domains to scan.
    urls_in_report: List[str] = [url['url'] for url in urllistreport.calculation['urls']]
    urls_in_list: List[Url] = list(scan.urllist.urls.all())
    urls_not_in_report = [url.url for url in urls_in_list if url.url not in urls_in_report]
    # An empty url looks like this (all counters zero so statistics ignore it):
    empty_url_template = {
        "url": "",
        "ratings": [],
        "endpoints": [],
        "total_issues": 0,
        "high": 0,
        "medium": 0,
        "low": 0,
        "ok": 0,
        "total_endpoints": 0,
        "high_endpoints": 0,
        "medium_endpoints": 0,
        "low_endpoints": 0,
        "ok_endpoints": 0,
        "total_url_issues": 0,
        "url_issues_high": 0,
        "url_issues_medium": 0,
        "url_issues_low": 0,
        "url_ok": 0,
        "total_endpoint_issues": 0,
        "endpoint_issues_high": 0,
        "endpoint_issues_medium": 0,
        "endpoint_issues_low": 0,
    }
    for url_not_in_report in urls_not_in_report:
        # Copy the template, otherwise all instances will point to the same text (the last domain in the list of
        # missing domains).
        tmp_empty_url_template = copy(empty_url_template)
        tmp_empty_url_template['url'] = url_not_in_report
        urllistreport.calculation['urls'].append(tmp_empty_url_template)
    # also update the total urls, as that can be influenced:
    urllistreport.calculation['total_urls'] = len(urllistreport.calculation['urls'])
    urllistreport.total_urls = len(urllistreport.calculation['urls'])
    urllistreport.save()
    return
@app.task(queue='storage')
def send_after_scan_mail(scan_id: int) -> str:
    """Send the 'scan finished' e-mails for a scan.

    :param scan_id: id of the AccountInternetNLScan to mail about.
    :return: the resulting scan state as a string ("" when the scan is unknown).
    """
    account_scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if account_scan is None:
        return ""

    # Without a configured mail server there is nothing to send.
    if not email_configration_is_correct():
        return "skipped sending mail: no mail server configured"

    if send_scan_finished_mails(account_scan):
        return "sent mail"
    return "skipped sending mail: no e-mail addresses associated with account"
@app.task(queue='storage')
def check_retrieved_scannable_urls(urls: List[int]):
    """Gate the scan process on the retrieved urls.

    :param urls: ids of the scannable urls that were found.
    :return: the next state string; an error state when there is nothing to scan.
    """
    if urls:
        return "retrieved scannable urls"
    # An empty url set would produce an empty report, so the scan stops here.
    return ("error retrieving scannable urls: "
            "no urls to scan found. Will not continue as the report will be empty.")
@app.task(queue='storage')
def update_state(state: str, scan_id: int) -> None:
    """Update the current scan state. Also write it to the scan log. From this log we should also be able to see
    retries... when celery retries on exceptions etc...

    :param state: the new state to record.
    :param scan_id: id of the AccountInternetNLScan to update.
    """
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).only('id', 'state').first()
    if not scan:
        return
    # if the state is still the same, just update the last_check, don't append the log.
    # Don't get it from the scan object, that info might be obsolete.
    last_state_for_scan = AccountInternetNLScanLog.objects.all().filter(
        scan=scan
    ).order_by("-at_when").only('state').first()
    if last_state_for_scan:
        # see: test_update_state
        if last_state_for_scan.state == state == scan.state:
            return
    # do not update a cancelled scan (#159), even if a certain task has finished after a cancel was issued (letting the
    # task overwriting the cancelled state, continuing the scan)
    # Bugfix: the previous code compared the log *object* to "cancelled" (`last_state_for_scan == "cancelled"`),
    # which is always False, so the cancelled state could still be overwritten. Compare the state field instead.
    if last_state_for_scan and last_state_for_scan.state == "cancelled":
        return
    # First state, or a new state.
    scan.state = state
    scan.state_changed_on = timezone.now()
    scan.save()
    # Then log the state change, if it changed, even if it already changed before, so it's clear things went wrong:
    scanlog = AccountInternetNLScanLog()
    scanlog.scan = scan
    scanlog.at_when = timezone.now()
    scanlog.state = state
    scanlog.save()
@app.task(queue='storage')
def get_relevant_urls(urllist_id: int, protocol: str) -> List[int]:
    """Return distinct ids of alive, resolvable urls in a list that have an endpoint of the given protocol.

    :param urllist_id: id of the UrlList to take urls from.
    :param protocol: endpoint protocol to filter on (e.g. 'dns_a_aaaa', 'dns_soa').
    :return: de-duplicated url ids; empty when the list does not exist.
    """
    urllist = UrlList.objects.all().filter(id=urllist_id).first()
    if urllist is None:
        return []

    url_ids = Url.objects.all().filter(
        urls_in_dashboard_list_2=urllist,
        is_dead=False,
        not_resolvable=False,
        endpoint__protocol__in=[protocol],
    ).values_list('id', flat=True)
    return list(set(url_ids))
| 43.141689
| 120
| 0.703752
|
import logging
from copy import copy
from datetime import datetime, timedelta
from typing import Dict, List, Union
import pytz
from actstream import action
from celery import Task, chain, group
from constance import config
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from websecmap.app.constance import constance_cached_value
from websecmap.organizations.models import Url
from websecmap.reporting.report import recreate_url_reports
from websecmap.scanners.models import InternetNLV2Scan
from websecmap.scanners.scanner import add_model_filter, dns_endpoints, internet_nl_v2_websecmap
from websecmap.scanners.scanner.internet_nl_v2 import InternetNLApiSettings
from dashboard.celery import app
from dashboard.internet_nl_dashboard.logic.mail import (email_configration_is_correct,
send_scan_finished_mails)
from dashboard.internet_nl_dashboard.logic.report import optimize_calculation_and_add_statistics
from dashboard.internet_nl_dashboard.logic.urllist_dashboard_report import create_dashboard_report
from dashboard.internet_nl_dashboard.models import (AccountInternetNLScan, AccountInternetNLScanLog,
UrlList, UrlListReport)
# not being performed after the other task has finished. This might be a bit challenging.
# Indeed: a chord does not work. A chain might. We can verify url filters when there is a larger set of domains.
# Done: How do we get the correct list of urls at the time we're going to scan? We've to make that a task too.
# Done: This is done using chains, where each step is executed in order.
# done: create a function for this, as it is twice the same code.
# todo: probably the urllist will contain information if a scan will be done for web or mail. This cannot be managed
# yet, so this is not implemented yet.
# Todo: add the scan ID to the report, so it's easier to find which scan is what. Is that possible?
from dashboard.lockfile import remove_expired_lock, remove_lock, temporary_file_lock
log = logging.getLogger(__name__)
def create_api_settings(v2_scan_id: InternetNLV2Scan) -> Dict[str, Union[str, int]]:
scan = InternetNLV2Scan.objects.all().filter(id=v2_scan_id).first()
if not scan:
log.error(f'Did not find an internetnLV2scan with id {v2_scan_id}')
return InternetNLApiSettings().__dict__
account_scan = AccountInternetNLScan.objects.all().filter(scan=scan).first()
if not account_scan:
log.error(f'Could not find accountscan from scan {scan}')
return InternetNLApiSettings().__dict__
apisettings = InternetNLApiSettings()
apisettings.username = account_scan.account.internet_nl_api_username
apisettings.password = account_scan.account.decrypt_password()
apisettings.url = config.INTERNET_NL_API_URL
apisettings.url = apisettings.url.rstrip("/")
apisettings.maximum_domains = config.INTERNET_NL_MAXIMUM_URLS
return apisettings.__dict__
internet_nl_v2_websecmap.create_api_settings = create_api_settings
@app.task(queue='storage')
def initialize_scan(urllist_id: int, manual_or_scheduled: str = "scheduled") -> int:
    """Create the scan(s) configured on a url list.

    A list of scan type "all" creates both a mail and a web scan.

    :param urllist_id: id of the UrlList to scan.
    :param manual_or_scheduled: how the scan was started, recorded in the activity stream.
    :return: the id of the (last) created AccountInternetNLScan, or -1 on failure.
    """
    urllist = UrlList.objects.all().filter(id=urllist_id).first()
    if urllist is None:
        return -1

    # Map the list's configured scan type to the internal scan type(s) to create.
    internal_types_per_scan_type = {
        "all": ["mail_dashboard", "web"],
        "mail": ["mail_dashboard"],
        "web": ["web"],
    }
    created_scan_id = -1
    for internal_type in internal_types_per_scan_type.get(urllist.scan_type, []):
        created_scan_id = create_scan(internal_type, urllist, manual_or_scheduled)
    return created_scan_id
def create_scan(internal_scan_type: str, urllist: UrlList, manual_or_scheduled: str = "scheduled") -> int:
new_scan = InternetNLV2Scan()
new_scan.type = internal_scan_type
new_scan.save()
internet_nl_v2_websecmap.update_state(new_scan.id, "requested and empty",
"requested a scan to be performed on internet.nl api")
accountinternetnlscan = AccountInternetNLScan()
accountinternetnlscan.account = urllist.account
accountinternetnlscan.urllist = urllist
accountinternetnlscan.started_on = datetime.now(pytz.utc)
accountinternetnlscan.scan = new_scan
accountinternetnlscan.state = ""
accountinternetnlscan.save()
update_state("requested", accountinternetnlscan.id)
action.send(urllist.account, verb=f'started {manual_or_scheduled} scan', target=accountinternetnlscan, public=False)
return accountinternetnlscan.id
@app.task(queue='storage')
def check_running_dashboard_scans(**kwargs) -> Task:
lock_timeout = 300
lock_name = 'check_running_dashboard_scans'
if temporary_file_lock(lock_name, timeout_in_seconds=lock_timeout):
if kwargs:
scans = AccountInternetNLScan.objects.all()
scans = add_model_filter(scans, **kwargs)
else:
scans = AccountInternetNLScan.objects.all().exclude(
Q(state="finished")
| Q(state__startswith="error")
| Q(state__startswith="cancelled")).only('id')
log.debug(f"Checking the state of scan {scans}.")
tasks = [progress_running_scan(scan.id) for scan in scans]
remove_lock(lock_name)
return group(tasks)
remove_expired_lock(lock_name, timeout_in_seconds=lock_timeout)
return group([])
def progress_running_scan(scan_id: int) -> Task:
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
log.debug("No scan received to progress...")
return group([])
steps = {
"requested": discovering_endpoints,
"discovered endpoints": retrieving_scannable_urls,
"retrieved scannable urls": registering_scan_at_internet_nl,
"registered scan at internet.nl": running_scan,
"registered": running_scan,
"running scan": running_scan,
"scan results ready": storing_scan_results,
"scan results stored": processing_scan_results,
"imported scan results": creating_report,
"created report": sending_mail,
"sent mail": finishing_scan,
"skipped sending mail: no e-mail addresses associated with account": finishing_scan,
"skipped sending mail: no mail server configured": finishing_scan,
"network_error": continue_running_scan,
"configuration_error": continue_running_scan,
"timeout": continue_running_scan,
"discovering endpoints": monitor_timeout,
"retrieving scannable urls": monitor_timeout,
"registering scan at internet.nl": monitor_timeout,
"importing scan results": monitor_timeout,
"creating report": monitor_timeout,
"sending mail": monitor_timeout,
"server_error": monitor_timeout,
}
with transaction.atomic():
# also run this in a transaction, so it's only possible to get a state and update it to an active state once.
scan = AccountInternetNLScan.objects.get(id=scan.id)
next_step = steps.get(scan.state, handle_unknown_state)
return next_step(scan.id)
@app.task(queue="storage")
def recover_and_retry(scan_id: int):
    """Roll a scan that is stuck in an error state back to its latest valid state so it can be retried.

    Progressing the scan again is left to check_running_dashboard_scans.

    :param scan_id: id of the AccountInternetNLScan to recover.
    :return: always an empty group.
    """
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
    if not scan:
        log.warning(f'Trying to recover_and_retry with unknown scan: {scan_id}.')
        return group([])
    valid_states = ['requested', 'discovered endpoints', 'retrieved scannable urls', 'registered scan at internet.nl',
                    'registered', "running scan", "scan results ready", "scan results stored", "created report",
                    "sent mail", "skipped sending mail: no e-mail addresses associated with account",
                    "skipped sending mail: no mail server configured"]
    # A scan already in a valid state needs no recovery.
    if scan.state in valid_states:
        return group([])
    latest_valid = AccountInternetNLScanLog.objects.all().filter(
        scan=scan, state__in=valid_states).order_by('-id').first()
    if not latest_valid:
        log.error('Trying to recover from a scan that has no log history.')
        return group([])
    # NOTE(review): this warning fires even though a rollback state *was* found — confirm intent.
    log.warning(f"No valid rollback state for scan {scan_id}.")
    log.debug(f"AccountInternetNLScan scan #{scan.id} is rolled back to retry from "
              f"'{scan.state}' to '{latest_valid.state}'.")
    # Bugfix/simplification: the previous if/else on error states performed the exact same
    # update_state call in both branches, so the branch (and the unused error_states list) is removed.
    update_state(latest_valid.state, scan.id)
    # Also let websecmap recover its own scan, if one is attached.
    if scan.scan:
        internet_nl_v2_websecmap.recover_and_retry(scan.scan.id)
    return group([])
def handle_unknown_state(scan_id):
    """Fallback step for scan states without a defined next action: log a warning and do nothing.

    :param scan_id: id of the AccountInternetNLScan that is stuck.
    :return: an empty celery group, so the scan does not progress.
    """
    log.warning(f'Scan {scan_id} is in unknown state. It will not progress.')
    return group([])
def discovering_endpoints(scan_id: int):
update_state("discovering endpoints", scan_id)
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
log.warning(f'Trying to discovering_endpoints with unknown scan: {scan_id}.')
return group([])
return (
dns_endpoints.compose_discover_task(**{
'urls_filter': {'urls_in_dashboard_list_2__id': scan.urllist.id, 'is_dead': False,
'not_resolvable': False}})
| update_state.si("discovered endpoints", scan.id)
)
def retrieving_scannable_urls(scan_id: int):
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan or not scan.scan:
log.warning(f'Trying to retrieving_scannable_urls with unknown scan: {scan_id}.')
return group([])
update_state("retrieving scannable urls", scan.id)
relevant_scan_types = {"web": "dns_a_aaaa", "mail_dashboard": "dns_soa", "mail": "dns_soa"}
return (
get_relevant_urls.si(scan.urllist.id, relevant_scan_types[scan.scan.type])
| check_retrieved_scannable_urls.s()
| update_state.s(scan.id)
)
def registering_scan_at_internet_nl(scan_id: int):
update_state("registering scan at internet.nl", scan_id)
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan or not scan.scan:
log.warning(f'Trying to registering_scan_at_internet_nl with unknown scan: {scan_id}.')
return group([])
relevant_endpoint_types = {"web": "dns_a_aaaa", "mail_dashboard": "dns_soa", "mail": "dns_soa"}
scan.scan.subject_urls.set(get_relevant_urls(scan.urllist.id, relevant_endpoint_types[scan.scan.type]))
internet_nl_v2_websecmap.update_state(
scan.scan.id, "requested", "requested a scan to be performed on internet.nl api")
return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
| copy_state_from_websecmap_scan.si(scan.id))
def running_scan(scan_id: int):
update_state("running scan", scan_id)
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan or not scan.scan:
log.warning(f'Trying to running_scan with unknown scan: {scan_id}.')
return group([])
return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
| copy_state_from_websecmap_scan.si(scan.id))
def continue_running_scan(scan_id: int):
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan or not scan.scan:
log.warning(f'Trying to continue_running_scan with unknown scan: {scan_id}.')
return group([])
return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
| copy_state_from_websecmap_scan.si(scan.id))
def storing_scan_results(scan_id: int):
update_state("storing scan results", scan_id)
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan or not scan.scan:
log.warning(f'Trying to storing_scan_results with unknown scan: {scan_id}.')
return group([])
return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
| copy_state_from_websecmap_scan.si(scan.id))
def processing_scan_results(scan_id: int):
update_state("processing scan results", scan_id)
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan or not scan.scan:
log.warning(f'Trying to processing_scan_results with unknown scan: {scan_id}.')
return group([])
return chain(internet_nl_v2_websecmap.progress_running_scan(scan.scan.id)
| copy_state_from_websecmap_scan.si(scan.id))
@app.task(queue="storage")
def copy_state_from_websecmap_scan(scan_id: int):
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan or not scan.scan:
return
up_to_date_scan_information = InternetNLV2Scan.objects.all().get(id=scan.scan.pk)
current_state = up_to_date_scan_information.state
log.debug(f"Copying state from websecmap, current state: '{current_state}'. ")
if current_state == "requested":
new_state = scan.state
# the websecmap scan progress is not as chatty, make it nicer to better understand scan progress
# the websecmap scan progress is not as chatty, make it nicer to better understand scan progress
elif current_state == "registered":
new_state = "registered scan at internet.nl"
# there is more to do than finishing the scan
elif current_state == "finished":
new_state = "imported scan results"
else:
new_state = scan.scan.state
update_state(new_state, scan.id)
def creating_report(scan_id: int):
update_state("creating report", scan_id)
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
log.warning(f'Trying to creating_report with unknown scan: {scan_id}.')
return group([])
# Note that calling 'timezone.now()' at canvas creation time, means that you'll have a date in the past
return (group(recreate_url_reports(list(scan.urllist.urls.all().values_list('id', flat=True))))
| create_dashboard_report.si(scan.id)
| connect_urllistreport_to_accountinternetnlscan.s(scan.id)
| upgrade_report_with_statistics.s()
| upgrade_report_with_unscannable_urls.s(scan.id)
| update_state.si("created report", scan.id))
def sending_mail(scan_id: int):
update_state("sending mail", scan_id)
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
log.warning(f'Trying to sending_mail with unknown scan: {scan_id}.')
return group([])
return (send_after_scan_mail.si(scan.id)
| update_state.s(scan.id))
def finishing_scan(scan_id: int):
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
log.warning(f'Trying to finishing_scan with unknown scan: {scan_id}.')
return group([])
scan.finished_on = datetime.now(pytz.utc)
scan.save()
update_state("finished", scan.id)
return group([])
def monitor_timeout(scan_id: int):
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
log.warning(f'Trying to monitor_timeout with unknown scan: {scan_id}.')
return group([])
recovering_strategies = {
"discovering endpoints": {
"timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_DISCOVERING_ENDPOINTS'),
"state after timeout": "requested"
},
"retrieving scannable urls": {
"timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_RETRIEVING_SCANABLE_URLS'),
"state after timeout": "discovered endpoints"
},
"registering scan at internet.nl": {
"timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_REGISTERING_SCAN_AT_INTERNET_NL'),
"state after timeout": "retrieved scannable urls"
},
"importing scan results": {
"timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_IMPORTING_SCAN_RESULTS'),
"state after timeout": "scan results stored"
},
"creating report": {
"timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_CREATING_REPORT'),
"state after timeout": "imported scan results"
},
"sending mail": {
"timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_SENDING_MAIL'),
"state after timeout": "created report"
},
"server_error": {
"timeout in minutes": constance_cached_value('SCAN_TIMEOUT_MINUTES_SERVER_ERROR'), # 1
"state after timeout": "requested"
},
}
strategy = recovering_strategies.get(scan.state, {})
if not strategy:
# Trying to monitor something we don't know. Raise exception, we only want to handle known states.
raise ValueError(f"Scan is at {scan.state} for which no recovery is defined.")
if scan.state_changed_on:
scan_will_timeout_on = scan.state_changed_on + timedelta(minutes=strategy['timeout in minutes'])
if timezone.now() > scan_will_timeout_on:
update_state(f"timeout reached for: '{scan.state}', "
f"performing recovery to '{strategy['state after timeout']}'", scan.id)
update_state(strategy['state after timeout'], scan.id)
return group([])
@app.task(queue='storage')
def connect_urllistreport_to_accountinternetnlscan(urllistreport_id: int, scan_id: int) -> int:
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
return -1
urllistreport = UrlListReport.objects.all().filter(id=urllistreport_id).first()
if not urllistreport:
return -1
scan.report = urllistreport
scan.save()
return int(urllistreport.id)
@app.task(queue='storage')
def upgrade_report_with_statistics(urllistreport_id: int) -> int:
urllistreport = UrlListReport.objects.all().filter(id=urllistreport_id).first()
if not urllistreport:
return -1
log.debug(f"Creating statistics over urllistreport {urllistreport}.")
urllistreport.calculation = optimize_calculation_and_add_statistics(urllistreport.calculation)
urllistreport.save()
return int(urllistreport.pk)
@app.task(queue='storage')
def upgrade_report_with_unscannable_urls(urllistreport_id: int, scan_id: int):
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
return
urllistreport = UrlListReport.objects.all().filter(id=urllistreport_id).first()
if not urllistreport:
return
log.debug("Adding unscannable urls to report.")
urls_in_report: List[str] = [url['url'] for url in urllistreport.calculation['urls']]
urls_in_list: List[Url] = list(scan.urllist.urls.all())
urls_not_in_report = [url.url for url in urls_in_list if url.url not in urls_in_report]
empty_url_template = {
"url": "",
"ratings": [],
"endpoints": [],
"total_issues": 0,
"high": 0,
"medium": 0,
"low": 0,
"ok": 0,
"total_endpoints": 0,
"high_endpoints": 0,
"medium_endpoints": 0,
"low_endpoints": 0,
"ok_endpoints": 0,
"total_url_issues": 0,
"url_issues_high": 0,
"url_issues_medium": 0,
"url_issues_low": 0,
"url_ok": 0,
"total_endpoint_issues": 0,
"endpoint_issues_high": 0,
"endpoint_issues_medium": 0,
"endpoint_issues_low": 0,
}
for url_not_in_report in urls_not_in_report:
tmp_empty_url_template = copy(empty_url_template)
tmp_empty_url_template['url'] = url_not_in_report
urllistreport.calculation['urls'].append(tmp_empty_url_template)
urllistreport.calculation['total_urls'] = len(urllistreport.calculation['urls'])
urllistreport.total_urls = len(urllistreport.calculation['urls'])
urllistreport.save()
return
@app.task(queue='storage')
def send_after_scan_mail(scan_id: int) -> str:
scan = AccountInternetNLScan.objects.all().filter(id=scan_id).first()
if not scan:
return ""
if not email_configration_is_correct():
return "skipped sending mail: no mail server configured"
mails_sent = send_scan_finished_mails(scan)
if not mails_sent:
return "skipped sending mail: no e-mail addresses associated with account"
return "sent mail"
@app.task(queue='storage')
def check_retrieved_scannable_urls(urls: List[int]):
if not urls:
return "error retrieving scannable urls: " \
"no urls to scan found. Will not continue as the report will be empty."
return "retrieved scannable urls"
@app.task(queue='storage')
def update_state(state: str, scan_id: int) -> None:
    """Update the current scan state and append the change to the scan log.

    :param state: the new state to record.
    :param scan_id: id of the AccountInternetNLScan to update.
    """
    scan = AccountInternetNLScan.objects.all().filter(id=scan_id).only('id', 'state').first()
    if not scan:
        return
    # Don't get it from the scan object, that info might be obsolete.
    last_state_for_scan = AccountInternetNLScanLog.objects.all().filter(
        scan=scan
    ).order_by("-at_when").only('state').first()
    if last_state_for_scan:
        # Nothing changed: don't append a duplicate log entry.
        if last_state_for_scan.state == state == scan.state:
            return
    # Bugfix: this line was corrupted to `ate = state`, which only bound an unused local and
    # never persisted the new state onto the scan. Restore the assignment.
    scan.state = state
    scan.state_changed_on = timezone.now()
    scan.save()
    # Log every state change, even repeats of earlier states, so it's clear when things went wrong.
    scanlog = AccountInternetNLScanLog()
    scanlog.scan = scan
    scanlog.at_when = timezone.now()
    scanlog.state = state
    scanlog.save()
@app.task(queue='storage')
def get_relevant_urls(urllist_id: int, protocol: str) -> List[int]:
urllist = UrlList.objects.all().filter(id=urllist_id).first()
if not urllist:
return []
urls = Url.objects.all().filter(urls_in_dashboard_list_2=urllist, is_dead=False, not_resolvable=False,
endpoint__protocol__in=[protocol]).values_list('id', flat=True)
return list(set(urls))
| true
| true
|
1c405b033cae8c0cc0e16b1317fb1fa4ee564a69
| 689
|
py
|
Python
|
e2e_dialog/setup.py
|
hscspring/chatbot
|
9d0bc91db0d8834a1a75cba3edcd3133191e80af
|
[
"Apache-2.0"
] | null | null | null |
e2e_dialog/setup.py
|
hscspring/chatbot
|
9d0bc91db0d8834a1a75cba3edcd3133191e80af
|
[
"Apache-2.0"
] | null | null | null |
e2e_dialog/setup.py
|
hscspring/chatbot
|
9d0bc91db0d8834a1a75cba3edcd3133191e80af
|
[
"Apache-2.0"
] | null | null | null |
import setuptools

# The README doubles as the long description shown on package indexes.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata for the e2e_dialog distribution.
setuptools.setup(
    name="e2e_dialog",
    version="0.0.1",
    author="Yam",
    author_email="haoshaochun@gmail.com",
    description="Humanly Deeplearning NLP.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/hscspring",
    packages=['e2e_dialog.damd', 'e2e_dialog.simpletod'],
    package_dir={},
    install_requires=[
    ],
    package_data={
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        # NOTE(review): classifier says MIT while the repository license metadata is Apache-2.0 — confirm.
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 25.518519
| 57
| 0.642961
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="e2e_dialog",
version="0.0.1",
author="Yam",
author_email="haoshaochun@gmail.com",
description="Humanly Deeplearning NLP.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hscspring",
packages=['e2e_dialog.damd', 'e2e_dialog.simpletod'],
package_dir={},
install_requires=[
],
package_data={
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| true
| true
|
1c405c4d284bb882ab972c77fa28c815d4f98c5e
| 7,476
|
py
|
Python
|
lib/surface/container/images/delete.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/container/images/delete.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/container/images/delete.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete images command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_session
from googlecloudsdk.api_lib.container.images import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container import flags
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
class Delete(base.DeleteCommand):
  """Delete existing images.

  The container images delete command of gcloud deletes a specified
  image and tags in a specified repository. Repositories
  must be hosted by the Google Container Registry.
  """

  detailed_help = {
      'DESCRIPTION':
          """\
          The container images delete command deletes the specified image from
          the registry. All associated tags are also deleted.
      """,
      'EXAMPLES':
          """\
          Deletes the image as long as there aren't additional, unspecified tags
          referencing it:

            $ {command} <IMAGE_NAME>

          Deletes the image (and tags) from the input IMAGE_NAME:

            $ {command} <IMAGE_NAME> --force-delete-tags

          Deletes the image (and tags) from the input IMAGE_NAME, without
          additional prompting:

            $ {command} <IMAGE_NAME> --force-delete-tags --quiet

          To easily identify and delete untagged images in a project, first
          filter digests that lack tags:

            $ gcloud container images list-tags [HOSTNAME]/[PROJECT-ID]/[IMAGE]\
                --filter='-tags:*' --format='get(digest)' --limit=$BIG_NUMBER

          Then, delete these tagless images without prompting by running:

            $ {command} --quiet [HOSTNAME]/[PROJECT-ID]/[IMAGE]@DIGEST
      """,
  }

  @staticmethod
  def Args(parser):
    """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
    """
    flags.AddTagOrDigestPositional(parser, verb='delete')
    parser.add_argument(
        '--force-delete-tags',
        action='store_true',
        default=False,
        help=(
            'If there are tags pointing to an image to be deleted then they '
            'must all be specified explicitly, or this flag must be specified, '
            'for the command to succeed.'))

  def Run(self, args):
    """This is what is called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Raises:
      InvalidImageNameError: If the user specified an invalid image name.
    Returns:
      A list of the deleted docker_name.Tag and docker_name.Digest objects
    """
    # IMAGE_NAME: The fully-qualified image name to delete (with a digest).
    # Deletes the layers. Ex. gcr.io/google-appengine/java(@DIGEST|:TAG).

    http_obj = http.Http()
    with util.WrapExpectedDockerlessErrors():
      # collect input/validate
      digests, explicit_tags = self._ProcessImageNames(args.image_names)
      # Resolve tags to digests.
      for tag in explicit_tags:
        digests.add(util.GetDigestFromName(str(tag)))
      # Find all the tags that reference digests to be deleted.
      all_tags = set()
      for digest in digests:
        all_tags.update(util.GetDockerTagsForDigest(digest, http_obj))
      # Find all the tags that weren't specified explicitly.
      implicit_tags = all_tags.difference(explicit_tags)

      # Refuse to implicitly delete tags unless --force-delete-tags was given.
      if implicit_tags and not args.force_delete_tags:
        log.error('Tags:')
        for tag in explicit_tags:
          log.error('- ' + str(tag))
        raise exceptions.Error(
            'This operation will implicitly delete the tags listed above. '
            'Please manually remove with the `untag` command or re-run with '
            '--force-delete-tags to confirm.')
      # Print the digests to be deleted.
      if digests:
        log.status.Print('Digests:')
      for digest in digests:
        self._PrintDigest(digest, http_obj)
      # Print the tags to be deleted.
      if explicit_tags:
        log.status.Print('Tags:')
      for tag in explicit_tags:
        log.status.Print('- ' + str(tag))

      # Prompt the user for consent to delete all the above.
      console_io.PromptContinue(
          'This operation will delete the tags and images identified by the '
          'digests above.',
          default=True,
          cancel_on_no=True)

      # The user has given explicit consent, merge the tags.
      explicit_tags.update(implicit_tags)

      # delete and collect output
      result = []
      for tag in explicit_tags:  # tags must be deleted before digests
        self._DeleteDockerTagOrDigest(tag, http_obj)
        result.append({'name': str(tag)})
      for digest in digests:
        self._DeleteDockerTagOrDigest(digest, http_obj)
        result.append({'name': str(digest)})
      return result

  def _ProcessImageNames(self, image_names):
    # Partition user-supplied names into digest and tag references.
    digests = set()
    tags = set()
    for image_name in image_names:
      docker_obj = util.GetDockerImageFromTagOrDigest(image_name)
      if isinstance(docker_obj, docker_name.Digest):
        digests.add(docker_obj)
      elif isinstance(docker_obj, docker_name.Tag):
        if not util.IsFullySpecified(image_name):
          log.warning('Implicit ":latest" tag specified: ' + image_name)
        tags.add(docker_obj)
    return [digests, tags]

  def _DeleteDockerTagOrDigest(self, tag_or_digest, http_obj):
    # Issue the registry-side delete and report it to the user.
    docker_session.Delete(
        creds=util.CredentialProvider(), name=tag_or_digest, transport=http_obj)
    log.DeletedResource(tag_or_digest)

  def _DeleteDigestAndAssociatedTags(self, digest, http_obj):
    # Digest must not have any tags in order to be deleted.
    # Errors raised from tag deletion are deliberately uncaught.
    util.DeleteTagsFromDigest(digest, http_obj)
    tag_list = util.GetTagNamesForDigest(digest, http_obj)
    for tag in tag_list:
      log.DeletedResource(tag)

    docker_session.Delete(
        creds=util.CredentialProvider(), name=digest, transport=http_obj)
    log.DeletedResource(digest)

  def _PrintDigest(self, digest, http_obj):
    # Show one digest plus any tags currently pointing at it.
    log.status.Print('- ' + str(digest))
    self._DisplayDigestTags(digest, http_obj)

  def _DisplayDigestTags(self, digest, http_obj):
    tag_list = util.GetTagNamesForDigest(digest, http_obj)
    if not tag_list:  # no tags on this digest, skip delete prompt
      return
    fmt = ('list[title=" Associated tags:"]')
    resource_printer.Print(tag_list, fmt, out=log.status)
| 35.942308
| 80
| 0.687935
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_session
from googlecloudsdk.api_lib.container.images import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container import flags
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
class Delete(base.DeleteCommand):
detailed_help = {
'DESCRIPTION':
"""\
The container images delete command deletes the specified image from
the registry. All associated tags are also deleted.
""",
'EXAMPLES':
"""\
Deletes the image as long as there aren't additional, unspecified tags
referencing it:
$ {command} <IMAGE_NAME>
Deletes the image (and tags) from the input IMAGE_NAME:
$ {command} <IMAGE_NAME> --force-delete-tags
Deletes the image (and tags) from the input IMAGE_NAME, without
additional prompting:
$ {command} <IMAGE_NAME> --force-delete-tags --quiet
To easily identify and delete untagged images in a project, first
filter digests that lack tags:
$ gcloud container images list-tags [HOSTNAME]/[PROJECT-ID]/[IMAGE]\
--filter='-tags:*' --format='get(digest)' --limit=$BIG_NUMBER
Then, delete these tagless images without prompting by running:
$ {command} --quiet [HOSTNAME]/[PROJECT-ID]/[IMAGE]@DIGEST
""",
}
@staticmethod
def Args(parser):
flags.AddTagOrDigestPositional(parser, verb='delete')
parser.add_argument(
'--force-delete-tags',
action='store_true',
default=False,
help=(
'If there are tags pointing to an image to be deleted then they '
'must all be specified explicitly, or this flag must be specified, '
'for the command to succeed.'))
def Run(self, args):
# IMAGE_NAME: The fully-qualified image name to delete (with a digest).
# Deletes the layers. Ex. gcr.io/google-appengine/java(@DIGEST|:TAG).
http_obj = http.Http()
with util.WrapExpectedDockerlessErrors():
# collect input/validate
digests, explicit_tags = self._ProcessImageNames(args.image_names)
# Resolve tags to digests.
for tag in explicit_tags:
digests.add(util.GetDigestFromName(str(tag)))
# Find all the tags that reference digests to be deleted.
all_tags = set()
for digest in digests:
all_tags.update(util.GetDockerTagsForDigest(digest, http_obj))
# Find all the tags that weren't specified explicitly.
implicit_tags = all_tags.difference(explicit_tags)
if implicit_tags and not args.force_delete_tags:
log.error('Tags:')
for tag in explicit_tags:
log.error('- ' + str(tag))
raise exceptions.Error(
'This operation will implicitly delete the tags listed above. '
'Please manually remove with the `untag` command or re-run with '
'--force-delete-tags to confirm.')
if digests:
log.status.Print('Digests:')
for digest in digests:
self._PrintDigest(digest, http_obj)
if explicit_tags:
log.status.Print('Tags:')
for tag in explicit_tags:
log.status.Print('- ' + str(tag))
console_io.PromptContinue(
'This operation will delete the tags and images identified by the '
'digests above.',
default=True,
cancel_on_no=True)
explicit_tags.update(implicit_tags)
result = []
for tag in explicit_tags:
self._DeleteDockerTagOrDigest(tag, http_obj)
result.append({'name': str(tag)})
for digest in digests:
self._DeleteDockerTagOrDigest(digest, http_obj)
result.append({'name': str(digest)})
return result
def _ProcessImageNames(self, image_names):
digests = set()
tags = set()
for image_name in image_names:
docker_obj = util.GetDockerImageFromTagOrDigest(image_name)
if isinstance(docker_obj, docker_name.Digest):
digests.add(docker_obj)
elif isinstance(docker_obj, docker_name.Tag):
if not util.IsFullySpecified(image_name):
log.warning('Implicit ":latest" tag specified: ' + image_name)
tags.add(docker_obj)
return [digests, tags]
def _DeleteDockerTagOrDigest(self, tag_or_digest, http_obj):
docker_session.Delete(
creds=util.CredentialProvider(), name=tag_or_digest, transport=http_obj)
log.DeletedResource(tag_or_digest)
def _DeleteDigestAndAssociatedTags(self, digest, http_obj):
util.DeleteTagsFromDigest(digest, http_obj)
tag_list = util.GetTagNamesForDigest(digest, http_obj)
for tag in tag_list:
log.DeletedResource(tag)
docker_session.Delete(
creds=util.CredentialProvider(), name=digest, transport=http_obj)
log.DeletedResource(digest)
def _PrintDigest(self, digest, http_obj):
log.status.Print('- ' + str(digest))
self._DisplayDigestTags(digest, http_obj)
def _DisplayDigestTags(self, digest, http_obj):
tag_list = util.GetTagNamesForDigest(digest, http_obj)
if not tag_list:
return
fmt = ('list[title=" Associated tags:"]')
resource_printer.Print(tag_list, fmt, out=log.status)
| true
| true
|
1c405cd20bb551e5550c516adf161146b5589e2a
| 2,439
|
py
|
Python
|
day05_solve_puzzle1.py
|
olsgaard/adventofcode2019
|
1e14fd38f472cde894cbbbd7eebdd6545cd4c4ee
|
[
"MIT"
] | 1
|
2019-12-05T20:08:07.000Z
|
2019-12-05T20:08:07.000Z
|
day05_solve_puzzle1.py
|
olsgaard/adventofcode2019
|
1e14fd38f472cde894cbbbd7eebdd6545cd4c4ee
|
[
"MIT"
] | null | null | null |
day05_solve_puzzle1.py
|
olsgaard/adventofcode2019
|
1e14fd38f472cde894cbbbd7eebdd6545cd4c4ee
|
[
"MIT"
] | null | null | null |
"""
The TEST diagnostic program will run on your existing Intcode computer after a few modifications:
First, you'll need to add two new instructions:
Opcode 3 takes a single integer as input and saves it to the position given by its only parameter. For example, the instruction 3,50 would take an input value and store it at address 50.
Opcode 4 outputs the value of its only parameter. For example, the instruction 4,50 would output the value at address 50.
"""
def parse_opcode(opcode: int):
    """Split an Intcode instruction into its mode digits and opcode.

    Returns (A, B, C, opcode): A, B, C are the parameter-mode digits
    (third, second and first parameter respectively) and opcode is the
    trailing two-digit operation code.
    """
    padded = str(opcode).rjust(5, "0")
    mode_a, mode_b, mode_c = (int(digit) for digit in padded[:3])
    return mode_a, mode_b, mode_c, int(padded[3:])
def parse_parameter(intcode, idx, mode=0):
if mode == 0:
return intcode[intcode[idx]]
elif mode == 1:
return intcode[idx]
def execute_intcode(intcode: list, _input=1):
    """Run an Intcode program on a copy of `intcode`.

    Opcode 1 adds, 2 multiplies, 3 stores `_input`, 4 appends an output,
    99 halts. Returns (final memory, outputs, halt position).
    """
    intcode = intcode.copy()
    output = []
    i = 0
    while True:
        A, B, C, opcode = parse_opcode(intcode[i])
        if opcode in (1, 2):
            lhs = parse_parameter(intcode, i + 1, C)
            rhs = parse_parameter(intcode, i + 2, B)
            # Writes always use position mode: the target is the raw address.
            intcode[intcode[i + 3]] = lhs + rhs if opcode == 1 else lhs * rhs
            i += 4
        elif opcode == 3:
            intcode[intcode[i + 1]] = _input
            i += 2
        elif opcode == 4:
            output.append(parse_parameter(intcode, i + 1, C))
            i += 2
        elif opcode == 99:
            return intcode, output, i
        else:
            raise ValueError(f"Opcode {opcode} at position {i} is invalid. Current state of intcode:\n {intcode}")
# Testcases from audentis, https://www.reddit.com/r/adventofcode/comments/e6ob88/2019_day_5_part_1algorithms_dont_understand/f9s81tm/
assert execute_intcode([1101, 5, 6, 5, 99, 0, 11, 12])[0] == [1101, 5, 6, 5, 99, 11, 11, 12]
assert execute_intcode([1,0,0,0,99])[0] == [2,0,0,0,99]
assert execute_intcode([2,3,0,3,99])[0] == [2,3,0,6,99]
assert execute_intcode([2,4,4,5,99,0])[0] == [2,4,4,5,99,9801]
assert execute_intcode([1,1,1,4,99,5,6,0,99])[0] == [30,1,1,4,2,5,6,0,99]
assert execute_intcode([3,3,4,0,99])[0] == [3,3,4,1,99]
# Solve the puzzle: run the diagnostic program from the day's input file.
with open("input05.txt", 'r') as f:
    intcode = [int(i) for i in f.read().split(",")]
executed_intcode, outputs, final_position = execute_intcode(intcode)
# The diagnostic code is the last value the program outputs.
diagnotstic_code = outputs[-1]
print(executed_intcode)
print(outputs)
print(diagnotstic_code)
| 29.743902
| 186
| 0.684297
|
def parse_opcode(opcode: int):
opcode = str(opcode).rjust(5, "0")
A,B,C = [int(i) for i in opcode[:-2]]
opcode = int(opcode[-2:])
return A,B,C, opcode
def parse_parameter(intcode, idx, mode=0):
if mode == 0:
return intcode[intcode[idx]]
elif mode == 1:
return intcode[idx]
def execute_intcode(intcode: list, _input=1):
intcode = intcode.copy()
output = []
i = 0
A, B, C, opcode = parse_opcode(intcode[i])
while True:
if opcode == 1:
parameter1 = parse_parameter(intcode, i+1, C)
parameter2 = parse_parameter(intcode, i+2, B)
intcode[intcode[i+3]] = parameter1 + parameter2
i += 4
elif opcode == 2:
parameter1 = parse_parameter(intcode, i+1, C)
parameter2 = parse_parameter(intcode, i+2, B)
intcode[intcode[i+3]] = parameter1 * parameter2
i += 4
elif opcode == 3:
intcode[intcode[i+1]] = _input
i += 2
elif opcode == 4:
parameter1 = parse_parameter(intcode, i+1, C)
output.append(parameter1)
i +=2
elif opcode == 99:
return intcode, output, i
else:
raise ValueError(f"Opcode {opcode} at position {i} is invalid. Current state of intcode:\n {intcode}")
A, B, C, opcode = parse_opcode(intcode[i])
assert execute_intcode([1101, 5, 6, 5, 99, 0, 11, 12])[0] == [1101, 5, 6, 5, 99, 11, 11, 12]
assert execute_intcode([1,0,0,0,99])[0] == [2,0,0,0,99]
assert execute_intcode([2,3,0,3,99])[0] == [2,3,0,6,99]
assert execute_intcode([2,4,4,5,99,0])[0] == [2,4,4,5,99,9801]
assert execute_intcode([1,1,1,4,99,5,6,0,99])[0] == [30,1,1,4,2,5,6,0,99]
assert execute_intcode([3,3,4,0,99])[0] == [3,3,4,1,99]
with open("input05.txt", 'r') as f:
intcode = [int(i) for i in f.read().split(",")]
executed_intcode, outputs, final_position = execute_intcode(intcode)
diagnotstic_code = outputs[-1]
print(executed_intcode)
print(outputs)
print(diagnotstic_code)
| true
| true
|
1c405d02adb8a1818347e2c4406ab9da198e1886
| 3,666
|
py
|
Python
|
actrneuro/iccm2012_preofficial_ACT-R_7/evaluate.py
|
Sn0wfir3/cogmods
|
b7a5867e2daa160148872f97a855baab1f645d39
|
[
"MIT"
] | null | null | null |
actrneuro/iccm2012_preofficial_ACT-R_7/evaluate.py
|
Sn0wfir3/cogmods
|
b7a5867e2daa160148872f97a855baab1f645d39
|
[
"MIT"
] | 11
|
2020-05-04T09:05:29.000Z
|
2021-04-08T13:22:34.000Z
|
actrneuro/iccm2012_preofficial_ACT-R_7/evaluate.py
|
Sn0wfir3/cogmods
|
b7a5867e2daa160148872f97a855baab1f645d39
|
[
"MIT"
] | 12
|
2020-05-02T09:36:14.000Z
|
2021-06-22T08:10:45.000Z
|
import numpy as np
import os
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib
def evaluate(output_path="./plots", filename="actr7"):
    """Aggregate BOLD responses from ./log/bolds and plot them.

    Walks every per-run directory under ./log/bolds (skipping training
    runs), sorts each run's buffer columns by name so runs line up, then
    plots the per-buffer mean BOLD response with a std band to
    <output_path>/<filename>graph.png.
    """
    boldfiles = [x[0] for x in os.walk("./log/bolds")]
    inslen = 0
    arrays = []
    longcount = 0
    for boldfile in sorted(boldfiles):
        # os.walk also yields shallow parent directories; only deep
        # per-run directories contain a bold-response.dat.
        if boldfile.count("/") < 5:
            continue
        if "training" in boldfile.lower():
            continue
        bold_result, buffer_list = get_bold(os.path.abspath(boldfile + "/bold-response.dat"))
        inslen = inslen + len(bold_result)
        if len(bold_result) > 32:
            longcount = longcount + 1
        # Reorder columns alphabetically by buffer name so the same column
        # index means the same buffer in every run.
        permutation = np.argsort(buffer_list)
        for bs_ix in range(len(bold_result)):
            bold_result[bs_ix] = bold_result[bs_ix, permutation]
        buffer_list = buffer_list[permutation]
        arrays.append(bold_result)
    print("insgesamt länge ", inslen)
    print(longcount)
    if not arrays:
        # Nothing to plot; previously this fell through to a NameError on
        # buffer_list.
        return
    matplotlib.use("agg", force=True)
    plt.clf()
    plt.tight_layout()
    for b in range(len(buffer_list)):
        arrr = []
        buffer = buffer_list[b]
        for a in arrays:
            nparray = np.array(a)
            arrr.append(nparray[:, b])
        axes = plt.gca()
        buffer = buffer.lower()
        # Buffers not of interest for this figure are skipped entirely.
        if buffer in ["time", "temporal", "aural-location", "vocal", "visual-location", "production", "aural", "visual", "retrieval"]:
            continue
        y, error = tolerant_mean(arrr)
        # Marker/color per buffer; some branches are currently unreachable
        # because those buffers are filtered above, kept for completeness.
        if buffer == "retrieval":
            marker = "+"
            color = "black"
        elif buffer == "goal":
            marker = "4"
            color = "pink"
        elif buffer == "manual":
            marker = "v"
            color = "y"
        elif buffer == "visual":
            marker = "s"
            color = "b"
        elif buffer == "aural":
            marker = "s"
            color = "purple"
        elif buffer == "imaginal":
            marker = "o"
            color = "g"
        else:
            # Was `1/0` (a deliberate crash); raise a descriptive error instead.
            raise ValueError(f"no marker/color defined for buffer {buffer!r}")
        # Samples are taken every 0.5 s, hence the /2 on the x axis.
        axes.plot(np.arange(len(y))/2, y, label=buffer.lower(), marker=marker, color=color)
        plt.fill_between(np.arange(len(y))/2, y - error, y + error, color=color, alpha=0.2)
    plt.xlabel('Time (seconds)')
    plt.ylabel('BOLD response')
    axes.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=3, fancybox=True, shadow=True)
    axes.set_ylim([0, 1])
    axes.set_xlim([0, 25])
    plt.savefig(output_path + "/" + filename + "graph.png")
    plt.clf()
def tolerant_mean(arrs):
    """Column-wise mean and std over sequences of unequal length.

    Each input sequence becomes one column of a masked array; positions
    past a sequence's end stay masked, so each per-row mean/std only
    considers the sequences that are long enough to reach that row.
    """
    longest = max(len(seq) for seq in arrs)
    stacked = np.ma.empty((longest, len(arrs)))
    stacked.mask = True
    for col, seq in enumerate(arrs):
        stacked[:len(seq), col] = seq
    return stacked.mean(axis=-1), stacked.std(axis=-1)
def get_bold(boldfile):
    """Parse an ACT-R bold-response.dat file.

    The first non-warning line is a whitespace-separated header of buffer
    names; every following non-empty line is one row of float samples.
    Lines starting with "#|Warning" are counted and skipped.

    Returns:
        (bold, buffers): bold is a 2-D float ndarray (time step x buffer),
        buffers is an ndarray of the header's buffer names.
    """
    bold = []
    # Bug fix: `warnings` was incremented without ever being initialised,
    # raising UnboundLocalError whenever a warning line appeared.
    warnings = 0
    with open(boldfile) as f:
        line = f.readline()
        if "#|Warning" in line:
            line = f.readline()
            warnings += 1
        line = line.strip().split(" ")
        while "" in line:
            line.remove("")
        bufferstring = line
        line = f.readline()
        while len(line) > 1:
            if "#|Warning" in line:
                warnings += 1
                line = f.readline()
                continue
            line = line.strip().split(" ")
            while "" in line:
                line.remove("")
            line = [float(i) for i in line]
            bold.append(line)
            line = f.readline()
    return np.array(bold), np.array(bufferstring)
evaluate()
| 21.438596
| 134
| 0.525914
|
import numpy as np
import os
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib
def evaluate( output_path="./plots", filename="actr7"):
bold_table = np.zeros([13, 60])
bold_table_divider = np.zeros([13, 60])
boldfiles = [x[0] for x in os.walk("./log/bolds")]
inslen = 0
arrays = []
longcount = 0
for boldfile in sorted(boldfiles):
if boldfile.count("/") < 5:
continue
if "training" in boldfile.lower():
continue
bold_result, buffer_list = get_bold(os.path.abspath(boldfile + "/bold-response.dat"))
inslen = inslen + len(bold_result)
if len(bold_result) > 32:
longcount = longcount + 1
permutation = np.argsort(buffer_list)
for bs_ix in range(len(bold_result)):
bold_result[bs_ix] = bold_result[bs_ix, permutation]
buffer_list = buffer_list[permutation]
arrays.append(bold_result)
print("insgesamt länge ", inslen)
print(longcount)
matplotlib.use("agg", force=True)
plt.clf()
plt.tight_layout()
for b in range(len(buffer_list)):
arrr = []
buffer = buffer_list[b]
for a in arrays:
nparray = np.array(a)
arrr.append(nparray[:, b])
axes = plt.gca()
buffer = buffer.lower()
if buffer in ["time", "temporal", "aural-location", "vocal", "visual-location", "production", "aural", "visual", "retrieval"]:
continue
y, error = tolerant_mean(arrr)
buffer = buffer.lower()
if buffer == "retrieval":
marker = "+"
color = "black"
elif buffer == "goal":
marker = "4"
color = "pink"
elif buffer == "manual":
marker = "v"
color = "y"
elif buffer == "visual":
marker = "s"
color = "b"
elif buffer == "aural":
marker = "s"
color = "purple"
elif buffer == "imaginal":
marker = "o"
color = "g"
else:
print(buffer)
1/0
axes.plot(np.arange(len(y))/2, y, label=buffer.lower(), marker=marker, color=color)
plt.fill_between(np.arange(len(y))/2, y - error, y + error, color=color, alpha=0.2)
plt.xlabel('Time (seconds)')
plt.ylabel('BOLD response')
axes.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=3, fancybox=True, shadow=True)
axes.set_ylim([0, 1])
axes.set_xlim([0, 25])
plt.savefig(output_path + "/" + filename + "graph.png")
plt.clf()
def tolerant_mean(arrs):
lens = [len(i) for i in arrs]
arr = np.ma.empty((np.max(lens),len(arrs)))
arr.mask = True
for idx, l in enumerate(arrs):
arr[:len(l),idx] = l
return arr.mean(axis = -1), arr.std(axis=-1)
def get_bold(boldfile):
bold = []
with open(boldfile) as f:
line = f.readline()
if "#|Warning" in line:
line = f.readline()
warnings += 1
line = line.strip().split(" ")
while "" in line:
line.remove("")
bufferstring = line
line = f.readline()
while len(line) > 1:
if "#|Warning" in line:
warnings += 1
line = f.readline()
continue
line = line.strip().split(" ")
while "" in line:
line.remove("")
line = [float(i) for i in line]
bold.append(line)
line = f.readline()
return np.array(bold), np.array(bufferstring)
evaluate()
| true
| true
|
1c405d6fd314843ead1df39e26c6ac5d095fc3b8
| 3,966
|
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
Ilyazv/recipe-app-api
|
da20425abaecb9581e78ae797bb6ce48c507c2c0
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
Ilyazv/recipe-app-api
|
da20425abaecb9581e78ae797bb6ce48c507c2c0
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
Ilyazv/recipe-app-api
|
da20425abaecb9581e78ae797bb6ce48c507c2c0
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Test the publicly available tags API"""

    def setUp(self):
        # Unauthenticated API client for the public endpoints.
        self.client = APIClient()

    def test_login_required(self):
        """Test that login is required for retrieving tags"""
        res = self.client.get(TAGS_URL)

        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Test the authorized user tags API"""

    def setUp(self):
        # Create a user and force-authenticate the client as that user.
        self.user = get_user_model().objects.create_user(
            'test@test.com',
            'testpass'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tegs(self):
        """Test retrieving tags"""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')

        res = self.client.get(TAGS_URL)

        # Response must match all tags serialized in reverse-name order.
        tags = Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_tags_limited_to_user(self):
        """Test that tags returned are for the authenticated user"""
        # A second user's tag must not leak into the response.
        user2 = get_user_model().objects.create_user(
            'other@test.com',
            'testpass'
        )
        Tag.objects.create(user=user2, name='Fruity')
        tag = Tag.objects.create(user=self.user, name='Comfort Food')

        res = self.client.get(TAGS_URL)

        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], tag.name)

    def test_create_tag_successful(self):
        """Test creating a new tag"""
        payload = {'name': 'Test tag'}
        self.client.post(TAGS_URL, payload)

        exists = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)

    def test_create_tag_invalid(self):
        """Test creating a new tag with invalid payload"""
        payload = {'name': ''}
        res = self.client.post(TAGS_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_tags_assigned_to_recipes(self):
        """Test filtering tags by those assigned to recipes"""
        tag1 = Tag.objects.create(user=self.user, name='Breakfast')
        tag2 = Tag.objects.create(user=self.user, name='Lunch')
        recipe = Recipe.objects.create(
            title='Coriander eggs',
            time_minutes=10,
            price=5.00,
            user=self.user
        )
        recipe.tags.add(tag1)

        res = self.client.get(TAGS_URL, {'assigned_only': 1})

        # Only the tag attached to a recipe should be returned.
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_tags_assigned_unique(self):
        """test filtering tags by assigned returns unique items"""
        # One tag attached to two recipes must appear only once.
        tag = Tag.objects.create(user=self.user, name='Breakfast')
        Tag.objects.create(user=self.user, name='Lunch')
        recipe1 = Recipe.objects.create(
            title='Pancakes',
            time_minutes=5,
            price=10.00,
            user=self.user
        )
        recipe1.tags.add(tag)
        recipe2 = Recipe.objects.create(
            title='Porridge',
            time_minutes=3,
            price=2.00,
            user=self.user
        )
        recipe2.tags.add(tag)

        res = self.client.get(TAGS_URL, {'assigned_only': 1})

        self.assertEqual(len(res.data), 1)
| 31.228346
| 71
| 0.630106
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@test.com',
'testpass'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tegs(self):
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'other@test.com',
'testpass'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
payload = {'name': 'Test tag'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander eggs',
time_minutes=10,
price=5.00,
user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_assigned_unique(self):
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(
title='Pancakes',
time_minutes=5,
price=10.00,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Porridge',
time_minutes=3,
price=2.00,
user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
| true
| true
|
1c405fe0d996c7638acfe501be963e40135d26c4
| 23,498
|
py
|
Python
|
MainWindow.py
|
Ling-fengZhang/lab_gui
|
5d79298a9099bfa5f879568d40bcf68ef4604f3d
|
[
"MIT"
] | 1
|
2021-08-16T19:39:18.000Z
|
2021-08-16T19:39:18.000Z
|
MainWindow.py
|
Ling-fengZhang/lab_gui
|
5d79298a9099bfa5f879568d40bcf68ef4604f3d
|
[
"MIT"
] | null | null | null |
MainWindow.py
|
Ling-fengZhang/lab_gui
|
5d79298a9099bfa5f879568d40bcf68ef4604f3d
|
[
"MIT"
] | null | null | null |
from Model.Instruments.Camera.Chameleon import Chameleon
from Utilities.Helper import settings, Helper
from Utilities.IO import IOHelper
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from Widget.CoreWidget.PlotMainWindowWidget import PlotMainWindow
from Widget.CoreWidget.ImgQueueWidget import ImgQueueWidget
from Widget.CoreWidget.ImgDisplaySetting import ImgDisplaySetting
from Widget.CoreWidget.AnalyseDataWidget import ImgAnalysisSetting
from Widget.CoreWidget.PromptWidget import PromptWidget
from Widget.CoreWidget.ResultWidget import ResultWidget
from Widget.CustomWidget.CameraSettingWidget import CameraOption
import numpy as np
import sys
from PIL import Image
import time
from pathlib import Path
import datetime
class TestMainWindow(QMainWindow):
sig_abort_workers = pyqtSignal()
    def __init__(self):
        """Build the main window: central plot widget, dock widgets,
        menus/toolbars, stdout redirection, and the (initially idle)
        camera-acquisition worker/thread plumbing."""
        super(TestMainWindow, self).__init__()
        ### MENUS AND TOOLBARS ###
        self.fileMenu = self.menuBar().addMenu("File")
        self.windowMenu = self.menuBar().addMenu("Window")
        self.optionMenu = self.menuBar().addMenu("Options")
        self.plotToolbar = self.addToolBar("Plot")
        self.expToolbar = self.addToolBar("Experiment")
        # experiment start/stop buttons
        self.start_exp_action = Helper.create_action(self, "Start Experiment", slot=self.start_exp, icon="start")
        self.stop_exp_action = Helper.create_action(self, "Stop Experiment", slot=self.stop_exp, icon="stop")
        self.stop_exp_action.setEnabled(False)
        # plot buttons
        self.clear_img_stack_action = Helper.create_action(self, "clear image stack", slot=self.clear_img_stack, icon="clear_img_stack")
        self.clear_main_win_action = Helper.create_action(self, "clear main window", slot=self.clear_main_win, icon="clear_main_win")
        ### CREATE WIDGET ###
        # global parameters (NOTE: 'inintParams' is the project's API name, typo included)
        settings.inintParams()
        self.plot_main_window = PlotMainWindow()
        self.setCentralWidget(self.plot_main_window)
        # image queue dock
        self.img_queue = ImgQueueWidget()
        # create a QDockWidget
        imgQueueDockWidget = QDockWidget("Image Stack", self)
        imgQueueDockWidget.setObjectName("imgStackDockWidget")
        imgQueueDockWidget.setAllowedAreas(
            Qt.LeftDockWidgetArea)
        imgQueueDockWidget.setWidget(self.img_queue)
        self.addDockWidget(Qt.LeftDockWidgetArea, imgQueueDockWidget)
        self.windowMenu.addAction(imgQueueDockWidget.toggleViewAction())
        # image display setting dock
        self.img_display_setting = ImgDisplaySetting()
        # create a QDockWidget
        displaySettingDockWidget = QDockWidget("Display Setting", self)
        displaySettingDockWidget.setObjectName("displaySettingDockWidget")
        displaySettingDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)
        displaySettingDockWidget.setWidget(self.img_display_setting)
        self.addDockWidget(Qt.RightDockWidgetArea, displaySettingDockWidget)
        # enable the toggle view action
        self.windowMenu.addAction(displaySettingDockWidget.toggleViewAction())
        # image analyse setting dock
        self.img_analyse_setting = ImgAnalysisSetting()
        analyseDataDockWidget = QDockWidget("Analyse Data", self)
        analyseDataDockWidget.setObjectName("analyseDataDockWidget")
        analyseDataDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)
        analyseDataDockWidget.setWidget(self.img_analyse_setting)
        self.addDockWidget(Qt.RightDockWidgetArea, analyseDataDockWidget)
        self.windowMenu.addAction(analyseDataDockWidget.toggleViewAction())
        # camera setting dock
        self.camera_setting = CameraOption()
        cameraSettingDockWidget = QDockWidget("Camera Setting", self)
        cameraSettingDockWidget.setObjectName("cameraSettingDockWidget")
        cameraSettingDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)
        cameraSettingDockWidget.setWidget(self.camera_setting)
        self.addDockWidget(Qt.RightDockWidgetArea, cameraSettingDockWidget)
        self.windowMenu.addAction(cameraSettingDockWidget.toggleViewAction())
        # output dock
        self.prompt_dock = PromptWidget()
        promptDockWidget = QDockWidget("Output Console", self)
        promptDockWidget.setObjectName("consoleDockWidget")
        promptDockWidget.setAllowedAreas(Qt.BottomDockWidgetArea)
        promptDockWidget.setWidget(self.prompt_dock)
        self.addDockWidget(Qt.BottomDockWidgetArea, promptDockWidget)
        # redirect print statements to show a copy on "console"
        sys.stdout = Helper.print_redirect()
        sys.stdout.print_signal.connect(self.update_console)
        self.windowMenu.addAction(promptDockWidget.toggleViewAction())
        # result dock
        self.result_dock = ResultWidget()
        resultDockWidget = QDockWidget("Result Console", self)
        resultDockWidget.setObjectName("resultDockWidget")
        resultDockWidget.setAllowedAreas(Qt.BottomDockWidgetArea)
        resultDockWidget.setWidget(self.result_dock)
        self.addDockWidget(Qt.BottomDockWidgetArea, resultDockWidget)
        self.windowMenu.addAction(resultDockWidget.toggleViewAction())
        ### TOOLBAR MENU ###
        self.expToolbar.setObjectName("ExperimentToolbar")
        self.expToolbar.addAction(self.start_exp_action)
        self.expToolbar.addAction(self.stop_exp_action)
        self.plotToolbar.setObjectName("PlotToolbar")
        self.plotToolbar.addAction(self.clear_img_stack_action)
        self.plotToolbar.addAction(self.clear_main_win_action)
        self.fileLoadImgAction = Helper.create_action(self,
                                                      "Load Previous Images",
                                                      slot=self.file_load_imgs,
                                                      shortcut=None,
                                                      icon=None,
                                                      tip="Load previous images to image stack from file")
        self.fileSaveImgAction = Helper.create_action(self,
                                                      "Save Image Data",
                                                      slot=self.file_save_imgs,
                                                      shortcut=None,
                                                      icon=None,
                                                      tip="Save image stack's images")
        self.fileMenu.addAction(self.fileLoadImgAction)
        self.fileMenu.addAction(self.fileSaveImgAction)
        # queue for update main window when camera is in video mode
        self.acquiring = False
        # thread for acquiring image from camera to queue
        self.thread = None
        self.worker = None
        self.connect_slot2signal()
        self.setWindowIcon(QIcon('images/icon/UALab.png'))
        self.show()
    def change_camera_params(self):
        """Apply new camera settings by restarting the acquisition worker.

        Connected to the camera-setting Apply button.  The button is
        disabled while the old thread is torn down and a fresh Worker
        (which re-reads exposure/gain/shutter from `settings`) is started.

        NOTE(review): indentation was lost in this copy; the nesting below
        (worker restart only while acquiring) is a reconstruction -- confirm
        against the original source.
        """
        self.camera_setting.apply_button.setEnabled(False)
        if self.acquiring:
            self.sig_abort_workers.emit()
            self.thread.quit()  # this will quit **as soon as thread event loop unblocks**
            self.thread.wait()  # <- so you need to wait for it to *actually* quit
            print("camera thread quit")
            # Worker.__init__ re-reads the camera parameters from settings
            self.worker = Worker()
            self.thread = QThread()
            self.worker.moveToThread(self.thread)
            self.worker.sig_video_mode_img.connect(self.update_main_plot_win)
            self.worker.sig_hardware_mode_img.connect(self.update_image_queue)
            # control worker:
            self.sig_abort_workers.connect(self.worker.abort)
            self.thread.started.connect(self.worker.work)
            self.thread.start()  # this will emit 'started' and start thread's event loop
            print("camera setting is applied ")
        self.camera_setting.apply_button.setEnabled(True)
    def change_camera_mode(self, mode):
        """Switch acquisition between video (mode 0) and hardware (mode 2).

        Connected to stateChanged of both mode checkboxes; *mode* is the
        checkbox that changed.  When a new mode is checked while acquiring,
        the worker thread is stopped, the UI is reconfigured for the mode,
        and a fresh worker is started.

        NOTE(review): indentation was lost in this copy; the nesting below
        is the most plausible reconstruction -- confirm against the original.
        """
        if self.acquiring:
            if mode.isChecked():
                self.sig_abort_workers.emit()
                self.thread.quit()  # this will quit **as soon as thread event loop unblocks**
                self.thread.wait()  # <- so you need to wait for it to *actually* quit
                print("camera thread quit")
                if mode.text() == 'video mode':
                    settings.widget_params["Image Display Setting"]["mode"] = 0
                    # video mode: camera parameters may be tuned live
                    self.img_display_setting.hardware_mode.setEnabled(True)
                    self.img_display_setting.video_mode.setEnabled(False)
                    self.img_display_setting.hardware_mode.setChecked(False)
                    self.camera_setting.apply_button.setEnabled(True)
                    self.camera_setting.camera_further_setting.gain_value.setEnabled(True)
                    self.camera_setting.camera_further_setting.exposure_time.setEnabled(True)
                    self.camera_setting.camera_further_setting.shutter_time.setEnabled(True)
                elif mode.text() == 'hardware mode':
                    settings.widget_params["Image Display Setting"]["mode"] = 2
                    # hardware-triggered mode: lock out live parameter edits
                    self.img_display_setting.hardware_mode.setEnabled(False)
                    self.img_display_setting.video_mode.setChecked(False)
                    self.img_display_setting.video_mode.setEnabled(True)
                    self.camera_setting.apply_button.setEnabled(False)
                    # NOTE(review): duplicated call kept verbatim (harmless no-op)
                    self.camera_setting.apply_button.setEnabled(False)
                    self.camera_setting.camera_further_setting.gain_value.setEnabled(False)
                    self.camera_setting.camera_further_setting.exposure_time.setEnabled(False)
                    self.camera_setting.camera_further_setting.shutter_time.setEnabled(False)
                # restart the acquisition worker in the newly selected mode
                self.worker = Worker()
                self.thread = QThread()
                self.worker.moveToThread(self.thread)
                self.worker.sig_video_mode_img.connect(self.update_main_plot_win)
                self.worker.sig_hardware_mode_img.connect(self.update_image_queue)
                # control worker:
                self.sig_abort_workers.connect(self.worker.abort)
                self.thread.started.connect(self.worker.work)
                self.thread.start()  # this will emit 'started' and start thread's event loop
                print("camera is in new mode")
    def start_exp(self):
        """
        start basis experiment include capturing images, more operations can be
        added here or use a script file to control instrument accurately.

        Requires a camera index to have been selected; otherwise only prints
        a hint.  Starts the acquisition worker thread and locks the UI
        controls that must not change mid-experiment.
        :return:
        """
        if settings.instrument_params["Camera"]["index"] is not None:
            # lock actions that conflict with a running acquisition
            self.start_exp_action.setEnabled(False)
            self.fileLoadImgAction.setEnabled(False)
            self.fileSaveImgAction.setEnabled(False)
            self.img_display_setting.video_mode.setEnabled(True)
            self.img_display_setting.hardware_mode.setEnabled(True)
            self.clear_img_stack_action.setEnabled(False)
            self.clear_main_win_action.setEnabled(False)
            # spin up the acquisition worker on its own thread
            self.worker = Worker()
            self.thread = QThread()
            self.worker.moveToThread(self.thread)
            self.worker.sig_video_mode_img.connect(self.update_main_plot_win)
            self.worker.sig_hardware_mode_img.connect(self.update_image_queue)
            # control worker:
            self.sig_abort_workers.connect(self.worker.abort)
            self.thread.started.connect(self.worker.work)
            self.thread.start()  # this will emit 'started' and start thread's event loop
            # finish camera index setting, then can't change camera index during experiment,
            # if want to change camera index, then stop experiment
            self.camera_setting.cb.setEnabled(False)
            self.camera_setting.further_setting.setEnabled(True)
            self.camera_setting.apply_button.setEnabled(True)
            settings.widget_params["Image Display Setting"]["imgSource"] = "camera"
            # default to video mode (mode 0) on experiment start
            self.img_display_setting.video_mode.setChecked(True)
            self.img_display_setting.video_mode.setEnabled(False)
            settings.widget_params["Image Display Setting"]["mode"] = 0
            self.acquiring = True
            self.stop_exp_action.setEnabled(True)
        else:
            print("select a camera for further experiment")
    def stop_exp(self):
        """
        stop basis experiment include capturing images when image source is camera.

        Aborts the worker thread (if running) and restores the idle-state UI.
        :return:
        """
        self.stop_exp_action.setEnabled(False)
        if self.acquiring:
            self.sig_abort_workers.emit()
            self.thread.quit()  # this will quit **as soon as thread event loop unblocks**
            self.thread.wait()  # <- so you need to wait for it to *actually* quit
            self.acquiring = False
        # restore the idle-state UI
        self.start_exp_action.setEnabled(True)
        self.fileLoadImgAction.setEnabled(True)
        self.fileSaveImgAction.setEnabled(True)
        self.clear_img_stack_action.setEnabled(True)
        self.clear_main_win_action.setEnabled(True)
        self.camera_setting.cb.setEnabled(True)
        self.camera_setting.further_setting.setEnabled(False)
        self.img_display_setting.video_mode.setChecked(False)
        self.img_display_setting.hardware_mode.setChecked(False)
        self.img_display_setting.video_mode.setEnabled(False)
        self.img_display_setting.hardware_mode.setEnabled(False)
    def connect_slot2signal(self):
        """Wire widget signals to their handlers (called once from __init__)."""
        # image display widget
        # all parameters' signal are connected to global parameters.
        self.img_display_setting.video_mode.stateChanged.connect(
            lambda: self.change_camera_mode(self.img_display_setting.video_mode)
        )
        self.img_display_setting.hardware_mode.stateChanged.connect(
            lambda: self.change_camera_mode(self.img_display_setting.hardware_mode)
        )
        # image stack widget: each stack window can push its image to the main plot
        for i in range(settings.widget_params["Image Display Setting"]["img_stack_num"]):
            plot_win = self.img_queue.plot_wins.get()
            plot_win.img_dict.connect(self.plot_main_window.img_plot)
            self.img_queue.plot_wins.put(plot_win)
        # plot main window widget
        self.plot_main_window.atom_number.connect(self.result_dock.change_atom_num)
        # analyse data widget
        self.img_analyse_setting.roi.stateChanged.connect(
            lambda: self.plot_main_window.add_roi(self.img_analyse_setting.roi, self.img_analyse_setting.cross_axes)
        )
        self.img_analyse_setting.cross_axes.stateChanged.connect(
            lambda: self.plot_main_window.add_cross_axes(self.img_analyse_setting.cross_axes)
        )
        # camera setting widget: one Apply click pushes exposure/gain/shutter
        # to the camera and then restarts the worker (change_camera_params)
        self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_exposure)
        self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_gain)
        self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_shutter)
        self.camera_setting.apply_button.clicked.connect(self.change_camera_params)
def clear_img_stack(self):
"""
clear image stack
:return:
"""
if self.acquiring and settings.widget_params["Image Display Setting"]["mode"] == 0:
print("video mode can't clear image stack")
return
# make sure that queue isn't changing when using qsize()
for i in range(settings.widget_params["Image Display Setting"]["img_stack_num"]):
plot_win = self.img_queue.plot_wins.get()
plot_win.clear_win()
self.img_queue.plot_wins.put(plot_win)
def clear_main_win(self):
"""
clear main windows
:return:
"""
if self.acquiring and settings.widget_params["Image Display Setting"]["mode"] == 0:
print("video mode can't clear main window")
return
self.plot_main_window.clear_win()
### LOAD CUSTOM SETTING FOR INSTRUMENT CONNECT AND PARAMETERS ###
def file_save_imgs(self):
"""
save image stack's images to disk
:return:
"""
fpath = IOHelper.get_config_setting('DATA_PATH')
fpath = Path(fpath)
dir_path = fpath.joinpath(str(datetime.datetime.now()).split('.')[0].replace(' ', '-').replace(':', '_'))
print("save images to {}".format(dir_path))
if not dir_path.exists():
dir_path.mkdir()
for i in range(settings.widget_params["Image Display Setting"]["img_stack_num"]):
plot_win = self.img_queue.plot_wins.get()
if plot_win.video.image is not None:
img_data = np.array(plot_win.video.image)
# load image name by path
img_name = (plot_win.img_label.text()).split('.')[0].replace(' ', '-').replace(':', '_')
img_data = Image.fromarray(img_data)
img_data.save(r"{}\{}.png".format(dir_path, img_name))
self.img_queue.plot_wins.put(plot_win)
print("images have saved.")
def file_load_imgs(self):
"""
Load previous image to stack.
:return:
"""
self.load_img2stack()
def load_img2stack(self):
"""
load images to image queue, with image name and data
"""
settings.widget_params["Image Display Setting"]["imgSource"] = "disk"
fpath = IOHelper.get_config_setting('DATA_PATH')
img_fpath = QFileDialog.getExistingDirectory(self, "Open File", fpath)
img_file = Path(img_fpath)
img_paths = list(img_file.glob('*.png'))
for win_index in range(settings.widget_params["Image Display Setting"]["img_stack_num"]):
if win_index == len(img_paths):
break
plot_win = self.img_queue.plot_wins.get()
plot_win.img_plot(self.load_img_dict(img_paths[win_index]))
self.img_queue.plot_wins.put(plot_win)
### MISCELLANY ###
def load_img_dict(self, img_path):
img_data = np.array(Image.open(img_path))
# load image name by path
img_name = img_path.stem
img = {
'img_name': img_name,
'img_data': img_data
}
return img
def update_console(self, stri):
MAX_LINES = 50
stri = str(stri)
new_text = self.prompt_dock.console_text() + '\n' + stri
line_list = new_text.splitlines()
N_lines = min(MAX_LINES, len(line_list))
# limit output lines
new_text = '\n'.join(line_list[-N_lines:])
self.prompt_dock.console_text(new_text)
self.prompt_dock.automatic_scroll()
def update_main_plot_win(self, img_dict):
"""
Updates the main plot window at regular intervals. It designs for video mode
"""
# take the newest image in the queue
if img_dict is None:
return
self.plot_main_window.img_plot(img_dict)
def update_image_queue(self, img_dict):
plot_win = self.img_queue.plot_wins.get()
plot_win.img_plot(img_dict)
self.img_queue.plot_wins.put(plot_win)
print("update image queue")
class Worker(QObject):
    """
    Camera-acquisition worker, run on a background QThread.

    Must derive from QObject in order to emit signals, connect slots to
    other signals, and operate in a QThread.
    """
    # emitted with {'img_name': str(timestamp), 'img_data': list} payloads
    sig_video_mode_img = pyqtSignal(dict)
    sig_hardware_mode_img = pyqtSignal(dict)

    def __init__(self):
        super().__init__()
        # configure the camera from the current global settings snapshot
        self.camera = Chameleon()
        self.camera.initializeCamera(settings.instrument_params["Camera"]["index"])
        self.camera.setAcquisitionMode(settings.widget_params["Image Display Setting"]["mode"])
        self.camera.setExposure(settings.instrument_params["Camera"]["exposure time"])
        self.camera.setShutter(settings.instrument_params["Camera"]["shutter time"])
        self.camera.setGain(settings.instrument_params["Camera"]["gain value"])
        # set a low grab timeout to avoid crash when retrieve image.
        self.camera.set_grab_timeout(grab_timeout=10)
        self.__abort = False

    @pyqtSlot()
    def work(self):
        """Acquisition loop: poll the camera and emit frames until aborted."""
        print("camera start work")
        self.camera.startAcquisition()
        while True:
            # check if we need to abort the loop; need to process events to receive signals;
            # BUGFIX: use the QApplication static method instead of the module
            # global 'app', which only exists when the file is run as a script
            # (__main__) and raises NameError when launched via start_main_win().
            QApplication.processEvents()  # this could cause change to self.__abort
            if self.__abort:
                break
            img_data = self.camera.retrieveOneImg()  # retrieve image from camera buffer
            if img_data is None:
                continue
            timestamp = datetime.datetime.now()
            if settings.widget_params["Image Display Setting"]["mode"] == 2:
                self.sig_hardware_mode_img.emit({'img_name': str(timestamp), 'img_data': Helper.split_list(img_data)})
            else:
                self.sig_video_mode_img.emit({'img_name': str(timestamp), 'img_data': Helper.split_list(img_data)})
            # set an appropriate refresh value
            time.sleep(0.1)
        self.camera.stopCamera()

    def abort(self):
        # polled by work(); a plain flag flip is sufficient here
        self.__abort = True
def start_main_win():
    """Create the QApplication with the dark "Fusion" theme and run the GUI."""
    app = QApplication(sys.argv)
    # Force the style to be the same on all OSs:
    app.setStyle("Fusion")
    # Now use a palette to switch to dark colors:
    dark = QColor(53, 53, 53)
    accent = QColor(42, 130, 218)
    palette = QPalette()
    for role, color in (
        (QPalette.Window, dark),
        (QPalette.WindowText, Qt.white),
        (QPalette.Base, QColor(25, 25, 25)),
        (QPalette.AlternateBase, dark),
        (QPalette.ToolTipBase, Qt.white),
        (QPalette.ToolTipText, Qt.white),
        (QPalette.Text, Qt.white),
        (QPalette.Button, dark),
        (QPalette.ButtonText, Qt.white),
        (QPalette.BrightText, Qt.red),
        (QPalette.Link, accent),
        (QPalette.Highlight, accent),
        (QPalette.HighlightedText, Qt.black),
    ):
        palette.setColor(role, color)
    app.setPalette(palette)
    app.setApplicationName("UALab")
    window = TestMainWindow()
    window.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
    # NOTE: 'app' is deliberately created at module scope here -- Worker.work()
    # references the module-global 'app' when the file is run as a script.
    app = QApplication(sys.argv)
    # Force the style to be the same on all OSs:
    app.setStyle("Fusion")
    # Now use a palette to switch to dark colors:
    _dark = QColor(53, 53, 53)
    _accent = QColor(42, 130, 218)
    palette = QPalette()
    for _role, _color in (
        (QPalette.Window, _dark),
        (QPalette.WindowText, Qt.white),
        (QPalette.Base, QColor(25, 25, 25)),
        (QPalette.AlternateBase, _dark),
        (QPalette.ToolTipBase, Qt.white),
        (QPalette.ToolTipText, Qt.white),
        (QPalette.Text, Qt.white),
        (QPalette.Button, _dark),
        (QPalette.ButtonText, Qt.white),
        (QPalette.BrightText, Qt.red),
        (QPalette.Link, _accent),
        (QPalette.Highlight, _accent),
        (QPalette.HighlightedText, Qt.black),
    ):
        palette.setColor(_role, _color)
    app.setPalette(palette)
    app.setApplicationName("UALab")
    window = TestMainWindow()
    window.show()
    sys.exit(app.exec_())
| 44.503788
| 136
| 0.658013
|
from Model.Instruments.Camera.Chameleon import Chameleon
from Utilities.Helper import settings, Helper
from Utilities.IO import IOHelper
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from Widget.CoreWidget.PlotMainWindowWidget import PlotMainWindow
from Widget.CoreWidget.ImgQueueWidget import ImgQueueWidget
from Widget.CoreWidget.ImgDisplaySetting import ImgDisplaySetting
from Widget.CoreWidget.AnalyseDataWidget import ImgAnalysisSetting
from Widget.CoreWidget.PromptWidget import PromptWidget
from Widget.CoreWidget.ResultWidget import ResultWidget
from Widget.CustomWidget.CameraSettingWidget import CameraOption
import numpy as np
import sys
from PIL import Image
import time
from pathlib import Path
import datetime
class TestMainWindow(QMainWindow):
sig_abort_workers = pyqtSignal()
def __init__(self):
super(TestMainWindow, self).__init__()
self.windowMenu = self.menuBar().addMenu("Window")
self.optionMenu = self.menuBar().addMenu("Options")
self.plotToolbar = self.addToolBar("Plot")
self.expToolbar = self.addToolBar("Experiment")
self.start_exp_action = Helper.create_action(self, "Start Experiment", slot=self.start_exp, icon="start")
self.stop_exp_action = Helper.create_action(self, "Stop Experiment", slot=self.stop_exp, icon="stop")
self.stop_exp_action.setEnabled(False)
self.clear_img_stack_action = Helper.create_action(self, "clear image stack", slot=self.clear_img_stack, icon="clear_img_stack")
self.clear_main_win_action = Helper.create_action(self, "clear main window", slot=self.clear_main_win, icon="clear_main_win")
self.plot_main_window = PlotMainWindow()
self.setCentralWidget(self.plot_main_window)
self.img_queue = ImgQueueWidget()
imgQueueDockWidget = QDockWidget("Image Stack", self)
imgQueueDockWidget.setObjectName("imgStackDockWidget")
imgQueueDockWidget.setAllowedAreas(
Qt.LeftDockWidgetArea)
imgQueueDockWidget.setWidget(self.img_queue)
self.addDockWidget(Qt.LeftDockWidgetArea, imgQueueDockWidget)
self.windowMenu.addAction(imgQueueDockWidget.toggleViewAction())
self.img_display_setting = ImgDisplaySetting()
displaySettingDockWidget = QDockWidget("Display Setting", self)
displaySettingDockWidget.setObjectName("displaySettingDockWidget")
displaySettingDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)
displaySettingDockWidget.setWidget(self.img_display_setting)
self.addDockWidget(Qt.RightDockWidgetArea, displaySettingDockWidget)
self.windowMenu.addAction(displaySettingDockWidget.toggleViewAction())
self.img_analyse_setting = ImgAnalysisSetting()
analyseDataDockWidget = QDockWidget("Analyse Data", self)
analyseDataDockWidget.setObjectName("analyseDataDockWidget")
analyseDataDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)
analyseDataDockWidget.setWidget(self.img_analyse_setting)
self.addDockWidget(Qt.RightDockWidgetArea, analyseDataDockWidget)
self.windowMenu.addAction(analyseDataDockWidget.toggleViewAction())
self.camera_setting = CameraOption()
cameraSettingDockWidget = QDockWidget("Camera Setting", self)
cameraSettingDockWidget.setObjectName("cameraSettingDockWidget")
cameraSettingDockWidget.setAllowedAreas(Qt.RightDockWidgetArea)
cameraSettingDockWidget.setWidget(self.camera_setting)
self.addDockWidget(Qt.RightDockWidgetArea, cameraSettingDockWidget)
self.windowMenu.addAction(cameraSettingDockWidget.toggleViewAction())
self.prompt_dock = PromptWidget()
promptDockWidget = QDockWidget("Output Console", self)
promptDockWidget.setObjectName("consoleDockWidget")
promptDockWidget.setAllowedAreas(Qt.BottomDockWidgetArea)
promptDockWidget.setWidget(self.prompt_dock)
self.addDockWidget(Qt.BottomDockWidgetArea, promptDockWidget)
sys.stdout = Helper.print_redirect()
sys.stdout.print_signal.connect(self.update_console)
self.windowMenu.addAction(promptDockWidget.toggleViewAction())
self.result_dock = ResultWidget()
resultDockWidget = QDockWidget("Result Console", self)
resultDockWidget.setObjectName("resultDockWidget")
resultDockWidget.setAllowedAreas(Qt.BottomDockWidgetArea)
resultDockWidget.setWidget(self.result_dock)
self.addDockWidget(Qt.BottomDockWidgetArea, resultDockWidget)
self.windowMenu.addAction(resultDockWidget.toggleViewAction())
erimentToolbar")
self.expToolbar.addAction(self.start_exp_action)
self.expToolbar.addAction(self.stop_exp_action)
self.plotToolbar.setObjectName("PlotToolbar")
self.plotToolbar.addAction(self.clear_img_stack_action)
self.plotToolbar.addAction(self.clear_main_win_action)
self.fileLoadImgAction = Helper.create_action(self,
"Load Previous Images",
slot=self.file_load_imgs,
shortcut=None,
icon=None,
tip="Load previous images to image stack from file")
self.fileSaveImgAction = Helper.create_action(self,
"Save Image Data",
slot=self.file_save_imgs,
shortcut=None,
icon=None,
tip="Save image stack's images")
self.fileMenu.addAction(self.fileLoadImgAction)
self.fileMenu.addAction(self.fileSaveImgAction)
# queue for update main window when camera is in video mode
self.acquiring = False
# thread for acquiring image from camera to queue
self.thread = None
self.worker = None
self.connect_slot2signal()
self.setWindowIcon(QIcon('images/icon/UALab.png'))
self.show()
def change_camera_params(self):
self.camera_setting.apply_button.setEnabled(False)
if self.acquiring:
self.sig_abort_workers.emit()
self.thread.quit() # this will quit **as soon as thread event loop unblocks**
self.thread.wait() # <- so you need to wait for it to *actually* quit
print("camera thread quit")
self.worker = Worker()
self.thread = QThread()
self.worker.moveToThread(self.thread)
self.worker.sig_video_mode_img.connect(self.update_main_plot_win)
self.worker.sig_hardware_mode_img.connect(self.update_image_queue)
# control worker:
self.sig_abort_workers.connect(self.worker.abort)
self.thread.started.connect(self.worker.work)
self.thread.start() # this will emit 'started' and start thread's event loop
print("camera setting is applied ")
self.camera_setting.apply_button.setEnabled(True)
def change_camera_mode(self, mode):
if self.acquiring:
if mode.isChecked():
self.sig_abort_workers.emit()
self.thread.quit()
self.thread.wait()
print("camera thread quit")
if mode.text() == 'video mode':
settings.widget_params["Image Display Setting"]["mode"] = 0
self.img_display_setting.hardware_mode.setEnabled(True)
self.img_display_setting.video_mode.setEnabled(False)
self.img_display_setting.hardware_mode.setChecked(False)
self.camera_setting.apply_button.setEnabled(True)
self.camera_setting.camera_further_setting.gain_value.setEnabled(True)
self.camera_setting.camera_further_setting.exposure_time.setEnabled(True)
self.camera_setting.camera_further_setting.shutter_time.setEnabled(True)
elif mode.text() == 'hardware mode':
settings.widget_params["Image Display Setting"]["mode"] = 2
self.img_display_setting.hardware_mode.setEnabled(False)
self.img_display_setting.video_mode.setChecked(False)
self.img_display_setting.video_mode.setEnabled(True)
self.camera_setting.apply_button.setEnabled(False)
self.camera_setting.apply_button.setEnabled(False)
self.camera_setting.camera_further_setting.gain_value.setEnabled(False)
self.camera_setting.camera_further_setting.exposure_time.setEnabled(False)
self.camera_setting.camera_further_setting.shutter_time.setEnabled(False)
self.worker = Worker()
self.thread = QThread()
self.worker.moveToThread(self.thread)
self.worker.sig_video_mode_img.connect(self.update_main_plot_win)
self.worker.sig_hardware_mode_img.connect(self.update_image_queue)
self.sig_abort_workers.connect(self.worker.abort)
self.thread.started.connect(self.worker.work)
self.thread.start()
print("camera is in new mode")
def start_exp(self):
if settings.instrument_params["Camera"]["index"] is not None:
self.start_exp_action.setEnabled(False)
self.fileLoadImgAction.setEnabled(False)
self.fileSaveImgAction.setEnabled(False)
self.img_display_setting.video_mode.setEnabled(True)
self.img_display_setting.hardware_mode.setEnabled(True)
self.clear_img_stack_action.setEnabled(False)
self.clear_main_win_action.setEnabled(False)
self.worker = Worker()
self.thread = QThread()
self.worker.moveToThread(self.thread)
self.worker.sig_video_mode_img.connect(self.update_main_plot_win)
self.worker.sig_hardware_mode_img.connect(self.update_image_queue)
# control worker:
self.sig_abort_workers.connect(self.worker.abort)
self.thread.started.connect(self.worker.work)
self.thread.start() # this will emit 'started' and start thread's event loop
# if want to change camera index, then stop experiment
self.camera_setting.cb.setEnabled(False)
self.camera_setting.further_setting.setEnabled(True)
self.camera_setting.apply_button.setEnabled(True)
settings.widget_params["Image Display Setting"]["imgSource"] = "camera"
self.img_display_setting.video_mode.setChecked(True)
self.img_display_setting.video_mode.setEnabled(False)
settings.widget_params["Image Display Setting"]["mode"] = 0
self.acquiring = True
self.stop_exp_action.setEnabled(True)
else:
print("select a camera for further experiment")
def stop_exp(self):
self.stop_exp_action.setEnabled(False)
if self.acquiring:
self.sig_abort_workers.emit()
self.thread.quit() # this will quit **as soon as thread event loop unblocks**
self.thread.wait() # <- so you need to wait for it to *actually* quit
self.acquiring = False
self.start_exp_action.setEnabled(True)
self.fileLoadImgAction.setEnabled(True)
self.fileSaveImgAction.setEnabled(True)
self.clear_img_stack_action.setEnabled(True)
self.clear_main_win_action.setEnabled(True)
self.camera_setting.cb.setEnabled(True)
self.camera_setting.further_setting.setEnabled(False)
self.img_display_setting.video_mode.setChecked(False)
self.img_display_setting.hardware_mode.setChecked(False)
self.img_display_setting.video_mode.setEnabled(False)
self.img_display_setting.hardware_mode.setEnabled(False)
def connect_slot2signal(self):
# image display widget
# all parameters' signal are connected to global parameters.
self.img_display_setting.video_mode.stateChanged.connect(
lambda: self.change_camera_mode(self.img_display_setting.video_mode)
)
self.img_display_setting.hardware_mode.stateChanged.connect(
lambda: self.change_camera_mode(self.img_display_setting.hardware_mode)
)
for i in range(settings.widget_params["Image Display Setting"]["img_stack_num"]):
plot_win = self.img_queue.plot_wins.get()
plot_win.img_dict.connect(self.plot_main_window.img_plot)
self.img_queue.plot_wins.put(plot_win)
self.plot_main_window.atom_number.connect(self.result_dock.change_atom_num)
self.img_analyse_setting.roi.stateChanged.connect(
lambda: self.plot_main_window.add_roi(self.img_analyse_setting.roi, self.img_analyse_setting.cross_axes)
)
self.img_analyse_setting.cross_axes.stateChanged.connect(
lambda: self.plot_main_window.add_cross_axes(self.img_analyse_setting.cross_axes)
)
self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_exposure)
self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_gain)
self.camera_setting.apply_button.clicked.connect(self.camera_setting.camera_further_setting.change_shutter)
self.camera_setting.apply_button.clicked.connect(self.change_camera_params)
def clear_img_stack(self):
if self.acquiring and settings.widget_params["Image Display Setting"]["mode"] == 0:
print("video mode can't clear image stack")
return
# make sure that queue isn't changing when using qsize()
for i in range(settings.widget_params["Image Display Setting"]["img_stack_num"]):
plot_win = self.img_queue.plot_wins.get()
plot_win.clear_win()
self.img_queue.plot_wins.put(plot_win)
def clear_main_win(self):
if self.acquiring and settings.widget_params["Image Display Setting"]["mode"] == 0:
print("video mode can't clear main window")
return
self.plot_main_window.clear_win()
### LOAD CUSTOM SETTING FOR INSTRUMENT CONNECT AND PARAMETERS ###
def file_save_imgs(self):
fpath = IOHelper.get_config_setting('DATA_PATH')
fpath = Path(fpath)
dir_path = fpath.joinpath(str(datetime.datetime.now()).split('.')[0].replace(' ', '-').replace(':', '_'))
print("save images to {}".format(dir_path))
if not dir_path.exists():
dir_path.mkdir()
for i in range(settings.widget_params["Image Display Setting"]["img_stack_num"]):
plot_win = self.img_queue.plot_wins.get()
if plot_win.video.image is not None:
img_data = np.array(plot_win.video.image)
# load image name by path
img_name = (plot_win.img_label.text()).split('.')[0].replace(' ', '-').replace(':', '_')
img_data = Image.fromarray(img_data)
img_data.save(r"{}\{}.png".format(dir_path, img_name))
self.img_queue.plot_wins.put(plot_win)
print("images have saved.")
def file_load_imgs(self):
self.load_img2stack()
def load_img2stack(self):
settings.widget_params["Image Display Setting"]["imgSource"] = "disk"
fpath = IOHelper.get_config_setting('DATA_PATH')
img_fpath = QFileDialog.getExistingDirectory(self, "Open File", fpath)
img_file = Path(img_fpath)
img_paths = list(img_file.glob('*.png'))
for win_index in range(settings.widget_params["Image Display Setting"]["img_stack_num"]):
if win_index == len(img_paths):
break
plot_win = self.img_queue.plot_wins.get()
plot_win.img_plot(self.load_img_dict(img_paths[win_index]))
self.img_queue.plot_wins.put(plot_win)
### MISCELLANY ###
def load_img_dict(self, img_path):
img_data = np.array(Image.open(img_path))
# load image name by path
img_name = img_path.stem
img = {
'img_name': img_name,
'img_data': img_data
}
return img
def update_console(self, stri):
MAX_LINES = 50
stri = str(stri)
new_text = self.prompt_dock.console_text() + '\n' + stri
line_list = new_text.splitlines()
N_lines = min(MAX_LINES, len(line_list))
# limit output lines
new_text = '\n'.join(line_list[-N_lines:])
self.prompt_dock.console_text(new_text)
self.prompt_dock.automatic_scroll()
def update_main_plot_win(self, img_dict):
# take the newest image in the queue
if img_dict is None:
return
self.plot_main_window.img_plot(img_dict)
def update_image_queue(self, img_dict):
    """Draw img_dict on the oldest stack window and rotate it to the back.

    Taking from and re-putting to plot_wins cycles the windows so each
    update lands on the least-recently-updated one.
    """
    oldest_win = self.img_queue.plot_wins.get()
    oldest_win.img_plot(img_dict)
    self.img_queue.plot_wins.put(oldest_win)
    print("update image queue")
class Worker(QObject):
    """Camera acquisition worker: grabs frames in a loop and emits them as Qt signals."""

    # Emitted once per frame; payload: {'img_name': timestamp string,
    # 'img_data': the frame run through Helper.split_list}.
    sig_video_mode_img = pyqtSignal(dict)
    sig_hardware_mode_img = pyqtSignal(dict)

    def __init__(self):
        """Open and configure the Chameleon camera from the global settings."""
        super().__init__()
        self.camera = Chameleon()
        self.camera.initializeCamera(settings.instrument_params["Camera"]["index"])
        self.camera.setAcquisitionMode(settings.widget_params["Image Display Setting"]["mode"])
        self.camera.setExposure(settings.instrument_params["Camera"]["exposure time"])
        self.camera.setShutter(settings.instrument_params["Camera"]["shutter time"])
        self.camera.setGain(settings.instrument_params["Camera"]["gain value"])
        # set a low grab timeout to avoid crash when retrieve image.
        self.camera.set_grab_timeout(grab_timeout=10)
        # Private abort flag (name-mangled); set by abort(), polled in work().
        self.__abort = False

    @pyqtSlot()
    def work(self):
        """Acquisition loop: poll the camera and emit each frame until abort() is called.

        NOTE(review): reads the module-level `app` (the QApplication created in
        the __main__ block) to pump events; only valid when `app` is a module
        global — confirm before launching via start_main_win(), where `app`
        is a function local.
        """
        print("camera start work")
        self.camera.startAcquisition()
        while True:
            # check if we need to abort the loop; need to process events to receive signals;
            app.processEvents()  # this could cause change to self.__abort
            if self.__abort:
                break
            img_data = self.camera.retrieveOneImg()  # retrieve image from camera buffer
            if img_data is None:
                # Nothing in the buffer yet (short grab timeout) -- poll again.
                continue
            else:
                timestamp = datetime.datetime.now()
                # Mode 2 frames go to the hardware-mode signal; all other
                # modes go to the video-mode signal.
                if settings.widget_params["Image Display Setting"]["mode"] == 2:
                    self.sig_hardware_mode_img.emit({'img_name': str(timestamp), 'img_data': Helper.split_list(img_data)})
                else:
                    self.sig_video_mode_img.emit({'img_name': str(timestamp), 'img_data': Helper.split_list(img_data)})
            # set a appropriate refresh value
            time.sleep(0.1)
        self.camera.stopCamera()

    def abort(self):
        """Request the work() loop to stop; takes effect on its next iteration."""
        self.__abort = True
def start_main_win():
    """Create the QApplication with the Fusion dark theme and run the main window."""
    app = QApplication(sys.argv)
    # Force the style to be the same on all OSs.
    app.setStyle("Fusion")
    # Dark color scheme, applied role by role.
    dark_roles = (
        (QPalette.Window, QColor(53, 53, 53)),
        (QPalette.WindowText, Qt.white),
        (QPalette.Base, QColor(25, 25, 25)),
        (QPalette.AlternateBase, QColor(53, 53, 53)),
        (QPalette.ToolTipBase, Qt.white),
        (QPalette.ToolTipText, Qt.white),
        (QPalette.Text, Qt.white),
        (QPalette.Button, QColor(53, 53, 53)),
        (QPalette.ButtonText, Qt.white),
        (QPalette.BrightText, Qt.red),
        (QPalette.Link, QColor(42, 130, 218)),
        (QPalette.Highlight, QColor(42, 130, 218)),
        (QPalette.HighlightedText, Qt.black),
    )
    palette = QPalette()
    for role, color in dark_roles:
        palette.setColor(role, color)
    app.setPalette(palette)
    app.setApplicationName("UALab")
    window = TestMainWindow()
    window.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
    # NOTE: `app` is deliberately bound at module scope here --
    # Worker.work() polls this global via app.processEvents().
    app = QApplication(sys.argv)
    # Force the style to be the same on all OSs.
    app.setStyle("Fusion")
    # Dark Fusion color scheme, applied role by role.
    _dark_roles = (
        (QPalette.Window, QColor(53, 53, 53)),
        (QPalette.WindowText, Qt.white),
        (QPalette.Base, QColor(25, 25, 25)),
        (QPalette.AlternateBase, QColor(53, 53, 53)),
        (QPalette.ToolTipBase, Qt.white),
        (QPalette.ToolTipText, Qt.white),
        (QPalette.Text, Qt.white),
        (QPalette.Button, QColor(53, 53, 53)),
        (QPalette.ButtonText, Qt.white),
        (QPalette.BrightText, Qt.red),
        (QPalette.Link, QColor(42, 130, 218)),
        (QPalette.Highlight, QColor(42, 130, 218)),
        (QPalette.HighlightedText, Qt.black),
    )
    palette = QPalette()
    for _role, _color in _dark_roles:
        palette.setColor(_role, _color)
    app.setPalette(palette)
    app.setApplicationName("UALab")
    window = TestMainWindow()
    window.show()
    sys.exit(app.exec_())
| true
| true
|
1c4060f0a6d704828e53c904008845a0871b50ac
| 6,079
|
py
|
Python
|
tutorials/mnist_dpsgd_tutorial_eager.py
|
amad-person/privacy
|
aaf4c252a0bbfa41670ddefd3798bdf2066c0e21
|
[
"Apache-2.0"
] | 2,327
|
2020-03-01T09:47:34.000Z
|
2021-11-25T12:38:42.000Z
|
tutorials/mnist_dpsgd_tutorial_eager.py
|
amad-person/privacy
|
aaf4c252a0bbfa41670ddefd3798bdf2066c0e21
|
[
"Apache-2.0"
] | 209
|
2020-03-01T17:14:12.000Z
|
2021-11-08T20:35:42.000Z
|
tutorials/mnist_dpsgd_tutorial_eager.py
|
amad-person/privacy
|
aaf4c252a0bbfa41670ddefd3798bdf2066c0e21
|
[
"Apache-2.0"
] | 686
|
2020-03-03T17:24:51.000Z
|
2021-11-25T23:39:12.000Z
|
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training a CNN on MNIST in TF Eager mode with DP-SGD optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
# Shorthand for the non-private baseline optimizer.
GradientDescentOptimizer = tf.train.GradientDescentOptimizer

tf.enable_eager_execution()

# Command-line flags: training hyperparameters plus the DP-SGD privacy
# parameters (clipping norm and noise multiplier).
flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                     'train with vanilla SGD.')
flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 1.1,
                   'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_integer('epochs', 60, 'Number of epochs')
flags.DEFINE_integer('microbatches', 250, 'Number of microbatches '
                     '(must evenly divide batch_size)')

FLAGS = flags.FLAGS
def compute_epsilon(steps):
  """Computes the DP epsilon spent after `steps` of DP-SGD, at delta=1e-5.

  Args:
    steps: number of optimizer steps taken so far.

  Returns:
    The epsilon value, or inf when no noise is added.
  """
  if FLAGS.noise_multiplier == 0.0:
    return float('inf')
  # RDP orders: a fine grid just above 1 plus a coarser integer tail.
  rdp_orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
  # Poisson sampling probability (MNIST has 60000 training points).
  sampling_q = FLAGS.batch_size / 60000
  rdp_values = compute_rdp(
      q=sampling_q,
      noise_multiplier=FLAGS.noise_multiplier,
      steps=steps,
      orders=rdp_orders)
  # Delta is set to 1e-5 because MNIST has 60000 training points.
  return get_privacy_spent(rdp_orders, rdp_values, target_delta=1e-5)[0]
def main(_):
  """Trains a small CNN on MNIST in eager mode, with or without DP-SGD,
  printing test accuracy and (for DP-SGD) the epsilon spent after each epoch.
  """
  if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
    raise ValueError('Number of microbatches should divide evenly batch_size')

  # Fetch the mnist data
  train, test = tf.keras.datasets.mnist.load_data()
  train_images, train_labels = train
  test_images, test_labels = test

  # Create a dataset object and batch for the training data.
  # Pixel values are rescaled from [0, 255] to [0, 1].
  dataset = tf.data.Dataset.from_tensor_slices(
      (tf.cast(train_images[..., tf.newaxis]/255, tf.float32),
       tf.cast(train_labels, tf.int64)))
  dataset = dataset.shuffle(1000).batch(FLAGS.batch_size)

  # Create a dataset object and batch for the test data (one 10k batch).
  eval_dataset = tf.data.Dataset.from_tensor_slices(
      (tf.cast(test_images[..., tf.newaxis]/255, tf.float32),
       tf.cast(test_labels, tf.int64)))
  eval_dataset = eval_dataset.batch(10000)

  # Define the model using tf.keras.layers
  mnist_model = tf.keras.Sequential([
      tf.keras.layers.Conv2D(16, 8,
                             strides=2,
                             padding='same',
                             activation='relu'),
      tf.keras.layers.MaxPool2D(2, 1),
      tf.keras.layers.Conv2D(32, 4, strides=2, activation='relu'),
      tf.keras.layers.MaxPool2D(2, 1),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(32, activation='relu'),
      tf.keras.layers.Dense(10)
  ])

  # Instantiate the optimizer: DP-SGD clips per-microbatch gradients and
  # adds Gaussian noise; otherwise plain SGD.
  if FLAGS.dpsgd:
    opt = DPGradientDescentGaussianOptimizer(
        l2_norm_clip=FLAGS.l2_norm_clip,
        noise_multiplier=FLAGS.noise_multiplier,
        num_microbatches=FLAGS.microbatches,
        learning_rate=FLAGS.learning_rate)
  else:
    opt = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)

  # Training loop.
  steps_per_epoch = 60000 // FLAGS.batch_size
  for epoch in range(FLAGS.epochs):
    # Train the model for one epoch.
    for (_, (images, labels)) in enumerate(dataset.take(-1)):
      with tf.GradientTape(persistent=True) as gradient_tape:
        # This dummy call is needed to obtain the var list.
        logits = mnist_model(images, training=True)
        var_list = mnist_model.trainable_variables

        # In Eager mode, the optimizer takes a function that returns the loss.
        def loss_fn():
          logits = mnist_model(images, training=True)  # pylint: disable=undefined-loop-variable,cell-var-from-loop
          loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
              labels=labels, logits=logits)  # pylint: disable=undefined-loop-variable,cell-var-from-loop
          # If training without privacy, the loss is a scalar not a vector.
          if not FLAGS.dpsgd:
            loss = tf.reduce_mean(input_tensor=loss)
          return loss

        if FLAGS.dpsgd:
          # The DP optimizer needs the tape to compute per-microbatch gradients.
          grads_and_vars = opt.compute_gradients(loss_fn, var_list,
                                                 gradient_tape=gradient_tape)
        else:
          grads_and_vars = opt.compute_gradients(loss_fn, var_list)

      opt.apply_gradients(grads_and_vars)

    # Evaluate the model and print results
    for (_, (images, labels)) in enumerate(eval_dataset.take(-1)):
      logits = mnist_model(images, training=False)
      correct_preds = tf.equal(tf.argmax(input=logits, axis=1), labels)
      test_accuracy = np.mean(correct_preds.numpy())

    print('Test accuracy after epoch %d is: %.3f' % (epoch, test_accuracy))

    # Compute the privacy budget expended so far.
    if FLAGS.dpsgd:
      eps = compute_epsilon((epoch + 1) * steps_per_epoch)
      print('For delta=1e-5, the current epsilon is: %.2f' % eps)
    else:
      print('Trained with vanilla non-private SGD optimizer')
if __name__ == '__main__':
  # absl entry point: parses the flags defined above, then calls main().
  app.run(main)
| 40.798658
| 115
| 0.696661
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
GradientDescentOptimizer = tf.train.GradientDescentOptimizer
tf.enable_eager_execution()
flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
'train with vanilla SGD.')
flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 1.1,
'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_integer('epochs', 60, 'Number of epochs')
flags.DEFINE_integer('microbatches', 250, 'Number of microbatches '
'(must evenly divide batch_size)')
FLAGS = flags.FLAGS
def compute_epsilon(steps):
if FLAGS.noise_multiplier == 0.0:
return float('inf')
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
sampling_probability = FLAGS.batch_size / 60000
rdp = compute_rdp(q=sampling_probability,
noise_multiplier=FLAGS.noise_multiplier,
steps=steps,
orders=orders)
return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
def main(_):
if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
raise ValueError('Number of microbatches should divide evenly batch_size')
train, test = tf.keras.datasets.mnist.load_data()
train_images, train_labels = train
test_images, test_labels = test
dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(train_images[..., tf.newaxis]/255, tf.float32),
tf.cast(train_labels, tf.int64)))
dataset = dataset.shuffle(1000).batch(FLAGS.batch_size)
eval_dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(test_images[..., tf.newaxis]/255, tf.float32),
tf.cast(test_labels, tf.int64)))
eval_dataset = eval_dataset.batch(10000)
mnist_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16, 8,
strides=2,
padding='same',
activation='relu'),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Conv2D(32, 4, strides=2, activation='relu'),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(10)
])
if FLAGS.dpsgd:
opt = DPGradientDescentGaussianOptimizer(
l2_norm_clip=FLAGS.l2_norm_clip,
noise_multiplier=FLAGS.noise_multiplier,
num_microbatches=FLAGS.microbatches,
learning_rate=FLAGS.learning_rate)
else:
opt = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
steps_per_epoch = 60000 // FLAGS.batch_size
for epoch in range(FLAGS.epochs):
for (_, (images, labels)) in enumerate(dataset.take(-1)):
with tf.GradientTape(persistent=True) as gradient_tape:
logits = mnist_model(images, training=True)
var_list = mnist_model.trainable_variables
def loss_fn():
logits = mnist_model(images, training=True)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
if not FLAGS.dpsgd:
loss = tf.reduce_mean(input_tensor=loss)
return loss
if FLAGS.dpsgd:
grads_and_vars = opt.compute_gradients(loss_fn, var_list,
gradient_tape=gradient_tape)
else:
grads_and_vars = opt.compute_gradients(loss_fn, var_list)
opt.apply_gradients(grads_and_vars)
for (_, (images, labels)) in enumerate(eval_dataset.take(-1)):
logits = mnist_model(images, training=False)
correct_preds = tf.equal(tf.argmax(input=logits, axis=1), labels)
test_accuracy = np.mean(correct_preds.numpy())
print('Test accuracy after epoch %d is: %.3f' % (epoch, test_accuracy))
if FLAGS.dpsgd:
eps = compute_epsilon((epoch + 1) * steps_per_epoch)
print('For delta=1e-5, the current epsilon is: %.2f' % eps)
else:
print('Trained with vanilla non-private SGD optimizer')
if __name__ == '__main__':
app.run(main)
| true
| true
|
1c40610ba9273b3df0d4a1f6df9941104ff0555d
| 6,780
|
py
|
Python
|
src/python/pants/backend/python/tasks/python_binary_create.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/tasks/python_binary_create.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/tasks/python_binary_create.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pex.interpreter import PythonInterpreter
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from pants.backend.python.subsystems.python_native_code import PythonNativeCode
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks.pex_build_util import (dump_requirement_libs, dump_sources,
has_python_requirements, has_python_sources,
has_resources, is_python_target)
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.target_scopes import Scopes
from pants.task.task import Task
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir_for
from pants.util.fileutil import atomic_copy
from pants.util.memo import memoized_property
class PythonBinaryCreate(Task):
  """Create an executable .pex file."""

  @classmethod
  def subsystem_dependencies(cls):
    """Declare a scoped dependency on the PythonNativeCode subsystem."""
    return super(PythonBinaryCreate, cls).subsystem_dependencies() + (PythonNativeCode.scoped(cls),)

  @memoized_property
  def _python_native_code_settings(self):
    # Cached per-task-instance handle to the scoped PythonNativeCode subsystem.
    return PythonNativeCode.scoped_instance(self)

  @classmethod
  def product_types(cls):
    """Products this task offers to downstream tasks."""
    return ['pex_archives', 'deployable_archives']

  @classmethod
  def implementation_version(cls):
    """Version component; bumping it invalidates previously cached results."""
    return super(PythonBinaryCreate, cls).implementation_version() + [('PythonBinaryCreate', 2)]

  @property
  def cache_target_dirs(self):
    # Results are cached per target under the task's results directories.
    return True

  @classmethod
  def prepare(cls, options, round_manager):
    """Declare the upstream products this task needs before running."""
    # See comment in _create_binary for why we don't use the
    # GatherSources.PYTHON_SOURCES product.
    round_manager.require_data(PythonInterpreter)
    round_manager.optional_data('python')  # For codegen.
    round_manager.optional_product(PythonRequirementLibrary)  # For local dists.

  @staticmethod
  def is_binary(target):
    """True for the targets this task builds (python_binary)."""
    return isinstance(target, PythonBinary)

  def __init__(self, *args, **kwargs):
    super(PythonBinaryCreate, self).__init__(*args, **kwargs)
    # Finished pexes are copied to the distribution dir (<buildroot>/dist).
    self._distdir = self.get_options().pants_distdir

  def execute(self):
    """Build (or reuse from cache) a .pex per binary target in the context."""
    binaries = self.context.targets(self.is_binary)

    # Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
    names = {}
    for binary in binaries:
      name = binary.name
      if name in names:
        raise TaskError('Cannot build two binaries with the same name in a single invocation. '
                        '{} and {} both have the name {}.'.format(binary, names[name], name))
      names[name] = binary

    with self.invalidated(binaries, invalidate_dependents=True) as invalidation_check:
      python_deployable_archive = self.context.products.get('deployable_archives')
      python_pex_product = self.context.products.get('pex_archives')
      for vt in invalidation_check.all_vts:
        pex_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
        if not vt.valid:
          self.context.log.debug('cache for {} is invalid, rebuilding'.format(vt.target))
          self._create_binary(vt.target, vt.results_dir)
        else:
          self.context.log.debug('using cache for {}'.format(vt.target))

        # Register the pex under both product types for downstream consumers.
        basename = os.path.basename(pex_path)
        python_pex_product.add(vt.target, os.path.dirname(pex_path)).append(basename)
        python_deployable_archive.add(vt.target, os.path.dirname(pex_path)).append(basename)
        self.context.log.debug('created {}'.format(os.path.relpath(pex_path, get_buildroot())))

        # Create a copy for pex.
        pex_copy = os.path.join(self._distdir, os.path.basename(pex_path))
        safe_mkdir_for(pex_copy)
        atomic_copy(pex_path, pex_copy)
        self.context.log.info('created pex {}'.format(os.path.relpath(pex_copy, get_buildroot())))

  def _create_binary(self, binary_tgt, results_dir):
    """Create a .pex file for the specified binary target."""
    # Note that we rebuild a chroot from scratch, instead of using the REQUIREMENTS_PEX
    # and PYTHON_SOURCES products, because those products are already-built pexes, and there's
    # no easy way to merge them into a single pex file (for example, they each have a __main__.py,
    # metadata, and so on, which the merging code would have to handle specially).
    interpreter = self.context.products.get_data(PythonInterpreter)
    with temporary_dir() as tmpdir:
      # Create the pex_info for the binary.
      run_info_dict = self.context.run_tracker.run_info.get_as_dict()
      build_properties = PexInfo.make_build_properties()
      build_properties.update(run_info_dict)
      pex_info = binary_tgt.pexinfo.copy()
      pex_info.build_properties = build_properties

      builder = PEXBuilder(path=tmpdir, interpreter=interpreter, pex_info=pex_info, copy=True)

      if binary_tgt.shebang:
        self.context.log.info('Found Python binary target {} with customized shebang, using it: {}'
                              .format(binary_tgt.name, binary_tgt.shebang))
        builder.set_shebang(binary_tgt.shebang)
      else:
        self.context.log.debug('No customized shebang found for {}'.format(binary_tgt.name))

      # Find which targets provide sources and which specify requirements.
      source_tgts = []
      req_tgts = []
      for tgt in binary_tgt.closure(exclude_scopes=Scopes.COMPILE):
        if has_python_sources(tgt) or has_resources(tgt):
          source_tgts.append(tgt)
        elif has_python_requirements(tgt):
          req_tgts.append(tgt)
        # Add target's interpreter compatibility constraints to pex info.
        if is_python_target(tgt):
          for constraint in tgt.compatibility:
            builder.add_interpreter_constraint(constraint)

      # Dump everything into the builder's chroot.
      for tgt in source_tgts:
        dump_sources(builder, tgt, self.context.log)

      # We need to ensure that we are resolving for only the current platform if we are
      # including local python dist targets that have native extensions.
      self._python_native_code_settings.check_build_for_current_platform_only(self.context.targets())
      dump_requirement_libs(builder, interpreter, req_tgts, self.context.log,
                            platforms=binary_tgt.platforms)

      # Build the .pex file.
      pex_path = os.path.join(results_dir, '{}.pex'.format(binary_tgt.name))
      builder.build(pex_path)
      return pex_path
| 44.900662
| 101
| 0.720944
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pex.interpreter import PythonInterpreter
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from pants.backend.python.subsystems.python_native_code import PythonNativeCode
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks.pex_build_util import (dump_requirement_libs, dump_sources,
has_python_requirements, has_python_sources,
has_resources, is_python_target)
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.target_scopes import Scopes
from pants.task.task import Task
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir_for
from pants.util.fileutil import atomic_copy
from pants.util.memo import memoized_property
class PythonBinaryCreate(Task):
@classmethod
def subsystem_dependencies(cls):
return super(PythonBinaryCreate, cls).subsystem_dependencies() + (PythonNativeCode.scoped(cls),)
@memoized_property
def _python_native_code_settings(self):
return PythonNativeCode.scoped_instance(self)
@classmethod
def product_types(cls):
return ['pex_archives', 'deployable_archives']
@classmethod
def implementation_version(cls):
return super(PythonBinaryCreate, cls).implementation_version() + [('PythonBinaryCreate', 2)]
@property
def cache_target_dirs(self):
return True
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data(PythonInterpreter)
round_manager.optional_data('python') # For codegen.
round_manager.optional_product(PythonRequirementLibrary) # For local dists.
@staticmethod
def is_binary(target):
return isinstance(target, PythonBinary)
def __init__(self, *args, **kwargs):
super(PythonBinaryCreate, self).__init__(*args, **kwargs)
self._distdir = self.get_options().pants_distdir
def execute(self):
binaries = self.context.targets(self.is_binary)
# Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
names = {}
for binary in binaries:
name = binary.name
if name in names:
raise TaskError('Cannot build two binaries with the same name in a single invocation. '
'{} and {} both have the name {}.'.format(binary, names[name], name))
names[name] = binary
with self.invalidated(binaries, invalidate_dependents=True) as invalidation_check:
python_deployable_archive = self.context.products.get('deployable_archives')
python_pex_product = self.context.products.get('pex_archives')
for vt in invalidation_check.all_vts:
pex_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
if not vt.valid:
self.context.log.debug('cache for {} is invalid, rebuilding'.format(vt.target))
self._create_binary(vt.target, vt.results_dir)
else:
self.context.log.debug('using cache for {}'.format(vt.target))
basename = os.path.basename(pex_path)
python_pex_product.add(vt.target, os.path.dirname(pex_path)).append(basename)
python_deployable_archive.add(vt.target, os.path.dirname(pex_path)).append(basename)
self.context.log.debug('created {}'.format(os.path.relpath(pex_path, get_buildroot())))
# Create a copy for pex.
pex_copy = os.path.join(self._distdir, os.path.basename(pex_path))
safe_mkdir_for(pex_copy)
atomic_copy(pex_path, pex_copy)
self.context.log.info('created pex {}'.format(os.path.relpath(pex_copy, get_buildroot())))
def _create_binary(self, binary_tgt, results_dir):
# Note that we rebuild a chroot from scratch, instead of using the REQUIREMENTS_PEX
# and PYTHON_SOURCES products, because those products are already-built pexes, and there's
interpreter = self.context.products.get_data(PythonInterpreter)
with temporary_dir() as tmpdir:
run_info_dict = self.context.run_tracker.run_info.get_as_dict()
build_properties = PexInfo.make_build_properties()
build_properties.update(run_info_dict)
pex_info = binary_tgt.pexinfo.copy()
pex_info.build_properties = build_properties
builder = PEXBuilder(path=tmpdir, interpreter=interpreter, pex_info=pex_info, copy=True)
if binary_tgt.shebang:
self.context.log.info('Found Python binary target {} with customized shebang, using it: {}'
.format(binary_tgt.name, binary_tgt.shebang))
builder.set_shebang(binary_tgt.shebang)
else:
self.context.log.debug('No customized shebang found for {}'.format(binary_tgt.name))
source_tgts = []
req_tgts = []
for tgt in binary_tgt.closure(exclude_scopes=Scopes.COMPILE):
if has_python_sources(tgt) or has_resources(tgt):
source_tgts.append(tgt)
elif has_python_requirements(tgt):
req_tgts.append(tgt)
if is_python_target(tgt):
for constraint in tgt.compatibility:
builder.add_interpreter_constraint(constraint)
# Dump everything into the builder's chroot.
for tgt in source_tgts:
dump_sources(builder, tgt, self.context.log)
self._python_native_code_settings.check_build_for_current_platform_only(self.context.targets())
dump_requirement_libs(builder, interpreter, req_tgts, self.context.log,
platforms=binary_tgt.platforms)
pex_path = os.path.join(results_dir, '{}.pex'.format(binary_tgt.name))
builder.build(pex_path)
return pex_path
| true
| true
|
1c406220f0981138b791aac6bfbd92089b0d15aa
| 76
|
py
|
Python
|
mtsg/__init__.py
|
stjude/mutspec
|
59a4ccc0fcda9b041637e27d7e9f6a2135581e31
|
[
"MIT"
] | 1
|
2020-12-24T19:51:35.000Z
|
2020-12-24T19:51:35.000Z
|
mtsg/__init__.py
|
stjude/mtsg
|
59a4ccc0fcda9b041637e27d7e9f6a2135581e31
|
[
"MIT"
] | null | null | null |
mtsg/__init__.py
|
stjude/mtsg
|
59a4ccc0fcda9b041637e27d7e9f6a2135581e31
|
[
"MIT"
] | 1
|
2021-02-08T14:28:24.000Z
|
2021-02-08T14:28:24.000Z
|
# Re-export GenomeBuild as part of the package's public API
# (the explicit `as GenomeBuild` marks it as an intentional re-export).
from .genome_build import GenomeBuild as GenomeBuild

# Package version; presumably kept in sync with the release metadata -- verify.
__version__ = "3.1.0"
| 19
| 52
| 0.789474
|
from .genome_build import GenomeBuild as GenomeBuild
__version__ = "3.1.0"
| true
| true
|
1c40623005bf0cd4c86b7de7ff9c9b56aaf1cbb6
| 5,001
|
py
|
Python
|
core/src/autogluon/core/searcher/bayesopt/datatypes/config_ext.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 4,462
|
2019-12-09T17:41:07.000Z
|
2022-03-31T22:00:41.000Z
|
core/src/autogluon/core/searcher/bayesopt/datatypes/config_ext.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 1,408
|
2019-12-09T17:48:59.000Z
|
2022-03-31T20:24:12.000Z
|
core/src/autogluon/core/searcher/bayesopt/datatypes/config_ext.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 623
|
2019-12-10T02:04:18.000Z
|
2022-03-20T17:11:01.000Z
|
from typing import Tuple, Union
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import copy
from .hp_ranges_cs import HyperparameterRanges_CS
# Prefix that distinguishes the scheduler-controlled resource attribute from
# ordinary hyperparameters in the extended config space.
RESOURCE_ATTR_PREFIX = 'RESOURCE_ATTR_'
class ExtendedConfiguration(object):
    """Handles "extended" configs: a normal config plus a resource attribute.

    The resource attribute is controlled by the scheduler; it is not a
    hyperparameter being optimized over. Its allowed range in the extended
    space is [1, resource_attr_range[1]], which can be wider than
    resource_attr_range itself, because extended configs may carry resource
    values below resource_attr_range[0] (e.g., early-stopping data observed
    at epoch < resource_attr_range[0]).
    """
    def __init__(
            self, hp_ranges: HyperparameterRanges_CS, resource_attr_key: str,
            resource_attr_range: Tuple[int, int]):
        assert resource_attr_range[0] >= 1
        assert resource_attr_range[1] >= resource_attr_range[0]
        self.hp_ranges = hp_ranges
        self.resource_attr_key = resource_attr_key
        self.resource_attr_range = resource_attr_range
        self.resource_attr_name = RESOURCE_ATTR_PREFIX + resource_attr_key
        # Extended space = original space + an integer resource attribute
        # over [1, resource_attr_range[1]].
        extended_space = copy.deepcopy(hp_ranges.config_space)
        extended_space.add_hyperparameter(CSH.UniformIntegerHyperparameter(
            name=self.resource_attr_name, lower=1,
            upper=resource_attr_range[1]))
        self.hp_ranges_ext = HyperparameterRanges_CS(
            extended_space, name_last_pos=self.resource_attr_name)

    def get(self, config: CS.Configuration, resource: int) -> CS.Configuration:
        """Return `config` extended with the given resource value."""
        extended_values = copy.deepcopy(config.get_dictionary())
        extended_values[self.resource_attr_name] = resource
        return CS.Configuration(
            self.hp_ranges_ext.config_space, values=extended_values)

    def remap_resource(
            self, config_ext: CS.Configuration, resource: int,
            as_dict: bool=False) -> Union[CS.Configuration, dict]:
        """Return `config_ext` with its resource value replaced by `resource`.

        :param config_ext: Extended config
        :param resource: New resource value
        :param as_dict: Return as dict instead of Configuration?
        """
        remapped = copy.copy(config_ext.get_dictionary())
        remapped[self.resource_attr_name] = resource
        if as_dict:
            return remapped
        return CS.Configuration(
            self.hp_ranges_ext.config_space, values=remapped)

    def remove_resource(
            self, config_ext: CS.Configuration,
            as_dict: bool=False) -> Union[CS.Configuration, dict]:
        """Strip the resource attribute off `config_ext`, giving a plain config.

        :param config_ext: Extended config
        :param as_dict: Return as dict instead of Configuration?
        """
        plain = {k: v for k, v in config_ext.get_dictionary().items()
                 if k != self.resource_attr_name}
        if as_dict:
            return plain
        return CS.Configuration(self.hp_ranges.config_space, values=plain)

    def from_dict(self, config_dct: dict) -> CS.Configuration:
        """Convert a dict to a Configuration.

        The result is an extended config iff the dict contains the resource
        entry. Note: the dict keys the resource by `resource_attr_key`, not
        by the prefixed `resource_attr_name`.
        """
        if self.resource_attr_key in config_dct:
            space = self.hp_ranges_ext.config_space
        else:
            space = self.hp_ranges.config_space
        return CS.Configuration(space, values=config_dct)

    def split(self, config_ext: CS.Configuration, as_dict: bool=False) -> \
            (Union[CS.Configuration, dict], int):
        """Split an extended config into (plain config, resource value).

        :param config_ext: Extended config
        :param as_dict: Return config part as dict?
        """
        remaining = copy.copy(config_ext.get_dictionary())
        resource_value = int(remaining.pop(self.resource_attr_name))
        if as_dict:
            return remaining, resource_value
        return (CS.Configuration(self.hp_ranges.config_space,
                                 values=remaining),
                resource_value)

    def get_resource(self, config_ext: CS.Configuration) -> int:
        """Return the resource value stored in an extended config."""
        return int(config_ext.get_dictionary()[self.resource_attr_name])
| 39.070313
| 81
| 0.672665
|
from typing import Tuple, Union
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import copy
from .hp_ranges_cs import HyperparameterRanges_CS
RESOURCE_ATTR_PREFIX = 'RESOURCE_ATTR_'
class ExtendedConfiguration(object):
def __init__(
self, hp_ranges: HyperparameterRanges_CS, resource_attr_key: str,
resource_attr_range: Tuple[int, int]):
assert resource_attr_range[0] >= 1
assert resource_attr_range[1] >= resource_attr_range[0]
self.hp_ranges = hp_ranges
self.resource_attr_key = resource_attr_key
self.resource_attr_range = resource_attr_range
config_space_ext = copy.deepcopy(hp_ranges.config_space)
self.resource_attr_name = RESOURCE_ATTR_PREFIX + resource_attr_key
config_space_ext.add_hyperparameter(CSH.UniformIntegerHyperparameter(
name=self.resource_attr_name, lower=1,
upper=resource_attr_range[1]))
self.hp_ranges_ext = HyperparameterRanges_CS(
config_space_ext, name_last_pos=self.resource_attr_name)
def get(self, config: CS.Configuration, resource: int) -> CS.Configuration:
values = copy.deepcopy(config.get_dictionary())
values[self.resource_attr_name] = resource
return CS.Configuration(self.hp_ranges_ext.config_space, values=values)
def remap_resource(
self, config_ext: CS.Configuration, resource: int,
as_dict: bool=False) -> Union[CS.Configuration, dict]:
x_dct = copy.copy(config_ext.get_dictionary())
x_dct[self.resource_attr_name] = resource
if as_dict:
return x_dct
else:
return CS.Configuration(
self.hp_ranges_ext.config_space, values=x_dct)
def remove_resource(
self, config_ext: CS.Configuration,
as_dict: bool=False) -> Union[CS.Configuration, dict]:
x_dct = copy.copy(config_ext.get_dictionary())
del x_dct[self.resource_attr_name]
if as_dict:
return x_dct
else:
return CS.Configuration(self.hp_ranges.config_space, values=x_dct)
def from_dict(self, config_dct: dict) -> CS.Configuration:
hp_ranges = self.hp_ranges_ext if self.resource_attr_key in config_dct \
else self.hp_ranges
return CS.Configuration(hp_ranges.config_space, values=config_dct)
def split(self, config_ext: CS.Configuration, as_dict: bool=False) -> \
(Union[CS.Configuration, dict], int):
x_res = copy.copy(config_ext.get_dictionary())
resource_value = int(x_res[self.resource_attr_name])
del x_res[self.resource_attr_name]
if not as_dict:
x_res = CS.Configuration(self.hp_ranges.config_space, values=x_res)
return x_res, resource_value
def get_resource(self, config_ext: CS.Configuration) -> int:
return int(config_ext.get_dictionary()[self.resource_attr_name])
| true
| true
|
1c4062b42c83812279f930cff97ab9f5922f6af2
| 809
|
py
|
Python
|
poky-dunfell/scripts/pybootchartgui/pybootchartgui.py
|
lacie-life/YoctoPi
|
3412e78468a9b84da50bb1aadb12b459001a3712
|
[
"MIT"
] | 14
|
2021-11-04T07:47:37.000Z
|
2022-03-21T10:10:30.000Z
|
poky-dunfell/scripts/pybootchartgui/pybootchartgui.py
|
lacie-life/YoctoPi
|
3412e78468a9b84da50bb1aadb12b459001a3712
|
[
"MIT"
] | 3
|
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
poky-dunfell/scripts/pybootchartgui/pybootchartgui.py
|
lacie-life/YoctoPi
|
3412e78468a9b84da50bb1aadb12b459001a3712
|
[
"MIT"
] | 11
|
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
#!/usr/bin/env python3
#
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import sys
from pybootchartgui.main import main
# Script entry point: delegate to pybootchartgui's CLI main() and propagate
# its integer return value to the shell as the process exit status.
if __name__ == '__main__':
    sys.exit(main())
| 33.708333
| 73
| 0.755253
|
import sys
from pybootchartgui.main import main
if __name__ == '__main__':
sys.exit(main())
| true
| true
|
1c4063669d8d3250519bf6dd530e291c32f9f070
| 993
|
py
|
Python
|
vframe_faceless/settings/faceless_cfg.py
|
vframeio/vframe_faceless_plugin
|
12ee7e136ac48c40df6a66db3712c497f5061d1d
|
[
"MIT"
] | null | null | null |
vframe_faceless/settings/faceless_cfg.py
|
vframeio/vframe_faceless_plugin
|
12ee7e136ac48c40df6a66db3712c497f5061d1d
|
[
"MIT"
] | null | null | null |
vframe_faceless/settings/faceless_cfg.py
|
vframeio/vframe_faceless_plugin
|
12ee7e136ac48c40df6a66db3712c497f5061d1d
|
[
"MIT"
] | null | null | null |
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import os
from os.path import join
from pathlib import Path
import logging
from dotenv import load_dotenv
# -----------------------------------------------------------------------------
# Logging
# -----------------------------------------------------------------------------
LOG = logging.getLogger('vframe')
# -----------------------------------------------------------------------------
#
# Filepaths
#
# -----------------------------------------------------------------------------
# Project directory
SELF_CWD = os.path.dirname(os.path.realpath(__file__))
DIR_PROJECT_ROOT = str(Path(SELF_CWD).parent)
# source .env vars if exists
fp_env = join(DIR_PROJECT_ROOT, '.env')
if Path(fp_env).is_file():
load_dotenv(dotenv_path=fp_env)
| 26.131579
| 79
| 0.387714
| true
| true
|
|
1c40636e6ad40fbe551ddf88c53868ca689268cd
| 4,982
|
py
|
Python
|
python-package/mlbox/model/supervised/regression/feature_selector.py
|
maheshkarriisb/MLBox
|
2e5e8eb488531b4cdbbe202dd6f70bba0ae3bebd
|
[
"BSD-3-Clause"
] | null | null | null |
python-package/mlbox/model/supervised/regression/feature_selector.py
|
maheshkarriisb/MLBox
|
2e5e8eb488531b4cdbbe202dd6f70bba0ae3bebd
|
[
"BSD-3-Clause"
] | null | null | null |
python-package/mlbox/model/supervised/regression/feature_selector.py
|
maheshkarriisb/MLBox
|
2e5e8eb488531b4cdbbe202dd6f70bba0ae3bebd
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Author: Axel ARONIO DE ROMBLAY <axelderomblay@gmail.com>
# License: BSD 3 clause
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
import warnings
class Reg_feature_selector():

    """
    Selects useful features.
    Several strategies are possible (filter and wrapper methods).
    Works for regression problems only.

    Parameters
    ----------
    strategy : string, default = "l1"
        The strategy to select features.
        Available strategies = "variance", "l1" or "rf_feature_importance"

    threshold : float between 0. and 1., default = 0.3
        The percentage of variables to discard according to the strategy.
    """

    def __init__(self, strategy='l1', threshold=0.3):
        self.strategy = strategy
        self.threshold = threshold
        self.__fitOK = False
        self.__to_discard = []

    def get_params(self, deep=True):
        """Return the selector's parameters (scikit-learn convention).

        :param deep: unused, kept for scikit-learn API compatibility
        :return: dict mapping parameter names to current values
        """
        return {'strategy': self.strategy,
                'threshold': self.threshold}

    def set_params(self, **params):
        """Set parameters; unknown names are ignored with a warning.

        Resets the fitted state, so ``fit`` must be called again.
        """
        self.__fitOK = False

        for k, v in params.items():
            if k not in self.get_params():
                # Name the offending parameter in the warning (the original
                # message printed a literal "a" instead of the name).
                warnings.warn("Invalid parameter '%s' for feature selector "
                              "Reg_feature_selector. Parameter IGNORED. Check "
                              "the list of available parameters with "
                              "`feature_selector.get_params().keys()`" % k)
            else:
                setattr(self, k, v)

    def fit(self, df_train, y_train):

        """
        Fits Reg_feature_selector.

        Parameters
        ----------
        df_train : pandas dataframe of shape = (n_train, n_features)
            The train dataset with numerical features and no NA

        y_train : pandas series of shape = (n_train, ).
            The target for regression task.

        Returns
        -------
        self : Reg_feature_selector
            The fitted selector.
        """

        ### sanity checks
        # isinstance() replaces the old `type(df_train) != pd.SparseDataFrame`
        # comparison, which raises AttributeError on pandas >= 1.0 where
        # SparseDataFrame was removed. On older pandas, SparseDataFrame
        # subclassed DataFrame, so isinstance still accepts it.
        if not isinstance(df_train, pd.DataFrame):
            raise ValueError("df_train must be a DataFrame")

        if not isinstance(y_train, pd.Series):
            raise ValueError("y_train must be a Series")

        if(self.strategy == 'variance'):
            # Filter method: discard the features with the lowest std.
            coef = df_train.std()
            abstract_threshold = np.percentile(coef, 100.*self.threshold)
            self.__to_discard = coef[coef < abstract_threshold].index
            self.__fitOK = True

        elif(self.strategy == 'l1'):
            # Embedded method: discard features with the smallest |Lasso coef|.
            model = Lasso(alpha=100.0, random_state=0)  # to be tuned
            model.fit(df_train, y_train)
            coef = np.abs(model.coef_)
            abstract_threshold = np.percentile(coef, 100.*self.threshold)
            self.__to_discard = df_train.columns[coef < abstract_threshold]
            self.__fitOK = True

        elif(self.strategy == 'rf_feature_importance'):
            # Wrapper method: discard features with the lowest RF importance.
            model = RandomForestRegressor(n_estimators=50,
                                          n_jobs=-1,
                                          random_state=0)  # to be tuned
            model.fit(df_train, y_train)
            coef = model.feature_importances_
            abstract_threshold = np.percentile(coef, 100.*self.threshold)
            self.__to_discard = df_train.columns[coef < abstract_threshold]
            self.__fitOK = True

        else:
            raise ValueError("Strategy invalid. Please choose between "
                             "'variance', 'l1' or 'rf_feature_importance'")

        return self

    def transform(self, df):

        """
        Transforms the dataset by dropping the discarded features.

        Parameters
        ----------
        df : pandas dataframe of shape = (n, n_features)
            The dataset with numerical features and no NA

        Returns
        -------
        df : pandas dataframe of shape = (n_train, n_features*(1-threshold))
            The train dataset with relevant features

        Raises
        ------
        ValueError
            If ``fit`` has not been called, or df is not a DataFrame.
        """

        if(self.__fitOK):

            ### sanity checks (see fit() for the isinstance rationale)
            if not isinstance(df, pd.DataFrame):
                raise ValueError("df must be a DataFrame")

            return df.drop(self.__to_discard, axis=1)
        else:
            raise ValueError("call fit or fit_transform function before")

    def fit_transform(self, df_train, y_train):

        """
        Fits Reg_feature_selector and transforms the dataset.

        Parameters
        ----------
        df_train : pandas dataframe of shape = (n_train, n_features)
            The train dataset with numerical features and no NA

        y_train : pandas series of shape = (n_train, ).
            The target for regression task.

        Returns
        -------
        df_train : pandas dataframe
            Dataframe's shape = (n_train, n_features*(1-threshold))
            The train dataset with relevant features
        """

        self.fit(df_train, y_train)

        return self.transform(df_train)
| 30.378049
| 81
| 0.578081
|
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
import warnings
class Reg_feature_selector():
def __init__(self, strategy='l1', threshold=0.3):
self.strategy = strategy
self.threshold = threshold
self.__fitOK = False
self.__to_discard = []
def get_params(self, deep=True):
return {'strategy': self.strategy,
'threshold': self.threshold}
def set_params(self, **params):
self.__fitOK = False
for k, v in params.items():
if k not in self.get_params():
warnings.warn("Invalid parameter a for feature selector"
"Reg_feature_selector. Parameter IGNORED. Check "
"the list of available parameters with "
"`feature_selector.get_params().keys()`")
else:
setattr(self, k, v)
def fit(self, df_train, y_train):
d.SparseDataFrame)&(type(df_train)!=pd.DataFrame)):
raise ValueError("df_train must be a DataFrame")
if (type(y_train) != pd.core.series.Series):
raise ValueError("y_train must be a Series")
if(self.strategy == 'variance'):
coef = df_train.std()
abstract_threshold = np.percentile(coef, 100.*self.threshold)
self.__to_discard = coef[coef < abstract_threshold].index
self.__fitOK = True
elif(self.strategy == 'l1'):
model = Lasso(alpha=100.0, random_state=0)
model.fit(df_train, y_train)
coef = np.abs(model.coef_)
abstract_threshold = np.percentile(coef, 100.*self.threshold)
self.__to_discard = df_train.columns[coef < abstract_threshold]
self.__fitOK = True
elif(self.strategy == 'rf_feature_importance'):
model = RandomForestRegressor(n_estimators=50,
n_jobs=-1,
random_state=0)
model.fit(df_train, y_train)
coef = model.feature_importances_
abstract_threshold = np.percentile(coef, 100.*self.threshold)
self.__to_discard = df_train.columns[coef < abstract_threshold]
self.__fitOK = True
else:
raise ValueError("Strategy invalid. Please choose between "
"'variance', 'l1' or 'rf_feature_importance'")
return self
def transform(self, df):
if(self.__fitOK):
SparseDataFrame)&(type(df)!=pd.DataFrame)):
raise ValueError("df must be a DataFrame")
return df.drop(self.__to_discard, axis=1)
else:
raise ValueError("call fit or fit_transform function before")
def fit_transform(self, df_train, y_train):
self.fit(df_train, y_train)
return self.transform(df_train)
| true
| true
|
1c40638402afd5e56d358484d132ec6b575a38de
| 17,502
|
py
|
Python
|
David and Pooja/++Validating Linked Mods/Python-3.0/Lib/idlelib/MultiCall.py
|
LinkedModernismProject/web_code
|
4cf6bf53d5c3249e52a75f0a3f57d106e31daf9e
|
[
"Apache-2.0"
] | 1
|
2015-05-21T23:47:54.000Z
|
2015-05-21T23:47:54.000Z
|
front-end/testsuite-python-lib/Python-3.0/Lib/idlelib/MultiCall.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2015-10-29T20:51:31.000Z
|
2015-10-29T20:51:31.000Z
|
front-end/testsuite-python-lib/Python-3.0/Lib/idlelib/MultiCall.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2019-04-11T11:27:01.000Z
|
2019-04-11T11:27:01.000Z
|
"""
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the binded functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-binded event will be called before a previously-binded event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import sys
import re
import tkinter
# the event type constants, which define the meaning of mc_type
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
# the modifier state constants, which define the meaning of mc_state
MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
# define the list of modifiers, to be used in complex event types.
if sys.platform == "darwin" and sys.executable.count(".app"):
_modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
_modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
_modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
_modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
# a dictionary to map a modifier name into its number
_modifier_names = dict([(name, number)
for number in range(len(_modifiers))
for name in _modifiers[number]])
# A binder is a class which binds functions to one type of event. It has two
# methods: bind and unbind, which get a function and a parsed sequence, as
# returned by _parse_sequence(). There are two types of binders:
# _SimpleBinder handles event types with no modifiers and no detail.
# No Python functions are called when no events are binded.
# _ComplexBinder handles event types with modifiers and a detail.
# A Python function is called each time an event is generated.
class _SimpleBinder:
    """Binder for event types that accept no modifiers and no detail.

    The Tk-level handler is installed lazily on the first bind() and removed
    when the last function is unbound, so no Python code runs for events
    nobody is interested in.
    """
    def __init__(self, type, widget, widgetinst):
        # type: index into _types; widget: the widget *class* whose
        # bind/unbind are used; widgetinst: the actual widget instance.
        self.type = type
        self.sequence = '<'+_types[type][0]+'>'
        self.widget = widget
        self.widgetinst = widgetinst
        self.bindedfuncs = []
        self.handlerid = None
    def bind(self, triplet, func):
        if not self.handlerid:
            def handler(event, l = self.bindedfuncs, mc_type = self.type):
                event.mc_type = mc_type
                wascalled = {}
                # call newest bindings first; stop at the first true result,
                # calling each distinct function at most once
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = func(event)
                        if r:
                            return r
            self.handlerid = self.widget.bind(self.widgetinst,
                                              self.sequence, handler)
        self.bindedfuncs.append(func)
    def unbind(self, triplet, func):
        self.bindedfuncs.remove(func)
        if not self.bindedfuncs:
            # last bound function gone - drop the Tk handler as well
            self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
            self.handlerid = None
    def __del__(self):
        if self.handlerid:
            self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
# An int in range(1 << len(_modifiers)) represents a combination of modifiers
# (if the least significent bit is on, _modifiers[0] is on, and so on).
# _state_subsets gives for each combination of modifiers, or *state*,
# a list of the states which are a subset of it. This list is ordered by the
# number of modifiers is the state - the most specific state comes first.
_states = range(1 << len(_modifiers))
_state_names = [''.join(m[0]+'-'
for i, m in enumerate(_modifiers)
if (1 << i) & s)
for s in _states]
def expand_substates(states):
    """For each item of states return a list containing all combinations of
    that item with individual bits reset, sorted by the number of set bits.
    """
    def popcount(value):
        """Count the bits set in value, base 2."""
        total = 0
        while value:
            value, bit = divmod(value, 2)
            total += bit
        return total

    expanded = []
    for state in states:
        # intersect with every state, deduplicate, most-specific first
        subsets = sorted({state & other for other in states},
                         key=popcount, reverse=True)
        expanded.append(subsets)
    return expanded
_state_subsets = expand_substates(_states)
# _state_codes gives for each state, the portable code to be passed as mc_state
_state_codes = []
for s in _states:
r = 0
for i in range(len(_modifiers)):
if (1 << i) & s:
r |= _modifier_masks[i]
_state_codes.append(r)
class _ComplexBinder:
    # This class binds many functions, and only unbinds them when it is deleted.
    # self.handlerids is the list of seqs and ids of binded handler functions.
    # The binded functions sit in a dictionary of lists of lists, which maps
    # a detail (or None) and a state into a list of functions.
    # When a new detail is discovered, handlers for all the possible states
    # are binded.
    def __create_handler(self, lists, mc_type, mc_state):
        # Build one Tk handler closed over `lists` (live aliases into
        # self.bindedfuncs, so later bind() calls are seen automatically).
        def handler(event, lists = lists,
                    mc_type = mc_type, mc_state = mc_state,
                    ishandlerrunning = self.ishandlerrunning,
                    doafterhandler = self.doafterhandler):
            # mark "handler running" by mutating the shared list in place
            ishandlerrunning[:] = [True]
            event.mc_type = mc_type
            event.mc_state = mc_state
            wascalled = {}
            r = None
            for l in lists:
                # newest binding first; each distinct function at most once
                for i in range(len(l)-1, -1, -1):
                    func = l[i]
                    if func not in wascalled:
                        wascalled[func] = True
                        r = l[i](event)
                        if r:
                            break
                if r:
                    break
            ishandlerrunning[:] = []
            # Call all functions in doafterhandler and remove them from list
            while doafterhandler:
                doafterhandler.pop()()
            if r:
                return r
        return handler
    def __init__(self, type, widget, widgetinst):
        self.type = type
        self.typename = _types[type][0]
        self.widget = widget
        self.widgetinst = widgetinst
        self.bindedfuncs = {None: [[] for s in _states]}
        self.handlerids = []
        # we don't want to change the lists of functions while a handler is
        # running - it will mess up the loop and anyway, we usually want the
        # change to happen from the next event. So we have a list of functions
        # for the handler to run after it finishes calling the binded functions.
        # It calls them only once.
        # ishandlerrunning is a list. An empty one means no, otherwise - yes.
        # this is done so that it would be mutable.
        self.ishandlerrunning = []
        self.doafterhandler = []
        # pre-bind a handler for every modifier state of the detail-less event
        for s in _states:
            lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
            handler = self.__create_handler(lists, type, _state_codes[s])
            seq = '<'+_state_names[s]+self.typename+'>'
            self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                          seq, handler)))
    def bind(self, triplet, func):
        if triplet[2] not in self.bindedfuncs:
            # first time this detail is seen: create its function lists and
            # bind Tk handlers for every modifier state
            self.bindedfuncs[triplet[2]] = [[] for s in _states]
            for s in _states:
                lists = [ self.bindedfuncs[detail][i]
                          for detail in (triplet[2], None)
                          for i in _state_subsets[s] ]
                handler = self.__create_handler(lists, self.type,
                                                _state_codes[s])
                seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
                self.handlerids.append((seq, self.widget.bind(self.widgetinst,
                                                              seq, handler)))
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
        # defer mutation if a handler is iterating the lists right now
        if not self.ishandlerrunning:
            doit()
        else:
            self.doafterhandler.append(doit)
    def unbind(self, triplet, func):
        doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
        # defer mutation if a handler is iterating the lists right now
        if not self.ishandlerrunning:
            doit()
        else:
            self.doafterhandler.append(doit)
    def __del__(self):
        for seq, id in self.handlerids:
            self.widget.unbind(self.widgetinst, seq, id)
# define the list of event types to be handled by MultiEvent. the order is
# compatible with the definition of event type constants (MC_KEYPRESS etc.);
# each tuple lists all Tk names for that event type.
_types = (
    ("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
    ("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
    ("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
    ("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
    ("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
    ("Visibility",),
)
# which binder should be used for every event type? the first four
# (Key/Button press/release) accept modifiers and a detail.
_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
# A dictionary to map a type name (any alias) into its number
_type_names = dict([(name, number)
                    for number in range(len(_types))
                    for name in _types[number]])
# valid detail strings: a keysym word for key events, button 1-5 otherwise
_keysym_re = re.compile(r"^\w+$")
_button_re = re.compile(r"^[1-5]$")
def _parse_sequence(sequence):
    """Get a string which should describe an event sequence. If it is
    successfully parsed as one, return a tuple containing the state (as an int),
    the event type (as an index of _types), and the detail - None if none, or a
    string if there is one. If the parsing is unsuccessful, return None.
    """
    if not (sequence and sequence[0] == '<' and sequence[-1] == '>'):
        return None
    parts = sequence[1:-1].split('-')
    state = 0
    # consume leading modifier names, accumulating their bits
    while parts and parts[0] in _modifier_names:
        state |= 1 << _modifier_names[parts.pop(0)]
    if not parts or parts[0] not in _type_names:
        return None
    event_type = _type_names[parts.pop(0)]
    if _binder_classes[event_type] is _SimpleBinder:
        # simple events accept neither modifiers nor a detail
        if state or parts:
            return None
        detail = None
    else:
        # _ComplexBinder: keysym detail for key events, button number otherwise
        is_key = event_type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]
        pattern = _keysym_re if is_key else _button_re
        if not parts:
            detail = None
        elif len(parts) == 1 and pattern.match(parts[0]):
            detail = parts[0]
        else:
            return None
    return state, event_type, detail
def _triplet_to_sequence(triplet):
    """Rebuild the Tk event sequence string for a (state, type, detail) triplet."""
    state, kind, detail = triplet
    prefix = '<' + _state_names[state] + _types[kind][0]
    if detail:
        return prefix + '-' + detail + '>'
    return prefix + '>'
_multicall_dict = {}
def MultiCallCreator(widget):
    """Return a MultiCall class which inherits its methods from the
    given widget class (for example, Tkinter.Text). This is used
    instead of a templating mechanism.

    Classes are cached in _multicall_dict, so repeated calls with the
    same widget class return the same subclass.
    """
    if widget in _multicall_dict:
        return _multicall_dict[widget]
    class MultiCall (widget):
        assert issubclass(widget, tkinter.Misc)
        def __init__(self, *args, **kwargs):
            widget.__init__(self, *args, **kwargs)
            # a dictionary which maps a virtual event to a tuple with:
            #  0. the function binded
            #  1. a list of triplets - the sequences it is binded to
            self.__eventinfo = {}
            # one binder instance per event type, indexed like _types
            self.__binders = [_binder_classes[i](i, widget, self)
                              for i in range(len(_types))]
        def bind(self, sequence=None, func=None, add=None):
            #print("bind(%s, %s, %s)" % (sequence, func, add),
            #      file=sys.__stderr__)
            # intercept only virtual events ("<<...>>"); rebind func to all
            # of the event's known triplets, then fall through to Tk
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>":
                if sequence in self.__eventinfo:
                    ei = self.__eventinfo[sequence]
                    if ei[0] is not None:
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].unbind(triplet, ei[0])
                    ei[0] = func
                    if ei[0] is not None:
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].bind(triplet, func)
                else:
                    self.__eventinfo[sequence] = [func, []]
            return widget.bind(self, sequence, func, add)
        def unbind(self, sequence, funcid=None):
            # detach the bound function from all triplets, then let Tk unbind
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>" and \
               sequence in self.__eventinfo:
                func, triplets = self.__eventinfo[sequence]
                if func is not None:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
                self.__eventinfo[sequence][0] = None
            return widget.unbind(self, sequence, funcid)
        def event_add(self, virtual, *sequences):
            #print("event_add(%s, %s)" % (repr(virtual), repr(sequences)),
            #      file=sys.__stderr__)
            if virtual not in self.__eventinfo:
                self.__eventinfo[virtual] = [None, []]
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    # not a sequence MultiCall handles - defer to plain Tk
                    #print("Tkinter event_add(%s)" % seq, file=sys.__stderr__)
                    widget.event_add(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].bind(triplet, func)
                    triplets.append(triplet)
        def event_delete(self, virtual, *sequences):
            if virtual not in self.__eventinfo:
                return
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    # not a sequence MultiCall handles - defer to plain Tk
                    #print("Tkinter event_delete: %s" % seq, file=sys.__stderr__)
                    widget.event_delete(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].unbind(triplet, func)
                    triplets.remove(triplet)
        def event_info(self, virtual=None):
            # report MultiCall-managed sequences plus whatever Tk knows about
            if virtual is None or virtual not in self.__eventinfo:
                return widget.event_info(self, virtual)
            else:
                return tuple(map(_triplet_to_sequence,
                                 self.__eventinfo[virtual][1])) + \
                       widget.event_info(self, virtual)
        def __del__(self):
            for virtual in self.__eventinfo:
                func, triplets = self.__eventinfo[virtual]
                if func:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
    _multicall_dict[widget] = MultiCall
    return MultiCall
if __name__ == "__main__":
    # Manual smoke test: open a Text widget and print each sequence as it
    # fires, demonstrating that all matching bindings are called.
    root = tkinter.Tk()
    text = MultiCallCreator(tkinter.Text)(root)
    text.pack()
    def bindseq(seq, n=[0]):
        # n is a mutable default on purpose: it is a running counter shared
        # across calls so each sequence gets a unique virtual event name.
        def handler(event):
            print(seq)
        text.bind("<<handler%d>>"%n[0], handler)
        text.event_add("<<handler%d>>"%n[0], seq)
        n[0] += 1
    bindseq("<Key>")
    bindseq("<Control-Key>")
    bindseq("<Alt-Key-a>")
    bindseq("<Control-Key-a>")
    bindseq("<Alt-Control-Key-a>")
    bindseq("<Key-b>")
    bindseq("<Control-Button-1>")
    bindseq("<Alt-Button-1>")
    bindseq("<FocusOut>")
    bindseq("<Enter>")
    bindseq("<Leave>")
    root.mainloop()
| 41.671429
| 81
| 0.589361
|
import sys
import re
import tkinter
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
if sys.platform == "darwin" and sys.executable.count(".app"):
_modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
_modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
_modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
_modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
_modifier_names = dict([(name, number)
for number in range(len(_modifiers))
for name in _modifiers[number]])
class _SimpleBinder:
def __init__(self, type, widget, widgetinst):
self.type = type
self.sequence = '<'+_types[type][0]+'>'
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = []
self.handlerid = None
def bind(self, triplet, func):
if not self.handlerid:
def handler(event, l = self.bindedfuncs, mc_type = self.type):
event.mc_type = mc_type
wascalled = {}
for i in range(len(l)-1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = func(event)
if r:
return r
self.handlerid = self.widget.bind(self.widgetinst,
self.sequence, handler)
self.bindedfuncs.append(func)
def unbind(self, triplet, func):
self.bindedfuncs.remove(func)
if not self.bindedfuncs:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
self.handlerid = None
def __del__(self):
if self.handlerid:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
_states = range(1 << len(_modifiers))
_state_names = [''.join(m[0]+'-'
for i, m in enumerate(_modifiers)
if (1 << i) & s)
for s in _states]
def expand_substates(states):
def nbits(n):
nb = 0
while n:
n, rem = divmod(n, 2)
nb += rem
return nb
statelist = []
for state in states:
substates = list(set(state & x for x in states))
substates.sort(key=nbits, reverse=True)
statelist.append(substates)
return statelist
_state_subsets = expand_substates(_states)
_state_codes = []
for s in _states:
r = 0
for i in range(len(_modifiers)):
if (1 << i) & s:
r |= _modifier_masks[i]
_state_codes.append(r)
class _ComplexBinder:
def __create_handler(self, lists, mc_type, mc_state):
def handler(event, lists = lists,
mc_type = mc_type, mc_state = mc_state,
ishandlerrunning = self.ishandlerrunning,
doafterhandler = self.doafterhandler):
ishandlerrunning[:] = [True]
event.mc_type = mc_type
event.mc_state = mc_state
wascalled = {}
r = None
for l in lists:
for i in range(len(l)-1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = l[i](event)
if r:
break
if r:
break
ishandlerrunning[:] = []
while doafterhandler:
doafterhandler.pop()()
if r:
return r
return handler
def __init__(self, type, widget, widgetinst):
self.type = type
self.typename = _types[type][0]
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = {None: [[] for s in _states]}
self.handlerids = []
# running - it will mess up the loop and anyway, we usually want the
# change to happen from the next event. So we have a list of functions
# for the handler to run after it finishes calling the binded functions.
# It calls them only once.
# ishandlerrunning is a list. An empty one means no, otherwise - yes.
# this is done so that it would be mutable.
self.ishandlerrunning = []
self.doafterhandler = []
for s in _states:
lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
handler = self.__create_handler(lists, type, _state_codes[s])
seq = '<'+_state_names[s]+self.typename+'>'
self.handlerids.append((seq, self.widget.bind(self.widgetinst,
seq, handler)))
def bind(self, triplet, func):
if triplet[2] not in self.bindedfuncs:
self.bindedfuncs[triplet[2]] = [[] for s in _states]
for s in _states:
lists = [ self.bindedfuncs[detail][i]
for detail in (triplet[2], None)
for i in _state_subsets[s] ]
handler = self.__create_handler(lists, self.type,
_state_codes[s])
seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
self.handlerids.append((seq, self.widget.bind(self.widgetinst,
seq, handler)))
doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
def unbind(self, triplet, func):
doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
def __del__(self):
for seq, id in self.handlerids:
self.widget.unbind(self.widgetinst, seq, id)
# define the list of event types to be handled by MultiEvent. the order is
# compatible with the definition of event type constants.
_types = (
("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
("Visibility",),
)
# which binder should be used for every event type?
_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
# A dictionary to map a type name into its number
_type_names = dict([(name, number)
for number in range(len(_types))
for name in _types[number]])
_keysym_re = re.compile(r"^\w+$")
_button_re = re.compile(r"^[1-5]$")
def _parse_sequence(sequence):
    """Parse a Tk event sequence such as ``"<Control-Key-a>"``.

    :returns: a ``(modifiers, ev_type, detail)`` triplet — *modifiers* is a
        bit mask of modifier numbers, *ev_type* an index into ``_types`` and
        *detail* the keysym/button string or ``None`` — or ``None`` when the
        sequence is not one MultiCall handles itself (it is then passed on
        to plain Tkinter binding).
    """
    if not sequence or sequence[0] != '<' or sequence[-1] != '>':
        return None
    words = sequence[1:-1].split('-')
    modifiers = 0
    while words and words[0] in _modifier_names:
        modifiers |= 1 << _modifier_names[words[0]]
        del words[0]
    if words and words[0] in _type_names:
        # Renamed from ``type`` so the builtin is not shadowed.
        ev_type = _type_names[words[0]]
        del words[0]
    else:
        return None
    if _binder_classes[ev_type] is _SimpleBinder:
        # Simple event types accept neither modifiers nor a detail part.
        if modifiers or words:
            return None
        else:
            detail = None
    else:
        # _ComplexBinder: key events take a keysym detail, button events
        # take a button number 1-5.
        if ev_type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
            type_re = _keysym_re
        else:
            type_re = _button_re
        if not words:
            detail = None
        elif len(words) == 1 and type_re.match(words[0]):
            detail = words[0]
        else:
            return None
    return modifiers, ev_type, detail
def _triplet_to_sequence(triplet):
    """Render a (modifiers, type, detail) triplet back into a Tk sequence."""
    prefix = _state_names[triplet[0]] + _types[triplet[1]][0]
    if triplet[2]:
        return '<' + prefix + '-' + triplet[2] + '>'
    return '<' + prefix + '>'
_multicall_dict = {}
def MultiCallCreator(widget):
    """Return a MultiCall version of the given *widget* class.

    The returned class subclasses *widget* and overrides ``bind``,
    ``unbind`` and the ``event_*`` methods so that virtual events
    ("<<...>>") whose sequences MultiCall understands are dispatched
    through the module's binder objects.  Results are cached, so
    repeated calls with the same widget return the same class.
    """
    if widget in _multicall_dict:
        return _multicall_dict[widget]
    class MultiCall (widget):
        assert issubclass(widget, tkinter.Misc)
        def __init__(self, *args, **kwargs):
            widget.__init__(self, *args, **kwargs)
            # a dictionary which maps a virtual event to a tuple with:
            #  0. the function binded
            #  1. a list of triplets - the sequences it is binded to
            self.__eventinfo = {}
            # One binder instance per event type, in _types order.
            self.__binders = [_binder_classes[i](i, widget, self)
                              for i in range(len(_types))]
        def bind(self, sequence=None, func=None, add=None):
            """Bind *func*; known virtual events go through our binders."""
            #print("bind(%s, %s, %s)" % (sequence, func, add),
            #      file=sys.__stderr__)
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>":
                if sequence in self.__eventinfo:
                    # Re-binding: detach the previous function from every
                    # triplet before attaching the new one.
                    ei = self.__eventinfo[sequence]
                    if ei[0] is not None:
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].unbind(triplet, ei[0])
                    ei[0] = func
                    if ei[0] is not None:
                        for triplet in ei[1]:
                            self.__binders[triplet[1]].bind(triplet, func)
                else:
                    self.__eventinfo[sequence] = [func, []]
            # Always fall through to the ordinary Tk binding as well.
            return widget.bind(self, sequence, func, add)
        def unbind(self, sequence, funcid=None):
            """Unbind a tracked virtual event from our binders and Tk."""
            if type(sequence) is str and len(sequence) > 2 and \
               sequence[:2] == "<<" and sequence[-2:] == ">>" and \
               sequence in self.__eventinfo:
                func, triplets = self.__eventinfo[sequence]
                if func is not None:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
                self.__eventinfo[sequence][0] = None
            return widget.unbind(self, sequence, funcid)
        def event_add(self, virtual, *sequences):
            """Add *sequences* to *virtual*; unparseable ones go to Tk."""
            #print("event_add(%s, %s)" % (repr(virtual), repr(sequences)),
            #      file=sys.__stderr__)
            if virtual not in self.__eventinfo:
                self.__eventinfo[virtual] = [None, []]
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    # Not a sequence MultiCall handles itself.
                    #print("Tkinter event_add(%s)" % seq, file=sys.__stderr__)
                    widget.event_add(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].bind(triplet, func)
                    triplets.append(triplet)
        def event_delete(self, virtual, *sequences):
            """Remove *sequences* from *virtual* (inverse of event_add)."""
            if virtual not in self.__eventinfo:
                return
            func, triplets = self.__eventinfo[virtual]
            for seq in sequences:
                triplet = _parse_sequence(seq)
                if triplet is None:
                    #print("Tkinter event_delete: %s" % seq, file=sys.__stderr__)
                    widget.event_delete(self, virtual, seq)
                else:
                    if func is not None:
                        self.__binders[triplet[1]].unbind(triplet, func)
                    triplets.remove(triplet)
        def event_info(self, virtual=None):
            """Like Tk's event_info, plus the sequences tracked by us."""
            if virtual is None or virtual not in self.__eventinfo:
                return widget.event_info(self, virtual)
            else:
                return tuple(map(_triplet_to_sequence,
                                 self.__eventinfo[virtual][1])) + \
                       widget.event_info(self, virtual)
        def __del__(self):
            # Detach every still-bound function from its binders.
            for virtual in self.__eventinfo:
                func, triplets = self.__eventinfo[virtual]
                if func:
                    for triplet in triplets:
                        self.__binders[triplet[1]].unbind(triplet, func)
    _multicall_dict[widget] = MultiCall
    return MultiCall
if __name__ == "__main__":
    # Manual smoke test: open a MultiCall-wrapped Text widget and print
    # which of the bound sequences fire for each keypress/click.
    root = tkinter.Tk()
    text = MultiCallCreator(tkinter.Text)(root)
    text.pack()
    def bindseq(seq, n=[0]):
        # NOTE: the mutable default ``n`` is deliberately used as a call
        # counter shared across invocations (one virtual event per call).
        def handler(event):
            print(seq)
        text.bind("<<handler%d>>"%n[0], handler)
        text.event_add("<<handler%d>>"%n[0], seq)
        n[0] += 1
    bindseq("<Key>")
    bindseq("<Control-Key>")
    bindseq("<Alt-Key-a>")
    bindseq("<Control-Key-a>")
    bindseq("<Alt-Control-Key-a>")
    bindseq("<Key-b>")
    bindseq("<Control-Button-1>")
    bindseq("<Alt-Button-1>")
    bindseq("<FocusOut>")
    bindseq("<Enter>")
    bindseq("<Leave>")
    root.mainloop()
| true
| true
|
1c4063d3b13a98ff09dc44014e9e75c826ea8dc7
| 10,871
|
py
|
Python
|
tests/v1/client/test.py
|
Motmom/komtet-kassa-python-sdk
|
56bc49f1148aaae4702abf7e978250ab98b57f47
|
[
"MIT"
] | null | null | null |
tests/v1/client/test.py
|
Motmom/komtet-kassa-python-sdk
|
56bc49f1148aaae4702abf7e978250ab98b57f47
|
[
"MIT"
] | null | null | null |
tests/v1/client/test.py
|
Motmom/komtet-kassa-python-sdk
|
56bc49f1148aaae4702abf7e978250ab98b57f47
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from decimal import Decimal
from unittest import TestCase
from komtet_kassa_sdk.v1 import (Client, EmployeeType, Task, TaskInfo)
from ...helpers.mock import ResponseMock, ResponseListMock
from mock import patch
class TestClient(TestCase):
    """Tests for the KOMTET Kassa v1 API ``Client`` wrapper.

    Each test patches the SDK's ``requests`` module, so no real HTTP
    traffic occurs; assertions pin both the parsed result and the exact
    request issued (URL, headers including the HMAC signature, body).
    """
    def setUp(self):
        # Fresh client per test; the literal HMAC signatures asserted
        # below are derived from this shop-id / secret-key pair.
        self.client = Client('shop-id', 'secret-key')
    def test_is_queue_active(self):
        """Queue polling, host override, and default-queue handling."""
        with patch('komtet_kassa_sdk.v1.client.requests') as requests:
            requests.get.return_value = ResponseMock(state='active')
            self.assertTrue(self.client.is_queue_active(1))
            requests.get.assert_called_with(
                allow_redirects=True,
                headers={
                    'Authorization': 'shop-id',
                    'Accept': 'application/json',
                    'X-HMAC-Signature': 'b72f98703ccaea912cdf06e364d81885'
                },
                url='https://kassa.komtet.ru/api/shop/v1/queues/1'
            )
            # set_host must be chainable (returns the client itself).
            self.assertIs(self.client, self.client.set_host('new-host'))
            requests.get.return_value = ResponseMock(state='passive')
            self.assertFalse(self.client.is_queue_active(1))
            requests.get.assert_called_with(
                allow_redirects=True,
                headers={
                    'Authorization': 'shop-id',
                    'Accept': 'application/json',
                    'X-HMAC-Signature': 'd7f30739a1cf280291f763d80d6d5bfd'
                },
                url='new-host/api/shop/v1/queues/1'
            )
            # Without a default queue, omitting the id must raise.
            with self.assertRaises(ValueError) as ctx:
                self.client.is_queue_active()
            self.assertEqual(ctx.exception.args, ('Queue ID is not specified',))
            self.client.set_default_queue(2)
            self.assertFalse(self.client.is_queue_active())
            requests.get.assert_called_with(
                allow_redirects=True,
                headers={
                    'Authorization': 'shop-id',
                    'Accept': 'application/json',
                    'X-HMAC-Signature': '000ed801dae724e047bc74b67743af1d'
                },
                url='new-host/api/shop/v1/queues/2'
            )
    def test_create_task_success(self):
        """Task creation: payload serialization, signature, error paths."""
        with patch('komtet_kassa_sdk.v1.client.requests') as requests:
            response_mock = ResponseMock(id=1, external_id=2, print_queue_id=3, state='new')
            requests.post.return_value = response_mock
            # Decimal values must serialize as plain JSON numbers.
            task = self.client.create_task({'key': Decimal('10.0')}, 3)
            self.assertIsInstance(task, Task)
            self.assertEqual(task.id, 1)
            self.assertEqual(task.external_id, 2)
            self.assertEqual(task.print_queue_id, 3)
            self.assertEqual(task.state, 'new')
            requests.post.assert_called_with(
                headers={
                    'Authorization': 'shop-id',
                    'Accept': 'application/json',
                    'X-HMAC-Signature': '5bfe6ef2290053624fdda725177caa33',
                    'Content-Type': 'application/json'
                },
                url='https://kassa.komtet.ru/api/shop/v1/queues/3/task',
                data='{"key": 10.0}'
            )
            with self.assertRaises(ValueError) as ctx:
                self.client.create_task({'key': 'value'})
            self.assertEqual(ctx.exception.args, ('Queue ID is not specified',))
            self.client.set_default_queue(2)
            # Unserializable payloads surface the json TypeError.
            with self.assertRaises(TypeError) as ctx:
                self.client.create_task({'key': object()})
            self.assertIn('is not JSON serializable', ctx.exception.args[0])
    def test_get_task_info_success(self):
        """Task info: response fields map onto the TaskInfo object."""
        with patch('komtet_kassa_sdk.v1.client.requests') as requests:
            response_mock = ResponseMock(
                id=234, external_id='4321', state='done', error_description=None,
                fiscal_data={
                    'i': '111',
                    'fn': '2222222222222222',
                    't': '3333333333333',
                    'n': 4,
                    'fp': '555555555',
                    's': '6666.77'
                })
            requests.get.return_value = response_mock
            task_info = self.client.get_task_info(234)
            self.assertIsInstance(task_info, TaskInfo)
            self.assertEqual(task_info.id, 234)
            self.assertEqual(task_info.external_id, '4321')
            self.assertEqual(task_info.state, 'done')
            self.assertIsNone(task_info.error_description)
            self.assertDictEqual(task_info.fiscal_data, {
                'i': '111',
                'fn': '2222222222222222',
                't': '3333333333333',
                'n': 4,
                'fp': '555555555',
                's': '6666.77'
            })
    def test_get_couriers_success(self):
        """Employee listing filtered by the COURIER employee type."""
        with patch('komtet_kassa_sdk.v1.client.requests') as requests:
            response_mock = ResponseMock(
                account_employees=[
                    {
                        'email': 'q@mail.ru',
                        'id': 46,
                        'phone': '1',
                        'name': 'Dima D'
                    },
                    {
                        'email': 'q@q.com',
                        'id': 57,
                        'phone': '1',
                        'name': 'qwerty'
                    },
                    {
                        'email': 'ivanov@example.com',
                        'id': 2,
                        'phone': '+70000000000',
                        'name': 'Иванов И.П.'
                    }],
                meta={'total': 3, 'total_pages': 1}
            )
            requests.get.return_value = response_mock
            couriers_info = self.client.get_employees(type=EmployeeType.COURIER)
            self.assertDictEqual(couriers_info['meta'], {'total': 3, 'total_pages': 1})
            self.assertDictEqual(couriers_info['account_employees'][0], {
                'email': 'q@mail.ru',
                'id': 46,
                'phone': '1',
                'name': 'Dima D'
            })
            self.assertDictEqual(couriers_info['account_employees'][1], {
                'email': 'q@q.com',
                'id': 57,
                'phone': '1',
                'name': 'qwerty'
            })
            self.assertDictEqual(couriers_info['account_employees'][2], {
                'email': 'ivanov@example.com',
                'id': 2,
                'phone': '+70000000000',
                'name': 'Иванов И.П.'
            })
    def test_get_orders_success(self):
        """Order listing: paginated response is passed through intact."""
        with patch('komtet_kassa_sdk.v1.client.requests') as requests:
            expected = {
                'orders': [
                    {
                        'id': 1101,
                        'external_id': '336',
                        'task_id': 100,
                        'client_address': 'Вашингтон стрит',
                        'client_name': 'Авраам',
                        'client_email': 'a@e.t',
                        'client_phone': '58895',
                        'date_start': '2020-12-08 10:00',
                        'date_end': '2020-12-08 19:00',
                        'description': 'Линкольн',
                        'state': 'done',
                        'is_pay_to_courier': True,
                        'sno': 2485,
                        'client_coordinate': None,
                        'prepayment': -7277.321,
                        'payment_type': 'card',
                        'callback_url': 'белый дом',
                        'amount': 2493.412,
                        'is_paid': False,
                        'courier': {
                            'id': 70,
                            'name': 'Пупкин'
                        }
                    },
                    {
                        'id': 1102,
                        'external_id': '337',
                        'task_id': 100,
                        'client_address': 'Вашингтон стрит',
                        'client_name': 'Джон',
                        'client_email': 'd@e.t',
                        'client_phone': '46435',
                        'date_start': '2020-12-09 10:00',
                        'date_end': '2020-12-09 19:00',
                        'description': 'Кеннеди',
                        'state': 'done',
                        'is_pay_to_courier': True,
                        'sno': 2489,
                        'client_coordinate': None,
                        'prepayment': -2577.321,
                        'payment_type': 'card',
                        'callback_url': 'белый дом',
                        'amount': 24993.412,
                        'is_paid': False,
                        'courier': {
                            'id': 70,
                            'name': 'Пупкин'
                        }
                    }
                ],
                'meta': {
                    'total': 2,
                    'total_pages': 1
                }
            }
            response_mock = ResponseListMock(expected)
            requests.get.return_value = response_mock
            response = self.client.get_orders(0, 10, 70, '2020-12-08 10:00')
            self.assertDictEqual(response, expected)
            self.assertEqual(response['meta']['total'], 2)
    def test_delete_orders_success(self):
        """Order deletion: an empty 200 response means success (True)."""
        with patch('komtet_kassa_sdk.v1.client.requests') as requests:
            expected = {
                'text': None,
                'content': b'',
                'status': '200 OK',
                'status_code': 200,
                'headers': {
                    'content-length': '0',
                    'content-type': 'application/json'
                },
                'cookies': {},
                'encoding': None
            }
            response_mock = ResponseListMock(expected)
            requests.get.return_value = response_mock
            isDeleted = self.client.delete_order(25)
            self.assertEqual(isDeleted, True)
    def test_get_employee_success(self):
        """Single-employee lookup returns the raw employee record."""
        with patch('komtet_kassa_sdk.v1.client.requests') as requests:
            expected = {
                'id': 71,
                'name': 'Пупкин',
                'email': 'pupkin@example.com',
                'phone': '+79998887766',
                'type': 'cashier',
                'inn': '1848654484',
                'is_manager': True,
                'is_app_fast_basket': True,
                'is_can_assign_order': False,
                'payment_address': 'ул Мира'
            }
            response_mock = ResponseListMock(expected)
            requests.get.return_value = response_mock
            response = self.client.get_employee_info(71)
            self.assertDictEqual(dict(response), expected)
| 40.262963
| 92
| 0.458835
|
from decimal import Decimal
from unittest import TestCase
from komtet_kassa_sdk.v1 import (Client, EmployeeType, Task, TaskInfo)
from ...helpers.mock import ResponseMock, ResponseListMock
from mock import patch
class TestClient(TestCase):
def setUp(self):
self.client = Client('shop-id', 'secret-key')
def test_is_queue_active(self):
with patch('komtet_kassa_sdk.v1.client.requests') as requests:
requests.get.return_value = ResponseMock(state='active')
self.assertTrue(self.client.is_queue_active(1))
requests.get.assert_called_with(
allow_redirects=True,
headers={
'Authorization': 'shop-id',
'Accept': 'application/json',
'X-HMAC-Signature': 'b72f98703ccaea912cdf06e364d81885'
},
url='https://kassa.komtet.ru/api/shop/v1/queues/1'
)
self.assertIs(self.client, self.client.set_host('new-host'))
requests.get.return_value = ResponseMock(state='passive')
self.assertFalse(self.client.is_queue_active(1))
requests.get.assert_called_with(
allow_redirects=True,
headers={
'Authorization': 'shop-id',
'Accept': 'application/json',
'X-HMAC-Signature': 'd7f30739a1cf280291f763d80d6d5bfd'
},
url='new-host/api/shop/v1/queues/1'
)
with self.assertRaises(ValueError) as ctx:
self.client.is_queue_active()
self.assertEqual(ctx.exception.args, ('Queue ID is not specified',))
self.client.set_default_queue(2)
self.assertFalse(self.client.is_queue_active())
requests.get.assert_called_with(
allow_redirects=True,
headers={
'Authorization': 'shop-id',
'Accept': 'application/json',
'X-HMAC-Signature': '000ed801dae724e047bc74b67743af1d'
},
url='new-host/api/shop/v1/queues/2'
)
def test_create_task_success(self):
with patch('komtet_kassa_sdk.v1.client.requests') as requests:
response_mock = ResponseMock(id=1, external_id=2, print_queue_id=3, state='new')
requests.post.return_value = response_mock
task = self.client.create_task({'key': Decimal('10.0')}, 3)
self.assertIsInstance(task, Task)
self.assertEqual(task.id, 1)
self.assertEqual(task.external_id, 2)
self.assertEqual(task.print_queue_id, 3)
self.assertEqual(task.state, 'new')
requests.post.assert_called_with(
headers={
'Authorization': 'shop-id',
'Accept': 'application/json',
'X-HMAC-Signature': '5bfe6ef2290053624fdda725177caa33',
'Content-Type': 'application/json'
},
url='https://kassa.komtet.ru/api/shop/v1/queues/3/task',
data='{"key": 10.0}'
)
with self.assertRaises(ValueError) as ctx:
self.client.create_task({'key': 'value'})
self.assertEqual(ctx.exception.args, ('Queue ID is not specified',))
self.client.set_default_queue(2)
with self.assertRaises(TypeError) as ctx:
self.client.create_task({'key': object()})
self.assertIn('is not JSON serializable', ctx.exception.args[0])
def test_get_task_info_success(self):
with patch('komtet_kassa_sdk.v1.client.requests') as requests:
response_mock = ResponseMock(
id=234, external_id='4321', state='done', error_description=None,
fiscal_data={
'i': '111',
'fn': '2222222222222222',
't': '3333333333333',
'n': 4,
'fp': '555555555',
's': '6666.77'
})
requests.get.return_value = response_mock
task_info = self.client.get_task_info(234)
self.assertIsInstance(task_info, TaskInfo)
self.assertEqual(task_info.id, 234)
self.assertEqual(task_info.external_id, '4321')
self.assertEqual(task_info.state, 'done')
self.assertIsNone(task_info.error_description)
self.assertDictEqual(task_info.fiscal_data, {
'i': '111',
'fn': '2222222222222222',
't': '3333333333333',
'n': 4,
'fp': '555555555',
's': '6666.77'
})
def test_get_couriers_success(self):
with patch('komtet_kassa_sdk.v1.client.requests') as requests:
response_mock = ResponseMock(
account_employees=[
{
'email': 'q@mail.ru',
'id': 46,
'phone': '1',
'name': 'Dima D'
},
{
'email': 'q@q.com',
'id': 57,
'phone': '1',
'name': 'qwerty'
},
{
'email': 'ivanov@example.com',
'id': 2,
'phone': '+70000000000',
'name': 'Иванов И.П.'
}],
meta={'total': 3, 'total_pages': 1}
)
requests.get.return_value = response_mock
couriers_info = self.client.get_employees(type=EmployeeType.COURIER)
self.assertDictEqual(couriers_info['meta'], {'total': 3, 'total_pages': 1})
self.assertDictEqual(couriers_info['account_employees'][0], {
'email': 'q@mail.ru',
'id': 46,
'phone': '1',
'name': 'Dima D'
})
self.assertDictEqual(couriers_info['account_employees'][1], {
'email': 'q@q.com',
'id': 57,
'phone': '1',
'name': 'qwerty'
})
self.assertDictEqual(couriers_info['account_employees'][2], {
'email': 'ivanov@example.com',
'id': 2,
'phone': '+70000000000',
'name': 'Иванов И.П.'
})
def test_get_orders_success(self):
with patch('komtet_kassa_sdk.v1.client.requests') as requests:
expected = {
'orders': [
{
'id': 1101,
'external_id': '336',
'task_id': 100,
'client_address': 'Вашингтон стрит',
'client_name': 'Авраам',
'client_email': 'a@e.t',
'client_phone': '58895',
'date_start': '2020-12-08 10:00',
'date_end': '2020-12-08 19:00',
'description': 'Линкольн',
'state': 'done',
'is_pay_to_courier': True,
'sno': 2485,
'client_coordinate': None,
'prepayment': -7277.321,
'payment_type': 'card',
'callback_url': 'белый дом',
'amount': 2493.412,
'is_paid': False,
'courier': {
'id': 70,
'name': 'Пупкин'
}
},
{
'id': 1102,
'external_id': '337',
'task_id': 100,
'client_address': 'Вашингтон стрит',
'client_name': 'Джон',
'client_email': 'd@e.t',
'client_phone': '46435',
'date_start': '2020-12-09 10:00',
'date_end': '2020-12-09 19:00',
'description': 'Кеннеди',
'state': 'done',
'is_pay_to_courier': True,
'sno': 2489,
'client_coordinate': None,
'prepayment': -2577.321,
'payment_type': 'card',
'callback_url': 'белый дом',
'amount': 24993.412,
'is_paid': False,
'courier': {
'id': 70,
'name': 'Пупкин'
}
}
],
'meta': {
'total': 2,
'total_pages': 1
}
}
response_mock = ResponseListMock(expected)
requests.get.return_value = response_mock
response = self.client.get_orders(0, 10, 70, '2020-12-08 10:00')
self.assertDictEqual(response, expected)
self.assertEqual(response['meta']['total'], 2)
def test_delete_orders_success(self):
with patch('komtet_kassa_sdk.v1.client.requests') as requests:
expected = {
'text': None,
'content': b'',
'status': '200 OK',
'status_code': 200,
'headers': {
'content-length': '0',
'content-type': 'application/json'
},
'cookies': {},
'encoding': None
}
response_mock = ResponseListMock(expected)
requests.get.return_value = response_mock
isDeleted = self.client.delete_order(25)
self.assertEqual(isDeleted, True)
def test_get_employee_success(self):
with patch('komtet_kassa_sdk.v1.client.requests') as requests:
expected = {
'id': 71,
'name': 'Пупкин',
'email': 'pupkin@example.com',
'phone': '+79998887766',
'type': 'cashier',
'inn': '1848654484',
'is_manager': True,
'is_app_fast_basket': True,
'is_can_assign_order': False,
'payment_address': 'ул Мира'
}
response_mock = ResponseListMock(expected)
requests.get.return_value = response_mock
response = self.client.get_employee_info(71)
self.assertDictEqual(dict(response), expected)
| true
| true
|
1c406589588a96bfb0f30855c900c12a8e8768ae
| 860
|
py
|
Python
|
domain-messages/domain_messages/ControlState/__init__.py
|
simcesplatform/static-time-series-resource-forecaster
|
0f53915d7fe6da543f628487c8e643f2e9bb2652
|
[
"MIT"
] | null | null | null |
domain-messages/domain_messages/ControlState/__init__.py
|
simcesplatform/static-time-series-resource-forecaster
|
0f53915d7fe6da543f628487c8e643f2e9bb2652
|
[
"MIT"
] | null | null | null |
domain-messages/domain_messages/ControlState/__init__.py
|
simcesplatform/static-time-series-resource-forecaster
|
0f53915d7fe6da543f628487c8e643f2e9bb2652
|
[
"MIT"
] | null | null | null |
# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland
# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus
# This source code is licensed under the MIT license. See LICENSE in the repository root directory.
# Author(s): Amir Safdarian <amir.safdarian@vtt.fi>
# Kalle Ruuth (TAU) <kalle.ruuth@tuni.fi>
# Keski-Koukkari Antti <antti.keski-koukkari@vtt.fi>
# Md Tanjimuddin <md.tanjimuddin@tuni.fi>
# Olli Suominen <olli.suominen@tuni.fi>
# Otto Hylli <otto.hylli@tuni.fi>
# Tanjim <tanjim0023@gmail.com>
# Ville Heikkilä <ville.heikkila@tuni.fi>
# Ville Mörsky (TAU) <ville.morsky@tuni.fi>
from domain_messages.ControlState.ControlState_Power_Setpoint import ControlStatePowerSetpointMessage
| 66.153846
| 108
| 0.711628
|
from domain_messages.ControlState.ControlState_Power_Setpoint import ControlStatePowerSetpointMessage
| true
| true
|
1c4066b9c3a023a45235f50ef54de81eefaf6e3c
| 288
|
py
|
Python
|
apps.py
|
audacious-software/Passive-Data-Kit-Codebook
|
84533014f6f3aaa6b7cdb1bcf095633c65c94d01
|
[
"Apache-2.0"
] | null | null | null |
apps.py
|
audacious-software/Passive-Data-Kit-Codebook
|
84533014f6f3aaa6b7cdb1bcf095633c65c94d01
|
[
"Apache-2.0"
] | null | null | null |
apps.py
|
audacious-software/Passive-Data-Kit-Codebook
|
84533014f6f3aaa6b7cdb1bcf095633c65c94d01
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class PassiveDataKitCodebookConfig(AppConfig):
    """Django application configuration for the Passive Data Kit codebook app."""
    name = 'passive_data_kit_codebook'  # dotted Python path of the app package
    verbose_name = 'Passive Data Kit: Codebook'  # human-readable name (admin UI)
    default_auto_field = 'django.db.models.AutoField'  # implicit primary-key type
| 28.8
| 53
| 0.767361
|
from __future__ import unicode_literals
from django.apps import AppConfig
class PassiveDataKitCodebookConfig(AppConfig):
name = 'passive_data_kit_codebook'
verbose_name = 'Passive Data Kit: Codebook'
default_auto_field = 'django.db.models.AutoField'
| true
| true
|
1c4068bf10d2d520e30f6471fcec8d75bacda1dc
| 2,900
|
py
|
Python
|
persistent_settings.py
|
ryanvolz/discord_bot_utils
|
4895fa351f6495bda3dd9aab7561984874b8a3d3
|
[
"BSD-3-Clause"
] | null | null | null |
persistent_settings.py
|
ryanvolz/discord_bot_utils
|
4895fa351f6495bda3dd9aab7561984874b8a3d3
|
[
"BSD-3-Clause"
] | null | null | null |
persistent_settings.py
|
ryanvolz/discord_bot_utils
|
4895fa351f6495bda3dd9aab7561984874b8a3d3
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2020 Ryan Volz
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
#
# SPDX-License-Identifier: BSD-3-Clause
# ----------------------------------------------------------------------------
"""Utilities for persistent settings corresponding to Discord IDs."""
import os
import pathlib
import ruamel.yaml
__all__ = ("yaml", "DiscordIDSettings")
# Shared YAML (de)serializer: ``typ="safe"`` refuses arbitrary object
# construction from untrusted files, and block style keeps the settings
# file human-readable/editable.
yaml = ruamel.yaml.YAML(typ="safe")
yaml.default_flow_style = False
def load_persistent_settings(path):
    """Read settings from the yaml file at *path*.

    Returns an empty dict when the file is missing or empty.
    """
    settings = {}
    if not path.exists():
        return settings
    with open(path, "r") as f:
        loaded = yaml.load(f)
    if loaded:
        settings.update(loaded)
    return settings
def dump_persistent_settings(path, settings):
    """Write *settings* to a yaml file at *path*, creating parent dirs as needed.

    :param pathlib.Path path: destination file
    :param dict settings: mapping to serialize (any Mapping; copied to a
        plain dict so e.g. a defaultdict is not written as a python object)
    """
    # mkdir(parents=True, exist_ok=True) replaces the racy
    # ``if not path.parent.is_dir(): os.makedirs(...)`` check-then-create.
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, "w") as f:
        # wrap settings with dict so we write a regular dict and not a defaultdict
        yaml.dump(dict(settings), f)
class DiscordIDSettings(object):
    """Class for managing related settings corresponding to Discord IDs."""

    def __init__(self, bot, name, default_settings=None):
        """Initialize settings object for a given group/file name.

        :param bot: the bot instance this settings store belongs to
        :param str name: group name; settings persist in ``.settings/<name>.yml``
        :param dict default_settings: fallback values consulted by :meth:`get`
        """
        if default_settings is None:
            default_settings = {}
        self.bot = bot
        self.name = name
        self.default_settings = default_settings
        # set up storage for settings and load from persistent file
        self.settings_path = pathlib.Path(".settings", f"{self.name}.yml")
        self.id_dict = load_persistent_settings(self.settings_path)

    def teardown(self):
        """Tear down persistent settings for the bot."""
        # dump persistent storage to file
        dump_persistent_settings(self.settings_path, self.id_dict)

    def get(self, id, key, default=None):
        """Get value corresponding to ID, falling back to the group defaults
        and finally to *default*."""
        try:
            val = self.id_dict[id][key]
        except KeyError:
            val = self.default_settings.get(key, default)
        return val

    def set(self, id, key, val):
        """Set value corresponding to ID to bot setting storage."""
        # setdefault creates the per-ID mapping on first use.
        self.id_dict.setdefault(id, {})[key] = val

    def unset(self, id, key):
        """Unset a key value, so it will fall back to the default."""
        id_settings = self.id_dict.get(id)
        if id_settings is not None:
            # pop() instead of ``del`` so unsetting an absent key is a
            # no-op rather than a KeyError (the old code only guarded the
            # id lookup, contradicting the documented fallback behavior).
            id_settings.pop(key, None)
| 31.868132
| 82
| 0.594828
|
import os
import pathlib
import ruamel.yaml
__all__ = ("yaml", "DiscordIDSettings")
yaml = ruamel.yaml.YAML(typ="safe")
yaml.default_flow_style = False
def load_persistent_settings(path):
settings = {}
if path.exists():
with open(path, "r") as f:
persistent = yaml.load(f)
if persistent:
settings.update(persistent)
return settings
def dump_persistent_settings(path, settings):
if not path.parent.is_dir():
os.makedirs(path.parent)
with open(path, "w+") as f:
yaml.dump(dict(settings), f)
class DiscordIDSettings(object):
def __init__(self, bot, name, default_settings=None):
if default_settings is None:
default_settings = {}
self.bot = bot
self.name = name
self.default_settings = default_settings
self.settings_path = pathlib.Path(".settings", f"{self.name}.yml")
self.id_dict = load_persistent_settings(self.settings_path)
def teardown(self):
dump_persistent_settings(self.settings_path, self.id_dict)
def get(self, id, key, default=None):
try:
id_settings = self.id_dict[id]
val = id_settings[key]
except KeyError:
try:
val = self.default_settings[key]
except KeyError:
val = default
return val
def set(self, id, key, val):
try:
id_settings = self.id_dict[id]
except KeyError:
id_settings = {}
self.id_dict[id] = id_settings
id_settings[key] = val
def unset(self, id, key):
try:
id_settings = self.id_dict[id]
except KeyError:
return
del id_settings[key]
| true
| true
|
1c406957c4699002c219b6c7a34acd93600c681c
| 28,082
|
py
|
Python
|
bridgedb/strings.py
|
isislovecruft/bridgedb
|
5ca53926d4bf32fe2cdd2ae3f2dba9e7a6f91b4e
|
[
"BSD-3-Clause-Clear"
] | 4
|
2016-04-18T06:30:54.000Z
|
2019-07-17T14:02:45.000Z
|
bridgedb/strings.py
|
isislovecruft/bridgedb
|
5ca53926d4bf32fe2cdd2ae3f2dba9e7a6f91b4e
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
bridgedb/strings.py
|
isislovecruft/bridgedb
|
5ca53926d4bf32fe2cdd2ae3f2dba9e7a6f91b4e
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 ; test-case-name: bridgedb.test.test_strings ; -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# :copyright: (c) 2007-2017, The Tor Project, Inc.
# (c) 2013-2017, Isis Lovecruft
# (c) 2007-2017, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
"""Commonly used string constants.
.. todo:: The instructions for the OpenPGP keys in
:data:`BRIDGEDB_OPENPGP_KEY` are not translated… should we translate them?
Should we tell users where to obtain GPG4Win/GPGTools/gnupg? Should those
instruction be that verbose? Or should we get rid of the instructions
altogether, and assume that any encouragement towards using GPG will just
make users more frustrated, and (possibly) (mis-)direct that frustration
at Tor or BridgeDB?
"""
from __future__ import unicode_literals
# This won't work on Python2.6, however
# 1) We don't use Python2.6, and
# 2) We don't care about supporting Python2.6, because Python 2.6 (and,
# honestly, all of Python2) should die.
from collections import OrderedDict
def _(text):
"""This is necessary because strings are translated when they're imported.
Otherwise this would make it impossible to switch languages more than
once.
:returns: The **text**.
"""
return text
# TRANSLATORS: Please do not translate the word "TYPE".
EMAIL_MISC_TEXT = {
0: _("""\
[This is an automated message; please do not reply.]"""),
1: _("""\
Here are your bridges:"""),
2: _("""\
You have exceeded the rate limit. Please slow down! The minimum time between
emails is %s hours. All further emails during this time period will be ignored."""),
3: _("""\
COMMANDs: (combine COMMANDs to specify multiple options simultaneously)"""),
# TRANSLATORS: Please DO NOT translate the word "BridgeDB".
4: _("Welcome to BridgeDB!"),
# TRANSLATORS: Please DO NOT translate the words "transport" or "TYPE".
5: _("Currently supported transport TYPEs:"),
6: _("Hey, %s!"),
7: _("Hello, friend!"),
8: _("Public Keys"),
# TRANSLATORS: This string will end up saying something like:
# "This email was generated with rainbows, unicorns, and sparkles
# for alice@example.com on Friday, 09 May, 2014 at 18:59:39."
9: _("""\
This email was generated with rainbows, unicorns, and sparkles
for %s on %s at %s."""),
}
WELCOME = {
# TRANSLATORS: Please DO NOT translate "BridgeDB".
# TRANSLATORS: Please DO NOT translate "Pluggable Transports".
# TRANSLATORS: Please DO NOT translate "Tor".
# TRANSLATORS: Please DO NOT translate "Tor Network".
0: _("""\
BridgeDB can provide bridges with several %stypes of Pluggable Transports%s,
which can help obfuscate your connections to the Tor Network, making it more
difficult for anyone watching your internet traffic to determine that you are
using Tor.\n\n"""),
# TRANSLATORS: Please DO NOT translate "Pluggable Transports".
1: _("""\
Some bridges with IPv6 addresses are also available, though some Pluggable
Transports aren't IPv6 compatible.\n\n"""),
# TRANSLATORS: Please DO NOT translate "BridgeDB".
# TRANSLATORS: The phrase "plain-ol'-vanilla" means "plain, boring,
# regular, or unexciting". Like vanilla ice cream. It refers to bridges
# which do not have Pluggable Transports, and only speak the regular,
# boring Tor protocol. Translate it as you see fit. Have fun with it.
2: _("""\
Additionally, BridgeDB has plenty of plain-ol'-vanilla bridges %s without any
Pluggable Transports %s which maybe doesn't sound as cool, but they can still
help to circumvent internet censorship in many cases.\n\n"""),
}
"""These strings should go on the first "Welcome" email sent by the
:mod:`~bridgedb.EmailServer`, as well as on the ``index.html`` template used
by the :mod:`~bridgedb.distributors.https.server`. They are used as an introduction to
explain what Tor bridges are, what bridges do, and why someone might want to
use bridges.
"""
FAQ = {
0: _("What are bridges?"),
1: _("""\
%s Bridges %s are Tor relays that help you circumvent censorship."""),
}
OTHER_DISTRIBUTORS = {
0: _("I need an alternative way of getting bridges!"),
1: _("""\
Another way to get bridges is to send an email to %s. Please note that you must
send the email using an address from one of the following email providers:
%s, %s or %s."""),
}
HELP = {
0: _("My bridges don't work! I need help!"),
# TRANSLATORS: Please DO NOT translate "Tor".
1: _("""If your Tor doesn't work, you should email %s."""),
# TRANSLATORS: Please DO NOT translate "Pluggable Transports".
# TRANSLATORS: Please DO NOT translate "Tor Browser".
# TRANSLATORS: Please DO NOT translate "Tor".
2: _("""\
Try including as much info about your case as you can, including the list of
bridges and Pluggable Transports you tried to use, your Tor Browser version,
and any messages which Tor gave out, etc."""),
}
BRIDGES = {
0: _("Here are your bridge lines:"),
1: _("Get Bridges!"),
}
OPTIONS = {
0: _("Please select options for bridge type:"),
1: _("Do you need IPv6 addresses?"),
2: _("Do you need a %s?"),
}
CAPTCHA = {
0: _('Your browser is not displaying images properly.'),
1: _('Enter the characters from the image above...'),
}
HOWTO_TBB = {
0: _("""How to start using your bridges"""),
# TRANSLATORS: Please DO NOT translate "Tor Browser".
1: _("""\
To enter bridges into Tor Browser, first go to the %s Tor Browser download
page %s and then follow the instructions there for downloading and starting
Tor Browser."""),
# TRANSLATORS: Please DO NOT translate "Tor".
2: _("""\
When the 'Tor Network Settings' dialogue pops up, click 'Configure' and follow
the wizard until it asks:"""),
# TRANSLATORS: Please DO NOT translate "Tor".
3: _("""\
Does your Internet Service Provider (ISP) block or otherwise censor connections
to the Tor network?"""),
# TRANSLATORS: Please DO NOT translate "Tor".
4: _("""\
Select 'Yes' and then click 'Next'. To configure your new bridges, copy and
paste the bridge lines into the text input box. Finally, click 'Connect', and
you should be good to go! If you experience trouble, try clicking the 'Help'
button in the 'Tor Network Settings' wizard for further assistance."""),
}
EMAIL_COMMANDS = {
"get help": _("Displays this message."),
# TRANSLATORS: Please try to make it clear that "vanilla" here refers to the
# same non-Pluggable Transport bridges described above as being
# "plain-ol'-vanilla" bridges.
"get bridges": _("Request vanilla bridges."),
"get ipv6": _("Request IPv6 bridges."),
# TRANSLATORS: Please DO NOT translate the word the word "TYPE".
"get transport [TYPE]": _("Request a Pluggable Transport by TYPE."),
# TRANSLATORS: Please DO NOT translate "BridgeDB".
# TRANSLATORS: Please DO NOT translate "GnuPG".
"get key": _("Get a copy of BridgeDB's public GnuPG key."),
#"subscribe": _("Subscribe to receive new bridges once per week"),
#"unsubscribe": _("Cancel a subscription to new bridges"),
}
#-----------------------------------------------------------------------------
# All of the following containers are untranslated!
#-----------------------------------------------------------------------------

#: Mapping of every Pluggable Transport methodname to a boolean telling
#: whether we actively distribute bridges speaking that transport.  The ones
#: we distribute SHOULD be in a widely accepted, usable state, publicly
#: deployed *en masse*, and shipped in stable Tor Browser releases.
#:
#: ***Don't change this setting here; change it in :file:`bridgedb.conf`.***
SUPPORTED_TRANSPORTS = {}

#: The PT methodname which is selected by default (e.g. in the webserver
#: dropdown menu).
#:
#: ***Don't change this setting here; change it in :file:`bridgedb.conf`.***
DEFAULT_TRANSPORT = ''

def _getSupportedTransports():
    """Get the list of currently supported transports.

    :rtype: list
    :returns: The lowercased methodnames of all actively distributed
        Pluggable Transports, sorted in alphabetical order.
    """
    return sorted(methodname.lower()
                  for methodname, distributed in SUPPORTED_TRANSPORTS.items()
                  if distributed)

def _setDefaultTransport(transport):
    """Set the methodname returned by :func:`_getDefaultTransport`."""
    global DEFAULT_TRANSPORT
    DEFAULT_TRANSPORT = transport

def _getDefaultTransport():
    """Get the currently configured default transport methodname."""
    return DEFAULT_TRANSPORT

def _setSupportedTransports(transports):
    """Replace the table of currently supported transports.

    .. note: You shouldn't need to touch this. This is used by the config
        file parser; change the ``SUPPORTED_TRANSPORTS`` dictionary in
        :file:`bridgedb.conf` instead.

    :param dict transports: A mapping of Pluggable Transport methodnames
        (strings) to booleans.  ``True`` marks a transport which we (more
        easily) distribute to clients; ``False`` marks one which we (sort
        of) don't distribute.
    """
    global SUPPORTED_TRANSPORTS
    SUPPORTED_TRANSPORTS = transports

def _getSupportedAndDefaultTransports():
    """Get the supported transports, marking which one is the default.

    An :class:`collections.OrderedDict` is returned so that rendered pages
    (e.g. the dropdown menu) list the transports in a stable alphabetical
    order instead of random dict order.

    :rtype: :class:`collections.OrderedDict`
    :returns: Each supported methodname mapped to ``False``, except the one
        named by :data:`DEFAULT_TRANSPORT` (if set), which maps to ``True``.
    """
    transports = OrderedDict((methodname, False)
                             for methodname in _getSupportedTransports())
    if DEFAULT_TRANSPORT:
        transports[DEFAULT_TRANSPORT] = True
    return transports
EMAIL_SPRINTF = {
    # Goes into the "%s types of Pluggable Transports %s" part of ``WELCOME[0]``
    "WELCOME0": ("", "[0]"),
    # Goes into the "%s without Pluggable Transport %s" part of ``WELCOME[2]``
    "WELCOME2": ("-", "-"),
    # For the "%s Tor Browser download page %s" part of ``HOWTO_TBB[1]``
    "HOWTO_TBB1": ("", "[0]"),
    # For the "you should email %s" in ``HELP[1]`` (the string with the
    # ``%s`` is HELP's entry 1, not entry 0, despite this key's name).
    # NOTE(review): the parentheses below do NOT make a tuple -- this value
    # is a plain string, which works because the target contains exactly
    # one ``%s``.
    "HELP0": ("help@rt.torproject.org"),
}
"""``EMAIL_SPRINTF`` is a dictionary that maps translated strings which
contain format specifiers (i.e. ``%s``) to what those format specifiers should
be replaced with in a given template system.
For example, a string which needs a pair of HTML ``("<a href=''">, "</a>")``
tags (for the templates used by :mod:`bridgedb.distributors.https.server`) would need some
alternative replacements for the :mod:`EmailServer`, because the latter uses
templates with a ``text/plain`` mimetype instead of HTML. For the
``EmailServer``, the format strings specifiers are replaced with an empty
string where the opening ``<a>`` tags would go, and a numbered Markdown link
specifier where the closing ``</a>`` tags would go.
The keys in this dictionary are the Python variable names of the corresponding
strings which are being formatted, i.e. ``WELCOME0`` would be the string
replacements for ``strings.WELCOME.get(0)``.
For example, the ``0`` string in :data:`WELCOME` above has the substring::
"%s without Pluggable Transport %s"
and so to replace the two ``%s`` format specifiers, you would use this mapping
like so::
>>> from bridgedb import strings
>>> welcome = strings.WELCOME[0] % strings.EMAIL_SPRINTF["WELCOME0"]
>>> print welcome.split('\n')[0]
BridgeDB can provide bridges with several types of Pluggable Transports[0],
"""
#: Markdown-style reference-link lines appended to plaintext emails; each
#: "[0]:" label matches the "[0]" marker that the corresponding
#: ``EMAIL_SPRINTF`` entry substitutes into the message body.
EMAIL_REFERENCE_LINKS = {
    "WELCOME0": "[0]: https://www.torproject.org/docs/pluggable-transports.html",
    "HOWTO_TBB1": "[0]: https://www.torproject.org/projects/torbrowser.html",
}
BRIDGEDB_OPENPGP_KEY = """\
# This keypair contains BridgeDB's online signing and encryption subkeys. This
# keypair rotates because it is kept online. However, the current online
# keypair will *ALWAYS* be certified by the offline keypair (at the bottom of
# this file).
#
# If you receive an email from BridgeDB, it should be signed with the
# 21B554E95938F4D0 subkey from the following keypair:
# pub 4096R/8DC43A2848821E32 2013-09-11 [expires: 2015-09-11]
# Key fingerprint = DF81 1109 E17C 8BF1 34B5 EEB6 8DC4 3A28 4882 1E32
# uid BridgeDB <bridges@bridges.torproject.org>
# sub 4096R/21B554E95938F4D0 2013-09-11 [expires: 2015-09-11]
# Key fingerprint = 9FE3 9D1A 7438 9223 3B3F 66F2 21B5 54E9 5938 F4D0
# sub 4096R/E7793047C5B54232 2013-09-11 [expires: 2015-09-11]
# Key fingerprint = CFFB 8469 9048 37E7 8CAE 322C E779 3047 C5B5 4232
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFIv8YABEADRqvfLB4xWj3Fz+HEmUUt/qbJnZhqIjo5WBHaBJOmrzx1c9fLN
aYG36Hgo6A7NygI1oQmFnDinSrZAtrPaT63d1Jg49yZwr/OhMaxHYJElMFHGJ876
kLZHmQTysquYKDHhv+fH51t7UVaZ9NkP5cI+V3pqck0DW5DwMsVJXNaU317kk9me
mPJUDMb5FM4d2Vtk1N+54bHJgpgmnukNtpJmRyHRbZBqNMln5nWF7vdZ4u5PGPWj
bA0rPZhayeE3FQ0MHiGL12kHAy30pfg54QfPJDQBCywjABetRE+xaM9TcS+R31Pf
2VbLeb+Km7QpHMwOXI5xZLss9BAWm9EBbmXxuqaRBHyi830jjCrK9UYuzzOqKoUV
Mk1BRelZTFnGPWeVTE+Ps+pwJ0Dwx4ghppJBCoArmEbkNliblxR/2wYOOFi/ZVA4
Zc2ok9T3rBLVg07b7ezFUScGiTnc7ac7hp6r8Qsh09ZbhRr9erK/n194aEvkXTfr
qepwrAE7YeF4YuR206UOFFWDhxWDLbRu0gIWgrevEQu/cvQPrO9uH5fL6Gw/+mNP
Q/NIteejhkDyvyTUKyBu7x+Gls71zT2u/X13eOAJ8IxBkSVRKQ8tRD+oqJkWplOf
+BpaGU+g6u4kT2AzFDxTOupfrYcPvORTAV/V3suys2YQE4x422GASXDivQARAQAB
tClCcmlkZ2VEQiA8YnJpZGdlc0BicmlkZ2VzLnRvcnByb2plY3Qub3JnPokDJQQT
AQoBD0gUgAAAAAAXACh2ZXJpZmllZEB0b3Jwcm9qZWN0Lm9yZ0RGODExMTA5RTE3
QzhCRjEzNEI1RUVCNjhEQzQzQTI4NDg4MjFFMzJPFIAAAAAAHgAoYnJpZGdlc0Bi
cmlkZ2VzLnRvcnByb2plY3Qub3JnREY4MTExMDlFMTdDOEJGMTM0QjVFRUI2OERD
NDNBMjg0ODgyMUUzMioaaHR0cHM6Ly9icmlkZ2VzLnRvcnByb2plY3Qub3JnL3Bv
bGljeS50eHQCGwEDCw0JBBUKCQgEFgIBAAIeAQIXgCcYaHR0cHM6Ly9icmlkZ2Vz
LnRvcnByb2plY3Qub3JnL2tleS5hc2MFAlSKBKIFCQPDTiIACgkQjcQ6KEiCHjIs
jg//bJ12eRnBMfIGzOGh+T4wz7/YyKLfARAMnqDnSxhTxuE+M5hWm3QbxP03R6eY
x+PKwQaDJSmm7HhRhltb7QXUe8dqjnocFwwagpoLZ/81mBLxByqg5TKHGGIGy+DX
omIzCq5ijx1IUkHlgh708a5alG7bjRTqedT4Wxxyl6psGzDhGQdS8bqx/f32nQaE
h41l+A/EY1g2HVqky63ZHAP3S2v+mWCrk5DnkElc0229MXqaBuEr4nbYMXRkahMb
E2gnCmdSoeD21AY6bNyz7IcJGpyKCx9+hVgPjpm3J23JEYyPL+s48jn6QcI/Q2gD
yAtgU65y6IrdYn8SwkABI1FIq9WAwG7DaInxvkqkYqyBQLaZJEMyX8NTBvFoT5JS
jnkxG0xu61Vxq0BLYBIOJE0VFHAJ40/jOvSxQJkQhu9G4BK7htnADbtstmMDMM3q
xuuO5pcj2rl7YthNunyZ1yhPHXijUUyKrwR9piENpptztFBVN6+ijqU/TmWMOtbH
X7p9F+3tXCHHqwO5U/JMtsb/9M39MR8BrdcLc7m6dCpeuSUuR2LLroh+MoMJGviI
iesxHF95kFqkJAecW1Z3eKL9vrlbfO3waeuCi18k1TePnZuG5lmf2KjKDW5vHK4O
WFqvvfK2kxkCUjvGdLeTOAVOV+X+PQ23jvBJO2bS7YbOb9C5Ag0EUi/ygQEQALZ/
p7xRINHY7MMf3/bo/I0WRxWHd1AE9tRToyEg1S2u1YrWWL5M9D8saRsp9cpnpGEu
hW3vu7G4nasY27qOz4bSKu1YMAVIC58v1tEnBqdo1zErNjhs38PrmJKbbs9tDfYY
Oi2x0GlhMbIrNStcZpnCdLa6U6NLMbggDL1GxjMPYBMi4TtLgcIeRDUSjsZscZkg
Kxs5QkSVc3SrYyraayIc8WtIpDLcxPt6/g90rbatZzBfO+93Rz7qUXHmgzuM0hy1
Fvn619o3I5DsWrfOz9t/QuznoOBw4PfzDPNT7VlzZN4xHAcr5+7B+DH9IsvlCt5N
kQFuYpFZCpXNaD2XOtmIqjTCeLNfcgTEj0qoUIEKyKbBIgfP+7S2tLXy8JKUTy5g
9kxXQeHueLykQ4Mt18JH0nMHbHbQl0K3LGT4ucRDOmjNtlQCltVLkIk3GimyqKs/
vdZ9c+dm4Akx1qsJcwvveX+imJe2e9RUodcxWXxWrYnuPa5b5nfR1i+GfV0on/Pt
AQ8gc9CkJpMiq5TQDOFhFP6yQcq77sXuUkEl5qamptedz28E0I693ulnfwcsE80p
xkpIG6n33DZJSEyqgtWjE1P2pnsVfO5ILs3mKLe7bO1v3qMXcCkMCGH/kwzvtowq
YvY4gaZMDZtQFY8U7lI9FdRUvVdeHAB24y291nhzABEBAAGJBYMEGAEKANNIFIAA
AAAAFwAodmVyaWZpZWRAdG9ycHJvamVjdC5vcmdERjgxMTEwOUUxN0M4QkYxMzRC
NUVFQjY4REM0M0EyODQ4ODIxRTMyTxSAAAAAAB4AKGJyaWRnZXNAYnJpZGdlcy50
b3Jwcm9qZWN0Lm9yZ0RGODExMTA5RTE3QzhCRjEzNEI1RUVCNjhEQzQzQTI4NDg4
MjFFMzIqGmh0dHBzOi8vYnJpZGdlcy50b3Jwcm9qZWN0Lm9yZy9wb2xpY3kudHh0
AhsCBQJUigTTBQkDw01SAqTB2CAEGQEKAIEFAlIv8oFPFIAAAAAAHgAoYnJpZGdl
c0BicmlkZ2VzLnRvcnByb2plY3Qub3JnOUZFMzlEMUE3NDM4OTIyMzNCM0Y2NkYy
MjFCNTU0RTk1OTM4RjREMCoaaHR0cHM6Ly9icmlkZ2VzLnRvcnByb2plY3Qub3Jn
L3BvbGljeS50eHQACgkQIbVU6Vk49NDbPw/5ATe/T8+eaToC3v0TYNRH5nveQvzA
WdnshD3lnvfsgDhbilwifKpc5LHKXU3rvb42HH2cu0ckuksdDTvICZD9cJjRq/F+
Mzm0pNCAJg0pQnHaaWFQjw+CHYEoizai3S+iYxhNHeSdA6Ty7xm4+bHNf0Aqblbd
6dKwq9EvjwAI6zZsAHtsmHRUMdrFwGdKae6CSchUT2JQFBPEWMhvzdpDGACWVaSP
sxYKuYg9LgpswGcof+tprRjKRl8MtSh0ufjbVBlTeSKpL5Y+fcTRD3PI8w7Ocr3z
jr6XpYG4SUNHsWwxyu/DTXg76Lk1/+BdaH25hDOAasLUOU7yRL8zD/c7M0FkGXdj
r5I2DEEqwzJ9cPHWjpgb8N9fZLoPFP9JOmKGHINqxNe7TfwiTdD6uDKs/u/QK1U+
o3iYBXBTREdopPUdBTM9wYRUhyGXTEKLhUP3MGpXYlgeYPrSdp76VyN3BzLTbMv+
+7rxyKxL9cWYU0pnXHgPC5nyHX5nqXmhMnkxAD3Bnm8n9XDfgiyTDExqksEh2VXt
yhVfLezylEP2fwtd8/mABBCsTjzZW6FRfRRAjUZWZGFpFg8no1x5JS9uiswRP5g1
qHijNFWpGyTtJWl5VNd0d9+LtVUX1jRpDUpsjZcxqs3fsvw2p+H/zQ1wFvDrsoav
hqOTq+AEnJc7ZG8JEI3EOihIgh4ych8P/3GTyWb29+43YVibbSPPvEv4gFqziC+9
1p92FJ0V4XdaT7TW3qaZVp5edUNwB/jjF0SxBybwaMX2ZIGXOjnjF6/Zby4ynuTX
vZkS1mKRA0KWupB3e9PSMY3ZtssnqpGna/+3qlpxtunW7HcW4nCF/f59WHhlVjaO
MXjtuWj59yB56Dd1sNjwhcNCyp4/NpzGnRW97ZV3Pp4oqIOqcGzCQXkVPcnaqcOh
Cs9vIDJlMtn/IWBzUGimuRllDSSVSWkYkyJcG2NUHUwgXYpLwQz7sScvmCPchf4K
qarpX0FpkUDfqaVVuQ7A2XbPUAVFzIk930G1WzgOuOdg9vhWSEjou+SKrAoMz90w
3xHwEvmPDTTVJQft9ytoRbwZkIPfzzhII3mr4agbORAfzDaj5g/f6CVRdg6D3ME1
Etg9ZrfLgRY993g/arfIME6OOsiNcy5+PunN96Rw0o1xoD+97NmZuQrs/p4Mfn5o
8EwXHutREhahin+3/SV3hz9ReeLYmClq+OVhjPzPdtwZsFoyQyUJoFVHPTuSdChZ
FPaqN68FjlNMugmxnvski3ZDVT7pw3B6otjjaL3rr6q0PC2yhEb2ntb3IFUizHjn
80SmfE1Bqwit7ZHu8r/Gt/0iecGk5h84VzSgiGZGF/7m1i5UMVlNSeWnsInGa5Su
7HSzfMq+YmkzuQINBFIv8p4BEADTOLR5e5NKKRPpjCb4B/8YYkWh+orb70EogIZ6
j5v8d/djLyhjqZ9BIkh41/hYKMwnsa4KkDkTaX0eNu3BFB2zGgZ1GSd7525ESxiq
suXIlAg2pex7bysaFfua0nUx64tmaQm2XArdkj/wI0pbg+idWym3WQQmZLyTTbzl
8rpTEtTt+S2m6z3EeAhEHuNFH16hEDUywlef3EotX3njuFiLqaNvnzUYDxhUvKd2
2K1es1ggispgP+eb1bkMApxecf2rqmSUEcvsuTWip4oGZPBLGDQeNKHkCUVbj4wT
yWDIRtto3wi+4CFPEVzw+htj1cQfTstPqUdG7NSOmLQggedoUdv7AJm4MJJiyEax
l+IAf6Afwrrm3eOSv0PgoUxOrUb9vhIoL8ih8gtiqvQ9qYaRQfQA/w3Z0Su2Yfoc
fQS8Uw99qG+oTgieG6F6ud8+hMZAYVZFqbU+ztzMyDE6h4Hflkt6VNJ0Hk0VoF38
TTs77pHXXBbLD6SzR6tbNuR9r/lbmC8Qf2A1ZAThR0iuGhNRFtUPo28GxakxGdLZ
9kHIxjl7EN/gsmYTwuEhr+yfNtLwtSH0ojeqbDmgufvgh+SITCtyNDAUspjrZYEt
F0NHRpSom2NFVELMqMRydU/ncph1rGZgVp6/zVj6xIlhKmqj5P1y/9B0c4Tu1CzJ
pkJ5wwARAQABiQLpBBgBCgDTSBSAAAAAABcAKHZlcmlmaWVkQHRvcnByb2plY3Qu
b3JnREY4MTExMDlFMTdDOEJGMTM0QjVFRUI2OERDNDNBMjg0ODgyMUUzMk8UgAAA
AAAeAChicmlkZ2VzQGJyaWRnZXMudG9ycHJvamVjdC5vcmdERjgxMTEwOUUxN0M4
QkYxMzRCNUVFQjY4REM0M0EyODQ4ODIxRTMyKhpodHRwczovL2JyaWRnZXMudG9y
cHJvamVjdC5vcmcvcG9saWN5LnR4dAIbDAUCVIoE4QUJA8NNQwAKCRCNxDooSIIe
Mo7JEADDBtQpYxPhbj3MT0xpk96EDlon/5yHVf+cnk1pNisc+HkJsVe1nh7gAWOz
wJKdeqOVpgxiJTxIQdl6mipKwwFi0DreP7h56s1WQkuSSWJzqssAwWHfVAsX13fV
zWd0XyxN/OF9ZKQjX4qwpJ/na631PSwZLvHYhMaZnb9pjNwC5/PEKRmFqLbQT6Px
12miZT6ToPDCczHxJ4BxbEGVU+PtRsHwmTRT3JhxFNDfeVd+uwsQIMidJbUoqVW7
fe2zNd0TaWyz4Rw087oZE2OXdctjvtsu8fzXx6d/tkazI6cUOqoaMTR41KEu5X0T
BpWSAMADBYjNs9QRWXX7ZlsJRUSCX1EKbMhgoL6KIGceIkjH61M/LF6HqDgSgSWt
h+LIYGa+LrB/6819o32QSOSHHJ5+NJrbCSaLgKE/LKnf92V2QbZE8IGY6EOSjHqn
n1+j+CLRKY/kUyvk+1TumTghjg/aDs/8Jv8PvgSWLQ0q1rxHYbX7q9ZJhYC/4LdR
ya/Cho6w2l0N3tV/IMAwvFNHsaiIiiwfoOQbkBUvkyzBwjKt96Ai4I0QKt/63uH0
drQhlJEgIyGkOrorBByVqZAQdnoLENYIu6tDUj0bTbGObKqua4iPlSK3/g40zCm4
9OgcN7A8kFuNpgp2EHqj1/jrwd7mZYKsWTuGiR/7fwXf+4xbvg==
=raCx
-----END PGP PUBLIC KEY BLOCK-----
# The following keypair is BridgeDB's offline certification-only keypair. It
# is used to sign new online signing/encryption keypairs.
#
# If you import this key and mark it as trusted, emails from BridgeDB (if
# signed correctly with the online keypair above) should always be trusted. To
# do this, open a shell and do:
#
# $ curl -O https://bridges.torproject.org/keys
# $ gpg --import keys
# $ gpg --check-sigs 7B78437015E63DF47BB1270ACBD97AA24E8E472E
# $ gpg --edit-key 7B78437015E63DF47BB1270ACBD97AA24E8E472E
#
# Then type 'trust' to set the trust level. Choose a number that you like.
# Next type 'quit'. Finally, to create a local signature which will will not
# be uploaded to keyservers:
#
# $ gpg --lsign-key 7B78437015E63DF47BB1270ACBD97AA24E8E472E
#
# pub 16384R/CBD97AA24E8E472E 2013-10-12
# Key fingerprint = 7B78 4370 15E6 3DF4 7BB1 270A CBD9 7AA2 4E8E 472E
# uid BridgeDB (Offline ID Key) <bridges@bridges.torproject.org>
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQgNBFJZB+QBQADcx7laikgZOZXLm6WH2mClm7KrRChmQAHOmzvRYTElk+hVZJ6g
qSUTdl8fvfhifZPCd3g7nJBtOhQAGlrHmJRXfdf4cTRuD73nggbYQ0NRR9VZ3MIK
ToJDELBhgmWeNKpLcPsTpi2t9qrHf3xxM06OdxOs9lCGtW7XVYnKx3vaRNk6c0ln
De82ZWnZr1eMoPzcjslw7AxI94hIgV1GDwTSpBndv/VwgLeBC5XNCKv0adhO/RSt
fuZOHGT/HfI0U0C3fSTiIu4lJqEd9Qe8LUFQ7wRMrf3KSWwyWNb/OtyMfZ52PEg9
SMWEfpr6aGwQu6yGPsE4SeHsiew5IqCMi64TZ9IcgY0fveiDzMSIAqnWQcxSL0SH
YbwQPxuOc4Rxj/b1umjigBG/Y4rkrxCKIw6M+CRaz203zs9ntOsWfnary/w+hepA
XLjC0yb0cP/oBB6qRyaCk2UTdqq1uWmJ2R/XhZHdZIDabxby6mvQbUQA/NEMOE/B
VrDonP1HNo1xpnY8lltbxdFD/jDikdjIazckMWl/0fri0pyPSdiJdAK2JrUniP9Q
eNbgcx3XvNnfjYjiQjTdqfxCTKpSmnsBNyYng6c4viOr5weBFXwEJq2Nl7+rP5pm
TF1PeiF769z4l2Mrx3X5sQqavTzd2VBMQ6/Kmk9Emxb8e1zyQD6odqJyTi1BBAes
F2BuKLMCVgZWOFSNGDOMoAUMZh0c6sRQtwz3KRBAuxUYm3wQPqG3XpDDcNM5YXgF
wVU8SYVwdFpPYT5XJIv2J2u45XbPma5aR0ynGuAmNptzELHta5cgeWIMVsKQbnPN
M6YTOy5auxLts3FZvKpTDyjBd/VRK6ihkKNKFY3gbP6RbwEK3ws/zOxqFau7sA5i
NGv4siQTWMG++pClz/exbgHPgs3f8yO34ZbocEBdS1sDl1Lsq4qJYo2Kn5MMHCGs
dqd7Y+E+ep6b74njb1m2UsySEE2cjj/FAFH91jfFy5PedNb/2Hx6BsPJVb7+N4eI
pehKQQ46XAbsMq6vUtI4Y0rFiBnqvpERqATQ2QhnEh0UmH7wKVQc4MREZfeEqazV
G/JFt5Qnt3jq8p6/qbWlOPKTLGUqGq3RXiJgEy/5i22R2ZDjafiGoG1KsZIVZg39
N25fT8abjPWme6JI3Jv+6gKY8tURoePZcMp/rw0NFs1HtCKUAU6FEOh6uJO7KNie
eE8qG8ItRXVYnP4f8MFyFkHZcJw27d0PT3IrCM1vJwjqgb2j2xWM/8GJDDuUyims
jvLDH1E7ek600H3FT5c9xPcgwfMM8BOdBNu0Evm9sdZBZFket+ytXo6GKyS/d91D
FWE+YL+25+sZJS71dnvSUWVneJrTLFasefvPfIR9/aLJoLVFHnN9sUHfVMj0KlGl
8AuxL7QfNQawvyjoV8rw/sJOQOwwhof1gZz0ZyjuTKj0WekjmDxcRzVY0eX6BzTm
o7r4jrHl1Mi75svnKCpXi0Vu/1ZqSnKjCjhRTXDLm7tb8b18jogsgDfs7UkUNwD/
XF8EfTTU4KotLOODAZIW+soFJZgf8rXQZLRShQmre+PUJfADEUd3yyE9h0JIunPQ
CxR8R8hVhK4yqFn662Ou7fEl3q8FYBBi1Ahn+263S7+WaZGo7ElwzfRb97gP1e77
eYd8JwY7UBIQku83CxQdahdGOpAfyvhYW2mxCHVZLXObwc18VgRMa7vjCbkGRPSN
5NecU5KGW6jU1dXuZk0jRt/9mqtYPjJ7K/EVJD9Yxmz+UdxH+BtsSRp3/5fDmHtW
CB39a7fetp0ixN503FXPKQUvKAKykETwevmWOzHH3t6BpY/ZSjDCC35Y3dWeB54H
qNta1r0pSWV6IARUoVteAOcuOU/l3HNzY80rL+iR0HiaszioBsd8k8u0rWXzM3BP
3vhTzccaldSWfqoT86Jfx0YLX6EoocVS8Ka5KUA8VlJWufnPPXDlF3dULrb+ds/l
zLazt9hF49HCpU1rZc3doRgmBYxMjYyrfK/3uarDefpfdnjbAVIoP26VpVXhLTEM
oaD+WoTpIyLYfJQUDn1Q06Nu393JqZb8nRngyMeTs73MDJTzqdL4rZXyweeTrtYe
4yy+Kc3CZdPlZqpkbuxP0cO0ivaTLdXsTCHDnpk16u4sDukcsmlaTF5d75nu/KIQ
o3nk0g9NvoschDcQiExuqCUOXCkKcUvYVHsuglAuT+AqK692562JrDOVoGwkUVvm
Qfo0AQvBvXUzHY4YuBUdWbjWsC4sj6B+MW/TIs/OlKIbPE3MHeDhEGLl/8uBceVo
kM36xm4F8wDwPK4GPyi/D+3piqBsrsjkgRlodQIUS7A9V19b8TWbUFeH4JGJ+5EH
9WErBlrsQrnosojLchGGp7HxSxWLBiwdnltu6+/hwbBwydJT8ZxPUANIwTdB+mOE
ILUXBkpIDfVSoZD7qWlntai53BDQr5pfMJhv15di4XAhtqv43vAmA57ifd+QJS2U
AfYc4CdX0lk2BZ4jRD8jCZ4Uxw15E3RqhnXsWDRxtD4fwsb2ZFi0DDuPlwBdGgh5
Rm2Bz9JjSV6gDEuXr/JtAzjSz1Jdh8wPkSofiHGTfxysVhlGlg+YPRziRlzML8A2
0xY+9mPxEEin5ZQ9wmrDyiiOBvPTbG3O9+Sp5VZDuD4ivW/DHumPWGVSRdjcAQDe
HMXUVGjhBDnj06XNrgJPhODdJeYq0EnGTt15ofZQSswD7TTTRPDOn0Cz/QARAQAB
tDpCcmlkZ2VEQiAoT2ZmbGluZSBJRCBLZXkpIDxicmlkZ2VzQGJyaWRnZXMudG9y
cHJvamVjdC5vcmc+iQkfBBMBCgEJBQJSWQfkSBSAAAAAABcAKHZlcmlmaWVkQHRv
cnByb2plY3Qub3JnN0I3ODQzNzAxNUU2M0RGNDdCQjEyNzBBQ0JEOTdBQTI0RThF
NDcyRU8UgAAAAAAeAChicmlkZ2VzQGJyaWRnZXMudG9ycHJvamVjdC5vcmc3Qjc4
NDM3MDE1RTYzREY0N0JCMTI3MEFDQkQ5N0FBMjRFOEU0NzJFKhpodHRwczovL2Jy
aWRnZXMudG9ycHJvamVjdC5vcmcvcG9saWN5LnR4dAIbAQMLDQkEFQoJCAQWAgEA
Ah4BAheAJxhodHRwczovL2JyaWRnZXMudG9ycHJvamVjdC5vcmcva2V5LmFzYwAK
CRDL2XqiTo5HLoqEP/48rFpJCVStn8xo+KkHSVvsqpxDRlb/nNheI+ov2UxILdwl
NIU6kLsvKECKPe1AHKdS/MzANbkTF35Y4QgZsNpVXaCVL7adGBSzOdPFupDJJVOu
wa+uFRc/FuNJyH/TIn56/+R5J5C54OxIYNxvW3WF4eHKLJYk/JZOMMfy4iWm7Sql
0nDC5O435nK4F4Jb4GLPlUIzioIy2OWqGoFHXymbGhL1tWaqasYmED4n3AMqlYw6
xnNhdWOc/KZelPl9nanybyh0IIdZqUKZleRt4BxSgIT8FqC2sZuZ8z7O9s987Naz
Q32SKaP4i2M9lai/Y2QYfKo+wlG+egmxtujz7etQMGlpgBZzFLdJ8/w4U11ku1ai
om74RIn8zl/LHjMQHnCKGoVlscTI1ZPt+p+p8hO2/9vOwTR8y8O/3DQSOfTSipwc
a3obRkp5ndjfjefOoAnuYapLw72fhJ9+Co09miiHQu7vq4j5k05VpDQd0yxOAZnG
vodPPhq7/zCG1K9Sb1rS9GvmQxGmgMBWVn+keTqJCZX7TSVgtgua9YjTJNVSiSLv
rLslNkeRfvkfbAbU8379MDB+Bax04HcYTC4syf0uqUXYq6jRtX37Dfq5XkLCk2Bt
WusH2NSpHuzZRWODM9PZb6U3vuBmU1nqY77DciuaeBqGGyrC/6UKrS0DrmVvF/0Z
Sft3BY6Zb3q7Qm7xpzsfrhVlhlyQqZPhr6o7QnGuvwRr+gDwhRbpISKYo89KYwjK
4Qr7sg/CyU2hWBCDOFPOcv/rtE0aD88M+EwRG/LCfEWU34Dc43Jk+dH56/3eVR58
rISHRUcU0Y603Uc+/WM31iJmR/1PvGeal+mhI9YSWUIgIY8Mxt3cM2gYl/OErGbN
4hWAPIFn4sM9Oo4BHpN7J2vkUatpW6v4Mdh+pNxzgE/V5S21SGaAldvM1SzCRz52
xRt642Mhf6jqfrwzXf7kq7jpOlu1HkG1XhCZQPw7qyIKnX4tjaRd9HXhn9Jb2vA5
Av+EOPoAx6Yii5X1RkDILOijvgVfSIFXnflHzs58AhwHztQOUWXDkfS5jVxbenbV
X4DwgtrpzpdPBgBYNtCGBy9pqhWA2XnkH2vjchZy+xIAoaJNIVBfNkR8tflJWEfm
i/2U0jJnhY8dEClbu3KQnbwKe5E9mTz1VmBsdWaK5rBVZamD/wssQzzyf3SXXXIU
W6DUXOCzgWvxvqC09lc4izEAxwUktMY+aapplNs/kjOqHYVkW4zpWGp4PDAT/DW9
/727CeoqY29HePaeGl0/PpR37CkquP69oQeJSU9CNeyAKnQtvaqxLBcEOohSaPtK
Iy1q6yQgT4j+gVAsFDVyobCNkA8B0GfemDcEXA5dfriTHN72Br0koS0nvv6P5k7T
7aaSNX++zdEnPauAZXPPjVt7R1sEvx0Oj+l1pu9hNX0nldaNs13bPU5DIOYv+5fN
En6pqzYGj/0v8Qeb53Qv5de+lo7ZAu/truVa+GOT3ay4jZBaFh2mDZbA+t1V3GmB
FtYGoVfou4iBQpx6tJLg3PKvtPj9g5B4LTxZVKrdfHXWyNNQOLzWSIgFj44+SmhU
LVCXofEvJ0sOX2wtqy54Q4lMIc6BK1IB+hsFV6sSnIeI7YmrRXusWEG0wnroNlbq
FiWg5+oeI1CnnCyj4FmDX/A/Bo0RxD0x3yqDximkOpcHMtLLfFjK3d5ltwBgDOOe
pvgabxID01mZxh3OYKdGpW3Z1VKEhHjF5e9BhhEKQ8mG3raaDs2hQ2iuEqTzNLif
aQdRCYd62jS14qSy2Dd+oZ0FbgzJNigWldvuwWzJCO2toF29pvfWtYRuqV/Vt3CK
iO7en9bhOMRynPlCkAR7ZiZvT9dzStiMKf1v8mzgRjCIiLIwM1v/xNZWEZ/TOfSR
E7dBMbDzaNjtCsMmNiyplqCjWbaj4irdIhKbtKJ02a1Jopo1/XNK0Y8AbK1xEHV0
+mjBYU/Pfqnf0WFhkJgha+J17wqrUxf2/Y1b/pdDMGqVWe9+p8tvSP5FNddNyecZ
0pojFH0jAzHpQen7eeIA3XupVe6cTEhNz4OjHBlZE6dN0q8UDdeG75yPunwShQiO
kRXA/qxkID/2OLIInWJP0HG05hncGfWZKCLBc/dFg3dNo8VKpw/Q6uMBj2iGi8iB
lnQGmHQa3j1ANPbcl3ljdJQDEnxk5TEVxNPYUw/BI58l3p+Z3vAZqC0Io7EgpuZ8
qPuV6hJ2c/7VuFAXVs2mUExtWAjbgnYAfsJtn1yk3sphl65TjPnZwaBlP/ls/W/j
mVjAx9d5b3mmMBJmNZDvY1QvcftDgfL5vYG5g7UwsbojuNxeM4rwn8qCKk5wC1/a
Zl6Rh2DG4xS3/ef5tQWw28grjRRwv5phYKtedsKpYRscKAMhiOsChAiSYuCRczmI
ErdO8ryK8QNzcpE4qVzFQMEtkG6V0RYYjMJzJuY5BW3hKt1UNNaqiGBpNKuf0GoO
zK/vMgxoo+iFmOuaBdQEjlPLbK+3k+7j14KKVI655AXVKyAsOoSYPzOqfkdiu9W8
34fOanH7S+lclkXwxTbXko9Jt6Ml64H4QKwd8ak2nCcX9FuMge7XP9VL/pBBMXcB
WHUKdoqMJExcg5A4H2cyxZ6QgHzNFgqV/4+MGGP+TMc9owzrT3PBadVrMxnHnjc/
/XYv48p2rRkjyjrtH+ZO9rlOsw0OmGgh9yoQPZn2tiNhG9piyvVxFKZflJm8I4kC
4AQTAQoAygUCUlkPIkgUgAAAAAAXACh2ZXJpZmllZEB0b3Jwcm9qZWN0Lm9yZzdC
Nzg0MzcwMTVFNjNERjQ3QkIxMjcwQUNCRDk3QUEyNEU4RTQ3MkVPFIAAAAAAHgAo
YnJpZGdlc0BicmlkZ2VzLnRvcnByb2plY3Qub3JnREY4MTExMDlFMTdDOEJGMTM0
QjVFRUI2OERDNDNBMjg0ODgyMUUzMioaaHR0cHM6Ly9icmlkZ2VzLnRvcnByb2pl
Y3Qub3JnL3BvbGljeS50eHQACgkQjcQ6KEiCHjIaqBAA0BuEs7horx6iCq4cjAhv
YPLrxuC4fKEfVyhAjCJMJSFFCPAlGgU+BjyPNDD57wzKAmUkdJG+Ss25mwWXa53w
5R2kDqDnHocOdZGtxZ7zx/uUd2eWLNBfVuK7nHOk1d1Hs0OZBnckc+MCqnLtuYe5
68pa9+jW6cNIjAnzMIListmoXWgYYWJvMKeBMG4DGtYJ8w7CJQjOHc5yar12DrX3
wnQ7hXtFuuqQblpEUnLnZGvHf2NKMZfBBMcP96h9OmLGNa+vmNYsMyPKU7n5hPgX
nTgmQ4xrv1G7JukjppZRA8SFoxupcaQeTixyWERGBhBiAbwZsbQz8L/TVZKierzg
sdNngHcFzE8MyjuJDvTos7qXPmgSRXFqJLRn0ZxpR5V1V8BVZUqCGuSZT89TizsD
z5vyv8c9r7HKD4pRjw32P2dgcEqyGRkqERAgSuFpObP+juty+kxYyfnadBNCyjgP
s7u0GmsTt4CZi7BbowNRL6bynrwrmQI9LJI1bPhgqfdDUbqG3HXwHz80oRFfKou8
JTYKxK4Iumfw2l/uAACma5ZyrwIDBX/H5XEQqch4sORzQnuhlTmZRf6ldVIIWjdJ
ef+DpOt12s+cS2F4D5g8G6t9CprCLYyrXiHwM/U8N5ywL9IeYKSWJxa7si3l9A6o
ZxOds8F/UJYDSIB97MQFzBo=
=JdC7
-----END PGP PUBLIC KEY BLOCK-----
"""
| 49.614841
| 90
| 0.817143
|
from __future__ import unicode_literals
# 1) We don't use Python2.6, and
# honestly, all of Python2) should die.
from collections import OrderedDict
def _(text):
    """Return *text* unchanged.

    NOTE(review): this appears to be a gettext-style ``_`` marker
    placeholder -- it tags the module's literals as translatable while
    behaving as the identity function here.
    """
    return text
# TRANSLATORS: Please do not translate the word "TYPE".
#: Miscellaneous short strings for the email autoresponder, keyed by index.
#: Entries 2, 6, and 9 are %-format templates filled in by the caller.
EMAIL_MISC_TEXT = {
    0: _("""\
[This is an automated message; please do not reply.]"""),
    1: _("""\
Here are your bridges:"""),
    2: _("""\
You have exceeded the rate limit. Please slow down! The minimum time between
emails is %s hours. All further emails during this time period will be ignored."""),
    3: _("""\
COMMANDs: (combine COMMANDs to specify multiple options simultaneously)"""),
    # TRANSLATORS: Please DO NOT translate the word "BridgeDB".
    4: _("Welcome to BridgeDB!"),
    # TRANSLATORS: Please DO NOT translate the words "transport" or "TYPE".
    5: _("Currently supported transport TYPEs:"),
    6: _("Hey, %s!"),
    7: _("Hello, friend!"),
    8: _("Public Keys"),
    # TRANSLATORS: This string will end up saying something like:
    # "This email was generated with rainbows, unicorns, and sparkles
    # for alice@example.com on Friday, 09 May, 2014 at 18:59:39."
    9: _("""\
This email was generated with rainbows, unicorns, and sparkles
for %s on %s at %s."""),
}
WELCOME = {
    # TRANSLATORS: Please DO NOT translate "BridgeDB".
    # TRANSLATORS: Please DO NOT translate "Pluggable Transports".
    # TRANSLATORS: Please DO NOT translate "Tor".
    # TRANSLATORS: Please DO NOT translate "Tor Network".
    0: _("""\
BridgeDB can provide bridges with several %stypes of Pluggable Transports%s,
which can help obfuscate your connections to the Tor Network, making it more
difficult for anyone watching your internet traffic to determine that you are
using Tor.\n\n"""),
    # TRANSLATORS: Please DO NOT translate "Pluggable Transports".
    1: _("""\
Some bridges with IPv6 addresses are also available, though some Pluggable
Transports aren't IPv6 compatible.\n\n"""),
    # TRANSLATORS: The phrase "plain-ol'-vanilla" means "plain, boring,
    # regular, or unexciting". Like vanilla ice cream. It refers to bridges
    # which do not have Pluggable Transports, and only speak the regular,
    # boring Tor protocol. Translate it as you see fit. Have fun with it.
    2: _("""\
Additionally, BridgeDB has plenty of plain-ol'-vanilla bridges %s without any
Pluggable Transports %s which maybe doesn't sound as cool, but they can still
help to circumvent internet censorship in many cases.\n\n"""),
}
#: Question/answer strings for the FAQ section.
FAQ = {
    0: _("What are bridges?"),
    # NOTE(review): the surrounding "%s ... %s" pair looks like link markup
    # substituted by the caller -- compare the EMAIL_SPRINTF entries below.
    1: _("""\
%s Bridges %s are Tor relays that help you circumvent censorship."""),
}
#: Strings pointing users at the alternative (email) bridge distributor.
OTHER_DISTRIBUTORS = {
    0: _("I need an alternative way of getting bridges!"),
    # The four %s slots are: the distributor's email address, followed by
    # the names of three permitted email providers.
    1: _("""\
Another way to get bridges is to send an email to %s. Please note that you must
send the email using an address from one of the following email providers:
%s, %s or %s."""),
}
#: Strings for the help/support section.
HELP = {
    0: _("My bridges don't work! I need help!"),
    # The ``%s`` here is the support address; see EMAIL_SPRINTF["HELP0"].
    1: _("""If your Tor doesn't work, you should email %s."""),
    # TRANSLATORS: Please DO NOT translate "Pluggable Transports".
    # TRANSLATORS: Please DO NOT translate "Tor Browser".
    # TRANSLATORS: Please DO NOT translate "Tor".
    2: _("""\
Try including as much info about your case as you can, including the list of
bridges and Pluggable Transports you tried to use, your Tor Browser version,
and any messages which Tor gave out, etc."""),
}
#: Labels used when presenting bridge lines to the user.
BRIDGES = {
    0: _("Here are your bridge lines:"),
    1: _("Get Bridges!"),
}
#: Prompts for the bridge-type option selection.
OPTIONS = {
    0: _("Please select options for bridge type:"),
    1: _("Do you need IPv6 addresses?"),
    # NOTE(review): the %s is presumably filled with an option/transport
    # name by the caller -- confirm against the rendering code.
    2: _("Do you need a %s?"),
}
#: Strings shown alongside the CAPTCHA challenge image.
CAPTCHA = {
    0: _('Your browser is not displaying images properly.'),
    1: _('Enter the characters from the image above...'),
}
#: Step-by-step instructions for entering bridges into Tor Browser.
HOWTO_TBB = {
    0: _("""How to start using your bridges"""),
    # TRANSLATORS: Please DO NOT translate "Tor Browser".
    # The "%s ... %s" pair wraps the download-page link; the replacements
    # come from EMAIL_SPRINTF["HOWTO_TBB1"].
    1: _("""\
To enter bridges into Tor Browser, first go to the %s Tor Browser download
page %s and then follow the instructions there for downloading and starting
Tor Browser."""),
    # TRANSLATORS: Please DO NOT translate "Tor".
    2: _("""\
When the 'Tor Network Settings' dialogue pops up, click 'Configure' and follow
the wizard until it asks:"""),
    # TRANSLATORS: Please DO NOT translate "Tor".
    3: _("""\
Does your Internet Service Provider (ISP) block or otherwise censor connections
to the Tor network?"""),
    # TRANSLATORS: Please DO NOT translate "Tor".
    4: _("""\
Select 'Yes' and then click 'Next'. To configure your new bridges, copy and
paste the bridge lines into the text input box. Finally, click 'Connect', and
you should be good to go! If you experience trouble, try clicking the 'Help'
button in the 'Tor Network Settings' wizard for further assistance."""),
}
#: Commands understood by the email autoresponder, mapped to the help text
#: listed for each of them in the reply to a "get help" request.
EMAIL_COMMANDS = {
    "get help": _("Displays this message."),
    # TRANSLATORS: Please try to make it clear that "vanilla" here refers to the
    # same non-Pluggable Transport bridges described above as being
    # "plain-ol'-vanilla" bridges.
    "get bridges": _("Request vanilla bridges."),
    "get ipv6": _("Request IPv6 bridges."),
    # TRANSLATORS: Please DO NOT translate the word "TYPE".
    "get transport [TYPE]": _("Request a Pluggable Transport by TYPE."),
    # TRANSLATORS: Please DO NOT translate "BridgeDB" or "GnuPG".
    "get key": _("Get a copy of BridgeDB's public GnuPG key."),
    #"subscribe": _("Subscribe to receive new bridges once per week"),
    #"unsubscribe": _("Cancel a subscription to new bridges"),
}
#-----------------------------------------------------------------------------
# All of the following containers are untranslated!
#-----------------------------------------------------------------------------

#: Maps each Pluggable Transport methodname to whether we actively
#: distribute it.
#: ***Don't change this setting here; change it in :file:`bridgedb.conf`.***
SUPPORTED_TRANSPORTS = {}

#: The PT methodname selected by default (e.g. in the webserver dropdown
#: menu).
#: ***Don't change this setting here; change it in :file:`bridgedb.conf`.***
DEFAULT_TRANSPORT = ''

def _getSupportedTransports():
    """Return the sorted, lowercased methodnames of distributed transports."""
    names = [method.lower()
             for method, active in SUPPORTED_TRANSPORTS.items() if active]
    names.sort()
    return names

def _setDefaultTransport(transport):
    """Store *transport* as the default PT methodname."""
    global DEFAULT_TRANSPORT
    DEFAULT_TRANSPORT = transport

def _getDefaultTransport():
    """Return the default PT methodname (may be the empty string)."""
    return DEFAULT_TRANSPORT

def _setSupportedTransports(transports):
    """Store *transports* (a dict of methodname -> bool) as the supported set."""
    global SUPPORTED_TRANSPORTS
    SUPPORTED_TRANSPORTS = transports

def _getSupportedAndDefaultTransports():
    """Return an OrderedDict of supported methodnames in alphabetical order.

    Every entry maps to ``False`` except the default transport (if one is
    configured), which maps to ``True``.
    """
    ordered = OrderedDict.fromkeys(_getSupportedTransports(), False)
    if DEFAULT_TRANSPORT:
        ordered[DEFAULT_TRANSPORT] = True
    return ordered
# Substitution tuples spliced into the translated email paragraphs above.
EMAIL_SPRINTF = {
    # Goes into the "%s types of Pluggable Transports %s" part of ``WELCOME[0]``
    "WELCOME0": ("", "[0]"),
    # Goes into the "%s without Pluggable Transport %s" part of ``WELCOME[2]``
    "WELCOME2": ("-", "-"),
    # For the "%s Tor Browser download page %s" part of ``HOWTO_TBB[1]``
    "HOWTO_TBB1": ("", "[0]"),
    # For the "you should email %s" in ``HELP[0]``
    # NOTE: the parentheses below do NOT make a tuple — this value is a
    # plain string, because ``HELP[0]`` has a single ``%s`` placeholder.
    "HELP0": ("help@rt.torproject.org"),
}
# Reference-style footnote lines; the keys mirror those of EMAIL_SPRINTF so
# each "[0]" marker substituted above gets a matching link line appended.
EMAIL_REFERENCE_LINKS = {
    "WELCOME0": "[0]: https://www.torproject.org/docs/pluggable-transports.html",
    "HOWTO_TBB1": "[0]: https://www.torproject.org/projects/torbrowser.html",
}
BRIDGEDB_OPENPGP_KEY = """\
# This keypair contains BridgeDB's online signing and encryption subkeys. This
# keypair rotates because it is kept online. However, the current online
# keypair will *ALWAYS* be certified by the offline keypair (at the bottom of
# this file).
#
# If you receive an email from BridgeDB, it should be signed with the
# 21B554E95938F4D0 subkey from the following keypair:
# pub 4096R/8DC43A2848821E32 2013-09-11 [expires: 2015-09-11]
# Key fingerprint = DF81 1109 E17C 8BF1 34B5 EEB6 8DC4 3A28 4882 1E32
# uid BridgeDB <bridges@bridges.torproject.org>
# sub 4096R/21B554E95938F4D0 2013-09-11 [expires: 2015-09-11]
# Key fingerprint = 9FE3 9D1A 7438 9223 3B3F 66F2 21B5 54E9 5938 F4D0
# sub 4096R/E7793047C5B54232 2013-09-11 [expires: 2015-09-11]
# Key fingerprint = CFFB 8469 9048 37E7 8CAE 322C E779 3047 C5B5 4232
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFIv8YABEADRqvfLB4xWj3Fz+HEmUUt/qbJnZhqIjo5WBHaBJOmrzx1c9fLN
aYG36Hgo6A7NygI1oQmFnDinSrZAtrPaT63d1Jg49yZwr/OhMaxHYJElMFHGJ876
kLZHmQTysquYKDHhv+fH51t7UVaZ9NkP5cI+V3pqck0DW5DwMsVJXNaU317kk9me
mPJUDMb5FM4d2Vtk1N+54bHJgpgmnukNtpJmRyHRbZBqNMln5nWF7vdZ4u5PGPWj
bA0rPZhayeE3FQ0MHiGL12kHAy30pfg54QfPJDQBCywjABetRE+xaM9TcS+R31Pf
2VbLeb+Km7QpHMwOXI5xZLss9BAWm9EBbmXxuqaRBHyi830jjCrK9UYuzzOqKoUV
Mk1BRelZTFnGPWeVTE+Ps+pwJ0Dwx4ghppJBCoArmEbkNliblxR/2wYOOFi/ZVA4
Zc2ok9T3rBLVg07b7ezFUScGiTnc7ac7hp6r8Qsh09ZbhRr9erK/n194aEvkXTfr
qepwrAE7YeF4YuR206UOFFWDhxWDLbRu0gIWgrevEQu/cvQPrO9uH5fL6Gw/+mNP
Q/NIteejhkDyvyTUKyBu7x+Gls71zT2u/X13eOAJ8IxBkSVRKQ8tRD+oqJkWplOf
+BpaGU+g6u4kT2AzFDxTOupfrYcPvORTAV/V3suys2YQE4x422GASXDivQARAQAB
tClCcmlkZ2VEQiA8YnJpZGdlc0BicmlkZ2VzLnRvcnByb2plY3Qub3JnPokDJQQT
AQoBD0gUgAAAAAAXACh2ZXJpZmllZEB0b3Jwcm9qZWN0Lm9yZ0RGODExMTA5RTE3
QzhCRjEzNEI1RUVCNjhEQzQzQTI4NDg4MjFFMzJPFIAAAAAAHgAoYnJpZGdlc0Bi
cmlkZ2VzLnRvcnByb2plY3Qub3JnREY4MTExMDlFMTdDOEJGMTM0QjVFRUI2OERD
NDNBMjg0ODgyMUUzMioaaHR0cHM6Ly9icmlkZ2VzLnRvcnByb2plY3Qub3JnL3Bv
bGljeS50eHQCGwEDCw0JBBUKCQgEFgIBAAIeAQIXgCcYaHR0cHM6Ly9icmlkZ2Vz
LnRvcnByb2plY3Qub3JnL2tleS5hc2MFAlSKBKIFCQPDTiIACgkQjcQ6KEiCHjIs
jg//bJ12eRnBMfIGzOGh+T4wz7/YyKLfARAMnqDnSxhTxuE+M5hWm3QbxP03R6eY
x+PKwQaDJSmm7HhRhltb7QXUe8dqjnocFwwagpoLZ/81mBLxByqg5TKHGGIGy+DX
omIzCq5ijx1IUkHlgh708a5alG7bjRTqedT4Wxxyl6psGzDhGQdS8bqx/f32nQaE
h41l+A/EY1g2HVqky63ZHAP3S2v+mWCrk5DnkElc0229MXqaBuEr4nbYMXRkahMb
E2gnCmdSoeD21AY6bNyz7IcJGpyKCx9+hVgPjpm3J23JEYyPL+s48jn6QcI/Q2gD
yAtgU65y6IrdYn8SwkABI1FIq9WAwG7DaInxvkqkYqyBQLaZJEMyX8NTBvFoT5JS
jnkxG0xu61Vxq0BLYBIOJE0VFHAJ40/jOvSxQJkQhu9G4BK7htnADbtstmMDMM3q
xuuO5pcj2rl7YthNunyZ1yhPHXijUUyKrwR9piENpptztFBVN6+ijqU/TmWMOtbH
X7p9F+3tXCHHqwO5U/JMtsb/9M39MR8BrdcLc7m6dCpeuSUuR2LLroh+MoMJGviI
iesxHF95kFqkJAecW1Z3eKL9vrlbfO3waeuCi18k1TePnZuG5lmf2KjKDW5vHK4O
WFqvvfK2kxkCUjvGdLeTOAVOV+X+PQ23jvBJO2bS7YbOb9C5Ag0EUi/ygQEQALZ/
p7xRINHY7MMf3/bo/I0WRxWHd1AE9tRToyEg1S2u1YrWWL5M9D8saRsp9cpnpGEu
hW3vu7G4nasY27qOz4bSKu1YMAVIC58v1tEnBqdo1zErNjhs38PrmJKbbs9tDfYY
Oi2x0GlhMbIrNStcZpnCdLa6U6NLMbggDL1GxjMPYBMi4TtLgcIeRDUSjsZscZkg
Kxs5QkSVc3SrYyraayIc8WtIpDLcxPt6/g90rbatZzBfO+93Rz7qUXHmgzuM0hy1
Fvn619o3I5DsWrfOz9t/QuznoOBw4PfzDPNT7VlzZN4xHAcr5+7B+DH9IsvlCt5N
kQFuYpFZCpXNaD2XOtmIqjTCeLNfcgTEj0qoUIEKyKbBIgfP+7S2tLXy8JKUTy5g
9kxXQeHueLykQ4Mt18JH0nMHbHbQl0K3LGT4ucRDOmjNtlQCltVLkIk3GimyqKs/
vdZ9c+dm4Akx1qsJcwvveX+imJe2e9RUodcxWXxWrYnuPa5b5nfR1i+GfV0on/Pt
AQ8gc9CkJpMiq5TQDOFhFP6yQcq77sXuUkEl5qamptedz28E0I693ulnfwcsE80p
xkpIG6n33DZJSEyqgtWjE1P2pnsVfO5ILs3mKLe7bO1v3qMXcCkMCGH/kwzvtowq
YvY4gaZMDZtQFY8U7lI9FdRUvVdeHAB24y291nhzABEBAAGJBYMEGAEKANNIFIAA
AAAAFwAodmVyaWZpZWRAdG9ycHJvamVjdC5vcmdERjgxMTEwOUUxN0M4QkYxMzRC
NUVFQjY4REM0M0EyODQ4ODIxRTMyTxSAAAAAAB4AKGJyaWRnZXNAYnJpZGdlcy50
b3Jwcm9qZWN0Lm9yZ0RGODExMTA5RTE3QzhCRjEzNEI1RUVCNjhEQzQzQTI4NDg4
MjFFMzIqGmh0dHBzOi8vYnJpZGdlcy50b3Jwcm9qZWN0Lm9yZy9wb2xpY3kudHh0
AhsCBQJUigTTBQkDw01SAqTB2CAEGQEKAIEFAlIv8oFPFIAAAAAAHgAoYnJpZGdl
c0BicmlkZ2VzLnRvcnByb2plY3Qub3JnOUZFMzlEMUE3NDM4OTIyMzNCM0Y2NkYy
MjFCNTU0RTk1OTM4RjREMCoaaHR0cHM6Ly9icmlkZ2VzLnRvcnByb2plY3Qub3Jn
L3BvbGljeS50eHQACgkQIbVU6Vk49NDbPw/5ATe/T8+eaToC3v0TYNRH5nveQvzA
WdnshD3lnvfsgDhbilwifKpc5LHKXU3rvb42HH2cu0ckuksdDTvICZD9cJjRq/F+
Mzm0pNCAJg0pQnHaaWFQjw+CHYEoizai3S+iYxhNHeSdA6Ty7xm4+bHNf0Aqblbd
6dKwq9EvjwAI6zZsAHtsmHRUMdrFwGdKae6CSchUT2JQFBPEWMhvzdpDGACWVaSP
sxYKuYg9LgpswGcof+tprRjKRl8MtSh0ufjbVBlTeSKpL5Y+fcTRD3PI8w7Ocr3z
jr6XpYG4SUNHsWwxyu/DTXg76Lk1/+BdaH25hDOAasLUOU7yRL8zD/c7M0FkGXdj
r5I2DEEqwzJ9cPHWjpgb8N9fZLoPFP9JOmKGHINqxNe7TfwiTdD6uDKs/u/QK1U+
o3iYBXBTREdopPUdBTM9wYRUhyGXTEKLhUP3MGpXYlgeYPrSdp76VyN3BzLTbMv+
+7rxyKxL9cWYU0pnXHgPC5nyHX5nqXmhMnkxAD3Bnm8n9XDfgiyTDExqksEh2VXt
yhVfLezylEP2fwtd8/mABBCsTjzZW6FRfRRAjUZWZGFpFg8no1x5JS9uiswRP5g1
qHijNFWpGyTtJWl5VNd0d9+LtVUX1jRpDUpsjZcxqs3fsvw2p+H/zQ1wFvDrsoav
hqOTq+AEnJc7ZG8JEI3EOihIgh4ych8P/3GTyWb29+43YVibbSPPvEv4gFqziC+9
1p92FJ0V4XdaT7TW3qaZVp5edUNwB/jjF0SxBybwaMX2ZIGXOjnjF6/Zby4ynuTX
vZkS1mKRA0KWupB3e9PSMY3ZtssnqpGna/+3qlpxtunW7HcW4nCF/f59WHhlVjaO
MXjtuWj59yB56Dd1sNjwhcNCyp4/NpzGnRW97ZV3Pp4oqIOqcGzCQXkVPcnaqcOh
Cs9vIDJlMtn/IWBzUGimuRllDSSVSWkYkyJcG2NUHUwgXYpLwQz7sScvmCPchf4K
qarpX0FpkUDfqaVVuQ7A2XbPUAVFzIk930G1WzgOuOdg9vhWSEjou+SKrAoMz90w
3xHwEvmPDTTVJQft9ytoRbwZkIPfzzhII3mr4agbORAfzDaj5g/f6CVRdg6D3ME1
Etg9ZrfLgRY993g/arfIME6OOsiNcy5+PunN96Rw0o1xoD+97NmZuQrs/p4Mfn5o
8EwXHutREhahin+3/SV3hz9ReeLYmClq+OVhjPzPdtwZsFoyQyUJoFVHPTuSdChZ
FPaqN68FjlNMugmxnvski3ZDVT7pw3B6otjjaL3rr6q0PC2yhEb2ntb3IFUizHjn
80SmfE1Bqwit7ZHu8r/Gt/0iecGk5h84VzSgiGZGF/7m1i5UMVlNSeWnsInGa5Su
7HSzfMq+YmkzuQINBFIv8p4BEADTOLR5e5NKKRPpjCb4B/8YYkWh+orb70EogIZ6
j5v8d/djLyhjqZ9BIkh41/hYKMwnsa4KkDkTaX0eNu3BFB2zGgZ1GSd7525ESxiq
suXIlAg2pex7bysaFfua0nUx64tmaQm2XArdkj/wI0pbg+idWym3WQQmZLyTTbzl
8rpTEtTt+S2m6z3EeAhEHuNFH16hEDUywlef3EotX3njuFiLqaNvnzUYDxhUvKd2
2K1es1ggispgP+eb1bkMApxecf2rqmSUEcvsuTWip4oGZPBLGDQeNKHkCUVbj4wT
yWDIRtto3wi+4CFPEVzw+htj1cQfTstPqUdG7NSOmLQggedoUdv7AJm4MJJiyEax
l+IAf6Afwrrm3eOSv0PgoUxOrUb9vhIoL8ih8gtiqvQ9qYaRQfQA/w3Z0Su2Yfoc
fQS8Uw99qG+oTgieG6F6ud8+hMZAYVZFqbU+ztzMyDE6h4Hflkt6VNJ0Hk0VoF38
TTs77pHXXBbLD6SzR6tbNuR9r/lbmC8Qf2A1ZAThR0iuGhNRFtUPo28GxakxGdLZ
9kHIxjl7EN/gsmYTwuEhr+yfNtLwtSH0ojeqbDmgufvgh+SITCtyNDAUspjrZYEt
F0NHRpSom2NFVELMqMRydU/ncph1rGZgVp6/zVj6xIlhKmqj5P1y/9B0c4Tu1CzJ
pkJ5wwARAQABiQLpBBgBCgDTSBSAAAAAABcAKHZlcmlmaWVkQHRvcnByb2plY3Qu
b3JnREY4MTExMDlFMTdDOEJGMTM0QjVFRUI2OERDNDNBMjg0ODgyMUUzMk8UgAAA
AAAeAChicmlkZ2VzQGJyaWRnZXMudG9ycHJvamVjdC5vcmdERjgxMTEwOUUxN0M4
QkYxMzRCNUVFQjY4REM0M0EyODQ4ODIxRTMyKhpodHRwczovL2JyaWRnZXMudG9y
cHJvamVjdC5vcmcvcG9saWN5LnR4dAIbDAUCVIoE4QUJA8NNQwAKCRCNxDooSIIe
Mo7JEADDBtQpYxPhbj3MT0xpk96EDlon/5yHVf+cnk1pNisc+HkJsVe1nh7gAWOz
wJKdeqOVpgxiJTxIQdl6mipKwwFi0DreP7h56s1WQkuSSWJzqssAwWHfVAsX13fV
zWd0XyxN/OF9ZKQjX4qwpJ/na631PSwZLvHYhMaZnb9pjNwC5/PEKRmFqLbQT6Px
12miZT6ToPDCczHxJ4BxbEGVU+PtRsHwmTRT3JhxFNDfeVd+uwsQIMidJbUoqVW7
fe2zNd0TaWyz4Rw087oZE2OXdctjvtsu8fzXx6d/tkazI6cUOqoaMTR41KEu5X0T
BpWSAMADBYjNs9QRWXX7ZlsJRUSCX1EKbMhgoL6KIGceIkjH61M/LF6HqDgSgSWt
h+LIYGa+LrB/6819o32QSOSHHJ5+NJrbCSaLgKE/LKnf92V2QbZE8IGY6EOSjHqn
n1+j+CLRKY/kUyvk+1TumTghjg/aDs/8Jv8PvgSWLQ0q1rxHYbX7q9ZJhYC/4LdR
ya/Cho6w2l0N3tV/IMAwvFNHsaiIiiwfoOQbkBUvkyzBwjKt96Ai4I0QKt/63uH0
drQhlJEgIyGkOrorBByVqZAQdnoLENYIu6tDUj0bTbGObKqua4iPlSK3/g40zCm4
9OgcN7A8kFuNpgp2EHqj1/jrwd7mZYKsWTuGiR/7fwXf+4xbvg==
=raCx
-----END PGP PUBLIC KEY BLOCK-----
# The following keypair is BridgeDB's offline certification-only keypair. It
# is used to sign new online signing/encryption keypairs.
#
# If you import this key and mark it as trusted, emails from BridgeDB (if
# signed correctly with the online keypair above) should always be trusted. To
# do this, open a shell and do:
#
# $ curl -O https://bridges.torproject.org/keys
# $ gpg --import keys
# $ gpg --check-sigs 7B78437015E63DF47BB1270ACBD97AA24E8E472E
# $ gpg --edit-key 7B78437015E63DF47BB1270ACBD97AA24E8E472E
#
# Then type 'trust' to set the trust level. Choose a number that you like.
# Next type 'quit'. Finally, to create a local signature which will will not
# be uploaded to keyservers:
#
# $ gpg --lsign-key 7B78437015E63DF47BB1270ACBD97AA24E8E472E
#
# pub 16384R/CBD97AA24E8E472E 2013-10-12
# Key fingerprint = 7B78 4370 15E6 3DF4 7BB1 270A CBD9 7AA2 4E8E 472E
# uid BridgeDB (Offline ID Key) <bridges@bridges.torproject.org>
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQgNBFJZB+QBQADcx7laikgZOZXLm6WH2mClm7KrRChmQAHOmzvRYTElk+hVZJ6g
qSUTdl8fvfhifZPCd3g7nJBtOhQAGlrHmJRXfdf4cTRuD73nggbYQ0NRR9VZ3MIK
ToJDELBhgmWeNKpLcPsTpi2t9qrHf3xxM06OdxOs9lCGtW7XVYnKx3vaRNk6c0ln
De82ZWnZr1eMoPzcjslw7AxI94hIgV1GDwTSpBndv/VwgLeBC5XNCKv0adhO/RSt
fuZOHGT/HfI0U0C3fSTiIu4lJqEd9Qe8LUFQ7wRMrf3KSWwyWNb/OtyMfZ52PEg9
SMWEfpr6aGwQu6yGPsE4SeHsiew5IqCMi64TZ9IcgY0fveiDzMSIAqnWQcxSL0SH
YbwQPxuOc4Rxj/b1umjigBG/Y4rkrxCKIw6M+CRaz203zs9ntOsWfnary/w+hepA
XLjC0yb0cP/oBB6qRyaCk2UTdqq1uWmJ2R/XhZHdZIDabxby6mvQbUQA/NEMOE/B
VrDonP1HNo1xpnY8lltbxdFD/jDikdjIazckMWl/0fri0pyPSdiJdAK2JrUniP9Q
eNbgcx3XvNnfjYjiQjTdqfxCTKpSmnsBNyYng6c4viOr5weBFXwEJq2Nl7+rP5pm
TF1PeiF769z4l2Mrx3X5sQqavTzd2VBMQ6/Kmk9Emxb8e1zyQD6odqJyTi1BBAes
F2BuKLMCVgZWOFSNGDOMoAUMZh0c6sRQtwz3KRBAuxUYm3wQPqG3XpDDcNM5YXgF
wVU8SYVwdFpPYT5XJIv2J2u45XbPma5aR0ynGuAmNptzELHta5cgeWIMVsKQbnPN
M6YTOy5auxLts3FZvKpTDyjBd/VRK6ihkKNKFY3gbP6RbwEK3ws/zOxqFau7sA5i
NGv4siQTWMG++pClz/exbgHPgs3f8yO34ZbocEBdS1sDl1Lsq4qJYo2Kn5MMHCGs
dqd7Y+E+ep6b74njb1m2UsySEE2cjj/FAFH91jfFy5PedNb/2Hx6BsPJVb7+N4eI
pehKQQ46XAbsMq6vUtI4Y0rFiBnqvpERqATQ2QhnEh0UmH7wKVQc4MREZfeEqazV
G/JFt5Qnt3jq8p6/qbWlOPKTLGUqGq3RXiJgEy/5i22R2ZDjafiGoG1KsZIVZg39
N25fT8abjPWme6JI3Jv+6gKY8tURoePZcMp/rw0NFs1HtCKUAU6FEOh6uJO7KNie
eE8qG8ItRXVYnP4f8MFyFkHZcJw27d0PT3IrCM1vJwjqgb2j2xWM/8GJDDuUyims
jvLDH1E7ek600H3FT5c9xPcgwfMM8BOdBNu0Evm9sdZBZFket+ytXo6GKyS/d91D
FWE+YL+25+sZJS71dnvSUWVneJrTLFasefvPfIR9/aLJoLVFHnN9sUHfVMj0KlGl
8AuxL7QfNQawvyjoV8rw/sJOQOwwhof1gZz0ZyjuTKj0WekjmDxcRzVY0eX6BzTm
o7r4jrHl1Mi75svnKCpXi0Vu/1ZqSnKjCjhRTXDLm7tb8b18jogsgDfs7UkUNwD/
XF8EfTTU4KotLOODAZIW+soFJZgf8rXQZLRShQmre+PUJfADEUd3yyE9h0JIunPQ
CxR8R8hVhK4yqFn662Ou7fEl3q8FYBBi1Ahn+263S7+WaZGo7ElwzfRb97gP1e77
eYd8JwY7UBIQku83CxQdahdGOpAfyvhYW2mxCHVZLXObwc18VgRMa7vjCbkGRPSN
5NecU5KGW6jU1dXuZk0jRt/9mqtYPjJ7K/EVJD9Yxmz+UdxH+BtsSRp3/5fDmHtW
CB39a7fetp0ixN503FXPKQUvKAKykETwevmWOzHH3t6BpY/ZSjDCC35Y3dWeB54H
qNta1r0pSWV6IARUoVteAOcuOU/l3HNzY80rL+iR0HiaszioBsd8k8u0rWXzM3BP
3vhTzccaldSWfqoT86Jfx0YLX6EoocVS8Ka5KUA8VlJWufnPPXDlF3dULrb+ds/l
zLazt9hF49HCpU1rZc3doRgmBYxMjYyrfK/3uarDefpfdnjbAVIoP26VpVXhLTEM
oaD+WoTpIyLYfJQUDn1Q06Nu393JqZb8nRngyMeTs73MDJTzqdL4rZXyweeTrtYe
4yy+Kc3CZdPlZqpkbuxP0cO0ivaTLdXsTCHDnpk16u4sDukcsmlaTF5d75nu/KIQ
o3nk0g9NvoschDcQiExuqCUOXCkKcUvYVHsuglAuT+AqK692562JrDOVoGwkUVvm
Qfo0AQvBvXUzHY4YuBUdWbjWsC4sj6B+MW/TIs/OlKIbPE3MHeDhEGLl/8uBceVo
kM36xm4F8wDwPK4GPyi/D+3piqBsrsjkgRlodQIUS7A9V19b8TWbUFeH4JGJ+5EH
9WErBlrsQrnosojLchGGp7HxSxWLBiwdnltu6+/hwbBwydJT8ZxPUANIwTdB+mOE
ILUXBkpIDfVSoZD7qWlntai53BDQr5pfMJhv15di4XAhtqv43vAmA57ifd+QJS2U
AfYc4CdX0lk2BZ4jRD8jCZ4Uxw15E3RqhnXsWDRxtD4fwsb2ZFi0DDuPlwBdGgh5
Rm2Bz9JjSV6gDEuXr/JtAzjSz1Jdh8wPkSofiHGTfxysVhlGlg+YPRziRlzML8A2
0xY+9mPxEEin5ZQ9wmrDyiiOBvPTbG3O9+Sp5VZDuD4ivW/DHumPWGVSRdjcAQDe
HMXUVGjhBDnj06XNrgJPhODdJeYq0EnGTt15ofZQSswD7TTTRPDOn0Cz/QARAQAB
tDpCcmlkZ2VEQiAoT2ZmbGluZSBJRCBLZXkpIDxicmlkZ2VzQGJyaWRnZXMudG9y
cHJvamVjdC5vcmc+iQkfBBMBCgEJBQJSWQfkSBSAAAAAABcAKHZlcmlmaWVkQHRv
cnByb2plY3Qub3JnN0I3ODQzNzAxNUU2M0RGNDdCQjEyNzBBQ0JEOTdBQTI0RThF
NDcyRU8UgAAAAAAeAChicmlkZ2VzQGJyaWRnZXMudG9ycHJvamVjdC5vcmc3Qjc4
NDM3MDE1RTYzREY0N0JCMTI3MEFDQkQ5N0FBMjRFOEU0NzJFKhpodHRwczovL2Jy
aWRnZXMudG9ycHJvamVjdC5vcmcvcG9saWN5LnR4dAIbAQMLDQkEFQoJCAQWAgEA
Ah4BAheAJxhodHRwczovL2JyaWRnZXMudG9ycHJvamVjdC5vcmcva2V5LmFzYwAK
CRDL2XqiTo5HLoqEP/48rFpJCVStn8xo+KkHSVvsqpxDRlb/nNheI+ov2UxILdwl
NIU6kLsvKECKPe1AHKdS/MzANbkTF35Y4QgZsNpVXaCVL7adGBSzOdPFupDJJVOu
wa+uFRc/FuNJyH/TIn56/+R5J5C54OxIYNxvW3WF4eHKLJYk/JZOMMfy4iWm7Sql
0nDC5O435nK4F4Jb4GLPlUIzioIy2OWqGoFHXymbGhL1tWaqasYmED4n3AMqlYw6
xnNhdWOc/KZelPl9nanybyh0IIdZqUKZleRt4BxSgIT8FqC2sZuZ8z7O9s987Naz
Q32SKaP4i2M9lai/Y2QYfKo+wlG+egmxtujz7etQMGlpgBZzFLdJ8/w4U11ku1ai
om74RIn8zl/LHjMQHnCKGoVlscTI1ZPt+p+p8hO2/9vOwTR8y8O/3DQSOfTSipwc
a3obRkp5ndjfjefOoAnuYapLw72fhJ9+Co09miiHQu7vq4j5k05VpDQd0yxOAZnG
vodPPhq7/zCG1K9Sb1rS9GvmQxGmgMBWVn+keTqJCZX7TSVgtgua9YjTJNVSiSLv
rLslNkeRfvkfbAbU8379MDB+Bax04HcYTC4syf0uqUXYq6jRtX37Dfq5XkLCk2Bt
WusH2NSpHuzZRWODM9PZb6U3vuBmU1nqY77DciuaeBqGGyrC/6UKrS0DrmVvF/0Z
Sft3BY6Zb3q7Qm7xpzsfrhVlhlyQqZPhr6o7QnGuvwRr+gDwhRbpISKYo89KYwjK
4Qr7sg/CyU2hWBCDOFPOcv/rtE0aD88M+EwRG/LCfEWU34Dc43Jk+dH56/3eVR58
rISHRUcU0Y603Uc+/WM31iJmR/1PvGeal+mhI9YSWUIgIY8Mxt3cM2gYl/OErGbN
4hWAPIFn4sM9Oo4BHpN7J2vkUatpW6v4Mdh+pNxzgE/V5S21SGaAldvM1SzCRz52
xRt642Mhf6jqfrwzXf7kq7jpOlu1HkG1XhCZQPw7qyIKnX4tjaRd9HXhn9Jb2vA5
Av+EOPoAx6Yii5X1RkDILOijvgVfSIFXnflHzs58AhwHztQOUWXDkfS5jVxbenbV
X4DwgtrpzpdPBgBYNtCGBy9pqhWA2XnkH2vjchZy+xIAoaJNIVBfNkR8tflJWEfm
i/2U0jJnhY8dEClbu3KQnbwKe5E9mTz1VmBsdWaK5rBVZamD/wssQzzyf3SXXXIU
W6DUXOCzgWvxvqC09lc4izEAxwUktMY+aapplNs/kjOqHYVkW4zpWGp4PDAT/DW9
/727CeoqY29HePaeGl0/PpR37CkquP69oQeJSU9CNeyAKnQtvaqxLBcEOohSaPtK
Iy1q6yQgT4j+gVAsFDVyobCNkA8B0GfemDcEXA5dfriTHN72Br0koS0nvv6P5k7T
7aaSNX++zdEnPauAZXPPjVt7R1sEvx0Oj+l1pu9hNX0nldaNs13bPU5DIOYv+5fN
En6pqzYGj/0v8Qeb53Qv5de+lo7ZAu/truVa+GOT3ay4jZBaFh2mDZbA+t1V3GmB
FtYGoVfou4iBQpx6tJLg3PKvtPj9g5B4LTxZVKrdfHXWyNNQOLzWSIgFj44+SmhU
LVCXofEvJ0sOX2wtqy54Q4lMIc6BK1IB+hsFV6sSnIeI7YmrRXusWEG0wnroNlbq
FiWg5+oeI1CnnCyj4FmDX/A/Bo0RxD0x3yqDximkOpcHMtLLfFjK3d5ltwBgDOOe
pvgabxID01mZxh3OYKdGpW3Z1VKEhHjF5e9BhhEKQ8mG3raaDs2hQ2iuEqTzNLif
aQdRCYd62jS14qSy2Dd+oZ0FbgzJNigWldvuwWzJCO2toF29pvfWtYRuqV/Vt3CK
iO7en9bhOMRynPlCkAR7ZiZvT9dzStiMKf1v8mzgRjCIiLIwM1v/xNZWEZ/TOfSR
E7dBMbDzaNjtCsMmNiyplqCjWbaj4irdIhKbtKJ02a1Jopo1/XNK0Y8AbK1xEHV0
+mjBYU/Pfqnf0WFhkJgha+J17wqrUxf2/Y1b/pdDMGqVWe9+p8tvSP5FNddNyecZ
0pojFH0jAzHpQen7eeIA3XupVe6cTEhNz4OjHBlZE6dN0q8UDdeG75yPunwShQiO
kRXA/qxkID/2OLIInWJP0HG05hncGfWZKCLBc/dFg3dNo8VKpw/Q6uMBj2iGi8iB
lnQGmHQa3j1ANPbcl3ljdJQDEnxk5TEVxNPYUw/BI58l3p+Z3vAZqC0Io7EgpuZ8
qPuV6hJ2c/7VuFAXVs2mUExtWAjbgnYAfsJtn1yk3sphl65TjPnZwaBlP/ls/W/j
mVjAx9d5b3mmMBJmNZDvY1QvcftDgfL5vYG5g7UwsbojuNxeM4rwn8qCKk5wC1/a
Zl6Rh2DG4xS3/ef5tQWw28grjRRwv5phYKtedsKpYRscKAMhiOsChAiSYuCRczmI
ErdO8ryK8QNzcpE4qVzFQMEtkG6V0RYYjMJzJuY5BW3hKt1UNNaqiGBpNKuf0GoO
zK/vMgxoo+iFmOuaBdQEjlPLbK+3k+7j14KKVI655AXVKyAsOoSYPzOqfkdiu9W8
34fOanH7S+lclkXwxTbXko9Jt6Ml64H4QKwd8ak2nCcX9FuMge7XP9VL/pBBMXcB
WHUKdoqMJExcg5A4H2cyxZ6QgHzNFgqV/4+MGGP+TMc9owzrT3PBadVrMxnHnjc/
/XYv48p2rRkjyjrtH+ZO9rlOsw0OmGgh9yoQPZn2tiNhG9piyvVxFKZflJm8I4kC
4AQTAQoAygUCUlkPIkgUgAAAAAAXACh2ZXJpZmllZEB0b3Jwcm9qZWN0Lm9yZzdC
Nzg0MzcwMTVFNjNERjQ3QkIxMjcwQUNCRDk3QUEyNEU4RTQ3MkVPFIAAAAAAHgAo
YnJpZGdlc0BicmlkZ2VzLnRvcnByb2plY3Qub3JnREY4MTExMDlFMTdDOEJGMTM0
QjVFRUI2OERDNDNBMjg0ODgyMUUzMioaaHR0cHM6Ly9icmlkZ2VzLnRvcnByb2pl
Y3Qub3JnL3BvbGljeS50eHQACgkQjcQ6KEiCHjIaqBAA0BuEs7horx6iCq4cjAhv
YPLrxuC4fKEfVyhAjCJMJSFFCPAlGgU+BjyPNDD57wzKAmUkdJG+Ss25mwWXa53w
5R2kDqDnHocOdZGtxZ7zx/uUd2eWLNBfVuK7nHOk1d1Hs0OZBnckc+MCqnLtuYe5
68pa9+jW6cNIjAnzMIListmoXWgYYWJvMKeBMG4DGtYJ8w7CJQjOHc5yar12DrX3
wnQ7hXtFuuqQblpEUnLnZGvHf2NKMZfBBMcP96h9OmLGNa+vmNYsMyPKU7n5hPgX
nTgmQ4xrv1G7JukjppZRA8SFoxupcaQeTixyWERGBhBiAbwZsbQz8L/TVZKierzg
sdNngHcFzE8MyjuJDvTos7qXPmgSRXFqJLRn0ZxpR5V1V8BVZUqCGuSZT89TizsD
z5vyv8c9r7HKD4pRjw32P2dgcEqyGRkqERAgSuFpObP+juty+kxYyfnadBNCyjgP
s7u0GmsTt4CZi7BbowNRL6bynrwrmQI9LJI1bPhgqfdDUbqG3HXwHz80oRFfKou8
JTYKxK4Iumfw2l/uAACma5ZyrwIDBX/H5XEQqch4sORzQnuhlTmZRf6ldVIIWjdJ
ef+DpOt12s+cS2F4D5g8G6t9CprCLYyrXiHwM/U8N5ywL9IeYKSWJxa7si3l9A6o
ZxOds8F/UJYDSIB97MQFzBo=
=JdC7
-----END PGP PUBLIC KEY BLOCK-----
"""
| true
| true
|
1c40697e10a1872eee7320fa943aea76e01aa80d
| 7,643
|
py
|
Python
|
pytmx/util_pygame.py
|
unmonk/Project-Infinity
|
63c22fcf4086adf868967cb172434e3ceba4bac5
|
[
"CC0-1.0"
] | null | null | null |
pytmx/util_pygame.py
|
unmonk/Project-Infinity
|
63c22fcf4086adf868967cb172434e3ceba4bac5
|
[
"CC0-1.0"
] | null | null | null |
pytmx/util_pygame.py
|
unmonk/Project-Infinity
|
63c22fcf4086adf868967cb172434e3ceba4bac5
|
[
"CC0-1.0"
] | null | null | null |
import logging
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
try:
from pygame.transform import flip, rotate
import pygame
except ImportError:
logger.error('cannot import pygame (is it installed?)')
raise
import itertools
import pytmx
__all__ = ['load_pygame', 'pygame_image_loader', 'simplify', 'build_rects']
def handle_transformation(tile, flags):
    """Return *tile* with the TMX flip/rotation *flags* applied.

    A diagonal flip is realised as a 270-degree rotation followed by a
    horizontal flip; plain horizontal/vertical flips are applied after.
    """
    transformed = tile
    if flags.flipped_diagonally:
        transformed = flip(rotate(transformed, 270), 1, 0)
    flip_h = flags.flipped_horizontally
    flip_v = flags.flipped_vertically
    if flip_h or flip_v:
        transformed = flip(transformed, flip_h, flip_v)
    return transformed
def smart_convert(original, colorkey, pixelalpha):
    """Return *original* converted to the optimal pixel format for blitting.

    Runs several tests on the surface to pick the best flags/pixel format
    per tile, removing the need for callers to convert() images themselves.
    """
    width, height = original.get_size()
    threshold = 127  # the default alpha threshold for the mask
    # number of pixels in the tile that are not transparent
    opaque_pixels = pygame.mask.from_surface(original, threshold).count()
    if opaque_pixels == width * height:
        # no transparent pixels at all: a plain convert is fastest
        return original.convert()
    if colorkey:
        # transparent pixels, and Tiled set a colorkey
        converted = original.convert()
        converted.set_colorkey(colorkey, pygame.RLEACCEL)
        return converted
    if pixelalpha:
        # transparent pixels, and set for per-pixel alpha
        return original.convert_alpha()
    # transparent pixels, and we won't handle them
    return original.convert()
def pygame_image_loader(filename, colorkey, **kwargs):
    """Return a loader closure that cuts converted tiles out of *filename*.

    The closure accepts an optional source ``rect`` and TMX ``flags`` and
    returns a ready-to-blit surface.
    """
    if colorkey:
        colorkey = pygame.Color('#{0}'.format(colorkey))
    pixelalpha = kwargs.get('pixelalpha', True)
    atlas = pygame.image.load(filename)

    def load_image(rect=None, flags=None):
        if not rect:
            tile = atlas.copy()
        else:
            try:
                tile = atlas.subsurface(rect)
            except ValueError:
                logger.error('Tile bounds outside bounds of tileset image')
                raise
        if flags:
            tile = handle_transformation(tile, flags)
        return smart_convert(tile, colorkey, pixelalpha)

    return load_image
def load_pygame(filename, *args, **kwargs):
    """PYGAME USERS: Use me.

    Load a TMX file, load the images, and return a ready-to-use TiledMap.

    Tile loading is 'smart': tiles without transparent pixels are loaded
    for quick blitting, tiles with transparency get per-pixel alpha, on a
    per-tile, per-image basis. If a colorkey is given (as an argument or
    in the TMX data), per-pixel alpha is not used and returned images have
    their colorkey transparency already applied.

    TL;DR: don't convert() or convert_alpha() the individual tiles — it is
    already done for you.
    """
    kwargs['image_loader'] = pygame_image_loader
    return pytmx.TiledMap(filename, *args, **kwargs)
def build_rects(tmxmap, layer, tileset=None, real_gid=None):
    """Generate a set of non-overlapping rects that represents the
    distribution of the specified gid.

    Useful for generating rects for use in collision detection.

    :param tmxmap: the TiledMap to scan
    :param layer: layer index (int) or layer name (str)
    :param tileset: optional tileset index (int) or tileset name (str)
    :param real_gid: optional raw gid; restrict the search to that gid
    :raises IndexError: if a tileset index is out of range
    :raises ValueError: if a tileset/layer name or the gid cannot be found
    :raises TypeError: if *tileset* or *layer* has an unsupported type
    """
    if isinstance(tileset, int):
        try:
            tileset = tmxmap.tilesets[tileset]
        except IndexError:
            # previously raised a bare IndexError; keep the type but
            # attach the message so callers can see what went wrong
            msg = "Tileset #{0} not found in map {1}.".format(tileset, tmxmap)
            logger.debug(msg)
            raise IndexError(msg)
    elif isinstance(tileset, str):
        try:
            tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
        except IndexError:
            msg = "Tileset \"{0}\" not found in map {1}.".format(tileset, tmxmap)
            logger.debug(msg)
            raise ValueError(msg)
    elif tileset:
        msg = "Tileset must be either a int or string. got: {0}".format(type(tileset))
        logger.debug(msg)
        raise TypeError(msg)
    gid = None
    if real_gid:
        try:
            gid, flags = tmxmap.map_gid(real_gid)[0]
        except IndexError:
            msg = "GID #{0} not found".format(real_gid)
            logger.debug(msg)
            raise ValueError(msg)
    if isinstance(layer, int):
        layer_data = tmxmap.get_layer_data(layer)
    elif isinstance(layer, str):
        try:
            layer = [l for l in tmxmap.tilelayers if l.name == layer].pop()
            layer_data = layer.data
        except IndexError:
            msg = "Layer \"{0}\" not found in map {1}.".format(layer, tmxmap)
            logger.debug(msg)
            raise ValueError(msg)
    else:
        # previously fell through and crashed with NameError on layer_data
        msg = "Layer must be either a int or string. got: {0}".format(type(layer))
        logger.debug(msg)
        raise TypeError(msg)
    p = itertools.product(range(tmxmap.width), range(tmxmap.height))
    if gid:
        points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
    else:
        points = [(x, y) for (x, y) in p if layer_data[y][x]]
    return simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
def simplify(all_points, tilewidth, tileheight):
    """Given a list of points, return a list of rects that represent them.

    kludge:
    "A kludge (or kluge) is a workaround, a quick-and-dirty solution,
    a clumsy or inelegant, yet effective, solution to a problem, typically
    using parts that are cobbled together."
    -- wikipedia

    Turn a list of (x, y) tile coordinates into pygame.Rects; adjacent
    rects will be combined. The rects are blended together over solid
    groups, so if the data looks like:

        0 1 1 1 0 0 0
        0 1 1 0 0 0 0
        0 0 0 0 0 4 0
        0 0 0 0 0 4 0
        0 0 0 0 0 0 0
        0 0 1 1 1 1 1

    you'll get 4 rects that mask the area like this:

        ..######......
        ..####........
        ..........##..
        ..........##..
        ..............
        ....##########

    There may be cases where the number of rectangles is not as low as
    possible, but it is not excessively bad — certainly much better than
    making one rect per tile on the map!

    NOTE: mutates *all_points* (it is emptied) and recurses once per rect.
    """
    def pick_rect(points, rects):
        # origin: the remaining point with the smallest x + y (ties broken
        # by tuple ordering) — i.e. a point near the top-left of the group
        ox, oy = sorted([(sum(p), p) for p in points])[0][1]
        x = ox
        y = oy
        ex = None  # inclusive right edge of the first row, once found
        while 1:
            x += 1
            if not (x, y) in points:
                # first row's width is fixed the first time we fall off it
                if ex is None:
                    ex = x - 1
                # try to extend downward: the next row must also start at ox
                if (ox, y + 1) in points:
                    if x == ex + 1:
                        # current row spans the full width; continue below
                        y += 1
                        x = ox
                    else:
                        # current row ended early; back up and stop
                        y -= 1
                        break
                else:
                    if x <= ex: y -= 1
                    break
        # pixel-space rect returned to the caller
        c_rect = pygame.Rect(ox * tilewidth, oy * tileheight,
                             (ex - ox + 1) * tilewidth,
                             (y - oy + 1) * tileheight)
        rects.append(c_rect)
        # tile-space rect, used only to remove the points this rect covers
        rect = pygame.Rect(ox, oy, ex - ox + 1, y - oy + 1)
        kill = [p for p in points if rect.collidepoint(p)]
        [points.remove(i) for i in kill]
        if points:
            pick_rect(points, rects)
    rect_list = []
    while all_points:
        pick_rect(all_points, rect_list)
    return rect_list
| 29.855469
| 80
| 0.593223
|
import logging
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
try:
from pygame.transform import flip, rotate
import pygame
except ImportError:
logger.error('cannot import pygame (is it installed?)')
raise
import itertools
import pytmx
__all__ = ['load_pygame', 'pygame_image_loader', 'simplify', 'build_rects']
def handle_transformation(tile, flags):
    """Apply the TMX flip/rotation *flags* to *tile* and return the result."""
    if flags.flipped_diagonally:
        # a diagonal flip = rotate 270 degrees, then flip horizontally
        tile = flip(rotate(tile, 270), 1, 0)
    if flags.flipped_horizontally or flags.flipped_vertically:
        tile = flip(tile, flags.flipped_horizontally, flags.flipped_vertically)
    return tile
def smart_convert(original, colorkey, pixelalpha):
    """Return *original* converted to an optimal pixel format for blitting."""
    tile_size = original.get_size()
    threshold = 127  # alpha threshold for the transparency mask
    # count of non-transparent pixels in the tile
    px = pygame.mask.from_surface(original, threshold).count()
    if px == tile_size[0] * tile_size[1]:
        # fully opaque: plain convert is fastest
        tile = original.convert()
    elif colorkey:
        # transparent pixels present and a colorkey was supplied
        tile = original.convert()
        tile.set_colorkey(colorkey, pygame.RLEACCEL)
    elif pixelalpha:
        # transparent pixels present; caller wants per-pixel alpha
        tile = original.convert_alpha()
    else:
        # transparent pixels present but deliberately not handled
        tile = original.convert()
    return tile
def pygame_image_loader(filename, colorkey, **kwargs):
    """Return a loader closure producing converted tiles from *filename*."""
    if colorkey:
        # restores the line truncated after "pygame.Color('" — the '#'
        # inside the string literal was mistaken for a comment marker
        colorkey = pygame.Color('#{0}'.format(colorkey))
    pixelalpha = kwargs.get('pixelalpha', True)
    image = pygame.image.load(filename)
    def load_image(rect=None, flags=None):
        if rect:
            try:
                tile = image.subsurface(rect)
            except ValueError:
                logger.error('Tile bounds outside bounds of tileset image')
                raise
        else:
            tile = image.copy()
        if flags:
            tile = handle_transformation(tile, flags)
        tile = smart_convert(tile, colorkey, pixelalpha)
        return tile
    return load_image
def load_pygame(filename, *args, **kwargs):
    """Load a TMX file with pygame image loading and return a TiledMap."""
    kwargs['image_loader'] = pygame_image_loader
    return pytmx.TiledMap(filename, *args, **kwargs)
def build_rects(tmxmap, layer, tileset=None, real_gid=None):
    """Generate a set of non-overlapping rects that represents the
    distribution of the specified gid; useful for collision detection.

    :param tmxmap: the TiledMap to scan
    :param layer: layer index (int) or layer name (str)
    :param tileset: optional tileset index (int) or tileset name (str)
    :param real_gid: optional raw gid; restrict the search to that gid
    """
    if isinstance(tileset, int):
        try:
            tileset = tmxmap.tilesets[tileset]
        except IndexError:
            # NOTE(review): the message below is only logged at debug level;
            # the bare raise discards it — consider raising IndexError(msg)
            msg = "Tileset #{0} not found in map {1}."
            logger.debug(msg.format(tileset, tmxmap))
            raise IndexError
    elif isinstance(tileset, str):
        try:
            tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
        except IndexError:
            msg = "Tileset \"{0}\" not found in map {1}."
            logger.debug(msg.format(tileset, tmxmap))
            raise ValueError
    elif tileset:
        msg = "Tileset must be either a int or string. got: {0}"
        logger.debug(msg.format(type(tileset)))
        raise TypeError
    gid = None
    if real_gid:
        try:
            # map_gid returns (gid, flags) pairs; only the first is used
            gid, flags = tmxmap.map_gid(real_gid)[0]
        except IndexError:
            msg = "GID #{0} not found"
            logger.debug(msg.format(real_gid))
            raise ValueError
    if isinstance(layer, int):
        layer_data = tmxmap.get_layer_data(layer)
    elif isinstance(layer, str):
        try:
            layer = [l for l in tmxmap.tilelayers if l.name == layer].pop()
            layer_data = layer.data
        except IndexError:
            msg = "Layer \"{0}\" not found in map {1}."
            logger.debug(msg.format(layer, tmxmap))
            raise ValueError
    # NOTE(review): if layer is neither int nor str, layer_data is never
    # bound and the line below raises NameError — likely unintended
    p = itertools.product(range(tmxmap.width), range(tmxmap.height))
    if gid:
        points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
    else:
        points = [(x, y) for (x, y) in p if layer_data[y][x]]
    rects = simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
    return rects
def simplify(all_points, tilewidth, tileheight):
    """Merge a list of (x, y) tile coordinates into a list of pygame.Rects
    covering solid groups of adjacent points.

    NOTE: mutates *all_points* (it is emptied) and recurses once per rect.
    """
    def pick_rect(points, rects):
        # origin: the remaining point with the smallest x + y (ties broken
        # by tuple ordering)
        ox, oy = sorted([(sum(p), p) for p in points])[0][1]
        x = ox
        y = oy
        ex = None  # inclusive right edge of the first row, once found
        while 1:
            x += 1
            if not (x, y) in points:
                if ex is None:
                    ex = x - 1
                # try to extend downward: next row must also start at ox
                if (ox, y + 1) in points:
                    if x == ex + 1:
                        y += 1
                        x = ox
                    else:
                        # current row ended early; back up one row and stop
                        y -= 1
                        break
                else:
                    if x <= ex: y -= 1
                    break
        # pixel-space rect returned to the caller
        c_rect = pygame.Rect(ox * tilewidth, oy * tileheight,
                             (ex - ox + 1) * tilewidth,
                             (y - oy + 1) * tileheight)
        rects.append(c_rect)
        # tile-space rect, used only to remove the points this rect covers
        rect = pygame.Rect(ox, oy, ex - ox + 1, y - oy + 1)
        kill = [p for p in points if rect.collidepoint(p)]
        [points.remove(i) for i in kill]
        if points:
            pick_rect(points, rects)
    rect_list = []
    while all_points:
        pick_rect(all_points, rect_list)
    return rect_list
| true
| true
|
1c406a7714bf55168d96649919c5b080acdc4ae3
| 831
|
py
|
Python
|
kubernetes/test/test_batch_api.py
|
redjohn/python
|
5e512ff564c244c50cab780d821542ed56aa965a
|
[
"Apache-2.0"
] | 1
|
2019-04-14T23:51:35.000Z
|
2019-04-14T23:51:35.000Z
|
kubernetes/test/test_batch_api.py
|
redjohn/python
|
5e512ff564c244c50cab780d821542ed56aa965a
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_batch_api.py
|
redjohn/python
|
5e512ff564c244c50cab780d821542ed56aa965a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.batch_api import BatchApi
class TestBatchApi(unittest.TestCase):
    """Unit test stubs for BatchApi."""

    def setUp(self):
        # BatchApi is imported at module level; instantiate it directly
        self.api = BatchApi()

    def tearDown(self):
        pass

    def test_get_api_group(self):
        """Test case for get_api_group."""
        pass
if __name__ == '__main__':
unittest.main()
| 18.466667
| 105
| 0.683514
|
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.batch_api import BatchApi
class TestBatchApi(unittest.TestCase):
    """BatchApi unit test stubs."""
    def setUp(self):
        self.api = kubernetes.client.apis.batch_api.BatchApi()
    def tearDown(self):
        pass
    def test_get_api_group(self):
        """Test case for get_api_group."""
        pass
| true
| true
|
1c406c26c41e6e23b2dd752576e0d4831ac43142
| 3,211
|
py
|
Python
|
mysite/settings.py
|
ItisMamun/pyshop
|
6b101b26ab4ccec558076951c094c96bc29a5f48
|
[
"Apache-2.0"
] | null | null | null |
mysite/settings.py
|
ItisMamun/pyshop
|
6b101b26ab4ccec558076951c094c96bc29a5f48
|
[
"Apache-2.0"
] | null | null | null |
mysite/settings.py
|
ItisMamun/pyshop
|
6b101b26ab4ccec558076951c094c96bc29a5f48
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b#s*_o(3t3ai_k(c5po@h7a=nj5#vjkd3u7ckhnx@)mi=8fn67'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): this empty value is silently overridden by the
# ALLOWED_HOSTS = ['*'] assignment near the bottom of this module;
# only the later assignment takes effect.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products.apps.ProductsConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templatespublic')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'

# NOTE: this overrides the empty ALLOWED_HOSTS defined earlier in this
# module; '*' accepts any Host header and is unsafe outside development.
ALLOWED_HOSTS = ['*']

# Django's XFrameOptionsMiddleware (enabled in MIDDLEWARE above) only
# accepts 'DENY' or 'SAMEORIGIN'; the previous value '*' would have been
# sent as an invalid X-Frame-Options header that browsers ignore.
X_FRAME_OPTIONS = 'SAMEORIGIN'
| 25.688
| 91
| 0.696979
|
import os

# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from the
# environment before deploying.
SECRET_KEY = 'b#s*_o(3t3ai_k(c5po@h7a=nj5#vjkd3u7ckhnx@)mi=8fn67'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Consolidated: the original assigned ALLOWED_HOSTS = [] here and then
# silently overwrote it with ['*'] at the bottom of the file. One
# assignment keeps the effective value and removes the dead one.
# NOTE(review): accepting every Host header is unsafe outside development.
ALLOWED_HOSTS = ['*']

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'products.apps.ProductsConfig',
]

# Request/response middleware chain; order matters.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            # NOTE(review): 'templatespublic' may be missing a path
            # separator (templates/public?) — confirm the directory name.
            os.path.join(BASE_DIR, 'templatespublic')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'

# NOTE(review): '*' is not a valid X-Frame-Options value — browsers accept
# only 'DENY' or 'SAMEORIGIN', so the emitted header will be ignored. Kept
# as-is to preserve behaviour; if framing from anywhere is intended, drop
# XFrameOptionsMiddleware instead.
X_FRAME_OPTIONS = '*'
| true
| true
|
1c406c5b1a022519d85a101436d9324921b83015
| 985
|
py
|
Python
|
users/views.py
|
Kowies/ToDo-web-app
|
30c0555fbe73464f36fc88c5d9a1d16ac309f327
|
[
"MIT"
] | null | null | null |
users/views.py
|
Kowies/ToDo-web-app
|
30c0555fbe73464f36fc88c5d9a1d16ac309f327
|
[
"MIT"
] | 4
|
2021-03-19T01:07:13.000Z
|
2022-02-10T13:23:35.000Z
|
users/views.py
|
Kowies/ToDo-web-app
|
30c0555fbe73464f36fc88c5d9a1d16ac309f327
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.forms import UserCreationForm
def index(request):
    """Redirect the bare users/ URL to the login page."""
    login_url = reverse('users:login')
    return HttpResponseRedirect(login_url)
def logout_view(request):
    """End the current session and return to the public task list."""
    logout(request)
    return HttpResponseRedirect(reverse('todoapp:tasks'))
def register(request):
    """Register a new user.

    GET renders an empty ``UserCreationForm``; a valid POST creates the
    user, logs it in and redirects to the task index. An invalid POST
    falls through and re-renders the form with its errors.
    """
    if request.method == 'POST':
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            # Re-authenticate with the raw password so login() receives a
            # backend-annotated user object.
            authenticated_user = authenticate(username=new_user.username,
                                              password=request.POST['password1'])
            login(request, authenticated_user)
            return HttpResponseRedirect(reverse('todoapp:index'))
    else:
        # Bug fix: the original only bound `form` for GET and POST, so any
        # other method (e.g. HEAD) raised UnboundLocalError at render time.
        form = UserCreationForm()
    context = {'form': form}
    return render(request, 'users/register.html', context)
| 29.848485
| 73
| 0.698477
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.forms import UserCreationForm
def index(request):
    """Redirect the bare users/ URL to the login page."""
    return HttpResponseRedirect(reverse('users:login'))
def logout_view(request):
    """Log the current user out, then go back to the public task list."""
    logout(request)
    target = reverse('todoapp:tasks')
    return HttpResponseRedirect(target)
def register(request):
    """Register a new user.

    GET renders an empty ``UserCreationForm``; a valid POST creates the
    user, logs it in and redirects to the task index. An invalid POST
    falls through and re-renders the form with its errors.
    """
    if request.method == 'POST':
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            # Re-authenticate with the raw password so login() receives a
            # backend-annotated user object.
            authenticated_user = authenticate(username=new_user.username,
                                              password=request.POST['password1'])
            login(request, authenticated_user)
            return HttpResponseRedirect(reverse('todoapp:index'))
    else:
        # Bug fix: the original only bound `form` for GET and POST, so any
        # other method (e.g. HEAD) raised UnboundLocalError at render time.
        form = UserCreationForm()
    context = {'form': form}
    return render(request, 'users/register.html', context)
| true
| true
|
1c406cd8fdb1878cf53168c030b0169aa8a44417
| 117,677
|
py
|
Python
|
tk_ui/admin_program.py
|
SuperH-0630/HGSSystem
|
4bd0b18cec810df4915fea9473adbea6faea4fe2
|
[
"MIT"
] | null | null | null |
tk_ui/admin_program.py
|
SuperH-0630/HGSSystem
|
4bd0b18cec810df4915fea9473adbea6faea4fe2
|
[
"MIT"
] | null | null | null |
tk_ui/admin_program.py
|
SuperH-0630/HGSSystem
|
4bd0b18cec810df4915fea9473adbea6faea4fe2
|
[
"MIT"
] | null | null | null |
import abc
import datetime
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.filedialog import askdirectory, askopenfilename, asksaveasfilename
from math import ceil
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.axes import Axes
import numpy as np
from matplotlib.colorbar import Colorbar
from matplotlib.figure import Figure
from tool.color import random_color
from tool.typing import *
from tool.tk import make_font, set_tk_disable_from_list
from tool.login import create_uid
from conf import Config
from . import admin
from . import admin_event as tk_event
from sql import DBBit
from sql.user import find_user_by_name
from core.garbage import GarbageType
class AdminProgram(metaclass=abc.ABCMeta):
    """Abstract base class for every page of the admin-station GUI.

    Each program owns one tk.Frame (created on *win*, painted *color*)
    that the station swaps in and out of view.
    """
    def __init__(self, station: "admin.AdminStation", win: Union[tk.Frame, tk.Toplevel, tk.Tk], color: str, title: str):
        self.station = station  # owning station: dialogs and event queue
        self.win = win  # tk container the program frame is created on
        self.color = color
        self.frame = tk.Frame(self.win)
        self.frame['bg'] = color
        self.program_title = title
    @abc.abstractmethod
    def set_disable(self):
        # Disable interactive widgets while a background event runs.
        ...
    @abc.abstractmethod
    def reset_disable(self):
        # Re-enable the widgets disabled by set_disable().
        ...
    @abc.abstractmethod
    def conf_gui(self, n: int = 1):
        # Build/layout the widgets; *n* is an extra zoom factor.
        ...
    def to_program(self):
        # Hook called when the program becomes visible (no-op by default).
        pass
    def leave_program(self):
        # Hook called when the program is hidden (no-op by default).
        pass
    def get_title(self) -> str:
        return self.program_title
    def get_program_frame(self) -> Tuple[str, tk.Frame]:
        # Returns (title, frame) so the station can register the page.
        return self.program_title, self.frame
class WelcomeProgram(AdminProgram):
    """Static landing page with usage instructions for the admin system."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "欢迎页")
        self.title = tk.Label(self.frame)
        self.info = tk.Label(self.frame)
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        # Font sizes scale with the global zoom factor.
        self.title_font_size = int(25 * n)
        self.info_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Configure and place the heading and the help text."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size, weight="bold")
        info_font = make_font(size=self.info_font_size)
        self.title['font'] = title_font
        self.title['bg'] = self.color
        self.title['text'] = '欢迎使用 HGSSystem 管理员系统\n[帮助]'
        self.info['bg'] = self.color
        self.info['font'] = info_font
        self.info['anchor'] = 'nw'
        self.info['justify'] = 'left'
        self.info['text'] = (f'''
HGSSystem 管理者界面:
  1) 点击菜单按钮进入子菜单或程序
  2) 创建 菜单包含创建类的程序
  3) 删除 菜单包含删除类的程序
  4) 搜索 菜单包含数据分析类的程序
  5) 更新 菜单包含数据更新类的程序
  6) 当离开操作系统时请退出登录以确保安全
  7) 只能使用具有管理员权限的账号登陆系统
  8) 只有admin用户可以完成危险操作(例如删除所有垃圾袋数据)
程序的运行:
  1) 在菜单中选中程序后,根据程序界面提示完成操作
  2) 操作过程通常会显示进度条,除非任务执行迅速
  3) 结果通常会被反馈, 且不会自动消失
系统登录:
  1) 仅Manager用户可以登录
        '''.strip())
        self.title.place(relx=0.1, rely=0.0, relwidth=0.8, relheight=0.2)
        self.info.place(relx=0.05, rely=0.21, relwidth=0.90, relheight=0.75)
    def set_disable(self):
        # Nothing interactive on this page.
        pass
    def reset_disable(self):
        pass
class AboutProgram(AdminProgram):
    """Static page that displays the application's about text."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "关于")
        self.title = tk.Label(self.frame)
        self.info = tk.Label(self.frame)
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        # Font sizes scale with the global zoom factor.
        self.title_font_size = int(25 * n)
        self.info_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Configure and place the heading and the about-text body."""
        self.__conf_font(n * Config.tk_zoom)
        # Heading label.
        self.title.config(
            font=make_font(size=self.title_font_size, weight="bold"),
            bg=self.color,
            text='关于 HGSSystem 管理员系统',
        )
        # Body label: the about text comes from the global config.
        self.info.config(
            font=make_font(size=self.info_font_size),
            bg=self.color,
            anchor='nw',
            justify='left',
            text=Config.about_info,
        )
        self.title.place(relx=0.1, rely=0.0, relwidth=0.8, relheight=0.2)
        self.info.place(relx=0.05, rely=0.21, relwidth=0.90, relheight=0.75)

    def set_disable(self):
        # Nothing interactive on this page.
        pass

    def reset_disable(self):
        pass
class CreateUserProgramBase(AdminProgram):
    """Shared page for creating a user from name/password/phone.

    Subclasses call _conf() to choose the frame colour and whether the
    created account gets manager rights.
    """
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.enter_frame = tk.Frame(self.frame)
        # Three rows: username, password, phone number.
        self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(3)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(3)]
        self.var: List[tk.Variable] = [tk.StringVar() for _ in range(3)]
        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]
        self._conf("#FA8072", False)  # default colour / non-manager
        self.__conf_font()
    def _conf(self, bg_color, is_manager: bool):
        # Subclass hook: frame colour and manager flag for created users.
        self.bg_color = bg_color
        self.is_manager = is_manager
        return self
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the three labelled entry rows and the two buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = self.bg_color
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.30)
        height = 0.1
        for lb, text, enter, var in zip(self.title, ["用户名:", "用户密码:", "手机号:"], self.enter, self.var):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = self.bg_color
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.17)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.17)
            height += 0.30
        # Buttons: create the user / compute and show the derived user ID.
        for btn, text, x, func in zip(self.btn,
                                      ["创建用户", "获取用户ID"],
                                      [0.2, 0.6],
                                      [lambda: self.create_by_name(), lambda: self.get_uid()]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
            btn.place(relx=x, rely=0.7, relwidth=0.2, relheight=0.08)
    def __get_info(self) -> Optional[Tuple[uname_t, passwd_t, str]]:
        # Validate the three fields; phone must be exactly 11 digits long.
        # Returns None (after showing a dialog) when validation fails.
        name: uname_t = self.var[0].get()
        passwd: passwd_t = self.var[1].get()
        phone: str = self.var[2].get()
        if len(name) == 0 or len(passwd) == 0 or len(phone) != 11:
            self.station.show_msg("用户创建失败", "请再次尝试, 输入用户名, 用户密码和11位手机号")
            return None
        return name, passwd, phone
    def create_by_name(self):
        """Queue a CreateUserEvent for the validated form contents."""
        res = self.__get_info()
        if res is None:
            return
        name, passwd, phone = res
        event = tk_event.CreateUserEvent(self.station).start(name, passwd, phone, self.is_manager)
        self.station.push_event(event)
    def get_uid(self):
        """Show the user ID derived from name/password/phone (no creation)."""
        res = self.__get_info()
        if res is None:
            return
        name, passwd, phone = res
        uid = create_uid(name, passwd, phone)
        self.station.show_msg("获取用户ID", f"用户名: {name}\n用户ID: {uid}")
    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        set_tk_disable_from_list(self.enter)
    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        set_tk_disable_from_list(self.enter, flat='normal')
class CreateNormalUserProgram(CreateUserProgramBase):
    """Create-user page preconfigured for ordinary (non-manager) accounts."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "创建普通用户")
class CreateManagerUserProgram(CreateUserProgramBase):
    """Create-user page preconfigured for manager accounts."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "创建管理员")
        # Blue frame + manager rights for the created account.
        self._conf("#4b5cc4", True)
class CreateAutoNormalUserProgram(AdminProgram):
    """Page that creates an 'auto' user from only an 11-digit phone number.

    Name and password are left to the backend (passed as None to the
    CreateUserEvent).
    """

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "创建自动用户")
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.btn: tk.Button = tk.Button(self.frame)  # create (generate the user)
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Lay out the phone-number entry row and the create button."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#bce672"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.12)
        self.title['font'] = title_font
        self.title['text'] = "手机号:"
        self.title['bg'] = "#bce672"
        self.title['anchor'] = 'e'
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.02, rely=0.25, relwidth=0.25, relheight=0.50)
        self.enter.place(relx=0.30, rely=0.25, relwidth=0.60, relheight=0.50)
        self.btn['font'] = btn_font
        self.btn['text'] = "创建用户"
        self.btn['bg'] = Config.tk_btn_bg
        self.btn['command'] = lambda: self.create_user()
        self.btn.place(relx=0.4, rely=0.7, relwidth=0.2, relheight=0.08)

    def create_user(self):
        """Validate the phone number and queue the creation event."""
        phone = self.var.get()
        if len(phone) != 11:
            self.station.show_msg("UserInfoError", "Please, enter Phone(11)")
            # Bug fix: the original fell through after showing the error
            # and created the user with an invalid phone number anyway.
            return
        event = tk_event.CreateUserEvent(self.station).start(None, None, phone, False)
        self.station.push_event(event)

    def set_disable(self):
        self.btn['state'] = 'disable'
        self.enter['state'] = 'disable'

    def reset_disable(self):
        self.btn['state'] = 'normal'
        self.enter['state'] = 'normal'
class CreateGarbageProgram(AdminProgram):
    """Page that creates N garbage bags and optionally exports QR codes."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "创建垃圾袋")
        self.enter_frame = tk.Frame(self.frame)
        # Two rows: bag count and export directory.
        self.title: List[tk.Label] = [tk.Label(self.enter_frame), tk.Label(self.enter_frame)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame), tk.Entry(self.enter_frame)]
        self.var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]
        self.create_btn: tk.Button = tk.Button(self.frame)
        self.file_btn: tk.Button = tk.Button(self.frame)
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the two entry rows and the create/choose-dir buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#b69968"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.17)
        height = 0.1
        for lb, text, enter, var in zip(self.title, ["数量:", "导出位置:"], self.enter, self.var):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#b69968"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            height += 0.43
        for btn, text, x, func in zip([self.create_btn, self.file_btn],
                                      ["创建垃圾袋", "选择目录"],
                                      [0.2, 0.6],
                                      [lambda: self.create_garbage(), lambda: self.choose_file()]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
            btn.place(relx=x, rely=0.7, relwidth=0.2, relheight=0.08)
    def choose_file(self):
        # Pick the QR-code export directory via a native dialog.
        path = askdirectory(title='选择二维码导出位置')
        self.var[1].set(path)
    def create_garbage(self):
        """Validate the count (positive int) and queue the creation event."""
        try:
            count = int(self.var[0].get())
            if count <= 0:
                raise ValueError
        except (ValueError, TypeError):
            self.station.show_msg("类型错误", "数量必须为大于0的数字")
        else:
            path = self.var[1].get()
            if len(path) == 0:
                # Empty path means "create only, no QR export".
                path = None
            event = tk_event.CreateGarbageEvent(self.station).start(path, count)
            self.station.push_event(event)
    def set_disable(self):
        self.create_btn['state'] = 'disable'
        self.file_btn['state'] = 'disable'
        set_tk_disable_from_list(self.enter)
    def reset_disable(self):
        self.create_btn['state'] = 'normal'
        self.file_btn['state'] = 'normal'
        set_tk_disable_from_list(self.enter, flat='normal')
class ExportProgramBase(AdminProgram):
    """Shared page for QR-code export, by ID or by an SQL condition.

    Subclasses call _conf() with the colour and label texts and override
    export_id() / export_where().
    """
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        # "by ID" widget group.
        self.gid_frame = tk.Frame(self.frame)
        self.gid_title: List[tk.Label] = [tk.Label(self.gid_frame), tk.Label(self.gid_frame)]
        self.gid_enter: List[tk.Entry] = [tk.Entry(self.gid_frame), tk.Entry(self.gid_frame)]
        self.gid_var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]
        # "by condition" widget group.
        self.where_frame = tk.Frame(self.frame)
        self.where_title: List[tk.Label] = [tk.Label(self.where_frame), tk.Label(self.where_frame)]
        self.where_enter: List[tk.Entry] = [tk.Entry(self.where_frame), tk.Entry(self.where_frame)]
        self.where_var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]
        # Index 0 = "by ID" button, index 1 = "by condition" button.
        self.create_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.file_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self._conf("", [], [], [])
        self.__conf_font()
    def _conf(self, bg_color: str, title_id, title_where, title_command):
        # Subclass hook: colour plus the label/button caption lists.
        self.bg_color = bg_color
        self.title_id = title_id
        self.title_where = title_where
        self.title_command = title_command
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out both widget groups and wire up the four buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.where_frame['bg'] = self.bg_color
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.2, relwidth=0.6, relheight=0.17)
        self.gid_frame['bg'] = self.bg_color
        self.gid_frame['bd'] = 5
        self.gid_frame['relief'] = "ridge"
        self.gid_frame.place(relx=0.2, rely=0.6, relwidth=0.6, relheight=0.17)
        height = 0.1
        # Configure the ID group and the condition group in lockstep.
        for lb, text, enter, var, lb_w, text_w, enter_w, var_w in zip(
                self.gid_title, self.title_id, self.gid_enter, self.gid_var,
                self.where_title, self.title_where, self.where_enter, self.where_var):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = self.bg_color
            lb['anchor'] = 'e'
            lb_w['font'] = title_font
            lb_w['text'] = text_w
            lb_w['bg'] = self.bg_color
            lb_w['anchor'] = 'e'
            enter['textvariable'] = var
            enter['font'] = title_font
            enter_w['textvariable'] = var_w
            enter_w['font'] = title_font
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            lb_w.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter_w.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            height += 0.43
        for btn, text in zip(self.create_btn + self.file_btn, self.title_command):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
        self.create_btn[1]['command'] = self.export_where
        self.create_btn[0]['command'] = self.export_id
        self.create_btn[1].place(relx=0.2, rely=0.39, relwidth=0.25, relheight=0.08)
        self.create_btn[0].place(relx=0.2, rely=0.79, relwidth=0.25, relheight=0.08)
        self.file_btn[1]['command'] = self.choose_file_where
        self.file_btn[0]['command'] = self.choose_file_id
        self.file_btn[1].place(relx=0.6, rely=0.39, relwidth=0.2, relheight=0.08)
        self.file_btn[0].place(relx=0.6, rely=0.79, relwidth=0.2, relheight=0.08)
    def choose_file_id(self):
        # Export directory for the "by ID" group.
        path = askdirectory(title='选择二维码导出位置')
        self.gid_var[1].set(path)
    def choose_file_where(self):
        # Export directory for the "by condition" group.
        path = askdirectory(title='选择二维码导出位置')
        self.where_var[1].set(path)
    def export_id(self):
        # Subclass hook: export one record by its ID.
        ...
    def export_where(self):
        # Subclass hook: export every record matching an SQL condition.
        ...
    def set_disable(self):
        set_tk_disable_from_list(self.gid_enter)
        set_tk_disable_from_list(self.create_btn)
        set_tk_disable_from_list(self.file_btn)
    def reset_disable(self):
        set_tk_disable_from_list(self.gid_enter, flat='normal')
        set_tk_disable_from_list(self.create_btn, flat='normal')
        set_tk_disable_from_list(self.file_btn, flat='normal')
class ExportGarbageProgram(ExportProgramBase):
    """Export garbage-bag QR codes, by bag ID or by an SQL condition."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "导出垃圾袋二维码")
        self._conf("#afdfe4", ["垃圾袋ID:", "导出位置:"], ["条件:", "导出位置:"],
                   ["根据垃圾袋ID导出", "根据条件导出", "选择目录", "选择目录"])
    def export_id(self):
        """Queue export of a single bag's QR code; path is required."""
        gid = self.gid_var[0].get()
        path = self.gid_var[1].get()
        if len(path) == 0:
            self.station.show_warning("导出失败", "请指定导出的位置")
            return
        event = tk_event.ExportGarbageByIDEvent(self.station).start(path, gid)
        self.station.push_event(event)
    def export_where(self):
        """Queue export of all bags matching the SQL condition."""
        where = self.where_var[0].get()
        path = self.where_var[1].get()
        if len(path) == 0:
            self.station.show_warning("导出失败", "请指定导出的位置")
            return
        event = tk_event.ExportGarbageAdvancedEvent(self.station).start(path, where)
        self.station.push_event(event)
class ExportUserProgram(ExportProgramBase):
    """Export user QR codes, by user ID or by an SQL condition."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "导出用户二维码")
        self._conf("#f69c9f", ["用户ID:", "导出位置:"], ["条件:", "导出位置:"],
                   ["根据用户ID导出", "根据条件导出", "选择目录", "选择目录"])

    def export_id(self):
        """Queue export of a single user's QR code; path is required."""
        uid = self.gid_var[0].get()
        path = self.gid_var[1].get()
        if not path:
            self.station.show_warning("导出失败", "请指定导出的位置")
            return
        self.station.push_event(
            tk_event.ExportUserByIDEvent(self.station).start(path, uid))

    def export_where(self):
        """Queue export of all users matching the SQL condition."""
        where = self.where_var[0].get()
        path = self.where_var[1].get()
        if not path:
            self.station.show_warning("导出失败", "请指定导出的位置")
            return
        self.station.push_event(
            tk_event.ExportUserAdvancedEvent(self.station).start(path, where))
class CreateUserFromCSVProgram(AdminProgram):
    """Page that bulk-creates users (normal or auto) from a CSV file."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "从CSV导入用户")
        # "auto user" CSV row.
        self.auto_frame = tk.Frame(self.frame)
        self.auto_title: tk.Label = tk.Label(self.auto_frame)
        self.auto_enter: tk.Entry = tk.Entry(self.auto_frame)
        self.auto_var: tk.Variable = tk.StringVar()
        # "normal user" CSV row.
        self.enter_frame = tk.Frame(self.frame)
        self.path_title: tk.Label = tk.Label(self.enter_frame)
        self.path_enter: tk.Entry = tk.Entry(self.enter_frame)
        self.path_var: tk.Variable = tk.StringVar()
        # Index 0 = normal users, index 1 = auto users.
        self.create_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.file_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the two CSV-path rows and the four buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#EEE8AA"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.2, relwidth=0.6, relheight=0.12)
        self.auto_frame['bg'] = "#EEE8AA"
        self.auto_frame['bd'] = 5
        self.auto_frame['relief'] = "ridge"
        self.auto_frame.place(relx=0.2, rely=0.6, relwidth=0.6, relheight=0.12)
        self.auto_title['font'] = title_font
        self.auto_title['text'] = "CSV文件:"
        self.auto_title['bg'] = "#EEE8AA"
        self.auto_title['anchor'] = 'e'
        self.path_title['font'] = title_font
        self.path_title['text'] = "CSV文件:"
        self.path_title['bg'] = "#EEE8AA"
        self.path_title['anchor'] = 'e'
        self.auto_enter['textvariable'] = self.auto_var
        self.auto_enter['font'] = title_font
        self.path_enter['textvariable'] = self.path_var
        self.path_enter['font'] = title_font
        self.auto_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.auto_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        self.path_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.path_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for btn, text in zip(self.create_btn + self.file_btn,
                             ["创建用户", "创建自动用户", "选择CSV", "选择CSV"]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
        self.create_btn[0]['command'] = self.create
        self.create_btn[1]['command'] = self.create_auto
        self.create_btn[0].place(relx=0.2, rely=0.34, relwidth=0.25, relheight=0.08)
        self.create_btn[1].place(relx=0.2, rely=0.74, relwidth=0.25, relheight=0.08)
        self.file_btn[0]['command'] = self.choose_file
        self.file_btn[1]['command'] = self.choose_file_auto
        self.file_btn[0].place(relx=0.6, rely=0.34, relwidth=0.2, relheight=0.08)
        self.file_btn[1].place(relx=0.6, rely=0.74, relwidth=0.2, relheight=0.08)
    def choose_file_auto(self):
        # CSV picker for the auto-user row.
        path = askopenfilename(title='选择CSV文件', filetypes=[("CSV", ".csv")])
        self.auto_var.set(path)
    def choose_file(self):
        # CSV picker for the normal-user row.
        path = askopenfilename(title='选择CSV文件', filetypes=[("CSV", ".csv")])
        self.path_var.set(path)
    def create_auto(self):
        """Queue auto-user creation from the chosen CSV file."""
        path = self.auto_var.get()
        event = tk_event.CreateAutoUserFromCSVEvent(self.station).start(path)
        self.station.push_event(event)
    def create(self):
        """Queue normal-user creation from the chosen CSV file."""
        path = self.path_var.get()
        event = tk_event.CreateUserFromCSVEvent(self.station).start(path)
        self.station.push_event(event)
    def set_disable(self):
        self.auto_enter['state'] = 'disable'
        self.path_enter['state'] = 'disable'
        set_tk_disable_from_list(self.create_btn)
        set_tk_disable_from_list(self.file_btn)
    def reset_disable(self):
        self.auto_enter['state'] = 'normal'
        self.path_enter['state'] = 'normal'
        set_tk_disable_from_list(self.create_btn, flat='normal')
        set_tk_disable_from_list(self.file_btn, flat='normal')
class DeleteUserProgram(AdminProgram):
    """Page that deletes one user, by user ID or by name+password."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "删除用户")
        # "by user ID" widget group.
        self.uid_frame = tk.Frame(self.frame)
        self.uid_title: tk.Label = tk.Label(self.uid_frame)
        self.uid_enter: tk.Entry = tk.Entry(self.uid_frame)
        self.uid_var: tk.Variable = tk.StringVar()
        # "by name + password" widget group.
        self.name_frame = tk.Frame(self.frame)
        self.name_title: List[tk.Label] = [tk.Label(self.name_frame) for _ in range(2)]
        self.name_enter: List[tk.Entry] = [tk.Entry(self.name_frame) for _ in range(2)]
        self.name_var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]
        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]  # delete-by-uid, delete-by-name+password
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out both input groups and their delete buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.uid_frame['bg'] = "#FA8072"
        self.uid_frame['bd'] = 5
        self.uid_frame['relief'] = "ridge"
        self.uid_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)
        self.name_frame['bg'] = "#FA8072"
        self.name_frame['bd'] = 5
        self.name_frame['relief'] = "ridge"
        self.name_frame.place(relx=0.2, rely=0.48, relwidth=0.6, relheight=0.25)
        height = 0.17
        for lb, text, enter, var in zip(self.name_title, ["用户名:", "密码:"], self.name_enter, self.name_var):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#FA8072"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.20)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.20)
            height += 0.45
        self.uid_title['font'] = title_font
        self.uid_title['text'] = "用户ID:"
        self.uid_title['bg'] = "#FA8072"
        self.uid_title['anchor'] = 'e'
        self.uid_enter['font'] = title_font
        self.uid_enter['textvariable'] = self.uid_var
        self.uid_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.uid_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for btn, text, func in zip(self.btn,
                                   ["通过用户ID删除", "通过用户名删除"],
                                   [lambda: self.del_by_uid(), lambda: self.del_by_name()]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
        self.btn[0].place(relx=0.6, rely=0.32, relwidth=0.2, relheight=0.08)
        self.btn[1].place(relx=0.6, rely=0.75, relwidth=0.2, relheight=0.08)
    def del_by_uid(self):
        """Queue deletion of the user with the given 32-char user ID."""
        uid = self.uid_var.get()
        if len(uid) != 32:
            self.station.show_warning("用户ID错误", "用户ID必须为32位")
            return
        event = tk_event.DelUserEvent(self.station).start(uid)
        self.station.push_event(event)
    def del_by_name(self):
        """Derive the user ID from name+password, then queue deletion."""
        name = self.name_var[0].get()
        passwd = self.name_var[1].get()
        if len(name) == 0 or len(passwd) == 0:
            self.station.show_warning("用户名或密码错误", "请输入用户名和密码")
            return
        uid = create_uid(name, passwd)
        event = tk_event.DelUserEvent(self.station).start(uid)
        self.station.push_event(event)
    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        set_tk_disable_from_list(self.name_enter)
        self.uid_enter['state'] = 'disable'
    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        set_tk_disable_from_list(self.name_enter, flat='normal')
        self.uid_enter['state'] = 'normal'
class DeleteUsersProgram(AdminProgram):
    """Page that deletes (or previews) every user matching an SQL condition."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "删除多个用户")
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]  # delete, scan(preview)
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the condition entry and the delete/scan buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#48c0a3"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.30, relwidth=0.6, relheight=0.10)
        self.title['font'] = title_font
        self.title['text'] = "条件:"
        self.title['anchor'] = 'e'
        self.title['bg'] = "#48c0a3"
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for btn, text, x, func in zip(self.btn,
                                      ["删除", "扫描"],
                                      [0.2, 0.6],
                                      [lambda: self.delete_user(), lambda: self.scan_user()]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
            btn.place(relx=x, rely=0.6, relwidth=0.2, relheight=0.08)
    def delete_user(self):
        """Queue deletion of all users matching the SQL condition."""
        where = self.var.get()
        if len(where) == 0:
            self.station.show_warning("条件错误", "条件必须为正确的SQL语句")
            return
        event = tk_event.DelUserFromWhereEvent(self.station).start(where)
        self.station.push_event(event)
    def scan_user(self):
        """Queue a dry-run listing of the users the condition would match."""
        where = self.var.get()
        if len(where) == 0:
            self.station.show_warning("条件错误", "条件必须为正确的SQL语句")
            return
        event = tk_event.DelUserFromWhereScanEvent(self.station).start(where)
        self.station.push_event(event)
    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'
    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
class DeleteGarbageProgramBase(AdminProgram):
def __init__(self, station, win, color, title: str):
super().__init__(station, win, color, title)
self.enter_frame = tk.Frame(self.frame)
self.title: tk.Label = tk.Label(self.enter_frame)
self.enter: tk.Entry = tk.Entry(self.enter_frame)
self.var: tk.Variable = tk.StringVar()
self.int_var: tk.Variable = tk.IntVar()
self.int_var.set(0)
self.radio: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]
self.btn: tk.Button = tk.Button(self.frame)
self.__conf_font()
self._conf()
def _conf(self, title: str = "垃圾袋ID:", color: str = "#b69968", support_del_all: bool = True):
self.frame_title = title
self.frame_color = color
self.support_del_all = support_del_all
def __conf_font(self, n: int = Config.tk_zoom):
self.title_font_size = int(16 * n)
self.btn_font_size = int(14 * n)
def conf_gui(self, n: int = 1):
self.__conf_font(n * Config.tk_zoom)
title_font = make_font(size=self.title_font_size)
btn_font = make_font(size=self.btn_font_size)
self.enter_frame['bg'] = self.frame_color
self.enter_frame['bd'] = 5
self.enter_frame['relief'] = "ridge"
self.enter_frame.place(relx=0.2, rely=0.30, relwidth=0.6, relheight=0.10)
self.title['font'] = title_font
self.title['text'] = self.frame_title
self.title['bg'] = self.frame_color
self.title['anchor'] = 'e'
self.enter['font'] = title_font
self.enter['textvariable'] = self.var
self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
for i in range(4):
radio = self.radio[i]
radio['font'] = btn_font
radio['text'] = ['均可', '仅未使用', '仅待检测', '仅已检测'][i]
radio['bg'] = self.color
radio['value'] = i
radio['variable'] = self.int_var
radio['anchor'] = 'w'
if not self.support_del_all:
self.int_var.set(1)
self.radio[0]['state'] = 'disable'
self.radio[0].place(relx=0.20, rely=0.43, relwidth=0.20, relheight=0.1)
self.radio[1].place(relx=0.60, rely=0.43, relwidth=0.20, relheight=0.1)
self.radio[2].place(relx=0.20, rely=0.55, relwidth=0.20, relheight=0.1)
self.radio[3].place(relx=0.60, rely=0.55, relwidth=0.20, relheight=0.1)
self.btn['font'] = btn_font
self.btn['text'] = '删除'
self.btn['bg'] = Config.tk_btn_bg
self.btn['command'] = lambda: self.delete_garbage()
self.btn.place(relx=0.4, rely=0.68, relwidth=0.2, relheight=0.08)
    def delete_garbage(self):
        """Hook invoked by the delete button; subclasses implement the deletion."""
        ...
def set_disable(self):
self.enter['state'] = 'disable'
self.btn['state'] = 'disable'
def reset_disable(self):
self.enter['state'] = 'normal'
self.btn['state'] = 'normal'
class DeleteGarbageProgram(DeleteGarbageProgramBase):
    """Admin program that deletes a single garbage bag by its ID."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "删除垃圾袋")

    def delete_garbage(self):
        """Validate the entered bag ID and queue a single-bag deletion event."""
        where = self.int_var.get()
        assert where in (0, 1, 2, 3)
        gid = self.var.get()
        if not gid:
            self.station.show_warning("垃圾袋ID错误", "请输入正确的垃圾袋ID")
            return
        self.station.push_event(tk_event.DelGarbageEvent(self.station).start(gid, where))
class DeleteGarbageMoreProgram(DeleteGarbageProgramBase):
    """Admin program that deletes every garbage bag matching an SQL condition.

    Adds a "scan" button that previews the bags the condition would delete.
    """

    def __init__(self, station, win, color):
        super(DeleteGarbageMoreProgram, self).__init__(station, win, color, "删除多个垃圾袋")
        self.scan_btn = tk.Button(self.frame)  # preview button beside the delete button
        self._conf("条件:", "#f58f98", False)  # delete-all scope (0) is not allowed here

    def conf_gui(self, n: int = 1):
        """Lay out the base widgets, then shift the delete button left and add scan."""
        super(DeleteGarbageMoreProgram, self).conf_gui(n)
        # Re-place the delete button to make room for the scan button.
        self.btn.place_forget()
        self.btn.place(relx=0.2, rely=0.68, relwidth=0.2, relheight=0.08)
        self.scan_btn['font'] = make_font(size=self.btn_font_size)
        self.scan_btn['text'] = '扫描'
        self.scan_btn['bg'] = Config.tk_btn_bg
        self.scan_btn['command'] = self.scan_garbage
        self.scan_btn.place(relx=0.6, rely=0.68, relwidth=0.2, relheight=0.08)

    def set_disable(self):
        super(DeleteGarbageMoreProgram, self).set_disable()
        self.scan_btn['state'] = 'disable'

    def reset_disable(self):
        super(DeleteGarbageMoreProgram, self).reset_disable()
        self.scan_btn['state'] = 'normal'

    def _get_condition(self):
        """Return ``(where, where_sql)`` from the GUI, or None after warning.

        Shared validation for delete_garbage and scan_garbage, which previously
        duplicated this logic line for line.
        """
        where = self.int_var.get()
        assert where in [1, 2, 3]
        where_sql = self.var.get()
        if len(where_sql) == 0:
            self.station.show_warning("条件错误", "条件必须为正确的SQL语句")
            return None
        return where, where_sql

    def delete_garbage(self):
        """Queue deletion of all bags matching the entered condition."""
        cond = self._get_condition()
        if cond is None:
            return
        event = tk_event.DelGarbageWhereEvent(self.station).start(*cond)
        self.station.push_event(event)

    def scan_garbage(self):
        """Queue a preview of the bags the entered condition would delete."""
        cond = self._get_condition()
        if cond is None:
            return
        event = tk_event.DelGarbageWhereScanEvent(self.station).start(*cond)
        self.station.push_event(event)
class DeleteAllGarbageProgram(AdminProgram):
    """Admin program that wipes every garbage bag from the database.

    The [admin] password must be re-entered as confirmation before the
    destructive event is queued.
    """

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "删除所有垃圾袋")
        self.dangerous: tk.Label = tk.Label(self.frame)  # big red warning text
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]  # del, scan
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.danger_font_size = int(20 * n)
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Place the warning text, password entry and the delete/scan buttons."""
        self.__conf_font(n * Config.tk_zoom)
        danger_font = make_font(size=self.danger_font_size, weight="bold", underline=1)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        danger_btn_font = make_font(size=self.btn_font_size, weight="bold", overstrike=1)
        self.dangerous['bg'] = self.color
        self.dangerous['font'] = danger_font
        self.dangerous['fg'] = "#f20c00"
        self.dangerous['text'] = ("确定要从数据库删除所有垃圾袋吗?\n"
                                  "请输入[admin]用户的密码再继续操作.\n"
                                  "只有[admin]用户具有该操作的权限.\n"
                                  "这是相当危险的操作.\n"
                                  "操作后数据库可能无法恢复原数据.\n"
                                  "SuperHuan和程序的缔造者不会对\n"
                                  "此操作负责.\n"
                                  "删库跑路可不是一件好事.\n"
                                  "请遵守当地法律法规.")
        self.dangerous.place(relx=0.05, rely=0.03, relwidth=0.9, relheight=0.53)
        # Password entry row (red frame to match the warning).
        self.enter_frame['bg'] = "#f20c00"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)
        self.title['font'] = title_font
        self.title['text'] = "密码:"
        self.title['bg'] = "#f20c00"
        self.title['anchor'] = 'e'
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for btn, text, x in zip(self.btn, ["删除", "扫描"], [0.2, 0.6]):
            btn['text'] = text
            btn.place(relx=x, rely=0.78, relwidth=0.2, relheight=0.08)
        self.btn[0]['font'] = danger_btn_font
        self.btn[0]['bg'] = "#f20c00"
        self.btn[0]['command'] = lambda: self.delete_garbage()
        self.btn[1]['font'] = btn_font
        self.btn[1]['bg'] = Config.tk_btn_bg
        self.btn[1]['command'] = lambda: self.scan_garbage()

    def scan_garbage(self):
        """Queue a preview of every bag that would be deleted."""
        event = tk_event.DelAllGarbageScanEvent(self.station)  # no start() needed
        self.station.push_event(event)

    def delete_garbage(self):
        """Verify the [admin] password, then queue deletion of every bag."""
        passwd = self.var.get()
        if len(passwd) == 0:
            self.station.show_warning("密码错误", "请输入正确的[admin]用户密码")
            return  # BUGFIX: previously fell through and re-checked the empty password
        user = find_user_by_name('admin', passwd, self.station.get_db())
        if user is None or not user.is_manager():
            self.station.show_warning("密码错误", "请输入正确的[admin]用户密码")
            return
        event = tk_event.DelAllGarbageEvent(self.station)  # no start() needed
        self.station.push_event(event)

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
class SearchProgramBase(AdminProgram, metaclass=abc.ABCMeta):
    """Base class for admin search programs.

    Provides a scrollable ttk.Treeview result table inside its own frame;
    subclasses call conf_view_gui() to configure and place it.
    """

    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.view_frame = tk.Frame(self.frame)
        self.view = ttk.Treeview(self.view_frame)  # result table
        self.y_scroll = tk.Scrollbar(self.view_frame)  # vertical scrollbar
        self.x_scroll = tk.Scrollbar(self.view_frame)  # horizontal scrollbar

    def conf_view_gui(self, columns: list, relx, rely, relwidth, relheight,
                      x_scroll=0.05, y_scroll=0.02, color: str = "#FA8072"):
        """Configure and place the result table.

        columns           -- column headers (also used as column identifiers)
        relx..relheight   -- relative geometry of the surrounding frame
        x_scroll/y_scroll -- relative thickness of the two scrollbars
        color             -- frame background colour
        """
        self.view_frame['bg'] = color
        self.view_frame['bd'] = 2
        self.view_frame['relief'] = "ridge"
        self.view_frame.place(relx=relx, rely=rely, relwidth=relwidth, relheight=relheight)
        self.view['columns'] = columns
        self.view['show'] = 'headings'  # hide the implicit tree column
        self.view['selectmode'] = 'none'
        for i in columns:
            self.view.column(i, anchor="c")
            self.view.heading(i, text=i)
        # Wire both scrollbars to the tree view in both directions.
        self.y_scroll['orient'] = 'vertical'
        self.y_scroll['command'] = self.view.yview
        self.view['yscrollcommand'] = self.y_scroll.set
        self.x_scroll['orient'] = 'horizontal'
        self.x_scroll['command'] = self.view.xview
        self.view['xscrollcommand'] = self.x_scroll.set
        self.view.place(relx=0.0, rely=0.0, relwidth=1 - y_scroll, relheight=1 - x_scroll)
        self.y_scroll.place(relx=0.98, rely=0.0, relwidth=y_scroll, relheight=1.0)
        self.x_scroll.place(relx=0.0, rely=1 - x_scroll, relwidth=1 - y_scroll, relheight=x_scroll)
class SearchUserProgram(SearchProgramBase):
    """Admin program searching users by any combination of ID, name and phone.

    Each field has a checkbox that enables it; unchecked or empty fields are
    passed to the search event as None (no filter).
    """

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "搜索用户")
        self.enter_frame = tk.Frame(self.frame)
        self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(3)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(3)]
        self.var: List[tk.Variable] = [tk.StringVar() for _ in range(3)]
        # One (checkbutton, int-var) pair per field; the checkbox enables the field.
        self.check: List[Tuple[tk.Checkbutton, tk.Variable]] = [(tk.Checkbutton(self.enter_frame), tk.IntVar())
                                                                for _ in range(3)]
        self.btn: tk.Button = tk.Button(self.frame)
        self._columns = ["UserID", "Name", "Phone", "Score", "Reputation", "IsManager"]
        self._columns_ch = ["用户ID[UserID]", "用户名[Name]", "手机号[Phone]",
                            "积分[Score]", "垃圾分类信用[Reputation]", "是否管理员[IsManager]"]
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Place the three filter rows, the search button and the result table."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#FA8072"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.0, relwidth=0.6, relheight=0.30)
        height = 0.1
        for lb, text, enter, var, check in zip(self.title,
                                               ["用户ID:", "用户名:", "手机号:"],
                                               self.enter, self.var, self.check):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#FA8072"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            check[0]['font'] = title_font
            check[0]['text'] = ''
            check[0]['bg'] = "#FA8072"
            check[0]['variable'] = check[1]
            check[1].set(1)  # all filters enabled by default
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.17)
            enter.place(relx=0.35, rely=height, relwidth=0.55, relheight=0.17)
            check[0].place(relx=0.92, rely=height, relwidth=0.04, relheight=0.17)
            height += 0.30
        self.btn['font'] = btn_font
        self.btn['text'] = "搜索"
        self.btn['bg'] = Config.tk_btn_bg
        self.btn['command'] = self.search_user
        self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)
        self.conf_view_gui(self._columns_ch, relx=0.05, rely=0.32, relwidth=0.9, relheight=0.55)

    def search_user(self):
        """Collect the enabled, non-empty filter fields and queue a user search.

        Replaces three copy-pasted per-field blocks with one loop; behaviour is
        unchanged: a field contributes None when its checkbox is off or its
        entry is empty.
        """
        values = []
        for (_, use_var), var in zip(self.check, self.var):
            text = var.get() if use_var.get() else ""
            values.append(text if len(text) != 0 else None)
        uid, name, phone = values
        event = tk_event.SearchUserEvent(self.station).start(self._columns, uid, name, phone, self)
        self.station.push_event(event)

    def set_disable(self):
        self.btn['state'] = 'disable'
        set_tk_disable_from_list(self.enter)

    def reset_disable(self):
        self.btn['state'] = 'normal'
        set_tk_disable_from_list(self.enter, flat='normal')
class SearchAdvancedProgramBase(SearchProgramBase, metaclass=abc.ABCMeta):
    """Base class for "advanced" searches driven by a raw SQL condition string.

    Subclasses call _conf() with their column lists and colour, and implement
    search() to queue the appropriate event.
    """

    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.btn: tk.Button = tk.Button(self.frame)
        self._conf([], [], "#FA8072")  # default colour
        self.__conf_font()

    def _conf(self, columns: list, columns_ch: list, bg_color):
        """Set the result columns (internal and display names) and frame colour."""
        self.bg_color = bg_color
        self._columns = columns
        self._columns_ch = columns_ch
        return self

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Place the condition entry row, the search button and the result table."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = self.bg_color
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.00, relwidth=0.6, relheight=0.10)
        self.title['font'] = title_font
        self.title['bg'] = self.bg_color
        self.title['text'] = "条件:"
        self.title['anchor'] = 'e'
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        self.btn['text'] = "搜索"
        self.btn['font'] = btn_font
        self.btn['bg'] = Config.tk_btn_bg
        self.btn['command'] = self.search
        self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)
        self.conf_view_gui(self._columns_ch, relx=0.05, rely=0.12, relwidth=0.9, relheight=0.76)

    def search(self):
        """Hook invoked by the search button; subclasses queue the event."""
        ...

    def set_disable(self):
        self.btn['state'] = 'disable'
        self.enter['state'] = 'disable'

    def reset_disable(self):
        self.btn['state'] = 'normal'
        self.enter['state'] = 'normal'
class SearchUserAdvancedProgram(SearchAdvancedProgramBase):
    """Advanced (raw SQL condition) search over users."""

    def __init__(self, station, win, color):
        super(SearchUserAdvancedProgram, self).__init__(station, win, color, "高级搜索-用户")
        self._conf(["UserID", "Name", "Phone", "Score", "Reputation", "IsManager"],
                   ["用户ID[UserID]", "用户名[Name]", "手机号[Phone]",
                    "积分[Score]", "垃圾分类信用[Reputation]", "是否管理员[IsManager]"],
                   '#48c0a3')

    def search(self):
        """Queue an advanced user search with the raw condition string."""
        self.station.push_event(
            tk_event.SearchUserAdvancedEvent(self.station).start(self._columns, self.var.get(), self))
class SearchGarbageProgram(SearchProgramBase):
    """Admin program searching garbage bags by any combination of eight fields."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "搜索垃圾袋")
        self.enter_frame = tk.Frame(self.frame)
        self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(8)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(8)]
        self.var: List[tk.Variable] = [tk.StringVar() for _ in range(8)]
        # One (checkbutton, int-var) pair per field; the checkbox enables the field.
        self.check: List[Tuple[tk.Checkbutton, tk.Variable]] = [(tk.Checkbutton(self.enter_frame), tk.IntVar())
                                                                for _ in range(8)]
        self._columns = ["GarbageID", "UserID", "CheckerID", "CreateTime", "UseTime", "Location", "GarbageType",
                         "CheckResult"]
        self._columns_zh = ["垃圾袋ID[GarbageID]", "使用者ID[UserID]", "检测者ID[CheckerID]", "创建时间[CreateTime]",
                            "使用时间[UseTime]", "使用地点[Location]", "垃圾类型[GarbageType]", "检测结果[CheckResult]"]
        self.btn: tk.Button = tk.Button(self.frame)
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Place the eight filter rows, the search button and the result table."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#7bbfea"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.0, relwidth=0.6, relheight=0.47)
        height = 0.02
        for lb, text, enter, var, check in zip(self.title,
                                               ["垃圾袋ID:", "使用者ID:", "检查者ID:", "创建时间:", "使用时间:",
                                                "使用地点:", "垃圾类型:", "检测结果:"],
                                               self.enter, self.var, self.check):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#7bbfea"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            check[0]['font'] = title_font
            check[0]['bg'] = "#7bbfea"
            check[0]['text'] = ''
            check[0]['variable'] = check[1]
            check[1].set(1)  # all filters enabled by default
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.10)
            enter.place(relx=0.35, rely=height, relwidth=0.55, relheight=0.10)
            check[0].place(relx=0.92, rely=height, relwidth=0.04, relheight=0.10)
            height += 0.121
        self.btn['font'] = btn_font
        self.btn['bg'] = Config.tk_btn_bg
        self.btn['text'] = "搜索"  # BUGFIX: was English "Search"; every sibling program uses "搜索"
        self.btn['command'] = self.search_user
        self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)
        self.conf_view_gui(self._columns_zh, relx=0.05, rely=0.49, relwidth=0.9, relheight=0.38, x_scroll=0.07)

    def search_user(self):
        """Collect enabled, non-empty filter fields and queue a garbage search."""
        keys = ["gid", "uid", "cuid", "create_time", "use_time", "loc", "type_", "check"]
        key_values = {}
        for i, key in enumerate(keys):
            ck = self.check[i][1].get()
            if ck:
                res = self.enter[i].get()
                if len(res) > 0:
                    key_values[key] = res
                    continue
            key_values[key] = None  # disabled or empty field -> no filter
        event = tk_event.SearchGarbageEvent(self.station).start(self._columns, key_values, self)
        self.station.push_event(event)

    def set_disable(self):
        self.btn['state'] = 'disable'
        set_tk_disable_from_list(self.enter)

    def reset_disable(self):
        self.btn['state'] = 'normal'
        set_tk_disable_from_list(self.enter, flat='normal')
class SearchGarbageAdvancedProgram(SearchAdvancedProgramBase):
    """Advanced (raw SQL condition) search over garbage bags."""

    def __init__(self, station, win, color):
        super(SearchGarbageAdvancedProgram, self).__init__(station, win, color, "高级搜索-垃圾袋")
        self._conf(["GarbageID", "UserID", "CheckerID", "CreateTime", "UseTime", "Location", "GarbageType",
                    "CheckResult"],
                   ["垃圾袋ID[GarbageID]", "使用者ID[UserID]", "检测者ID[CheckerID]", "创建时间[CreateTime]",
                    "使用时间[UseTime]", "使用地点[Location]", "垃圾类型[GarbageType]", "检测结果[CheckResult]"],
                   '#d1923f')

    def search(self):
        """Queue an advanced garbage-bag search with the raw condition string."""
        self.station.push_event(
            tk_event.SearchGarbageAdvancedEvent(self.station).start(self._columns, self.var.get(), self))
class SearchAdvancedProgram(SearchAdvancedProgramBase):
    """Advanced (raw SQL condition) search joining bags with users and checkers."""

    def __init__(self, station, win, color):
        super(SearchAdvancedProgram, self).__init__(station, win, color, "高级搜索")
        columns = ["GarbageID", "UserID", "UserName", "UserPhone", "UserScore",
                   "UserReputation", "CheckerID", "CheckerName", "CheckerPhone",
                   "CreateTime", "UseTime", "Location", "GarbageType", "CheckResult"]
        # BUGFIX: header for CheckerName read "检测这名" (typo) instead of "检测者名".
        columns_zh = ["垃圾袋ID[GarbageID]", "使用者ID[UserID]", "使用者名[UserName]", "使用者手机号[UserPhone]",
                      "使用者积分[UserScore]", "使用者垃圾分类信用[UserReputation]", "检测者ID[CheckerID]",
                      "检测者名[CheckerName]", "检测者手机号[CheckerPhone]", "创建时间[CreateTime]", "使用时间[UseTime]",
                      "使用地点[Location]", "垃圾类型[GarbageType]", "检测结果[CheckResult]"]
        self._conf(columns, columns_zh, '#426ab3')

    def search(self):
        """Queue an advanced joined search with the raw condition string."""
        where = self.var.get()
        event = tk_event.SearchAdvancedEvent(self.station).start(self._columns, where, self)
        self.station.push_event(event)
class UpdateUserProgramBase(AdminProgram):
    """Base class for programs that update one user attribute.

    Offers two ways to target users: an SQL condition (top frame) or a single
    user ID (bottom frame).  Subclasses set the captions/colour via _conf()
    and implement update_by_uid() / update_by_where().
    """

    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        # Bottom frame: update by user ID.
        self.enter_frame = tk.Frame(self.frame)
        self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(2)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(2)]
        self.var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]
        # Top frame: update by SQL condition.
        self.where_frame = tk.Frame(self.frame)
        self.where_title: List[tk.Label] = [tk.Label(self.where_frame) for _ in range(2)]
        self.where_enter: List[tk.Entry] = [tk.Entry(self.where_frame) for _ in range(2)]
        self.where_var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]
        self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self._conf(["", ""], "#FA8072")
        self.__conf_font()

    def _conf(self, title: List[str], bg_color: str):
        """Set the two row captions ([key caption, value caption]) and colour."""
        self.bg_color = bg_color
        self.bg_color_where = bg_color
        self.enter_title = title

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Place both entry frames (condition and user-ID) and the two buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.where_frame['bg'] = self.bg_color_where
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.17)
        self.enter_frame['bg'] = self.bg_color
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.58, relwidth=0.6, relheight=0.17)
        height = 0.1
        # Build both frames in lockstep: the ID frame uses the subclass captions,
        # the condition frame replaces the first caption with "条件:".
        for lb, text, enter, var, lb_w, text_w, enter_w, var_w in (
                zip(self.title, self.enter_title, self.enter, self.var,
                    self.where_title, ["条件:", self.enter_title[1]], self.where_enter, self.where_var)):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = self.bg_color
            lb['anchor'] = 'e'
            lb_w['font'] = title_font
            lb_w['text'] = text_w
            lb_w['bg'] = self.bg_color_where
            lb_w['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            enter_w['font'] = title_font
            enter_w['textvariable'] = var_w
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            lb_w.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter_w.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            height += 0.43
        for btn, text, func in zip(self.btn,
                                   ["通过条件更新", "通过用户ID更新"],
                                   [self.update_by_where, self.update_by_uid]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
        self.btn[0].place(relx=0.55, rely=0.40, relwidth=0.25, relheight=0.08)
        self.btn[1].place(relx=0.55, rely=0.78, relwidth=0.25, relheight=0.08)

    def update_by_uid(self):
        """Hook: update the attribute for the user identified by the entered ID."""
        ...

    def update_by_where(self):
        """Hook: update the attribute for all users matching the condition."""
        ...

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        set_tk_disable_from_list(self.enter)

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        set_tk_disable_from_list(self.enter, flat='normal')
class UpdateUserScore(UpdateUserProgramBase):
    """Admin program that sets a user's score, by user ID or by SQL condition."""

    def __init__(self, station, win, color):
        super(UpdateUserScore, self).__init__(station, win, color, "更新用户-积分")
        self._conf(["用户ID:", "积分:"], "#afdfe4")

    def _parse_score(self, text: str):
        """Return *text* as int, or None (after warning) when it is not an integer.

        Previously a bare int() call raised an uncaught ValueError on bad input.
        """
        try:
            return int(text)
        except ValueError:
            self.station.show_warning("积分错误", "积分必须为整数")
            return None

    def update_by_uid(self):
        """Set the score of the user with the entered ID."""
        uid = self.enter[0].get()
        score = self._parse_score(self.enter[1].get())
        if score is None:
            return
        # NOTE(review): uid is interpolated into the condition string; assumes
        # the event layer handles quoting/escaping -- confirm.
        event = tk_event.UpdateUserScoreEvent(self.station).start(score, f"UserID='{uid}'")
        self.station.push_event(event)

    def update_by_where(self):
        """Set the score of every user matching the entered condition."""
        where = self.where_enter[0].get()
        score = self._parse_score(self.where_enter[1].get())
        if score is None:
            return
        event = tk_event.UpdateUserScoreEvent(self.station).start(score, where)
        self.station.push_event(event)
class UpdateUserReputation(UpdateUserProgramBase):
    """Admin program that sets a user's reputation, by user ID or SQL condition."""

    def __init__(self, station, win, color):
        super(UpdateUserReputation, self).__init__(station, win, color, "更新用户-垃圾分类信用")
        self._conf(["用户ID:", "垃圾分类信用:"], "#f8aba6")

    def _parse_reputation(self, text: str):
        """Return *text* as int, or None (after warning) when it is not an integer.

        Previously a bare int() call raised an uncaught ValueError on bad input.
        """
        try:
            return int(text)
        except ValueError:
            self.station.show_warning("垃圾分类信用错误", "垃圾分类信用必须为整数")
            return None

    def update_by_uid(self):
        """Set the reputation of the user with the entered ID."""
        uid = self.enter[0].get()
        reputation = self._parse_reputation(self.enter[1].get())
        if reputation is None:
            return
        # NOTE(review): uid is interpolated into the condition string; assumes
        # the event layer handles quoting/escaping -- confirm.
        event = tk_event.UpdateUserReputationEvent(self.station).start(reputation, f"UserID='{uid}'")
        self.station.push_event(event)

    def update_by_where(self):
        """Set the reputation of every user matching the entered condition."""
        where = self.where_enter[0].get()
        reputation = self._parse_reputation(self.where_enter[1].get())
        if reputation is None:
            return
        event = tk_event.UpdateUserReputationEvent(self.station).start(reputation, where)
        self.station.push_event(event)
class UpdateGarbageTypeProgram(AdminProgram):
    """Admin program that sets a garbage bag's type, by bag ID or SQL condition."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "更新垃圾袋-垃圾类型")
        # Bottom frame: update by bag ID.
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]
        # BUGFIX: tk.StringVar was stored as the class (missing parentheses),
        # so var[0] was never a usable text variable.
        self.var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]
        # Top frame: update by SQL condition.
        self.where_frame = tk.Frame(self.frame)
        self.where_title: tk.Label = tk.Label(self.where_frame)
        self.where_enter: tk.Entry = tk.Entry(self.where_frame)
        self.where_type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]
        self.where_var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]  # BUGFIX: same missing call
        self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Place both entry frames, two sets of type radio buttons and buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.where_frame['bg'] = "#fdb933"
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)
        self.enter_frame['bg'] = "#fdb933"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)
        for lb, enter, radios, var, y, text in zip([self.title, self.where_title],
                                                   [self.enter, self.where_enter],
                                                   [self.type, self.where_type],
                                                   [self.var, self.where_var],
                                                   [0.32, 0.72],
                                                   ["垃圾袋ID:", "条件:"]):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#fdb933"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var[0]
            # Garbage-type radio buttons; values are 1-based GarbageType codes.
            for i, radio in enumerate(radios):
                radio['font'] = btn_font
                radio['bg'] = self.color
                radio['text'] = GarbageType.GarbageTypeStrList_ch[i + 1]
                radio['value'] = i + 1
                radio['variable'] = var[1]
                radio['anchor'] = 'w'
            var[1].set(1)
            radios[0].place(relx=0.20, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[1].place(relx=0.60, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[2].place(relx=0.20, rely=y + 0.05, relwidth=0.20, relheight=0.04)
            radios[3].place(relx=0.60, rely=y + 0.05, relwidth=0.20, relheight=0.04)
            lb.place(relx=0.02, rely=0.2, relwidth=0.25, relheight=0.48)
            enter.place(relx=0.30, rely=0.2, relwidth=0.60, relheight=0.48)
        for btn, text, func in zip(self.btn,
                                   ["通过条件更新", "通过垃圾袋ID更新"],
                                   [self.update_by_where, self.update_by_gid]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
        self.btn[0].place(relx=0.55, rely=0.43, relwidth=0.25, relheight=0.08)
        self.btn[1].place(relx=0.55, rely=0.83, relwidth=0.25, relheight=0.08)

    def update_by_gid(self):
        """Set the type of the bag with the entered ID."""
        gid = self.enter.get()
        type_ = self.var[1].get()
        event = tk_event.UpdateGarbageTypeEvent(self.station).start(type_, f"GarbageID={gid}")
        self.station.push_event(event)

    def update_by_where(self):
        """Set the type of every bag matching the entered condition."""
        where = self.where_enter.get()
        type_ = self.where_var[1].get()
        event = tk_event.UpdateGarbageTypeEvent(self.station).start(type_, where)
        self.station.push_event(event)

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'
        self.where_enter['state'] = 'disable'  # BUGFIX: was left 'normal'

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
        self.where_enter['state'] = 'normal'
class UpdateGarbageCheckResultProgram(AdminProgram):
    """Admin program that sets a bag's check result, by bag ID or SQL condition."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "更新垃圾袋-检测结果")
        # Bottom frame: update by bag ID.
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(2)]
        # BUGFIX: tk.StringVar was stored as the class (missing parentheses),
        # so var[0] was never a usable text variable.
        self.var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]
        # Top frame: update by SQL condition.
        self.where_frame = tk.Frame(self.frame)
        self.where_title: tk.Label = tk.Label(self.where_frame)
        self.where_enter: tk.Entry = tk.Entry(self.where_frame)
        self.where_type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(2)]
        self.where_var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]  # BUGFIX: same missing call
        self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Place both entry frames, two pairs of result radio buttons and buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.where_frame['bg'] = "#abc88b"
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)
        self.enter_frame['bg'] = "#abc88b"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)
        for lb, enter, radios, var, y, text in zip([self.title, self.where_title],
                                                   [self.enter, self.where_enter],
                                                   [self.type, self.where_type],
                                                   [self.var, self.where_var],
                                                   [0.32, 0.72],
                                                   ["垃圾袋ID:", "条件:"]):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#abc88b"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var[0]
            # Result radio buttons: 0 = wrong, 1 = correct.
            for i, radio in enumerate(radios):
                radio['font'] = btn_font
                radio['bg'] = self.color
                radio['text'] = ["投放错误", "投放正确"][i]
                radio['value'] = i
                radio['variable'] = var[1]
                radio['anchor'] = 'w'
            var[1].set(1)
            radios[0].place(relx=0.20, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[1].place(relx=0.60, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            lb.place(relx=0.02, rely=0.2, relwidth=0.25, relheight=0.48)
            enter.place(relx=0.30, rely=0.2, relwidth=0.60, relheight=0.48)
        for btn, text, func in zip(self.btn,
                                   ["通过条件更新", "通过垃圾袋ID更新"],
                                   [self.update_by_where, self.update_by_gid]):
            btn['font'] = btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
        self.btn[0].place(relx=0.55, rely=0.38, relwidth=0.25, relheight=0.08)
        self.btn[1].place(relx=0.55, rely=0.78, relwidth=0.25, relheight=0.08)

    def update_by_gid(self):
        """Set the check result of the bag with the entered ID."""
        gid = self.enter.get()
        check = (self.var[1].get() == 1)
        event = tk_event.UpdateGarbageCheckEvent(self.station).start(check, f"GarbageID={gid}")
        self.station.push_event(event)

    def update_by_where(self):
        """Set the check result of every bag matching the entered condition."""
        where = self.where_enter.get()
        check = (self.where_var[1].get() == 1)
        event = tk_event.UpdateGarbageCheckEvent(self.station).start(check, where)
        self.station.push_event(event)

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'
        self.where_enter['state'] = 'disable'  # BUGFIX: was left 'normal'

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
        self.where_enter['state'] = 'normal'
class StatisticsTimeProgramBase(AdminProgram):
    def __init__(self, station, win, color, title: str):
        """Create the statistics page: a matplotlib figure with two stacked
        subplots, show/hide category colour lists and control buttons."""
        super().__init__(station, win, color, title)
        self.figure_frame = tk.Frame(self.frame)
        self.figure = Figure(dpi=100)
        self.plt_1: Axes = self.figure.add_subplot(211)  # subplot: 2 rows, 1 column, first
        self.plt_2: Axes = self.figure.add_subplot(212, sharex=self.plt_1)  # second (shares the x axis)
        self.figure.subplots_adjust(hspace=0.7)
        self.canvas = FigureCanvasTkAgg(self.figure, master=self.figure_frame)
        self.canvas_tk = self.canvas.get_tk_widget()
        self.toolbar = NavigationToolbar2Tk(self.canvas, self.figure_frame)
        self.color_frame = tk.Frame(self.frame)
        self.show_list_tk = tk.Listbox(self.color_frame)  # categories currently drawn
        self.show_list_scroll = tk.Scrollbar(self.color_frame)
        self.hide_list_tk = tk.Listbox(self.color_frame)  # categories currently hidden
        self.hide_list_scroll = tk.Scrollbar(self.color_frame)
        self.btn_show = tk.Button(self.color_frame)
        self.btn_hide = tk.Button(self.color_frame)
        self.color_show_dict = {}  # category -> colour, included in the plots
        self.color_hide_dict = {}  # category -> colour, excluded from the plots
        self.export_lst = []  # raw rows backing the CSV export
        self.export_btn = tk.Button(self.frame)
        self.refresh_btn = tk.Button(self.frame)
        self.reset_btn = tk.Button(self.frame)
        self.reverse_btn = tk.Button(self.frame)
        self.legend_show = tk.Checkbutton(self.frame), tk.IntVar()  # "show legend" toggle
        self._conf("#abc88b")
        self.__conf_font()
    def _conf(self, bg_color):
        """Remember the background colour used by the side and figure frames."""
        self.bg_color = bg_color
def __conf_font(self, n: int = Config.tk_zoom):
self.btn_font_size = int(14 * n)
self.little_btn_font_size = int(12 * n)
    def to_program(self):
        """Called when the page is switched to; clears the plots via refresh()."""
        self.refresh()
def update_listbox(self):
self.show_list_tk.delete(0, tk.END) # 清空列表
self.hide_list_tk.delete(0, tk.END) # 清空列表
for i in self.color_show_dict:
self.show_list_tk.insert(tk.END, i)
self.show_list_tk.itemconfig(tk.END,
selectbackground=self.color_show_dict[i],
bg=self.color_show_dict[i],
selectforeground='#FFFFFF',
fg='#000000')
for i in self.color_hide_dict:
self.hide_list_tk.insert(tk.END, i)
self.hide_list_tk.itemconfig(tk.END,
selectbackground=self.color_hide_dict[i],
bg=self.color_hide_dict[i],
selectforeground='#FFFFFF',
fg='#000000')
def check_show(self, res: str):
color = self.color_show_dict.get(res)
if color is not None:
return color
color = self.color_hide_dict.get(res)
if color is not None:
return None
color = random_color()
self.color_show_dict[res] = color
return color
def hide(self):
i = self.show_list_tk.curselection()
if len(i) == 0:
return
res = self.show_list_tk.get(i[0])
self.hide_(res)
self.update_listbox()
def show(self):
i = self.hide_list_tk.curselection()
if len(i) == 0:
return
res = self.hide_list_tk.get(i[0])
self.show_(res)
self.update_listbox()
def hide_(self, res):
color = self.color_show_dict.get(res)
if color is not None:
del self.color_show_dict[res]
self.color_hide_dict[res] = color
def show_(self, res):
color = self.color_hide_dict.get(res)
if color is not None:
del self.color_hide_dict[res]
self.color_show_dict[res] = color
    def conf_gui(self, n: int = 1):
        """Place the show/hide colour lists, the figure canvas with its toolbar,
        the four control buttons and the legend checkbox."""
        self.__conf_font(n * Config.tk_zoom)
        btn_font = make_font(size=self.btn_font_size)
        little_btn_font = make_font(size=self.little_btn_font_size)
        # Left column: two scrollable list boxes (visible / hidden categories).
        self.color_frame['bg'] = self.bg_color
        self.color_frame['bd'] = 5
        self.color_frame['relief'] = "ridge"
        self.show_list_tk.place(relx=0, rely=0, relwidth=0.90, relheight=0.475)
        self.show_list_scroll.place(relx=0.90, rely=0, relwidth=0.10, relheight=0.475)
        self.show_list_scroll['orient'] = 'vertical'
        self.show_list_scroll['command'] = self.show_list_tk.yview
        self.show_list_tk['yscrollcommand'] = self.show_list_scroll.set
        self.show_list_tk['activestyle'] = tk.NONE
        self.hide_list_tk.place(relx=0, rely=0.525, relwidth=0.90, relheight=0.475)
        self.hide_list_scroll.place(relx=0.90, rely=0.525, relwidth=0.10, relheight=0.475)
        self.hide_list_scroll['orient'] = 'vertical'
        self.hide_list_scroll['command'] = self.hide_list_tk.yview
        self.hide_list_tk['yscrollcommand'] = self.hide_list_scroll.set
        self.hide_list_tk['activestyle'] = tk.NONE
        # Small show/hide buttons between the two lists.
        for btn, text, func, x in zip([self.btn_show, self.btn_hide],
                                      ["显示", "隐藏"],
                                      [self.show, self.hide],
                                      [0.00, 0.50]):
            btn['font'] = little_btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
            btn.place(relx=x, rely=0.475, relwidth=0.50, relheight=0.05)
        self.color_frame.place(relx=0.01, rely=0.02, relwidth=0.18, relheight=0.88)
        # Right side: the matplotlib canvas plus its navigation toolbar.
        self.figure_frame['bg'] = self.bg_color
        self.figure_frame['bd'] = 5
        self.figure_frame['relief'] = "ridge"
        self.figure_frame.place(relx=0.21, rely=0.02, relwidth=0.79, relheight=0.88)
        self.canvas_tk.place(relx=0, rely=0, relwidth=1.0, relheight=0.9)
        self.toolbar.place(relx=0, rely=0.9, relwidth=1.0, relheight=0.1)
        # Bottom row: reset / reverse / refresh / export buttons.
        for btn, text, func, x in zip([self.reset_btn, self.reverse_btn, self.refresh_btn, self.export_btn],
                                      ["复位选择", "反转选择", "刷新数据", "导出数据"],
                                      [self.reset, self.reverse, self.refresh, self.export],
                                      [0.37, 0.53, 0.69, 0.85]):
            btn['font'] = btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
            btn.place(relx=x, rely=0.91, relwidth=0.15, relheight=0.08)
        # "Show legend" checkbox.
        self.legend_show[0]['font'] = btn_font
        self.legend_show[0]['bg'] = self.color
        self.legend_show[0]['text'] = "显示图例"
        self.legend_show[0]['variable'] = self.legend_show[1]
        self.legend_show[0].place(relx=0.21, rely=0.91, relwidth=0.15, relheight=0.08)
def export(self, title, func: Callable):
path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
if not path.endswith(".csv"):
path += ".csv"
with open(path, "w") as f:
f.write(f"Hour, Count, {title}\n")
for i in self.export_lst:
f.write(f"{i[0]}, {i[1]}, {func(i)}\n")
self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")
def refresh(self):
self.plt_1.cla()
self.plt_2.cla()
def reset(self):
self.color_show_dict.update(self.color_hide_dict)
self.color_hide_dict = {}
self.update_listbox()
def reverse(self):
tmp = self.color_show_dict
self.color_show_dict = self.color_hide_dict
self.color_hide_dict = tmp
self.update_listbox()
    def show_result(self, res: Dict[str, any], lst: List):
        """Render hourly counts as a stacked bar chart (plt_1) and a line chart (plt_2).

        :param res: mapping with key 'res_type' (list of series names) plus one
                    entry per series holding (hour, count, ...) tuples
        :param lst: raw rows kept in ``self.export_lst`` for CSV export
        """
        bottom = np.zeros(24)  # running stack base for the bar chart, one slot per hour
        label_num = [i for i in range(24)]
        label_str = [f"{i}" for i in range(24)]
        res_type_lst: List = res['res_type']
        self.export_lst = lst
        max_y_plot = 1  # y-axis ceiling for the line chart (per-series max)
        max_y_bar = 1   # y-axis ceiling for the bar chart (stacked total)
        for res_type in res_type_lst:
            res_count: Tuple[str] = res[res_type]
            if len(res_count) != 0:
                # check_show returns the series color, or None if the user hid it
                color = self.check_show(res_type)
                if color is None:
                    continue
                y = [0 for _ in range(24)]
                for i in res_count:
                    # i[0] = hour-of-day, i[1] = count for that hour
                    y[int(i[0])] += int(i[1])
                self.color_show_dict[res_type] = color
                self.plt_1.bar(label_num, y,
                               color=color,
                               align="center",
                               bottom=bottom,
                               tick_label=label_str,
                               label=res_type)
                self.plt_2.plot(label_num, y,
                                color=color,
                                label=res_type,
                                marker='o',
                                markersize=5)
                bottom += np.array(y)
                max_y_plot = max(max(y), max_y_plot)
        if self.legend_show[1].get() == 1:  # legend checkbox is enabled
            self.plt_1.legend(loc="upper left")
            self.plt_2.legend(loc="upper left")
        self.plt_1.set_xlim(-1, 24)
        self.plt_1.set_xticks([i for i in range(0, 24, 2)])
        self.plt_1.set_xticklabels([f"{i}h" for i in range(0, 24, 2)])
        max_y_bar = int(max(bottom.max(), max_y_bar))
        self.plt_1.set_ylim(0, max_y_bar + max_y_bar * 0.1)
        step = ceil(max_y_bar / 5)  # round up so at most ~5 intermediate ticks
        if step > 0:
            y_ticks = [i for i in range(0, max_y_bar, step)]
            y_ticklabels = [f'{i}' for i in range(0, max_y_bar, step)]
        else:
            y_ticks = []
            y_ticklabels = []
        # always include the exact maximum as the top tick
        y_ticks.append(max_y_bar)
        y_ticklabels.append(f"{max_y_bar}")
        self.plt_1.set_yticks(y_ticks)
        self.plt_1.set_yticklabels(y_ticklabels)  # original note said "reversed"; labels are ascending here
        self.plt_1.spines['right'].set_color('none')
        self.plt_1.spines['top'].set_color('none')
        self.plt_1.grid(axis='y')
        self.plt_1.set_title(f"{self.program_title}柱状图")
        self.plt_2.set_xlim(-1, 24)
        self.plt_2.set_xticks([i for i in range(0, 24, 2)])
        self.plt_2.set_xticklabels([f"{i}h" for i in range(0, 24, 2)])
        self.plt_2.set_ylim(0, max_y_plot + max_y_plot * 0.1)
        step = ceil(max_y_plot / 5)  # round up so at most ~5 intermediate ticks
        if step > 0:
            y_ticks = [i for i in range(0, max_y_plot, step)]
            y_ticklabels = [f'{i}' for i in range(0, max_y_plot, step)]
        else:
            y_ticks = []
            y_ticklabels = []
        y_ticks.append(max_y_plot)
        y_ticklabels.append(f"{max_y_plot}")
        self.plt_2.set_yticks(y_ticks)
        self.plt_2.set_yticklabels(y_ticklabels)
        self.plt_2.spines['right'].set_color('none')
        self.plt_2.spines['top'].set_color('none')
        self.plt_2.grid(axis='y')
        self.plt_2.set_title(f"{self.program_title}折线图")
        self.canvas.draw()
        self.toolbar.update()
        self.update_listbox()
def set_disable(self):
self.export_btn['state'] = 'disable'
self.reset_btn['state'] = 'disable'
self.refresh_btn['state'] = 'disable'
self.reverse_btn['state'] = 'disable'
self.btn_show['state'] = 'disable'
self.btn_hide['state'] = 'disable'
def reset_disable(self):
self.export_btn['state'] = 'normal'
self.reset_btn['state'] = 'normal'
self.refresh_btn['state'] = 'normal'
self.reverse_btn['state'] = 'normal'
self.btn_show['state'] = 'normal'
self.btn_hide['state'] = 'normal'
class StatisticsTimeLocProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by drop-off location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按投放区域")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the plots and queue a count-by-location query."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["Location"], lambda row: row[2], self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-location hourly counts as CSV."""
        super().export("Location", lambda row: row[2])
class StatisticsTimeTypeProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by garbage type."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按投放类型")
        self._conf("#abc88b")
        # fixed palette: one color per garbage type (indices 1-4)
        for idx, c in enumerate(("#00BFFF", "#32CD32", "#DC143C", "#A9A9A9"), start=1):
            self.color_show_dict[GarbageType.GarbageTypeStrList_ch[idx]] = c

    def refresh(self):
        """Clear the plots and queue a count-by-type query."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-type hourly counts as CSV."""
        super().export("Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Translate the raw type byte in row ``i`` to its Chinese label."""
        raw: bytes = i[2]
        return GarbageType.GarbageTypeStrList_ch[int(raw.decode('utf-8'))]
class StatisticsTimeTypeLocProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by garbage type and drop-off location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按投放类型和区域")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the plots and queue a count-by-type-and-location query."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-type-and-location hourly counts as CSV."""
        super().export("Type-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a '<type>-<location>' label from row ``i``."""
        raw: bytes = i[2]
        type_name = GarbageType.GarbageTypeStrList_ch[int(raw.decode('utf-8'))]
        return f"{type_name}-{i[3]}"
class StatisticsTimeCheckResultProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by check result (pass / fail)."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按检查结果")
        self._conf("#abc88b")
        self.color_show_dict.update({'Pass': "#00BFFF", 'Fail': "#DC143C"})

    def refresh(self):
        """Clear the plots and queue a count-by-check-result query."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["CheckResult"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-result hourly counts as CSV."""
        super().export("Result", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Map the raw check-result bit in row ``i`` to 'Pass'/'Fail'/'None'."""
        raw = i[2]
        if raw is None:
            return 'None'
        return 'Pass' if raw == DBBit.BIT_1 else 'Fail'
class StatisticsTimeCheckResultAndTypeProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by check result and garbage type."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按检查结果和类型")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the plots and queue a count-by-result-and-type query."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["CheckResult", "GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-result-and-type hourly counts as CSV.

        Bug fix: the CSV title used to say "Result-Location" although this
        program groups by type (siblings use Type/Location/Type-Location).
        """
        super().export("Result-Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<type>' label from row ``i``."""
        data_1: bytes = i[2]
        data_2: bytes = i[3]
        if data_1 is None:
            tmp = 'None'
        elif data_1 == DBBit.BIT_1:
            tmp = 'Pass'
        else:
            tmp = 'Fail'
        return tmp + f'-{GarbageType.GarbageTypeStrList_ch[int(data_2.decode("utf-8"))]}'
class StatisticsTimeCheckResultAndLocProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by check result and drop-off location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按检查结果和区域")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the plots and queue a count-by-result-and-location query."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["CheckResult", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-result-and-location hourly counts as CSV.

        Bug fix: the CSV title used to say "Result-Type" although this
        program groups by location (labels were swapped with the type sibling).
        """
        super().export("Result-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<location>' label from row ``i``."""
        if i[2] is None:
            return 'None'
        data_1: bytes = i[2]
        return (f'Pass' if data_1 == DBBit.BIT_1 else 'Fail') + f"-{i[3]}"
class StatisticsTimeDetailProgram(StatisticsTimeProgramBase):
    """Hourly statistics fully broken down by result, type and location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-详细分类")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the plots and queue the fully-detailed count query."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["CheckResult", "GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the detailed hourly counts as CSV."""
        super().export("Detail", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<type>-<location>' label from row ``i``."""
        result_bit: bytes = i[2]
        type_byte: bytes = i[3]
        if result_bit is None:
            result = 'None'
        else:
            result = 'Pass' if result_bit == DBBit.BIT_1 else 'Fail'
        type_name = GarbageType.GarbageTypeStrList_ch[int(type_byte.decode("utf-8"))]
        return result + f'-{type_name}' + f'-{i[4]}'
class StatisticsUserBaseProgram(AdminProgram):
    """Base admin program for single-figure user statistics (heatmaps, histograms, pies).

    Provides one matplotlib figure with its Tk canvas/toolbar and a
    refresh/export button pair; subclasses draw into ``self.plt``.
    """

    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.figure_frame = tk.Frame(self.frame)
        self.figure = Figure(dpi=100)
        self.plt: Axes = self.figure.add_subplot(111)  # one subplot: 1 row, 1 col, slot 1
        self.figure.subplots_adjust(bottom=0.2, top=0.93)
        self.canvas = FigureCanvasTkAgg(self.figure, master=self.figure_frame)
        self.canvas_tk = self.canvas.get_tk_widget()
        self.toolbar = NavigationToolbar2Tk(self.canvas, self.figure_frame)
        self.color_bar: Optional[Colorbar] = None  # set by heatmap subclasses, removed on refresh
        self.export_lst: Optional[np.array] = None  # last result, kept for CSV export
        self.export_btn = tk.Button(self.frame)
        self.refresh_btn = tk.Button(self.frame)
        self._conf("#abc88b")
        self.__conf_font()

    def _conf(self, bg_color):
        # Background color used by conf_gui for the figure frame.
        self.bg_color = bg_color

    def __conf_font(self, n: int = Config.tk_zoom):
        # Scale the button font by the UI zoom factor.
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Lay out the figure frame, canvas, toolbar and the two buttons."""
        self.__conf_font(n * Config.tk_zoom)
        btn_font = make_font(size=self.btn_font_size)
        self.figure_frame['bg'] = self.bg_color
        self.figure_frame['bd'] = 5
        self.figure_frame['relief'] = "ridge"
        self.figure_frame.place(relx=0.00, rely=0.02, relwidth=1, relheight=0.88)
        self.canvas_tk.place(relx=0, rely=0, relwidth=1.0, relheight=0.9)
        self.toolbar.place(relx=0, rely=0.9, relwidth=1.0, relheight=0.1)
        for btn, text, func, x in zip([self.refresh_btn, self.export_btn],
                                      ["刷新数据", "导出数据"],
                                      [self.refresh, self.export],
                                      [0.34, 0.51]):
            btn['font'] = btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
            btn.place(relx=x, rely=0.91, relwidth=0.15, relheight=0.08)

    def export(self):
        # No-op by default; subclasses override with a real CSV export.
        ...

    def refresh(self, event_class):
        """Clear the plot (and colorbar, if any) and queue ``event_class``."""
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
        event = event_class(self.station).start(self)
        self.station.push_event(event)

    def set_disable(self):
        # Disable both buttons while a background event runs.
        self.export_btn['state'] = 'disable'
        self.refresh_btn['state'] = 'disable'

    def reset_disable(self):
        # Re-enable both buttons once the event completes.
        self.export_btn['state'] = 'normal'
        self.refresh_btn['state'] = 'normal'
class StatisticsUserTinyProgram(StatisticsUserBaseProgram):
    """Fine-grained score-vs-reputation heatmap (cells of 10 score x 10 reputation)."""

    def __init__(self, station, win, color):
        super(StatisticsUserTinyProgram, self).__init__(station, win, color, "积分信用分析-细致")

    def show_result(self, lst: np.array):
        """Draw ``lst`` (assumed 50x100 count matrix — TODO confirm against the event) as a heatmap."""
        self.export_lst = lst
        x_label = [f'{i * 10}' for i in range(0, 51, 10)]   # score axis: 0..500
        y_label = [f'{i * 10}' for i in range(0, 101, 20)]  # reputation axis: 0..1000
        im = self.plt.pcolormesh(lst, cmap='Blues')  # colormap for the cell counts
        self.plt.set_xticks(range(0, 101, 20))  # x ticks every 20 cells
        self.plt.set_yticks(range(0, 101, 20))  # y ticks every 20 cells
        self.plt.set_xticklabels(x_label)
        self.plt.set_yticklabels(y_label)
        self.plt.set_xlabel("用户积分")       # x axis: user score
        self.plt.set_ylabel("垃圾分类信用")   # y axis: sorting reputation
        self.color_bar = self.figure.colorbar(im, pad=0.03, ax=self.plt)  # count scale
        self.plt.set_title("积分信用分析-细致热图")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        """Write the heatmap matrix to CSV (columns: reputation bins, rows: score bins)."""
        if self.export_lst is None:
            self.station.show_msg("保存数据", f"没有数据需要保存")
            return
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            # header: 100 reputation bins of width 10 ([0 10] .. [990 1000])
            f.write("#, " + ", ".join([f'[{i * 10} {i * 10 + 10}]' for i in range(0, 100, 1)]) + "\n")
            # one row per score bin of width 10 ([0 10] .. [490 500])
            for i, lst in zip(range(0, 50, 1), self.export_lst):
                f.write(f"[{i * 10} {i * 10 + 10}], " + ", ".join([f"{a}" for a in lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        super().refresh(tk_event.CountScoreReputationTinyEvent)
class StatisticsUserLargeProgram(StatisticsUserBaseProgram):
    """Coarse score-vs-reputation heatmap (cells of 50 score x 100 reputation)."""

    def __init__(self, station, win, color):
        super(StatisticsUserLargeProgram, self).__init__(station, win, color, "积分信用分析-大致")

    def show_result(self, lst: np.array):
        """Draw ``lst`` (assumed 10x10 count matrix — TODO confirm against the event) as a heatmap."""
        self.export_lst = lst
        x_label = [f'{i * 10}' for i in range(0, 51, 10)]   # score axis: 0..500
        y_label = [f'{i * 10}' for i in range(0, 101, 20)]  # reputation axis: 0..1000
        im = self.plt.pcolormesh(lst, cmap='Blues')  # colormap for the cell counts
        self.plt.set_xticks(range(0, 11, 2))  # x ticks every 2 cells
        self.plt.set_yticks(range(0, 11, 2))  # y ticks every 2 cells
        self.plt.set_xticklabels(x_label)
        self.plt.set_yticklabels(y_label)
        self.plt.set_xlabel("用户积分")       # x axis: user score
        self.plt.set_ylabel("垃圾分类信用")   # y axis: sorting reputation
        self.color_bar = self.figure.colorbar(im, pad=0.03, ax=self.plt)  # count scale
        self.plt.set_title("积分信用分析-大致热图")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        """Write the heatmap matrix to CSV (columns: reputation bins, rows: score bins)."""
        if self.export_lst is None:
            self.station.show_msg("保存数据", f"没有数据需要保存")
            return
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            # header: 10 reputation bins of width 100 ([0 100] .. [900 1000])
            f.write("#, " + ", ".join([f'[{i * 10} {i * 10 + 100}]' for i in range(0, 100, 10)]) + "\n")
            # one row per score bin of width 50 ([0 50] .. [450 500])
            for i, lst in zip(range(0, 50, 5), self.export_lst):
                f.write(f"[{i * 10} {i * 10 + 50}], " + ", ".join([f"{a}" for a in lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        super().refresh(tk_event.CountScoreReputationLargeEvent)
class StatisticsScoreDistributedProgram(StatisticsUserBaseProgram):
    """Histogram of the user-score distribution (50 bins of width 10 over [0, 500])."""

    def __init__(self, station, win, color):
        super(StatisticsScoreDistributedProgram, self).__init__(station, win, color, "积分分布")

    def show_result(self, lst: np.array):
        """Draw the score histogram and keep the bin counts for export."""
        bins = [i for i in range(0, 501, 10)]  # 50 bins of width 10
        res = self.plt.hist(lst, bins)
        self.export_lst = res[0]  # bin counts
        self.plt.set_xlabel("用户积分")
        self.plt.set_ylabel("分布")
        self.plt.set_title("积分分布直方图")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        """Write the histogram bin counts to CSV.

        Bug fix: the interval header used to emit 51 labels of width 100
        (``[0 100], [10 110], ...``) which neither matched the 50 bins nor
        their width of 10; it now emits one ``[i i+10]`` label per bin.
        Also bails out when the save dialog is cancelled.
        """
        if self.export_lst is None:
            self.station.show_msg("保存数据", f"没有数据需要保存")
            return
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write("积分区间," + ", ".join([f'[{i} {i + 10}]' for i in range(0, 500, 10)]) + "\n")
            f.write("积分分布," + ", ".join([f'{i}' for i in self.export_lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the plot (and colorbar, if any) and queue the Score distribution query."""
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
        event = tk_event.ScoreReputationDistributedEvent(self.station).start("Score", self)
        self.station.push_event(event)
class StatisticsReputationDistributedProgram(StatisticsUserBaseProgram):
    """Histogram of the reputation distribution (50 bins of width 20 over [0, 1000])."""

    def __init__(self, station, win, color):
        super(StatisticsReputationDistributedProgram, self).__init__(station, win, color, "垃圾分类信用分布")

    def show_result(self, lst: np.array):
        """Draw the reputation histogram and keep the bin counts for export."""
        bins = [i for i in range(0, 1001, 20)]  # 50 bins of width 20
        res = self.plt.hist(lst, bins)
        self.export_lst = res[0]  # bin counts
        self.plt.set_xlabel("垃圾分类信用")
        self.plt.set_ylabel("分布")
        self.plt.set_title("垃圾分类信用分布直方图")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        """Write the histogram bin counts to CSV.

        Bug fix: the interval header was copied from the score program and
        emitted 51 width-100 labels over [0, 500] although the bins here are
        width 20 over [0, 1000]; it now emits one ``[i i+20]`` label per bin.
        Also bails out when the save dialog is cancelled.
        """
        if self.export_lst is None:
            self.station.show_msg("保存数据", f"没有数据需要保存")
            return
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write("信用区间," + ", ".join([f'[{i} {i + 20}]' for i in range(0, 1000, 20)]) + "\n")
            f.write("信用分布," + ", ".join([f'{i}' for i in self.export_lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the plot (and colorbar, if any) and queue the Reputation distribution query."""
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
        event = tk_event.ScoreReputationDistributedEvent(self.station).start("Reputation", self)
        self.station.push_event(event)
class StatisticsPassRateGlobalProgram(StatisticsUserBaseProgram):
    """Donut chart of the global garbage-sorting pass rate."""

    def __init__(self, station, win, color):
        super(StatisticsPassRateGlobalProgram, self).__init__(station, win, color, "通过率-全局")

    def show_result(self, lst: np.array):
        """Draw pass/fail shares from ``lst[0][0]`` (the pass rate as a fraction)."""
        passing = float(lst[0][0])
        not_passing = 1 - passing
        data = [passing, not_passing]
        label = ["通过", "未通过"]
        # white percentage labels inside the ring (original note said "hide text" — the
        # textprops actually just color them white); width=0.6 makes it a donut
        res = self.plt.pie(data, radius=1, pctdistance=0.7, textprops=dict(color='w'),
                           startangle=45, autopct="%6.3f%%", wedgeprops=dict(width=0.6, edgecolor="w"))
        self.plt.legend(res[0], label, loc="lower left")
        self.plt.set_title("全局垃圾分类通过率")
        # numeric table below the chart with the same two values
        self.plt.table(cellText=[data], cellLoc="center", colLabels=label,
                       rowLabels=['全局'], rowLoc='center', loc='bottom', colWidths=[0.4] * 2)
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        # Pie charts have no tabular data worth exporting.
        self.station.show_msg("保存数据", f"数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the plot and queue the global pass-rate query (no filters)."""
        self.plt.cla()
        event = tk_event.PassingRateEvent(self.station).start([], [], [], [], self)
        self.station.push_event(event)
class StatisticsPassRateTypeProgram(StatisticsUserBaseProgram):
    """Nested donut chart of the pass rate per garbage type."""

    def __init__(self, station, win, color):
        super(StatisticsPassRateTypeProgram, self).__init__(station, win, color, "通过率-按类型")

    def show_result(self, lst: List[Tuple[bytes, any]]):
        """Draw one ring per garbage type; missing types default to 100% pass.

        :param lst: rows of (type byte b'1'..b'4', pass rate fraction)
        """
        data_1, data_2, data_3, data_4 = [1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]
        for i in lst:
            tmp: bytes = i[0]
            type_ = tmp.decode('utf-8')
            if type_ == '1':
                data_1 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '2':
                data_2 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '3':
                data_3 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '4':
                data_4 = [float(i[1]), 1 - float(i[1])]
        legend_text = []
        # concentric rings: one radius and start angle per type
        for data, r, s in zip([data_1, data_2, data_3, data_4], [0.3, 0.6, 0.9, 1.2], [0, 15, 30, 45]):
            res = self.plt.pie(data, radius=r, pctdistance=0.7,
                               startangle=s, autopct="%6.3f%%", wedgeprops=dict(width=0.3, edgecolor="w"))
            legend_text += res[0]
        label = []
        for i in GarbageType.GarbageTypeStrList_ch[1:]:
            label.append(f"{i}-通过")
            label.append(f"{i}-不通过")
        # numeric table below the chart, one row per type
        self.plt.table(cellText=[data_1, data_2, data_3, data_4], cellLoc="center", colLabels=['通过', '未通过'],
                       rowLabels=GarbageType.GarbageTypeStrList_ch[1:], rowLoc='center', loc='bottom')
        self.plt.legend(legend_text, label)
        self.plt.set_title("全局垃圾分类通过率")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        # Pie charts have no tabular data worth exporting.
        self.station.show_msg("保存数据", f"数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the plot and queue the per-type pass-rate query."""
        self.plt.cla()
        event = tk_event.PassingRateEvent(self.station).start(["GarbageType"],
                                                              [],
                                                              ["g.GarbageType=garbage.GarbageType"],
                                                              ["GarbageType"], self)
        self.station.push_event(event)
class StatisticsPassRateLocProgram(StatisticsUserBaseProgram):
    """Donut chart of the pass rate filtered by a user-entered location."""

    def __init__(self, station, win, color):
        super(StatisticsPassRateLocProgram, self).__init__(station, win, color, "通过率-按区域")
        self.loc_frame = tk.Frame(self.frame)
        self.loc_title = tk.Label(self.loc_frame)
        self.loc_enter = tk.Entry(self.loc_frame), tk.StringVar()

    def conf_gui(self, n: int = 1):
        """Lay out the base widgets plus the location entry row."""
        super(StatisticsPassRateLocProgram, self).conf_gui(n)
        title_font = make_font(size=16)
        self.loc_frame['bg'] = self.bg_color
        self.loc_frame['bd'] = 5
        self.loc_frame['relief'] = "ridge"
        self.loc_frame.place(relx=0.0, rely=0.92, relwidth=0.33, relheight=0.07)
        self.loc_title['font'] = title_font
        self.loc_title['text'] = "区域:"
        self.loc_title['bg'] = self.bg_color
        self.loc_title['anchor'] = 'e'
        self.loc_enter[0]['font'] = title_font
        self.loc_enter[0]['textvariable'] = self.loc_enter[1]
        self.loc_title.place(relx=0.0, rely=0.02, relwidth=0.3, relheight=0.96)
        self.loc_enter[0].place(relx=0.3, rely=0.02, relwidth=0.7, relheight=0.96)

    def show_result(self, lst: np.array):
        """Draw pass/fail shares from ``lst[0][0]`` (the pass rate as a fraction)."""
        passing = float(lst[0][0])
        label = ["通过", "未通过"]
        not_passing = 1 - passing
        data = [passing, not_passing]
        # white percentage labels inside the donut ring
        res = self.plt.pie(data, radius=1, pctdistance=0.7, textprops=dict(color='w'),
                           startangle=45, autopct="%6.3f%%", wedgeprops=dict(width=0.6, edgecolor="w"))
        self.plt.legend(res[0], label, loc="lower left")
        self.plt.table(cellText=[data], cellLoc="center", colLabels=label,
                       rowLabels=[f"区域"], rowLoc='center', loc='bottom')
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        # Consistency fix: every other pass-rate program overrides export with
        # this message; previously this class inherited the silent no-op base.
        self.station.show_msg("保存数据", f"数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the plot and queue the pass-rate query filtered by the entered location."""
        where = self.loc_enter[1].get()
        if len(where) == 0:
            where = "全局"  # empty entry -> no filter, title says "global"
            where_ = []
        else:
            where_ = [f"Location='{where}'"]
        self.plt.cla()
        self.plt.set_title(f"{where}垃圾分类通过率")
        event = tk_event.PassingRateEvent(self.station).start([], where_, where_, [], self)
        self.station.push_event(event)
class StatisticsPassRateTypeAndLocProgram(StatisticsUserBaseProgram):
    """Nested donut chart of the per-type pass rate, filtered by a user-entered location."""

    def __init__(self, station, win, color):
        super(StatisticsPassRateTypeAndLocProgram, self).__init__(station, win, color, "通过率-按类型和区域")
        self.loc_frame = tk.Frame(self.frame)
        self.loc_title = tk.Label(self.loc_frame)
        self.loc_enter = tk.Entry(self.loc_frame), tk.StringVar()

    def conf_gui(self, n: int = 1):
        """Lay out the base widgets plus the location entry row."""
        super(StatisticsPassRateTypeAndLocProgram, self).conf_gui(n)
        title_font = make_font(size=16)
        self.loc_frame['bg'] = self.bg_color
        self.loc_frame['relief'] = "ridge"
        self.loc_frame['bd'] = 5
        self.loc_frame.place(relx=0.0, rely=0.92, relwidth=0.33, relheight=0.07)
        self.loc_title['font'] = title_font
        self.loc_title['bg'] = self.bg_color
        self.loc_title['text'] = "区域:"
        self.loc_title['anchor'] = 'e'
        self.loc_enter[0]['font'] = title_font
        self.loc_enter[0]['textvariable'] = self.loc_enter[1]
        self.loc_title.place(relx=0.0, rely=0.02, relwidth=0.3, relheight=0.96)
        self.loc_enter[0].place(relx=0.3, rely=0.02, relwidth=0.7, relheight=0.96)

    def show_result(self, lst: List[Tuple[bytes, any]]):
        """Draw one ring per garbage type; missing types default to 100% pass.

        :param lst: rows of (type byte b'1'..b'4', pass rate fraction)
        """
        data_1, data_2, data_3, data_4 = [1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]
        for i in lst:
            tmp: bytes = i[0]
            type_ = tmp.decode('utf-8')
            if type_ == '4':
                data_4 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '3':
                data_3 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '2':
                data_2 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '1':
                data_1 = [float(i[1]), 1 - float(i[1])]
        legend_text = []
        # concentric rings: one radius and start angle per type
        for data, r, s in zip([data_1, data_2, data_3, data_4], [0.3, 0.6, 0.9, 1.2], [5, 20, 35, 50]):
            res = self.plt.pie(data, radius=r, pctdistance=0.7,
                               startangle=s, autopct="%6.3f%%", wedgeprops=dict(width=0.3, edgecolor="w"))
            legend_text += res[0]
        label = []
        for i in GarbageType.GarbageTypeStrList_ch[1:]:
            label.append(f"{i}-通过")
            label.append(f"{i}-不通过")
        # numeric table below the chart, one row per type
        self.plt.table(cellText=[data_1, data_2, data_3, data_4], cellLoc="center", colLabels=['通过', '未通过'],
                       rowLabels=GarbageType.GarbageTypeStrList_ch[1:], rowLoc='center', loc='bottom')
        self.plt.legend(legend_text, label)
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        # Pie charts have no tabular data worth exporting.
        self.station.show_msg("保存数据", f"数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the plot and queue the per-type pass-rate query filtered by location."""
        where = self.loc_enter[1].get()
        if len(where) == 0:
            where = "全局"  # empty entry -> no filter, title says "global"
            where_ = []
        else:
            where_ = [f"Location='{where}'"]
        self.plt.cla()
        self.plt.set_title(f"{where}垃圾分类通过率")
        event = tk_event.PassingRateEvent(self.station).start(["GarbageType"],
                                                              where_,
                                                              where_ + ["g.GarbageType=garbage.GarbageType"],
                                                              ["GarbageType"], self)
        self.station.push_event(event)
class StatisticsDateProgramBase(StatisticsTimeProgramBase):
    """Base for day-granularity statistics (last N days) reusing the two-plot layout."""

    def _conf(self, bg_color, days: int = 7, days_sep: int = 1):
        """Configure colors plus the day window.

        :param days: number of days shown on the x axis
        :param days_sep: tick spacing in days
        """
        super(StatisticsDateProgramBase, self)._conf(bg_color)
        self._days = days
        self._days_sep = days_sep

    def export(self, title, func: Callable):
        """Export the per-day statistics rows to a user-chosen CSV file.

        :param title: column header for the classification column
        :param func: maps one row of ``self.export_lst`` to its label
        """
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write(f"Days, Count, {title}\n")
            for i in self.export_lst:
                f.write(f"{i[0]}, {i[1]}, {func(i)}\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def show_result(self, res: Dict[str, any], lst: List, end_time: Optional[str] = None):
        """Render per-day counts as a line chart (plt_1) and a stacked bar chart (plt_2).

        Note: compared with the hourly base class the roles are swapped —
        here plt_1 is the line chart and plt_2 the bar chart.

        :param res: mapping with key 'res_type' plus one entry per series
        :param lst: raw rows kept in ``self.export_lst`` for CSV export
        :param end_time: 'YYYY-MM-DD' string for the newest day; defaults to now
        """
        if end_time is None:
            end_time = datetime.datetime.now()
        else:
            end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d')
        bottom = np.zeros(self._days)  # running stack base for the bar chart
        label_num = [i for i in range(self._days)]
        label_str = [f"{i}" for i in range(self._days)]
        res_type_lst: List = res['res_type']
        self.export_lst = lst
        max_y_plot = 1  # y-axis ceiling for the line chart
        max_y_bar = 1   # y-axis ceiling for the stacked bar chart
        for res_type in res_type_lst:
            res_count: List[Tuple[int, int, bytes]] = res[res_type]  # (days-before-today, count, group value)
            if len(res_count) != 0:
                color = self.check_show(res_type)
                if color is None:
                    continue
                y = [0 for _ in range(self._days)]
                for i in range(0, len(res_count)):  # index y by days-before-today
                    y[res_count[i][0]] = res_count[i][1]
                y = y[::-1]  # reverse so that more recent days sit toward the right of the axis
                max_y_plot = max(max(y), max_y_plot)
                self.color_show_dict[res_type] = color
                self.plt_1.plot(label_num, y,
                                color=color,
                                label=res_type,
                                marker='o',
                                markersize=5)
                self.plt_2.bar(label_num, y,
                               color=color,
                               align="center",
                               bottom=bottom,
                               tick_label=label_str,
                               label=res_type)
                bottom += np.array(y)
        if self.legend_show[1].get() == 1:  # legend checkbox is enabled
            self.plt_1.legend(loc="upper left")
            self.plt_2.legend(loc="upper left")
        # x tick labels: actual dates, oldest first, stepping by _days_sep
        x_label = []
        for i in range(self._days - 1, -1, -self._days_sep):
            d = end_time - datetime.timedelta(days=i)
            x_label.append(d.strftime("%Y-%m-%d"))
        self.plt_1.set_xlim(-1, self._days)
        self.plt_1.set_xticks([i for i in range(0, self._days, self._days_sep)])
        self.plt_1.set_xticklabels(x_label, rotation=20)  # dates run oldest -> newest
        self.plt_1.set_ylim(0, max_y_plot + max_y_plot * 0.1)
        step = ceil(max_y_plot / 5)  # round up so at most ~5 intermediate ticks
        if step > 0:
            y_ticks = [i for i in range(0, max_y_plot, step)]
            y_ticklabels = [f'{i}' for i in range(0, max_y_plot, step)]
        else:
            y_ticks = []
            y_ticklabels = []
        # always include the exact maximum as the top tick
        y_ticks.append(max_y_plot)
        y_ticklabels.append(f"{max_y_plot}")
        self.plt_1.set_yticks(y_ticks)
        self.plt_1.set_yticklabels(y_ticklabels)
        self.plt_1.spines['right'].set_color('none')
        self.plt_1.spines['top'].set_color('none')
        self.plt_1.grid(axis='y')
        self.plt_1.set_title(f"{self.program_title}折线图")
        self.plt_2.set_xlim(-1, self._days)
        self.plt_2.set_xticks([i for i in range(0, self._days, self._days_sep)])
        self.plt_2.set_xticklabels(x_label, rotation=20)
        max_y_bar = int(max(bottom.max(), max_y_bar))
        self.plt_2.set_ylim(0, max_y_bar + max_y_bar * 0.1)
        step = ceil(max_y_bar / 5)  # round up so at most ~5 intermediate ticks
        if step > 0:
            y_ticks = [i for i in range(0, max_y_bar, step)]
            y_ticklabels = [f'{i}' for i in range(0, max_y_bar, step)]
        else:
            y_ticks = []
            y_ticklabels = []
        y_ticks.append(max_y_bar)
        y_ticklabels.append(f"{max_y_bar}")
        self.plt_2.set_yticks(y_ticks)
        self.plt_2.set_yticklabels(y_ticklabels)  # original note said "reversed"; labels are ascending here
        self.plt_2.spines['right'].set_color('none')
        self.plt_2.spines['top'].set_color('none')
        self.plt_2.grid(axis='y')
        self.plt_2.set_title(f"{self.program_title}柱状图")
        self.canvas.draw()
        self.toolbar.update()
        self.update_listbox()
class StatisticsDateTypeProgram(StatisticsDateProgramBase):
    """Per-day statistics grouped by garbage type."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)
        # fixed palette: one color per garbage type (indices 1-4)
        for idx, c in enumerate(("#00BFFF", "#32CD32", "#DC143C", "#A9A9A9"), start=1):
            self.color_show_dict[GarbageType.GarbageTypeStrList_ch[idx]] = c

    def refresh(self):
        """Clear the plots and queue a 7-day count-by-type query."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-type daily counts as CSV."""
        super().export("Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Translate the raw type byte in row ``i`` to its Chinese label."""
        raw: bytes = i[2]
        return GarbageType.GarbageTypeStrList_ch[int(raw.decode('utf-8'))]
class StatisticsDateLocProgram(StatisticsDateProgramBase):
    """Per-day statistics grouped by drop-off location."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the plots and queue a 7-day count-by-location query."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["Location"], lambda row: row[2], self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-location daily counts as CSV."""
        super().export("Location", lambda row: row[2])
class StatisticsDateTypeLocProgram(StatisticsDateProgramBase):
    """Per-day statistics grouped by garbage type and drop-off location."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the plots and queue a 7-day count-by-type-and-location query."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-type-and-location daily counts as CSV."""
        super().export("Type-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a '<type>-<location>' label from row ``i``."""
        raw: bytes = i[2]
        type_name = GarbageType.GarbageTypeStrList_ch[int(raw.decode('utf-8'))]
        return f"{type_name}-{i[3]}"
class StatisticsDateCheckResultProgram(StatisticsDateProgramBase):
    """Per-day statistics grouped by check result (pass / fail)."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)
        self.color_show_dict.update({'Pass': "#00BFFF", 'Fail': "#DC143C"})

    def refresh(self):
        """Clear the plots and queue a 7-day count-by-check-result query."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["CheckResult"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-result daily counts as CSV."""
        super().export("Result", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Map the check result in row ``i`` to 'Pass'/'Fail'/'None'.

        Per the original note: the garbage table returns BIT columns as bytes,
        but the garbage_7/garbage_30 tables return them as int — hence == 1.
        """
        raw: int = i[2]
        if raw is None:
            return 'None'
        return 'Pass' if raw == 1 else 'Fail'
class StatisticsDateCheckResultAndTypeProgram(StatisticsDateProgramBase):
    """Per-day statistics grouped by check result and garbage type."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the plots and queue a 7-day count-by-result-and-type query."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station).start(7, ["CheckResult", "GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-result-and-type daily counts as CSV.

        Bug fix: the CSV title used to say "Result-Location" although this
        program groups by type (labels were swapped with the Loc sibling).
        """
        super().export("Result-Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<type>' label from row ``i``.

        NOTE(review): sibling StatisticsDateCheckResultProgram compares the
        date-table int against 1, while this compares against DBBit.BIT_1 —
        confirm both comparisons are equivalent for the garbage_7/30 tables.
        """
        data_1: int = i[2]
        data_2: bytes = i[3]
        if data_1 is None:
            tmp = 'None'
        elif data_1 == DBBit.BIT_1:
            tmp = 'Pass'
        else:
            tmp = 'Fail'
        return tmp + f'-{GarbageType.GarbageTypeStrList_ch[int(data_2.decode("utf-8"))]}'
class StatisticsDateCheckResultAndLocProgram(StatisticsDateProgramBase):
    """Per-day statistics grouped by check result and drop-off location."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the plots and queue a 7-day count-by-result-and-location query."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station).start(7, ["CheckResult", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the per-result-and-location daily counts as CSV.

        Bug fix: the CSV title used to say "Result-Type" although this
        program groups by location (labels were swapped with the type sibling).
        """
        super().export("Result-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<location>' label from row ``i``."""
        data_1: int = i[2]
        if data_1 is None:
            tmp = 'None'
        elif data_1 == DBBit.BIT_1:
            tmp = 'Pass'
        else:
            tmp = 'Fail'
        return tmp + f"-{i[3]}"
class StatisticsDateDetailProgram(StatisticsDateProgramBase):
    """Per-day statistics fully broken down by result, type and location."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the plots and queue the fully-detailed 7-day count query."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["CheckResult", "GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the detailed daily counts as CSV."""
        super().export("Detail", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<type>-<location>' label from row ``i``."""
        result_val: int = i[2]
        type_byte: bytes = i[3]
        if result_val is None:
            result = 'None'
        else:
            result = 'Pass' if result_val == DBBit.BIT_1 else 'Fail'
        type_name = GarbageType.GarbageTypeStrList_ch[int(type_byte.decode("utf-8"))]
        return result + f'-{type_name}' + f'-{i[4]}'
class StatisticsDate7TypeProgram(StatisticsDateTypeProgram):
    """Last-7-days statistics grouped by garbage type."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近7日-按投放类型")
        self._conf("#abc88b", 7, 1)
        # fixed palette: one color per garbage type (indices 1-4)
        for idx, c in enumerate(("#00BFFF", "#32CD32", "#DC143C", "#A9A9A9"), start=1):
            self.color_show_dict[GarbageType.GarbageTypeStrList_ch[idx]] = c
class StatisticsDate7LocProgram(StatisticsDateLocProgram):
    """Last-7-days statistics grouped by drop-off location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近7日-按投放区域")
        self._conf("#abc88b", 7, 1)
class StatisticsDate7TypeLocProgram(StatisticsDateTypeLocProgram):
    """Last-7-days statistics grouped by garbage type and location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近7日-按投放类型和区域")
        self._conf("#abc88b", 7, 1)
class StatisticsDate7CheckResultProgram(StatisticsDateCheckResultProgram):
    """Last-7-days statistics grouped by check result."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近7日-按检查结果")
        self._conf("#abc88b", 7, 1)
        self.color_show_dict.update({'Pass': "#00BFFF", 'Fail': "#DC143C"})
class StatisticsDate7CheckResultAndTypeProgram(StatisticsDateCheckResultAndTypeProgram):
    """Last-7-days statistics grouped by check result and type."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近7日-按检查结果和类型")
        self._conf("#abc88b", 7, 1)
class StatisticsDate7CheckResultAndLocProgram(StatisticsDateCheckResultAndLocProgram):
    """Last-7-days statistics grouped by check result and location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近7日-按检查结果和区域")
        self._conf("#abc88b", 7, 1)
class StatisticsDate7DetailProgram(StatisticsDateDetailProgram):
    """Last-7-days statistics with the full result/type/location breakdown."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近7日-详细分类")
        self._conf("#abc88b", 7, 1)
class StatisticsDate30TypeProgram(StatisticsDateTypeProgram):
    """Last-30-days statistics grouped by garbage type (ticks every 5 days)."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近30日-按投放类型")
        self._conf("#abc88b", 30, 5)
        # fixed palette: one color per garbage type (indices 1-4)
        for idx, c in enumerate(("#00BFFF", "#32CD32", "#DC143C", "#A9A9A9"), start=1):
            self.color_show_dict[GarbageType.GarbageTypeStrList_ch[idx]] = c
class StatisticsDate30LocProgram(StatisticsDateLocProgram):
    """Statistics page: last 30 days, grouped by drop-off location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近30日-按投放区域")
        self._conf("#abc88b", 30, 5)  # presumably (bg color, day span, tick step) — TODO confirm in base class
class StatisticsDate30TypeLocProgram(StatisticsDateTypeLocProgram):
    """Statistics page: last 30 days, grouped by garbage type and location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近30日-按投放类型和区域")
        self._conf("#abc88b", 30, 5)  # presumably (bg color, day span, tick step) — TODO confirm in base class
class StatisticsDate30CheckResultProgram(StatisticsDateCheckResultProgram):
    """Statistics page: last 30 days, grouped by check result (Pass/Fail)."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近30日-按检查结果")
        self._conf("#abc88b", 30, 5)
        # Fixed colors for the two check-result series.
        self.color_show_dict.update({'Pass': "#00BFFF", 'Fail': "#DC143C"})
class StatisticsDate30CheckResultAndTypeProgram(StatisticsDateCheckResultAndTypeProgram):
    """Statistics page: last 30 days, grouped by check result and garbage type."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近30日-按检查结果和类型")
        self._conf("#abc88b", 30, 5)  # presumably (bg color, day span, tick step) — TODO confirm in base class
class StatisticsDate30CheckResultAndLocProgram(StatisticsDateCheckResultAndLocProgram):
    """Statistics page: last 30 days, grouped by check result and location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近30日-按检查结果和区域")
        self._conf("#abc88b", 30, 5)  # presumably (bg color, day span, tick step) — TODO confirm in base class
class StatisticsDate30DetailProgram(StatisticsDateDetailProgram):
    """Statistics page: last 30 days, fully detailed breakdown."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "最近30日-详细分类")
        self._conf("#abc88b", 30, 5)  # presumably (bg color, day span, tick step) — TODO confirm in base class
# Registry of every admin GUI program class.  The admin station consumes this
# list to build its menus/pages; list order presumably defines display order —
# TODO confirm against the consumer in the admin module.
all_program = [WelcomeProgram, CreateNormalUserProgram, CreateManagerUserProgram, CreateAutoNormalUserProgram,
               CreateGarbageProgram, DeleteUserProgram, DeleteUsersProgram, DeleteGarbageProgram,
               DeleteGarbageMoreProgram, DeleteAllGarbageProgram, SearchUserProgram, SearchUserAdvancedProgram,
               SearchGarbageProgram, SearchGarbageAdvancedProgram, SearchAdvancedProgram, UpdateUserScore,
               UpdateUserReputation, UpdateGarbageTypeProgram, UpdateGarbageCheckResultProgram,
               ExportGarbageProgram, ExportUserProgram, CreateUserFromCSVProgram, AboutProgram,
               StatisticsTimeLocProgram, StatisticsTimeTypeProgram, StatisticsTimeTypeLocProgram,
               StatisticsTimeCheckResultProgram, StatisticsTimeCheckResultAndTypeProgram,
               StatisticsTimeCheckResultAndLocProgram, StatisticsTimeDetailProgram, StatisticsUserTinyProgram,
               StatisticsUserLargeProgram, StatisticsScoreDistributedProgram, StatisticsReputationDistributedProgram,
               StatisticsPassRateGlobalProgram, StatisticsPassRateTypeProgram, StatisticsPassRateLocProgram,
               StatisticsPassRateTypeAndLocProgram, StatisticsDate7TypeProgram, StatisticsDate7LocProgram,
               StatisticsDate7TypeLocProgram, StatisticsDate7CheckResultProgram,
               StatisticsDate7CheckResultAndTypeProgram, StatisticsDate7CheckResultAndLocProgram,
               StatisticsDate7DetailProgram, StatisticsDate30TypeProgram, StatisticsDate30LocProgram,
               StatisticsDate30TypeLocProgram, StatisticsDate30CheckResultProgram,
               StatisticsDate30CheckResultAndTypeProgram, StatisticsDate30CheckResultAndLocProgram,
               StatisticsDate30DetailProgram]
| 38.318789
| 120
| 0.591483
|
import abc
import datetime
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.filedialog import askdirectory, askopenfilename, asksaveasfilename
from math import ceil
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.axes import Axes
import numpy as np
from matplotlib.colorbar import Colorbar
from matplotlib.figure import Figure
from tool.color import random_color
from tool.typing import *
from tool.tk import make_font, set_tk_disable_from_list
from tool.login import create_uid
from conf import Config
from . import admin
from . import admin_event as tk_event
from sql import DBBit
from sql.user import find_user_by_name
from core.garbage import GarbageType
class AdminProgram(metaclass=abc.ABCMeta):
    """Abstract base for every page ("program") hosted by the admin station GUI.

    A subclass builds its widgets into ``self.frame`` inside :meth:`conf_gui`
    and implements the enable/disable hooks used while background events run.
    """

    def __init__(self, station: "admin.AdminStation", win: Union[tk.Frame, tk.Toplevel, tk.Tk], color: str, title: str):
        self.station = station  # owning station: event queue + message dialogs
        self.win = win  # parent Tk container the frame is created under
        self.color = color  # background color shared by this page's widgets
        self.frame = tk.Frame(self.win)
        self.frame['bg'] = color
        self.program_title = title  # label the station shows for this page

    @abc.abstractmethod
    def set_disable(self):
        """Disable the page's interactive widgets (while a task runs)."""
        ...

    @abc.abstractmethod
    def reset_disable(self):
        """Re-enable the widgets disabled by :meth:`set_disable`."""
        ...

    @abc.abstractmethod
    def conf_gui(self, n: int = 1):
        """Create and lay out the page's widgets; *n* is a zoom multiplier."""
        ...

    def to_program(self):
        """Hook invoked when the page becomes active (default: no-op)."""
        pass

    def leave_program(self):
        """Hook invoked when the page is left (default: no-op)."""
        pass

    def get_title(self) -> str:
        return self.program_title

    def get_program_frame(self) -> Tuple[str, tk.Frame]:
        """Return ``(title, frame)`` for the station to mount."""
        return self.program_title, self.frame
class WelcomeProgram(AdminProgram):
    """Landing page showing a title and a static help text."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "欢迎页")
        self.title = tk.Label(self.frame)
        self.info = tk.Label(self.frame)
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        # Base font sizes, scaled by the configured zoom factor.
        self.title_font_size = int(25 * n)
        self.info_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size, weight="bold")
        info_font = make_font(size=self.info_font_size)
        self.title['font'] = title_font
        self.title['bg'] = self.color
        self.title['text'] = '欢迎使用 HGSSystem 管理员系统\n[帮助]'
        self.info['bg'] = self.color
        self.info['font'] = info_font
        self.info['anchor'] = 'nw'
        self.info['justify'] = 'left'
        # Static help text (user-facing, must stay verbatim).
        self.info['text'] = (f'''
HGSSystem 管理者界面:
  1) 点击菜单按钮进入子菜单或程序
  2) 创建 菜单包含创建类的程序
  3) 删除 菜单包含删除类的程序
  4) 搜索 菜单包含数据分析类的程序
  5) 更新 菜单包含数据更新类的程序
  6) 当离开操作系统时请退出登录以确保安全
  7) 只能使用具有管理员权限的账号登陆系统
  8) 只有admin用户可以完成危险操作(例如删除所有垃圾袋数据)
程序的运行:
  1) 在菜单中选中程序后,根据程序界面提示完成操作
  2) 操作过程通常会显示进度条,除非任务执行迅速
  3) 结果通常会被反馈, 且不会自动消失
系统登录:
  1) 仅Manager用户可以登录
'''.strip())
        self.title.place(relx=0.1, rely=0.0, relwidth=0.8, relheight=0.2)
        self.info.place(relx=0.05, rely=0.21, relwidth=0.90, relheight=0.75)

    def set_disable(self):
        # Nothing interactive on this page.
        pass

    def reset_disable(self):
        pass
class AboutProgram(AdminProgram):
    """Static "about" page; text comes from ``Config.about_info``."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "关于")
        self.title = tk.Label(self.frame)
        self.info = tk.Label(self.frame)
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        # Base font sizes, scaled by the configured zoom factor.
        self.title_font_size = int(25 * n)
        self.info_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size, weight="bold")
        info_font = make_font(size=self.info_font_size)
        self.title['font'] = title_font
        self.title['bg'] = self.color
        self.title['text'] = '关于 HGSSystem 管理员系统'
        self.info['bg'] = self.color
        self.info['font'] = info_font
        self.info['anchor'] = 'nw'
        self.info['justify'] = 'left'
        self.info['text'] = Config.about_info
        self.title.place(relx=0.1, rely=0.0, relwidth=0.8, relheight=0.2)
        self.info.place(relx=0.05, rely=0.21, relwidth=0.90, relheight=0.75)

    def set_disable(self):
        # Nothing interactive on this page.
        pass

    def reset_disable(self):
        pass
class CreateUserProgramBase(AdminProgram):
    """Base page for creating a user from name/password/phone fields.

    Subclasses call :meth:`_conf` to pick the frame color and whether the
    created account gets manager rights.
    """

    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.enter_frame = tk.Frame(self.frame)
        # Three rows: user name, password, phone.
        self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(3)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(3)]
        self.var: List[tk.Variable] = [tk.StringVar() for _ in range(3)]
        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]
        self._conf("#FA8072", False)  # default: non-manager, salmon background
        self.__conf_font()

    def _conf(self, bg_color, is_manager: bool):
        """Configure frame color and manager flag; returns self for chaining."""
        self.bg_color = bg_color
        self.is_manager = is_manager
        return self

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = self.bg_color
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.30)
        height = 0.1
        # Lay out the three label/entry rows top to bottom.
        for lb, text, enter, var in zip(self.title, ["用户名:", "用户密码:", "手机号:"], self.enter, self.var):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = self.bg_color
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.17)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.17)
            height += 0.30
        for btn, text, x, func in zip(self.btn,
                                      ["创建用户", "获取用户ID"],
                                      [0.2, 0.6],
                                      [lambda: self.create_by_name(), lambda: self.get_uid()]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
            btn.place(relx=x, rely=0.7, relwidth=0.2, relheight=0.08)

    def __get_info(self) -> Optional[Tuple[uname_t, passwd_t, str]]:
        """Read and validate the three fields; None (plus a dialog) on failure."""
        name: uname_t = self.var[0].get()
        passwd: passwd_t = self.var[1].get()
        phone: str = self.var[2].get()
        if len(name) == 0 or len(passwd) == 0 or len(phone) != 11:
            self.station.show_msg("用户创建失败", "请再次尝试, 输入用户名, 用户密码和11位手机号")
            return None
        return name, passwd, phone

    def create_by_name(self):
        """Queue a create-user event for the entered credentials."""
        res = self.__get_info()
        if res is None:
            return
        name, passwd, phone = res
        event = tk_event.CreateUserEvent(self.station).start(name, passwd, phone, self.is_manager)
        self.station.push_event(event)

    def get_uid(self):
        """Show the deterministic user ID derived from the entered credentials."""
        res = self.__get_info()
        if res is None:
            return
        name, passwd, phone = res
        uid = create_uid(name, passwd, phone)
        self.station.show_msg("获取用户ID", f"用户名: {name}\n用户ID: {uid}")

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        set_tk_disable_from_list(self.enter)

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        set_tk_disable_from_list(self.enter, flat='normal')
class CreateNormalUserProgram(CreateUserProgramBase):
    """Create-user page for normal (non-manager) accounts."""

    def __init__(self, station, win, color):
        # Keeps the base default configuration (non-manager, salmon frame).
        super().__init__(station, win, color, "创建普通用户")
class CreateManagerUserProgram(CreateUserProgramBase):
    """Create-user page for manager accounts."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "创建管理员")
        # Override the base defaults: blue frame, manager rights.
        self._conf("#4b5cc4", True)
class CreateAutoNormalUserProgram(AdminProgram):
    """Create an "automatic" normal user identified only by a phone number."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "创建自动用户")
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.btn: tk.Button = tk.Button(self.frame)
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#bce672"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.12)
        self.title['font'] = title_font
        self.title['text'] = "手机号:"
        self.title['bg'] = "#bce672"
        self.title['anchor'] = 'e'
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.02, rely=0.25, relwidth=0.25, relheight=0.50)
        self.enter.place(relx=0.30, rely=0.25, relwidth=0.60, relheight=0.50)
        self.btn['font'] = btn_font
        self.btn['text'] = "创建用户"
        self.btn['bg'] = Config.tk_btn_bg
        self.btn['command'] = lambda: self.create_user()
        self.btn.place(relx=0.4, rely=0.7, relwidth=0.2, relheight=0.08)

    def create_user(self):
        """Validate the phone number and queue a create-user event."""
        phone = self.var.get()
        if len(phone) != 11:
            self.station.show_msg("UserInfoError", "Please, enter Phone(11)")
            # BUG FIX: previously execution fell through here, so the event
            # was queued (and the user created) even with an invalid phone.
            return
        event = tk_event.CreateUserEvent(self.station).start(None, None, phone, False)
        self.station.push_event(event)

    def set_disable(self):
        self.btn['state'] = 'disable'
        self.enter['state'] = 'disable'

    def reset_disable(self):
        self.btn['state'] = 'normal'
        self.enter['state'] = 'normal'
class CreateGarbageProgram(AdminProgram):
    """Create N garbage bags and optionally export their QR codes to a directory."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "创建垃圾袋")
        self.enter_frame = tk.Frame(self.frame)
        # Two rows: bag count, export directory.
        self.title: List[tk.Label] = [tk.Label(self.enter_frame), tk.Label(self.enter_frame)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame), tk.Entry(self.enter_frame)]
        self.var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]
        self.create_btn: tk.Button = tk.Button(self.frame)
        self.file_btn: tk.Button = tk.Button(self.frame)
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#b69968"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.17)
        height = 0.1
        for lb, text, enter, var in zip(self.title, ["数量:", "导出位置:"], self.enter, self.var):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#b69968"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            height += 0.43
        for btn, text, x, func in zip([self.create_btn, self.file_btn],
                                      ["创建垃圾袋", "选择目录"],
                                      [0.2, 0.6],
                                      [lambda: self.create_garbage(), lambda: self.choose_file()]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
            btn.place(relx=x, rely=0.7, relwidth=0.2, relheight=0.08)

    def choose_file(self):
        """Ask for the QR-code export directory and store it in the entry."""
        path = askdirectory(title='选择二维码导出位置')
        self.var[1].set(path)

    def create_garbage(self):
        """Validate the count and queue a create-garbage event."""
        try:
            count = int(self.var[0].get())
            if count <= 0:
                raise ValueError
        except (ValueError, TypeError):
            self.station.show_msg("类型错误", "数量必须为大于0的数字")
        else:
            path = self.var[1].get()
            if len(path) == 0:
                path = None  # empty field means: do not export QR codes
            event = tk_event.CreateGarbageEvent(self.station).start(path, count)
            self.station.push_event(event)

    def set_disable(self):
        self.create_btn['state'] = 'disable'
        self.file_btn['state'] = 'disable'
        set_tk_disable_from_list(self.enter)

    def reset_disable(self):
        self.create_btn['state'] = 'normal'
        self.file_btn['state'] = 'normal'
        set_tk_disable_from_list(self.enter, flat='normal')
class ExportProgramBase(AdminProgram):
    """Base page for exporting QR codes, either by ID or by an SQL-style condition.

    Two stacked form frames share one layout loop: ``where_*`` (condition
    export, upper frame) and ``gid_*`` (ID export, lower frame).  Subclasses
    call :meth:`_conf` and override :meth:`export_id` / :meth:`export_where`.
    """

    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.gid_frame = tk.Frame(self.frame)
        self.gid_title: List[tk.Label] = [tk.Label(self.gid_frame), tk.Label(self.gid_frame)]
        self.gid_enter: List[tk.Entry] = [tk.Entry(self.gid_frame), tk.Entry(self.gid_frame)]
        self.gid_var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]
        self.where_frame = tk.Frame(self.frame)
        self.where_title: List[tk.Label] = [tk.Label(self.where_frame), tk.Label(self.where_frame)]
        self.where_enter: List[tk.Entry] = [tk.Entry(self.where_frame), tk.Entry(self.where_frame)]
        self.where_var: List[tk.Variable] = [tk.StringVar(), tk.StringVar()]
        self.create_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.file_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self._conf("", [], [], [])  # placeholders; subclasses supply real values
        self.__conf_font()

    def _conf(self, bg_color: str, title_id, title_where, title_command):
        """Set frame color and the label/button caption lists used by conf_gui."""
        self.bg_color = bg_color
        self.title_id = title_id
        self.title_where = title_where
        self.title_command = title_command

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.where_frame['bg'] = self.bg_color
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.2, relwidth=0.6, relheight=0.17)
        self.gid_frame['bg'] = self.bg_color
        self.gid_frame['bd'] = 5
        self.gid_frame['relief'] = "ridge"
        self.gid_frame.place(relx=0.2, rely=0.6, relwidth=0.6, relheight=0.17)
        height = 0.1
        # One loop populates both frames in lockstep: plain names are the
        # "by ID" widgets, the *_w names are the "by condition" widgets.
        for lb, text, enter, var, lb_w, text_w, enter_w, var_w in zip(
                self.gid_title, self.title_id, self.gid_enter, self.gid_var,
                self.where_title, self.title_where, self.where_enter, self.where_var):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = self.bg_color
            lb['anchor'] = 'e'
            lb_w['font'] = title_font
            lb_w['text'] = text_w
            lb_w['bg'] = self.bg_color
            lb_w['anchor'] = 'e'
            enter['textvariable'] = var
            enter['font'] = title_font
            enter_w['textvariable'] = var_w
            enter_w['font'] = title_font
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            lb_w.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter_w.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            height += 0.43
        for btn, text in zip(self.create_btn + self.file_btn, self.title_command):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
        # Index 1 (condition) widgets sit next to the upper frame, index 0
        # (by ID) next to the lower frame.
        self.create_btn[1]['command'] = self.export_where
        self.create_btn[0]['command'] = self.export_id
        self.create_btn[1].place(relx=0.2, rely=0.39, relwidth=0.25, relheight=0.08)
        self.create_btn[0].place(relx=0.2, rely=0.79, relwidth=0.25, relheight=0.08)
        self.file_btn[1]['command'] = self.choose_file_where
        self.file_btn[0]['command'] = self.choose_file_id
        self.file_btn[1].place(relx=0.6, rely=0.39, relwidth=0.2, relheight=0.08)
        self.file_btn[0].place(relx=0.6, rely=0.79, relwidth=0.2, relheight=0.08)

    def choose_file_id(self):
        path = askdirectory(title='选择二维码导出位置')
        self.gid_var[1].set(path)

    def choose_file_where(self):
        path = askdirectory(title='选择二维码导出位置')
        self.where_var[1].set(path)

    def export_id(self):
        """Export by ID — implemented by subclasses."""
        ...

    def export_where(self):
        """Export by condition — implemented by subclasses."""
        ...

    def set_disable(self):
        set_tk_disable_from_list(self.gid_enter)
        set_tk_disable_from_list(self.create_btn)
        set_tk_disable_from_list(self.file_btn)

    def reset_disable(self):
        set_tk_disable_from_list(self.gid_enter, flat='normal')
        set_tk_disable_from_list(self.create_btn, flat='normal')
        set_tk_disable_from_list(self.file_btn, flat='normal')
class ExportGarbageProgram(ExportProgramBase):
    """Export garbage-bag QR codes, by bag ID or by condition."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "导出垃圾袋二维码")
        self._conf("#afdfe4", ["垃圾袋ID:", "导出位置:"], ["条件:", "导出位置:"],
                   ["根据垃圾袋ID导出", "根据条件导出", "选择目录", "选择目录"])

    def export_id(self):
        """Export the QR code for one bag ID into the chosen directory."""
        gid = self.gid_var[0].get()
        path = self.gid_var[1].get()
        if len(path) == 0:
            self.station.show_warning("导出失败", "请指定导出的位置")
            return
        event = tk_event.ExportGarbageByIDEvent(self.station).start(path, gid)
        self.station.push_event(event)

    def export_where(self):
        """Export QR codes for all bags matching the condition string."""
        where = self.where_var[0].get()
        path = self.where_var[1].get()
        if len(path) == 0:
            self.station.show_warning("导出失败", "请指定导出的位置")
            return
        event = tk_event.ExportGarbageAdvancedEvent(self.station).start(path, where)
        self.station.push_event(event)
class ExportUserProgram(ExportProgramBase):
    """Export user QR codes, by user ID or by condition."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "导出用户二维码")
        self._conf("#f69c9f", ["用户ID:", "导出位置:"], ["条件:", "导出位置:"],
                   ["根据用户ID导出", "根据条件导出", "选择目录", "选择目录"])

    def export_id(self):
        """Export the QR code for one user ID into the chosen directory."""
        uid = self.gid_var[0].get()
        path = self.gid_var[1].get()
        if len(path) == 0:
            self.station.show_warning("导出失败", "请指定导出的位置")
            return
        event = tk_event.ExportUserByIDEvent(self.station).start(path, uid)
        self.station.push_event(event)

    def export_where(self):
        """Export QR codes for all users matching the condition string."""
        where = self.where_var[0].get()
        path = self.where_var[1].get()
        if len(path) == 0:
            self.station.show_warning("导出失败", "请指定导出的位置")
            return
        event = tk_event.ExportUserAdvancedEvent(self.station).start(path, where)
        self.station.push_event(event)
class CreateUserFromCSVProgram(AdminProgram):
    """Bulk-create users (regular or automatic) from a CSV file.

    Two stacked forms: ``path_*`` (upper, regular users) and ``auto_*``
    (lower, automatic users), each with its own file picker and create button.
    """

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "从CSV导入用户")
        self.auto_frame = tk.Frame(self.frame)
        self.auto_title: tk.Label = tk.Label(self.auto_frame)
        self.auto_enter: tk.Entry = tk.Entry(self.auto_frame)
        self.auto_var: tk.Variable = tk.StringVar()
        self.enter_frame = tk.Frame(self.frame)
        self.path_title: tk.Label = tk.Label(self.enter_frame)
        self.path_enter: tk.Entry = tk.Entry(self.enter_frame)
        self.path_var: tk.Variable = tk.StringVar()
        self.create_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.file_btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#EEE8AA"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.2, relwidth=0.6, relheight=0.12)
        self.auto_frame['bg'] = "#EEE8AA"
        self.auto_frame['bd'] = 5
        self.auto_frame['relief'] = "ridge"
        self.auto_frame.place(relx=0.2, rely=0.6, relwidth=0.6, relheight=0.12)
        self.auto_title['font'] = title_font
        self.auto_title['text'] = "CSV文件:"
        self.auto_title['bg'] = "#EEE8AA"
        self.auto_title['anchor'] = 'e'
        self.path_title['font'] = title_font
        self.path_title['text'] = "CSV文件:"
        self.path_title['bg'] = "#EEE8AA"
        self.path_title['anchor'] = 'e'
        self.auto_enter['textvariable'] = self.auto_var
        self.auto_enter['font'] = title_font
        self.path_enter['textvariable'] = self.path_var
        self.path_enter['font'] = title_font
        self.auto_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.auto_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        self.path_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.path_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for btn, text in zip(self.create_btn + self.file_btn,
                             ["创建用户", "创建自动用户", "选择CSV", "选择CSV"]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
        # Index 0 belongs to the upper (regular) form, index 1 to the lower (auto) form.
        self.create_btn[0]['command'] = self.create
        self.create_btn[1]['command'] = self.create_auto
        self.create_btn[0].place(relx=0.2, rely=0.34, relwidth=0.25, relheight=0.08)
        self.create_btn[1].place(relx=0.2, rely=0.74, relwidth=0.25, relheight=0.08)
        self.file_btn[0]['command'] = self.choose_file
        self.file_btn[1]['command'] = self.choose_file_auto
        self.file_btn[0].place(relx=0.6, rely=0.34, relwidth=0.2, relheight=0.08)
        self.file_btn[1].place(relx=0.6, rely=0.74, relwidth=0.2, relheight=0.08)

    def choose_file_auto(self):
        path = askopenfilename(title='选择CSV文件', filetypes=[("CSV", ".csv")])
        self.auto_var.set(path)

    def choose_file(self):
        path = askopenfilename(title='选择CSV文件', filetypes=[("CSV", ".csv")])
        self.path_var.set(path)

    def create_auto(self):
        """Queue bulk creation of automatic users from the selected CSV."""
        path = self.auto_var.get()
        event = tk_event.CreateAutoUserFromCSVEvent(self.station).start(path)
        self.station.push_event(event)

    def create(self):
        """Queue bulk creation of regular users from the selected CSV."""
        path = self.path_var.get()
        event = tk_event.CreateUserFromCSVEvent(self.station).start(path)
        self.station.push_event(event)

    def set_disable(self):
        self.auto_enter['state'] = 'disable'
        self.path_enter['state'] = 'disable'
        set_tk_disable_from_list(self.create_btn)
        set_tk_disable_from_list(self.file_btn)

    def reset_disable(self):
        self.auto_enter['state'] = 'normal'
        self.path_enter['state'] = 'normal'
        set_tk_disable_from_list(self.create_btn, flat='normal')
        set_tk_disable_from_list(self.file_btn, flat='normal')
class DeleteUserProgram(AdminProgram):
    """Delete a single user, either by 32-char user ID or by name+password
    (from which the ID is re-derived via ``create_uid``)."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "删除用户")
        self.uid_frame = tk.Frame(self.frame)
        self.uid_title: tk.Label = tk.Label(self.uid_frame)
        self.uid_enter: tk.Entry = tk.Entry(self.uid_frame)
        self.uid_var: tk.Variable = tk.StringVar()
        self.name_frame = tk.Frame(self.frame)
        # Two rows: user name, password.
        self.name_title: List[tk.Label] = [tk.Label(self.name_frame) for _ in range(2)]
        self.name_enter: List[tk.Entry] = [tk.Entry(self.name_frame) for _ in range(2)]
        self.name_var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]
        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.uid_frame['bg'] = "#FA8072"
        self.uid_frame['bd'] = 5
        self.uid_frame['relief'] = "ridge"
        self.uid_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)
        self.name_frame['bg'] = "#FA8072"
        self.name_frame['bd'] = 5
        self.name_frame['relief'] = "ridge"
        self.name_frame.place(relx=0.2, rely=0.48, relwidth=0.6, relheight=0.25)
        height = 0.17
        for lb, text, enter, var in zip(self.name_title, ["用户名:", "密码:"], self.name_enter, self.name_var):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#FA8072"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.20)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.20)
            height += 0.45
        self.uid_title['font'] = title_font
        self.uid_title['text'] = "用户ID:"
        self.uid_title['bg'] = "#FA8072"
        self.uid_title['anchor'] = 'e'
        self.uid_enter['font'] = title_font
        self.uid_enter['textvariable'] = self.uid_var
        self.uid_title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.uid_enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for btn, text, func in zip(self.btn,
                                   ["通过用户ID删除", "通过用户名删除"],
                                   [lambda: self.del_by_uid(), lambda: self.del_by_name()]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
        self.btn[0].place(relx=0.6, rely=0.32, relwidth=0.2, relheight=0.08)
        self.btn[1].place(relx=0.6, rely=0.75, relwidth=0.2, relheight=0.08)

    def del_by_uid(self):
        """Delete by raw user ID (must be exactly 32 characters)."""
        uid = self.uid_var.get()
        if len(uid) != 32:
            self.station.show_warning("用户ID错误", "用户ID必须为32位")
            return
        event = tk_event.DelUserEvent(self.station).start(uid)
        self.station.push_event(event)

    def del_by_name(self):
        """Delete by name+password: derive the user ID, then delete it."""
        name = self.name_var[0].get()
        passwd = self.name_var[1].get()
        if len(name) == 0 or len(passwd) == 0:
            self.station.show_warning("用户名或密码错误", "请输入用户名和密码")
            return
        uid = create_uid(name, passwd)
        event = tk_event.DelUserEvent(self.station).start(uid)
        self.station.push_event(event)

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        set_tk_disable_from_list(self.name_enter)
        self.uid_enter['state'] = 'disable'

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        set_tk_disable_from_list(self.name_enter, flat='normal')
        self.uid_enter['state'] = 'normal'
class DeleteUsersProgram(AdminProgram):
    """Delete (or preview/"scan") all users matching an admin-entered condition.

    NOTE(review): the condition is passed downstream as a raw SQL fragment
    (see the warning text); this is an admin-only tool, but the event handler
    should be the place that guards against destructive/invalid SQL.
    """

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "删除多个用户")
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]
        self.__conf_font()

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#48c0a3"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.30, relwidth=0.6, relheight=0.10)
        self.title['font'] = title_font
        self.title['text'] = "条件:"
        self.title['anchor'] = 'e'
        self.title['bg'] = "#48c0a3"
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for btn, text, x, func in zip(self.btn,
                                      ["删除", "扫描"],
                                      [0.2, 0.6],
                                      [lambda: self.delete_user(), lambda: self.scan_user()]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
            btn.place(relx=x, rely=0.6, relwidth=0.2, relheight=0.08)

    def delete_user(self):
        """Queue deletion of every user matching the condition."""
        where = self.var.get()
        if len(where) == 0:
            self.station.show_warning("条件错误", "条件必须为正确的SQL语句")
            return
        event = tk_event.DelUserFromWhereEvent(self.station).start(where)
        self.station.push_event(event)

    def scan_user(self):
        """Queue a dry-run listing of users matching the condition."""
        where = self.var.get()
        if len(where) == 0:
            self.station.show_warning("条件错误", "条件必须为正确的SQL语句")
            return
        event = tk_event.DelUserFromWhereScanEvent(self.station).start(where)
        self.station.push_event(event)

    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'

    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
class DeleteGarbageProgramBase(AdminProgram):
    """Base page for deleting garbage bags filtered by processing state.

    Radio values 0..3 map to: any / unused only / waiting-check only /
    checked only.  Subclasses call :meth:`_conf` and implement
    :meth:`delete_garbage`.
    """

    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.int_var: tk.Variable = tk.IntVar()  # selected state filter (0..3)
        self.int_var.set(0)
        self.radio: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]
        self.btn: tk.Button = tk.Button(self.frame)
        self.__conf_font()
        self._conf()

    def _conf(self, title: str = "垃圾袋ID:", color: str = "#b69968", support_del_all: bool = True):
        """Configure entry label, frame color, and whether state 0 ("any") is allowed."""
        self.frame_title = title
        self.frame_color = color
        self.support_del_all = support_del_all

    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = self.frame_color
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.30, relwidth=0.6, relheight=0.10)
        self.title['font'] = title_font
        self.title['text'] = self.frame_title
        self.title['bg'] = self.frame_color
        self.title['anchor'] = 'e'
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for i in range(4):
            radio = self.radio[i]
            radio['font'] = btn_font
            radio['text'] = ['均可', '仅未使用', '仅待检测', '仅已检测'][i]
            radio['bg'] = self.color
            radio['value'] = i
            radio['variable'] = self.int_var
            radio['anchor'] = 'w'
        if not self.support_del_all:
            # "any" not allowed: force a concrete state and lock its radio.
            self.int_var.set(1)
            self.radio[0]['state'] = 'disable'
        self.radio[0].place(relx=0.20, rely=0.43, relwidth=0.20, relheight=0.1)
        self.radio[1].place(relx=0.60, rely=0.43, relwidth=0.20, relheight=0.1)
        self.radio[2].place(relx=0.20, rely=0.55, relwidth=0.20, relheight=0.1)
        self.radio[3].place(relx=0.60, rely=0.55, relwidth=0.20, relheight=0.1)
        self.btn['font'] = btn_font
        self.btn['text'] = '删除'
        self.btn['bg'] = Config.tk_btn_bg
        self.btn['command'] = lambda: self.delete_garbage()
        self.btn.place(relx=0.4, rely=0.68, relwidth=0.2, relheight=0.08)

    def delete_garbage(self):
        """Perform the deletion — implemented by subclasses."""
        ...

    def set_disable(self):
        self.enter['state'] = 'disable'
        self.btn['state'] = 'disable'

    def reset_disable(self):
        self.enter['state'] = 'normal'
        self.btn['state'] = 'normal'
class DeleteGarbageProgram(DeleteGarbageProgramBase):
    """Delete one garbage bag by its ID, filtered by processing state."""

    def __init__(self, station, win, color):
        super(DeleteGarbageProgram, self).__init__(station, win, color, "删除垃圾袋")

    def delete_garbage(self):
        where = self.int_var.get()
        assert where in [0, 1, 2, 3]  # radio buttons only produce 0..3
        gid = self.var.get()
        if len(gid) == 0:
            self.station.show_warning("垃圾袋ID错误", "请输入正确的垃圾袋ID")
            return
        event = tk_event.DelGarbageEvent(self.station).start(gid, where)
        self.station.push_event(event)
class DeleteGarbageMoreProgram(DeleteGarbageProgramBase):
    """Delete (or "scan") many garbage bags matching a condition string.

    The "any" state (radio 0) is disabled via ``_conf(..., False)``, so the
    state filter is always 1..3 here.
    """

    def __init__(self, station, win, color):
        super(DeleteGarbageMoreProgram, self).__init__(station, win, color, "删除多个垃圾袋")
        self.scan_btn = tk.Button(self.frame)
        self._conf("条件:", "#f58f98", False)

    def conf_gui(self, n: int = 1):
        super(DeleteGarbageMoreProgram, self).conf_gui(n)
        # Move the base delete button left to make room for the scan button.
        self.btn.place_forget()
        self.btn.place(relx=0.2, rely=0.68, relwidth=0.2, relheight=0.08)
        self.scan_btn['font'] = make_font(size=self.btn_font_size)
        self.scan_btn['text'] = '扫描'
        self.scan_btn['bg'] = Config.tk_btn_bg
        self.scan_btn['command'] = self.scan_garbage
        self.scan_btn.place(relx=0.6, rely=0.68, relwidth=0.2, relheight=0.08)

    def set_disable(self):
        super(DeleteGarbageMoreProgram, self).set_disable()
        self.scan_btn['state'] = 'disable'

    def reset_disable(self):
        super(DeleteGarbageMoreProgram, self).reset_disable()
        self.scan_btn['state'] = 'normal'

    def delete_garbage(self):
        """Queue deletion of every bag in the chosen state matching the condition."""
        where = self.int_var.get()
        assert where in [1, 2, 3]  # radio 0 is disabled for this page
        where_sql = self.var.get()
        if len(where_sql) == 0:
            self.station.show_warning("条件错误", "条件必须为正确的SQL语句")
            return
        event = tk_event.DelGarbageWhereEvent(self.station).start(where, where_sql)
        self.station.push_event(event)

    def scan_garbage(self):
        """Queue a dry-run listing of matching bags instead of deleting."""
        where = self.int_var.get()
        assert where in [1, 2, 3]  # radio 0 is disabled for this page
        where_sql = self.var.get()
        if len(where_sql) == 0:
            self.station.show_warning("条件错误", "条件必须为正确的SQL语句")
            return
        event = tk_event.DelGarbageWhereScanEvent(self.station).start(where, where_sql)
        self.station.push_event(event)
class DeleteAllGarbageProgram(AdminProgram):
    """Admin program that wipes every garbage bag from the database.

    Requires the [admin] account's password before the event is dispatched.
    """
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "删除所有垃圾袋")
        self.dangerous: tk.Label = tk.Label(self.frame)  # big red warning text
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.btn: List[tk.Button] = [tk.Button(self.frame) for _ in range(2)]  # [delete, scan]
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.danger_font_size = int(20 * n)
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the warning label, the password entry and the two buttons."""
        self.__conf_font(n * Config.tk_zoom)
        danger_font = make_font(size=self.danger_font_size, weight="bold", underline=1)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        danger_btn_font = make_font(size=self.btn_font_size, weight="bold", overstrike=1)
        self.dangerous['bg'] = self.color
        self.dangerous['font'] = danger_font
        self.dangerous['fg'] = "#f20c00"
        self.dangerous['text'] = ("确定要从数据库删除所有垃圾袋吗?\n"
                                  "请输入[admin]用户的密码再继续操作.\n"
                                  "只有[admin]用户具有该操作的权限.\n"
                                  "这是相当危险的操作.\n"
                                  "操作后数据库可能无法恢复原数据.\n"
                                  "SuperHuan和程序的缔造者不会对\n"
                                  "此操作负责.\n"
                                  "删库跑路可不是一件好事.\n"
                                  "请遵守当地法律法规.")
        self.dangerous.place(relx=0.05, rely=0.03, relwidth=0.9, relheight=0.53)
        self.enter_frame['bg'] = "#f20c00"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)
        self.title['font'] = title_font
        self.title['text'] = "密码:"
        self.title['bg'] = "#f20c00"
        self.title['anchor'] = 'e'
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        for btn, text, x in zip(self.btn, ["删除", "扫描"], [0.2, 0.6]):
            btn['text'] = text
            btn.place(relx=x, rely=0.78, relwidth=0.2, relheight=0.08)
        # The destructive button gets a deliberately alarming font.
        self.btn[0]['font'] = danger_btn_font
        self.btn[0]['bg'] = "#f20c00"
        self.btn[0]['command'] = lambda: self.delete_garbage()
        self.btn[1]['font'] = btn_font
        self.btn[1]['bg'] = Config.tk_btn_bg
        self.btn[1]['command'] = lambda: self.scan_garbage()
    def scan_garbage(self):
        """Preview what would be deleted (no password required)."""
        event = tk_event.DelAllGarbageScanEvent(self.station)
        self.station.push_event(event)
    def delete_garbage(self):
        """Verify the [admin] password, then push the delete-all event."""
        passwd = self.var.get()
        if len(passwd) == 0:
            self.station.show_warning("密码错误", "请输入正确的[admin]用户密码")
            return  # bug fix: previously fell through and validated an empty password
        user = find_user_by_name('admin', passwd, self.station.get_db())
        if user is None or not user.is_manager():
            self.station.show_warning("密码错误", "请输入正确的[admin]用户密码")
            return
        event = tk_event.DelAllGarbageEvent(self.station)
        self.station.push_event(event)
    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'
    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
class SearchProgramBase(AdminProgram, metaclass=abc.ABCMeta):
    """Base class for search programs: provides a scrollable result table."""
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.view_frame = tk.Frame(self.frame)
        self.view = ttk.Treeview(self.view_frame)  # tabular result view
        self.y_scroll = tk.Scrollbar(self.view_frame)
        self.x_scroll = tk.Scrollbar(self.view_frame)
    def conf_view_gui(self, columns: list, relx, rely, relwidth, relheight,
                      x_scroll=0.05, y_scroll=0.02, color: str = "#FA8072"):
        """Configure and place the Treeview and its two scrollbars.

        :param columns: column ids, also used as the heading labels
        :param relx, rely, relwidth, relheight: relative placement of the frame
        :param x_scroll, y_scroll: relative thickness of the scrollbars
        :param color: background colour of the surrounding frame
        """
        self.view_frame['bg'] = color
        self.view_frame['bd'] = 2
        self.view_frame['relief'] = "ridge"
        self.view_frame.place(relx=relx, rely=rely, relwidth=relwidth, relheight=relheight)
        self.view['columns'] = columns
        self.view['show'] = 'headings'
        self.view['selectmode'] = 'none'
        for i in columns:
            self.view.column(i, anchor="c")
            self.view.heading(i, text=i)
        # Wire both scrollbars to the treeview.
        self.y_scroll['orient'] = 'vertical'
        self.y_scroll['command'] = self.view.yview
        self.view['yscrollcommand'] = self.y_scroll.set
        self.x_scroll['orient'] = 'horizontal'
        self.x_scroll['command'] = self.view.xview
        self.view['xscrollcommand'] = self.x_scroll.set
        self.view.place(relx=0.0, rely=0.0, relwidth=1 - y_scroll, relheight=1 - x_scroll)
        self.y_scroll.place(relx=0.98, rely=0.0, relwidth=y_scroll, relheight=1.0)
        self.x_scroll.place(relx=0.0, rely=1 - x_scroll, relwidth=1 - y_scroll, relheight=x_scroll)
class SearchUserProgram(SearchProgramBase):
    """Admin program: search users by ID / name / phone, with per-field toggles."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "搜索用户")
        self.enter_frame = tk.Frame(self.frame)
        self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(3)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(3)]
        self.var: List[tk.Variable] = [tk.StringVar() for _ in range(3)]
        # Each checkbutton toggles whether its field participates in the search.
        self.check: List[Tuple[tk.Checkbutton, tk.Variable]] = [(tk.Checkbutton(self.enter_frame), tk.IntVar())
                                                                for _ in range(3)]
        self.btn: tk.Button = tk.Button(self.frame)
        self._columns = ["UserID", "Name", "Phone", "Score", "Reputation", "IsManager"]
        self._columns_ch = ["用户ID[UserID]", "用户名[Name]", "手机号[Phone]",
                            "积分[Score]", "垃圾分类信用[Reputation]", "是否管理员[IsManager]"]
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the three labelled entry rows, the search button and the table."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#FA8072"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.0, relwidth=0.6, relheight=0.30)
        height = 0.1
        for lb, text, enter, var, check in zip(self.title,
                                               ["用户ID:", "用户名:", "手机号:"],
                                               self.enter, self.var, self.check):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#FA8072"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            check[0]['font'] = title_font
            check[0]['text'] = ''
            check[0]['bg'] = "#FA8072"
            check[0]['variable'] = check[1]
            check[1].set(1)  # all fields enabled by default
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.17)
            enter.place(relx=0.35, rely=height, relwidth=0.55, relheight=0.17)
            check[0].place(relx=0.92, rely=height, relwidth=0.04, relheight=0.17)
            height += 0.30
        self.btn['font'] = btn_font
        self.btn['text'] = "搜索"
        self.btn['bg'] = Config.tk_btn_bg
        self.btn['command'] = self.search_user
        self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)
        self.conf_view_gui(self._columns_ch, relx=0.05, rely=0.32, relwidth=0.9, relheight=0.55)
    def search_user(self):
        """Collect the enabled, non-empty fields and dispatch the search event."""
        use_uid = self.check[0][1].get()
        use_name = self.check[1][1].get()
        use_phone = self.check[2][1].get()
        uid = None
        name = None
        phone = None
        # A field only filters the search when its checkbox is on and it is non-empty.
        if use_uid:
            uid = self.var[0].get()
            if len(uid) == 0:
                uid = None
        if use_name:
            name = self.var[1].get()
            if len(name) == 0:
                name = None
        if use_phone:
            phone = self.var[2].get()
            if len(phone) == 0:
                phone = None
        event = tk_event.SearchUserEvent(self.station).start(self._columns, uid, name, phone, self)
        self.station.push_event(event)
    def set_disable(self):
        self.btn['state'] = 'disable'
        set_tk_disable_from_list(self.enter)
    def reset_disable(self):
        self.btn['state'] = 'normal'
        set_tk_disable_from_list(self.enter, flat='normal')
class SearchAdvancedProgramBase(SearchProgramBase, metaclass=abc.ABCMeta):
    """Base for 'advanced' searches: one free-form SQL condition entry plus results."""
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.var: tk.Variable = tk.StringVar()
        self.btn: tk.Button = tk.Button(self.frame)
        self._conf([], [], "#FA8072")  # subclasses call _conf again with real columns
        self.__conf_font()
    def _conf(self, columns: list, columns_ch: list, bg_color):
        """Set the result columns (ids and display names) and the frame colour."""
        self.bg_color = bg_color
        self._columns = columns
        self._columns_ch = columns_ch
        return self
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the condition entry, the search button and the result table."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = self.bg_color
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.00, relwidth=0.6, relheight=0.10)
        self.title['font'] = title_font
        self.title['bg'] = self.bg_color
        self.title['text'] = "条件:"
        self.title['anchor'] = 'e'
        self.enter['font'] = title_font
        self.enter['textvariable'] = self.var
        self.title.place(relx=0.01, rely=0.25, relwidth=0.30, relheight=0.50)
        self.enter.place(relx=0.35, rely=0.25, relwidth=0.60, relheight=0.50)
        self.btn['text'] = "搜索"
        self.btn['font'] = btn_font
        self.btn['bg'] = Config.tk_btn_bg
        self.btn['command'] = self.search
        self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)
        self.conf_view_gui(self._columns_ch, relx=0.05, rely=0.12, relwidth=0.9, relheight=0.76)
    def search(self):
        # Subclass hook: run the actual query. Base implementation is a no-op.
        ...
    def set_disable(self):
        self.btn['state'] = 'disable'
        self.enter['state'] = 'disable'
    def reset_disable(self):
        self.btn['state'] = 'normal'
        self.enter['state'] = 'normal'
class SearchUserAdvancedProgram(SearchAdvancedProgramBase):
    """Advanced (raw SQL where-clause) search over users."""

    def __init__(self, station, win, color):
        super(SearchUserAdvancedProgram, self).__init__(station, win, color, "高级搜索-用户")
        self._conf(["UserID", "Name", "Phone", "Score", "Reputation", "IsManager"],
                   ["用户ID[UserID]", "用户名[Name]", "手机号[Phone]",
                    "积分[Score]", "垃圾分类信用[Reputation]", "是否管理员[IsManager]"],
                   '#48c0a3')

    def search(self):
        """Dispatch the search event carrying the user-typed where clause."""
        sql_where = self.var.get()
        self.station.push_event(
            tk_event.SearchUserAdvancedEvent(self.station).start(self._columns, sql_where, self))
class SearchGarbageProgram(SearchProgramBase):
    """Admin program: search garbage bags by any combination of eight fields."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "搜索垃圾袋")
        self.enter_frame = tk.Frame(self.frame)
        self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(8)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(8)]
        self.var: List[tk.Variable] = [tk.StringVar() for _ in range(8)]
        # Each checkbutton toggles whether its field participates in the search.
        self.check: List[Tuple[tk.Checkbutton, tk.Variable]] = [(tk.Checkbutton(self.enter_frame), tk.IntVar())
                                                                for _ in range(8)]
        self._columns = ["GarbageID", "UserID", "CheckerID", "CreateTime", "UseTime", "Location", "GarbageType",
                         "CheckResult"]
        self._columns_zh = ["垃圾袋ID[GarbageID]", "使用者ID[UserID]", "检测者ID[CheckerID]", "创建时间[CreateTime]",
                            "使用时间[UseTime]", "使用地点[Location]", "垃圾类型[GarbageType]", "检测结果[CheckResult]"]
        self.btn: tk.Button = tk.Button(self.frame)
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the eight labelled entry rows, the search button and the table."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.enter_frame['bg'] = "#7bbfea"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.0, relwidth=0.6, relheight=0.47)
        height = 0.02
        for lb, text, enter, var, check in zip(self.title,
                                               ["垃圾袋ID:", "使用者ID:", "检查者ID:", "创建时间:", "使用时间:",
                                                "使用地点:", "垃圾类型:", "检测结果:"],
                                               self.enter, self.var, self.check):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#7bbfea"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            check[0]['font'] = title_font
            check[0]['bg'] = "#7bbfea"
            check[0]['text'] = ''
            check[0]['variable'] = check[1]
            check[1].set(1)  # all fields enabled by default
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.10)
            enter.place(relx=0.35, rely=height, relwidth=0.55, relheight=0.10)
            check[0].place(relx=0.92, rely=height, relwidth=0.04, relheight=0.10)
            height += 0.121
        self.btn['font'] = btn_font
        self.btn['bg'] = Config.tk_btn_bg
        # consistency fix: was the English "Search"; every sibling program uses "搜索"
        self.btn['text'] = "搜索"
        self.btn['command'] = self.search_user
        self.btn.place(relx=0.4, rely=0.9, relwidth=0.2, relheight=0.08)
        self.conf_view_gui(self._columns_zh, relx=0.05, rely=0.49, relwidth=0.9, relheight=0.38, x_scroll=0.07)
    def search_user(self):
        """Collect enabled, non-empty fields into a key->value dict and dispatch."""
        keys = ["gid", "uid", "cuid", "create_time", "use_time", "loc", "type_", "check"]
        key_values = {}
        for i, key in enumerate(keys):
            ck = self.check[i][1].get()
            if ck:
                res = self.enter[i].get()
                if len(res) > 0:
                    key_values[key] = res
                    continue
            key_values[key] = None  # disabled or empty fields do not filter
        event = tk_event.SearchGarbageEvent(self.station).start(self._columns, key_values, self)
        self.station.push_event(event)
    def set_disable(self):
        self.btn['state'] = 'disable'
        set_tk_disable_from_list(self.enter)
    def reset_disable(self):
        self.btn['state'] = 'normal'
        set_tk_disable_from_list(self.enter, flat='normal')
class SearchGarbageAdvancedProgram(SearchAdvancedProgramBase):
    """Advanced (raw SQL where-clause) search over garbage bags."""

    def __init__(self, station, win, color):
        super(SearchGarbageAdvancedProgram, self).__init__(station, win, color, "高级搜索-垃圾袋")
        self._conf(["GarbageID", "UserID", "CheckerID", "CreateTime", "UseTime", "Location", "GarbageType",
                    "CheckResult"],
                   ["垃圾袋ID[GarbageID]", "使用者ID[UserID]", "检测者ID[CheckerID]", "创建时间[CreateTime]",
                    "使用时间[UseTime]", "使用地点[Location]", "垃圾类型[GarbageType]", "检测结果[CheckResult]"],
                   '#d1923f')

    def search(self):
        """Dispatch the search event carrying the user-typed where clause."""
        sql_where = self.var.get()
        self.station.push_event(
            tk_event.SearchGarbageAdvancedEvent(self.station).start(self._columns, sql_where, self))
class SearchAdvancedProgram(SearchAdvancedProgramBase):
    """Advanced joined search across garbage bags and their users/checkers."""
    def __init__(self, station, win, color):
        super(SearchAdvancedProgram, self).__init__(station, win, color, "高级搜索")
        columns = ["GarbageID", "UserID", "UserName", "UserPhone", "UserScore",
                   "UserReputation", "CheckerID", "CheckerName", "CheckerPhone",
                   "CreateTime", "UseTime", "Location", "GarbageType", "CheckResult"]
        # typo fix: label read "检测这名" (wrong character); should be "检测者名"
        columns_zh = ["垃圾袋ID[GarbageID]", "使用者ID[UserID]", "使用者名[UserName]", "使用者手机号[UserPhone]",
                      "使用者积分[UserScore]", "使用者垃圾分类信用[UserReputation]", "检测者ID[CheckerID]",
                      "检测者名[CheckerName]", "检测者手机号[CheckerPhone]", "创建时间[CreateTime]", "使用时间[UseTime]",
                      "使用地点[Location]", "垃圾类型[GarbageType]", "检测结果[CheckResult]"]
        self._conf(columns, columns_zh, '#426ab3')
    def search(self):
        """Dispatch the advanced-search event with the raw where clause."""
        where = self.var.get()
        event = tk_event.SearchAdvancedEvent(self.station).start(self._columns, where, self)
        self.station.push_event(event)
class UpdateUserProgramBase(AdminProgram):
    """Base for user-update programs: one update-by-ID row and one update-by-condition row."""
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.enter_frame = tk.Frame(self.frame)
        self.title: List[tk.Label] = [tk.Label(self.enter_frame) for _ in range(2)]
        self.enter: List[tk.Entry] = [tk.Entry(self.enter_frame) for _ in range(2)]
        self.var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]
        self.where_frame = tk.Frame(self.frame)
        self.where_title: List[tk.Label] = [tk.Label(self.where_frame) for _ in range(2)]
        self.where_enter: List[tk.Entry] = [tk.Entry(self.where_frame) for _ in range(2)]
        self.where_var: List[tk.Variable] = [tk.StringVar() for _ in range(2)]
        self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self._conf(["", ""], "#FA8072")  # subclasses call _conf again with real labels
        self.__conf_font()
    def _conf(self, title: List[str], bg_color: str):
        """Set the two row labels and the background colour used by conf_gui."""
        self.bg_color = bg_color
        self.bg_color_where = bg_color
        self.enter_title = title
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the by-condition frame, the by-ID frame and the two buttons."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.where_frame['bg'] = self.bg_color_where
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.17)
        self.enter_frame['bg'] = self.bg_color
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.58, relwidth=0.6, relheight=0.17)
        height = 0.1
        # Configure the by-ID widgets and the by-condition widgets in lockstep.
        for lb, text, enter, var, lb_w, text_w, enter_w, var_w in (
                zip(self.title, self.enter_title, self.enter, self.var,
                    self.where_title, ["条件:", self.enter_title[1]], self.where_enter, self.where_var)):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = self.bg_color
            lb['anchor'] = 'e'
            lb_w['font'] = title_font
            lb_w['text'] = text_w
            lb_w['bg'] = self.bg_color_where
            lb_w['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var
            enter_w['font'] = title_font
            enter_w['textvariable'] = var_w
            lb.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            lb_w.place(relx=0.01, rely=height, relwidth=0.30, relheight=0.35)
            enter_w.place(relx=0.35, rely=height, relwidth=0.60, relheight=0.35)
            height += 0.43
        for btn, text, func in zip(self.btn,
                                   ["通过条件更新", "通过用户ID更新"],
                                   [self.update_by_where, self.update_by_uid]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
        self.btn[0].place(relx=0.55, rely=0.40, relwidth=0.25, relheight=0.08)
        self.btn[1].place(relx=0.55, rely=0.78, relwidth=0.25, relheight=0.08)
    def update_by_uid(self):
        # Subclass hook: update the single user identified by the entered ID.
        ...
    def update_by_where(self):
        # Subclass hook: update every user matching the entered SQL condition.
        ...
    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        set_tk_disable_from_list(self.enter)
    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        set_tk_disable_from_list(self.enter, flat='normal')
class UpdateUserScore(UpdateUserProgramBase):
    """Admin program: update users' score, by user ID or by SQL condition."""
    def __init__(self, station, win, color):
        super(UpdateUserScore, self).__init__(station, win, color, "更新用户-积分")
        self._conf(["用户ID:", "积分:"], "#afdfe4")
    def update_by_uid(self):
        """Update the score of the user with the entered ID."""
        uid = self.enter[0].get()
        try:
            score = int(self.enter[1].get())
        except ValueError:
            # robustness fix: non-numeric input used to raise an uncaught ValueError
            self.station.show_warning("积分错误", "请输入正确的积分")
            return
        # NOTE(review): uid is interpolated into a SQL fragment downstream — confirm it is sanitized.
        event = tk_event.UpdateUserScoreEvent(self.station).start(score, f"UserID='{uid}'")
        self.station.push_event(event)
    def update_by_where(self):
        """Update the score of every user matching the SQL condition."""
        where = self.where_enter[0].get()
        try:
            score = int(self.where_enter[1].get())
        except ValueError:
            self.station.show_warning("积分错误", "请输入正确的积分")
            return
        event = tk_event.UpdateUserScoreEvent(self.station).start(score, where)
        self.station.push_event(event)
class UpdateUserReputation(UpdateUserProgramBase):
    """Admin program: update users' recycling reputation, by user ID or by SQL condition."""
    def __init__(self, station, win, color):
        super(UpdateUserReputation, self).__init__(station, win, color, "更新用户-垃圾分类信用")
        self._conf(["用户ID:", "垃圾分类信用:"], "#f8aba6")
    def update_by_uid(self):
        """Update the reputation of the user with the entered ID."""
        uid = self.enter[0].get()
        try:
            reputation = int(self.enter[1].get())
        except ValueError:
            # robustness fix: non-numeric input used to raise an uncaught ValueError
            self.station.show_warning("垃圾分类信用错误", "请输入正确的垃圾分类信用")
            return
        # NOTE(review): uid is interpolated into a SQL fragment downstream — confirm it is sanitized.
        event = tk_event.UpdateUserReputationEvent(self.station).start(reputation, f"UserID='{uid}'")
        self.station.push_event(event)
    def update_by_where(self):
        """Update the reputation of every user matching the SQL condition."""
        where = self.where_enter[0].get()
        try:
            reputation = int(self.where_enter[1].get())
        except ValueError:
            self.station.show_warning("垃圾分类信用错误", "请输入正确的垃圾分类信用")
            return
        event = tk_event.UpdateUserReputationEvent(self.station).start(reputation, where)
        self.station.push_event(event)
class UpdateGarbageTypeProgram(AdminProgram):
    """Admin program: update garbage bags' type, by bag ID or by SQL condition."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "更新垃圾袋-垃圾类型")
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]
        # bug fix: tk.StringVar was stored as the class itself (missing parentheses)
        self.var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]
        self.where_frame = tk.Frame(self.frame)
        self.where_title: tk.Label = tk.Label(self.where_frame)
        self.where_enter: tk.Entry = tk.Entry(self.where_frame)
        self.where_type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(4)]
        self.where_var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]
        self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the by-condition and by-ID sections, each with four type radios."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.where_frame['bg'] = "#fdb933"
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)
        self.enter_frame['bg'] = "#fdb933"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)
        # Configure the by-ID section and the by-condition section with one loop.
        for lb, enter, radios, var, y, text in zip([self.title, self.where_title],
                                                   [self.enter, self.where_enter],
                                                   [self.type, self.where_type],
                                                   [self.var, self.where_var],
                                                   [0.32, 0.72],
                                                   ["垃圾袋ID:", "条件:"]):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#fdb933"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var[0]
            for i, radio in enumerate(radios):
                radio['font'] = btn_font
                radio['bg'] = self.color
                radio['text'] = GarbageType.GarbageTypeStrList_ch[i + 1]
                radio['value'] = i + 1
                radio['variable'] = var[1]
                radio['anchor'] = 'w'
            var[1].set(1)  # first type selected by default
            radios[0].place(relx=0.20, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[1].place(relx=0.60, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[2].place(relx=0.20, rely=y + 0.05, relwidth=0.20, relheight=0.04)
            radios[3].place(relx=0.60, rely=y + 0.05, relwidth=0.20, relheight=0.04)
            lb.place(relx=0.02, rely=0.2, relwidth=0.25, relheight=0.48)
            enter.place(relx=0.30, rely=0.2, relwidth=0.60, relheight=0.48)
        for btn, text, func in zip(self.btn,
                                   ["通过条件更新", "通过垃圾袋ID更新"],
                                   [self.update_by_where, self.update_by_gid]):
            btn['font'] = btn_font
            btn['text'] = text
            btn['bg'] = Config.tk_btn_bg
            btn['command'] = func
        self.btn[0].place(relx=0.55, rely=0.43, relwidth=0.25, relheight=0.08)
        self.btn[1].place(relx=0.55, rely=0.83, relwidth=0.25, relheight=0.08)
    def update_by_gid(self):
        """Update the type of the bag with the entered ID."""
        gid = self.enter.get()
        type_ = self.var[1].get()
        # NOTE(review): gid is interpolated into a SQL fragment downstream — confirm it is sanitized.
        event = tk_event.UpdateGarbageTypeEvent(self.station).start(type_, f"GarbageID={gid}")
        self.station.push_event(event)
    def update_by_where(self):
        """Update the type of every bag matching the SQL condition."""
        where = self.where_enter.get()
        type_ = self.where_var[1].get()
        event = tk_event.UpdateGarbageTypeEvent(self.station).start(type_, where)
        self.station.push_event(event)
    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'
        # bug fix: the where-entry was left enabled ('normal') while disabling
        self.where_enter['state'] = 'disable'
    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
        self.where_enter['state'] = 'normal'
class UpdateGarbageCheckResultProgram(AdminProgram):
    """Admin program: update garbage bags' check result, by bag ID or by SQL condition."""
    def __init__(self, station, win, color):
        super().__init__(station, win, color, "更新垃圾袋-检测结果")
        self.enter_frame = tk.Frame(self.frame)
        self.title: tk.Label = tk.Label(self.enter_frame)
        self.enter: tk.Entry = tk.Entry(self.enter_frame)
        self.type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(2)]
        # bug fix: tk.StringVar was stored as the class itself (missing parentheses)
        self.var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]
        self.where_frame = tk.Frame(self.frame)
        self.where_title: tk.Label = tk.Label(self.where_frame)
        self.where_enter: tk.Entry = tk.Entry(self.where_frame)
        self.where_type: List[tk.Radiobutton] = [tk.Radiobutton(self.frame) for _ in range(2)]
        self.where_var: List[tk.Variable] = [tk.StringVar(), tk.IntVar()]
        self.btn: List[tk.Button] = [tk.Button(self.frame), tk.Button(self.frame)]
        self.__conf_font()
    def __conf_font(self, n: int = Config.tk_zoom):
        self.title_font_size = int(16 * n)
        self.btn_font_size = int(14 * n)
    def conf_gui(self, n: int = 1):
        """Lay out the by-condition and by-ID sections, each with two result radios."""
        self.__conf_font(n * Config.tk_zoom)
        title_font = make_font(size=self.title_font_size)
        btn_font = make_font(size=self.btn_font_size)
        self.where_frame['bg'] = "#abc88b"
        self.where_frame['bd'] = 5
        self.where_frame['relief'] = "ridge"
        self.where_frame.place(relx=0.2, rely=0.20, relwidth=0.6, relheight=0.10)
        self.enter_frame['bg'] = "#abc88b"
        self.enter_frame['bd'] = 5
        self.enter_frame['relief'] = "ridge"
        self.enter_frame.place(relx=0.2, rely=0.60, relwidth=0.6, relheight=0.10)
        # Configure the by-ID section and the by-condition section with one loop.
        for lb, enter, radios, var, y, text in zip([self.title, self.where_title],
                                                   [self.enter, self.where_enter],
                                                   [self.type, self.where_type],
                                                   [self.var, self.where_var],
                                                   [0.32, 0.72],
                                                   ["垃圾袋ID:", "条件:"]):
            lb['font'] = title_font
            lb['text'] = text
            lb['bg'] = "#abc88b"
            lb['anchor'] = 'e'
            enter['font'] = title_font
            enter['textvariable'] = var[0]
            for i, radio in enumerate(radios):
                radio['font'] = btn_font
                radio['bg'] = self.color
                radio['text'] = ["投放错误", "投放正确"][i]
                radio['value'] = i
                radio['variable'] = var[1]
                radio['anchor'] = 'w'
            var[1].set(1)  # "correct" selected by default
            radios[0].place(relx=0.20, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            radios[1].place(relx=0.60, rely=y + 0.00, relwidth=0.20, relheight=0.04)
            lb.place(relx=0.02, rely=0.2, relwidth=0.25, relheight=0.48)
            enter.place(relx=0.30, rely=0.2, relwidth=0.60, relheight=0.48)
        for btn, text, func in zip(self.btn,
                                   ["通过条件更新", "通过垃圾袋ID更新"],
                                   [self.update_by_where, self.update_by_gid]):
            btn['font'] = btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
        self.btn[0].place(relx=0.55, rely=0.38, relwidth=0.25, relheight=0.08)
        self.btn[1].place(relx=0.55, rely=0.78, relwidth=0.25, relheight=0.08)
    def update_by_gid(self):
        """Update the check result of the bag with the entered ID."""
        gid = self.enter.get()
        check = (self.var[1].get() == 1)
        # NOTE(review): gid is interpolated into a SQL fragment downstream — confirm it is sanitized.
        event = tk_event.UpdateGarbageCheckEvent(self.station).start(check, f"GarbageID={gid}")
        self.station.push_event(event)
    def update_by_where(self):
        """Update the check result of every bag matching the SQL condition."""
        where = self.where_enter.get()
        check = (self.where_var[1].get() == 1)
        event = tk_event.UpdateGarbageCheckEvent(self.station).start(check, where)
        self.station.push_event(event)
    def set_disable(self):
        set_tk_disable_from_list(self.btn)
        self.enter['state'] = 'disable'
        # bug fix: the where-entry was left enabled ('normal') while disabling
        self.where_enter['state'] = 'disable'
    def reset_disable(self):
        set_tk_disable_from_list(self.btn, flat='normal')
        self.enter['state'] = 'normal'
        self.where_enter['state'] = 'normal'
class StatisticsTimeProgramBase(AdminProgram):
    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.figure_frame = tk.Frame(self.frame)
        self.figure = Figure(dpi=100)
        self.plt_1: Axes = self.figure.add_subplot(211)  # upper subplot (bar chart)
        self.plt_2: Axes = self.figure.add_subplot(212, sharex=self.plt_1)  # lower subplot (line chart)
        self.figure.subplots_adjust(hspace=0.7)
        self.canvas = FigureCanvasTkAgg(self.figure, master=self.figure_frame)
        self.canvas_tk = self.canvas.get_tk_widget()
        self.toolbar = NavigationToolbar2Tk(self.canvas, self.figure_frame)
        self.color_frame = tk.Frame(self.frame)
        self.show_list_tk = tk.Listbox(self.color_frame)  # series currently shown
        self.show_list_scroll = tk.Scrollbar(self.color_frame)
        self.hide_list_tk = tk.Listbox(self.color_frame)  # series currently hidden
        self.hide_list_scroll = tk.Scrollbar(self.color_frame)
        self.btn_show = tk.Button(self.color_frame)
        self.btn_hide = tk.Button(self.color_frame)
        self.color_show_dict = {}  # series name -> colour, for shown series
        self.color_hide_dict = {}  # series name -> colour, for hidden series
        self.export_lst = []  # raw records backing the CSV export
        self.export_btn = tk.Button(self.frame)
        self.refresh_btn = tk.Button(self.frame)
        self.reset_btn = tk.Button(self.frame)
        self.reverse_btn = tk.Button(self.frame)
        self.legend_show = tk.Checkbutton(self.frame), tk.IntVar()  # (widget, 0/1 legend flag)
        self._conf("#abc88b")
        self.__conf_font()
    def _conf(self, bg_color):
        # Allow subclasses to override the background colour of the side frames.
        self.bg_color = bg_color
    def __conf_font(self, n: int = Config.tk_zoom):
        # Scale the two button font sizes by the zoom factor n.
        self.btn_font_size = int(14 * n)
        self.little_btn_font_size = int(12 * n)
    def to_program(self):
        # Reload the statistics each time the program page is entered.
        self.refresh()
def update_listbox(self):
self.show_list_tk.delete(0, tk.END)
self.hide_list_tk.delete(0, tk.END)
for i in self.color_show_dict:
self.show_list_tk.insert(tk.END, i)
self.show_list_tk.itemconfig(tk.END,
selectbackground=self.color_show_dict[i],
bg=self.color_show_dict[i],
selectforeground='#FFFFFF',
fg='#000000')
for i in self.color_hide_dict:
self.hide_list_tk.insert(tk.END, i)
self.hide_list_tk.itemconfig(tk.END,
selectbackground=self.color_hide_dict[i],
bg=self.color_hide_dict[i],
selectforeground='#FFFFFF',
fg='#000000')
def check_show(self, res: str):
color = self.color_show_dict.get(res)
if color is not None:
return color
color = self.color_hide_dict.get(res)
if color is not None:
return None
color = random_color()
self.color_show_dict[res] = color
return color
def hide(self):
i = self.show_list_tk.curselection()
if len(i) == 0:
return
res = self.show_list_tk.get(i[0])
self.hide_(res)
self.update_listbox()
def show(self):
i = self.hide_list_tk.curselection()
if len(i) == 0:
return
res = self.hide_list_tk.get(i[0])
self.show_(res)
self.update_listbox()
def hide_(self, res):
color = self.color_show_dict.get(res)
if color is not None:
del self.color_show_dict[res]
self.color_hide_dict[res] = color
def show_(self, res):
color = self.color_hide_dict.get(res)
if color is not None:
del self.color_hide_dict[res]
self.color_show_dict[res] = color
    def conf_gui(self, n: int = 1):
        """Lay out the show/hide listboxes, the matplotlib canvas and the buttons."""
        self.__conf_font(n * Config.tk_zoom)
        btn_font = make_font(size=self.btn_font_size)
        little_btn_font = make_font(size=self.little_btn_font_size)
        self.color_frame['bg'] = self.bg_color
        self.color_frame['bd'] = 5
        self.color_frame['relief'] = "ridge"
        # Upper half: shown series; lower half: hidden series.
        self.show_list_tk.place(relx=0, rely=0, relwidth=0.90, relheight=0.475)
        self.show_list_scroll.place(relx=0.90, rely=0, relwidth=0.10, relheight=0.475)
        self.show_list_scroll['orient'] = 'vertical'
        self.show_list_scroll['command'] = self.show_list_tk.yview
        self.show_list_tk['yscrollcommand'] = self.show_list_scroll.set
        self.show_list_tk['activestyle'] = tk.NONE
        self.hide_list_tk.place(relx=0, rely=0.525, relwidth=0.90, relheight=0.475)
        self.hide_list_scroll.place(relx=0.90, rely=0.525, relwidth=0.10, relheight=0.475)
        self.hide_list_scroll['orient'] = 'vertical'
        self.hide_list_scroll['command'] = self.hide_list_tk.yview
        self.hide_list_tk['yscrollcommand'] = self.hide_list_scroll.set
        self.hide_list_tk['activestyle'] = tk.NONE
        for btn, text, func, x in zip([self.btn_show, self.btn_hide],
                                      ["显示", "隐藏"],
                                      [self.show, self.hide],
                                      [0.00, 0.50]):
            btn['font'] = little_btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
            btn.place(relx=x, rely=0.475, relwidth=0.50, relheight=0.05)
        self.color_frame.place(relx=0.01, rely=0.02, relwidth=0.18, relheight=0.88)
        self.figure_frame['bg'] = self.bg_color
        self.figure_frame['bd'] = 5
        self.figure_frame['relief'] = "ridge"
        self.figure_frame.place(relx=0.21, rely=0.02, relwidth=0.79, relheight=0.88)
        self.canvas_tk.place(relx=0, rely=0, relwidth=1.0, relheight=0.9)
        self.toolbar.place(relx=0, rely=0.9, relwidth=1.0, relheight=0.1)
        for btn, text, func, x in zip([self.reset_btn, self.reverse_btn, self.refresh_btn, self.export_btn],
                                      ["复位选择", "反转选择", "刷新数据", "导出数据"],
                                      [self.reset, self.reverse, self.refresh, self.export],
                                      [0.37, 0.53, 0.69, 0.85]):
            btn['font'] = btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
            btn.place(relx=x, rely=0.91, relwidth=0.15, relheight=0.08)
        self.legend_show[0]['font'] = btn_font
        self.legend_show[0]['bg'] = self.color
        self.legend_show[0]['text'] = "显示图例"
        self.legend_show[0]['variable'] = self.legend_show[1]
        self.legend_show[0].place(relx=0.21, rely=0.91, relwidth=0.15, relheight=0.08)
def export(self, title, func: Callable):
path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
if not path.endswith(".csv"):
path += ".csv"
with open(path, "w") as f:
f.write(f"Hour, Count, {title}\n")
for i in self.export_lst:
f.write(f"{i[0]}, {i[1]}, {func(i)}\n")
self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")
    def refresh(self):
        # Clear both subplots; subclasses re-fetch data and redraw on top of this.
        self.plt_1.cla()
        self.plt_2.cla()
def reset(self):
self.color_show_dict.update(self.color_hide_dict)
self.color_hide_dict = {}
self.update_listbox()
def reverse(self):
tmp = self.color_show_dict
self.color_show_dict = self.color_hide_dict
self.color_hide_dict = tmp
self.update_listbox()
    def show_result(self, res: Dict[str, any], lst: List):
        """Render hourly counts as a stacked bar chart (plt_1) and a line chart (plt_2).

        :param res: mapping with key 'res_type' (list of series names) plus one
                    entry per series holding (hour, count, ...) rows
        :param lst: raw result rows, cached in ``self.export_lst`` for CSV export
        """
        bottom = np.zeros(24)  # running stack height per hour for the bar chart
        label_num = [i for i in range(24)]
        label_str = [f"{i}" for i in range(24)]
        res_type_lst: List = res['res_type']
        self.export_lst = lst
        max_y_plot = 1  # y-limit floors so an empty result still renders
        max_y_bar = 1
        for res_type in res_type_lst:
            res_count: Tuple[str] = res[res_type]
            if len(res_count) != 0:
                # check_show returns the series colour, or None when the series
                # is currently hidden by the user.
                color = self.check_show(res_type)
                if color is None:
                    continue
                # Accumulate counts into 24 hourly buckets (row = (hour, count, ...)).
                y = [0 for _ in range(24)]
                for i in res_count:
                    y[int(i[0])] += int(i[1])
                self.color_show_dict[res_type] = color
                self.plt_1.bar(label_num, y,
                               color=color,
                               align="center",
                               bottom=bottom,
                               tick_label=label_str,
                               label=res_type)
                self.plt_2.plot(label_num, y,
                                color=color,
                                label=res_type,
                                marker='o',
                                markersize=5)
                bottom += np.array(y)  # raise the stack base for the next series
                max_y_plot = max(max(y), max_y_plot)
        # Legend is optional, controlled by the "显示图例" checkbox.
        if self.legend_show[1].get() == 1:
            self.plt_1.legend(loc="upper left")
            self.plt_2.legend(loc="upper left")
        # --- bar chart (plt_1) axes ---
        self.plt_1.set_xlim(-1, 24)
        self.plt_1.set_xticks([i for i in range(0, 24, 2)])
        self.plt_1.set_xticklabels([f"{i}h" for i in range(0, 24, 2)])
        max_y_bar = int(max(bottom.max(), max_y_bar))
        self.plt_1.set_ylim(0, max_y_bar + max_y_bar * 0.1)
        step = ceil(max_y_bar / 5)  # aim for ~5 y ticks
        if step > 0:
            y_ticks = [i for i in range(0, max_y_bar, step)]
            y_ticklabels = [f'{i}' for i in range(0, max_y_bar, step)]
        else:
            y_ticks = []
            y_ticklabels = []
        y_ticks.append(max_y_bar)
        y_ticklabels.append(f"{max_y_bar}")
        self.plt_1.set_yticks(y_ticks)
        self.plt_1.set_yticklabels(y_ticklabels)
        self.plt_1.spines['right'].set_color('none')
        self.plt_1.spines['top'].set_color('none')
        self.plt_1.grid(axis='y')
        self.plt_1.set_title(f"{self.program_title}柱状图")
        # --- line chart (plt_2) axes ---
        self.plt_2.set_xlim(-1, 24)
        self.plt_2.set_xticks([i for i in range(0, 24, 2)])
        self.plt_2.set_xticklabels([f"{i}h" for i in range(0, 24, 2)])
        self.plt_2.set_ylim(0, max_y_plot + max_y_plot * 0.1)
        step = ceil(max_y_plot / 5)
        if step > 0:
            y_ticks = [i for i in range(0, max_y_plot, step)]
            y_ticklabels = [f'{i}' for i in range(0, max_y_plot, step)]
        else:
            y_ticks = []
            y_ticklabels = []
        y_ticks.append(max_y_plot)
        y_ticklabels.append(f"{max_y_plot}")
        self.plt_2.set_yticks(y_ticks)
        self.plt_2.set_yticklabels(y_ticklabels)
        self.plt_2.spines['right'].set_color('none')
        self.plt_2.spines['top'].set_color('none')
        self.plt_2.grid(axis='y')
        self.plt_2.set_title(f"{self.program_title}折线图")
        self.canvas.draw()
        self.toolbar.update()
        self.update_listbox()
def set_disable(self):
self.export_btn['state'] = 'disable'
self.reset_btn['state'] = 'disable'
self.refresh_btn['state'] = 'disable'
self.reverse_btn['state'] = 'disable'
self.btn_show['state'] = 'disable'
self.btn_hide['state'] = 'disable'
def reset_disable(self):
self.export_btn['state'] = 'normal'
self.reset_btn['state'] = 'normal'
self.refresh_btn['state'] = 'normal'
self.reverse_btn['state'] = 'normal'
self.btn_show['state'] = 'normal'
self.btn_hide['state'] = 'normal'
class StatisticsTimeLocProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by drop-off location."""

    def __init__(self, station, win, color):
        super(StatisticsTimeLocProgram, self).__init__(station, win, color, "时段分析-按投放区域")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the charts and queue a count-by-hour query grouped by Location."""
        super(StatisticsTimeLocProgram, self).refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["Location"], lambda i: i[2], self)
        self.station.push_event(event)

    def export(self, *_, **__):
        """Export the cached rows with the location as the grouping column."""
        super(StatisticsTimeLocProgram, self).export("Location", lambda i: i[2])
class StatisticsTimeTypeProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by garbage type."""

    def __init__(self, station, win, color):
        super(StatisticsTimeTypeProgram, self).__init__(station, win, color, "时段分析-按投放类型")
        self._conf("#abc88b")
        # One fixed legend colour per garbage type (indexes 1-4 of the name list).
        for idx, c in enumerate(("#00BFFF", "#32CD32", "#DC143C", "#A9A9A9"), start=1):
            self.color_show_dict[GarbageType.GarbageTypeStrList_ch[idx]] = c

    def refresh(self):
        """Clear the charts and queue a count-by-hour query grouped by GarbageType."""
        super(StatisticsTimeTypeProgram, self).refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsTimeTypeProgram, self).export("Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Translate the raw GarbageType byte in a result row to its Chinese name."""
        raw: bytes = i[2]
        return GarbageType.GarbageTypeStrList_ch[int(raw.decode('utf-8'))]
class StatisticsTimeTypeLocProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by garbage type and drop-off location."""

    def __init__(self, station, win, color):
        super(StatisticsTimeTypeLocProgram, self).__init__(station, win, color, "时段分析-按投放类型和区域")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the charts and queue a query grouped by GarbageType and Location."""
        super(StatisticsTimeTypeLocProgram, self).refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsTimeTypeLocProgram, self).export("Type-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a '<type name>-<location>' grouping label for one result row."""
        raw: bytes = i[2]
        return f"{GarbageType.GarbageTypeStrList_ch[int(raw.decode('utf-8'))]}-{i[3]}"
class StatisticsTimeCheckResultProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by garbage-check result (pass/fail)."""

    def __init__(self, station, win, color):
        super(StatisticsTimeCheckResultProgram, self).__init__(station, win, color, "时段分析-按检查结果")
        self._conf("#abc88b")
        self.color_show_dict.update({'Pass': "#00BFFF", 'Fail': "#DC143C"})

    def refresh(self):
        """Clear the charts and queue a count-by-hour query grouped by CheckResult."""
        super(StatisticsTimeCheckResultProgram, self).refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["CheckResult"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsTimeCheckResultProgram, self).export("Result", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Map the CheckResult DB bit of a row to 'Pass'/'Fail' ('None' if unchecked)."""
        raw = i[2]
        if raw is None:
            return 'None'
        return 'Pass' if raw == DBBit.BIT_1 else 'Fail'
class StatisticsTimeCheckResultAndTypeProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by check result and garbage type."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按检查结果和类型")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the charts and queue a query grouped by CheckResult and GarbageType."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["CheckResult", "GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        # Fixed: the CSV column header was "Result-Location", but this program
        # groups by check result and garbage *type* (it was swapped with the
        # ...AndLocProgram sibling).
        super().export("Result-Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<type name>' grouping label from one result row."""
        data_1: bytes = i[2]
        data_2: bytes = i[3]
        if data_1 is None:
            tmp = 'None'
        elif data_1 == DBBit.BIT_1:
            tmp = 'Pass'
        else:
            tmp = 'Fail'
        return tmp + f'-{GarbageType.GarbageTypeStrList_ch[int(data_2.decode("utf-8"))]}'
class StatisticsTimeCheckResultAndLocProgram(StatisticsTimeProgramBase):
    """Hourly statistics grouped by check result and drop-off location."""

    def __init__(self, station, win, color):
        super().__init__(station, win, color, "时段分析-按检查结果和区域")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the charts and queue a query grouped by CheckResult and Location."""
        super().refresh()
        event = tk_event.CountTimeEvent(self.station).start(["CheckResult", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        # Fixed: the CSV column header was "Result-Type", but this program
        # groups by check result and *location* (it was swapped with the
        # ...AndTypeProgram sibling).
        super().export("Result-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<location>' grouping label from one result row."""
        if i[2] is None:
            return 'None'
        data_1: bytes = i[2]
        return ('Pass' if data_1 == DBBit.BIT_1 else 'Fail') + f"-{i[3]}"
class StatisticsTimeDetailProgram(StatisticsTimeProgramBase):
    """Hourly statistics with the finest grouping: result x type x location."""

    def __init__(self, station, win, color):
        super(StatisticsTimeDetailProgram, self).__init__(station, win, color, "时段分析-详细分类")
        self._conf("#abc88b")

    def refresh(self):
        """Clear the charts and queue a query grouped by all three dimensions."""
        super(StatisticsTimeDetailProgram, self).refresh()
        event = tk_event.CountTimeEvent(self.station)
        event.start(["CheckResult", "GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsTimeDetailProgram, self).export("Detail", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'result-type-location' grouping label for one result row."""
        raw_result: bytes = i[2]
        raw_type: bytes = i[3]
        if raw_result is None:
            prefix = 'None'
        elif raw_result == DBBit.BIT_1:
            prefix = 'Pass'
        else:
            prefix = 'Fail'
        return f'{prefix}-{GarbageType.GarbageTypeStrList_ch[int(raw_type.decode("utf-8"))]}-{i[4]}'
class StatisticsUserBaseProgram(AdminProgram):
    """Base class for single-figure user-statistics programs.

    Provides one matplotlib Figure/Axes embedded in a tk frame, a toolbar,
    and refresh/export buttons. Subclasses implement ``show_result`` and
    override ``export``/``refresh``.
    """

    def __init__(self, station, win, color, title: str):
        super().__init__(station, win, color, title)
        self.figure_frame = tk.Frame(self.frame)
        self.figure = Figure(dpi=100)
        self.plt: Axes = self.figure.add_subplot(111)  # the single plotting axes
        self.figure.subplots_adjust(bottom=0.2, top=0.93)  # leave room for tables/labels
        self.canvas = FigureCanvasTkAgg(self.figure, master=self.figure_frame)
        self.canvas_tk = self.canvas.get_tk_widget()
        self.toolbar = NavigationToolbar2Tk(self.canvas, self.figure_frame)
        self.color_bar: Optional[Colorbar] = None  # set by heat-map subclasses
        self.export_lst: Optional[np.array] = None  # last result, cached for CSV export
        self.export_btn = tk.Button(self.frame)
        self.refresh_btn = tk.Button(self.frame)
        self._conf("#abc88b")
        self.__conf_font()

    def _conf(self, bg_color):
        # Background colour used by conf_gui for the figure frame.
        self.bg_color = bg_color

    def __conf_font(self, n: int = Config.tk_zoom):
        # Scale the button font with the configured zoom factor.
        self.btn_font_size = int(14 * n)

    def conf_gui(self, n: int = 1):
        """Lay out the figure frame, canvas, toolbar and the two control buttons."""
        self.__conf_font(n * Config.tk_zoom)

        btn_font = make_font(size=self.btn_font_size)

        self.figure_frame['bg'] = self.bg_color
        self.figure_frame['bd'] = 5
        self.figure_frame['relief'] = "ridge"
        self.figure_frame.place(relx=0.00, rely=0.02, relwidth=1, relheight=0.88)

        self.canvas_tk.place(relx=0, rely=0, relwidth=1.0, relheight=0.9)
        self.toolbar.place(relx=0, rely=0.9, relwidth=1.0, relheight=0.1)

        for btn, text, func, x in zip([self.refresh_btn, self.export_btn],
                                      ["刷新数据", "导出数据"],
                                      [self.refresh, self.export],
                                      [0.34, 0.51]):
            btn['font'] = btn_font
            btn['bg'] = Config.tk_btn_bg
            btn['text'] = text
            btn['command'] = func
            btn.place(relx=x, rely=0.91, relwidth=0.15, relheight=0.08)

    def export(self):
        # Intentionally a no-op here; subclasses provide the real export.
        ...

    def refresh(self, event_class):
        """Clear the axes (and colour bar, if any) then queue *event_class* to re-query."""
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
        event = event_class(self.station).start(self)
        self.station.push_event(event)

    def set_disable(self):
        # Disable controls while a background event runs.
        self.export_btn['state'] = 'disable'
        self.refresh_btn['state'] = 'disable'

    def reset_disable(self):
        # Re-enable controls after the event completes.
        self.export_btn['state'] = 'normal'
        self.refresh_btn['state'] = 'normal'
class StatisticsUserTinyProgram(StatisticsUserBaseProgram):
    """Fine-grained score-vs-reputation heat map (10-point buckets)."""

    def __init__(self, station, win, color):
        super(StatisticsUserTinyProgram, self).__init__(station, win, color, "积分信用分析-细致")

    def show_result(self, lst: np.array):
        """Draw *lst* (a 2-D bucket-count array) as a heat map with a colour bar."""
        self.export_lst = lst
        x_label = [f'{i * 10}' for i in range(0, 51, 10)]   # score axis labels
        y_label = [f'{i * 10}' for i in range(0, 101, 20)]  # reputation axis labels
        im = self.plt.pcolormesh(lst, cmap='Blues')
        self.plt.set_xticks(range(0, 101, 20))
        self.plt.set_yticks(range(0, 101, 20))
        self.plt.set_xticklabels(x_label)
        self.plt.set_yticklabels(y_label)
        self.plt.set_xlabel("用户积分")
        self.plt.set_ylabel("垃圾分类信用")
        self.color_bar = self.figure.colorbar(im, pad=0.03, ax=self.plt)
        self.plt.set_title("积分信用分析-细致热图")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        """Write the cached bucket matrix to a user-chosen CSV file."""
        if self.export_lst is None:
            self.station.show_msg("保存数据", f"没有数据需要保存")
            return
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:
            # Dialog cancelled; previously this wrote a file literally named ".csv".
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write("#, " + ", ".join([f'[{i * 10} {i * 10 + 10}]' for i in range(0, 100, 1)]) + "\n")
            for i, lst in zip(range(0, 50, 1), self.export_lst):
                f.write(f"[{i * 10} {i * 10 + 10}], " + ", ".join([f"{a}" for a in lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        super().refresh(tk_event.CountScoreReputationTinyEvent)
class StatisticsUserLargeProgram(StatisticsUserBaseProgram):
    """Coarse score-vs-reputation heat map (large buckets)."""

    def __init__(self, station, win, color):
        super(StatisticsUserLargeProgram, self).__init__(station, win, color, "积分信用分析-大致")

    def show_result(self, lst: np.array):
        """Draw *lst* (a 2-D bucket-count array) as a heat map with a colour bar."""
        self.export_lst = lst
        x_label = [f'{i * 10}' for i in range(0, 51, 10)]   # score axis labels
        y_label = [f'{i * 10}' for i in range(0, 101, 20)]  # reputation axis labels
        im = self.plt.pcolormesh(lst, cmap='Blues')
        self.plt.set_xticks(range(0, 11, 2))
        self.plt.set_yticks(range(0, 11, 2))
        self.plt.set_xticklabels(x_label)
        self.plt.set_yticklabels(y_label)
        self.plt.set_xlabel("用户积分")
        self.plt.set_ylabel("垃圾分类信用")
        self.color_bar = self.figure.colorbar(im, pad=0.03, ax=self.plt)
        self.plt.set_title("积分信用分析-大致热图")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        """Write the cached bucket matrix to a user-chosen CSV file."""
        if self.export_lst is None:
            self.station.show_msg("保存数据", f"没有数据需要保存")
            return
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:
            # Dialog cancelled; previously this wrote a file literally named ".csv".
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write("#, " + ", ".join([f'[{i * 10} {i * 10 + 100}]' for i in range(0, 100, 10)]) + "\n")
            for i, lst in zip(range(0, 50, 5), self.export_lst):
                f.write(f"[{i * 10} {i * 10 + 50}], " + ", ".join([f"{a}" for a in lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        super().refresh(tk_event.CountScoreReputationLargeEvent)
class StatisticsScoreDistributedProgram(StatisticsUserBaseProgram):
    """Histogram of the user score distribution (0-500, bin width 10)."""

    def __init__(self, station, win, color):
        super(StatisticsScoreDistributedProgram, self).__init__(station, win, color, "积分分布")

    def show_result(self, lst: np.array):
        """Draw the score histogram and cache the per-bin counts for export."""
        bins = [i for i in range(0, 501, 10)]  # 50 bins of width 10 over [0, 500]
        res = self.plt.hist(lst, bins)
        self.export_lst = res[0]  # bin counts; len == len(bins) - 1 == 50
        self.plt.set_xlabel("用户积分")
        self.plt.set_ylabel("分布")
        self.plt.set_title("积分分布直方图")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        """Write the cached histogram counts to a user-chosen CSV file."""
        if self.export_lst is None:
            self.station.show_msg("保存数据", f"没有数据需要保存")
            return
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:
            # Dialog cancelled; previously this wrote a file literally named ".csv".
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            # Fixed: the header previously emitted 51 labels scaled x10
            # ([0 100] ... [5000 5100]); the histogram has 50 bins of width 10.
            f.write("积分区间," + ", ".join([f'[{i} {i + 10}]' for i in range(0, 500, 10)]) + "\n")
            f.write("积分分布," + ", ".join([f'{i}' for i in self.export_lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the axes (and colour bar) then queue a score-distribution query."""
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
        event = tk_event.ScoreReputationDistributedEvent(self.station).start("Score", self)
        self.station.push_event(event)
class StatisticsReputationDistributedProgram(StatisticsUserBaseProgram):
    """Histogram of the reputation distribution (0-1000, bin width 20)."""

    def __init__(self, station, win, color):
        super(StatisticsReputationDistributedProgram, self).__init__(station, win, color, "垃圾分类信用分布")

    def show_result(self, lst: np.array):
        """Draw the reputation histogram and cache the per-bin counts for export."""
        bins = [i for i in range(0, 1001, 20)]  # 50 bins of width 20 over [0, 1000]
        res = self.plt.hist(lst, bins)
        self.export_lst = res[0]  # bin counts; len == len(bins) - 1 == 50
        self.plt.set_xlabel("垃圾分类信用")
        self.plt.set_ylabel("分布")
        self.plt.set_title("垃圾分类信用分布直方图")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        """Write the cached histogram counts to a user-chosen CSV file."""
        if self.export_lst is None:
            self.station.show_msg("保存数据", f"没有数据需要保存")
            return
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path:
            # Dialog cancelled; previously this wrote a file literally named ".csv".
            return
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            # Fixed: the header was copied from the score program ([0 100] ...
            # for 51 bins); this histogram has 50 bins of width 20 over 0-1000.
            f.write("信用区间," + ", ".join([f'[{i} {i + 20}]' for i in range(0, 1000, 20)]) + "\n")
            f.write("信用分布," + ", ".join([f'{i}' for i in self.export_lst]) + "\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the axes (and colour bar) then queue a reputation-distribution query."""
        self.plt.cla()
        if self.color_bar is not None:
            self.color_bar.remove()
        event = tk_event.ScoreReputationDistributedEvent(self.station).start("Reputation", self)
        self.station.push_event(event)
class StatisticsPassRateGlobalProgram(StatisticsUserBaseProgram):
    """Donut chart of the global garbage-check pass rate."""

    def __init__(self, station, win, color):
        super(StatisticsPassRateGlobalProgram, self).__init__(station, win, color, "通过率-全局")

    def show_result(self, lst: np.array):
        """Draw the pass/fail donut; lst[0][0] is the pass fraction in [0, 1]."""
        passing = float(lst[0][0])
        not_passing = 1 - passing
        data = [passing, not_passing]
        label = ["通过", "未通过"]
        # width=0.6 turns the pie into a donut; autopct prints percentages.
        res = self.plt.pie(data, radius=1, pctdistance=0.7, textprops=dict(color='w'),
                           startangle=45, autopct="%6.3f%%", wedgeprops=dict(width=0.6, edgecolor="w"))
        self.plt.legend(res[0], label, loc="lower left")
        self.plt.set_title("全局垃圾分类通过率")
        # Numeric table below the chart.
        self.plt.table(cellText=[data], cellLoc="center", colLabels=label,
                       rowLabels=['全局'], rowLoc='center', loc='bottom', colWidths=[0.4] * 2)
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        # Pie percentages are not exportable; just inform the user.
        self.station.show_msg("保存数据", f"数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the axes and queue an unfiltered pass-rate query."""
        self.plt.cla()
        event = tk_event.PassingRateEvent(self.station).start([], [], [], [], self)
        self.station.push_event(event)
class StatisticsPassRateTypeProgram(StatisticsUserBaseProgram):
    """Nested donut chart of the pass rate per garbage type."""

    def show_result(self, lst: List[Tuple[bytes, any]]):
        """Draw one donut ring per garbage type.

        :param lst: rows of (raw type byte, pass fraction); missing types
                    default to a 100% pass ring.
        """
        data_1, data_2, data_3, data_4 = [1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]
        for i in lst:
            tmp: bytes = i[0]
            type_ = tmp.decode('utf-8')
            if type_ == '1':
                data_1 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '2':
                data_2 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '3':
                data_3 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '4':
                data_4 = [float(i[1]), 1 - float(i[1])]

        legend_text = []
        # Concentric rings: one radius/start-angle pair per type.
        for data, r, s in zip([data_1, data_2, data_3, data_4], [0.3, 0.6, 0.9, 1.2], [0, 15, 30, 45]):
            res = self.plt.pie(data, radius=r, pctdistance=0.7,
                               startangle=s, autopct="%6.3f%%", wedgeprops=dict(width=0.3, edgecolor="w"))
            legend_text += res[0]

        label = []
        for i in GarbageType.GarbageTypeStrList_ch[1:]:
            label.append(f"{i}-通过")
            label.append(f"{i}-不通过")

        self.plt.table(cellText=[data_1, data_2, data_3, data_4], cellLoc="center", colLabels=['通过', '未通过'],
                       rowLabels=GarbageType.GarbageTypeStrList_ch[1:], rowLoc='center', loc='bottom')
        self.plt.legend(legend_text, label)
        # NOTE(review): the title says "全局" although this chart is per-type —
        # possibly copied from the global program; confirm intended wording.
        self.plt.set_title("全局垃圾分类通过率")
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        # Pie percentages are not exportable; just inform the user.
        self.station.show_msg("保存数据", f"数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the axes and queue a pass-rate query grouped by GarbageType."""
        self.plt.cla()
        event = tk_event.PassingRateEvent(self.station).start(["GarbageType"],
                                                              [],
                                                              ["g.GarbageType=garbage.GarbageType"],
                                                              ["GarbageType"], self)
        self.station.push_event(event)

    def __init__(self, station, win, color):
        super(StatisticsPassRateTypeProgram, self).__init__(station, win, color, "通过率-按类型")
class StatisticsPassRateLocProgram(StatisticsUserBaseProgram):
    """Donut chart of the pass rate filtered by a user-entered location."""

    def __init__(self, station, win, color):
        super(StatisticsPassRateLocProgram, self).__init__(station, win, color, "通过率-按区域")
        self.loc_frame = tk.Frame(self.frame)
        self.loc_title = tk.Label(self.loc_frame)
        self.loc_enter = tk.Entry(self.loc_frame), tk.StringVar()

    def conf_gui(self, n: int = 1):
        """Lay out the base widgets plus the location-filter entry box."""
        super(StatisticsPassRateLocProgram, self).conf_gui(n)
        title_font = make_font(size=16)

        self.loc_frame['bg'] = self.bg_color
        self.loc_frame['bd'] = 5
        self.loc_frame['relief'] = "ridge"
        self.loc_frame.place(relx=0.0, rely=0.92, relwidth=0.33, relheight=0.07)

        self.loc_title['font'] = title_font
        self.loc_title['text'] = "区域:"
        self.loc_title['bg'] = self.bg_color
        self.loc_title['anchor'] = 'e'

        self.loc_enter[0]['font'] = title_font
        self.loc_enter[0]['textvariable'] = self.loc_enter[1]

        self.loc_title.place(relx=0.0, rely=0.02, relwidth=0.3, relheight=0.96)
        self.loc_enter[0].place(relx=0.3, rely=0.02, relwidth=0.7, relheight=0.96)

    def show_result(self, lst: np.array):
        """Draw the pass/fail donut; lst[0][0] is the pass fraction in [0, 1]."""
        passing = float(lst[0][0])
        label = ["通过", "未通过"]
        not_passing = 1 - passing
        data = [passing, not_passing]
        res = self.plt.pie(data, radius=1, pctdistance=0.7, textprops=dict(color='w'),
                           startangle=45, autopct="%6.3f%%", wedgeprops=dict(width=0.6, edgecolor="w"))
        self.plt.legend(res[0], label, loc="lower left")
        self.plt.table(cellText=[data], cellLoc="center", colLabels=label,
                       rowLabels=["区域"], rowLoc='center', loc='bottom')
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        # Added for consistency with the other pass-rate programs: the inherited
        # base export is a no-op, so the export button silently did nothing.
        self.station.show_msg("保存数据", "数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the axes and queue a pass-rate query filtered by the entered location."""
        where = self.loc_enter[1].get()
        if len(where) == 0:
            where = "全局"  # empty filter -> global rate
            where_ = []
        else:
            where_ = [f"Location='{where}'"]
        self.plt.cla()
        self.plt.set_title(f"{where}垃圾分类通过率")
        event = tk_event.PassingRateEvent(self.station).start([], where_, where_, [], self)
        self.station.push_event(event)
class StatisticsPassRateTypeAndLocProgram(StatisticsUserBaseProgram):
    """Nested donut chart of the pass rate per garbage type, filtered by location."""

    def __init__(self, station, win, color):
        super(StatisticsPassRateTypeAndLocProgram, self).__init__(station, win, color, "通过率-按类型和区域")
        self.loc_frame = tk.Frame(self.frame)
        self.loc_title = tk.Label(self.loc_frame)
        self.loc_enter = tk.Entry(self.loc_frame), tk.StringVar()

    def conf_gui(self, n: int = 1):
        """Lay out the base widgets plus the location-filter entry box."""
        super(StatisticsPassRateTypeAndLocProgram, self).conf_gui(n)
        title_font = make_font(size=16)

        self.loc_frame['bg'] = self.bg_color
        self.loc_frame['relief'] = "ridge"
        self.loc_frame['bd'] = 5
        self.loc_frame.place(relx=0.0, rely=0.92, relwidth=0.33, relheight=0.07)

        self.loc_title['font'] = title_font
        self.loc_title['bg'] = self.bg_color
        self.loc_title['text'] = "区域:"
        self.loc_title['anchor'] = 'e'

        self.loc_enter[0]['font'] = title_font
        self.loc_enter[0]['textvariable'] = self.loc_enter[1]

        self.loc_title.place(relx=0.0, rely=0.02, relwidth=0.3, relheight=0.96)
        self.loc_enter[0].place(relx=0.3, rely=0.02, relwidth=0.7, relheight=0.96)

    def show_result(self, lst: List[Tuple[bytes, any]]):
        """Draw one donut ring per garbage type.

        :param lst: rows of (raw type byte, pass fraction); missing types
                    default to a 100% pass ring.
        """
        data_1, data_2, data_3, data_4 = [1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]
        for i in lst:
            tmp: bytes = i[0]
            type_ = tmp.decode('utf-8')
            if type_ == '4':
                data_4 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '3':
                data_3 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '2':
                data_2 = [float(i[1]), 1 - float(i[1])]
            elif type_ == '1':
                data_1 = [float(i[1]), 1 - float(i[1])]

        legend_text = []
        # Concentric rings: one radius/start-angle pair per type.
        for data, r, s in zip([data_1, data_2, data_3, data_4], [0.3, 0.6, 0.9, 1.2], [5, 20, 35, 50]):
            res = self.plt.pie(data, radius=r, pctdistance=0.7,
                               startangle=s, autopct="%6.3f%%", wedgeprops=dict(width=0.3, edgecolor="w"))
            legend_text += res[0]

        label = []
        for i in GarbageType.GarbageTypeStrList_ch[1:]:
            label.append(f"{i}-通过")
            label.append(f"{i}-不通过")

        self.plt.table(cellText=[data_1, data_2, data_3, data_4], cellLoc="center", colLabels=['通过', '未通过'],
                       rowLabels=GarbageType.GarbageTypeStrList_ch[1:], rowLoc='center', loc='bottom')
        self.plt.legend(legend_text, label)
        self.canvas.draw()
        self.toolbar.update()

    def export(self):
        # Pie percentages are not exportable; just inform the user.
        self.station.show_msg("保存数据", f"数据不支持导出")
        return

    def to_program(self):
        self.refresh()

    def refresh(self, _=None):
        """Clear the axes and queue a per-type pass-rate query filtered by location."""
        where = self.loc_enter[1].get()
        if len(where) == 0:
            where = "全局"  # empty filter -> global rate
            where_ = []
        else:
            where_ = [f"Location='{where}'"]
        self.plt.cla()
        self.plt.set_title(f"{where}垃圾分类通过率")
        event = tk_event.PassingRateEvent(self.station).start(["GarbageType"],
                                                              where_,
                                                              where_ + ["g.GarbageType=garbage.GarbageType"],
                                                              ["GarbageType"], self)
        self.station.push_event(event)
class StatisticsDateProgramBase(StatisticsTimeProgramBase):
    """Base class for per-day statistics programs (last N days, plotted daily)."""

    def _conf(self, bg_color, days: int = 7, days_sep: int = 1):
        """Configure colours plus the day window.

        :param days: number of days shown on the x axis
        :param days_sep: spacing (in days) between x tick labels
        """
        super(StatisticsDateProgramBase, self)._conf(bg_color)
        self._days = days
        self._days_sep = days_sep

    def export(self, title, func: Callable):
        """Export the cached daily statistics to a CSV file chosen by the user.

        NOTE(review): asksaveasfilename returns '' when the dialog is cancelled,
        in which case this writes a file literally named ".csv" — confirm and guard.
        """
        path = asksaveasfilename(title='选择CSV文件保存位置', filetypes=[("CSV", ".csv")])
        if not path.endswith(".csv"):
            path += ".csv"
        with open(path, "w") as f:
            f.write(f"Days, Count, {title}\n")
            for i in self.export_lst:
                f.write(f"{i[0]}, {i[1]}, {func(i)}\n")
        self.station.show_msg("保存数据", f"数据导出成功\n保存位置:\n {path}")

    def show_result(self, res: Dict[str, any], lst: List, end_time: Optional[str] = None):
        """Render daily counts as a line chart (plt_1) and stacked bar chart (plt_2).

        :param res: mapping with key 'res_type' (series names) plus one entry per
                    series holding (days-ago, count, ...) rows
        :param lst: raw result rows, cached for CSV export
        :param end_time: 'YYYY-MM-DD' of the last day; defaults to now
        """
        if end_time is None:
            end_time = datetime.datetime.now()
        else:
            end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d')
        bottom = np.zeros(self._days)  # running stack height for the bar chart
        label_num = [i for i in range(self._days)]
        label_str = [f"{i}" for i in range(self._days)]
        res_type_lst: List = res['res_type']
        self.export_lst = lst
        max_y_plot = 1  # y-limit floors so an empty result still renders
        max_y_bar = 1
        for res_type in res_type_lst:
            res_count: List[Tuple[int, int, bytes]] = res[res_type]
            if len(res_count) != 0:
                # check_show returns the series colour, or None when hidden.
                color = self.check_show(res_type)
                if color is None:
                    continue
                y = [0 for _ in range(self._days)]
                for i in range(0, len(res_count)):
                    y[res_count[i][0]] = res_count[i][1]
                # Reverse so the oldest day ends up on the left of the axis
                # (rows are indexed by days-ago — presumably; confirm in tk_event).
                y = y[::-1]
                max_y_plot = max(max(y), max_y_plot)
                self.color_show_dict[res_type] = color
                self.plt_1.plot(label_num, y,
                                color=color,
                                label=res_type,
                                marker='o',
                                markersize=5)
                self.plt_2.bar(label_num, y,
                               color=color,
                               align="center",
                               bottom=bottom,
                               tick_label=label_str,
                               label=res_type)
                bottom += np.array(y)  # raise the stack base for the next series
        # Legend is optional, controlled by the "显示图例" checkbox.
        if self.legend_show[1].get() == 1:
            self.plt_1.legend(loc="upper left")
            self.plt_2.legend(loc="upper left")
        # Build date strings for the x tick labels, newest on the right.
        x_label = []
        for i in range(self._days - 1, -1, -self._days_sep):
            d = end_time - datetime.timedelta(days=i)
            x_label.append(d.strftime("%Y-%m-%d"))
        # --- line chart (plt_1) axes ---
        self.plt_1.set_xlim(-1, self._days)
        self.plt_1.set_xticks([i for i in range(0, self._days, self._days_sep)])
        self.plt_1.set_xticklabels(x_label, rotation=20)
        self.plt_1.set_ylim(0, max_y_plot + max_y_plot * 0.1)
        step = ceil(max_y_plot / 5)  # aim for ~5 y ticks
        if step > 0:
            y_ticks = [i for i in range(0, max_y_plot, step)]
            y_ticklabels = [f'{i}' for i in range(0, max_y_plot, step)]
        else:
            y_ticks = []
            y_ticklabels = []
        y_ticks.append(max_y_plot)
        y_ticklabels.append(f"{max_y_plot}")
        self.plt_1.set_yticks(y_ticks)
        self.plt_1.set_yticklabels(y_ticklabels)
        self.plt_1.spines['right'].set_color('none')
        self.plt_1.spines['top'].set_color('none')
        self.plt_1.grid(axis='y')
        self.plt_1.set_title(f"{self.program_title}折线图")
        # --- bar chart (plt_2) axes ---
        self.plt_2.set_xlim(-1, self._days)
        self.plt_2.set_xticks([i for i in range(0, self._days, self._days_sep)])
        self.plt_2.set_xticklabels(x_label, rotation=20)
        max_y_bar = int(max(bottom.max(), max_y_bar))
        self.plt_2.set_ylim(0, max_y_bar + max_y_bar * 0.1)
        step = ceil(max_y_bar / 5)
        if step > 0:
            y_ticks = [i for i in range(0, max_y_bar, step)]
            y_ticklabels = [f'{i}' for i in range(0, max_y_bar, step)]
        else:
            y_ticks = []
            y_ticklabels = []
        y_ticks.append(max_y_bar)
        y_ticklabels.append(f"{max_y_bar}")
        self.plt_2.set_yticks(y_ticks)
        self.plt_2.set_yticklabels(y_ticklabels)
        self.plt_2.spines['right'].set_color('none')
        self.plt_2.spines['top'].set_color('none')
        self.plt_2.grid(axis='y')
        self.plt_2.set_title(f"{self.program_title}柱状图")
        self.canvas.draw()
        self.toolbar.update()
        self.update_listbox()
class StatisticsDateTypeProgram(StatisticsDateProgramBase):
    """Daily statistics grouped by garbage type."""

    def __init__(self, station, win, color, title):
        super(StatisticsDateTypeProgram, self).__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)
        # One fixed legend colour per garbage type (indexes 1-4 of the name list).
        for idx, c in enumerate(("#00BFFF", "#32CD32", "#DC143C", "#A9A9A9"), start=1):
            self.color_show_dict[GarbageType.GarbageTypeStrList_ch[idx]] = c

    def refresh(self):
        """Clear the charts and queue a count-by-day query grouped by GarbageType."""
        super(StatisticsDateTypeProgram, self).refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsDateTypeProgram, self).export("Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Translate the raw GarbageType byte in a result row to its Chinese name."""
        raw: bytes = i[2]
        return GarbageType.GarbageTypeStrList_ch[int(raw.decode('utf-8'))]
class StatisticsDateLocProgram(StatisticsDateProgramBase):
    """Daily statistics grouped by drop-off location."""

    def __init__(self, station, win, color, title):
        super(StatisticsDateLocProgram, self).__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the charts and queue a count-by-day query grouped by Location."""
        super(StatisticsDateLocProgram, self).refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["Location"], lambda i: i[2], self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsDateLocProgram, self).export("Location", lambda i: i[2])
class StatisticsDateTypeLocProgram(StatisticsDateProgramBase):
    """Daily statistics grouped by garbage type and drop-off location."""

    def __init__(self, station, win, color, title):
        super(StatisticsDateTypeLocProgram, self).__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the charts and queue a query grouped by GarbageType and Location."""
        super(StatisticsDateTypeLocProgram, self).refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsDateTypeLocProgram, self).export("Type-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a '<type name>-<location>' grouping label for one result row."""
        raw: bytes = i[2]
        return f"{GarbageType.GarbageTypeStrList_ch[int(raw.decode('utf-8'))]}-{i[3]}"
class StatisticsDateCheckResultProgram(StatisticsDateProgramBase):
    """Daily statistics grouped by garbage-check result (pass/fail)."""

    def __init__(self, station, win, color, title):
        super(StatisticsDateCheckResultProgram, self).__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)
        self.color_show_dict.update({'Pass': "#00BFFF", 'Fail': "#DC143C"})

    def refresh(self):
        """Clear the charts and queue a count-by-day query grouped by CheckResult."""
        super(StatisticsDateCheckResultProgram, self).refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["CheckResult"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsDateCheckResultProgram, self).export("Result", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Map the CheckResult of a row to 'Pass'/'Fail' ('None' if unchecked).

        NOTE(review): this compares against the int 1, unlike the hourly variant
        which compares against DBBit.BIT_1 — presumably the date query yields
        ints rather than bit bytes; confirm against tk_event.CountDateEvent.
        """
        raw = i[2]
        if raw is None:
            return 'None'
        return 'Pass' if raw == 1 else 'Fail'
class StatisticsDateCheckResultAndTypeProgram(StatisticsDateProgramBase):
    """Daily statistics grouped by check result and garbage type."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the charts and queue a query grouped by CheckResult and GarbageType."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station).start(7, ["CheckResult", "GarbageType"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        # Fixed: the CSV column header was "Result-Location", but this program
        # groups by check result and garbage *type* (swapped with the sibling).
        super().export("Result-Type", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<type name>' grouping label from one result row."""
        data_1: int = i[2]
        data_2: bytes = i[3]
        if data_1 is None:
            tmp = 'None'
        elif data_1 == DBBit.BIT_1:
            tmp = 'Pass'
        else:
            tmp = 'Fail'
        return tmp + f'-{GarbageType.GarbageTypeStrList_ch[int(data_2.decode("utf-8"))]}'
class StatisticsDateCheckResultAndLocProgram(StatisticsDateProgramBase):
    """Daily statistics grouped by check result and drop-off location."""

    def __init__(self, station, win, color, title):
        super().__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the charts and queue a query grouped by CheckResult and Location."""
        super().refresh()
        event = tk_event.CountDateEvent(self.station).start(7, ["CheckResult", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        # Fixed: the CSV column header was "Result-Type", but this program
        # groups by check result and *location* (swapped with the sibling).
        super().export("Result-Location", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'Pass/Fail/None-<location>' grouping label from one result row."""
        data_1: int = i[2]
        if data_1 is None:
            tmp = 'None'
        elif data_1 == DBBit.BIT_1:
            tmp = 'Pass'
        else:
            tmp = 'Fail'
        return tmp + f"-{i[3]}"
class StatisticsDateDetailProgram(StatisticsDateProgramBase):
    """Daily statistics with the finest grouping: result x type x location."""

    def __init__(self, station, win, color, title):
        super(StatisticsDateDetailProgram, self).__init__(station, win, color, title)
        self._conf("#abc88b", 7, 1)

    def refresh(self):
        """Clear the charts and queue a query grouped by all three dimensions."""
        super(StatisticsDateDetailProgram, self).refresh()
        event = tk_event.CountDateEvent(self.station)
        event.start(7, ["CheckResult", "GarbageType", "Location"], self.get_name, self)
        self.station.push_event(event)

    def export(self, *_, **__):
        super(StatisticsDateDetailProgram, self).export("Detail", self.get_name)

    @staticmethod
    def get_name(i: Tuple):
        """Build a 'result-type-location' grouping label for one result row."""
        raw_result: int = i[2]
        raw_type: bytes = i[3]
        if raw_result is None:
            prefix = 'None'
        elif raw_result == DBBit.BIT_1:
            prefix = 'Pass'
        else:
            prefix = 'Fail'
        return f'{prefix}-{GarbageType.GarbageTypeStrList_ch[int(raw_type.decode("utf-8"))]}-{i[4]}'
class StatisticsDate7TypeProgram(StatisticsDateTypeProgram):
    """Last-7-day statistics grouped by garbage type."""

    def __init__(self, station, win, color):
        super(StatisticsDate7TypeProgram, self).__init__(station, win, color, "最近7日-按投放类型")
        self._conf("#abc88b", 7, 1)
        for idx, c in enumerate(("#00BFFF", "#32CD32", "#DC143C", "#A9A9A9"), start=1):
            self.color_show_dict[GarbageType.GarbageTypeStrList_ch[idx]] = c
class StatisticsDate7LocProgram(StatisticsDateLocProgram):
    """Last-7-day statistics grouped by drop-off location."""

    def __init__(self, station, win, color):
        super(StatisticsDate7LocProgram, self).__init__(station, win, color, "最近7日-按投放区域")
        self._conf("#abc88b", 7, 1)
class StatisticsDate7TypeLocProgram(StatisticsDateTypeLocProgram):
    """Last-7-day statistics grouped by garbage type and location."""

    def __init__(self, station, win, color):
        super(StatisticsDate7TypeLocProgram, self).__init__(station, win, color, "最近7日-按投放类型和区域")
        self._conf("#abc88b", 7, 1)
class StatisticsDate7CheckResultProgram(StatisticsDateCheckResultProgram):
    """Last-7-day statistics grouped by check result."""

    def __init__(self, station, win, color):
        super(StatisticsDate7CheckResultProgram, self).__init__(station, win, color, "最近7日-按检查结果")
        self._conf("#abc88b", 7, 1)
        self.color_show_dict.update({'Pass': "#00BFFF", 'Fail': "#DC143C"})
class StatisticsDate7CheckResultAndTypeProgram(StatisticsDateCheckResultAndTypeProgram):
    """Last-7-day statistics grouped by check result and garbage type."""

    def __init__(self, station, win, color):
        super(StatisticsDate7CheckResultAndTypeProgram, self).__init__(station, win, color, "最近7日-按检查结果和类型")
        self._conf("#abc88b", 7, 1)
class StatisticsDate7CheckResultAndLocProgram(StatisticsDateCheckResultAndLocProgram):
    """Last-7-day statistics grouped by check result and location."""

    def __init__(self, station, win, color):
        super(StatisticsDate7CheckResultAndLocProgram, self).__init__(station, win, color, "最近7日-按检查结果和区域")
        self._conf("#abc88b", 7, 1)
class StatisticsDate7DetailProgram(StatisticsDateDetailProgram):
    """Last-7-day statistics with the finest grouping."""

    def __init__(self, station, win, color):
        super(StatisticsDate7DetailProgram, self).__init__(station, win, color, "最近7日-详细分类")
        self._conf("#abc88b", 7, 1)
class StatisticsDate30TypeProgram(StatisticsDateTypeProgram):
    """Last-30-day statistics grouped by garbage type (5-day tick spacing)."""

    def __init__(self, station, win, color):
        super(StatisticsDate30TypeProgram, self).__init__(station, win, color, "最近30日-按投放类型")
        self._conf("#abc88b", 30, 5)
        for idx, c in enumerate(("#00BFFF", "#32CD32", "#DC143C", "#A9A9A9"), start=1):
            self.color_show_dict[GarbageType.GarbageTypeStrList_ch[idx]] = c
class StatisticsDate30LocProgram(StatisticsDateLocProgram):
    """Last-30-day statistics grouped by drop-off location (5-day tick spacing)."""

    def __init__(self, station, win, color):
        super(StatisticsDate30LocProgram, self).__init__(station, win, color, "最近30日-按投放区域")
        self._conf("#abc88b", 30, 5)
class StatisticsDate30TypeLocProgram(StatisticsDateTypeLocProgram):
    """Last-30-day statistics grouped by garbage type and location."""

    def __init__(self, station, win, color):
        super(StatisticsDate30TypeLocProgram, self).__init__(station, win, color, "最近30日-按投放类型和区域")
        self._conf("#abc88b", 30, 5)
class StatisticsDate30CheckResultProgram(StatisticsDateCheckResultProgram):
    """Last-30-day statistics grouped by check result."""

    def __init__(self, station, win, color):
        super(StatisticsDate30CheckResultProgram, self).__init__(station, win, color, "最近30日-按检查结果")
        self._conf("#abc88b", 30, 5)
        self.color_show_dict.update({'Pass': "#00BFFF", 'Fail': "#DC143C"})
class StatisticsDate30CheckResultAndTypeProgram(StatisticsDateCheckResultAndTypeProgram):
    """Last-30-day statistics grouped by check result and garbage type."""

    def __init__(self, station, win, color):
        super(StatisticsDate30CheckResultAndTypeProgram, self).__init__(station, win, color, "最近30日-按检查结果和类型")
        self._conf("#abc88b", 30, 5)
class StatisticsDate30CheckResultAndLocProgram(StatisticsDateCheckResultAndLocProgram):
    """Check-result-by-location statistics over the most recent 30 days."""
    def __init__(self, station, win, color):
        StatisticsDateCheckResultAndLocProgram.__init__(
            self, station, win, color, "最近30日-按检查结果和区域")
        self._conf("#abc88b", 30, 5)
class StatisticsDate30DetailProgram(StatisticsDateDetailProgram):
    """Detailed-category statistics over the most recent 30 days."""
    def __init__(self, station, win, color):
        StatisticsDateDetailProgram.__init__(
            self, station, win, color, "最近30日-详细分类")
        self._conf("#abc88b", 30, 5)
# Registry of every UI program class in the order they should be offered.
# NOTE(review): the consumer of this list is outside this chunk -- presumably
# it builds the station's menu; confirm that order matters before reordering.
all_program = [WelcomeProgram, CreateNormalUserProgram, CreateManagerUserProgram, CreateAutoNormalUserProgram,
               CreateGarbageProgram, DeleteUserProgram, DeleteUsersProgram, DeleteGarbageProgram,
               DeleteGarbageMoreProgram, DeleteAllGarbageProgram, SearchUserProgram, SearchUserAdvancedProgram,
               SearchGarbageProgram, SearchGarbageAdvancedProgram, SearchAdvancedProgram, UpdateUserScore,
               UpdateUserReputation, UpdateGarbageTypeProgram, UpdateGarbageCheckResultProgram,
               ExportGarbageProgram, ExportUserProgram, CreateUserFromCSVProgram, AboutProgram,
               StatisticsTimeLocProgram, StatisticsTimeTypeProgram, StatisticsTimeTypeLocProgram,
               StatisticsTimeCheckResultProgram, StatisticsTimeCheckResultAndTypeProgram,
               StatisticsTimeCheckResultAndLocProgram, StatisticsTimeDetailProgram, StatisticsUserTinyProgram,
               StatisticsUserLargeProgram, StatisticsScoreDistributedProgram, StatisticsReputationDistributedProgram,
               StatisticsPassRateGlobalProgram, StatisticsPassRateTypeProgram, StatisticsPassRateLocProgram,
               StatisticsPassRateTypeAndLocProgram, StatisticsDate7TypeProgram, StatisticsDate7LocProgram,
               StatisticsDate7TypeLocProgram, StatisticsDate7CheckResultProgram,
               StatisticsDate7CheckResultAndTypeProgram, StatisticsDate7CheckResultAndLocProgram,
               StatisticsDate7DetailProgram, StatisticsDate30TypeProgram, StatisticsDate30LocProgram,
               StatisticsDate30TypeLocProgram, StatisticsDate30CheckResultProgram,
               StatisticsDate30CheckResultAndTypeProgram, StatisticsDate30CheckResultAndLocProgram,
               StatisticsDate30DetailProgram]
| true
| true
|
1c406cf931317f3609f36abd4dfd161f4fc847ba
| 639
|
py
|
Python
|
skimage/_shared/tests/test_utils.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | 3
|
2019-02-28T16:05:36.000Z
|
2020-04-03T17:29:07.000Z
|
skimage/_shared/tests/test_utils.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
skimage/_shared/tests/test_utils.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | 3
|
2019-12-31T23:21:40.000Z
|
2020-04-03T17:29:08.000Z
|
from skimage._shared.utils import (copy_func, assert_nD)
import numpy.testing as npt
import numpy as np
from skimage._shared import testing
def test_assert_nD():
    """assert_nD must reject an array that has a zero-sized dimension."""
    image = np.random.random(200 ** 2).reshape((200, 200))
    empty_view = image[10:30, 30:10]  # slice 30:10 is empty -> shape (20, 0)
    with testing.raises(ValueError):
        assert_nD(empty_view, 2)
def test_copyfunc():
    """copy_func must yield independent copies; renaming one leaves the rest alone."""
    def foo(a):
        return a
    renamed = copy_func(foo, name='bar')
    plain = copy_func(foo)
    npt.assert_equal(renamed.__name__, 'bar')
    npt.assert_equal(plain.__name__, 'foo')
    plain.__name__ = 'other'  # mutating a copy must not touch the original
    npt.assert_equal(foo.__name__, 'foo')
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    # NOTE(review): numpy.testing.run_module_suite is deprecated (removed in
    # recent NumPy releases); migrating to pytest invocation is advisable.
    npt.run_module_suite()
| 20.612903
| 56
| 0.666667
|
from skimage._shared.utils import (copy_func, assert_nD)
import numpy.testing as npt
import numpy as np
from skimage._shared import testing
def test_assert_nD():
z = np.random.random(200**2).reshape((200, 200))
x = z[10:30, 30:10]
with testing.raises(ValueError):
assert_nD(x, 2)
def test_copyfunc():
def foo(a):
return a
bar = copy_func(foo, name='bar')
other = copy_func(foo)
npt.assert_equal(bar.__name__, 'bar')
npt.assert_equal(other.__name__, 'foo')
other.__name__ = 'other'
npt.assert_equal(foo.__name__, 'foo')
if __name__ == "__main__":
npt.run_module_suite()
| true
| true
|
1c406d0a96895fcda9da1357f0051cc734c05025
| 333
|
py
|
Python
|
users/forms.py
|
dragonrathony/zed_market
|
c73f17501608c8fe86692c3c4f6e03fc8ba03286
|
[
"bzip2-1.0.6"
] | 1
|
2020-06-17T13:45:54.000Z
|
2020-06-17T13:45:54.000Z
|
users/forms.py
|
Honey4251996/zed_market
|
c73f17501608c8fe86692c3c4f6e03fc8ba03286
|
[
"bzip2-1.0.6"
] | 11
|
2021-03-19T07:55:39.000Z
|
2022-03-12T00:34:55.000Z
|
users/forms.py
|
Honey4251996/zed_market
|
c73f17501608c8fe86692c3c4f6e03fc8ba03286
|
[
"bzip2-1.0.6"
] | null | null | null |
from django import forms
class ProfileForm(forms.Form):
    """Plain (non-model) form collecting a user's profile fields."""
    # All text fields share a 100-character cap; declaration order determines
    # the default rendering order in Django.
    email = forms.CharField(max_length=100)
    first_name = forms.CharField(max_length=100)
    last_name = forms.CharField(max_length=100)
    profile_image = forms.ImageField()
    user_name = forms.CharField(max_length=100)
    zip_code = forms.CharField(max_length=100)
| 30.272727
| 48
| 0.750751
|
from django import forms
class ProfileForm(forms.Form):
email = forms.CharField(max_length=100)
first_name = forms.CharField(max_length=100)
last_name = forms.CharField(max_length=100)
profile_image = forms.ImageField()
user_name = forms.CharField(max_length=100)
zip_code = forms.CharField(max_length=100)
| true
| true
|
1c406d4a1cdf5e6d65955028d98693bacd126ff5
| 873
|
py
|
Python
|
tools/validate-yaml.py
|
larsks/wh-test-repo
|
8c6d38db4229073d64de7536fe0edb005f053647
|
[
"Apache-2.0"
] | 44
|
2015-01-03T10:07:39.000Z
|
2021-07-22T05:35:04.000Z
|
tools/validate-yaml.py
|
larsks/wh-test-repo
|
8c6d38db4229073d64de7536fe0edb005f053647
|
[
"Apache-2.0"
] | 10
|
2015-02-10T04:55:44.000Z
|
2016-04-28T15:08:16.000Z
|
tools/validate-yaml.py
|
larsks/wh-test-repo
|
8c6d38db4229073d64de7536fe0edb005f053647
|
[
"Apache-2.0"
] | 31
|
2015-01-05T05:38:58.000Z
|
2019-11-08T16:05:02.000Z
|
#!/usr/bin/python
import sys
import argparse
import yaml
import logging
def parse_args():
    """Parse the command line.

    Returns a namespace with ``input`` (zero or more file names) and
    ``loglevel`` ('INFO' when -v/--verbose is given, 'WARN' otherwise).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', '-v',
                        dest='loglevel',
                        action='store_const',
                        const='INFO')
    parser.add_argument('input', nargs='*')
    parser.set_defaults(loglevel='WARN')
    return parser.parse_args()
def main():
    """Validate each input file as YAML.

    :returns: 0 when every file parses, 1 when any file fails to parse;
        each failure is logged together with the parser error.
    """
    args = parse_args()
    logging.basicConfig(level=args.loglevel)
    res = 0
    for filename in args.input:
        with open(filename) as fd:
            try:
                # safe_load: yaml.load without an explicit Loader is
                # deprecated (PyYAML >= 5.1) and can construct arbitrary
                # Python objects from untrusted files; plain-YAML validation
                # does not need that power.
                yaml.safe_load(fd)
                logging.info('%s: passed', filename)
            except yaml.error.YAMLError as error:
                res = 1
                logging.error('%s: failed: %s',
                              filename, error)
    return res
if __name__ == '__main__':
    # Propagate main()'s result (0 = all passed, 1 = failures) as the
    # process exit code.
    sys.exit(main())
| 22.384615
| 52
| 0.532646
|
import sys
import argparse
import yaml
import logging
def parse_args():
p = argparse.ArgumentParser()
p.add_argument('--verbose', '-v',
action='store_const',
const='INFO',
dest='loglevel')
p.add_argument('input', nargs='*')
p.set_defaults(loglevel='WARN')
return p.parse_args()
def main():
args = parse_args()
logging.basicConfig(level=args.loglevel)
res = 0
for filename in args.input:
with open(filename) as fd:
try:
yaml.load(fd)
logging.info('%s: passed', filename)
except yaml.error.YAMLError as error:
res = 1
logging.error('%s: failed: %s',
filename, error)
return res
if __name__ == '__main__':
sys.exit(main())
| true
| true
|
1c406dcbf96d4f7d8fec9c9c3fac3b11057f2d2c
| 552
|
py
|
Python
|
dashboard/internet_nl_dashboard/logic/account.py
|
bslavin/Internet.nl-dashboard
|
5fd6d8fe8edb5f181727ddd1729697d9fc586c29
|
[
"Apache-2.0"
] | null | null | null |
dashboard/internet_nl_dashboard/logic/account.py
|
bslavin/Internet.nl-dashboard
|
5fd6d8fe8edb5f181727ddd1729697d9fc586c29
|
[
"Apache-2.0"
] | null | null | null |
dashboard/internet_nl_dashboard/logic/account.py
|
bslavin/Internet.nl-dashboard
|
5fd6d8fe8edb5f181727ddd1729697d9fc586c29
|
[
"Apache-2.0"
] | null | null | null |
import logging
from dashboard.internet_nl_dashboard.logic import operation_response
log = logging.getLogger(__package__)
def save_report_settings(account, report_settings):
    """Persist the 'filters' section of *report_settings* onto *account*."""
    filters = report_settings.get('filters', {})
    account.report_settings = filters
    account.save()
    return operation_response(success=True, message="settings.updated")
def get_report_settings(account):
    """Return the account's stored report filters (empty dict when unset/falsy)."""
    stored = account.report_settings
    return operation_response(
        success=True,
        message="settings.restored_from_database",
        data=stored if stored else {},
    )
| 26.285714
| 71
| 0.762681
|
import logging
from dashboard.internet_nl_dashboard.logic import operation_response
log = logging.getLogger(__package__)
def save_report_settings(account, report_settings):
account.report_settings = report_settings.get('filters', {})
account.save()
return operation_response(success=True, message="settings.updated")
def get_report_settings(account):
return operation_response(
success=True,
message="settings.restored_from_database",
data=account.report_settings if account.report_settings else {}
)
| true
| true
|
1c406e877ea7a560462b026bd111e0a32e55ae7f
| 683
|
py
|
Python
|
model/process_selfie_data.py
|
imdeepmind/AgeGenderNetwork
|
845a8b8f15aa9ce1ae6ff55f8f3ca9213d490323
|
[
"MIT"
] | 1
|
2019-10-14T15:18:05.000Z
|
2019-10-14T15:18:05.000Z
|
model/process_selfie_data.py
|
imdeepmind/AgeGenderNetwork
|
845a8b8f15aa9ce1ae6ff55f8f3ca9213d490323
|
[
"MIT"
] | null | null | null |
model/process_selfie_data.py
|
imdeepmind/AgeGenderNetwork
|
845a8b8f15aa9ce1ae6ff55f8f3ca9213d490323
|
[
"MIT"
] | null | null | null |
## IMPORTING THE DEPENDENCIES
import pandas as pd
import numpy as np

selfie_data = './dataset/unprocessed/Selfie-dataset/selfie_dataset.txt'

# Read the raw annotation file. The context manager guarantees the handle is
# closed (the original left it open and shadowed the builtin name `file`).
with open(selfie_data, 'r') as annotation_file:
    selfie_file_lines = annotation_file.read().split('\n')

# Each annotation line is space-separated: column 0 is the image id and
# column 3 the gender flag ('0' -> Male, anything else -> Female). Lines with
# fewer than four columns (e.g. trailing blanks) are skipped.
un_selfie_data = []
for selfie in selfie_file_lines:
    temp = selfie.split(' ')
    if len(temp) > 3:
        un_selfie_data.append(['Male' if temp[3] == '0' else 'Female', 'Selfie-dataset/images/' + temp[0] + '.jpg'])

selfie = pd.DataFrame(un_selfie_data)
selfie.columns = ['gender', 'path']

# Shuffling the data
selfie = selfie.sample(frac=1)

# Storing as csv file
selfie.to_csv('./dataset/processed/selfie_meta.csv', index=False)
| 27.32
| 117
| 0.708638
|
numpy as np
selfie_data = './dataset/unprocessed/Selfie-dataset/selfie_dataset.txt'
file = open(selfie_data, 'r')
selfie_file = file.read()
selfie_file_lines = selfie_file.split('\n')
un_selfie_data = []
for selfie in selfie_file_lines:
temp = selfie.split(' ')
if len(temp) > 3:
un_selfie_data.append(['Male' if temp[3] == '0' else 'Female', 'Selfie-dataset/images/' + temp[0] + '.jpg'])
selfie = pd.DataFrame(un_selfie_data)
selfie.columns = ['gender', 'path']
selfie = selfie.sample(frac=1)
selfie.to_csv('./dataset/processed/selfie_meta.csv', index=False)
| true
| true
|
1c406ead37561eefa4f97040eed189818c389e87
| 12,958
|
py
|
Python
|
pymodbus/register_read_message.py
|
CS536-Modbus-QUIC/pymodbus
|
337d090f04922c9440913927593f26cd9388b141
|
[
"BSD-3-Clause"
] | 1,125
|
2017-05-11T06:11:36.000Z
|
2022-03-31T02:59:45.000Z
|
pymodbus/register_read_message.py
|
Fannxy/pymodbus
|
c5772b35ae3f29d1947f3ab453d8d00df846459f
|
[
"BSD-3-Clause"
] | 575
|
2017-05-12T02:46:55.000Z
|
2022-03-31T16:00:33.000Z
|
pymodbus/register_read_message.py
|
Fannxy/pymodbus
|
c5772b35ae3f29d1947f3ab453d8d00df846459f
|
[
"BSD-3-Clause"
] | 516
|
2017-05-19T14:06:06.000Z
|
2022-03-31T06:10:13.000Z
|
'''
Register Reading Request/Response
---------------------------------
'''
import struct
from pymodbus.pdu import ModbusRequest
from pymodbus.pdu import ModbusResponse
from pymodbus.pdu import ModbusExceptions as merror
from pymodbus.compat import int2byte, byte2int
class ReadRegistersRequestBase(ModbusRequest):
    '''Shared behaviour for the register-reading request PDUs.'''
    _rtu_frame_size = 8
    def __init__(self, address, count, **kwargs):
        '''Create a request covering *count* registers starting at *address*.'''
        ModbusRequest.__init__(self, **kwargs)
        self.address = address
        self.count = count
    def encode(self):
        '''Pack starting address and register count into the request PDU.'''
        return struct.pack('>HH', self.address, self.count)
    def decode(self, data):
        '''Unpack starting address and register count from *data*.'''
        fields = struct.unpack('>HH', data)
        self.address = fields[0]
        self.count = fields[1]
    def get_response_pdu_size(self):
        '''Expected response size: func code + byte count + 2 bytes/register.'''
        return 2 + 2 * self.count
    def __str__(self):
        '''Human-readable summary of the request.'''
        return "ReadRegisterRequest (%d,%d)" % (self.address, self.count)
class ReadRegistersResponseBase(ModbusResponse):
    '''Shared behaviour for the register-reading response PDUs.'''
    _rtu_byte_count_pos = 2
    def __init__(self, values, **kwargs):
        '''Create a response carrying *values* (falsy input becomes []).'''
        ModbusResponse.__init__(self, **kwargs)
        self.registers = values or []
    def encode(self):
        '''Emit the byte count followed by each register as a big-endian word.'''
        payload = b''.join(struct.pack('>H', register)
                           for register in self.registers)
        return int2byte(len(self.registers) * 2) + payload
    def decode(self, data):
        '''Rebuild the register list from *data* (byte count + word payload).'''
        byte_count = byte2int(data[0])
        self.registers = [struct.unpack('>H', data[i:i + 2])[0]
                          for i in range(1, byte_count + 1, 2)]
    def getRegister(self, index):
        '''Return the register stored at *index*.'''
        return self.registers[index]
    def __str__(self):
        '''Human-readable summary: class name and register count.'''
        return "%s (%d)" % (self.__class__.__name__, len(self.registers))
class ReadHoldingRegistersRequest(ReadRegistersRequestBase):
    '''Request PDU (function code 3) for a contiguous block of holding
    registers. Registers are zero-addressed in the PDU, so registers
    numbered 1-16 are addressed as 0-15.
    '''
    function_code = 3
    def __init__(self, address=None, count=None, **kwargs):
        '''Build a function-3 request for *count* registers at *address*.'''
        super(ReadHoldingRegistersRequest, self).__init__(
            address, count, **kwargs)
    def execute(self, context):
        '''Run the request against *context*; returns a response PDU or an
        exception PDU when the count or address range is invalid.
        '''
        if not 1 <= self.count <= 0x7d:
            return self.doException(merror.IllegalValue)
        valid = context.validate(self.function_code, self.address, self.count)
        if not valid:
            return self.doException(merror.IllegalAddress)
        values = context.getValues(self.function_code, self.address, self.count)
        return ReadHoldingRegistersResponse(values)
class ReadHoldingRegistersResponse(ReadRegistersResponseBase):
    '''Response PDU (function code 3) carrying the holding-register values
    that were read. Registers are zero-addressed in the PDU.
    '''
    function_code = 3
    def __init__(self, values=None, **kwargs):
        '''Wrap *values* (the registers read) in a function-3 response.'''
        super(ReadHoldingRegistersResponse, self).__init__(values, **kwargs)
class ReadInputRegistersRequest(ReadRegistersRequestBase):
    '''Request PDU (function code 4) for 1 to ~125 contiguous input
    registers. Registers are zero-addressed in the PDU, so input registers
    numbered 1-16 are addressed as 0-15.
    '''
    function_code = 4
    def __init__(self, address=None, count=None, **kwargs):
        '''Build a function-4 request for *count* registers at *address*.'''
        super(ReadInputRegistersRequest, self).__init__(
            address, count, **kwargs)
    def execute(self, context):
        '''Run the request against *context*; returns a response PDU or an
        exception PDU when the count or address range is invalid.
        '''
        if not 1 <= self.count <= 0x7d:
            return self.doException(merror.IllegalValue)
        valid = context.validate(self.function_code, self.address, self.count)
        if not valid:
            return self.doException(merror.IllegalAddress)
        values = context.getValues(self.function_code, self.address, self.count)
        return ReadInputRegistersResponse(values)
class ReadInputRegistersResponse(ReadRegistersResponseBase):
    '''Response PDU (function code 4) carrying the input-register values
    that were read. Registers are zero-addressed in the PDU.
    '''
    function_code = 4
    def __init__(self, values=None, **kwargs):
        '''Wrap *values* (the registers read) in a function-4 response.'''
        super(ReadInputRegistersResponse, self).__init__(values, **kwargs)
class ReadWriteMultipleRegistersRequest(ModbusRequest):
    '''
    This function code performs a combination of one read operation and one
    write operation in a single MODBUS transaction. The write
    operation is performed before the read.
    Holding registers are addressed starting at zero. Therefore holding
    registers 1-16 are addressed in the PDU as 0-15.
    The request specifies the starting address and number of holding
    registers to be read as well as the starting address, number of holding
    registers, and the data to be written. The byte count specifies the
    number of bytes to follow in the write data field."
    '''
    function_code = 23
    _rtu_byte_count_pos = 10
    def __init__(self, **kwargs):
        ''' Initializes a new request message

        :param read_address: The address to start reading from
        :param read_count: The number of registers to read from address
        :param write_address: The address to start writing to
        :param write_registers: The registers to write to the specified address
        '''
        ModbusRequest.__init__(self, **kwargs)
        self.read_address = kwargs.get('read_address', 0x00)
        self.read_count = kwargs.get('read_count', 0)
        self.write_address = kwargs.get('write_address', 0x00)
        self.write_registers = kwargs.get('write_registers', None)
        # Normalize a scalar into a one-element list.  NOTE(review): when
        # write_registers is omitted this wraps None as [None], making
        # write_count == 1; encode() would then fail on struct.pack -- the
        # default is not usable for encoding, only as a decode target.
        if not hasattr(self.write_registers, '__iter__'):
            self.write_registers = [self.write_registers]
        self.write_count = len(self.write_registers)
        self.write_byte_count = self.write_count * 2  # two bytes per register
    def encode(self):
        ''' Encodes the request packet

        :returns: The encoded packet
        '''
        # Fixed 9-byte header: read addr/count, write addr/count, byte count,
        # followed by one big-endian word per register to write.
        result = struct.pack('>HHHHB',
            self.read_address, self.read_count, \
            self.write_address, self.write_count, self.write_byte_count)
        for register in self.write_registers:
            result += struct.pack('>H', register)
        return result
    def decode(self, data):
        ''' Decode the register request packet

        :param data: The request to decode
        '''
        self.read_address, self.read_count, \
            self.write_address, self.write_count, \
            self.write_byte_count = struct.unpack('>HHHHB', data[:9])
        # Register payload starts right after the 9-byte header.
        self.write_registers = []
        for i in range(9, self.write_byte_count + 9, 2):
            register = struct.unpack('>H', data[i:i + 2])[0]
            self.write_registers.append(register)
    def execute(self, context):
        ''' Run a write single register request against a datastore

        :param context: The datastore to request from
        :returns: An initialized response, exception message otherwise
        '''
        # Protocol limits: at most 0x7d (125) registers read and 0x79 (121)
        # written per transaction.
        if not (1 <= self.read_count <= 0x07d):
            return self.doException(merror.IllegalValue)
        if not (1 <= self.write_count <= 0x079):
            return self.doException(merror.IllegalValue)
        if (self.write_byte_count != self.write_count * 2):
            return self.doException(merror.IllegalValue)
        if not context.validate(self.function_code, self.write_address,
                                self.write_count):
            return self.doException(merror.IllegalAddress)
        if not context.validate(self.function_code, self.read_address,
                                self.read_count):
            return self.doException(merror.IllegalAddress)
        # The write is performed before the read (see class docstring).
        context.setValues(self.function_code, self.write_address,
                          self.write_registers)
        registers = context.getValues(self.function_code, self.read_address,
                                      self.read_count)
        return ReadWriteMultipleRegistersResponse(registers)
    def get_response_pdu_size(self):
        """
        Func_code (1 byte) + Byte Count(1 byte) + 2 * Quantity of Coils (n Bytes)
        :return:
        """
        return 1 + 1 + 2 * self.read_count
    def __str__(self):
        ''' Returns a string representation of the instance

        :returns: A string representation of the instance
        '''
        params = (self.read_address, self.read_count, self.write_address,
                  self.write_count)
        return "ReadWriteNRegisterRequest R(%d,%d) W(%d,%d)" % params
class ReadWriteMultipleRegistersResponse(ModbusResponse):
    '''
    The normal response contains the data from the group of registers that
    were read. The byte count field specifies the quantity of bytes to
    follow in the read data field.
    '''
    function_code = 23
    _rtu_byte_count_pos = 2
    def __init__(self, values=None, **kwargs):
        ''' Initializes a new instance

        :param values: The register values to write
        '''
        ModbusResponse.__init__(self, **kwargs)
        self.registers = values or []
    def encode(self):
        ''' Encodes the response packet

        :returns: The encoded packet (byte count + one word per register)
        '''
        result = int2byte(len(self.registers) * 2)
        for register in self.registers:
            result += struct.pack('>H', register)
        return result
    def decode(self, data):
        ''' Decode the register response packet

        :param data: The response to decode
        '''
        bytecount = byte2int(data[0])
        # Reset before parsing: without this, constructor-supplied values or
        # an earlier decode() call would accumulate into the result. This
        # matches ReadRegistersResponseBase.decode.
        self.registers = []
        for i in range(1, bytecount, 2):
            self.registers.append(struct.unpack('>H', data[i:i + 2])[0])
    def __str__(self):
        ''' Returns a string representation of the instance

        :returns: A string representation of the instance
        '''
        return "ReadWriteNRegisterResponse (%d)" % len(self.registers)
#---------------------------------------------------------------------------#
# Exported symbols
#---------------------------------------------------------------------------#
__all__ = [
"ReadHoldingRegistersRequest", "ReadHoldingRegistersResponse",
"ReadInputRegistersRequest", "ReadInputRegistersResponse",
"ReadWriteMultipleRegistersRequest", "ReadWriteMultipleRegistersResponse",
]
| 35.994444
| 81
| 0.643927
|
import struct
from pymodbus.pdu import ModbusRequest
from pymodbus.pdu import ModbusResponse
from pymodbus.pdu import ModbusExceptions as merror
from pymodbus.compat import int2byte, byte2int
class ReadRegistersRequestBase(ModbusRequest):
_rtu_frame_size = 8
def __init__(self, address, count, **kwargs):
ModbusRequest.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self):
return struct.pack('>HH', self.address, self.count)
def decode(self, data):
self.address, self.count = struct.unpack('>HH', data)
def get_response_pdu_size(self):
return 1 + 1 + 2 * self.count
def __str__(self):
return "ReadRegisterRequest (%d,%d)" % (self.address, self.count)
class ReadRegistersResponseBase(ModbusResponse):
_rtu_byte_count_pos = 2
def __init__(self, values, **kwargs):
ModbusResponse.__init__(self, **kwargs)
self.registers = values or []
def encode(self):
result = int2byte(len(self.registers) * 2)
for register in self.registers:
result += struct.pack('>H', register)
return result
def decode(self, data):
byte_count = byte2int(data[0])
self.registers = []
for i in range(1, byte_count + 1, 2):
self.registers.append(struct.unpack('>H', data[i:i + 2])[0])
def getRegister(self, index):
return self.registers[index]
def __str__(self):
return "%s (%d)" % (self.__class__.__name__, len(self.registers))
class ReadHoldingRegistersRequest(ReadRegistersRequestBase):
function_code = 3
def __init__(self, address=None, count=None, **kwargs):
ReadRegistersRequestBase.__init__(self, address, count, **kwargs)
def execute(self, context):
if not (1 <= self.count <= 0x7d):
return self.doException(merror.IllegalValue)
if not context.validate(self.function_code, self.address, self.count):
return self.doException(merror.IllegalAddress)
values = context.getValues(self.function_code, self.address, self.count)
return ReadHoldingRegistersResponse(values)
class ReadHoldingRegistersResponse(ReadRegistersResponseBase):
function_code = 3
def __init__(self, values=None, **kwargs):
ReadRegistersResponseBase.__init__(self, values, **kwargs)
class ReadInputRegistersRequest(ReadRegistersRequestBase):
function_code = 4
def __init__(self, address=None, count=None, **kwargs):
ReadRegistersRequestBase.__init__(self, address, count, **kwargs)
def execute(self, context):
if not (1 <= self.count <= 0x7d):
return self.doException(merror.IllegalValue)
if not context.validate(self.function_code, self.address, self.count):
return self.doException(merror.IllegalAddress)
values = context.getValues(self.function_code, self.address, self.count)
return ReadInputRegistersResponse(values)
class ReadInputRegistersResponse(ReadRegistersResponseBase):
function_code = 4
def __init__(self, values=None, **kwargs):
ReadRegistersResponseBase.__init__(self, values, **kwargs)
class ReadWriteMultipleRegistersRequest(ModbusRequest):
function_code = 23
_rtu_byte_count_pos = 10
def __init__(self, **kwargs):
ModbusRequest.__init__(self, **kwargs)
self.read_address = kwargs.get('read_address', 0x00)
self.read_count = kwargs.get('read_count', 0)
self.write_address = kwargs.get('write_address', 0x00)
self.write_registers = kwargs.get('write_registers', None)
if not hasattr(self.write_registers, '__iter__'):
self.write_registers = [self.write_registers]
self.write_count = len(self.write_registers)
self.write_byte_count = self.write_count * 2
def encode(self):
result = struct.pack('>HHHHB',
self.read_address, self.read_count, \
self.write_address, self.write_count, self.write_byte_count)
for register in self.write_registers:
result += struct.pack('>H', register)
return result
def decode(self, data):
self.read_address, self.read_count, \
self.write_address, self.write_count, \
self.write_byte_count = struct.unpack('>HHHHB', data[:9])
self.write_registers = []
for i in range(9, self.write_byte_count + 9, 2):
register = struct.unpack('>H', data[i:i + 2])[0]
self.write_registers.append(register)
def execute(self, context):
if not (1 <= self.read_count <= 0x07d):
return self.doException(merror.IllegalValue)
if not (1 <= self.write_count <= 0x079):
return self.doException(merror.IllegalValue)
if (self.write_byte_count != self.write_count * 2):
return self.doException(merror.IllegalValue)
if not context.validate(self.function_code, self.write_address,
self.write_count):
return self.doException(merror.IllegalAddress)
if not context.validate(self.function_code, self.read_address,
self.read_count):
return self.doException(merror.IllegalAddress)
context.setValues(self.function_code, self.write_address,
self.write_registers)
registers = context.getValues(self.function_code, self.read_address,
self.read_count)
return ReadWriteMultipleRegistersResponse(registers)
def get_response_pdu_size(self):
return 1 + 1 + 2 * self.read_count
def __str__(self):
params = (self.read_address, self.read_count, self.write_address,
self.write_count)
return "ReadWriteNRegisterRequest R(%d,%d) W(%d,%d)" % params
class ReadWriteMultipleRegistersResponse(ModbusResponse):
function_code = 23
_rtu_byte_count_pos = 2
def __init__(self, values=None, **kwargs):
ModbusResponse.__init__(self, **kwargs)
self.registers = values or []
def encode(self):
result = int2byte(len(self.registers) * 2)
for register in self.registers:
result += struct.pack('>H', register)
return result
def decode(self, data):
bytecount = byte2int(data[0])
for i in range(1, bytecount, 2):
self.registers.append(struct.unpack('>H', data[i:i + 2])[0])
def __str__(self):
return "ReadWriteNRegisterResponse (%d)" % len(self.registers)
__all__ = [
"ReadHoldingRegistersRequest", "ReadHoldingRegistersResponse",
"ReadInputRegistersRequest", "ReadInputRegistersResponse",
"ReadWriteMultipleRegistersRequest", "ReadWriteMultipleRegistersResponse",
]
| true
| true
|
1c406f0f0c0ad9459cb2bf63128af200f42bf7f9
| 3,480
|
py
|
Python
|
vilbert/datasets/__init__.py
|
ThierryDeruyttere/vilbert-Talk2car
|
6476b16970cfd0d88e09beb9a57cc5c39b7acb3f
|
[
"MIT"
] | null | null | null |
vilbert/datasets/__init__.py
|
ThierryDeruyttere/vilbert-Talk2car
|
6476b16970cfd0d88e09beb9a57cc5c39b7acb3f
|
[
"MIT"
] | null | null | null |
vilbert/datasets/__init__.py
|
ThierryDeruyttere/vilbert-Talk2car
|
6476b16970cfd0d88e09beb9a57cc5c39b7acb3f
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .concept_cap_dataset import (
ConceptCapLoaderTrain,
ConceptCapLoaderVal,
ConceptCapLoaderRetrieval,
)
from .foil_dataset import FoilClassificationDataset
from .vqa_dataset import VQAClassificationDataset
from .vqa_mc_dataset import VQAMultipleChoiceDataset
from .nlvr2_dataset import NLVR2Dataset
from .refer_expression_dataset import ReferExpressionDataset
from .retreival_dataset import RetreivalDataset, RetreivalDatasetVal
from .vcr_dataset import VCRDataset
from .visdial_dataset import VisDialDataset
from .visual_entailment_dataset import VisualEntailmentDataset
from .refer_dense_caption import ReferDenseCpationDataset
from .visual_genome_dataset import GenomeQAClassificationDataset
from .gqa_dataset import GQAClassificationDataset
from .guesswhat_dataset import GuessWhatDataset
from .visual7w_pointing_dataset import Visual7wPointingDataset
from .guesswhat_pointing_dataset import GuessWhatPointingDataset
from .flickr_grounding_dataset import FlickrGroundingDataset
from .t2c_expression_dataset import T2C_ReferExpressionDataset
# from .flickr_retreival_dataset import FlickrRetreivalDatasetTrain, FlickrRetreivalDatasetVal
# Public names re-exported by this package.  The original list contained a
# stray empty string, which makes ``from vilbert.datasets import *`` raise
# ImportError (no attribute named '') -- it is removed here.
__all__ = [
    "FoilClassificationDataset",
    "VQAClassificationDataset",
    "GenomeQAClassificationDataset",
    "VQAMultipleChoiceDataset",
    "ConceptCapLoaderTrain",
    "ConceptCapLoaderVal",
    "NLVR2Dataset",
    "ReferExpressionDataset",
    "RetreivalDataset",
    "RetreivalDatasetVal",
    "VCRDataset",
    "VisDialDataset",
    "VisualEntailmentDataset",
    "GQAClassificationDataset",
    "ConceptCapLoaderRetrieval",
    "GuessWhatDataset",
    "Visual7wPointingDataset",
    "GuessWhatPointingDataset",
    "FlickrGroundingDataset",
    "T2C_ReferExpressionDataset",
]
# Task name -> dataset class used when building *training* loaders.
# Both VCR variants share VCRDataset; the two retrieval tasks use
# RetreivalDataset here (vs. RetreivalDatasetVal in DatasetMapEval).
DatasetMapTrain = {
    "VQA": VQAClassificationDataset,
    "GenomeQA": GenomeQAClassificationDataset,
    "VisualDialog": VisDialDataset,
    "VCR_Q-A": VCRDataset,
    "VCR_QA-R": VCRDataset,
    "RetrievalCOCO": RetreivalDataset,
    "RetrievalFlickr30k": RetreivalDataset,
    "refcoco": ReferExpressionDataset,
    "refcoco+": ReferExpressionDataset,
    "refcocog": ReferExpressionDataset,
    "NLVR2": NLVR2Dataset,
    "VisualEntailment": VisualEntailmentDataset,
    "GQA": GQAClassificationDataset,
    "Foil": FoilClassificationDataset,
    "GuessWhat": GuessWhatDataset,
    "Visual7w": Visual7wPointingDataset,
    "GuessWhatPointing": GuessWhatPointingDataset,
    "FlickrGrounding": FlickrGroundingDataset,
    "Talk2Car": T2C_ReferExpressionDataset,
}
# Task name -> dataset class used when building *evaluation* loaders.
# Identical to DatasetMapTrain except the retrieval tasks swap in
# RetreivalDatasetVal.
DatasetMapEval = {
    "VQA": VQAClassificationDataset,
    "GenomeQA": GenomeQAClassificationDataset,
    "VisualDialog": VisDialDataset,
    "VCR_Q-A": VCRDataset,
    "VCR_QA-R": VCRDataset,
    "RetrievalCOCO": RetreivalDatasetVal,
    "RetrievalFlickr30k": RetreivalDatasetVal,
    "refcoco": ReferExpressionDataset,
    "refcoco+": ReferExpressionDataset,
    "refcocog": ReferExpressionDataset,
    "NLVR2": NLVR2Dataset,
    "VisualEntailment": VisualEntailmentDataset,
    "GQA": GQAClassificationDataset,
    "Foil": FoilClassificationDataset,
    "GuessWhat": GuessWhatDataset,
    "Visual7w": Visual7wPointingDataset,
    "GuessWhatPointing": GuessWhatPointingDataset,
    "FlickrGrounding": FlickrGroundingDataset,
    "Talk2Car": T2C_ReferExpressionDataset,
}
| 35.510204
| 94
| 0.786207
|
from .concept_cap_dataset import (
ConceptCapLoaderTrain,
ConceptCapLoaderVal,
ConceptCapLoaderRetrieval,
)
from .foil_dataset import FoilClassificationDataset
from .vqa_dataset import VQAClassificationDataset
from .vqa_mc_dataset import VQAMultipleChoiceDataset
from .nlvr2_dataset import NLVR2Dataset
from .refer_expression_dataset import ReferExpressionDataset
from .retreival_dataset import RetreivalDataset, RetreivalDatasetVal
from .vcr_dataset import VCRDataset
from .visdial_dataset import VisDialDataset
from .visual_entailment_dataset import VisualEntailmentDataset
from .refer_dense_caption import ReferDenseCpationDataset
from .visual_genome_dataset import GenomeQAClassificationDataset
from .gqa_dataset import GQAClassificationDataset
from .guesswhat_dataset import GuessWhatDataset
from .visual7w_pointing_dataset import Visual7wPointingDataset
from .guesswhat_pointing_dataset import GuessWhatPointingDataset
from .flickr_grounding_dataset import FlickrGroundingDataset
from .t2c_expression_dataset import T2C_ReferExpressionDataset
# Public names exported by the datasets package.
# NOTE(review): the original list ended with a stray empty string ("");
# `from <package> import *` fails on it with an AttributeError while
# looking up the empty name, so it has been removed.
__all__ = [
    "FoilClassificationDataset",
    "VQAClassificationDataset",
    "GenomeQAClassificationDataset",
    "VQAMultipleChoiceDataset",
    "ConceptCapLoaderTrain",
    "ConceptCapLoaderVal",
    "NLVR2Dataset",
    "ReferExpressionDataset",
    "RetreivalDataset",
    "RetreivalDatasetVal",
    "VCRDataset",
    "VisDialDataset",
    "VisualEntailmentDataset",
    "GQAClassificationDataset",
    "ConceptCapLoaderRetrieval",
    "GuessWhatDataset",
    "Visual7wPointingDataset",
    "GuessWhatPointingDataset",
    "FlickrGroundingDataset",
    "T2C_ReferExpressionDataset",
]
# Maps task names to the dataset class used during training.
DatasetMapTrain = {
    "VQA": VQAClassificationDataset,
    "GenomeQA": GenomeQAClassificationDataset,
    "VisualDialog": VisDialDataset,
    "VCR_Q-A": VCRDataset,
    "VCR_QA-R": VCRDataset,
    "RetrievalCOCO": RetreivalDataset,
    "RetrievalFlickr30k": RetreivalDataset,
    "refcoco": ReferExpressionDataset,
    "refcoco+": ReferExpressionDataset,
    "refcocog": ReferExpressionDataset,
    "NLVR2": NLVR2Dataset,
    "VisualEntailment": VisualEntailmentDataset,
    "GQA": GQAClassificationDataset,
    "Foil": FoilClassificationDataset,
    "GuessWhat": GuessWhatDataset,
    "Visual7w": Visual7wPointingDataset,
    "GuessWhatPointing": GuessWhatPointingDataset,
    "FlickrGrounding": FlickrGroundingDataset,
    "Talk2Car": T2C_ReferExpressionDataset,
}
# Maps task names to the dataset class used at evaluation time; only the
# retrieval tasks differ from DatasetMapTrain (they use the *Val variant).
DatasetMapEval = {
    "VQA": VQAClassificationDataset,
    "GenomeQA": GenomeQAClassificationDataset,
    "VisualDialog": VisDialDataset,
    "VCR_Q-A": VCRDataset,
    "VCR_QA-R": VCRDataset,
    "RetrievalCOCO": RetreivalDatasetVal,
    "RetrievalFlickr30k": RetreivalDatasetVal,
    "refcoco": ReferExpressionDataset,
    "refcoco+": ReferExpressionDataset,
    "refcocog": ReferExpressionDataset,
    "NLVR2": NLVR2Dataset,
    "VisualEntailment": VisualEntailmentDataset,
    "GQA": GQAClassificationDataset,
    "Foil": FoilClassificationDataset,
    "GuessWhat": GuessWhatDataset,
    "Visual7w": Visual7wPointingDataset,
    "GuessWhatPointing": GuessWhatPointingDataset,
    "FlickrGrounding": FlickrGroundingDataset,
    "Talk2Car": T2C_ReferExpressionDataset,
}
| true
| true
|
1c406f6be4a4ca2aa5169e352c87433ba660e116
| 676
|
py
|
Python
|
zksync/config/logger_config.py
|
zksync-sdk/schnorr-musig-sdk-python
|
e853b1fc54fcff1c896a6aaac44b0c72dd393a01
|
[
"MIT"
] | 1
|
2021-05-19T10:05:37.000Z
|
2021-05-19T10:05:37.000Z
|
zksync/config/logger_config.py
|
zksync-sdk/schnorr-musig-sdk-python
|
e853b1fc54fcff1c896a6aaac44b0c72dd393a01
|
[
"MIT"
] | 2
|
2021-03-10T12:38:40.000Z
|
2021-05-21T10:28:11.000Z
|
zksync/config/logger_config.py
|
zksync-sdk/schnorr-musig-sdk-python
|
e853b1fc54fcff1c896a6aaac44b0c72dd393a01
|
[
"MIT"
] | 1
|
2021-05-19T10:05:38.000Z
|
2021-05-19T10:05:38.000Z
|
import yaml
import importlib.resources as pkg_resources
class LoggerConfig:
    """Helpers for building the SDK's logging configuration."""

    @staticmethod
    def console(verbosity: int = 0) -> dict:
        """Load the bundled console logging config, adjusted for *verbosity*.

        Reads ``console.yaml`` packaged with ``zksync.config`` and sets the
        levels of the ``zksync`` and ``__main__`` loggers accordingly.
        """
        stream = pkg_resources.open_binary('zksync.config', 'console.yaml')
        config = yaml.load(stream, Loader=yaml.FullLoader)
        log_level = LoggerConfig.level(verbosity)
        for logger_name in ('zksync', '__main__'):
            config['loggers'][logger_name]['level'] = log_level
        return config

    @staticmethod
    def level(verbosity: int = 0) -> int:
        """Convert a verbosity count to a ``logging`` level.

        0 (default) -> logging.WARNING (30), 1 -> logging.INFO (20),
        2 or more -> logging.DEBUG (10).
        """
        capped = min(verbosity, 2)
        return 30 - capped * 10
| 33.8
| 74
| 0.692308
|
import yaml
import importlib.resources as pkg_resources
class LoggerConfig:
    """Builds the logging configuration for the zksync SDK."""
    @staticmethod
    def console(verbosity: int = 0) -> dict:
        # Load the packaged console.yaml config, then override the levels
        # of the 'zksync' and '__main__' loggers from *verbosity*.
        stream = pkg_resources.open_binary('zksync.config', 'console.yaml')
        config = yaml.load(stream, Loader=yaml.FullLoader)
        config['loggers']['zksync']['level'] = LoggerConfig.level(verbosity)
        config['loggers']['__main__']['level'] = LoggerConfig.level(verbosity)
        return config
    @staticmethod
    def level(verbosity: int = 0) -> int:
        # 0 -> WARNING (30), 1 -> INFO (20), 2+ -> DEBUG (10).
        return 30 - min(verbosity, 2) * 10
| true
| true
|
1c4070163b75d709b4de010864facb377cc8db96
| 875
|
py
|
Python
|
app/my_blueprint/models.py
|
katherinejackson/embedded-time-series-study
|
353ee2c6743d45df15951ddf13f87257948b9400
|
[
"MIT"
] | null | null | null |
app/my_blueprint/models.py
|
katherinejackson/embedded-time-series-study
|
353ee2c6743d45df15951ddf13f87257948b9400
|
[
"MIT"
] | null | null | null |
app/my_blueprint/models.py
|
katherinejackson/embedded-time-series-study
|
353ee2c6743d45df15951ddf13f87257948b9400
|
[
"MIT"
] | null | null | null |
def create(db):
    """Return the SQLAlchemy model class for one time-map trial record.

    *db* is the Flask-SQLAlchemy instance; the model is created inside this
    factory so the table is bound to whichever db object the caller passes.
    """
    class timemap(db.Model):
        __tablename__ = "timemapV01"
        # Surrogate primary key.
        ID = db.Column(db.Integer, primary_key=True, autoincrement=True)
        # Participant who answered this trial.
        participantID = db.Column(db.Integer, db.ForeignKey('participant.participantID'))
        # Server-side timestamp of when the row was inserted.
        answeredOn = db.Column(db.DateTime, nullable=False, default=db.func.now())
        trialStart = db.Column(db.String)
        trialEnd = db.Column(db.String)
        trialTime = db.Column(db.String)
        mode = db.Column(db.String)
        view = db.Column(db.String)
        shape = db.Column(db.String)
        encoding = db.Column(db.String)
        Condition = db.Column(db.String)
        QuestionType = db.Column(db.String)
        ErrorCount = db.Column(db.String)
        hoverCount = db.Column(db.String)
        zoomLevel = db.Column(db.String)
        questionNumber = db.Column(db.String)
    return timemap
| 41.666667
| 89
| 0.64
|
def create(db):
    """Return the SQLAlchemy model class for one time-map trial record,
    bound to the given Flask-SQLAlchemy instance *db*."""
    class timemap(db.Model):
        __tablename__ = "timemapV01"
        # Surrogate primary key.
        ID = db.Column(db.Integer, primary_key=True, autoincrement=True)
        # Participant who answered this trial.
        participantID = db.Column(db.Integer, db.ForeignKey('participant.participantID'))
        # Server-side insert timestamp.
        answeredOn = db.Column(db.DateTime, nullable=False, default=db.func.now())
        trialStart = db.Column(db.String)
        trialEnd = db.Column(db.String)
        trialTime = db.Column(db.String)
        mode = db.Column(db.String)
        view = db.Column(db.String)
        shape = db.Column(db.String)
        encoding = db.Column(db.String)
        Condition = db.Column(db.String)
        QuestionType = db.Column(db.String)
        ErrorCount = db.Column(db.String)
        hoverCount = db.Column(db.String)
        zoomLevel = db.Column(db.String)
        questionNumber = db.Column(db.String)
    return timemap
| true
| true
|
1c40708cbda6e90df51ff794b98e99f4ef7798d5
| 4,583
|
py
|
Python
|
gui/qt/contact_list.py
|
handsomegui/electrum
|
5e8c710c71318c9a7c275e0d9444322313ad36b2
|
[
"MIT"
] | 2
|
2018-08-14T17:34:23.000Z
|
2018-08-28T06:47:33.000Z
|
gui/qt/contact_list.py
|
handsomegui/electrum
|
5e8c710c71318c9a7c275e0d9444322313ad36b2
|
[
"MIT"
] | null | null | null |
gui/qt/contact_list.py
|
handsomegui/electrum
|
5e8c710c71318c9a7c275e0d9444322313ad36b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import webbrowser
from electroncash.i18n import _
import electroncash.web as web
from electroncash.address import Address
from electroncash.plugins import run_hook
from electroncash.util import FileImportFailed
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (
QAbstractItemView, QFileDialog, QMenu, QTreeWidgetItem)
from .util import MyTreeWidget
class ContactList(MyTreeWidget):
    """Tree widget listing the wallet's contacts (name / address pairs)."""

    filter_columns = [0, 1]  # Key, Value

    def __init__(self, parent):
        MyTreeWidget.__init__(self, parent, self.create_menu, [_('Name'), _('Address')], 0, [0])
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setSortingEnabled(True)

    def on_permit_edit(self, item, column):
        # openalias items shouldn't be editable
        return item.text(1) != "openalias"

    def on_edited(self, item, column, prior):
        if column == 0:  # Remove old contact if renamed
            self.parent.contacts.pop(prior)
        self.parent.set_contact(item.text(0), item.text(1))

    def import_contacts(self):
        """Prompt for a file and merge its contacts into the wallet."""
        wallet_folder = self.parent.get_wallet_folder()
        filename, __ = QFileDialog.getOpenFileName(self.parent, "Select your wallet file", wallet_folder)
        if not filename:
            return
        try:
            self.parent.contacts.import_file(filename)
        except FileImportFailed as e:
            self.parent.show_message(str(e))
        self.on_update()

    def create_menu(self, position):
        """Build and show the context menu for the current selection."""
        menu = QMenu()
        selected = self.selectedItems()
        if not selected:
            menu.addAction(_("New contact"), lambda: self.parent.new_contact_dialog())
            menu.addAction(_("Import file"), lambda: self.import_contacts())
        else:
            # NOTE(review): an unused `names` list was also built here in the
            # original; it has been removed.
            keys = [item.text(1) for item in selected]
            column = self.currentColumn()
            column_title = self.headerItem().text(column)
            column_data = '\n'.join([item.text(column) for item in selected])
            menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data))
            if column in self.editable_columns:
                item = self.currentItem()
                menu.addAction(_("Edit {}").format(column_title), lambda: self.editItem(item, column))
            menu.addAction(_("Pay to"), lambda: self.parent.payto_contacts(keys))
            menu.addAction(_("Delete"), lambda: self.parent.delete_contacts(keys))
            URLs = [web.BE_URL(self.config, 'addr', Address.from_string(key))
                    for key in keys if Address.is_valid(key)]
            if URLs:
                menu.addAction(_("View on block explorer"), lambda: [webbrowser.open(URL) for URL in URLs])
        run_hook('create_contact_menu', menu, selected)
        menu.exec_(self.viewport().mapToGlobal(position))

    def on_update(self):
        """Rebuild the tree from the wallet's contacts, preserving selection."""
        item = self.currentItem()
        current_key = item.data(0, Qt.UserRole) if item else None
        self.clear()
        for key in sorted(self.parent.contacts.keys()):
            _type, name = self.parent.contacts[key]
            item = QTreeWidgetItem([name, key])
            item.setData(0, Qt.UserRole, key)
            self.addTopLevelItem(item)
            if key == current_key:
                self.setCurrentItem(item)
        run_hook('update_contacts_tab', self)
| 43.647619
| 119
| 0.676631
|
import webbrowser
from electroncash.i18n import _
import electroncash.web as web
from electroncash.address import Address
from electroncash.plugins import run_hook
from electroncash.util import FileImportFailed
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (
QAbstractItemView, QFileDialog, QMenu, QTreeWidgetItem)
from .util import MyTreeWidget
class ContactList(MyTreeWidget):
    """Tree widget listing the wallet's contacts (name / address pairs)."""
    filter_columns = [0, 1]  # Key, Value
    def __init__(self, parent):
        MyTreeWidget.__init__(self, parent, self.create_menu, [_('Name'), _('Address')], 0, [0])
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setSortingEnabled(True)
    def on_permit_edit(self, item, column):
        # openalias entries must not be edited in place.
        return item.text(1) != "openalias"
    def on_edited(self, item, column, prior):
        if column == 0: # Remove old contact if renamed
            self.parent.contacts.pop(prior)
        self.parent.set_contact(item.text(0), item.text(1))
    def import_contacts(self):
        # Prompt for a file and merge its contacts into the wallet.
        wallet_folder = self.parent.get_wallet_folder()
        filename, __ = QFileDialog.getOpenFileName(self.parent, "Select your wallet file", wallet_folder)
        if not filename:
            return
        try:
            self.parent.contacts.import_file(filename)
        except FileImportFailed as e:
            self.parent.show_message(str(e))
        self.on_update()
    def create_menu(self, position):
        # Build the context menu for the current selection; with no
        # selection only "new" / "import" actions are offered.
        menu = QMenu()
        selected = self.selectedItems()
        if not selected:
            menu.addAction(_("New contact"), lambda: self.parent.new_contact_dialog())
            menu.addAction(_("Import file"), lambda: self.import_contacts())
        else:
            names = [item.text(0) for item in selected]
            keys = [item.text(1) for item in selected]
            column = self.currentColumn()
            column_title = self.headerItem().text(column)
            column_data = '\n'.join([item.text(column) for item in selected])
            menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data))
            if column in self.editable_columns:
                item = self.currentItem()
                menu.addAction(_("Edit {}").format(column_title), lambda: self.editItem(item, column))
            menu.addAction(_("Pay to"), lambda: self.parent.payto_contacts(keys))
            menu.addAction(_("Delete"), lambda: self.parent.delete_contacts(keys))
            # Block-explorer links only for syntactically valid addresses.
            URLs = [web.BE_URL(self.config, 'addr', Address.from_string(key))
                    for key in keys if Address.is_valid(key)]
            if URLs:
                menu.addAction(_("View on block explorer"), lambda: [webbrowser.open(URL) for URL in URLs])
        run_hook('create_contact_menu', menu, selected)
        menu.exec_(self.viewport().mapToGlobal(position))
    def on_update(self):
        # Rebuild the tree from the wallet's contacts, restoring the
        # previously selected entry if it still exists.
        item = self.currentItem()
        current_key = item.data(0, Qt.UserRole) if item else None
        self.clear()
        for key in sorted(self.parent.contacts.keys()):
            _type, name = self.parent.contacts[key]
            item = QTreeWidgetItem([name, key])
            item.setData(0, Qt.UserRole, key)
            self.addTopLevelItem(item)
            if key == current_key:
                self.setCurrentItem(item)
        run_hook('update_contacts_tab', self)
| true
| true
|
1c407144dc5b30a3c147d792ef3b1d5e7cb38638
| 8,415
|
py
|
Python
|
docs/examples/Metrics.py
|
io8ex/Stone-Soup
|
071abc8f6004296ab35094db04c7ec410103c419
|
[
"MIT"
] | null | null | null |
docs/examples/Metrics.py
|
io8ex/Stone-Soup
|
071abc8f6004296ab35094db04c7ec410103c419
|
[
"MIT"
] | null | null | null |
docs/examples/Metrics.py
|
io8ex/Stone-Soup
|
071abc8f6004296ab35094db04c7ec410103c419
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
"""
Metrics Example
===============
This example is going to look at metrics, and how they can be used to assess algorithm performance.
"""
# %%
# Building a Simple Simulation and Tracker
# ----------------------------------------
# For simplicity, we are going to quickly build a basic Kalman Tracker, with simple Stone Soup
# simulators, including clutter. In this case a 2D constant velocity target, with 2D linear
# measurements of position.
import datetime
import numpy as np
import matplotlib.pyplot as plt
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.deleter.error import CovarianceBasedDeleter
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.initiator.simple import MultiMeasurementInitiator
from stonesoup.measures import Mahalanobis
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.predictor.kalman import KalmanPredictor
from stonesoup.simulator.simple import MultiTargetGroundTruthSimulator, SimpleDetectionSimulator
from stonesoup.tracker.simple import MultiTargetTracker
from stonesoup.types.array import StateVector, CovarianceMatrix
from stonesoup.types.state import GaussianState
from stonesoup.updater.kalman import KalmanUpdater
# Models
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(1), ConstantVelocity(1)], seed=1)
measurement_model = LinearGaussian(4, [0, 2], np.diag([0.5, 0.5]), seed=2)
# Simulators
groundtruth_sim = MultiTargetGroundTruthSimulator(
transition_model=transition_model,
initial_state=GaussianState(
StateVector([[0], [0], [0], [0]]),
CovarianceMatrix(np.diag([1000, 10, 1000, 10]))),
timestep=datetime.timedelta(seconds=5),
number_steps=100,
birth_rate=0.2,
death_probability=0.05,
seed=3
)
detection_sim = SimpleDetectionSimulator(
groundtruth=groundtruth_sim,
measurement_model=measurement_model,
meas_range=np.array([[-1, 1], [-1, 1]]) * 5000, # Area to generate clutter
detection_probability=0.9,
clutter_rate=1,
seed=4
)
# Filter
predictor = KalmanPredictor(transition_model)
updater = KalmanUpdater(measurement_model)
# Data Associator
hypothesiser = DistanceHypothesiser(predictor, updater, Mahalanobis(), missed_distance=3)
data_associator = GNNWith2DAssignment(hypothesiser)
# Initiator & Deleter
deleter = CovarianceBasedDeleter(covar_trace_thresh=1E3)
initiator = MultiMeasurementInitiator(
GaussianState(np.array([[0], [0], [0], [0]]), np.diag([0, 100, 0, 1000])),
measurement_model=measurement_model,
deleter=deleter,
data_associator=data_associator,
updater=updater,
min_points=3,
)
# Tracker
tracker = MultiTargetTracker(
initiator=initiator,
deleter=deleter,
detector=detection_sim,
data_associator=data_associator,
updater=updater,
)
# %%
# Create Metric Generators
# ------------------------
# Here we are going to create a variety of metrics. First up is some "Basic Metrics", that simply
# computes the number of tracks, number to targets and the ratio of tracks to targets. Basic but
# useful information, that requires no additional properties.
from stonesoup.metricgenerator.basicmetrics import BasicMetrics
basic_generator = BasicMetrics()
# %%
# Next we'll create the Optimal SubPattern Assignment (OPSA) metric generator. This metric is
# calculated at each time step, giving an overall multi-track to multi-groundtruth missed distance.
# This has two properties: :math:`p \in [1,\infty]` for outlier sensitivity and :math:`c > 1` for
# cardinality penalty. [#]_
from stonesoup.metricgenerator.ospametric import OSPAMetric
from stonesoup.measures import Euclidean
ospa_generator = OSPAMetric(c=10, p=1, measure=Euclidean([0, 2]))
# %%
# And finally we create some Single Integrated Air Picture (SIAP) metrics. Despite it's name, this
# is applicable to tracking in general and not just in relation to an air picture. This is made up
# of multiple individual metrics. [#]_
from stonesoup.metricgenerator.tracktotruthmetrics import SIAPMetrics
siap_generator = SIAPMetrics(position_measure=Euclidean((0, 2)),
velocity_measure=Euclidean((1, 3)))
# %%
# The SIAP Metrics requires a way to associate tracks to truth, so we'll use a Track to Truth
# associator, which uses Euclidean distance measure by default.
from stonesoup.dataassociator.tracktotrack import TrackToTruth
associator = TrackToTruth(association_threshold=30)
# %%
# As a final example of a metric, we'll create a plotting metric, which is a visual way to view the
# output of our tracker.
from stonesoup.metricgenerator.plotter import TwoDPlotter
plot_generator = TwoDPlotter([0, 2], [0, 2], [0, 2])
# %%
# Once we've created a set of metrics, these are added to a Metric Manager, along with the
# associator. The associator can be used by multiple metric generators, only being run once as this
# can be a computationally expensive process; in this case, only SIAP Metrics requires it.
from stonesoup.metricgenerator.manager import SimpleManager
metric_manager = SimpleManager([basic_generator, ospa_generator, siap_generator, plot_generator],
associator=associator)
# %%
# Tracking and Generating Metrics
# -------------------------------
# With this basic tracker built and metrics ready, we'll now run the tracker, adding the sets of
# :class:`~.GroundTruthPath`, :class:`~.Detection` and :class:`~.Track` objects: to the metric
# manager.
for time, tracks in tracker.tracks_gen():
metric_manager.add_data(
groundtruth_sim.groundtruth_paths, tracks, detection_sim.detections,
overwrite=False, # Don't overwrite, instead add above as additional data
)
# %%
# With the tracker run and data in the metric manager, we'll now run the generate metrics method.
# This will also generate the plot, which will be rendered automatically below, which will give a
# visual overview
plt.rcParams["figure.figsize"] = (10, 8)
metrics = metric_manager.generate_metrics()
# %%
# So first we'll loop through the metrics and print out the basic metrics, which simply gives
# details on number of tracks versus targets.
for metric in metrics:
if not any(s in metric for s in ('SIAP', 'OSPA', 'plot')):
print(f"{metric} : {metrics.get(metric).value}")
# %%
# Next we'll take a look at the OSPA metric, plotting it to show how it varies over time. In this
# example, targets are created and remove randomly, so expect this to be fairly variable.
ospa_metric = metrics['OSPA distances']
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot([i.timestamp for i in ospa_metric.value], [i.value for i in ospa_metric.value])
ax.set_ylabel("OSPA distance")
ax.tick_params(labelbottom=False)
_ = ax.set_xlabel("Time")
# %%
# And finally, we'll look at the SIAP metrics, but to make these easier to visualise and understand
# we'll use a special SIAP table generator. This will colour code the results for quick visual
# indication, as well as provide a description for each metric.
from stonesoup.metricgenerator.metrictables import SIAPTableGenerator
siap_averages = {metrics.get(metric) for metric in metrics
if metric.startswith("SIAP") and not metric.endswith(" at times")}
siap_time_based = {metrics.get(metric) for metric in metrics if metric.endswith(' at times')}
_ = SIAPTableGenerator(siap_averages).compute_metric()
# %%
# Plotting appropriate SIAP values at each timestamp gives:
fig2, axes = plt.subplots(5)
fig2.subplots_adjust(hspace=1)
t_siaps = siap_time_based
times = metric_manager.list_timestamps()
for siap, axis in zip(t_siaps, axes):
siap_type = siap.title[:-13] # remove the ' at timestamp' part
axis.set(title=siap.title, xlabel='Time', ylabel=siap_type)
axis.tick_params(length=1)
axis.plot(times, [t_siap.value for t_siap in siap.value])
# sphinx_gallery_thumbnail_number = 4
# %%
# .. rubric:: Footnotes
#
# .. [#] *D. Schuhmacher, B. Vo and B. Vo*, **A Consistent Metric for Performance Evaluation of
# Multi-Object Filters**, IEEE Trans. Signal Processing 2008
# .. [#] *Votruba, Paul & Nisley, Rich & Rothrock, Ron and Zombro, Brett.*, **Single Integrated Air
# Picture (SIAP) Metrics Implementation**, 2001
| 38.424658
| 99
| 0.746643
|
import datetime
import numpy as np
import matplotlib.pyplot as plt
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.deleter.error import CovarianceBasedDeleter
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.initiator.simple import MultiMeasurementInitiator
from stonesoup.measures import Mahalanobis
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.predictor.kalman import KalmanPredictor
from stonesoup.simulator.simple import MultiTargetGroundTruthSimulator, SimpleDetectionSimulator
from stonesoup.tracker.simple import MultiTargetTracker
from stonesoup.types.array import StateVector, CovarianceMatrix
from stonesoup.types.state import GaussianState
from stonesoup.updater.kalman import KalmanUpdater
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(1), ConstantVelocity(1)], seed=1)
measurement_model = LinearGaussian(4, [0, 2], np.diag([0.5, 0.5]), seed=2)
groundtruth_sim = MultiTargetGroundTruthSimulator(
transition_model=transition_model,
initial_state=GaussianState(
StateVector([[0], [0], [0], [0]]),
CovarianceMatrix(np.diag([1000, 10, 1000, 10]))),
timestep=datetime.timedelta(seconds=5),
number_steps=100,
birth_rate=0.2,
death_probability=0.05,
seed=3
)
detection_sim = SimpleDetectionSimulator(
groundtruth=groundtruth_sim,
measurement_model=measurement_model,
meas_range=np.array([[-1, 1], [-1, 1]]) * 5000,
detection_probability=0.9,
clutter_rate=1,
seed=4
)
predictor = KalmanPredictor(transition_model)
updater = KalmanUpdater(measurement_model)
hypothesiser = DistanceHypothesiser(predictor, updater, Mahalanobis(), missed_distance=3)
data_associator = GNNWith2DAssignment(hypothesiser)
deleter = CovarianceBasedDeleter(covar_trace_thresh=1E3)
initiator = MultiMeasurementInitiator(
GaussianState(np.array([[0], [0], [0], [0]]), np.diag([0, 100, 0, 1000])),
measurement_model=measurement_model,
deleter=deleter,
data_associator=data_associator,
updater=updater,
min_points=3,
)
tracker = MultiTargetTracker(
initiator=initiator,
deleter=deleter,
detector=detection_sim,
data_associator=data_associator,
updater=updater,
)
from stonesoup.metricgenerator.basicmetrics import BasicMetrics
basic_generator = BasicMetrics()
# calculated at each time step, giving an overall multi-track to multi-groundtruth missed distance.
# This has two properties: :math:`p \in [1,\infty]` for outlier sensitivity and :math:`c > 1` for
# cardinality penalty. [#]_
from stonesoup.metricgenerator.ospametric import OSPAMetric
from stonesoup.measures import Euclidean
ospa_generator = OSPAMetric(c=10, p=1, measure=Euclidean([0, 2]))
# %%
# And finally we create some Single Integrated Air Picture (SIAP) metrics. Despite it's name, this
# NOTE(review): this import line was truncated in the source ("om ..."),
# which is a syntax error; the full `from ... import` statement is restored.
from stonesoup.metricgenerator.tracktotruthmetrics import SIAPMetrics
siap_generator = SIAPMetrics(position_measure=Euclidean((0, 2)),
                             velocity_measure=Euclidean((1, 3)))
# associator, which uses Euclidean distance measure by default.
from stonesoup.dataassociator.tracktotrack import TrackToTruth
associator = TrackToTruth(association_threshold=30)
# %%
# As a final example of a metric, we'll create a plotting metric, which is a visual way to view the
from stonesoup.metricgenerator.plotter import TwoDPlotter
plot_generator = TwoDPlotter([0, 2], [0, 2], [0, 2])
# associator. The associator can be used by multiple metric generators, only being run once as this
# can be a computationally expensive process; in this case, only SIAP Metrics requires it.
from stonesoup.metricgenerator.manager import SimpleManager
metric_manager = SimpleManager([basic_generator, ospa_generator, siap_generator, plot_generator],
associator=associator)
# %%
# Tracking and Generating Metrics
# -------------------------------
# With this basic tracker built and metrics ready, we'll now run the tracker, adding the sets of
for time, tracks in tracker.tracks_gen():
metric_manager.add_data(
groundtruth_sim.groundtruth_paths, tracks, detection_sim.detections,
overwrite=False,
)
# %%
# With the tracker run and data in the metric manager, we'll now run the generate metrics method.
plt.rcParams["figure.figsize"] = (10, 8)
metrics = metric_manager.generate_metrics()
# details on number of tracks versus targets.
for metric in metrics:
if not any(s in metric for s in ('SIAP', 'OSPA', 'plot')):
print(f"{metric} : {metrics.get(metric).value}")
# %%
# Next we'll take a look at the OSPA metric, plotting it to show how it varies over time. In this
ospa_metric = metrics['OSPA distances']
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot([i.timestamp for i in ospa_metric.value], [i.value for i in ospa_metric.value])
ax.set_ylabel("OSPA distance")
ax.tick_params(labelbottom=False)
_ = ax.set_xlabel("Time")
# we'll use a special SIAP table generator. This will colour code the results for quick visual
from stonesoup.metricgenerator.metrictables import SIAPTableGenerator
siap_averages = {metrics.get(metric) for metric in metrics
if metric.startswith("SIAP") and not metric.endswith(" at times")}
siap_time_based = {metrics.get(metric) for metric in metrics if metric.endswith(' at times')}
_ = SIAPTableGenerator(siap_averages).compute_metric()
fig2, axes = plt.subplots(5)
fig2.subplots_adjust(hspace=1)
t_siaps = siap_time_based
times = metric_manager.list_timestamps()
for siap, axis in zip(t_siaps, axes):
siap_type = siap.title[:-13]
axis.set(title=siap.title, xlabel='Time', ylabel=siap_type)
axis.tick_params(length=1)
axis.plot(times, [t_siap.value for t_siap in siap.value])
| true
| true
|
1c40714fabd2d717595b36f1580b9b068d5c55e6
| 947
|
py
|
Python
|
adapters/heiman/__init__.py
|
cocooma/domoticz-zigbee2mqtt-plugin
|
97b025beaff6b68f4f92dd434c6dda63f53efdd8
|
[
"MIT"
] | null | null | null |
adapters/heiman/__init__.py
|
cocooma/domoticz-zigbee2mqtt-plugin
|
97b025beaff6b68f4f92dd434c6dda63f53efdd8
|
[
"MIT"
] | null | null | null |
adapters/heiman/__init__.py
|
cocooma/domoticz-zigbee2mqtt-plugin
|
97b025beaff6b68f4f92dd434c6dda63f53efdd8
|
[
"MIT"
] | null | null | null |
from adapters.contact_adapter import ContactAdapter
from adapters.generic.smoke_sensor import SmokeSensorAdapter
from adapters.generic.water_leak_sensor import WaterLeakSensorAdapter
from adapters.heiman.HS1CAE import HS1CAE
from adapters.heiman.HS1WLE import HS1WLE
from adapters.heiman.HS2SK import HS2SK
# Maps Zigbee model identifiers of HEIMAN devices to their adapter classes.
heiman_adapters = {
    'HS1CA-E': HS1CAE,  # HEIMAN carbon monoxide (CO) detector
    'HS1DS': ContactAdapter,  # HEIMAN door sensor
    'HS1DS-E': ContactAdapter,  # HEIMAN door sensor
    'HS1SA': SmokeSensorAdapter,  # HEIMAN smoke detector
    'HS1SA-M': SmokeSensorAdapter,  # HEIMAN smoke detector
    'HS1WL': WaterLeakSensorAdapter,  # HEIMAN water leakage sensor
    'HS1-WL-E': HS1WLE,  # HEIMAN water leakage sensor
    'HS2SK': HS2SK,  # HEIMAN socket (on/off, power measurement)
    'HS3SA': SmokeSensorAdapter,  # HEIMAN smoke detector
}
| 47.35
| 90
| 0.691658
|
from adapters.contact_adapter import ContactAdapter
from adapters.generic.smoke_sensor import SmokeSensorAdapter
from adapters.generic.water_leak_sensor import WaterLeakSensorAdapter
from adapters.heiman.HS1CAE import HS1CAE
from adapters.heiman.HS1WLE import HS1WLE
from adapters.heiman.HS2SK import HS2SK
# Maps Zigbee model identifiers of HEIMAN devices to their adapter classes.
heiman_adapters = {
    'HS1CA-E': HS1CAE,  # carbon monoxide (CO) detector
    'HS1DS': ContactAdapter,  # door sensor
    'HS1DS-E': ContactAdapter,  # door sensor
    'HS1SA': SmokeSensorAdapter,  # smoke detector
    'HS1SA-M': SmokeSensorAdapter,  # smoke detector
    'HS1WL': WaterLeakSensorAdapter,  # water leakage sensor
    'HS1-WL-E': HS1WLE,  # water leakage sensor
    'HS2SK': HS2SK,  # socket (on/off, power measurement)
    'HS3SA': SmokeSensorAdapter,  # smoke detector
}
| true
| true
|
1c4072ac7511fa7966efacb5742ad5215651220c
| 601
|
py
|
Python
|
mathgenerator/funcs/compareFractionsFunc.py
|
anshitabaid/mathgenerator
|
d95dd5e29c76561b35d5d18ab249d237d7a927e2
|
[
"MIT"
] | null | null | null |
mathgenerator/funcs/compareFractionsFunc.py
|
anshitabaid/mathgenerator
|
d95dd5e29c76561b35d5d18ab249d237d7a927e2
|
[
"MIT"
] | null | null | null |
mathgenerator/funcs/compareFractionsFunc.py
|
anshitabaid/mathgenerator
|
d95dd5e29c76561b35d5d18ab249d237d7a927e2
|
[
"MIT"
] | null | null | null |
from .__init__ import *
def compareFractionsFunc(maxVal=10):
    """Generate a fraction-comparison problem.

    Draws two fractions with numerators and denominators in [1, maxVal],
    re-rolling denominators so neither fraction has numerator equal to its
    denominator, and returns the question text together with the comparison
    symbol ('<', '>' or '=') as the solution.
    """
    # Draw all four values first, then re-roll denominators; this keeps the
    # exact same sequence of random.randint calls as the original.
    numerator1 = random.randint(1, maxVal)
    denominator1 = random.randint(1, maxVal)
    numerator2 = random.randint(1, maxVal)
    denominator2 = random.randint(1, maxVal)
    while numerator1 == denominator1:
        denominator1 = random.randint(1, maxVal)
    while numerator2 == denominator2:
        denominator2 = random.randint(1, maxVal)
    value1 = numerator1 / denominator1
    value2 = numerator2 / denominator2
    solution = ">" if value1 > value2 else "<" if value1 < value2 else "="
    problem = f"Which symbol represents the comparison between {numerator1}/{denominator1} and {numerator2}/{denominator2}?"
    return problem, solution
| 22.259259
| 84
| 0.560732
|
from .__init__ import *
def compareFractionsFunc(maxVal=10):
    """Generate a fraction-comparison problem and its answer symbol.

    Returns a (problem, solution) pair where problem asks to compare a/b
    with c/d and solution is '<', '>' or '='.
    """
    a = random.randint(1, maxVal)
    b = random.randint(1, maxVal)
    c = random.randint(1, maxVal)
    d = random.randint(1, maxVal)
    # Re-roll denominators so numerator and denominator differ.
    while (a == b):
        b = random.randint(1, maxVal)
    while (c == d):
        d = random.randint(1, maxVal)
    first = a / b
    second = c / d
    if (first > second):
        solution = ">"
    elif (first < second):
        solution = "<"
    else:
        solution = "="
    problem = f"Which symbol represents the comparison between {a}/{b} and {c}/{d}?"
    return problem, solution
| true
| true
|
1c407324ce288a1acf26a863eb26d2ce2206c673
| 25,089
|
py
|
Python
|
lib/sqlalchemy/util/_collections.py
|
vishvananda/sqlalchemy
|
762f41502384f8fcdc07b4c492738e32d4071160
|
[
"MIT"
] | 8
|
2017-07-18T18:35:10.000Z
|
2022-02-01T19:52:57.000Z
|
lib/sqlalchemy/util/_collections.py
|
RetailArchitects/sqlalchemy
|
399a5c96b2fd0e0f2f0cdda7766b31e37454eb2e
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/util/_collections.py
|
RetailArchitects/sqlalchemy
|
399a5c96b2fd0e0f2f0cdda7766b31e37454eb2e
|
[
"MIT"
] | 6
|
2017-07-26T08:51:10.000Z
|
2021-03-04T10:16:37.000Z
|
# util/_collections.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
import sys
import itertools
import weakref
import operator
from langhelpers import symbol
from compat import time_func, threading
EMPTY_SET = frozenset()
class NamedTuple(tuple):
    """tuple() subclass that adds labeled names.

    Is also pickleable.
    """

    def __new__(cls, vals, labels=None):
        t = tuple.__new__(cls, vals)
        if labels:
            t.__dict__.update(zip(labels, vals))
        # Always record the labels (even when None/empty) so that keys()
        # does not raise AttributeError on an unlabeled instance.
        t._labels = labels
        return t

    def keys(self):
        # Positions without a label are stored as None in _labels.
        return [l for l in (self._labels or []) if l is not None]
class ImmutableContainer(object):
    """Mixin that blocks item deletion/assignment and attribute
    assignment by raising TypeError."""

    def _immutable(self, *arg, **kw):
        raise TypeError("%s object is immutable" % self.__class__.__name__)

    __delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = \
update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self), )
def union(self, d):
if not self:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
    """Provide a __getattr__/__setattr__ interface over a dict."""

    def __init__(self, data):
        # write through __dict__ to bypass our own __setattr__,
        # which would otherwise store into _data itself
        self.__dict__['_data'] = data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return self._data.itervalues()

    def __add__(self, other):
        return list(self) + list(other)

    def __setitem__(self, key, object):
        self._data[key] = object

    def __getitem__(self, key):
        return self._data[key]

    def __delitem__(self, key):
        del self._data[key]

    def __setattr__(self, key, object):
        self._data[key] = object

    def __getstate__(self):
        return {'_data': self.__dict__['_data']}

    def __setstate__(self, state):
        self.__dict__['_data'] = state['_data']

    def __getattr__(self, key):
        # only called for attributes not found normally; translate a
        # missing key into the expected AttributeError
        try:
            return self._data[key]
        except KeyError:
            raise AttributeError(key)

    def __contains__(self, key):
        return key in self._data

    def as_immutable(self):
        """Return an immutable proxy for this :class:`.Properties`."""
        return ImmutableProperties(self._data)

    def update(self, value):
        self._data.update(value)

    def get(self, key, default=None):
        try:
            return self._data[key]
        except KeyError:
            return default

    def keys(self):
        return self._data.keys()

    def has_key(self, key):
        return key in self._data

    def clear(self):
        self._data.clear()
class OrderedProperties(Properties):
    """Provide a __getattr__/__setattr__ interface with an OrderedDict
    as backing store."""
    def __init__(self):
        # back the Properties dict with this module's ordering-aware
        # OrderedDict so iteration follows insertion order
        Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
    """Provide immutable dict/object attribute to an underlying dictionary."""
    # ImmutableContainer precedes Properties in the MRO, so its
    # _immutable stub overrides the mutating __setitem__/__setattr__/
    # __delitem__ from Properties; read access is unchanged.
class OrderedDict(dict):
    """A dict that returns keys/values/items in the order they were added."""

    def __init__(self, ____sequence=None, **kwargs):
        # _list tracks key insertion order; the underscored parameter
        # name avoids colliding with user keys passed via **kwargs
        self._list = []
        if ____sequence is None:
            if kwargs:
                self.update(**kwargs)
        else:
            self.update(____sequence, **kwargs)

    def clear(self):
        self._list = []
        dict.clear(self)

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        return OrderedDict(self)

    def sort(self, *arg, **kw):
        self._list.sort(*arg, **kw)

    def update(self, ____sequence=None, **kwargs):
        if ____sequence is not None:
            if hasattr(____sequence, 'keys'):
                # mapping-like source
                for k in ____sequence.keys():
                    self.__setitem__(k, ____sequence[k])
            else:
                # sequence of (key, value) pairs
                for k, v in ____sequence:
                    self[k] = v
        if kwargs:
            self.update(kwargs)

    def setdefault(self, key, value):
        if key in self:
            return self.__getitem__(key)
        self.__setitem__(key, value)
        return value

    def __iter__(self):
        return iter(self._list)

    def values(self):
        return [self[k] for k in self._list]

    def itervalues(self):
        return iter([self[k] for k in self._list])

    def keys(self):
        return list(self._list)

    def iterkeys(self):
        return iter(self.keys())

    def items(self):
        return [(k, self[k]) for k in self.keys()]

    def iteritems(self):
        return iter(self.items())

    def __setitem__(self, key, object):
        if key not in self:
            try:
                self._list.append(key)
            except AttributeError:
                # work around Python pickle loads() with
                # dict subclass (seems to ignore __setstate__?)
                self._list = [key]
        dict.__setitem__(self, key, object)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)

    def pop(self, key, *default):
        was_present = key in self
        value = dict.pop(self, key, *default)
        if was_present:
            self._list.remove(key)
        return value

    def popitem(self):
        item = dict.popitem(self)
        self._list.remove(item[0])
        return item
class OrderedSet(set):
    """A set() subclass that remembers insertion order.

    Membership checks use the underlying set; iteration, indexing and
    repr follow the parallel ``_list`` of first-insertion order.
    """

    def __init__(self, d=None):
        set.__init__(self)
        self._list = []
        if d is not None:
            self.update(d)

    def add(self, element):
        if element not in self:
            self._list.append(element)
            set.add(self, element)

    def remove(self, element):
        set.remove(self, element)
        self._list.remove(element)

    def insert(self, pos, element):
        if element not in self:
            self._list.insert(pos, element)
            set.add(self, element)

    def discard(self, element):
        if element in self:
            self._list.remove(element)
            set.remove(self, element)

    def clear(self):
        set.clear(self)
        self._list = []

    def __getitem__(self, key):
        return self._list[key]

    def __iter__(self):
        return iter(self._list)

    def __add__(self, other):
        return self.union(other)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self._list)

    __str__ = __repr__

    def update(self, iterable):
        for element in iterable:
            if element not in self:
                self._list.append(element)
                set.add(self, element)
        return self

    __ior__ = update

    def union(self, other):
        combined = self.__class__(self)
        combined.update(other)
        return combined

    __or__ = union

    def intersection(self, other):
        members = set(other)
        return self.__class__(e for e in self if e in members)

    __and__ = intersection

    def symmetric_difference(self, other):
        members = set(other)
        result = self.__class__(e for e in self if e not in members)
        result.update(e for e in members if e not in self)
        return result

    __xor__ = symmetric_difference

    def difference(self, other):
        members = set(other)
        return self.__class__(e for e in self if e not in members)

    __sub__ = difference

    def intersection_update(self, other):
        members = set(other)
        set.intersection_update(self, members)
        self._list = [e for e in self._list if e in members]
        return self

    __iand__ = intersection_update

    def symmetric_difference_update(self, other):
        set.symmetric_difference_update(self, other)
        # keep surviving elements in order, then append the new arrivals
        # in the other set's insertion order
        self._list = [e for e in self._list if e in self]
        self._list += [e for e in other._list if e in self]
        return self

    __ixor__ = symmetric_difference_update

    def difference_update(self, other):
        set.difference_update(self, other)
        self._list = [e for e in self._list if e in self]
        return self

    __isub__ = difference_update
class IdentitySet(object):
    """A set that considers only object id() for uniqueness.

    This strategy has edge cases for builtin types- it's possible to have
    two 'foo' strings in one of these sets, for example.  Use sparingly.

    Members are stored in a dict keyed by ``id(member)``; the binary set
    operations build temporary working sets of ``(id, obj)`` tuples so
    that only the id is ever hashed, never the member object itself.
    """

    _working_set = set

    def __init__(self, iterable=None):
        self._members = dict()
        if iterable:
            for o in iterable:
                self.add(o)

    def add(self, value):
        self._members[id(value)] = value

    def __contains__(self, value):
        return id(value) in self._members

    def remove(self, value):
        # raises KeyError if this exact object is not a member
        del self._members[id(value)]

    def discard(self, value):
        try:
            self.remove(value)
        except KeyError:
            pass

    def pop(self):
        try:
            pair = self._members.popitem()
            return pair[1]
        except KeyError:
            raise KeyError('pop from an empty set')

    def clear(self):
        self._members.clear()

    def __cmp__(self, other):
        raise TypeError('cannot compare sets using cmp()')

    def __eq__(self, other):
        if isinstance(other, IdentitySet):
            return self._members == other._members
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, IdentitySet):
            return self._members != other._members
        else:
            return True

    def issubset(self, iterable):
        other = type(self)(iterable)
        if len(self) > len(other):
            return False
        # any member of self missing from other disproves the subset
        for m in itertools.ifilterfalse(other._members.__contains__,
                                        self._members.iterkeys()):
            return False
        return True

    def __le__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.issubset(other)

    def __lt__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return len(self) < len(other) and self.issubset(other)

    def issuperset(self, iterable):
        other = type(self)(iterable)
        if len(self) < len(other):
            return False
        for m in itertools.ifilterfalse(self._members.__contains__,
                                        other._members.iterkeys()):
            return False
        return True

    def __ge__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.issuperset(other)

    def __gt__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return len(self) > len(other) and self.issuperset(other)

    def union(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        result._members.update(
            self._working_set(self._member_id_tuples()).union(_iter_id(iterable)))
        return result

    def __or__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.union(other)

    def update(self, iterable):
        self._members = self.union(iterable)._members

    def __ior__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.update(other)
        return self

    def difference(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        result._members.update(
            self._working_set(self._member_id_tuples()).difference(_iter_id(iterable)))
        return result

    def __sub__(self, other):
        # NOTE: an earlier duplicate __sub__ (without the isinstance
        # guard) was dead code shadowed by this definition; removed.
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.difference(other)

    def difference_update(self, iterable):
        self._members = self.difference(iterable)._members

    def __isub__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.difference_update(other)
        return self

    def intersection(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        result._members.update(
            self._working_set(self._member_id_tuples()).intersection(_iter_id(iterable)))
        return result

    def __and__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.intersection(other)

    def intersection_update(self, iterable):
        self._members = self.intersection(iterable)._members

    def __iand__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.intersection_update(other)
        return self

    def symmetric_difference(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        result._members.update(
            self._working_set(self._member_id_tuples()).symmetric_difference(_iter_id(iterable)))
        return result

    def _member_id_tuples(self):
        return ((id(v), v) for v in self._members.itervalues())

    def __xor__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.symmetric_difference(other)

    def symmetric_difference_update(self, iterable):
        self._members = self.symmetric_difference(iterable)._members

    def __ixor__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        # bug fix: previously called symmetric_difference() and discarded
        # the result, so ``s ^= t`` never mutated ``s``; the in-place
        # operator must call symmetric_difference_update().
        self.symmetric_difference_update(other)
        return self

    def copy(self):
        return type(self)(self._members.itervalues())

    __copy__ = copy

    def __len__(self):
        return len(self._members)

    def __iter__(self):
        return self._members.itervalues()

    def __hash__(self):
        raise TypeError('set objects are unhashable')

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self._members.values())
class OrderedIdentitySet(IdentitySet):
    """An IdentitySet whose members iterate in insertion order."""
    class _working_set(OrderedSet):
        # a testing pragma: exempt the OIDS working set from the test suite's
        # "never call the user's __hash__" assertions. this is a big hammer,
        # but it's safe here: IDS operates on (id, instance) tuples in the
        # working set.
        __sa_hash_exempt__ = True
    def __init__(self, iterable=None):
        IdentitySet.__init__(self)
        # ordered member storage gives insertion-ordered iteration
        self._members = OrderedDict()
        if iterable:
            for o in iterable:
                self.add(o)
if sys.version_info >= (2, 5):
    class PopulateDict(dict):
        """A dict which populates missing values via a creation function.

        Note the creation function takes a key, unlike
        collections.defaultdict.
        """

        def __init__(self, creator):
            self.creator = creator

        def __missing__(self, key):
            # __missing__ (Python 2.5+) keeps successful lookups on the
            # fast dict.__getitem__ path
            value = self.creator(key)
            self[key] = value
            return value
else:
    class PopulateDict(dict):
        """A dict which populates missing values via a creation function."""

        def __init__(self, creator):
            self.creator = creator

        def __getitem__(self, key):
            # pre-2.5 fallback: intercept every lookup and fill on miss
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                value = self.creator(key)
                self[key] = value
                return value
# define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# (aliases over plain builtins / module classes today)
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
def unique_list(seq, hashfunc=None):
    """Return the elements of ``seq`` with duplicates removed, preserving
    the order of first occurrence.

    ``hashfunc``, when given, derives the deduplication key from each
    element; otherwise elements themselves are the keys.
    """
    seen = {}
    result = []
    if not hashfunc:
        for item in seq:
            if item not in seen:
                seen[item] = True
                result.append(item)
    else:
        for item in seq:
            key = hashfunc(item)
            if key not in seen:
                seen[key] = True
                result.append(item)
    return result
class UniqueAppender(object):
    """Appends items to a collection ensuring uniqueness.

    Additional appends() of the same object are ignored.  Membership is
    determined by identity (``is a``) not equality (``==``).
    """

    def __init__(self, data, via=None):
        self.data = data
        self._unique = {}
        # resolve the appender callable once, up front
        if via:
            self._data_appender = getattr(data, via)
        elif hasattr(data, 'append'):
            self._data_appender = data.append
        elif hasattr(data, 'add'):
            self._data_appender = data.add

    def append(self, item):
        key = id(item)
        if key not in self._unique:
            self._data_appender(item)
            self._unique[key] = True

    def __iter__(self):
        return iter(self.data)
def to_list(x, default=None):
    """Coerce ``x`` to a list: None -> ``default``, scalar -> ``[x]``,
    list/tuple passed through unchanged."""
    if x is None:
        return default
    if isinstance(x, (list, tuple)):
        return x
    return [x]
def to_set(x):
    """Coerce ``x`` to a set: None -> empty set, existing sets passed
    through, anything else converted via to_list()."""
    if x is None:
        return set()
    if isinstance(x, set):
        return x
    return set(to_list(x))
def to_column_set(x):
    """Coerce ``x`` to a column_set: None -> empty, existing column_sets
    passed through, anything else converted via to_list()."""
    if x is None:
        return column_set()
    if isinstance(x, column_set):
        return x
    return column_set(to_list(x))
def update_copy(d, _new=None, **kw):
    """Copy the given dict and update with the given values."""
    result = d.copy()
    if _new:
        result.update(_new)
    result.update(**kw)
    return result
def flatten_iterator(x):
    """Given an iterator of which further sub-elements may also be
    iterators, flatten the sub-elements into a single iterator.

    Strings (basestring) are treated as atoms, never iterated
    character-by-character.
    """
    for elem in x:
        if hasattr(elem, '__iter__') and not isinstance(elem, basestring):
            for flattened in flatten_iterator(elem):
                yield flattened
        else:
            yield elem
class WeakIdentityMapping(weakref.WeakKeyDictionary):
    """A WeakKeyDictionary with an object identity index.

    Adds a .by_id dictionary to a regular WeakKeyDictionary.  Trades
    performance during mutation operations for accelerated lookups by id().

    The usual cautions about weak dictionaries and iteration also apply to
    this subclass.
    """
    # sentinel so pop() can tell "no default supplied" from default=None
    _none = symbol('none')
    def __init__(self):
        weakref.WeakKeyDictionary.__init__(self)
        # by_id: id(key) -> value, the accelerated lookup index
        self.by_id = {}
        # _weakrefs: id(key) -> keyed weakref whose callback purges
        # both side indexes when the key is garbage-collected
        self._weakrefs = {}
    def __setitem__(self, object, value):
        oid = id(object)
        self.by_id[oid] = value
        if oid not in self._weakrefs:
            self._weakrefs[oid] = self._ref(object)
        weakref.WeakKeyDictionary.__setitem__(self, object, value)
    def __delitem__(self, object):
        del self._weakrefs[id(object)]
        del self.by_id[id(object)]
        weakref.WeakKeyDictionary.__delitem__(self, object)
    def setdefault(self, object, default=None):
        value = weakref.WeakKeyDictionary.setdefault(self, object, default)
        oid = id(object)
        # the default was stored, so mirror it into the side indexes
        if value is default:
            self.by_id[oid] = default
            if oid not in self._weakrefs:
                self._weakrefs[oid] = self._ref(object)
        return value
    def pop(self, object, default=_none):
        if default is self._none:
            # no default: let a missing key raise KeyError
            value = weakref.WeakKeyDictionary.pop(self, object)
        else:
            value = weakref.WeakKeyDictionary.pop(self, object, default)
        if id(object) in self.by_id:
            del self._weakrefs[id(object)]
            del self.by_id[id(object)]
        return value
    def popitem(self):
        item = weakref.WeakKeyDictionary.popitem(self)
        oid = id(item[0])
        del self._weakrefs[oid]
        del self.by_id[oid]
        return item
    def clear(self):
        # Py2K
        # in 3k, MutableMapping calls popitem()
        self._weakrefs.clear()
        self.by_id.clear()
        # end Py2K
        weakref.WeakKeyDictionary.clear(self)
    def update(self, *a, **kw):
        raise NotImplementedError
    def _cleanup(self, wr, key=None):
        # weakref callback: drop the dead key's entries from both side
        # indexes; tolerate races during interpreter teardown
        if key is None:
            key = wr.key
        try:
            del self._weakrefs[key]
        except (KeyError, AttributeError):  # pragma: no cover
            pass                            # pragma: no cover
        try:
            del self.by_id[key]
        except (KeyError, AttributeError):  # pragma: no cover
            pass                            # pragma: no cover
    class _keyed_weakref(weakref.ref):
        # weakref subclass remembering the id() of its referent so the
        # cleanup callback can find the index entries after death
        def __init__(self, object, callback):
            weakref.ref.__init__(self, object, callback)
            self.key = id(object)
    def _ref(self, object):
        return self._keyed_weakref(object, self._cleanup)
class LRUCache(dict):
    """Dictionary with 'squishy' removal of least
    recently used items.

    Entries are stored as ``[key, value, access_counter]`` triples; the
    counter is bumped on every read and insert, and pruning keeps the
    ``capacity`` highest counters once the size exceeds
    ``capacity * (1 + threshold)``.
    """

    def __init__(self, capacity=100, threshold=.5):
        self.capacity = capacity
        self.threshold = threshold
        self._counter = 0

    def _inc_counter(self):
        self._counter += 1
        return self._counter

    def __getitem__(self, key):
        entry = dict.__getitem__(self, key)
        entry[2] = self._inc_counter()  # mark as recently used
        return entry[1]

    def values(self):
        return [entry[1] for entry in dict.values(self)]

    def setdefault(self, key, value):
        if key in self:
            return self[key]
        self[key] = value
        return value

    def __setitem__(self, key, value):
        entry = dict.get(self, key)
        if entry is None:
            dict.__setitem__(self, key, [key, value, self._inc_counter()])
        else:
            entry[1] = value
        self._manage_size()

    def _manage_size(self):
        # prune only after overshooting capacity by the threshold
        # fraction, amortizing the sort over many inserts
        while len(self) > self.capacity + self.capacity * self.threshold:
            by_counter = sorted(dict.values(self),
                                key=operator.itemgetter(2),
                                reverse=True)
            for entry in by_counter[self.capacity:]:
                try:
                    del self[entry[0]]
                except KeyError:
                    # if we couldnt find a key, most
                    # likely some other thread broke in
                    # on us. loop around and try again
                    break
class ScopedRegistry(object):
    """A Registry that can store one or multiple instances of a single
    class on the basis of a "scope" function.

    The object implements ``__call__`` as the "getter", so by
    calling ``myregistry()`` the contained object is returned
    for the current scope.

    :param createfunc:
      a callable that returns a new object to be placed in the registry

    :param scopefunc:
      a callable that will return a key to store/retrieve an object.
    """

    def __init__(self, createfunc, scopefunc):
        """Construct a new :class:`.ScopedRegistry`.

        :param createfunc: A creation function that will generate
          a new value for the current scope, if none is present.

        :param scopefunc: A function that returns a hashable
          token representing the current scope (such as, current
          thread identifier).
        """
        self.createfunc = createfunc
        self.scopefunc = scopefunc
        self.registry = {}

    def __call__(self):
        key = self.scopefunc()
        if key in self.registry:
            return self.registry[key]
        # not yet created for this scope; build and store atomically
        # via setdefault
        return self.registry.setdefault(key, self.createfunc())

    def has(self):
        """Return True if an object is present in the current scope."""
        return self.scopefunc() in self.registry

    def set(self, obj):
        """Set the value for the current scope."""
        self.registry[self.scopefunc()] = obj

    def clear(self):
        """Clear the current scope, if any."""
        self.registry.pop(self.scopefunc(), None)
class ThreadLocalRegistry(ScopedRegistry):
    """A :class:`.ScopedRegistry` that uses a ``threading.local()``
    variable for storage.
    """

    def __init__(self, createfunc):
        # no scopefunc needed: the thread-local object itself provides
        # per-thread storage for the single ``value`` slot
        self.createfunc = createfunc
        self.registry = threading.local()

    def __call__(self):
        registry = self.registry
        try:
            return registry.value
        except AttributeError:
            # first access in this thread: create and memoize
            registry.value = self.createfunc()
            return registry.value

    def has(self):
        return hasattr(self.registry, "value")

    def set(self, obj):
        self.registry.value = obj

    def clear(self):
        try:
            del self.registry.value
        except AttributeError:
            pass
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
| 27.692053
| 97
| 0.599466
|
import sys
import itertools
import weakref
import operator
from langhelpers import symbol
from compat import time_func, threading
EMPTY_SET = frozenset()
class NamedTuple(tuple):
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
if labels:
t.__dict__.update(zip(labels, vals))
t._labels = labels
return t
def keys(self):
return [l for l in self._labels if l is not None]
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = \
update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self), )
def union(self, d):
if not self:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
def __init__(self, data):
self.__dict__['_data'] = data
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.itervalues()
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, object):
self._data[key] = object
def __getstate__(self):
return {'_data': self.__dict__['_data']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return self._data.keys()
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
class OrderedProperties(Properties):
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
class OrderedDict(dict):
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def values(self):
return [self[key] for key in self._list]
def itervalues(self):
return iter([self[key] for key in self._list])
def keys(self):
return list(self._list)
def iterkeys(self):
return iter(self.keys())
def items(self):
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, object):
if key not in self:
try:
self._list.append(key)
except AttributeError:
self._list = [key]
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self.update(d)
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [ a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
set.symmetric_difference_update(self, other)
self._list = [ a for a in self._list if a in self]
self._list += [ a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
set.difference_update(self, other)
self._list = [ a for a in self._list if a in self]
return self
__isub__ = difference_update
class IdentitySet(object):
_working_set = set
def __init__(self, iterable=None):
self._members = dict()
if iterable:
for o in iterable:
self.add(o)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError('pop from an empty set')
def clear(self):
self._members.clear()
def __sub__(self, other):
return self.difference(other)
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools.ifilterfalse(other._members.__contains__,
self._members.iterkeys()):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools.ifilterfalse(self._members.__contains__,
other._members.iterkeys()):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = type(self)()
result._members.update(
self._working_set(self._member_id_tuples()).union(_iter_id(iterable)))
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members = self.union(iterable)._members
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = type(self)()
result._members.update(
self._working_set(self._member_id_tuples()).difference(_iter_id(iterable)))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
result._members.update(
self._working_set(self._member_id_tuples()).intersection(_iter_id(iterable)))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = type(self)()
result._members.update(
self._working_set(self._member_id_tuples()).symmetric_difference(_iter_id(iterable)))
return result
def _member_id_tuples(self):
return ((id(v), v) for v in self._members.itervalues())
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.symmetric_difference(other)
return self
def copy(self):
return type(self)(self._members.itervalues())
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return self._members.itervalues()
def __hash__(self):
raise TypeError('set objects are unhashable')
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._members.values())
class OrderedIdentitySet(IdentitySet):
class _working_set(OrderedSet):
# "never call the user's __hash__" assertions. this is a big hammer,
# working set.
__sa_hash_exempt__ = True
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
if sys.version_info >= (2, 5):
class PopulateDict(dict):
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
else:
class PopulateDict(dict):
"""A dict which populates missing values via a creation function."""
def __init__(self, creator):
self.creator = creator
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
self[key] = value = self.creator(key)
return value
# define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
def unique_list(seq, hashfunc=None):
seen = {}
if not hashfunc:
return [x for x in seq
if x not in seen
and not seen.__setitem__(x, True)]
else:
return [x for x in seq
if hashfunc(x) not in seen
and not seen.__setitem__(hashfunc(x), True)]
class UniqueAppender(object):
def __init__(self, data, via=None):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, 'append'):
self._data_appender = data.append
elif hasattr(data, 'add'):
self._data_appender = data.add
def append(self, item):
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item)
self._unique[id_] = True
def __iter__(self):
return iter(self.data)
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x):
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
def flatten_iterator(x):
for elem in x:
if not isinstance(elem, basestring) and hasattr(elem, '__iter__'):
for y in flatten_iterator(elem):
yield y
else:
yield elem
class WeakIdentityMapping(weakref.WeakKeyDictionary):
    """A WeakKeyDictionary augmented with id-based side tables.

    ``by_id`` maps ``id(object)`` to the stored value so lookups by id
    remain possible; ``_weakrefs`` holds one keyed weakref per object so
    the side tables are purged when the object is garbage collected.
    """
    # sentinel distinguishing "no default supplied" in pop();
    # ``symbol`` is defined elsewhere in this module.
    _none = symbol('none')
    def __init__(self):
        weakref.WeakKeyDictionary.__init__(self)
        # id(object) -> value; mirrors the weak mapping
        self.by_id = {}
        # id(object) -> _keyed_weakref; keeps cleanup callbacks alive
        self._weakrefs = {}
    def __setitem__(self, object, value):
        oid = id(object)
        self.by_id[oid] = value
        # register a cleanup weakref only once per object id
        if oid not in self._weakrefs:
            self._weakrefs[oid] = self._ref(object)
        weakref.WeakKeyDictionary.__setitem__(self, object, value)
    def __delitem__(self, object):
        del self._weakrefs[id(object)]
        del self.by_id[id(object)]
        weakref.WeakKeyDictionary.__delitem__(self, object)
    def setdefault(self, object, default=None):
        value = weakref.WeakKeyDictionary.setdefault(self, object, default)
        oid = id(object)
        # ``value is default`` means the key was newly inserted;
        # mirror the insertion into the id-based side tables
        if value is default:
            self.by_id[oid] = default
            if oid not in self._weakrefs:
                self._weakrefs[oid] = self._ref(object)
        return value
    def pop(self, object, default=_none):
        # with no default supplied, let KeyError propagate like dict.pop
        if default is self._none:
            value = weakref.WeakKeyDictionary.pop(self, object)
        else:
            value = weakref.WeakKeyDictionary.pop(self, object, default)
        if id(object) in self.by_id:
            del self._weakrefs[id(object)]
            del self.by_id[id(object)]
        return value
    def popitem(self):
        item = weakref.WeakKeyDictionary.popitem(self)
        oid = id(item[0])
        del self._weakrefs[oid]
        del self.by_id[oid]
        return item
    def clear(self):
        # Py2K
        # in 3k, MutableMapping calls popitem()
        self._weakrefs.clear()
        self.by_id.clear()
        # end Py2K
        weakref.WeakKeyDictionary.clear(self)
    def update(self, *a, **kw):
        # bulk update unsupported: the side tables would need per-item upkeep
        raise NotImplementedError
    def _cleanup(self, wr, key=None):
        # Weakref callback: drop the side-table entries for a dead object.
        # AttributeError is tolerated for interpreter-teardown partial state.
        if key is None:
            key = wr.key
        try:
            del self._weakrefs[key]
        except (KeyError, AttributeError):  # pragma: no cover
            pass  # pragma: no cover
        try:
            del self.by_id[key]
        except (KeyError, AttributeError):  # pragma: no cover
            pass  # pragma: no cover
    class _keyed_weakref(weakref.ref):
        # a weakref that remembers the referent's id so _cleanup can find
        # the side-table entries after the object itself is gone
        def __init__(self, object, callback):
            weakref.ref.__init__(self, object, callback)
            self.key = id(object)
    def _ref(self, object):
        return self._keyed_weakref(object, self._cleanup)
class LRUCache(dict):
    """Dictionary of bounded size with approximate least-recently-used
    eviction.

    Each entry is stored as ``[key, value, access_stamp]``.  Once the
    size exceeds ``capacity * (1 + threshold)``, the ``capacity`` most
    recently touched entries are kept and the rest evicted.
    """

    def __init__(self, capacity=100, threshold=.5):
        self.capacity = capacity
        self.threshold = threshold
        self._counter = 0

    def _inc_counter(self):
        # monotonic access stamp; larger == more recently used
        self._counter += 1
        return self._counter

    def __getitem__(self, key):
        record = dict.__getitem__(self, key)
        record[2] = self._inc_counter()  # touch on read
        return record[1]

    def values(self):
        return [record[1] for record in dict.values(self)]

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
            return value
        return self[key]

    def __setitem__(self, key, value):
        record = dict.get(self, key)
        if record is not None:
            record[1] = value
        else:
            dict.__setitem__(self, key, [key, value, self._inc_counter()])
        self._manage_size()

    def _manage_size(self):
        # Evict only once the size overshoots capacity by the threshold
        # fraction, then trim back down to exactly `capacity` entries.
        while len(self) > self.capacity + self.capacity * self.threshold:
            freshest_first = sorted(dict.values(self),
                                    key=lambda rec: rec[2],
                                    reverse=True)
            for record in freshest_first[self.capacity:]:
                try:
                    del self[record[0]]
                except KeyError:
                    # another thread removed it first; retry the outer loop
                    break
class ScopedRegistry(object):
    """Provide one object per scope, created on demand.

    ``createfunc`` builds a new object; ``scopefunc`` returns the
    hashable key identifying the current scope.
    """

    def __init__(self, createfunc, scopefunc):
        self.createfunc = createfunc
        self.scopefunc = scopefunc
        self.registry = {}

    def __call__(self):
        # return the current scope's object, creating it on first access
        key = self.scopefunc()
        if key in self.registry:
            return self.registry[key]
        return self.registry.setdefault(key, self.createfunc())

    def has(self):
        # True if the current scope already has an object
        return self.scopefunc() in self.registry

    def set(self, obj):
        # replace the current scope's object
        self.registry[self.scopefunc()] = obj

    def clear(self):
        # discard the current scope's object, if any
        self.registry.pop(self.scopefunc(), None)
class ThreadLocalRegistry(ScopedRegistry):
    """A ScopedRegistry whose scope is the current thread, backed by
    ``threading.local`` instead of a key-computing function."""

    def __init__(self, createfunc):
        self.createfunc = createfunc
        self.registry = threading.local()

    def __call__(self):
        # threading.local isolates ``value`` per thread, so LBYL is safe
        reg = self.registry
        if not hasattr(reg, "value"):
            reg.value = self.createfunc()
        return reg.value

    def has(self):
        return hasattr(self.registry, "value")

    def set(self, obj):
        self.registry.value = obj

    def clear(self):
        if hasattr(self.registry, "value"):
            del self.registry.value
def _iter_id(iterable):
for item in iterable:
yield id(item), item
| true
| true
|
1c40732d6dfeb4b553373661f07fac7e1d87c994
| 681
|
py
|
Python
|
report/bombing.py
|
Yagg/gran
|
37cf8abab765224427fb06535c5211d824ade711
|
[
"MIT"
] | null | null | null |
report/bombing.py
|
Yagg/gran
|
37cf8abab765224427fb06535c5211d824ade711
|
[
"MIT"
] | null | null | null |
report/bombing.py
|
Yagg/gran
|
37cf8abab765224427fb06535c5211d824ade711
|
[
"MIT"
] | null | null | null |
# -*- coding: utf_8 -*-
__author__ = 'Yagg'
class Bombing:
    """Report record describing one bombing run against a planet.

    Pure value object: every constructor argument is stored verbatim as
    an attribute of the same name.
    """

    def __init__(self, attackerName, defenderName, planetNum, planetName, population, industry, production, capitals,
                 materials, colonists, attackStrength, status):
        # assignments follow the parameter order for readability
        self.attackerName = attackerName
        self.defenderName = defenderName
        self.planetNum = planetNum
        self.planetName = planetName
        self.population = population
        self.industry = industry
        self.production = production
        self.capitals = capitals
        self.materials = materials
        self.colonists = colonists
        self.attackStrength = attackStrength
        self.status = status
| 34.05
| 117
| 0.662261
|
__author__ = 'Yagg'
class Bombing:
def __init__(self, attackerName, defenderName, planetNum, planetName, population, industry, production, capitals,
materials, colonists, attackStrength, status):
self.attackerName = attackerName
self.defenderName = defenderName
self.planetNum = planetNum
self.planetName = planetName
self.population = population
self.industry = industry
self.capitals = capitals
self.materials = materials
self.colonists = colonists
self.attackStrength = attackStrength
self.status = status
self.production = production
| true
| true
|
1c407462bbdaa26d432e1590a93ca57d9afdd86b
| 1,938
|
py
|
Python
|
libs/functional.py
|
fan84sunny/2021-training-courses
|
b1327d572563b3928e740d92d2cf202315096093
|
[
"MIT"
] | null | null | null |
libs/functional.py
|
fan84sunny/2021-training-courses
|
b1327d572563b3928e740d92d2cf202315096093
|
[
"MIT"
] | null | null | null |
libs/functional.py
|
fan84sunny/2021-training-courses
|
b1327d572563b3928e740d92d2cf202315096093
|
[
"MIT"
] | null | null | null |
from typing import List, Union, Tuple
import numpy as np
class Variable:
    """Holds a scalar value together with a gradient slot."""

    def __init__(self, value=None):
        self.value = value  # wrapped value
        self.grad = None    # gradient; None until a backward pass fills it
class Polynomial:
    """Polynomial model over a coefficient vector ``a`` (kept as ndarray)."""
    def __init__(self, a: List = None):
        # NOTE(review): np.array(None) yields an object-dtype scalar array;
        # presumably callers always pass a real coefficient list — confirm.
        self.a = np.array(a)
    def __call__(self, x: Union[float, int]) -> Variable:
        # Stub: evaluation is not implemented yet (implicitly returns None,
        # despite the declared Variable return type).
        pass
def shuffle_data(x, y):
    """Shuffle ``x`` and ``y`` in unison, keeping (x[i], y[i]) pairs
    together, and return the shuffled pair of 1-D arrays."""
    assert len(x) == len(y)
    # Stack to shape (2, n), transpose to (n, 2) so that each row is one
    # (x, y) pair, then shuffle rows in place.
    pairs = np.vstack((x, y)).T
    np.random.shuffle(pairs)
    shuffled = pairs.T
    return shuffled[0, :], shuffled[1, :]
def regression_sgd(x, y, num_samples, num_iterations, batch_size, learning_rate) -> Tuple[np.ndarray, np.ndarray]:
    """Fit a line y ~ m*x + b with mini-batch stochastic gradient descent.

    Args:
        x, y: paired 1-D training arrays.
        num_samples: number of samples consumed per epoch.
        num_iterations: number of epochs.
        batch_size: mini-batch size.
        learning_rate: SGD step size.

    Returns:
        (m_i, b_i): arrays of length ``num_iterations + 1`` holding the
        parameter values after each epoch (index 0 = random init).
    """
    m, b = np.random.randn(), np.random.randn()  # random init of slope/intercept
    m_i, b_i = np.zeros(num_iterations + 1), np.zeros(num_iterations + 1)
    m_i[0] = m
    b_i[0] = b
    for i in range(num_iterations):
        # reshuffle each epoch so batches differ between epochs
        x, y = shuffle_data(x, y)
        for start in range(0, num_samples, batch_size):
            # BUG FIX: the original compared the batch end against
            # num_iterations instead of num_samples.  numpy slicing clips
            # out-of-range stops, so plain slices handle the final
            # (possibly partial) batch correctly.
            x_batch = x[start:start + batch_size]
            y_batch = y[start:start + batch_size]
            n = len(x_batch)  # actual batch length (last batch may be partial)
            residual = m * x_batch + b - y_batch
            # Gradients of MSE = mean(residual**2):
            #   dMSE/dm = 2*mean(residual*x),  dMSE/db = 2*mean(residual)
            # (the original dropped the factor 2 on b_grad and always
            # divided by batch_size, even for a short final batch)
            m_grad = 2 * np.sum(x_batch * residual) / n
            b_grad = 2 * np.sum(residual) / n
            # update parameters once per batch
            m = m - m_grad * learning_rate
            b = b - b_grad * learning_rate
        # record parameters once per epoch
        m_i[i + 1] = m
        b_i[i + 1] = b
    return (m_i, b_i)
| 28.925373
| 114
| 0.567595
|
from typing import List, Union, Tuple
import numpy as np
class Variable:
def __init__(self, value=None):
self.value = value
self.grad = None
class Polynomial:
def __init__(self, a: List = None):
self.a = np.array(a)
def __call__(self, x: Union[float, int]) -> Variable:
pass
def shuffle_data(x, y):
assert len(x) == len(y)
training_data = np.vstack((x, y))
training_data = training_data.T
np.random.shuffle(training_data)
training_data = training_data.T
X = training_data[0, :]
Y = training_data[1, :]
return X, Y
def regression_sgd(x, y, num_samples, num_iterations, batch_size, learning_rate) -> Tuple[np.ndarray, np.ndarray]:
m, b = np.random.randn(), np.random.randn()
m_i, b_i = np.zeros(num_iterations + 1), np.zeros(num_iterations + 1)
m_i[0] = m
b_i[0] = b
for i in range(num_iterations):
x, y = shuffle_data(x, y)
for start in range(0, num_samples, batch_size):
stop = start + batch_size
if stop <= num_iterations:
x_batch_data, y_batch_data = x[start:stop], y[start:stop]
else:
x_batch_data, y_batch_data = x[start:num_samples], y[start:num_samples]
y_exp = m * x_batch_data + b
MSE = np.sum((y_exp - y_batch_data) ** 2) / batch_size
m_grad = np.sum(2 * x_batch_data *
(y_exp - y_batch_data)) / batch_size
b_grad = np.sum(y_exp - y_batch_data) / batch_size
m = m - m_grad * learning_rate
b = b - b_grad * learning_rate
m_i[i+1] = m
b_i[i+1] = b
return (m_i, b_i)
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.